| hash (string, length 40) | date (2022-04-19 15:26:27 to 2025-03-21 10:49:23) | author (86 classes) | commit_message (string, length 12 to 115) | is_merge (bool, 1 class) | git_diff (string, length 214 to 553k, nullable) | type (15 classes) | masked_commit_message (string, length 8 to 110) |
|---|---|---|---|---|---|---|---|
2a971b0fffb77b2df005dc366d9415a605f7efb7
|
2024-02-13 19:02:46
|
Cancai Cai
|
chore: update link to official website link (#3299)
| false
|
diff --git a/README.md b/README.md
index 3cb0db03ee24..1135e15b2ddd 100644
--- a/README.md
+++ b/README.md
@@ -175,7 +175,7 @@ Please refer to [contribution guidelines](CONTRIBUTING.md) for more information.
## Acknowledgement
- GreptimeDB uses [Apache Arrow™](https://arrow.apache.org/) as the memory model and [Apache Parquet™](https://parquet.apache.org/) as the persistent file format.
-- GreptimeDB's query engine is powered by [Apache Arrow DataFusion™](https://github.com/apache/arrow-datafusion).
+- GreptimeDB's query engine is powered by [Apache Arrow DataFusion™](https://arrow.apache.org/datafusion/).
- [Apache OpenDAL™](https://opendal.apache.org) gives GreptimeDB a very general and elegant data access abstraction layer.
- GreptimeDB's meta service is based on [etcd](https://etcd.io/).
- GreptimeDB uses [RustPython](https://github.com/RustPython/RustPython) for experimental embedded python scripting.
|
chore
|
update link to official website link (#3299)
|
856bba5d95fbbdee4efe1622f1ab34abe5726c0f
|
2025-01-03 13:42:49
|
yihong
|
fix: better fmt check from 40s to 4s (#5279)
| false
|
diff --git a/scripts/check-snafu.py b/scripts/check-snafu.py
index b91950692bd8..662f6758d53d 100644
--- a/scripts/check-snafu.py
+++ b/scripts/check-snafu.py
@@ -14,6 +14,7 @@
import os
import re
+from multiprocessing import Pool
def find_rust_files(directory):
@@ -33,13 +34,11 @@ def extract_branch_names(file_content):
return pattern.findall(file_content)
-def check_snafu_in_files(branch_name, rust_files):
+def check_snafu_in_files(branch_name, rust_files_content):
branch_name_snafu = f"{branch_name}Snafu"
- for rust_file in rust_files:
- with open(rust_file, "r") as file:
- content = file.read()
- if branch_name_snafu in content:
- return True
+ for content in rust_files_content.values():
+ if branch_name_snafu in content:
+ return True
return False
@@ -49,21 +48,24 @@ def main():
for error_file in error_files:
with open(error_file, "r") as file:
- content = file.read()
- branch_names.extend(extract_branch_names(content))
+ branch_names.extend(extract_branch_names(file.read()))
+
+ # Read all rust files into memory once
+ rust_files_content = {}
+ for rust_file in other_rust_files:
+ with open(rust_file, "r") as file:
+ rust_files_content[rust_file] = file.read()
- unused_snafu = [
- branch_name
- for branch_name in branch_names
- if not check_snafu_in_files(branch_name, other_rust_files)
- ]
+ with Pool() as pool:
+ results = pool.starmap(
+ check_snafu_in_files, [(bn, rust_files_content) for bn in branch_names]
+ )
+ unused_snafu = [bn for bn, found in zip(branch_names, results) if not found]
if unused_snafu:
print("Unused error variants:")
for name in unused_snafu:
print(name)
-
- if unused_snafu:
raise SystemExit(1)
diff --git a/src/common/datasource/tests/orc/write.py b/src/common/datasource/tests/orc/write.py
index f0e279229996..aa97c09a6367 100644
--- a/src/common/datasource/tests/orc/write.py
+++ b/src/common/datasource/tests/orc/write.py
@@ -35,10 +35,23 @@
"bigint_other": [5, -5, 1, 5, 5],
"utf8_increase": ["a", "bb", "ccc", "dddd", "eeeee"],
"utf8_decrease": ["eeeee", "dddd", "ccc", "bb", "a"],
- "timestamp_simple": [datetime.datetime(2023, 4, 1, 20, 15, 30, 2000), datetime.datetime.fromtimestamp(int('1629617204525777000')/1000000000), datetime.datetime(2023, 1, 1), datetime.datetime(2023, 2, 1), datetime.datetime(2023, 3, 1)],
- "date_simple": [datetime.date(2023, 4, 1), datetime.date(2023, 3, 1), datetime.date(2023, 1, 1), datetime.date(2023, 2, 1), datetime.date(2023, 3, 1)]
+ "timestamp_simple": [
+ datetime.datetime(2023, 4, 1, 20, 15, 30, 2000),
+ datetime.datetime.fromtimestamp(int("1629617204525777000") / 1000000000),
+ datetime.datetime(2023, 1, 1),
+ datetime.datetime(2023, 2, 1),
+ datetime.datetime(2023, 3, 1),
+ ],
+ "date_simple": [
+ datetime.date(2023, 4, 1),
+ datetime.date(2023, 3, 1),
+ datetime.date(2023, 1, 1),
+ datetime.date(2023, 2, 1),
+ datetime.date(2023, 3, 1),
+ ],
}
+
def infer_schema(data):
schema = "struct<"
for key, value in data.items():
@@ -56,7 +69,7 @@ def infer_schema(data):
elif key.startswith("date"):
dt = "date"
else:
- print(key,value,dt)
+ print(key, value, dt)
raise NotImplementedError
if key.startswith("double"):
dt = "double"
@@ -68,7 +81,6 @@ def infer_schema(data):
return schema
-
def _write(
schema: str,
data,
|
fix
|
better fmt check from 40s to 4s (#5279)
|
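The speed-up in the commit above comes from two changes visible in the diff: each Rust file is read into memory exactly once, and the per-variant `...Snafu` membership checks are fanned out over a `multiprocessing.Pool`. Below is a minimal sketch of that pattern, not the repository's script verbatim; `find_unused_variants` is an illustrative wrapper name introduced here, while `check_snafu_in_files` mirrors the function in the diff.

```python
# Illustrative sketch of the read-once-then-fan-out pattern from check-snafu.py.
from multiprocessing import Pool


def check_snafu_in_files(branch_name, rust_files_content):
    """Return True if `<branch_name>Snafu` occurs in any file's content."""
    needle = f"{branch_name}Snafu"
    return any(needle in content for content in rust_files_content.values())


def find_unused_variants(branch_names, rust_files):
    # Read each file exactly once instead of re-reading it for every variant.
    rust_files_content = {}
    for path in rust_files:
        with open(path, "r") as f:
            rust_files_content[path] = f.read()

    # Fan the membership checks out to a process pool, one task per variant.
    with Pool() as pool:
        results = pool.starmap(
            check_snafu_in_files,
            [(name, rust_files_content) for name in branch_names],
        )
    return [name for name, found in zip(branch_names, results) if not found]
```

Reading the files once and reusing the in-memory dict across workers avoids the repeated per-variant disk reads of the old implementation, which is presumably where most of the original 40 seconds went.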
e3927ea6f715952140f96d37a65384638381a5c6
|
2024-11-25 07:43:11
|
Ruihang Xia
|
fix: prevent metadata region from inheriting database ttl (#5044)
| false
|
diff --git a/src/metric-engine/src/engine/create.rs b/src/metric-engine/src/engine/create.rs
index 0523dd1e5389..dd3b5cb1f358 100644
--- a/src/metric-engine/src/engine/create.rs
+++ b/src/metric-engine/src/engine/create.rs
@@ -458,7 +458,7 @@ impl MetricEngineInner {
// remove TTL and APPEND_MODE option
let mut options = request.options.clone();
- options.remove(TTL_KEY);
+ options.insert(TTL_KEY.to_string(), "10000 years".to_string());
options.remove(APPEND_MODE_KEY);
RegionCreateRequest {
@@ -724,6 +724,9 @@ mod test {
metadata_region_request.region_dir,
"/test_dir/metadata/".to_string()
);
- assert!(!metadata_region_request.options.contains_key("ttl"));
+ assert_eq!(
+ metadata_region_request.options.get("ttl").unwrap(),
+ "10000 years"
+ );
}
}
|
fix
|
prevent metadata region from inheriting database ttl (#5044)
|
9c79baca4bd90b6f1c849a9652ccde0cef271bec
|
2024-10-29 13:27:17
|
Zhenchi
|
feat(index): support building inverted index for the field column on Mito (#4887)
| false
|
diff --git a/src/index/src/inverted_index/search/index_apply/predicates_apply.rs b/src/index/src/inverted_index/search/index_apply/predicates_apply.rs
index face61bcedcb..bead0761d7b2 100644
--- a/src/index/src/inverted_index/search/index_apply/predicates_apply.rs
+++ b/src/index/src/inverted_index/search/index_apply/predicates_apply.rs
@@ -114,17 +114,17 @@ impl PredicatesIndexApplier {
.partition_in_place(|(_, ps)| ps.iter().any(|p| matches!(p, Predicate::InList(_))));
let mut iter = predicates.into_iter();
for _ in 0..in_list_index {
- let (tag_name, predicates) = iter.next().unwrap();
+ let (column_name, predicates) = iter.next().unwrap();
let fst_applier = Box::new(KeysFstApplier::try_from(predicates)?) as _;
- fst_appliers.push((tag_name, fst_applier));
+ fst_appliers.push((column_name, fst_applier));
}
- for (tag_name, predicates) in iter {
+ for (column_name, predicates) in iter {
if predicates.is_empty() {
continue;
}
let fst_applier = Box::new(IntersectionFstApplier::try_from(predicates)?) as _;
- fst_appliers.push((tag_name, fst_applier));
+ fst_appliers.push((column_name, fst_applier));
}
Ok(PredicatesIndexApplier { fst_appliers })
diff --git a/src/mito2/src/read/scan_region.rs b/src/mito2/src/read/scan_region.rs
index 2c61124180f5..6e2a0344338b 100644
--- a/src/mito2/src/read/scan_region.rs
+++ b/src/mito2/src/read/scan_region.rs
@@ -393,20 +393,29 @@ impl ScanRegion {
.and_then(|c| c.index_cache())
.cloned();
+ // TODO(zhongzc): currently we only index tag columns, need to support field columns.
+ let ignore_column_ids = &self
+ .version
+ .options
+ .index_options
+ .inverted_index
+ .ignore_column_ids;
+ let indexed_column_ids = self
+ .version
+ .metadata
+ .primary_key
+ .iter()
+ .filter(|id| !ignore_column_ids.contains(id))
+ .copied()
+ .collect::<HashSet<_>>();
+
InvertedIndexApplierBuilder::new(
self.access_layer.region_dir().to_string(),
self.access_layer.object_store().clone(),
file_cache,
index_cache,
self.version.metadata.as_ref(),
- self.version
- .options
- .index_options
- .inverted_index
- .ignore_column_ids
- .iter()
- .copied()
- .collect(),
+ indexed_column_ids,
self.access_layer.puffin_manager_factory().clone(),
)
.build(&self.request.filters)
diff --git a/src/mito2/src/sst/index.rs b/src/mito2/src/sst/index.rs
index 2fcfd8ee8cfe..f0ee66ab01c3 100644
--- a/src/mito2/src/sst/index.rs
+++ b/src/mito2/src/sst/index.rs
@@ -20,6 +20,7 @@ pub(crate) mod puffin_manager;
mod statistics;
mod store;
+use std::collections::HashSet;
use std::num::NonZeroUsize;
use common_telemetry::{debug, warn};
@@ -212,13 +213,28 @@ impl<'a> IndexerBuilder<'a> {
segment_row_count = row_group_size;
}
+ // TODO(zhongzc): currently we only index tag columns, need to support field columns.
+ let indexed_column_ids = self
+ .metadata
+ .primary_key
+ .iter()
+ .filter(|id| {
+ !self
+ .index_options
+ .inverted_index
+ .ignore_column_ids
+ .contains(id)
+ })
+ .copied()
+ .collect::<HashSet<_>>();
+
let indexer = InvertedIndexer::new(
self.file_id,
self.metadata,
self.intermediate_manager.clone(),
self.inverted_index_config.mem_threshold_on_create(),
segment_row_count,
- &self.index_options.inverted_index.ignore_column_ids,
+ indexed_column_ids,
);
Some(indexer)
diff --git a/src/mito2/src/sst/index/inverted_index/applier/builder.rs b/src/mito2/src/sst/index/inverted_index/applier/builder.rs
index 6d37ffc02305..603cf5aa23fd 100644
--- a/src/mito2/src/sst/index/inverted_index/applier/builder.rs
+++ b/src/mito2/src/sst/index/inverted_index/applier/builder.rs
@@ -20,7 +20,6 @@ mod regex_match;
use std::collections::{HashMap, HashSet};
-use api::v1::SemanticType;
use common_telemetry::warn;
use datafusion_common::ScalarValue;
use datafusion_expr::{BinaryExpr, Expr, Operator};
@@ -55,8 +54,8 @@ pub(crate) struct InvertedIndexApplierBuilder<'a> {
/// Metadata of the region, used to get metadata like column type.
metadata: &'a RegionMetadata,
- /// Column ids to ignore.
- ignore_column_ids: HashSet<ColumnId>,
+ /// Column ids of the columns that are indexed.
+ indexed_column_ids: HashSet<ColumnId>,
/// Stores predicates during traversal on the Expr tree.
output: HashMap<ColumnId, Vec<Predicate>>,
@@ -76,7 +75,7 @@ impl<'a> InvertedIndexApplierBuilder<'a> {
file_cache: Option<FileCacheRef>,
index_cache: Option<InvertedIndexCacheRef>,
metadata: &'a RegionMetadata,
- ignore_column_ids: HashSet<ColumnId>,
+ indexed_column_ids: HashSet<ColumnId>,
puffin_manager_factory: PuffinManagerFactory,
) -> Self {
Self {
@@ -84,7 +83,7 @@ impl<'a> InvertedIndexApplierBuilder<'a> {
object_store,
file_cache,
metadata,
- ignore_column_ids,
+ indexed_column_ids,
output: HashMap::default(),
index_cache,
puffin_manager_factory,
@@ -156,9 +155,9 @@ impl<'a> InvertedIndexApplierBuilder<'a> {
self.output.entry(column_id).or_default().push(predicate);
}
- /// Helper function to get the column id and the column type of a tag column.
+ /// Helper function to get the column id and the column type of a column.
/// Returns `None` if the column is not a tag column or if the column is ignored.
- fn tag_column_id_and_type(
+ fn column_id_and_type(
&self,
column_name: &str,
) -> Result<Option<(ColumnId, ConcreteDataType)>> {
@@ -169,11 +168,7 @@ impl<'a> InvertedIndexApplierBuilder<'a> {
column: column_name,
})?;
- if self.ignore_column_ids.contains(&column.column_id) {
- return Ok(None);
- }
-
- if column.semantic_type != SemanticType::Tag {
+ if !self.indexed_column_ids.contains(&column.column_id) {
return Ok(None);
}
@@ -330,7 +325,7 @@ mod tests {
None,
None,
&metadata,
- HashSet::default(),
+ HashSet::from_iter([1, 2, 3]),
facotry,
);
diff --git a/src/mito2/src/sst/index/inverted_index/applier/builder/between.rs b/src/mito2/src/sst/index/inverted_index/applier/builder/between.rs
index ae4de2170a11..0a196e6f1ac6 100644
--- a/src/mito2/src/sst/index/inverted_index/applier/builder/between.rs
+++ b/src/mito2/src/sst/index/inverted_index/applier/builder/between.rs
@@ -28,7 +28,7 @@ impl InvertedIndexApplierBuilder<'_> {
let Some(column_name) = Self::column_name(&between.expr) else {
return Ok(());
};
- let Some((column_id, data_type)) = self.tag_column_id_and_type(column_name)? else {
+ let Some((column_id, data_type)) = self.column_id_and_type(column_name)? else {
return Ok(());
};
let Some(low) = Self::nonnull_lit(&between.low) else {
@@ -78,7 +78,7 @@ mod tests {
None,
None,
&metadata,
- HashSet::default(),
+ HashSet::from_iter([1, 2, 3]),
facotry,
);
@@ -121,7 +121,7 @@ mod tests {
None,
None,
&metadata,
- HashSet::default(),
+ HashSet::from_iter([1, 2, 3]),
facotry,
);
@@ -147,7 +147,7 @@ mod tests {
None,
None,
&metadata,
- HashSet::default(),
+ HashSet::from_iter([1, 2, 3]),
facotry,
);
@@ -159,7 +159,24 @@ mod tests {
};
builder.collect_between(&between).unwrap();
- assert!(builder.output.is_empty());
+
+ let predicates = builder.output.get(&3).unwrap();
+ assert_eq!(predicates.len(), 1);
+ assert_eq!(
+ predicates[0],
+ Predicate::Range(RangePredicate {
+ range: Range {
+ lower: Some(Bound {
+ inclusive: true,
+ value: encoded_string("abc"),
+ }),
+ upper: Some(Bound {
+ inclusive: true,
+ value: encoded_string("def"),
+ }),
+ }
+ })
+ );
}
#[test]
@@ -173,7 +190,7 @@ mod tests {
None,
None,
&metadata,
- HashSet::default(),
+ HashSet::from_iter([1, 2, 3]),
facotry,
);
@@ -200,7 +217,7 @@ mod tests {
None,
None,
&metadata,
- HashSet::default(),
+ HashSet::from_iter([1, 2, 3]),
facotry,
);
diff --git a/src/mito2/src/sst/index/inverted_index/applier/builder/comparison.rs b/src/mito2/src/sst/index/inverted_index/applier/builder/comparison.rs
index 2c9fa861eaea..cdaec9f94e95 100644
--- a/src/mito2/src/sst/index/inverted_index/applier/builder/comparison.rs
+++ b/src/mito2/src/sst/index/inverted_index/applier/builder/comparison.rs
@@ -114,7 +114,7 @@ impl InvertedIndexApplierBuilder<'_> {
let Some(lit) = Self::nonnull_lit(literal) else {
return Ok(());
};
- let Some((column_id, data_type)) = self.tag_column_id_and_type(column_name)? else {
+ let Some((column_id, data_type)) = self.column_id_and_type(column_name)? else {
return Ok(());
};
@@ -234,7 +234,7 @@ mod tests {
None,
None,
&metadata,
- HashSet::default(),
+ HashSet::from_iter([1, 2, 3]),
facotry,
);
@@ -263,7 +263,7 @@ mod tests {
None,
None,
&metadata,
- HashSet::default(),
+ HashSet::from_iter([1, 2, 3]),
facotry,
);
@@ -283,14 +283,28 @@ mod tests {
None,
None,
&metadata,
- HashSet::default(),
+ HashSet::from_iter([1, 2, 3]),
facotry,
);
builder
.collect_comparison_expr(&field_column(), &Operator::Lt, &string_lit("abc"))
.unwrap();
- assert!(builder.output.is_empty());
+
+ let predicates = builder.output.get(&3).unwrap();
+ assert_eq!(predicates.len(), 1);
+ assert_eq!(
+ predicates[0],
+ Predicate::Range(RangePredicate {
+ range: Range {
+ lower: None,
+ upper: Some(Bound {
+ inclusive: false,
+ value: encoded_string("abc"),
+ }),
+ }
+ })
+ );
}
#[test]
@@ -304,7 +318,7 @@ mod tests {
None,
None,
&metadata,
- HashSet::default(),
+ HashSet::from_iter([1, 2, 3]),
facotry,
);
diff --git a/src/mito2/src/sst/index/inverted_index/applier/builder/eq_list.rs b/src/mito2/src/sst/index/inverted_index/applier/builder/eq_list.rs
index 6d142d64025e..1d07cca48724 100644
--- a/src/mito2/src/sst/index/inverted_index/applier/builder/eq_list.rs
+++ b/src/mito2/src/sst/index/inverted_index/applier/builder/eq_list.rs
@@ -31,7 +31,7 @@ impl InvertedIndexApplierBuilder<'_> {
let Some(lit) = Self::nonnull_lit(right).or_else(|| Self::nonnull_lit(left)) else {
return Ok(());
};
- let Some((column_id, data_type)) = self.tag_column_id_and_type(column_name)? else {
+ let Some((column_id, data_type)) = self.column_id_and_type(column_name)? else {
return Ok(());
};
@@ -59,7 +59,7 @@ impl InvertedIndexApplierBuilder<'_> {
let Some(lit) = Self::nonnull_lit(right).or_else(|| Self::nonnull_lit(left)) else {
return Ok(());
};
- let Some((column_id, data_type)) = self.tag_column_id_and_type(column_name)? else {
+ let Some((column_id, data_type)) = self.column_id_and_type(column_name)? else {
return Ok(());
};
@@ -140,7 +140,7 @@ mod tests {
None,
None,
&metadata,
- HashSet::default(),
+ HashSet::from_iter([1, 2, 3]),
facotry,
);
@@ -178,14 +178,22 @@ mod tests {
None,
None,
&metadata,
- HashSet::default(),
+ HashSet::from_iter([1, 2, 3]),
facotry,
);
builder
.collect_eq(&field_column(), &string_lit("abc"))
.unwrap();
- assert!(builder.output.is_empty());
+
+ let predicates = builder.output.get(&3).unwrap();
+ assert_eq!(predicates.len(), 1);
+ assert_eq!(
+ predicates[0],
+ Predicate::InList(InListPredicate {
+ list: HashSet::from_iter([encoded_string("abc")])
+ })
+ );
}
#[test]
@@ -199,7 +207,7 @@ mod tests {
None,
None,
&metadata,
- HashSet::default(),
+ HashSet::from_iter([1, 2, 3]),
facotry,
);
@@ -219,7 +227,7 @@ mod tests {
None,
None,
&metadata,
- HashSet::default(),
+ HashSet::from_iter([1, 2, 3]),
facotry,
);
@@ -239,7 +247,7 @@ mod tests {
None,
None,
&metadata,
- HashSet::default(),
+ HashSet::from_iter([1, 2, 3]),
facotry,
);
@@ -298,7 +306,7 @@ mod tests {
None,
None,
&metadata,
- HashSet::default(),
+ HashSet::from_iter([1, 2, 3]),
facotry,
);
@@ -336,7 +344,7 @@ mod tests {
None,
None,
&metadata,
- HashSet::default(),
+ HashSet::from_iter([1, 2, 3]),
facotry,
);
diff --git a/src/mito2/src/sst/index/inverted_index/applier/builder/in_list.rs b/src/mito2/src/sst/index/inverted_index/applier/builder/in_list.rs
index c8cf9c4d16b6..6a520ba401d3 100644
--- a/src/mito2/src/sst/index/inverted_index/applier/builder/in_list.rs
+++ b/src/mito2/src/sst/index/inverted_index/applier/builder/in_list.rs
@@ -29,7 +29,7 @@ impl InvertedIndexApplierBuilder<'_> {
let Some(column_name) = Self::column_name(&inlist.expr) else {
return Ok(());
};
- let Some((column_id, data_type)) = self.tag_column_id_and_type(column_name)? else {
+ let Some((column_id, data_type)) = self.column_id_and_type(column_name)? else {
return Ok(());
};
@@ -71,7 +71,7 @@ mod tests {
None,
None,
&metadata,
- HashSet::default(),
+ HashSet::from_iter([1, 2, 3]),
facotry,
);
@@ -104,7 +104,7 @@ mod tests {
None,
None,
&metadata,
- HashSet::default(),
+ HashSet::from_iter([1, 2, 3]),
facotry,
);
@@ -129,7 +129,7 @@ mod tests {
None,
None,
&metadata,
- HashSet::default(),
+ HashSet::from_iter([1, 2, 3]),
facotry,
);
@@ -140,7 +140,15 @@ mod tests {
};
builder.collect_inlist(&in_list).unwrap();
- assert!(builder.output.is_empty());
+
+ let predicates = builder.output.get(&3).unwrap();
+ assert_eq!(predicates.len(), 1);
+ assert_eq!(
+ predicates[0],
+ Predicate::InList(InListPredicate {
+ list: HashSet::from_iter([encoded_string("foo"), encoded_string("bar")])
+ })
+ );
}
#[test]
@@ -154,7 +162,7 @@ mod tests {
None,
None,
&metadata,
- HashSet::default(),
+ HashSet::from_iter([1, 2, 3]),
facotry,
);
@@ -181,7 +189,7 @@ mod tests {
None,
None,
&metadata,
- HashSet::default(),
+ HashSet::from_iter([1, 2, 3]),
facotry,
);
diff --git a/src/mito2/src/sst/index/inverted_index/applier/builder/regex_match.rs b/src/mito2/src/sst/index/inverted_index/applier/builder/regex_match.rs
index a60d9d9c0f5c..7fdf7f3de55c 100644
--- a/src/mito2/src/sst/index/inverted_index/applier/builder/regex_match.rs
+++ b/src/mito2/src/sst/index/inverted_index/applier/builder/regex_match.rs
@@ -25,7 +25,7 @@ impl InvertedIndexApplierBuilder<'_> {
let Some(column_name) = Self::column_name(column) else {
return Ok(());
};
- let Some((column_id, data_type)) = self.tag_column_id_and_type(column_name)? else {
+ let Some((column_id, data_type)) = self.column_id_and_type(column_name)? else {
return Ok(());
};
if !data_type.is_string() {
@@ -65,7 +65,7 @@ mod tests {
None,
None,
&metadata,
- HashSet::default(),
+ HashSet::from_iter([1, 2, 3]),
facotry,
);
@@ -94,7 +94,7 @@ mod tests {
None,
None,
&metadata,
- HashSet::default(),
+ HashSet::from_iter([1, 2, 3]),
facotry,
);
@@ -102,7 +102,14 @@ mod tests {
.collect_regex_match(&field_column(), &string_lit("abc"))
.unwrap();
- assert!(builder.output.is_empty());
+ let predicates = builder.output.get(&3).unwrap();
+ assert_eq!(predicates.len(), 1);
+ assert_eq!(
+ predicates[0],
+ Predicate::RegexMatch(RegexMatchPredicate {
+ pattern: "abc".to_string()
+ })
+ );
}
#[test]
@@ -116,7 +123,7 @@ mod tests {
None,
None,
&metadata,
- HashSet::default(),
+ HashSet::from_iter([1, 2, 3]),
facotry,
);
@@ -138,7 +145,7 @@ mod tests {
None,
None,
&metadata,
- HashSet::default(),
+ HashSet::from_iter([1, 2, 3]),
facotry,
);
diff --git a/src/mito2/src/sst/index/inverted_index/creator.rs b/src/mito2/src/sst/index/inverted_index/creator.rs
index b2b11048193f..6db1ef6e0b7b 100644
--- a/src/mito2/src/sst/index/inverted_index/creator.rs
+++ b/src/mito2/src/sst/index/inverted_index/creator.rs
@@ -36,6 +36,7 @@ use crate::error::{
PushIndexValueSnafu, Result,
};
use crate::read::Batch;
+use crate::row_converter::SortField;
use crate::sst::file::FileId;
use crate::sst::index::intermediate::{IntermediateLocation, IntermediateManager};
use crate::sst::index::inverted_index::codec::{IndexValueCodec, IndexValuesCodec};
@@ -72,7 +73,7 @@ pub struct InvertedIndexer {
memory_usage: Arc<AtomicUsize>,
/// Ids of indexed columns.
- column_ids: HashSet<ColumnId>,
+ indexed_column_ids: HashSet<ColumnId>,
}
impl InvertedIndexer {
@@ -84,7 +85,7 @@ impl InvertedIndexer {
intermediate_manager: IntermediateManager,
memory_usage_threshold: Option<usize>,
segment_row_count: NonZeroUsize,
- ignore_column_ids: &[ColumnId],
+ indexed_column_ids: HashSet<ColumnId>,
) -> Self {
let temp_file_provider = Arc::new(TempFileProvider::new(
IntermediateLocation::new(&metadata.region_id, &sst_file_id),
@@ -102,14 +103,6 @@ impl InvertedIndexer {
let index_creator = Box::new(SortIndexCreator::new(sorter, segment_row_count));
let codec = IndexValuesCodec::from_tag_columns(metadata.primary_key_columns());
- let mut column_ids = metadata
- .primary_key_columns()
- .map(|c| c.column_id)
- .collect::<HashSet<_>>();
- for id in ignore_column_ids {
- column_ids.remove(id);
- }
-
Self {
codec,
index_creator,
@@ -118,7 +111,7 @@ impl InvertedIndexer {
stats: Statistics::new(TYPE_INVERTED_INDEX),
aborted: false,
memory_usage,
- column_ids,
+ indexed_column_ids,
}
}
@@ -189,7 +182,7 @@ impl InvertedIndexer {
guard.inc_row_count(n);
for ((col_id, col_id_str), field, value) in self.codec.decode(batch.primary_key())? {
- if !self.column_ids.contains(col_id) {
+ if !self.indexed_column_ids.contains(col_id) {
continue;
}
@@ -210,6 +203,32 @@ impl InvertedIndexer {
.context(PushIndexValueSnafu)?;
}
+ for field in batch.fields() {
+ if !self.indexed_column_ids.contains(&field.column_id) {
+ continue;
+ }
+
+ let sort_field = SortField::new(field.data.data_type());
+ let col_id_str = field.column_id.to_string();
+ for i in 0..n {
+ self.value_buf.clear();
+ let value = field.data.get_ref(i);
+
+ if value.is_null() {
+ self.index_creator
+ .push_with_name(&col_id_str, None)
+ .await
+ .context(PushIndexValueSnafu)?;
+ } else {
+ IndexValueCodec::encode_nonnull_value(value, &sort_field, &mut self.value_buf)?;
+ self.index_creator
+ .push_with_name(&col_id_str, Some(&self.value_buf))
+ .await
+ .context(PushIndexValueSnafu)?;
+ }
+ }
+ }
+
Ok(())
}
@@ -269,7 +288,7 @@ impl InvertedIndexer {
}
pub fn column_ids(&self) -> impl Iterator<Item = ColumnId> + '_ {
- self.column_ids.iter().copied()
+ self.indexed_column_ids.iter().copied()
}
pub fn memory_usage(&self) -> usize {
@@ -297,6 +316,7 @@ mod tests {
use super::*;
use crate::cache::index::InvertedIndexCache;
+ use crate::read::BatchColumn;
use crate::row_converter::{McmpRowCodec, RowCodec, SortField};
use crate::sst::index::inverted_index::applier::builder::InvertedIndexApplierBuilder;
use crate::sst::index::puffin_manager::PuffinManagerFactory;
@@ -340,12 +360,25 @@ mod tests {
semantic_type: SemanticType::Timestamp,
column_id: 3,
})
+ .push_column_metadata(ColumnMetadata {
+ column_schema: ColumnSchema::new(
+ "field_u64",
+ ConcreteDataType::uint64_datatype(),
+ false,
+ ),
+ semantic_type: SemanticType::Field,
+ column_id: 4,
+ })
.primary_key(vec![1, 2]);
Arc::new(builder.build().unwrap())
}
- fn new_batch(num_rows: usize, str_tag: impl AsRef<str>, i32_tag: impl Into<i32>) -> Batch {
+ fn new_batch(
+ str_tag: impl AsRef<str>,
+ i32_tag: impl Into<i32>,
+ u64_field: impl IntoIterator<Item = u64>,
+ ) -> Batch {
let fields = vec![
SortField::new(ConcreteDataType::string_datatype()),
SortField::new(ConcreteDataType::int32_datatype()),
@@ -354,6 +387,12 @@ mod tests {
let row: [ValueRef; 2] = [str_tag.as_ref().into(), i32_tag.into().into()];
let primary_key = codec.encode(row.into_iter()).unwrap();
+ let u64_field = BatchColumn {
+ column_id: 4,
+ data: Arc::new(UInt64Vector::from_iter_values(u64_field)),
+ };
+ let num_rows = u64_field.data.len();
+
Batch::new(
primary_key,
Arc::new(UInt64Vector::from_iter_values(
@@ -365,14 +404,14 @@ mod tests {
Arc::new(UInt8Vector::from_iter_values(
iter::repeat(1).take(num_rows),
)),
- vec![],
+ vec![u64_field],
)
.unwrap()
}
async fn build_applier_factory(
prefix: &str,
- tags: BTreeSet<(&'static str, i32)>,
+ rows: BTreeSet<(&'static str, i32, [u64; 2])>,
) -> impl Fn(DfExpr) -> BoxFuture<'static, Vec<usize>> {
let (d, factory) = PuffinManagerFactory::new_for_test_async(prefix).await;
let region_dir = "region0".to_string();
@@ -383,6 +422,7 @@ mod tests {
let intm_mgr = new_intm_mgr(d.path().to_string_lossy()).await;
let memory_threshold = None;
let segment_row_count = 2;
+ let indexed_column_ids = HashSet::from_iter([1, 2, 4]);
let mut creator = InvertedIndexer::new(
sst_file_id,
@@ -390,18 +430,18 @@ mod tests {
intm_mgr,
memory_threshold,
NonZeroUsize::new(segment_row_count).unwrap(),
- &[],
+ indexed_column_ids.clone(),
);
- for (str_tag, i32_tag) in &tags {
- let batch = new_batch(segment_row_count, str_tag, *i32_tag);
+ for (str_tag, i32_tag, u64_field) in &rows {
+ let batch = new_batch(str_tag, *i32_tag, u64_field.iter().copied());
creator.update(&batch).await.unwrap();
}
let puffin_manager = factory.build(object_store.clone());
let mut writer = puffin_manager.writer(&file_path).await.unwrap();
let (row_count, _) = creator.finish(&mut writer).await.unwrap();
- assert_eq!(row_count, tags.len() * segment_row_count);
+ assert_eq!(row_count, rows.len() * segment_row_count);
writer.finish().await.unwrap();
move |expr| {
@@ -413,7 +453,7 @@ mod tests {
None,
Some(cache),
&region_metadata,
- Default::default(),
+ indexed_column_ids.clone(),
factory.clone(),
)
.build(&[expr])
@@ -433,19 +473,19 @@ mod tests {
#[tokio::test]
async fn test_create_and_query_get_key() {
- let tags = BTreeSet::from_iter([
- ("aaa", 1),
- ("aaa", 2),
- ("aaa", 3),
- ("aab", 1),
- ("aab", 2),
- ("aab", 3),
- ("abc", 1),
- ("abc", 2),
- ("abc", 3),
+ let rows = BTreeSet::from_iter([
+ ("aaa", 1, [1, 2]),
+ ("aaa", 2, [2, 3]),
+ ("aaa", 3, [3, 4]),
+ ("aab", 1, [4, 5]),
+ ("aab", 2, [5, 6]),
+ ("aab", 3, [6, 7]),
+ ("abc", 1, [7, 8]),
+ ("abc", 2, [8, 9]),
+ ("abc", 3, [9, 10]),
]);
- let applier_factory = build_applier_factory("test_create_and_query_get_key_", tags).await;
+ let applier_factory = build_applier_factory("test_create_and_query_get_key_", rows).await;
let expr = col("tag_str").eq(lit("aaa"));
let res = applier_factory(expr).await;
@@ -468,23 +508,27 @@ mod tests {
let expr = col("tag_str").in_list(vec![lit("aaa"), lit("abc")], false);
let res = applier_factory(expr).await;
assert_eq!(res, vec![0, 1, 2, 6, 7, 8]);
+
+ let expr = col("field_u64").eq(lit(2u64));
+ let res = applier_factory(expr).await;
+ assert_eq!(res, vec![0, 1]);
}
#[tokio::test]
async fn test_create_and_query_range() {
- let tags = BTreeSet::from_iter([
- ("aaa", 1),
- ("aaa", 2),
- ("aaa", 3),
- ("aab", 1),
- ("aab", 2),
- ("aab", 3),
- ("abc", 1),
- ("abc", 2),
- ("abc", 3),
+ let rows = BTreeSet::from_iter([
+ ("aaa", 1, [1, 2]),
+ ("aaa", 2, [2, 3]),
+ ("aaa", 3, [3, 4]),
+ ("aab", 1, [4, 5]),
+ ("aab", 2, [5, 6]),
+ ("aab", 3, [6, 7]),
+ ("abc", 1, [7, 8]),
+ ("abc", 2, [8, 9]),
+ ("abc", 3, [9, 10]),
]);
- let applier_factory = build_applier_factory("test_create_and_query_range_", tags).await;
+ let applier_factory = build_applier_factory("test_create_and_query_range_", rows).await;
let expr = col("tag_str").between(lit("aaa"), lit("aab"));
let res = applier_factory(expr).await;
@@ -501,24 +545,28 @@ mod tests {
let expr = col("tag_i32").between(lit(2), lit(2));
let res = applier_factory(expr).await;
assert_eq!(res, vec![1, 4, 7]);
+
+ let expr = col("field_u64").between(lit(2u64), lit(5u64));
+ let res = applier_factory(expr).await;
+ assert_eq!(res, vec![0, 1, 2, 3, 4]);
}
#[tokio::test]
async fn test_create_and_query_comparison() {
- let tags = BTreeSet::from_iter([
- ("aaa", 1),
- ("aaa", 2),
- ("aaa", 3),
- ("aab", 1),
- ("aab", 2),
- ("aab", 3),
- ("abc", 1),
- ("abc", 2),
- ("abc", 3),
+ let rows = BTreeSet::from_iter([
+ ("aaa", 1, [1, 2]),
+ ("aaa", 2, [2, 3]),
+ ("aaa", 3, [3, 4]),
+ ("aab", 1, [4, 5]),
+ ("aab", 2, [5, 6]),
+ ("aab", 3, [6, 7]),
+ ("abc", 1, [7, 8]),
+ ("abc", 2, [8, 9]),
+ ("abc", 3, [9, 10]),
]);
let applier_factory =
- build_applier_factory("test_create_and_query_comparison_", tags).await;
+ build_applier_factory("test_create_and_query_comparison_", rows).await;
let expr = col("tag_str").lt(lit("aab"));
let res = applier_factory(expr).await;
@@ -528,6 +576,10 @@ mod tests {
let res = applier_factory(expr).await;
assert_eq!(res, vec![0, 3, 6]);
+ let expr = col("field_u64").lt(lit(2u64));
+ let res = applier_factory(expr).await;
+ assert_eq!(res, vec![0]);
+
let expr = col("tag_str").gt(lit("aab"));
let res = applier_factory(expr).await;
assert_eq!(res, vec![6, 7, 8]);
@@ -536,6 +588,10 @@ mod tests {
let res = applier_factory(expr).await;
assert_eq!(res, vec![2, 5, 8]);
+ let expr = col("field_u64").gt(lit(8u64));
+ let res = applier_factory(expr).await;
+ assert_eq!(res, vec![7, 8]);
+
let expr = col("tag_str").lt_eq(lit("aab"));
let res = applier_factory(expr).await;
assert_eq!(res, vec![0, 1, 2, 3, 4, 5]);
@@ -544,6 +600,10 @@ mod tests {
let res = applier_factory(expr).await;
assert_eq!(res, vec![0, 1, 3, 4, 6, 7]);
+ let expr = col("field_u64").lt_eq(lit(2u64));
+ let res = applier_factory(expr).await;
+ assert_eq!(res, vec![0, 1]);
+
let expr = col("tag_str").gt_eq(lit("aab"));
let res = applier_factory(expr).await;
assert_eq!(res, vec![3, 4, 5, 6, 7, 8]);
@@ -552,6 +612,10 @@ mod tests {
let res = applier_factory(expr).await;
assert_eq!(res, vec![1, 2, 4, 5, 7, 8]);
+ let expr = col("field_u64").gt_eq(lit(8u64));
+ let res = applier_factory(expr).await;
+ assert_eq!(res, vec![6, 7, 8]);
+
let expr = col("tag_str")
.gt(lit("aaa"))
.and(col("tag_str").lt(lit("abc")));
@@ -561,23 +625,29 @@ mod tests {
let expr = col("tag_i32").gt(lit(1)).and(col("tag_i32").lt(lit(3)));
let res = applier_factory(expr).await;
assert_eq!(res, vec![1, 4, 7]);
+
+ let expr = col("field_u64")
+ .gt(lit(2u64))
+ .and(col("field_u64").lt(lit(9u64)));
+ let res = applier_factory(expr).await;
+ assert_eq!(res, vec![1, 2, 3, 4, 5, 6, 7]);
}
#[tokio::test]
async fn test_create_and_query_regex() {
- let tags = BTreeSet::from_iter([
- ("aaa", 1),
- ("aaa", 2),
- ("aaa", 3),
- ("aab", 1),
- ("aab", 2),
- ("aab", 3),
- ("abc", 1),
- ("abc", 2),
- ("abc", 3),
+ let rows = BTreeSet::from_iter([
+ ("aaa", 1, [1, 2]),
+ ("aaa", 2, [2, 3]),
+ ("aaa", 3, [3, 4]),
+ ("aab", 1, [4, 5]),
+ ("aab", 2, [5, 6]),
+ ("aab", 3, [6, 7]),
+ ("abc", 1, [7, 8]),
+ ("abc", 2, [8, 9]),
+ ("abc", 3, [9, 10]),
]);
- let applier_factory = build_applier_factory("test_create_and_query_regex_", tags).await;
+ let applier_factory = build_applier_factory("test_create_and_query_regex_", rows).await;
let expr = binary_expr(col("tag_str"), Operator::RegexMatch, lit(".*"));
let res = applier_factory(expr).await;
|
feat
|
support building inverted index for the field column on Mito (#4887)
|
4cd5ec7769ba9d0d902ab2e6a8d5cabc302521d9
|
2024-07-25 08:12:18
|
dennis zhuang
|
docs: update readme (#4430)
| false
|
diff --git a/README.md b/README.md
index 6b268af03a92..f8b4a6d7d938 100644
--- a/README.md
+++ b/README.md
@@ -50,7 +50,7 @@
## Introduction
-**GreptimeDB** is an open-source unified time-series database for **Metrics**, **Events**, and **Logs** (also **Traces** in plan). You can gain real-time insights from Edge to Cloud at any scale.
+**GreptimeDB** is an open-source unified time-series database for **Metrics**, **Logs**, and **Events** (also **Traces** in plan). You can gain real-time insights from Edge to Cloud at any scale.
## Why GreptimeDB
@@ -58,7 +58,7 @@ Our core developers have been building time-series data platforms for years. Bas
* **Unified all kinds of time series**
- GreptimeDB treats all time series as contextual events with timestamp, and thus unifies the processing of metrics and events. It supports analyzing metrics and events with SQL and PromQL, and doing streaming with continuous aggregation.
+ GreptimeDB treats all time series as contextual events with timestamp, and thus unifies the processing of metrics, logs, and events. It supports analyzing metrics, logs, and events with SQL and PromQL, and doing streaming with continuous aggregation.
* **Cloud-Edge collaboration**
|
docs
|
update readme (#4430)
|
32c3ac4fcfa6938c70a2beee0005cecb1d49c882
|
2023-08-15 14:58:09
|
zyy17
|
refactor: improve the image building performance (#2175)
| false
|
diff --git a/.github/actions/build-dev-builder-image/action.yml b/.github/actions/build-dev-builder-image/action.yml
index e4a88a5d1136..c1755113a6fe 100644
--- a/.github/actions/build-dev-builder-image/action.yml
+++ b/.github/actions/build-dev-builder-image/action.yml
@@ -42,10 +42,21 @@ runs:
username: ${{ inputs.dockerhub-image-registry-username }}
password: ${{ inputs.dockerhub-image-registry-token }}
- - name: Build and push dev builder image to dockerhub
+ - name: Build and push ubuntu dev builder image to dockerhub
shell: bash
run:
make dev-builder \
+ BASE_IMAGE=ubuntu \
+ BUILDX_MULTI_PLATFORM_BUILD=true \
+ IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
+ IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
+ IMAGE_TAG=${{ inputs.version }}
+
+ - name: Build and push centos dev builder image to dockerhub
+ shell: bash
+ run:
+ make dev-builder \
+ BASE_IMAGE=centos \
BUILDX_MULTI_PLATFORM_BUILD=true \
IMAGE_REGISTRY=${{ inputs.dockerhub-image-registry }} \
IMAGE_NAMESPACE=${{ inputs.dockerhub-image-namespace }} \
@@ -59,11 +70,23 @@ runs:
username: ${{ inputs.acr-image-registry-username }}
password: ${{ inputs.acr-image-registry-password }}
- - name: Build and push dev builder image to ACR
+ - name: Build and push ubuntu dev builder image to ACR
+ shell: bash
+ continue-on-error: true
+ run: # buildx will cache the images that already built, so it will not take long time to build the images again.
+ make dev-builder \
+ BASE_IMAGE=ubuntu \
+ BUILDX_MULTI_PLATFORM_BUILD=true \
+ IMAGE_REGISTRY=${{ inputs.acr-image-registry }} \
+ IMAGE_NAMESPACE=${{ inputs.acr-image-namespace }} \
+ IMAGE_TAG=${{ inputs.version }}
+
+ - name: Build and push centos dev builder image to ACR
shell: bash
continue-on-error: true
run: # buildx will cache the images that already built, so it will not take long time to build the images again.
make dev-builder \
+ BASE_IMAGE=centos \
BUILDX_MULTI_PLATFORM_BUILD=true \
IMAGE_REGISTRY=${{ inputs.acr-image-registry }} \
IMAGE_NAMESPACE=${{ inputs.acr-image-namespace }} \
diff --git a/.github/actions/build-greptime-binary/action.yml b/.github/actions/build-greptime-binary/action.yml
index aecd931f0eae..f9e47f424f62 100644
--- a/.github/actions/build-greptime-binary/action.yml
+++ b/.github/actions/build-greptime-binary/action.yml
@@ -43,7 +43,7 @@ runs:
shell: bash
run: |
cd ${{ inputs.working-dir }} && \
- make build-greptime-by-buildx \
+ make build-by-dev-builder \
CARGO_PROFILE=${{ inputs.cargo-profile }} \
FEATURES=${{ inputs.features }} \
BASE_IMAGE=${{ inputs.base-image }}
@@ -52,7 +52,7 @@ runs:
uses: ./.github/actions/upload-artifacts
with:
artifacts-dir: ${{ inputs.artifacts-dir }}
- target-file: ./greptime
+ target-file: ./target/${{ inputs.cargo-profile }}/greptime
version: ${{ inputs.version }}
release-to-s3-bucket: ${{ inputs.release-to-s3-bucket }}
aws-access-key-id: ${{ inputs.aws-access-key-id }}
diff --git a/.github/actions/build-images/action.yml b/.github/actions/build-images/action.yml
index 88196a574828..c339adc0ee39 100644
--- a/.github/actions/build-images/action.yml
+++ b/.github/actions/build-images/action.yml
@@ -40,7 +40,7 @@ runs:
image-registry-password: ${{ inputs.image-registry-password }}
image-name: ${{ inputs.image-name }}
image-tag: ${{ inputs.version }}
- docker-file: docker/ci/Dockerfile
+ docker-file: docker/ci/ubuntu/Dockerfile
amd64-artifact-name: greptime-linux-amd64-pyo3-${{ inputs.version }}
arm64-artifact-name: greptime-linux-arm64-pyo3-${{ inputs.version }}
platforms: linux/amd64,linux/arm64
@@ -56,7 +56,7 @@ runs:
image-registry-password: ${{ inputs.image-registry-password }}
image-name: ${{ inputs.image-name }}-centos
image-tag: ${{ inputs.version }}
- docker-file: docker/ci/Dockerfile-centos
+ docker-file: docker/ci/centos/Dockerfile
amd64-artifact-name: greptime-linux-amd64-centos-${{ inputs.version }}
platforms: linux/amd64
push-latest-tag: ${{ inputs.push-latest-tag }}
diff --git a/.github/actions/build-linux-artifacts/action.yml b/.github/actions/build-linux-artifacts/action.yml
index 018ee191534e..381f45126f59 100644
--- a/.github/actions/build-linux-artifacts/action.yml
+++ b/.github/actions/build-linux-artifacts/action.yml
@@ -87,6 +87,11 @@ runs:
upload-to-s3: ${{ inputs.upload-to-s3 }}
working-dir: ${{ inputs.working-dir }}
+ - name: Clean up the target directory # Clean up the target directory for the centos7 base image, or it will still use the objects of last build.
+ shell: bash
+ run: |
+ rm -rf ./target/
+
- name: Build greptime on centos base image
uses: ./.github/actions/build-greptime-binary
if: ${{ inputs.arch == 'amd64' && inputs.dev-mode == 'false' }} # Only build centos7 base image for amd64.
diff --git a/.github/actions/upload-artifacts/action.yml b/.github/actions/upload-artifacts/action.yml
index 34132d211e65..e02a392ba702 100644
--- a/.github/actions/upload-artifacts/action.yml
+++ b/.github/actions/upload-artifacts/action.yml
@@ -66,17 +66,13 @@ runs:
name: ${{ inputs.artifacts-dir }}.sha256sum
path: ${{ inputs.working-dir }}/${{ inputs.artifacts-dir }}.sha256sum
- - name: Configure AWS credentials
- if: ${{ inputs.upload-to-s3 == 'true' }}
- uses: aws-actions/configure-aws-credentials@v2
- with:
- aws-access-key-id: ${{ inputs.aws-access-key-id }}
- aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
- aws-region: ${{ inputs.aws-region }}
-
- name: Upload artifacts to S3
if: ${{ inputs.upload-to-s3 == 'true' }}
uses: nick-invision/retry@v2
+ env:
+ AWS_ACCESS_KEY_ID: ${{ inputs.aws-access-key-id }}
+ AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-secret-access-key }}
+ AWS_DEFAULT_REGION: ${{ inputs.aws-region }}
with:
max_attempts: 20
timeout_minutes: 5
diff --git a/.github/workflows/dev-build.yml b/.github/workflows/dev-build.yml
index c54fd889b6cc..a0182d19badc 100644
--- a/.github/workflows/dev-build.yml
+++ b/.github/workflows/dev-build.yml
@@ -334,11 +334,11 @@ jobs:
if: ${{ needs.release-images-to-dockerhub.outputs.build-result == 'success' }}
with:
payload: |
- {"text": "GreptimeDB ${{ env.NEXT_RELEASE_VERSION }} build successful"}
+ {"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has completed successfully."}
- name: Notifiy nightly build failed result
uses: slackapi/[email protected]
if: ${{ needs.release-images-to-dockerhub.outputs.build-result != 'success' }}
with:
payload: |
- {"text": "GreptimeDB ${{ env.NEXT_RELEASE_VERSION }} build failed, please check 'https://github.com/GreptimeTeam/greptimedb/actions/workflows/${{ env.NEXT_RELEASE_VERSION }}-build.yml'"}
+ {"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check 'https://github.com/GreptimeTeam/greptimedb/actions/workflows/${{ env.NEXT_RELEASE_VERSION }}-build.yml'."}
diff --git a/.github/workflows/nightly-build.yml b/.github/workflows/nightly-build.yml
index 234c200158b6..d776e6c52d82 100644
--- a/.github/workflows/nightly-build.yml
+++ b/.github/workflows/nightly-build.yml
@@ -299,11 +299,11 @@ jobs:
if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' }}
with:
payload: |
- {"text": "GreptimeDB nightly build successful"}
+ {"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has completed successfully."}
- name: Notifiy nightly build failed result
uses: slackapi/[email protected]
if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result != 'success' }}
with:
payload: |
- {"text": "GreptimeDB nightly build failed, please check 'https://github.com/GreptimeTeam/greptimedb/actions/workflows/nightly-build.yml'"}
+ {"text": "GreptimeDB's ${{ env.NEXT_RELEASE_VERSION }} build has failed, please check 'https://github.com/GreptimeTeam/greptimedb/actions/workflows/${{ env.NEXT_RELEASE_VERSION }}-build.yml'."}
diff --git a/Makefile b/Makefile
index f8e932e78a38..e2855296466f 100644
--- a/Makefile
+++ b/Makefile
@@ -12,6 +12,8 @@ BUILDX_BUILDER_NAME ?= gtbuilder
BASE_IMAGE ?= ubuntu
RUST_TOOLCHAIN ?= $(shell cat rust-toolchain.toml | grep channel | cut -d'"' -f2)
CARGO_REGISTRY_CACHE ?= ${HOME}/.cargo/registry
+ARCH := $(shell uname -m | sed 's/x86_64/amd64/' | sed 's/aarch64/arm64/')
+OUTPUT_DIR := $(shell if [ "$(RELEASE)" = "true" ]; then echo "release"; elif [ ! -z "$(CARGO_PROFILE)" ]; then echo "$(CARGO_PROFILE)" ; else echo "debug"; fi)
# The arguments for running integration tests.
ETCD_VERSION ?= v3.5.9
@@ -43,6 +45,10 @@ ifneq ($(strip $(TARGET)),)
CARGO_BUILD_OPTS += --target ${TARGET}
endif
+ifneq ($(strip $(RELEASE)),)
+ CARGO_BUILD_OPTS += --release
+endif
+
ifeq ($(BUILDX_MULTI_PLATFORM_BUILD), true)
BUILDX_MULTI_PLATFORM_BUILD_OPTS := --platform linux/amd64,linux/arm64 --push
else
@@ -52,26 +58,20 @@ endif
##@ Build
.PHONY: build
-build: ## Build debug version greptime. If USE_DEV_BUILDER is true, the binary will be built in dev-builder.
-ifeq ($(USE_DEV_BUILDER), true)
- docker run --network=host \
- -v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry \
- -w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder:latest \
- make build CARGO_PROFILE=${CARGO_PROFILE} FEATURES=${FEATURES} TARGET_DIR=${TARGET_DIR}
-else
+build: ## Build debug version greptime.
cargo build ${CARGO_BUILD_OPTS}
-endif
-.PHONY: release
-release: ## Build release version greptime. If USE_DEV_BUILDER is true, the binary will be built in dev-builder.
-ifeq ($(USE_DEV_BUILDER), true)
+.POHNY: build-by-dev-builder
+build-by-dev-builder: ## Build greptime by dev-builder.
docker run --network=host \
-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry \
- -w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder:latest \
- make release CARGO_PROFILE=${CARGO_PROFILE} FEATURES=${FEATURES} TARGET_DIR=${TARGET_DIR}
-else
- cargo build --release ${CARGO_BUILD_OPTS}
-endif
+ -w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:latest \
+ make build \
+ CARGO_PROFILE=${CARGO_PROFILE} \
+ FEATURES=${FEATURES} \
+ TARGET_DIR=${TARGET_DIR} \
+ TARGET=${TARGET} \
+ RELEASE=${RELEASE}
.PHONY: clean
clean: ## Clean the project.
@@ -90,30 +90,27 @@ check-toml: ## Check all TOML files.
taplo format --check
.PHONY: docker-image
-docker-image: multi-platform-buildx ## Build docker image.
+docker-image: build-by-dev-builder ## Build docker image.
+ mkdir -p ${ARCH} && \
+ cp ./target/${OUTPUT_DIR}/greptime ${ARCH}/greptime && \
+ docker build -f docker/ci/${BASE_IMAGE}/Dockerfile -t ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/greptimedb:${IMAGE_TAG} . && \
+ rm -r ${ARCH}
+
+.PHONY: docker-image-buildx
+docker-image-buildx: multi-platform-buildx ## Build docker image by buildx.
docker buildx build --builder ${BUILDX_BUILDER_NAME} \
- --build-arg="CARGO_PROFILE=${CARGO_PROFILE}" --build-arg="FEATURES=${FEATURES}" \
- -f docker/${BASE_IMAGE}/Dockerfile \
+ --build-arg="CARGO_PROFILE=${CARGO_PROFILE}" \
+ --build-arg="FEATURES=${FEATURES}" \
+ --build-arg="OUTPUT_DIR=${OUTPUT_DIR}" \
+ -f docker/buildx/${BASE_IMAGE}/Dockerfile \
-t ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/greptimedb:${IMAGE_TAG} ${BUILDX_MULTI_PLATFORM_BUILD_OPTS} .
-.PHONY: build-greptime-by-buildx
-build-greptime-by-buildx: multi-platform-buildx ## Build greptime binary by docker buildx. The binary will be copied to the current directory.
- docker buildx build --builder ${BUILDX_BUILDER_NAME} \
- --target=builder \
- --build-arg="CARGO_PROFILE=${CARGO_PROFILE}" --build-arg="FEATURES=${FEATURES}" \
- -f docker/${BASE_IMAGE}/Dockerfile \
- -t ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/greptimedb-builder:${IMAGE_TAG} ${BUILDX_MULTI_PLATFORM_BUILD_OPTS} .
-
- docker run --rm -v ${PWD}:/data \
- --entrypoint cp ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/greptimedb-builder:${IMAGE_TAG} \
- /out/target/${CARGO_PROFILE}/greptime /data/greptime
-
.PHONY: dev-builder
dev-builder: multi-platform-buildx ## Build dev-builder image.
docker buildx build --builder ${BUILDX_BUILDER_NAME} \
--build-arg="RUST_TOOLCHAIN=${RUST_TOOLCHAIN}" \
- -f docker/dev-builder/Dockerfile \
- -t ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder:${IMAGE_TAG} ${BUILDX_MULTI_PLATFORM_BUILD_OPTS} .
+ -f docker/dev-builder/${BASE_IMAGE}/Dockerfile \
+ -t ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:${IMAGE_TAG} ${BUILDX_MULTI_PLATFORM_BUILD_OPTS} .
.PHONY: multi-platform-buildx
multi-platform-buildx: ## Create buildx multi-platform builder.
@@ -155,7 +152,7 @@ stop-etcd: ## Stop single node etcd for testing purpose.
run-it-in-container: start-etcd ## Run integration tests in dev-builder.
docker run --network=host \
-v ${PWD}:/greptimedb -v ${CARGO_REGISTRY_CACHE}:/root/.cargo/registry -v /tmp:/tmp \
- -w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder:latest \
+ -w /greptimedb ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/dev-builder-${BASE_IMAGE}:latest \
make test sqlness-test BUILD_JOBS=${BUILD_JOBS}
##@ General
diff --git a/docker/centos/Dockerfile b/docker/buildx/centos/Dockerfile
similarity index 86%
rename from docker/centos/Dockerfile
rename to docker/buildx/centos/Dockerfile
index 843fd827f81b..9f4f525f5e84 100644
--- a/docker/centos/Dockerfile
+++ b/docker/buildx/centos/Dockerfile
@@ -2,6 +2,7 @@ FROM centos:7 as builder
ARG CARGO_PROFILE
ARG FEATURES
+ARG OUTPUT_DIR
ENV LANG en_US.utf8
WORKDIR /greptimedb
@@ -13,7 +14,8 @@ RUN yum install -y epel-release \
openssl-devel \
centos-release-scl \
rh-python38 \
- rh-python38-python-devel
+ rh-python38-python-devel \
+ which
# Install protoc
RUN curl -LO https://github.com/protocolbuffers/protobuf/releases/download/v3.15.8/protoc-3.15.8-linux-x86_64.zip
@@ -35,17 +37,18 @@ RUN --mount=target=.,rw \
# Export the binary to the clean image.
FROM centos:7 as base
-ARG CARGO_PROFILE
+ARG OUTPUT_DIR
RUN yum install -y epel-release \
openssl \
openssl-devel \
centos-release-scl \
rh-python38 \
- rh-python38-python-devel
+ rh-python38-python-devel \
+ which
WORKDIR /greptime
-COPY --from=builder /out/target/${CARGO_PROFILE}/greptime /greptime/bin/
+COPY --from=builder /out/target/${OUTPUT_DIR}/greptime /greptime/bin/
ENV PATH /greptime/bin/:$PATH
ENTRYPOINT ["greptime"]
diff --git a/docker/ubuntu/Dockerfile b/docker/buildx/ubuntu/Dockerfile
similarity index 91%
rename from docker/ubuntu/Dockerfile
rename to docker/buildx/ubuntu/Dockerfile
index cdb09f774eed..e005a0015e9a 100644
--- a/docker/ubuntu/Dockerfile
+++ b/docker/buildx/ubuntu/Dockerfile
@@ -2,6 +2,7 @@ FROM ubuntu:22.04 as builder
ARG CARGO_PROFILE
ARG FEATURES
+ARG OUTPUT_DIR
ENV LANG en_US.utf8
WORKDIR /greptimedb
@@ -25,7 +26,7 @@ RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-mo
ENV PATH /root/.cargo/bin/:$PATH
# Build the project in release mode.
-RUN --mount=target=.,rw \
+RUN --mount=target=. \
--mount=type=cache,target=/root/.cargo/registry \
make build \
CARGO_PROFILE=${CARGO_PROFILE} \
@@ -36,7 +37,7 @@ RUN --mount=target=.,rw \
# TODO(zyy17): Maybe should use the more secure container image.
FROM ubuntu:22.04 as base
-ARG CARGO_PROFILE
+ARG OUTPUT_DIR
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get \
-y install ca-certificates \
@@ -50,7 +51,7 @@ COPY ./docker/python/requirements.txt /etc/greptime/requirements.txt
RUN python3 -m pip install -r /etc/greptime/requirements.txt
WORKDIR /greptime
-COPY --from=builder /out/target/${CARGO_PROFILE}/greptime /greptime/bin/
+COPY --from=builder /out/target/${OUTPUT_DIR}/greptime /greptime/bin/
ENV PATH /greptime/bin/:$PATH
ENTRYPOINT ["greptime"]
diff --git a/docker/ci/Dockerfile-centos b/docker/ci/centos/Dockerfile
similarity index 100%
rename from docker/ci/Dockerfile-centos
rename to docker/ci/centos/Dockerfile
diff --git a/docker/ci/Dockerfile b/docker/ci/ubuntu/Dockerfile
similarity index 100%
rename from docker/ci/Dockerfile
rename to docker/ci/ubuntu/Dockerfile
diff --git a/docker/dev-builder/centos/Dockerfile b/docker/dev-builder/centos/Dockerfile
new file mode 100644
index 000000000000..111c5ca42843
--- /dev/null
+++ b/docker/dev-builder/centos/Dockerfile
@@ -0,0 +1,29 @@
+FROM centos:7 as builder
+
+ENV LANG en_US.utf8
+
+# Install dependencies
+RUN ulimit -n 1024000 && yum groupinstall -y 'Development Tools'
+RUN yum install -y epel-release \
+ openssl \
+ openssl-devel \
+ centos-release-scl \
+ rh-python38 \
+ rh-python38-python-devel \
+ which
+
+# Install protoc
+RUN curl -LO https://github.com/protocolbuffers/protobuf/releases/download/v3.15.8/protoc-3.15.8-linux-x86_64.zip
+RUN unzip protoc-3.15.8-linux-x86_64.zip -d /usr/local/
+
+# Install Rust
+SHELL ["/bin/bash", "-c"]
+RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
+ENV PATH /opt/rh/rh-python38/root/usr/bin:/usr/local/bin:/root/.cargo/bin/:$PATH
+
+# Install Rust toolchains.
+ARG RUST_TOOLCHAIN
+RUN rustup toolchain install ${RUST_TOOLCHAIN}
+
+# Install nextest.
+RUN cargo install cargo-nextest --locked
diff --git a/docker/dev-builder/Dockerfile b/docker/dev-builder/ubuntu/Dockerfile
similarity index 100%
rename from docker/dev-builder/Dockerfile
rename to docker/dev-builder/ubuntu/Dockerfile
|
refactor
|
improve the image building performance (#2175)
|
ff2784da0f3903f8c43c99bd7d961abedeb61982
|
2023-04-18 20:35:43
|
LFC
|
test: add `SELECT ... LIMIT ...` test cases for distributed mode (#1419)
| false
|
diff --git a/src/frontend/src/tests/instance_test.rs b/src/frontend/src/tests/instance_test.rs
index cea7763e650d..49eb3a6c2405 100644
--- a/src/frontend/src/tests/instance_test.rs
+++ b/src/frontend/src/tests/instance_test.rs
@@ -302,7 +302,7 @@ async fn test_execute_insert_query_with_i64_timestamp(instance: Arc<dyn MockInst
}
}
-#[apply(standalone_instance_case)]
+#[apply(both_instances_cases)]
async fn test_execute_query(instance: Arc<dyn MockInstance>) {
let instance = instance.frontend();
diff --git a/tests/cases/standalone/limit/limit.result b/tests/cases/standalone/common/limit/limit.result
similarity index 100%
rename from tests/cases/standalone/limit/limit.result
rename to tests/cases/standalone/common/limit/limit.result
diff --git a/tests/cases/standalone/limit/limit.sql b/tests/cases/standalone/common/limit/limit.sql
similarity index 100%
rename from tests/cases/standalone/limit/limit.sql
rename to tests/cases/standalone/common/limit/limit.sql
diff --git a/tests/cases/standalone/order/limit.result b/tests/cases/standalone/common/order/limit.result
similarity index 100%
rename from tests/cases/standalone/order/limit.result
rename to tests/cases/standalone/common/order/limit.result
diff --git a/tests/cases/standalone/order/limit.sql b/tests/cases/standalone/common/order/limit.sql
similarity index 100%
rename from tests/cases/standalone/order/limit.sql
rename to tests/cases/standalone/common/order/limit.sql
diff --git a/tests/cases/standalone/order/limit_union.result b/tests/cases/standalone/common/order/limit_union.result
similarity index 100%
rename from tests/cases/standalone/order/limit_union.result
rename to tests/cases/standalone/common/order/limit_union.result
diff --git a/tests/cases/standalone/order/limit_union.sql b/tests/cases/standalone/common/order/limit_union.sql
similarity index 100%
rename from tests/cases/standalone/order/limit_union.sql
rename to tests/cases/standalone/common/order/limit_union.sql
|
test
|
add `SELECT ... LIMIT ...` test cases for distributed mode (#1419)
|
7b606ed289293f2ed3fb5de0b1aedce56f0a5b29
|
2023-09-19 14:36:09
|
Yingwen
|
feat(mito): make use of options in RegionCreate/OpenRequest (#2436)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index ca4617e50687..ad24d1ed2178 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2737,6 +2737,9 @@ name = "deranged"
version = "0.3.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f2696e8a945f658fd14dc3b87242e6b80cd0f36ff04ea560fa39082368847946"
+dependencies = [
+ "serde",
+]
[[package]]
name = "derive-new"
@@ -4477,6 +4480,7 @@ checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d"
dependencies = [
"equivalent",
"hashbrown 0.14.0",
+ "serde",
]
[[package]]
@@ -5497,6 +5501,7 @@ dependencies = [
"regex",
"serde",
"serde_json",
+ "serde_with",
"smallvec",
"snafu",
"store-api",
@@ -8663,6 +8668,35 @@ dependencies = [
"serde",
]
+[[package]]
+name = "serde_with"
+version = "3.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1ca3b16a3d82c4088f343b7480a93550b3eabe1a358569c2dfe38bbcead07237"
+dependencies = [
+ "base64 0.21.3",
+ "chrono",
+ "hex",
+ "indexmap 1.9.3",
+ "indexmap 2.0.0",
+ "serde",
+ "serde_json",
+ "serde_with_macros",
+ "time 0.3.28",
+]
+
+[[package]]
+name = "serde_with_macros"
+version = "3.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2e6be15c453eb305019bfa438b1593c731f36a289a7853f7707ee29e870b3b3c"
+dependencies = [
+ "darling 0.20.3",
+ "proc-macro2",
+ "quote",
+ "syn 2.0.29",
+]
+
[[package]]
name = "serde_yaml"
version = "0.9.25"
diff --git a/src/cmd/src/cli/bench.rs b/src/cmd/src/cli/bench.rs
index fbeb70fef5e6..54373cfa7ac9 100644
--- a/src/cmd/src/cli/bench.rs
+++ b/src/cmd/src/cli/bench.rs
@@ -120,7 +120,6 @@ fn create_table_info(table_id: TableId, table_name: TableName) -> RawTableInfo {
created_on: chrono::DateTime::default(),
primary_key_indices: vec![],
next_column_id: columns as u32 + 1,
- engine_options: Default::default(),
value_indices: vec![],
options: Default::default(),
region_numbers: (1..=100).collect(),
diff --git a/src/common/meta/src/key/table_info.rs b/src/common/meta/src/key/table_info.rs
index 9ca861444603..50d7e56d5164 100644
--- a/src/common/meta/src/key/table_info.rs
+++ b/src/common/meta/src/key/table_info.rs
@@ -275,7 +275,6 @@ mod tests {
created_on: chrono::DateTime::default(),
primary_key_indices: vec![0, 1],
next_column_id: 3,
- engine_options: Default::default(),
value_indices: vec![2, 3],
options: Default::default(),
region_numbers: vec![1],
diff --git a/src/meta-srv/src/procedure/utils.rs b/src/meta-srv/src/procedure/utils.rs
index 965ae2603713..b884e70a0a03 100644
--- a/src/meta-srv/src/procedure/utils.rs
+++ b/src/meta-srv/src/procedure/utils.rs
@@ -100,7 +100,6 @@ pub mod mock {
#[cfg(test)]
pub mod test_data {
- use std::collections::HashMap;
use std::sync::Arc;
use chrono::DateTime;
@@ -178,7 +177,6 @@ pub mod test_data {
engine: MITO2_ENGINE.to_string(),
next_column_id: 3,
region_numbers: vec![1, 2, 3],
- engine_options: HashMap::new(),
options: TableOptions::default(),
created_on: DateTime::default(),
partition_key_indices: vec![],
diff --git a/src/meta-srv/src/table_routes.rs b/src/meta-srv/src/table_routes.rs
index cbdfd12263a9..170082aae5ce 100644
--- a/src/meta-srv/src/table_routes.rs
+++ b/src/meta-srv/src/table_routes.rs
@@ -70,8 +70,6 @@ pub(crate) async fn fetch_tables(
#[cfg(test)]
pub(crate) mod tests {
- use std::collections::HashMap;
-
use chrono::DateTime;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MITO_ENGINE};
use common_meta::key::TableMetadataManagerRef;
@@ -103,7 +101,6 @@ pub(crate) mod tests {
engine: MITO_ENGINE.to_string(),
next_column_id: 1,
region_numbers: vec![1, 2, 3, 4],
- engine_options: HashMap::new(),
options: TableOptions::default(),
created_on: DateTime::default(),
partition_key_indices: vec![],
diff --git a/src/mito2/Cargo.toml b/src/mito2/Cargo.toml
index 37c0ec53d444..0a1ff27f9e4c 100644
--- a/src/mito2/Cargo.toml
+++ b/src/mito2/Cargo.toml
@@ -46,6 +46,7 @@ prost.workspace = true
regex = "1.5"
serde = { version = "1.0", features = ["derive"] }
serde_json.workspace = true
+serde_with = "3"
smallvec.workspace = true
snafu.workspace = true
store-api = { workspace = true }
diff --git a/src/mito2/src/compaction.rs b/src/mito2/src/compaction.rs
index 8aff4414875d..23ce3517e0ed 100644
--- a/src/mito2/src/compaction.rs
+++ b/src/mito2/src/compaction.rs
@@ -20,12 +20,11 @@ mod twcs;
use std::collections::HashMap;
use std::sync::Arc;
-use std::time::Duration;
use common_telemetry::{debug, error};
pub use picker::CompactionPickerRef;
use snafu::ResultExt;
-use store_api::storage::{CompactionStrategy, RegionId, TwcsOptions};
+use store_api::storage::RegionId;
use tokio::sync::mpsc::{self, Sender};
use crate::access_layer::AccessLayerRef;
@@ -33,6 +32,7 @@ use crate::compaction::twcs::TwcsPicker;
use crate::error::{
CompactRegionSnafu, Error, RegionClosedSnafu, RegionDroppedSnafu, RegionTruncatedSnafu, Result,
};
+use crate::region::options::CompactionOptions;
use crate::region::version::{VersionControlRef, VersionRef};
use crate::request::{OptionOutputTx, OutputTx, WorkerRequest};
use crate::schedule::scheduler::SchedulerRef;
@@ -42,7 +42,6 @@ use crate::sst::file_purger::FilePurgerRef;
pub struct CompactionRequest {
pub(crate) current_version: VersionRef,
pub(crate) access_layer: AccessLayerRef,
- pub(crate) ttl: Option<Duration>,
pub(crate) compaction_time_window: Option<i64>,
/// Sender to send notification to the region worker.
pub(crate) request_sender: mpsc::Sender<WorkerRequest>,
@@ -64,13 +63,13 @@ impl CompactionRequest {
}
}
-/// Builds compaction picker according to [CompactionStrategy].
-pub fn compaction_strategy_to_picker(strategy: &CompactionStrategy) -> CompactionPickerRef {
+/// Builds compaction picker according to [CompactionOptions].
+pub fn compaction_options_to_picker(strategy: &CompactionOptions) -> CompactionPickerRef {
match strategy {
- CompactionStrategy::Twcs(twcs_opts) => Arc::new(TwcsPicker::new(
+ CompactionOptions::Twcs(twcs_opts) => Arc::new(TwcsPicker::new(
twcs_opts.max_active_window_files,
twcs_opts.max_inactive_window_files,
- twcs_opts.time_window_seconds,
+ twcs_opts.time_window_seconds(),
)) as Arc<_>,
}
}
@@ -175,9 +174,7 @@ impl CompactionScheduler {
///
/// If the region has nothing to compact, it removes the region from the status map.
fn schedule_compaction_request(&mut self, request: CompactionRequest) -> Result<()> {
- // TODO(hl): build picker according to region options.
- let picker =
- compaction_strategy_to_picker(&CompactionStrategy::Twcs(TwcsOptions::default()));
+ let picker = compaction_options_to_picker(&request.current_version.options.compaction);
let region_id = request.region_id();
debug!(
"Pick compaction strategy {:?} for region: {}",
@@ -309,8 +306,6 @@ impl CompactionStatus {
let mut req = CompactionRequest {
current_version,
access_layer: self.access_layer.clone(),
- // TODO(hl): get TTL info from region metadata
- ttl: None,
// TODO(hl): get persisted region compaction time window
compaction_time_window: None,
request_sender: request_sender.clone(),
diff --git a/src/mito2/src/compaction/twcs.rs b/src/mito2/src/compaction/twcs.rs
index 9812e2c00f5d..5f03a3aa5fdb 100644
--- a/src/mito2/src/compaction/twcs.rs
+++ b/src/mito2/src/compaction/twcs.rs
@@ -120,7 +120,6 @@ impl Picker for TwcsPicker {
let CompactionRequest {
current_version,
access_layer,
- ttl,
compaction_time_window,
request_sender,
waiters,
@@ -131,6 +130,7 @@ impl Picker for TwcsPicker {
let region_id = region_metadata.region_id;
let levels = current_version.ssts.levels();
+ let ttl = current_version.options.ttl;
let expired_ssts = get_expired_ssts(levels, ttl, Timestamp::current_millis());
if !expired_ssts.is_empty() {
info!("Expired SSTs in region {}: {:?}", region_id, expired_ssts);
@@ -376,7 +376,6 @@ impl CompactionTask for TwcsCompactionTask {
notify,
})
.await;
- // TODO(hl): handle reschedule
}
}
diff --git a/src/mito2/src/config.rs b/src/mito2/src/config.rs
index 8b343305e220..3ca21a1aec91 100644
--- a/src/mito2/src/config.rs
+++ b/src/mito2/src/config.rs
@@ -25,8 +25,6 @@ use serde::{Deserialize, Serialize};
const DEFAULT_NUM_WORKERS: usize = 1;
/// Default max running background job.
const DEFAULT_MAX_BG_JOB: usize = 4;
-/// Default region write buffer size.
-pub(crate) const DEFAULT_WRITE_BUFFER_SIZE: ReadableSize = ReadableSize::mb(32);
/// Configuration for [MitoEngine](crate::engine::MitoEngine).
#[derive(Debug, Serialize, Deserialize, Clone)]
diff --git a/src/mito2/src/engine/create_test.rs b/src/mito2/src/engine/create_test.rs
index 2b9c8bae9e84..b5cd6615d0d1 100644
--- a/src/mito2/src/engine/create_test.rs
+++ b/src/mito2/src/engine/create_test.rs
@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::time::Duration;
+
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use store_api::region_engine::RegionEngine;
@@ -77,3 +79,25 @@ async fn test_engine_create_existing_region() {
"unexpected err: {err}"
);
}
+
+#[tokio::test]
+async fn test_engine_create_with_options() {
+ let mut env = TestEnv::new();
+ let engine = env.create_engine(MitoConfig::default()).await;
+
+ let region_id = RegionId::new(1, 1);
+ let request = CreateRequestBuilder::new()
+ .insert_option("ttl", "10d")
+ .build();
+ engine
+ .handle_request(region_id, RegionRequest::Create(request))
+ .await
+ .unwrap();
+
+ assert!(engine.is_region_exists(region_id));
+ let region = engine.get_region(region_id).unwrap();
+ assert_eq!(
+ Duration::from_secs(3600 * 24 * 10),
+ region.version().options.ttl.unwrap()
+ );
+}
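The "10d" value asserted above is a humantime-style duration string (the new RegionOptions parses `ttl` via humantime_serde). A minimal standalone sketch of the same conversion, assuming only the humantime crate as a dependency:

use std::time::Duration;

fn main() {
    // "10d" parses to ten days, matching the assertion in test_engine_create_with_options.
    let ttl: Duration = humantime::parse_duration("10d").unwrap();
    assert_eq!(ttl, Duration::from_secs(3600 * 24 * 10));
}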
diff --git a/src/mito2/src/engine/open_test.rs b/src/mito2/src/engine/open_test.rs
index edacb0e067ef..e8254cf71d05 100644
--- a/src/mito2/src/engine/open_test.rs
+++ b/src/mito2/src/engine/open_test.rs
@@ -13,12 +13,15 @@
// limitations under the License.
use std::collections::HashMap;
+use std::time::Duration;
use api::v1::Rows;
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use store_api::region_engine::RegionEngine;
-use store_api::region_request::{RegionOpenRequest, RegionPutRequest, RegionRequest};
+use store_api::region_request::{
+ RegionCloseRequest, RegionOpenRequest, RegionPutRequest, RegionRequest,
+};
use store_api::storage::RegionId;
use crate::config::MitoConfig;
@@ -125,3 +128,42 @@ async fn test_engine_open_readonly() {
engine.set_writable(region_id, true).unwrap();
put_rows(&engine, region_id, rows).await;
}
+
+#[tokio::test]
+async fn test_engine_region_open_with_options() {
+ let mut env = TestEnv::new();
+ let engine = env.create_engine(MitoConfig::default()).await;
+
+ let region_id = RegionId::new(1, 1);
+ let request = CreateRequestBuilder::new().build();
+ let region_dir = request.region_dir.clone();
+ engine
+ .handle_request(region_id, RegionRequest::Create(request))
+ .await
+ .unwrap();
+
+ // Close the region.
+ engine
+ .handle_request(region_id, RegionRequest::Close(RegionCloseRequest {}))
+ .await
+ .unwrap();
+
+ // Open the region again with options.
+ engine
+ .handle_request(
+ region_id,
+ RegionRequest::Open(RegionOpenRequest {
+ engine: String::new(),
+ region_dir,
+ options: HashMap::from([("ttl".to_string(), "4d".to_string())]),
+ }),
+ )
+ .await
+ .unwrap();
+
+ let region = engine.get_region(region_id).unwrap();
+ assert_eq!(
+ Duration::from_secs(3600 * 24 * 4),
+ region.version().options.ttl.unwrap()
+ );
+}
diff --git a/src/mito2/src/error.rs b/src/mito2/src/error.rs
index 7986db4c8a81..bcc10d175c5c 100644
--- a/src/mito2/src/error.rs
+++ b/src/mito2/src/error.rs
@@ -453,6 +453,12 @@ pub enum Error {
region_id: RegionId,
location: Location,
},
+
+ #[snafu(display("Invalid options, source: {}", source))]
+ JsonOptions {
+ source: serde_json::Error,
+ location: Location,
+ },
}
pub type Result<T, E = Error> = std::result::Result<T, E>;
@@ -522,6 +528,7 @@ impl ErrorExt for Error {
CompatReader { .. } => StatusCode::Unexpected,
InvalidRegionRequest { source, .. } => source.status_code(),
RegionReadonly { .. } => StatusCode::RegionReadonly,
+ JsonOptions { .. } => StatusCode::InvalidArguments,
}
}
diff --git a/src/mito2/src/lib.rs b/src/mito2/src/lib.rs
index a2a3252462ea..2feb69676ce8 100644
--- a/src/mito2/src/lib.rs
+++ b/src/mito2/src/lib.rs
@@ -36,7 +36,7 @@ pub mod memtable;
mod metrics;
#[allow(dead_code)]
pub mod read;
-mod region;
+pub mod region;
mod region_write_ctx;
#[allow(dead_code)]
pub mod request;
diff --git a/src/mito2/src/region.rs b/src/mito2/src/region.rs
index 44f82fd36790..b9b5aea3fa34 100644
--- a/src/mito2/src/region.rs
+++ b/src/mito2/src/region.rs
@@ -15,6 +15,7 @@
//! Mito region.
pub(crate) mod opener;
+pub mod options;
pub(crate) mod version;
use std::collections::HashMap;
diff --git a/src/mito2/src/region/opener.rs b/src/mito2/src/region/opener.rs
index 157eff1a4a5d..8b2b533c8b3d 100644
--- a/src/mito2/src/region/opener.rs
+++ b/src/mito2/src/region/opener.rs
@@ -14,6 +14,7 @@
//! Region opener.
+use std::collections::HashMap;
use std::sync::atomic::{AtomicBool, AtomicI64};
use std::sync::Arc;
@@ -32,6 +33,7 @@ use crate::config::MitoConfig;
use crate::error::{RegionCorruptedSnafu, RegionNotFoundSnafu, Result};
use crate::manifest::manager::{RegionManifestManager, RegionManifestOptions};
use crate::memtable::MemtableBuilderRef;
+use crate::region::options::RegionOptions;
use crate::region::version::{VersionBuilder, VersionControl, VersionControlRef};
use crate::region::MitoRegion;
use crate::region_write_ctx::RegionWriteCtx;
@@ -48,6 +50,7 @@ pub(crate) struct RegionOpener {
object_store: ObjectStore,
region_dir: String,
scheduler: SchedulerRef,
+ options: HashMap<String, String>,
}
impl RegionOpener {
@@ -65,6 +68,7 @@ impl RegionOpener {
object_store,
region_dir: String::new(),
scheduler,
+ options: HashMap::new(),
}
}
@@ -80,6 +84,12 @@ impl RegionOpener {
self
}
+ /// Sets options for the region.
+ pub(crate) fn options(mut self, value: HashMap<String, String>) -> Self {
+ self.options = value;
+ self
+ }
+
/// Writes region manifest and creates a new region.
///
/// # Panics
@@ -100,7 +110,10 @@ impl RegionOpener {
let mutable = self.memtable_builder.build(&metadata);
- let version = VersionBuilder::new(metadata, mutable).build();
+ let options = RegionOptions::try_from(&self.options)?;
+ let version = VersionBuilder::new(metadata, mutable)
+ .options(options)
+ .build();
let version_control = Arc::new(VersionControl::new(version));
let access_layer = Arc::new(AccessLayer::new(self.region_dir, self.object_store.clone()));
@@ -152,11 +165,13 @@ impl RegionOpener {
let access_layer = Arc::new(AccessLayer::new(self.region_dir, self.object_store.clone()));
let file_purger = Arc::new(LocalFilePurger::new(self.scheduler, access_layer.clone()));
let mutable = self.memtable_builder.build(&metadata);
+ let options = RegionOptions::try_from(&self.options)?;
let version = VersionBuilder::new(metadata, mutable)
.add_files(file_purger.clone(), manifest.files.values().cloned())
.flushed_entry_id(manifest.flushed_entry_id)
.flushed_sequence(manifest.flushed_sequence)
.truncated_entry_id(manifest.truncated_entry_id)
+ .options(options)
.build();
let flushed_entry_id = version.flushed_entry_id;
let version_control = Arc::new(VersionControl::new(version));
diff --git a/src/mito2/src/region/options.rs b/src/mito2/src/region/options.rs
new file mode 100644
index 000000000000..c8ef80ddf513
--- /dev/null
+++ b/src/mito2/src/region/options.rs
@@ -0,0 +1,237 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Options for a region.
+
+use std::collections::HashMap;
+use std::time::Duration;
+
+use serde::Deserialize;
+use serde_json::Value;
+use serde_with::{serde_as, with_prefix, DisplayFromStr};
+use snafu::ResultExt;
+
+use crate::error::{Error, JsonOptionsSnafu, Result};
+
+/// Options that affect the entire region.
+///
+/// Users need to specify the options while creating/opening a region.
+#[derive(Debug, Default, Clone, PartialEq, Eq, Deserialize)]
+#[serde(default)]
+pub struct RegionOptions {
+ /// Region SST files TTL.
+ #[serde(with = "humantime_serde")]
+ pub ttl: Option<Duration>,
+ /// Compaction options.
+ pub compaction: CompactionOptions,
+}
+
+impl TryFrom<&HashMap<String, String>> for RegionOptions {
+ type Error = Error;
+
+ fn try_from(options_map: &HashMap<String, String>) -> Result<Self> {
+ let value = options_map_to_value(options_map);
+ let json = serde_json::to_string(&value).context(JsonOptionsSnafu)?;
+
+ // #[serde(flatten)] doesn't work with #[serde(default)] so we need to parse
+ // each field manually instead of using #[serde(flatten)] for `compaction`.
+ // See https://github.com/serde-rs/serde/issues/1626
+ let options: RegionOptionsWithoutEnum =
+ serde_json::from_str(&json).context(JsonOptionsSnafu)?;
+ let compaction: CompactionOptions = serde_json::from_str(&json).unwrap_or_default();
+
+ Ok(RegionOptions {
+ ttl: options.ttl,
+ compaction,
+ })
+ }
+}
+
+/// Options for compactions
+#[derive(Debug, Clone, PartialEq, Eq, Deserialize)]
+#[serde(tag = "compaction.type")]
+#[serde(rename_all = "lowercase")]
+pub enum CompactionOptions {
+ /// Time window compaction strategy.
+ #[serde(with = "prefix_twcs")]
+ Twcs(TwcsOptions),
+}
+
+impl Default for CompactionOptions {
+ fn default() -> Self {
+ Self::Twcs(TwcsOptions::default())
+ }
+}
+
+/// Time window compaction options.
+#[serde_as]
+#[derive(Debug, Clone, PartialEq, Eq, Deserialize)]
+#[serde(default)]
+pub struct TwcsOptions {
+ /// Max num of files that can be kept in active writing time window.
+ #[serde_as(as = "DisplayFromStr")]
+ pub max_active_window_files: usize,
+ /// Max num of files that can be kept in inactive time window.
+ #[serde_as(as = "DisplayFromStr")]
+ pub max_inactive_window_files: usize,
+ /// Compaction time window defined when creating tables.
+ #[serde(with = "humantime_serde")]
+ pub time_window: Option<Duration>,
+}
+
+with_prefix!(prefix_twcs "compaction.twcs.");
+
+impl TwcsOptions {
+ /// Returns time window in second resolution.
+ pub fn time_window_seconds(&self) -> Option<i64> {
+ self.time_window.and_then(|window| {
+ let window_secs = window.as_secs();
+ if window_secs == 0 {
+ None
+ } else {
+ window_secs.try_into().ok()
+ }
+ })
+ }
+}
+
+impl Default for TwcsOptions {
+ fn default() -> Self {
+ Self {
+ max_active_window_files: 4,
+ max_inactive_window_files: 1,
+ time_window: None,
+ }
+ }
+}
+
+/// We need to define a new struct without enum fields as `#[serde(default)]` does not
+/// support external tagging.
+#[derive(Debug, Deserialize)]
+#[serde(default)]
+struct RegionOptionsWithoutEnum {
+ /// Region SST files TTL.
+ #[serde(with = "humantime_serde")]
+ ttl: Option<Duration>,
+}
+
+impl Default for RegionOptionsWithoutEnum {
+ fn default() -> Self {
+ let options = RegionOptions::default();
+ RegionOptionsWithoutEnum { ttl: options.ttl }
+ }
+}
+
+/// Converts the `options` map to a JSON object.
+///
+/// Converts all key-value pairs to lowercase and replaces "null" strings with JSON `null` values.
+fn options_map_to_value(options: &HashMap<String, String>) -> Value {
+ let map = options
+ .iter()
+ .map(|(key, value)| {
+ let (key, value) = (key.to_lowercase(), value.to_lowercase());
+
+ if value == "null" {
+ (key, Value::Null)
+ } else {
+ (key, Value::from(value))
+ }
+ })
+ .collect();
+ Value::Object(map)
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ fn make_map(options: &[(&str, &str)]) -> HashMap<String, String> {
+ options
+ .iter()
+ .map(|(k, v)| (k.to_string(), v.to_string()))
+ .collect()
+ }
+
+ #[test]
+ fn test_empty_region_options() {
+ let map = make_map(&[]);
+ let options = RegionOptions::try_from(&map).unwrap();
+ assert_eq!(RegionOptions::default(), options);
+ }
+
+ #[test]
+ fn test_with_ttl() {
+ let map = make_map(&[("ttl", "7d")]);
+ let options = RegionOptions::try_from(&map).unwrap();
+ let expect = RegionOptions {
+ ttl: Some(Duration::from_secs(3600 * 24 * 7)),
+ ..Default::default()
+ };
+ assert_eq!(expect, options);
+ }
+
+ #[test]
+ fn test_without_compaction_type() {
+ // If `compaction.type` is not provided, we ignore all compaction
+ // related options. Serde does not support deserializing an enum
+ // without knowing its type.
+ let map = make_map(&[
+ ("compaction.twcs.max_active_window_files", "8"),
+ ("compaction.twcs.time_window", "2h"),
+ ]);
+ let options = RegionOptions::try_from(&map).unwrap();
+ let expect = RegionOptions::default();
+ assert_eq!(expect, options);
+ }
+
+ #[test]
+ fn test_with_compaction_type() {
+ let map = make_map(&[
+ ("compaction.twcs.max_active_window_files", "8"),
+ ("compaction.twcs.time_window", "2h"),
+ ("compaction.type", "twcs"),
+ ]);
+ let options = RegionOptions::try_from(&map).unwrap();
+ let expect = RegionOptions {
+ compaction: CompactionOptions::Twcs(TwcsOptions {
+ max_active_window_files: 8,
+ time_window: Some(Duration::from_secs(3600 * 2)),
+ ..Default::default()
+ }),
+ ..Default::default()
+ };
+ assert_eq!(expect, options);
+ }
+
+ #[test]
+ fn test_with_all() {
+ let map = make_map(&[
+ ("ttl", "7d"),
+ ("compaction.twcs.max_active_window_files", "8"),
+ ("compaction.twcs.max_inactive_window_files", "2"),
+ ("compaction.twcs.time_window", "2h"),
+ ("compaction.type", "twcs"),
+ ]);
+ let options = RegionOptions::try_from(&map).unwrap();
+ let expect = RegionOptions {
+ ttl: Some(Duration::from_secs(3600 * 24 * 7)),
+ compaction: CompactionOptions::Twcs(TwcsOptions {
+ max_active_window_files: 8,
+ max_inactive_window_files: 2,
+ time_window: Some(Duration::from_secs(3600 * 2)),
+ }),
+ };
+ assert_eq!(expect, options);
+ }
+}
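The option map above is turned into RegionOptions through serde_with's prefix/tagged-enum machinery. A minimal standalone sketch of that parsing path, assuming serde, serde_json, and serde_with as dependencies; `Compaction` and `Twcs` are illustrative stand-ins for CompactionOptions/TwcsOptions:

use std::collections::HashMap;

use serde::Deserialize;
use serde_with::{serde_as, with_prefix, DisplayFromStr};

// Same trick as `prefix_twcs` in options.rs: strip the "compaction.twcs." key prefix.
with_prefix!(prefix_twcs "compaction.twcs.");

#[derive(Debug, Deserialize, PartialEq)]
#[serde(tag = "compaction.type")]
#[serde(rename_all = "lowercase")]
enum Compaction {
    #[serde(with = "prefix_twcs")]
    Twcs(Twcs),
}

#[serde_as]
#[derive(Debug, Deserialize, PartialEq)]
struct Twcs {
    // Option values arrive as strings ("8"), hence DisplayFromStr.
    #[serde_as(as = "DisplayFromStr")]
    max_active_window_files: usize,
}

fn main() {
    let map = HashMap::from([
        ("compaction.type".to_string(), "twcs".to_string()),
        ("compaction.twcs.max_active_window_files".to_string(), "8".to_string()),
    ]);
    // Mirrors options_map_to_value + serde_json::from_str in RegionOptions::try_from.
    let json = serde_json::to_string(&map).unwrap();
    let parsed: Compaction = serde_json::from_str(&json).unwrap();
    assert_eq!(parsed, Compaction::Twcs(Twcs { max_active_window_files: 8 }));
}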
diff --git a/src/mito2/src/region/version.rs b/src/mito2/src/region/version.rs
index 87cc71bb29be..e88fafedaf08 100644
--- a/src/mito2/src/region/version.rs
+++ b/src/mito2/src/region/version.rs
@@ -31,6 +31,7 @@ use store_api::storage::SequenceNumber;
use crate::manifest::action::RegionEdit;
use crate::memtable::version::{MemtableVersion, MemtableVersionRef};
use crate::memtable::{MemtableBuilderRef, MemtableId, MemtableRef};
+use crate::region::options::RegionOptions;
use crate::sst::file::FileMeta;
use crate::sst::file_purger::FilePurgerRef;
use crate::sst::version::{SstVersion, SstVersionRef};
@@ -204,7 +205,8 @@ pub(crate) struct Version {
///
/// Used to check if it is a flush task during the truncating table.
pub(crate) truncated_entry_id: Option<EntryId>,
- // TODO(yingwen): RegionOptions.
+ /// Options of the region.
+ pub(crate) options: RegionOptions,
}
pub(crate) type VersionRef = Arc<Version>;
@@ -217,6 +219,7 @@ pub(crate) struct VersionBuilder {
flushed_entry_id: EntryId,
flushed_sequence: SequenceNumber,
truncated_entry_id: Option<EntryId>,
+ options: RegionOptions,
}
impl VersionBuilder {
@@ -229,6 +232,7 @@ impl VersionBuilder {
flushed_entry_id: 0,
flushed_sequence: 0,
truncated_entry_id: None,
+ options: RegionOptions::default(),
}
}
@@ -241,6 +245,7 @@ impl VersionBuilder {
flushed_entry_id: version.flushed_entry_id,
flushed_sequence: version.flushed_sequence,
truncated_entry_id: version.truncated_entry_id,
+ options: version.options.clone(),
}
}
@@ -274,6 +279,12 @@ impl VersionBuilder {
self
}
+ /// Sets options.
+ pub(crate) fn options(mut self, options: RegionOptions) -> Self {
+ self.options = options;
+ self
+ }
+
/// Apply edit to the builder.
pub(crate) fn apply_edit(mut self, edit: RegionEdit, file_purger: FilePurgerRef) -> Self {
if let Some(entry_id) = edit.flushed_entry_id {
@@ -324,6 +335,7 @@ impl VersionBuilder {
flushed_entry_id: self.flushed_entry_id,
flushed_sequence: self.flushed_sequence,
truncated_entry_id: self.truncated_entry_id,
+ options: self.options,
}
}
}
diff --git a/src/mito2/src/request.rs b/src/mito2/src/request.rs
index 1fb24446909a..1fbcbc962338 100644
--- a/src/mito2/src/request.rs
+++ b/src/mito2/src/request.rs
@@ -16,14 +16,12 @@
use std::collections::HashMap;
use std::sync::Arc;
-use std::time::Duration;
use api::helper::{
is_column_type_value_eq, is_semantic_type_eq, proto_value_type, to_column_data_type,
to_proto_value,
};
use api::v1::{ColumnDataType, ColumnSchema, OpType, Rows, SemanticType, Value};
-use common_base::readable_size::ReadableSize;
use common_query::Output;
use common_query::Output::AffectedRows;
use common_telemetry::tracing::log::info;
@@ -37,10 +35,9 @@ use store_api::region_request::{
RegionAlterRequest, RegionCloseRequest, RegionCompactRequest, RegionCreateRequest,
RegionDropRequest, RegionFlushRequest, RegionOpenRequest, RegionRequest, RegionTruncateRequest,
};
-use store_api::storage::{CompactionStrategy, RegionId, SequenceNumber};
+use store_api::storage::{RegionId, SequenceNumber};
use tokio::sync::oneshot::{self, Receiver, Sender};
-use crate::config::DEFAULT_WRITE_BUFFER_SIZE;
use crate::error::{
CompactRegionSnafu, CreateDefaultSnafu, Error, FillDefaultSnafu, FlushRegionSnafu,
InvalidRequestSnafu, Result,
@@ -50,29 +47,6 @@ use crate::sst::file::FileMeta;
use crate::sst::file_purger::{FilePurgerRef, PurgeRequest};
use crate::wal::EntryId;
-/// Options that affect the entire region.
-///
-/// Users need to specify the options while creating/opening a region.
-#[derive(Debug)]
-pub struct RegionOptions {
- /// Region memtable max size in bytes.
- pub write_buffer_size: Option<ReadableSize>,
- /// Region SST files TTL.
- pub ttl: Option<Duration>,
- /// Compaction strategy.
- pub compaction_strategy: CompactionStrategy,
-}
-
-impl Default for RegionOptions {
- fn default() -> Self {
- RegionOptions {
- write_buffer_size: Some(DEFAULT_WRITE_BUFFER_SIZE),
- ttl: None,
- compaction_strategy: CompactionStrategy::default(),
- }
- }
-}
-
/// Request to write a region.
#[derive(Debug)]
pub struct WriteRequest {
diff --git a/src/mito2/src/test_util.rs b/src/mito2/src/test_util.rs
index 43195da18f82..2d81f0cf63b7 100644
--- a/src/mito2/src/test_util.rs
+++ b/src/mito2/src/test_util.rs
@@ -209,6 +209,7 @@ pub struct CreateRequestBuilder {
tag_num: usize,
field_num: usize,
create_if_not_exists: bool,
+ options: HashMap<String, String>,
}
impl Default for CreateRequestBuilder {
@@ -218,6 +219,7 @@ impl Default for CreateRequestBuilder {
tag_num: 1,
field_num: 1,
create_if_not_exists: false,
+ options: HashMap::new(),
}
}
}
@@ -247,6 +249,11 @@ impl CreateRequestBuilder {
self
}
+ pub fn insert_option(mut self, key: &str, value: &str) -> Self {
+ self.options.insert(key.to_string(), value.to_string());
+ self
+ }
+
pub fn build(&self) -> RegionCreateRequest {
let mut column_id = 0;
let mut column_metadatas = Vec::with_capacity(self.tag_num + self.field_num + 1);
@@ -292,7 +299,7 @@ impl CreateRequestBuilder {
column_metadatas,
primary_key,
create_if_not_exists: self.create_if_not_exists,
- options: HashMap::default(),
+ options: self.options.clone(),
region_dir: self.region_dir.clone(),
}
}
diff --git a/src/mito2/src/worker/handle_create.rs b/src/mito2/src/worker/handle_create.rs
index 31fee02008ed..6db800a3e604 100644
--- a/src/mito2/src/worker/handle_create.rs
+++ b/src/mito2/src/worker/handle_create.rs
@@ -62,6 +62,7 @@ impl<S: LogStore> RegionWorkerLoop<S> {
)
.metadata(metadata)
.region_dir(&request.region_dir)
+ .options(request.options)
.create(&self.config)
.await?;
diff --git a/src/mito2/src/worker/handle_open.rs b/src/mito2/src/worker/handle_open.rs
index 5614178c0841..e6de10b66933 100644
--- a/src/mito2/src/worker/handle_open.rs
+++ b/src/mito2/src/worker/handle_open.rs
@@ -62,6 +62,7 @@ impl<S: LogStore> RegionWorkerLoop<S> {
self.scheduler.clone(),
)
.region_dir(&request.region_dir)
+ .options(request.options)
.open(&self.config, &self.wal)
.await?;
diff --git a/src/operator/src/statement/ddl.rs b/src/operator/src/statement/ddl.rs
index 460f971df53f..768f03a35878 100644
--- a/src/operator/src/statement/ddl.rs
+++ b/src/operator/src/statement/ddl.rs
@@ -446,7 +446,6 @@ fn create_table_info(
engine: create_table.engine.clone(),
next_column_id: column_schemas.len() as u32,
region_numbers: vec![],
- engine_options: HashMap::new(),
options: table_options,
created_on: DateTime::default(),
partition_key_indices,
diff --git a/src/query/src/sql/show_create_table.rs b/src/query/src/sql/show_create_table.rs
index f67623ce7e8e..97d8aba4fbbf 100644
--- a/src/query/src/sql/show_create_table.rs
+++ b/src/query/src/sql/show_create_table.rs
@@ -228,7 +228,6 @@ mod tests {
.value_indices(vec![2, 3])
.engine("mito".to_string())
.next_column_id(0)
- .engine_options(Default::default())
.options(Default::default())
.created_on(Default::default())
.region_numbers(regions)
@@ -297,7 +296,6 @@ WITH(
.primary_key_indices(vec![])
.engine("file".to_string())
.next_column_id(0)
- .engine_options(Default::default())
.options(options)
.created_on(Default::default())
.build()
diff --git a/src/table/src/metadata.rs b/src/table/src/metadata.rs
index 0f8880a14ce0..0a0c598c5625 100644
--- a/src/table/src/metadata.rs
+++ b/src/table/src/metadata.rs
@@ -107,9 +107,6 @@ pub struct TableMeta {
#[builder(default, setter(into))]
pub region_numbers: Vec<u32>,
pub next_column_id: ColumnId,
- /// Options for table engine.
- #[builder(default)]
- pub engine_options: HashMap<String, String>,
/// Table options.
#[builder(default)]
pub options: TableOptions,
@@ -229,7 +226,6 @@ impl TableMeta {
let mut builder = TableMetaBuilder::default();
let _ = builder
.engine(&self.engine)
- .engine_options(self.engine_options.clone())
.options(self.options.clone())
.created_on(self.created_on)
.region_numbers(self.region_numbers.clone())
@@ -531,7 +527,6 @@ pub struct RawTableMeta {
pub engine: String,
pub next_column_id: ColumnId,
pub region_numbers: Vec<u32>,
- pub engine_options: HashMap<String, String>,
pub options: TableOptions,
pub created_on: DateTime<Utc>,
#[serde(default)]
@@ -547,7 +542,6 @@ impl From<TableMeta> for RawTableMeta {
engine: meta.engine,
next_column_id: meta.next_column_id,
region_numbers: meta.region_numbers,
- engine_options: meta.engine_options,
options: meta.options,
created_on: meta.created_on,
partition_key_indices: meta.partition_key_indices,
@@ -566,7 +560,6 @@ impl TryFrom<RawTableMeta> for TableMeta {
engine: raw.engine,
region_numbers: raw.region_numbers,
next_column_id: raw.next_column_id,
- engine_options: raw.engine_options,
options: raw.options,
created_on: raw.created_on,
partition_key_indices: raw.partition_key_indices,
diff --git a/src/table/src/test_util/memtable.rs b/src/table/src/test_util/memtable.rs
index 35dbdf3d30d8..cb36bac2c77c 100644
--- a/src/table/src/test_util/memtable.rs
+++ b/src/table/src/test_util/memtable.rs
@@ -73,7 +73,6 @@ impl MemTable {
.value_indices(vec![])
.engine("mito".to_string())
.next_column_id(0)
- .engine_options(Default::default())
.options(Default::default())
.created_on(Default::default())
.region_numbers(regions)
diff --git a/src/table/src/test_util/table_info.rs b/src/table/src/test_util/table_info.rs
index ae061ccb02ae..7ddf6c019cca 100644
--- a/src/table/src/test_util/table_info.rs
+++ b/src/table/src/test_util/table_info.rs
@@ -29,7 +29,6 @@ pub fn test_table_info(
.value_indices(vec![])
.engine("mito".to_string())
.next_column_id(0)
- .engine_options(Default::default())
.options(Default::default())
.created_on(Default::default())
.region_numbers(vec![1])
|
feat
|
make use of options in RegionCreate/OpenRequest (#2436)
|
e67b0eb259d63d3abf1b09887fb0486aba5f9540
|
2022-09-15 19:09:05
|
Ning Sun
|
feat: Initial support of postgresql wire protocol (#229)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index b0cd00bb901b..dafdb2e3b6f5 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -42,28 +42,37 @@ dependencies = [
[[package]]
name = "aho-corasick"
-version = "0.7.18"
+version = "0.7.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f"
+checksum = "b4f55bd91a0978cbfd91c457a164bab8b4001c833b7f323132c0a4e1922dd44e"
dependencies = [
"memchr",
]
[[package]]
name = "alloc-no-stdlib"
-version = "2.0.3"
+version = "2.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "35ef4730490ad1c4eae5c4325b2a95f521d023e5c885853ff7aca0a6a1631db3"
+checksum = "cc7bb162ec39d46ab1ca8c77bf72e890535becd1751bb45f64c597edb4c8c6b3"
[[package]]
name = "alloc-stdlib"
-version = "0.2.1"
+version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "697ed7edc0f1711de49ce108c541623a0af97c6c60b2f6e2b65229847ac843c2"
+checksum = "94fb8275041c72129eb51b7d0322c29b8387a0386127718b096429201a5d6ece"
dependencies = [
"alloc-no-stdlib",
]
+[[package]]
+name = "android_system_properties"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311"
+dependencies = [
+ "libc",
+]
+
[[package]]
name = "ansi_term"
version = "0.12.1"
@@ -75,18 +84,18 @@ dependencies = [
[[package]]
name = "anyhow"
-version = "1.0.57"
+version = "1.0.65"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "08f9b8508dccb7687a1d6c4ce66b2b0ecef467c94667de27d8d7fe1f8d2a9cdc"
+checksum = "98161a4e3e2184da77bb14f02184cdd111e83bbbcc9979dfee3c44b9a85f5602"
[[package]]
name = "api"
version = "0.1.0"
dependencies = [
"datatypes",
- "prost 0.11.0",
+ "prost",
"snafu",
- "tonic 0.8.0",
+ "tonic",
"tonic-build",
]
@@ -107,9 +116,9 @@ checksum = "b3f9eb837c6a783fbf002e3e5cc7925a3aa6893d6d42f9169517528983777590"
[[package]]
name = "arc-swap"
-version = "1.5.0"
+version = "1.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c5d78ce20460b82d3fa150275ed9d55e21064fc7951177baacf86a145c4a4b1f"
+checksum = "983cd8b9d4b02a6dc6ffa557262eb5858a27a0038ffffe21a0f133eaa819a164"
[[package]]
name = "array-init-cursor"
@@ -179,9 +188,9 @@ checksum = "bbf56136a5198c7b01a49e3afcbef6cf84597273d298f54432926024107b0109"
[[package]]
name = "async-channel"
-version = "1.6.1"
+version = "1.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2114d64672151c0c5eaa5e131ec84a74f06e1e559830dabba01ca30605d66319"
+checksum = "e14485364214912d3b19cc3435dde4df66065127f05fa0d75c712f36f12c2f28"
dependencies = [
"concurrent-queue",
"event-listener",
@@ -203,9 +212,9 @@ dependencies = [
[[package]]
name = "async-compression"
-version = "0.3.12"
+version = "0.3.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f2bf394cfbbe876f0ac67b13b6ca819f9c9f2fb9ec67223cceb1555fbab1c31a"
+checksum = "345fd392ab01f746c717b1357165b76f0b67a60192007b234058c9045fdcf695"
dependencies = [
"brotli",
"flate2",
@@ -238,9 +247,9 @@ dependencies = [
[[package]]
name = "async-trait"
-version = "0.1.53"
+version = "0.1.57"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ed6aa3524a2dfcf9fe180c51eae2b58738348d819517ceadf95789c51fff7600"
+checksum = "76464446b8bc32758d7e88ee1a804d9914cd9b1cb264c029899680b0be29826f"
dependencies = [
"proc-macro2",
"quote",
@@ -272,9 +281,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
[[package]]
name = "axum"
-version = "0.5.4"
+version = "0.5.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f4af7447fc1214c1f3a1ace861d0216a6c8bb13965b64bbad9650f375b67689a"
+checksum = "c9e3356844c4d6a6d6467b8da2cffb4a2820be256f50a3a386c9d152bab31043"
dependencies = [
"async-trait",
"axum-core",
@@ -284,7 +293,7 @@ dependencies = [
"http",
"http-body",
"hyper",
- "itoa 1.0.1",
+ "itoa 1.0.3",
"matchit",
"memchr",
"mime",
@@ -303,9 +312,9 @@ dependencies = [
[[package]]
name = "axum-core"
-version = "0.2.4"
+version = "0.2.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "da31c0ed7b4690e2c78fe4b880d21cd7db04a346ebc658b4270251b695437f17"
+checksum = "d9f0c0a60006f2a293d82d571f635042a72edf927539b7685bd62d361963839b"
dependencies = [
"async-trait",
"bytes",
@@ -313,13 +322,15 @@ dependencies = [
"http",
"http-body",
"mime",
+ "tower-layer",
+ "tower-service",
]
[[package]]
name = "axum-macros"
-version = "0.2.0"
+version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "63bcb0d395bc5dd286e61aada9fc48201eb70e232f006f9d6c330c9db2f256f5"
+checksum = "6293dae2ec708e679da6736e857cf8532886ef258e92930f38279c12641628b8"
dependencies = [
"heck 0.4.0",
"proc-macro2",
@@ -329,9 +340,9 @@ dependencies = [
[[package]]
name = "axum-test-helper"
-version = "0.1.0"
+version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1063a76d89cb97b2dc78a61c3a45a9867ba586600bf185d2ec9fa8858fdafbe0"
+checksum = "7b5f0c689f3a3cb707ea097813153343b74dcf73b3e46dedb25be91a24050913"
dependencies = [
"axum",
"bytes",
@@ -359,9 +370,9 @@ dependencies = [
[[package]]
name = "backtrace"
-version = "0.3.65"
+version = "0.3.66"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "11a17d453482a265fd5f8479f2a3f405566e6ca627837aaddb85af8b1ab8ef61"
+checksum = "cab84319d616cfb654d03394f38ab7e6f0919e181b1b57e1fd15e7fb4077d9a7"
dependencies = [
"addr2line",
"cc",
@@ -469,9 +480,9 @@ dependencies = [
[[package]]
name = "block-buffer"
-version = "0.10.2"
+version = "0.10.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0bf7fe51849ea569fd452f37822f606a5cabb684dc918707a0193fd4664ff324"
+checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e"
dependencies = [
"generic-array",
]
@@ -511,24 +522,24 @@ dependencies = [
[[package]]
name = "bumpalo"
-version = "3.9.1"
+version = "3.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a4a45a46ab1f2412e53d3a0ade76ffad2025804294569aae387231a0cd6e0899"
+checksum = "c1ad822118d20d2c234f427000d5acc36eabe1e29a348c89b63dd60b13f28e5d"
[[package]]
name = "bytemuck"
-version = "1.9.1"
+version = "1.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cdead85bdec19c194affaeeb670c0e41fe23de31459efd1c174d049269cf02cc"
+checksum = "2f5715e491b5a1598fc2bef5a606847b5dc1d48ea625bd3c02c00de8285591da"
dependencies = [
"bytemuck_derive",
]
[[package]]
name = "bytemuck_derive"
-version = "1.1.0"
+version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "562e382481975bc61d11275ac5e62a19abd00b0547d99516a415336f183dcd0e"
+checksum = "1b9e1f5fa78f69496407a27ae9ed989e3c3b072310286f5ef385525e4cbc24a9"
dependencies = [
"proc-macro2",
"quote",
@@ -543,9 +554,9 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610"
[[package]]
name = "bytes"
-version = "1.1.0"
+version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8"
+checksum = "ec8a7b6a70fde80372154c65702f00a0f56f3e1c36abbc6c440484be248856db"
dependencies = [
"serde",
]
@@ -568,12 +579,9 @@ dependencies = [
[[package]]
name = "cast"
-version = "0.2.7"
+version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4c24dab4283a142afa2fdca129b80ad2c6284e073930f964c3a1293c225ee39a"
-dependencies = [
- "rustc_version",
-]
+checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
[[package]]
name = "castaway"
@@ -629,12 +637,12 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "chrono"
-version = "0.4.19"
+version = "0.4.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73"
+checksum = "bfd4d1b31faaa3a89d7934dbded3111da0d2ef28e3ebccdb4f0179f5929d1ef1"
dependencies = [
+ "iana-time-zone",
"js-sys",
- "libc",
"num-integer",
"num-traits",
"serde",
@@ -645,24 +653,24 @@ dependencies = [
[[package]]
name = "chrono-tz"
-version = "0.6.1"
+version = "0.6.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "58549f1842da3080ce63002102d5bc954c7bc843d4f47818e642abdc36253552"
+checksum = "29c39203181991a7dd4343b8005bd804e7a9a37afb8ac070e43771e8c820bbde"
dependencies = [
"chrono",
"chrono-tz-build",
- "phf",
+ "phf 0.11.1",
]
[[package]]
name = "chrono-tz-build"
-version = "0.0.2"
+version = "0.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "db058d493fb2f65f41861bfed7e3fe6335264a9f0f92710cab5bdf01fef09069"
+checksum = "6f509c3a87b33437b05e2458750a0700e5bdd6956176773e6c7d6dd15a283a0c"
dependencies = [
"parse-zoneinfo",
- "phf",
- "phf_codegen",
+ "phf 0.11.1",
+ "phf_codegen 0.11.1",
]
[[package]]
@@ -693,16 +701,16 @@ dependencies = [
[[package]]
name = "clap"
-version = "3.1.17"
+version = "3.2.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "47582c09be7c8b32c0ab3a6181825ababb713fde6fff20fc573a3870dd45c6a0"
+checksum = "1ed5341b2301a26ab80be5cbdced622e80ed808483c52e45e3310a877d3b37d7"
dependencies = [
"atty",
"bitflags",
"clap_derive",
"clap_lex",
"indexmap",
- "lazy_static",
+ "once_cell",
"strsim 0.10.0",
"termcolor",
"textwrap 0.15.0",
@@ -710,9 +718,9 @@ dependencies = [
[[package]]
name = "clap_derive"
-version = "3.1.7"
+version = "3.2.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a3aab4734e083b809aaf5794e14e756d1c798d2c69c7f7de7a09a2f5214993c1"
+checksum = "ea0c8bce528c4be4da13ea6fead8965e95b6073585a2f05204bd8f4119f82a65"
dependencies = [
"heck 0.4.0",
"proc-macro-error",
@@ -723,9 +731,9 @@ dependencies = [
[[package]]
name = "clap_lex"
-version = "0.2.0"
+version = "0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a37c35f1112dad5e6e0b1adaff798507497a18fceeb30cceb3bae7d1427b9213"
+checksum = "2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5"
dependencies = [
"os_str_bytes",
]
@@ -748,7 +756,7 @@ dependencies = [
"datatypes",
"snafu",
"tokio",
- "tonic 0.8.0",
+ "tonic",
"tracing",
"tracing-subscriber",
]
@@ -777,7 +785,7 @@ dependencies = [
name = "cmd"
version = "0.1.0"
dependencies = [
- "clap 3.1.17",
+ "clap 3.2.21",
"common-error",
"common-telemetry",
"datanode",
@@ -927,7 +935,7 @@ dependencies = [
"once_cell",
"opentelemetry",
"opentelemetry-jaeger",
- "parking_lot 0.12.0",
+ "parking_lot 0.12.1",
"tracing",
"tracing-appender",
"tracing-bunyan-formatter",
@@ -950,9 +958,9 @@ dependencies = [
[[package]]
name = "concurrent-queue"
-version = "1.2.2"
+version = "1.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "30ed07550be01594c6026cff2a1d7fe9c8f683caa798e12b68694ac9e88286a3"
+checksum = "af4780a44ab5696ea9e28294517f1fffb421a83a25af521333c838635509db9c"
dependencies = [
"cache-padded",
]
@@ -973,21 +981,21 @@ dependencies = [
[[package]]
name = "console-api"
-version = "0.2.0"
+version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "24cb05777feccbb2642d4f2df44d0505601a2cd88ca517d8c913f263a5a8dc8b"
+checksum = "e57ff02e8ad8e06ab9731d5dc72dc23bef9200778eae1a89d555d8c42e5d4a86"
dependencies = [
- "prost 0.10.4",
- "prost-types 0.10.1",
- "tonic 0.7.2",
+ "prost",
+ "prost-types",
+ "tonic",
"tracing-core",
]
[[package]]
name = "console-subscriber"
-version = "0.1.5"
+version = "0.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8f21a16ee925aa9d2bad2e296beffd6c5b1bfaad50af509d305b8e7f23af20fb"
+checksum = "22a3a81dfaf6b66bce5d159eddae701e3a002f194d378cbf7be5f053c281d9be"
dependencies = [
"console-api",
"crossbeam-channel",
@@ -995,13 +1003,13 @@ dependencies = [
"futures",
"hdrhistogram",
"humantime",
- "prost-types 0.10.1",
+ "prost-types",
"serde",
"serde_json",
"thread_local",
"tokio",
"tokio-stream",
- "tonic 0.7.2",
+ "tonic",
"tracing",
"tracing-core",
"tracing-subscriber",
@@ -1031,9 +1039,9 @@ checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc"
[[package]]
name = "cpufeatures"
-version = "0.2.2"
+version = "0.2.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "59a6001667ab124aebae2a495118e11d30984c3a653e99d86d58971708cf5e4b"
+checksum = "28d997bd5e24a5928dd43e46dc529867e207907fe0b239c3477d924f7f2ca320"
dependencies = [
"libc",
]
@@ -1064,9 +1072,9 @@ dependencies = [
[[package]]
name = "criterion"
-version = "0.3.5"
+version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1604dafd25fba2fe2d5895a9da139f8dc9b319a5fe5354ca137cbbce4e178d10"
+checksum = "b01d6de93b2b6c65e17c634a26653a29d107b3c98c607c765bf38d041531cd8f"
dependencies = [
"atty",
"cast",
@@ -1090,9 +1098,9 @@ dependencies = [
[[package]]
name = "criterion-plot"
-version = "0.4.4"
+version = "0.4.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d00996de9f2f7559f7f4dc286073197f83e92256a59ed395f9aac01fe717da57"
+checksum = "2673cc8207403546f45f5fd319a974b1e6983ad1a3ee7e6041650013be041876"
dependencies = [
"cast",
"itertools",
@@ -1100,9 +1108,9 @@ dependencies = [
[[package]]
name = "crossbeam"
-version = "0.8.1"
+version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4ae5588f6b3c3cb05239e90bd110f257254aecd01e4635400391aeae07497845"
+checksum = "2801af0d36612ae591caa9568261fddce32ce6e08a7275ea334a06a4ad021a2c"
dependencies = [
"cfg-if",
"crossbeam-channel",
@@ -1114,9 +1122,9 @@ dependencies = [
[[package]]
name = "crossbeam-channel"
-version = "0.5.4"
+version = "0.5.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5aaa7bd5fb665c6864b5f963dd9097905c54125909c7aa94c9e18507cdbe6c53"
+checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521"
dependencies = [
"cfg-if",
"crossbeam-utils",
@@ -1124,9 +1132,9 @@ dependencies = [
[[package]]
name = "crossbeam-deque"
-version = "0.8.1"
+version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e"
+checksum = "715e8152b692bba2d374b53d4875445368fdf21a94751410af607a5ac677d1fc"
dependencies = [
"cfg-if",
"crossbeam-epoch",
@@ -1135,23 +1143,23 @@ dependencies = [
[[package]]
name = "crossbeam-epoch"
-version = "0.9.8"
+version = "0.9.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1145cf131a2c6ba0615079ab6a638f7e1973ac9c2634fcbeaaad6114246efe8c"
+checksum = "045ebe27666471bb549370b4b0b3e51b07f56325befa4284db65fc89c02511b1"
dependencies = [
"autocfg",
"cfg-if",
"crossbeam-utils",
- "lazy_static",
"memoffset",
+ "once_cell",
"scopeguard",
]
[[package]]
name = "crossbeam-queue"
-version = "0.3.5"
+version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1f25d8400f4a7a5778f0e4e52384a48cbd9b5c495d110786187fc750075277a2"
+checksum = "1cd42583b04998a5363558e5f9291ee5a5ff6b49944332103f251e7479a82aa7"
dependencies = [
"cfg-if",
"crossbeam-utils",
@@ -1159,12 +1167,12 @@ dependencies = [
[[package]]
name = "crossbeam-utils"
-version = "0.8.8"
+version = "0.8.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0bf124c720b7686e3c2663cf54062ab0f68a88af2fb6a030e87e30bf721fcb38"
+checksum = "51887d4adc7b564537b15adcfb307936f8075dfcd5f00dde9a9f1d29383682bc"
dependencies = [
"cfg-if",
- "lazy_static",
+ "once_cell",
]
[[package]]
@@ -1175,9 +1183,9 @@ checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7"
[[package]]
name = "crypto-common"
-version = "0.1.3"
+version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "57952ca27b5e3606ff4dd79b0020231aaf9d6aa76dc05fd30137538c50bd3ce8"
+checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3"
dependencies = [
"generic-array",
"typenum",
@@ -1207,9 +1215,9 @@ dependencies = [
[[package]]
name = "ctor"
-version = "0.1.22"
+version = "0.1.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f877be4f7c9f246b183111634f75baa039715e3f46ce860677d3b19a69fb229c"
+checksum = "cdffe87e1d521a10f9696f833fe502293ea446d7f256c06128293a4119bdf4cb"
dependencies = [
"quote",
"syn",
@@ -1217,9 +1225,9 @@ dependencies = [
[[package]]
name = "curl"
-version = "0.4.43"
+version = "0.4.44"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "37d855aeef205b43f65a5001e0997d81f8efca7badad4fad7d897aa7f0d0651f"
+checksum = "509bd11746c7ac09ebd19f0b17782eae80aadee26237658a6b4808afb5c11a22"
dependencies = [
"curl-sys",
"libc",
@@ -1232,9 +1240,9 @@ dependencies = [
[[package]]
name = "curl-sys"
-version = "0.4.55+curl-7.83.1"
+version = "0.4.56+curl-7.83.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "23734ec77368ec583c2e61dd3f0b0e5c98b93abe6d2a004ca06b91dd7e3e2762"
+checksum = "6093e169dd4de29e468fa649fbae11cdcd5551c81fe5bf1b0677adad7ef3d26f"
dependencies = [
"cc",
"libc",
@@ -1295,12 +1303,12 @@ dependencies = [
"datafusion-expr",
"datafusion-physical-expr",
"futures",
- "hashbrown 0.12.1",
+ "hashbrown",
"lazy_static",
"log",
"num_cpus",
"ordered-float 2.10.0",
- "parking_lot 0.12.0",
+ "parking_lot 0.12.1",
"parquet2",
"paste",
"pin-project-lite",
@@ -1346,7 +1354,7 @@ dependencies = [
"chrono",
"datafusion-common",
"datafusion-expr",
- "hashbrown 0.12.1",
+ "hashbrown",
"lazy_static",
"md-5",
"ordered-float 2.10.0",
@@ -1399,7 +1407,7 @@ dependencies = [
"tempdir",
"tokio",
"tokio-stream",
- "tonic 0.8.0",
+ "tonic",
"tower",
"tower-http",
]
@@ -1423,6 +1431,17 @@ dependencies = [
"snafu",
]
+[[package]]
+name = "derive-new"
+version = "0.5.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3418329ca0ad70234b9735dc4ceed10af4df60eff9c8e7b06cb5e520d92c3535"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
[[package]]
name = "derive_builder"
version = "0.11.2"
@@ -1520,9 +1539,9 @@ checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10"
[[package]]
name = "either"
-version = "1.6.1"
+version = "1.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457"
+checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797"
[[package]]
name = "encode_unicode"
@@ -1590,9 +1609,9 @@ dependencies = [
[[package]]
name = "event-listener"
-version = "2.5.2"
+version = "2.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "77f3309417938f28bf8228fcff79a4a37103981e3e186d2ccd19c74b38f4eb71"
+checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0"
[[package]]
name = "exitcode"
@@ -1600,6 +1619,12 @@ version = "1.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "de853764b47027c2e862a995c34978ffa63c1501f2e15f987ba11bd4f9bba193"
+[[package]]
+name = "fallible-iterator"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7"
+
[[package]]
name = "fallible-streaming-iterator"
version = "0.1.9"
@@ -1608,9 +1633,9 @@ checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a"
[[package]]
name = "fastrand"
-version = "1.7.0"
+version = "1.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c3fcf0cee53519c866c09b5de1f6c56ff9d647101f81c1964fa632e148896cdf"
+checksum = "a7a407cfaa3385c4ae6b23e84623d48c2798d06e3e6a1878f7f59f17b3f86499"
dependencies = [
"instant",
]
@@ -1628,19 +1653,17 @@ dependencies = [
[[package]]
name = "fixedbitset"
-version = "0.4.1"
+version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "279fb028e20b3c4c320317955b77c5e0c9701f05a1d309905d6fc702cdc5053e"
+checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80"
[[package]]
name = "flate2"
-version = "1.0.23"
+version = "1.0.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b39522e96686d38f4bc984b9198e3a0613264abaebaff2c5c918bfa6b6da09af"
+checksum = "f82b0f4c27ad9f8bfd1f3208d882da2b09c301bc1c828fd3a00d0216d2fbbff6"
dependencies = [
- "cfg-if",
"crc32fast",
- "libc",
"libz-sys",
"miniz_oxide",
]
@@ -1668,11 +1691,10 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"
[[package]]
name = "form_urlencoded"
-version = "1.0.1"
+version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5fc25a87fa4fd2094bffb06925852034d90a17f0d1e05197d4956d3555752191"
+checksum = "a9c384f161156f5260c24a097c56119f9be8c798586aecc13afbcbe7b7e26bf8"
dependencies = [
- "matches",
"percent-encoding",
]
@@ -1714,7 +1736,7 @@ dependencies = [
"sql",
"tempdir",
"tokio",
- "tonic 0.8.0",
+ "tonic",
"tower",
]
@@ -1796,9 +1818,9 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c"
[[package]]
name = "futures"
-version = "0.3.21"
+version = "0.3.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f73fe65f54d1e12b726f517d3e2135ca3125a437b6d998caf1962961f7172d9e"
+checksum = "7f21eda599937fba36daeb58a22e8f5cee2d14c4a17b5b7739c7c8e5e3b8230c"
dependencies = [
"futures-channel",
"futures-core",
@@ -1811,9 +1833,9 @@ dependencies = [
[[package]]
name = "futures-channel"
-version = "0.3.21"
+version = "0.3.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c3083ce4b914124575708913bca19bfe887522d6e2e6d0952943f5eac4a74010"
+checksum = "30bdd20c28fadd505d0fd6712cdfcb0d4b5648baf45faef7f852afb2399bb050"
dependencies = [
"futures-core",
"futures-sink",
@@ -1821,15 +1843,15 @@ dependencies = [
[[package]]
name = "futures-core"
-version = "0.3.21"
+version = "0.3.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0c09fd04b7e4073ac7156a9539b57a484a8ea920f79c7c675d05d289ab6110d3"
+checksum = "4e5aa3de05362c3fb88de6531e6296e85cde7739cccad4b9dfeeb7f6ebce56bf"
[[package]]
name = "futures-executor"
-version = "0.3.21"
+version = "0.3.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9420b90cfa29e327d0429f19be13e7ddb68fa1cccb09d65e5706b8c7a749b8a6"
+checksum = "9ff63c23854bee61b6e9cd331d523909f238fc7636290b96826e9cfa5faa00ab"
dependencies = [
"futures-core",
"futures-task",
@@ -1838,9 +1860,9 @@ dependencies = [
[[package]]
name = "futures-io"
-version = "0.3.21"
+version = "0.3.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fc4045962a5a5e935ee2fdedaa4e08284547402885ab326734432bed5d12966b"
+checksum = "bbf4d2a7a308fd4578637c0b17c7e1c7ba127b8f6ba00b29f717e9655d85eb68"
[[package]]
name = "futures-lite"
@@ -1859,9 +1881,9 @@ dependencies = [
[[package]]
name = "futures-macro"
-version = "0.3.21"
+version = "0.3.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "33c1e13800337f4d4d7a316bf45a567dbcb6ffe087f16424852d97e97a91f512"
+checksum = "42cd15d1c7456c04dbdf7e88bcd69760d74f3a798d6444e16974b505b0e62f17"
dependencies = [
"proc-macro2",
"quote",
@@ -1870,21 +1892,21 @@ dependencies = [
[[package]]
name = "futures-sink"
-version = "0.3.21"
+version = "0.3.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "21163e139fa306126e6eedaf49ecdb4588f939600f0b1e770f4205ee4b7fa868"
+checksum = "21b20ba5a92e727ba30e72834706623d94ac93a725410b6a6b6fbc1b07f7ba56"
[[package]]
name = "futures-task"
-version = "0.3.21"
+version = "0.3.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "57c66a976bf5909d801bbef33416c41372779507e7a6b3a5e25e4749c58f776a"
+checksum = "a6508c467c73851293f390476d4491cf4d227dbabcd4170f3bb6044959b294f1"
[[package]]
name = "futures-util"
-version = "0.3.21"
+version = "0.3.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d8b7abd5d659d9b90c8cba917f6ec750a74e2dc23902ef9cd4cc8c8b22e6036a"
+checksum = "44fb6cb1be61cc1d2e43b262516aafcf63b241cffdb1d3fa115f91d9c7b09c90"
dependencies = [
"futures-channel",
"futures-core",
@@ -1900,9 +1922,9 @@ dependencies = [
[[package]]
name = "generic-array"
-version = "0.14.5"
+version = "0.14.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fd48d33ec7f05fbfa152300fdad764757cbded343c1aa1cff2fbaf4134851803"
+checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9"
dependencies = [
"typenum",
"version_check",
@@ -1920,22 +1942,34 @@ dependencies = [
[[package]]
name = "getrandom"
-version = "0.2.6"
+version = "0.2.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9be70c98951c83b8d2f8f60d7065fa6d5146873094452a1008da8c2f1e4205ad"
+checksum = "4eb1a864a501629691edf6c15a593b7a51eebaa1e8468e9ddc623de7c9b58ec6"
dependencies = [
"cfg-if",
"js-sys",
"libc",
- "wasi 0.10.2+wasi-snapshot-preview1",
+ "wasi 0.11.0+wasi-snapshot-preview1",
"wasm-bindgen",
]
+[[package]]
+name = "getset"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e45727250e75cc04ff2846a66397da8ef2b3db8e40e0cef4df67950a07621eb9"
+dependencies = [
+ "proc-macro-error",
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
[[package]]
name = "gimli"
-version = "0.26.1"
+version = "0.26.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "78cc372d058dcf6d5ecd98510e7fbc9e5aec4d21de70f65fea8fecebcd881bd4"
+checksum = "22030e2c5a68ec659fde1e949a745124b48e6fa8b045b7ed5bd1fe4ccc5c4e5d"
[[package]]
name = "glob"
@@ -1945,9 +1979,9 @@ checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574"
[[package]]
name = "h2"
-version = "0.3.13"
+version = "0.3.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "37a82c6d637fc9515a4694bbf1cb2457b79d81ce52b3108bdeea58b07dd34a57"
+checksum = "5ca32592cf21ac7ccab1825cd87f6c9b3d9022c44d086172ed0966bec8af30be"
dependencies = [
"bytes",
"fnv",
@@ -1958,7 +1992,7 @@ dependencies = [
"indexmap",
"slab",
"tokio",
- "tokio-util 0.7.3",
+ "tokio-util",
"tracing",
]
@@ -1976,28 +2010,21 @@ checksum = "74721d007512d0cb3338cd20f0654ac913920061a4c4d0d8708edb3f2a698c0c"
[[package]]
name = "hashbrown"
-version = "0.11.2"
+version = "0.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e"
-
-[[package]]
-name = "hashbrown"
-version = "0.12.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "db0d4cf898abf0081f964436dc980e96670a0f36863e4b83aaacdb65c9d7ccc3"
+checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
dependencies = [
"ahash",
]
[[package]]
name = "hdrhistogram"
-version = "7.5.0"
+version = "7.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "31672b7011be2c4f7456c4ddbcb40e7e9a4a9fad8efe49a6ebaf5f307d0109c0"
+checksum = "6ea9fe3952d32674a14e0975009a3547af9ea364995b5ec1add2e23c2ae523ab"
dependencies = [
"base64",
"byteorder",
- "crossbeam-channel",
"flate2",
"nom",
"num-traits",
@@ -2050,20 +2077,20 @@ dependencies = [
[[package]]
name = "http"
-version = "0.2.7"
+version = "0.2.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ff8670570af52249509a86f5e3e18a08c60b177071826898fde8997cf5f6bfbb"
+checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399"
dependencies = [
"bytes",
"fnv",
- "itoa 1.0.1",
+ "itoa 1.0.3",
]
[[package]]
name = "http-body"
-version = "0.4.4"
+version = "0.4.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1ff4f84919677303da5f147645dbea6b1881f368d03ac84e1dc09031ebd7b2c6"
+checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1"
dependencies = [
"bytes",
"http",
@@ -2078,9 +2105,9 @@ checksum = "0bfe8eed0a9285ef776bb792479ea3834e8b94e13d615c2f66d03dd50a435a29"
[[package]]
name = "httparse"
-version = "1.7.1"
+version = "1.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "496ce29bb5a52785b44e0f7ca2847ae0bb839c9bd28f69acac9b99d461c0c04c"
+checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904"
[[package]]
name = "httpdate"
@@ -2096,9 +2123,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4"
[[package]]
name = "hyper"
-version = "0.14.18"
+version = "0.14.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b26ae0a80afebe130861d90abf98e3814a4f28a4c6ffeb5ab8ebb2be311e0ef2"
+checksum = "02c929dc5c39e335a03c405292728118860721b10190d98c2a0f0efd5baafbac"
dependencies = [
"bytes",
"futures-channel",
@@ -2109,7 +2136,7 @@ dependencies = [
"http-body",
"httparse",
"httpdate",
- "itoa 1.0.1",
+ "itoa 1.0.3",
"pin-project-lite",
"socket2",
"tokio",
@@ -2143,6 +2170,20 @@ dependencies = [
"tokio-native-tls",
]
+[[package]]
+name = "iana-time-zone"
+version = "0.1.48"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "237a0714f28b1ee39ccec0770ccb544eb02c9ef2c82bb096230eefcffa6468b0"
+dependencies = [
+ "android_system_properties",
+ "core-foundation-sys",
+ "js-sys",
+ "once_cell",
+ "wasm-bindgen",
+ "winapi",
+]
+
[[package]]
name = "ident_case"
version = "1.0.1"
@@ -2151,23 +2192,22 @@ checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39"
[[package]]
name = "idna"
-version = "0.2.3"
+version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "418a0a6fab821475f634efe3ccc45c013f742efe03d853e8d3355d5cb850ecf8"
+checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6"
dependencies = [
- "matches",
"unicode-bidi",
"unicode-normalization",
]
[[package]]
name = "indexmap"
-version = "1.8.1"
+version = "1.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0f647032dfaa1f8b6dc29bd3edb7bbef4861b8b8007ebb118d6db284fd59f6ee"
+checksum = "10a35a97730320ffe8e2d410b5d3b69279b98d2c14bdb8b70ea89ecf7888d41e"
dependencies = [
"autocfg",
- "hashbrown 0.11.2",
+ "hashbrown",
]
[[package]]
@@ -2181,9 +2221,9 @@ dependencies = [
[[package]]
name = "integer-encoding"
-version = "3.0.3"
+version = "3.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0e85a1509a128c855368e135cffcde7eac17d8e1083f41e2b98c58bc1a5074be"
+checksum = "8bb03732005da905c88227371639bf1ad885cc712789c011c31c5fb3ab3ccf02"
dependencies = [
"async-trait",
"futures-util",
@@ -2252,9 +2292,9 @@ dependencies = [
[[package]]
name = "itertools"
-version = "0.10.3"
+version = "0.10.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a9a9d19fa1e79b6215ff29b9d6880b706147f16e9b1dbb1e4e5947b5b02bc5e3"
+checksum = "d8bf247779e67a9082a4790b45e71ac7cfd1321331a5c856a74a9faebdab78d0"
dependencies = [
"either",
]
@@ -2267,9 +2307,9 @@ checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4"
[[package]]
name = "itoa"
-version = "1.0.1"
+version = "1.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1aab8fc367588b89dcee83ab0fd66b72b50b72fa1904d7095045ace2b0c81c35"
+checksum = "6c8af84674fe1f223a982c933a0ee1086ac4d4052aa0fb8060c12c6ad838e754"
[[package]]
name = "jobserver"
@@ -2282,18 +2322,18 @@ dependencies = [
[[package]]
name = "js-sys"
-version = "0.3.57"
+version = "0.3.60"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "671a26f820db17c2a2750743f1dd03bafd15b98c9f30c7c2628c024c05d73397"
+checksum = "49409df3e3bf0856b916e2ceaca09ee28e6871cf7d9ce97a692cacfdb2a25a47"
dependencies = [
"wasm-bindgen",
]
[[package]]
name = "jsonwebtoken"
-version = "8.1.0"
+version = "8.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cc9051c17f81bae79440afa041b3a278e1de71bfb96d32454b477fd4703ccb6f"
+checksum = "1aa4b4af834c6cfd35d8763d359661b90f2e45d8f750a0849156c7f4671af09c"
dependencies = [
"base64",
"pem",
@@ -2323,18 +2363,18 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55"
[[package]]
name = "lexical"
-version = "6.1.0"
+version = "6.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ccd3e434c16f0164124ade12dcdee324fcc3dafb1cad0c7f1d8c2451a1aa6886"
+checksum = "c7aefb36fd43fef7003334742cbf77b243fcd36418a1d1bdd480d613a67968f6"
dependencies = [
"lexical-core",
]
[[package]]
name = "lexical-core"
-version = "0.8.3"
+version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "92912c4af2e7d9075be3e5e3122c4d7263855fa6cce34fbece4dd08e5884624d"
+checksum = "2cde5de06e8d4c2faabc400238f9ae1c74d5412d03a7bd067645ccbc47070e46"
dependencies = [
"lexical-parse-float",
"lexical-parse-integer",
@@ -2345,9 +2385,9 @@ dependencies = [
[[package]]
name = "lexical-parse-float"
-version = "0.8.3"
+version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f518eed87c3be6debe6d26b855c97358d8a11bf05acec137e5f53080f5ad2dd8"
+checksum = "683b3a5ebd0130b8fb52ba0bdc718cc56815b6a097e28ae5a6997d0ad17dc05f"
dependencies = [
"lexical-parse-integer",
"lexical-util",
@@ -2356,9 +2396,9 @@ dependencies = [
[[package]]
name = "lexical-parse-integer"
-version = "0.8.3"
+version = "0.8.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "afc852ec67c6538bbb2b9911116a385b24510e879a69ab516e6a151b15a79168"
+checksum = "6d0994485ed0c312f6d965766754ea177d07f9c00c9b82a5ee62ed5b47945ee9"
dependencies = [
"lexical-util",
"static_assertions",
@@ -2366,18 +2406,18 @@ dependencies = [
[[package]]
name = "lexical-util"
-version = "0.8.3"
+version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c72a9d52c5c4e62fa2cdc2cb6c694a39ae1382d9c2a17a466f18e272a0930eb1"
+checksum = "5255b9ff16ff898710eb9eb63cb39248ea8a5bb036bea8085b1a767ff6c4e3fc"
dependencies = [
"static_assertions",
]
[[package]]
name = "lexical-write-float"
-version = "0.8.4"
+version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8a89ec1d062e481210c309b672f73a0567b7855f21e7d2fae636df44d12e97f9"
+checksum = "accabaa1c4581f05a3923d1b4cfd124c329352288b7b9da09e766b0668116862"
dependencies = [
"lexical-util",
"lexical-write-integer",
@@ -2386,9 +2426,9 @@ dependencies = [
[[package]]
name = "lexical-write-integer"
-version = "0.8.3"
+version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "094060bd2a7c2ff3a16d5304a6ae82727cb3cc9d1c70f813cc73f744c319337e"
+checksum = "e1b6f3d1f4422866b68192d62f77bc5c700bee84f3069f2469d7bc8c77852446"
dependencies = [
"lexical-util",
"static_assertions",
@@ -2446,9 +2486,9 @@ checksum = "d4d2456c373231a208ad294c33dc5bff30051eafd954cd4caae83a712b12854d"
[[package]]
name = "lock_api"
-version = "0.4.7"
+version = "0.4.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "327fa5b6a6940e4699ec49a9beae1ea4845c6bab9314e4f84ac68742139d8c53"
+checksum = "9f80bf5aacaf25cbfc8210d1cfb718f2bf3b11c4c54e5afe36c236853a8ec390"
dependencies = [
"autocfg",
"scopeguard",
@@ -2498,14 +2538,14 @@ version = "0.7.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e999beba7b6e8345721bd280141ed958096a2e4abdf74f67ff4ce49b4b54e47a"
dependencies = [
- "hashbrown 0.12.1",
+ "hashbrown",
]
[[package]]
name = "lz4"
-version = "1.23.3"
+version = "1.24.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4edcb94251b1c375c459e5abe9fb0168c1c826c3370172684844f8f3f8d1a885"
+checksum = "7e9e2dd86df36ce760a60f6ff6ad526f7ba1f14ba0356f8254fb6905e6494df1"
dependencies = [
"libc",
"lz4-sys",
@@ -2513,9 +2553,9 @@ dependencies = [
[[package]]
name = "lz4-sys"
-version = "1.9.3"
+version = "1.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d7be8908e2ed6f31c02db8a9fa962f03e36c53fbfde437363eae3306b85d7e17"
+checksum = "57d27b317e207b10f69f5e75494119e391a96f48861ae870d1da6edac98ca900"
dependencies = [
"cc",
"libc",
@@ -2523,9 +2563,9 @@ dependencies = [
[[package]]
name = "lz4_flex"
-version = "0.9.4"
+version = "0.9.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c038063f7a78126c539d666a0323a2032de5e7366012cd14a6eafc5ba290bbd6"
+checksum = "1a8cbbb2831780bc3b9c15a41f5b49222ef756b6730a95f3decfdd15903eb5a3"
dependencies = [
"twox-hash",
]
@@ -2577,9 +2617,9 @@ dependencies = [
[[package]]
name = "md-5"
-version = "0.10.1"
+version = "0.10.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "658646b21e0b72f7866c7038ab086d3d5e1cd6271f060fd37defb241949d0582"
+checksum = "66b48670c893079d3c2ed79114e3644b7004df1c361a4e0ad52e2e6940d07c3d"
dependencies = [
"digest",
]
@@ -2635,7 +2675,7 @@ dependencies = [
"indexmap",
"metrics 0.20.1",
"metrics-util",
- "parking_lot 0.12.0",
+ "parking_lot 0.12.1",
"portable-atomic",
"quanta",
"thiserror",
@@ -2671,10 +2711,10 @@ checksum = "f7d24dc2dbae22bff6f1f9326ffce828c9f07ef9cc1e8002e5279f845432a30a"
dependencies = [
"crossbeam-epoch",
"crossbeam-utils",
- "hashbrown 0.12.1",
+ "hashbrown",
"metrics 0.20.1",
"num_cpus",
- "parking_lot 0.12.0",
+ "parking_lot 0.12.1",
"portable-atomic",
"quanta",
"sketches-ddsketch",
@@ -2732,18 +2772,18 @@ dependencies = [
[[package]]
name = "miniz_oxide"
-version = "0.5.1"
+version = "0.5.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d2b29bd4bc3f33391105ebee3589c19197c4271e3e5a9ec9bfe8127eeff8f082"
+checksum = "96590ba8f175222643a85693f33d26e9c8a015f599c216509b1a6894af675d34"
dependencies = [
"adler",
]
[[package]]
name = "minstant"
-version = "0.1.1"
+version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2cb320648b7883b43ce5dfbc5c6f4a84038194c3f67b4fcb7d05c994e6006557"
+checksum = "bc5dcfca9a0725105ac948b84cfeb69c3942814c696326743797215413f854b9"
dependencies = [
"ctor",
"libc",
@@ -2752,9 +2792,9 @@ dependencies = [
[[package]]
name = "mio"
-version = "0.8.3"
+version = "0.8.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "713d550d9b44d89174e066b7a6217ae06234c10cb47819a88290d2b353c31799"
+checksum = "57ee1c23c7c63b0c9250c339ffdc69255f110b298b901b9f6c82547b7b87caaf"
dependencies = [
"libc",
"log",
@@ -2815,7 +2855,7 @@ dependencies = [
"thiserror",
"tokio",
"tokio-native-tls",
- "tokio-util 0.7.3",
+ "tokio-util",
"twox-hash",
"url",
]
@@ -2854,7 +2894,7 @@ dependencies = [
"smallvec",
"subprocess",
"thiserror",
- "time 0.3.9",
+ "time 0.3.14",
"uuid",
]
@@ -2965,9 +3005,9 @@ dependencies = [
[[package]]
name = "num-complex"
-version = "0.4.1"
+version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "97fbc387afefefd5e9e39493299f3069e14a140dd34dc19b4c1c1a8fddb6a790"
+checksum = "7ae39348c8bc5fbd7f40c727a9925f03517afd2ab27d46702108b6a7e5414c19"
dependencies = [
"num-traits",
"serde",
@@ -2996,9 +3036,9 @@ dependencies = [
[[package]]
name = "num-rational"
-version = "0.4.0"
+version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d41702bd167c2df5520b384281bc111a4b5efcf7fbc4c9c222c815b07e0a6a6a"
+checksum = "0638a1c9d0a3c0914158145bc76cff373a75a627e6ecbfb71cbe6f453a5a19b0"
dependencies = [
"autocfg",
"num-bigint",
@@ -3058,9 +3098,9 @@ dependencies = [
[[package]]
name = "object"
-version = "0.28.3"
+version = "0.29.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "40bec70ba014595f99f7aa110b84331ffe1ee9aece7fe6f387cc7e3ecda4d456"
+checksum = "21158b2c33aa6d4561f1c0a6ea283ca92bc54802a93b263e910746d679a7eb53"
dependencies = [
"memchr",
]
@@ -3109,14 +3149,14 @@ dependencies = [
"metrics 0.19.0",
"minitrace",
"once_cell",
- "parking_lot 0.12.0",
+ "parking_lot 0.12.1",
"percent-encoding",
"pin-project",
"quick-xml",
"reqsign",
"serde",
"thiserror",
- "time 0.3.9",
+ "time 0.3.14",
"tokio",
]
@@ -3136,9 +3176,9 @@ dependencies = [
[[package]]
name = "openssl"
-version = "0.10.40"
+version = "0.10.41"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fb81a6430ac911acb25fe5ac8f1d2af1b4ea8a4fdfda0f1ee4292af2e2d8eb0e"
+checksum = "618febf65336490dfcf20b73f885f5651a0c89c64c2d4a8c3662585a70bf5bd0"
dependencies = [
"bitflags",
"cfg-if",
@@ -3168,9 +3208,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf"
[[package]]
name = "openssl-sys"
-version = "0.9.73"
+version = "0.9.75"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9d5fd19fb3e0a8191c1e34935718976a3e70c112ab9a24af6d7cadccd9d90bc0"
+checksum = "e5f9bd0c2710541a3cda73d6f9ac4f1b240de4ae261065d309dbe73d9dceb42f"
dependencies = [
"autocfg",
"cc",
@@ -3265,14 +3305,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ccd746e37177e1711c20dd619a1620f34f5c8b569c53590a72dedd5344d8924a"
dependencies = [
"dlv-list",
- "hashbrown 0.12.1",
+ "hashbrown",
]
[[package]]
name = "os_str_bytes"
-version = "6.0.0"
+version = "6.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8e22443d1643a904602595ba1cd8f7d896afe56d26712531c5ff73a15b2fbf64"
+checksum = "9ff7415e9ae3fff1225851df9e0d9e4e5479f947619774677a63572e55e80eff"
[[package]]
name = "parking"
@@ -3293,9 +3333,9 @@ dependencies = [
[[package]]
name = "parking_lot"
-version = "0.12.0"
+version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "87f5ec2493a61ac0506c0f4199f99070cbe83857b0337006a30f3e6719b8ef58"
+checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f"
dependencies = [
"lock_api",
"parking_lot_core 0.9.3",
@@ -3373,9 +3413,9 @@ dependencies = [
[[package]]
name = "paste"
-version = "1.0.7"
+version = "1.0.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0c520e05135d6e763148b6426a837e239041653ba7becd2e538c076c738025fc"
+checksum = "b1de2e551fb905ac83f73f7aedf2f0cb4a0da7e35efa24a202a936269f1f18e1"
[[package]]
name = "peeking_take_while"
@@ -3385,36 +3425,65 @@ checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099"
[[package]]
name = "pem"
-version = "1.0.2"
+version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e9a3b09a20e374558580a4914d3b7d89bd61b954a5a5e1dcbea98753addb1947"
+checksum = "03c64931a1a212348ec4f3b4362585eca7159d0d09cbdf4a7f74f02173596fd4"
dependencies = [
"base64",
]
[[package]]
name = "percent-encoding"
-version = "2.1.0"
+version = "2.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e"
+checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e"
[[package]]
name = "petgraph"
-version = "0.6.0"
+version = "0.6.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4a13a2fa9d0b63e5f22328828741e523766fff0ee9e779316902290dff3f824f"
+checksum = "e6d5014253a1331579ce62aa67443b4a658c5e7dd03d4bc6d302b94474888143"
dependencies = [
"fixedbitset",
"indexmap",
]
+[[package]]
+name = "pgwire"
+version = "0.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "41526874eeba2f8b06a3ed14510e29080b9fab15b86849fb3a7a733c9bc610b0"
+dependencies = [
+ "async-trait",
+ "bytes",
+ "derive-new",
+ "futures",
+ "getset",
+ "log",
+ "postgres-types",
+ "rand 0.8.5",
+ "thiserror",
+ "time 0.3.14",
+ "tokio",
+ "tokio-util",
+]
+
[[package]]
name = "phf"
version = "0.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fabbf1ead8a5bcbc20f5f8b939ee3f5b0f6f281b6ad3468b84656b658b455259"
dependencies = [
- "phf_shared",
+ "phf_shared 0.10.0",
+]
+
+[[package]]
+name = "phf"
+version = "0.11.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "928c6535de93548188ef63bb7c4036bd415cd8f36ad25af44b9789b2ee72a48c"
+dependencies = [
+ "phf_shared 0.11.1",
]
[[package]]
@@ -3423,8 +3492,18 @@ version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4fb1c3a8bc4dd4e5cfce29b44ffc14bedd2ee294559a294e2a4d4c9e9a6a13cd"
dependencies = [
- "phf_generator",
- "phf_shared",
+ "phf_generator 0.10.0",
+ "phf_shared 0.10.0",
+]
+
+[[package]]
+name = "phf_codegen"
+version = "0.11.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a56ac890c5e3ca598bbdeaa99964edb5b0258a583a9eb6ef4e89fc85d9224770"
+dependencies = [
+ "phf_generator 0.11.1",
+ "phf_shared 0.11.1",
]
[[package]]
@@ -3433,7 +3512,17 @@ version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5d5285893bb5eb82e6aaf5d59ee909a06a16737a8970984dd7746ba9283498d6"
dependencies = [
- "phf_shared",
+ "phf_shared 0.10.0",
+ "rand 0.8.5",
+]
+
+[[package]]
+name = "phf_generator"
+version = "0.11.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b1181c94580fa345f50f19d738aaa39c0ed30a600d95cb2d3e23f94266f14fbf"
+dependencies = [
+ "phf_shared 0.11.1",
"rand 0.8.5",
]
@@ -3442,6 +3531,15 @@ name = "phf_shared"
version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b6796ad771acdc0123d2a88dc428b5e38ef24456743ddb1744ed628f9815c096"
+dependencies = [
+ "siphasher",
+]
+
+[[package]]
+name = "phf_shared"
+version = "0.11.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e1fb5f6f826b772a8d4c0394209441e7d37cbbb967ae9c7e0e8134365c9ee676"
dependencies = [
"siphasher",
"uncased",
@@ -3449,18 +3547,18 @@ dependencies = [
[[package]]
name = "pin-project"
-version = "1.0.10"
+version = "1.0.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "58ad3879ad3baf4e44784bc6a718a8698867bb991f8ce24d1bcbe2cfb4c3a75e"
+checksum = "ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc"
dependencies = [
"pin-project-internal",
]
[[package]]
name = "pin-project-internal"
-version = "1.0.10"
+version = "1.0.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "744b6f092ba29c3650faf274db506afd39944f48420f6c86b17cfe0ee1cb36bb"
+checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55"
dependencies = [
"proc-macro2",
"quote",
@@ -3496,9 +3594,9 @@ dependencies = [
[[package]]
name = "plotters"
-version = "0.3.1"
+version = "0.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "32a3fd9ec30b9749ce28cd91f255d569591cdf937fe280c312143e3c4bad6f2a"
+checksum = "2538b639e642295546c50fcd545198c9d64ee2a38620a628724a3b266d5fbf97"
dependencies = [
"num-traits",
"plotters-backend",
@@ -3509,15 +3607,15 @@ dependencies = [
[[package]]
name = "plotters-backend"
-version = "0.3.2"
+version = "0.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d88417318da0eaf0fdcdb51a0ee6c3bed624333bff8f946733049380be67ac1c"
+checksum = "193228616381fecdc1224c62e96946dfbc73ff4384fba576e052ff8c1bea8142"
[[package]]
name = "plotters-svg"
-version = "0.3.1"
+version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "521fa9638fa597e1dc53e9412a4f9cefb01187ee1f7413076f9e6749e2885ba9"
+checksum = "f9a81d2759aae1dae668f783c308bc5c8ebd191ff4184aaa1b37f65a6ae5a56f"
dependencies = [
"plotters-backend",
]
@@ -3535,10 +3633,11 @@ dependencies = [
[[package]]
name = "polling"
-version = "2.2.0"
+version = "2.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "685404d509889fade3e86fe3a5803bca2ec09b0c0778d5ada6ec8bf7a8de5259"
+checksum = "899b00b9c8ab553c743b3e11e87c5c7d423b2a2de229ba95b24a756344748011"
dependencies = [
+ "autocfg",
"cfg-if",
"libc",
"log",
@@ -3548,9 +3647,38 @@ dependencies = [
[[package]]
name = "portable-atomic"
-version = "0.3.13"
+version = "0.3.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "15eb2c6e362923af47e13c23ca5afb859e83d54452c55b0b9ac763b8f7c1ac16"
+
+[[package]]
+name = "postgres-protocol"
+version = "0.6.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "878c6cbf956e03af9aa8204b407b9cbf47c072164800aa918c516cd4b056c50c"
+dependencies = [
+ "base64",
+ "byteorder",
+ "bytes",
+ "fallible-iterator",
+ "hmac",
+ "md-5",
+ "memchr",
+ "rand 0.8.5",
+ "sha2",
+ "stringprep",
+]
+
+[[package]]
+name = "postgres-types"
+version = "0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b303a15aeda678da614ab23306232dbd282d532f8c5919cedd41b66b9dc96560"
+checksum = "73d946ec7d256b04dfadc4e6a3292324e6f417124750fc5c0950f981b703a0f1"
+dependencies = [
+ "bytes",
+ "fallible-iterator",
+ "postgres-protocol",
+]
[[package]]
name = "ppv-lite86"
@@ -3560,9 +3688,9 @@ checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872"
[[package]]
name = "prettyplease"
-version = "0.1.16"
+version = "0.1.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "da6ffbe862780245013cb1c0a48c4e44b7d665548088f91f6b90876d0625e4c2"
+checksum = "a49e86d2c26a24059894a3afa13fd17d063419b05dfb83f06d9c3566060c3f5a"
dependencies = [
"proc-macro2",
"syn",
@@ -3611,21 +3739,11 @@ checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5"
[[package]]
name = "proc-macro2"
-version = "1.0.38"
+version = "1.0.43"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9027b48e9d4c9175fa2218adf3557f91c1137021739951d4932f5f8268ac48aa"
+checksum = "0a2ca2c61bc9f3d74d2886294ab7b9853abd9c1ad903a3ac7815c58989bb7bab"
dependencies = [
- "unicode-xid",
-]
-
-[[package]]
-name = "prost"
-version = "0.10.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "71adf41db68aa0daaefc69bb30bcd68ded9b9abaad5d1fbb6304c4fb390e083e"
-dependencies = [
- "bytes",
- "prost-derive 0.10.1",
+ "unicode-ident",
]
[[package]]
@@ -3635,7 +3753,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "399c3c31cdec40583bb68f0b18403400d01ec4289c383aa047560439952c4dd7"
dependencies = [
"bytes",
- "prost-derive 0.11.0",
+ "prost-derive",
]
[[package]]
@@ -3651,26 +3769,13 @@ dependencies = [
"log",
"multimap",
"petgraph",
- "prost 0.11.0",
- "prost-types 0.11.1",
+ "prost",
+ "prost-types",
"regex",
"tempfile",
"which",
]
-[[package]]
-name = "prost-derive"
-version = "0.10.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7b670f45da57fb8542ebdbb6105a925fe571b67f9e7ed9f47a06a84e72b4e7cc"
-dependencies = [
- "anyhow",
- "itertools",
- "proc-macro2",
- "quote",
- "syn",
-]
-
[[package]]
name = "prost-derive"
version = "0.11.0"
@@ -3684,16 +3789,6 @@ dependencies = [
"syn",
]
-[[package]]
-name = "prost-types"
-version = "0.10.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2d0a014229361011dc8e69c8a1ec6c2e8d0f2af7c91e3ea3f5b2170298461e68"
-dependencies = [
- "bytes",
- "prost 0.10.4",
-]
-
[[package]]
name = "prost-types"
version = "0.11.1"
@@ -3701,7 +3796,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4dfaa718ad76a44b3415e6c4d53b17c8f99160dcb3a99b10470fce8ad43f6e3e"
dependencies = [
"bytes",
- "prost 0.11.0",
+ "prost",
]
[[package]]
@@ -3761,28 +3856,19 @@ dependencies = [
[[package]]
name = "quick-xml"
-version = "0.23.0"
+version = "0.23.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9279fbdacaad3baf559d8cabe0acc3d06e30ea14931af31af79578ac0946decc"
+checksum = "11bafc859c6815fbaffbbbf4229ecb767ac913fecb27f9ad4343662e9ef099ea"
dependencies = [
"memchr",
"serde",
]
-[[package]]
-name = "quickcheck"
-version = "1.0.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "588f6378e4dd99458b60ec275b4477add41ce4fa9f64dcba6f15adccb19b50d6"
-dependencies = [
- "rand 0.8.5",
-]
-
[[package]]
name = "quote"
-version = "1.0.18"
+version = "1.0.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a1feb54ed693b93a84e14094943b84b7c4eae204c512b7ccb95ab0c66d278ad1"
+checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179"
dependencies = [
"proc-macro2",
]
@@ -3873,9 +3959,9 @@ dependencies = [
[[package]]
name = "raw-cpuid"
-version = "10.3.0"
+version = "10.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "738bc47119e3eeccc7e94c4a506901aea5e7b4944ecd0829cbebf4af04ceda12"
+checksum = "a6823ea29436221176fe662da99998ad3b4db2c7f31e7b6f5fe43adccd6320bb"
dependencies = [
"bitflags",
]
@@ -3921,9 +4007,9 @@ dependencies = [
[[package]]
name = "redox_syscall"
-version = "0.2.13"
+version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "62f25bc4c7e55e0b0b7a1d43fb893f4fa1361d0abe38b9ce4f323c2adfe6ef42"
+checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a"
dependencies = [
"bitflags",
]
@@ -3941,9 +4027,9 @@ dependencies = [
[[package]]
name = "regex"
-version = "1.5.5"
+version = "1.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1a11647b6b25ff05a515cb92c365cec08801e83423a235b51e231e1808747286"
+checksum = "4c4eb3267174b8c6c2f654116623910a0fef09c4753f8dd83db29c48a0df988b"
dependencies = [
"aho-corasick",
"memchr",
@@ -3961,9 +4047,9 @@ dependencies = [
[[package]]
name = "regex-syntax"
-version = "0.6.25"
+version = "0.6.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b"
+checksum = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244"
[[package]]
name = "remove_dir_all"
@@ -3999,14 +4085,14 @@ dependencies = [
"serde",
"serde_json",
"sha2",
- "time 0.3.9",
+ "time 0.3.14",
]
[[package]]
name = "reqwest"
-version = "0.11.10"
+version = "0.11.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "46a1f7aa4f35e5e8b4160449f51afc758f0ce6454315a9fa7d0d113e958c41eb"
+checksum = "b75aa69a3f06bbcc66ede33af2af253c6f7a86b1ca0033f60c580a27074fbf92"
dependencies = [
"base64",
"bytes",
@@ -4032,7 +4118,8 @@ dependencies = [
"serde_urlencoded",
"tokio",
"tokio-native-tls",
- "tokio-util 0.6.9",
+ "tokio-util",
+ "tower-service",
"url",
"wasm-bindgen",
"wasm-bindgen-futures",
@@ -4064,9 +4151,9 @@ dependencies = [
[[package]]
name = "retain_mut"
-version = "0.1.7"
+version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8c31b5c4033f8fdde8700e4657be2c497e7288f01515be52168c631e2e4d4086"
+checksum = "4389f1d5789befaf6029ebd9f7dac4af7f7e3d61b69d4f30e2ac02b57e7712b0"
[[package]]
name = "ring"
@@ -4106,9 +4193,9 @@ dependencies = [
[[package]]
name = "rust_decimal"
-version = "1.25.0"
+version = "1.26.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "34a3bb58e85333f1ab191bf979104b586ebd77475bc6681882825f4532dfe87c"
+checksum = "ee9164faf726e4f3ece4978b25ca877ddc6802fa77f38cdccb32c7f805ecd70c"
dependencies = [
"arrayvec",
"num-traits",
@@ -4261,8 +4348,8 @@ dependencies = [
"log",
"num-bigint",
"num-traits",
- "phf",
- "phf_codegen",
+ "phf 0.10.1",
+ "phf_codegen 0.10.0",
"rustpython-ast",
"tiny-keccak",
"unic-emoji-char",
@@ -4313,7 +4400,7 @@ dependencies = [
"num_enum",
"once_cell",
"optional",
- "parking_lot 0.12.0",
+ "parking_lot 0.12.1",
"paste",
"rand 0.8.5",
"result-like",
@@ -4351,9 +4438,9 @@ dependencies = [
[[package]]
name = "rustversion"
-version = "1.0.6"
+version = "1.0.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f2cc38e8fa666e2de3c4aba7edeb5ffc5246c1c2ed0e3d17e560aeeba736b23f"
+checksum = "97477e48b4cf8603ad5f7aaf897467cf42ab4218a38ef76fb14c2d6773a6d6a8"
[[package]]
name = "rustyline"
@@ -4381,9 +4468,9 @@ dependencies = [
[[package]]
name = "ryu"
-version = "1.0.9"
+version = "1.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "73b4b750c782965c211b42f022f59af1fbceabdd026623714f104152f1ec149f"
+checksum = "4501abdff3ae82a1c1b477a17252eb69cee9e66eb915c1abaa4f44d873df9f09"
[[package]]
name = "same-file"
@@ -4402,12 +4489,12 @@ checksum = "ece8e78b2f38ec51c51f5d475df0a7187ba5111b2a28bdc761ee05b075d40a71"
[[package]]
name = "schannel"
-version = "0.1.19"
+version = "0.1.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75"
+checksum = "88d6731146462ea25d9244b2ed5fd1d716d25c52e4d54aa4fb0f3c4e9854dbe2"
dependencies = [
"lazy_static",
- "winapi",
+ "windows-sys",
]
[[package]]
@@ -4459,9 +4546,9 @@ dependencies = [
[[package]]
name = "security-framework"
-version = "2.6.1"
+version = "2.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2dc14f172faf8a0194a3aded622712b0de276821addc574fa54fc0a1167e10dc"
+checksum = "2bc1bb97804af6631813c55739f771071e0f2ed33ee20b68c86ec505d906356c"
dependencies = [
"bitflags",
"core-foundation",
@@ -4482,15 +4569,15 @@ dependencies = [
[[package]]
name = "semver"
-version = "1.0.10"
+version = "1.0.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a41d061efea015927ac527063765e73601444cdc344ba855bc7bd44578b25e1c"
+checksum = "93f6841e709003d68bb2deee8c343572bf446003ec20a583e76f7b15cebf3711"
[[package]]
name = "serde"
-version = "1.0.137"
+version = "1.0.144"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "61ea8d54c77f8315140a05f4c7237403bf38b72704d031543aa1d16abbf517d1"
+checksum = "0f747710de3dcd43b88c9168773254e809d8ddbdf9653b84e2554ab219f17860"
dependencies = [
"serde_derive",
]
@@ -4507,9 +4594,9 @@ dependencies = [
[[package]]
name = "serde_derive"
-version = "1.0.137"
+version = "1.0.144"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1f26faba0c3959972377d3b2d306ee9f71faee9714294e41bb777f83f88578be"
+checksum = "94ed3a816fb1d101812f83e789f888322c34e291f894f19590dc310963e87a00"
dependencies = [
"proc-macro2",
"quote",
@@ -4518,12 +4605,12 @@ dependencies = [
[[package]]
name = "serde_json"
-version = "1.0.81"
+version = "1.0.85"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9b7ce2b32a1aed03c558dc61a5cd328f15aff2dbc17daad8fb8af04d2100e15c"
+checksum = "e55a28e3aaef9d5ce0506d0a14dbba8054ddc7e499ef522dd8b26859ec9d4a44"
dependencies = [
"indexmap",
- "itoa 1.0.1",
+ "itoa 1.0.3",
"ryu",
"serde",
]
@@ -4535,7 +4622,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd"
dependencies = [
"form_urlencoded",
- "itoa 1.0.1",
+ "itoa 1.0.3",
"ryu",
"serde",
]
@@ -4558,11 +4645,13 @@ dependencies = [
"common-time",
"datatypes",
"futures",
+ "hex",
"hyper",
"metrics 0.20.1",
"mysql_async",
"num_cpus",
"opensrv-mysql",
+ "pgwire",
"query",
"rand 0.8.5",
"script",
@@ -4571,8 +4660,9 @@ dependencies = [
"snafu",
"test-util",
"tokio",
+ "tokio-postgres",
"tokio-stream",
- "tonic 0.8.0",
+ "tonic",
"tower",
"tower-http",
]
@@ -4590,9 +4680,9 @@ dependencies = [
[[package]]
name = "sha2"
-version = "0.10.2"
+version = "0.10.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "55deaec60f81eefe3cce0dc50bda92d6d8e88f2a27df7c5033b42afeb1ed2676"
+checksum = "cf9db03534dff993187064c4e0c05a5708d2a9728ace9a8959b77bedf415dac5"
dependencies = [
"cfg-if",
"cpufeatures",
@@ -4643,14 +4733,14 @@ checksum = "f27f6278552951f1f2b8cf9da965d10969b2efdea95a6ec47987ab46edfe263a"
[[package]]
name = "simple_asn1"
-version = "0.6.1"
+version = "0.6.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4a762b1c38b9b990c694b9c2f8abe3372ce6a9ceaae6bca39cfc46e054f45745"
+checksum = "adc4e5204eb1910f40f9cfa375f6f05b68c3abac4b6fd879c8ff5e7ae8a0a085"
dependencies = [
"num-bigint",
"num-traits",
"thiserror",
- "time 0.3.9",
+ "time 0.3.14",
]
[[package]]
@@ -4667,9 +4757,12 @@ checksum = "ceb945e54128e09c43d8e4f1277851bd5044c6fc540bbaa2ad888f60b3da9ae7"
[[package]]
name = "slab"
-version = "0.4.6"
+version = "0.4.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "eb703cfe953bccee95685111adeedb76fabe4e97549a58d16f03ea7b9367bb32"
+checksum = "4614a76b2a8be0058caa9dbbaf66d988527d86d003c11a94fbd335d7661edcef"
+dependencies = [
+ "autocfg",
+]
[[package]]
name = "sluice"
@@ -4719,12 +4812,12 @@ checksum = "45456094d1983e2ee2a18fdfebce3189fa451699d0502cb8e3b49dba5ba41451"
[[package]]
name = "socket2"
-version = "0.4.5"
+version = "0.4.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ca642ba17f8b2995138b1d7711829c92e98c0a25ea019de790f4f09279c4e296"
+checksum = "02e2d2db9033d13a1567121ddd7a095ee144db4e1ca1b1bda3419bc0da294ebd"
dependencies = [
"libc",
- "windows-sys",
+ "winapi",
]
[[package]]
@@ -4816,7 +4909,7 @@ dependencies = [
"object-store",
"paste",
"planus",
- "prost 0.11.0",
+ "prost",
"rand 0.8.5",
"regex",
"serde",
@@ -4825,7 +4918,7 @@ dependencies = [
"store-api",
"tempdir",
"tokio",
- "tonic 0.8.0",
+ "tonic",
"tonic-build",
"uuid",
]
@@ -4858,18 +4951,18 @@ checksum = "9e08d8363704e6c71fc928674353e6b7c23dcea9d82d7012c8faf2a3a025f8d0"
[[package]]
name = "streaming-decompression"
-version = "0.1.0"
+version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9bc687acd5dc742c4a7094f2927a8614a68e4743ef682e7a2f9f0f711656cc92"
+checksum = "bf6cc3b19bfb128a8ad11026086e31d3ce9ad23f8ea37354b31383a187c44cf3"
dependencies = [
"fallible-streaming-iterator",
]
[[package]]
name = "streaming-iterator"
-version = "0.1.5"
+version = "0.1.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "303235c177994a476226b80d076bd333b7b560fb05bd242a10609d11b07f81f5"
+checksum = "0085b81d5d4e57f264d492641cf80ea508c96d9a0e47c6296e8f016504e28fd7"
[[package]]
name = "streaming-stats"
@@ -4886,6 +4979,16 @@ version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a3ff2f71c82567c565ba4b3009a9350a96a7269eaa4001ebedae926230bc2254"
+[[package]]
+name = "stringprep"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8ee348cb74b87454fff4b551cbf727025810a004f88aeacae7f85b87f4e9a1c1"
+dependencies = [
+ "unicode-bidi",
+ "unicode-normalization",
+]
+
[[package]]
name = "strsim"
version = "0.8.0"
@@ -4954,13 +5057,13 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601"
[[package]]
name = "syn"
-version = "1.0.92"
+version = "1.0.99"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7ff7c592601f11445996a06f8ad0c27f094a58857c2f89e97974ab9235b92c52"
+checksum = "58dbef6ec655055e20b86b15a8cc6d439cca19b667537ac6a1369572d151ab13"
dependencies = [
"proc-macro2",
"quote",
- "unicode-xid",
+ "unicode-ident",
]
[[package]]
@@ -5107,18 +5210,18 @@ checksum = "b1141d4d61095b28419e22cb0bbf02755f5e54e0526f97f1e3d1d160e60885fb"
[[package]]
name = "thiserror"
-version = "1.0.31"
+version = "1.0.35"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bd829fe32373d27f76265620b5309d0340cb8550f523c1dda251d6298069069a"
+checksum = "c53f98874615aea268107765aa1ed8f6116782501d18e53d08b471733bea6c85"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
-version = "1.0.31"
+version = "1.0.35"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0396bc89e626244658bef819e22d0cc459e795a5ebe878e6ec336d1674a8d79a"
+checksum = "f8b463991b4eab2d801e724172285ec4195c650e8ec79b149e6c2a8e6dd3f783"
dependencies = [
"proc-macro2",
"quote",
@@ -5179,14 +5282,13 @@ dependencies = [
[[package]]
name = "time"
-version = "0.3.9"
+version = "0.3.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c2702e08a7a860f005826c6815dcac101b19b5eb330c27fe4a5928fec1d20ddd"
+checksum = "3c3f9a28b618c3a6b9251b6908e9c99e04b9e5c02e6581ccbb67d59c34ef7f9b"
dependencies = [
- "itoa 1.0.1",
+ "itoa 1.0.3",
"libc",
"num_threads",
- "quickcheck",
"time-macros",
]
@@ -5238,9 +5340,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c"
[[package]]
name = "tokio"
-version = "1.20.1"
+version = "1.21.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7a8325f63a7d4774dd041e363b2409ed1c5cbbd0f867795e661df066b2b0a581"
+checksum = "0020c875007ad96677dcc890298f4b942882c5d4eb7cc8f439fc3bf813dc9c95"
dependencies = [
"autocfg",
"bytes",
@@ -5249,7 +5351,7 @@ dependencies = [
"mio",
"num_cpus",
"once_cell",
- "parking_lot 0.12.0",
+ "parking_lot 0.12.1",
"pin-project-lite",
"signal-hook-registry",
"socket2",
@@ -5270,9 +5372,9 @@ dependencies = [
[[package]]
name = "tokio-macros"
-version = "1.7.0"
+version = "1.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b557f72f448c511a979e2564e55d74e6c4432fc96ff4f6241bc6bded342643b7"
+checksum = "9724f9a975fb987ef7a3cd9be0350edcbe130698af5b8f7a631e23d42d052484"
dependencies = [
"proc-macro2",
"quote",
@@ -5290,48 +5392,58 @@ dependencies = [
]
[[package]]
-name = "tokio-stream"
-version = "0.1.8"
+name = "tokio-postgres"
+version = "0.7.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "50145484efff8818b5ccd256697f36863f587da82cf8b409c53adf1e840798e3"
+checksum = "29a12c1b3e0704ae7dfc25562629798b29c72e6b1d0a681b6f29ab4ae5e7f7bf"
dependencies = [
- "futures-core",
+ "async-trait",
+ "byteorder",
+ "bytes",
+ "fallible-iterator",
+ "futures-channel",
+ "futures-util",
+ "log",
+ "parking_lot 0.12.1",
+ "percent-encoding",
+ "phf 0.11.1",
"pin-project-lite",
+ "postgres-protocol",
+ "postgres-types",
+ "socket2",
"tokio",
+ "tokio-util",
]
[[package]]
-name = "tokio-test"
-version = "0.4.2"
+name = "tokio-stream"
+version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "53474327ae5e166530d17f2d956afcb4f8a004de581b3cae10f12006bc8163e3"
+checksum = "df54d54117d6fdc4e4fea40fe1e4e566b3505700e148a6827e59b34b0d2600d9"
dependencies = [
- "async-stream",
- "bytes",
"futures-core",
+ "pin-project-lite",
"tokio",
- "tokio-stream",
]
[[package]]
-name = "tokio-util"
-version = "0.6.9"
+name = "tokio-test"
+version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9e99e1983e5d376cd8eb4b66604d2e99e79f5bd988c3055891dcd8c9e2604cc0"
+checksum = "53474327ae5e166530d17f2d956afcb4f8a004de581b3cae10f12006bc8163e3"
dependencies = [
+ "async-stream",
"bytes",
"futures-core",
- "futures-sink",
- "log",
- "pin-project-lite",
"tokio",
+ "tokio-stream",
]
[[package]]
name = "tokio-util"
-version = "0.7.3"
+version = "0.7.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cc463cd8deddc3770d20f9852143d50bf6094e640b485cb2e189a2099085ff45"
+checksum = "0bb2e075f03b3d66d8d8785356224ba688d2906a371015e225beeb65ca92c740"
dependencies = [
"bytes",
"futures-core",
@@ -5352,41 +5464,9 @@ dependencies = [
[[package]]
name = "tonic"
-version = "0.7.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5be9d60db39854b30b835107500cf0aca0b0d14d6e1c3de124217c23a29c2ddb"
-dependencies = [
- "async-stream",
- "async-trait",
- "axum",
- "base64",
- "bytes",
- "futures-core",
- "futures-util",
- "h2",
- "http",
- "http-body",
- "hyper",
- "hyper-timeout",
- "percent-encoding",
- "pin-project",
- "prost 0.10.4",
- "prost-derive 0.10.1",
- "tokio",
- "tokio-stream",
- "tokio-util 0.7.3",
- "tower",
- "tower-layer",
- "tower-service",
- "tracing",
- "tracing-futures",
-]
-
-[[package]]
-name = "tonic"
-version = "0.8.0"
+version = "0.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "498f271adc46acce75d66f639e4d35b31b2394c295c82496727dafa16d465dd2"
+checksum = "11cd56bdb54ef93935a6a79dbd1d91f1ebd4c64150fd61654031fd6b8b775c91"
dependencies = [
"async-stream",
"async-trait",
@@ -5402,11 +5482,11 @@ dependencies = [
"hyper-timeout",
"percent-encoding",
"pin-project",
- "prost 0.11.0",
- "prost-derive 0.11.0",
+ "prost",
+ "prost-derive",
"tokio",
"tokio-stream",
- "tokio-util 0.7.3",
+ "tokio-util",
"tower",
"tower-layer",
"tower-service",
@@ -5429,9 +5509,9 @@ dependencies = [
[[package]]
name = "tower"
-version = "0.4.12"
+version = "0.4.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9a89fd63ad6adf737582df5db40d286574513c69a11dac5214dc3b5603d6713e"
+checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c"
dependencies = [
"futures-core",
"futures-util",
@@ -5442,7 +5522,7 @@ dependencies = [
"rand 0.8.5",
"slab",
"tokio",
- "tokio-util 0.7.3",
+ "tokio-util",
"tower-layer",
"tower-service",
"tracing",
@@ -5450,9 +5530,9 @@ dependencies = [
[[package]]
name = "tower-http"
-version = "0.3.3"
+version = "0.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7d342c6d58709c0a6d48d48dabbb62d4ef955cf5f0f3bbfd845838e7ae88dbae"
+checksum = "3c530c8675c1dbf98facee631536fa116b5fb6382d7dd6dc1b118d970eafe3ba"
dependencies = [
"async-compression",
"base64",
@@ -5470,7 +5550,7 @@ dependencies = [
"percent-encoding",
"pin-project-lite",
"tokio",
- "tokio-util 0.7.3",
+ "tokio-util",
"tower",
"tower-layer",
"tower-service",
@@ -5486,15 +5566,15 @@ checksum = "343bc9466d3fe6b0f960ef45960509f84480bf4fd96f92901afe7ff3df9d3a62"
[[package]]
name = "tower-service"
-version = "0.3.1"
+version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6"
+checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52"
[[package]]
name = "tracing"
-version = "0.1.34"
+version = "0.1.36"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5d0ecdcb44a79f0fe9844f0c4f33a342cbcbb5117de8001e6ba0dc2351327d09"
+checksum = "2fce9567bd60a67d08a16488756721ba392f24f29006402881e43b19aac64307"
dependencies = [
"cfg-if",
"log",
@@ -5510,15 +5590,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09d48f71a791638519505cefafe162606f706c25592e4bde4d97600c0195312e"
dependencies = [
"crossbeam-channel",
- "time 0.3.9",
+ "time 0.3.14",
"tracing-subscriber",
]
[[package]]
name = "tracing-attributes"
-version = "0.1.21"
+version = "0.1.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cc6b8ad3567499f98a1db7a752b07a7c8c7c7c34c332ec00effb2b0027974b7c"
+checksum = "11c75893af559bc8e10716548bdef5cb2b983f8e637db9d0e15126b61b484ee2"
dependencies = [
"proc-macro2",
"quote",
@@ -5527,15 +5607,15 @@ dependencies = [
[[package]]
name = "tracing-bunyan-formatter"
-version = "0.3.2"
+version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bd99ff040622c69c0fc4bd3ea5fe16630ce46400a79bd41339391b2d416ea24c"
+checksum = "a788f2119fde477cd33823330c14004fa8cdac6892fd6f12181bbda9dbf14fc9"
dependencies = [
"gethostname",
"log",
"serde",
"serde_json",
- "time 0.3.9",
+ "time 0.3.14",
"tracing",
"tracing-core",
"tracing-log",
@@ -5544,11 +5624,11 @@ dependencies = [
[[package]]
name = "tracing-core"
-version = "0.1.26"
+version = "0.1.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f54c8ca710e81886d498c2fd3331b56c93aa248d49de2222ad2742247c60072f"
+checksum = "5aeea4303076558a00714b823f9ad67d58a3bbda1df83d8827d21193156e22f7"
dependencies = [
- "lazy_static",
+ "once_cell",
"valuable",
]
@@ -5577,10 +5657,11 @@ dependencies = [
[[package]]
name = "tracing-opentelemetry"
-version = "0.17.2"
+version = "0.17.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1f9378e96a9361190ae297e7f3a8ff644aacd2897f244b1ff81f381669196fa6"
+checksum = "fbbe89715c1dbbb790059e2565353978564924ee85017b5fff365c872ff6721f"
dependencies = [
+ "once_cell",
"opentelemetry",
"tracing",
"tracing-core",
@@ -5590,13 +5671,13 @@ dependencies = [
[[package]]
name = "tracing-subscriber"
-version = "0.3.11"
+version = "0.3.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4bc28f93baff38037f64e6f43d34cfa1605f27a49c34e8a04c5e78b0babf2596"
+checksum = "60db860322da191b40952ad9affe65ea23e7dd6a5c442c2c42865810c6ab8e6b"
dependencies = [
"ansi_term",
- "lazy_static",
"matchers",
+ "once_cell",
"regex",
"sharded-slab",
"smallvec",
@@ -5743,32 +5824,32 @@ version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "623f59e6af2a98bdafeb93fa277ac8e1e40440973001ca15cf4ae1541cd16d56"
+[[package]]
+name = "unicode-ident"
+version = "1.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dcc811dc4066ac62f84f11307873c4850cb653bfa9b1719cee2bd2204a4bc5dd"
+
[[package]]
name = "unicode-normalization"
-version = "0.1.19"
+version = "0.1.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d54590932941a9e9266f0832deed84ebe1bf2e4c9e4a3554d393d18f5e854bf9"
+checksum = "854cbdc4f7bc6ae19c820d44abdc3277ac3e1b2b93db20a636825d9322fb60e6"
dependencies = [
"tinyvec",
]
[[package]]
name = "unicode-segmentation"
-version = "1.9.0"
+version = "1.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7e8820f5d777f6224dc4be3632222971ac30164d4a258d595640799554ebfd99"
+checksum = "0fdbf052a0783de01e944a6ce7a8cb939e295b1e7be835a1112c3b9a7f047a5a"
[[package]]
name = "unicode-width"
-version = "0.1.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973"
-
-[[package]]
-name = "unicode-xid"
-version = "0.2.3"
+version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "957e51f3646910546462e67d5f7599b9e4fb8acdd304b087a6494730f9eebf04"
+checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b"
[[package]]
name = "unicode_names2"
@@ -5784,13 +5865,12 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a"
[[package]]
name = "url"
-version = "2.2.2"
+version = "2.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a507c383b2d33b5fc35d1861e77e6b383d158b2da5e14fe51b83dfedf6fd578c"
+checksum = "0d68c799ae75762b8c3fe375feb6600ef5602c883c5d21eb51c09f22b83c4643"
dependencies = [
"form_urlencoded",
"idna",
- "matches",
"percent-encoding",
]
@@ -5886,9 +5966,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
[[package]]
name = "wasm-bindgen"
-version = "0.2.80"
+version = "0.2.83"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "27370197c907c55e3f1a9fbe26f44e937fe6451368324e009cba39e139dc08ad"
+checksum = "eaf9f5aceeec8be17c128b2e93e031fb8a4d469bb9c4ae2d7dc1888b26887268"
dependencies = [
"cfg-if",
"wasm-bindgen-macro",
@@ -5896,13 +5976,13 @@ dependencies = [
[[package]]
name = "wasm-bindgen-backend"
-version = "0.2.80"
+version = "0.2.83"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "53e04185bfa3a779273da532f5025e33398409573f348985af9a1cbf3774d3f4"
+checksum = "4c8ffb332579b0557b52d268b91feab8df3615f265d5270fec2a8c95b17c1142"
dependencies = [
"bumpalo",
- "lazy_static",
"log",
+ "once_cell",
"proc-macro2",
"quote",
"syn",
@@ -5911,9 +5991,9 @@ dependencies = [
[[package]]
name = "wasm-bindgen-futures"
-version = "0.4.30"
+version = "0.4.33"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6f741de44b75e14c35df886aff5f1eb73aa114fa5d4d00dcd37b5e01259bf3b2"
+checksum = "23639446165ca5a5de86ae1d8896b737ae80319560fbaa4c2887b7da6e7ebd7d"
dependencies = [
"cfg-if",
"js-sys",
@@ -5923,9 +6003,9 @@ dependencies = [
[[package]]
name = "wasm-bindgen-macro"
-version = "0.2.80"
+version = "0.2.83"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "17cae7ff784d7e83a2fe7611cfe766ecf034111b49deb850a3dc7699c08251f5"
+checksum = "052be0f94026e6cbc75cdefc9bae13fd6052cdcaf532fa6c45e7ae33a1e6c810"
dependencies = [
"quote",
"wasm-bindgen-macro-support",
@@ -5933,9 +6013,9 @@ dependencies = [
[[package]]
name = "wasm-bindgen-macro-support"
-version = "0.2.80"
+version = "0.2.83"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "99ec0dc7a4756fffc231aab1b9f2f578d23cd391390ab27f952ae0c9b3ece20b"
+checksum = "07bc0c051dc5f23e307b13285f9d75df86bfdf816c5721e573dec1f9b8aa193c"
dependencies = [
"proc-macro2",
"quote",
@@ -5946,15 +6026,15 @@ dependencies = [
[[package]]
name = "wasm-bindgen-shared"
-version = "0.2.80"
+version = "0.2.83"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d554b7f530dee5964d9a9468d95c1f8b8acae4f282807e7d27d4b03099a46744"
+checksum = "1c38c045535d93ec4f0b4defec448e4291638ee608530863b1e2ba115d4fff7f"
[[package]]
name = "web-sys"
-version = "0.3.57"
+version = "0.3.60"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7b17e741662c70c8bd24ac5c5b18de314a2c26c32bf8346ee1e6f53de919c283"
+checksum = "bcda906d8be16e728fd5adc5b729afad4e444e106ab28cd1c7256e54fa61510f"
dependencies = [
"js-sys",
"wasm-bindgen",
@@ -5971,13 +6051,13 @@ dependencies = [
[[package]]
name = "which"
-version = "4.2.5"
+version = "4.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5c4fb54e6113b6a8772ee41c3404fb0301ac79604489467e0a9ce1f3e97c24ae"
+checksum = "1c831fbbee9e129a8cf93e7747a82da9d95ba8e16621cae60ec2cdc849bacb7b"
dependencies = [
"either",
- "lazy_static",
"libc",
+ "once_cell",
]
[[package]]
@@ -6080,18 +6160,18 @@ dependencies = [
[[package]]
name = "zstd"
-version = "0.10.0+zstd.1.5.2"
+version = "0.10.2+zstd.1.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3b1365becbe415f3f0fcd024e2f7b45bacfb5bdd055f0dc113571394114e7bdd"
+checksum = "5f4a6bd64f22b5e3e94b4e238669ff9f10815c27a5180108b849d24174a83847"
dependencies = [
"zstd-safe",
]
[[package]]
name = "zstd-safe"
-version = "4.1.4+zstd.1.5.2"
+version = "4.1.6+zstd.1.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2f7cd17c9af1a4d6c24beb1cc54b17e2ef7b593dc92f19e9d9acad8b182bbaee"
+checksum = "94b61c51bb270702d6167b8ce67340d2754b088d0c091b06e593aa772c3ee9bb"
dependencies = [
"libc",
"zstd-sys",
diff --git a/config/datanode.example.toml b/config/datanode.example.toml
index 30760a108fbe..0381ea3a0742 100644
--- a/config/datanode.example.toml
+++ b/config/datanode.example.toml
@@ -5,6 +5,10 @@ wal_dir = '/tmp/greptimedb/wal'
mysql_addr = '0.0.0.0:3306'
mysql_runtime_size = 4
+# applied when the postgres feature is enabled
+postgres_addr = '0.0.0.0:5432'
+postgres_runtime_size = 4
+
[storage]
type = 'File'
data_dir = '/tmp/greptimedb/data/'
diff --git a/src/cmd/Cargo.toml b/src/cmd/Cargo.toml
index 1cb37040adc0..f77be51b1316 100644
--- a/src/cmd/Cargo.toml
+++ b/src/cmd/Cargo.toml
@@ -21,3 +21,7 @@ toml = "0.5"
[dev-dependencies]
serde = "1.0"
tempdir = "0.3"
+
+[features]
+default = ["postgres"]
+postgres = ["datanode/postgres"]
diff --git a/src/cmd/src/datanode.rs b/src/cmd/src/datanode.rs
index 34e93b1e39f6..57973bb55d35 100644
--- a/src/cmd/src/datanode.rs
+++ b/src/cmd/src/datanode.rs
@@ -39,6 +39,9 @@ struct StartCommand {
rpc_addr: Option<String>,
#[clap(long)]
mysql_addr: Option<String>,
+ #[cfg(feature = "postgres")]
+ #[clap(long)]
+ postgres_addr: Option<String>,
#[clap(short, long)]
config_file: Option<String>,
}
@@ -78,6 +81,10 @@ impl TryFrom<StartCommand> for DatanodeOptions {
if let Some(addr) = cmd.mysql_addr {
opts.mysql_addr = addr;
}
+ #[cfg(feature = "postgres")]
+ if let Some(addr) = cmd.postgres_addr {
+ opts.postgres_addr = addr;
+ }
Ok(opts)
}
@@ -95,6 +102,8 @@ mod tests {
http_addr: None,
rpc_addr: None,
mysql_addr: None,
+ #[cfg(feature = "postgres")]
+ postgres_addr: None,
config_file: Some(format!(
"{}/../../config/datanode.example.toml",
std::env::current_dir().unwrap().as_path().to_str().unwrap()
@@ -106,6 +115,13 @@ mod tests {
assert_eq!("/tmp/greptimedb/wal".to_string(), options.wal_dir);
assert_eq!("0.0.0.0:3306".to_string(), options.mysql_addr);
assert_eq!(4, options.mysql_runtime_size);
+
+ #[cfg(feature = "postgres")]
+ {
+ assert_eq!("0.0.0.0:5432".to_string(), options.postgres_addr);
+ assert_eq!(4, options.postgres_runtime_size);
+ }
+
match options.storage {
ObjectStoreConfig::File { data_dir } => {
assert_eq!("/tmp/greptimedb/data/".to_string(), data_dir)
diff --git a/src/common/recordbatch/src/lib.rs b/src/common/recordbatch/src/lib.rs
index e8d291881953..dafacf75065f 100644
--- a/src/common/recordbatch/src/lib.rs
+++ b/src/common/recordbatch/src/lib.rs
@@ -71,6 +71,7 @@ impl RecordBatches {
self.schema.clone()
}
+ // TODO: find a new name to avoid misreading this as an allocation operation
pub fn to_vec(self) -> Vec<RecordBatch> {
self.batches
}
diff --git a/src/datanode/Cargo.toml b/src/datanode/Cargo.toml
index 427263802969..f83560efb170 100644
--- a/src/datanode/Cargo.toml
+++ b/src/datanode/Cargo.toml
@@ -4,10 +4,11 @@ version = "0.1.0"
edition = "2021"
[features]
-default = ["python"]
+default = ["python", "postgres"]
python = [
"dep:script"
]
+postgres = ["servers/postgres"]
[dependencies]
api = { path = "../api" }
diff --git a/src/datanode/src/datanode.rs b/src/datanode/src/datanode.rs
index 1ae4b0c56fcd..0ad523eecd99 100644
--- a/src/datanode/src/datanode.rs
+++ b/src/datanode/src/datanode.rs
@@ -26,6 +26,10 @@ pub struct DatanodeOptions {
pub rpc_addr: String,
pub mysql_addr: String,
pub mysql_runtime_size: u32,
+ #[cfg(feature = "postgres")]
+ pub postgres_addr: String,
+ #[cfg(feature = "postgres")]
+ pub postgres_runtime_size: u32,
pub wal_dir: String,
pub storage: ObjectStoreConfig,
}
@@ -37,6 +41,10 @@ impl Default for DatanodeOptions {
rpc_addr: "0.0.0.0:3001".to_string(),
mysql_addr: "0.0.0.0:3306".to_string(),
mysql_runtime_size: 2,
+ #[cfg(feature = "postgres")]
+ postgres_addr: "0.0.0.0:5432".to_string(),
+ #[cfg(feature = "postgres")]
+ postgres_runtime_size: 2,
wal_dir: "/tmp/greptimedb/wal".to_string(),
storage: ObjectStoreConfig::default(),
}
diff --git a/src/datanode/src/server.rs b/src/datanode/src/server.rs
index 26d0faae4602..182a4b577522 100644
--- a/src/datanode/src/server.rs
+++ b/src/datanode/src/server.rs
@@ -7,6 +7,7 @@ use common_runtime::Builder as RuntimeBuilder;
use servers::grpc::GrpcServer;
use servers::http::HttpServer;
use servers::mysql::server::MysqlServer;
+use servers::postgres::PostgresServer;
use servers::server::Server;
use snafu::ResultExt;
use tokio::try_join;
@@ -20,6 +21,8 @@ pub struct Services {
http_server: HttpServer,
grpc_server: GrpcServer,
mysql_server: Box<dyn Server>,
+ #[cfg(feature = "postgres")]
+ postgres_server: Box<dyn Server>,
}
impl Services {
@@ -31,34 +34,49 @@ impl Services {
.build()
.context(error::RuntimeResourceSnafu)?,
);
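+ // Dedicated I/O runtime for the Postgres server, mirroring the MySQL runtime above.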
+ #[cfg(feature = "postgres")]
+ let postgres_io_runtime = Arc::new(
+ RuntimeBuilder::default()
+ .worker_threads(opts.postgres_runtime_size as usize)
+ .thread_name("postgres-io-handlers")
+ .build()
+ .context(error::RuntimeResourceSnafu)?,
+ );
Ok(Self {
http_server: HttpServer::new(instance.clone()),
grpc_server: GrpcServer::new(instance.clone(), instance.clone()),
- mysql_server: MysqlServer::create_server(instance, mysql_io_runtime),
+ mysql_server: MysqlServer::create_server(instance.clone(), mysql_io_runtime),
+ #[cfg(feature = "postgres")]
+ postgres_server: Box::new(PostgresServer::new(instance, postgres_io_runtime)),
})
}
// TODO(LFC): make servers started on demand (not starting mysql if not needed, for example)
pub async fn start(&mut self, opts: &DatanodeOptions) -> Result<()> {
- let http_addr = &opts.http_addr;
- let http_addr: SocketAddr = http_addr
- .parse()
- .context(error::ParseAddrSnafu { addr: http_addr })?;
+ let http_addr: SocketAddr = opts.http_addr.parse().context(error::ParseAddrSnafu {
+ addr: &opts.http_addr,
+ })?;
+
+ let grpc_addr: SocketAddr = opts.rpc_addr.parse().context(error::ParseAddrSnafu {
+ addr: &opts.rpc_addr,
+ })?;
- let grpc_addr = &opts.rpc_addr;
- let grpc_addr: SocketAddr = grpc_addr
- .parse()
- .context(error::ParseAddrSnafu { addr: grpc_addr })?;
+ let mysql_addr: SocketAddr = opts.mysql_addr.parse().context(error::ParseAddrSnafu {
+ addr: &opts.mysql_addr,
+ })?;
- let mysql_addr = &opts.mysql_addr;
- let mysql_addr: SocketAddr = mysql_addr
- .parse()
- .context(error::ParseAddrSnafu { addr: mysql_addr })?;
+ #[cfg(feature = "postgres")]
+ let postgres_addr: SocketAddr =
+ opts.postgres_addr.parse().context(error::ParseAddrSnafu {
+ addr: &opts.postgres_addr,
+ })?;
try_join!(
self.http_server.start(http_addr),
self.grpc_server.start(grpc_addr),
self.mysql_server.start(mysql_addr),
+ #[cfg(feature = "postgres")]
+ self.postgres_server.start(postgres_addr),
)
.context(error::StartServerSnafu)?;
Ok(())
diff --git a/src/datatypes/src/type_id.rs b/src/datatypes/src/type_id.rs
index 28a81d13f3ec..148e3e999549 100644
--- a/src/datatypes/src/type_id.rs
+++ b/src/datatypes/src/type_id.rs
@@ -1,3 +1,4 @@
+#[cfg(any(test, feature = "test"))]
use crate::data_type::ConcreteDataType;
/// Unique identifier for logical data type.
diff --git a/src/datatypes/src/vectors/list.rs b/src/datatypes/src/vectors/list.rs
index d06a48f8fbef..6269f550a928 100644
--- a/src/datatypes/src/vectors/list.rs
+++ b/src/datatypes/src/vectors/list.rs
@@ -268,6 +268,7 @@ impl MutableVector for ListVectorBuilder {
#[cfg(test)]
mod tests {
use arrow::array::{MutableListArray, MutablePrimitiveArray, TryExtend};
+ use serde_json::json;
use super::*;
use crate::types::ListType;
@@ -426,8 +427,8 @@ mod tests {
let list_vector = ListVector::from(arrow_array);
assert_eq!(
- "Ok([Array([Number(1), Number(2), Number(3)]), Null, Array([Number(4), Null, Number(6)])])",
- format!("{:?}", list_vector.serialize_to_json())
+ vec![json!([1, 2, 3]), json!(null), json!([4, null, 6]),],
+ list_vector.serialize_to_json().unwrap()
);
}
diff --git a/src/servers/Cargo.toml b/src/servers/Cargo.toml
index 281f8a309030..9d7143dae863 100644
--- a/src/servers/Cargo.toml
+++ b/src/servers/Cargo.toml
@@ -16,10 +16,12 @@ common-telemetry = { path = "../common/telemetry" }
common-time = { path = "../common/time" }
datatypes = { path = "../datatypes" }
futures = "0.3"
+hex = { version = "0.4", optional = true }
hyper = { version = "0.14", features = ["full"] }
metrics = "0.20"
num_cpus = "1.13"
opensrv-mysql = "0.1"
+pgwire = { version = "0.3", optional = true }
query = { path = "../query" }
serde = "1.0"
serde_json = "1.0"
@@ -30,6 +32,10 @@ tonic = "0.8"
tower = { version = "0.4", features = ["full"] }
tower-http = { version = "0.3", features = ["full"] }
+[features]
+default = ["postgres"]
+postgres = ["hex", "pgwire"]
+
[dev-dependencies]
catalog = { path = "../catalog" }
common-base = { path = "../common/base" }
@@ -37,3 +43,4 @@ mysql_async = "0.30"
rand = "0.8"
script = { path = "../script", features = ["python"] }
test-util = { path = "../../test-util" }
+tokio-postgres = "0.7"
diff --git a/src/servers/src/lib.rs b/src/servers/src/lib.rs
index b469916ef16f..d59ba05e2192 100644
--- a/src/servers/src/lib.rs
+++ b/src/servers/src/lib.rs
@@ -2,5 +2,7 @@ pub mod error;
pub mod grpc;
pub mod http;
pub mod mysql;
+#[cfg(feature = "postgres")]
+pub mod postgres;
pub mod query_handler;
pub mod server;
diff --git a/src/servers/src/postgres/handler.rs b/src/servers/src/postgres/handler.rs
new file mode 100644
index 000000000000..1fd8f32d3135
--- /dev/null
+++ b/src/servers/src/postgres/handler.rs
@@ -0,0 +1,295 @@
+use std::ops::Deref;
+
+use async_trait::async_trait;
+use common_query::Output;
+use common_recordbatch::{util, RecordBatch};
+use common_time::timestamp::TimeUnit;
+use datatypes::prelude::{ConcreteDataType, Value};
+use datatypes::schema::SchemaRef;
+use pgwire::api::portal::Portal;
+use pgwire::api::query::{ExtendedQueryHandler, SimpleQueryHandler};
+use pgwire::api::results::{FieldInfo, Response, Tag, TextQueryResponseBuilder};
+use pgwire::api::{ClientInfo, Type};
+use pgwire::error::{PgWireError, PgWireResult};
+
+use crate::error::{self, Error, Result};
+use crate::query_handler::SqlQueryHandlerRef;
+
+pub struct PostgresServerHandler {
+ query_handler: SqlQueryHandlerRef,
+}
+
+impl PostgresServerHandler {
+ pub fn new(query_handler: SqlQueryHandlerRef) -> Self {
+ PostgresServerHandler { query_handler }
+ }
+}
+
+#[async_trait]
+impl SimpleQueryHandler for PostgresServerHandler {
+ async fn do_query<C>(&self, _client: &C, query: &str) -> PgWireResult<Response>
+ where
+ C: ClientInfo + Unpin + Send + Sync,
+ {
+ let output = self
+ .query_handler
+ .do_query(query)
+ .await
+ .map_err(|e| PgWireError::ApiError(Box::new(e)))?;
+
+ match output {
+ Output::AffectedRows(rows) => Ok(Response::Execution(Tag::new_for_execution(
+ "OK",
+ Some(rows),
+ ))),
+ Output::Stream(record_stream) => {
+ let schema = record_stream.schema();
+ let recordbatches = util::collect(record_stream)
+ .await
+ .map_err(|e| PgWireError::ApiError(Box::new(e)))?;
+ recordbatches_to_query_response(recordbatches.iter(), schema)
+ }
+ Output::RecordBatches(recordbatches) => {
+ let schema = recordbatches.schema();
+ recordbatches_to_query_response(recordbatches.to_vec().iter(), schema)
+ }
+ }
+ }
+}
+
+fn recordbatches_to_query_response<'a, I>(
+ recordbatches: I,
+ schema: SchemaRef,
+) -> PgWireResult<Response>
+where
+ I: Iterator<Item = &'a RecordBatch>,
+{
+ let pg_schema = schema_to_pg(schema).map_err(|e| PgWireError::ApiError(Box::new(e)))?;
+ let mut builder = TextQueryResponseBuilder::new(pg_schema);
+
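+    // encode each row of every record batch into the text-format response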
+ for recordbatch in recordbatches {
+ for row in recordbatch.rows() {
+ let row = row.map_err(|e| PgWireError::ApiError(Box::new(e)))?;
+ for value in row.into_iter() {
+ encode_value(&value, &mut builder)?;
+ }
+ builder.finish_row();
+ }
+ }
+
+ Ok(Response::Query(builder.build()))
+}
+
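+/// Convert a GreptimeDB schema into the corresponding PostgreSQL field descriptions.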
+fn schema_to_pg(origin: SchemaRef) -> Result<Vec<FieldInfo>> {
+ origin
+ .column_schemas()
+ .iter()
+ .map(|col| {
+ Ok(FieldInfo::new(
+ col.name.clone(),
+ None,
+ None,
+ type_translate(&col.data_type)?,
+ ))
+ })
+ .collect::<Result<Vec<FieldInfo>>>()
+}
+
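+/// Encode a single value into the text-format response builder.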
+fn encode_value(value: &Value, builder: &mut TextQueryResponseBuilder) -> PgWireResult<()> {
+ match value {
+ Value::Null => builder.append_field(None::<i8>),
+ Value::Boolean(v) => builder.append_field(Some(v)),
+ Value::UInt8(v) => builder.append_field(Some(v)),
+ Value::UInt16(v) => builder.append_field(Some(v)),
+ Value::UInt32(v) => builder.append_field(Some(v)),
+ Value::UInt64(v) => builder.append_field(Some(v)),
+ Value::Int8(v) => builder.append_field(Some(v)),
+ Value::Int16(v) => builder.append_field(Some(v)),
+ Value::Int32(v) => builder.append_field(Some(v)),
+ Value::Int64(v) => builder.append_field(Some(v)),
+ Value::Float32(v) => builder.append_field(Some(v.0)),
+ Value::Float64(v) => builder.append_field(Some(v.0)),
+ Value::String(v) => builder.append_field(Some(v.as_utf8())),
+ Value::Binary(v) => builder.append_field(Some(hex::encode(v.deref()))),
+ Value::Date(v) => builder.append_field(Some(v.val())),
+ Value::DateTime(v) => builder.append_field(Some(v.val())),
+ Value::Timestamp(v) => builder.append_field(Some(v.convert_to(TimeUnit::Millisecond))),
+ Value::List(_) => Err(PgWireError::ApiError(Box::new(Error::Internal {
+ err_msg: format!(
+ "cannot write value {:?} in postgres protocol: unimplemented",
+ &value
+ ),
+ }))),
+ }
+}
+
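+/// Map a GreptimeDB concrete data type to the corresponding PostgreSQL wire type.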
+fn type_translate(origin: &ConcreteDataType) -> Result<Type> {
+ match origin {
+ &ConcreteDataType::Null(_) => Ok(Type::UNKNOWN),
+ &ConcreteDataType::Boolean(_) => Ok(Type::BOOL),
+ &ConcreteDataType::Int8(_) | &ConcreteDataType::UInt8(_) => Ok(Type::CHAR),
+ &ConcreteDataType::Int16(_) | &ConcreteDataType::UInt16(_) => Ok(Type::INT2),
+ &ConcreteDataType::Int32(_) | &ConcreteDataType::UInt32(_) => Ok(Type::INT4),
+ &ConcreteDataType::Int64(_) | &ConcreteDataType::UInt64(_) => Ok(Type::INT8),
+ &ConcreteDataType::Float32(_) => Ok(Type::FLOAT4),
+ &ConcreteDataType::Float64(_) => Ok(Type::FLOAT8),
+ &ConcreteDataType::Binary(_) => Ok(Type::BYTEA),
+ &ConcreteDataType::String(_) => Ok(Type::VARCHAR),
+ &ConcreteDataType::Date(_) => Ok(Type::DATE),
+ &ConcreteDataType::DateTime(_) => Ok(Type::TIMESTAMP),
+ &ConcreteDataType::Timestamp(_) => Ok(Type::TIMESTAMP),
+ &ConcreteDataType::List(_) => error::InternalSnafu {
+ err_msg: format!("not implemented for column datatype {:?}", origin),
+ }
+ .fail(),
+ }
+}
+
+#[async_trait]
+impl ExtendedQueryHandler for PostgresServerHandler {
+ async fn do_query<C>(&self, _client: &mut C, _portal: &Portal) -> PgWireResult<Response>
+ where
+ C: ClientInfo + Unpin + Send + Sync,
+ {
+ unimplemented!()
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use std::sync::Arc;
+
+ use datatypes::schema::{ColumnSchema, Schema};
+ use datatypes::value::ListValue;
+ use pgwire::api::results::FieldInfo;
+ use pgwire::api::Type;
+
+ use super::*;
+
+ #[test]
+ fn test_schema_convert() {
+ let column_schemas = vec![
+ ColumnSchema::new("nulls", ConcreteDataType::null_datatype(), true),
+ ColumnSchema::new("bools", ConcreteDataType::boolean_datatype(), true),
+ ColumnSchema::new("int8s", ConcreteDataType::int8_datatype(), true),
+ ColumnSchema::new("int16s", ConcreteDataType::int16_datatype(), true),
+ ColumnSchema::new("int32s", ConcreteDataType::int32_datatype(), true),
+ ColumnSchema::new("int64s", ConcreteDataType::int64_datatype(), true),
+ ColumnSchema::new("uint8s", ConcreteDataType::uint8_datatype(), true),
+ ColumnSchema::new("uint16s", ConcreteDataType::uint16_datatype(), true),
+ ColumnSchema::new("uint32s", ConcreteDataType::uint32_datatype(), true),
+ ColumnSchema::new("uint64s", ConcreteDataType::uint64_datatype(), true),
+ ColumnSchema::new("float32s", ConcreteDataType::float32_datatype(), true),
+ ColumnSchema::new("float64s", ConcreteDataType::float64_datatype(), true),
+ ColumnSchema::new("binaries", ConcreteDataType::binary_datatype(), true),
+ ColumnSchema::new("strings", ConcreteDataType::string_datatype(), true),
+ ColumnSchema::new(
+ "timestamps",
+ ConcreteDataType::timestamp_millis_datatype(),
+ true,
+ ),
+ ColumnSchema::new("dates", ConcreteDataType::date_datatype(), true),
+ ];
+ let pg_field_info = vec![
+ FieldInfo::new("nulls".into(), None, None, Type::UNKNOWN),
+ FieldInfo::new("bools".into(), None, None, Type::BOOL),
+ FieldInfo::new("int8s".into(), None, None, Type::CHAR),
+ FieldInfo::new("int16s".into(), None, None, Type::INT2),
+ FieldInfo::new("int32s".into(), None, None, Type::INT4),
+ FieldInfo::new("int64s".into(), None, None, Type::INT8),
+ FieldInfo::new("uint8s".into(), None, None, Type::CHAR),
+ FieldInfo::new("uint16s".into(), None, None, Type::INT2),
+ FieldInfo::new("uint32s".into(), None, None, Type::INT4),
+ FieldInfo::new("uint64s".into(), None, None, Type::INT8),
+ FieldInfo::new("float32s".into(), None, None, Type::FLOAT4),
+ FieldInfo::new("float64s".into(), None, None, Type::FLOAT8),
+ FieldInfo::new("binaries".into(), None, None, Type::BYTEA),
+ FieldInfo::new("strings".into(), None, None, Type::VARCHAR),
+ FieldInfo::new("timestamps".into(), None, None, Type::TIMESTAMP),
+ FieldInfo::new("dates".into(), None, None, Type::DATE),
+ ];
+ let schema = Arc::new(Schema::new(column_schemas));
+ let fs = schema_to_pg(schema).unwrap();
+ assert_eq!(fs, pg_field_info);
+ }
+
+ #[test]
+ fn test_encode_text_format_data() {
+ let schema = vec![
+ FieldInfo::new("nulls".into(), None, None, Type::UNKNOWN),
+ FieldInfo::new("bools".into(), None, None, Type::BOOL),
+ FieldInfo::new("uint8s".into(), None, None, Type::CHAR),
+ FieldInfo::new("uint16s".into(), None, None, Type::INT2),
+ FieldInfo::new("uint32s".into(), None, None, Type::INT4),
+ FieldInfo::new("uint64s".into(), None, None, Type::INT8),
+ FieldInfo::new("int8s".into(), None, None, Type::CHAR),
+ FieldInfo::new("int8s".into(), None, None, Type::CHAR),
+ FieldInfo::new("int16s".into(), None, None, Type::INT2),
+ FieldInfo::new("int16s".into(), None, None, Type::INT2),
+ FieldInfo::new("int32s".into(), None, None, Type::INT4),
+ FieldInfo::new("int32s".into(), None, None, Type::INT4),
+ FieldInfo::new("int64s".into(), None, None, Type::INT8),
+ FieldInfo::new("int64s".into(), None, None, Type::INT8),
+ FieldInfo::new("float32s".into(), None, None, Type::FLOAT4),
+ FieldInfo::new("float32s".into(), None, None, Type::FLOAT4),
+ FieldInfo::new("float32s".into(), None, None, Type::FLOAT4),
+ FieldInfo::new("float64s".into(), None, None, Type::FLOAT8),
+ FieldInfo::new("float64s".into(), None, None, Type::FLOAT8),
+ FieldInfo::new("float64s".into(), None, None, Type::FLOAT8),
+ FieldInfo::new("strings".into(), None, None, Type::VARCHAR),
+ FieldInfo::new("binaries".into(), None, None, Type::BYTEA),
+ FieldInfo::new("dates".into(), None, None, Type::DATE),
+ FieldInfo::new("datetimes".into(), None, None, Type::TIMESTAMP),
+ FieldInfo::new("timestamps".into(), None, None, Type::TIMESTAMP),
+ ];
+
+ let values = vec![
+ Value::Null,
+ Value::Boolean(true),
+ Value::UInt8(u8::MAX),
+ Value::UInt16(u16::MAX),
+ Value::UInt32(u32::MAX),
+ Value::UInt64(u64::MAX),
+ Value::Int8(i8::MAX),
+ Value::Int8(i8::MIN),
+ Value::Int16(i16::MAX),
+ Value::Int16(i16::MIN),
+ Value::Int32(i32::MAX),
+ Value::Int32(i32::MIN),
+ Value::Int64(i64::MAX),
+ Value::Int64(i64::MIN),
+ Value::Float32(f32::MAX.into()),
+ Value::Float32(f32::MIN.into()),
+ Value::Float32(0f32.into()),
+ Value::Float64(f64::MAX.into()),
+ Value::Float64(f64::MIN.into()),
+ Value::Float64(0f64.into()),
+ Value::String("greptime".into()),
+ Value::Binary("greptime".as_bytes().into()),
+ Value::Date(1001i32.into()),
+ Value::DateTime(1000001i64.into()),
+ Value::Timestamp(1000001i64.into()),
+ ];
+ let mut builder = TextQueryResponseBuilder::new(schema);
+ for i in values {
+ assert!(encode_value(&i, &mut builder).is_ok());
+ }
+
+ let err = encode_value(
+ &Value::List(ListValue::new(
+ Some(Box::new(vec![])),
+ ConcreteDataType::int8_datatype(),
+ )),
+ &mut builder,
+ )
+ .unwrap_err();
+ match err {
+ PgWireError::ApiError(e) => {
+ assert!(format!("{}", e).contains("Internal error:"));
+ }
+ _ => {
+ unreachable!()
+ }
+ }
+ }
+}
diff --git a/src/servers/src/postgres/mod.rs b/src/servers/src/postgres/mod.rs
new file mode 100644
index 000000000000..b7e04fe86951
--- /dev/null
+++ b/src/servers/src/postgres/mod.rs
@@ -0,0 +1,4 @@
+mod handler;
+mod server;
+
+pub use server::PostgresServer;
diff --git a/src/servers/src/postgres/server.rs b/src/servers/src/postgres/server.rs
new file mode 100644
index 000000000000..f34e60d9de6d
--- /dev/null
+++ b/src/servers/src/postgres/server.rs
@@ -0,0 +1,136 @@
+use std::future::Future;
+use std::net::SocketAddr;
+use std::sync::Arc;
+
+use async_trait::async_trait;
+use common_runtime::Runtime;
+use common_telemetry::logging::{error, info};
+use futures::future::AbortHandle;
+use futures::future::AbortRegistration;
+use futures::future::Abortable;
+use futures::StreamExt;
+use pgwire::api::auth::noop::NoopStartupHandler;
+use pgwire::tokio::process_socket;
+use snafu::prelude::*;
+use tokio;
+use tokio::task::JoinHandle;
+use tokio_stream::wrappers::TcpListenerStream;
+
+use crate::error::{self, Result};
+use crate::postgres::handler::PostgresServerHandler;
+use crate::query_handler::SqlQueryHandlerRef;
+use crate::server::Server;
+
+pub struct PostgresServer {
+ // See MySQL module for usage of these types
+ abort_handle: AbortHandle,
+ abort_registration: Option<AbortRegistration>,
+
+ // A handle holding the TCP accepting task.
+ join_handle: Option<JoinHandle<()>>,
+
+ auth_handler: Arc<NoopStartupHandler>,
+ query_handler: Arc<PostgresServerHandler>,
+ io_runtime: Arc<Runtime>,
+}
+
+impl PostgresServer {
+ /// Creates a new Postgres server with provided query_handler and async runtime
+ pub fn new(query_handler: SqlQueryHandlerRef, io_runtime: Arc<Runtime>) -> PostgresServer {
+ let (abort_handle, registration) = AbortHandle::new_pair();
+ let postgres_handler = Arc::new(PostgresServerHandler::new(query_handler));
+ let startup_handler = Arc::new(NoopStartupHandler);
+ PostgresServer {
+ abort_handle,
+ abort_registration: Some(registration),
+ join_handle: None,
+
+ auth_handler: startup_handler,
+ query_handler: postgres_handler,
+
+ io_runtime,
+ }
+ }
+
+ async fn bind(addr: SocketAddr) -> Result<(TcpListenerStream, SocketAddr)> {
+ let listener = tokio::net::TcpListener::bind(addr)
+ .await
+ .context(error::TokioIoSnafu {
+ err_msg: format!("Failed to bind addr {}", addr),
+ })?;
+        // get the actual bound addr in case the input addr uses port 0
+ let addr = listener.local_addr()?;
+ info!("Postgres server is bound to {}", addr);
+ Ok((TcpListenerStream::new(listener), addr))
+ }
+
+ fn accept(&self, accepting_stream: Abortable<TcpListenerStream>) -> impl Future<Output = ()> {
+ let io_runtime = self.io_runtime.clone();
+ let auth_handler = self.auth_handler.clone();
+ let query_handler = self.query_handler.clone();
+
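+        // serve each accepted TCP connection on the shared IO runtime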
+ accepting_stream.for_each(move |tcp_stream| {
+ let io_runtime = io_runtime.clone();
+ let auth_handler = auth_handler.clone();
+ let query_handler = query_handler.clone();
+
+ async move {
+ match tcp_stream {
+ Err(error) => error!("Broken pipe: {}", error), // IoError doesn't impl ErrorExt.
+ Ok(io_stream) => {
+ io_runtime.spawn(async move {
+ process_socket(
+ io_stream,
+ auth_handler.clone(),
+ query_handler.clone(),
+ query_handler.clone(),
+ )
+ .await;
+ });
+ }
+ };
+ }
+ })
+ }
+}
+
+#[async_trait]
+impl Server for PostgresServer {
+ async fn shutdown(&mut self) -> Result<()> {
+ match self.join_handle.take() {
+ Some(join_handle) => {
+ self.abort_handle.abort();
+
+ if let Err(error) = join_handle.await {
+ // Couldn't use `error!(e; xxx)` as JoinError doesn't implement ErrorExt.
+ error!(
+ "Unexpected error during shutdown Postgres server, error: {}",
+ error
+ );
+ } else {
+ info!("Postgres server is shutdown.")
+ }
+ Ok(())
+ }
+ None => error::InternalSnafu {
+ err_msg: "Postgres server is not started.",
+ }
+ .fail()?,
+ }
+ }
+
+ async fn start(&mut self, listening: SocketAddr) -> Result<SocketAddr> {
+ match self.abort_registration.take() {
+ Some(registration) => {
+ let (stream, listener) = Self::bind(listening).await?;
+ let stream = Abortable::new(stream, registration);
+ self.join_handle = Some(tokio::spawn(self.accept(stream)));
+ Ok(listener)
+ }
+ None => error::InternalSnafu {
+ err_msg: "Postgres server has been started.",
+ }
+ .fail()?,
+ }
+ }
+}
diff --git a/src/servers/tests/mod.rs b/src/servers/tests/mod.rs
index 22f8666db5b5..54abbe7935f5 100644
--- a/src/servers/tests/mod.rs
+++ b/src/servers/tests/mod.rs
@@ -18,6 +18,8 @@ use script::{
engine::{CompileContext, EvalContext, Script, ScriptEngine},
python::{PyEngine, PyScript},
};
+#[cfg(feature = "postgres")]
+mod postgres;
struct DummyInstance {
query_engine: QueryEngineRef,
diff --git a/src/servers/tests/postgres/mod.rs b/src/servers/tests/postgres/mod.rs
new file mode 100644
index 000000000000..8d9abddf9bea
--- /dev/null
+++ b/src/servers/tests/postgres/mod.rs
@@ -0,0 +1,169 @@
+use std::net::SocketAddr;
+use std::sync::Arc;
+use std::time::Duration;
+
+use common_runtime::Builder as RuntimeBuilder;
+use rand::rngs::StdRng;
+use rand::Rng;
+use servers::error::Result;
+use servers::postgres::PostgresServer;
+use servers::server::Server;
+use test_util::MemTable;
+use tokio_postgres::{Client, Error as PgError, NoTls, SimpleQueryMessage};
+
+use crate::create_testing_sql_query_handler;
+
+fn create_postgres_server(table: MemTable) -> Result<Box<dyn Server>> {
+ let query_handler = create_testing_sql_query_handler(table);
+ let io_runtime = Arc::new(
+ RuntimeBuilder::default()
+ .worker_threads(4)
+ .thread_name("postgres-io-handlers")
+ .build()
+ .unwrap(),
+ );
+ Ok(Box::new(PostgresServer::new(query_handler, io_runtime)))
+}
+
+#[tokio::test]
+pub async fn test_start_postgres_server() -> Result<()> {
+ let table = MemTable::default_numbers_table();
+
+ let mut pg_server = create_postgres_server(table)?;
+ let listening = "127.0.0.1:0".parse::<SocketAddr>().unwrap();
+ let result = pg_server.start(listening).await;
+ assert!(result.is_ok());
+
+ let result = pg_server.start(listening).await;
+ assert!(result
+ .unwrap_err()
+ .to_string()
+ .contains("Postgres server has been started."));
+ Ok(())
+}
+
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
+async fn test_shutdown_pg_server() -> Result<()> {
+ common_telemetry::init_default_ut_logging();
+
+ let table = MemTable::default_numbers_table();
+
+ let mut postgres_server = create_postgres_server(table)?;
+ let result = postgres_server.shutdown().await;
+ assert!(result
+ .unwrap_err()
+ .to_string()
+ .contains("Postgres server is not started."));
+
+ let listening = "127.0.0.1:5432".parse::<SocketAddr>().unwrap();
+ let server_addr = postgres_server.start(listening).await.unwrap();
+ let server_port = server_addr.port();
+
+ let mut join_handles = vec![];
+ for _ in 0..2 {
+ join_handles.push(tokio::spawn(async move {
+ for _ in 0..1000 {
+ match create_connection(server_port).await {
+ Ok(connection) => {
+ let rows = connection
+ .simple_query("SELECT uint32s FROM numbers LIMIT 1")
+ .await
+ .unwrap();
+ let result_text = unwrap_results(&rows)[0];
+ let result: i32 = result_text.parse().unwrap();
+ assert_eq!(result, 0);
+ tokio::time::sleep(Duration::from_millis(10)).await;
+ }
+ Err(e) => {
+ return Err(e);
+ }
+ }
+ }
+ Ok(())
+ }))
+ }
+
+ tokio::time::sleep(Duration::from_millis(100)).await;
+ let result = postgres_server.shutdown().await;
+ assert!(result.is_ok());
+
+ for handle in join_handles.iter_mut() {
+ let result = handle.await.unwrap();
+ assert!(result.is_err());
+ let error = result.unwrap_err().to_string();
+ assert!(error.contains("Connection refused") || error.contains("Connection reset by peer"));
+ }
+
+ Ok(())
+}
+
+#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
+async fn test_query_pg_concurrently() -> Result<()> {
+ common_telemetry::init_default_ut_logging();
+
+ let table = MemTable::default_numbers_table();
+
+ let mut pg_server = create_postgres_server(table)?;
+ let listening = "127.0.0.1:0".parse::<SocketAddr>().unwrap();
+ let server_addr = pg_server.start(listening).await.unwrap();
+ let server_port = server_addr.port();
+
+ let threads = 4;
+ let expect_executed_queries_per_worker = 300;
+ let mut join_handles = vec![];
+ for _i in 0..threads {
+ join_handles.push(tokio::spawn(async move {
+ let mut rand: StdRng = rand::SeedableRng::from_entropy();
+
+ let mut client = create_connection(server_port).await.unwrap();
+
+ for _k in 0..expect_executed_queries_per_worker {
+ let expected: u32 = rand.gen_range(0..100);
+ let result: u32 = unwrap_results(
+ client
+ .simple_query(&format!(
+ "SELECT uint32s FROM numbers WHERE uint32s = {}",
+ expected
+ ))
+ .await
+ .unwrap()
+ .as_ref(),
+ )[0]
+ .parse()
+ .unwrap();
+ assert_eq!(result, expected);
+
+ // 1/100 chance to reconnect
+ let should_recreate_conn = expected == 1;
+ if should_recreate_conn {
+ client = create_connection(server_port).await.unwrap();
+ }
+ }
+ expect_executed_queries_per_worker
+ }))
+ }
+ let mut total_pending_queries = threads * expect_executed_queries_per_worker;
+ for handle in join_handles.iter_mut() {
+ total_pending_queries -= handle.await.unwrap();
+ }
+ assert_eq!(0, total_pending_queries);
+ Ok(())
+}
+
+async fn create_connection(port: u16) -> std::result::Result<Client, PgError> {
+ let url = format!("host=127.0.0.1 port={} connect_timeout=2", port);
+ let (client, conn) = tokio_postgres::connect(&url, NoTls).await?;
+ tokio::spawn(conn);
+ Ok(client)
+}
+
+fn resolve_result(resp: &SimpleQueryMessage, col_index: usize) -> Option<&str> {
+ match resp {
+ &SimpleQueryMessage::Row(ref r) => r.get(col_index),
+ _ => None,
+ }
+}
+
+fn unwrap_results(resp: &[SimpleQueryMessage]) -> Vec<&str> {
+ resp.iter().filter_map(|m| resolve_result(m, 0)).collect()
+}
|
feat
|
Initial support of postgresql wire protocol (#229)
|
e23628a4e09a3b7951fce5b7de4fa588ed25d99c
|
2025-03-06 09:03:17
|
liyang
|
ci: bump dev-builder image version to 2024-12-25-a71b93dd-20250305072908 (#5651)
| false
|
diff --git a/Makefile b/Makefile
index c2f6f72200cb..81537ae9761d 100644
--- a/Makefile
+++ b/Makefile
@@ -8,7 +8,7 @@ CARGO_BUILD_OPTS := --locked
IMAGE_REGISTRY ?= docker.io
IMAGE_NAMESPACE ?= greptime
IMAGE_TAG ?= latest
-DEV_BUILDER_IMAGE_TAG ?= 2024-12-25-9d0fa5d5-20250124085746
+DEV_BUILDER_IMAGE_TAG ?= 2024-12-25-a71b93dd-20250305072908
BUILDX_MULTI_PLATFORM_BUILD ?= false
BUILDX_BUILDER_NAME ?= gtbuilder
BASE_IMAGE ?= ubuntu
|
ci
|
bump dev-builder image version to 2024-12-25-a71b93dd-20250305072908 (#5651)
|
2b912d93fbe5a83cbc6ebb78ad52a8dd4b5cb78b
|
2024-07-15 14:50:04
|
discord9
|
feat: flow perf&fix df func call (#4347)
| false
|
diff --git a/src/flow/src/adapter.rs b/src/flow/src/adapter.rs
index e22894c19de7..80491a6861ca 100644
--- a/src/flow/src/adapter.rs
+++ b/src/flow/src/adapter.rs
@@ -18,7 +18,7 @@
use std::collections::{BTreeMap, HashMap};
use std::sync::Arc;
-use std::time::{Instant, SystemTime};
+use std::time::{Duration, Instant, SystemTime};
use api::v1::{RowDeleteRequest, RowDeleteRequests, RowInsertRequest, RowInsertRequests};
use common_config::Configurable;
@@ -51,7 +51,7 @@ use crate::adapter::worker::{create_worker, Worker, WorkerHandle};
use crate::compute::ErrCollector;
use crate::error::{ExternalSnafu, InternalSnafu, TableNotFoundSnafu, UnexpectedSnafu};
use crate::expr::GlobalId;
-use crate::repr::{self, DiffRow, Row};
+use crate::repr::{self, DiffRow, Row, BATCH_SIZE};
use crate::transform::sql_to_flow_plan;
mod flownode_impl;
@@ -67,7 +67,7 @@ mod table_source;
use crate::error::Error;
use crate::FrontendInvoker;
-// TODO(discord9): replace this with `GREPTIME_TIMESTAMP` before v0.9
+// `GREPTIME_TIMESTAMP` is not used here, so that tables created automatically by flow can be distinguished
pub const AUTO_CREATED_PLACEHOLDER_TS_COL: &str = "__ts_placeholder";
pub const UPDATE_AT_TS_COL: &str = "update_at";
@@ -212,8 +212,6 @@ pub fn diff_row_to_request(rows: Vec<DiffRow>) -> Vec<DiffRequest> {
/// This impl block contains methods to send writeback requests to frontend
impl FlowWorkerManager {
- /// TODO(discord9): merge all same type of diff row into one requests
- ///
/// Return the number of requests it made
pub async fn send_writeback_requests(&self) -> Result<usize, Error> {
let all_reqs = self.generate_writeback_request().await;
@@ -464,7 +462,6 @@ impl FlowWorkerManager {
shutdown: Option<broadcast::Receiver<()>>,
) -> JoinHandle<()> {
info!("Starting flownode manager's background task");
- // TODO(discord9): add heartbeat tasks here
common_runtime::spawn_bg(async move {
self.run(shutdown).await;
})
@@ -484,21 +481,31 @@ impl FlowWorkerManager {
}
}
+ async fn get_buf_size(&self) -> usize {
+ self.node_context.read().await.get_send_buf_size().await
+ }
+
/// Trigger dataflow running, and then send writeback request to the source sender
///
/// note that this method didn't handle input mirror request, as this should be handled by grpc server
pub async fn run(&self, mut shutdown: Option<broadcast::Receiver<()>>) {
debug!("Starting to run");
+ let default_interval = Duration::from_secs(1);
+ let mut avg_spd = 0; // rows/sec
+ let mut since_last_run = tokio::time::Instant::now();
loop {
// TODO(discord9): only run when new inputs arrive or scheduled to
- if let Err(err) = self.run_available(true).await {
+ let row_cnt = self.run_available(true).await.unwrap_or_else(|err| {
common_telemetry::error!(err;"Run available errors");
- }
- // TODO(discord9): error handling
+ 0
+ });
+
if let Err(err) = self.send_writeback_requests().await {
common_telemetry::error!(err;"Send writeback request errors");
};
self.log_all_errors().await;
+
+ // determine if need to shutdown
match &shutdown.as_mut().map(|s| s.try_recv()) {
Some(Ok(())) => {
info!("Shutdown flow's main loop");
@@ -515,7 +522,25 @@ impl FlowWorkerManager {
}
None => (),
}
- tokio::time::sleep(std::time::Duration::from_secs(1)).await;
+
+            // for now we want to batch rows until there are around `BATCH_SIZE` rows in the send buf
+            // before triggering a run of flow's worker
+            // (plus one to prevent division by zero)
+ let wait_for = since_last_run.elapsed();
+
+ let cur_spd = row_cnt * 1000 / wait_for.as_millis().max(1) as usize;
+ // rapid increase, slow decay
+ avg_spd = if cur_spd > avg_spd {
+ cur_spd
+ } else {
+ (9 * avg_spd + cur_spd) / 10
+ };
+ debug!("avg_spd={} r/s, cur_spd={} r/s", avg_spd, cur_spd);
+ let new_wait = BATCH_SIZE * 1000 / avg_spd.max(1); //in ms
+ let new_wait = Duration::from_millis(new_wait as u64).min(default_interval);
+ debug!("Wait for {} ms, row_cnt={}", new_wait.as_millis(), row_cnt);
+ since_last_run = tokio::time::Instant::now();
+ tokio::time::sleep(new_wait).await;
}
// flow is now shutdown, drop frontend_invoker early so a ref cycle(in standalone mode) can be prevent:
// FlowWorkerManager.frontend_invoker -> FrontendInvoker.inserter
@@ -528,8 +553,10 @@ impl FlowWorkerManager {
///
/// set `blocking` to true to wait until lock is acquired
/// and false to return immediately if lock is not acquired
+    /// return the number of rows sent to the worker
/// TODO(discord9): add flag for subgraph that have input since last run
- pub async fn run_available(&self, blocking: bool) -> Result<(), Error> {
+ pub async fn run_available(&self, blocking: bool) -> Result<usize, Error> {
+ let mut row_cnt = 0;
loop {
let now = self.tick_manager.tick();
for worker in self.worker_handles.iter() {
@@ -539,35 +566,33 @@ impl FlowWorkerManager {
} else if let Ok(worker) = worker.try_lock() {
worker.run_available(now).await?;
} else {
- return Ok(());
+ return Ok(row_cnt);
}
}
- // first check how many inputs were sent
+            // check rows sent and rows remaining in the send buf
let (flush_res, buf_len) = if blocking {
let ctx = self.node_context.read().await;
(ctx.flush_all_sender().await, ctx.get_send_buf_size().await)
} else {
match self.node_context.try_read() {
Ok(ctx) => (ctx.flush_all_sender().await, ctx.get_send_buf_size().await),
- Err(_) => return Ok(()),
+ Err(_) => return Ok(row_cnt),
}
};
match flush_res {
- Ok(_) => (),
+ Ok(r) => row_cnt += r,
Err(err) => {
common_telemetry::error!("Flush send buf errors: {:?}", err);
break;
}
};
- // if no thing in send buf then break
- if buf_len == 0 {
+ // if not enough rows, break
+ if buf_len < BATCH_SIZE {
break;
- } else {
- debug!("Send buf len = {}", buf_len);
}
}
- Ok(())
+ Ok(row_cnt)
}
/// send write request to related source sender
@@ -583,8 +608,6 @@ impl FlowWorkerManager {
);
let table_id = region_id.table_id();
self.node_context.read().await.send(table_id, rows).await?;
- // TODO(discord9): put it in a background task?
- // self.run_available(false).await?;
Ok(())
}
}
diff --git a/src/flow/src/adapter/node_context.rs b/src/flow/src/adapter/node_context.rs
index e8defc7652a6..812faa41d92c 100644
--- a/src/flow/src/adapter/node_context.rs
+++ b/src/flow/src/adapter/node_context.rs
@@ -14,7 +14,7 @@
//! Node context, prone to change with every incoming requests
-use std::collections::{BTreeMap, BTreeSet, HashMap, VecDeque};
+use std::collections::{BTreeMap, BTreeSet, HashMap};
use std::sync::Arc;
use common_telemetry::debug;
@@ -65,54 +65,64 @@ pub struct FlownodeContext {
/// backpressure and adjust dataflow running duration to avoid blocking
#[derive(Debug)]
pub struct SourceSender {
+ // TODO(discord9): make it all Vec<DiffRow>?
sender: broadcast::Sender<DiffRow>,
- send_buf: RwLock<VecDeque<DiffRow>>,
+ send_buf_tx: mpsc::UnboundedSender<Vec<DiffRow>>,
+ send_buf_rx: RwLock<mpsc::UnboundedReceiver<Vec<DiffRow>>>,
}
impl Default for SourceSender {
fn default() -> Self {
+ let (send_buf_tx, send_buf_rx) = mpsc::unbounded_channel();
Self {
            // TODO(discord9): find a better way than increasing this to prevent lagging and hence missing input data
sender: broadcast::Sender::new(BROADCAST_CAP * 2),
- send_buf: Default::default(),
+ send_buf_tx,
+ send_buf_rx: RwLock::new(send_buf_rx),
}
}
}
impl SourceSender {
+    /// max number of iterations to try flushing the send buf
+ const MAX_ITERATIONS: usize = 16;
pub fn get_receiver(&self) -> broadcast::Receiver<DiffRow> {
self.sender.subscribe()
}
    /// send as many rows as possible from the send buf
    /// until the send buf is empty or the broadcast channel is full
- pub async fn try_send_all(&self) -> Result<usize, Error> {
+ pub async fn try_flush(&self) -> Result<usize, Error> {
let mut row_cnt = 0;
- loop {
- let mut send_buf = self.send_buf.write().await;
+ let mut iterations = 0;
+ while iterations < Self::MAX_ITERATIONS {
+ let mut send_buf = self.send_buf_rx.write().await;
// if inner sender channel is empty or send buf is empty, there
// is nothing to do for now, just break
if self.sender.len() >= BROADCAST_CAP || send_buf.is_empty() {
break;
}
- if let Some(row) = send_buf.pop_front() {
- self.sender
- .send(row)
- .map_err(|err| {
- InternalSnafu {
- reason: format!("Failed to send row, error = {:?}", err),
- }
- .build()
- })
- .with_context(|_| EvalSnafu)?;
- row_cnt += 1;
+ if let Some(rows) = send_buf.recv().await {
+ for row in rows {
+ self.sender
+ .send(row)
+ .map_err(|err| {
+ InternalSnafu {
+ reason: format!("Failed to send row, error = {:?}", err),
+ }
+ .build()
+ })
+ .with_context(|_| EvalSnafu)?;
+ row_cnt += 1;
+ }
}
+ iterations += 1;
}
if row_cnt > 0 {
debug!("Send {} rows", row_cnt);
debug!(
"Remaining Send buf.len() = {}",
- self.send_buf.read().await.len()
+ self.send_buf_rx.read().await.len()
);
}
@@ -121,11 +131,14 @@ impl SourceSender {
/// return number of rows it actual send(including what's in the buffer)
pub async fn send_rows(&self, rows: Vec<DiffRow>) -> Result<usize, Error> {
- self.send_buf.write().await.extend(rows);
-
- let row_cnt = self.try_send_all().await?;
+ self.send_buf_tx.send(rows).map_err(|e| {
+ crate::error::InternalSnafu {
+ reason: format!("Failed to send row, error = {:?}", e),
+ }
+ .build()
+ })?;
- Ok(row_cnt)
+ Ok(0)
}
}
@@ -150,7 +163,7 @@ impl FlownodeContext {
pub async fn flush_all_sender(&self) -> Result<usize, Error> {
let mut sum = 0;
for sender in self.source_sender.values() {
- sender.try_send_all().await.inspect(|x| sum += x)?;
+ sender.try_flush().await.inspect(|x| sum += x)?;
}
Ok(sum)
}
@@ -159,7 +172,7 @@ impl FlownodeContext {
pub async fn get_send_buf_size(&self) -> usize {
let mut sum = 0;
for sender in self.source_sender.values() {
- sum += sender.send_buf.read().await.len();
+ sum += sender.send_buf_rx.read().await.len();
}
sum
}
diff --git a/src/flow/src/compute/render/map.rs b/src/flow/src/compute/render/map.rs
index d2278dc3b358..272be4acc684 100644
--- a/src/flow/src/compute/render/map.rs
+++ b/src/flow/src/compute/render/map.rs
@@ -146,6 +146,16 @@ fn mfp_subgraph(
// find all updates that need to be send from arrangement
let output_kv = arrange.read().get_updates_in_range(range);
+ err_collector.run(|| {
+ snafu::ensure!(
+ mfp_plan.is_temporal() || output_kv.is_empty(),
+ crate::expr::error::InternalSnafu {
+ reason: "Output from future should be empty since temporal filter is not applied"
+ }
+ );
+ Ok(())
+ });
+
// the output is expected to be key -> empty val
let output = output_kv
.into_iter()
diff --git a/src/flow/src/compute/render/reduce.rs b/src/flow/src/compute/render/reduce.rs
index d44c290d9474..5d5761656c84 100644
--- a/src/flow/src/compute/render/reduce.rs
+++ b/src/flow/src/compute/render/reduce.rs
@@ -187,6 +187,39 @@ fn split_row_to_key_val(
}
}
+/// split rows into key and val by evaluating the key and val plans
+fn batch_split_rows_to_key_val(
+ rows: impl IntoIterator<Item = DiffRow>,
+ key_val_plan: KeyValPlan,
+ err_collector: ErrCollector,
+) -> impl IntoIterator<Item = KeyValDiffRow> {
+ let mut row_buf = Row::new(vec![]);
+ rows.into_iter().filter_map(
+ move |(mut row, sys_time, diff): DiffRow| -> Option<KeyValDiffRow> {
+ err_collector.run(|| {
+ let len = row.len();
+ if let Some(key) = key_val_plan
+ .key_plan
+ .evaluate_into(&mut row.inner, &mut row_buf)?
+ {
+ // reuse the row as buffer
+ row.inner.resize(len, Value::Null);
+                    // val_plan is not allowed to carry any filter predicate
+ let val = key_val_plan
+ .val_plan
+ .evaluate_into(&mut row.inner, &mut row_buf)?
+ .context(InternalSnafu {
+ reason: "val_plan should not contain any filter predicate",
+ })?;
+ Ok(Some(((key, val), sys_time, diff)))
+ } else {
+ Ok(None)
+ }
+ })?
+ },
+ )
+}
+
/// reduce subgraph, reduce the input data into a single row
/// output is concat from key and val
fn reduce_subgraph(
@@ -204,13 +237,7 @@ fn reduce_subgraph(
send,
}: SubgraphArg,
) {
- let mut row_buf = Row::empty();
- let key_val = data.into_iter().filter_map(|(row, sys_time, diff)| {
- // error is collected and then the row is skipped
- err_collector
- .run(|| split_row_to_key_val(row, sys_time, diff, key_val_plan, &mut row_buf))
- .flatten()
- });
+ let key_val = batch_split_rows_to_key_val(data, key_val_plan.clone(), err_collector.clone());
// from here for distinct reduce and accum reduce, things are drastically different
// for distinct reduce the arrange store the output,
// but for accum reduce the arrange store the accum state, and output is
diff --git a/src/flow/src/compute/render/src_sink.rs b/src/flow/src/compute/render/src_sink.rs
index fd757852ca70..8ee6efb1ee25 100644
--- a/src/flow/src/compute/render/src_sink.rs
+++ b/src/flow/src/compute/render/src_sink.rs
@@ -96,12 +96,8 @@ impl<'referred, 'df> Context<'referred, 'df> {
}
}
let all = prev_avail.chain(to_send).collect_vec();
- if !all.is_empty() || !to_arrange.is_empty() {
- debug!(
- "Rendered Source All send: {} rows, not yet send: {} rows",
- all.len(),
- to_arrange.len()
- );
+ if !to_arrange.is_empty() {
+ debug!("Source Operator buffered {} rows", to_arrange.len());
}
err_collector.run(|| arranged.apply_updates(now, to_arrange));
send.give(all);
diff --git a/src/flow/src/expr/linear.rs b/src/flow/src/expr/linear.rs
index 5eaf3ebd3547..0a2ea7a14152 100644
--- a/src/flow/src/expr/linear.rs
+++ b/src/flow/src/expr/linear.rs
@@ -587,6 +587,10 @@ pub struct MfpPlan {
}
impl MfpPlan {
+    /// Indicates if the `MfpPlan` contains temporal predicates, i.e. it may produce outputs that occur in the future.
+ pub fn is_temporal(&self) -> bool {
+ !self.lower_bounds.is_empty() || !self.upper_bounds.is_empty()
+ }
/// find `now` in `predicates` and put them into lower/upper temporal bounds for temporal filter to use
pub fn create_from(mut mfp: MapFilterProject) -> Result<Self, Error> {
let mut lower_bounds = Vec::new();
diff --git a/src/flow/src/repr.rs b/src/flow/src/repr.rs
index e28689be4008..06571146f606 100644
--- a/src/flow/src/repr.rs
+++ b/src/flow/src/repr.rs
@@ -56,6 +56,8 @@ pub type KeyValDiffRow = ((Row, Row), Timestamp, Diff);
/// TODO(discord9): add config for this, so cpu&mem usage can be balanced and configured by this
pub const BROADCAST_CAP: usize = 65535;
+pub const BATCH_SIZE: usize = BROADCAST_CAP / 2;
+
/// Convert a value that is or can be converted to Datetime to internal timestamp
///
/// support types are: `Date`, `DateTime`, `TimeStamp`, `i64`
diff --git a/src/flow/src/transform/expr.rs b/src/flow/src/transform/expr.rs
index b2784b08bcc1..0eb1460c493f 100644
--- a/src/flow/src/transform/expr.rs
+++ b/src/flow/src/transform/expr.rs
@@ -16,6 +16,7 @@
use std::sync::Arc;
+use common_error::ext::BoxedError;
use common_telemetry::debug;
use datafusion_physical_expr::PhysicalExpr;
use datatypes::data_type::ConcreteDataType as CDT;
@@ -27,20 +28,23 @@ use substrait_proto::proto::function_argument::ArgType;
use substrait_proto::proto::Expression;
use crate::error::{
- DatafusionSnafu, DatatypesSnafu, Error, EvalSnafu, InvalidQuerySnafu, NotImplementedSnafu,
- PlanSnafu,
+ DatafusionSnafu, DatatypesSnafu, Error, EvalSnafu, ExternalSnafu, InvalidQuerySnafu,
+ NotImplementedSnafu, PlanSnafu, UnexpectedSnafu,
};
use crate::expr::{
BinaryFunc, DfScalarFunction, RawDfScalarFn, ScalarExpr, TypedExpr, UnaryFunc,
UnmaterializableFunc, VariadicFunc,
};
use crate::repr::{ColumnType, RelationDesc, RelationType};
-use crate::transform::literal::{from_substrait_literal, from_substrait_type};
+use crate::transform::literal::{
+ from_substrait_literal, from_substrait_type, to_substrait_literal,
+};
use crate::transform::{substrait_proto, FunctionExtensions};
-// TODO(discord9): found proper place for this
+
+// TODO(discord9): refactor the plan-to-substrait conversion of the `arrow_cast` function and then remove this function
/// ref to `arrow_schema::datatype` for type name
-fn typename_to_cdt(name: &str) -> CDT {
- match name {
+fn typename_to_cdt(name: &str) -> Result<CDT, Error> {
+ let ret = match name {
"Int8" => CDT::int8_datatype(),
"Int16" => CDT::int16_datatype(),
"Int32" => CDT::int32_datatype(),
@@ -53,10 +57,22 @@ fn typename_to_cdt(name: &str) -> CDT {
"Float64" => CDT::float64_datatype(),
"Boolean" => CDT::boolean_datatype(),
"String" => CDT::string_datatype(),
- "Date" => CDT::date_datatype(),
+ "Date" | "Date32" | "Date64" => CDT::date_datatype(),
"Timestamp" => CDT::timestamp_second_datatype(),
- _ => CDT::null_datatype(),
- }
+ "Timestamp(Second, None)" => CDT::timestamp_second_datatype(),
+ "Timestamp(Millisecond, None)" => CDT::timestamp_millisecond_datatype(),
+ "Timestamp(Microsecond, None)" => CDT::timestamp_microsecond_datatype(),
+ "Timestamp(Nanosecond, None)" => CDT::timestamp_nanosecond_datatype(),
+ "Time32(Second)" | "Time64(Second)" => CDT::time_second_datatype(),
+ "Time32(Millisecond)" | "Time64(Millisecond)" => CDT::time_millisecond_datatype(),
+ "Time32(Microsecond)" | "Time64(Microsecond)" => CDT::time_microsecond_datatype(),
+ "Time32(Nanosecond)" | "Time64(Nanosecond)" => CDT::time_nanosecond_datatype(),
+ _ => NotImplementedSnafu {
+ reason: format!("Unrecognized typename: {}", name),
+ }
+ .fail()?,
+ };
+ Ok(ret)
}
/// Convert [`ScalarFunction`] to corresponding Datafusion's [`PhysicalExpr`]
@@ -138,29 +154,72 @@ fn is_proto_literal(arg: &substrait_proto::proto::FunctionArgument) -> bool {
)
}
+fn build_proto_lit(
+ lit: substrait_proto::proto::expression::Literal,
+) -> substrait_proto::proto::FunctionArgument {
+ use substrait_proto::proto;
+ proto::FunctionArgument {
+ arg_type: Some(ArgType::Value(Expression {
+ rex_type: Some(proto::expression::RexType::Literal(lit)),
+ })),
+ }
+}
+
/// rewrite ScalarFunction's arguments to Columns 0..n so nested exprs are still handled by us instead of datafusion
///
/// specifically, if an argument is a literal, the replacement will not happen
-fn rewrite_scalar_function(f: &ScalarFunction) -> ScalarFunction {
+fn rewrite_scalar_function(
+ f: &ScalarFunction,
+ arg_typed_exprs: &[TypedExpr],
+) -> Result<ScalarFunction, Error> {
let mut f_rewrite = f.clone();
for (idx, raw_expr) in f_rewrite.arguments.iter_mut().enumerate() {
- if !is_proto_literal(raw_expr) {
- *raw_expr = proto_col(idx)
+        // only replace an argument with col(idx) if it is not a literal
+        // we try our best to determine whether it is a literal, i.e. for a call like `cast(<literal>)` we check
+        // both the raw substrait argument and the converted expression to see if it results in a literal
+ match (
+ is_proto_literal(raw_expr),
+ arg_typed_exprs[idx].expr.is_literal(),
+ ) {
+ (false, false) => *raw_expr = proto_col(idx),
+ (true, _) => (),
+ (false, true) => {
+ if let ScalarExpr::Literal(val, ty) = &arg_typed_exprs[idx].expr {
+ let df_val = val
+ .try_to_scalar_value(ty)
+ .map_err(BoxedError::new)
+ .context(ExternalSnafu)?;
+ let lit_sub = to_substrait_literal(&df_val)?;
+ // put const-folded literal back to df to simplify stuff
+ *raw_expr = build_proto_lit(lit_sub);
+ } else {
+ UnexpectedSnafu {
+ reason: format!(
+ "Expect value to be literal, but found {:?}",
+ arg_typed_exprs[idx].expr
+ ),
+ }
+ .fail()?
+ }
+ }
}
}
- f_rewrite
+ Ok(f_rewrite)
}
impl TypedExpr {
pub async fn from_substrait_to_datafusion_scalar_func(
f: &ScalarFunction,
- arg_exprs_typed: Vec<TypedExpr>,
+ arg_typed_exprs: Vec<TypedExpr>,
extensions: &FunctionExtensions,
) -> Result<TypedExpr, Error> {
- let (arg_exprs, arg_types): (Vec<_>, Vec<_>) =
- arg_exprs_typed.into_iter().map(|e| (e.expr, e.typ)).unzip();
+ let (arg_exprs, arg_types): (Vec<_>, Vec<_>) = arg_typed_exprs
+ .clone()
+ .into_iter()
+ .map(|e| (e.expr, e.typ))
+ .unzip();
debug!("Before rewrite: {:?}", f);
- let f_rewrite = rewrite_scalar_function(f);
+ let f_rewrite = rewrite_scalar_function(f, &arg_typed_exprs)?;
debug!("After rewrite: {:?}", f_rewrite);
let input_schema = RelationType::new(arg_types).into_unnamed();
let raw_fn =
@@ -240,12 +299,21 @@ impl TypedExpr {
.with_context(|| InvalidQuerySnafu {
reason: "array_cast's second argument must be a literal string",
})?;
- let cast_to = typename_to_cdt(&cast_to);
- let func = UnaryFunc::Cast(cast_to);
+ let cast_to = typename_to_cdt(&cast_to)?;
+ let func = UnaryFunc::Cast(cast_to.clone());
let arg = arg_exprs[0].clone();
- let ret_type = ColumnType::new_nullable(func.signature().output.clone());
+                // constant folding here since some datafusion functions require a constant arg (e.g. `DATE_BIN`)
+ if arg.is_literal() {
+ let res = func.eval(&[], &arg).context(EvalSnafu)?;
+ Ok(TypedExpr::new(
+ ScalarExpr::Literal(res, cast_to.clone()),
+ ColumnType::new_nullable(cast_to),
+ ))
+ } else {
+ let ret_type = ColumnType::new_nullable(func.signature().output.clone());
- Ok(TypedExpr::new(arg.call_unary(func), ret_type))
+ Ok(TypedExpr::new(arg.call_unary(func), ret_type))
+ }
}
2 if BinaryFunc::is_valid_func_name(fn_name) => {
let (func, signature) =
@@ -602,28 +670,9 @@ mod test {
let expected = TypedPlan {
schema: RelationType::new(vec![ColumnType::new(CDT::int16_datatype(), true)])
.into_unnamed(),
- plan: Plan::Mfp {
- input: Box::new(
- Plan::Get {
- id: crate::expr::Id::Global(GlobalId::User(0)),
- }
- .with_types(
- RelationType::new(vec![ColumnType::new(
- ConcreteDataType::uint32_datatype(),
- false,
- )])
- .into_named(vec![Some("number".to_string())]),
- ),
- ),
- mfp: MapFilterProject::new(1)
- .map(vec![ScalarExpr::Literal(
- Value::Int64(1),
- CDT::int64_datatype(),
- )
- .call_unary(UnaryFunc::Cast(CDT::int16_datatype()))])
- .unwrap()
- .project(vec![1])
- .unwrap(),
+ plan: Plan::Constant {
+ // cast of literal is constant folded
+ rows: vec![(repr::Row::new(vec![Value::from(1i16)]), i64::MIN, 1)],
},
};
assert_eq!(flow_plan.unwrap(), expected);
diff --git a/src/flow/src/transform/literal.rs b/src/flow/src/transform/literal.rs
index bd0f041dd825..255ceadb54ca 100644
--- a/src/flow/src/transform/literal.rs
+++ b/src/flow/src/transform/literal.rs
@@ -12,23 +12,93 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::array::TryFromSliceError;
+
+use bytes::Bytes;
use common_decimal::Decimal128;
use common_time::{Date, Timestamp};
+use datafusion_common::ScalarValue;
use datatypes::data_type::ConcreteDataType as CDT;
use datatypes::value::Value;
+use num_traits::FromBytes;
+use snafu::ensure;
+use substrait::substrait_proto_df::proto::expression::literal::user_defined::Val;
+use substrait::substrait_proto_df::proto::expression::literal::UserDefined;
use substrait::variation_const::{
DATE_32_TYPE_VARIATION_REF, DATE_64_TYPE_VARIATION_REF, DEFAULT_TYPE_VARIATION_REF,
+ INTERVAL_DAY_TIME_TYPE_REF, INTERVAL_DAY_TIME_TYPE_URL, INTERVAL_MONTH_DAY_NANO_TYPE_REF,
+ INTERVAL_MONTH_DAY_NANO_TYPE_URL, INTERVAL_YEAR_MONTH_TYPE_REF, INTERVAL_YEAR_MONTH_TYPE_URL,
TIMESTAMP_MICRO_TYPE_VARIATION_REF, TIMESTAMP_MILLI_TYPE_VARIATION_REF,
TIMESTAMP_NANO_TYPE_VARIATION_REF, TIMESTAMP_SECOND_TYPE_VARIATION_REF,
UNSIGNED_INTEGER_TYPE_VARIATION_REF,
};
use substrait_proto::proto::expression::literal::LiteralType;
use substrait_proto::proto::expression::Literal;
-use substrait_proto::proto::r#type::Kind;
+use substrait_proto::proto::r#type::{self, parameter, Kind, Parameter};
+use substrait_proto::proto::Type;
-use crate::error::{Error, NotImplementedSnafu, PlanSnafu};
+use crate::error::{Error, NotImplementedSnafu, PlanSnafu, UnexpectedSnafu};
use crate::transform::substrait_proto;
+/// TODO(discord9): this is copied from datafusion-substrait since the original function is not public; it will be replaced once that function is exported
+pub(crate) fn to_substrait_literal(value: &ScalarValue) -> Result<Literal, Error> {
+ if value.is_null() {
+ return not_impl_err!("Unsupported literal: {value:?}");
+ }
+ let (literal_type, type_variation_reference) = match value {
+ ScalarValue::Boolean(Some(b)) => (LiteralType::Boolean(*b), DEFAULT_TYPE_VARIATION_REF),
+ ScalarValue::Int8(Some(n)) => (LiteralType::I8(*n as i32), DEFAULT_TYPE_VARIATION_REF),
+ ScalarValue::UInt8(Some(n)) => (
+ LiteralType::I8(*n as i32),
+ UNSIGNED_INTEGER_TYPE_VARIATION_REF,
+ ),
+ ScalarValue::Int16(Some(n)) => (LiteralType::I16(*n as i32), DEFAULT_TYPE_VARIATION_REF),
+ ScalarValue::UInt16(Some(n)) => (
+ LiteralType::I16(*n as i32),
+ UNSIGNED_INTEGER_TYPE_VARIATION_REF,
+ ),
+ ScalarValue::Int32(Some(n)) => (LiteralType::I32(*n), DEFAULT_TYPE_VARIATION_REF),
+ ScalarValue::UInt32(Some(n)) => (
+ LiteralType::I32(*n as i32),
+ UNSIGNED_INTEGER_TYPE_VARIATION_REF,
+ ),
+ ScalarValue::Int64(Some(n)) => (LiteralType::I64(*n), DEFAULT_TYPE_VARIATION_REF),
+ ScalarValue::UInt64(Some(n)) => (
+ LiteralType::I64(*n as i64),
+ UNSIGNED_INTEGER_TYPE_VARIATION_REF,
+ ),
+ ScalarValue::Float32(Some(f)) => (LiteralType::Fp32(*f), DEFAULT_TYPE_VARIATION_REF),
+ ScalarValue::Float64(Some(f)) => (LiteralType::Fp64(*f), DEFAULT_TYPE_VARIATION_REF),
+ ScalarValue::TimestampSecond(Some(t), _) => (
+ LiteralType::Timestamp(*t),
+ TIMESTAMP_SECOND_TYPE_VARIATION_REF,
+ ),
+ ScalarValue::TimestampMillisecond(Some(t), _) => (
+ LiteralType::Timestamp(*t),
+ TIMESTAMP_MILLI_TYPE_VARIATION_REF,
+ ),
+ ScalarValue::TimestampMicrosecond(Some(t), _) => (
+ LiteralType::Timestamp(*t),
+ TIMESTAMP_MICRO_TYPE_VARIATION_REF,
+ ),
+ ScalarValue::TimestampNanosecond(Some(t), _) => (
+ LiteralType::Timestamp(*t),
+ TIMESTAMP_NANO_TYPE_VARIATION_REF,
+ ),
+ ScalarValue::Date32(Some(d)) => (LiteralType::Date(*d), DATE_32_TYPE_VARIATION_REF),
+ _ => (
+ not_impl_err!("Unsupported literal: {value:?}")?,
+ DEFAULT_TYPE_VARIATION_REF,
+ ),
+ };
+
+ Ok(Literal {
+ nullable: false,
+ type_variation_reference,
+ literal_type: Some(literal_type),
+ })
+}
+
/// Convert a Substrait literal into a Value and its ConcreteDataType (So that we can know type even if the value is null)
pub(crate) fn from_substrait_literal(lit: &Literal) -> Result<(Value, CDT), Error> {
let scalar_value = match &lit.literal_type {
@@ -105,11 +175,122 @@ pub(crate) fn from_substrait_literal(lit: &Literal) -> Result<(Value, CDT), Erro
)
}
Some(LiteralType::Null(ntype)) => (Value::Null, from_substrait_type(ntype)?),
- _ => not_impl_err!("unsupported literal_type")?,
+ Some(LiteralType::IntervalDayToSecond(interval)) => {
+ let (days, seconds, microseconds) =
+ (interval.days, interval.seconds, interval.microseconds);
+ let millis = microseconds / 1000 + seconds * 1000;
+ let value_interval = common_time::Interval::from_day_time(days, millis);
+ (
+ Value::Interval(value_interval),
+ CDT::interval_day_time_datatype(),
+ )
+ }
+ Some(LiteralType::IntervalYearToMonth(interval)) => (
+ Value::Interval(common_time::Interval::from_year_month(
+ interval.years * 12 + interval.months,
+ )),
+ CDT::interval_year_month_datatype(),
+ ),
+ Some(LiteralType::UserDefined(user_defined)) => {
+ from_substrait_user_defined_type(user_defined)?
+ }
+ _ => not_impl_err!("unsupported literal_type: {:?}", &lit.literal_type)?,
};
Ok(scalar_value)
}
+fn from_bytes<T: FromBytes>(i: &Bytes) -> Result<T, Error>
+where
+ for<'a> &'a <T as num_traits::FromBytes>::Bytes:
+ std::convert::TryFrom<&'a [u8], Error = TryFromSliceError>,
+{
+ let (int_bytes, _rest) = i.split_at(std::mem::size_of::<T>());
+ let i = T::from_le_bytes(int_bytes.try_into().map_err(|e| {
+ UnexpectedSnafu {
+ reason: format!(
+ "Expect slice to be {} bytes, found {} bytes, error={:?}",
+ std::mem::size_of::<T>(),
+ int_bytes.len(),
+ e
+ ),
+ }
+ .build()
+ })?);
+ Ok(i)
+}
+
+fn from_substrait_user_defined_type(user_defined: &UserDefined) -> Result<(Value, CDT), Error> {
+ if let UserDefined {
+ type_reference,
+ type_parameters: _,
+ val: Some(Val::Value(val)),
+ } = user_defined
+ {
+ // see https://github.com/apache/datafusion/blob/146b679aa19c7749cc73d0c27440419d6498142b/datafusion/substrait/src/logical_plan/producer.rs#L1957
+ // for interval type's transform to substrait
+ let ret = match *type_reference {
+ INTERVAL_YEAR_MONTH_TYPE_REF => {
+ ensure!(
+ val.type_url == INTERVAL_YEAR_MONTH_TYPE_URL,
+ UnexpectedSnafu {
+ reason: format!(
+ "Expect {}, found {} in type_url",
+ INTERVAL_YEAR_MONTH_TYPE_URL, val.type_url
+ )
+ }
+ );
+ let i: i32 = from_bytes(&val.value)?;
+ let value_interval = common_time::Interval::from_year_month(i);
+ (
+ Value::Interval(value_interval),
+ CDT::interval_year_month_datatype(),
+ )
+ }
+ INTERVAL_MONTH_DAY_NANO_TYPE_REF => {
+ ensure!(
+ val.type_url == INTERVAL_MONTH_DAY_NANO_TYPE_URL,
+ UnexpectedSnafu {
+ reason: format!(
+ "Expect {}, found {} in type_url",
+ INTERVAL_MONTH_DAY_NANO_TYPE_URL, val.type_url
+ )
+ }
+ );
+ let i: i128 = from_bytes(&val.value)?;
+ let (months, days, nsecs) = ((i >> 96) as i32, (i >> 64) as i32, i as i64);
+ let value_interval =
+ common_time::Interval::from_month_day_nano(months, days, nsecs);
+ (
+ Value::Interval(value_interval),
+ CDT::interval_month_day_nano_datatype(),
+ )
+ }
+ INTERVAL_DAY_TIME_TYPE_REF => {
+ ensure!(
+ val.type_url == INTERVAL_DAY_TIME_TYPE_URL,
+ UnexpectedSnafu {
+ reason: format!(
+ "Expect {}, found {} in type_url",
+ INTERVAL_DAY_TIME_TYPE_URL, val.type_url
+ )
+ }
+ );
+ let i: i64 = from_bytes(&val.value)?;
+ let (days, millis) = ((i >> 32) as i32, i as i32);
+ let value_interval = common_time::Interval::from_day_time(days, millis);
+ (
+ Value::Interval(value_interval),
+ CDT::interval_day_time_datatype(),
+ )
+ }
+ _ => return not_impl_err!("unsupported user defined type: {:?}", user_defined)?,
+ };
+ Ok(ret)
+ } else {
+ not_impl_err!("Expect val to be Some(...)")
+ }
+}
+
/// convert a Substrait type into a ConcreteDataType
pub fn from_substrait_type(null_type: &substrait_proto::proto::Type) -> Result<CDT, Error> {
if let Some(kind) = &null_type.kind {
diff --git a/src/operator/src/insert.rs b/src/operator/src/insert.rs
index 1ea03f2f1ec5..38e79de6c993 100644
--- a/src/operator/src/insert.rs
+++ b/src/operator/src/insert.rs
@@ -358,7 +358,6 @@ impl Inserter {
// already know this is not source table
Some(None) => continue,
_ => {
- // TODO(discord9): query metasrv for actual peer address
let peers = self
.table_flownode_set_cache
.get(table_id)
diff --git a/tests/cases/standalone/common/flow/basic.result b/tests/cases/standalone/common/flow/basic.result
index 1d480e2f2277..9b8273655eea 100644
--- a/tests/cases/standalone/common/flow/basic.result
+++ b/tests/cases/standalone/common/flow/basic.result
@@ -59,6 +59,7 @@ DROP TABLE out_num_cnt;
Affected Rows: 0
+-- test interpret interval
CREATE TABLE numbers_input (
number INT,
ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
@@ -68,55 +69,35 @@ CREATE TABLE numbers_input (
Affected Rows: 0
-CREATE FLOW test_numbers
-SINK TO out_num_cnt
-AS
-SELECT date_trunc('second', ts), sum(number) FROM numbers_input GROUP BY date_trunc('second', ts);
+create table out_num_cnt (
+ number INT,
+ ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP TIME INDEX);
Affected Rows: 0
-INSERT INTO numbers_input
-VALUES
- (20, "2021-07-01 00:00:00.200"),
- (22, "2021-07-01 00:00:00.600");
-
-Affected Rows: 2
-
--- SQLNESS SLEEP 3s
-SELECT col_0, col_1 FROM out_num_cnt;
-
-+---------------------+-------+
-| col_0 | col_1 |
-+---------------------+-------+
-| 2021-07-01T00:00:00 | 42 |
-+---------------------+-------+
-
-INSERT INTO numbers_input
-VALUES
- (23,"2021-07-01 00:00:01.000"),
- (24,"2021-07-01 00:00:01.500");
+CREATE FLOW filter_numbers SINK TO out_num_cnt AS SELECT INTERVAL '1 day 1 second', INTERVAL '1 month 1 day 1 second', INTERVAL '1 year 1 month' FROM numbers_input where number > 10;
-Affected Rows: 2
+Affected Rows: 0
--- SQLNESS SLEEP 2s
-SELECT col_0, col_1 FROM out_num_cnt;
+SHOW CREATE FLOW filter_numbers;
-+---------------------+-------+
-| col_0 | col_1 |
-+---------------------+-------+
-| 2021-07-01T00:00:00 | 42 |
-| 2021-07-01T00:00:01 | 47 |
-+---------------------+-------+
++----------------+----------------------------------------------------------------------------------------------------------------------------------------+
+| Flow | Create Flow |
++----------------+----------------------------------------------------------------------------------------------------------------------------------------+
+| filter_numbers | CREATE OR REPLACE FLOW IF NOT EXISTS filter_numbers |
+| | SINK TO out_num_cnt |
+| | AS SELECT INTERVAL '1 day 1 second', INTERVAL '1 month 1 day 1 second', INTERVAL '1 year 1 month' FROM numbers_input WHERE number > 10 |
++----------------+----------------------------------------------------------------------------------------------------------------------------------------+
-DROP FLOW test_numbers;
+drop flow filter_numbers;
Affected Rows: 0
-DROP TABLE numbers_input;
+drop table out_num_cnt;
Affected Rows: 0
-DROP TABLE out_num_cnt;
+drop table numbers_input;
Affected Rows: 0
diff --git a/tests/cases/standalone/common/flow/basic.sql b/tests/cases/standalone/common/flow/basic.sql
index 8c0c5d038ef6..d7beba69ce80 100644
--- a/tests/cases/standalone/common/flow/basic.sql
+++ b/tests/cases/standalone/common/flow/basic.sql
@@ -30,34 +30,24 @@ DROP FLOW test_numbers;
DROP TABLE numbers_input;
DROP TABLE out_num_cnt;
+-- test interpret interval
+
CREATE TABLE numbers_input (
number INT,
ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY(number),
TIME INDEX(ts)
);
+create table out_num_cnt (
+ number INT,
+ ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP TIME INDEX);
-CREATE FLOW test_numbers
-SINK TO out_num_cnt
-AS
-SELECT date_trunc('second', ts), sum(number) FROM numbers_input GROUP BY date_trunc('second', ts);
+CREATE FLOW filter_numbers SINK TO out_num_cnt AS SELECT INTERVAL '1 day 1 second', INTERVAL '1 month 1 day 1 second', INTERVAL '1 year 1 month' FROM numbers_input where number > 10;
-INSERT INTO numbers_input
-VALUES
- (20, "2021-07-01 00:00:00.200"),
- (22, "2021-07-01 00:00:00.600");
+SHOW CREATE FLOW filter_numbers;
--- SQLNESS SLEEP 3s
-SELECT col_0, col_1 FROM out_num_cnt;
+drop flow filter_numbers;
-INSERT INTO numbers_input
-VALUES
- (23,"2021-07-01 00:00:01.000"),
- (24,"2021-07-01 00:00:01.500");
+drop table out_num_cnt;
--- SQLNESS SLEEP 2s
-SELECT col_0, col_1 FROM out_num_cnt;
-
-DROP FLOW test_numbers;
-DROP TABLE numbers_input;
-DROP TABLE out_num_cnt;
+drop table numbers_input;
diff --git a/tests/cases/standalone/common/flow/df_func.result b/tests/cases/standalone/common/flow/df_func.result
index 7ab393eeb10e..6c08f7854e04 100644
--- a/tests/cases/standalone/common/flow/df_func.result
+++ b/tests/cases/standalone/common/flow/df_func.result
@@ -124,3 +124,127 @@ DROP TABLE out_num_cnt_df_func;
Affected Rows: 0
+-- test date_bin
+CREATE TABLE numbers_input (
+ number INT,
+ ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+ PRIMARY KEY(number),
+ TIME INDEX(ts)
+);
+
+Affected Rows: 0
+
+CREATE FLOW test_numbers
+SINK TO out_num_cnt
+AS
+SELECT max(number) - min(number), date_bin(INTERVAL '1 second', ts, '2021-07-01 00:00:00'::TimestampNanosecond) FROM numbers_input GROUP BY date_bin(INTERVAL '1 second', ts, '2021-07-01 00:00:00'::TimestampNanosecond);
+
+Affected Rows: 0
+
+INSERT INTO numbers_input
+VALUES
+ (20, "2021-07-01 00:00:00.200"),
+ (22, "2021-07-01 00:00:00.600");
+
+Affected Rows: 2
+
+-- SQLNESS SLEEP 3s
+SELECT col_0, col_1 FROM out_num_cnt;
+
++-------+---------------------+
+| col_0 | col_1 |
++-------+---------------------+
+| 2 | 2021-07-01T00:00:00 |
++-------+---------------------+
+
+INSERT INTO numbers_input
+VALUES
+ (23,"2021-07-01 00:00:01.000"),
+ (24,"2021-07-01 00:00:01.500");
+
+Affected Rows: 2
+
+-- SQLNESS SLEEP 2s
+SELECT col_0, col_1 FROM out_num_cnt;
+
++-------+---------------------+
+| col_0 | col_1 |
++-------+---------------------+
+| 2 | 2021-07-01T00:00:00 |
+| 1 | 2021-07-01T00:00:01 |
++-------+---------------------+
+
+DROP FLOW test_numbers;
+
+Affected Rows: 0
+
+DROP TABLE numbers_input;
+
+Affected Rows: 0
+
+DROP TABLE out_num_cnt;
+
+Affected Rows: 0
+
+-- test date_trunc
+CREATE TABLE numbers_input (
+ number INT,
+ ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+ PRIMARY KEY(number),
+ TIME INDEX(ts)
+);
+
+Affected Rows: 0
+
+CREATE FLOW test_numbers
+SINK TO out_num_cnt
+AS
+SELECT date_trunc('second', ts), sum(number) FROM numbers_input GROUP BY date_trunc('second', ts);
+
+Affected Rows: 0
+
+INSERT INTO numbers_input
+VALUES
+ (20, "2021-07-01 00:00:00.200"),
+ (22, "2021-07-01 00:00:00.600");
+
+Affected Rows: 2
+
+-- SQLNESS SLEEP 3s
+SELECT col_0, col_1 FROM out_num_cnt;
+
++---------------------+-------+
+| col_0 | col_1 |
++---------------------+-------+
+| 2021-07-01T00:00:00 | 42 |
++---------------------+-------+
+
+INSERT INTO numbers_input
+VALUES
+ (23,"2021-07-01 00:00:01.000"),
+ (24,"2021-07-01 00:00:01.500");
+
+Affected Rows: 2
+
+-- SQLNESS SLEEP 2s
+SELECT col_0, col_1 FROM out_num_cnt;
+
++---------------------+-------+
+| col_0 | col_1 |
++---------------------+-------+
+| 2021-07-01T00:00:00 | 42 |
+| 2021-07-01T00:00:01 | 47 |
++---------------------+-------+
+
+DROP FLOW test_numbers;
+
+Affected Rows: 0
+
+DROP TABLE numbers_input;
+
+Affected Rows: 0
+
+DROP TABLE out_num_cnt;
+
+Affected Rows: 0
+
diff --git a/tests/cases/standalone/common/flow/df_func.sql b/tests/cases/standalone/common/flow/df_func.sql
index b9a22cb9da6d..e04d95e477e1 100644
--- a/tests/cases/standalone/common/flow/df_func.sql
+++ b/tests/cases/standalone/common/flow/df_func.sql
@@ -65,3 +65,70 @@ SELECT col_0, window_start, window_end FROM out_num_cnt_df_func;
DROP FLOW test_numbers_df_func;
DROP TABLE numbers_input_df_func;
DROP TABLE out_num_cnt_df_func;
+
+-- test date_bin
+CREATE TABLE numbers_input (
+ number INT,
+ ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+ PRIMARY KEY(number),
+ TIME INDEX(ts)
+);
+
+CREATE FLOW test_numbers
+SINK TO out_num_cnt
+AS
+SELECT max(number) - min(number), date_bin(INTERVAL '1 second', ts, '2021-07-01 00:00:00'::TimestampNanosecond) FROM numbers_input GROUP BY date_bin(INTERVAL '1 second', ts, '2021-07-01 00:00:00'::TimestampNanosecond);
+
+INSERT INTO numbers_input
+VALUES
+ (20, "2021-07-01 00:00:00.200"),
+ (22, "2021-07-01 00:00:00.600");
+
+-- SQLNESS SLEEP 3s
+SELECT col_0, col_1 FROM out_num_cnt;
+
+INSERT INTO numbers_input
+VALUES
+ (23,"2021-07-01 00:00:01.000"),
+ (24,"2021-07-01 00:00:01.500");
+
+-- SQLNESS SLEEP 2s
+SELECT col_0, col_1 FROM out_num_cnt;
+
+DROP FLOW test_numbers;
+DROP TABLE numbers_input;
+DROP TABLE out_num_cnt;
+
+
+-- test date_trunc
+CREATE TABLE numbers_input (
+ number INT,
+ ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+ PRIMARY KEY(number),
+ TIME INDEX(ts)
+);
+
+CREATE FLOW test_numbers
+SINK TO out_num_cnt
+AS
+SELECT date_trunc('second', ts), sum(number) FROM numbers_input GROUP BY date_trunc('second', ts);
+
+INSERT INTO numbers_input
+VALUES
+ (20, "2021-07-01 00:00:00.200"),
+ (22, "2021-07-01 00:00:00.600");
+
+-- SQLNESS SLEEP 3s
+SELECT col_0, col_1 FROM out_num_cnt;
+
+INSERT INTO numbers_input
+VALUES
+ (23,"2021-07-01 00:00:01.000"),
+ (24,"2021-07-01 00:00:01.500");
+
+-- SQLNESS SLEEP 2s
+SELECT col_0, col_1 FROM out_num_cnt;
+
+DROP FLOW test_numbers;
+DROP TABLE numbers_input;
+DROP TABLE out_num_cnt;
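
A note on the expected results above: `date_bin(INTERVAL '1 second', ts, origin)` snaps each timestamp to the start of its interval-aligned bin measured from the origin, which is why 00:00:00.200 and 00:00:00.600 collapse into the 00:00:00 row while 00:00:01.000 and 00:00:01.500 land in the 00:00:01 row. The sketch below only illustrates that arithmetic on plain millisecond values; the name `date_bin_ms` is made up here and this is not the DataFusion implementation.

```rust
// Illustrative only: bin start = origin + floor((ts - origin) / interval) * interval.
fn date_bin_ms(interval_ms: i64, ts_ms: i64, origin_ms: i64) -> i64 {
    let bins = (ts_ms - origin_ms).div_euclid(interval_ms);
    origin_ms + bins * interval_ms
}

fn main() {
    let origin = 0; // stands in for 2021-07-01 00:00:00
    // 200 ms and 600 ms after the origin fall into the same 1-second bin...
    assert_eq!(date_bin_ms(1_000, 200, origin), 0);
    assert_eq!(date_bin_ms(1_000, 600, origin), 0);
    // ...while 1000 ms and 1500 ms fall into the next bin, matching the two output rows.
    assert_eq!(date_bin_ms(1_000, 1_000, origin), 1_000);
    assert_eq!(date_bin_ms(1_000, 1_500, origin), 1_000);
    println!("ok");
}
```
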
|
feat
|
flow perf&fix df func call (#4347)
|
465c8f714ebde1b46aac110303a857f97fbb1e0b
|
2023-10-31 12:06:31
|
Yingwen
|
feat(mito): avoid buffering all batches for the same primary key (#2658)
| false
|
diff --git a/src/mito2/src/read/merge.rs b/src/mito2/src/read/merge.rs
index b86eb8d69876..187449c01694 100644
--- a/src/mito2/src/read/merge.rs
+++ b/src/mito2/src/read/merge.rs
@@ -15,117 +15,264 @@
//! Merge reader implementation.
use std::cmp::Ordering;
-use std::collections::{BinaryHeap, VecDeque};
+use std::collections::BinaryHeap;
use std::mem;
use async_trait::async_trait;
+use common_time::Timestamp;
use crate::error::Result;
use crate::memtable::BoxedBatchIterator;
use crate::read::{Batch, BatchReader, BoxedBatchReader, Source};
+/// Minimum batch size to output.
+const MIN_BATCH_SIZE: usize = 64;
+
/// Reader to merge sorted batches.
///
/// The merge reader merges [Batch]es from multiple sources that yield sorted batches.
/// 1. Batch is ordered by primary key, time index, sequence desc, op type desc (we can
/// ignore op type as sequence is already unique).
/// 2. Batch doesn't have duplicate elements (elements with the same primary key and time index).
+/// 3. Batches from sources **must** not be empty.
pub struct MergeReader {
- /// Holds a min-heap for all [Node]s. Each node yields batches from a `source`.
+ /// Holds [Node]s whose key range of current batch **is** overlapped with the merge window.
+ /// Each node yields batches from a `source`.
+ ///
+ /// [Node] in this heap **must** not be empty. A `merge window` is the (primary key, timestamp)
+ /// range of the **root node** in the `hot` heap.
+ hot: BinaryHeap<Node>,
+ /// Holds `Node` whose key range of current batch **isn't** overlapped with the merge window.
///
- /// `Node` in this heap **must** not be EOF.
- nodes: BinaryHeap<Node>,
- /// Batches for the next primary key.
+ /// `Node` in this heap **must** not be empty.
+ cold: BinaryHeap<Node>,
+ /// Batches to output.
batch_merger: BatchMerger,
- /// Sorted batches to output.
- output: VecDeque<Batch>,
+ /// Suggested size of each batch. The batch returned by the reader can have more rows than the
+ /// batch size.
+ batch_size: usize,
}
#[async_trait]
impl BatchReader for MergeReader {
async fn next_batch(&mut self) -> Result<Option<Batch>> {
- while !self.output.is_empty() || !self.nodes.is_empty() {
- // Takes from sorted output if there are batches in it.
- if let Some(batch) = self.output.pop_front() {
- return Ok(Some(batch));
+ while !self.hot.is_empty() && self.batch_merger.num_rows() < self.batch_size {
+ if let Some(current_key) = self.batch_merger.primary_key() {
+ // If the hottest node has a different key, we have finished collecting the current key.
+ // Safety: hot is not empty.
+ if self.hot.peek().unwrap().primary_key() != current_key {
+ break;
+ }
}
- // Collects batches to the merger.
- self.collect_batches_to_merge().await?;
-
- // Merge collected batches to output.
- self.output = self.batch_merger.merge_batches()?;
+ if self.hot.len() == 1 {
+ // No need to do merge sort if there is only one node in the hot heap.
+ self.fetch_batch_from_hottest().await?;
+ } else {
+ // We can only fetch rows from the hottest node that sort before the next node's first row.
+ self.fetch_rows_from_hottest().await?;
+ }
}
- Ok(None)
+ if self.batch_merger.is_empty() {
+ // Nothing fetched.
+ Ok(None)
+ } else {
+ self.batch_merger.merge_batches()
+ }
}
}
impl MergeReader {
- /// Creates a new [MergeReader].
- pub async fn new(sources: Vec<Source>) -> Result<MergeReader> {
- let mut nodes = BinaryHeap::with_capacity(sources.len());
+ /// Creates and initializes a new [MergeReader].
+ pub async fn new(sources: Vec<Source>, batch_size: usize) -> Result<MergeReader> {
+ let mut cold = BinaryHeap::with_capacity(sources.len());
+ let hot = BinaryHeap::with_capacity(sources.len());
for source in sources {
let node = Node::new(source).await?;
if !node.is_eof() {
- // Ensure `nodes` don't have eof node.
- nodes.push(node);
+ // Ensure `cold` don't have eof nodes.
+ cold.push(node);
}
}
- Ok(MergeReader {
- nodes,
+ let mut reader = MergeReader {
+ hot,
+ cold,
batch_merger: BatchMerger::new(),
- output: VecDeque::new(),
- })
+ batch_size,
+ };
+ // Initializes the reader.
+ reader.refill_hot();
+
+ Ok(reader)
}
- /// Collect batches from sources for the same primary key.
- async fn collect_batches_to_merge(&mut self) -> Result<()> {
- while !self.nodes.is_empty() {
- // Peek current key.
- let Some(current_key) = self.batch_merger.primary_key() else {
- // The merger is empty, we could push it directly.
- self.take_batch_from_heap().await?;
- // Try next node.
- continue;
- };
- // If next node has a different key, we have finish collecting current key.
- // Safety: node is not empty.
- if self.nodes.peek().unwrap().primary_key() != current_key {
+ /// Moves nodes in the `cold` heap whose key ranges overlap the current merge
+ /// window into the `hot` heap.
+ fn refill_hot(&mut self) {
+ while !self.cold.is_empty() {
+ if let Some(merge_window) = self.hot.peek() {
+ let warmest = self.cold.peek().unwrap();
+ if warmest.is_behind(merge_window) {
+ // if the warmest node in the `cold` heap is totally after the
+ // `merge_window`, then no need to add more nodes into the `hot`
+ // heap for merge sorting.
+ break;
+ }
+ }
+
+ let warmest = self.cold.pop().unwrap();
+ self.hot.push(warmest);
+ }
+ }
+
+ /// Fetches one batch from the hottest node.
+ async fn fetch_batch_from_hottest(&mut self) -> Result<()> {
+ assert_eq!(1, self.hot.len());
+
+ let mut hottest = self.hot.pop().unwrap();
+ let batch = hottest.fetch_batch().await?;
+ self.batch_merger.push(batch)?;
+ self.reheap(hottest)
+ }
+
+ /// Fetches non-duplicated rows from the hottest node and skips the timestamp duplicated
+ /// with the first timestamp in the next node.
+ async fn fetch_rows_from_hottest(&mut self) -> Result<()> {
+ // Safety: the caller (`next_batch()`) ensures the hot heap has more than 1 element.
+ // Pop hottest node.
+ let mut top_node = self.hot.pop().unwrap();
+ let top = top_node.current_batch();
+ // Min timestamp and its sequence in the next batch.
+ let next_min_ts = {
+ let next_node = self.hot.peek().unwrap();
+ let next = next_node.current_batch();
+ // top and next have overlapping rows so they must have same primary keys.
+ debug_assert_eq!(top.primary_key(), next.primary_key());
+ // Safety: Batches in the heap are not empty, so we can use unwrap here.
+ next.first_timestamp().unwrap()
+ };
+
+ // Safety: Batches in the heap are not empty, so we can use unwrap here.
+ let timestamps = top.timestamps_native().unwrap();
+ // Binary searches the timestamp in the top batch.
+ // Safety: Batches should have the same timestamp resolution so we can compare the native
+ // value directly.
+ match timestamps.binary_search(&next_min_ts.value()) {
+ Ok(pos) => {
+ // They have duplicate timestamps. Outputs timestamps before the duplicated timestamp.
+ // Batch itself doesn't contain duplicate timestamps so timestamps before `pos`
+ // must be less than `next_min_ts`.
+ self.batch_merger.push(top.slice(0, pos))?;
+ // This keeps the duplicate timestamp in the node.
+ top_node.skip_rows(pos).await?;
+ // The merge window should contain this timestamp so only nodes in the hot heap
+ // have this timestamp.
+ self.filter_first_duplicate_timestamp_in_hot(top_node, next_min_ts)
+ .await?;
+ }
+ Err(pos) => {
+ // No duplicate timestamp. Outputs timestamp before `pos`.
+ self.batch_merger.push(top.slice(0, pos))?;
+ top_node.skip_rows(pos).await?;
+ self.reheap(top_node)?;
+ }
+ }
+
+ Ok(())
+ }
+
+ /// Filters the first duplicate `timestamp` in `top_node` and `hot` heap. Only keeps the timestamp
+ /// with the maximum sequence.
+ async fn filter_first_duplicate_timestamp_in_hot(
+ &mut self,
+ top_node: Node,
+ timestamp: Timestamp,
+ ) -> Result<()> {
+ debug_assert_eq!(
+ top_node.current_batch().first_timestamp().unwrap(),
+ timestamp
+ );
+
+ // The node with maximum sequence.
+ let mut max_seq_node = top_node;
+ let mut max_seq = max_seq_node.current_batch().first_sequence().unwrap();
+ while let Some(mut next_node) = self.hot.pop() {
+ // Safety: Batches in the heap are not empty.
+ let next_first_ts = next_node.current_batch().first_timestamp().unwrap();
+ let next_first_seq = next_node.current_batch().first_sequence().unwrap();
+
+ if next_first_ts != timestamp {
+ // The timestamps no longer match, we are done; push this node back.
+ self.cold.push(next_node);
break;
}
- // They have the same primary key, we could take it and try next node.
- self.take_batch_from_heap().await?;
+
+ if max_seq < next_first_seq {
+ // The next node has larger seq.
+ max_seq_node.skip_rows(1).await?;
+ if !max_seq_node.is_eof() {
+ self.cold.push(max_seq_node);
+ }
+ max_seq_node = next_node;
+ max_seq = next_first_seq;
+ } else {
+ next_node.skip_rows(1).await?;
+ if !next_node.is_eof() {
+ // If the next node is not EOF, keep it in the cold heap.
+ self.cold.push(next_node);
+ }
+ }
}
+ debug_assert!(!max_seq_node.is_eof());
+ self.cold.push(max_seq_node);
+
+ // The merge window is updated, we need to refill the hot heap.
+ self.refill_hot();
Ok(())
}
- /// Takes batch from heap top and reheap.
- async fn take_batch_from_heap(&mut self) -> Result<()> {
- let mut next_node = self.nodes.pop().unwrap();
- let batch = next_node.fetch_batch().await?;
- self.batch_merger.push(batch);
+ /// Push the node popped from `hot` back to a proper heap.
+ fn reheap(&mut self, node: Node) -> Result<()> {
+ if node.is_eof() {
+ // If the node is EOF, don't put it into the heap again.
+ // The merge window would be updated, need to refill the hot heap.
+ self.refill_hot();
+ } else {
+ // Find a proper heap for this node.
+ let node_is_cold = if let Some(hottest) = self.hot.peek() {
+ // If key range of this node is behind the hottest node's then we can
+ // push it to the cold heap. Otherwise we should push it to the hot heap.
+ node.is_behind(hottest)
+ } else {
+ // The hot heap is empty, but we don't know whether the current
+ // batch of this node is still the hottest.
+ true
+ };
- // Insert the node back to the heap.
- // If the node reaches EOF, ignores it. This ensures nodes in the heap is always not EOF.
- if next_node.is_eof() {
- return Ok(());
+ if node_is_cold {
+ self.cold.push(node);
+ } else {
+ self.hot.push(node);
+ }
+ // Anyway, the merge window has been changed, we need to refill the hot heap.
+ self.refill_hot();
}
- self.nodes.push(next_node);
Ok(())
}
}
/// Builder to build and initialize a [MergeReader].
-#[derive(Default)]
pub struct MergeReaderBuilder {
/// Input sources.
///
/// All source must yield batches with the same schema.
sources: Vec<Source>,
+ /// Batch size of the reader.
+ batch_size: usize,
}
impl MergeReaderBuilder {
@@ -140,25 +287,40 @@ impl MergeReaderBuilder {
self
}
- /// Push a batch iterator to sources.
+ /// Pushes a batch iterator to sources.
pub fn push_batch_iter(&mut self, iter: BoxedBatchIterator) -> &mut Self {
self.sources.push(Source::Iter(iter));
self
}
+ /// Sets the batch size of the reader.
+ pub fn batch_size(&mut self, size: usize) -> &mut Self {
+ self.batch_size = if size == 0 { MIN_BATCH_SIZE } else { size };
+ self
+ }
+
/// Builds and initializes the reader, then resets the builder.
pub async fn build(&mut self) -> Result<MergeReader> {
let sources = mem::take(&mut self.sources);
- MergeReader::new(sources).await
+ MergeReader::new(sources, self.batch_size).await
}
}
-/// Helper to merge batches for same primary key.
+impl Default for MergeReaderBuilder {
+ fn default() -> Self {
+ MergeReaderBuilder {
+ sources: Vec::new(),
+ batch_size: MIN_BATCH_SIZE,
+ }
+ }
+}
+
+/// Helper to collect and merge small batches for the same primary key.
struct BatchMerger {
/// Buffered non-empty batches to merge.
batches: Vec<Batch>,
- /// Whether the batch buffer is still sorted.
- is_sorted: bool,
+ /// Number of rows in the batch.
+ num_rows: usize,
}
impl BatchMerger {
@@ -166,196 +328,63 @@ impl BatchMerger {
fn new() -> BatchMerger {
BatchMerger {
batches: Vec::new(),
- is_sorted: true, // An empty merger is always sorted.
+ num_rows: 0,
}
}
+ /// Returns the number of rows.
+ fn num_rows(&self) -> usize {
+ self.num_rows
+ }
+
+ /// Returns true if the merger is empty.
+ fn is_empty(&self) -> bool {
+ self.num_rows() == 0
+ }
+
/// Returns the primary key of current merger and `None` if the merger is empty.
fn primary_key(&self) -> Option<&[u8]> {
self.batches.first().map(|batch| batch.primary_key())
}
- /// Push a `batch` into the merger.
+ /// Removes deleted entries and pushes a `batch` into the merger.
///
- /// Ignore the `batch` if it is empty.
+ /// Ignores the `batch` if it is empty.
///
/// # Panics
/// Panics if the `batch` has another primary key.
- fn push(&mut self, batch: Batch) {
+ fn push(&mut self, mut batch: Batch) -> Result<()> {
+ debug_assert!(self
+ .batches
+ .last()
+ .map(|b| b.primary_key() == batch.primary_key())
+ .unwrap_or(true));
+
+ batch.filter_deleted()?;
if batch.is_empty() {
- return;
- }
-
- if self.batches.is_empty() || !self.is_sorted {
- // Merger is empty or is not sorted, we can push the batch directly.
- self.batches.push(batch);
- return;
- }
-
- // Merger is sorted, checks whether we can still preserve sorted state.
- let last_batch = self.batches.last().unwrap();
- assert_eq!(last_batch.primary_key(), batch.primary_key());
- match last_batch.last_timestamp().cmp(&batch.first_timestamp()) {
- Ordering::Less => {
- // Still sorted.
- self.batches.push(batch);
- return;
- }
- Ordering::Equal => {
- // Check sequence.
- if last_batch.last_sequence() > batch.first_sequence() {
- // Still sorted.
- self.batches.push(batch);
- return;
- }
- }
- Ordering::Greater => (),
+ return Ok(());
}
- // Merger is no longer sorted.
+ self.num_rows += batch.num_rows();
self.batches.push(batch);
- self.is_sorted = false;
+
+ Ok(())
}
/// Merge all buffered batches and returns the merged batch. Then
/// reset the buffer.
- fn merge_batches(&mut self) -> Result<VecDeque<Batch>> {
+ fn merge_batches(&mut self) -> Result<Option<Batch>> {
if self.batches.is_empty() {
- return Ok(VecDeque::new());
+ return Ok(None);
}
- let mut output = VecDeque::with_capacity(self.batches.len());
- if self.is_sorted {
- // Fast path. We can output batches directly.
- for batch in self.batches.drain(..) {
- output_batch(&mut output, batch)?;
- }
-
- return Ok(output);
- }
-
- // Slow path. We need to merge overlapping batches.
- // Constructs a heap from batches. Batches in the heap is not empty, we need to check
- // this before pushing a batch into the heap.
- let mut heap = BinaryHeap::from_iter(self.batches.drain(..).map(CompareTimeSeq));
- // Reset merger as sorted as we have cleared batches.
- self.is_sorted = true;
-
- // Sorts batches.
- while let Some(top) = heap.pop() {
- let top = top.0;
- let Some(next) = heap.peek() else {
- // If there is no remaining batch, we can output the top-most batch.
- output_batch(&mut output, top)?;
- break;
- };
- let next = &next.0;
-
- if top.last_timestamp() < next.first_timestamp() {
- // If the top-most batch doesn't overlaps with the next batch, we can output it.
- output_batch(&mut output, top)?;
- continue;
- }
-
- // Safety: Batches (top, next) in the heap is not empty, so we can use unwrap here.
- // Min timestamp in the next batch.
- let next_min_ts = next.first_timestamp().unwrap();
- let timestamps = top.timestamps_native().unwrap();
- // Binary searches the timestamp in the top batch.
- // Safety: Batches should have the same timestamp resolution so we can compare the native
- // value directly.
- match timestamps.binary_search(&next_min_ts.value()) {
- Ok(pos) => {
- // They have duplicate timestamps. Outputs non overlapping timestamps.
- // Batch itself doesn't contain duplicate timestamps so timestamps before `pos`
- // must be less than `next_min_ts`.
- // It's possible to output a very small batch but concatenating small batches
- // slows down the reader.
- output_batch(&mut output, top.slice(0, pos))?;
- // Removes duplicate timestamp and fixes the heap. Keeps the timestamp with largest
- // sequence.
- // Safety: pos is a valid index returned by `binary_search` and `sequences` are always
- // not null.
- if top.get_sequence(pos) > next.first_sequence().unwrap() {
- // Safety: `next` is not None.
- let next = heap.pop().unwrap().0;
- // Keeps the timestamp in top and skips the first timestamp in the `next`
- // batch.
- push_remaining_to_heap(&mut heap, next, 1);
- // Skips already outputted timestamps.
- push_remaining_to_heap(&mut heap, top, pos);
- } else {
- // Keeps timestamp in next and skips the duplicated timestamp and already outputted
- // timestamp in top.
- push_remaining_to_heap(&mut heap, top, pos + 1);
- }
- }
- Err(pos) => {
- // No duplicate timestamp. Outputs timestamp before `pos`.
- output_batch(&mut output, top.slice(0, pos))?;
- push_remaining_to_heap(&mut heap, top, pos);
- }
- }
+ // Reset number of rows.
+ self.num_rows = 0;
+ if self.batches.len() == 1 {
+ return Ok(self.batches.pop());
}
-
- Ok(output)
- }
-}
-
-/// Skips first `num_to_skip` rows from the batch and pushes remaining batch into the heap if the batch
-/// is still not empty.
-fn push_remaining_to_heap(heap: &mut BinaryHeap<CompareTimeSeq>, batch: Batch, num_to_skip: usize) {
- debug_assert!(batch.num_rows() >= num_to_skip);
- let remaining = batch.num_rows() - num_to_skip;
- if remaining == 0 {
- // Nothing remains.
- return;
- }
-
- heap.push(CompareTimeSeq(batch.slice(num_to_skip, remaining)));
-}
-
-/// Removes deleted items from the `batch` and pushes it back to the `output` if
-/// the `batch` is not empty.
-fn output_batch(output: &mut VecDeque<Batch>, mut batch: Batch) -> Result<()> {
- // Filter rows by op type. Currently, the reader only removes deleted rows but doesn't filter
- // rows by sequence for simplicity and performance reason.
- batch.filter_deleted()?;
- if batch.is_empty() {
- return Ok(());
- }
-
- output.push_back(batch);
- Ok(())
-}
-
-/// Compare [Batch] by timestamp and sequence.
-struct CompareTimeSeq(Batch);
-
-impl PartialEq for CompareTimeSeq {
- fn eq(&self, other: &Self) -> bool {
- self.0.first_timestamp() == other.0.first_timestamp()
- && self.0.first_sequence() == other.0.first_sequence()
- }
-}
-
-impl Eq for CompareTimeSeq {}
-
-impl PartialOrd for CompareTimeSeq {
- fn partial_cmp(&self, other: &CompareTimeSeq) -> Option<Ordering> {
- Some(self.cmp(other))
- }
-}
-
-impl Ord for CompareTimeSeq {
- /// Compares by first timestamp desc, first sequence. (The heap is a max heap).
- fn cmp(&self, other: &CompareTimeSeq) -> Ordering {
- self.0
- .first_timestamp()
- .cmp(&other.0.first_timestamp())
- .then_with(|| other.0.first_sequence().cmp(&self.0.first_sequence()))
- // We reverse the ordering as the heap is a max heap.
- .reverse()
+ let batches = mem::take(&mut self.batches);
+ Batch::concat(batches).map(Some)
}
}
@@ -363,7 +392,7 @@ impl Ord for CompareTimeSeq {
struct Node {
/// Data source of this `Node`.
source: Source,
- /// Current batch to be read.
+ /// Current batch to be read. The node ensures the batch is not empty.
///
/// `None` means the `source` has reached EOF.
current_batch: Option<CompareFirst>,
@@ -374,6 +403,7 @@ impl Node {
///
/// It tries to fetch one batch from the `source`.
async fn new(mut source: Source) -> Result<Node> {
+ // Ensures batch is not empty.
let current_batch = source.next_batch().await?.map(CompareFirst);
Ok(Node {
source,
@@ -409,9 +439,49 @@ impl Node {
/// Panics if the node has reached EOF.
async fn fetch_batch(&mut self) -> Result<Batch> {
let current = self.current_batch.take().unwrap();
+ // Ensures batch is not empty.
self.current_batch = self.source.next_batch().await?.map(CompareFirst);
Ok(current.0)
}
+
+ /// Returns true if the key range of current batch in `self` is behind (exclusive) current
+ /// batch in `other`.
+ ///
+ /// # Panics
+ /// Panics if either `self` or `other` is EOF.
+ fn is_behind(&self, other: &Node) -> bool {
+ debug_assert!(!self.current_batch().is_empty());
+ debug_assert!(!other.current_batch().is_empty());
+
+ // We only compare pk and timestamp so nodes in the cold
+ // heap don't have overlapping timestamps with the hottest node
+ // in the hot heap.
+ self.primary_key().cmp(other.primary_key()).then_with(|| {
+ self.current_batch()
+ .first_timestamp()
+ .cmp(&other.current_batch().last_timestamp())
+ }) == Ordering::Greater
+ }
+
+ /// Skips the first `num_to_skip` rows from the node's current batch. If the current batch becomes
+ /// empty, fetches the next batch from the source.
+ ///
+ /// # Panics
+ /// Panics if the node is EOF.
+ async fn skip_rows(&mut self, num_to_skip: usize) -> Result<()> {
+ let batch = self.current_batch();
+ debug_assert!(batch.num_rows() >= num_to_skip);
+ let remaining = batch.num_rows() - num_to_skip;
+ if remaining == 0 {
+ // Nothing remains, we need to fetch next batch to ensure the batch is not empty.
+ self.fetch_batch().await?;
+ } else {
+ debug_assert!(!batch.is_empty());
+ self.current_batch = Some(CompareFirst(batch.slice(num_to_skip, remaining)));
+ }
+
+ Ok(())
+ }
}
impl PartialEq for Node {
@@ -525,20 +595,59 @@ mod tests {
&[
new_batch(
b"k1",
- &[1, 2],
- &[11, 12],
- &[OpType::Put, OpType::Put],
- &[21, 22],
+ &[1, 2, 4, 5, 7],
+ &[11, 12, 14, 15, 17],
+ &[
+ OpType::Put,
+ OpType::Put,
+ OpType::Put,
+ OpType::Put,
+ OpType::Put,
+ ],
+ &[21, 22, 24, 25, 27],
),
+ new_batch(b"k2", &[3], &[13], &[OpType::Put], &[23]),
+ ],
+ )
+ .await;
+ }
+
+ #[tokio::test]
+ async fn test_merge_reheap_hot() {
+ let reader1 = VecBatchReader::new(&[
+ new_batch(
+ b"k1",
+ &[1, 3],
+ &[10, 10],
+ &[OpType::Put, OpType::Put],
+ &[21, 23],
+ ),
+ new_batch(b"k2", &[3], &[10], &[OpType::Put], &[23]),
+ ]);
+ let reader2 = VecBatchReader::new(&[new_batch(
+ b"k1",
+ &[2, 4],
+ &[11, 11],
+ &[OpType::Put, OpType::Put],
+ &[32, 34],
+ )]);
+ let mut reader = MergeReaderBuilder::new()
+ .push_batch_reader(Box::new(reader1))
+ .push_batch_iter(Box::new(reader2))
+ .build()
+ .await
+ .unwrap();
+ check_reader_result(
+ &mut reader,
+ &[
new_batch(
b"k1",
- &[4, 5],
- &[14, 15],
- &[OpType::Put, OpType::Put],
- &[24, 25],
+ &[1, 2, 3, 4],
+ &[10, 11, 10, 11],
+ &[OpType::Put, OpType::Put, OpType::Put, OpType::Put],
+ &[21, 32, 23, 34],
),
- new_batch(b"k1", &[7], &[17], &[OpType::Put], &[27]),
- new_batch(b"k2", &[3], &[13], &[OpType::Put], &[23]),
+ new_batch(b"k2", &[3], &[10], &[OpType::Put], &[23]),
],
)
.await;
@@ -598,16 +707,18 @@ mod tests {
&[
new_batch(
b"k1",
- &[1, 2],
- &[11, 12],
- &[OpType::Put, OpType::Put],
- &[21, 22],
+ &[1, 2, 3, 4],
+ &[11, 12, 10, 14],
+ &[OpType::Put, OpType::Put, OpType::Put, OpType::Put],
+ &[21, 22, 33, 24],
+ ),
+ new_batch(
+ b"k2",
+ &[1, 3, 10],
+ &[11, 13, 20],
+ &[OpType::Put, OpType::Put, OpType::Put],
+ &[21, 23, 30],
),
- new_batch(b"k1", &[3], &[10], &[OpType::Put], &[33]),
- new_batch(b"k1", &[4], &[14], &[OpType::Put], &[24]),
- new_batch(b"k2", &[1], &[11], &[OpType::Put], &[21]),
- new_batch(b"k2", &[3], &[13], &[OpType::Put], &[23]),
- new_batch(b"k2", &[10], &[20], &[OpType::Put], &[30]),
],
)
.await;
@@ -651,89 +762,307 @@ mod tests {
.await;
}
- #[test]
- fn test_batch_merger_empty() {
- let mut merger = BatchMerger::new();
- assert!(merger.merge_batches().unwrap().is_empty());
+ #[tokio::test]
+ async fn test_merge_next_node_empty() {
+ let reader1 = VecBatchReader::new(&[new_batch(
+ b"k1",
+ &[1, 2],
+ &[11, 12],
+ &[OpType::Put, OpType::Put],
+ &[21, 22],
+ )]);
+ // This reader will be empty after skipping the timestamp.
+ let reader2 = VecBatchReader::new(&[new_batch(b"k1", &[1], &[10], &[OpType::Put], &[33])]);
+ let mut reader = MergeReaderBuilder::new()
+ .push_batch_reader(Box::new(reader1))
+ .push_batch_iter(Box::new(reader2))
+ .build()
+ .await
+ .unwrap();
+ check_reader_result(
+ &mut reader,
+ &[new_batch(
+ b"k1",
+ &[1, 2],
+ &[11, 12],
+ &[OpType::Put, OpType::Put],
+ &[21, 22],
+ )],
+ )
+ .await;
}
- #[test]
- fn test_batch_merger_unsorted() {
- let mut merger = BatchMerger::new();
- merger.push(new_batch(
+ #[tokio::test]
+ async fn test_merge_top_node_empty() {
+ // This reader will be empty after skipping the timestamp 2.
+ let reader1 = VecBatchReader::new(&[new_batch(
b"k1",
- &[1, 3, 5],
- &[10, 10, 10],
- &[OpType::Put, OpType::Put, OpType::Put],
- &[21, 23, 25],
- ));
- assert!(merger.is_sorted);
- merger.push(new_batch(
+ &[1, 2],
+ &[10, 10],
+ &[OpType::Put, OpType::Put],
+ &[21, 22],
+ )]);
+ let reader2 = VecBatchReader::new(&[new_batch(
b"k1",
- &[2, 4],
+ &[2, 3],
&[11, 11],
&[OpType::Put, OpType::Put],
- &[22, 24],
- ));
- assert!(!merger.is_sorted);
- let batches = merger.merge_batches().unwrap();
- let batch = Batch::concat(batches.into_iter().collect()).unwrap();
- assert_eq!(
- batch,
- new_batch(
+ &[32, 33],
+ )]);
+ let mut reader = MergeReaderBuilder::new()
+ .push_batch_reader(Box::new(reader1))
+ .push_batch_iter(Box::new(reader2))
+ .build()
+ .await
+ .unwrap();
+ check_reader_result(
+ &mut reader,
+ &[new_batch(
b"k1",
- &[1, 2, 3, 4, 5],
- &[10, 11, 10, 11, 10],
+ &[1, 2, 3],
+ &[10, 11, 11],
+ &[OpType::Put, OpType::Put, OpType::Put],
+ &[21, 32, 33],
+ )],
+ )
+ .await;
+ }
+
+ #[tokio::test]
+ async fn test_merge_large_range() {
+ let reader1 = VecBatchReader::new(&[new_batch(
+ b"k1",
+ &[1, 10],
+ &[10, 10],
+ &[OpType::Put, OpType::Put],
+ &[21, 30],
+ )]);
+ let reader2 = VecBatchReader::new(&[new_batch(
+ b"k1",
+ &[1, 20],
+ &[11, 11],
+ &[OpType::Put, OpType::Put],
+ &[31, 40],
+ )]);
+ // The hot heap has a node that doesn't have duplicate
+ // timestamps.
+ let reader3 = VecBatchReader::new(&[new_batch(
+ b"k1",
+ &[6, 8],
+ &[11, 11],
+ &[OpType::Put, OpType::Put],
+ &[36, 38],
+ )]);
+ let mut reader = MergeReaderBuilder::new()
+ .push_batch_reader(Box::new(reader1))
+ .push_batch_iter(Box::new(reader2))
+ .push_batch_reader(Box::new(reader3))
+ .build()
+ .await
+ .unwrap();
+ check_reader_result(
+ &mut reader,
+ &[new_batch(
+ b"k1",
+ &[1, 6, 8, 10, 20],
+ &[11, 11, 11, 10, 11],
&[
OpType::Put,
OpType::Put,
OpType::Put,
OpType::Put,
- OpType::Put
+ OpType::Put,
],
- &[21, 22, 23, 24, 25]
- )
- );
- assert!(merger.is_sorted);
+ &[31, 36, 38, 30, 40],
+ )],
+ )
+ .await;
}
- #[test]
- fn test_batch_merger_unsorted_by_heap() {
- let mut merger = BatchMerger::new();
- merger.push(new_batch(
+ #[tokio::test]
+ async fn test_merge_many_duplicates() {
+ let mut builder = MergeReaderBuilder::new();
+ builder.batch_size(3);
+ for i in 0..10 {
+ let batches: Vec<_> = (0..8)
+ .map(|ts| new_batch(b"k1", &[ts], &[i], &[OpType::Put], &[100]))
+ .collect();
+ let reader = VecBatchReader::new(&batches);
+ builder.push_batch_reader(Box::new(reader));
+ }
+ let mut reader = builder.build().await.unwrap();
+ check_reader_result(
+ &mut reader,
+ &[
+ new_batch(
+ b"k1",
+ &[0, 1, 2],
+ &[9, 9, 9],
+ &[OpType::Put, OpType::Put, OpType::Put],
+ &[100, 100, 100],
+ ),
+ new_batch(
+ b"k1",
+ &[3, 4, 5],
+ &[9, 9, 9],
+ &[OpType::Put, OpType::Put, OpType::Put],
+ &[100, 100, 100],
+ ),
+ new_batch(
+ b"k1",
+ &[6, 7],
+ &[9, 9],
+ &[OpType::Put, OpType::Put],
+ &[100, 100],
+ ),
+ ],
+ )
+ .await;
+ }
+
+ #[tokio::test]
+ async fn test_merge_more_than_batch_size() {
+ let batches: Vec<_> = (0..MIN_BATCH_SIZE as i64 * 2)
+ .map(|ts| new_batch(b"k1", &[ts], &[10], &[OpType::Put], &[100]))
+ .collect();
+ let reader = VecBatchReader::new(&batches);
+ let mut reader = MergeReaderBuilder::new()
+ .push_batch_reader(Box::new(reader))
+ // Still use the default batch size.
+ .batch_size(0)
+ .build()
+ .await
+ .unwrap();
+ let ts1: Vec<_> = (0..MIN_BATCH_SIZE as i64).collect();
+ let ts2: Vec<_> = (MIN_BATCH_SIZE as i64..MIN_BATCH_SIZE as i64 * 2).collect();
+ let seqs = vec![10; MIN_BATCH_SIZE];
+ let op_types = vec![OpType::Put; MIN_BATCH_SIZE];
+ let fields = vec![100; MIN_BATCH_SIZE];
+ check_reader_result(
+ &mut reader,
+ &[
+ new_batch(b"k1", &ts1, &seqs, &op_types, &fields),
+ new_batch(b"k1", &ts2, &seqs, &op_types, &fields),
+ ],
+ )
+ .await;
+ }
+
+ #[tokio::test]
+ async fn test_merge_more_than_batch_size_overlapping() {
+ let reader1 = VecBatchReader::new(&[new_batch(
b"k1",
- &[1, 3, 5],
- &[10, 10, 10],
- &[OpType::Put, OpType::Put, OpType::Put],
- &[21, 23, 25],
- ));
- assert!(merger.is_sorted);
- merger.push(new_batch(
+ &[1, 2, 3, 4, 5, 6, 7, 8, 9],
+ &[11, 10, 11, 10, 11, 10, 11, 10, 11],
+ &[
+ OpType::Put,
+ OpType::Put,
+ OpType::Put,
+ OpType::Put,
+ OpType::Put,
+ OpType::Put,
+ OpType::Put,
+ OpType::Put,
+ OpType::Put,
+ ],
+ &[21, 22, 23, 24, 25, 26, 27, 28, 29],
+ )]);
+ let reader2 = VecBatchReader::new(&[new_batch(
b"k1",
- &[2, 4],
- &[11, 11],
- &[OpType::Put, OpType::Put],
- &[22, 24],
- ));
- assert!(!merger.is_sorted);
- let batches = merger.merge_batches().unwrap();
- let batch = Batch::concat(batches.into_iter().collect()).unwrap();
+ &[1, 2, 3, 4, 5, 6, 7, 8, 9],
+ &[10, 11, 10, 11, 10, 11, 10, 11, 10],
+ &[
+ OpType::Put,
+ OpType::Put,
+ OpType::Put,
+ OpType::Put,
+ OpType::Put,
+ OpType::Put,
+ OpType::Put,
+ OpType::Put,
+ OpType::Put,
+ ],
+ &[31, 32, 33, 34, 35, 36, 37, 38, 39],
+ )]);
+ let mut reader = MergeReaderBuilder::new()
+ .push_batch_iter(Box::new(reader1))
+ .push_batch_reader(Box::new(reader2))
+ .batch_size(3)
+ .build()
+ .await
+ .unwrap();
+ check_reader_result(
+ &mut reader,
+ &[
+ new_batch(
+ b"k1",
+ &[1, 2, 3],
+ &[11, 11, 11],
+ &[OpType::Put, OpType::Put, OpType::Put],
+ &[21, 32, 23],
+ ),
+ new_batch(
+ b"k1",
+ &[4, 5, 6],
+ &[11, 11, 11],
+ &[OpType::Put, OpType::Put, OpType::Put],
+ &[34, 25, 36],
+ ),
+ new_batch(
+ b"k1",
+ &[7, 8, 9],
+ &[11, 11, 11],
+ &[OpType::Put, OpType::Put, OpType::Put],
+ &[27, 38, 29],
+ ),
+ ],
+ )
+ .await;
+ }
+
+ #[test]
+ fn test_batch_merger_empty() {
+ let mut merger = BatchMerger::new();
+ assert!(merger.is_empty());
+ assert!(merger.merge_batches().unwrap().is_none());
+ assert!(merger.primary_key().is_none());
+ }
+
+ #[test]
+ fn test_merge_one_batch() {
+ let mut merger = BatchMerger::new();
+ let expect = new_batch(b"k1", &[1], &[10], &[OpType::Put], &[21]);
+ merger.push(expect.clone()).unwrap();
+ let batch = merger.merge_batches().unwrap().unwrap();
+ assert_eq!(1, batch.num_rows());
+ assert_eq!(expect, batch);
+ assert!(merger.is_empty());
+ }
+
+ #[test]
+ fn test_merge_batches() {
+ let mut merger = BatchMerger::new();
+ merger
+ .push(new_batch(b"k1", &[1], &[10], &[OpType::Put], &[21]))
+ .unwrap();
+ assert_eq!(1, merger.num_rows());
+ assert!(!merger.is_empty());
+ merger
+ .push(new_batch(b"k1", &[2], &[10], &[OpType::Put], &[22]))
+ .unwrap();
+ assert_eq!(2, merger.num_rows());
+ let batch = merger.merge_batches().unwrap().unwrap();
+ assert_eq!(2, batch.num_rows());
assert_eq!(
batch,
new_batch(
b"k1",
- &[1, 2, 3, 4, 5],
- &[10, 11, 10, 11, 10],
- &[
- OpType::Put,
- OpType::Put,
- OpType::Put,
- OpType::Put,
- OpType::Put
- ],
- &[21, 22, 23, 24, 25]
+ &[1, 2],
+ &[10, 10],
+ &[OpType::Put, OpType::Put,],
+ &[21, 22]
)
);
- assert!(merger.is_sorted);
+ assert!(merger.is_empty());
}
}
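
To make the hot-heap merge above easier to follow, here is a self-contained toy of the core idea, with plain `i64` timestamps standing in for `Batch` and no primary keys, sequences, deletions, or batch-size handling: pop the run with the smallest first timestamp, emit only the rows that sort before the next run's minimum (found with `binary_search`), and push the remainder back. The names `Run` and `merge` are made up for this sketch and are not part of the mito2 code.

```rust
use std::cmp::Ordering;
use std::collections::BinaryHeap;

// A sorted, non-empty run of timestamps standing in for a `Batch`.
struct Run {
    rows: Vec<i64>,
}

impl Run {
    fn first(&self) -> i64 {
        self.rows[0]
    }
}

// Order runs by first timestamp ascending; BinaryHeap is a max-heap, so the
// comparison is reversed to make `pop()` return the "hottest" run.
impl PartialEq for Run {
    fn eq(&self, other: &Self) -> bool {
        self.first() == other.first()
    }
}
impl Eq for Run {}
impl PartialOrd for Run {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
impl Ord for Run {
    fn cmp(&self, other: &Self) -> Ordering {
        other.first().cmp(&self.first())
    }
}

fn merge(sources: Vec<Vec<i64>>) -> Vec<i64> {
    let mut heap: BinaryHeap<Run> = sources
        .into_iter()
        .filter(|rows| !rows.is_empty())
        .map(|rows| Run { rows })
        .collect();
    let mut out = Vec::new();
    while let Some(mut top) = heap.pop() {
        // First timestamp of the next-hottest run, if any.
        let next_first = heap.peek().map(|run| run.first());
        match next_first {
            // Only one run left: take it whole.
            None => out.extend(top.rows),
            Some(next_first) => {
                // Rows sorting before the next run's minimum can be emitted now.
                // On a tie we still emit one row so every iteration makes progress.
                let pos = match top.rows.binary_search(&next_first) {
                    Ok(p) | Err(p) => p,
                };
                let take = pos.max(1);
                out.extend(top.rows.drain(..take));
                if !top.rows.is_empty() {
                    heap.push(top);
                }
            }
        }
    }
    out
}

fn main() {
    let merged = merge(vec![vec![1, 3, 5, 7], vec![2, 4, 6], vec![10, 11]]);
    assert_eq!(merged, vec![1, 2, 3, 4, 5, 6, 7, 10, 11]);
    println!("{merged:?}");
}
```

Because each run only gives up the prefix that cannot overlap any other run, the output stays sorted without buffering a whole primary key's worth of batches, which is the point of the change.
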
diff --git a/src/storage/src/read/merge.rs b/src/storage/src/read/merge.rs
index 713cb038d89f..d27d05b47b06 100644
--- a/src/storage/src/read/merge.rs
+++ b/src/storage/src/read/merge.rs
@@ -582,7 +582,9 @@ impl MergeReader {
// Now key range of this node is behind the hottest node's.
node.is_behind(hottest)
} else {
- false
+ // Setting this to false should not affect correctness, only performance, because
+ // `refill_hot()` ensures the hottest node is correct.
+ true
};
if node_is_cold {
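
The `true` fallback above is easier to see with a tiny model: even if a node returning from a fetch is classified as cold while the hot heap happens to be empty, the refill step always moves the smallest cold entry back into the hot heap first, so the hottest candidate is never lost. In the toy below a single `i64` stands in for a node and `Reverse` gives min-heap ordering; this is only an illustration, not the storage engine's types.

```rust
use std::cmp::Reverse;
use std::collections::BinaryHeap;

// Move entries from `cold` into `hot` while they are not strictly behind the
// current hot minimum (the "merge window" in this toy).
fn refill_hot(hot: &mut BinaryHeap<Reverse<i64>>, cold: &mut BinaryHeap<Reverse<i64>>) {
    while let Some(&Reverse(warmest)) = cold.peek() {
        if let Some(&Reverse(window)) = hot.peek() {
            if warmest > window {
                break; // strictly behind the window: keep it cold
            }
        }
        hot.push(cold.pop().unwrap());
    }
}

fn main() {
    let mut hot: BinaryHeap<Reverse<i64>> = BinaryHeap::new();
    let mut cold: BinaryHeap<Reverse<i64>> = BinaryHeap::new();
    // A node comes back from a fetch while `hot` is empty; classify it as "cold"...
    cold.push(Reverse(7));
    cold.push(Reverse(3));
    // ...then refill: the smallest entry (3) still ends up on top of `hot`.
    refill_hot(&mut hot, &mut cold);
    assert_eq!(hot.peek(), Some(&Reverse(3)));
    assert_eq!(cold.peek(), Some(&Reverse(7)));
}
```
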
|
feat
|
avoid buffering all batches for the same primary key (#2658)
|
0db10a33d0c80ec10a03ac6bdf5318e6f1fd288e
|
2025-01-16 13:36:28
|
Yohan Wal
|
chore: update proto rev (#5379)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 9c7fb40aac3d..a3e5f0309c5b 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4449,7 +4449,7 @@ dependencies = [
[[package]]
name = "greptime-proto"
version = "0.1.0"
-source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=9c56862fdcf713ad485932a62702b8afbd5a22dd#9c56862fdcf713ad485932a62702b8afbd5a22dd"
+source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=4a173785b3376267c4d62b6e0b0a54ca040822aa#4a173785b3376267c4d62b6e0b0a54ca040822aa"
dependencies = [
"prost 0.12.6",
"serde",
diff --git a/Cargo.toml b/Cargo.toml
index 616cbe3b4012..0e9e5f4e87a6 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -124,7 +124,7 @@ etcd-client = "0.13"
fst = "0.4.7"
futures = "0.3"
futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "9c56862fdcf713ad485932a62702b8afbd5a22dd" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "4a173785b3376267c4d62b6e0b0a54ca040822aa" }
hex = "0.4"
http = "0.2"
humantime = "2.1"
|
chore
|
update proto rev (#5379)
|
e4fd5d0fd375d386db8e6c88abdfb185c78ef30c
|
2023-07-13 14:36:51
|
JeremyHi
|
refactor: let metasrv returns ref always (#1954)
| false
|
diff --git a/src/meta-srv/src/metasrv.rs b/src/meta-srv/src/metasrv.rs
index 0811525c50f2..ff70c1922d13 100644
--- a/src/meta-srv/src/metasrv.rs
+++ b/src/meta-srv/src/metasrv.rs
@@ -238,57 +238,46 @@ impl MetaSrv {
&self.options
}
- #[inline]
- pub fn in_memory(&self) -> ResettableKvStoreRef {
- self.in_memory.clone()
+ pub fn in_memory(&self) -> &ResettableKvStoreRef {
+ &self.in_memory
}
- #[inline]
- pub fn kv_store(&self) -> KvStoreRef {
- self.kv_store.clone()
+ pub fn kv_store(&self) -> &KvStoreRef {
+ &self.kv_store
}
- #[inline]
- pub fn leader_cached_kv_store(&self) -> ResettableKvStoreRef {
- self.leader_cached_kv_store.clone()
+ pub fn leader_cached_kv_store(&self) -> &ResettableKvStoreRef {
+ &self.leader_cached_kv_store
}
- #[inline]
- pub fn meta_peer_client(&self) -> MetaPeerClientRef {
- self.meta_peer_client.clone()
+ pub fn meta_peer_client(&self) -> &MetaPeerClientRef {
+ &self.meta_peer_client
}
- #[inline]
- pub fn table_id_sequence(&self) -> SequenceRef {
- self.table_id_sequence.clone()
+ pub fn table_id_sequence(&self) -> &SequenceRef {
+ &self.table_id_sequence
}
- #[inline]
- pub fn selector(&self) -> SelectorRef {
- self.selector.clone()
+ pub fn selector(&self) -> &SelectorRef {
+ &self.selector
}
- #[inline]
- pub fn handler_group(&self) -> HeartbeatHandlerGroup {
- self.handler_group.clone()
+ pub fn handler_group(&self) -> &HeartbeatHandlerGroup {
+ &self.handler_group
}
- #[inline]
- pub fn election(&self) -> Option<ElectionRef> {
- self.election.clone()
+ pub fn election(&self) -> Option<&ElectionRef> {
+ self.election.as_ref()
}
- #[inline]
pub fn lock(&self) -> &DistLockRef {
&self.lock
}
- #[inline]
- pub fn mailbox(&self) -> MailboxRef {
- self.mailbox.clone()
+ pub fn mailbox(&self) -> &MailboxRef {
+ &self.mailbox
}
- #[inline]
pub fn ddl_manager(&self) -> &DdlManagerRef {
&self.ddl_manager
}
@@ -304,12 +293,12 @@ impl MetaSrv {
#[inline]
pub fn new_ctx(&self) -> Context {
let server_addr = self.options().server_addr.clone();
- let in_memory = self.in_memory();
- let kv_store = self.kv_store();
- let leader_cached_kv_store = self.leader_cached_kv_store();
- let meta_peer_client = self.meta_peer_client();
- let mailbox = self.mailbox();
- let election = self.election();
+ let in_memory = self.in_memory.clone();
+ let kv_store = self.kv_store.clone();
+ let leader_cached_kv_store = self.leader_cached_kv_store.clone();
+ let meta_peer_client = self.meta_peer_client.clone();
+ let mailbox = self.mailbox.clone();
+ let election = self.election.clone();
let skip_all = Arc::new(AtomicBool::new(false));
Context {
server_addr,
diff --git a/src/meta-srv/src/service/admin.rs b/src/meta-srv/src/service/admin.rs
index 3859bb6b717d..a2c4b314c638 100644
--- a/src/meta-srv/src/service/admin.rs
+++ b/src/meta-srv/src/service/admin.rs
@@ -36,35 +36,35 @@ pub fn make_admin_service(meta_srv: MetaSrv) -> Admin {
let router = router.route(
"/node-lease",
node_lease::NodeLeaseHandler {
- meta_peer_client: meta_srv.meta_peer_client(),
+ meta_peer_client: meta_srv.meta_peer_client().clone(),
},
);
let router = router.route(
"/heartbeat",
heartbeat::HeartBeatHandler {
- meta_peer_client: meta_srv.meta_peer_client(),
+ meta_peer_client: meta_srv.meta_peer_client().clone(),
},
);
let router = router.route(
"/catalogs",
meta::CatalogsHandler {
- kv_store: meta_srv.kv_store(),
+ kv_store: meta_srv.kv_store().clone(),
},
);
let router = router.route(
"/schemas",
meta::SchemasHandler {
- kv_store: meta_srv.kv_store(),
+ kv_store: meta_srv.kv_store().clone(),
},
);
let router = router.route(
"/tables",
meta::TablesHandler {
- kv_store: meta_srv.kv_store(),
+ kv_store: meta_srv.kv_store().clone(),
table_metadata_manager: meta_srv.table_metadata_manager().clone(),
},
);
@@ -72,7 +72,7 @@ pub fn make_admin_service(meta_srv: MetaSrv) -> Admin {
let router = router.route(
"/table",
meta::TableHandler {
- kv_store: meta_srv.kv_store(),
+ kv_store: meta_srv.kv_store().clone(),
table_metadata_manager: meta_srv.table_metadata_manager().clone(),
},
);
@@ -80,14 +80,14 @@ pub fn make_admin_service(meta_srv: MetaSrv) -> Admin {
let router = router.route(
"/leader",
leader::LeaderHandler {
- election: meta_srv.election(),
+ election: meta_srv.election().cloned(),
},
);
let router = router.route(
"/route",
route::RouteHandler {
- kv_store: meta_srv.kv_store(),
+ kv_store: meta_srv.kv_store().clone(),
},
);
diff --git a/src/meta-srv/src/service/ddl.rs b/src/meta-srv/src/service/ddl.rs
index ceb346f91c76..8292ff0e381e 100644
--- a/src/meta-srv/src/service/ddl.rs
+++ b/src/meta-srv/src/service/ddl.rs
@@ -53,8 +53,8 @@ impl ddl_task_server::DdlTask for MetaSrv {
let ctx = SelectorContext {
datanode_lease_secs: self.options().datanode_lease_secs,
server_addr: self.options().server_addr.clone(),
- kv_store: self.kv_store(),
- meta_peer_client: self.meta_peer_client(),
+ kv_store: self.kv_store().clone(),
+ meta_peer_client: self.meta_peer_client().clone(),
catalog: None,
schema: None,
table: None,
diff --git a/src/meta-srv/src/service/heartbeat.rs b/src/meta-srv/src/service/heartbeat.rs
index e3f7bbd21df9..628202d934a1 100644
--- a/src/meta-srv/src/service/heartbeat.rs
+++ b/src/meta-srv/src/service/heartbeat.rs
@@ -43,7 +43,7 @@ impl heartbeat_server::Heartbeat for MetaSrv {
) -> GrpcResult<Self::HeartbeatStream> {
let mut in_stream = req.into_inner();
let (tx, rx) = mpsc::channel(128);
- let handler_group = self.handler_group();
+ let handler_group = self.handler_group().clone();
let ctx = self.new_ctx();
let _handle = common_runtime::spawn_bg(async move {
let mut pusher_key = None;
diff --git a/src/meta-srv/src/service/router.rs b/src/meta-srv/src/service/router.rs
index faf7edd3bde0..3cdf69a118bb 100644
--- a/src/meta-srv/src/service/router.rs
+++ b/src/meta-srv/src/service/router.rs
@@ -85,8 +85,8 @@ impl router_server::Router for MetaSrv {
let ctx = SelectorContext {
datanode_lease_secs: self.options().datanode_lease_secs,
server_addr: self.options().server_addr.clone(),
- kv_store: self.kv_store(),
- meta_peer_client: self.meta_peer_client(),
+ kv_store: self.kv_store().clone(),
+ meta_peer_client: self.meta_peer_client().clone(),
catalog: Some(table_name.catalog_name.clone()),
schema: Some(table_name.schema_name.clone()),
table: Some(table_name.table_name.clone()),
@@ -140,8 +140,8 @@ impl router_server::Router for MetaSrv {
async fn handle_create(
req: CreateRequest,
ctx: SelectorContext,
- selector: SelectorRef,
- table_id_sequence: SequenceRef,
+ selector: &SelectorRef,
+ table_id_sequence: &SequenceRef,
) -> Result<RouteResponse> {
let CreateRequest {
header,
diff --git a/tests-integration/src/cluster.rs b/tests-integration/src/cluster.rs
index 713926b7906e..d25ca64340ca 100644
--- a/tests-integration/src/cluster.rs
+++ b/tests-integration/src/cluster.rs
@@ -97,7 +97,7 @@ impl GreptimeDbClusterBuilder {
build_datanode_clients(datanode_clients.clone(), &datanode_instances, datanodes).await;
- self.wait_datanodes_alive(&meta_srv.meta_srv.meta_peer_client(), datanodes)
+ self.wait_datanodes_alive(meta_srv.meta_srv.meta_peer_client(), datanodes)
.await;
let frontend = self
@@ -131,7 +131,7 @@ impl GreptimeDbClusterBuilder {
let mock =
meta_srv::mocks::mock(opt, self.kv_store.clone(), None, Some(datanode_clients)).await;
- let metadata_service = DefaultMetadataService::new(mock.meta_srv.kv_store());
+ let metadata_service = DefaultMetadataService::new(mock.meta_srv.kv_store().clone());
metadata_service
.create_schema("another_catalog", "another_schema", true)
.await
diff --git a/tests-integration/tests/region_failover.rs b/tests-integration/tests/region_failover.rs
index b9952d1263ca..34637c49380c 100644
--- a/tests-integration/tests/region_failover.rs
+++ b/tests-integration/tests/region_failover.rs
@@ -332,13 +332,13 @@ async fn run_region_failover_procedure(
let procedure = RegionFailoverProcedure::new(
failed_region.clone(),
RegionFailoverContext {
- mailbox: meta_srv.mailbox(),
+ mailbox: meta_srv.mailbox().clone(),
selector,
selector_ctx: SelectorContext {
datanode_lease_secs: meta_srv.options().datanode_lease_secs,
server_addr: meta_srv.options().server_addr.clone(),
- kv_store: meta_srv.kv_store(),
- meta_peer_client: meta_srv.meta_peer_client(),
+ kv_store: meta_srv.kv_store().clone(),
+ meta_peer_client: meta_srv.meta_peer_client().clone(),
catalog: None,
schema: None,
table: None,
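
The pattern applied throughout this refactor is to return `&Arc<T>` from getters and let each call site decide whether it needs an owned handle. A minimal sketch of that shape, with a made-up `KvStore` standing in for the real `KvStoreRef`, `MailboxRef`, and friends:

```rust
use std::sync::Arc;

struct KvStore;

struct MetaSrv {
    kv_store: Arc<KvStore>,
}

impl MetaSrv {
    // Before: `fn kv_store(&self) -> Arc<KvStore> { self.kv_store.clone() }`
    // After: hand out a reference and let the call site decide about ownership.
    fn kv_store(&self) -> &Arc<KvStore> {
        &self.kv_store
    }
}

fn main() {
    let srv = MetaSrv { kv_store: Arc::new(KvStore) };
    // Read-only use: no reference-count bump at all.
    let _borrowed: &KvStore = srv.kv_store();
    // Code that stores the handle clones explicitly, like the call sites above.
    let owned: Arc<KvStore> = srv.kv_store().clone();
    assert_eq!(Arc::strong_count(&owned), 2);
}
```

Read-only callers avoid the atomic reference-count bump entirely, while handlers that keep the value clone explicitly, which is exactly what the added `.clone()` calls at the call sites above do.
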
|
refactor
|
let metasrv returns ref always (#1954)
|
de0b8aa0a04af3f7ca0ed69e07d5e89aea0919d7
|
2023-02-15 10:43:17
|
Xieqijun
|
feat: Support the DELETE SQL statement (#942)
| false
|
diff --git a/src/datanode/src/error.rs b/src/datanode/src/error.rs
index 94db9f5f6776..ba605fb7de13 100644
--- a/src/datanode/src/error.rs
+++ b/src/datanode/src/error.rs
@@ -114,6 +114,17 @@ pub enum Error {
source: TableError,
},
+ #[snafu(display(
+ "Failed to delete value from table: {}, source: {}",
+ table_name,
+ source
+ ))]
+ Delete {
+ table_name: String,
+ #[snafu(backtrace)]
+ source: TableError,
+ },
+
#[snafu(display("Failed to start server, source: {}", source))]
StartServer {
#[snafu(backtrace)]
@@ -161,7 +172,10 @@ pub enum Error {
},
#[snafu(display("Invalid SQL, error: {}", msg))]
- InvalidSql { msg: String, backtrace: Backtrace },
+ InvalidSql { msg: String },
+
+ #[snafu(display("Not support SQL, error: {}", msg))]
+ NotSupportSql { msg: String },
#[snafu(display("Failed to create schema when creating table, source: {}", source))]
CreateSchema {
@@ -343,6 +357,7 @@ impl ErrorExt for Error {
Error::DropTable { source, .. } => source.status_code(),
Error::Insert { source, .. } => source.status_code(),
+ Error::Delete { source, .. } => source.status_code(),
Error::TableNotFound { .. } => StatusCode::TableNotFound,
Error::ColumnNotFound { .. } => StatusCode::TableColumnNotFound,
@@ -361,6 +376,7 @@ impl ErrorExt for Error {
Error::ColumnValuesNumberMismatch { .. }
| Error::InvalidSql { .. }
+ | Error::NotSupportSql { .. }
| Error::KeyColumnNotFound { .. }
| Error::InvalidPrimaryKey { .. }
| Error::MissingTimestampColumn { .. }
diff --git a/src/datanode/src/instance/sql.rs b/src/datanode/src/instance/sql.rs
index de517ba78c33..ca833f47284d 100644
--- a/src/datanode/src/instance/sql.rs
+++ b/src/datanode/src/instance/sql.rs
@@ -66,7 +66,10 @@ impl Instance {
)?;
self.sql_handler.execute(request, query_ctx).await
}
-
+ QueryStatement::Sql(Statement::Delete(d)) => {
+ let request = SqlRequest::Delete(*d);
+ self.sql_handler.execute(request, query_ctx).await
+ }
QueryStatement::Sql(Statement::CreateDatabase(c)) => {
let request = CreateDatabaseRequest {
db_name: c.name.to_string(),
diff --git a/src/datanode/src/sql.rs b/src/datanode/src/sql.rs
index 0a0da377a576..fdb8770ea754 100644
--- a/src/datanode/src/sql.rs
+++ b/src/datanode/src/sql.rs
@@ -19,17 +19,20 @@ use query::query_engine::QueryEngineRef;
use query::sql::{describe_table, explain, show_databases, show_tables};
use session::context::QueryContextRef;
use snafu::{OptionExt, ResultExt};
+use sql::statements::delete::Delete;
use sql::statements::describe::DescribeTable;
use sql::statements::explain::Explain;
use sql::statements::show::{ShowDatabases, ShowTables};
-use table::engine::TableEngineRef;
+use table::engine::{EngineContext, TableEngineRef, TableReference};
use table::requests::*;
+use table::TableRef;
-use crate::error::{self, ExecuteSqlSnafu, Result, TableNotFoundSnafu};
+use crate::error::{self, ExecuteSqlSnafu, GetTableSnafu, Result, TableNotFoundSnafu};
use crate::instance::sql::table_idents_to_full_name;
mod alter;
mod create;
+mod delete;
mod drop_table;
mod insert;
@@ -44,6 +47,7 @@ pub enum SqlRequest {
ShowTables(ShowTables),
DescribeTable(DescribeTable),
Explain(Box<Explain>),
+ Delete(Delete),
}
// Handler to execute SQL except query
@@ -77,6 +81,7 @@ impl SqlHandler {
SqlRequest::CreateDatabase(req) => self.create_database(req).await,
SqlRequest::Alter(req) => self.alter(req).await,
SqlRequest::DropTable(req) => self.drop_table(req).await,
+ SqlRequest::Delete(stmt) => self.delete(query_ctx.clone(), stmt).await,
SqlRequest::ShowDatabases(stmt) => {
show_databases(stmt, self.catalog_manager.clone()).context(ExecuteSqlSnafu)
}
@@ -108,6 +113,17 @@ impl SqlHandler {
result
}
+ pub(crate) fn get_table(&self, table_ref: &TableReference) -> Result<TableRef> {
+ self.table_engine
+ .get_table(&EngineContext::default(), table_ref)
+ .with_context(|_| GetTableSnafu {
+ table_name: table_ref.to_string(),
+ })?
+ .with_context(|| TableNotFoundSnafu {
+ table_name: table_ref.to_string(),
+ })
+ }
+
pub fn table_engine(&self) -> TableEngineRef {
self.table_engine.clone()
}
diff --git a/src/datanode/src/sql/delete.rs b/src/datanode/src/sql/delete.rs
new file mode 100644
index 000000000000..0308c4f8e5b0
--- /dev/null
+++ b/src/datanode/src/sql/delete.rs
@@ -0,0 +1,142 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::collections::HashMap;
+use std::sync::Arc;
+
+use common_query::Output;
+use datatypes::data_type::DataType;
+use datatypes::prelude::VectorRef;
+use datatypes::vectors::StringVector;
+use session::context::QueryContextRef;
+use snafu::{OptionExt, ResultExt};
+use sql::ast::{BinaryOperator, Expr, Value};
+use sql::statements::delete::Delete;
+use sql::statements::sql_value_to_value;
+use table::engine::TableReference;
+use table::requests::DeleteRequest;
+use table::TableRef;
+
+use crate::error::{ColumnNotFoundSnafu, DeleteSnafu, InvalidSqlSnafu, NotSupportSqlSnafu, Result};
+use crate::instance::sql::table_idents_to_full_name;
+use crate::sql::SqlHandler;
+
+impl SqlHandler {
+ pub(crate) async fn delete(&self, query_ctx: QueryContextRef, stmt: Delete) -> Result<Output> {
+ let (catalog_name, schema_name, table_name) =
+ table_idents_to_full_name(stmt.table_name(), query_ctx)?;
+ let table_ref = TableReference {
+ catalog: &catalog_name.to_string(),
+ schema: &schema_name.to_string(),
+ table: &table_name.to_string(),
+ };
+
+ let table = self.get_table(&table_ref)?;
+
+ let req = DeleteRequest {
+ key_column_values: parse_selection(stmt.selection(), &table)?,
+ };
+
+ let affected_rows = table.delete(req).await.with_context(|_| DeleteSnafu {
+ table_name: table_ref.to_string(),
+ })?;
+
+ Ok(Output::AffectedRows(affected_rows))
+ }
+}
+
+/// Parses the selection; the currently supported format is `tagkey1 = 'tagvalue1' AND ts = 'value'`
+/// (only `=` and `AND` in the WHERE clause, and all key columns must be provided).
+fn parse_selection(
+ selection: &Option<Expr>,
+ table: &TableRef,
+) -> Result<HashMap<String, VectorRef>> {
+ let mut key_column_values = HashMap::new();
+ if let Some(expr) = selection {
+ parse_expr(expr, &mut key_column_values, table)?;
+ }
+ Ok(key_column_values)
+}
+
+fn parse_expr(
+ expr: &Expr,
+ key_column_values: &mut HashMap<String, VectorRef>,
+ table: &TableRef,
+) -> Result<()> {
+ // match BinaryOp
+ if let Expr::BinaryOp { left, op, right } = expr {
+ match (&**left, op, &**right) {
+ // match And operator
+ (Expr::BinaryOp { .. }, BinaryOperator::And, Expr::BinaryOp { .. }) => {
+ parse_expr(left, key_column_values, table)?;
+ parse_expr(right, key_column_values, table)?;
+ return Ok(());
+ }
+ // match Eq operator
+ (Expr::Identifier(column_name), BinaryOperator::Eq, Expr::Value(value)) => {
+ key_column_values.insert(
+ column_name.to_string(),
+ value_to_vector(&column_name.to_string(), value, table)?,
+ );
+ return Ok(());
+ }
+ (Expr::Identifier(column_name), BinaryOperator::Eq, Expr::Identifier(value)) => {
+ key_column_values.insert(
+ column_name.to_string(),
+ Arc::new(StringVector::from(vec![value.to_string()])),
+ );
+ return Ok(());
+ }
+ _ => {}
+ }
+ }
+ NotSupportSqlSnafu {
+ msg: format!(
+ "Not support sql expr:{expr},correct format is tagkey1 = tagvalue1 and ts = value"
+ ),
+ }
+ .fail()
+}
+
+/// Parses a SQL value into a single-element vector.
+fn value_to_vector(column_name: &String, sql_value: &Value, table: &TableRef) -> Result<VectorRef> {
+ let schema = table.schema();
+ let column_schema =
+ schema
+ .column_schema_by_name(column_name)
+ .with_context(|| ColumnNotFoundSnafu {
+ table_name: table.table_info().name.clone(),
+ column_name: column_name.to_string(),
+ })?;
+ let data_type = &column_schema.data_type;
+ let value = sql_value_to_value(column_name, data_type, sql_value);
+ match value {
+ Ok(value) => {
+ let mut vec = data_type.create_mutable_vector(1);
+ if vec.push_value_ref(value.as_value_ref()).is_err() {
+ return InvalidSqlSnafu {
+ msg: format!(
+ "invalid sql, column name is {column_name}, value is {sql_value}",
+ ),
+ }
+ .fail();
+ }
+ Ok(vec.to_vector())
+ }
+ _ => InvalidSqlSnafu {
+ msg: format!("invalid sql, column name is {column_name}, value is {sql_value}",),
+ }
+ .fail(),
+ }
+}
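
The recursion in `parse_expr` above only accepts a conjunction of equality predicates. The sketch below mirrors that control flow with a simplified, made-up expression enum instead of the real sqlparser AST, so the shape of the walk is visible without the table and vector plumbing:

```rust
use std::collections::HashMap;

// Simplified stand-in for the sqlparser AST used above: only the shapes the
// delete handler accepts (`a = 'x' AND b = 'y'`).
enum Expr {
    Ident(String),
    Value(String),
    BinaryOp { left: Box<Expr>, op: Op, right: Box<Expr> },
}

enum Op {
    And,
    Eq,
}

// Walk the tree the same way `parse_expr` does: recurse through `AND`, collect
// `column = value` pairs, and reject anything else.
fn collect(expr: &Expr, out: &mut HashMap<String, String>) -> Result<(), String> {
    match expr {
        Expr::BinaryOp { left, op: Op::And, right } => {
            collect(left, out)?;
            collect(right, out)
        }
        Expr::BinaryOp { left, op: Op::Eq, right } => match (left.as_ref(), right.as_ref()) {
            (Expr::Ident(col), Expr::Value(v)) | (Expr::Ident(col), Expr::Ident(v)) => {
                out.insert(col.clone(), v.clone());
                Ok(())
            }
            _ => Err("expected `column = value`".to_string()),
        },
        _ => Err("only `=` and `AND` are supported".to_string()),
    }
}

fn main() {
    // host = 'host1' AND ts = '1655276557000'
    let expr = Expr::BinaryOp {
        left: Box::new(Expr::BinaryOp {
            left: Box::new(Expr::Ident("host".into())),
            op: Op::Eq,
            right: Box::new(Expr::Value("host1".into())),
        }),
        op: Op::And,
        right: Box::new(Expr::BinaryOp {
            left: Box::new(Expr::Ident("ts".into())),
            op: Op::Eq,
            right: Box::new(Expr::Value("1655276557000".into())),
        }),
    };
    let mut keys = HashMap::new();
    collect(&expr, &mut keys).unwrap();
    assert_eq!(keys["host"], "host1");
    assert_eq!(keys["ts"], "1655276557000");
}
```
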
diff --git a/src/datanode/src/sql/insert.rs b/src/datanode/src/sql/insert.rs
index cd9e8088fe67..7ca7137a590c 100644
--- a/src/datanode/src/sql/insert.rs
+++ b/src/datanode/src/sql/insert.rs
@@ -26,8 +26,8 @@ use table::requests::*;
use crate::error::{
CatalogSnafu, ColumnDefaultValueSnafu, ColumnNoneDefaultValueSnafu, ColumnNotFoundSnafu,
- ColumnValuesNumberMismatchSnafu, FindTableSnafu, InsertSnafu, ParseSqlSnafu,
- ParseSqlValueSnafu, Result, TableNotFoundSnafu,
+ ColumnValuesNumberMismatchSnafu, InsertSnafu, ParseSqlSnafu, ParseSqlValueSnafu, Result,
+ TableNotFoundSnafu,
};
use crate::sql::{SqlHandler, SqlRequest};
@@ -43,15 +43,7 @@ impl SqlHandler {
table: &req.table_name.to_string(),
};
- let table = self
- .catalog_manager
- .table(table_ref.catalog, table_ref.schema, table_ref.table)
- .context(FindTableSnafu {
- table_name: table_ref.to_string(),
- })?
- .context(TableNotFoundSnafu {
- table_name: table_ref.to_string(),
- })?;
+ let table = self.get_table(&table_ref)?;
let affected_rows = table.insert(req).await.with_context(|_| InsertSnafu {
table_name: table_ref.to_string(),
diff --git a/src/datanode/src/tests/instance_test.rs b/src/datanode/src/tests/instance_test.rs
index 8296b09d68e5..90608158113a 100644
--- a/src/datanode/src/tests/instance_test.rs
+++ b/src/datanode/src/tests/instance_test.rs
@@ -654,6 +654,55 @@ async fn test_use_database() {
check_output_stream(output, expected).await;
}
+#[tokio::test(flavor = "multi_thread")]
+async fn test_delete() {
+ let instance = MockInstance::new("test_delete").await;
+
+ let output = execute_sql(
+ &instance,
+ r#"create table test_table(
+ host string,
+ ts timestamp,
+ cpu double default 0,
+ memory double,
+ TIME INDEX (ts),
+ PRIMARY KEY(host)
+ ) engine=mito with(regions=1);"#,
+ )
+ .await;
+ assert!(matches!(output, Output::AffectedRows(0)));
+
+ let output = execute_sql(
+ &instance,
+ r#"insert into test_table(host, cpu, memory, ts) values
+ ('host1', 66.6, 1024, 1655276557000),
+ ('host2', 77.7, 2048, 1655276558000),
+ ('host3', 88.8, 3072, 1655276559000)
+ "#,
+ )
+ .await;
+ assert!(matches!(output, Output::AffectedRows(3)));
+
+ let output = execute_sql(
+ &instance,
+ "delete from test_table where host = host1 and ts = 1655276557000 ",
+ )
+ .await;
+ assert!(matches!(output, Output::AffectedRows(1)));
+
+ let output = execute_sql(&instance, "select * from test_table").await;
+ let expect = "\
++-------+---------------------+------+--------+
+| host | ts | cpu | memory |
++-------+---------------------+------+--------+
+| host2 | 2022-06-15T07:02:38 | 77.7 | 2048 |
+| host3 | 2022-06-15T07:02:39 | 88.8 | 3072 |
++-------+---------------------+------+--------+\
+"
+ .to_string();
+ check_output_stream(output, expect).await;
+}
+
async fn execute_sql(instance: &MockInstance, sql: &str) -> Output {
execute_sql_in_db(instance, sql, DEFAULT_SCHEMA_NAME).await
}
diff --git a/src/frontend/src/instance.rs b/src/frontend/src/instance.rs
index 4a21874df35b..afac0f50d600 100644
--- a/src/frontend/src/instance.rs
+++ b/src/frontend/src/instance.rs
@@ -392,6 +392,7 @@ impl Instance {
| Statement::Explain(_)
| Statement::Query(_)
| Statement::Insert(_)
+ | Statement::Delete(_)
| Statement::Alter(_)
| Statement::DropTable(_) => self.sql_handler.do_statement_query(stmt, query_ctx).await,
Statement::Use(db) => self.handle_use(db, query_ctx),
@@ -575,6 +576,9 @@ pub fn check_permission(
Statement::DescribeTable(stmt) => {
validate_param(stmt.name(), query_ctx)?;
}
+ Statement::Delete(delete) => {
+ validate_param(delete.table_name(), query_ctx)?;
+ }
}
Ok(())
}
diff --git a/src/query/src/datafusion/planner.rs b/src/query/src/datafusion/planner.rs
index 54ddd16c53f8..15035fbe3671 100644
--- a/src/query/src/datafusion/planner.rs
+++ b/src/query/src/datafusion/planner.rs
@@ -94,6 +94,7 @@ where
Statement::Query(qb) => self.query_to_plan(qb),
Statement::Explain(explain) => self.explain_to_plan(explain),
Statement::ShowTables(_)
+ | Statement::Delete(_)
| Statement::ShowDatabases(_)
| Statement::ShowCreateTable(_)
| Statement::DescribeTable(_)
diff --git a/src/sql/src/ast.rs b/src/sql/src/ast.rs
index 8f8acd597880..b35b71b51bbb 100644
--- a/src/sql/src/ast.rs
+++ b/src/sql/src/ast.rs
@@ -13,6 +13,7 @@
// limitations under the License.
pub use sqlparser::ast::{
- ColumnDef, ColumnOption, ColumnOptionDef, DataType, Expr, Function, FunctionArg,
- FunctionArgExpr, Ident, ObjectName, SqlOption, TableConstraint, TimezoneInfo, Value,
+ BinaryOperator, ColumnDef, ColumnOption, ColumnOptionDef, DataType, Expr, Function,
+ FunctionArg, FunctionArgExpr, Ident, ObjectName, SqlOption, TableConstraint, TimezoneInfo,
+ Value,
};
diff --git a/src/sql/src/parser.rs b/src/sql/src/parser.rs
index 1cc9a1caa044..47759279ab76 100644
--- a/src/sql/src/parser.rs
+++ b/src/sql/src/parser.rs
@@ -88,6 +88,8 @@ impl<'a> ParserContext<'a> {
self.parse_show()
}
+ Keyword::DELETE => self.parse_delete(),
+
Keyword::DESCRIBE | Keyword::DESC => {
self.parser.next_token();
self.parse_describe()
diff --git a/src/sql/src/parsers.rs b/src/sql/src/parsers.rs
index 1b874bc6169a..f356cf70a3a7 100644
--- a/src/sql/src/parsers.rs
+++ b/src/sql/src/parsers.rs
@@ -14,5 +14,6 @@
mod alter_parser;
pub(crate) mod create_parser;
+pub(crate) mod delete_parser;
pub(crate) mod insert_parser;
pub(crate) mod query_parser;
diff --git a/src/sql/src/parsers/delete_parser.rs b/src/sql/src/parsers/delete_parser.rs
new file mode 100644
index 000000000000..f538262c2c34
--- /dev/null
+++ b/src/sql/src/parsers/delete_parser.rs
@@ -0,0 +1,67 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use snafu::ResultExt;
+use sqlparser::ast::Statement as SpStatement;
+
+use crate::error::{self, Result};
+use crate::parser::ParserContext;
+use crate::statements::delete::Delete;
+use crate::statements::statement::Statement;
+
+/// DELETE statement parser implementation
+impl<'a> ParserContext<'a> {
+ pub(crate) fn parse_delete(&mut self) -> Result<Statement> {
+ self.parser.next_token();
+ let spstatement = self
+ .parser
+ .parse_delete()
+ .context(error::SyntaxSnafu { sql: self.sql })?;
+
+ match spstatement {
+ SpStatement::Delete { .. } => {
+ Ok(Statement::Delete(Box::new(Delete::try_from(spstatement)?)))
+ }
+ unexp => error::UnsupportedSnafu {
+ sql: self.sql.to_string(),
+ keyword: unexp.to_string(),
+ }
+ .fail(),
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::assert_matches::assert_matches;
+
+ use sqlparser::dialect::GenericDialect;
+
+ use super::*;
+
+ #[test]
+    pub fn test_parse_delete() {
+ let sql = r"delete from my_table where k1 = xxx and k2 = xxx and timestamp = xxx;";
+ let result = ParserContext::create_with_dialect(sql, &GenericDialect {}).unwrap();
+ assert_eq!(1, result.len());
+ assert_matches!(result[0], Statement::Delete { .. })
+ }
+
+ #[test]
+    pub fn test_parse_invalid_delete() {
+ let sql = r"delete my_table where "; // intentionally a bad sql
+ let result = ParserContext::create_with_dialect(sql, &GenericDialect {});
+ assert!(result.is_err(), "result is: {result:?}");
+ }
+}
diff --git a/src/sql/src/statements.rs b/src/sql/src/statements.rs
index d57a29b38314..5a87ecb46a60 100644
--- a/src/sql/src/statements.rs
+++ b/src/sql/src/statements.rs
@@ -14,6 +14,7 @@
pub mod alter;
pub mod create;
+pub mod delete;
pub mod describe;
pub mod drop;
pub mod explain;
@@ -21,6 +22,7 @@ pub mod insert;
pub mod query;
pub mod show;
pub mod statement;
+
use std::str::FromStr;
use api::helper::ColumnDataTypeWrapper;
diff --git a/src/sql/src/statements/delete.rs b/src/sql/src/statements/delete.rs
new file mode 100644
index 000000000000..bb9661f3a5b1
--- /dev/null
+++ b/src/sql/src/statements/delete.rs
@@ -0,0 +1,69 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use sqlparser::ast::{Expr, ObjectName, Statement, TableFactor};
+
+use crate::error::{Error, InvalidSqlSnafu, Result};
+
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct Delete {
+ table_name: ObjectName,
+ selection: Option<Expr>,
+}
+
+impl Delete {
+ pub fn table_name(&self) -> &ObjectName {
+ &self.table_name
+ }
+
+ pub fn selection(&self) -> &Option<Expr> {
+ &self.selection
+ }
+}
+
+impl TryFrom<Statement> for Delete {
+ type Error = Error;
+
+ fn try_from(stmt: Statement) -> Result<Self> {
+ match stmt {
+ Statement::Delete {
+ table_name,
+ using,
+ selection,
+ returning,
+ } => {
+ if using.is_some() || returning.is_some() {
+ return InvalidSqlSnafu {
+                        msg: "DELETE statement doesn't support USING or RETURNING clauses".to_string(),
+ }
+ .fail();
+ }
+ match table_name {
+ TableFactor::Table { name, .. } => Ok(Delete {
+ table_name: name,
+ selection,
+ }),
+ _ => InvalidSqlSnafu {
+                        msg: "can't find table name: the TableFactor is not a Table".to_string(),
+ }
+ .fail(),
+ }
+ }
+ unexp => InvalidSqlSnafu {
+                msg: format!("Unexpected statement: {unexp}"),
+ }
+ .fail(),
+ }
+ }
+}
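
For illustration, a simplified stand-in (hypothetical types, not sqlparser's AST) for the validate-and-convert pattern used by the TryFrom implementation above: unsupported clauses are rejected up front and only the fields the engine can execute are kept.

struct RawDelete {
    table: String,
    using: Option<String>,
    returning: Option<String>,
    selection: Option<String>,
}

#[derive(Debug, PartialEq)]
struct SimpleDelete {
    table_name: String,
    selection: Option<String>,
}

impl TryFrom<RawDelete> for SimpleDelete {
    type Error = String;

    fn try_from(raw: RawDelete) -> Result<Self, Self::Error> {
        // Reject clauses the engine cannot execute before building the statement.
        if raw.using.is_some() || raw.returning.is_some() {
            return Err("DELETE doesn't support USING or RETURNING clauses".to_string());
        }
        Ok(SimpleDelete {
            table_name: raw.table,
            selection: raw.selection,
        })
    }
}

fn main() {
    let raw = RawDelete {
        table: "monitor".to_string(),
        using: None,
        returning: None,
        selection: Some("host = 'host1' AND ts = 1655276557000".to_string()),
    };
    let stmt = SimpleDelete::try_from(raw).expect("valid DELETE");
    assert_eq!(stmt.table_name, "monitor");
}
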
diff --git a/src/sql/src/statements/statement.rs b/src/sql/src/statements/statement.rs
index 325c67d3f858..558f9a4b8354 100644
--- a/src/sql/src/statements/statement.rs
+++ b/src/sql/src/statements/statement.rs
@@ -14,6 +14,7 @@
use crate::statements::alter::AlterTable;
use crate::statements::create::{CreateDatabase, CreateTable};
+use crate::statements::delete::Delete;
use crate::statements::describe::DescribeTable;
use crate::statements::drop::DropTable;
use crate::statements::explain::Explain;
@@ -29,6 +30,8 @@ pub enum Statement {
Query(Box<Query>),
// Insert
Insert(Box<Insert>),
+ // Delete
+ Delete(Box<Delete>),
/// CREATE TABLE
CreateTable(CreateTable),
// DROP TABLE
diff --git a/tests/cases/standalone/common/insert/insert_invalid.result b/tests/cases/standalone/common/insert/insert_invalid.result
index 3143c76a1837..95080b1f9f45 100644
--- a/tests/cases/standalone/common/insert/insert_invalid.result
+++ b/tests/cases/standalone/common/insert/insert_invalid.result
@@ -42,3 +42,11 @@ INSERT INTO a VALUES (1,2),(3,4,5);
Error: 1004(InvalidArguments), Columns and values number mismatch, columns: 2, values: 3
+DROP TABLE strings;
+
+Affected Rows: 1
+
+DROP TABLE a;
+
+Affected Rows: 1
+
diff --git a/tests/cases/standalone/common/insert/insert_invalid.sql b/tests/cases/standalone/common/insert/insert_invalid.sql
index 692b0e8d2643..19cfe147c340 100644
--- a/tests/cases/standalone/common/insert/insert_invalid.sql
+++ b/tests/cases/standalone/common/insert/insert_invalid.sql
@@ -17,3 +17,7 @@ INSERT INTO a VALUES (1,2,3);
INSERT INTO a VALUES (1,2),(3);
INSERT INTO a VALUES (1,2),(3,4,5);
+
+DROP TABLE strings;
+
+DROP TABLE a;
diff --git a/tests/cases/standalone/delete/delete.result b/tests/cases/standalone/delete/delete.result
new file mode 100644
index 000000000000..c97b613b6b78
--- /dev/null
+++ b/tests/cases/standalone/delete/delete.result
@@ -0,0 +1,35 @@
+CREATE TABLE monitor ( host STRING, ts TIMESTAMP, cpu DOUBLE DEFAULT 0, memory DOUBLE, TIME INDEX (ts), PRIMARY KEY(host)) ;
+
+Affected Rows: 0
+
+insert into monitor(host, cpu, memory, ts) values ('host1', 66.6, 1024, 1655276557000), ('host2', 77.7, 2048, 1655276558000), ('host3', 88.8, 3072, 1655276559000);
+
+Affected Rows: 3
+
+select * from monitor;
+
++-------+---------------------+------+--------+
+| host | ts | cpu | memory |
++-------+---------------------+------+--------+
+| host1 | 2022-06-15T07:02:37 | 66.6 | 1024 |
+| host2 | 2022-06-15T07:02:38 | 77.7 | 2048 |
+| host3 | 2022-06-15T07:02:39 | 88.8 | 3072 |
++-------+---------------------+------+--------+
+
+delete from monitor where host = 'host1' and ts = 1655276557000;
+
+Affected Rows: 1
+
+select * from monitor;
+
++-------+---------------------+------+--------+
+| host | ts | cpu | memory |
++-------+---------------------+------+--------+
+| host2 | 2022-06-15T07:02:38 | 77.7 | 2048 |
+| host3 | 2022-06-15T07:02:39 | 88.8 | 3072 |
++-------+---------------------+------+--------+
+
+drop table monitor;
+
+Affected Rows: 1
+
diff --git a/tests/cases/standalone/delete/delete.sql b/tests/cases/standalone/delete/delete.sql
new file mode 100644
index 000000000000..2eccf144c723
--- /dev/null
+++ b/tests/cases/standalone/delete/delete.sql
@@ -0,0 +1,11 @@
+CREATE TABLE monitor ( host STRING, ts TIMESTAMP, cpu DOUBLE DEFAULT 0, memory DOUBLE, TIME INDEX (ts), PRIMARY KEY(host)) ;
+
+insert into monitor(host, cpu, memory, ts) values ('host1', 66.6, 1024, 1655276557000), ('host2', 77.7, 2048, 1655276558000), ('host3', 88.8, 3072, 1655276559000);
+
+select * from monitor;
+
+delete from monitor where host = 'host1' and ts = 1655276557000;
+
+select * from monitor;
+
+drop table monitor;
diff --git a/tests/cases/standalone/delete/delete_invalid.result b/tests/cases/standalone/delete/delete_invalid.result
new file mode 100644
index 000000000000..2e269368980f
--- /dev/null
+++ b/tests/cases/standalone/delete/delete_invalid.result
@@ -0,0 +1,28 @@
+CREATE TABLE monitor ( host STRING, ts TIMESTAMP, cpu DOUBLE DEFAULT 0, memory DOUBLE, TIME INDEX (ts), PRIMARY KEY(host)) ;
+
+Affected Rows: 0
+
+insert into monitor(host, cpu, memory, ts) values ('host1', 66.6, 1024, 1655276557000), ('host2', 77.7, 2048, 1655276558000), ('host3', 88.8, 3072, 1655276559000);
+
+Affected Rows: 3
+
+delete from monitor where cpu = 66.6 and ts = 1655276557000;
+
+Error: 1004(InvalidArguments), Missing column host in write batch
+
+delete from monitor where host = 'host1' or ts = 1655276557000;
+
+Error: 1004(InvalidArguments), Not support SQL, error: Not support sql expr:host = 'host1' OR ts = 1655276557000,correct format is tagkey1 = tagvalue1 and ts = value
+
+delete from monitor where host = 'host1' or ts != 1655276557000;
+
+Error: 1004(InvalidArguments), Not support SQL, error: Not support sql expr:host = 'host1' OR ts <> 1655276557000,correct format is tagkey1 = tagvalue1 and ts = value
+
+delete from monitor where ts != 1655276557000;
+
+Error: 1004(InvalidArguments), Not support SQL, error: Not support sql expr:ts <> 1655276557000,correct format is tagkey1 = tagvalue1 and ts = value
+
+drop table monitor;
+
+Affected Rows: 1
+
diff --git a/tests/cases/standalone/delete/delete_invalid.sql b/tests/cases/standalone/delete/delete_invalid.sql
new file mode 100644
index 000000000000..68d7ccf2c323
--- /dev/null
+++ b/tests/cases/standalone/delete/delete_invalid.sql
@@ -0,0 +1,13 @@
+CREATE TABLE monitor ( host STRING, ts TIMESTAMP, cpu DOUBLE DEFAULT 0, memory DOUBLE, TIME INDEX (ts), PRIMARY KEY(host)) ;
+
+insert into monitor(host, cpu, memory, ts) values ('host1', 66.6, 1024, 1655276557000), ('host2', 77.7, 2048, 1655276558000), ('host3', 88.8, 3072, 1655276559000);
+
+delete from monitor where cpu = 66.6 and ts = 1655276557000;
+
+delete from monitor where host = 'host1' or ts = 1655276557000;
+
+delete from monitor where host = 'host1' or ts != 1655276557000;
+
+delete from monitor where ts != 1655276557000;
+
+drop table monitor;
|
feat
|
Support the DELETE SQL statement (#942)
|
0710e6ff36787af20da52bab78f4cc94827e906f
|
2024-07-26 13:10:07
|
dennis zhuang
|
fix: remove to_timezone function (#4439)
| false
|
diff --git a/src/common/function/src/scalars/timestamp.rs b/src/common/function/src/scalars/timestamp.rs
index fbee4279212d..fecf884ce02b 100644
--- a/src/common/function/src/scalars/timestamp.rs
+++ b/src/common/function/src/scalars/timestamp.rs
@@ -14,11 +14,9 @@
use std::sync::Arc;
mod greatest;
-mod to_timezone;
mod to_unixtime;
use greatest::GreatestFunction;
-use to_timezone::ToTimezoneFunction;
use to_unixtime::ToUnixtimeFunction;
use crate::function_registry::FunctionRegistry;
@@ -27,7 +25,6 @@ pub(crate) struct TimestampFunction;
impl TimestampFunction {
pub fn register(registry: &FunctionRegistry) {
- registry.register(Arc::new(ToTimezoneFunction));
registry.register(Arc::new(ToUnixtimeFunction));
registry.register(Arc::new(GreatestFunction));
}
diff --git a/src/common/function/src/scalars/timestamp/to_timezone.rs b/src/common/function/src/scalars/timestamp/to_timezone.rs
deleted file mode 100644
index 1b366ccde3f2..000000000000
--- a/src/common/function/src/scalars/timestamp/to_timezone.rs
+++ /dev/null
@@ -1,313 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use std::fmt;
-use std::sync::Arc;
-
-use common_query::error::{InvalidFuncArgsSnafu, Result, UnsupportedInputDataTypeSnafu};
-use common_query::prelude::Signature;
-use common_time::{Timestamp, Timezone};
-use datatypes::data_type::ConcreteDataType;
-use datatypes::prelude::VectorRef;
-use datatypes::types::TimestampType;
-use datatypes::value::Value;
-use datatypes::vectors::{
- Int64Vector, StringVector, TimestampMicrosecondVector, TimestampMillisecondVector,
- TimestampNanosecondVector, TimestampSecondVector, Vector,
-};
-use snafu::{ensure, OptionExt};
-
-use crate::function::{Function, FunctionContext};
-use crate::helper;
-
-#[derive(Clone, Debug, Default)]
-pub struct ToTimezoneFunction;
-
-const NAME: &str = "to_timezone";
-
-fn convert_to_timezone(arg: &str) -> Option<Timezone> {
- Timezone::from_tz_string(arg).ok()
-}
-
-fn convert_to_timestamp(arg: &Value) -> Option<Timestamp> {
- match arg {
- Value::Timestamp(ts) => Some(*ts),
- Value::Int64(i) => Some(Timestamp::new_millisecond(*i)),
- _ => None,
- }
-}
-
-impl fmt::Display for ToTimezoneFunction {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- write!(f, "TO_TIMEZONE")
- }
-}
-
-impl Function for ToTimezoneFunction {
- fn name(&self) -> &str {
- NAME
- }
-
- fn return_type(&self, input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
- // type checked by signature - MUST BE timestamp
- Ok(input_types[0].clone())
- }
-
- fn signature(&self) -> Signature {
- helper::one_of_sigs2(
- vec![
- ConcreteDataType::int32_datatype(),
- ConcreteDataType::int64_datatype(),
- ConcreteDataType::timestamp_second_datatype(),
- ConcreteDataType::timestamp_millisecond_datatype(),
- ConcreteDataType::timestamp_microsecond_datatype(),
- ConcreteDataType::timestamp_nanosecond_datatype(),
- ],
- vec![ConcreteDataType::string_datatype()],
- )
- }
-
- fn eval(&self, _ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
- ensure!(
- columns.len() == 2,
- InvalidFuncArgsSnafu {
- err_msg: format!(
- "The length of the args is not correct, expect exactly 2, have: {}",
- columns.len()
- ),
- }
- );
-
- let array = columns[0].to_arrow_array();
- let times = match columns[0].data_type() {
- ConcreteDataType::Int64(_) | ConcreteDataType::Int32(_) => {
- let vector = Int64Vector::try_from_arrow_array(array).unwrap();
- (0..vector.len())
- .map(|i| convert_to_timestamp(&vector.get(i)))
- .collect::<Vec<_>>()
- }
- ConcreteDataType::Timestamp(ts) => match ts {
- TimestampType::Second(_) => {
- let vector = TimestampSecondVector::try_from_arrow_array(array).unwrap();
- (0..vector.len())
- .map(|i| convert_to_timestamp(&vector.get(i)))
- .collect::<Vec<_>>()
- }
- TimestampType::Millisecond(_) => {
- let vector = TimestampMillisecondVector::try_from_arrow_array(array).unwrap();
- (0..vector.len())
- .map(|i| convert_to_timestamp(&vector.get(i)))
- .collect::<Vec<_>>()
- }
- TimestampType::Microsecond(_) => {
- let vector = TimestampMicrosecondVector::try_from_arrow_array(array).unwrap();
- (0..vector.len())
- .map(|i| convert_to_timestamp(&vector.get(i)))
- .collect::<Vec<_>>()
- }
- TimestampType::Nanosecond(_) => {
- let vector = TimestampNanosecondVector::try_from_arrow_array(array).unwrap();
- (0..vector.len())
- .map(|i| convert_to_timestamp(&vector.get(i)))
- .collect::<Vec<_>>()
- }
- },
- _ => UnsupportedInputDataTypeSnafu {
- function: NAME,
- datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(),
- }
- .fail()?,
- };
-
- let tzs = {
- let array = columns[1].to_arrow_array();
- let vector = StringVector::try_from_arrow_array(&array)
- .ok()
- .with_context(|| UnsupportedInputDataTypeSnafu {
- function: NAME,
- datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(),
- })?;
- (0..vector.len())
- .map(|i| convert_to_timezone(&vector.get(i).to_string()))
- .collect::<Vec<_>>()
- };
-
- let result = times
- .iter()
- .zip(tzs.iter())
- .map(|(time, tz)| match (time, tz) {
- (Some(time), _) => Some(time.to_timezone_aware_string(tz.as_ref())),
- _ => None,
- })
- .collect::<Vec<Option<String>>>();
- Ok(Arc::new(StringVector::from(result)))
- }
-}
-
-#[cfg(test)]
-mod tests {
-
- use datatypes::scalars::ScalarVector;
- use datatypes::timestamp::{
- TimestampMicrosecond, TimestampMillisecond, TimestampNanosecond, TimestampSecond,
- };
- use datatypes::vectors::{Int64Vector, StringVector};
-
- use super::*;
-
- #[test]
- fn test_timestamp_to_timezone() {
- let f = ToTimezoneFunction;
- assert_eq!("to_timezone", f.name());
-
- let results = vec![
- Some("1969-12-31 19:00:01"),
- None,
- Some("1970-01-01 03:00:01"),
- None,
- ];
- let times: Vec<Option<TimestampSecond>> = vec![
- Some(TimestampSecond::new(1)),
- None,
- Some(TimestampSecond::new(1)),
- None,
- ];
- let ts_vector: TimestampSecondVector =
- TimestampSecondVector::from_owned_iterator(times.into_iter());
- let tzs = vec![Some("America/New_York"), None, Some("Europe/Moscow"), None];
- let args: Vec<VectorRef> = vec![
- Arc::new(ts_vector),
- Arc::new(StringVector::from(tzs.clone())),
- ];
- let vector = f.eval(FunctionContext::default(), &args).unwrap();
- assert_eq!(4, vector.len());
- let expect_times: VectorRef = Arc::new(StringVector::from(results));
- assert_eq!(expect_times, vector);
-
- let results = vec![
- Some("1969-12-31 19:00:00.001"),
- None,
- Some("1970-01-01 03:00:00.001"),
- None,
- ];
- let times: Vec<Option<TimestampMillisecond>> = vec![
- Some(TimestampMillisecond::new(1)),
- None,
- Some(TimestampMillisecond::new(1)),
- None,
- ];
- let ts_vector: TimestampMillisecondVector =
- TimestampMillisecondVector::from_owned_iterator(times.into_iter());
- let args: Vec<VectorRef> = vec![
- Arc::new(ts_vector),
- Arc::new(StringVector::from(tzs.clone())),
- ];
- let vector = f.eval(FunctionContext::default(), &args).unwrap();
- assert_eq!(4, vector.len());
- let expect_times: VectorRef = Arc::new(StringVector::from(results));
- assert_eq!(expect_times, vector);
-
- let results = vec![
- Some("1969-12-31 19:00:00.000001"),
- None,
- Some("1970-01-01 03:00:00.000001"),
- None,
- ];
- let times: Vec<Option<TimestampMicrosecond>> = vec![
- Some(TimestampMicrosecond::new(1)),
- None,
- Some(TimestampMicrosecond::new(1)),
- None,
- ];
- let ts_vector: TimestampMicrosecondVector =
- TimestampMicrosecondVector::from_owned_iterator(times.into_iter());
-
- let args: Vec<VectorRef> = vec![
- Arc::new(ts_vector),
- Arc::new(StringVector::from(tzs.clone())),
- ];
- let vector = f.eval(FunctionContext::default(), &args).unwrap();
- assert_eq!(4, vector.len());
- let expect_times: VectorRef = Arc::new(StringVector::from(results));
- assert_eq!(expect_times, vector);
-
- let results = vec![
- Some("1969-12-31 19:00:00.000000001"),
- None,
- Some("1970-01-01 03:00:00.000000001"),
- None,
- ];
- let times: Vec<Option<TimestampNanosecond>> = vec![
- Some(TimestampNanosecond::new(1)),
- None,
- Some(TimestampNanosecond::new(1)),
- None,
- ];
- let ts_vector: TimestampNanosecondVector =
- TimestampNanosecondVector::from_owned_iterator(times.into_iter());
-
- let args: Vec<VectorRef> = vec![
- Arc::new(ts_vector),
- Arc::new(StringVector::from(tzs.clone())),
- ];
- let vector = f.eval(FunctionContext::default(), &args).unwrap();
- assert_eq!(4, vector.len());
- let expect_times: VectorRef = Arc::new(StringVector::from(results));
- assert_eq!(expect_times, vector);
- }
-
- #[test]
- fn test_numerical_to_timezone() {
- let f = ToTimezoneFunction;
- let results = vec![
- Some("1969-12-31 19:00:00.001"),
- None,
- Some("1970-01-01 03:00:00.001"),
- None,
- Some("2024-03-26 23:01:50"),
- None,
- Some("2024-03-27 06:02:00"),
- None,
- ];
- let times: Vec<Option<i64>> = vec![
- Some(1),
- None,
- Some(1),
- None,
- Some(1711508510000),
- None,
- Some(1711508520000),
- None,
- ];
- let ts_vector: Int64Vector = Int64Vector::from_owned_iterator(times.into_iter());
- let tzs = vec![
- Some("America/New_York"),
- None,
- Some("Europe/Moscow"),
- None,
- Some("America/New_York"),
- None,
- Some("Europe/Moscow"),
- None,
- ];
- let args: Vec<VectorRef> = vec![
- Arc::new(ts_vector),
- Arc::new(StringVector::from(tzs.clone())),
- ];
- let vector = f.eval(FunctionContext::default(), &args).unwrap();
- assert_eq!(8, vector.len());
- let expect_times: VectorRef = Arc::new(StringVector::from(results));
- assert_eq!(expect_times, vector);
- }
-}
diff --git a/tests/cases/standalone/common/function/time.result b/tests/cases/standalone/common/function/time.result
index 83589ced352a..123b6a3f2f7c 100644
--- a/tests/cases/standalone/common/function/time.result
+++ b/tests/cases/standalone/common/function/time.result
@@ -20,51 +20,3 @@ select GREATEST('2000-02-11'::Date, '2020-12-30'::Date);
| 2020-12-30 |
+-------------------------------------------------+
-select to_timezone('2022-09-20T14:16:43.012345+08:00', 'Europe/Berlin');
-
-+-----------------------------------------------------------------------------+
-| to_timezone(Utf8("2022-09-20T14:16:43.012345+08:00"),Utf8("Europe/Berlin")) |
-+-----------------------------------------------------------------------------+
-| 2022-09-20 08:16:43.012345 |
-+-----------------------------------------------------------------------------+
-
-select to_timezone('2022-09-20T14:16:43.012345+08:00'::Timestamp, 'Europe/Berlin');
-
-+------------------------------------------------------------------------------------------------------------------------------+
-| to_timezone(arrow_cast(Utf8("2022-09-20T14:16:43.012345+08:00"),Utf8("Timestamp(Millisecond, None)")),Utf8("Europe/Berlin")) |
-+------------------------------------------------------------------------------------------------------------------------------+
-| 2022-09-20 08:16:43.012 |
-+------------------------------------------------------------------------------------------------------------------------------+
-
-select to_timezone('2024-03-29T14:16:43.012345Z', 'Asia/Shanghai');
-
-+------------------------------------------------------------------------+
-| to_timezone(Utf8("2024-03-29T14:16:43.012345Z"),Utf8("Asia/Shanghai")) |
-+------------------------------------------------------------------------+
-| 2024-03-29 22:16:43.012345 |
-+------------------------------------------------------------------------+
-
-select to_timezone('2024-03-29T14:16:43.012345Z'::Timestamp, 'Asia/Shanghai');
-
-+-------------------------------------------------------------------------------------------------------------------------+
-| to_timezone(arrow_cast(Utf8("2024-03-29T14:16:43.012345Z"),Utf8("Timestamp(Millisecond, None)")),Utf8("Asia/Shanghai")) |
-+-------------------------------------------------------------------------------------------------------------------------+
-| 2024-03-29 22:16:43.012 |
-+-------------------------------------------------------------------------------------------------------------------------+
-
-select to_timezone(1709992225, 'Asia/Shanghai');
-
-+------------------------------------------------------+
-| to_timezone(Int64(1709992225),Utf8("Asia/Shanghai")) |
-+------------------------------------------------------+
-| 1970-01-21 02:59:52.225 |
-+------------------------------------------------------+
-
-select to_timezone(1711508510000::INT64, 'Asia/Shanghai');
-
-+-----------------------------------------------------------------------------------+
-| to_timezone(arrow_cast(Int64(1711508510000),Utf8("Int64")),Utf8("Asia/Shanghai")) |
-+-----------------------------------------------------------------------------------+
-| 2024-03-27 11:01:50 |
-+-----------------------------------------------------------------------------------+
-
diff --git a/tests/cases/standalone/common/function/time.sql b/tests/cases/standalone/common/function/time.sql
index 678bb48af329..46d5c2347fd5 100644
--- a/tests/cases/standalone/common/function/time.sql
+++ b/tests/cases/standalone/common/function/time.sql
@@ -3,13 +3,5 @@
select current_time();
select GREATEST('1999-01-30', '2023-03-01');
-select GREATEST('2000-02-11'::Date, '2020-12-30'::Date);
-
-select to_timezone('2022-09-20T14:16:43.012345+08:00', 'Europe/Berlin');
-select to_timezone('2022-09-20T14:16:43.012345+08:00'::Timestamp, 'Europe/Berlin');
-select to_timezone('2024-03-29T14:16:43.012345Z', 'Asia/Shanghai');
-select to_timezone('2024-03-29T14:16:43.012345Z'::Timestamp, 'Asia/Shanghai');
-select to_timezone(1709992225, 'Asia/Shanghai');
-
-select to_timezone(1711508510000::INT64, 'Asia/Shanghai');
+select GREATEST('2000-02-11'::Date, '2020-12-30'::Date);
|
fix
|
remove to_timezone function (#4439)
|
6c4b8b63a5b47b2dd9eeb69feeb2fc691416eab1
|
2024-08-01 12:45:36
|
Yingwen
|
fix: notify flush receiver after write buffer is released (#4476)
| false
|
diff --git a/src/mito2/src/flush.rs b/src/mito2/src/flush.rs
index 0bfd01dd4fd9..05561b6080ff 100644
--- a/src/mito2/src/flush.rs
+++ b/src/mito2/src/flush.rs
@@ -23,7 +23,7 @@ use smallvec::SmallVec;
use snafu::ResultExt;
use store_api::storage::RegionId;
use strum::IntoStaticStr;
-use tokio::sync::mpsc;
+use tokio::sync::{mpsc, watch};
use crate::access_layer::{AccessLayerRef, OperationType, SstWriteRequest};
use crate::cache::CacheManagerRef;
@@ -88,6 +88,9 @@ pub struct WriteBufferManagerImpl {
memory_used: AtomicUsize,
/// Memory that hasn't been scheduled to free (e.g. used by mutable memtables).
memory_active: AtomicUsize,
+ /// Optional notifier.
+ /// The manager can wake up the worker once we free the write buffer.
+ notifier: Option<watch::Sender<()>>,
}
impl WriteBufferManagerImpl {
@@ -98,9 +101,16 @@ impl WriteBufferManagerImpl {
mutable_limit: Self::get_mutable_limit(global_write_buffer_size),
memory_used: AtomicUsize::new(0),
memory_active: AtomicUsize::new(0),
+ notifier: None,
}
}
+ /// Attaches a notifier to the manager.
+ pub fn with_notifier(mut self, notifier: watch::Sender<()>) -> Self {
+ self.notifier = Some(notifier);
+ self
+ }
+
/// Returns memory usage of mutable memtables.
pub fn mutable_usage(&self) -> usize {
self.memory_active.load(Ordering::Relaxed)
@@ -159,6 +169,12 @@ impl WriteBufferManager for WriteBufferManagerImpl {
fn free_mem(&self, mem: usize) {
self.memory_used.fetch_sub(mem, Ordering::Relaxed);
+ if let Some(notifier) = &self.notifier {
+ // Notifies the worker after the memory usage is decreased. When we drop the memtable
+ // outside of the worker, the worker may still stall requests because the memory usage
+ // is not updated. So we need to notify the worker to handle stalled requests again.
+ let _ = notifier.send(());
+ }
}
fn memory_usage(&self) -> usize {
@@ -786,6 +802,18 @@ mod tests {
assert!(manager.should_flush_engine());
}
+ #[test]
+ fn test_manager_notify() {
+ let (sender, receiver) = watch::channel(());
+ let manager = WriteBufferManagerImpl::new(1000).with_notifier(sender);
+ manager.reserve_mem(500);
+ assert!(!receiver.has_changed().unwrap());
+ manager.schedule_free_mem(500);
+ assert!(!receiver.has_changed().unwrap());
+ manager.free_mem(500);
+ assert!(receiver.has_changed().unwrap());
+ }
+
#[tokio::test]
async fn test_schedule_empty() {
let env = SchedulerEnv::new().await;
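
For illustration, a minimal sketch of the notification pattern above (hypothetical types, assuming tokio with the sync, macros and rt features): the manager pings a watch sender only after decreasing its usage counter, so a worker parked on the receiver wakes up and can retry stalled writes against the updated usage.

use std::sync::atomic::{AtomicUsize, Ordering};
use tokio::sync::watch;

struct BufferManager {
    used: AtomicUsize,
    notifier: Option<watch::Sender<()>>,
}

impl BufferManager {
    fn reserve(&self, bytes: usize) {
        self.used.fetch_add(bytes, Ordering::Relaxed);
    }

    fn free(&self, bytes: usize) {
        self.used.fetch_sub(bytes, Ordering::Relaxed);
        // Notify after the counter is decreased so the worker observes the new usage.
        if let Some(notifier) = &self.notifier {
            let _ = notifier.send(());
        }
    }
}

#[tokio::main(flavor = "current_thread")]
async fn main() {
    let (tx, mut rx) = watch::channel(());
    let manager = BufferManager {
        used: AtomicUsize::new(0),
        notifier: Some(tx),
    };

    manager.reserve(512);
    manager.free(512);

    // Worker side: wakes up because free() marked the channel as changed.
    rx.changed().await.unwrap();
    assert_eq!(manager.used.load(Ordering::Relaxed), 0);
}
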
diff --git a/src/mito2/src/metrics.rs b/src/mito2/src/metrics.rs
index b9ef03f1c1d7..963330948955 100644
--- a/src/mito2/src/metrics.rs
+++ b/src/mito2/src/metrics.rs
@@ -34,9 +34,13 @@ lazy_static! {
/// Global memtable dictionary size in bytes.
pub static ref MEMTABLE_DICT_BYTES: IntGauge =
register_int_gauge!("greptime_mito_memtable_dict_bytes", "mito memtable dictionary size in bytes").unwrap();
- /// Gauge for open regions
- pub static ref REGION_COUNT: IntGauge =
- register_int_gauge!("greptime_mito_region_count", "mito region count").unwrap();
+ /// Gauge for open regions in each worker.
+ pub static ref REGION_COUNT: IntGaugeVec =
+ register_int_gauge_vec!(
+ "greptime_mito_region_count",
+ "mito region count in each worker",
+ &[WORKER_LABEL],
+ ).unwrap();
/// Elapsed time to handle requests.
pub static ref HANDLE_REQUEST_ELAPSED: HistogramVec = register_histogram_vec!(
"greptime_mito_handle_request_elapsed",
diff --git a/src/mito2/src/worker.rs b/src/mito2/src/worker.rs
index c8aa1bb340fb..82b48bcebb29 100644
--- a/src/mito2/src/worker.rs
+++ b/src/mito2/src/worker.rs
@@ -51,7 +51,7 @@ use crate::config::MitoConfig;
use crate::error::{JoinSnafu, Result, WorkerStoppedSnafu};
use crate::flush::{FlushScheduler, WriteBufferManagerImpl, WriteBufferManagerRef};
use crate::memtable::MemtableBuilderProvider;
-use crate::metrics::WRITE_STALL_TOTAL;
+use crate::metrics::{REGION_COUNT, WRITE_STALL_TOTAL};
use crate::region::{MitoRegionRef, OpeningRegions, OpeningRegionsRef, RegionMap, RegionMapRef};
use crate::request::{
BackgroundNotify, DdlRequest, SenderDdlRequest, SenderWriteRequest, WorkerRequest,
@@ -130,9 +130,11 @@ impl WorkerGroup {
object_store_manager: ObjectStoreManagerRef,
plugins: Plugins,
) -> Result<WorkerGroup> {
- let write_buffer_manager = Arc::new(WriteBufferManagerImpl::new(
- config.global_write_buffer_size.as_bytes() as usize,
- ));
+ let (flush_sender, flush_receiver) = watch::channel(());
+ let write_buffer_manager = Arc::new(
+ WriteBufferManagerImpl::new(config.global_write_buffer_size.as_bytes() as usize)
+ .with_notifier(flush_sender.clone()),
+ );
let puffin_manager_factory = PuffinManagerFactory::new(
&config.index.aux_path,
config.index.staging_size.as_bytes(),
@@ -165,7 +167,6 @@ impl WorkerGroup {
.build(),
);
let time_provider = Arc::new(StdTimeProvider);
- let (flush_sender, flush_receiver) = watch::channel(());
let workers = (0..config.num_workers)
.map(|id| {
@@ -265,10 +266,12 @@ impl WorkerGroup {
listener: Option<crate::engine::listener::EventListenerRef>,
time_provider: TimeProviderRef,
) -> Result<WorkerGroup> {
+ let (flush_sender, flush_receiver) = watch::channel(());
let write_buffer_manager = write_buffer_manager.unwrap_or_else(|| {
- Arc::new(WriteBufferManagerImpl::new(
- config.global_write_buffer_size.as_bytes() as usize,
- ))
+ Arc::new(
+ WriteBufferManagerImpl::new(config.global_write_buffer_size.as_bytes() as usize)
+ .with_notifier(flush_sender.clone()),
+ )
});
let scheduler = Arc::new(LocalScheduler::new(config.max_background_jobs));
let purge_scheduler = Arc::new(LocalScheduler::new(config.max_background_jobs));
@@ -297,7 +300,6 @@ impl WorkerGroup {
.write_cache(write_cache)
.build(),
);
- let (flush_sender, flush_receiver) = watch::channel(());
let workers = (0..config.num_workers)
.map(|id| {
WorkerStarter {
@@ -401,6 +403,7 @@ impl<S: LogStore> WorkerStarter<S> {
let running = Arc::new(AtomicBool::new(true));
let now = self.time_provider.current_time_millis();
+ let id_string = self.id.to_string();
let mut worker_thread = RegionWorkerLoop {
id: self.id,
config: self.config.clone(),
@@ -436,7 +439,8 @@ impl<S: LogStore> WorkerStarter<S> {
last_periodical_check_millis: now,
flush_sender: self.flush_sender,
flush_receiver: self.flush_receiver,
- stalled_count: WRITE_STALL_TOTAL.with_label_values(&[&self.id.to_string()]),
+ stalled_count: WRITE_STALL_TOTAL.with_label_values(&[&id_string]),
+ region_count: REGION_COUNT.with_label_values(&[&id_string]),
};
let handle = common_runtime::spawn_global(async move {
worker_thread.run().await;
@@ -623,6 +627,8 @@ struct RegionWorkerLoop<S> {
flush_receiver: watch::Receiver<()>,
/// Gauge of stalled request count.
stalled_count: IntGauge,
+ /// Gauge of regions in the worker.
+ region_count: IntGauge,
}
impl<S: LogStore> RegionWorkerLoop<S> {
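
For illustration, a small sketch of the per-worker gauge pattern introduced above, assuming the prometheus crate and an illustrative metric name: one IntGaugeVec labelled by worker id, with each worker caching its own child gauge and bumping it on region open/close.

use prometheus::{register_int_gauge_vec, IntGaugeVec};

fn main() {
    let region_count: IntGaugeVec = register_int_gauge_vec!(
        "example_region_count",
        "region count in each worker",
        &["worker"]
    )
    .unwrap();

    // Each worker resolves its child gauge once, then only calls inc()/dec(),
    // the same way RegionWorkerLoop keeps a per-worker region_count.
    let worker_0 = region_count.with_label_values(&["0"]);
    let worker_1 = region_count.with_label_values(&["1"]);

    worker_0.inc(); // a region opened on worker 0
    worker_0.inc(); // another region opened on worker 0
    worker_1.inc(); // a region opened on worker 1
    worker_0.dec(); // a region closed on worker 0

    assert_eq!(worker_0.get(), 1);
    assert_eq!(worker_1.get(), 1);
}
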
diff --git a/src/mito2/src/worker/handle_close.rs b/src/mito2/src/worker/handle_close.rs
index 26a6f9a34dde..8e33fcb1eb9d 100644
--- a/src/mito2/src/worker/handle_close.rs
+++ b/src/mito2/src/worker/handle_close.rs
@@ -19,7 +19,6 @@ use store_api::region_request::AffectedRows;
use store_api::storage::RegionId;
use crate::error::Result;
-use crate::metrics::REGION_COUNT;
use crate::worker::RegionWorkerLoop;
impl<S> RegionWorkerLoop<S> {
@@ -31,7 +30,7 @@ impl<S> RegionWorkerLoop<S> {
return Ok(0);
};
- info!("Try to close region {}", region_id);
+ info!("Try to close region {}, worker: {}", region_id, self.id);
region.stop().await;
self.regions.remove_region(region_id);
@@ -40,9 +39,9 @@ impl<S> RegionWorkerLoop<S> {
// Clean compaction status.
self.compaction_scheduler.on_region_closed(region_id);
- info!("Region {} closed", region_id);
+ info!("Region {} closed, worker: {}", region_id, self.id);
- REGION_COUNT.dec();
+ self.region_count.dec();
Ok(0)
}
diff --git a/src/mito2/src/worker/handle_create.rs b/src/mito2/src/worker/handle_create.rs
index e99c0a810237..863435c7eabe 100644
--- a/src/mito2/src/worker/handle_create.rs
+++ b/src/mito2/src/worker/handle_create.rs
@@ -24,7 +24,6 @@ use store_api::region_request::{AffectedRows, RegionCreateRequest};
use store_api::storage::RegionId;
use crate::error::{InvalidMetadataSnafu, Result};
-use crate::metrics::REGION_COUNT;
use crate::region::opener::{check_recovered_region, RegionOpener};
use crate::worker::RegionWorkerLoop;
@@ -70,9 +69,13 @@ impl<S: LogStore> RegionWorkerLoop<S> {
.create_or_open(&self.config, &self.wal)
.await?;
- info!("A new region created, region: {:?}", region.metadata());
+ info!(
+ "A new region created, worker: {}, region: {:?}",
+ self.id,
+ region.metadata()
+ );
- REGION_COUNT.inc();
+ self.region_count.inc();
// Insert the MitoRegion into the RegionMap.
self.regions.insert_region(Arc::new(region));
diff --git a/src/mito2/src/worker/handle_drop.rs b/src/mito2/src/worker/handle_drop.rs
index ca1466249759..51b42acb406f 100644
--- a/src/mito2/src/worker/handle_drop.rs
+++ b/src/mito2/src/worker/handle_drop.rs
@@ -28,7 +28,6 @@ use store_api::storage::RegionId;
use tokio::time::sleep;
use crate::error::{OpenDalSnafu, Result};
-use crate::metrics::REGION_COUNT;
use crate::region::{RegionMapRef, RegionState};
use crate::worker::{RegionWorkerLoop, DROPPING_MARKER_FILE};
@@ -45,7 +44,7 @@ where
) -> Result<AffectedRows> {
let region = self.regions.writable_region(region_id)?;
- info!("Try to drop region: {}", region_id);
+ info!("Try to drop region: {}, worker: {}", region_id, self.id);
// Marks the region as dropping.
region.set_dropping()?;
@@ -93,7 +92,7 @@ where
region_id
);
- REGION_COUNT.dec();
+ self.region_count.dec();
// Detaches a background task to delete the region dir
let region_dir = region.access_layer.region_dir().to_owned();
diff --git a/src/mito2/src/worker/handle_open.rs b/src/mito2/src/worker/handle_open.rs
index 7fe1d3c322a1..fa4f48704009 100644
--- a/src/mito2/src/worker/handle_open.rs
+++ b/src/mito2/src/worker/handle_open.rs
@@ -26,7 +26,6 @@ use store_api::storage::RegionId;
use crate::error::{
ObjectStoreNotFoundSnafu, OpenDalSnafu, OpenRegionSnafu, RegionNotFoundSnafu, Result,
};
-use crate::metrics::REGION_COUNT;
use crate::region::opener::RegionOpener;
use crate::request::OptionOutputTx;
use crate::wal::entry_distributor::WalEntryReceiver;
@@ -56,7 +55,10 @@ impl<S: LogStore> RegionWorkerLoop<S> {
.context(OpenDalSnafu)?
{
let result = remove_region_dir_once(&request.region_dir, object_store).await;
- info!("Region {} is dropped, result: {:?}", region_id, result);
+ info!(
+ "Region {} is dropped, worker: {}, result: {:?}",
+ region_id, self.id, result
+ );
return RegionNotFoundSnafu { region_id }.fail();
}
@@ -84,7 +86,7 @@ impl<S: LogStore> RegionWorkerLoop<S> {
sender.send(Err(err));
return;
}
- info!("Try to open region {}", region_id);
+ info!("Try to open region {}, worker: {}", region_id, self.id);
// Open region from specific region dir.
let opener = match RegionOpener::new(
@@ -112,12 +114,14 @@ impl<S: LogStore> RegionWorkerLoop<S> {
let wal = self.wal.clone();
let config = self.config.clone();
let opening_regions = self.opening_regions.clone();
+ let region_count = self.region_count.clone();
+ let worker_id = self.id;
opening_regions.insert_sender(region_id, sender);
common_runtime::spawn_global(async move {
match opener.open(&config, &wal).await {
Ok(region) => {
- info!("Region {} is opened", region_id);
- REGION_COUNT.inc();
+ info!("Region {} is opened, worker: {}", region_id, worker_id);
+ region_count.inc();
// Insert the Region into the RegionMap.
regions.insert_region(Arc::new(region));
|
fix
|
notify flush receiver after write buffer is released (#4476)
|
ccd666aa9b71aba752c7d21fba447848b1604335
|
2023-06-02 10:46:59
|
Lei, HUANG
|
fix: avoid writing manifest and wal if no files are actually flushed (#1698)
| false
|
diff --git a/src/storage/src/compaction/task.rs b/src/storage/src/compaction/task.rs
index b37b3ff9bf50..233cf1a2b71a 100644
--- a/src/storage/src/compaction/task.rs
+++ b/src/storage/src/compaction/task.rs
@@ -16,7 +16,7 @@ use std::collections::HashSet;
use std::fmt::{Debug, Formatter};
use common_base::readable_size::ReadableSize;
-use common_telemetry::{debug, error, timer};
+use common_telemetry::{debug, error, info, timer};
use store_api::logstore::LogStore;
use store_api::storage::RegionId;
@@ -148,6 +148,10 @@ impl<S: LogStore> CompactionTask for CompactionTaskImpl<S> {
e
})?;
compacted.extend(self.expired_ssts.iter().map(FileHandle::meta));
+
+ let input_ids = compacted.iter().map(|f| f.file_id).collect::<Vec<_>>();
+ let output_ids = output.iter().map(|f| f.file_id).collect::<Vec<_>>();
+ info!("Compacting SST files, input: {input_ids:?}, output: {output_ids:?}");
self.write_manifest_and_apply(output, compacted)
.await
.map_err(|e| {
diff --git a/src/storage/src/flush.rs b/src/storage/src/flush.rs
index 6501537c62f0..a5bd8f91498b 100644
--- a/src/storage/src/flush.rs
+++ b/src/storage/src/flush.rs
@@ -239,6 +239,10 @@ impl<S: LogStore> FlushJob<S> {
let _timer = timer!(FLUSH_ELAPSED);
let file_metas = self.write_memtables_to_layer().await?;
+ if file_metas.is_empty() {
+            // Skip writing the manifest and WAL if no files were actually flushed.
+ return Ok(());
+ }
self.write_manifest_and_apply(&file_metas).await?;
Ok(())
@@ -287,13 +291,14 @@ impl<S: LogStore> FlushJob<S> {
});
}
- let metas = futures_util::future::try_join_all(futures)
+ let metas: Vec<_> = futures_util::future::try_join_all(futures)
.await?
.into_iter()
.flatten()
.collect();
- logging::info!("Successfully flush memtables to files: {:?}", metas);
+ let file_ids = metas.iter().map(|f| f.file_id).collect::<Vec<_>>();
+        logging::info!("Successfully flushed memtables, region: {region_id}, files: {file_ids:?}");
Ok(metas)
}
|
fix
|
avoid writing manifest and wal if no files are actually flushed (#1698)
|
05f21679d6060987642a35d40b53624d7da3f722
|
2025-01-23 14:14:17
|
Weny Xu
|
feat: replace `DensePrimaryKeyCodec` with `Arc<dyn PrimaryKeyCodec>` (#5408)
| false
|
diff --git a/src/mito2/src/memtable/bulk/context.rs b/src/mito2/src/memtable/bulk/context.rs
index a3c019ebec40..0380afd7e086 100644
--- a/src/mito2/src/memtable/bulk/context.rs
+++ b/src/mito2/src/memtable/bulk/context.rs
@@ -22,7 +22,7 @@ use store_api::metadata::RegionMetadataRef;
use store_api::storage::ColumnId;
use table::predicate::Predicate;
-use crate::row_converter::DensePrimaryKeyCodec;
+use crate::row_converter::{build_primary_key_codec, DensePrimaryKeyCodec};
use crate::sst::parquet::file_range::RangeBase;
use crate::sst::parquet::format::ReadFormat;
use crate::sst::parquet::reader::SimpleFilterContext;
@@ -41,7 +41,7 @@ impl BulkIterContext {
projection: &Option<&[ColumnId]>,
predicate: Option<Predicate>,
) -> Self {
- let codec = DensePrimaryKeyCodec::new(®ion_metadata);
+ let codec = build_primary_key_codec(®ion_metadata);
let simple_filters = predicate
.as_ref()
diff --git a/src/mito2/src/memtable/bulk/part.rs b/src/mito2/src/memtable/bulk/part.rs
index 6c132ce64458..07f5fda5295e 100644
--- a/src/mito2/src/memtable/bulk/part.rs
+++ b/src/mito2/src/memtable/bulk/part.rs
@@ -562,7 +562,7 @@ mod tests {
let batch_values = batches
.into_iter()
.map(|b| {
- let pk_values = pk_encoder.decode_dense(b.primary_key()).unwrap();
+ let pk_values = pk_encoder.decode(b.primary_key()).unwrap().into_dense();
let timestamps = b
.timestamps()
.as_any()
diff --git a/src/mito2/src/memtable/partition_tree.rs b/src/mito2/src/memtable/partition_tree.rs
index 4bcd432d414a..78a8b7d847d9 100644
--- a/src/mito2/src/memtable/partition_tree.rs
+++ b/src/mito2/src/memtable/partition_tree.rs
@@ -31,9 +31,8 @@ use std::sync::atomic::{AtomicI64, AtomicU64, AtomicUsize, Ordering};
use std::sync::Arc;
use common_base::readable_size::ReadableSize;
-pub(crate) use primary_key_filter::DensePrimaryKeyFilter;
+pub(crate) use primary_key_filter::{DensePrimaryKeyFilter, SparsePrimaryKeyFilter};
use serde::{Deserialize, Serialize};
-use store_api::codec::PrimaryKeyEncoding;
use store_api::metadata::RegionMetadataRef;
use store_api::storage::{ColumnId, SequenceNumber};
use table::predicate::Predicate;
@@ -48,7 +47,7 @@ use crate::memtable::{
MemtableId, MemtableRange, MemtableRangeContext, MemtableRanges, MemtableRef, MemtableStats,
};
use crate::region::options::MergeMode;
-use crate::row_converter::{DensePrimaryKeyCodec, PrimaryKeyCodec};
+use crate::row_converter::{build_primary_key_codec, PrimaryKeyCodec};
/// Use `1/DICTIONARY_SIZE_FACTOR` of OS memory as dictionary size.
pub(crate) const DICTIONARY_SIZE_FACTOR: u64 = 8;
@@ -330,22 +329,14 @@ impl PartitionTreeMemtableBuilder {
impl MemtableBuilder for PartitionTreeMemtableBuilder {
fn build(&self, id: MemtableId, metadata: &RegionMetadataRef) -> MemtableRef {
- match metadata.primary_key_encoding {
- PrimaryKeyEncoding::Dense => {
- let codec = Arc::new(DensePrimaryKeyCodec::new(metadata));
- Arc::new(PartitionTreeMemtable::new(
- id,
- codec,
- metadata.clone(),
- self.write_buffer_manager.clone(),
- &self.config,
- ))
- }
- PrimaryKeyEncoding::Sparse => {
- //TODO(weny): Implement sparse primary key encoding.
- todo!()
- }
- }
+ let codec = build_primary_key_codec(metadata);
+ Arc::new(PartitionTreeMemtable::new(
+ id,
+ codec,
+ metadata.clone(),
+ self.write_buffer_manager.clone(),
+ &self.config,
+ ))
}
}
@@ -382,7 +373,7 @@ mod tests {
use store_api::storage::RegionId;
use super::*;
- use crate::row_converter::{DensePrimaryKeyCodec, PrimaryKeyCodecExt};
+ use crate::row_converter::DensePrimaryKeyCodec;
use crate::test_util::memtable_util::{
self, collect_iter_timestamps, region_metadata_to_row_schema,
};
@@ -794,7 +785,7 @@ mod tests {
let mut reader = new_memtable.iter(None, None, None).unwrap();
let batch = reader.next().unwrap().unwrap();
- let pk = codec.decode(batch.primary_key()).unwrap();
+ let pk = codec.decode(batch.primary_key()).unwrap().into_dense();
if let Value::String(s) = &pk[2] {
assert_eq!("10min", s.as_utf8());
} else {
diff --git a/src/mito2/src/memtable/partition_tree/tree.rs b/src/mito2/src/memtable/partition_tree/tree.rs
index d02b13ddb47a..4645ca7ab953 100644
--- a/src/mito2/src/memtable/partition_tree/tree.rs
+++ b/src/mito2/src/memtable/partition_tree/tree.rs
@@ -96,6 +96,21 @@ impl PartitionTree {
}
}
+ fn verify_primary_key_length(&self, kv: &KeyValue) -> Result<()> {
+ // The sparse primary key codec does not have a fixed number of fields.
+ if let Some(expected_num_fields) = self.row_codec.num_fields() {
+ ensure!(
+ expected_num_fields == kv.num_primary_keys(),
+ PrimaryKeyLengthMismatchSnafu {
+ expect: expected_num_fields,
+ actual: kv.num_primary_keys(),
+ }
+ );
+ }
+ // TODO(weny): verify the primary key length for sparse primary key codec.
+ Ok(())
+ }
+
// TODO(yingwen): The size computed from values is inaccurate.
/// Write key-values into the tree.
///
@@ -110,13 +125,7 @@ impl PartitionTree {
let has_pk = !self.metadata.primary_key.is_empty();
for kv in kvs.iter() {
- ensure!(
- kv.num_primary_keys() == self.row_codec.num_fields(),
- PrimaryKeyLengthMismatchSnafu {
- expect: self.row_codec.num_fields(),
- actual: kv.num_primary_keys(),
- }
- );
+ self.verify_primary_key_length(&kv)?;
// Safety: timestamp of kv must be both present and a valid timestamp value.
let ts = kv.timestamp().as_timestamp().unwrap().unwrap().value();
metrics.min_ts = metrics.min_ts.min(ts);
@@ -161,13 +170,7 @@ impl PartitionTree {
) -> Result<()> {
let has_pk = !self.metadata.primary_key.is_empty();
- ensure!(
- kv.num_primary_keys() == self.row_codec.num_fields(),
- PrimaryKeyLengthMismatchSnafu {
- expect: self.row_codec.num_fields(),
- actual: kv.num_primary_keys(),
- }
- );
+ self.verify_primary_key_length(&kv)?;
// Safety: timestamp of kv must be both present and a valid timestamp value.
let ts = kv.timestamp().as_timestamp().unwrap().unwrap().value();
metrics.min_ts = metrics.min_ts.min(ts);
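
For illustration, a simplified stand-in for the check above: a codec held behind a trait object reports Some(n) key fields for a fixed-width (dense) encoding and None for the sparse one, so the primary key length is only verified when a fixed width exists.

use std::sync::Arc;

trait KeyCodec {
    /// Some(n) for codecs with a fixed number of key fields, None otherwise.
    fn num_fields(&self) -> Option<usize>;
}

struct DenseCodec {
    fields: usize,
}
struct SparseCodec;

impl KeyCodec for DenseCodec {
    fn num_fields(&self) -> Option<usize> {
        Some(self.fields)
    }
}

impl KeyCodec for SparseCodec {
    fn num_fields(&self) -> Option<usize> {
        None
    }
}

fn verify_primary_key_length(codec: &Arc<dyn KeyCodec>, actual: usize) -> Result<(), String> {
    if let Some(expected) = codec.num_fields() {
        if expected != actual {
            return Err(format!(
                "primary key length mismatch, expect: {expected}, actual: {actual}"
            ));
        }
    }
    Ok(())
}

fn main() {
    let dense: Arc<dyn KeyCodec> = Arc::new(DenseCodec { fields: 2 });
    let sparse: Arc<dyn KeyCodec> = Arc::new(SparseCodec);

    assert!(verify_primary_key_length(&dense, 2).is_ok());
    assert!(verify_primary_key_length(&dense, 3).is_err());
    // The sparse codec has no fixed width, so any length passes this check.
    assert!(verify_primary_key_length(&sparse, 3).is_ok());
}
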
diff --git a/src/mito2/src/memtable/time_series.rs b/src/mito2/src/memtable/time_series.rs
index a7c41648f39f..88fa058c6b59 100644
--- a/src/mito2/src/memtable/time_series.rs
+++ b/src/mito2/src/memtable/time_series.rs
@@ -51,7 +51,7 @@ use crate::metrics::{READ_ROWS_TOTAL, READ_STAGE_ELAPSED};
use crate::read::dedup::LastNonNullIter;
use crate::read::{Batch, BatchBuilder, BatchColumn};
use crate::region::options::MergeMode;
-use crate::row_converter::{DensePrimaryKeyCodec, PrimaryKeyCodec, PrimaryKeyCodecExt};
+use crate::row_converter::{DensePrimaryKeyCodec, PrimaryKeyCodecExt};
/// Initial vector builder capacity.
const INITIAL_BUILDER_CAPACITY: usize = 0;
@@ -146,12 +146,13 @@ impl TimeSeriesMemtable {
fn write_key_value(&self, kv: KeyValue, stats: &mut WriteMetrics) -> Result<()> {
ensure!(
- kv.num_primary_keys() == self.row_codec.num_fields(),
+ self.row_codec.num_fields() == kv.num_primary_keys(),
PrimaryKeyLengthMismatchSnafu {
expect: self.row_codec.num_fields(),
- actual: kv.num_primary_keys()
+ actual: kv.num_primary_keys(),
}
);
+
let primary_key_encoded = self.row_codec.encode(kv.primary_keys())?;
let fields = kv.fields().collect::<Vec<_>>();
@@ -585,7 +586,7 @@ fn prune_primary_key(
let pk_values = if let Some(pk_values) = series.pk_cache.as_ref() {
pk_values
} else {
- let pk_values = codec.decode(pk);
+ let pk_values = codec.decode_dense_without_column_id(pk);
if let Err(e) = pk_values {
error!(e; "Failed to decode primary key");
return true;
@@ -1176,7 +1177,12 @@ mod tests {
let row_codec = Arc::new(DensePrimaryKeyCodec::with_fields(
schema
.primary_key_columns()
- .map(|c| SortField::new(c.column_schema.data_type.clone()))
+ .map(|c| {
+ (
+ c.column_id,
+ SortField::new(c.column_schema.data_type.clone()),
+ )
+ })
.collect(),
));
let set = Arc::new(SeriesSet::new(schema.clone(), row_codec));
diff --git a/src/mito2/src/read.rs b/src/mito2/src/read.rs
index 6001d3062491..2f9bdff7b036 100644
--- a/src/mito2/src/read.rs
+++ b/src/mito2/src/read.rs
@@ -40,7 +40,7 @@ use datatypes::arrow::compute::SortOptions;
use datatypes::arrow::row::{RowConverter, SortField};
use datatypes::prelude::{ConcreteDataType, DataType, ScalarVector};
use datatypes::types::TimestampType;
-use datatypes::value::{Value, ValueRef};
+use datatypes::value::ValueRef;
use datatypes::vectors::{
BooleanVector, Helper, TimestampMicrosecondVector, TimestampMillisecondVector,
TimestampNanosecondVector, TimestampSecondVector, UInt32Vector, UInt64Vector, UInt8Vector,
@@ -58,6 +58,7 @@ use crate::error::{
use crate::memtable::BoxedBatchIterator;
use crate::metrics::{READ_BATCHES_RETURN, READ_ROWS_RETURN, READ_STAGE_ELAPSED};
use crate::read::prune::PruneReader;
+use crate::row_converter::CompositeValues;
/// Storage internal representation of a batch of rows for a primary key (time series).
///
@@ -68,7 +69,7 @@ pub struct Batch {
/// Primary key encoded in a comparable form.
primary_key: Vec<u8>,
/// Possibly decoded `primary_key` values. Some places would decode it in advance.
- pk_values: Option<Vec<Value>>,
+ pk_values: Option<CompositeValues>,
/// Timestamps of rows, should be sorted and not null.
timestamps: VectorRef,
/// Sequences of rows
@@ -114,12 +115,12 @@ impl Batch {
}
/// Returns possibly decoded primary-key values.
- pub fn pk_values(&self) -> Option<&[Value]> {
- self.pk_values.as_deref()
+ pub fn pk_values(&self) -> Option<&CompositeValues> {
+ self.pk_values.as_ref()
}
/// Sets possibly decoded primary-key values.
- pub fn set_pk_values(&mut self, pk_values: Vec<Value>) {
+ pub fn set_pk_values(&mut self, pk_values: CompositeValues) {
self.pk_values = Some(pk_values);
}
diff --git a/src/mito2/src/read/compat.rs b/src/mito2/src/read/compat.rs
index 1de5d624210c..c103bbaa9c94 100644
--- a/src/mito2/src/read/compat.rs
+++ b/src/mito2/src/read/compat.rs
@@ -15,6 +15,7 @@
//! Utilities to adapt readers with different schema.
use std::collections::HashMap;
+use std::sync::Arc;
use datatypes::data_type::ConcreteDataType;
use datatypes::value::Value;
@@ -26,7 +27,10 @@ use store_api::storage::ColumnId;
use crate::error::{CompatReaderSnafu, CreateDefaultSnafu, Result};
use crate::read::projection::ProjectionMapper;
use crate::read::{Batch, BatchColumn, BatchReader};
-use crate::row_converter::{DensePrimaryKeyCodec, PrimaryKeyCodec, SortField};
+use crate::row_converter::{
+ build_primary_key_codec, build_primary_key_codec_with_fields, CompositeValues, PrimaryKeyCodec,
+ SortField,
+};
/// Reader to adapt schema of underlying reader to expected schema.
pub struct CompatReader<R> {
@@ -68,6 +72,8 @@ impl<R: BatchReader> BatchReader for CompatReader<R> {
/// A helper struct to adapt schema of the batch to an expected schema.
pub(crate) struct CompatBatch {
+ /// Optional primary key adapter.
+ rewrite_pk: Option<RewritePrimaryKey>,
/// Optional primary key adapter.
compat_pk: Option<CompatPrimaryKey>,
/// Optional fields adapter.
@@ -79,10 +85,12 @@ impl CompatBatch {
/// - `mapper` is built from the metadata users expect to see.
/// - `reader_meta` is the metadata of the input reader.
pub(crate) fn new(mapper: &ProjectionMapper, reader_meta: RegionMetadataRef) -> Result<Self> {
+ let rewrite_pk = may_rewrite_primary_key(mapper.metadata(), &reader_meta);
let compat_pk = may_compat_primary_key(mapper.metadata(), &reader_meta)?;
let compat_fields = may_compat_fields(mapper, &reader_meta)?;
Ok(Self {
+ rewrite_pk,
compat_pk,
compat_fields,
})
@@ -90,6 +98,9 @@ impl CompatBatch {
/// Adapts the `batch` to the expected schema.
pub(crate) fn compat_batch(&self, mut batch: Batch) -> Result<Batch> {
+ if let Some(rewrite_pk) = &self.rewrite_pk {
+ batch = rewrite_pk.compat(batch)?;
+ }
if let Some(compat_pk) = &self.compat_pk {
batch = compat_pk.compat(batch)?;
}
@@ -101,10 +112,15 @@ impl CompatBatch {
}
}
-/// Returns true if `left` and `right` have same columns to read.
-///
-/// It only consider column ids.
-pub(crate) fn has_same_columns(left: &RegionMetadata, right: &RegionMetadata) -> bool {
+/// Returns true if `left` and `right` have the same columns and primary key encoding.
+pub(crate) fn has_same_columns_and_pk_encoding(
+ left: &RegionMetadata,
+ right: &RegionMetadata,
+) -> bool {
+ if left.primary_key_encoding != right.primary_key_encoding {
+ return false;
+ }
+
if left.column_metadatas.len() != right.column_metadatas.len() {
return false;
}
@@ -127,16 +143,17 @@ pub(crate) fn has_same_columns(left: &RegionMetadata, right: &RegionMetadata) ->
#[derive(Debug)]
struct CompatPrimaryKey {
/// Row converter to append values to primary keys.
- converter: DensePrimaryKeyCodec,
+ converter: Arc<dyn PrimaryKeyCodec>,
/// Default values to append.
- values: Vec<Value>,
+ values: Vec<(ColumnId, Value)>,
}
impl CompatPrimaryKey {
/// Make primary key of the `batch` compatible.
fn compat(&self, mut batch: Batch) -> Result<Batch> {
- let mut buffer =
- Vec::with_capacity(batch.primary_key().len() + self.converter.estimated_size());
+ let mut buffer = Vec::with_capacity(
+ batch.primary_key().len() + self.converter.estimated_size().unwrap_or_default(),
+ );
buffer.extend_from_slice(batch.primary_key());
self.converter.encode_values(&self.values, &mut buffer)?;
@@ -144,9 +161,7 @@ impl CompatPrimaryKey {
// update cache
if let Some(pk_values) = &mut batch.pk_values {
- for value in &self.values {
- pk_values.push(value.clone());
- }
+ pk_values.extend(&self.values);
}
Ok(batch)
@@ -211,6 +226,25 @@ impl CompatFields {
}
}
+fn may_rewrite_primary_key(
+ expect: &RegionMetadata,
+ actual: &RegionMetadata,
+) -> Option<RewritePrimaryKey> {
+ if expect.primary_key_encoding == actual.primary_key_encoding {
+ return None;
+ }
+
+ let fields = expect.primary_key.clone();
+ let original = build_primary_key_codec(actual);
+ let new = build_primary_key_codec(expect);
+
+ Some(RewritePrimaryKey {
+ original,
+ new,
+ fields,
+ })
+}
+
/// Creates a [CompatPrimaryKey] if needed.
fn may_compat_primary_key(
expect: &RegionMetadata,
@@ -248,7 +282,10 @@ fn may_compat_primary_key(
for column_id in to_add {
// Safety: The id comes from expect region metadata.
let column = expect.column_by_id(*column_id).unwrap();
- fields.push(SortField::new(column.column_schema.data_type.clone()));
+ fields.push((
+ *column_id,
+ SortField::new(column.column_schema.data_type.clone()),
+ ));
let default_value = column
.column_schema
.create_default()
@@ -263,9 +300,11 @@ fn may_compat_primary_key(
column.column_schema.name
),
})?;
- values.push(default_value);
+ values.push((*column_id, default_value));
}
- let converter = DensePrimaryKeyCodec::with_fields(fields);
+    // Use the expected primary key encoding to build the converter.
+ let converter =
+ build_primary_key_codec_with_fields(expect.primary_key_encoding, fields.into_iter());
Ok(Some(CompatPrimaryKey { converter, values }))
}
@@ -350,6 +389,53 @@ enum IndexOrDefault {
},
}
+/// Adapter to rewrite primary key.
+struct RewritePrimaryKey {
+ /// Original primary key codec.
+ original: Arc<dyn PrimaryKeyCodec>,
+ /// New primary key codec.
+ new: Arc<dyn PrimaryKeyCodec>,
+ /// Order of the fields in the new primary key.
+ fields: Vec<ColumnId>,
+}
+
+impl RewritePrimaryKey {
+ /// Make primary key of the `batch` compatible.
+ fn compat(&self, mut batch: Batch) -> Result<Batch> {
+ let values = if let Some(pk_values) = batch.pk_values() {
+ pk_values
+ } else {
+ let new_pk_values = self.original.decode(batch.primary_key())?;
+ batch.set_pk_values(new_pk_values);
+ // Safety: We ensure pk_values is not None.
+ batch.pk_values().as_ref().unwrap()
+ };
+
+ let mut buffer = Vec::with_capacity(
+ batch.primary_key().len() + self.new.estimated_size().unwrap_or_default(),
+ );
+ match values {
+ CompositeValues::Dense(values) => {
+ self.new.encode_values(values.as_slice(), &mut buffer)?;
+ }
+ CompositeValues::Sparse(values) => {
+ let values = self
+ .fields
+ .iter()
+ .map(|id| {
+ let value = values.get_or_null(*id);
+ (*id, value.as_value_ref())
+ })
+ .collect::<Vec<_>>();
+ self.new.encode_value_refs(&values, &mut buffer)?;
+ }
+ }
+ batch.set_primary_key(buffer);
+
+ Ok(batch)
+ }
+}
+
#[cfg(test)]
mod tests {
use std::sync::Arc;
@@ -359,11 +445,12 @@ mod tests {
use datatypes::schema::ColumnSchema;
use datatypes::value::ValueRef;
use datatypes::vectors::{Int64Vector, TimestampMillisecondVector, UInt64Vector, UInt8Vector};
+ use store_api::codec::PrimaryKeyEncoding;
use store_api::metadata::{ColumnMetadata, RegionMetadataBuilder};
use store_api::storage::RegionId;
use super::*;
- use crate::row_converter::PrimaryKeyCodecExt;
+ use crate::row_converter::{DensePrimaryKeyCodec, PrimaryKeyCodecExt, SparsePrimaryKeyCodec};
use crate::test_util::{check_reader_result, VecBatchReader};
/// Creates a new [RegionMetadata].
@@ -396,7 +483,7 @@ mod tests {
/// Encode primary key.
fn encode_key(keys: &[Option<&str>]) -> Vec<u8> {
let fields = (0..keys.len())
- .map(|_| SortField::new(ConcreteDataType::string_datatype()))
+ .map(|_| (0, SortField::new(ConcreteDataType::string_datatype())))
.collect();
let converter = DensePrimaryKeyCodec::with_fields(fields);
let row = keys.iter().map(|str_opt| match str_opt {
@@ -407,6 +494,24 @@ mod tests {
converter.encode(row).unwrap()
}
+ /// Encode sparse primary key.
+ fn encode_sparse_key(keys: &[(ColumnId, Option<&str>)]) -> Vec<u8> {
+ let fields = (0..keys.len())
+ .map(|_| (1, SortField::new(ConcreteDataType::string_datatype())))
+ .collect();
+ let converter = SparsePrimaryKeyCodec::with_fields(fields);
+ let row = keys
+ .iter()
+ .map(|(id, str_opt)| match str_opt {
+ Some(v) => (*id, ValueRef::String(v)),
+ None => (*id, ValueRef::Null),
+ })
+ .collect::<Vec<_>>();
+ let mut buffer = vec![];
+ converter.encode_value_refs(&row, &mut buffer).unwrap();
+ buffer
+ }
+
/// Creates a batch for specific primary `key`.
///
/// `fields`: [(column_id of the field, is null)]
@@ -526,6 +631,25 @@ mod tests {
.is_none());
}
+ #[test]
+ fn test_same_pk_encoding() {
+ let reader_meta = Arc::new(new_metadata(
+ &[
+ (
+ 0,
+ SemanticType::Timestamp,
+ ConcreteDataType::timestamp_millisecond_datatype(),
+ ),
+ (1, SemanticType::Tag, ConcreteDataType::string_datatype()),
+ ],
+ &[1],
+ ));
+
+ assert!(may_compat_primary_key(&reader_meta, &reader_meta)
+ .unwrap()
+ .is_none());
+ }
+
#[test]
fn test_same_fields() {
let reader_meta = Arc::new(new_metadata(
@@ -747,4 +871,58 @@ mod tests {
)
.await;
}
+
+ #[tokio::test]
+ async fn test_compat_reader_different_pk_encoding() {
+ let mut reader_meta = new_metadata(
+ &[
+ (
+ 0,
+ SemanticType::Timestamp,
+ ConcreteDataType::timestamp_millisecond_datatype(),
+ ),
+ (1, SemanticType::Tag, ConcreteDataType::string_datatype()),
+ (2, SemanticType::Field, ConcreteDataType::int64_datatype()),
+ ],
+ &[1],
+ );
+ reader_meta.primary_key_encoding = PrimaryKeyEncoding::Dense;
+ let reader_meta = Arc::new(reader_meta);
+ let mut expect_meta = new_metadata(
+ &[
+ (
+ 0,
+ SemanticType::Timestamp,
+ ConcreteDataType::timestamp_millisecond_datatype(),
+ ),
+ (1, SemanticType::Tag, ConcreteDataType::string_datatype()),
+ (2, SemanticType::Field, ConcreteDataType::int64_datatype()),
+ (3, SemanticType::Tag, ConcreteDataType::string_datatype()),
+ (4, SemanticType::Field, ConcreteDataType::int64_datatype()),
+ ],
+ &[1, 3],
+ );
+ expect_meta.primary_key_encoding = PrimaryKeyEncoding::Sparse;
+ let expect_meta = Arc::new(expect_meta);
+
+ let mapper = ProjectionMapper::all(&expect_meta).unwrap();
+ let k1 = encode_key(&[Some("a")]);
+ let k2 = encode_key(&[Some("b")]);
+ let source_reader = VecBatchReader::new(&[
+ new_batch(&k1, &[(2, false)], 1000, 3),
+ new_batch(&k2, &[(2, false)], 1000, 3),
+ ]);
+
+ let mut compat_reader = CompatReader::new(&mapper, reader_meta, source_reader).unwrap();
+ let k1 = encode_sparse_key(&[(1, Some("a")), (3, None)]);
+ let k2 = encode_sparse_key(&[(1, Some("b")), (3, None)]);
+ check_reader_result(
+ &mut compat_reader,
+ &[
+ new_batch(&k1, &[(2, false), (4, true)], 1000, 3),
+ new_batch(&k2, &[(2, false), (4, true)], 1000, 3),
+ ],
+ )
+ .await;
+ }
}
diff --git a/src/mito2/src/read/projection.rs b/src/mito2/src/read/projection.rs
index 4ffc021e42cd..883f55406644 100644
--- a/src/mito2/src/read/projection.rs
+++ b/src/mito2/src/read/projection.rs
@@ -33,7 +33,7 @@ use store_api::storage::ColumnId;
use crate::cache::CacheStrategy;
use crate::error::{InvalidRequestSnafu, Result};
use crate::read::Batch;
-use crate::row_converter::{DensePrimaryKeyCodec, PrimaryKeyCodec};
+use crate::row_converter::{build_primary_key_codec, CompositeValues, PrimaryKeyCodec};
/// Only cache vector when its length `<=` this value.
const MAX_VECTOR_LENGTH_TO_CACHE: usize = 16384;
@@ -47,7 +47,7 @@ pub struct ProjectionMapper {
/// Output record batch contains tags.
has_tags: bool,
/// Decoder for primary key.
- codec: DensePrimaryKeyCodec,
+ codec: Arc<dyn PrimaryKeyCodec>,
/// Schema for converted [RecordBatch].
output_schema: SchemaRef,
/// Ids of columns to project. It keeps ids in the same order as the `projection`
@@ -92,8 +92,8 @@ impl ProjectionMapper {
// Safety: idx is valid.
column_schemas.push(metadata.schema.column_schemas()[*idx].clone());
}
- let codec = DensePrimaryKeyCodec::new(metadata);
+ let codec = build_primary_key_codec(metadata);
if is_empty_projection {
// If projection is empty, we don't output any column.
return Ok(ProjectionMapper {
@@ -134,7 +134,7 @@ impl ProjectionMapper {
has_tags = true;
// We always read all primary key so the column always exists and the tag
// index is always valid.
- BatchIndex::Tag(index)
+ BatchIndex::Tag((index, column.column_id))
}
SemanticType::Timestamp => BatchIndex::Timestamp,
SemanticType::Field => {
@@ -213,15 +213,15 @@ impl ProjectionMapper {
// Skips decoding pk if we don't need to output it.
let pk_values = if self.has_tags {
match batch.pk_values() {
- Some(v) => v.to_vec(),
+ Some(v) => v.clone(),
None => self
.codec
- .decode_dense(batch.primary_key())
+ .decode(batch.primary_key())
.map_err(BoxedError::new)
.context(ExternalSnafu)?,
}
} else {
- Vec::new()
+ CompositeValues::Dense(vec![])
};
let mut columns = Vec::with_capacity(self.output_schema.num_columns());
@@ -232,8 +232,11 @@ impl ProjectionMapper {
.zip(self.output_schema.column_schemas())
{
match index {
- BatchIndex::Tag(idx) => {
- let value = &pk_values[*idx];
+ BatchIndex::Tag((idx, column_id)) => {
+ let value = match &pk_values {
+ CompositeValues::Dense(v) => &v[*idx].1,
+ CompositeValues::Sparse(v) => v.get_or_null(*column_id),
+ };
let vector = repeated_vector_with_cache(
&column_schema.data_type,
value,
@@ -259,7 +262,7 @@ impl ProjectionMapper {
#[derive(Debug, Clone, Copy)]
enum BatchIndex {
/// Index in primary keys.
- Tag(usize),
+ Tag((usize, ColumnId)),
/// The time index column.
Timestamp,
/// Index in fields.
@@ -321,7 +324,7 @@ mod tests {
use super::*;
use crate::cache::CacheManager;
use crate::read::BatchBuilder;
- use crate::row_converter::{PrimaryKeyCodecExt, SortField};
+ use crate::row_converter::{DensePrimaryKeyCodec, PrimaryKeyCodecExt, SortField};
use crate::test_util::meta_util::TestRegionMetadataBuilder;
fn new_batch(
@@ -332,7 +335,12 @@ mod tests {
) -> Batch {
let converter = DensePrimaryKeyCodec::with_fields(
(0..tags.len())
- .map(|_| SortField::new(ConcreteDataType::int64_datatype()))
+ .map(|idx| {
+ (
+ idx as u32,
+ SortField::new(ConcreteDataType::int64_datatype()),
+ )
+ })
.collect(),
);
let primary_key = converter
diff --git a/src/mito2/src/read/scan_region.rs b/src/mito2/src/read/scan_region.rs
index b5c4eecd0c0d..193e3c3e1764 100644
--- a/src/mito2/src/read/scan_region.rs
+++ b/src/mito2/src/read/scan_region.rs
@@ -767,7 +767,7 @@ impl ScanInput {
}
}
};
- if !compat::has_same_columns(
+ if !compat::has_same_columns_and_pk_encoding(
self.mapper.metadata(),
file_range_ctx.read_format().metadata(),
) {
diff --git a/src/mito2/src/row_converter.rs b/src/mito2/src/row_converter.rs
index 75f015d4494f..4d0635d3cc45 100644
--- a/src/mito2/src/row_converter.rs
+++ b/src/mito2/src/row_converter.rs
@@ -13,10 +13,8 @@
// limitations under the License.
mod dense;
-// TODO(weny): remove it.
-#[allow(unused)]
mod sparse;
-
+use std::fmt::Debug;
use std::sync::Arc;
use common_recordbatch::filter::SimpleFilterEvaluator;
@@ -24,7 +22,8 @@ use datatypes::value::{Value, ValueRef};
pub use dense::{DensePrimaryKeyCodec, SortField};
pub use sparse::{SparsePrimaryKeyCodec, SparseValues};
use store_api::codec::PrimaryKeyEncoding;
-use store_api::metadata::RegionMetadataRef;
+use store_api::metadata::{RegionMetadata, RegionMetadataRef};
+use store_api::storage::ColumnId;
use crate::error::Result;
use crate::memtable::key_values::KeyValue;
@@ -49,9 +48,6 @@ pub trait PrimaryKeyCodecExt {
fn encode_to_vec<'a, I>(&self, row: I, buffer: &mut Vec<u8>) -> Result<()>
where
I: Iterator<Item = ValueRef<'a>>;
-
- /// Decode row values from bytes.
- fn decode(&self, bytes: &[u8]) -> Result<Vec<Value>>;
}
pub trait PrimaryKeyFilter: Send + Sync {
@@ -59,15 +55,63 @@ pub trait PrimaryKeyFilter: Send + Sync {
fn matches(&mut self, pk: &[u8]) -> bool;
}
-pub trait PrimaryKeyCodec: Send + Sync {
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub enum CompositeValues {
+ Dense(Vec<(ColumnId, Value)>),
+ Sparse(SparseValues),
+}
+
+impl CompositeValues {
+ /// Extends the composite values with the given values.
+ pub fn extend(&mut self, values: &[(ColumnId, Value)]) {
+ match self {
+ CompositeValues::Dense(dense_values) => {
+ for (column_id, value) in values {
+ dense_values.push((*column_id, value.clone()));
+ }
+ }
+            CompositeValues::Sparse(sparse_values) => {
+                for (column_id, value) in values {
+                    sparse_values.insert(*column_id, value.clone());
+ }
+ }
+ }
+ }
+}
+
+#[cfg(test)]
+impl CompositeValues {
+ pub fn into_sparse(self) -> SparseValues {
+ match self {
+ CompositeValues::Sparse(v) => v,
+ _ => panic!("CompositeValues is not sparse"),
+ }
+ }
+
+ pub fn into_dense(self) -> Vec<Value> {
+ match self {
+ CompositeValues::Dense(v) => v.into_iter().map(|(_, v)| v).collect(),
+ _ => panic!("CompositeValues is not dense"),
+ }
+ }
+}
+
+pub trait PrimaryKeyCodec: Send + Sync + Debug {
/// Encodes a key value to bytes.
fn encode_key_value(&self, key_value: &KeyValue, buffer: &mut Vec<u8>) -> Result<()>;
/// Encodes values to bytes.
- fn encode_values(&self, values: &[Value], buffer: &mut Vec<u8>) -> Result<()>;
+ fn encode_values(&self, values: &[(ColumnId, Value)], buffer: &mut Vec<u8>) -> Result<()>;
+
+ /// Encodes values to bytes.
+ fn encode_value_refs(
+ &self,
+ values: &[(ColumnId, ValueRef)],
+ buffer: &mut Vec<u8>,
+ ) -> Result<()>;
/// Returns the number of fields in the primary key.
- fn num_fields(&self) -> usize;
+ fn num_fields(&self) -> Option<usize>;
/// Returns a primary key filter factory.
fn primary_key_filter(
@@ -86,9 +130,33 @@ pub trait PrimaryKeyCodec: Send + Sync {
/// Decodes the primary key from the given bytes.
///
- /// Returns a [`Vec<Value>`] that follows the primary key ordering.
- fn decode_dense(&self, bytes: &[u8]) -> Result<Vec<Value>>;
+ /// Returns a [`CompositeValues`] that follows the primary key ordering.
+ fn decode(&self, bytes: &[u8]) -> Result<CompositeValues>;
/// Decode the leftmost value from bytes.
fn decode_leftmost(&self, bytes: &[u8]) -> Result<Option<Value>>;
}
+
+/// Builds a primary key codec from region metadata.
+pub fn build_primary_key_codec(region_metadata: &RegionMetadata) -> Arc<dyn PrimaryKeyCodec> {
+ let fields = region_metadata.primary_key_columns().map(|col| {
+ (
+ col.column_id,
+ SortField::new(col.column_schema.data_type.clone()),
+ )
+ });
+ build_primary_key_codec_with_fields(region_metadata.primary_key_encoding, fields)
+}
+
+/// Builds a primary key codec of the given encoding from the given fields.
+pub fn build_primary_key_codec_with_fields(
+ encoding: PrimaryKeyEncoding,
+ fields: impl Iterator<Item = (ColumnId, SortField)>,
+) -> Arc<dyn PrimaryKeyCodec> {
+ match encoding {
+ PrimaryKeyEncoding::Dense => Arc::new(DensePrimaryKeyCodec::with_fields(fields.collect())),
+ PrimaryKeyEncoding::Sparse => {
+ Arc::new(SparsePrimaryKeyCodec::with_fields(fields.collect()))
+ }
+ }
+}
diff --git a/src/mito2/src/row_converter/dense.rs b/src/mito2/src/row_converter/dense.rs
index 5c21428523f8..8c3d497d7e21 100644
--- a/src/mito2/src/row_converter/dense.rs
+++ b/src/mito2/src/row_converter/dense.rs
@@ -30,8 +30,9 @@ use serde::{Deserialize, Serialize};
use snafu::ResultExt;
use store_api::codec::PrimaryKeyEncoding;
use store_api::metadata::{RegionMetadata, RegionMetadataRef};
+use store_api::storage::ColumnId;
-use super::PrimaryKeyFilter;
+use super::{CompositeValues, PrimaryKeyFilter};
use crate::error::{
self, FieldTypeMismatchSnafu, NotSupportedFieldSnafu, Result, SerializeFieldSnafu,
};
@@ -312,34 +313,31 @@ impl PrimaryKeyCodecExt for DensePrimaryKeyCodec {
{
self.encode_dense(row, buffer)
}
-
- fn decode(&self, bytes: &[u8]) -> Result<Vec<Value>> {
- self.decode_dense(bytes)
- }
}
/// A memory-comparable row [`Value`] encoder/decoder.
#[derive(Clone, Debug)]
pub struct DensePrimaryKeyCodec {
/// Primary key fields.
- ordered_primary_key_columns: Arc<Vec<SortField>>,
+ ordered_primary_key_columns: Arc<Vec<(ColumnId, SortField)>>,
}
impl DensePrimaryKeyCodec {
pub fn new(metadata: &RegionMetadata) -> Self {
- let ordered_primary_key_columns = Arc::new(
- metadata
- .primary_key_columns()
- .map(|c| SortField::new(c.column_schema.data_type.clone()))
- .collect::<Vec<_>>(),
- );
-
- Self {
- ordered_primary_key_columns,
- }
+ let ordered_primary_key_columns = metadata
+ .primary_key_columns()
+ .map(|c| {
+ (
+ c.column_id,
+ SortField::new(c.column_schema.data_type.clone()),
+ )
+ })
+ .collect::<Vec<_>>();
+
+ Self::with_fields(ordered_primary_key_columns)
}
- pub fn with_fields(fields: Vec<SortField>) -> Self {
+ pub fn with_fields(fields: Vec<(ColumnId, SortField)>) -> Self {
Self {
ordered_primary_key_columns: Arc::new(fields),
}
@@ -350,12 +348,42 @@ impl DensePrimaryKeyCodec {
I: Iterator<Item = ValueRef<'a>>,
{
let mut serializer = Serializer::new(buffer);
- for (value, field) in row.zip(self.ordered_primary_key_columns.iter()) {
+ for (value, (_, field)) in row.zip(self.ordered_primary_key_columns.iter()) {
field.serialize(&mut serializer, &value)?;
}
Ok(())
}
+ /// Decode primary key values from bytes.
+ pub fn decode_dense(&self, bytes: &[u8]) -> Result<Vec<(ColumnId, Value)>> {
+ let mut deserializer = Deserializer::new(bytes);
+ let mut values = Vec::with_capacity(self.ordered_primary_key_columns.len());
+ for (column_id, field) in self.ordered_primary_key_columns.iter() {
+ let value = field.deserialize(&mut deserializer)?;
+ values.push((*column_id, value));
+ }
+ Ok(values)
+ }
+
+    /// Decodes primary key values from bytes, without attaching column ids.
+ pub fn decode_dense_without_column_id(&self, bytes: &[u8]) -> Result<Vec<Value>> {
+ let mut deserializer = Deserializer::new(bytes);
+ let mut values = Vec::with_capacity(self.ordered_primary_key_columns.len());
+ for (_, field) in self.ordered_primary_key_columns.iter() {
+ let value = field.deserialize(&mut deserializer)?;
+ values.push(value);
+ }
+ Ok(values)
+ }
+
+ /// Returns the field at `pos`.
+ ///
+ /// # Panics
+ /// Panics if `pos` is out of bounds.
+ fn field_at(&self, pos: usize) -> &SortField {
+ &self.ordered_primary_key_columns[pos].1
+ }
+
/// Decode value at `pos` in `bytes`.
///
/// The i-th element in offsets buffer is how many bytes to skip in order to read value at `pos`.
@@ -370,7 +398,7 @@ impl DensePrimaryKeyCodec {
// We computed the offset before.
let to_skip = offsets_buf[pos];
deserializer.advance(to_skip);
- return self.ordered_primary_key_columns[pos].deserialize(&mut deserializer);
+ return self.field_at(pos).deserialize(&mut deserializer);
}
if offsets_buf.is_empty() {
@@ -379,7 +407,8 @@ impl DensePrimaryKeyCodec {
for i in 0..pos {
// Offset to skip before reading value i.
offsets_buf.push(offset);
- let skip = self.ordered_primary_key_columns[i]
+ let skip = self
+ .field_at(i)
.skip_deserialize(bytes, &mut deserializer)?;
offset += skip;
}
@@ -393,7 +422,8 @@ impl DensePrimaryKeyCodec {
deserializer.advance(offset);
for i in value_start..pos {
// Skip value i.
- let skip = self.ordered_primary_key_columns[i]
+ let skip = self
+ .field_at(i)
.skip_deserialize(bytes, &mut deserializer)?;
// Offset for the value at i + 1.
offset += skip;
@@ -401,15 +431,19 @@ impl DensePrimaryKeyCodec {
}
}
- self.ordered_primary_key_columns[pos].deserialize(&mut deserializer)
+ self.field_at(pos).deserialize(&mut deserializer)
}
pub fn estimated_size(&self) -> usize {
self.ordered_primary_key_columns
.iter()
- .map(|f| f.estimated_size())
+ .map(|(_, f)| f.estimated_size())
.sum()
}
+
+ pub fn num_fields(&self) -> usize {
+ self.ordered_primary_key_columns.len()
+ }
}
impl PrimaryKeyCodec for DensePrimaryKeyCodec {
@@ -417,16 +451,25 @@ impl PrimaryKeyCodec for DensePrimaryKeyCodec {
self.encode_dense(key_value.primary_keys(), buffer)
}
- fn encode_values(&self, values: &[Value], buffer: &mut Vec<u8>) -> Result<()> {
- self.encode_dense(values.iter().map(|v| v.as_value_ref()), buffer)
+ fn encode_values(&self, values: &[(ColumnId, Value)], buffer: &mut Vec<u8>) -> Result<()> {
+ self.encode_dense(values.iter().map(|(_, v)| v.as_value_ref()), buffer)
+ }
+
+ fn encode_value_refs(
+ &self,
+ values: &[(ColumnId, ValueRef)],
+ buffer: &mut Vec<u8>,
+ ) -> Result<()> {
+ let iter = values.iter().map(|(_, v)| *v);
+ self.encode_dense(iter, buffer)
}
fn estimated_size(&self) -> Option<usize> {
Some(self.estimated_size())
}
- fn num_fields(&self) -> usize {
- self.ordered_primary_key_columns.len()
+ fn num_fields(&self) -> Option<usize> {
+ Some(self.num_fields())
}
fn encoding(&self) -> PrimaryKeyEncoding {
@@ -445,20 +488,14 @@ impl PrimaryKeyCodec for DensePrimaryKeyCodec {
))
}
- fn decode_dense(&self, bytes: &[u8]) -> Result<Vec<Value>> {
- let mut deserializer = Deserializer::new(bytes);
- let mut values = Vec::with_capacity(self.ordered_primary_key_columns.len());
- for f in self.ordered_primary_key_columns.iter() {
- let value = f.deserialize(&mut deserializer)?;
- values.push(value);
- }
- Ok(values)
+ fn decode(&self, bytes: &[u8]) -> Result<CompositeValues> {
+ Ok(CompositeValues::Dense(self.decode_dense(bytes)?))
}
fn decode_leftmost(&self, bytes: &[u8]) -> Result<Option<Value>> {
// TODO(weny, yinwen): avoid decoding the whole primary key.
let mut values = self.decode_dense(bytes)?;
- Ok(values.pop())
+ Ok(values.pop().map(|(_, v)| v))
}
}
@@ -476,14 +513,14 @@ mod tests {
let encoder = DensePrimaryKeyCodec::with_fields(
data_types
.iter()
- .map(|t| SortField::new(t.clone()))
+ .map(|t| (0, SortField::new(t.clone())))
.collect::<Vec<_>>(),
);
let value_ref = row.iter().map(|v| v.as_value_ref()).collect::<Vec<_>>();
let result = encoder.encode(value_ref.iter().cloned()).unwrap();
- let decoded = encoder.decode(&result).unwrap();
+ let decoded = encoder.decode(&result).unwrap().into_dense();
assert_eq!(decoded, row);
let mut decoded = Vec::new();
let mut offsets = Vec::new();
@@ -502,14 +539,14 @@ mod tests {
#[test]
fn test_memcmp() {
let encoder = DensePrimaryKeyCodec::with_fields(vec![
- SortField::new(ConcreteDataType::string_datatype()),
- SortField::new(ConcreteDataType::int64_datatype()),
+ (0, SortField::new(ConcreteDataType::string_datatype())),
+ (1, SortField::new(ConcreteDataType::int64_datatype())),
]);
let values = [Value::String("abcdefgh".into()), Value::Int64(128)];
let value_ref = values.iter().map(|v| v.as_value_ref()).collect::<Vec<_>>();
let result = encoder.encode(value_ref.iter().cloned()).unwrap();
- let decoded = encoder.decode(&result).unwrap();
+ let decoded = encoder.decode(&result).unwrap().into_dense();
assert_eq!(&values, &decoded as &[Value]);
}
diff --git a/src/mito2/src/row_converter/sparse.rs b/src/mito2/src/row_converter/sparse.rs
index 6beca6412a6b..91a5623110d3 100644
--- a/src/mito2/src/row_converter/sparse.rs
+++ b/src/mito2/src/row_converter/sparse.rs
@@ -15,25 +15,30 @@
use std::collections::{HashMap, HashSet};
use std::sync::Arc;
+use common_recordbatch::filter::SimpleFilterEvaluator;
use datatypes::prelude::ConcreteDataType;
use datatypes::value::{Value, ValueRef};
use memcomparable::{Deserializer, Serializer};
use serde::{Deserialize, Serialize};
use snafu::ResultExt;
+use store_api::codec::PrimaryKeyEncoding;
use store_api::metadata::RegionMetadataRef;
use store_api::storage::consts::ReservedColumnId;
use store_api::storage::ColumnId;
-use crate::error::{DeserializeFieldSnafu, Result, SerializeFieldSnafu};
+use crate::error::{DeserializeFieldSnafu, Result, SerializeFieldSnafu, UnsupportedOperationSnafu};
+use crate::memtable::key_values::KeyValue;
+use crate::memtable::partition_tree::SparsePrimaryKeyFilter;
use crate::row_converter::dense::SortField;
-use crate::row_converter::PrimaryKeyCodec;
+use crate::row_converter::{CompositeValues, PrimaryKeyCodec, PrimaryKeyFilter};
/// A codec for sparse key of metrics.
-#[derive(Clone)]
+#[derive(Clone, Debug)]
pub struct SparsePrimaryKeyCodec {
inner: Arc<SparsePrimaryKeyCodecInner>,
}
+#[derive(Debug)]
struct SparsePrimaryKeyCodecInner {
// Internal fields
table_id_field: SortField,
@@ -66,6 +71,11 @@ impl SparseValues {
self.values.get(&column_id).unwrap_or(&Value::Null)
}
+ /// Returns the value of the given column, or [`None`] if the column is not present.
+ pub fn get(&self, column_id: &ColumnId) -> Option<&Value> {
+ self.values.get(column_id)
+ }
+
/// Inserts a new value into the [`SparseValues`].
pub fn insert(&mut self, column_id: ColumnId, value: Value) {
self.values.insert(column_id, value);
@@ -111,6 +121,17 @@ impl SparsePrimaryKeyCodec {
}
}
+ pub fn with_fields(fields: Vec<(ColumnId, SortField)>) -> Self {
+ Self {
+ inner: Arc::new(SparsePrimaryKeyCodecInner {
+ columns: Some(fields.iter().map(|f| f.0).collect()),
+ table_id_field: SortField::new(ConcreteDataType::uint32_datatype()),
+ tsid_field: SortField::new(ConcreteDataType::uint64_datatype()),
+ label_field: SortField::new(ConcreteDataType::string_datatype()),
+ }),
+ }
+ }
+
/// Returns the field of the given column id.
fn get_field(&self, column_id: ColumnId) -> Option<&SortField> {
// if the `columns` is not specified, all unknown columns is primary key(label field).
@@ -224,6 +245,59 @@ impl SparsePrimaryKeyCodec {
}
}
+impl PrimaryKeyCodec for SparsePrimaryKeyCodec {
+ fn encode_key_value(&self, _key_value: &KeyValue, _buffer: &mut Vec<u8>) -> Result<()> {
+ UnsupportedOperationSnafu {
+ err_msg: "The encode_key_value method is not supported in SparsePrimaryKeyCodec.",
+ }
+ .fail()
+ }
+
+ fn encode_values(&self, values: &[(ColumnId, Value)], buffer: &mut Vec<u8>) -> Result<()> {
+ self.encode_to_vec(values.iter().map(|v| (v.0, v.1.as_value_ref())), buffer)
+ }
+
+ fn encode_value_refs(
+ &self,
+ values: &[(ColumnId, ValueRef)],
+ buffer: &mut Vec<u8>,
+ ) -> Result<()> {
+ self.encode_to_vec(values.iter().map(|v| (v.0, v.1)), buffer)
+ }
+
+ fn estimated_size(&self) -> Option<usize> {
+ None
+ }
+
+ fn num_fields(&self) -> Option<usize> {
+ None
+ }
+
+ fn encoding(&self) -> PrimaryKeyEncoding {
+ PrimaryKeyEncoding::Sparse
+ }
+
+ fn primary_key_filter(
+ &self,
+ metadata: &RegionMetadataRef,
+ filters: Arc<Vec<SimpleFilterEvaluator>>,
+ ) -> Box<dyn PrimaryKeyFilter> {
+ Box::new(SparsePrimaryKeyFilter::new(
+ metadata.clone(),
+ filters,
+ self.clone(),
+ ))
+ }
+
+ fn decode(&self, bytes: &[u8]) -> Result<CompositeValues> {
+ Ok(CompositeValues::Sparse(self.decode_sparse(bytes)?))
+ }
+
+ fn decode_leftmost(&self, bytes: &[u8]) -> Result<Option<Value>> {
+ self.decode_leftmost(bytes)
+ }
+}
+
#[cfg(test)]
mod tests {
use std::sync::Arc;
diff --git a/src/mito2/src/sst/index/bloom_filter/creator.rs b/src/mito2/src/sst/index/bloom_filter/creator.rs
index 0f97ea102711..3dfe15dfd5af 100644
--- a/src/mito2/src/sst/index/bloom_filter/creator.rs
+++ b/src/mito2/src/sst/index/bloom_filter/creator.rs
@@ -30,7 +30,7 @@ use crate::error::{
PuffinAddBlobSnafu, PushBloomFilterValueSnafu, Result,
};
use crate::read::Batch;
-use crate::row_converter::SortField;
+use crate::row_converter::{CompositeValues, SortField};
use crate::sst::file::FileId;
use crate::sst::index::bloom_filter::INDEX_BLOB_TYPE;
use crate::sst::index::codec::{IndexValueCodec, IndexValuesCodec};
@@ -108,7 +108,10 @@ impl BloomFilterIndexer {
return Ok(None);
}
- let codec = IndexValuesCodec::from_tag_columns(metadata.primary_key_columns());
+ let codec = IndexValuesCodec::from_tag_columns(
+ metadata.primary_key_encoding,
+ metadata.primary_key_columns(),
+ );
let indexer = Self {
creators,
temp_file_provider,
@@ -192,11 +195,26 @@ impl BloomFilterIndexer {
let n = batch.num_rows();
guard.inc_row_count(n);
+ // TODO(weny, zhenchi): lazy decode
+ let values = self.codec.decode(batch.primary_key())?;
// Tags
- for ((col_id, _), field, value) in self.codec.decode(batch.primary_key())? {
+ for (idx, (col_id, field)) in self.codec.fields().iter().enumerate() {
let Some(creator) = self.creators.get_mut(col_id) else {
continue;
};
+
+ let value = match &values {
+ CompositeValues::Dense(vec) => {
+ let value = &vec[idx].1;
+ if value.is_null() {
+ None
+ } else {
+ Some(value)
+ }
+ }
+ CompositeValues::Sparse(sparse_values) => sparse_values.get(col_id),
+ };
+
let elems = value
.map(|v| {
let mut buf = vec![];
@@ -411,7 +429,7 @@ pub(crate) mod tests {
}
pub fn new_batch(str_tag: impl AsRef<str>, u64_field: impl IntoIterator<Item = u64>) -> Batch {
- let fields = vec![SortField::new(ConcreteDataType::string_datatype())];
+ let fields = vec![(0, SortField::new(ConcreteDataType::string_datatype()))];
let codec = DensePrimaryKeyCodec::with_fields(fields);
let row: [ValueRef; 1] = [str_tag.as_ref().into()];
let primary_key = codec.encode(row.into_iter()).unwrap();
diff --git a/src/mito2/src/sst/index/codec.rs b/src/mito2/src/sst/index/codec.rs
index 23702ba41448..5d08cc7b2934 100644
--- a/src/mito2/src/sst/index/codec.rs
+++ b/src/mito2/src/sst/index/codec.rs
@@ -12,15 +12,21 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::collections::HashMap;
+use std::sync::Arc;
+
use datatypes::data_type::ConcreteDataType;
-use datatypes::value::{Value, ValueRef};
+use datatypes::value::ValueRef;
use memcomparable::Serializer;
use snafu::{ensure, OptionExt, ResultExt};
+use store_api::codec::PrimaryKeyEncoding;
use store_api::metadata::ColumnMetadata;
use store_api::storage::ColumnId;
use crate::error::{FieldTypeMismatchSnafu, IndexEncodeNullSnafu, Result};
-use crate::row_converter::{DensePrimaryKeyCodec, PrimaryKeyCodec, SortField};
+use crate::row_converter::{
+ build_primary_key_codec_with_fields, CompositeValues, PrimaryKeyCodec, SortField,
+};
/// Encodes index values according to their data types for sorting and storage use.
pub struct IndexValueCodec;
@@ -62,26 +68,35 @@ impl IndexValueCodec {
pub struct IndexValuesCodec {
/// Tuples containing column id and its corresponding index_name (result of `to_string` on ColumnId),
/// to minimize redundant `to_string` calls.
- column_ids: Vec<(ColumnId, String)>,
+ column_ids: HashMap<ColumnId, String>,
/// The data types of tag columns.
- fields: Vec<SortField>,
+ fields: Vec<(ColumnId, SortField)>,
/// The decoder for the primary key.
- decoder: DensePrimaryKeyCodec,
+ decoder: Arc<dyn PrimaryKeyCodec>,
}
impl IndexValuesCodec {
/// Creates a new `IndexValuesCodec` from a list of `ColumnMetadata` of tag columns.
- pub fn from_tag_columns<'a>(tag_columns: impl Iterator<Item = &'a ColumnMetadata>) -> Self {
+ pub fn from_tag_columns<'a>(
+ primary_key_encoding: PrimaryKeyEncoding,
+ tag_columns: impl Iterator<Item = &'a ColumnMetadata>,
+ ) -> Self {
let (column_ids, fields): (Vec<_>, Vec<_>) = tag_columns
.map(|column| {
(
(column.column_id, column.column_id.to_string()),
- SortField::new(column.column_schema.data_type.clone()),
+ (
+ column.column_id,
+ SortField::new(column.column_schema.data_type.clone()),
+ ),
)
})
.unzip();
- let decoder = DensePrimaryKeyCodec::with_fields(fields.clone());
+ let column_ids = column_ids.into_iter().collect();
+ let decoder =
+ build_primary_key_codec_with_fields(primary_key_encoding, fields.clone().into_iter());
+
Self {
column_ids,
fields,
@@ -89,26 +104,19 @@ impl IndexValuesCodec {
}
}
+ /// Returns the column ids of the index.
+ pub fn column_ids(&self) -> &HashMap<ColumnId, String> {
+ &self.column_ids
+ }
+
+ /// Returns the fields of the index.
+ pub fn fields(&self) -> &[(ColumnId, SortField)] {
+ &self.fields
+ }
+
/// Decodes a primary key into its corresponding column ids, data types and values.
- pub fn decode(
- &self,
- primary_key: &[u8],
- ) -> Result<impl Iterator<Item = (&(ColumnId, String), &SortField, Option<Value>)>> {
- let values = self.decoder.decode_dense(primary_key)?;
-
- let iter = values
- .into_iter()
- .zip(&self.column_ids)
- .zip(&self.fields)
- .map(|((value, column_id), encoder)| {
- if value.is_null() {
- (column_id, encoder, None)
- } else {
- (column_id, encoder, Some(value))
- }
- });
-
- Ok(iter)
+ pub fn decode(&self, primary_key: &[u8]) -> Result<CompositeValues> {
+ self.decoder.decode(primary_key)
}
}
@@ -116,10 +124,12 @@ impl IndexValuesCodec {
mod tests {
use datatypes::data_type::ConcreteDataType;
use datatypes::schema::ColumnSchema;
+ use datatypes::value::Value;
+ use store_api::metadata::ColumnMetadata;
use super::*;
use crate::error::Error;
- use crate::row_converter::{PrimaryKeyCodecExt, SortField};
+ use crate::row_converter::{DensePrimaryKeyCodec, PrimaryKeyCodecExt, SortField};
#[test]
fn test_encode_value_basic() {
@@ -167,27 +177,18 @@ mod tests {
];
let primary_key = DensePrimaryKeyCodec::with_fields(vec![
- SortField::new(ConcreteDataType::string_datatype()),
- SortField::new(ConcreteDataType::int64_datatype()),
+ (0, SortField::new(ConcreteDataType::string_datatype())),
+ (1, SortField::new(ConcreteDataType::int64_datatype())),
])
.encode([ValueRef::Null, ValueRef::Int64(10)].into_iter())
.unwrap();
- let codec = IndexValuesCodec::from_tag_columns(tag_columns.iter());
- let mut iter = codec.decode(&primary_key).unwrap();
-
- let ((column_id, col_id_str), field, value) = iter.next().unwrap();
- assert_eq!(*column_id, 1);
- assert_eq!(col_id_str, "1");
- assert_eq!(field, &SortField::new(ConcreteDataType::string_datatype()));
- assert_eq!(value, None);
-
- let ((column_id, col_id_str), field, value) = iter.next().unwrap();
- assert_eq!(*column_id, 2);
- assert_eq!(col_id_str, "2");
- assert_eq!(field, &SortField::new(ConcreteDataType::int64_datatype()));
- assert_eq!(value, Some(Value::Int64(10)));
+ let codec =
+ IndexValuesCodec::from_tag_columns(PrimaryKeyEncoding::Dense, tag_columns.iter());
+ let values = codec.decode(&primary_key).unwrap().into_dense();
- assert!(iter.next().is_none());
+ assert_eq!(values.len(), 2);
+ assert_eq!(values[0], Value::Null);
+ assert_eq!(values[1], Value::Int64(10));
}
}
diff --git a/src/mito2/src/sst/index/inverted_index/creator.rs b/src/mito2/src/sst/index/inverted_index/creator.rs
index 669d4ff6f23e..7903f2a496d9 100644
--- a/src/mito2/src/sst/index/inverted_index/creator.rs
+++ b/src/mito2/src/sst/index/inverted_index/creator.rs
@@ -34,7 +34,7 @@ use crate::error::{
PushIndexValueSnafu, Result,
};
use crate::read::Batch;
-use crate::row_converter::SortField;
+use crate::row_converter::{CompositeValues, SortField};
use crate::sst::file::FileId;
use crate::sst::index::codec::{IndexValueCodec, IndexValuesCodec};
use crate::sst::index::intermediate::{
@@ -101,7 +101,10 @@ impl InvertedIndexer {
);
let index_creator = Box::new(SortIndexCreator::new(sorter, segment_row_count));
- let codec = IndexValuesCodec::from_tag_columns(metadata.primary_key_columns());
+ let codec = IndexValuesCodec::from_tag_columns(
+ metadata.primary_key_encoding,
+ metadata.primary_key_columns(),
+ );
Self {
codec,
index_creator,
@@ -180,11 +183,25 @@ impl InvertedIndexer {
let n = batch.num_rows();
guard.inc_row_count(n);
- for ((col_id, col_id_str), field, value) in self.codec.decode(batch.primary_key())? {
+ // TODO(weny, zhenchi): lazy decode
+ let values = self.codec.decode(batch.primary_key())?;
+ for (idx, (col_id, field)) in self.codec.fields().iter().enumerate() {
if !self.indexed_column_ids.contains(col_id) {
continue;
}
+ let value = match &values {
+ CompositeValues::Dense(vec) => {
+ let value = &vec[idx].1;
+ if value.is_null() {
+ None
+ } else {
+ Some(value)
+ }
+ }
+ CompositeValues::Sparse(sparse_values) => sparse_values.get(col_id),
+ };
+
if let Some(value) = value.as_ref() {
self.value_buf.clear();
IndexValueCodec::encode_nonnull_value(
@@ -194,6 +211,9 @@ impl InvertedIndexer {
)?;
}
+ // Safety: the column id is guaranteed to be in the map
+ let col_id_str = self.codec.column_ids().get(col_id).unwrap();
+
// non-null value -> Some(encoded_bytes), null value -> None
let value = value.is_some().then_some(self.value_buf.as_slice());
self.index_creator
@@ -381,8 +401,8 @@ mod tests {
u64_field: impl IntoIterator<Item = u64>,
) -> Batch {
let fields = vec![
- SortField::new(ConcreteDataType::string_datatype()),
- SortField::new(ConcreteDataType::int32_datatype()),
+ (0, SortField::new(ConcreteDataType::string_datatype())),
+ (1, SortField::new(ConcreteDataType::int32_datatype())),
];
let codec = DensePrimaryKeyCodec::with_fields(fields);
let row: [ValueRef; 2] = [str_tag.as_ref().into(), i32_tag.into().into()];
diff --git a/src/mito2/src/sst/parquet/file_range.rs b/src/mito2/src/sst/parquet/file_range.rs
index 388dc24677b4..e8241a453fb8 100644
--- a/src/mito2/src/sst/parquet/file_range.rs
+++ b/src/mito2/src/sst/parquet/file_range.rs
@@ -33,7 +33,7 @@ use crate::read::compat::CompatBatch;
use crate::read::last_row::RowGroupLastRowCachedReader;
use crate::read::prune::PruneReader;
use crate::read::Batch;
-use crate::row_converter::{DensePrimaryKeyCodec, PrimaryKeyCodecExt};
+use crate::row_converter::{CompositeValues, PrimaryKeyCodec};
use crate::sst::file::FileHandle;
use crate::sst::parquet::format::ReadFormat;
use crate::sst::parquet::reader::{RowGroupReader, RowGroupReaderBuilder, SimpleFilterContext};
@@ -156,7 +156,7 @@ impl FileRangeContext {
reader_builder: RowGroupReaderBuilder,
filters: Vec<SimpleFilterContext>,
read_format: ReadFormat,
- codec: DensePrimaryKeyCodec,
+ codec: Arc<dyn PrimaryKeyCodec>,
) -> Self {
Self {
reader_builder,
@@ -237,7 +237,7 @@ pub(crate) struct RangeBase {
/// Helper to read the SST.
pub(crate) read_format: ReadFormat,
/// Decoder for primary keys
- pub(crate) codec: DensePrimaryKeyCodec,
+ pub(crate) codec: Arc<dyn PrimaryKeyCodec>,
/// Optional helper to compat batches.
pub(crate) compat_batch: Option<CompatBatch>,
}
@@ -264,15 +264,25 @@ impl RangeBase {
input.set_pk_values(self.codec.decode(input.primary_key())?);
input.pk_values().unwrap()
};
- // Safety: this is a primary key
- let pk_index = self
- .read_format
- .metadata()
- .primary_key_index(filter.column_id())
- .unwrap();
- let pk_value = pk_values[pk_index]
- .try_to_scalar_value(filter.data_type())
- .context(FieldTypeMismatchSnafu)?;
+ let pk_value = match pk_values {
+ CompositeValues::Dense(v) => {
+ // Safety: this is a primary key
+ let pk_index = self
+ .read_format
+ .metadata()
+ .primary_key_index(filter.column_id())
+ .unwrap();
+ v[pk_index]
+ .1
+ .try_to_scalar_value(filter.data_type())
+ .context(FieldTypeMismatchSnafu)?
+ }
+ CompositeValues::Sparse(v) => {
+ let v = v.get_or_null(filter.column_id());
+ v.try_to_scalar_value(filter.data_type())
+ .context(FieldTypeMismatchSnafu)?
+ }
+ };
if filter
.filter()
.evaluate_scalar(&pk_value)
diff --git a/src/mito2/src/sst/parquet/format.rs b/src/mito2/src/sst/parquet/format.rs
index 34a1da565e40..c90907f0eb26 100644
--- a/src/mito2/src/sst/parquet/format.rs
+++ b/src/mito2/src/sst/parquet/format.rs
@@ -48,7 +48,7 @@ use crate::error::{
ConvertVectorSnafu, InvalidBatchSnafu, InvalidRecordBatchSnafu, NewRecordBatchSnafu, Result,
};
use crate::read::{Batch, BatchBuilder, BatchColumn};
-use crate::row_converter::{DensePrimaryKeyCodec, PrimaryKeyCodec, SortField};
+use crate::row_converter::{build_primary_key_codec_with_fields, SortField};
use crate::sst::file::{FileMeta, FileTimeRange};
use crate::sst::to_sst_arrow_schema;
@@ -391,6 +391,7 @@ impl ReadFormat {
column: &ColumnMetadata,
is_min: bool,
) -> Option<ArrayRef> {
+ let primary_key_encoding = self.metadata.primary_key_encoding;
let is_first_tag = self
.metadata
.primary_key
@@ -402,9 +403,15 @@ impl ReadFormat {
return None;
}
- let converter = DensePrimaryKeyCodec::with_fields(vec![SortField::new(
- column.column_schema.data_type.clone(),
- )]);
+ let converter = build_primary_key_codec_with_fields(
+ primary_key_encoding,
+ [(
+ column.column_id,
+ SortField::new(column.column_schema.data_type.clone()),
+ )]
+ .into_iter(),
+ );
+
let values = row_groups.iter().map(|meta| {
let stats = meta
.borrow()
diff --git a/src/mito2/src/sst/parquet/reader.rs b/src/mito2/src/sst/parquet/reader.rs
index 6854c072a1a3..4aecf744d696 100644
--- a/src/mito2/src/sst/parquet/reader.rs
+++ b/src/mito2/src/sst/parquet/reader.rs
@@ -49,7 +49,7 @@ use crate::metrics::{
};
use crate::read::prune::{PruneReader, Source};
use crate::read::{Batch, BatchReader};
-use crate::row_converter::DensePrimaryKeyCodec;
+use crate::row_converter::build_primary_key_codec;
use crate::sst::file::FileHandle;
use crate::sst::index::bloom_filter::applier::BloomFilterIndexApplierRef;
use crate::sst::index::fulltext_index::applier::FulltextIndexApplierRef;
@@ -253,7 +253,7 @@ impl ParquetReaderBuilder {
vec![]
};
- let codec = DensePrimaryKeyCodec::new(read_format.metadata());
+ let codec = build_primary_key_codec(read_format.metadata());
let context = FileRangeContext::new(reader_builder, filters, read_format, codec);
diff --git a/src/mito2/src/test_util/memtable_util.rs b/src/mito2/src/test_util/memtable_util.rs
index 369ba95f354d..4cb4469dc08d 100644
--- a/src/mito2/src/test_util/memtable_util.rs
+++ b/src/mito2/src/test_util/memtable_util.rs
@@ -326,8 +326,8 @@ pub(crate) fn encode_keys(
/// Encode one key.
pub(crate) fn encode_key_by_kv(key_value: &KeyValue) -> Vec<u8> {
let row_codec = DensePrimaryKeyCodec::with_fields(vec![
- SortField::new(ConcreteDataType::string_datatype()),
- SortField::new(ConcreteDataType::uint32_datatype()),
+ (0, SortField::new(ConcreteDataType::string_datatype())),
+ (1, SortField::new(ConcreteDataType::uint32_datatype())),
]);
row_codec.encode(key_value.primary_keys()).unwrap()
}
diff --git a/src/mito2/src/test_util/sst_util.rs b/src/mito2/src/test_util/sst_util.rs
index ce8cd4412f63..8bef6d205ba3 100644
--- a/src/mito2/src/test_util/sst_util.rs
+++ b/src/mito2/src/test_util/sst_util.rs
@@ -85,7 +85,12 @@ pub fn sst_region_metadata() -> RegionMetadata {
/// Encodes a primary key for specific tags.
pub fn new_primary_key(tags: &[&str]) -> Vec<u8> {
let fields = (0..tags.len())
- .map(|_| SortField::new(ConcreteDataType::string_datatype()))
+ .map(|idx| {
+ (
+ idx as u32,
+ SortField::new(ConcreteDataType::string_datatype()),
+ )
+ })
.collect();
let converter = DensePrimaryKeyCodec::with_fields(fields);
converter
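
The hunks above replace the concrete DensePrimaryKeyCodec with an Arc<dyn PrimaryKeyCodec> selected by PrimaryKeyEncoding. Below is a minimal, self-contained Rust sketch of that dispatch pattern; the names (Encoding, Codec, DenseCodec, SparseCodec, build_codec) and the byte layout are illustrative stand-ins for this example, not the crate's actual API.

use std::sync::Arc;

// Illustrative encodings mirroring PrimaryKeyEncoding::{Dense, Sparse}.
#[derive(Clone, Copy)]
enum Encoding {
    Dense,
    Sparse,
}

// Trimmed-down stand-in for the PrimaryKeyCodec trait object.
trait Codec: Send + Sync {
    fn encode(&self, values: &[(u32, i64)], out: &mut Vec<u8>);
}

struct DenseCodec;
struct SparseCodec;

impl Codec for DenseCodec {
    // Dense: values are written positionally; column ids stay implicit
    // in the field order the codec was built with.
    fn encode(&self, values: &[(u32, i64)], out: &mut Vec<u8>) {
        for (_, v) in values {
            out.extend_from_slice(&v.to_be_bytes());
        }
    }
}

impl Codec for SparseCodec {
    // Sparse: every value is prefixed with its column id, so absent
    // columns simply do not appear in the encoded key.
    fn encode(&self, values: &[(u32, i64)], out: &mut Vec<u8>) {
        for (id, v) in values {
            out.extend_from_slice(&id.to_be_bytes());
            out.extend_from_slice(&v.to_be_bytes());
        }
    }
}

// Counterpart of build_primary_key_codec_with_fields: pick the concrete
// implementation once, then every code path only sees Arc<dyn Codec>.
fn build_codec(encoding: Encoding) -> Arc<dyn Codec> {
    match encoding {
        Encoding::Dense => Arc::new(DenseCodec),
        Encoding::Sparse => Arc::new(SparseCodec),
    }
}

Keeping the choice behind a trait object is what lets readers such as ProjectionMapper and FileRangeContext in the hunks above stay agnostic of the row encoding.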
|
feat
|
replace `DensePrimaryKeyCodec` with `Arc<dyn PrimaryKeyCodec>` (#5408)
|
5dba373ede58224f296fb49d3c1c367e4d6721bf
|
2023-12-14 15:31:12
|
shuiyisong
|
chore: return json body under http status 401 (#2924)
| false
|
diff --git a/src/common/error/src/status_code.rs b/src/common/error/src/status_code.rs
index 228b278aa7cb..64b9a9e7fb1b 100644
--- a/src/common/error/src/status_code.rs
+++ b/src/common/error/src/status_code.rs
@@ -138,7 +138,6 @@ impl StatusCode {
pub fn should_log_error(&self) -> bool {
match self {
StatusCode::Unknown
- | StatusCode::Unsupported
| StatusCode::Unexpected
| StatusCode::Internal
| StatusCode::Cancelled
@@ -147,6 +146,7 @@ impl StatusCode {
| StatusCode::StorageUnavailable
| StatusCode::RuntimeResourcesExhausted => true,
StatusCode::Success
+ | StatusCode::Unsupported
| StatusCode::InvalidArguments
| StatusCode::InvalidSyntax
| StatusCode::TableAlreadyExists
diff --git a/src/servers/src/http.rs b/src/servers/src/http.rs
index 0bd6f90319f0..a390f406d778 100644
--- a/src/servers/src/http.rs
+++ b/src/servers/src/http.rs
@@ -36,7 +36,6 @@ use aide::axum::{routing as apirouting, ApiRouter, IntoApiResponse};
use aide::openapi::{Info, OpenApi, Server as OpenAPIServer};
use async_trait::async_trait;
use auth::UserProviderRef;
-use axum::body::BoxBody;
use axum::error_handling::HandleErrorLayer;
use axum::extract::{DefaultBodyLimit, MatchedPath};
use axum::http::Request;
@@ -62,12 +61,11 @@ use tokio::sync::oneshot::{self, Sender};
use tokio::sync::Mutex;
use tower::timeout::TimeoutLayer;
use tower::ServiceBuilder;
-use tower_http::auth::AsyncRequireAuthorizationLayer;
use tower_http::trace::TraceLayer;
+use self::authorize::AuthState;
use crate::configurator::ConfiguratorRef;
use crate::error::{AlreadyStartedSnafu, Error, Result, StartHttpSnafu, ToJsonSnafu};
-use crate::http::authorize::HttpAuth;
use crate::http::influxdb::{influxdb_health, influxdb_ping, influxdb_write_v1, influxdb_write_v2};
use crate::http::influxdb_result_v1::InfluxdbV1Response;
use crate::http::prometheus::{
@@ -721,9 +719,10 @@ impl HttpServer {
.try_into()
.unwrap_or_else(|_| DEFAULT_BODY_LIMIT.as_bytes() as usize),
))
- // custom layer
- .layer(AsyncRequireAuthorizationLayer::new(
- HttpAuth::<BoxBody>::new(self.user_provider.clone()),
+ // auth layer
+ .layer(middleware::from_fn_with_state(
+ AuthState::new(self.user_provider.clone()),
+ authorize::check_http_auth,
)),
)
// Handlers for debug, we don't expect a timeout.
diff --git a/src/servers/src/http/authorize.rs b/src/servers/src/http/authorize.rs
index 9225b6ec52dd..51040ba4899a 100644
--- a/src/servers/src/http/authorize.rs
+++ b/src/servers/src/http/authorize.rs
@@ -12,118 +12,123 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::marker::PhantomData;
-
use ::auth::UserProviderRef;
+use axum::extract::State;
use axum::http::{self, Request, StatusCode};
-use axum::response::Response;
+use axum::middleware::Next;
+use axum::response::{IntoResponse, Response};
+use axum::Json;
use base64::prelude::BASE64_STANDARD;
use base64::Engine;
use common_catalog::consts::DEFAULT_SCHEMA_NAME;
use common_catalog::parse_catalog_and_schema_from_db_string;
use common_error::ext::ErrorExt;
use common_telemetry::warn;
-use futures::future::BoxFuture;
use headers::Header;
-use http_body::Body;
use secrecy::SecretString;
use session::context::QueryContext;
use snafu::{ensure, OptionExt, ResultExt};
-use tower_http::auth::AsyncAuthorizeRequest;
use super::header::GreptimeDbName;
-use super::PUBLIC_APIS;
+use super::{JsonResponse, ResponseFormat, PUBLIC_APIS};
use crate::error::{
self, InvalidAuthorizationHeaderSnafu, InvalidParameterSnafu, InvisibleASCIISnafu,
NotFoundInfluxAuthSnafu, Result, UnsupportedAuthSchemeSnafu, UrlDecodeSnafu,
};
use crate::http::HTTP_API_PREFIX;
-pub struct HttpAuth<RespBody> {
+/// AuthState holds the optional [`UserProviderRef`] consumed by the
+/// [`check_http_auth`] axum middleware.
+#[derive(Clone)]
+pub struct AuthState {
user_provider: Option<UserProviderRef>,
- _ty: PhantomData<RespBody>,
}
-impl<RespBody> HttpAuth<RespBody> {
+impl AuthState {
pub fn new(user_provider: Option<UserProviderRef>) -> Self {
- Self {
- user_provider,
- _ty: PhantomData,
- }
+ Self { user_provider }
}
}
-impl<RespBody> Clone for HttpAuth<RespBody> {
- fn clone(&self) -> Self {
- Self {
- user_provider: self.user_provider.clone(),
- _ty: PhantomData,
+pub async fn inner_auth<B>(
+ user_provider: Option<UserProviderRef>,
+ mut req: Request<B>,
+) -> std::result::Result<Request<B>, Response> {
+ // 1. prepare
+ let (catalog, schema) = extract_catalog_and_schema(&req);
+ let query_ctx = QueryContext::with(catalog, schema);
+ let need_auth = need_auth(&req);
+ let is_influxdb = req.uri().path().contains("influxdb");
+
+ // 2. check if auth is needed
+ let user_provider = if let Some(user_provider) = user_provider.filter(|_| need_auth) {
+ user_provider
+ } else {
+ query_ctx.set_current_user(Some(auth::userinfo_by_name(None)));
+ let _ = req.extensions_mut().insert(query_ctx);
+ return Ok(req);
+ };
+
+ // 3. get username and pwd
+ let (username, password) = match extract_username_and_password(is_influxdb, &req) {
+ Ok((username, password)) => (username, password),
+ Err(e) => {
+ warn!("extract username and password failed: {}", e);
+ crate::metrics::METRIC_AUTH_FAILURE
+ .with_label_values(&[e.status_code().as_ref()])
+ .inc();
+ return Err(err_response(is_influxdb, e).into_response());
+ }
+ };
+
+ // 4. auth
+ match user_provider
+ .auth(
+ auth::Identity::UserId(&username, None),
+ auth::Password::PlainText(password),
+ catalog,
+ schema,
+ )
+ .await
+ {
+ Ok(userinfo) => {
+ query_ctx.set_current_user(Some(userinfo));
+ let _ = req.extensions_mut().insert(query_ctx);
+ Ok(req)
+ }
+ Err(e) => {
+ warn!("authenticate failed: {}", e);
+ crate::metrics::METRIC_AUTH_FAILURE
+ .with_label_values(&[e.status_code().as_ref()])
+ .inc();
+ Err(err_response(is_influxdb, e).into_response())
}
}
}
-impl<B, RespBody> AsyncAuthorizeRequest<B> for HttpAuth<RespBody>
-where
- B: Send + Sync + 'static,
- RespBody: Body + Default,
-{
- type RequestBody = B;
- type ResponseBody = RespBody;
- type Future = BoxFuture<'static, std::result::Result<Request<B>, Response<Self::ResponseBody>>>;
-
- fn authorize(&mut self, mut request: Request<B>) -> Self::Future {
- let user_provider = self.user_provider.clone();
- Box::pin(async move {
- let (catalog, schema) = extract_catalog_and_schema(&request);
- let query_ctx = QueryContext::with(catalog, schema);
- let need_auth = need_auth(&request);
-
- let user_provider = if let Some(user_provider) = user_provider.filter(|_| need_auth) {
- user_provider
- } else {
- query_ctx.set_current_user(Some(auth::userinfo_by_name(None)));
- let _ = request.extensions_mut().insert(query_ctx);
- return Ok(request);
- };
-
- let (username, password) = match extract_username_and_password(&request) {
- Ok((username, password)) => (username, password),
- Err(e) => {
- warn!("extract username and password failed: {}", e);
- crate::metrics::METRIC_AUTH_FAILURE
- .with_label_values(&[e.status_code().as_ref()])
- .inc();
- return Err(unauthorized_resp());
- }
- };
-
- match user_provider
- .auth(
- ::auth::Identity::UserId(username.as_str(), None),
- ::auth::Password::PlainText(password),
- catalog,
- schema,
- )
- .await
- {
- Ok(userinfo) => {
- query_ctx.set_current_user(Some(userinfo));
- let _ = request.extensions_mut().insert(query_ctx);
- Ok(request)
- }
- Err(e) => {
- warn!("authenticate failed: {}", e);
- crate::metrics::METRIC_AUTH_FAILURE
- .with_label_values(&[e.status_code().as_ref()])
- .inc();
- Err(unauthorized_resp())
- }
- }
- })
+pub async fn check_http_auth<B>(
+ State(auth_state): State<AuthState>,
+ req: Request<B>,
+ next: Next<B>,
+) -> Response {
+ match inner_auth(auth_state.user_provider, req).await {
+ Ok(req) => next.run(req).await,
+ Err(resp) => resp,
}
}
-fn extract_catalog_and_schema<B: Send + Sync + 'static>(request: &Request<B>) -> (&str, &str) {
+fn err_response(is_influxdb: bool, err: impl ErrorExt) -> impl IntoResponse {
+ let format = if is_influxdb {
+ ResponseFormat::InfluxdbV1
+ } else {
+ ResponseFormat::GreptimedbV1
+ };
+
+ let body = JsonResponse::with_error(err, format);
+ (StatusCode::UNAUTHORIZED, Json(body))
+}
+
+fn extract_catalog_and_schema<B>(request: &Request<B>) -> (&str, &str) {
// parse database from header
let dbname = request
.headers()
@@ -139,9 +144,7 @@ fn extract_catalog_and_schema<B: Send + Sync + 'static>(request: &Request<B>) ->
parse_catalog_and_schema_from_db_string(dbname)
}
-fn get_influxdb_credentials<B: Send + Sync + 'static>(
- request: &Request<B>,
-) -> Result<Option<(Username, Password)>> {
+fn get_influxdb_credentials<B>(request: &Request<B>) -> Result<Option<(Username, Password)>> {
// compat with influxdb v2 and v1
if let Some(header) = request.headers().get(http::header::AUTHORIZATION) {
// try v2 first
@@ -182,10 +185,11 @@ fn get_influxdb_credentials<B: Send + Sync + 'static>(
}
}
-fn extract_username_and_password<B: Send + Sync + 'static>(
+fn extract_username_and_password<B>(
+ is_influxdb: bool,
request: &Request<B>,
) -> Result<(Username, Password)> {
- Ok(if request.uri().path().contains("influxdb") {
+ Ok(if is_influxdb {
// compatible with influxdb auth
get_influxdb_credentials(request)?.context(NotFoundInfluxAuthSnafu)?
} else {
@@ -197,15 +201,6 @@ fn extract_username_and_password<B: Send + Sync + 'static>(
})
}
-fn unauthorized_resp<RespBody>() -> Response<RespBody>
-where
- RespBody: Body + Default,
-{
- let mut res = Response::new(RespBody::default());
- *res.status_mut() = StatusCode::UNAUTHORIZED;
- res
-}
-
#[derive(Debug)]
pub enum AuthScheme {
Basic(Username, Password),
diff --git a/src/servers/tests/http/authorize.rs b/src/servers/tests/http/authorize.rs
index e41e0316f8aa..97f1c9e2e821 100644
--- a/src/servers/tests/http/authorize.rs
+++ b/src/servers/tests/http/authorize.rs
@@ -16,20 +16,17 @@ use std::sync::Arc;
use auth::tests::MockUserProvider;
use auth::UserProvider;
-use axum::body::BoxBody;
use axum::http;
-use hyper::Request;
-use servers::http::authorize::HttpAuth;
+use http_body::Body;
+use hyper::{Request, StatusCode};
+use servers::http::authorize::inner_auth;
use session::context::QueryContextRef;
-use tower_http::auth::AsyncAuthorizeRequest;
#[tokio::test]
async fn test_http_auth() {
- let mut http_auth: HttpAuth<BoxBody> = HttpAuth::new(None);
-
// base64encode("username:password") == "dXNlcm5hbWU6cGFzc3dvcmQ="
let req = mock_http_request(Some("Basic dXNlcm5hbWU6cGFzc3dvcmQ="), None).unwrap();
- let req = http_auth.authorize(req).await.unwrap();
+ let req = inner_auth(None, req).await.unwrap();
let ctx: &QueryContextRef = req.extensions().get().unwrap();
let user_info = ctx.current_user().unwrap();
let default = auth::userinfo_by_name(None);
@@ -37,32 +34,41 @@ async fn test_http_auth() {
// In mock user provider, right username:password == "greptime:greptime"
let mock_user_provider = Some(Arc::new(MockUserProvider::default()) as Arc<dyn UserProvider>);
- let mut http_auth: HttpAuth<BoxBody> = HttpAuth::new(mock_user_provider);
// base64encode("greptime:greptime") == "Z3JlcHRpbWU6Z3JlcHRpbWU="
let req = mock_http_request(Some("Basic Z3JlcHRpbWU6Z3JlcHRpbWU="), None).unwrap();
- let req = http_auth.authorize(req).await.unwrap();
+ let req = inner_auth(mock_user_provider.clone(), req).await.unwrap();
let ctx: &QueryContextRef = req.extensions().get().unwrap();
let user_info = ctx.current_user().unwrap();
let default = auth::userinfo_by_name(None);
assert_eq!(default.username(), user_info.username());
let req = mock_http_request(None, None).unwrap();
- let auth_res = http_auth.authorize(req).await;
+ let auth_res = inner_auth(mock_user_provider.clone(), req).await;
assert!(auth_res.is_err());
+ let mut resp = auth_res.unwrap_err();
+ assert_eq!(resp.status(), StatusCode::UNAUTHORIZED);
+ assert_eq!(
+ b"{\"type\":\"GreptimedbV1\",\"code\":7003,\"error\":\"Not found http or grpc authorization header\"}",
+ resp.data().await.unwrap().unwrap().as_ref()
+ );
// base64encode("username:password") == "dXNlcm5hbWU6cGFzc3dvcmQ="
let wrong_req = mock_http_request(Some("Basic dXNlcm5hbWU6cGFzc3dvcmQ="), None).unwrap();
- let auth_res = http_auth.authorize(wrong_req).await;
+ let auth_res = inner_auth(mock_user_provider, wrong_req).await;
assert!(auth_res.is_err());
+ let mut resp = auth_res.unwrap_err();
+ assert_eq!(resp.status(), StatusCode::UNAUTHORIZED);
+ assert_eq!(
+ b"{\"type\":\"GreptimedbV1\",\"code\":7000,\"error\":\"User not found, username: username\"}",
+ resp.data().await.unwrap().unwrap().as_ref(),
+ );
}
#[tokio::test]
async fn test_schema_validating() {
// In mock user provider, right username:password == "greptime:greptime"
- let provider = MockUserProvider::default();
- let mock_user_provider = Some(Arc::new(provider) as Arc<dyn UserProvider>);
- let mut http_auth: HttpAuth<BoxBody> = HttpAuth::new(mock_user_provider);
+ let mock_user_provider = Some(Arc::new(MockUserProvider::default()) as Arc<dyn UserProvider>);
// base64encode("greptime:greptime") == "Z3JlcHRpbWU6Z3JlcHRpbWU="
// http://localhost/{http_api_version}/sql?db=greptime
@@ -72,7 +78,7 @@ async fn test_schema_validating() {
Some(format!("http://localhost/{version}/sql?db=public").as_str()),
)
.unwrap();
- let req = http_auth.authorize(req).await.unwrap();
+ let req = inner_auth(mock_user_provider.clone(), req).await.unwrap();
let ctx: &QueryContextRef = req.extensions().get().unwrap();
let user_info = ctx.current_user().unwrap();
let default = auth::userinfo_by_name(None);
@@ -84,26 +90,37 @@ async fn test_schema_validating() {
Some(format!("http://localhost/{version}/sql?db=wrong").as_str()),
)
.unwrap();
- let result = http_auth.authorize(req).await;
+ let result = inner_auth(mock_user_provider, req).await;
assert!(result.is_err());
+ let mut resp = result.unwrap_err();
+ assert_eq!(resp.status(), StatusCode::UNAUTHORIZED);
+ assert_eq!(
+ b"{\"type\":\"GreptimedbV1\",\"code\":7005,\"error\":\"Access denied for user 'greptime' to database 'greptime-wrong'\"}",
+ resp.data().await.unwrap().unwrap().as_ref()
+ );
}
#[tokio::test]
async fn test_whitelist_no_auth() {
// In mock user provider, right username:password == "greptime:greptime"
let mock_user_provider = Some(Arc::new(MockUserProvider::default()) as Arc<dyn UserProvider>);
- let mut http_auth: HttpAuth<BoxBody> = HttpAuth::new(mock_user_provider);
// base64encode("greptime:greptime") == "Z3JlcHRpbWU6Z3JlcHRpbWU="
// try auth path first
let req = mock_http_request(None, None).unwrap();
- let req = http_auth.authorize(req).await;
- assert!(req.is_err());
+ let auth_res = inner_auth(mock_user_provider.clone(), req).await;
+ assert!(auth_res.is_err());
+ let mut resp = auth_res.unwrap_err();
+ assert_eq!(resp.status(), StatusCode::UNAUTHORIZED);
+ assert_eq!(
+ b"{\"type\":\"GreptimedbV1\",\"code\":7003,\"error\":\"Not found http or grpc authorization header\"}",
+ resp.data().await.unwrap().unwrap().as_ref()
+ );
// try whitelist path
let req = mock_http_request(None, Some("http://localhost/health")).unwrap();
- let req = http_auth.authorize(req).await;
- let _ = req.unwrap();
+ let req = inner_auth(mock_user_provider, req).await;
+ assert!(req.is_ok());
}
// copy from http::authorize
diff --git a/src/servers/tests/http/influxdb_test.rs b/src/servers/tests/http/influxdb_test.rs
index d74acca3fcc3..06a9193ce350 100644
--- a/src/servers/tests/http/influxdb_test.rs
+++ b/src/servers/tests/http/influxdb_test.rs
@@ -168,6 +168,10 @@ async fn test_influxdb_write() {
.send()
.await;
assert_eq!(result.status(), 401);
+ assert_eq!(
+ "{\"type\":\"InfluxdbV1\",\"results\":[],\"error\":\"Username and password does not match, username: greptime\"}",
+ result.text().await
+ );
// no auth
let result = client
@@ -176,6 +180,10 @@ async fn test_influxdb_write() {
.send()
.await;
assert_eq!(result.status(), 401);
+ assert_eq!(
+ "{\"type\":\"InfluxdbV1\",\"results\":[],\"error\":\"Not found influx http authorization info\"}",
+ result.text().await
+ );
// make new app for db=influxdb
let app = make_test_app(tx, Some("influxdb"));
|
chore
|
return json body under http status 401 (#2924)
|
da1ea253ba49fdaf0d7c9e7dd2bee46ab15b8115
|
2024-07-11 18:02:07
|
Zhenchi
|
perf(puffin): not to stage uncompressed blob (#4333)
| false
|
diff --git a/src/mito2/src/config.rs b/src/mito2/src/config.rs
index 7919aeb4ca5e..1533694bc9de 100644
--- a/src/mito2/src/config.rs
+++ b/src/mito2/src/config.rs
@@ -371,9 +371,6 @@ pub struct InvertedIndexConfig {
/// Memory threshold for performing an external sort during index creation.
pub mem_threshold_on_create: MemoryThreshold,
- /// Whether to compress the index data.
- pub compress: bool,
-
#[deprecated = "use [IndexConfig::aux_path] instead"]
#[serde(skip_serializing)]
pub intermediate_path: String,
@@ -396,7 +393,6 @@ impl Default for InvertedIndexConfig {
create_on_compaction: Mode::Auto,
apply_on_query: Mode::Auto,
mem_threshold_on_create: MemoryThreshold::Auto,
- compress: true,
write_buffer_size: ReadableSize::mb(8),
intermediate_path: String::new(),
metadata_cache_size: ReadableSize::mb(32),
diff --git a/src/mito2/src/engine/basic_test.rs b/src/mito2/src/engine/basic_test.rs
index 1d598efcb40e..9179d8a07411 100644
--- a/src/mito2/src/engine/basic_test.rs
+++ b/src/mito2/src/engine/basic_test.rs
@@ -580,7 +580,7 @@ async fn test_region_usage() {
flush_region(&engine, region_id, None).await;
let region_stat = region.region_usage();
- assert_eq!(region_stat.sst_usage, 3026);
+ assert_eq!(region_stat.sst_usage, 3010);
// region total usage
// Some memtables may share items.
diff --git a/src/mito2/src/sst/index.rs b/src/mito2/src/sst/index.rs
index 2407a974c107..909bf481b484 100644
--- a/src/mito2/src/sst/index.rs
+++ b/src/mito2/src/sst/index.rs
@@ -220,7 +220,6 @@ impl<'a> IndexerBuilder<'a> {
self.intermediate_manager.clone(),
self.inverted_index_config.mem_threshold_on_create(),
segment_row_count,
- self.inverted_index_config.compress,
&self.index_options.inverted_index.ignore_column_ids,
);
diff --git a/src/mito2/src/sst/index/inverted_index/creator.rs b/src/mito2/src/sst/index/inverted_index/creator.rs
index 380661d60db9..c8bed65bd8f5 100644
--- a/src/mito2/src/sst/index/inverted_index/creator.rs
+++ b/src/mito2/src/sst/index/inverted_index/creator.rs
@@ -24,7 +24,6 @@ use index::inverted_index::create::sort::external_sort::ExternalSorter;
use index::inverted_index::create::sort_create::SortIndexCreator;
use index::inverted_index::create::InvertedIndexCreator;
use index::inverted_index::format::writer::InvertedIndexBlobWriter;
-use puffin::blob_metadata::CompressionCodec;
use puffin::puffin_manager::{PuffinWriter, PutOptions};
use snafu::{ensure, ResultExt};
use store_api::metadata::RegionMetadataRef;
@@ -71,9 +70,6 @@ pub struct SstIndexCreator {
/// The memory usage of the index creator.
memory_usage: Arc<AtomicUsize>,
- /// Whether to compress the index data.
- compress: bool,
-
/// Ids of indexed columns.
column_ids: HashSet<ColumnId>,
}
@@ -87,7 +83,6 @@ impl SstIndexCreator {
intermediate_manager: IntermediateManager,
memory_usage_threshold: Option<usize>,
segment_row_count: NonZeroUsize,
- compress: bool,
ignore_column_ids: &[ColumnId],
) -> Self {
let temp_file_provider = Arc::new(TempFileProvider::new(
@@ -122,7 +117,6 @@ impl SstIndexCreator {
stats: Statistics::default(),
aborted: false,
memory_usage,
- compress,
column_ids,
}
}
@@ -242,12 +236,9 @@ impl SstIndexCreator {
let (tx, rx) = duplex(PIPE_BUFFER_SIZE_FOR_SENDING_BLOB);
let mut index_writer = InvertedIndexBlobWriter::new(tx.compat_write());
- let put_options = PutOptions {
- compression: self.compress.then_some(CompressionCodec::Zstd),
- };
let (index_finish, puffin_add_blob) = futures::join!(
self.index_creator.finish(&mut index_writer),
- puffin_writer.put_blob(INDEX_BLOB_TYPE, rx.compat(), put_options)
+ puffin_writer.put_blob(INDEX_BLOB_TYPE, rx.compat(), PutOptions::default())
);
match (
@@ -398,7 +389,6 @@ mod tests {
intm_mgr,
memory_threshold,
NonZeroUsize::new(segment_row_count).unwrap(),
- false,
&[],
);
diff --git a/src/mito2/src/sst/index/puffin_manager.rs b/src/mito2/src/sst/index/puffin_manager.rs
index 7a9d24695173..98a240547d09 100644
--- a/src/mito2/src/sst/index/puffin_manager.rs
+++ b/src/mito2/src/sst/index/puffin_manager.rs
@@ -21,8 +21,8 @@ use object_store::{FuturesAsyncReader, FuturesAsyncWriter, ObjectStore};
use puffin::error::{self as puffin_error, Result as PuffinResult};
use puffin::puffin_manager::file_accessor::PuffinFileAccessor;
use puffin::puffin_manager::fs_puffin_manager::FsPuffinManager;
-use puffin::puffin_manager::stager::{BoundedStager, FsBlobGuard};
-use puffin::puffin_manager::{BlobGuard, PuffinManager};
+use puffin::puffin_manager::stager::BoundedStager;
+use puffin::puffin_manager::{BlobGuard, PuffinManager, PuffinReader};
use snafu::ResultExt;
use crate::error::{PuffinInitStagerSnafu, Result};
@@ -35,10 +35,11 @@ use crate::sst::index::store::{self, InstrumentedStore};
type InstrumentedAsyncRead = store::InstrumentedAsyncRead<'static, FuturesAsyncReader>;
type InstrumentedAsyncWrite = store::InstrumentedAsyncWrite<'static, FuturesAsyncWriter>;
-pub(crate) type BlobReader = <Arc<FsBlobGuard> as BlobGuard>::Reader;
-pub(crate) type SstPuffinWriter = <SstPuffinManager as PuffinManager>::Writer;
pub(crate) type SstPuffinManager =
FsPuffinManager<Arc<BoundedStager>, ObjectStorePuffinFileAccessor>;
+pub(crate) type SstPuffinReader = <SstPuffinManager as PuffinManager>::Reader;
+pub(crate) type SstPuffinWriter = <SstPuffinManager as PuffinManager>::Writer;
+pub(crate) type BlobReader = <<SstPuffinReader as PuffinReader>::Blob as BlobGuard>::Reader;
const STAGING_DIR: &str = "staging";
diff --git a/src/puffin/src/file_format/reader/file.rs b/src/puffin/src/file_format/reader/file.rs
index f1435bd0e474..9a87d70592c2 100644
--- a/src/puffin/src/file_format/reader/file.rs
+++ b/src/puffin/src/file_format/reader/file.rs
@@ -61,6 +61,15 @@ impl<R> PuffinFileReader<R> {
);
Ok(())
}
+
+ /// Converts the reader into an owned blob reader.
+ pub fn into_blob_reader(self, blob_metadata: &BlobMetadata) -> PartialReader<R> {
+ PartialReader::new(
+ self.source,
+ blob_metadata.offset as _,
+ blob_metadata.length as _,
+ )
+ }
}
impl<'a, R: io::Read + io::Seek + 'a> SyncReader<'a> for PuffinFileReader<R> {
diff --git a/src/puffin/src/puffin_manager.rs b/src/puffin/src/puffin_manager.rs
index 339b266c7476..f77b79c007ba 100644
--- a/src/puffin/src/puffin_manager.rs
+++ b/src/puffin/src/puffin_manager.rs
@@ -22,7 +22,6 @@ mod tests;
use std::path::PathBuf;
use async_trait::async_trait;
-use futures::future::BoxFuture;
use futures::{AsyncRead, AsyncSeek};
use crate::blob_metadata::CompressionCodec;
@@ -92,10 +91,11 @@ pub trait PuffinReader {
/// `BlobGuard` is provided by the `PuffinReader` to access the blob data.
/// Users should hold the `BlobGuard` until they are done with the blob data.
+#[async_trait]
#[auto_impl::auto_impl(Arc)]
pub trait BlobGuard {
type Reader: AsyncRead + AsyncSeek + Unpin;
- fn reader(&self) -> BoxFuture<'static, Result<Self::Reader>>;
+ async fn reader(&self) -> Result<Self::Reader>;
}
/// `DirGuard` is provided by the `PuffinReader` to access the directory in the filesystem.
diff --git a/src/puffin/src/puffin_manager/file_accessor.rs b/src/puffin/src/puffin_manager/file_accessor.rs
index 46deb198cc09..89ef8cc45192 100644
--- a/src/puffin/src/puffin_manager/file_accessor.rs
+++ b/src/puffin/src/puffin_manager/file_accessor.rs
@@ -21,7 +21,7 @@ use crate::error::Result;
#[async_trait]
#[auto_impl::auto_impl(Arc)]
pub trait PuffinFileAccessor: Send + Sync + 'static {
- type Reader: AsyncRead + AsyncSeek + Unpin + Send;
+ type Reader: AsyncRead + AsyncSeek + Unpin + Send + Sync;
type Writer: AsyncWrite + Unpin + Send;
/// Opens a reader for the given puffin file.
diff --git a/src/puffin/src/puffin_manager/fs_puffin_manager.rs b/src/puffin/src/puffin_manager/fs_puffin_manager.rs
index 7c95532dc604..01b367a78291 100644
--- a/src/puffin/src/puffin_manager/fs_puffin_manager.rs
+++ b/src/puffin/src/puffin_manager/fs_puffin_manager.rs
@@ -46,7 +46,7 @@ impl<S, F> FsPuffinManager<S, F> {
#[async_trait]
impl<S, F> PuffinManager for FsPuffinManager<S, F>
where
- S: Stager + Clone,
+ S: Stager + Clone + 'static,
F: PuffinFileAccessor + Clone,
{
type Reader = FsPuffinReader<S, F>;
diff --git a/src/puffin/src/puffin_manager/fs_puffin_manager/reader.rs b/src/puffin/src/puffin_manager/fs_puffin_manager/reader.rs
index a1b8d3a8ea0c..ad0eccabe46a 100644
--- a/src/puffin/src/puffin_manager/fs_puffin_manager/reader.rs
+++ b/src/puffin/src/puffin_manager/fs_puffin_manager/reader.rs
@@ -12,23 +12,26 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
use async_compression::futures::bufread::ZstdDecoder;
use async_trait::async_trait;
-use futures::future::BoxFuture;
use futures::io::BufReader;
-use futures::{AsyncRead, AsyncReadExt, AsyncWrite};
+use futures::{AsyncRead, AsyncReadExt, AsyncSeek, AsyncWrite};
use snafu::{ensure, OptionExt, ResultExt};
-use crate::blob_metadata::CompressionCodec;
+use crate::blob_metadata::{BlobMetadata, CompressionCodec};
use crate::error::{
BlobIndexOutOfBoundSnafu, BlobNotFoundSnafu, DeserializeJsonSnafu, FileKeyNotMatchSnafu,
ReadSnafu, Result, UnsupportedDecompressionSnafu, WriteSnafu,
};
use crate::file_format::reader::{AsyncReader, PuffinFileReader};
+use crate::partial_reader::PartialReader;
use crate::puffin_manager::file_accessor::PuffinFileAccessor;
use crate::puffin_manager::fs_puffin_manager::dir_meta::DirMetadata;
use crate::puffin_manager::stager::{BoxWriter, DirWriterProviderRef, Stager};
-use crate::puffin_manager::PuffinReader;
+use crate::puffin_manager::{BlobGuard, PuffinReader};
/// `FsPuffinReader` is a `PuffinReader` that provides fs readers for puffin files.
pub struct FsPuffinReader<S, F> {
@@ -55,25 +58,51 @@ impl<S, F> FsPuffinReader<S, F> {
#[async_trait]
impl<S, F> PuffinReader for FsPuffinReader<S, F>
where
- S: Stager,
+ S: Stager + 'static,
F: PuffinFileAccessor + Clone,
{
- type Blob = S::Blob;
+ type Blob = Either<RandomReadBlob<F>, S::Blob>;
type Dir = S::Dir;
async fn blob(&self, key: &str) -> Result<Self::Blob> {
- self.stager
- .get_blob(
- self.puffin_file_name.as_str(),
- key,
- Box::new(move |writer| {
- let accessor = self.puffin_file_accessor.clone();
- let puffin_file_name = self.puffin_file_name.clone();
- let key = key.to_string();
- Self::init_blob_to_cache(puffin_file_name, key, writer, accessor)
- }),
- )
- .await
+ let reader = self
+ .puffin_file_accessor
+ .reader(&self.puffin_file_name)
+ .await?;
+ let mut file = PuffinFileReader::new(reader);
+
+ // TODO(zhongzc): cache the metadata.
+ let metadata = file.metadata().await?;
+ let blob_metadata = metadata
+ .blobs
+ .into_iter()
+ .find(|m| m.blob_type == key)
+ .context(BlobNotFoundSnafu { blob: key })?;
+
+ let blob = if blob_metadata.compression_codec.is_none() {
+ // If the blob is not compressed, we can directly read it from the puffin file.
+ Either::L(RandomReadBlob {
+ file_name: self.puffin_file_name.clone(),
+ accessor: self.puffin_file_accessor.clone(),
+ blob_metadata,
+ })
+ } else {
+ // If the blob is compressed, we need to decompress it into staging space before reading.
+ let staged_blob = self
+ .stager
+ .get_blob(
+ self.puffin_file_name.as_str(),
+ key,
+ Box::new(|writer| {
+ Box::pin(Self::init_blob_to_stager(file, blob_metadata, writer))
+ }),
+ )
+ .await?;
+
+ Either::R(staged_blob)
+ };
+
+ Ok(blob)
}
async fn dir(&self, key: &str) -> Result<Self::Dir> {
@@ -85,7 +114,12 @@ where
let accessor = self.puffin_file_accessor.clone();
let puffin_file_name = self.puffin_file_name.clone();
let key = key.to_string();
- Self::init_dir_to_cache(puffin_file_name, key, writer_provider, accessor)
+ Box::pin(Self::init_dir_to_stager(
+ puffin_file_name,
+ key,
+ writer_provider,
+ accessor,
+ ))
}),
)
.await
@@ -97,79 +131,63 @@ where
S: Stager,
F: PuffinFileAccessor,
{
- fn init_blob_to_cache(
- puffin_file_name: String,
- key: String,
+ async fn init_blob_to_stager(
+ mut reader: PuffinFileReader<F::Reader>,
+ blob_metadata: BlobMetadata,
mut writer: BoxWriter,
- accessor: F,
- ) -> BoxFuture<'static, Result<u64>> {
- Box::pin(async move {
- let reader = accessor.reader(&puffin_file_name).await?;
- let mut file = PuffinFileReader::new(reader);
-
- let metadata = file.metadata().await?;
- let blob_metadata = metadata
- .blobs
- .iter()
- .find(|m| m.blob_type == key.as_str())
- .context(BlobNotFoundSnafu { blob: key })?;
- let reader = file.blob_reader(blob_metadata)?;
-
- let compression = blob_metadata.compression_codec;
- let size = Self::handle_decompress(reader, &mut writer, compression).await?;
-
- Ok(size)
- })
+ ) -> Result<u64> {
+ let reader = reader.blob_reader(&blob_metadata)?;
+ let compression = blob_metadata.compression_codec;
+ let size = Self::handle_decompress(reader, &mut writer, compression).await?;
+ Ok(size)
}
- fn init_dir_to_cache(
+ async fn init_dir_to_stager(
puffin_file_name: String,
key: String,
writer_provider: DirWriterProviderRef,
accessor: F,
- ) -> BoxFuture<'static, Result<u64>> {
- Box::pin(async move {
- let reader = accessor.reader(&puffin_file_name).await?;
- let mut file = PuffinFileReader::new(reader);
-
- let puffin_metadata = file.metadata().await?;
- let blob_metadata = puffin_metadata
- .blobs
- .iter()
- .find(|m| m.blob_type == key.as_str())
- .context(BlobNotFoundSnafu { blob: key })?;
-
- let mut reader = file.blob_reader(blob_metadata)?;
- let mut buf = vec![];
- reader.read_to_end(&mut buf).await.context(ReadSnafu)?;
- let dir_meta: DirMetadata =
- serde_json::from_slice(buf.as_slice()).context(DeserializeJsonSnafu)?;
-
- let mut size = 0;
- for file_meta in dir_meta.files {
- let blob_meta = puffin_metadata.blobs.get(file_meta.blob_index).context(
- BlobIndexOutOfBoundSnafu {
- index: file_meta.blob_index,
- max_index: puffin_metadata.blobs.len(),
- },
- )?;
- ensure!(
- blob_meta.blob_type == file_meta.key,
- FileKeyNotMatchSnafu {
- expected: file_meta.key,
- actual: &blob_meta.blob_type,
- }
- );
-
- let reader = file.blob_reader(blob_meta)?;
- let writer = writer_provider.writer(&file_meta.relative_path).await?;
-
- let compression = blob_meta.compression_codec;
- size += Self::handle_decompress(reader, writer, compression).await?;
- }
+ ) -> Result<u64> {
+ let reader = accessor.reader(&puffin_file_name).await?;
+ let mut file = PuffinFileReader::new(reader);
+
+ let puffin_metadata = file.metadata().await?;
+ let blob_metadata = puffin_metadata
+ .blobs
+ .iter()
+ .find(|m| m.blob_type == key.as_str())
+ .context(BlobNotFoundSnafu { blob: key })?;
- Ok(size)
- })
+ let mut reader = file.blob_reader(blob_metadata)?;
+ let mut buf = vec![];
+ reader.read_to_end(&mut buf).await.context(ReadSnafu)?;
+ let dir_meta: DirMetadata =
+ serde_json::from_slice(buf.as_slice()).context(DeserializeJsonSnafu)?;
+
+ let mut size = 0;
+ for file_meta in dir_meta.files {
+ let blob_meta = puffin_metadata.blobs.get(file_meta.blob_index).context(
+ BlobIndexOutOfBoundSnafu {
+ index: file_meta.blob_index,
+ max_index: puffin_metadata.blobs.len(),
+ },
+ )?;
+ ensure!(
+ blob_meta.blob_type == file_meta.key,
+ FileKeyNotMatchSnafu {
+ expected: file_meta.key,
+ actual: &blob_meta.blob_type,
+ }
+ );
+
+ let reader = file.blob_reader(blob_meta)?;
+ let writer = writer_provider.writer(&file_meta.relative_path).await?;
+
+ let compression = blob_meta.compression_codec;
+ size += Self::handle_decompress(reader, writer, compression).await?;
+ }
+
+ Ok(size)
}
/// Handles the decompression of the reader and writes the decompressed data to the writer.
@@ -196,3 +214,87 @@ where
}
}
}
+
+/// `RandomReadBlob` is a `BlobGuard` that directly reads the blob from the puffin file.
+pub struct RandomReadBlob<F> {
+ file_name: String,
+ accessor: F,
+ blob_metadata: BlobMetadata,
+}
+
+#[async_trait]
+impl<F: PuffinFileAccessor + Clone> BlobGuard for RandomReadBlob<F> {
+ type Reader = PartialReader<F::Reader>;
+
+ async fn reader(&self) -> Result<Self::Reader> {
+ ensure!(
+ self.blob_metadata.compression_codec.is_none(),
+ UnsupportedDecompressionSnafu {
+ decompression: self.blob_metadata.compression_codec.unwrap().to_string()
+ }
+ );
+
+ let reader = self.accessor.reader(&self.file_name).await?;
+ let blob_reader = PuffinFileReader::new(reader).into_blob_reader(&self.blob_metadata);
+ Ok(blob_reader)
+ }
+}
+
+/// `Either` is a type that represents either `A` or `B`.
+///
+/// Used to:
+/// impl `AsyncRead + AsyncSeek` for `Either<A: AsyncRead + AsyncSeek, B: AsyncRead + AsyncSeek>`,
+/// impl `BlobGuard` for `Either<A: BlobGuard, B: BlobGuard>`.
+pub enum Either<A, B> {
+ L(A),
+ R(B),
+}
+
+impl<A, B> AsyncRead for Either<A, B>
+where
+ A: AsyncRead + Unpin,
+ B: AsyncRead + Unpin,
+{
+ fn poll_read(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &mut [u8],
+ ) -> Poll<std::io::Result<usize>> {
+ match self.get_mut() {
+ Either::L(a) => Pin::new(a).poll_read(cx, buf),
+ Either::R(b) => Pin::new(b).poll_read(cx, buf),
+ }
+ }
+}
+
+impl<A, B> AsyncSeek for Either<A, B>
+where
+ A: AsyncSeek + Unpin,
+ B: AsyncSeek + Unpin,
+{
+ fn poll_seek(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ pos: std::io::SeekFrom,
+ ) -> Poll<std::io::Result<u64>> {
+ match self.get_mut() {
+ Either::L(a) => Pin::new(a).poll_seek(cx, pos),
+ Either::R(b) => Pin::new(b).poll_seek(cx, pos),
+ }
+ }
+}
+
+#[async_trait]
+impl<A, B> BlobGuard for Either<A, B>
+where
+ A: BlobGuard + Sync,
+ B: BlobGuard + Sync,
+{
+ type Reader = Either<A::Reader, B::Reader>;
+ async fn reader(&self) -> Result<Self::Reader> {
+ match self {
+ Either::L(a) => Ok(Either::L(a.reader().await?)),
+ Either::R(b) => Ok(Either::R(b.reader().await?)),
+ }
+ }
+}
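
A minimal, self-contained sketch of the delegation pattern this `Either` guard relies on (assuming only the `futures` crate; `EitherReader` and the cursor data are illustrative, not the crate's own types): the caller sees a single reader type whether the bytes come straight from the puffin file or from a staged copy.

```rust
use std::pin::Pin;
use std::task::{Context, Poll};

use futures::executor::block_on;
use futures::io::Cursor;
use futures::{AsyncRead, AsyncReadExt};

/// Forwards `AsyncRead` to whichever variant is active.
enum EitherReader<A, B> {
    L(A),
    R(B),
}

impl<A, B> AsyncRead for EitherReader<A, B>
where
    A: AsyncRead + Unpin,
    B: AsyncRead + Unpin,
{
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut [u8],
    ) -> Poll<std::io::Result<usize>> {
        match self.get_mut() {
            EitherReader::L(a) => Pin::new(a).poll_read(cx, buf),
            EitherReader::R(b) => Pin::new(b).poll_read(cx, buf),
        }
    }
}

fn main() -> std::io::Result<()> {
    // Pretend the "uncompressed" branch reads the file directly
    // while the "compressed" branch reads from a staged copy.
    let use_staged = false;
    let mut reader: EitherReader<Cursor<&[u8]>, Cursor<&[u8]>> = if use_staged {
        EitherReader::R(Cursor::new(b"staged bytes".as_slice()))
    } else {
        EitherReader::L(Cursor::new(b"raw blob bytes".as_slice()))
    };

    let mut buf = Vec::new();
    block_on(reader.read_to_end(&mut buf))?;
    println!("{}", String::from_utf8_lossy(&buf));
    Ok(())
}
```

The same dispatch-by-variant approach extends to `AsyncSeek` and to the `BlobGuard` impl shown above.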
diff --git a/src/puffin/src/puffin_manager/stager.rs b/src/puffin/src/puffin_manager/stager.rs
index 396dd69ba222..6e1581cddbb5 100644
--- a/src/puffin/src/puffin_manager/stager.rs
+++ b/src/puffin/src/puffin_manager/stager.rs
@@ -42,19 +42,19 @@ pub type DirWriterProviderRef = Box<dyn DirWriterProvider + Send>;
///
/// `Stager` will provide a `BoxWriter` that the caller of `get_blob`
/// can use to write the blob into the staging area.
-pub trait InitBlobFn = Fn(BoxWriter) -> WriteResult;
+pub trait InitBlobFn = FnOnce(BoxWriter) -> WriteResult;
/// Function that initializes a directory.
///
/// `Stager` will provide a `DirWriterProvider` that the caller of `get_dir`
/// can use to write files inside the directory into the staging area.
-pub trait InitDirFn = Fn(DirWriterProviderRef) -> WriteResult;
+pub trait InitDirFn = FnOnce(DirWriterProviderRef) -> WriteResult;
/// `Stager` manages the staging area for the puffin files.
#[async_trait]
#[auto_impl::auto_impl(Arc)]
pub trait Stager: Send + Sync {
- type Blob: BlobGuard;
+ type Blob: BlobGuard + Sync;
type Dir: DirGuard;
/// Retrieves a blob, initializing it if necessary using the provided `init_fn`.
diff --git a/src/puffin/src/puffin_manager/stager/bounded_stager.rs b/src/puffin/src/puffin_manager/stager/bounded_stager.rs
index 1f1aaa2c01de..9294497a062a 100644
--- a/src/puffin/src/puffin_manager/stager/bounded_stager.rs
+++ b/src/puffin/src/puffin_manager/stager/bounded_stager.rs
@@ -22,7 +22,6 @@ use async_walkdir::{Filtering, WalkDir};
use base64::prelude::BASE64_URL_SAFE;
use base64::Engine;
use common_telemetry::{info, warn};
-use futures::future::BoxFuture;
use futures::{FutureExt, StreamExt};
use moka::future::Cache;
use sha2::{Digest, Sha256};
@@ -128,7 +127,7 @@ impl Stager for BoundedStager {
let file_name = format!("{}.{}", cache_key, uuid::Uuid::new_v4());
let path = self.base_dir.join(&file_name);
- let size = Self::write_blob(&path, &init_fn).await?;
+ let size = Self::write_blob(&path, init_fn).await?;
let guard = Arc::new(FsBlobGuard {
path,
@@ -163,7 +162,7 @@ impl Stager for BoundedStager {
let dir_name = format!("{}.{}", cache_key, uuid::Uuid::new_v4());
let path = self.base_dir.join(&dir_name);
- let size = Self::write_dir(&path, &init_fn).await?;
+ let size = Self::write_dir(&path, init_fn).await?;
let guard = Arc::new(FsDirGuard {
path,
@@ -225,7 +224,7 @@ impl BoundedStager {
async fn write_blob(
target_path: &PathBuf,
- init_fn: &(dyn InitBlobFn + Send + Sync + '_),
+ init_fn: Box<dyn InitBlobFn + Send + Sync + '_>,
) -> Result<u64> {
// To guarantee the atomicity of writing the file, we need to write
// the file to a temporary file first...
@@ -247,7 +246,7 @@ impl BoundedStager {
async fn write_dir(
target_path: &PathBuf,
- init_fn: &(dyn InitDirFn + Send + Sync + '_),
+ init_fn: Box<dyn InitDirFn + Send + Sync + '_>,
) -> Result<u64> {
// To guarantee the atomicity of writing the directory, we need to write
// the directory to a temporary directory first...
@@ -425,16 +424,13 @@ pub struct FsBlobGuard {
delete_queue: Sender<DeleteTask>,
}
+#[async_trait]
impl BlobGuard for FsBlobGuard {
type Reader = Compat<fs::File>;
- fn reader(&self) -> BoxFuture<'static, Result<Self::Reader>> {
- let path = self.path.clone();
- async move {
- let file = fs::File::open(&path).await.context(OpenSnafu)?;
- Ok(file.compat())
- }
- .boxed()
+ async fn reader(&self) -> Result<Self::Reader> {
+ let file = fs::File::open(&self.path).await.context(OpenSnafu)?;
+ Ok(file.compat())
}
}
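
Several traits in this change move from hand-rolled `BoxFuture` returns to `#[async_trait]` methods, as `FsBlobGuard` does above. The following sketch contrasts the two shapes under illustrative names (`GuardBoxed`, `GuardAsync`, `Guard`) rather than the crate's own types, assuming the `async-trait` and `futures` crates.

```rust
use async_trait::async_trait;
use futures::future::BoxFuture;
use futures::FutureExt;

/// Pre-change shape: every implementation boxes its own future.
trait GuardBoxed {
    fn reader(&self) -> BoxFuture<'static, String>;
}

/// Post-change shape: `#[async_trait]` does the boxing.
#[async_trait]
trait GuardAsync {
    async fn reader(&self) -> String;
}

struct Guard {
    name: String,
}

impl GuardBoxed for Guard {
    fn reader(&self) -> BoxFuture<'static, String> {
        let name = self.name.clone();
        async move { format!("reader for {name}") }.boxed()
    }
}

#[async_trait]
impl GuardAsync for Guard {
    // The body stays plain async code; no manual cloning or boxing.
    async fn reader(&self) -> String {
        format!("reader for {}", self.name)
    }
}

fn main() {
    let guard = Guard { name: "blob-a".to_string() };
    let via_boxed = futures::executor::block_on(GuardBoxed::reader(&guard));
    let via_async = futures::executor::block_on(GuardAsync::reader(&guard));
    assert_eq!(via_boxed, via_async);
    println!("{via_async}");
}
```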
diff --git a/src/puffin/src/puffin_manager/tests.rs b/src/puffin/src/puffin_manager/tests.rs
index dc106d746182..02073522bece 100644
--- a/src/puffin/src/puffin_manager/tests.rs
+++ b/src/puffin/src/puffin_manager/tests.rs
@@ -62,14 +62,30 @@ async fn test_put_get_file() {
writer.finish().await.unwrap();
let reader = puffin_manager.reader(puffin_file_name).await.unwrap();
- check_blob(puffin_file_name, key, raw_data, &stager, &reader).await;
+ check_blob(
+ puffin_file_name,
+ key,
+ raw_data,
+ &stager,
+ &reader,
+ compression_codec.is_some(),
+ )
+ .await;
// renew cache manager
let (_staging_dir, stager) = new_bounded_stager("test_put_get_file_", capacity).await;
let puffin_manager = FsPuffinManager::new(stager.clone(), file_accessor);
let reader = puffin_manager.reader(puffin_file_name).await.unwrap();
- check_blob(puffin_file_name, key, raw_data, &stager, &reader).await;
+ check_blob(
+ puffin_file_name,
+ key,
+ raw_data,
+ &stager,
+ &reader,
+ compression_codec.is_some(),
+ )
+ .await;
}
}
}
@@ -106,7 +122,15 @@ async fn test_put_get_files() {
let reader = puffin_manager.reader(puffin_file_name).await.unwrap();
for (key, raw_data) in &blobs {
- check_blob(puffin_file_name, key, raw_data, &stager, &reader).await;
+ check_blob(
+ puffin_file_name,
+ key,
+ raw_data,
+ &stager,
+ &reader,
+ compression_codec.is_some(),
+ )
+ .await;
}
// renew cache manager
@@ -114,7 +138,15 @@ async fn test_put_get_files() {
let puffin_manager = FsPuffinManager::new(stager.clone(), file_accessor);
let reader = puffin_manager.reader(puffin_file_name).await.unwrap();
for (key, raw_data) in &blobs {
- check_blob(puffin_file_name, key, raw_data, &stager, &reader).await;
+ check_blob(
+ puffin_file_name,
+ key,
+ raw_data,
+ &stager,
+ &reader,
+ compression_codec.is_some(),
+ )
+ .await;
}
}
}
@@ -205,7 +237,15 @@ async fn test_put_get_mix_file_dir() {
let reader = puffin_manager.reader(puffin_file_name).await.unwrap();
for (key, raw_data) in &blobs {
- check_blob(puffin_file_name, key, raw_data, &stager, &reader).await;
+ check_blob(
+ puffin_file_name,
+ key,
+ raw_data,
+ &stager,
+ &reader,
+ compression_codec.is_some(),
+ )
+ .await;
}
check_dir(puffin_file_name, dir_key, &files_in_dir, &stager, &reader).await;
@@ -216,7 +256,15 @@ async fn test_put_get_mix_file_dir() {
let reader = puffin_manager.reader(puffin_file_name).await.unwrap();
for (key, raw_data) in &blobs {
- check_blob(puffin_file_name, key, raw_data, &stager, &reader).await;
+ check_blob(
+ puffin_file_name,
+ key,
+ raw_data,
+ &stager,
+ &reader,
+ compression_codec.is_some(),
+ )
+ .await;
}
check_dir(puffin_file_name, dir_key, &files_in_dir, &stager, &reader).await;
}
@@ -241,24 +289,28 @@ async fn put_blob(
.unwrap();
}
-async fn check_blob<R>(
+async fn check_blob(
puffin_file_name: &str,
key: &str,
raw_data: &[u8],
stager: &BoundedStager,
- puffin_reader: &R,
-) where
- R: PuffinReader,
-{
+ puffin_reader: &impl PuffinReader,
+ compressed: bool,
+) {
let blob = puffin_reader.blob(key).await.unwrap();
let mut reader = blob.reader().await.unwrap();
let mut buf = Vec::new();
reader.read_to_end(&mut buf).await.unwrap();
assert_eq!(buf, raw_data);
- let mut cached_file = stager.must_get_file(puffin_file_name, key).await;
+ if !compressed {
+ // If the blob is not compressed, it won't exist in the stager.
+ return;
+ }
+
+ let mut staged_file = stager.must_get_file(puffin_file_name, key).await;
let mut buf = Vec::new();
- cached_file.read_to_end(&mut buf).await.unwrap();
+ staged_file.read_to_end(&mut buf).await.unwrap();
assert_eq!(buf, raw_data);
}
@@ -291,15 +343,13 @@ async fn put_dir(
.unwrap();
}
-async fn check_dir<R>(
+async fn check_dir(
puffin_file_name: &str,
key: &str,
files_in_dir: &[(&str, &[u8])],
stager: &BoundedStager,
- puffin_reader: &R,
-) where
- R: PuffinReader,
-{
+ puffin_reader: &impl PuffinReader,
+) {
let res_dir = puffin_reader.dir(key).await.unwrap();
for (file_name, raw_data) in files_in_dir {
let file_path = if cfg!(windows) {
@@ -311,12 +361,12 @@ async fn check_dir<R>(
assert_eq!(buf, *raw_data);
}
- let cached_dir = stager.must_get_dir(puffin_file_name, key).await;
+ let staged_dir = stager.must_get_dir(puffin_file_name, key).await;
for (file_name, raw_data) in files_in_dir {
let file_path = if cfg!(windows) {
- cached_dir.as_path().join(file_name.replace('/', "\\"))
+ staged_dir.as_path().join(file_name.replace('/', "\\"))
} else {
- cached_dir.as_path().join(file_name)
+ staged_dir.as_path().join(file_name)
};
let buf = std::fs::read(file_path).unwrap();
assert_eq!(buf, *raw_data);
diff --git a/tests-integration/tests/http.rs b/tests-integration/tests/http.rs
index 96d9316f5549..9a7d98279034 100644
--- a/tests-integration/tests/http.rs
+++ b/tests-integration/tests/http.rs
@@ -838,7 +838,6 @@ create_on_flush = "auto"
create_on_compaction = "auto"
apply_on_query = "auto"
mem_threshold_on_create = "auto"
-compress = true
metadata_cache_size = "32MiB"
content_cache_size = "32MiB"
|
perf
|
not to stage uncompressed blob (#4333)
|
50b521c526d8f6f02ee4467c587b14d956a337f2
|
2025-02-27 21:24:48
|
xiaoniaoyouhuajiang
|
feat: add `vec_dim` function (#5587)
| false
|
diff --git a/src/common/function/src/scalars/vector.rs b/src/common/function/src/scalars/vector.rs
index 90aed7cbd7a5..381c757d9b98 100644
--- a/src/common/function/src/scalars/vector.rs
+++ b/src/common/function/src/scalars/vector.rs
@@ -22,6 +22,7 @@ mod scalar_add;
mod scalar_mul;
pub(crate) mod sum;
mod vector_add;
+mod vector_dim;
mod vector_div;
mod vector_mul;
mod vector_norm;
@@ -54,6 +55,7 @@ impl VectorFunction {
registry.register(Arc::new(vector_mul::VectorMulFunction));
registry.register(Arc::new(vector_div::VectorDivFunction));
registry.register(Arc::new(vector_norm::VectorNormFunction));
+ registry.register(Arc::new(vector_dim::VectorDimFunction));
registry.register(Arc::new(elem_sum::ElemSumFunction));
registry.register(Arc::new(elem_product::ElemProductFunction));
}
diff --git a/src/common/function/src/scalars/vector/vector_dim.rs b/src/common/function/src/scalars/vector/vector_dim.rs
new file mode 100644
index 000000000000..6a7c07810049
--- /dev/null
+++ b/src/common/function/src/scalars/vector/vector_dim.rs
@@ -0,0 +1,172 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::borrow::Cow;
+use std::fmt::Display;
+
+use common_query::error::InvalidFuncArgsSnafu;
+use common_query::prelude::{Signature, TypeSignature, Volatility};
+use datatypes::prelude::ConcreteDataType;
+use datatypes::scalars::ScalarVectorBuilder;
+use datatypes::vectors::{MutableVector, UInt64VectorBuilder, VectorRef};
+use snafu::ensure;
+
+use crate::function::{Function, FunctionContext};
+use crate::scalars::vector::impl_conv::{as_veclit, as_veclit_if_const};
+
+const NAME: &str = "vec_dim";
+
+/// Returns the dimension of the vector.
+///
+/// # Example
+///
+/// ```sql
+/// SELECT vec_dim('[7.0, 8.0, 9.0, 10.0]');
+///
+/// +----------------------------------------+
+/// | vec_dim(Utf8("[7.0, 8.0, 9.0, 10.0]")) |
+/// +----------------------------------------+
+/// | 4                                      |
+/// +----------------------------------------+
+///
+#[derive(Debug, Clone, Default)]
+pub struct VectorDimFunction;
+
+impl Function for VectorDimFunction {
+ fn name(&self) -> &str {
+ NAME
+ }
+
+ fn return_type(
+ &self,
+ _input_types: &[ConcreteDataType],
+ ) -> common_query::error::Result<ConcreteDataType> {
+ Ok(ConcreteDataType::uint64_datatype())
+ }
+
+ fn signature(&self) -> Signature {
+ Signature::one_of(
+ vec![
+ TypeSignature::Exact(vec![ConcreteDataType::string_datatype()]),
+ TypeSignature::Exact(vec![ConcreteDataType::binary_datatype()]),
+ ],
+ Volatility::Immutable,
+ )
+ }
+
+ fn eval(
+ &self,
+ _func_ctx: FunctionContext,
+ columns: &[VectorRef],
+ ) -> common_query::error::Result<VectorRef> {
+ ensure!(
+ columns.len() == 1,
+ InvalidFuncArgsSnafu {
+ err_msg: format!(
+ "The length of the args is not correct, expect exactly one, have: {}",
+ columns.len()
+ )
+ }
+ );
+ let arg0 = &columns[0];
+
+ let len = arg0.len();
+ let mut result = UInt64VectorBuilder::with_capacity(len);
+ if len == 0 {
+ return Ok(result.to_vector());
+ }
+
+ let arg0_const = as_veclit_if_const(arg0)?;
+
+ for i in 0..len {
+ let arg0 = match arg0_const.as_ref() {
+ Some(arg0) => Some(Cow::Borrowed(arg0.as_ref())),
+ None => as_veclit(arg0.get_ref(i))?,
+ };
+ let Some(arg0) = arg0 else {
+ result.push_null();
+ continue;
+ };
+ result.push(Some(arg0.len() as u64));
+ }
+
+ Ok(result.to_vector())
+ }
+}
+
+impl Display for VectorDimFunction {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(f, "{}", NAME.to_ascii_uppercase())
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::sync::Arc;
+
+ use common_query::error::Error;
+ use datatypes::vectors::StringVector;
+
+ use super::*;
+
+ #[test]
+ fn test_vec_dim() {
+ let func = VectorDimFunction;
+
+ let input0 = Arc::new(StringVector::from(vec![
+ Some("[0.0,2.0,3.0]".to_string()),
+ Some("[1.0,2.0,3.0,4.0]".to_string()),
+ None,
+ Some("[5.0]".to_string()),
+ ]));
+
+ let result = func.eval(FunctionContext::default(), &[input0]).unwrap();
+
+ let result = result.as_ref();
+ assert_eq!(result.len(), 4);
+ assert_eq!(result.get_ref(0).as_u64().unwrap(), Some(3));
+ assert_eq!(result.get_ref(1).as_u64().unwrap(), Some(4));
+ assert!(result.get_ref(2).is_null());
+ assert_eq!(result.get_ref(3).as_u64().unwrap(), Some(1));
+ }
+
+ #[test]
+ fn test_dim_error() {
+ let func = VectorDimFunction;
+
+ let input0 = Arc::new(StringVector::from(vec![
+ Some("[1.0,2.0,3.0]".to_string()),
+ Some("[4.0,5.0,6.0]".to_string()),
+ None,
+ Some("[2.0,3.0,3.0]".to_string()),
+ ]));
+ let input1 = Arc::new(StringVector::from(vec![
+ Some("[1.0,1.0,1.0]".to_string()),
+ Some("[6.0,5.0,4.0]".to_string()),
+ Some("[3.0,2.0,2.0]".to_string()),
+ ]));
+
+ let result = func.eval(FunctionContext::default(), &[input0, input1]);
+
+ match result {
+ Err(Error::InvalidFuncArgs { err_msg, .. }) => {
+ assert_eq!(
+ err_msg,
+ "The length of the args is not correct, expect exactly one, have: 2"
+ )
+ }
+ _ => unreachable!(),
+ }
+ }
+}
diff --git a/tests/cases/standalone/common/function/vector/vector.result b/tests/cases/standalone/common/function/vector/vector.result
index 1b81fa98b087..7f40c73636bb 100644
--- a/tests/cases/standalone/common/function/vector/vector.result
+++ b/tests/cases/standalone/common/function/vector/vector.result
@@ -284,3 +284,45 @@ FROM (
| [-4,-20,-54] |
+-------------------------------+
+SELECT vec_dim('[7.0, 8.0, 9.0, 10.0]');
+
++----------------------------------------+
+| vec_dim(Utf8("[7.0, 8.0, 9.0, 10.0]")) |
++----------------------------------------+
+| 4 |
++----------------------------------------+
+
+SELECT v, vec_dim(v)
+FROM (
+ SELECT '[1.0, 2.0, 3.0]' AS v
+ UNION ALL
+ SELECT '[-1.0]' AS v
+ UNION ALL
+ SELECT '[4.0, 5.0, 6.0]' AS v
+ ) Order By vec_dim(v) ASC;
+
++-----------------+------------+
+| v | vec_dim(v) |
++-----------------+------------+
+| [-1.0] | 1 |
+| [1.0, 2.0, 3.0] | 3 |
+| [4.0, 5.0, 6.0] | 3 |
++-----------------+------------+
+
+SELECT v, vec_dim(v)
+FROM (
+ SELECT '[1.0, 2.0, 3.0]' AS v
+ UNION ALL
+ SELECT '[-1.0]' AS v
+ UNION ALL
+ SELECT '[7.0, 8.0, 9.0, 10.0]' AS v
+ ) Order By vec_dim(v) ASC;
+
++-----------------------+------------+
+| v | vec_dim(v) |
++-----------------------+------------+
+| [-1.0] | 1 |
+| [1.0, 2.0, 3.0] | 3 |
+| [7.0, 8.0, 9.0, 10.0] | 4 |
++-----------------------+------------+
+
diff --git a/tests/cases/standalone/common/function/vector/vector.sql b/tests/cases/standalone/common/function/vector/vector.sql
index 49c8e88f2834..b53b6af453fb 100644
--- a/tests/cases/standalone/common/function/vector/vector.sql
+++ b/tests/cases/standalone/common/function/vector/vector.sql
@@ -79,3 +79,23 @@ FROM (
UNION ALL
SELECT '[4.0, 5.0, 6.0]' AS v
);
+
+SELECT vec_dim('[7.0, 8.0, 9.0, 10.0]');
+
+SELECT v, vec_dim(v)
+FROM (
+ SELECT '[1.0, 2.0, 3.0]' AS v
+ UNION ALL
+ SELECT '[-1.0]' AS v
+ UNION ALL
+ SELECT '[4.0, 5.0, 6.0]' AS v
+ ) Order By vec_dim(v) ASC;
+
+SELECT v, vec_dim(v)
+FROM (
+ SELECT '[1.0, 2.0, 3.0]' AS v
+ UNION ALL
+ SELECT '[-1.0]' AS v
+ UNION ALL
+ SELECT '[7.0, 8.0, 9.0, 10.0]' AS v
+ ) Order By vec_dim(v) ASC;
|
feat
|
add `vec_dim` function (#5587)
|
c839ed271c09b5ca2090a93151561f2cd32f8de5
|
2023-09-12 18:27:15
|
Weny Xu
|
refactor: refactor: ddl context (#2301)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 5248a61d4ec6..fa2584dab244 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1517,6 +1517,7 @@ dependencies = [
"api",
"arrow-flight",
"async-stream",
+ "async-trait",
"common-base",
"common-catalog",
"common-error",
diff --git a/src/client/Cargo.toml b/src/client/Cargo.toml
index aa37d26a3d6a..b77f515b973a 100644
--- a/src/client/Cargo.toml
+++ b/src/client/Cargo.toml
@@ -11,6 +11,7 @@ testing = []
api = { workspace = true }
arrow-flight.workspace = true
async-stream.workspace = true
+async-trait.workspace = true
common-base = { workspace = true }
common-catalog = { workspace = true }
common-error = { workspace = true }
diff --git a/src/client/src/client_manager.rs b/src/client/src/client_manager.rs
index eeda1510c8ee..e503555dd2a6 100644
--- a/src/client/src/client_manager.rs
+++ b/src/client/src/client_manager.rs
@@ -13,12 +13,15 @@
// limitations under the License.
use std::fmt::{Debug, Formatter};
+use std::sync::Arc;
use std::time::Duration;
use common_grpc::channel_manager::{ChannelConfig, ChannelManager};
+use common_meta::datanode_manager::{Datanode, DatanodeManager};
use common_meta::peer::Peer;
use moka::future::{Cache, CacheBuilder};
+use crate::region::RegionRequester;
use crate::Client;
pub struct DatanodeClients {
@@ -40,6 +43,15 @@ impl Debug for DatanodeClients {
}
}
+#[async_trait::async_trait]
+impl DatanodeManager for DatanodeClients {
+ async fn datanode(&self, datanode: &Peer) -> Arc<dyn Datanode> {
+ let client = self.get_client(datanode).await;
+
+ Arc::new(RegionRequester::new(client))
+ }
+}
+
impl DatanodeClients {
pub fn new(config: ChannelConfig) -> Self {
Self {
diff --git a/src/client/src/region.rs b/src/client/src/region.rs
index bef7404b1ba0..2ebb80ae551f 100644
--- a/src/client/src/region.rs
+++ b/src/client/src/region.rs
@@ -14,15 +14,18 @@
use api::v1::region::{region_request, RegionRequest, RegionRequestHeader, RegionResponse};
use api::v1::ResponseHeader;
+use async_trait::async_trait;
+use common_error::ext::BoxedError;
use common_error::status_code::StatusCode;
+use common_meta::datanode_manager::{AffectedRows, Datanode};
+use common_meta::error::{self as meta_error, Result as MetaResult};
use common_telemetry::timer;
-use snafu::OptionExt;
+use snafu::{location, Location, OptionExt};
+use crate::error::Error::FlightGet;
use crate::error::{IllegalDatabaseResponseSnafu, Result, ServerSnafu};
use crate::{metrics, Client};
-type AffectedRows = u64;
-
#[derive(Debug)]
pub struct RegionRequester {
trace_id: u64,
@@ -30,6 +33,24 @@ pub struct RegionRequester {
client: Client,
}
+#[async_trait]
+impl Datanode for RegionRequester {
+ async fn handle(&self, request: region_request::Body) -> MetaResult<AffectedRows> {
+ self.handle_inner(request).await.map_err(|err| {
+ if matches!(err, FlightGet { .. }) {
+ meta_error::Error::RetryLater {
+ source: BoxedError::new(err),
+ }
+ } else {
+ meta_error::Error::OperateRegion {
+ source: BoxedError::new(err),
+ location: location!(),
+ }
+ }
+ })
+ }
+}
+
impl RegionRequester {
pub fn new(client: Client) -> Self {
// TODO(LFC): Pass in trace_id and span_id from some context when we have it.
@@ -40,7 +61,7 @@ impl RegionRequester {
}
}
- pub async fn handle(self, request: region_request::Body) -> Result<AffectedRows> {
+ async fn handle_inner(&self, request: region_request::Body) -> Result<AffectedRows> {
let request_type = request.as_ref().to_string();
let request = RegionRequest {
@@ -67,6 +88,10 @@ impl RegionRequester {
Ok(affected_rows)
}
+
+ pub async fn handle(&self, request: region_request::Body) -> Result<AffectedRows> {
+ self.handle_inner(request).await
+ }
}
fn check_response_header(header: Option<ResponseHeader>) -> Result<()> {
diff --git a/src/common/meta/src/cache_invalidator.rs b/src/common/meta/src/cache_invalidator.rs
new file mode 100644
index 000000000000..993ac3b098f2
--- /dev/null
+++ b/src/common/meta/src/cache_invalidator.rs
@@ -0,0 +1,31 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::sync::Arc;
+
+use crate::error::Result;
+use crate::ident::TableIdent;
+
+/// Places context of invalidating cache. e.g., span id, trace id etc.
+pub struct Context {
+ pub subject: Option<String>,
+}
+
+#[async_trait::async_trait]
+pub trait CacheInvalidator: Send + Sync {
+ // Invalidates table cache
+ async fn invalidate_table(&self, ctx: &Context, table_ident: TableIdent) -> Result<()>;
+}
+
+pub type CacheInvalidatorRef = Arc<dyn CacheInvalidator>;
diff --git a/src/common/meta/src/datanode_manager.rs b/src/common/meta/src/datanode_manager.rs
new file mode 100644
index 000000000000..18e70f94c979
--- /dev/null
+++ b/src/common/meta/src/datanode_manager.rs
@@ -0,0 +1,38 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::sync::Arc;
+
+use api::v1::region::region_request;
+
+use crate::error::Result;
+use crate::peer::Peer;
+
+pub type AffectedRows = u64;
+
+#[async_trait::async_trait]
+pub trait Datanode: Send + Sync {
+ /// Handles DML and DDL requests.
+ async fn handle(&self, request: region_request::Body) -> Result<AffectedRows>;
+}
+
+pub type DatanodeRef = Arc<dyn Datanode>;
+
+#[async_trait::async_trait]
+pub trait DatanodeManager: Send + Sync {
+ /// Retrieves a target `datanode`.
+ async fn datanode(&self, datanode: &Peer) -> DatanodeRef;
+}
+
+pub type DatanodeManagerRef = Arc<dyn DatanodeManager>;
diff --git a/src/common/meta/src/error.rs b/src/common/meta/src/error.rs
index 97a28fe084c2..4662f28d2022 100644
--- a/src/common/meta/src/error.rs
+++ b/src/common/meta/src/error.rs
@@ -150,6 +150,15 @@ pub enum Error {
#[snafu(display("Invalid heartbeat response, location: {}", location))]
InvalidHeartbeatResponse { location: Location },
+
+ #[snafu(display("{}", source))]
+ OperateRegion {
+ location: Location,
+ source: BoxedError,
+ },
+
+ #[snafu(display("Retry later, source: {}", source))]
+ RetryLater { source: BoxedError },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -185,8 +194,9 @@ impl ErrorExt for Error {
| ConvertRawKey { .. }
| DecodeProto { .. } => StatusCode::Unexpected,
+ RetryLater { source, .. } => source.status_code(),
+ OperateRegion { source, .. } => source.status_code(),
MetaSrv { source, .. } => source.status_code(),
-
InvalidCatalogValue { source, .. } => source.status_code(),
}
}
@@ -195,3 +205,17 @@ impl ErrorExt for Error {
self
}
}
+
+impl Error {
+ /// Creates a new [Error::RetryLater] error from source `err`.
+ pub fn retry_later<E: ErrorExt + Send + Sync + 'static>(err: E) -> Error {
+ Error::RetryLater {
+ source: BoxedError::new(err),
+ }
+ }
+
+ /// Determines whether this error is a retry-later error.
+ pub fn is_retry_later(&self) -> bool {
+ matches!(self, Error::RetryLater { .. })
+ }
+}
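
To illustrate the intended call-site behaviour of the retry classification added here, a standalone sketch follows; `OpError` and `submit` are made-up names, not types from `common_meta`.

```rust
use std::time::Duration;

// Made-up error type mirroring the retry-later / non-retryable split.
#[derive(Debug)]
#[allow(dead_code)]
enum OpError {
    RetryLater(String),
    Fatal(String),
}

impl OpError {
    // Counterpart of `Error::is_retry_later` above.
    fn is_retry_later(&self) -> bool {
        matches!(self, OpError::RetryLater(_))
    }
}

// Pretend the first two attempts hit a transient condition.
fn submit(attempt: u32) -> Result<(), OpError> {
    if attempt < 2 {
        Err(OpError::RetryLater("datanode busy".to_string()))
    } else {
        Ok(())
    }
}

fn main() {
    for attempt in 0..5 {
        match submit(attempt) {
            Ok(()) => {
                println!("succeeded on attempt {attempt}");
                break;
            }
            Err(e) if e.is_retry_later() => {
                // Transient failure: back off briefly and try again.
                std::thread::sleep(Duration::from_millis(100));
            }
            Err(e) => {
                eprintln!("fatal error, giving up: {e:?}");
                break;
            }
        }
    }
}
```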
diff --git a/src/common/meta/src/lib.rs b/src/common/meta/src/lib.rs
index e052afeb2a46..f3df2c74eb2a 100644
--- a/src/common/meta/src/lib.rs
+++ b/src/common/meta/src/lib.rs
@@ -14,6 +14,8 @@
#![feature(btree_extract_if)]
+pub mod cache_invalidator;
+pub mod datanode_manager;
pub mod error;
pub mod heartbeat;
// TODO(weny): Removes it
diff --git a/src/meta-srv/src/cache_invalidator.rs b/src/meta-srv/src/cache_invalidator.rs
new file mode 100644
index 000000000000..be3cab10ec74
--- /dev/null
+++ b/src/meta-srv/src/cache_invalidator.rs
@@ -0,0 +1,64 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use api::v1::meta::MailboxMessage;
+use common_error::ext::BoxedError;
+use common_meta::cache_invalidator::{CacheInvalidator, Context};
+use common_meta::error::{self as meta_error, Result as MetaResult};
+use common_meta::ident::TableIdent;
+use common_meta::instruction::Instruction;
+use snafu::ResultExt;
+
+use crate::metasrv::MetasrvInfo;
+use crate::service::mailbox::{BroadcastChannel, MailboxRef};
+
+const DEFAULT_SUBJECT: &str = "Invalidate table";
+
+pub struct MetasrvCacheInvalidator {
+ mailbox: MailboxRef,
+ // Metasrv infos
+ info: MetasrvInfo,
+}
+
+impl MetasrvCacheInvalidator {
+ pub fn new(mailbox: MailboxRef, info: MetasrvInfo) -> Self {
+ Self { mailbox, info }
+ }
+}
+
+#[async_trait::async_trait]
+impl CacheInvalidator for MetasrvCacheInvalidator {
+ async fn invalidate_table(&self, ctx: &Context, table_ident: TableIdent) -> MetaResult<()> {
+ let instruction = Instruction::InvalidateTableCache(table_ident);
+ let subject = &ctx
+ .subject
+ .clone()
+ .unwrap_or_else(|| DEFAULT_SUBJECT.to_string());
+
+ let msg = &MailboxMessage::json_message(
+ subject,
+ &format!("Metasrv@{}", self.info.server_addr),
+ "Frontend broadcast",
+ common_time::util::current_time_millis(),
+ &instruction,
+ )
+ .with_context(|_| meta_error::SerdeJsonSnafu)?;
+
+ self.mailbox
+ .broadcast(&BroadcastChannel::Frontend, msg)
+ .await
+ .map_err(BoxedError::new)
+ .context(meta_error::MetaSrvSnafu)
+ }
+}
diff --git a/src/meta-srv/src/ddl.rs b/src/meta-srv/src/ddl.rs
index b7cc19328354..6ca38790aa9d 100644
--- a/src/meta-srv/src/ddl.rs
+++ b/src/meta-srv/src/ddl.rs
@@ -15,6 +15,8 @@
use std::sync::Arc;
use client::client_manager::DatanodeClients;
+use common_meta::cache_invalidator::CacheInvalidatorRef;
+use common_meta::datanode_manager::DatanodeManagerRef;
use common_meta::key::table_info::TableInfoValue;
use common_meta::key::table_route::TableRouteValue;
use common_meta::key::TableMetadataManagerRef;
@@ -32,23 +34,24 @@ use crate::error::{
use crate::procedure::alter_table::AlterTableProcedure;
use crate::procedure::create_table::CreateTableProcedure;
use crate::procedure::drop_table::DropTableProcedure;
-use crate::service::mailbox::MailboxRef;
pub type DdlManagerRef = Arc<DdlManager>;
pub struct DdlManager {
procedure_manager: ProcedureManagerRef,
datanode_clients: Arc<DatanodeClients>,
- pub(crate) mailbox: MailboxRef,
- pub(crate) server_addr: String,
+
+ pub(crate) cache_invalidator: CacheInvalidatorRef,
pub(crate) table_metadata_manager: TableMetadataManagerRef,
}
#[derive(Clone)]
pub(crate) struct DdlContext {
+ // TODO(weny): removes it
pub(crate) datanode_clients: Arc<DatanodeClients>,
- pub(crate) mailbox: MailboxRef,
- pub(crate) server_addr: String,
+
+ pub(crate) datanode_manager: DatanodeManagerRef,
+ pub(crate) cache_invalidator: CacheInvalidatorRef,
pub(crate) table_metadata_manager: TableMetadataManagerRef,
}
@@ -56,24 +59,23 @@ impl DdlManager {
pub(crate) fn new(
procedure_manager: ProcedureManagerRef,
datanode_clients: Arc<DatanodeClients>,
- mailbox: MailboxRef,
- server_addr: String,
+ cache_invalidator: CacheInvalidatorRef,
table_metadata_manager: TableMetadataManagerRef,
) -> Self {
Self {
procedure_manager,
datanode_clients,
- mailbox,
- server_addr,
+ cache_invalidator,
table_metadata_manager,
}
}
pub(crate) fn create_context(&self) -> DdlContext {
DdlContext {
+ datanode_manager: self.datanode_clients.clone(),
datanode_clients: self.datanode_clients.clone(),
- mailbox: self.mailbox.clone(),
- server_addr: self.server_addr.clone(),
+ cache_invalidator: self.cache_invalidator.clone(),
+
table_metadata_manager: self.table_metadata_manager.clone(),
}
}
diff --git a/src/meta-srv/src/error.rs b/src/meta-srv/src/error.rs
index f608bd90b194..03d815c1d63f 100644
--- a/src/meta-srv/src/error.rs
+++ b/src/meta-srv/src/error.rs
@@ -26,6 +26,19 @@ use crate::pubsub::Message;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
pub enum Error {
+ #[snafu(display("Failed to invalidate table cache: {}", source))]
+ InvalidateTableCache {
+ location: Location,
+ source: common_meta::error::Error,
+ },
+
+ #[snafu(display("Failed to operate region on peer:{}, source: {}", peer, source))]
+ OperateRegion {
+ location: Location,
+ peer: Peer,
+ source: BoxedError,
+ },
+
#[snafu(display("Failed to list catalogs: {}", source))]
ListCatalogs {
location: Location,
@@ -595,6 +608,7 @@ impl ErrorExt for Error {
| Error::ConvertRawTableInfo { .. }
| Error::BuildTableMeta { .. } => StatusCode::Unexpected,
Error::TableNotFound { .. } => StatusCode::TableNotFound,
+ Error::InvalidateTableCache { source, .. } => source.status_code(),
Error::Table { source, .. } => source.status_code(),
Error::RequestDatanode { source, .. } => source.status_code(),
Error::InvalidCatalogValue { source, .. } => source.status_code(),
@@ -612,6 +626,7 @@ impl ErrorExt for Error {
Error::RegionFailoverCandidatesNotFound { .. } => StatusCode::RuntimeResourcesExhausted,
Error::RegisterProcedureLoader { source, .. } => source.status_code(),
+ Error::OperateRegion { source, .. } => source.status_code(),
Error::TableRouteConversion { source, .. }
| Error::ConvertProtoData { source, .. }
diff --git a/src/meta-srv/src/lib.rs b/src/meta-srv/src/lib.rs
index 31b0a688883c..a7329e535341 100644
--- a/src/meta-srv/src/lib.rs
+++ b/src/meta-srv/src/lib.rs
@@ -16,6 +16,7 @@
#![feature(result_flattening)]
pub mod bootstrap;
+mod cache_invalidator;
pub mod cluster;
pub mod ddl;
pub mod election;
diff --git a/src/meta-srv/src/metasrv.rs b/src/meta-srv/src/metasrv.rs
index ca3d67dbc457..2f1992bad85b 100644
--- a/src/meta-srv/src/metasrv.rs
+++ b/src/meta-srv/src/metasrv.rs
@@ -97,6 +97,10 @@ impl MetaSrvOptions {
}
}
+pub struct MetasrvInfo {
+ pub server_addr: String,
+}
+
// Options for datanode.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
pub struct DatanodeOptions {
diff --git a/src/meta-srv/src/metasrv/builder.rs b/src/meta-srv/src/metasrv/builder.rs
index c82d711c51af..b027522065ba 100644
--- a/src/meta-srv/src/metasrv/builder.rs
+++ b/src/meta-srv/src/metasrv/builder.rs
@@ -22,6 +22,7 @@ use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
use common_procedure::local::{LocalManager, ManagerConfig};
use common_procedure::ProcedureManagerRef;
+use crate::cache_invalidator::MetasrvCacheInvalidator;
use crate::cluster::{MetaPeerClientBuilder, MetaPeerClientRef};
use crate::ddl::{DdlManager, DdlManagerRef};
use crate::error::Result;
@@ -38,7 +39,7 @@ use crate::lock::memory::MemLock;
use crate::lock::DistLockRef;
use crate::metadata_service::{DefaultMetadataService, MetadataServiceRef};
use crate::metasrv::{
- ElectionRef, MetaSrv, MetaSrvOptions, SelectorContext, SelectorRef, TABLE_ID_SEQ,
+ ElectionRef, MetaSrv, MetaSrvOptions, MetasrvInfo, SelectorContext, SelectorRef, TABLE_ID_SEQ,
};
use crate::procedure::region_failover::RegionFailoverManager;
use crate::procedure::state_store::MetaStateStore;
@@ -329,12 +330,17 @@ fn build_ddl_manager(
.tcp_nodelay(options.datanode.client_options.tcp_nodelay);
Arc::new(DatanodeClients::new(datanode_client_channel_config))
});
+ let cache_invalidator = Arc::new(MetasrvCacheInvalidator::new(
+ mailbox.clone(),
+ MetasrvInfo {
+ server_addr: options.server_addr.clone(),
+ },
+ ));
// TODO(weny): considers to modify the default config of procedure manager
Arc::new(DdlManager::new(
procedure_manager.clone(),
datanode_clients,
- mailbox.clone(),
- options.server_addr.clone(),
+ cache_invalidator,
table_metadata_manager.clone(),
))
}
diff --git a/src/meta-srv/src/procedure/alter_table.rs b/src/meta-srv/src/procedure/alter_table.rs
index 8e5df331817b..916f1ea55161 100644
--- a/src/meta-srv/src/procedure/alter_table.rs
+++ b/src/meta-srv/src/procedure/alter_table.rs
@@ -14,11 +14,10 @@
use std::vec;
-use api::v1::meta::MailboxMessage;
use async_trait::async_trait;
use client::Database;
+use common_meta::cache_invalidator::Context;
use common_meta::ident::TableIdent;
-use common_meta::instruction::Instruction;
use common_meta::key::table_info::TableInfoValue;
use common_meta::key::table_name::TableNameKey;
use common_meta::key::table_route::TableRouteValue;
@@ -40,7 +39,6 @@ use table::requests::{AlterKind, AlterTableRequest};
use crate::ddl::DdlContext;
use crate::error::{self, Result, TableMetadataManagerSnafu};
use crate::procedure::utils::handle_request_datanode_error;
-use crate::service::mailbox::BroadcastChannel;
// TODO(weny): removes in following PRs.
#[allow(dead_code)]
@@ -265,23 +263,18 @@ impl AlterTableProcedure {
table_id: self.data.table_id(),
engine: self.data.table_info().meta.engine.to_string(),
};
- let instruction = Instruction::InvalidateTableCache(table_ident);
-
- let msg = &MailboxMessage::json_message(
- "Invalidate table cache by alter table procedure",
- &format!("Metasrv@{}", self.context.server_addr),
- "Frontend broadcast",
- common_time::util::current_time_millis(),
- &instruction,
- )
- .with_context(|_| error::SerializeToJsonSnafu {
- input: instruction.to_string(),
- })?;
self.context
- .mailbox
- .broadcast(&BroadcastChannel::Frontend, msg)
- .await?;
+ .cache_invalidator
+ .invalidate_table(
+ &Context {
+ subject: Some("Invalidate table cache by alter table procedure".to_string()),
+ },
+ table_ident,
+ )
+ .await
+ .context(error::InvalidateTableCacheSnafu)?;
+
self.data.state = AlterTableState::DatanodeAlterTable;
Ok(Status::executing(true))
}
diff --git a/src/meta-srv/src/procedure/create_table.rs b/src/meta-srv/src/procedure/create_table.rs
index 8fbe4323f983..3b536428da0d 100644
--- a/src/meta-srv/src/procedure/create_table.rs
+++ b/src/meta-srv/src/procedure/create_table.rs
@@ -16,15 +16,9 @@ use api::v1::region::region_request::Body as PbRegionRequest;
use api::v1::region::{ColumnDef, CreateRequest as PbCreateRegionRequest};
use api::v1::SemanticType;
use async_trait::async_trait;
-use client::region::RegionRequester;
-use client::Database;
-use common_catalog::consts::MITO2_ENGINE;
-use common_error::ext::ErrorExt;
-use common_error::status_code::StatusCode;
use common_meta::key::table_name::TableNameKey;
use common_meta::rpc::ddl::CreateTableTask;
use common_meta::rpc::router::{find_leader_regions, find_leaders, RegionRoute};
-use common_meta::table_name::TableName;
use common_procedure::error::{FromJsonSnafu, Result as ProcedureResult, ToJsonSnafu};
use common_procedure::{Context as ProcedureContext, LockKey, Procedure, Status};
use common_telemetry::info;
@@ -36,10 +30,10 @@ use strum::AsRefStr;
use table::engine::TableReference;
use table::metadata::{RawTableInfo, TableId};
-use super::utils::{handle_request_datanode_error, handle_retry_error};
use crate::ddl::DdlContext;
use crate::error::{self, PrimaryKeyNotFoundSnafu, Result, TableMetadataManagerSnafu};
use crate::metrics;
+use crate::procedure::utils::{handle_operate_region_error, handle_retry_error};
pub struct CreateTableProcedure {
context: DdlContext,
@@ -69,10 +63,6 @@ impl CreateTableProcedure {
})
}
- fn table_name(&self) -> TableName {
- self.creator.data.task.table_name()
- }
-
pub fn table_info(&self) -> &RawTableInfo {
&self.creator.data.task.table_info
}
@@ -111,11 +101,7 @@ impl CreateTableProcedure {
return Ok(Status::Done);
}
- self.creator.data.state = if expr.engine == MITO2_ENGINE {
- CreateTableState::DatanodeCreateRegions
- } else {
- CreateTableState::DatanodeCreateTable
- };
+ self.creator.data.state = CreateTableState::DatanodeCreateRegions;
Ok(Status::executing(true))
}
@@ -190,7 +176,7 @@ impl CreateTableProcedure {
let mut create_region_tasks = Vec::with_capacity(leaders.len());
for datanode in leaders {
- let clients = self.context.datanode_clients.clone();
+ let manager = self.context.datanode_manager.clone();
let regions = find_leader_regions(region_routes, &datanode);
let requests = regions
@@ -209,11 +195,10 @@ impl CreateTableProcedure {
create_region_tasks.push(async move {
for request in requests {
- let client = clients.get_client(&datanode).await;
- let requester = RegionRequester::new(client);
+ let requester = manager.datanode(&datanode).await;
if let Err(err) = requester.handle(request).await {
- return Err(handle_request_datanode_error(datanode)(err));
+ return Err(handle_operate_region_error(datanode)(err));
}
}
Ok(())
@@ -244,44 +229,6 @@ impl CreateTableProcedure {
Ok(Status::Done)
}
-
- async fn on_datanode_create_table(&mut self) -> Result<Status> {
- let region_routes = &self.creator.data.region_routes;
- let table_name = self.table_name();
- let clients = self.context.datanode_clients.clone();
- let leaders = find_leaders(region_routes);
- let mut joins = Vec::with_capacity(leaders.len());
- let table_id = self.table_id();
-
- for datanode in leaders {
- let client = clients.get_client(&datanode).await;
- let client = Database::new(&table_name.catalog_name, &table_name.schema_name, client);
-
- let regions = find_leader_regions(region_routes, &datanode);
- let mut create_expr_for_region = self.creator.data.task.create_table.clone();
- create_expr_for_region.region_numbers = regions;
- create_expr_for_region.table_id = Some(api::v1::TableId { id: table_id });
-
- joins.push(common_runtime::spawn_bg(async move {
- if let Err(err) = client.create(create_expr_for_region).await {
- if err.status_code() != StatusCode::TableAlreadyExists {
- return Err(handle_request_datanode_error(datanode)(err));
- }
- }
- Ok(())
- }));
- }
-
- let _r = join_all(joins)
- .await
- .into_iter()
- .map(|e| e.context(error::JoinSnafu).flatten())
- .collect::<Result<Vec<_>>>()?;
-
- self.creator.data.state = CreateTableState::CreateMetadata;
-
- Ok(Status::executing(true))
- }
}
#[async_trait]
@@ -300,7 +247,6 @@ impl Procedure for CreateTableProcedure {
match state {
CreateTableState::Prepare => self.on_prepare().await,
- CreateTableState::DatanodeCreateTable => self.on_datanode_create_table().await,
CreateTableState::DatanodeCreateRegions => self.on_datanode_create_regions().await,
CreateTableState::CreateMetadata => self.on_create_metadata().await,
}
@@ -344,9 +290,7 @@ impl TableCreator {
enum CreateTableState {
/// Prepares to create the table
Prepare,
- /// Datanode creates the table
- DatanodeCreateTable,
- /// Create regions on the Datanode
+ /// Creates regions on the Datanode
DatanodeCreateRegions,
/// Creates metadata
CreateMetadata,
@@ -372,6 +316,7 @@ mod test {
use std::sync::{Arc, Mutex};
use api::v1::{ColumnDataType, ColumnDef as PbColumnDef, CreateTableExpr};
+ use common_catalog::consts::MITO2_ENGINE;
use super::*;
use crate::procedure::utils::mock::EchoRegionServer;
diff --git a/src/meta-srv/src/procedure/drop_table.rs b/src/meta-srv/src/procedure/drop_table.rs
index 9a71a548dfd6..29428fd31be0 100644
--- a/src/meta-srv/src/procedure/drop_table.rs
+++ b/src/meta-srv/src/procedure/drop_table.rs
@@ -12,17 +12,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use api::v1::meta::MailboxMessage;
use api::v1::region::{region_request, DropRequest as PbDropRegionRequest};
-use api::v1::DropTableExpr;
use async_trait::async_trait;
use client::region::RegionRequester;
-use client::Database;
-use common_catalog::consts::MITO2_ENGINE;
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
+use common_meta::cache_invalidator::Context;
use common_meta::ident::TableIdent;
-use common_meta::instruction::Instruction;
use common_meta::key::table_info::TableInfoValue;
use common_meta::key::table_name::TableNameKey;
use common_meta::key::table_route::TableRouteValue;
@@ -47,7 +43,7 @@ use crate::ddl::DdlContext;
use crate::error::{self, Result, TableMetadataManagerSnafu};
use crate::metrics;
use crate::procedure::utils::handle_request_datanode_error;
-use crate::service::mailbox::BroadcastChannel;
+
pub struct DropTableProcedure {
context: DdlContext,
data: DropTableData,
@@ -132,29 +128,19 @@ impl DropTableProcedure {
table_id: self.data.task.table_id,
engine: engine.to_string(),
};
- let instruction = Instruction::InvalidateTableCache(table_ident);
-
- let msg = &MailboxMessage::json_message(
- "Invalidate Table Cache by dropping table procedure",
- &format!("Metasrv@{}", self.context.server_addr),
- "Frontend broadcast",
- common_time::util::current_time_millis(),
- &instruction,
- )
- .with_context(|_| error::SerializeToJsonSnafu {
- input: instruction.to_string(),
- })?;
self.context
- .mailbox
- .broadcast(&BroadcastChannel::Frontend, msg)
- .await?;
-
- self.data.state = if engine == MITO2_ENGINE {
- DropTableState::DatanodeDropRegions
- } else {
- DropTableState::DatanodeDropTable
- };
+ .cache_invalidator
+ .invalidate_table(
+ &Context {
+ subject: Some("Invalidate Table Cache by dropping table procedure".to_string()),
+ },
+ table_ident,
+ )
+ .await
+ .context(error::InvalidateTableCacheSnafu)?;
+
+ self.data.state = DropTableState::DatanodeDropRegions;
Ok(Status::executing(true))
}
@@ -203,50 +189,6 @@ impl DropTableProcedure {
Ok(Status::Done)
}
-
- /// Executes drop table instruction on datanode.
- async fn on_datanode_drop_table(&mut self) -> Result<Status> {
- let region_routes = &self.data.region_routes();
-
- let table_ref = self.data.table_ref();
- let table_id = self.data.task.table_id;
-
- let clients = self.context.datanode_clients.clone();
- let leaders = find_leaders(region_routes);
- let mut joins = Vec::with_capacity(leaders.len());
-
- let expr = DropTableExpr {
- catalog_name: table_ref.catalog.to_string(),
- schema_name: table_ref.schema.to_string(),
- table_name: table_ref.table.to_string(),
- table_id: Some(api::v1::TableId { id: table_id }),
- };
-
- for datanode in leaders {
- debug!("Dropping table {table_ref} on Datanode {datanode:?}");
-
- let client = clients.get_client(&datanode).await;
- let client = Database::new(table_ref.catalog, table_ref.schema, client);
- let expr = expr.clone();
- joins.push(common_runtime::spawn_bg(async move {
- if let Err(err) = client.drop_table(expr).await {
- // TODO(weny): add tests for `TableNotFound`
- if err.status_code() != StatusCode::TableNotFound {
- return Err(handle_request_datanode_error(datanode)(err));
- }
- }
- Ok(())
- }));
- }
-
- let _r = join_all(joins)
- .await
- .into_iter()
- .map(|e| e.context(error::JoinSnafu).flatten())
- .collect::<Result<Vec<_>>>()?;
-
- Ok(Status::Done)
- }
}
#[async_trait]
@@ -267,7 +209,6 @@ impl Procedure for DropTableProcedure {
DropTableState::Prepare => self.on_prepare().await,
DropTableState::RemoveMetadata => self.on_remove_metadata().await,
DropTableState::InvalidateTableCache => self.on_broadcast().await,
- DropTableState::DatanodeDropTable => self.on_datanode_drop_table().await,
DropTableState::DatanodeDropRegions => self.on_datanode_drop_regions().await,
}
.map_err(handle_retry_error)
@@ -343,9 +284,7 @@ enum DropTableState {
RemoveMetadata,
/// Invalidates Table Cache
InvalidateTableCache,
- /// Datanode drops the table
- DatanodeDropTable,
- /// Drop regions on Datanode
+ /// Drops regions on Datanode
DatanodeDropRegions,
}
diff --git a/src/meta-srv/src/procedure/utils.rs b/src/meta-srv/src/procedure/utils.rs
index c85c4e7639fb..1ebc611254b0 100644
--- a/src/meta-srv/src/procedure/utils.rs
+++ b/src/meta-srv/src/procedure/utils.rs
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use common_error::ext::BoxedError;
use common_meta::peer::Peer;
use common_procedure::error::Error as ProcedureError;
use snafu::{location, Location};
@@ -35,6 +36,25 @@ pub fn handle_request_datanode_error(datanode: Peer) -> impl FnOnce(client::erro
}
}
+pub fn handle_operate_region_error(
+ datanode: Peer,
+) -> impl FnOnce(common_meta::error::Error) -> Error {
+ move |err| {
+ if matches!(err, common_meta::error::Error::RetryLater { .. }) {
+ error::RetryLaterSnafu {
+ reason: format!("Failed to execute operation on datanode, source: {}", err),
+ }
+ .build()
+ } else {
+ error::Error::OperateRegion {
+ location: location!(),
+ peer: datanode,
+ source: BoxedError::new(err),
+ }
+ }
+ }
+}
+
pub fn handle_retry_error(e: Error) -> ProcedureError {
if matches!(e, error::Error::RetryLater { .. }) {
ProcedureError::retry_later(e)
@@ -145,8 +165,10 @@ pub mod test_data {
use table::metadata::{RawTableInfo, RawTableMeta, TableIdent, TableType};
use table::requests::TableOptions;
+ use crate::cache_invalidator::MetasrvCacheInvalidator;
use crate::ddl::DdlContext;
use crate::handler::{HeartbeatMailbox, Pushers};
+ use crate::metasrv::MetasrvInfo;
use crate::sequence::Sequence;
use crate::service::store::kv::KvBackendAdapter;
use crate::service::store::memory::MemStore;
@@ -220,10 +242,16 @@ pub mod test_data {
let mailbox = HeartbeatMailbox::create(Pushers::default(), mailbox_sequence);
let kv_backend = KvBackendAdapter::wrap(kv_store);
+ let clients = Arc::new(DatanodeClients::default());
DdlContext {
- datanode_clients: Arc::new(DatanodeClients::default()),
- mailbox,
- server_addr: "127.0.0.1:4321".to_string(),
+ datanode_clients: clients.clone(),
+ datanode_manager: clients,
+ cache_invalidator: Arc::new(MetasrvCacheInvalidator::new(
+ mailbox,
+ MetasrvInfo {
+ server_addr: "127.0.0.1:4321".to_string(),
+ },
+ )),
table_metadata_manager: Arc::new(TableMetadataManager::new(kv_backend)),
}
}
|
refactor
|
ddl context (#2301)
|
abd5a8ecbb9026a4afe46ff28741565d61dbe33f
|
2023-05-10 08:20:24
|
Weny Xu
|
chore(datasource): make CompressionType follow the style of the guide (#1522)
| false
|
diff --git a/src/common/datasource/src/compression.rs b/src/common/datasource/src/compression.rs
index 85701b0bf50a..fcf21f5db7d5 100644
--- a/src/common/datasource/src/compression.rs
+++ b/src/common/datasource/src/compression.rs
@@ -26,15 +26,15 @@ use crate::error::{self, Error, Result};
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum CompressionType {
/// Gzip-ed file
- GZIP,
+ Gzip,
/// Bzip2-ed file
- BZIP2,
+ Bzip2,
/// Xz-ed file (liblzma)
- XZ,
+ Xz,
/// Zstd-ed file,
- ZSTD,
+ Zstd,
/// Uncompressed file
- UNCOMPRESSED,
+ Uncompressed,
}
impl FromStr for CompressionType {
@@ -43,11 +43,11 @@ impl FromStr for CompressionType {
fn from_str(s: &str) -> Result<Self> {
let s = s.to_uppercase();
match s.as_str() {
- "GZIP" | "GZ" => Ok(Self::GZIP),
- "BZIP2" | "BZ2" => Ok(Self::BZIP2),
- "XZ" => Ok(Self::XZ),
- "ZST" | "ZSTD" => Ok(Self::ZSTD),
- "" => Ok(Self::UNCOMPRESSED),
+ "GZIP" | "GZ" => Ok(Self::Gzip),
+ "BZIP2" | "BZ2" => Ok(Self::Bzip2),
+ "XZ" => Ok(Self::Xz),
+ "ZST" | "ZSTD" => Ok(Self::Zstd),
+ "" => Ok(Self::Uncompressed),
_ => error::UnsupportedCompressionTypeSnafu {
compression_type: s,
}
@@ -59,18 +59,18 @@ impl FromStr for CompressionType {
impl Display for CompressionType {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str(match self {
- Self::GZIP => "GZIP",
- Self::BZIP2 => "BZIP2",
- Self::XZ => "XZ",
- Self::ZSTD => "ZSTD",
- Self::UNCOMPRESSED => "",
+ Self::Gzip => "GZIP",
+ Self::Bzip2 => "BZIP2",
+ Self::Xz => "XZ",
+ Self::Zstd => "ZSTD",
+ Self::Uncompressed => "",
})
}
}
impl CompressionType {
pub const fn is_compressed(&self) -> bool {
- !matches!(self, &Self::UNCOMPRESSED)
+ !matches!(self, &Self::Uncompressed)
}
pub fn convert_async_read<T: AsyncRead + Unpin + Send + 'static>(
@@ -78,11 +78,11 @@ impl CompressionType {
s: T,
) -> Box<dyn AsyncRead + Unpin + Send> {
match self {
- CompressionType::GZIP => Box::new(GzipDecoder::new(BufReader::new(s))),
- CompressionType::BZIP2 => Box::new(BzDecoder::new(BufReader::new(s))),
- CompressionType::XZ => Box::new(XzDecoder::new(BufReader::new(s))),
- CompressionType::ZSTD => Box::new(ZstdDecoder::new(BufReader::new(s))),
- CompressionType::UNCOMPRESSED => Box::new(s),
+ CompressionType::Gzip => Box::new(GzipDecoder::new(BufReader::new(s))),
+ CompressionType::Bzip2 => Box::new(BzDecoder::new(BufReader::new(s))),
+ CompressionType::Xz => Box::new(XzDecoder::new(BufReader::new(s))),
+ CompressionType::Zstd => Box::new(ZstdDecoder::new(BufReader::new(s))),
+ CompressionType::Uncompressed => Box::new(s),
}
}
@@ -91,19 +91,19 @@ impl CompressionType {
s: T,
) -> Box<dyn Stream<Item = io::Result<Bytes>> + Send + Unpin> {
match self {
- CompressionType::GZIP => {
+ CompressionType::Gzip => {
Box::new(ReaderStream::new(GzipDecoder::new(StreamReader::new(s))))
}
- CompressionType::BZIP2 => {
+ CompressionType::Bzip2 => {
Box::new(ReaderStream::new(BzDecoder::new(StreamReader::new(s))))
}
- CompressionType::XZ => {
+ CompressionType::Xz => {
Box::new(ReaderStream::new(XzDecoder::new(StreamReader::new(s))))
}
- CompressionType::ZSTD => {
+ CompressionType::Zstd => {
Box::new(ReaderStream::new(ZstdDecoder::new(StreamReader::new(s))))
}
- CompressionType::UNCOMPRESSED => Box::new(s),
+ CompressionType::Uncompressed => Box::new(s),
}
}
}
diff --git a/src/common/datasource/src/file_format/csv.rs b/src/common/datasource/src/file_format/csv.rs
index 27ff0475798a..d6b388fc227d 100644
--- a/src/common/datasource/src/file_format/csv.rs
+++ b/src/common/datasource/src/file_format/csv.rs
@@ -94,7 +94,7 @@ impl Default for CsvFormat {
has_header: true,
delimiter: b',',
schema_infer_max_record: Some(file_format::DEFAULT_SCHEMA_INFER_MAX_RECORD),
- compression_type: CompressionType::UNCOMPRESSED,
+ compression_type: CompressionType::Uncompressed,
}
}
}
@@ -309,7 +309,7 @@ mod tests {
assert_eq!(
format,
CsvFormat {
- compression_type: CompressionType::ZSTD,
+ compression_type: CompressionType::Zstd,
schema_infer_max_record: Some(2000),
delimiter: b'\t',
has_header: false,
diff --git a/src/common/datasource/src/file_format/json.rs b/src/common/datasource/src/file_format/json.rs
index 134b23bce02d..6fab2ba5b728 100644
--- a/src/common/datasource/src/file_format/json.rs
+++ b/src/common/datasource/src/file_format/json.rs
@@ -73,7 +73,7 @@ impl Default for JsonFormat {
fn default() -> Self {
Self {
schema_infer_max_record: Some(file_format::DEFAULT_SCHEMA_INFER_MAX_RECORD),
- compression_type: CompressionType::UNCOMPRESSED,
+ compression_type: CompressionType::Uncompressed,
}
}
}
@@ -230,7 +230,7 @@ mod tests {
assert_eq!(
format,
JsonFormat {
- compression_type: CompressionType::ZSTD,
+ compression_type: CompressionType::Zstd,
schema_infer_max_record: Some(2000),
}
);
diff --git a/src/common/datasource/src/file_format/tests.rs b/src/common/datasource/src/file_format/tests.rs
index 221f2cee4b97..6ed3c8486f2a 100644
--- a/src/common/datasource/src/file_format/tests.rs
+++ b/src/common/datasource/src/file_format/tests.rs
@@ -67,7 +67,7 @@ async fn test_json_opener() {
100,
schema.clone(),
store.clone(),
- CompressionType::UNCOMPRESSED,
+ CompressionType::Uncompressed,
);
let path = &test_util::get_data_dir("tests/json/basic.json")
@@ -119,7 +119,7 @@ async fn test_csv_opener() {
.build()
.unwrap();
- let csv_opener = CsvOpener::new(csv_conf, store, CompressionType::UNCOMPRESSED);
+ let csv_opener = CsvOpener::new(csv_conf, store, CompressionType::Uncompressed);
let tests = [
Test {
diff --git a/src/common/datasource/src/test_util.rs b/src/common/datasource/src/test_util.rs
index a72a194a7d91..ab04017f1644 100644
--- a/src/common/datasource/src/test_util.rs
+++ b/src/common/datasource/src/test_util.rs
@@ -100,7 +100,7 @@ pub async fn setup_stream_to_json_test(origin_path: &str, threshold: impl Fn(usi
test_util::TEST_BATCH_SIZE,
schema.clone(),
store.clone(),
- CompressionType::UNCOMPRESSED,
+ CompressionType::Uncompressed,
);
let size = store.read(origin_path).await.unwrap().len();
@@ -143,7 +143,7 @@ pub async fn setup_stream_to_csv_test(origin_path: &str, threshold: impl Fn(usiz
.build()
.unwrap();
- let csv_opener = CsvOpener::new(csv_conf, store.clone(), CompressionType::UNCOMPRESSED);
+ let csv_opener = CsvOpener::new(csv_conf, store.clone(), CompressionType::Uncompressed);
let size = store.read(origin_path).await.unwrap().len();
|
chore
|
make CompressionType follow the style of the guide (#1522)
|
55f18b5a0b6346ce156cdd1725d1b3130063e498
|
2022-11-16 15:49:29
|
Yingwen
|
refactor: Rename table-engine to mito (#539)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 5ea4f8e48568..a400dc8f0fa6 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -807,6 +807,7 @@ dependencies = [
"lazy_static",
"log-store",
"meta-client",
+ "mito",
"object-store",
"opendal",
"regex",
@@ -815,7 +816,6 @@ dependencies = [
"snafu",
"storage",
"table",
- "table-engine",
"tempdir",
"tokio",
]
@@ -1739,6 +1739,7 @@ dependencies = [
"meta-client",
"meta-srv",
"metrics",
+ "mito",
"object-store",
"query",
"script",
@@ -1751,7 +1752,6 @@ dependencies = [
"store-api",
"substrait 0.1.0",
"table",
- "table-engine",
"tempdir",
"tokio",
"tokio-stream",
@@ -3187,6 +3187,36 @@ dependencies = [
"windows-sys",
]
+[[package]]
+name = "mito"
+version = "0.1.0"
+dependencies = [
+ "arc-swap",
+ "async-stream",
+ "async-trait",
+ "chrono",
+ "common-catalog",
+ "common-error",
+ "common-query",
+ "common-recordbatch",
+ "common-telemetry",
+ "common-time",
+ "datafusion",
+ "datafusion-common",
+ "datatypes",
+ "futures",
+ "log-store",
+ "object-store",
+ "serde",
+ "serde_json",
+ "snafu",
+ "storage",
+ "store-api",
+ "table",
+ "tempdir",
+ "tokio",
+]
+
[[package]]
name = "moka"
version = "0.9.6"
@@ -5092,6 +5122,7 @@ dependencies = [
"futures",
"futures-util",
"log-store",
+ "mito",
"paste",
"query",
"ron",
@@ -5106,7 +5137,6 @@ dependencies = [
"sql",
"storage",
"table",
- "table-engine",
"tempdir",
"tokio",
"tokio-test",
@@ -5469,10 +5499,10 @@ dependencies = [
"common-time",
"datatypes",
"itertools",
+ "mito",
"once_cell",
"snafu",
"sqlparser",
- "table-engine",
]
[[package]]
@@ -5778,36 +5808,6 @@ dependencies = [
"tokio-util",
]
-[[package]]
-name = "table-engine"
-version = "0.1.0"
-dependencies = [
- "arc-swap",
- "async-stream",
- "async-trait",
- "chrono",
- "common-catalog",
- "common-error",
- "common-query",
- "common-recordbatch",
- "common-telemetry",
- "common-time",
- "datafusion",
- "datafusion-common",
- "datatypes",
- "futures",
- "log-store",
- "object-store",
- "serde",
- "serde_json",
- "snafu",
- "storage",
- "store-api",
- "table",
- "tempdir",
- "tokio",
-]
-
[[package]]
name = "tagptr"
version = "0.2.0"
diff --git a/Cargo.toml b/Cargo.toml
index ad51afb7078f..be37ca379055 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -32,7 +32,7 @@ members = [
"src/storage",
"src/store-api",
"src/table",
- "src/table-engine",
+ "src/mito",
]
[profile.release]
diff --git a/src/catalog/Cargo.toml b/src/catalog/Cargo.toml
index aa9f38f774d9..b4b88d966e8e 100644
--- a/src/catalog/Cargo.toml
+++ b/src/catalog/Cargo.toml
@@ -42,6 +42,6 @@ log-store = { path = "../log-store" }
object-store = { path = "../object-store" }
opendal = "0.17"
storage = { path = "../storage" }
-table-engine = { path = "../table-engine" }
+mito = { path = "../mito" }
tempdir = "0.3"
tokio = { version = "1.0", features = ["full"] }
diff --git a/src/catalog/src/system.rs b/src/catalog/src/system.rs
index 887c4a8022d4..09980b7482ce 100644
--- a/src/catalog/src/system.rs
+++ b/src/catalog/src/system.rs
@@ -372,13 +372,13 @@ pub struct TableEntryValue {
#[cfg(test)]
mod tests {
use log_store::fs::noop::NoopLogStore;
+ use mito::config::EngineConfig;
+ use mito::engine::MitoEngine;
use object_store::ObjectStore;
use storage::config::EngineConfig as StorageEngineConfig;
use storage::EngineImpl;
use table::metadata::TableType;
use table::metadata::TableType::Base;
- use table_engine::config::EngineConfig;
- use table_engine::engine::MitoEngine;
use tempdir::TempDir;
use super::*;
diff --git a/src/datanode/Cargo.toml b/src/datanode/Cargo.toml
index c722c328d761..4b95e2a18bd5 100644
--- a/src/datanode/Cargo.toml
+++ b/src/datanode/Cargo.toml
@@ -46,7 +46,7 @@ storage = { path = "../storage" }
store-api = { path = "../store-api" }
substrait = { path = "../common/substrait" }
table = { path = "../table" }
-table-engine = { path = "../table-engine", features = ["test"] }
+mito = { path = "../mito", features = ["test"] }
tokio = { version = "1.18", features = ["full"] }
tokio-stream = { version = "0.1", features = ["net"] }
tonic = "0.8"
diff --git a/src/datanode/src/instance.rs b/src/datanode/src/instance.rs
index 9cf336b446d4..51ccf7068357 100644
--- a/src/datanode/src/instance.rs
+++ b/src/datanode/src/instance.rs
@@ -25,6 +25,8 @@ use log_store::fs::config::LogConfig;
use log_store::fs::log::LocalFileLogStore;
use meta_client::client::{MetaClient, MetaClientBuilder};
use meta_client::MetaClientOpts;
+use mito::config::EngineConfig as TableEngineConfig;
+use mito::engine::MitoEngine;
use object_store::layers::LoggingLayer;
use object_store::services::fs::Builder;
use object_store::{util, ObjectStore};
@@ -33,8 +35,6 @@ use snafu::prelude::*;
use storage::config::EngineConfig as StorageEngineConfig;
use storage::EngineImpl;
use table::table::TableIdProviderRef;
-use table_engine::config::EngineConfig as TableEngineConfig;
-use table_engine::engine::MitoEngine;
use crate::datanode::{DatanodeOptions, ObjectStoreConfig};
use crate::error::{
diff --git a/src/datanode/src/mock.rs b/src/datanode/src/mock.rs
index edbfe09c870e..240b68e04dec 100644
--- a/src/datanode/src/mock.rs
+++ b/src/datanode/src/mock.rs
@@ -19,12 +19,12 @@ use catalog::remote::MetaKvBackend;
use common_catalog::consts::MIN_USER_TABLE_ID;
use meta_client::client::{MetaClient, MetaClientBuilder};
use meta_srv::mocks::MockInfo;
+use mito::config::EngineConfig as TableEngineConfig;
use query::QueryEngineFactory;
use storage::config::EngineConfig as StorageEngineConfig;
use storage::EngineImpl;
use table::metadata::TableId;
use table::table::{TableIdProvider, TableIdProviderRef};
-use table_engine::config::EngineConfig as TableEngineConfig;
use crate::datanode::DatanodeOptions;
use crate::error::Result;
@@ -38,7 +38,7 @@ impl Instance {
// This method is used in other crate's testing codes, so move it out of "cfg(test)".
// TODO(LFC): Delete it when callers no longer need it.
pub async fn new_mock() -> Result<Self> {
- use table_engine::table::test_util::{new_test_object_store, MockEngine, MockMitoEngine};
+ use mito::table::test_util::{new_test_object_store, MockEngine, MockMitoEngine};
let mock_info = meta_srv::mocks::mock_with_memstore().await;
let meta_client = Some(Arc::new(mock_meta_client(mock_info, 0).await));
diff --git a/src/datanode/src/sql.rs b/src/datanode/src/sql.rs
index 01b3247c7e5c..2c928946bf8f 100644
--- a/src/datanode/src/sql.rs
+++ b/src/datanode/src/sql.rs
@@ -97,6 +97,8 @@ mod tests {
use datatypes::schema::{ColumnSchema, SchemaBuilder, SchemaRef};
use datatypes::value::Value;
use log_store::fs::noop::NoopLogStore;
+ use mito::config::EngineConfig as TableEngineConfig;
+ use mito::engine::MitoEngine;
use object_store::services::fs::Builder;
use object_store::ObjectStore;
use query::QueryEngineFactory;
@@ -106,8 +108,6 @@ mod tests {
use table::error::Result as TableResult;
use table::metadata::TableInfoRef;
use table::{Table, TableRef};
- use table_engine::config::EngineConfig as TableEngineConfig;
- use table_engine::engine::MitoEngine;
use tempdir::TempDir;
use super::*;
diff --git a/src/datanode/src/tests/test_util.rs b/src/datanode/src/tests/test_util.rs
index 497167c30256..eeb7eaf30a38 100644
--- a/src/datanode/src/tests/test_util.rs
+++ b/src/datanode/src/tests/test_util.rs
@@ -20,11 +20,11 @@ use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MIN_USER
use datatypes::data_type::ConcreteDataType;
use datatypes::schema::{ColumnSchema, SchemaBuilder};
use frontend::frontend::Mode;
+use mito::config::EngineConfig;
+use mito::table::test_util::{new_test_object_store, MockEngine, MockMitoEngine};
use snafu::ResultExt;
use table::engine::{EngineContext, TableEngineRef};
use table::requests::CreateTableRequest;
-use table_engine::config::EngineConfig;
-use table_engine::table::test_util::{new_test_object_store, MockEngine, MockMitoEngine};
use tempdir::TempDir;
use crate::datanode::{DatanodeOptions, ObjectStoreConfig};
diff --git a/src/table-engine/Cargo.toml b/src/mito/Cargo.toml
similarity index 98%
rename from src/table-engine/Cargo.toml
rename to src/mito/Cargo.toml
index 3a81c782bde9..63612075f77d 100644
--- a/src/table-engine/Cargo.toml
+++ b/src/mito/Cargo.toml
@@ -1,5 +1,5 @@
[package]
-name = "table-engine"
+name = "mito"
version = "0.1.0"
edition = "2021"
license = "Apache-2.0"
diff --git a/src/mito/README.md b/src/mito/README.md
new file mode 100644
index 000000000000..facfbfa5d633
--- /dev/null
+++ b/src/mito/README.md
@@ -0,0 +1,8 @@
+# Mito
+
+Mito is GreptimeDB's default table engine.
+
+## About Mito
+The Alfa Romeo [MiTo](https://en.wikipedia.org/wiki/Alfa_Romeo_MiTo) is a front-wheel drive, three-door supermini designed by Centro Stile Alfa Romeo.
+
+> "You can't be a true petrolhead until you've owned an Alfa Romeo." -- by Jeremy Clarkson
diff --git a/src/table-engine/src/config.rs b/src/mito/src/config.rs
similarity index 100%
rename from src/table-engine/src/config.rs
rename to src/mito/src/config.rs
diff --git a/src/table-engine/src/engine.rs b/src/mito/src/engine.rs
similarity index 99%
rename from src/table-engine/src/engine.rs
rename to src/mito/src/engine.rs
index 29e2f09c74d3..8154ad2ecd9c 100644
--- a/src/table-engine/src/engine.rs
+++ b/src/mito/src/engine.rs
@@ -64,7 +64,7 @@ fn table_dir(schema_name: &str, table_name: &str) -> String {
/// [TableEngine] implementation.
///
/// About mito <https://en.wikipedia.org/wiki/Alfa_Romeo_MiTo>.
-/// "you can't be a true petrolhead until you've owned an Alfa Romeo" -- by Jeremy Clarkson
+/// "You can't be a true petrolhead until you've owned an Alfa Romeo." -- by Jeremy Clarkson
#[derive(Clone)]
pub struct MitoEngine<S: StorageEngine> {
inner: Arc<MitoEngineInner<S>>,
diff --git a/src/table-engine/src/error.rs b/src/mito/src/error.rs
similarity index 100%
rename from src/table-engine/src/error.rs
rename to src/mito/src/error.rs
diff --git a/src/table-engine/src/lib.rs b/src/mito/src/lib.rs
similarity index 100%
rename from src/table-engine/src/lib.rs
rename to src/mito/src/lib.rs
diff --git a/src/table-engine/src/manifest.rs b/src/mito/src/manifest.rs
similarity index 100%
rename from src/table-engine/src/manifest.rs
rename to src/mito/src/manifest.rs
diff --git a/src/table-engine/src/manifest/action.rs b/src/mito/src/manifest/action.rs
similarity index 100%
rename from src/table-engine/src/manifest/action.rs
rename to src/mito/src/manifest/action.rs
diff --git a/src/table-engine/src/table.rs b/src/mito/src/table.rs
similarity index 100%
rename from src/table-engine/src/table.rs
rename to src/mito/src/table.rs
diff --git a/src/table-engine/src/table/test_util.rs b/src/mito/src/table/test_util.rs
similarity index 100%
rename from src/table-engine/src/table/test_util.rs
rename to src/mito/src/table/test_util.rs
diff --git a/src/table-engine/src/table/test_util/mock_engine.rs b/src/mito/src/table/test_util/mock_engine.rs
similarity index 100%
rename from src/table-engine/src/table/test_util/mock_engine.rs
rename to src/mito/src/table/test_util/mock_engine.rs
diff --git a/src/script/Cargo.toml b/src/script/Cargo.toml
index b894736c3d23..cde4f8391c7f 100644
--- a/src/script/Cargo.toml
+++ b/src/script/Cargo.toml
@@ -58,7 +58,7 @@ log-store = { path = "../log-store" }
ron = "0.7"
serde = { version = "1.0", features = ["derive"] }
storage = { path = "../storage" }
-table-engine = { path = "../table-engine", features = ["test"] }
+mito = { path = "../mito", features = ["test"] }
tempdir = "0.3"
tokio = { version = "1.18", features = ["full"] }
tokio-test = "0.4"
diff --git a/src/script/src/manager.rs b/src/script/src/manager.rs
index be9825587056..1949c7723359 100644
--- a/src/script/src/manager.rs
+++ b/src/script/src/manager.rs
@@ -96,17 +96,17 @@ impl ScriptManager {
#[cfg(test)]
mod tests {
use catalog::CatalogManager;
+ use mito::config::EngineConfig as TableEngineConfig;
+ use mito::table::test_util::new_test_object_store;
use query::QueryEngineFactory;
- use table_engine::config::EngineConfig as TableEngineConfig;
- use table_engine::table::test_util::new_test_object_store;
use super::*;
type DefaultEngine = MitoEngine<EngineImpl<LocalFileLogStore>>;
use log_store::fs::config::LogConfig;
use log_store::fs::log::LocalFileLogStore;
+ use mito::engine::MitoEngine;
use storage::config::EngineConfig as StorageEngineConfig;
use storage::EngineImpl;
- use table_engine::engine::MitoEngine;
use tempdir::TempDir;
#[tokio::test]
diff --git a/src/sql/Cargo.toml b/src/sql/Cargo.toml
index 457bb988771a..7b3949e04300 100644
--- a/src/sql/Cargo.toml
+++ b/src/sql/Cargo.toml
@@ -15,4 +15,4 @@ itertools = "0.10"
once_cell = "1.10"
snafu = { version = "0.7", features = ["backtraces"] }
sqlparser = "0.15.0"
-table-engine = { path = "../table-engine" }
+mito = { path = "../mito" }
diff --git a/src/sql/src/parsers/create_parser.rs b/src/sql/src/parsers/create_parser.rs
index af52f12603c3..d1a76ffacfd9 100644
--- a/src/sql/src/parsers/create_parser.rs
+++ b/src/sql/src/parsers/create_parser.rs
@@ -15,13 +15,13 @@
use std::cmp::Ordering;
use itertools::Itertools;
+use mito::engine;
use once_cell::sync::Lazy;
use snafu::{ensure, OptionExt, ResultExt};
use sqlparser::ast::Value;
use sqlparser::dialect::keywords::Keyword;
use sqlparser::parser::IsOptional::Mandatory;
use sqlparser::tokenizer::{Token, Word};
-use table_engine::engine;
use crate::ast::{ColumnDef, Ident, TableConstraint, Value as SqlValue};
use crate::error::{self, InvalidTimeIndexSnafu, Result, SyntaxSnafu};
|
refactor
|
Rename table-engine to mito (#539)
|
7453d9779d307b23a89a77c1de0c2c93bc82d543
|
2024-02-27 09:16:12
|
Weny Xu
|
fix: throw errors instead of panic (#3391)
| false
|
diff --git a/src/common/meta/src/ddl/create_table.rs b/src/common/meta/src/ddl/create_table.rs
index ec5e35767ebd..0d149c3af7bf 100644
--- a/src/common/meta/src/ddl/create_table.rs
+++ b/src/common/meta/src/ddl/create_table.rs
@@ -87,12 +87,24 @@ impl CreateTableProcedure {
self.table_info().ident.table_id
}
- fn region_wal_options(&self) -> Option<&HashMap<RegionNumber, String>> {
- self.creator.data.region_wal_options.as_ref()
+ fn region_wal_options(&self) -> Result<&HashMap<RegionNumber, String>> {
+ self.creator
+ .data
+ .region_wal_options
+ .as_ref()
+ .context(error::UnexpectedSnafu {
+ err_msg: "region_wal_options is not allocated",
+ })
}
- fn table_route(&self) -> Option<&TableRouteValue> {
- self.creator.data.table_route.as_ref()
+ fn table_route(&self) -> Result<&TableRouteValue> {
+ self.creator
+ .data
+ .table_route
+ .as_ref()
+ .context(error::UnexpectedSnafu {
+ err_msg: "table_route is not allocated",
+ })
}
#[cfg(any(test, feature = "testing"))]
@@ -181,7 +193,7 @@ impl CreateTableProcedure {
/// - [Code::Unavailable](tonic::status::Code::Unavailable)
pub async fn on_datanode_create_regions(&mut self) -> Result<Status> {
// Safety: the table route must be allocated.
- match &self.creator.data.table_route.clone().unwrap() {
+ match self.table_route()?.clone() {
TableRouteValue::Physical(x) => {
let region_routes = x.region_routes.clone();
let request_builder = self.new_region_request_builder(None)?;
@@ -214,7 +226,7 @@ impl CreateTableProcedure {
request_builder: CreateRequestBuilder,
) -> Result<Status> {
// Safety: the table_route must be allocated.
- if self.table_route().unwrap().is_physical() {
+ if self.table_route()?.is_physical() {
// Registers opening regions
let guards = self
.creator
@@ -226,7 +238,7 @@ impl CreateTableProcedure {
let create_table_data = &self.creator.data;
// Safety: the region_wal_options must be allocated
- let region_wal_options = self.region_wal_options().unwrap();
+ let region_wal_options = self.region_wal_options()?;
let create_table_expr = &create_table_data.task.create_table;
let catalog = &create_table_expr.catalog_name;
let schema = &create_table_expr.schema_name;
@@ -291,9 +303,9 @@ impl CreateTableProcedure {
let raw_table_info = self.table_info().clone();
// Safety: the region_wal_options must be allocated.
- let region_wal_options = self.region_wal_options().unwrap().clone();
+ let region_wal_options = self.region_wal_options()?.clone();
// Safety: the table_route must be allocated.
- let table_route = self.table_route().unwrap().clone();
+ let table_route = self.table_route()?.clone();
manager
.create_table_metadata(raw_table_info, table_route, region_wal_options)
.await?;
|
fix
|
throw errors instead of panic (#3391)
|
f6e2039eb8293cecde13911d3f5ee41900b1d33f
|
2024-05-01 11:38:49
|
Weny Xu
|
test: introduce unstable fuzz create table test (#3788)
| false
|
diff --git a/.env.example b/.env.example
index 369ebb8e2f43..f8518696580a 100644
--- a/.env.example
+++ b/.env.example
@@ -24,3 +24,7 @@ GT_KAFKA_ENDPOINTS = localhost:9092
# Setting for fuzz tests
GT_MYSQL_ADDR = localhost:4002
+
+# Setting for unstable fuzz tests
+GT_FUZZ_BINARY_PATH=/path/to/
+GT_FUZZ_INSTANCE_ROOT_DIR=/tmp/unstable_greptime
diff --git a/.github/actions/fuzz-test/action.yaml b/.github/actions/fuzz-test/action.yaml
index d50d5be6ef26..e3b4970d0caa 100644
--- a/.github/actions/fuzz-test/action.yaml
+++ b/.github/actions/fuzz-test/action.yaml
@@ -3,11 +3,17 @@ description: 'Fuzz test given setup and service'
inputs:
target:
description: "The fuzz target to test"
+ required: true
+ max-total-time:
+ description: "Max total time(secs)"
+ required: true
+ unstable:
+ default: 'false'
+ description: "Enable unstable feature"
runs:
using: composite
steps:
- name: Run Fuzz Test
shell: bash
- run: cargo fuzz run ${{ inputs.target }} --fuzz-dir tests-fuzz -D -s none -- -max_total_time=120
- env:
- GT_MYSQL_ADDR: 127.0.0.1:4002
+ run: cargo fuzz run ${{ inputs.target }} --fuzz-dir tests-fuzz -D -s none ${{ inputs.unstable == 'true' && '--features=unstable' || '' }} -- -max_total_time=${{ inputs.max-total-time }}
+
diff --git a/.github/workflows/develop.yml b/.github/workflows/develop.yml
index 1657962d5110..4c64b85e3cd5 100644
--- a/.github/workflows/develop.yml
+++ b/.github/workflows/develop.yml
@@ -171,8 +171,62 @@ jobs:
uses: ./.github/actions/fuzz-test
env:
CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
+ GT_MYSQL_ADDR: 127.0.0.1:4002
with:
target: ${{ matrix.target }}
+ max-total-time: 120
+
+ unstable-fuzztest:
+ name: Unstable Fuzz Test
+ needs: build
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ target: [ "unstable_fuzz_create_table_standalone" ]
+ steps:
+ - uses: actions/checkout@v4
+ - uses: arduino/setup-protoc@v3
+ with:
+ repo-token: ${{ secrets.GITHUB_TOKEN }}
+ - uses: dtolnay/rust-toolchain@master
+ with:
+ toolchain: ${{ env.RUST_TOOLCHAIN }}
+ - name: Rust Cache
+ uses: Swatinem/rust-cache@v2
+ with:
+ # Shares across multiple jobs
+ shared-key: "fuzz-test-targets"
+ - name: Set Rust Fuzz
+ shell: bash
+ run: |
+ sudo apt update && sudo apt install -y libfuzzer-14-dev
+ cargo install cargo-fuzz
+ - name: Download pre-built binaries
+ uses: actions/download-artifact@v4
+ with:
+ name: bins
+ path: .
+ - name: Unzip binaries
+ run: tar -xvf ./bins.tar.gz
+ - name: Fuzz Test
+ uses: ./.github/actions/fuzz-test
+ env:
+ CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
+ GT_MYSQL_ADDR: 127.0.0.1:4002
+ GT_FUZZ_BINARY_PATH: ./bins/greptime
+ GT_FUZZ_INSTANCE_ROOT_DIR: /tmp/unstable-greptime/
+ with:
+ target: ${{ matrix.target }}
+ max-total-time: 120
+ unstable: 'true'
+ - name: Upload unstable fuzz test logs
+ if: failure()
+ uses: actions/upload-artifact@v4
+ with:
+ name: unstable-fuzz-logs
+ path: /tmp/unstable-greptime/
+ retention-days: 3
+
sqlness:
name: Sqlness Test
diff --git a/Cargo.lock b/Cargo.lock
index fc15b4681f2c..242fb17f4565 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -10237,15 +10237,18 @@ dependencies = [
"dotenv",
"lazy_static",
"libfuzzer-sys",
+ "nix 0.28.0",
"partition",
"rand",
"rand_chacha",
+ "reqwest",
"serde",
"serde_json",
"snafu",
"sql",
"sqlparser 0.44.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=c919990bf62ad38d2b0c0a3bc90b26ad919d51b0)",
"sqlx",
+ "tinytemplate",
"tokio",
]
diff --git a/tests-fuzz/Cargo.toml b/tests-fuzz/Cargo.toml
index 8bf5a34fb1ba..c7e733448988 100644
--- a/tests-fuzz/Cargo.toml
+++ b/tests-fuzz/Cargo.toml
@@ -10,6 +10,10 @@ workspace = true
[package.metadata]
cargo-fuzz = true
+[features]
+default = []
+unstable = ["nix"]
+
[dependencies]
arbitrary = { version = "1.3.0", features = ["derive"] }
async-trait = { workspace = true }
@@ -24,9 +28,11 @@ derive_builder = { workspace = true }
dotenv = "0.15"
lazy_static = { workspace = true }
libfuzzer-sys = "0.4"
+nix = { version = "0.28", features = ["process", "signal"], optional = true }
partition = { workspace = true }
rand = { workspace = true }
rand_chacha = "0.3.1"
+reqwest = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
snafu = { workspace = true }
@@ -38,10 +44,11 @@ sqlx = { version = "0.6", features = [
"postgres",
"chrono",
] }
+tinytemplate = "1.2"
+tokio = { workspace = true }
[dev-dependencies]
dotenv.workspace = true
-tokio = { workspace = true }
[[bin]]
name = "fuzz_create_table"
@@ -91,3 +98,11 @@ path = "targets/fuzz_create_database.rs"
test = false
bench = false
doc = false
+
+[[bin]]
+name = "unstable_fuzz_create_table_standalone"
+path = "targets/unstable/fuzz_create_table_standalone.rs"
+test = false
+bench = false
+doc = false
+required-features = ["unstable"]
diff --git a/tests-fuzz/README.md b/tests-fuzz/README.md
index c1e2147fb4bd..780107a65002 100644
--- a/tests-fuzz/README.md
+++ b/tests-fuzz/README.md
@@ -9,6 +9,22 @@ cargo install cargo-fuzz
2. Start GreptimeDB
3. Copy the `.env.example`, which is at project root, to `.env` and change the values on need.
+### For stable fuzz tests
+Set the GreptimeDB MySQL address.
+```
+GT_MYSQL_ADDR = localhost:4002
+```
+
+### For unstable fuzz tests
+Set the binary path of the GreptimeDB:
+```
+GT_FUZZ_BINARY_PATH = /path/to/
+```
+
+Change the instance root directory(the default value: `/tmp/unstable_greptime/`)
+```
+GT_FUZZ_INSTANCE_ROOT_DIR = /path/to/
+```
## Run
1. List all fuzz targets
```bash
diff --git a/tests-fuzz/conf/standalone.template.toml b/tests-fuzz/conf/standalone.template.toml
new file mode 100644
index 000000000000..f0ddc38d048e
--- /dev/null
+++ b/tests-fuzz/conf/standalone.template.toml
@@ -0,0 +1,23 @@
+mode = 'standalone'
+enable_memory_catalog = false
+require_lease_before_startup = true
+
+[wal]
+provider = "raft_engine"
+file_size = '1GB'
+purge_interval = '10m'
+purge_threshold = '10GB'
+read_batch_size = 128
+sync_write = false
+
+[storage]
+type = 'File'
+data_home = '{data_home}'
+
+[grpc_options]
+addr = '127.0.0.1:4001'
+runtime_size = 8
+
+[procedure]
+max_retry_times = 3
+retry_delay = "500ms"
diff --git a/tests-fuzz/src/error.rs b/tests-fuzz/src/error.rs
index add82c854187..72033b257521 100644
--- a/tests-fuzz/src/error.rs
+++ b/tests-fuzz/src/error.rs
@@ -16,6 +16,8 @@ use common_macro::stack_trace_debug;
use snafu::{Location, Snafu};
use crate::ir::create_expr::{CreateDatabaseExprBuilderError, CreateTableExprBuilderError};
+#[cfg(feature = "unstable")]
+use crate::utils::process::Pid;
pub type Result<T> = std::result::Result<T, Error>;
@@ -23,6 +25,22 @@ pub type Result<T> = std::result::Result<T, Error>;
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
+ #[snafu(display("Failed to create a file: {}", path))]
+ CreateFile {
+ path: String,
+ location: Location,
+ #[snafu(source)]
+ error: std::io::Error,
+ },
+
+ #[snafu(display("Failed to write a file: {}", path))]
+ WriteFile {
+ path: String,
+ location: Location,
+ #[snafu(source)]
+ error: std::io::Error,
+ },
+
#[snafu(display("Unexpected, violated: {violated}"))]
Unexpected {
violated: String,
@@ -56,4 +74,23 @@ pub enum Error {
#[snafu(display("Failed to assert: {}", reason))]
Assert { reason: String, location: Location },
+
+ #[snafu(display("Child process exited unexpected"))]
+ UnexpectedExited { location: Location },
+
+ #[snafu(display("Failed to spawn a child process"))]
+ SpawnChild {
+ location: Location,
+ #[snafu(source)]
+ error: std::io::Error,
+ },
+
+ #[cfg(feature = "unstable")]
+ #[snafu(display("Failed to kill a process, pid: {}", pid))]
+ KillProcess {
+ location: Location,
+ #[snafu(source)]
+ error: nix::Error,
+ pid: Pid,
+ },
}
diff --git a/tests-fuzz/src/utils.rs b/tests-fuzz/src/utils.rs
index 7c50b0ac66cb..9156067b253e 100644
--- a/tests-fuzz/src/utils.rs
+++ b/tests-fuzz/src/utils.rs
@@ -12,21 +12,40 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+pub mod config;
+pub mod health;
+#[cfg(feature = "unstable")]
+pub mod process;
+
use std::env;
use common_telemetry::info;
use sqlx::mysql::MySqlPoolOptions;
use sqlx::{MySql, Pool};
+/// Database connections
pub struct Connections {
pub mysql: Option<Pool<MySql>>,
}
const GT_MYSQL_ADDR: &str = "GT_MYSQL_ADDR";
-pub async fn init_greptime_connections() -> Connections {
+/// Connects to GreptimeDB via env variables.
+pub async fn init_greptime_connections_via_env() -> Connections {
let _ = dotenv::dotenv();
let mysql = if let Ok(addr) = env::var(GT_MYSQL_ADDR) {
+ Some(addr)
+ } else {
+ info!("GT_MYSQL_ADDR is empty, ignores test");
+ None
+ };
+
+ init_greptime_connections(mysql).await
+}
+
+/// Connects to GreptimeDB.
+pub async fn init_greptime_connections(mysql: Option<String>) -> Connections {
+ let mysql = if let Some(addr) = mysql {
Some(
MySqlPoolOptions::new()
.connect(&format!("mysql://{addr}/public"))
@@ -34,9 +53,33 @@ pub async fn init_greptime_connections() -> Connections {
.unwrap(),
)
} else {
- info!("GT_MYSQL_ADDR is empty, ignores test");
None
};
Connections { mysql }
}
+
+const GT_FUZZ_BINARY_PATH: &str = "GT_FUZZ_BINARY_PATH";
+const GT_FUZZ_INSTANCE_ROOT_DIR: &str = "GT_FUZZ_INSTANCE_ROOT_DIR";
+
+/// The variables for unstable test
+pub struct UnstableTestVariables {
+ pub binary_path: String,
+ pub root_dir: Option<String>,
+}
+
+/// Loads env variables for unstable test
+pub fn load_unstable_test_env_variables() -> UnstableTestVariables {
+ let _ = dotenv::dotenv();
+ let binary_path = env::var(GT_FUZZ_BINARY_PATH).expect("GT_FUZZ_BINARY_PATH not found");
+ let root_dir = if let Ok(root) = env::var(GT_FUZZ_INSTANCE_ROOT_DIR) {
+ Some(root)
+ } else {
+ None
+ };
+
+ UnstableTestVariables {
+ binary_path,
+ root_dir,
+ }
+}
diff --git a/tests-fuzz/src/utils/config.rs b/tests-fuzz/src/utils/config.rs
new file mode 100644
index 000000000000..5692ff478fab
--- /dev/null
+++ b/tests-fuzz/src/utils/config.rs
@@ -0,0 +1,58 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::path::PathBuf;
+
+use common_telemetry::tracing::info;
+use serde::Serialize;
+use snafu::ResultExt;
+use tinytemplate::TinyTemplate;
+use tokio::fs::File;
+use tokio::io::AsyncWriteExt;
+
+use crate::error;
+use crate::error::Result;
+
+/// Get the path of config dir `tests-fuzz/conf`.
+pub fn get_conf_path() -> PathBuf {
+ let mut root_path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
+ root_path.push("conf");
+ root_path
+}
+
+/// Returns rendered config file.
+pub fn render_config_file<C: Serialize>(template_path: &str, context: &C) -> String {
+ let mut tt = TinyTemplate::new();
+ let template = std::fs::read_to_string(template_path).unwrap();
+ tt.add_template(template_path, &template).unwrap();
+ tt.render(template_path, context).unwrap()
+}
+
+// Writes config file to `output_path`.
+pub async fn write_config_file<C: Serialize>(
+ template_path: &str,
+ context: &C,
+ output_path: &str,
+) -> Result<()> {
+ info!("template_path: {template_path}, output_path: {output_path}");
+ let content = render_config_file(template_path, context);
+ let mut config_file = File::create(output_path)
+ .await
+ .context(error::CreateFileSnafu { path: output_path })?;
+ config_file
+ .write_all(content.as_bytes())
+ .await
+ .context(error::WriteFileSnafu { path: output_path })?;
+ Ok(())
+}
diff --git a/tests-fuzz/src/utils/health.rs b/tests-fuzz/src/utils/health.rs
new file mode 100644
index 000000000000..88f0c97321f1
--- /dev/null
+++ b/tests-fuzz/src/utils/health.rs
@@ -0,0 +1,57 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::time::Duration;
+
+use crate::utils::info;
+
+/// Check health of the processing.
+#[async_trait::async_trait]
+pub trait HealthChecker: Send + Sync {
+ async fn check(&self);
+
+ fn wait_timeout(&self) -> Duration;
+}
+
+/// Http health checker.
+pub struct HttpHealthChecker {
+ pub url: String,
+}
+
+#[async_trait::async_trait]
+impl HealthChecker for HttpHealthChecker {
+ async fn check(&self) {
+ loop {
+ match reqwest::get(&self.url).await {
+ Ok(resp) => {
+ if resp.status() == 200 {
+ info!("Health checked!");
+ return;
+ }
+ info!("Failed to check health, status: {}", resp.status());
+ }
+ Err(err) => {
+ info!("Failed to check health, error: {err:?}");
+ }
+ }
+
+ info!("Checking health later...");
+ tokio::time::sleep(Duration::from_secs(1)).await;
+ }
+ }
+
+ fn wait_timeout(&self) -> Duration {
+ Duration::from_secs(5)
+ }
+}
diff --git a/tests-fuzz/src/utils/process.rs b/tests-fuzz/src/utils/process.rs
new file mode 100644
index 000000000000..b3b03c042b2b
--- /dev/null
+++ b/tests-fuzz/src/utils/process.rs
@@ -0,0 +1,264 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::collections::HashMap;
+use std::process::{ExitStatus, Stdio};
+use std::sync::atomic::{AtomicBool, Ordering};
+use std::sync::{Arc, Mutex};
+use std::time::Duration;
+
+use common_telemetry::{info, warn};
+use nix::sys::signal::Signal;
+use rand::{Rng, SeedableRng};
+use rand_chacha::ChaChaRng;
+use snafu::{ensure, ResultExt};
+use tokio::fs::OpenOptions;
+use tokio::process::Child;
+
+use crate::error::{self, Result};
+use crate::utils::health::HealthChecker;
+
+pub type Pid = u32;
+
+/// The state of a process.
+#[derive(Debug, Clone)]
+pub struct Process {
+ pub(crate) exit_status: Option<ExitStatus>,
+ pub(crate) exited: bool,
+}
+
+/// ProcessManager provides the ability to spawn/wait/kill a child process.
+#[derive(Debug, Clone)]
+pub struct ProcessManager {
+ processes: Arc<Mutex<HashMap<Pid, Process>>>,
+}
+
+/// The callback while the child process exits.
+pub type OnChildExitResult = std::result::Result<ExitStatus, std::io::Error>;
+
+impl Default for ProcessManager {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+impl ProcessManager {
+ pub fn new() -> Self {
+ Self {
+ processes: Arc::new(Default::default()),
+ }
+ }
+
+ pub fn get(&self, pid: Pid) -> Option<Process> {
+ self.processes.lock().unwrap().get(&pid).cloned()
+ }
+
+ fn wait<F>(&self, mut child: Child, f: F)
+ where
+ F: FnOnce(Pid, OnChildExitResult) + Send + 'static,
+ {
+ let processes = self.processes.clone();
+ tokio::spawn(async move {
+ // Safety: caller checked
+ let pid = child.id().unwrap();
+ let result = child.wait().await;
+
+ match result {
+ Ok(code) => {
+ warn!("pid: {pid} exited with status: {}", code);
+ f(pid, Ok(code));
+ processes.lock().unwrap().entry(pid).and_modify(|process| {
+ process.exit_status = Some(code);
+ process.exited = true;
+ });
+ }
+ Err(err) => {
+ warn!("pid: {pid} exited with error: {}", err);
+ f(pid, Err(err));
+ processes.lock().unwrap().entry(pid).and_modify(|process| {
+ process.exited = true;
+ });
+ }
+ }
+ });
+ }
+
+ /// Spawns a new process.
+ pub fn spawn<T: Into<Stdio>, F>(
+ &self,
+ binary: &str,
+ args: &[String],
+ stdout: T,
+ stderr: T,
+ on_exit: F,
+ ) -> Result<Pid>
+ where
+ F: FnOnce(Pid, OnChildExitResult) + Send + 'static,
+ {
+ info!("starting {} with {:?}", binary, args);
+ let child = tokio::process::Command::new(binary)
+ .args(args)
+ .stdout(stdout)
+ .stderr(stderr)
+ .spawn()
+ .context(error::SpawnChildSnafu)?;
+ let pid = child.id();
+
+ if let Some(pid) = pid {
+ self.processes.lock().unwrap().insert(
+ pid,
+ Process {
+ exit_status: None,
+ exited: false,
+ },
+ );
+
+ self.wait(child, on_exit);
+ Ok(pid)
+ } else {
+ error::UnexpectedExitedSnafu {}.fail()
+ }
+ }
+
+ /// Kills a process via [Pid].
+ pub fn kill<T: Into<Option<Signal>>>(pid: Pid, signal: T) -> Result<()> {
+ let signal: Option<Signal> = signal.into();
+ info!("kill pid :{} signal: {:?}", pid, signal);
+ // Safety: checked.
+ nix::sys::signal::kill(nix::unistd::Pid::from_raw(pid as i32), signal)
+ .context(error::KillProcessSnafu { pid })?;
+
+ Ok(())
+ }
+}
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum ProcessState {
+ NotSpawn,
+ Spawning,
+ HealthChecking(Pid),
+ Health(Pid),
+ Killing(Pid),
+ Exited(Pid),
+}
+
+impl ProcessState {
+ /// Returns true if it's [ProcessState::Health].
+ pub fn health(&self) -> bool {
+ matches!(self, ProcessState::Health(_))
+ }
+}
+
+/// The controller of an unstable process.
+pub struct UnstableProcessController {
+ pub binary_path: String,
+ pub args: Vec<String>,
+ pub root_dir: String,
+ pub seed: u64,
+ pub process_manager: ProcessManager,
+ pub health_check: Box<dyn HealthChecker>,
+ pub sender: tokio::sync::watch::Sender<ProcessState>,
+ pub running: Arc<AtomicBool>,
+}
+
+async fn path_to_stdio(path: &str) -> Result<std::fs::File> {
+ Ok(OpenOptions::new()
+ .create(true)
+ .truncate(true)
+ .read(true)
+ .write(true)
+ .open(path)
+ .await
+ .context(error::CreateFileSnafu { path })?
+ .into_std()
+ .await)
+}
+
+impl UnstableProcessController {
+ /// Start the unstable processes.
+ pub async fn start(&self) {
+ self.running.store(true, Ordering::Relaxed);
+ let mut rng = ChaChaRng::seed_from_u64(self.seed);
+ while self.running.load(Ordering::Relaxed) {
+ let min = rng.gen_range(50..100);
+ let max = rng.gen_range(300..600);
+ let ms = rng.gen_range(min..max);
+ let pid = self
+ .start_process_with_retry(3)
+ .await
+ .expect("Failed to start process");
+ tokio::time::sleep(Duration::from_millis(ms)).await;
+ warn!("After {ms}ms, killing pid: {pid}");
+ self.sender.send(ProcessState::Killing(pid)).unwrap();
+ ProcessManager::kill(pid, Signal::SIGKILL).expect("Failed to kill");
+ }
+ }
+
+ pub fn stop(&self) {
+ self.running.store(false, Ordering::Relaxed);
+ }
+
+ async fn start_process_with_retry(&self, max_retry: usize) -> Result<Pid> {
+ for _ in 0..max_retry {
+ let pid = self.start_process().await.unwrap();
+ let wait_timeout = self.health_check.wait_timeout();
+ let result = tokio::time::timeout(wait_timeout, async {
+ self.sender.send(ProcessState::HealthChecking(pid)).unwrap();
+ self.health_check.check().await;
+ })
+ .await;
+ match result {
+ Ok(_) => {
+ self.sender.send(ProcessState::Health(pid)).unwrap();
+ return Ok(pid);
+ }
+ Err(_) => {
+ ensure!(
+ self.process_manager.get(pid).unwrap().exited,
+ error::UnexpectedSnafu {
+ violated: format!("Failed to start process: pid: {pid}")
+ }
+ );
+ self.sender.send(ProcessState::Exited(pid)).unwrap();
+ // Retry alter
+ warn!("Wait for health checking timeout, retry later...");
+ }
+ }
+ }
+
+ error::UnexpectedSnafu {
+ violated: "Failed to start process",
+ }
+ .fail()
+ }
+
+ async fn start_process(&self) -> Result<Pid> {
+ let on_exit = move |pid, result| {
+ info!("The pid: {pid} exited, result: {result:?}");
+ };
+ let now = common_time::util::current_time_millis();
+ let stdout = format!("{}stdout-{}", self.root_dir, now);
+ let stderr = format!("{}stderr-{}", self.root_dir, now);
+ let stdout = path_to_stdio(&stdout).await?;
+ let stderr = path_to_stdio(&stderr).await?;
+ self.sender.send(ProcessState::Spawning).unwrap();
+ self.process_manager.spawn(
+ &self.binary_path,
+ &self.args.clone(),
+ stdout,
+ stderr,
+ on_exit,
+ )
+ }
+}
diff --git a/tests-fuzz/targets/fuzz_alter_logical_table.rs b/tests-fuzz/targets/fuzz_alter_logical_table.rs
index 3b0e25443097..57a773c56050 100644
--- a/tests-fuzz/targets/fuzz_alter_logical_table.rs
+++ b/tests-fuzz/targets/fuzz_alter_logical_table.rs
@@ -42,7 +42,7 @@ use tests_fuzz::ir::{
use tests_fuzz::translator::mysql::alter_expr::AlterTableExprTranslator;
use tests_fuzz::translator::mysql::create_expr::CreateTableExprTranslator;
use tests_fuzz::translator::DslTranslator;
-use tests_fuzz::utils::{init_greptime_connections, Connections};
+use tests_fuzz::utils::{init_greptime_connections_via_env, Connections};
use tests_fuzz::validator;
struct FuzzContext {
@@ -229,7 +229,7 @@ async fn execute_alter_table(ctx: FuzzContext, input: FuzzInput) -> Result<()> {
fuzz_target!(|input: FuzzInput| {
common_telemetry::init_default_ut_logging();
common_runtime::block_on_write(async {
- let Connections { mysql } = init_greptime_connections().await;
+ let Connections { mysql } = init_greptime_connections_via_env().await;
let ctx = FuzzContext {
greptime: mysql.expect("mysql connection init must be succeed"),
};
diff --git a/tests-fuzz/targets/fuzz_alter_table.rs b/tests-fuzz/targets/fuzz_alter_table.rs
index 3d345c2f16e7..a38e9d355a4b 100644
--- a/tests-fuzz/targets/fuzz_alter_table.rs
+++ b/tests-fuzz/targets/fuzz_alter_table.rs
@@ -39,7 +39,7 @@ use tests_fuzz::ir::{droppable_columns, AlterTableExpr, CreateTableExpr};
use tests_fuzz::translator::mysql::alter_expr::AlterTableExprTranslator;
use tests_fuzz::translator::mysql::create_expr::CreateTableExprTranslator;
use tests_fuzz::translator::DslTranslator;
-use tests_fuzz::utils::{init_greptime_connections, Connections};
+use tests_fuzz::utils::{init_greptime_connections_via_env, Connections};
use tests_fuzz::validator;
struct FuzzContext {
@@ -174,7 +174,7 @@ async fn execute_alter_table(ctx: FuzzContext, input: FuzzInput) -> Result<()> {
fuzz_target!(|input: FuzzInput| {
common_telemetry::init_default_ut_logging();
common_runtime::block_on_write(async {
- let Connections { mysql } = init_greptime_connections().await;
+ let Connections { mysql } = init_greptime_connections_via_env().await;
let ctx = FuzzContext {
greptime: mysql.expect("mysql connection init must be succeed"),
};
diff --git a/tests-fuzz/targets/fuzz_create_database.rs b/tests-fuzz/targets/fuzz_create_database.rs
index b59ed4fe8e69..7fd3f1c3d9c8 100644
--- a/tests-fuzz/targets/fuzz_create_database.rs
+++ b/tests-fuzz/targets/fuzz_create_database.rs
@@ -31,7 +31,7 @@ use tests_fuzz::generator::Generator;
use tests_fuzz::ir::CreateDatabaseExpr;
use tests_fuzz::translator::mysql::create_expr::CreateDatabaseExprTranslator;
use tests_fuzz::translator::DslTranslator;
-use tests_fuzz::utils::{init_greptime_connections, Connections};
+use tests_fuzz::utils::{init_greptime_connections_via_env, Connections};
struct FuzzContext {
greptime: Pool<MySql>,
@@ -95,7 +95,7 @@ async fn execute_create_database(ctx: FuzzContext, input: FuzzInput) -> Result<(
fuzz_target!(|input: FuzzInput| {
common_telemetry::init_default_ut_logging();
common_runtime::block_on_write(async {
- let Connections { mysql } = init_greptime_connections().await;
+ let Connections { mysql } = init_greptime_connections_via_env().await;
let ctx = FuzzContext {
greptime: mysql.expect("mysql connection init must be succeed"),
};
diff --git a/tests-fuzz/targets/fuzz_create_logical_table.rs b/tests-fuzz/targets/fuzz_create_logical_table.rs
index e66ea4518966..c54b8f9ab7ca 100644
--- a/tests-fuzz/targets/fuzz_create_logical_table.rs
+++ b/tests-fuzz/targets/fuzz_create_logical_table.rs
@@ -37,7 +37,7 @@ use tests_fuzz::generator::Generator;
use tests_fuzz::ir::{primary_key_and_not_null_column_options_generator, Column};
use tests_fuzz::translator::mysql::create_expr::CreateTableExprTranslator;
use tests_fuzz::translator::DslTranslator;
-use tests_fuzz::utils::{init_greptime_connections, Connections};
+use tests_fuzz::utils::{init_greptime_connections_via_env, Connections};
use tests_fuzz::validator;
struct FuzzContext {
@@ -184,7 +184,7 @@ async fn execute_create_logic_table(ctx: FuzzContext, input: FuzzInput) -> Resul
fuzz_target!(|input: FuzzInput| {
common_telemetry::init_default_ut_logging();
common_runtime::block_on_write(async {
- let Connections { mysql } = init_greptime_connections().await;
+ let Connections { mysql } = init_greptime_connections_via_env().await;
let ctx = FuzzContext {
greptime: mysql.expect("mysql connection init must be succeed"),
};
diff --git a/tests-fuzz/targets/fuzz_create_table.rs b/tests-fuzz/targets/fuzz_create_table.rs
index ae43e6d6966f..0eb29ec7c6dd 100644
--- a/tests-fuzz/targets/fuzz_create_table.rs
+++ b/tests-fuzz/targets/fuzz_create_table.rs
@@ -31,7 +31,7 @@ use tests_fuzz::generator::Generator;
use tests_fuzz::ir::CreateTableExpr;
use tests_fuzz::translator::mysql::create_expr::CreateTableExprTranslator;
use tests_fuzz::translator::DslTranslator;
-use tests_fuzz::utils::{init_greptime_connections, Connections};
+use tests_fuzz::utils::{init_greptime_connections_via_env, Connections};
use tests_fuzz::validator;
struct FuzzContext {
@@ -111,7 +111,7 @@ async fn execute_create_table(ctx: FuzzContext, input: FuzzInput) -> Result<()>
fuzz_target!(|input: FuzzInput| {
common_telemetry::init_default_ut_logging();
common_runtime::block_on_write(async {
- let Connections { mysql } = init_greptime_connections().await;
+ let Connections { mysql } = init_greptime_connections_via_env().await;
let ctx = FuzzContext {
greptime: mysql.expect("mysql connection init must be succeed"),
};
diff --git a/tests-fuzz/targets/fuzz_insert.rs b/tests-fuzz/targets/fuzz_insert.rs
index e6a24dba9dc5..3f133b289424 100644
--- a/tests-fuzz/targets/fuzz_insert.rs
+++ b/tests-fuzz/targets/fuzz_insert.rs
@@ -36,7 +36,7 @@ use tests_fuzz::ir::{CreateTableExpr, InsertIntoExpr};
use tests_fuzz::translator::mysql::create_expr::CreateTableExprTranslator;
use tests_fuzz::translator::mysql::insert_expr::InsertIntoExprTranslator;
use tests_fuzz::translator::DslTranslator;
-use tests_fuzz::utils::{init_greptime_connections, Connections};
+use tests_fuzz::utils::{init_greptime_connections_via_env, Connections};
struct FuzzContext {
greptime: Pool<MySql>,
@@ -155,7 +155,7 @@ async fn execute_insert(ctx: FuzzContext, input: FuzzInput) -> Result<()> {
fuzz_target!(|input: FuzzInput| {
common_telemetry::init_default_ut_logging();
common_runtime::block_on_write(async {
- let Connections { mysql } = init_greptime_connections().await;
+ let Connections { mysql } = init_greptime_connections_via_env().await;
let ctx = FuzzContext {
greptime: mysql.expect("mysql connection init must succeed"),
};
diff --git a/tests-fuzz/targets/fuzz_insert_logical_table.rs b/tests-fuzz/targets/fuzz_insert_logical_table.rs
index 97f0a8b82575..47f53386a859 100644
--- a/tests-fuzz/targets/fuzz_insert_logical_table.rs
+++ b/tests-fuzz/targets/fuzz_insert_logical_table.rs
@@ -38,7 +38,7 @@ use tests_fuzz::ir::{CreateTableExpr, InsertIntoExpr};
use tests_fuzz::translator::mysql::create_expr::CreateTableExprTranslator;
use tests_fuzz::translator::mysql::insert_expr::InsertIntoExprTranslator;
use tests_fuzz::translator::DslTranslator;
-use tests_fuzz::utils::{init_greptime_connections, Connections};
+use tests_fuzz::utils::{init_greptime_connections_via_env, Connections};
struct FuzzContext {
greptime: Pool<MySql>,
@@ -191,7 +191,7 @@ async fn execute_insert(ctx: FuzzContext, input: FuzzInput) -> Result<()> {
fuzz_target!(|input: FuzzInput| {
common_telemetry::init_default_ut_logging();
common_runtime::block_on_write(async {
- let Connections { mysql } = init_greptime_connections().await;
+ let Connections { mysql } = init_greptime_connections_via_env().await;
let ctx = FuzzContext {
greptime: mysql.expect("mysql connection init must succeed"),
};
diff --git a/tests-fuzz/targets/unstable/fuzz_create_table_standalone.rs b/tests-fuzz/targets/unstable/fuzz_create_table_standalone.rs
new file mode 100644
index 000000000000..c4b60b50d16b
--- /dev/null
+++ b/tests-fuzz/targets/unstable/fuzz_create_table_standalone.rs
@@ -0,0 +1,246 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#![no_main]
+
+use std::collections::HashMap;
+use std::fs::create_dir_all;
+use std::sync::atomic::AtomicBool;
+use std::sync::Arc;
+
+use common_telemetry::info;
+use common_telemetry::tracing::warn;
+use libfuzzer_sys::arbitrary::{Arbitrary, Unstructured};
+use libfuzzer_sys::fuzz_target;
+use rand::{Rng, SeedableRng};
+use rand_chacha::ChaChaRng;
+use serde::Serialize;
+use snafu::ensure;
+use sqlx::mysql::MySqlPoolOptions;
+use sqlx::{MySql, Pool};
+use tests_fuzz::context::TableContext;
+use tests_fuzz::error::Result;
+use tests_fuzz::fake::{
+ merge_two_word_map_fn, random_capitalize_map, uppercase_and_keyword_backtick_map,
+ MappedGenerator, WordGenerator,
+};
+use tests_fuzz::generator::create_expr::CreateTableExprGeneratorBuilder;
+use tests_fuzz::generator::Generator;
+use tests_fuzz::ir::CreateTableExpr;
+use tests_fuzz::translator::mysql::create_expr::CreateTableExprTranslator;
+use tests_fuzz::translator::DslTranslator;
+use tests_fuzz::utils::config::{get_conf_path, write_config_file};
+use tests_fuzz::utils::health::HttpHealthChecker;
+use tests_fuzz::utils::load_unstable_test_env_variables;
+use tests_fuzz::utils::process::{ProcessManager, ProcessState, UnstableProcessController};
+use tests_fuzz::{error, validator};
+use tokio::sync::watch;
+
+struct FuzzContext {
+ greptime: Pool<MySql>,
+}
+
+impl FuzzContext {
+ async fn close(self) {
+ self.greptime.close().await;
+ }
+}
+
+#[derive(Clone, Debug)]
+struct FuzzInput {
+ seed: u64,
+ num: usize,
+}
+
+impl Arbitrary<'_> for FuzzInput {
+ fn arbitrary(u: &mut Unstructured<'_>) -> arbitrary::Result<Self> {
+ let seed = u.int_in_range(u64::MIN..=u64::MAX)?;
+ let mut rng = ChaChaRng::seed_from_u64(seed);
+ let num = rng.gen_range(1..500);
+ Ok(FuzzInput { seed, num })
+ }
+}
+
+const DEFAULT_TEMPLATE: &str = "standalone.template.toml";
+const DEFAULT_CONFIG_NAME: &str = "standalone.template.toml";
+const DEFAULT_ROOT_DIR: &str = "/tmp/unstable_greptime/";
+const DEFAULT_DATA_HOME: &str = "/tmp/unstable_greptime/datahome/";
+const DEFAULT_MYSQL_URL: &str = "127.0.0.1:4002";
+const DEFAULT_HTTP_HEALTH_URL: &str = "http://127.0.0.1:4000/health";
+
+fn generate_create_table_expr<R: Rng + 'static>(rng: &mut R) -> CreateTableExpr {
+ let columns = rng.gen_range(2..30);
+ let create_table_generator = CreateTableExprGeneratorBuilder::default()
+ .name_generator(Box::new(MappedGenerator::new(
+ WordGenerator,
+ merge_two_word_map_fn(random_capitalize_map, uppercase_and_keyword_backtick_map),
+ )))
+ .columns(columns)
+ .engine("mito")
+ .build()
+ .unwrap();
+ create_table_generator.generate(rng).unwrap()
+}
+
+async fn connect_mysql(addr: &str) -> Pool<MySql> {
+ loop {
+ match MySqlPoolOptions::new()
+ .connect(&format!("mysql://{addr}/public"))
+ .await
+ {
+ Ok(mysql) => return mysql,
+ Err(err) => {
+ warn!("Reconnecting to {addr}, error: {err}")
+ }
+ }
+ }
+}
+
+async fn execute_unstable_create_table(
+ unstable_process_controller: Arc<UnstableProcessController>,
+ rx: watch::Receiver<ProcessState>,
+ input: FuzzInput,
+) -> Result<()> {
+ // Starts the unstable process.
+ let moved_unstable_process_controller = unstable_process_controller.clone();
+ let handler = tokio::spawn(async move { moved_unstable_process_controller.start().await });
+ let mut rng = ChaChaRng::seed_from_u64(input.seed);
+ let mysql = connect_mysql(DEFAULT_MYSQL_URL).await;
+ let ctx = FuzzContext { greptime: mysql };
+
+ let mut table_states = HashMap::new();
+
+ for _ in 0..input.num {
+ let expr = generate_create_table_expr(&mut rng);
+ let table_ctx = Arc::new(TableContext::from(&expr));
+ let table_name = expr.table_name.to_string();
+ if table_states.contains_key(&table_name) {
+            warn!("ignoring table with a duplicate name: {table_name}");
+            // Skip tables whose generated name collides with one already created.
+ continue;
+ }
+
+ let translator = CreateTableExprTranslator;
+ let sql = translator.translate(&expr).unwrap();
+ let result = sqlx::query(&sql).execute(&ctx.greptime).await;
+ match result {
+ Ok(result) => {
+ let state = *rx.borrow();
+ table_states.insert(table_name, state);
+ validate_columns(&ctx.greptime, &table_ctx).await;
+ info!("Create table: {sql}, result: {result:?}");
+ }
+ Err(err) => {
+ let state = *rx.borrow();
+ ensure!(
+ !state.health(),
+ error::UnexpectedSnafu {
+ violated: format!("Failed to create table: {sql}, error: {err}")
+ }
+ );
+ table_states.insert(table_name, state);
+ continue;
+ }
+ }
+ }
+
+ loop {
+ let sql = "DROP DATABASE IF EXISTS public";
+ match sqlx::query(sql).execute(&ctx.greptime).await {
+ Ok(result) => {
+ info!("Drop table: {}, result: {result:?}", sql);
+ break;
+ }
+ Err(err) => warn!("Failed to drop table: {}, error: {err}", sql),
+ }
+ }
+ // Cleans up
+ ctx.close().await;
+ unstable_process_controller.stop();
+ let _ = handler.await;
+ info!("Finishing test for input: {:?}", input);
+ Ok(())
+}
+
+async fn validate_columns(client: &Pool<MySql>, table_ctx: &TableContext) {
+ loop {
+ match validator::column::fetch_columns(client, "public".into(), table_ctx.name.clone())
+ .await
+ {
+ Ok(mut column_entries) => {
+ column_entries.sort_by(|a, b| a.column_name.cmp(&b.column_name));
+ let mut columns = table_ctx.columns.clone();
+ columns.sort_by(|a, b| a.name.value.cmp(&b.name.value));
+ validator::column::assert_eq(&column_entries, &columns).unwrap();
+ return;
+ }
+ Err(err) => warn!(
+ "Failed to fetch table '{}' columns, error: {}",
+ table_ctx.name, err
+ ),
+ }
+ }
+}
+
+fuzz_target!(|input: FuzzInput| {
+ common_telemetry::init_default_ut_logging();
+ common_runtime::block_on_write(async {
+ let variables = load_unstable_test_env_variables();
+ let root_dir = variables.root_dir.unwrap_or(DEFAULT_ROOT_DIR.to_string());
+ create_dir_all(&root_dir).unwrap();
+ let output_config_path = format!("{root_dir}{DEFAULT_CONFIG_NAME}");
+ let mut conf_path = get_conf_path();
+ conf_path.push(DEFAULT_TEMPLATE);
+ let template_path = conf_path.to_str().unwrap().to_string();
+
+ // Writes config file.
+ #[derive(Serialize)]
+ struct Context {
+ data_home: String,
+ }
+ write_config_file(
+ &template_path,
+ &Context {
+ data_home: DEFAULT_DATA_HOME.to_string(),
+ },
+ &output_config_path,
+ )
+ .await
+ .unwrap();
+
+ let args = vec![
+ "standalone".to_string(),
+ "start".to_string(),
+ format!("--config-file={output_config_path}"),
+ ];
+ let process_manager = ProcessManager::new();
+ let (tx, rx) = watch::channel(ProcessState::NotSpawn);
+ let unstable_process_controller = Arc::new(UnstableProcessController {
+ binary_path: variables.binary_path,
+ args,
+ root_dir,
+ seed: input.seed,
+ process_manager,
+ health_check: Box::new(HttpHealthChecker {
+ url: DEFAULT_HTTP_HEALTH_URL.to_string(),
+ }),
+ sender: tx,
+ running: Arc::new(AtomicBool::new(false)),
+ });
+
+ execute_unstable_create_table(unstable_process_controller, rx, input)
+ .await
+            .unwrap_or_else(|err| panic!("fuzz test must succeed: {err:?}"));
+ })
+});
|
test
|
introduce unstable fuzz create table test (#3788)
|
bac7e7bac964f5e409065d8a31e5ec580e719bd2
|
2024-12-09 12:49:00
|
Zhenchi
|
refactor: extract implicit conversion helper functions of vector type (#5118)
| false
|
diff --git a/src/common/function/src/scalars/vector.rs b/src/common/function/src/scalars/vector.rs
index 602504ec83ba..7c8cf5550e25 100644
--- a/src/common/function/src/scalars/vector.rs
+++ b/src/common/function/src/scalars/vector.rs
@@ -14,6 +14,7 @@
mod convert;
mod distance;
+pub(crate) mod impl_conv;
use std::sync::Arc;
diff --git a/src/common/function/src/scalars/vector/distance.rs b/src/common/function/src/scalars/vector/distance.rs
index 1905a375f3e4..f17eec5b042c 100644
--- a/src/common/function/src/scalars/vector/distance.rs
+++ b/src/common/function/src/scalars/vector/distance.rs
@@ -18,18 +18,17 @@ mod l2sq;
use std::borrow::Cow;
use std::fmt::Display;
-use std::sync::Arc;
use common_query::error::{InvalidFuncArgsSnafu, Result};
use common_query::prelude::Signature;
use datatypes::prelude::ConcreteDataType;
use datatypes::scalars::ScalarVectorBuilder;
-use datatypes::value::ValueRef;
-use datatypes::vectors::{Float32VectorBuilder, MutableVector, Vector, VectorRef};
+use datatypes::vectors::{Float32VectorBuilder, MutableVector, VectorRef};
use snafu::ensure;
use crate::function::{Function, FunctionContext};
use crate::helper;
+use crate::scalars::vector::impl_conv::{as_veclit, as_veclit_if_const};
macro_rules! define_distance_function {
($StructName:ident, $display_name:expr, $similarity_method:path) => {
@@ -80,17 +79,17 @@ macro_rules! define_distance_function {
return Ok(result.to_vector());
}
- let arg0_const = parse_if_constant_string(arg0)?;
- let arg1_const = parse_if_constant_string(arg1)?;
+ let arg0_const = as_veclit_if_const(arg0)?;
+ let arg1_const = as_veclit_if_const(arg1)?;
for i in 0..size {
let vec0 = match arg0_const.as_ref() {
- Some(a) => Some(Cow::Borrowed(a.as_slice())),
- None => as_vector(arg0.get_ref(i))?,
+ Some(a) => Some(Cow::Borrowed(a.as_ref())),
+ None => as_veclit(arg0.get_ref(i))?,
};
let vec1 = match arg1_const.as_ref() {
- Some(b) => Some(Cow::Borrowed(b.as_slice())),
- None => as_vector(arg1.get_ref(i))?,
+ Some(b) => Some(Cow::Borrowed(b.as_ref())),
+ None => as_veclit(arg1.get_ref(i))?,
};
if let (Some(vec0), Some(vec1)) = (vec0, vec1) {
@@ -129,98 +128,6 @@ define_distance_function!(CosDistanceFunction, "vec_cos_distance", cos::cos);
define_distance_function!(L2SqDistanceFunction, "vec_l2sq_distance", l2sq::l2sq);
define_distance_function!(DotProductFunction, "vec_dot_product", dot::dot);
-/// Parse a vector value if the value is a constant string.
-fn parse_if_constant_string(arg: &Arc<dyn Vector>) -> Result<Option<Vec<f32>>> {
- if !arg.is_const() {
- return Ok(None);
- }
- if arg.data_type() != ConcreteDataType::string_datatype() {
- return Ok(None);
- }
- arg.get_ref(0)
- .as_string()
- .unwrap() // Safe: checked if it is a string
- .map(parse_f32_vector_from_string)
- .transpose()
-}
-
-/// Convert a value to a vector value.
-/// Supported data types are binary and string.
-fn as_vector(arg: ValueRef<'_>) -> Result<Option<Cow<'_, [f32]>>> {
- match arg.data_type() {
- ConcreteDataType::Binary(_) => arg
- .as_binary()
- .unwrap() // Safe: checked if it is a binary
- .map(binary_as_vector)
- .transpose(),
- ConcreteDataType::String(_) => arg
- .as_string()
- .unwrap() // Safe: checked if it is a string
- .map(|s| Ok(Cow::Owned(parse_f32_vector_from_string(s)?)))
- .transpose(),
- ConcreteDataType::Null(_) => Ok(None),
- _ => InvalidFuncArgsSnafu {
- err_msg: format!("Unsupported data type: {:?}", arg.data_type()),
- }
- .fail(),
- }
-}
-
-/// Convert a u8 slice to a vector value.
-fn binary_as_vector(bytes: &[u8]) -> Result<Cow<'_, [f32]>> {
- if bytes.len() % std::mem::size_of::<f32>() != 0 {
- return InvalidFuncArgsSnafu {
- err_msg: format!("Invalid binary length of vector: {}", bytes.len()),
- }
- .fail();
- }
-
- if cfg!(target_endian = "little") {
- Ok(unsafe {
- let vec = std::slice::from_raw_parts(
- bytes.as_ptr() as *const f32,
- bytes.len() / std::mem::size_of::<f32>(),
- );
- Cow::Borrowed(vec)
- })
- } else {
- let v = bytes
- .chunks_exact(std::mem::size_of::<f32>())
- .map(|chunk| f32::from_le_bytes(chunk.try_into().unwrap()))
- .collect::<Vec<f32>>();
- Ok(Cow::Owned(v))
- }
-}
-
-/// Parse a string to a vector value.
-/// Valid inputs are strings like "[1.0, 2.0, 3.0]".
-fn parse_f32_vector_from_string(s: &str) -> Result<Vec<f32>> {
- let trimmed = s.trim();
- if !trimmed.starts_with('[') || !trimmed.ends_with(']') {
- return InvalidFuncArgsSnafu {
- err_msg: format!(
- "Failed to parse {s} to Vector value: not properly enclosed in brackets"
- ),
- }
- .fail();
- }
- let content = trimmed[1..trimmed.len() - 1].trim();
- if content.is_empty() {
- return Ok(Vec::new());
- }
-
- content
- .split(',')
- .map(|s| s.trim().parse::<f32>())
- .collect::<std::result::Result<_, _>>()
- .map_err(|e| {
- InvalidFuncArgsSnafu {
- err_msg: format!("Failed to parse {s} to Vector value: {e}"),
- }
- .build()
- })
-}
-
#[cfg(test)]
mod tests {
use std::sync::Arc;
@@ -456,27 +363,4 @@ mod tests {
assert!(result.is_err());
}
}
-
- #[test]
- fn test_parse_vector_from_string() {
- let result = parse_f32_vector_from_string("[1.0, 2.0, 3.0]").unwrap();
- assert_eq!(result, vec![1.0, 2.0, 3.0]);
-
- let result = parse_f32_vector_from_string("[]").unwrap();
- assert_eq!(result, Vec::<f32>::new());
-
- let result = parse_f32_vector_from_string("[1.0, a, 3.0]");
- assert!(result.is_err());
- }
-
- #[test]
- fn test_binary_as_vector() {
- let bytes = [0, 0, 128, 63];
- let result = binary_as_vector(&bytes).unwrap();
- assert_eq!(result.as_ref(), &[1.0]);
-
- let invalid_bytes = [0, 0, 128];
- let result = binary_as_vector(&invalid_bytes);
- assert!(result.is_err());
- }
}
diff --git a/src/common/function/src/scalars/vector/impl_conv.rs b/src/common/function/src/scalars/vector/impl_conv.rs
new file mode 100644
index 000000000000..903bfb2a0336
--- /dev/null
+++ b/src/common/function/src/scalars/vector/impl_conv.rs
@@ -0,0 +1,156 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::borrow::Cow;
+use std::sync::Arc;
+
+use common_query::error::{InvalidFuncArgsSnafu, Result};
+use datatypes::prelude::ConcreteDataType;
+use datatypes::value::ValueRef;
+use datatypes::vectors::Vector;
+
+/// Convert a constant string or binary literal to a vector literal.
+pub fn as_veclit_if_const(arg: &Arc<dyn Vector>) -> Result<Option<Cow<'_, [f32]>>> {
+ if !arg.is_const() {
+ return Ok(None);
+ }
+ if arg.data_type() != ConcreteDataType::string_datatype()
+ && arg.data_type() != ConcreteDataType::binary_datatype()
+ {
+ return Ok(None);
+ }
+ as_veclit(arg.get_ref(0))
+}
+
+/// Convert a string or binary literal to a vector literal.
+pub fn as_veclit(arg: ValueRef<'_>) -> Result<Option<Cow<'_, [f32]>>> {
+ match arg.data_type() {
+ ConcreteDataType::Binary(_) => arg
+ .as_binary()
+ .unwrap() // Safe: checked if it is a binary
+ .map(binlit_as_veclit)
+ .transpose(),
+ ConcreteDataType::String(_) => arg
+ .as_string()
+ .unwrap() // Safe: checked if it is a string
+ .map(|s| Ok(Cow::Owned(parse_veclit_from_strlit(s)?)))
+ .transpose(),
+ ConcreteDataType::Null(_) => Ok(None),
+ _ => InvalidFuncArgsSnafu {
+ err_msg: format!("Unsupported data type: {:?}", arg.data_type()),
+ }
+ .fail(),
+ }
+}
+
+/// Convert a u8 slice to a vector literal.
+pub fn binlit_as_veclit(bytes: &[u8]) -> Result<Cow<'_, [f32]>> {
+ if bytes.len() % std::mem::size_of::<f32>() != 0 {
+ return InvalidFuncArgsSnafu {
+ err_msg: format!("Invalid binary length of vector: {}", bytes.len()),
+ }
+ .fail();
+ }
+
+ if cfg!(target_endian = "little") {
+ Ok(unsafe {
+ let vec = std::slice::from_raw_parts(
+ bytes.as_ptr() as *const f32,
+ bytes.len() / std::mem::size_of::<f32>(),
+ );
+ Cow::Borrowed(vec)
+ })
+ } else {
+ let v = bytes
+ .chunks_exact(std::mem::size_of::<f32>())
+ .map(|chunk| f32::from_le_bytes(chunk.try_into().unwrap()))
+ .collect::<Vec<f32>>();
+ Ok(Cow::Owned(v))
+ }
+}
+
+/// Parse a string literal to a vector literal.
+/// Valid inputs are strings like "[1.0, 2.0, 3.0]".
+pub fn parse_veclit_from_strlit(s: &str) -> Result<Vec<f32>> {
+ let trimmed = s.trim();
+ if !trimmed.starts_with('[') || !trimmed.ends_with(']') {
+ return InvalidFuncArgsSnafu {
+ err_msg: format!(
+ "Failed to parse {s} to Vector value: not properly enclosed in brackets"
+ ),
+ }
+ .fail();
+ }
+ let content = trimmed[1..trimmed.len() - 1].trim();
+ if content.is_empty() {
+ return Ok(Vec::new());
+ }
+
+ content
+ .split(',')
+ .map(|s| s.trim().parse::<f32>())
+ .collect::<std::result::Result<_, _>>()
+ .map_err(|e| {
+ InvalidFuncArgsSnafu {
+ err_msg: format!("Failed to parse {s} to Vector value: {e}"),
+ }
+ .build()
+ })
+}
+
+#[allow(unused)]
+/// Convert a vector literal to a binary literal.
+pub fn veclit_to_binlit(vec: &[f32]) -> Vec<u8> {
+ if cfg!(target_endian = "little") {
+ unsafe {
+ std::slice::from_raw_parts(vec.as_ptr() as *const u8, std::mem::size_of_val(vec))
+ .to_vec()
+ }
+ } else {
+ let mut bytes = Vec::with_capacity(std::mem::size_of_val(vec));
+ for e in vec {
+ bytes.extend_from_slice(&e.to_le_bytes());
+ }
+ bytes
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_parse_veclit_from_strlit() {
+ let result = parse_veclit_from_strlit("[1.0, 2.0, 3.0]").unwrap();
+ assert_eq!(result, vec![1.0, 2.0, 3.0]);
+
+ let result = parse_veclit_from_strlit("[]").unwrap();
+ assert_eq!(result, Vec::<f32>::new());
+
+ let result = parse_veclit_from_strlit("[1.0, a, 3.0]");
+ assert!(result.is_err());
+ }
+
+ #[test]
+ fn test_binlit_as_veclit() {
+ let vec = &[1.0, 2.0, 3.0];
+ let bytes = veclit_to_binlit(vec);
+ let result = binlit_as_veclit(&bytes).unwrap();
+ assert_eq!(result.as_ref(), vec);
+
+ let invalid_bytes = [0, 0, 128];
+ let result = binlit_as_veclit(&invalid_bytes);
+ assert!(result.is_err());
+ }
+}
|
refactor
|
extract implicit conversion helper functions of vector type (#5118)
|
26848f9f5c082b4533ac2463e7a51b777355e3c8
|
2022-12-28 07:52:46
|
LFC
|
feat: Replace SelectResult with FlightData (#776)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index a827be32fd1f..6dc7e947dea3 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1383,7 +1383,6 @@ version = "6.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e621e7e86c46fd8a14c32c6ae3cb95656621b4743a27d0cffedb831d46e7ad21"
dependencies = [
- "crossterm",
"strum",
"strum_macros",
"unicode-width",
@@ -1866,31 +1865,6 @@ dependencies = [
"cfg-if 1.0.0",
]
-[[package]]
-name = "crossterm"
-version = "0.25.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e64e6c0fbe2c17357405f7c758c1ef960fce08bdfb2c03d88d2a18d7e09c4b67"
-dependencies = [
- "bitflags",
- "crossterm_winapi",
- "libc",
- "mio",
- "parking_lot",
- "signal-hook",
- "signal-hook-mio",
- "winapi",
-]
-
-[[package]]
-name = "crossterm_winapi"
-version = "0.9.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2ae1b35a484aa10e07fe0638d02301c5ad24de82d310ccbd2f3693da5f09bf1c"
-dependencies = [
- "winapi",
-]
-
[[package]]
name = "crunchy"
version = "0.2.2"
@@ -6546,27 +6520,6 @@ version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3"
-[[package]]
-name = "signal-hook"
-version = "0.3.14"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a253b5e89e2698464fc26b545c9edceb338e18a89effeeecfea192c3025be29d"
-dependencies = [
- "libc",
- "signal-hook-registry",
-]
-
-[[package]]
-name = "signal-hook-mio"
-version = "0.2.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "29ad2e15f37ec9a6cc544097b78a1ec90001e9f71b81338ca39f430adaca99af"
-dependencies = [
- "libc",
- "mio",
- "signal-hook",
-]
-
[[package]]
name = "signal-hook-registry"
version = "1.4.0"
@@ -6750,8 +6703,8 @@ version = "0.1.0"
dependencies = [
"async-trait",
"client",
- "comfy-table",
"common-base",
+ "common-grpc",
"sqlness",
"tokio",
]
diff --git a/src/api/build.rs b/src/api/build.rs
index 3a3008c4811a..9187207a425b 100644
--- a/src/api/build.rs
+++ b/src/api/build.rs
@@ -20,7 +20,6 @@ fn main() {
.file_descriptor_set_path(default_out_dir.join("greptime_fd.bin"))
.compile(
&[
- "greptime/v1/select.proto",
"greptime/v1/greptime.proto",
"greptime/v1/meta/common.proto",
"greptime/v1/meta/heartbeat.proto",
diff --git a/src/api/greptime/v1/database.proto b/src/api/greptime/v1/database.proto
index bd24087264c4..9bfd4f4bcf20 100644
--- a/src/api/greptime/v1/database.proto
+++ b/src/api/greptime/v1/database.proto
@@ -48,17 +48,11 @@ message InsertExpr {
message ObjectResult {
ResultHeader header = 1;
oneof result {
- SelectResult select = 2;
- MutateResult mutate = 3;
- FlightDataRaw flight_data = 4;
+ MutateResult mutate = 2;
+ FlightDataRaw flight_data = 3;
}
}
-// TODO(LFC): replace with flight data
-message SelectResult {
- bytes raw_data = 1;
-}
-
message FlightDataRaw {
repeated bytes raw_data = 1;
}
diff --git a/src/api/greptime/v1/select.proto b/src/api/greptime/v1/select.proto
deleted file mode 100644
index 636b614f14a8..000000000000
--- a/src/api/greptime/v1/select.proto
+++ /dev/null
@@ -1,10 +0,0 @@
-syntax = "proto3";
-
-package greptime.v1.codec;
-
-import "greptime/v1/column.proto";
-
-message SelectResult {
- repeated Column columns = 1;
- uint32 row_count = 2;
-}
diff --git a/src/api/src/result.rs b/src/api/src/result.rs
index f467a7ec03a7..6d79a3b775f3 100644
--- a/src/api/src/result.rs
+++ b/src/api/src/result.rs
@@ -14,10 +14,8 @@
use common_error::prelude::ErrorExt;
-use crate::v1::codec::SelectResult;
use crate::v1::{
admin_result, object_result, AdminResult, MutateResult, ObjectResult, ResultHeader,
- SelectResult as SelectResultRaw,
};
pub const PROTOCOL_VERSION: u32 = 1;
@@ -36,7 +34,6 @@ pub struct ObjectResultBuilder {
pub enum Body {
Mutate((Success, Failure)),
- Select(SelectResult),
FlightDataRaw(FlightDataRaw),
}
@@ -69,11 +66,6 @@ impl ObjectResultBuilder {
self
}
- pub fn select_result(mut self, select_result: SelectResult) -> Self {
- self.result = Some(Body::Select(select_result));
- self
- }
-
pub fn flight_data(mut self, flight_data: FlightDataRaw) -> Self {
self.result = Some(Body::FlightDataRaw(flight_data));
self
@@ -93,9 +85,6 @@ impl ObjectResultBuilder {
failure,
}))
}
- Some(Body::Select(select)) => Some(object_result::Result::Select(SelectResultRaw {
- raw_data: select.into(),
- })),
Some(Body::FlightDataRaw(raw_data)) => Some(object_result::Result::FlightData(
crate::v1::FlightDataRaw { raw_data },
)),
diff --git a/src/api/src/serde.rs b/src/api/src/serde.rs
index 18dd19b5fa5d..1c1bc8beb7a9 100644
--- a/src/api/src/serde.rs
+++ b/src/api/src/serde.rs
@@ -15,7 +15,6 @@
pub use prost::DecodeError;
use prost::Message;
-use crate::v1::codec::SelectResult;
use crate::v1::meta::TableRouteValue;
macro_rules! impl_convert_with_bytes {
@@ -36,80 +35,4 @@ macro_rules! impl_convert_with_bytes {
};
}
-impl_convert_with_bytes!(SelectResult);
impl_convert_with_bytes!(TableRouteValue);
-
-#[cfg(test)]
-mod tests {
- use std::ops::Deref;
-
- use crate::v1::codec::*;
- use crate::v1::{column, Column};
-
- const SEMANTIC_TAG: i32 = 0;
-
- #[test]
- fn test_convert_select_result() {
- let select_result = mock_select_result();
-
- let bytes: Vec<u8> = select_result.into();
- let result: SelectResult = bytes.deref().try_into().unwrap();
-
- assert_eq!(8, result.row_count);
- assert_eq!(1, result.columns.len());
-
- let column = &result.columns[0];
- assert_eq!("foo", column.column_name);
- assert_eq!(SEMANTIC_TAG, column.semantic_type);
- assert_eq!(vec![1], column.null_mask);
- assert_eq!(
- vec![2, 3, 4, 5, 6, 7, 8],
- column.values.as_ref().unwrap().i32_values
- );
- }
-
- #[should_panic]
- #[test]
- fn test_convert_select_result_wrong() {
- let select_result = mock_select_result();
-
- let mut bytes: Vec<u8> = select_result.into();
-
- // modify some bytes
- bytes[0] = 0b1;
- bytes[1] = 0b1;
-
- let result: SelectResult = bytes.deref().try_into().unwrap();
-
- assert_eq!(8, result.row_count);
- assert_eq!(1, result.columns.len());
-
- let column = &result.columns[0];
- assert_eq!("foo", column.column_name);
- assert_eq!(SEMANTIC_TAG, column.semantic_type);
- assert_eq!(vec![1], column.null_mask);
- assert_eq!(
- vec![2, 3, 4, 5, 6, 7, 8],
- column.values.as_ref().unwrap().i32_values
- );
- }
-
- fn mock_select_result() -> SelectResult {
- let values = column::Values {
- i32_values: vec![2, 3, 4, 5, 6, 7, 8],
- ..Default::default()
- };
- let null_mask = vec![1];
- let column = Column {
- column_name: "foo".to_string(),
- semantic_type: SEMANTIC_TAG,
- values: Some(values),
- null_mask,
- ..Default::default()
- };
- SelectResult {
- columns: vec![column],
- row_count: 8,
- }
- }
-}
diff --git a/src/api/src/v1.rs b/src/api/src/v1.rs
index 380e810f0976..636b6f2cf013 100644
--- a/src/api/src/v1.rs
+++ b/src/api/src/v1.rs
@@ -17,9 +17,5 @@ tonic::include_proto!("greptime.v1");
pub const GREPTIME_FD_SET: &[u8] = tonic::include_file_descriptor_set!("greptime_fd");
-pub mod codec {
- tonic::include_proto!("greptime.v1.codec");
-}
-
mod column_def;
pub mod meta;
diff --git a/src/client/src/database.rs b/src/client/src/database.rs
index 14c1d5dc28f0..8595dcd852ba 100644
--- a/src/client/src/database.rs
+++ b/src/client/src/database.rs
@@ -12,24 +12,16 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::sync::Arc;
-
-use api::v1::codec::SelectResult as GrpcSelectResult;
-use api::v1::column::SemanticType;
use api::v1::{
object_expr, object_result, query_request, DatabaseRequest, ExprHeader, InsertExpr,
MutateResult as GrpcMutateResult, ObjectExpr, ObjectResult as GrpcObjectResult, QueryRequest,
};
use common_error::status_code::StatusCode;
use common_grpc::flight::{raw_flight_data_to_message, FlightMessage};
-use common_grpc_expr::column_to_vector;
use common_query::Output;
-use common_recordbatch::{RecordBatch, RecordBatches};
-use datatypes::prelude::*;
-use datatypes::schema::{ColumnSchema, Schema};
use snafu::{ensure, OptionExt, ResultExt};
-use crate::error::{ColumnToVectorSnafu, ConvertSchemaSnafu, DatanodeSnafu, DecodeSelectSnafu};
+use crate::error::DatanodeSnafu;
use crate::{error, Client, Result};
pub const PROTOCOL_VERSION: u32 = 1;
@@ -141,7 +133,6 @@ impl Database {
#[derive(Debug)]
pub enum ObjectResult {
- Select(GrpcSelectResult),
FlightData(Vec<FlightMessage>),
Mutate(GrpcMutateResult),
}
@@ -165,10 +156,6 @@ impl TryFrom<api::v1::ObjectResult> for ObjectResult {
actual: 0_usize,
})?;
Ok(match obj_result {
- object_result::Result::Select(select) => {
- let result = (*select.raw_data).try_into().context(DecodeSelectSnafu)?;
- ObjectResult::Select(result)
- }
object_result::Result::Mutate(mutate) => ObjectResult::Mutate(mutate),
object_result::Result::FlightData(flight_data) => {
let flight_messages = raw_flight_data_to_message(flight_data.raw_data)
@@ -188,41 +175,6 @@ impl TryFrom<ObjectResult> for Output {
fn try_from(value: ObjectResult) -> Result<Self> {
let output = match value {
- ObjectResult::Select(select) => {
- let vectors = select
- .columns
- .iter()
- .map(|column| {
- column_to_vector(column, select.row_count).context(ColumnToVectorSnafu)
- })
- .collect::<Result<Vec<VectorRef>>>()?;
-
- let column_schemas = select
- .columns
- .iter()
- .zip(vectors.iter())
- .map(|(column, vector)| {
- let datatype = vector.data_type();
- // nullable or not, does not affect the output
- let mut column_schema =
- ColumnSchema::new(&column.column_name, datatype, true);
- if column.semantic_type == SemanticType::Timestamp as i32 {
- column_schema = column_schema.with_time_index(true);
- }
- column_schema
- })
- .collect::<Vec<ColumnSchema>>();
-
- let schema = Arc::new(Schema::try_new(column_schemas).context(ConvertSchemaSnafu)?);
- let recordbatches = if vectors.is_empty() {
- RecordBatches::try_new(schema, vec![])
- } else {
- RecordBatch::new(schema, vectors)
- .and_then(|batch| RecordBatches::try_new(batch.schema.clone(), vec![batch]))
- }
- .context(error::CreateRecordBatchesSnafu)?;
- Output::RecordBatches(recordbatches)
- }
ObjectResult::Mutate(mutate) => {
if mutate.failure != 0 {
return error::MutateFailureSnafu {
@@ -240,17 +192,19 @@ impl TryFrom<ObjectResult> for Output {
#[cfg(test)]
mod tests {
+ use std::sync::Arc;
+
use api::helper::ColumnDataTypeWrapper;
use api::v1::Column;
use common_grpc::select::{null_mask, values};
+ use common_grpc_expr::column_to_vector;
+ use datatypes::prelude::{Vector, VectorRef};
use datatypes::vectors::{
BinaryVector, BooleanVector, DateTimeVector, DateVector, Float32Vector, Float64Vector,
Int16Vector, Int32Vector, Int64Vector, Int8Vector, StringVector, UInt16Vector,
UInt32Vector, UInt64Vector, UInt8Vector,
};
- use super::*;
-
#[test]
fn test_column_to_vector() {
let mut column = create_test_column(Arc::new(BooleanVector::from(vec![true])));
diff --git a/src/client/src/error.rs b/src/client/src/error.rs
index 74645fbee633..e7e6147c6b2f 100644
--- a/src/client/src/error.rs
+++ b/src/client/src/error.rs
@@ -14,7 +14,6 @@
use std::any::Any;
-use api::serde::DecodeError;
use common_error::prelude::*;
#[derive(Debug, Snafu)]
@@ -44,9 +43,6 @@ pub enum Error {
backtrace: Backtrace,
},
- #[snafu(display("Fail to decode select result, source: {}", source))]
- DecodeSelect { source: DecodeError },
-
#[snafu(display("Error occurred on the data node, code: {}, msg: {}", code, msg))]
Datanode { code: u32, msg: String },
@@ -65,12 +61,6 @@ pub enum Error {
source: api::error::Error,
},
- #[snafu(display("Failed to create RecordBatches, source: {}", source))]
- CreateRecordBatches {
- #[snafu(backtrace)]
- source: common_recordbatch::error::Error,
- },
-
#[snafu(display("Illegal GRPC client state: {}", err_msg))]
IllegalGrpcClientState {
err_msg: String,
@@ -80,12 +70,6 @@ pub enum Error {
#[snafu(display("Missing required field in protobuf, field: {}", field))]
MissingField { field: String, backtrace: Backtrace },
- #[snafu(display("Failed to convert schema, source: {}", source))]
- ConvertSchema {
- #[snafu(backtrace)]
- source: datatypes::error::Error,
- },
-
#[snafu(display(
"Failed to create gRPC channel, peer address: {}, source: {}",
addr,
@@ -96,12 +80,6 @@ pub enum Error {
#[snafu(backtrace)]
source: common_grpc::error::Error,
},
-
- #[snafu(display("Failed to convert column to vector, source: {}", source))]
- ColumnToVector {
- #[snafu(backtrace)]
- source: common_grpc_expr::error::Error,
- },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -113,18 +91,14 @@ impl ErrorExt for Error {
| Error::MissingResult { .. }
| Error::MissingHeader { .. }
| Error::TonicStatus { .. }
- | Error::DecodeSelect { .. }
| Error::Datanode { .. }
| Error::MutateFailure { .. }
| Error::ColumnDataType { .. }
| Error::MissingField { .. } => StatusCode::Internal,
- Error::ConvertSchema { source } => source.status_code(),
- Error::CreateRecordBatches { source } => source.status_code(),
Error::CreateChannel { source, .. } | Error::ConvertFlightData { source } => {
source.status_code()
}
Error::IllegalGrpcClientState { .. } => StatusCode::Unexpected,
- Error::ColumnToVector { source, .. } => source.status_code(),
}
}
diff --git a/src/common/grpc/src/select.rs b/src/common/grpc/src/select.rs
index 7bf3e9cd251c..8b6730442b4d 100644
--- a/src/common/grpc/src/select.rs
+++ b/src/common/grpc/src/select.rs
@@ -12,17 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use api::helper::ColumnDataTypeWrapper;
-use api::result::{build_err_result, ObjectResultBuilder};
-use api::v1::codec::SelectResult;
-use api::v1::column::{SemanticType, Values};
-use api::v1::{Column, ObjectResult};
+use api::v1::column::Values;
use common_base::BitVec;
-use common_error::prelude::ErrorExt;
-use common_error::status_code::StatusCode;
-use common_query::Output;
-use common_recordbatch::{RecordBatches, SendableRecordBatchStream};
-use datatypes::schema::SchemaRef;
use datatypes::types::{TimestampType, WrapperType};
use datatypes::vectors::{
BinaryVector, BooleanVector, DateTimeVector, DateVector, Float32Vector, Float64Vector,
@@ -30,89 +21,9 @@ use datatypes::vectors::{
TimestampMillisecondVector, TimestampNanosecondVector, TimestampSecondVector, UInt16Vector,
UInt32Vector, UInt64Vector, UInt8Vector, VectorRef,
};
-use snafu::{OptionExt, ResultExt};
+use snafu::OptionExt;
-use crate::error::{self, ConversionSnafu, Result};
-
-// TODO(LFC): replace with FlightData
-pub async fn to_object_result(output: std::result::Result<Output, impl ErrorExt>) -> ObjectResult {
- let result = match output {
- Ok(Output::AffectedRows(rows)) => Ok(ObjectResultBuilder::new()
- .status_code(StatusCode::Success as u32)
- .mutate_result(rows as u32, 0)
- .build()),
- Ok(Output::Stream(stream)) => collect(stream).await,
- Ok(Output::RecordBatches(recordbatches)) => build_result(recordbatches),
- Err(e) => return build_err_result(&e),
- };
- match result {
- Ok(r) => r,
- Err(e) => build_err_result(&e),
- }
-}
-
-async fn collect(stream: SendableRecordBatchStream) -> Result<ObjectResult> {
- let recordbatches = RecordBatches::try_collect(stream)
- .await
- .context(error::CreateRecordBatchSnafu)?;
- let object_result = build_result(recordbatches)?;
- Ok(object_result)
-}
-
-fn build_result(recordbatches: RecordBatches) -> Result<ObjectResult> {
- let select_result = try_convert(recordbatches)?;
- let object_result = ObjectResultBuilder::new()
- .status_code(StatusCode::Success as u32)
- .select_result(select_result)
- .build();
- Ok(object_result)
-}
-
-#[inline]
-fn get_semantic_type(schema: &SchemaRef, idx: usize) -> i32 {
- if Some(idx) == schema.timestamp_index() {
- SemanticType::Timestamp as i32
- } else {
- // FIXME(dennis): set primary key's columns semantic type as Tag,
- // but we can't get the table's schema here right now.
- SemanticType::Field as i32
- }
-}
-
-fn try_convert(record_batches: RecordBatches) -> Result<SelectResult> {
- let schema = record_batches.schema();
- let record_batches = record_batches.take();
-
- let row_count: usize = record_batches.iter().map(|r| r.num_rows()).sum();
-
- let schemas = schema.column_schemas();
- let mut columns = Vec::with_capacity(schemas.len());
-
- for (idx, column_schema) in schemas.iter().enumerate() {
- let column_name = column_schema.name.clone();
-
- let arrays: Vec<_> = record_batches
- .iter()
- .map(|r| r.column(idx).clone())
- .collect();
-
- let column = Column {
- column_name,
- values: Some(values(&arrays)?),
- null_mask: null_mask(&arrays, row_count),
- datatype: ColumnDataTypeWrapper::try_from(column_schema.data_type.clone())
- .context(error::ColumnDataTypeSnafu)?
- .datatype() as i32,
- semantic_type: get_semantic_type(&schema, idx),
- };
- columns.push(column);
- }
-
- Ok(SelectResult {
- columns,
- row_count: row_count as u32,
- })
-}
+use crate::error::{ConversionSnafu, Result};
pub fn null_mask(arrays: &[VectorRef], row_count: usize) -> Vec<u8> {
let null_count: usize = arrays.iter().map(|a| a.null_count()).sum();
@@ -264,34 +175,8 @@ pub fn values(arrays: &[VectorRef]) -> Result<Values> {
mod tests {
use std::sync::Arc;
- use common_recordbatch::{RecordBatch, RecordBatches};
- use datatypes::data_type::ConcreteDataType;
- use datatypes::schema::{ColumnSchema, Schema};
-
use super::*;
- #[test]
- fn test_convert_record_batches_to_select_result() {
- let r1 = mock_record_batch();
- let schema = r1.schema.clone();
- let r2 = mock_record_batch();
- let record_batches = vec![r1, r2];
- let record_batches = RecordBatches::try_new(schema, record_batches).unwrap();
-
- let s = try_convert(record_batches).unwrap();
-
- let c1 = s.columns.get(0).unwrap();
- let c2 = s.columns.get(1).unwrap();
- assert_eq!("c1", c1.column_name);
- assert_eq!("c2", c2.column_name);
-
- assert_eq!(vec![0b0010_0100], c1.null_mask);
- assert_eq!(vec![0b0011_0110], c2.null_mask);
-
- assert_eq!(vec![1, 2, 1, 2], c1.values.as_ref().unwrap().u32_values);
- assert_eq!(vec![1, 1], c2.values.as_ref().unwrap().u32_values);
- }
-
#[test]
fn test_convert_arrow_arrays_i32() {
let array = Int32Vector::from(vec![Some(1), Some(2), None, Some(3)]);
@@ -359,18 +244,4 @@ mod tests {
let mask = null_mask(&[a1, a2], 3 + 3);
assert_eq!(vec![0b0010_0000], mask);
}
-
- fn mock_record_batch() -> RecordBatch {
- let column_schemas = vec![
- ColumnSchema::new("c1", ConcreteDataType::uint32_datatype(), true),
- ColumnSchema::new("c2", ConcreteDataType::uint32_datatype(), true),
- ];
- let schema = Arc::new(Schema::try_new(column_schemas).unwrap());
-
- let v1 = Arc::new(UInt32Vector::from(vec![Some(1), Some(2), None]));
- let v2 = Arc::new(UInt32Vector::from(vec![Some(1), None, None]));
- let columns: Vec<VectorRef> = vec![v1, v2];
-
- RecordBatch::new(schema, columns).unwrap()
- }
}
diff --git a/src/common/recordbatch/src/recordbatch.rs b/src/common/recordbatch/src/recordbatch.rs
index 0ecbca104c85..863f1107ce8c 100644
--- a/src/common/recordbatch/src/recordbatch.rs
+++ b/src/common/recordbatch/src/recordbatch.rs
@@ -151,7 +151,7 @@ impl<'a> RecordBatchRowIterator<'a> {
}
impl<'a> Iterator for RecordBatchRowIterator<'a> {
- type Item = Result<Vec<Value>>;
+ type Item = Vec<Value>;
fn next(&mut self) -> Option<Self::Item> {
if self.row_cursor == self.rows {
@@ -165,7 +165,7 @@ impl<'a> Iterator for RecordBatchRowIterator<'a> {
}
self.row_cursor += 1;
- Some(Ok(row))
+ Some(row)
}
}
}
@@ -256,7 +256,6 @@ mod tests {
record_batch_iter
.next()
.unwrap()
- .unwrap()
.into_iter()
.collect::<Vec<Value>>()
);
@@ -266,7 +265,6 @@ mod tests {
record_batch_iter
.next()
.unwrap()
- .unwrap()
.into_iter()
.collect::<Vec<Value>>()
);
@@ -276,7 +274,6 @@ mod tests {
record_batch_iter
.next()
.unwrap()
- .unwrap()
.into_iter()
.collect::<Vec<Value>>()
);
@@ -286,7 +283,6 @@ mod tests {
record_batch_iter
.next()
.unwrap()
- .unwrap()
.into_iter()
.collect::<Vec<Value>>()
);
diff --git a/src/frontend/src/instance/prometheus.rs b/src/frontend/src/instance/prometheus.rs
index fde5e0b43cd2..e2fa79a59c73 100644
--- a/src/frontend/src/instance/prometheus.rs
+++ b/src/frontend/src/instance/prometheus.rs
@@ -12,24 +12,23 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::sync::Arc;
-
use api::prometheus::remote::read_request::ResponseType;
use api::prometheus::remote::{Query, QueryResult, ReadRequest, ReadResponse, WriteRequest};
+use api::v1::object_expr::Expr;
+use api::v1::{query_request, ObjectExpr, QueryRequest};
use async_trait::async_trait;
use client::ObjectResult;
use common_error::prelude::BoxedError;
-use common_grpc::select::to_object_result;
+use common_grpc::flight;
use common_telemetry::logging;
use prost::Message;
use servers::error::{self, Result as ServerResult};
use servers::prometheus::{self, Metrics};
use servers::query_handler::{PrometheusProtocolHandler, PrometheusResponse};
use servers::Mode;
-use session::context::QueryContext;
-use snafu::{ensure, OptionExt, ResultExt};
+use snafu::{OptionExt, ResultExt};
-use crate::instance::{parse_stmt, Instance};
+use crate::instance::Instance;
const SAMPLES_RESPONSE_TYPE: i32 = ResponseType::Samples as i32;
@@ -66,13 +65,11 @@ fn object_result_to_query_result(
table_name: &str,
object_result: ObjectResult,
) -> ServerResult<QueryResult> {
- let select_result = match object_result {
- ObjectResult::Select(result) => result,
- _ => unreachable!(),
- };
-
+ let ObjectResult::FlightData(flight_messages) = object_result else { unreachable!() };
+ let recordbatches = flight::flight_messages_to_recordbatches(flight_messages)
+ .context(error::ConvertFlightMessageSnafu)?;
Ok(QueryResult {
- timeseries: prometheus::select_result_to_timeseries(table_name, select_result)?,
+ timeseries: prometheus::recordbatches_to_timeseries(table_name, recordbatches)?,
})
}
@@ -92,24 +89,16 @@ impl Instance {
sql
);
- let query_ctx = Arc::new(QueryContext::with_current_schema(db.to_string()));
-
- let mut stmts = parse_stmt(&sql)
- .map_err(BoxedError::new)
- .context(error::ExecuteQuerySnafu { query: &sql })?;
-
- ensure!(
- stmts.len() == 1,
- error::InvalidQuerySnafu {
- reason: "The sql has multiple statements".to_string()
- }
- );
- let stmt = stmts.remove(0);
-
- let output = self.sql_handler.do_statement_query(stmt, query_ctx).await;
-
- let object_result = to_object_result(output)
- .await
+ let query = ObjectExpr {
+ header: None,
+ expr: Some(Expr::Query(QueryRequest {
+ query: Some(query_request::Query::Sql(sql.to_string())),
+ })),
+ };
+ let object_result = self
+ .grpc_query_handler
+ .do_query(query)
+ .await?
.try_into()
.map_err(BoxedError::new)
.context(error::ExecuteQuerySnafu { query: &sql })?;
diff --git a/src/frontend/src/table/insert.rs b/src/frontend/src/table/insert.rs
index 5d6c62a68c52..6473bec4fe16 100644
--- a/src/frontend/src/table/insert.rs
+++ b/src/frontend/src/table/insert.rs
@@ -72,7 +72,7 @@ impl DistTable {
let object_result = join.await.context(error::JoinTaskSnafu)??;
let result = match object_result {
ObjectResult::Mutate(result) => result,
- ObjectResult::Select(_) | ObjectResult::FlightData(_) => unreachable!(),
+ ObjectResult::FlightData(_) => unreachable!(),
};
success += result.success;
failure += result.failure;
diff --git a/src/servers/src/error.rs b/src/servers/src/error.rs
index b039a09c4a09..2208228949c7 100644
--- a/src/servers/src/error.rs
+++ b/src/servers/src/error.rs
@@ -243,6 +243,12 @@ pub enum Error {
#[snafu(display("Error accessing catalog: {}", source))]
CatalogError { source: catalog::error::Error },
+
+ #[snafu(display("Failed to convert Flight Message, source: {}", source))]
+ ConvertFlightMessage {
+ #[snafu(backtrace)]
+ source: common_grpc::error::Error,
+ },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -285,7 +291,10 @@ impl ErrorExt for Error {
| DecodeRegionNumber { .. }
| TimePrecision { .. } => StatusCode::InvalidArguments,
- InfluxdbLinesWrite { source, .. } => source.status_code(),
+ InfluxdbLinesWrite { source, .. } | ConvertFlightMessage { source } => {
+ source.status_code()
+ }
+
Hyper { .. } => StatusCode::Unknown,
TlsRequired { .. } => StatusCode::Unknown,
StartFrontend { source, .. } => source.status_code(),
diff --git a/src/servers/src/http.rs b/src/servers/src/http.rs
index ccb28b0e1188..d054d756e366 100644
--- a/src/servers/src/http.rs
+++ b/src/servers/src/http.rs
@@ -171,7 +171,6 @@ impl TryFrom<Vec<RecordBatch>> for HttpRecordsOutput {
for recordbatch in recordbatches {
for row in recordbatch.rows() {
- let row = row.map_err(|e| e.to_string())?;
let value_row = row
.into_iter()
.map(|f| Value::try_from(f).map_err(|err| err.to_string()))
diff --git a/src/servers/src/mysql/writer.rs b/src/servers/src/mysql/writer.rs
index 5a0373fbc390..13bebfc91ec8 100644
--- a/src/servers/src/mysql/writer.rs
+++ b/src/servers/src/mysql/writer.rs
@@ -108,7 +108,6 @@ impl<'a, W: AsyncWrite + Unpin> MysqlResultWriter<'a, W> {
recordbatch: &RecordBatch,
) -> Result<()> {
for row in recordbatch.rows() {
- let row = row.context(error::CollectRecordbatchSnafu)?;
for value in row.into_iter() {
match value {
Value::Null => row_writer.write_col(None::<u8>)?,
diff --git a/src/servers/src/postgres/handler.rs b/src/servers/src/postgres/handler.rs
index e7f68f1fc046..eda85452ba30 100644
--- a/src/servers/src/postgres/handler.rs
+++ b/src/servers/src/postgres/handler.rs
@@ -109,10 +109,7 @@ where
Ok(rb) => stream::iter(
// collect rows from a single recordbatch into vector to avoid
// borrowing it
- rb.rows()
- .map(|row| row.map_err(|e| PgWireError::ApiError(Box::new(e))))
- .collect::<Vec<_>>()
- .into_iter(),
+ rb.rows().map(Ok).collect::<Vec<_>>().into_iter(),
)
.boxed(),
Err(e) => stream::once(future::err(PgWireError::ApiError(Box::new(e)))).boxed(),
diff --git a/src/servers/src/prometheus.rs b/src/servers/src/prometheus.rs
index b252b1cfa489..2b46e581bd68 100644
--- a/src/servers/src/prometheus.rs
+++ b/src/servers/src/prometheus.rs
@@ -19,12 +19,14 @@ use std::hash::{Hash, Hasher};
use api::prometheus::remote::label_matcher::Type as MatcherType;
use api::prometheus::remote::{Label, Query, Sample, TimeSeries, WriteRequest};
-use api::v1::codec::SelectResult;
use api::v1::column::SemanticType;
use api::v1::{column, Column, ColumnDataType, InsertExpr};
use common_grpc::writer::Precision::Millisecond;
+use common_recordbatch::{RecordBatch, RecordBatches};
+use common_time::timestamp::TimeUnit;
+use datatypes::prelude::{ConcreteDataType, Value};
use openmetrics_parser::{MetricsExposition, PrometheusType, PrometheusValue};
-use snafu::{OptionExt, ResultExt};
+use snafu::{ensure, OptionExt, ResultExt};
use snap::raw::{Decoder, Encoder};
use table::requests::InsertRequest;
@@ -175,79 +177,88 @@ impl PartialOrd for TimeSeriesId {
/// Collect each row's timeseries id
/// This processing is ugly; hopefully https://github.com/GreptimeTeam/greptimedb/issues/336 will make some progress in the future.
-fn collect_timeseries_ids(
- table_name: &str,
- row_count: usize,
- columns: &[Column],
-) -> Vec<TimeSeriesId> {
+fn collect_timeseries_ids(table_name: &str, recordbatch: &RecordBatch) -> Vec<TimeSeriesId> {
+ let row_count = recordbatch.num_rows();
let mut timeseries_ids = Vec::with_capacity(row_count);
- let mut columns_rows = vec![0; columns.len()];
-
for row in 0..row_count {
- let mut labels = Vec::with_capacity(columns.len() - 1);
-
+ let mut labels = Vec::with_capacity(recordbatch.num_columns() - 1);
labels.push(new_label(
METRIC_NAME_LABEL.to_string(),
table_name.to_string(),
));
- for (i, column) in columns.iter().enumerate() {
- let column_name = &column.column_name;
- let null_mask = &column.null_mask;
- let values = &column.values;
-
- if column_name == VALUE_COLUMN_NAME || column_name == TIMESTAMP_COLUMN_NAME {
+ for (i, column_schema) in recordbatch.schema.column_schemas().iter().enumerate() {
+ if column_schema.name == VALUE_COLUMN_NAME
+ || column_schema.name == TIMESTAMP_COLUMN_NAME
+ {
continue;
}
+ let column = &recordbatch.columns()[i];
// A label with an empty label value is considered equivalent to a label that does not exist.
- if !null_mask.is_empty() && null_mask[row] == 0 {
+ if column.is_null(row) {
continue;
}
- let row = columns_rows[i];
- columns_rows[i] += 1;
-
- let column_value = values.as_ref().map(|vs| vs.string_values[row].to_string());
- if let Some(value) = column_value {
- labels.push(new_label(column_name.to_string(), value));
- }
+ let value = column.get(row).to_string();
+ labels.push(new_label(column_schema.name.clone(), value));
}
timeseries_ids.push(TimeSeriesId { labels });
}
timeseries_ids
}
-pub fn select_result_to_timeseries(
+pub fn recordbatches_to_timeseries(
table_name: &str,
- select_result: SelectResult,
+ recordbatches: RecordBatches,
) -> Result<Vec<TimeSeries>> {
- let row_count = select_result.row_count as usize;
- let columns = select_result.columns;
- let ts_column = columns
- .iter()
- .find(|c| c.column_name == TIMESTAMP_COLUMN_NAME)
- .context(error::InvalidPromRemoteReadQueryResultSnafu {
+ Ok(recordbatches
+ .take()
+ .into_iter()
+ .map(|x| recordbatch_to_timeseries(table_name, x))
+ .collect::<Result<Vec<_>>>()?
+ .into_iter()
+ .flatten()
+ .collect())
+}
+
+fn recordbatch_to_timeseries(table: &str, recordbatch: RecordBatch) -> Result<Vec<TimeSeries>> {
+ let ts_column = recordbatch.column_by_name(TIMESTAMP_COLUMN_NAME).context(
+ error::InvalidPromRemoteReadQueryResultSnafu {
msg: "missing greptime_timestamp column in query result",
- })?;
+ },
+ )?;
+ ensure!(
+ ts_column.data_type() == ConcreteDataType::timestamp_millisecond_datatype(),
+ error::InvalidPromRemoteReadQueryResultSnafu {
+ msg: format!(
+ "Expect timestamp column of datatype Timestamp(Millisecond), actual {:?}",
+ ts_column.data_type()
+ )
+ }
+ );
- let value_column = columns
- .iter()
- .find(|c| c.column_name == VALUE_COLUMN_NAME)
- .context(error::InvalidPromRemoteReadQueryResultSnafu {
+ let value_column = recordbatch.column_by_name(VALUE_COLUMN_NAME).context(
+ error::InvalidPromRemoteReadQueryResultSnafu {
msg: "missing greptime_value column in query result",
- })?;
+ },
+ )?;
+ ensure!(
+ value_column.data_type() == ConcreteDataType::float64_datatype(),
+ error::InvalidPromRemoteReadQueryResultSnafu {
+ msg: format!(
+ "Expect value column of datatype Float64, actual {:?}",
+ value_column.data_type()
+ )
+ }
+ );
+
// First, collect each row's timeseries id
- let timeseries_ids = collect_timeseries_ids(table_name, row_count, &columns);
+ let timeseries_ids = collect_timeseries_ids(table, &recordbatch);
// Then, group timeseries by it's id.
let mut timeseries_map: BTreeMap<&TimeSeriesId, TimeSeries> = BTreeMap::default();
- let mut value_column_row = 0;
- let mut ts_column_row = 0;
- let value_null_mask = &value_column.null_mask;
- let ts_null_mask = &ts_column.null_mask;
-
for (row, timeseries_id) in timeseries_ids.iter().enumerate() {
let timeseries = timeseries_map
.entry(timeseries_id)
@@ -256,30 +267,19 @@ pub fn select_result_to_timeseries(
..Default::default()
});
- if !ts_null_mask.is_empty() && ts_null_mask[row] == 0 {
+ if ts_column.is_null(row) || value_column.is_null(row) {
continue;
}
- let ts_row = ts_column_row;
- ts_column_row += 1;
- if !value_null_mask.is_empty() && value_null_mask[row] == 0 {
- continue;
- }
- let value_row = value_column_row;
- value_column_row += 1;
-
- let sample = Sample {
- value: value_column
- .values
- .as_ref()
- .map(|vs| vs.f64_values[value_row])
- .unwrap_or(0.0f64),
- timestamp: ts_column
- .values
- .as_ref()
- .map(|vs| vs.ts_millisecond_values[ts_row])
- .unwrap_or(0i64),
+ let value: f64 = match value_column.get(row) {
+ Value::Float64(value) => value.into(),
+ _ => unreachable!("checked by the \"ensure\" above"),
+ };
+ let timestamp = match ts_column.get(row) {
+ Value::Timestamp(t) if t.unit() == TimeUnit::Millisecond => t.value(),
+ _ => unreachable!("checked by the \"ensure\" above"),
};
+ let sample = Sample { value, timestamp };
timeseries.samples.push(sample);
}
@@ -509,8 +509,9 @@ mod tests {
use api::prometheus::remote::LabelMatcher;
use common_time::timestamp::TimeUnit;
use common_time::Timestamp;
+ use datatypes::schema::{ColumnSchema, Schema};
use datatypes::value::Value;
- use datatypes::vectors::Vector;
+ use datatypes::vectors::{Float64Vector, StringVector, TimestampMillisecondVector, Vector};
use super::*;
@@ -764,38 +765,47 @@ mod tests {
}
#[test]
- fn test_select_result_to_timeseries() {
- let select_result = SelectResult {
- row_count: 2,
- columns: vec![
- Column {
- column_name: TIMESTAMP_COLUMN_NAME.to_string(),
- values: Some(column::Values {
- ts_millisecond_values: vec![1000, 2000],
- ..Default::default()
- }),
- ..Default::default()
- },
- Column {
- column_name: VALUE_COLUMN_NAME.to_string(),
- values: Some(column::Values {
- f64_values: vec![3.0, 7.0],
- ..Default::default()
- }),
- ..Default::default()
- },
- Column {
- column_name: "instance".to_string(),
- values: Some(column::Values {
- string_values: vec!["host1".to_string(), "host2".to_string()],
- ..Default::default()
- }),
- ..Default::default()
- },
+ fn test_recordbatches_to_timeseries() {
+ let schema = Arc::new(Schema::new(vec![
+ ColumnSchema::new(
+ TIMESTAMP_COLUMN_NAME,
+ ConcreteDataType::timestamp_millisecond_datatype(),
+ true,
+ ),
+ ColumnSchema::new(
+ VALUE_COLUMN_NAME,
+ ConcreteDataType::float64_datatype(),
+ true,
+ ),
+ ColumnSchema::new("instance", ConcreteDataType::string_datatype(), true),
+ ]));
+
+ let recordbatches = RecordBatches::try_new(
+ schema.clone(),
+ vec![
+ RecordBatch::new(
+ schema.clone(),
+ vec![
+ Arc::new(TimestampMillisecondVector::from_vec(vec![1000])) as _,
+ Arc::new(Float64Vector::from_vec(vec![3.0])) as _,
+ Arc::new(StringVector::from(vec!["host1"])) as _,
+ ],
+ )
+ .unwrap(),
+ RecordBatch::new(
+ schema,
+ vec![
+ Arc::new(TimestampMillisecondVector::from_vec(vec![2000])) as _,
+ Arc::new(Float64Vector::from_vec(vec![7.0])) as _,
+ Arc::new(StringVector::from(vec!["host2"])) as _,
+ ],
+ )
+ .unwrap(),
],
- };
+ )
+ .unwrap();
- let timeseries = select_result_to_timeseries("metric1", select_result).unwrap();
+ let timeseries = recordbatches_to_timeseries("metric1", recordbatches).unwrap();
assert_eq!(2, timeseries.len());
assert_eq!(
diff --git a/tests/runner/Cargo.toml b/tests/runner/Cargo.toml
index 42512fa44f79..5a5b58bf7052 100644
--- a/tests/runner/Cargo.toml
+++ b/tests/runner/Cargo.toml
@@ -7,7 +7,7 @@ license.workspace = true
[dependencies]
async-trait = "0.1"
client = { path = "../../src/client" }
-comfy-table = "6.1"
common-base = { path = "../../src/common/base" }
+common-grpc = { path = "../../src/common/grpc" }
sqlness = { git = "https://github.com/ceresdb/sqlness.git" }
tokio.workspace = true
diff --git a/tests/runner/src/env.rs b/tests/runner/src/env.rs
index 0a1a5d5091b4..67eb0fd822ae 100644
--- a/tests/runner/src/env.rs
+++ b/tests/runner/src/env.rs
@@ -18,11 +18,8 @@ use std::process::Stdio;
use std::time::Duration;
use async_trait::async_trait;
-use client::api::v1::codec::SelectResult;
-use client::api::v1::column::SemanticType;
-use client::api::v1::ColumnDataType;
use client::{Client, Database as DB, Error as ClientError, ObjectResult, Select};
-use comfy_table::{Cell, Table};
+use common_grpc::flight;
use sqlness::{Database, Environment};
use tokio::process::{Child, Command};
@@ -129,75 +126,20 @@ impl Display for ResultDisplayer {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match &self.result {
Ok(result) => match result {
- ObjectResult::Select(select_result) => {
- write!(
- f,
- "{}",
- SelectResultDisplayer {
- result: select_result
- }
- .display()
- )
- }
ObjectResult::Mutate(mutate_result) => {
write!(f, "{mutate_result:?}")
}
- // TODO(LFC): Implement it.
- ObjectResult::FlightData(_) => unimplemented!(),
+ ObjectResult::FlightData(messages) => {
+ let pretty = flight::flight_messages_to_recordbatches(messages.clone())
+ .map_err(|e| e.to_string())
+ .and_then(|x| x.pretty_print().map_err(|e| e.to_string()));
+ match pretty {
+ Ok(s) => write!(f, "{s}"),
+ Err(e) => write!(f, "format result error: {e}"),
+ }
+ }
},
Err(e) => write!(f, "Failed to execute, error: {e:?}"),
}
}
}
-
-struct SelectResultDisplayer<'a> {
- result: &'a SelectResult,
-}
-
-impl SelectResultDisplayer<'_> {
- fn display(&self) -> impl Display {
- let mut table = Table::new();
- table.load_preset("||--+-++| ++++++");
-
- if self.result.row_count == 0 {
- return table;
- }
-
- let mut headers = vec![];
- for column in &self.result.columns {
- headers.push(Cell::new(format!(
- "{}, #{:?}, #{:?}",
- column.column_name,
- SemanticType::from_i32(column.semantic_type).unwrap(),
- ColumnDataType::from_i32(column.datatype).unwrap()
- )));
- }
- table.set_header(headers);
-
- let col_count = self.result.columns.len();
- let row_count = self.result.row_count as usize;
- let columns = self
- .result
- .columns
- .iter()
- .map(|col| {
- util::values_to_string(
- ColumnDataType::from_i32(col.datatype).unwrap(),
- col.values.clone().unwrap(),
- col.null_mask.clone(),
- row_count,
- )
- })
- .collect::<Vec<_>>();
-
- for row_index in 0..row_count {
- let mut row = Vec::with_capacity(col_count);
- for col in columns.iter() {
- row.push(col[row_index].clone());
- }
- table.add_row(row);
- }
-
- table
- }
-}
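For context on the new display path in env.rs: it is two fallible steps (decode Flight messages into record batches, then pretty-print them) folded into a single Result<String, String>. Below is a minimal, self-contained sketch of that shape; the names Messages, Batches, messages_to_batches and Displayer are stand-ins for illustration, not the real common_grpc API. Rendering the error inline ("format result error: ...") rather than returning fmt::Error keeps the runner's output printable even when decoding fails.

```rust
use std::fmt::{self, Display};

// Stand-in types; the real code decodes Flight messages into RecordBatches.
struct Messages(Vec<String>);
struct Batches(Vec<String>);

fn messages_to_batches(m: &Messages) -> Result<Batches, &'static str> {
    Ok(Batches(m.0.clone()))
}

impl Batches {
    fn pretty_print(&self) -> Result<String, &'static str> {
        Ok(self.0.join("\n"))
    }
}

// Same chaining shape as the displayer in the diff: unify both fallible steps
// into one Result<String, String>, then render either the table or the error.
struct Displayer(Messages);

impl Display for Displayer {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let pretty = messages_to_batches(&self.0)
            .map_err(|e| e.to_string())
            .and_then(|b| b.pretty_print().map_err(|e| e.to_string()));
        match pretty {
            Ok(s) => write!(f, "{s}"),
            Err(e) => write!(f, "format result error: {e}"),
        }
    }
}

fn main() {
    println!("{}", Displayer(Messages(vec!["row1".into(), "row2".into()])));
}
```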
diff --git a/tests/runner/src/util.rs b/tests/runner/src/util.rs
index 652e1257bbd0..e3c1ff00d502 100644
--- a/tests/runner/src/util.rs
+++ b/tests/runner/src/util.rs
@@ -17,9 +17,6 @@ use std::net::SocketAddr;
use std::path::PathBuf;
use std::time::Duration;
-use client::api::v1::column::Values;
-use client::api::v1::ColumnDataType;
-use common_base::BitVec;
use tokio::io::AsyncWriteExt;
use tokio::net::TcpSocket;
use tokio::time;
@@ -63,103 +60,6 @@ where
}
}
-macro_rules! build_nullable_iter {
- ($null_iter:ident, $data_iter:expr, $row_count:ident) => {
- NullableColumnIter {
- null_iter: $null_iter,
- data_iter: $data_iter,
- }
- .collect()
- };
-}
-
-pub fn values_to_string(
- data_type: ColumnDataType,
- values: Values,
- null_mask: Vec<u8>,
- row_count: usize,
-) -> Vec<String> {
- let mut bit_vec = BitVec::from_vec(null_mask);
- bit_vec.resize(row_count, false);
- let null_iter = bit_vec.iter();
- match data_type {
- ColumnDataType::Int64 => {
- build_nullable_iter!(null_iter, values.i64_values.into_iter(), row_count)
- }
- ColumnDataType::Float64 => {
- build_nullable_iter!(null_iter, values.f64_values.into_iter(), row_count)
- }
- ColumnDataType::String => {
- build_nullable_iter!(null_iter, values.string_values.into_iter(), row_count)
- }
- ColumnDataType::Boolean => {
- build_nullable_iter!(null_iter, values.bool_values.into_iter(), row_count)
- }
- ColumnDataType::Int8 => {
- build_nullable_iter!(null_iter, values.i8_values.into_iter(), row_count)
- }
- ColumnDataType::Int16 => {
- build_nullable_iter!(null_iter, values.i16_values.into_iter(), row_count)
- }
- ColumnDataType::Int32 => {
- build_nullable_iter!(null_iter, values.i32_values.into_iter(), row_count)
- }
- ColumnDataType::Uint8 => {
- build_nullable_iter!(null_iter, values.u8_values.into_iter(), row_count)
- }
- ColumnDataType::Uint16 => {
- build_nullable_iter!(null_iter, values.u16_values.into_iter(), row_count)
- }
- ColumnDataType::Uint32 => {
- build_nullable_iter!(null_iter, values.u32_values.into_iter(), row_count)
- }
- ColumnDataType::Uint64 => {
- build_nullable_iter!(null_iter, values.u64_values.into_iter(), row_count)
- }
- ColumnDataType::Float32 => {
- build_nullable_iter!(null_iter, values.f32_values.into_iter(), row_count)
- }
- ColumnDataType::Binary => build_nullable_iter!(
- null_iter,
- values
- .binary_values
- .into_iter()
- .map(|val| format!("{val:?}")),
- row_count
- ),
- ColumnDataType::Datetime => {
- build_nullable_iter!(null_iter, values.i64_values.into_iter(), row_count)
- }
- ColumnDataType::Date => {
- build_nullable_iter!(null_iter, values.i32_values.into_iter(), row_count)
- }
- ColumnDataType::TimestampSecond => {
- build_nullable_iter!(null_iter, values.ts_second_values.into_iter(), row_count)
- }
- ColumnDataType::TimestampMillisecond => {
- build_nullable_iter!(
- null_iter,
- values.ts_millisecond_values.into_iter(),
- row_count
- )
- }
- ColumnDataType::TimestampMicrosecond => {
- build_nullable_iter!(
- null_iter,
- values.ts_microsecond_values.into_iter(),
- row_count
- )
- }
- ColumnDataType::TimestampNanosecond => {
- build_nullable_iter!(
- null_iter,
- values.ts_nanosecond_values.into_iter(),
- row_count
- )
- }
- }
-}
-
/// Get the dir of test cases. This function only works when the runner is run
/// under the project's dir because it depends on some envs set by cargo.
pub fn get_case_dir() -> String {
@@ -214,76 +114,3 @@ pub async fn check_port(ip_addr: SocketAddr, timeout: Duration) -> bool {
tokio::time::timeout(timeout, check_task).await.is_ok()
}
-
-#[cfg(test)]
-mod test {
- use super::*;
-
- #[test]
- fn test_display_nullable_column() {
- let data_type = ColumnDataType::Int64;
- let values = Values {
- i64_values: vec![1, 2, 3, 4, 5, 7, 8, 9],
- ..Default::default()
- };
- let null_mask = vec![0b00100000, 0b00000010];
- let result = values_to_string(data_type, values, null_mask, 10);
- let expected: Vec<String> = ["1", "2", "3", "4", "5", "NULL", "7", "8", "9", "NULL"]
- .into_iter()
- .map(String::from)
- .collect();
-
- assert_eq!(result, expected);
- }
-
- #[test]
- fn test_display_nullable_column_exceed_length() {
- let data_type = ColumnDataType::Int64;
- let values = Values {
- i64_values: vec![1, 2, 3, 4, 5, 7, 8, 9],
- ..Default::default()
- };
- let null_mask = vec![0b00100000, 0b11111110, 0b0001111];
- let result = values_to_string(data_type, values, null_mask, 20);
- let expected: Vec<String> = [
- "1", "2", "3", "4", "5", "NULL", "7", "8", "9", "NULL", "NULL", "NULL", "NULL", "NULL",
- "NULL", "NULL", "NULL", "NULL", "NULL", "NULL",
- ]
- .into_iter()
- .map(String::from)
- .collect();
-
- assert_eq!(result, expected);
- }
-
- #[test]
- fn test_display_nullable_column_no_vacancy() {
- let data_type = ColumnDataType::Int64;
- let values = Values {
- i64_values: vec![1, 2, 3, 4, 5, 7, 8, 9],
- ..Default::default()
- };
- let null_mask = vec![0b00000000, 0b00000000];
- let result = values_to_string(data_type, values, null_mask, 8);
- let expected: Vec<String> = ["1", "2", "3", "4", "5", "7", "8", "9"]
- .into_iter()
- .map(String::from)
- .collect();
-
- assert_eq!(result, expected);
- }
-
- #[test]
- fn test_display_nullable_column_shorter_length() {
- let data_type = ColumnDataType::Int64;
- let values = Values {
- i64_values: vec![1, 2, 3, 4, 5, 7, 8, 9],
- ..Default::default()
- };
- let null_mask = vec![0b00000000, 0b00000000];
- let result = values_to_string(data_type, values, null_mask, 1);
- let expected: Vec<String> = ["1"].into_iter().map(String::from).collect();
-
- assert_eq!(result, expected);
- }
-}
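The removed values_to_string helper expanded a packed null bitmask against a dense value array: a null position prints as NULL without consuming a value, a non-null position takes the next value from the array. A simplified, self-contained sketch of that expansion is shown below, using plain bools in place of BitVec and a single i64 column; the function name expand_nulls is illustrative only.

```rust
// Null positions emit "NULL" without consuming a value; non-null positions
// consume the next value from the dense array, mirroring the removed helper.
fn expand_nulls(values: &[i64], null_mask: &[bool]) -> Vec<String> {
    let mut data = values.iter();
    null_mask
        .iter()
        .map(|&is_null| {
            if is_null {
                "NULL".to_string()
            } else {
                data.next()
                    .map(|v| v.to_string())
                    .unwrap_or_else(|| "NULL".to_string())
            }
        })
        .collect()
}

fn main() {
    let values = [1_i64, 2, 3];
    let mask = [false, true, false, false];
    assert_eq!(expand_nulls(&values, &mask), vec!["1", "NULL", "2", "3"]);
}
```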
|
feat
|
Replace SelectResult with FlightData (#776)
|
ac7f52d303f9eae53421f4834ed1a64a67a9af8f
|
2022-11-25 08:55:57
|
Lei, HUANG
|
fix: start datanode instance before frontend services (#634)
| false
|
diff --git a/src/cmd/src/standalone.rs b/src/cmd/src/standalone.rs
index 62fbd0e82d87..b3a86e3fb3eb 100644
--- a/src/cmd/src/standalone.rs
+++ b/src/cmd/src/standalone.rs
@@ -164,8 +164,15 @@ impl StartCommand {
.context(StartDatanodeSnafu)?;
let mut frontend = build_frontend(fe_opts, &dn_opts, datanode.get_instance()).await?;
+ // Start the datanode instance before starting services, to avoid requests coming in before internal components are started.
+ datanode
+ .start_instance()
+ .await
+ .context(StartDatanodeSnafu)?;
+ info!("Datanode instance started");
+
try_join!(
- async { datanode.start().await.context(StartDatanodeSnafu) },
+ async { datanode.start_services().await.context(StartDatanodeSnafu) },
async { frontend.start().await.context(StartFrontendSnafu) }
)?;
diff --git a/src/datanode/src/datanode.rs b/src/datanode/src/datanode.rs
index 7e1b935a0035..af620146970b 100644
--- a/src/datanode/src/datanode.rs
+++ b/src/datanode/src/datanode.rs
@@ -88,9 +88,18 @@ impl Datanode {
pub async fn start(&mut self) -> Result<()> {
info!("Starting datanode instance...");
- self.instance.start().await?;
- self.services.start(&self.opts).await?;
- Ok(())
+ self.start_instance().await?;
+ self.start_services().await
+ }
+
+ /// Start only the internal component of datanode.
+ pub async fn start_instance(&mut self) -> Result<()> {
+ self.instance.start().await
+ }
+
+ /// Start services of datanode. This method call will block until services are shut down.
+ pub async fn start_services(&mut self) -> Result<()> {
+ self.services.start(&self.opts).await
}
pub fn get_instance(&self) -> InstanceRef {
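A minimal sketch of the start ordering this record introduces, with made-up names (Node, instance_started): internal state comes up first, and the service layer refuses to run against a half-initialized node, which is what splitting start() into start_instance() and start_services() buys.

```rust
// Illustrative only: separate "start internal state" from "start externally
// visible services", and keep the internal step first in the combined start().
struct Node {
    instance_started: bool,
}

impl Node {
    fn start_instance(&mut self) -> Result<(), String> {
        // Bring up internal components (storage, catalog, ...) first.
        self.instance_started = true;
        Ok(())
    }

    fn start_services(&self) -> Result<(), String> {
        // Refuse to serve requests against a half-initialized node.
        if !self.instance_started {
            return Err("instance not started".to_string());
        }
        Ok(())
    }

    fn start(&mut self) -> Result<(), String> {
        self.start_instance()?;
        self.start_services()
    }
}

fn main() {
    let mut node = Node { instance_started: false };
    node.start().expect("node starts after instance is up");
}
```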
|
fix
|
start datanode instance before frontend services (#634)
|
8f0959fa9ff18249234633ff6739e7adc8fd1293
|
2024-08-07 14:43:38
|
Ruihang Xia
|
fix: fix incorrect result of topk with cte (#4523)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 1e07d3d6140e..877349ab1697 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2764,7 +2764,7 @@ checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2"
[[package]]
name = "datafusion"
version = "38.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=d7bda5c9b762426e81f144296deadc87e5f4a0b8#d7bda5c9b762426e81f144296deadc87e5f4a0b8"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=7823ef2f63663907edab46af0d51359900f608d6#7823ef2f63663907edab46af0d51359900f608d6"
dependencies = [
"ahash 0.8.11",
"arrow",
@@ -2816,7 +2816,7 @@ dependencies = [
[[package]]
name = "datafusion-common"
version = "38.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=d7bda5c9b762426e81f144296deadc87e5f4a0b8#d7bda5c9b762426e81f144296deadc87e5f4a0b8"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=7823ef2f63663907edab46af0d51359900f608d6#7823ef2f63663907edab46af0d51359900f608d6"
dependencies = [
"ahash 0.8.11",
"arrow",
@@ -2837,7 +2837,7 @@ dependencies = [
[[package]]
name = "datafusion-common-runtime"
version = "38.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=d7bda5c9b762426e81f144296deadc87e5f4a0b8#d7bda5c9b762426e81f144296deadc87e5f4a0b8"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=7823ef2f63663907edab46af0d51359900f608d6#7823ef2f63663907edab46af0d51359900f608d6"
dependencies = [
"tokio",
]
@@ -2845,7 +2845,7 @@ dependencies = [
[[package]]
name = "datafusion-execution"
version = "38.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=d7bda5c9b762426e81f144296deadc87e5f4a0b8#d7bda5c9b762426e81f144296deadc87e5f4a0b8"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=7823ef2f63663907edab46af0d51359900f608d6#7823ef2f63663907edab46af0d51359900f608d6"
dependencies = [
"arrow",
"chrono",
@@ -2865,7 +2865,7 @@ dependencies = [
[[package]]
name = "datafusion-expr"
version = "38.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=d7bda5c9b762426e81f144296deadc87e5f4a0b8#d7bda5c9b762426e81f144296deadc87e5f4a0b8"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=7823ef2f63663907edab46af0d51359900f608d6#7823ef2f63663907edab46af0d51359900f608d6"
dependencies = [
"ahash 0.8.11",
"arrow",
@@ -2882,7 +2882,7 @@ dependencies = [
[[package]]
name = "datafusion-functions"
version = "38.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=d7bda5c9b762426e81f144296deadc87e5f4a0b8#d7bda5c9b762426e81f144296deadc87e5f4a0b8"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=7823ef2f63663907edab46af0d51359900f608d6#7823ef2f63663907edab46af0d51359900f608d6"
dependencies = [
"arrow",
"base64 0.22.1",
@@ -2908,7 +2908,7 @@ dependencies = [
[[package]]
name = "datafusion-functions-aggregate"
version = "38.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=d7bda5c9b762426e81f144296deadc87e5f4a0b8#d7bda5c9b762426e81f144296deadc87e5f4a0b8"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=7823ef2f63663907edab46af0d51359900f608d6#7823ef2f63663907edab46af0d51359900f608d6"
dependencies = [
"ahash 0.8.11",
"arrow",
@@ -2925,7 +2925,7 @@ dependencies = [
[[package]]
name = "datafusion-functions-array"
version = "38.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=d7bda5c9b762426e81f144296deadc87e5f4a0b8#d7bda5c9b762426e81f144296deadc87e5f4a0b8"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=7823ef2f63663907edab46af0d51359900f608d6#7823ef2f63663907edab46af0d51359900f608d6"
dependencies = [
"arrow",
"arrow-array",
@@ -2944,7 +2944,7 @@ dependencies = [
[[package]]
name = "datafusion-optimizer"
version = "38.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=d7bda5c9b762426e81f144296deadc87e5f4a0b8#d7bda5c9b762426e81f144296deadc87e5f4a0b8"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=7823ef2f63663907edab46af0d51359900f608d6#7823ef2f63663907edab46af0d51359900f608d6"
dependencies = [
"arrow",
"async-trait",
@@ -2962,7 +2962,7 @@ dependencies = [
[[package]]
name = "datafusion-physical-expr"
version = "38.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=d7bda5c9b762426e81f144296deadc87e5f4a0b8#d7bda5c9b762426e81f144296deadc87e5f4a0b8"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=7823ef2f63663907edab46af0d51359900f608d6#7823ef2f63663907edab46af0d51359900f608d6"
dependencies = [
"ahash 0.8.11",
"arrow",
@@ -2992,7 +2992,7 @@ dependencies = [
[[package]]
name = "datafusion-physical-expr-common"
version = "38.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=d7bda5c9b762426e81f144296deadc87e5f4a0b8#d7bda5c9b762426e81f144296deadc87e5f4a0b8"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=7823ef2f63663907edab46af0d51359900f608d6#7823ef2f63663907edab46af0d51359900f608d6"
dependencies = [
"arrow",
"datafusion-common",
@@ -3003,7 +3003,7 @@ dependencies = [
[[package]]
name = "datafusion-physical-plan"
version = "38.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=d7bda5c9b762426e81f144296deadc87e5f4a0b8#d7bda5c9b762426e81f144296deadc87e5f4a0b8"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=7823ef2f63663907edab46af0d51359900f608d6#7823ef2f63663907edab46af0d51359900f608d6"
dependencies = [
"ahash 0.8.11",
"arrow",
@@ -3036,7 +3036,7 @@ dependencies = [
[[package]]
name = "datafusion-sql"
version = "38.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=d7bda5c9b762426e81f144296deadc87e5f4a0b8#d7bda5c9b762426e81f144296deadc87e5f4a0b8"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=7823ef2f63663907edab46af0d51359900f608d6#7823ef2f63663907edab46af0d51359900f608d6"
dependencies = [
"arrow",
"arrow-array",
@@ -3052,7 +3052,7 @@ dependencies = [
[[package]]
name = "datafusion-substrait"
version = "38.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=d7bda5c9b762426e81f144296deadc87e5f4a0b8#d7bda5c9b762426e81f144296deadc87e5f4a0b8"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=7823ef2f63663907edab46af0d51359900f608d6#7823ef2f63663907edab46af0d51359900f608d6"
dependencies = [
"async-recursion",
"chrono",
@@ -4680,7 +4680,7 @@ dependencies = [
"httpdate",
"itoa",
"pin-project-lite",
- "socket2 0.4.10",
+ "socket2 0.5.7",
"tokio",
"tower-service",
"tracing",
@@ -5228,6 +5228,15 @@ dependencies = [
"either",
]
+[[package]]
+name = "itertools"
+version = "0.13.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186"
+dependencies = [
+ "either",
+]
+
[[package]]
name = "itoa"
version = "1.0.11"
@@ -8265,7 +8274,7 @@ checksum = "5bb182580f71dd070f88d01ce3de9f4da5021db7115d2e1c3605a754153b77c1"
dependencies = [
"bytes",
"heck 0.5.0",
- "itertools 0.12.1",
+ "itertools 0.13.0",
"log",
"multimap",
"once_cell",
@@ -8309,7 +8318,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "18bec9b0adc4eba778b33684b7ba3e7137789434769ee3ce3930463ef904cfca"
dependencies = [
"anyhow",
- "itertools 0.12.1",
+ "itertools 0.13.0",
"proc-macro2",
"quote",
"syn 2.0.66",
@@ -8456,7 +8465,7 @@ dependencies = [
"indoc",
"libc",
"memoffset 0.9.1",
- "parking_lot 0.11.2",
+ "parking_lot 0.12.3",
"portable-atomic",
"pyo3-build-config",
"pyo3-ffi",
diff --git a/Cargo.toml b/Cargo.toml
index cedd92dfbc2d..5d473de72226 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -104,15 +104,15 @@ clap = { version = "4.4", features = ["derive"] }
config = "0.13.0"
crossbeam-utils = "0.8"
dashmap = "5.4"
-datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
-datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
-datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
-datafusion-functions = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
-datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
-datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
-datafusion-physical-plan = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
-datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
-datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
+datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
+datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
+datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
+datafusion-functions = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
+datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
+datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
+datafusion-physical-plan = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
+datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
+datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "7823ef2f63663907edab46af0d51359900f608d6" }
derive_builder = "0.12"
dotenv = "0.15"
etcd-client = { version = "0.13" }
diff --git a/src/common/query/src/error.rs b/src/common/query/src/error.rs
index b8adeeba5c90..a41ab6df1127 100644
--- a/src/common/query/src/error.rs
+++ b/src/common/query/src/error.rs
@@ -155,13 +155,6 @@ pub enum Error {
source: DataTypeError,
},
- #[snafu(display("Failed to execute physical plan"))]
- ExecutePhysicalPlan {
- #[snafu(implicit)]
- location: Location,
- source: BoxedError,
- },
-
#[snafu(display("Failed to cast array to {:?}", typ))]
TypeCast {
#[snafu(source)]
@@ -308,7 +301,6 @@ impl ErrorExt for Error {
Error::DecodePlan { source, .. }
| Error::Execute { source, .. }
- | Error::ExecutePhysicalPlan { source, .. }
| Error::ProcedureService { source, .. }
| Error::TableMutation { source, .. } => source.status_code(),
diff --git a/tests/cases/standalone/common/order/nulls_first.result b/tests/cases/standalone/common/order/nulls_first.result
index 9b0c28da795c..8aae8ccadb5e 100644
--- a/tests/cases/standalone/common/order/nulls_first.result
+++ b/tests/cases/standalone/common/order/nulls_first.result
@@ -36,16 +36,8 @@ SELECT * FROM test ORDER BY i NULLS LAST, j NULLS FIRST;
| | 1 | 1970-01-01T00:00:00.002 |
+---+---+-------------------------+
-SELECT i, j, row_number() OVER (PARTITION BY i ORDER BY j NULLS FIRST) FROM test ORDER BY i NULLS FIRST, j NULLS FIRST;
-
-+---+---+------------------------------------------------------------------------------------------------------------------------+
-| i | j | ROW_NUMBER() PARTITION BY [test.i] ORDER BY [test.j ASC NULLS FIRST] RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW |
-+---+---+------------------------------------------------------------------------------------------------------------------------+
-| | 1 | 1 |
-| 1 | | 1 |
-| 1 | 1 | 2 |
-+---+---+------------------------------------------------------------------------------------------------------------------------+
-
+-- Temporarily disabled. Waiting for the next upgrade of DataFusion.
+-- SELECT i, j, row_number() OVER (PARTITION BY i ORDER BY j NULLS FIRST) FROM test ORDER BY i NULLS FIRST, j NULLS FIRST;
SELECT i, j, row_number() OVER (PARTITION BY i ORDER BY j NULLS LAST) FROM test ORDER BY i NULLS FIRST, j NULLS FIRST;
+---+---+-----------------------------------------------------------------------------------------------------------------------+
diff --git a/tests/cases/standalone/common/order/nulls_first.sql b/tests/cases/standalone/common/order/nulls_first.sql
index e66b0f12fb4f..c22a2cfc381d 100644
--- a/tests/cases/standalone/common/order/nulls_first.sql
+++ b/tests/cases/standalone/common/order/nulls_first.sql
@@ -8,7 +8,8 @@ SELECT * FROM test ORDER BY i NULLS FIRST, j NULLS FIRST;
SELECT * FROM test ORDER BY i NULLS LAST, j NULLS FIRST;
-SELECT i, j, row_number() OVER (PARTITION BY i ORDER BY j NULLS FIRST) FROM test ORDER BY i NULLS FIRST, j NULLS FIRST;
+-- Temporarily disabled. Waiting for the next upgrade of DataFusion.
+-- SELECT i, j, row_number() OVER (PARTITION BY i ORDER BY j NULLS FIRST) FROM test ORDER BY i NULLS FIRST, j NULLS FIRST;
SELECT i, j, row_number() OVER (PARTITION BY i ORDER BY j NULLS LAST) FROM test ORDER BY i NULLS FIRST, j NULLS FIRST;
|
fix
|
fix incorrect result of topk with cte (#4523)
|
d521bc9dc5597f011887ad210f35a0ed74630f83
|
2024-01-10 19:46:03
|
Lanqing Yang
|
chore: impl KvBackend for MetaPeerClient (#3076)
| false
|
diff --git a/src/meta-srv/src/cluster.rs b/src/meta-srv/src/cluster.rs
index 7e460664c181..2dbc0c1ea66c 100644
--- a/src/meta-srv/src/cluster.rs
+++ b/src/meta-srv/src/cluster.rs
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::any::Any;
use std::collections::{HashMap, HashSet};
use std::sync::Arc;
use std::time::Duration;
@@ -22,8 +23,12 @@ use api::v1::meta::{
RangeRequest as PbRangeRequest, RangeResponse as PbRangeResponse, ResponseHeader,
};
use common_grpc::channel_manager::ChannelManager;
-use common_meta::kv_backend::ResettableKvBackendRef;
-use common_meta::rpc::store::{BatchGetRequest, RangeRequest};
+use common_meta::kv_backend::{KvBackend, ResettableKvBackendRef, TxnService};
+use common_meta::rpc::store::{
+ BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest,
+ BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse, DeleteRangeRequest,
+ DeleteRangeResponse, PutRequest, PutResponse, RangeRequest, RangeResponse,
+};
use common_meta::rpc::KeyValue;
use common_meta::util;
use common_telemetry::warn;
@@ -49,66 +54,64 @@ pub struct MetaPeerClient {
retry_interval_ms: u64,
}
-impl MetaPeerClient {
- async fn get_dn_key_value(&self, keys_only: bool) -> Result<Vec<KeyValue>> {
- let key = format!("{DN_STAT_PREFIX}-").into_bytes();
- let range_end = util::get_prefix_end_key(&key);
- self.range(key, range_end, keys_only).await
- }
+#[async_trait::async_trait]
+impl TxnService for MetaPeerClient {
+ type Error = error::Error;
+}
- // Get all datanode stat kvs from leader meta.
- pub async fn get_all_dn_stat_kvs(&self) -> Result<HashMap<StatKey, StatValue>> {
- let kvs = self.get_dn_key_value(false).await?;
- to_stat_kv_map(kvs)
+#[async_trait::async_trait]
+impl KvBackend for MetaPeerClient {
+ fn name(&self) -> &str {
+ "MetaPeerClient"
}
- pub async fn get_node_cnt(&self) -> Result<i32> {
- let kvs = self.get_dn_key_value(true).await?;
- kvs.into_iter()
- .map(|kv| kv.key.try_into())
- .collect::<Result<HashSet<StatKey>>>()
- .map(|hash_set| hash_set.len() as i32)
+ fn as_any(&self) -> &dyn Any {
+ self
}
- // Get datanode stat kvs from leader meta by input keys.
- pub async fn get_dn_stat_kvs(&self, keys: Vec<StatKey>) -> Result<HashMap<StatKey, StatValue>> {
- let stat_keys = keys.into_iter().map(|key| key.into()).collect();
+ async fn range(&self, req: RangeRequest) -> Result<RangeResponse> {
+ if self.is_leader() {
+ return self
+ .in_memory
+ .range(req)
+ .await
+ .context(error::KvBackendSnafu);
+ }
- let kvs = self.batch_get(stat_keys).await?;
+ let max_retry_count = self.max_retry_count;
+ let retry_interval_ms = self.retry_interval_ms;
- to_stat_kv_map(kvs)
- }
+ for _ in 0..max_retry_count {
+ match self
+ .remote_range(req.key.clone(), req.range_end.clone(), req.keys_only)
+ .await
+ {
+ Ok(res) => return Ok(res),
+ Err(e) => {
+ if need_retry(&e) {
+ warn!("Encountered an error that need to retry, err: {:?}", e);
+ tokio::time::sleep(Duration::from_millis(retry_interval_ms)).await;
+ } else {
+ return Err(e);
+ }
+ }
+ }
+ }
- // Get kv information from the leader's in_mem kv store.
- pub async fn get(&self, key: Vec<u8>) -> Result<Option<KeyValue>> {
- let mut kvs = self.range(key, vec![], false).await?;
- Ok(if kvs.is_empty() {
- None
- } else {
- debug_assert_eq!(kvs.len(), 1);
- Some(kvs.remove(0))
- })
+ error::ExceededRetryLimitSnafu {
+ func_name: "range",
+ retry_num: max_retry_count,
+ }
+ .fail()
}
- // Range kv information from the leader's in_mem kv store
- pub async fn range(
- &self,
- key: Vec<u8>,
- range_end: Vec<u8>,
- keys_only: bool,
- ) -> Result<Vec<KeyValue>> {
+ // Get kv information from the leader's in_mem kv store
+ async fn batch_get(&self, req: BatchGetRequest) -> Result<BatchGetResponse> {
if self.is_leader() {
- let request = RangeRequest {
- key,
- range_end,
- ..Default::default()
- };
-
return self
.in_memory
- .range(request)
+ .batch_get(req)
.await
- .map(|resp| resp.kvs)
.context(error::KvBackendSnafu);
}
@@ -116,11 +119,8 @@ impl MetaPeerClient {
let retry_interval_ms = self.retry_interval_ms;
for _ in 0..max_retry_count {
- match self
- .remote_range(key.clone(), range_end.clone(), keys_only)
- .await
- {
- Ok(kvs) => return Ok(kvs),
+ match self.remote_batch_get(req.keys.clone()).await {
+ Ok(res) => return Ok(res),
Err(e) => {
if need_retry(&e) {
warn!("Encountered an error that need to retry, err: {:?}", e);
@@ -133,18 +133,111 @@ impl MetaPeerClient {
}
error::ExceededRetryLimitSnafu {
- func_name: "range",
+ func_name: "batch_get",
retry_num: max_retry_count,
}
.fail()
}
+ // MetaPeerClient does not support the mutating methods listed below.
+ async fn put(&self, _req: PutRequest) -> Result<PutResponse> {
+ error::UnsupportedSnafu {
+ operation: "put".to_string(),
+ }
+ .fail()
+ }
+
+ async fn batch_put(&self, _req: BatchPutRequest) -> Result<BatchPutResponse> {
+ error::UnsupportedSnafu {
+ operation: "batch put".to_string(),
+ }
+ .fail()
+ }
+
+ async fn compare_and_put(&self, _req: CompareAndPutRequest) -> Result<CompareAndPutResponse> {
+ error::UnsupportedSnafu {
+ operation: "compare and put".to_string(),
+ }
+ .fail()
+ }
+
+ async fn delete_range(&self, _req: DeleteRangeRequest) -> Result<DeleteRangeResponse> {
+ error::UnsupportedSnafu {
+ operation: "delete range".to_string(),
+ }
+ .fail()
+ }
+
+ async fn batch_delete(&self, _req: BatchDeleteRequest) -> Result<BatchDeleteResponse> {
+ error::UnsupportedSnafu {
+ operation: "batch delete".to_string(),
+ }
+ .fail()
+ }
+
+ async fn delete(&self, _key: &[u8], _prev_kv: bool) -> Result<Option<KeyValue>> {
+ error::UnsupportedSnafu {
+ operation: "delete".to_string(),
+ }
+ .fail()
+ }
+
+ async fn put_conditionally(
+ &self,
+ _key: Vec<u8>,
+ _value: Vec<u8>,
+ _if_not_exists: bool,
+ ) -> Result<bool> {
+ error::UnsupportedSnafu {
+ operation: "put conditionally".to_string(),
+ }
+ .fail()
+ }
+}
+
+impl MetaPeerClient {
+ async fn get_dn_key_value(&self, keys_only: bool) -> Result<Vec<KeyValue>> {
+ let key = format!("{DN_STAT_PREFIX}-").into_bytes();
+ let range_end = util::get_prefix_end_key(&key);
+ let range_request = RangeRequest {
+ key,
+ range_end,
+ keys_only,
+ ..Default::default()
+ };
+ self.range(range_request).await.map(|res| res.kvs)
+ }
+
+ // Get all datanode stat kvs from leader meta.
+ pub async fn get_all_dn_stat_kvs(&self) -> Result<HashMap<StatKey, StatValue>> {
+ let kvs = self.get_dn_key_value(false).await?;
+ to_stat_kv_map(kvs)
+ }
+
+ pub async fn get_node_cnt(&self) -> Result<i32> {
+ let kvs = self.get_dn_key_value(true).await?;
+ kvs.into_iter()
+ .map(|kv| kv.key.try_into())
+ .collect::<Result<HashSet<StatKey>>>()
+ .map(|hash_set| hash_set.len() as i32)
+ }
+
+ // Get datanode stat kvs from leader meta by input keys.
+ pub async fn get_dn_stat_kvs(&self, keys: Vec<StatKey>) -> Result<HashMap<StatKey, StatValue>> {
+ let stat_keys = keys.into_iter().map(|key| key.into()).collect();
+ let batch_get_req = BatchGetRequest { keys: stat_keys };
+
+ let res = self.batch_get(batch_get_req).await?;
+
+ to_stat_kv_map(res.kvs)
+ }
+
async fn remote_range(
&self,
key: Vec<u8>,
range_end: Vec<u8>,
keys_only: bool,
- ) -> Result<Vec<KeyValue>> {
+ ) -> Result<RangeResponse> {
// Safety: when self.is_leader() == false, election must not empty.
let election = self.election.as_ref().unwrap();
@@ -170,47 +263,13 @@ impl MetaPeerClient {
check_resp_header(&response.header, Context { addr: &leader_addr })?;
- Ok(response.kvs.into_iter().map(KeyValue::new).collect())
- }
-
- // Get kv information from the leader's in_mem kv store
- pub async fn batch_get(&self, keys: Vec<Vec<u8>>) -> Result<Vec<KeyValue>> {
- if self.is_leader() {
- let request = BatchGetRequest { keys };
-
- return self
- .in_memory
- .batch_get(request)
- .await
- .map(|resp| resp.kvs)
- .context(error::KvBackendSnafu);
- }
-
- let max_retry_count = self.max_retry_count;
- let retry_interval_ms = self.retry_interval_ms;
-
- for _ in 0..max_retry_count {
- match self.remote_batch_get(keys.clone()).await {
- Ok(kvs) => return Ok(kvs),
- Err(e) => {
- if need_retry(&e) {
- warn!("Encountered an error that need to retry, err: {:?}", e);
- tokio::time::sleep(Duration::from_millis(retry_interval_ms)).await;
- } else {
- return Err(e);
- }
- }
- }
- }
-
- error::ExceededRetryLimitSnafu {
- func_name: "batch_get",
- retry_num: max_retry_count,
- }
- .fail()
+ Ok(RangeResponse {
+ kvs: response.kvs.into_iter().map(KeyValue::new).collect(),
+ more: response.more,
+ })
}
- async fn remote_batch_get(&self, keys: Vec<Vec<u8>>) -> Result<Vec<KeyValue>> {
+ async fn remote_batch_get(&self, keys: Vec<Vec<u8>>) -> Result<BatchGetResponse> {
// Safety: when self.is_leader() == false, election must not empty.
let election = self.election.as_ref().unwrap();
@@ -234,7 +293,9 @@ impl MetaPeerClient {
check_resp_header(&response.header, Context { addr: &leader_addr })?;
- Ok(response.kvs.into_iter().map(KeyValue::new).collect())
+ Ok(BatchGetResponse {
+ kvs: response.kvs.into_iter().map(KeyValue::new).collect(),
+ })
}
// Check if the meta node is a leader node.
diff --git a/src/meta-srv/src/lease.rs b/src/meta-srv/src/lease.rs
index 2fa3224e35b4..0a091e4a683b 100644
--- a/src/meta-srv/src/lease.rs
+++ b/src/meta-srv/src/lease.rs
@@ -14,6 +14,7 @@
use std::collections::HashMap;
+use common_meta::kv_backend::KvBackend;
use common_meta::peer::Peer;
use common_meta::{util, ClusterId};
use common_time::util as time_util;
@@ -39,7 +40,8 @@ pub async fn lookup_alive_datanode_peer(
cluster_id,
node_id: datanode_id,
};
- let Some(kv) = meta_peer_client.get(lease_key.clone().try_into()?).await? else {
+ let lease_key_bytes: Vec<u8> = lease_key.clone().try_into()?;
+ let Some(kv) = meta_peer_client.get(&lease_key_bytes).await? else {
return Ok(None);
};
let lease_value: LeaseValue = kv.value.try_into()?;
@@ -74,7 +76,13 @@ where
let key = get_lease_prefix(cluster_id);
let range_end = util::get_prefix_end_key(&key);
- let kvs = meta_peer_client.range(key, range_end, false).await?;
+ let range_req = common_meta::rpc::store::RangeRequest {
+ key,
+ range_end,
+ keys_only: false,
+ ..Default::default()
+ };
+ let kvs = meta_peer_client.range(range_req).await?.kvs;
let mut lease_kvs = HashMap::new();
for kv in kvs {
let lease_key: LeaseKey = kv.key.try_into()?;
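A self-contained sketch of the read-only backend pattern this record applies: reads are served from local data while every write path fails fast with an "unsupported" error. The Store trait and ReadOnlyView type below are made up for illustration and stand in for the real KvBackend trait and MetaPeerClient.

```rust
use std::collections::HashMap;

// Made-up trait standing in for KvBackend: one read path, one write path.
trait Store {
    fn get(&self, key: &str) -> Result<Option<String>, String>;
    fn put(&mut self, key: &str, value: &str) -> Result<(), String>;
}

// Reads come from local data; writes are rejected, mirroring the
// follower-side client in the diff above.
struct ReadOnlyView {
    data: HashMap<String, String>,
}

impl Store for ReadOnlyView {
    fn get(&self, key: &str) -> Result<Option<String>, String> {
        Ok(self.data.get(key).cloned())
    }

    fn put(&mut self, _key: &str, _value: &str) -> Result<(), String> {
        Err("unsupported operation: put".to_string())
    }
}

fn main() {
    let mut view = ReadOnlyView {
        data: HashMap::from([("k".to_string(), "v".to_string())]),
    };
    assert_eq!(view.get("k").unwrap(), Some("v".to_string()));
    assert!(view.put("k", "x").is_err());
}
```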
|
chore
|
impl KvBackend for MetaPeerClient (#3076)
|
1629435888c633992c06ca3079d39d0792775bd3
|
2024-04-09 08:33:26
|
JeremyHi
|
chore: unify name metasrv (#3671)
| false
|
diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml
index 68e9108d5790..d88aa4b664f1 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.yml
+++ b/.github/ISSUE_TEMPLATE/bug_report.yml
@@ -39,7 +39,7 @@ body:
- Query Engine
- Table Engine
- Write Protocols
- - MetaSrv
+ - Metasrv
- Frontend
- Datanode
- Other
diff --git a/docs/rfcs/2023-07-06-table-engine-refactor.md b/docs/rfcs/2023-07-06-table-engine-refactor.md
index 2477a5e2e932..33df3eed08dc 100644
--- a/docs/rfcs/2023-07-06-table-engine-refactor.md
+++ b/docs/rfcs/2023-07-06-table-engine-refactor.md
@@ -27,8 +27,8 @@ subgraph Frontend["Frontend"]
end
end
-MyTable --> MetaSrv
-MetaSrv --> ETCD
+MyTable --> Metasrv
+Metasrv --> ETCD
MyTable-->TableEngine0
MyTable-->TableEngine1
@@ -95,8 +95,8 @@ subgraph Frontend["Frontend"]
end
end
-MyTable --> MetaSrv
-MetaSrv --> ETCD
+MyTable --> Metasrv
+Metasrv --> ETCD
MyTable-->RegionEngine
MyTable-->RegionEngine1
diff --git a/docs/rfcs/2024-01-17-dataflow-framework.md b/docs/rfcs/2024-01-17-dataflow-framework.md
index 3d62deba42d5..46da2175e1af 100644
--- a/docs/rfcs/2024-01-17-dataflow-framework.md
+++ b/docs/rfcs/2024-01-17-dataflow-framework.md
@@ -36,7 +36,7 @@ Hence, we choose the third option, and use a simple logical plan that's anagonis
## Deploy mode and protocol
- Greptime Flow is an independent streaming compute component. It can be used either within a standalone node or as a dedicated node at the same level as frontend in distributed mode.
- It accepts insert request Rows, which is used between frontend and datanode.
-- New flow job is submitted in the format of modified SQL query like snowflake do, like: `CREATE TASK avg_over_5m WINDOW_SIZE = "5m" AS SELECT avg(value) FROM table WHERE time > now() - 5m GROUP BY time(1m)`. Flow job then got stored in MetaSrv.
+- New flow job is submitted in the format of modified SQL query like snowflake do, like: `CREATE TASK avg_over_5m WINDOW_SIZE = "5m" AS SELECT avg(value) FROM table WHERE time > now() - 5m GROUP BY time(1m)`. Flow job then got stored in Metasrv.
- It also persists results in the format of Rows to frontend.
- The query plan uses Substrait as codec format. It's the same with GreptimeDB's query engine.
- Greptime Flow needs a WAL for recovering. It's possible to reuse datanode's.
diff --git a/src/catalog/src/error.rs b/src/catalog/src/error.rs
index 3e8f204ed332..8391dab045c8 100644
--- a/src/catalog/src/error.rs
+++ b/src/catalog/src/error.rs
@@ -216,7 +216,7 @@ pub enum Error {
},
#[snafu(display("Failed to perform metasrv operation"))]
- MetaSrv {
+ Metasrv {
location: Location,
source: meta_client::error::Error,
},
@@ -304,7 +304,7 @@ impl ErrorExt for Error {
| Error::CreateTable { source, .. }
| Error::TableSchemaMismatch { source, .. } => source.status_code(),
- Error::MetaSrv { source, .. } => source.status_code(),
+ Error::Metasrv { source, .. } => source.status_code(),
Error::SystemCatalogTableScan { source, .. } => source.status_code(),
Error::SystemCatalogTableScanExec { source, .. } => source.status_code(),
Error::InvalidTableInfoInCatalog { source, .. } => source.status_code(),
diff --git a/src/cmd/src/metasrv.rs b/src/cmd/src/metasrv.rs
index c7affc93c6f1..29b0f517de7e 100644
--- a/src/cmd/src/metasrv.rs
+++ b/src/cmd/src/metasrv.rs
@@ -17,8 +17,8 @@ use std::time::Duration;
use async_trait::async_trait;
use clap::Parser;
use common_telemetry::logging;
-use meta_srv::bootstrap::MetaSrvInstance;
-use meta_srv::metasrv::MetaSrvOptions;
+use meta_srv::bootstrap::MetasrvInstance;
+use meta_srv::metasrv::MetasrvOptions;
use snafu::ResultExt;
use crate::error::{self, Result, StartMetaServerSnafu};
@@ -26,11 +26,11 @@ use crate::options::{CliOptions, Options};
use crate::App;
pub struct Instance {
- instance: MetaSrvInstance,
+ instance: MetasrvInstance,
}
impl Instance {
- fn new(instance: MetaSrvInstance) -> Self {
+ fn new(instance: MetasrvInstance) -> Self {
Self { instance }
}
}
@@ -42,7 +42,7 @@ impl App for Instance {
}
async fn start(&mut self) -> Result<()> {
- plugins::start_meta_srv_plugins(self.instance.plugins())
+ plugins::start_metasrv_plugins(self.instance.plugins())
.await
.context(StartMetaServerSnafu)?;
@@ -64,7 +64,7 @@ pub struct Command {
}
impl Command {
- pub async fn build(self, opts: MetaSrvOptions) -> Result<Instance> {
+ pub async fn build(self, opts: MetasrvOptions) -> Result<Instance> {
self.subcmd.build(opts).await
}
@@ -79,7 +79,7 @@ enum SubCommand {
}
impl SubCommand {
- async fn build(self, opts: MetaSrvOptions) -> Result<Instance> {
+ async fn build(self, opts: MetasrvOptions) -> Result<Instance> {
match self {
SubCommand::Start(cmd) => cmd.build(opts).await,
}
@@ -127,10 +127,10 @@ struct StartCommand {
impl StartCommand {
fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
- let mut opts: MetaSrvOptions = Options::load_layered_options(
+ let mut opts: MetasrvOptions = Options::load_layered_options(
self.config_file.as_deref(),
self.env_prefix.as_ref(),
- MetaSrvOptions::env_list_keys(),
+ MetasrvOptions::env_list_keys(),
)?;
if let Some(dir) = &cli_options.log_dir {
@@ -193,20 +193,20 @@ impl StartCommand {
Ok(Options::Metasrv(Box::new(opts)))
}
- async fn build(self, mut opts: MetaSrvOptions) -> Result<Instance> {
- let plugins = plugins::setup_meta_srv_plugins(&mut opts)
+ async fn build(self, mut opts: MetasrvOptions) -> Result<Instance> {
+ let plugins = plugins::setup_metasrv_plugins(&mut opts)
.await
.context(StartMetaServerSnafu)?;
- logging::info!("MetaSrv start command: {:#?}", self);
- logging::info!("MetaSrv options: {:#?}", opts);
+ logging::info!("Metasrv start command: {:#?}", self);
+ logging::info!("Metasrv options: {:#?}", opts);
let builder = meta_srv::bootstrap::metasrv_builder(&opts, plugins.clone(), None)
.await
.context(error::BuildMetaServerSnafu)?;
let metasrv = builder.build().await.context(error::BuildMetaServerSnafu)?;
- let instance = MetaSrvInstance::new(opts, plugins, metasrv)
+ let instance = MetasrvInstance::new(opts, plugins, metasrv)
.await
.context(error::BuildMetaServerSnafu)?;
diff --git a/src/cmd/src/options.rs b/src/cmd/src/options.rs
index 9bc652d44abf..8fd974b939dc 100644
--- a/src/cmd/src/options.rs
+++ b/src/cmd/src/options.rs
@@ -15,12 +15,12 @@
use clap::ArgMatches;
use common_config::KvBackendConfig;
use common_telemetry::logging::{LoggingOptions, TracingOptions};
-use common_wal::config::MetaSrvWalConfig;
+use common_wal::config::MetasrvWalConfig;
use config::{Config, Environment, File, FileFormat};
use datanode::config::{DatanodeOptions, ProcedureConfig};
use frontend::error::{Result as FeResult, TomlFormatSnafu};
use frontend::frontend::{FrontendOptions, TomlSerializable};
-use meta_srv::metasrv::MetaSrvOptions;
+use meta_srv::metasrv::MetasrvOptions;
use serde::{Deserialize, Serialize};
use snafu::ResultExt;
@@ -38,7 +38,7 @@ pub struct MixOptions {
pub frontend: FrontendOptions,
pub datanode: DatanodeOptions,
pub logging: LoggingOptions,
- pub wal_meta: MetaSrvWalConfig,
+ pub wal_meta: MetasrvWalConfig,
}
impl From<MixOptions> for FrontendOptions {
@@ -56,7 +56,7 @@ impl TomlSerializable for MixOptions {
pub enum Options {
Datanode(Box<DatanodeOptions>),
Frontend(Box<FrontendOptions>),
- Metasrv(Box<MetaSrvOptions>),
+ Metasrv(Box<MetasrvOptions>),
Standalone(Box<MixOptions>),
Cli(Box<LoggingOptions>),
}
diff --git a/src/common/meta/src/wal_options_allocator.rs b/src/common/meta/src/wal_options_allocator.rs
index 8116773415a7..202b2958ba4a 100644
--- a/src/common/meta/src/wal_options_allocator.rs
+++ b/src/common/meta/src/wal_options_allocator.rs
@@ -17,7 +17,7 @@ pub mod kafka;
use std::collections::HashMap;
use std::sync::Arc;
-use common_wal::config::MetaSrvWalConfig;
+use common_wal::config::MetasrvWalConfig;
use common_wal::options::{KafkaWalOptions, WalOptions, WAL_OPTIONS_KEY};
use snafu::ResultExt;
use store_api::storage::{RegionId, RegionNumber};
@@ -39,10 +39,10 @@ pub type WalOptionsAllocatorRef = Arc<WalOptionsAllocator>;
impl WalOptionsAllocator {
/// Creates a WalOptionsAllocator.
- pub fn new(config: MetaSrvWalConfig, kv_backend: KvBackendRef) -> Self {
+ pub fn new(config: MetasrvWalConfig, kv_backend: KvBackendRef) -> Self {
match config {
- MetaSrvWalConfig::RaftEngine => Self::RaftEngine,
- MetaSrvWalConfig::Kafka(kafka_config) => {
+ MetasrvWalConfig::RaftEngine => Self::RaftEngine,
+ MetasrvWalConfig::Kafka(kafka_config) => {
Self::Kafka(KafkaTopicManager::new(kafka_config, kv_backend))
}
}
@@ -118,7 +118,7 @@ pub fn prepare_wal_options(
#[cfg(test)]
mod tests {
- use common_wal::config::kafka::MetaSrvKafkaConfig;
+ use common_wal::config::kafka::MetasrvKafkaConfig;
use common_wal::test_util::run_test_with_kafka_wal;
use super::*;
@@ -129,7 +129,7 @@ mod tests {
#[tokio::test]
async fn test_allocator_with_raft_engine() {
let kv_backend = Arc::new(MemoryKvBackend::new()) as KvBackendRef;
- let wal_config = MetaSrvWalConfig::RaftEngine;
+ let wal_config = MetasrvWalConfig::RaftEngine;
let allocator = WalOptionsAllocator::new(wal_config, kv_backend);
allocator.start().await.unwrap();
@@ -155,7 +155,7 @@ mod tests {
.collect::<Vec<_>>();
// Creates a topic manager.
- let config = MetaSrvKafkaConfig {
+ let config = MetasrvKafkaConfig {
replication_factor: broker_endpoints.len() as i16,
broker_endpoints,
..Default::default()
diff --git a/src/common/meta/src/wal_options_allocator/kafka/topic_manager.rs b/src/common/meta/src/wal_options_allocator/kafka/topic_manager.rs
index d6ee3e774600..ea2f89554bfa 100644
--- a/src/common/meta/src/wal_options_allocator/kafka/topic_manager.rs
+++ b/src/common/meta/src/wal_options_allocator/kafka/topic_manager.rs
@@ -16,7 +16,7 @@ use std::collections::HashSet;
use std::sync::Arc;
use common_telemetry::{error, info};
-use common_wal::config::kafka::MetaSrvKafkaConfig;
+use common_wal::config::kafka::MetasrvKafkaConfig;
use common_wal::TopicSelectorType;
use rskafka::client::controller::ControllerClient;
use rskafka::client::error::Error as RsKafkaError;
@@ -46,7 +46,7 @@ const DEFAULT_PARTITION: i32 = 0;
/// Manages topic initialization and selection.
pub struct TopicManager {
- config: MetaSrvKafkaConfig,
+ config: MetasrvKafkaConfig,
pub(crate) topic_pool: Vec<String>,
pub(crate) topic_selector: TopicSelectorRef,
kv_backend: KvBackendRef,
@@ -54,7 +54,7 @@ pub struct TopicManager {
impl TopicManager {
/// Creates a new topic manager.
- pub fn new(config: MetaSrvKafkaConfig, kv_backend: KvBackendRef) -> Self {
+ pub fn new(config: MetasrvKafkaConfig, kv_backend: KvBackendRef) -> Self {
// Topics should be created.
let topics = (0..config.num_topics)
.map(|topic_id| format!("{}_{topic_id}", config.topic_name_prefix))
@@ -283,7 +283,7 @@ mod tests {
.collect::<Vec<_>>();
// Creates a topic manager.
- let config = MetaSrvKafkaConfig {
+ let config = MetasrvKafkaConfig {
replication_factor: broker_endpoints.len() as i16,
broker_endpoints,
..Default::default()
diff --git a/src/common/wal/src/config.rs b/src/common/wal/src/config.rs
index a51335c199bb..6c4993b835bc 100644
--- a/src/common/wal/src/config.rs
+++ b/src/common/wal/src/config.rs
@@ -17,16 +17,16 @@ pub mod raft_engine;
use serde::{Deserialize, Serialize};
-use crate::config::kafka::{DatanodeKafkaConfig, MetaSrvKafkaConfig, StandaloneKafkaConfig};
+use crate::config::kafka::{DatanodeKafkaConfig, MetasrvKafkaConfig, StandaloneKafkaConfig};
use crate::config::raft_engine::RaftEngineConfig;
/// Wal configurations for metasrv.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)]
#[serde(tag = "provider", rename_all = "snake_case")]
-pub enum MetaSrvWalConfig {
+pub enum MetasrvWalConfig {
#[default]
RaftEngine,
- Kafka(MetaSrvKafkaConfig),
+ Kafka(MetasrvKafkaConfig),
}
/// Wal configurations for datanode.
@@ -57,11 +57,11 @@ impl Default for StandaloneWalConfig {
}
}
-impl From<StandaloneWalConfig> for MetaSrvWalConfig {
+impl From<StandaloneWalConfig> for MetasrvWalConfig {
fn from(config: StandaloneWalConfig) -> Self {
match config {
StandaloneWalConfig::RaftEngine(_) => Self::RaftEngine,
- StandaloneWalConfig::Kafka(config) => Self::Kafka(MetaSrvKafkaConfig {
+ StandaloneWalConfig::Kafka(config) => Self::Kafka(MetasrvKafkaConfig {
broker_endpoints: config.broker_endpoints,
num_topics: config.num_topics,
selector_type: config.selector_type,
@@ -100,7 +100,7 @@ mod tests {
use super::*;
use crate::config::kafka::common::BackoffConfig;
- use crate::config::{DatanodeKafkaConfig, MetaSrvKafkaConfig, StandaloneKafkaConfig};
+ use crate::config::{DatanodeKafkaConfig, MetasrvKafkaConfig, StandaloneKafkaConfig};
use crate::TopicSelectorType;
#[test]
@@ -109,8 +109,8 @@ mod tests {
let toml_str = r#"
provider = "raft_engine"
"#;
- let metasrv_wal_config: MetaSrvWalConfig = toml::from_str(toml_str).unwrap();
- assert_eq!(metasrv_wal_config, MetaSrvWalConfig::RaftEngine);
+ let metasrv_wal_config: MetasrvWalConfig = toml::from_str(toml_str).unwrap();
+ assert_eq!(metasrv_wal_config, MetasrvWalConfig::RaftEngine);
let datanode_wal_config: DatanodeWalConfig = toml::from_str(toml_str).unwrap();
assert_eq!(
@@ -166,9 +166,9 @@ mod tests {
backoff_deadline = "5mins"
"#;
- // Deserialized to MetaSrvWalConfig.
- let metasrv_wal_config: MetaSrvWalConfig = toml::from_str(toml_str).unwrap();
- let expected = MetaSrvKafkaConfig {
+ // Deserialized to MetasrvWalConfig.
+ let metasrv_wal_config: MetasrvWalConfig = toml::from_str(toml_str).unwrap();
+ let expected = MetasrvKafkaConfig {
broker_endpoints: vec!["127.0.0.1:9092".to_string()],
num_topics: 32,
selector_type: TopicSelectorType::RoundRobin,
@@ -183,7 +183,7 @@ mod tests {
deadline: Some(Duration::from_secs(60 * 5)),
},
};
- assert_eq!(metasrv_wal_config, MetaSrvWalConfig::Kafka(expected));
+ assert_eq!(metasrv_wal_config, MetasrvWalConfig::Kafka(expected));
// Deserialized to DatanodeWalConfig.
let datanode_wal_config: DatanodeWalConfig = toml::from_str(toml_str).unwrap();
diff --git a/src/common/wal/src/config/kafka.rs b/src/common/wal/src/config/kafka.rs
index 586d2182a0f0..f47e444521f2 100644
--- a/src/common/wal/src/config/kafka.rs
+++ b/src/common/wal/src/config/kafka.rs
@@ -18,5 +18,5 @@ pub mod metasrv;
pub mod standalone;
pub use datanode::DatanodeKafkaConfig;
-pub use metasrv::MetaSrvKafkaConfig;
+pub use metasrv::MetasrvKafkaConfig;
pub use standalone::StandaloneKafkaConfig;
diff --git a/src/common/wal/src/config/kafka/metasrv.rs b/src/common/wal/src/config/kafka/metasrv.rs
index a8989275f42b..99efe762fbc0 100644
--- a/src/common/wal/src/config/kafka/metasrv.rs
+++ b/src/common/wal/src/config/kafka/metasrv.rs
@@ -22,7 +22,7 @@ use crate::{TopicSelectorType, BROKER_ENDPOINT, TOPIC_NAME_PREFIX};
/// Kafka wal configurations for metasrv.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
#[serde(default)]
-pub struct MetaSrvKafkaConfig {
+pub struct MetasrvKafkaConfig {
/// The broker endpoints of the Kafka cluster.
pub broker_endpoints: Vec<String>,
/// The number of topics to be created upon start.
@@ -43,7 +43,7 @@ pub struct MetaSrvKafkaConfig {
pub backoff: BackoffConfig,
}
-impl Default for MetaSrvKafkaConfig {
+impl Default for MetasrvKafkaConfig {
fn default() -> Self {
let broker_endpoints = vec![BROKER_ENDPOINT.to_string()];
let replication_factor = broker_endpoints.len() as i16;
diff --git a/src/meta-srv/src/bootstrap.rs b/src/meta-srv/src/bootstrap.rs
index 858b62a8d213..e6cf40bb4d92 100644
--- a/src/meta-srv/src/bootstrap.rs
+++ b/src/meta-srv/src/bootstrap.rs
@@ -41,8 +41,8 @@ use crate::election::etcd::EtcdElection;
use crate::error::InitExportMetricsTaskSnafu;
use crate::lock::etcd::EtcdLock;
use crate::lock::memory::MemLock;
-use crate::metasrv::builder::MetaSrvBuilder;
-use crate::metasrv::{MetaSrv, MetaSrvOptions, SelectorRef};
+use crate::metasrv::builder::MetasrvBuilder;
+use crate::metasrv::{Metasrv, MetasrvOptions, SelectorRef};
use crate::selector::lease_based::LeaseBasedSelector;
use crate::selector::load_based::LoadBasedSelector;
use crate::selector::SelectorType;
@@ -50,12 +50,12 @@ use crate::service::admin;
use crate::{error, Result};
#[derive(Clone)]
-pub struct MetaSrvInstance {
- meta_srv: MetaSrv,
+pub struct MetasrvInstance {
+ metasrv: Metasrv,
- http_srv: Arc<HttpServer>,
+ httpsrv: Arc<HttpServer>,
- opts: MetaSrvOptions,
+ opts: MetasrvOptions,
signal_sender: Option<Sender<()>>,
@@ -64,25 +64,25 @@ pub struct MetaSrvInstance {
export_metrics_task: Option<ExportMetricsTask>,
}
-impl MetaSrvInstance {
+impl MetasrvInstance {
pub async fn new(
- opts: MetaSrvOptions,
+ opts: MetasrvOptions,
plugins: Plugins,
- meta_srv: MetaSrv,
- ) -> Result<MetaSrvInstance> {
- let http_srv = Arc::new(
+ metasrv: Metasrv,
+ ) -> Result<MetasrvInstance> {
+ let httpsrv = Arc::new(
HttpServerBuilder::new(opts.http.clone())
.with_metrics_handler(MetricsHandler)
.with_greptime_config_options(opts.to_toml_string())
.build(),
);
- // put meta_srv into plugins for later use
- plugins.insert::<Arc<MetaSrv>>(Arc::new(meta_srv.clone()));
+ // put metasrv into plugins for later use
+ plugins.insert::<Arc<Metasrv>>(Arc::new(metasrv.clone()));
let export_metrics_task = ExportMetricsTask::try_new(&opts.export_metrics, Some(&plugins))
.context(InitExportMetricsTaskSnafu)?;
- Ok(MetaSrvInstance {
- meta_srv,
- http_srv,
+ Ok(MetasrvInstance {
+ metasrv,
+ httpsrv,
opts,
signal_sender: None,
plugins,
@@ -91,7 +91,7 @@ impl MetaSrvInstance {
}
pub async fn start(&mut self) -> Result<()> {
- self.meta_srv.try_start().await?;
+ self.metasrv.try_start().await?;
if let Some(t) = self.export_metrics_task.as_ref() {
t.start(None).context(InitExportMetricsTaskSnafu)?
@@ -101,23 +101,23 @@ impl MetaSrvInstance {
self.signal_sender = Some(tx);
- let mut router = router(self.meta_srv.clone());
- if let Some(configurator) = self.meta_srv.plugins().get::<ConfiguratorRef>() {
+ let mut router = router(self.metasrv.clone());
+ if let Some(configurator) = self.metasrv.plugins().get::<ConfiguratorRef>() {
router = configurator.config_grpc(router);
}
- let meta_srv = bootstrap_meta_srv_with_router(&self.opts.bind_addr, router, rx);
+ let metasrv = bootstrap_metasrv_with_router(&self.opts.bind_addr, router, rx);
let addr = self.opts.http.addr.parse().context(error::ParseAddrSnafu {
addr: &self.opts.http.addr,
})?;
let http_srv = async {
- self.http_srv
+ self.httpsrv
.start(addr)
.await
.map(|_| ())
.context(error::StartHttpSnafu)
};
- future::try_join(meta_srv, http_srv).await?;
+ future::try_join(metasrv, http_srv).await?;
Ok(())
}
@@ -128,12 +128,12 @@ impl MetaSrvInstance {
.await
.context(error::SendShutdownSignalSnafu)?;
}
- self.meta_srv.shutdown().await?;
- self.http_srv
+ self.metasrv.shutdown().await?;
+ self.httpsrv
.shutdown()
.await
.context(error::ShutdownServerSnafu {
- server: self.http_srv.name(),
+ server: self.httpsrv.name(),
})?;
Ok(())
}
@@ -143,7 +143,7 @@ impl MetaSrvInstance {
}
}
-pub async fn bootstrap_meta_srv_with_router(
+pub async fn bootstrap_metasrv_with_router(
bind_addr: &str,
router: Router,
mut signal: Receiver<()>,
@@ -167,22 +167,22 @@ pub async fn bootstrap_meta_srv_with_router(
Ok(())
}
-pub fn router(meta_srv: MetaSrv) -> Router {
+pub fn router(metasrv: Metasrv) -> Router {
tonic::transport::Server::builder()
.accept_http1(true) // for admin services
- .add_service(HeartbeatServer::new(meta_srv.clone()))
- .add_service(StoreServer::new(meta_srv.clone()))
- .add_service(ClusterServer::new(meta_srv.clone()))
- .add_service(LockServer::new(meta_srv.clone()))
- .add_service(ProcedureServiceServer::new(meta_srv.clone()))
- .add_service(admin::make_admin_service(meta_srv))
+ .add_service(HeartbeatServer::new(metasrv.clone()))
+ .add_service(StoreServer::new(metasrv.clone()))
+ .add_service(ClusterServer::new(metasrv.clone()))
+ .add_service(LockServer::new(metasrv.clone()))
+ .add_service(ProcedureServiceServer::new(metasrv.clone()))
+ .add_service(admin::make_admin_service(metasrv))
}
pub async fn metasrv_builder(
- opts: &MetaSrvOptions,
+ opts: &MetasrvOptions,
plugins: Plugins,
kv_backend: Option<KvBackendRef>,
-) -> Result<MetaSrvBuilder> {
+) -> Result<MetasrvBuilder> {
let (kv_backend, election, lock) = match (kv_backend, opts.use_memory_store) {
(Some(kv_backend), _) => (kv_backend, None, Some(Arc::new(MemLock::default()) as _)),
(None, true) => (
@@ -229,7 +229,7 @@ pub async fn metasrv_builder(
SelectorType::LeaseBased => Arc::new(LeaseBasedSelector) as SelectorRef,
};
- Ok(MetaSrvBuilder::new()
+ Ok(MetasrvBuilder::new()
.options(opts.clone())
.kv_backend(kv_backend)
.in_memory(in_memory)
@@ -239,7 +239,7 @@ pub async fn metasrv_builder(
.plugins(plugins))
}
-async fn create_etcd_client(opts: &MetaSrvOptions) -> Result<Client> {
+async fn create_etcd_client(opts: &MetasrvOptions) -> Result<Client> {
let etcd_endpoints = opts
.store_addr
.split(',')
diff --git a/src/meta-srv/src/election.rs b/src/meta-srv/src/election.rs
index cdd434068c90..fb5c9296e2d1 100644
--- a/src/meta-srv/src/election.rs
+++ b/src/meta-srv/src/election.rs
@@ -22,7 +22,7 @@ use tokio::sync::broadcast::Receiver;
use crate::error::Result;
-pub const ELECTION_KEY: &str = "__meta_srv_election";
+pub const ELECTION_KEY: &str = "__metasrv_election";
#[derive(Debug, Clone)]
pub enum LeaderChangeMessage {
diff --git a/src/meta-srv/src/error.rs b/src/meta-srv/src/error.rs
index 5db066c34deb..c643528dddad 100644
--- a/src/meta-srv/src/error.rs
+++ b/src/meta-srv/src/error.rs
@@ -363,7 +363,7 @@ pub enum Error {
location: Location,
},
- #[snafu(display("MetaSrv has no leader at this moment"))]
+ #[snafu(display("Metasrv has no leader at this moment"))]
NoLeader { location: Location },
#[snafu(display("Table {} not found", name))]
diff --git a/src/meta-srv/src/handler/failure_handler.rs b/src/meta-srv/src/handler/failure_handler.rs
index 9748737fae30..004d32e4a295 100644
--- a/src/meta-srv/src/handler/failure_handler.rs
+++ b/src/meta-srv/src/handler/failure_handler.rs
@@ -115,7 +115,7 @@ mod tests {
use super::*;
use crate::handler::node_stat::{RegionStat, Stat};
- use crate::metasrv::builder::MetaSrvBuilder;
+ use crate::metasrv::builder::MetasrvBuilder;
use crate::test_util::create_region_failover_manager;
#[tokio::test(flavor = "multi_thread")]
@@ -129,7 +129,7 @@ mod tests {
let req = &HeartbeatRequest::default();
- let builder = MetaSrvBuilder::new();
+ let builder = MetasrvBuilder::new();
let metasrv = builder.build().await.unwrap();
let mut ctx = metasrv.new_ctx();
ctx.is_infancy = false;
diff --git a/src/meta-srv/src/handler/on_leader_start_handler.rs b/src/meta-srv/src/handler/on_leader_start_handler.rs
index 58751833d173..dccb8d3d60f9 100644
--- a/src/meta-srv/src/handler/on_leader_start_handler.rs
+++ b/src/meta-srv/src/handler/on_leader_start_handler.rs
@@ -38,7 +38,7 @@ impl HeartbeatHandler for OnLeaderStartHandler {
if election.in_infancy() {
ctx.is_infancy = true;
- // TODO(weny): Unifies the multiple leader state between Context and MetaSrv.
+ // TODO(weny): Unifies the multiple leader state between Context and Metasrv.
// we can't ensure the in-memory kv has already been reset in the outside loop.
// We still use heartbeat requests to trigger resetting in-memory kv.
ctx.reset_in_memory();
diff --git a/src/meta-srv/src/handler/region_lease_handler.rs b/src/meta-srv/src/handler/region_lease_handler.rs
index 0a8f917bf6f2..83d190ef8a24 100644
--- a/src/meta-srv/src/handler/region_lease_handler.rs
+++ b/src/meta-srv/src/handler/region_lease_handler.rs
@@ -116,7 +116,7 @@ mod test {
use super::*;
use crate::handler::node_stat::{RegionStat, Stat};
- use crate::metasrv::builder::MetaSrvBuilder;
+ use crate::metasrv::builder::MetasrvBuilder;
fn new_test_keeper() -> RegionLeaseKeeper {
let store = Arc::new(MemoryKvBackend::new());
@@ -170,7 +170,7 @@ mod test {
.await
.unwrap();
- let builder = MetaSrvBuilder::new();
+ let builder = MetasrvBuilder::new();
let metasrv = builder.build().await.unwrap();
let ctx = &mut metasrv.new_ctx();
@@ -317,7 +317,7 @@ mod test {
.await
.unwrap();
- let builder = MetaSrvBuilder::new();
+ let builder = MetasrvBuilder::new();
let metasrv = builder.build().await.unwrap();
let ctx = &mut metasrv.new_ctx();
diff --git a/src/meta-srv/src/metasrv.rs b/src/meta-srv/src/metasrv.rs
index fa7690369415..4b84fcb9e38c 100644
--- a/src/meta-srv/src/metasrv.rs
+++ b/src/meta-srv/src/metasrv.rs
@@ -33,7 +33,7 @@ use common_procedure::options::ProcedureConfig;
use common_procedure::ProcedureManagerRef;
use common_telemetry::logging::LoggingOptions;
use common_telemetry::{error, info, warn};
-use common_wal::config::MetaSrvWalConfig;
+use common_wal::config::MetasrvWalConfig;
use serde::{Deserialize, Serialize};
use servers::export_metrics::ExportMetricsOption;
use servers::http::HttpOptions;
@@ -63,7 +63,7 @@ pub const METASRV_HOME: &str = "/tmp/metasrv";
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
#[serde(default)]
-pub struct MetaSrvOptions {
+pub struct MetasrvOptions {
pub bind_addr: String,
pub server_addr: String,
pub store_addr: String,
@@ -77,7 +77,7 @@ pub struct MetaSrvOptions {
pub datanode: DatanodeOptions,
pub enable_telemetry: bool,
pub data_home: String,
- pub wal: MetaSrvWalConfig,
+ pub wal: MetasrvWalConfig,
pub export_metrics: ExportMetricsOption,
pub store_key_prefix: String,
/// The max operations per txn
@@ -93,13 +93,13 @@ pub struct MetaSrvOptions {
pub max_txn_ops: usize,
}
-impl MetaSrvOptions {
+impl MetasrvOptions {
pub fn env_list_keys() -> Option<&'static [&'static str]> {
Some(&["wal.broker_endpoints"])
}
}
-impl Default for MetaSrvOptions {
+impl Default for MetasrvOptions {
fn default() -> Self {
Self {
bind_addr: "127.0.0.1:3002".to_string(),
@@ -124,7 +124,7 @@ impl Default for MetaSrvOptions {
datanode: DatanodeOptions::default(),
enable_telemetry: true,
data_home: METASRV_HOME.to_string(),
- wal: MetaSrvWalConfig::default(),
+ wal: MetasrvWalConfig::default(),
export_metrics: ExportMetricsOption::default(),
store_key_prefix: String::new(),
max_txn_ops: 128,
@@ -132,7 +132,7 @@ impl Default for MetaSrvOptions {
}
}
-impl MetaSrvOptions {
+impl MetasrvOptions {
pub fn to_toml_string(&self) -> String {
toml::to_string(&self).unwrap()
}
@@ -253,10 +253,10 @@ impl MetaStateHandler {
}
#[derive(Clone)]
-pub struct MetaSrv {
+pub struct Metasrv {
state: StateRef,
started: Arc<AtomicBool>,
- options: MetaSrvOptions,
+ options: MetasrvOptions,
// It is only valid at the leader node and is used to temporarily
// store some data that will not be persisted.
in_memory: ResettableKvBackendRef,
@@ -279,14 +279,14 @@ pub struct MetaSrv {
plugins: Plugins,
}
-impl MetaSrv {
+impl Metasrv {
pub async fn try_start(&self) -> Result<()> {
if self
.started
.compare_exchange(false, true, Ordering::Relaxed, Ordering::Relaxed)
.is_err()
{
- warn!("MetaSrv already started");
+ warn!("Metasrv already started");
return Ok(());
}
@@ -347,11 +347,11 @@ impl MetaSrv {
while started.load(Ordering::Relaxed) {
let res = election.campaign().await;
if let Err(e) = res {
- warn!("MetaSrv election error: {}", e);
+ warn!("Metasrv election error: {}", e);
}
- info!("MetaSrv re-initiate election");
+ info!("Metasrv re-initiate election");
}
- info!("MetaSrv stopped");
+ info!("Metasrv stopped");
});
} else {
if let Err(e) = self.wal_options_allocator.start().await {
@@ -368,7 +368,7 @@ impl MetaSrv {
.context(StartProcedureManagerSnafu)?;
}
- info!("MetaSrv started");
+ info!("Metasrv started");
Ok(())
}
@@ -403,7 +403,7 @@ impl MetaSrv {
}
#[inline]
- pub fn options(&self) -> &MetaSrvOptions {
+ pub fn options(&self) -> &MetasrvOptions {
&self.options
}
diff --git a/src/meta-srv/src/metasrv/builder.rs b/src/meta-srv/src/metasrv/builder.rs
index d406589599b5..003d8ba5c6d6 100644
--- a/src/meta-srv/src/metasrv/builder.rs
+++ b/src/meta-srv/src/metasrv/builder.rs
@@ -57,7 +57,7 @@ use crate::handler::{HeartbeatHandlerGroup, HeartbeatMailbox, Pushers};
use crate::lock::memory::MemLock;
use crate::lock::DistLockRef;
use crate::metasrv::{
- ElectionRef, MetaSrv, MetaSrvOptions, MetasrvInfo, SelectorContext, SelectorRef, TABLE_ID_SEQ,
+ ElectionRef, Metasrv, MetasrvInfo, MetasrvOptions, SelectorContext, SelectorRef, TABLE_ID_SEQ,
};
use crate::procedure::region_failover::RegionFailoverManager;
use crate::procedure::region_migration::manager::RegionMigrationManager;
@@ -70,8 +70,8 @@ use crate::state::State;
use crate::table_meta_alloc::MetasrvPeerAllocator;
// TODO(fys): try use derive_builder macro
-pub struct MetaSrvBuilder {
- options: Option<MetaSrvOptions>,
+pub struct MetasrvBuilder {
+ options: Option<MetasrvOptions>,
kv_backend: Option<KvBackendRef>,
in_memory: Option<ResettableKvBackendRef>,
selector: Option<SelectorRef>,
@@ -84,7 +84,7 @@ pub struct MetaSrvBuilder {
table_metadata_allocator: Option<TableMetadataAllocatorRef>,
}
-impl MetaSrvBuilder {
+impl MetasrvBuilder {
pub fn new() -> Self {
Self {
kv_backend: None,
@@ -101,7 +101,7 @@ impl MetaSrvBuilder {
}
}
- pub fn options(mut self, options: MetaSrvOptions) -> Self {
+ pub fn options(mut self, options: MetasrvOptions) -> Self {
self.options = Some(options);
self
}
@@ -159,10 +159,10 @@ impl MetaSrvBuilder {
self
}
- pub async fn build(self) -> Result<MetaSrv> {
+ pub async fn build(self) -> Result<Metasrv> {
let started = Arc::new(AtomicBool::new(false));
- let MetaSrvBuilder {
+ let MetasrvBuilder {
election,
meta_peer_client,
options,
@@ -320,7 +320,7 @@ impl MetaSrvBuilder {
let enable_telemetry = options.enable_telemetry;
let metasrv_home = options.data_home.to_string();
- Ok(MetaSrv {
+ Ok(Metasrv {
state,
started,
options,
@@ -373,7 +373,7 @@ fn build_mailbox(kv_backend: &KvBackendRef, pushers: &Pushers) -> MailboxRef {
}
fn build_procedure_manager(
- options: &MetaSrvOptions,
+ options: &MetasrvOptions,
kv_backend: &KvBackendRef,
) -> ProcedureManagerRef {
let manager_config = ManagerConfig {
@@ -391,7 +391,7 @@ fn build_procedure_manager(
}
fn build_ddl_manager(
- options: &MetaSrvOptions,
+ options: &MetasrvOptions,
datanode_clients: Option<DatanodeManagerRef>,
procedure_manager: &ProcedureManagerRef,
mailbox: &MailboxRef,
@@ -431,7 +431,7 @@ fn build_ddl_manager(
))
}
-impl Default for MetaSrvBuilder {
+impl Default for MetasrvBuilder {
fn default() -> Self {
Self::new()
}
diff --git a/src/meta-srv/src/mocks.rs b/src/meta-srv/src/mocks.rs
index 2228a2dc5a4b..53e22ce6d4af 100644
--- a/src/meta-srv/src/mocks.rs
+++ b/src/meta-srv/src/mocks.rs
@@ -26,14 +26,14 @@ use common_meta::kv_backend::memory::MemoryKvBackend;
use common_meta::kv_backend::KvBackendRef;
use tower::service_fn;
-use crate::metasrv::builder::MetaSrvBuilder;
-use crate::metasrv::{MetaSrv, MetaSrvOptions, SelectorRef};
+use crate::metasrv::builder::MetasrvBuilder;
+use crate::metasrv::{Metasrv, MetasrvOptions, SelectorRef};
#[derive(Clone)]
pub struct MockInfo {
pub server_addr: String,
pub channel_manager: ChannelManager,
- pub meta_srv: MetaSrv,
+ pub metasrv: Metasrv,
}
pub async fn mock_with_memstore() -> MockInfo {
@@ -52,7 +52,7 @@ pub async fn mock_with_memstore_and_selector(selector: SelectorRef) -> MockInfo
}
pub async fn mock(
- opts: MetaSrvOptions,
+ opts: MetasrvOptions,
kv_backend: KvBackendRef,
selector: Option<SelectorRef>,
datanode_clients: Option<Arc<DatanodeClients>>,
@@ -62,7 +62,7 @@ pub async fn mock(
table_metadata_manager.init().await.unwrap();
- let builder = MetaSrvBuilder::new().options(opts).kv_backend(kv_backend);
+ let builder = MetasrvBuilder::new().options(opts).kv_backend(kv_backend);
let builder = match selector {
Some(s) => builder.selector(s),
@@ -74,11 +74,11 @@ pub async fn mock(
None => builder,
};
- let meta_srv = builder.build().await.unwrap();
- meta_srv.try_start().await.unwrap();
+ let metasrv = builder.build().await.unwrap();
+ metasrv.try_start().await.unwrap();
let (client, server) = tokio::io::duplex(1024);
- let service = meta_srv.clone();
+ let service = metasrv.clone();
let _handle = tokio::spawn(async move {
tonic::transport::Server::builder()
.add_service(HeartbeatServer::new(service.clone()))
@@ -119,6 +119,6 @@ pub async fn mock(
MockInfo {
server_addr,
channel_manager,
- meta_srv,
+ metasrv,
}
}
diff --git a/src/meta-srv/src/service/admin.rs b/src/meta-srv/src/service/admin.rs
index bf376b1c8068..6e38fc22fc76 100644
--- a/src/meta-srv/src/service/admin.rs
+++ b/src/meta-srv/src/service/admin.rs
@@ -31,20 +31,20 @@ use tonic::body::BoxBody;
use tonic::codegen::{empty_body, http, BoxFuture, Service};
use tonic::transport::NamedService;
-use crate::metasrv::MetaSrv;
+use crate::metasrv::Metasrv;
-pub fn make_admin_service(meta_srv: MetaSrv) -> Admin {
+pub fn make_admin_service(metasrv: Metasrv) -> Admin {
let router = Router::new().route("/health", health::HealthHandler);
let router = router.route(
"/node-lease",
node_lease::NodeLeaseHandler {
- meta_peer_client: meta_srv.meta_peer_client().clone(),
+ meta_peer_client: metasrv.meta_peer_client().clone(),
},
);
let handler = heartbeat::HeartBeatHandler {
- meta_peer_client: meta_srv.meta_peer_client().clone(),
+ meta_peer_client: metasrv.meta_peer_client().clone(),
};
let router = router
.route("/heartbeat", handler.clone())
@@ -53,26 +53,26 @@ pub fn make_admin_service(meta_srv: MetaSrv) -> Admin {
let router = router.route(
"/catalogs",
meta::CatalogsHandler {
- table_metadata_manager: meta_srv.table_metadata_manager().clone(),
+ table_metadata_manager: metasrv.table_metadata_manager().clone(),
},
);
let handler = meta::SchemasHandler {
- table_metadata_manager: meta_srv.table_metadata_manager().clone(),
+ table_metadata_manager: metasrv.table_metadata_manager().clone(),
};
let router = router
.route("/schemas", handler.clone())
.route("/schemas/help", handler);
let handler = meta::TablesHandler {
- table_metadata_manager: meta_srv.table_metadata_manager().clone(),
+ table_metadata_manager: metasrv.table_metadata_manager().clone(),
};
let router = router
.route("/tables", handler.clone())
.route("/tables/help", handler);
let handler = meta::TableHandler {
- table_metadata_manager: meta_srv.table_metadata_manager().clone(),
+ table_metadata_manager: metasrv.table_metadata_manager().clone(),
};
let router = router
.route("/table", handler.clone())
@@ -81,27 +81,27 @@ pub fn make_admin_service(meta_srv: MetaSrv) -> Admin {
let router = router.route(
"/leader",
leader::LeaderHandler {
- election: meta_srv.election().cloned(),
+ election: metasrv.election().cloned(),
},
);
let handler = route::RouteHandler {
- table_metadata_manager: meta_srv.table_metadata_manager().clone(),
+ table_metadata_manager: metasrv.table_metadata_manager().clone(),
};
let router = router
.route("/route", handler.clone())
.route("/route/help", handler);
let handler = region_migration::SubmitRegionMigrationTaskHandler {
- region_migration_manager: meta_srv.region_migration_manager().clone(),
- meta_peer_client: meta_srv.meta_peer_client().clone(),
+ region_migration_manager: metasrv.region_migration_manager().clone(),
+ meta_peer_client: metasrv.meta_peer_client().clone(),
};
let router = router.route("/region-migration", handler);
let router = router.route(
"/maintenance",
maintenance::MaintenanceHandler {
- kv_backend: meta_srv.kv_backend().clone(),
+ kv_backend: metasrv.kv_backend().clone(),
},
);
let router = Router::nest("/admin", router);
diff --git a/src/meta-srv/src/service/cluster.rs b/src/meta-srv/src/service/cluster.rs
index 5b8c38aaca98..049b34269d26 100644
--- a/src/meta-srv/src/service/cluster.rs
+++ b/src/meta-srv/src/service/cluster.rs
@@ -21,11 +21,11 @@ use snafu::ResultExt;
use tonic::{Request, Response};
use crate::error;
-use crate::metasrv::MetaSrv;
+use crate::metasrv::Metasrv;
use crate::service::GrpcResult;
#[async_trait::async_trait]
-impl cluster_server::Cluster for MetaSrv {
+impl cluster_server::Cluster for Metasrv {
async fn batch_get(&self, req: Request<PbBatchGetRequest>) -> GrpcResult<PbBatchGetResponse> {
if !self.is_leader() {
let is_not_leader = ResponseHeader::failed(0, Error::is_not_leader());
@@ -73,7 +73,7 @@ impl cluster_server::Cluster for MetaSrv {
}
}
-impl MetaSrv {
+impl Metasrv {
pub fn is_leader(&self) -> bool {
self.election().map(|x| x.is_leader()).unwrap_or(false)
}
diff --git a/src/meta-srv/src/service/heartbeat.rs b/src/meta-srv/src/service/heartbeat.rs
index 598edf2ca765..542793b12874 100644
--- a/src/meta-srv/src/service/heartbeat.rs
+++ b/src/meta-srv/src/service/heartbeat.rs
@@ -30,11 +30,11 @@ use tonic::{Request, Response, Streaming};
use crate::error;
use crate::error::Result;
use crate::handler::Pusher;
-use crate::metasrv::{Context, MetaSrv};
+use crate::metasrv::{Context, Metasrv};
use crate::service::{GrpcResult, GrpcStream};
#[async_trait::async_trait]
-impl heartbeat_server::Heartbeat for MetaSrv {
+impl heartbeat_server::Heartbeat for Metasrv {
type HeartbeatStream = GrpcStream<HeartbeatResponse>;
async fn heartbeat(
@@ -179,13 +179,13 @@ mod tests {
use tonic::IntoRequest;
use super::get_node_id;
- use crate::metasrv::builder::MetaSrvBuilder;
+ use crate::metasrv::builder::MetasrvBuilder;
#[tokio::test]
async fn test_ask_leader() {
let kv_backend = Arc::new(MemoryKvBackend::new());
- let meta_srv = MetaSrvBuilder::new()
+ let metasrv = MetasrvBuilder::new()
.kv_backend(kv_backend)
.build()
.await
@@ -195,10 +195,10 @@ mod tests {
header: Some(RequestHeader::new((1, 1), Role::Datanode, W3cTrace::new())),
};
- let res = meta_srv.ask_leader(req.into_request()).await.unwrap();
+ let res = metasrv.ask_leader(req.into_request()).await.unwrap();
let res = res.into_inner();
assert_eq!(1, res.header.unwrap().cluster_id);
- assert_eq!(meta_srv.options().bind_addr, res.leader.unwrap().addr);
+ assert_eq!(metasrv.options().bind_addr, res.leader.unwrap().addr);
}
#[test]
diff --git a/src/meta-srv/src/service/lock.rs b/src/meta-srv/src/service/lock.rs
index 81f218027e07..4334bdfc3766 100644
--- a/src/meta-srv/src/service/lock.rs
+++ b/src/meta-srv/src/service/lock.rs
@@ -17,10 +17,10 @@ use tonic::{Request, Response};
use super::GrpcResult;
use crate::lock::Opts;
-use crate::metasrv::MetaSrv;
+use crate::metasrv::Metasrv;
#[async_trait::async_trait]
-impl lock_server::Lock for MetaSrv {
+impl lock_server::Lock for Metasrv {
async fn lock(&self, request: Request<LockRequest>) -> GrpcResult<LockResponse> {
let LockRequest {
name, expire_secs, ..
diff --git a/src/meta-srv/src/service/procedure.rs b/src/meta-srv/src/service/procedure.rs
index a45e538a3693..0a410875fac1 100644
--- a/src/meta-srv/src/service/procedure.rs
+++ b/src/meta-srv/src/service/procedure.rs
@@ -27,11 +27,11 @@ use tonic::{Request, Response};
use super::GrpcResult;
use crate::error;
-use crate::metasrv::MetaSrv;
+use crate::metasrv::Metasrv;
use crate::procedure::region_migration::manager::RegionMigrationProcedureTask;
#[async_trait::async_trait]
-impl procedure_service_server::ProcedureService for MetaSrv {
+impl procedure_service_server::ProcedureService for Metasrv {
async fn query(
&self,
request: Request<QueryProcedureRequest>,
diff --git a/src/meta-srv/src/service/store.rs b/src/meta-srv/src/service/store.rs
index 1a291ada1f25..f9a970bca731 100644
--- a/src/meta-srv/src/service/store.rs
+++ b/src/meta-srv/src/service/store.rs
@@ -32,12 +32,12 @@ use snafu::{OptionExt, ResultExt};
use tonic::{Request, Response};
use crate::error::{self, MissingRequestHeaderSnafu};
-use crate::metasrv::MetaSrv;
+use crate::metasrv::Metasrv;
use crate::metrics::METRIC_META_KV_REQUEST_ELAPSED;
use crate::service::GrpcResult;
#[async_trait::async_trait]
-impl store_server::Store for MetaSrv {
+impl store_server::Store for Metasrv {
async fn range(&self, req: Request<PbRangeRequest>) -> GrpcResult<PbRangeResponse> {
let req = req.into_inner();
@@ -260,11 +260,11 @@ mod tests {
use common_telemetry::tracing_context::W3cTrace;
use tonic::IntoRequest;
- use crate::metasrv::builder::MetaSrvBuilder;
- use crate::metasrv::MetaSrv;
+ use crate::metasrv::builder::MetasrvBuilder;
+ use crate::metasrv::Metasrv;
- async fn new_meta_srv() -> MetaSrv {
- MetaSrvBuilder::new()
+ async fn new_metasrv() -> Metasrv {
+ MetasrvBuilder::new()
.kv_backend(Arc::new(MemoryKvBackend::new()))
.build()
.await
@@ -273,77 +273,77 @@ mod tests {
#[tokio::test]
async fn test_range() {
- let meta_srv = new_meta_srv().await;
+ let metasrv = new_metasrv().await;
let mut req = RangeRequest::default();
req.set_header((1, 1), Role::Datanode, W3cTrace::new());
- let res = meta_srv.range(req.into_request()).await;
+ let res = metasrv.range(req.into_request()).await;
let _ = res.unwrap();
}
#[tokio::test]
async fn test_put() {
- let meta_srv = new_meta_srv().await;
+ let metasrv = new_metasrv().await;
let mut req = PutRequest::default();
req.set_header((1, 1), Role::Datanode, W3cTrace::new());
- let res = meta_srv.put(req.into_request()).await;
+ let res = metasrv.put(req.into_request()).await;
let _ = res.unwrap();
}
#[tokio::test]
async fn test_batch_get() {
- let meta_srv = new_meta_srv().await;
+ let metasrv = new_metasrv().await;
let mut req = BatchGetRequest::default();
req.set_header((1, 1), Role::Datanode, W3cTrace::new());
- let res = meta_srv.batch_get(req.into_request()).await;
+ let res = metasrv.batch_get(req.into_request()).await;
let _ = res.unwrap();
}
#[tokio::test]
async fn test_batch_put() {
- let meta_srv = new_meta_srv().await;
+ let metasrv = new_metasrv().await;
let mut req = BatchPutRequest::default();
req.set_header((1, 1), Role::Datanode, W3cTrace::new());
- let res = meta_srv.batch_put(req.into_request()).await;
+ let res = metasrv.batch_put(req.into_request()).await;
let _ = res.unwrap();
}
#[tokio::test]
async fn test_batch_delete() {
- let meta_srv = new_meta_srv().await;
+ let metasrv = new_metasrv().await;
let mut req = BatchDeleteRequest::default();
req.set_header((1, 1), Role::Datanode, W3cTrace::new());
- let res = meta_srv.batch_delete(req.into_request()).await;
+ let res = metasrv.batch_delete(req.into_request()).await;
let _ = res.unwrap();
}
#[tokio::test]
async fn test_compare_and_put() {
- let meta_srv = new_meta_srv().await;
+ let metasrv = new_metasrv().await;
let mut req = CompareAndPutRequest::default();
req.set_header((1, 1), Role::Datanode, W3cTrace::new());
- let res = meta_srv.compare_and_put(req.into_request()).await;
+ let res = metasrv.compare_and_put(req.into_request()).await;
let _ = res.unwrap();
}
#[tokio::test]
async fn test_delete_range() {
- let meta_srv = new_meta_srv().await;
+ let metasrv = new_metasrv().await;
let mut req = DeleteRangeRequest::default();
req.set_header((1, 1), Role::Datanode, W3cTrace::new());
- let res = meta_srv.delete_range(req.into_request()).await;
+ let res = metasrv.delete_range(req.into_request()).await;
let _ = res.unwrap();
}
diff --git a/src/plugins/src/lib.rs b/src/plugins/src/lib.rs
index a80ff8ff5f7e..f0be0bd76352 100644
--- a/src/plugins/src/lib.rs
+++ b/src/plugins/src/lib.rs
@@ -18,4 +18,4 @@ mod meta_srv;
pub use datanode::{setup_datanode_plugins, start_datanode_plugins};
pub use frontend::{setup_frontend_plugins, start_frontend_plugins};
-pub use meta_srv::{setup_meta_srv_plugins, start_meta_srv_plugins};
+pub use meta_srv::{setup_metasrv_plugins, start_metasrv_plugins};
diff --git a/src/plugins/src/meta_srv.rs b/src/plugins/src/meta_srv.rs
index 80ac6d8c7f41..2974494be56d 100644
--- a/src/plugins/src/meta_srv.rs
+++ b/src/plugins/src/meta_srv.rs
@@ -14,12 +14,12 @@
use common_base::Plugins;
use meta_srv::error::Result;
-use meta_srv::metasrv::MetaSrvOptions;
+use meta_srv::metasrv::MetasrvOptions;
-pub async fn setup_meta_srv_plugins(_opts: &mut MetaSrvOptions) -> Result<Plugins> {
+pub async fn setup_metasrv_plugins(_opts: &mut MetasrvOptions) -> Result<Plugins> {
Ok(Plugins::new())
}
-pub async fn start_meta_srv_plugins(_plugins: Plugins) -> Result<()> {
+pub async fn start_metasrv_plugins(_plugins: Plugins) -> Result<()> {
Ok(())
}
diff --git a/tests-integration/src/cluster.rs b/tests-integration/src/cluster.rs
index bc28e8e1305f..f4c3756ead4d 100644
--- a/tests-integration/src/cluster.rs
+++ b/tests-integration/src/cluster.rs
@@ -36,7 +36,7 @@ use common_meta::peer::Peer;
use common_meta::DatanodeId;
use common_runtime::Builder as RuntimeBuilder;
use common_test_util::temp_dir::create_temp_dir;
-use common_wal::config::{DatanodeWalConfig, MetaSrvWalConfig};
+use common_wal::config::{DatanodeWalConfig, MetasrvWalConfig};
use datanode::config::{DatanodeOptions, ObjectStoreConfig};
use datanode::datanode::{Datanode, DatanodeBuilder, ProcedureConfig};
use frontend::heartbeat::handler::invalidate_table_cache::InvalidateTableCacheHandler;
@@ -45,7 +45,7 @@ use frontend::instance::builder::FrontendBuilder;
use frontend::instance::{FrontendInstance, Instance as FeInstance};
use meta_client::client::MetaClientBuilder;
use meta_srv::cluster::MetaPeerClientRef;
-use meta_srv::metasrv::{MetaSrv, MetaSrvOptions, SelectorRef};
+use meta_srv::metasrv::{Metasrv, MetasrvOptions, SelectorRef};
use meta_srv::mocks::MockInfo;
use servers::grpc::flight::FlightCraftWrapper;
use servers::grpc::region_server::RegionServerRequestHandler;
@@ -68,7 +68,7 @@ pub struct GreptimeDbCluster {
pub datanode_instances: HashMap<DatanodeId, Datanode>,
pub kv_backend: KvBackendRef,
- pub meta_srv: MetaSrv,
+ pub metasrv: Metasrv,
pub frontend: Arc<FeInstance>,
}
@@ -79,7 +79,7 @@ pub struct GreptimeDbClusterBuilder {
store_providers: Option<Vec<StorageType>>,
datanodes: Option<u32>,
datanode_wal_config: DatanodeWalConfig,
- metasrv_wal_config: MetaSrvWalConfig,
+ metasrv_wal_config: MetasrvWalConfig,
shared_home_dir: Option<Arc<TempDir>>,
meta_selector: Option<SelectorRef>,
}
@@ -110,7 +110,7 @@ impl GreptimeDbClusterBuilder {
store_providers: None,
datanodes: None,
datanode_wal_config: DatanodeWalConfig::default(),
- metasrv_wal_config: MetaSrvWalConfig::default(),
+ metasrv_wal_config: MetasrvWalConfig::default(),
shared_home_dir: None,
meta_selector: None,
}
@@ -141,7 +141,7 @@ impl GreptimeDbClusterBuilder {
}
#[must_use]
- pub fn with_metasrv_wal_config(mut self, metasrv_wal_config: MetaSrvWalConfig) -> Self {
+ pub fn with_metasrv_wal_config(mut self, metasrv_wal_config: MetasrvWalConfig) -> Self {
self.metasrv_wal_config = metasrv_wal_config;
self
}
@@ -168,7 +168,7 @@ impl GreptimeDbClusterBuilder {
let channel_config = ChannelConfig::new().timeout(Duration::from_secs(20));
let datanode_clients = Arc::new(DatanodeClients::new(channel_config));
- let opt = MetaSrvOptions {
+ let opt = MetasrvOptions {
procedure: ProcedureConfig {
// Due to large network delay during cross data-center.
// We only make max_retry_times and retry_delay large than the default in tests.
@@ -180,7 +180,7 @@ impl GreptimeDbClusterBuilder {
..Default::default()
};
- let meta_srv = meta_srv::mocks::mock(
+ let metasrv = meta_srv::mocks::mock(
opt,
self.kv_backend.clone(),
self.meta_selector.clone(),
@@ -189,17 +189,15 @@ impl GreptimeDbClusterBuilder {
.await;
let datanode_instances = self
- .build_datanodes_with_options(&meta_srv, &datanode_options)
+ .build_datanodes_with_options(&metasrv, &datanode_options)
.await;
build_datanode_clients(datanode_clients.clone(), &datanode_instances, datanodes).await;
- self.wait_datanodes_alive(meta_srv.meta_srv.meta_peer_client(), datanodes)
+ self.wait_datanodes_alive(metasrv.metasrv.meta_peer_client(), datanodes)
.await;
- let frontend = self
- .build_frontend(meta_srv.clone(), datanode_clients)
- .await;
+ let frontend = self.build_frontend(metasrv.clone(), datanode_clients).await;
test_util::prepare_another_catalog_and_schema(frontend.as_ref()).await;
@@ -211,7 +209,7 @@ impl GreptimeDbClusterBuilder {
dir_guards,
datanode_instances,
kv_backend: self.kv_backend.clone(),
- meta_srv: meta_srv.meta_srv,
+ metasrv: metasrv.metasrv,
frontend,
}
}
@@ -280,13 +278,13 @@ impl GreptimeDbClusterBuilder {
async fn build_datanodes_with_options(
&self,
- meta_srv: &MockInfo,
+ metasrv: &MockInfo,
options: &[DatanodeOptions],
) -> HashMap<DatanodeId, Datanode> {
let mut instances = HashMap::with_capacity(options.len());
for opts in options {
- let datanode = self.create_datanode(opts.clone(), meta_srv.clone()).await;
+ let datanode = self.create_datanode(opts.clone(), metasrv.clone()).await;
instances.insert(opts.node_id.unwrap(), datanode);
}
@@ -312,14 +310,14 @@ impl GreptimeDbClusterBuilder {
panic!("Some Datanodes are not alive in 10 seconds!")
}
- async fn create_datanode(&self, opts: DatanodeOptions, meta_srv: MockInfo) -> Datanode {
+ async fn create_datanode(&self, opts: DatanodeOptions, metasrv: MockInfo) -> Datanode {
let mut meta_client = MetaClientBuilder::new(1000, opts.node_id.unwrap(), Role::Datanode)
.enable_router()
.enable_store()
.enable_heartbeat()
- .channel_manager(meta_srv.channel_manager)
+ .channel_manager(metasrv.channel_manager)
.build();
- meta_client.start(&[&meta_srv.server_addr]).await.unwrap();
+ meta_client.start(&[&metasrv.server_addr]).await.unwrap();
let meta_backend = Arc::new(MetaKvBackend {
client: Arc::new(meta_client.clone()),
@@ -339,18 +337,18 @@ impl GreptimeDbClusterBuilder {
async fn build_frontend(
&self,
- meta_srv: MockInfo,
+ metasrv: MockInfo,
datanode_clients: Arc<DatanodeClients>,
) -> Arc<FeInstance> {
let mut meta_client = MetaClientBuilder::new(1000, 0, Role::Frontend)
.enable_router()
.enable_store()
.enable_heartbeat()
- .channel_manager(meta_srv.channel_manager)
+ .channel_manager(metasrv.channel_manager)
.enable_procedure()
.enable_access_cluster_info()
.build();
- meta_client.start(&[&meta_srv.server_addr]).await.unwrap();
+ meta_client.start(&[&metasrv.server_addr]).await.unwrap();
let meta_client = Arc::new(meta_client);
let cached_meta_backend =
diff --git a/tests-integration/src/standalone.rs b/tests-integration/src/standalone.rs
index 6b4340db27ba..72b36dad5da1 100644
--- a/tests-integration/src/standalone.rs
+++ b/tests-integration/src/standalone.rs
@@ -30,7 +30,7 @@ use common_meta::wal_options_allocator::WalOptionsAllocator;
use common_procedure::options::ProcedureConfig;
use common_procedure::ProcedureManagerRef;
use common_telemetry::logging::LoggingOptions;
-use common_wal::config::{DatanodeWalConfig, MetaSrvWalConfig};
+use common_wal::config::{DatanodeWalConfig, MetasrvWalConfig};
use datanode::datanode::DatanodeBuilder;
use frontend::frontend::FrontendOptions;
use frontend::instance::builder::FrontendBuilder;
@@ -51,7 +51,7 @@ pub struct GreptimeDbStandalone {
pub struct GreptimeDbStandaloneBuilder {
instance_name: String,
datanode_wal_config: DatanodeWalConfig,
- metasrv_wal_config: MetaSrvWalConfig,
+ metasrv_wal_config: MetasrvWalConfig,
store_providers: Option<Vec<StorageType>>,
default_store: Option<StorageType>,
plugin: Option<Plugins>,
@@ -65,7 +65,7 @@ impl GreptimeDbStandaloneBuilder {
plugin: None,
default_store: None,
datanode_wal_config: DatanodeWalConfig::default(),
- metasrv_wal_config: MetaSrvWalConfig::default(),
+ metasrv_wal_config: MetasrvWalConfig::default(),
}
}
@@ -102,7 +102,7 @@ impl GreptimeDbStandaloneBuilder {
}
#[must_use]
- pub fn with_metasrv_wal_config(mut self, metasrv_wal_config: MetaSrvWalConfig) -> Self {
+ pub fn with_metasrv_wal_config(mut self, metasrv_wal_config: MetasrvWalConfig) -> Self {
self.metasrv_wal_config = metasrv_wal_config;
self
}
diff --git a/tests-integration/src/tests.rs b/tests-integration/src/tests.rs
index 22c0d591de9a..65f91e7f5e23 100644
--- a/tests-integration/src/tests.rs
+++ b/tests-integration/src/tests.rs
@@ -38,7 +38,7 @@ impl MockDistributedInstance {
}
pub fn table_metadata_manager(&self) -> &TableMetadataManagerRef {
- self.0.meta_srv.table_metadata_manager()
+ self.0.metasrv.table_metadata_manager()
}
}
diff --git a/tests-integration/src/tests/test_util.rs b/tests-integration/src/tests/test_util.rs
index 43f3981fee0b..01ca50f29f50 100644
--- a/tests-integration/src/tests/test_util.rs
+++ b/tests-integration/src/tests/test_util.rs
@@ -20,8 +20,8 @@ use common_query::Output;
use common_recordbatch::util;
use common_telemetry::warn;
use common_test_util::find_workspace_path;
-use common_wal::config::kafka::{DatanodeKafkaConfig, MetaSrvKafkaConfig};
-use common_wal::config::{DatanodeWalConfig, MetaSrvWalConfig};
+use common_wal::config::kafka::{DatanodeKafkaConfig, MetasrvKafkaConfig};
+use common_wal::config::{DatanodeWalConfig, MetasrvWalConfig};
use frontend::instance::Instance;
use rstest_reuse::{self, template};
@@ -227,7 +227,7 @@ pub(crate) async fn standalone_with_kafka_wal() -> Option<Box<dyn RebuildableMoc
broker_endpoints: endpoints.clone(),
..Default::default()
}))
- .with_metasrv_wal_config(MetaSrvWalConfig::Kafka(MetaSrvKafkaConfig {
+ .with_metasrv_wal_config(MetasrvWalConfig::Kafka(MetasrvKafkaConfig {
broker_endpoints: endpoints,
topic_name_prefix: test_name.to_string(),
num_topics: 3,
@@ -257,7 +257,7 @@ pub(crate) async fn distributed_with_kafka_wal() -> Option<Box<dyn RebuildableMo
broker_endpoints: endpoints.clone(),
..Default::default()
}))
- .with_metasrv_wal_config(MetaSrvWalConfig::Kafka(MetaSrvKafkaConfig {
+ .with_metasrv_wal_config(MetasrvWalConfig::Kafka(MetasrvKafkaConfig {
broker_endpoints: endpoints,
topic_name_prefix: test_name.to_string(),
num_topics: 3,
diff --git a/tests-integration/tests/region_failover.rs b/tests-integration/tests/region_failover.rs
index e0d82f658704..8ca53a6f463a 100644
--- a/tests-integration/tests/region_failover.rs
+++ b/tests-integration/tests/region_failover.rs
@@ -258,7 +258,7 @@ async fn find_region_distribution(
cluster: &GreptimeDbCluster,
table_id: TableId,
) -> RegionDistribution {
- let manager = cluster.meta_srv.table_metadata_manager();
+ let manager = cluster.metasrv.table_metadata_manager();
let region_distribution = manager
.table_route_manager()
.get_region_distribution(table_id)
@@ -343,27 +343,27 @@ async fn run_region_failover_procedure(
failed_region: RegionIdent,
selector: SelectorRef,
) {
- let meta_srv = &cluster.meta_srv;
- let procedure_manager = meta_srv.procedure_manager();
+ let metasrv = &cluster.metasrv;
+ let procedure_manager = metasrv.procedure_manager();
let procedure = RegionFailoverProcedure::new(
"greptime".into(),
"public".into(),
failed_region.clone(),
RegionFailoverContext {
region_lease_secs: 10,
- in_memory: meta_srv.in_memory().clone(),
- kv_backend: meta_srv.kv_backend().clone(),
- mailbox: meta_srv.mailbox().clone(),
+ in_memory: metasrv.in_memory().clone(),
+ kv_backend: metasrv.kv_backend().clone(),
+ mailbox: metasrv.mailbox().clone(),
selector,
selector_ctx: SelectorContext {
datanode_lease_secs: distributed_time_constants::REGION_LEASE_SECS,
- server_addr: meta_srv.options().server_addr.clone(),
- kv_backend: meta_srv.kv_backend().clone(),
- meta_peer_client: meta_srv.meta_peer_client().clone(),
+ server_addr: metasrv.options().server_addr.clone(),
+ kv_backend: metasrv.kv_backend().clone(),
+ meta_peer_client: metasrv.meta_peer_client().clone(),
table_id: None,
},
- dist_lock: meta_srv.lock().clone(),
- table_metadata_manager: meta_srv.table_metadata_manager().clone(),
+ dist_lock: metasrv.lock().clone(),
+ table_metadata_manager: metasrv.table_metadata_manager().clone(),
},
);
let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));
diff --git a/tests-integration/tests/region_migration.rs b/tests-integration/tests/region_migration.rs
index 79f7a3d38a68..1ceb249978a2 100644
--- a/tests-integration/tests/region_migration.rs
+++ b/tests-integration/tests/region_migration.rs
@@ -23,8 +23,8 @@ use common_recordbatch::RecordBatches;
use common_telemetry::info;
use common_test_util::recordbatch::check_output_stream;
use common_test_util::temp_dir::create_temp_dir;
-use common_wal::config::kafka::{DatanodeKafkaConfig, MetaSrvKafkaConfig};
-use common_wal::config::{DatanodeWalConfig, MetaSrvWalConfig};
+use common_wal::config::kafka::{DatanodeKafkaConfig, MetasrvKafkaConfig};
+use common_wal::config::{DatanodeWalConfig, MetasrvWalConfig};
use datatypes::prelude::ScalarVector;
use datatypes::value::Value;
use datatypes::vectors::{Helper, UInt64Vector};
@@ -116,7 +116,7 @@ pub async fn test_region_migration(store_type: StorageType, endpoints: Vec<Strin
linger: Duration::from_millis(25),
..Default::default()
}))
- .with_metasrv_wal_config(MetaSrvWalConfig::Kafka(MetaSrvKafkaConfig {
+ .with_metasrv_wal_config(MetasrvWalConfig::Kafka(MetasrvKafkaConfig {
broker_endpoints: endpoints,
num_topics: 3,
topic_name_prefix: Uuid::new_v4().to_string(),
@@ -127,7 +127,7 @@ pub async fn test_region_migration(store_type: StorageType, endpoints: Vec<Strin
.build()
.await;
let mut logical_timer = 1685508715000;
- let table_metadata_manager = cluster.meta_srv.table_metadata_manager().clone();
+ let table_metadata_manager = cluster.metasrv.table_metadata_manager().clone();
// Prepares test table.
let table_id = prepare_testing_table(&cluster).await;
@@ -143,7 +143,7 @@ pub async fn test_region_migration(store_type: StorageType, endpoints: Vec<Strin
let mut distribution = find_region_distribution(&table_metadata_manager, table_id).await;
// Selecting target of region migration.
- let region_migration_manager = cluster.meta_srv.region_migration_manager();
+ let region_migration_manager = cluster.metasrv.region_migration_manager();
let (from_peer_id, from_regions) = distribution.pop_first().unwrap();
info!(
"Selecting from peer: {from_peer_id}, and regions: {:?}",
@@ -243,7 +243,7 @@ pub async fn test_region_migration_by_sql(store_type: StorageType, endpoints: Ve
linger: Duration::from_millis(25),
..Default::default()
}))
- .with_metasrv_wal_config(MetaSrvWalConfig::Kafka(MetaSrvKafkaConfig {
+ .with_metasrv_wal_config(MetasrvWalConfig::Kafka(MetasrvKafkaConfig {
broker_endpoints: endpoints,
num_topics: 3,
topic_name_prefix: Uuid::new_v4().to_string(),
@@ -271,7 +271,7 @@ pub async fn test_region_migration_by_sql(store_type: StorageType, endpoints: Ve
let old_distribution = distribution.clone();
// Selecting target of region migration.
- let region_migration_manager = cluster.meta_srv.region_migration_manager();
+ let region_migration_manager = cluster.metasrv.region_migration_manager();
let (from_peer_id, from_regions) = distribution.pop_first().unwrap();
info!(
"Selecting from peer: {from_peer_id}, and regions: {:?}",
@@ -365,7 +365,7 @@ pub async fn test_region_migration_multiple_regions(
linger: Duration::from_millis(25),
..Default::default()
}))
- .with_metasrv_wal_config(MetaSrvWalConfig::Kafka(MetaSrvKafkaConfig {
+ .with_metasrv_wal_config(MetasrvWalConfig::Kafka(MetasrvKafkaConfig {
broker_endpoints: endpoints,
num_topics: 3,
topic_name_prefix: Uuid::new_v4().to_string(),
@@ -376,7 +376,7 @@ pub async fn test_region_migration_multiple_regions(
.build()
.await;
let mut logical_timer = 1685508715000;
- let table_metadata_manager = cluster.meta_srv.table_metadata_manager().clone();
+ let table_metadata_manager = cluster.metasrv.table_metadata_manager().clone();
// Prepares test table.
let table_id = prepare_testing_table(&cluster).await;
@@ -393,7 +393,7 @@ pub async fn test_region_migration_multiple_regions(
assert_eq!(distribution.len(), 2);
// Selecting target of region migration.
- let region_migration_manager = cluster.meta_srv.region_migration_manager();
+ let region_migration_manager = cluster.metasrv.region_migration_manager();
let (peer_1, peer_1_regions) = distribution.pop_first().unwrap();
let (peer_2, peer_2_regions) = distribution.pop_first().unwrap();
@@ -502,7 +502,7 @@ pub async fn test_region_migration_all_regions(store_type: StorageType, endpoint
linger: Duration::from_millis(25),
..Default::default()
}))
- .with_metasrv_wal_config(MetaSrvWalConfig::Kafka(MetaSrvKafkaConfig {
+ .with_metasrv_wal_config(MetasrvWalConfig::Kafka(MetasrvKafkaConfig {
broker_endpoints: endpoints,
num_topics: 3,
topic_name_prefix: Uuid::new_v4().to_string(),
@@ -513,7 +513,7 @@ pub async fn test_region_migration_all_regions(store_type: StorageType, endpoint
.build()
.await;
let mut logical_timer = 1685508715000;
- let table_metadata_manager = cluster.meta_srv.table_metadata_manager().clone();
+ let table_metadata_manager = cluster.metasrv.table_metadata_manager().clone();
// Prepares test table.
let table_id = prepare_testing_table(&cluster).await;
@@ -530,7 +530,7 @@ pub async fn test_region_migration_all_regions(store_type: StorageType, endpoint
assert_eq!(distribution.len(), 1);
// Selecting target of region migration.
- let region_migration_manager = cluster.meta_srv.region_migration_manager();
+ let region_migration_manager = cluster.metasrv.region_migration_manager();
let (from_peer_id, mut from_regions) = distribution.pop_first().unwrap();
let to_peer_id = 1;
let mut to_regions = Vec::new();
@@ -634,7 +634,7 @@ pub async fn test_region_migration_incorrect_from_peer(
linger: Duration::from_millis(25),
..Default::default()
}))
- .with_metasrv_wal_config(MetaSrvWalConfig::Kafka(MetaSrvKafkaConfig {
+ .with_metasrv_wal_config(MetasrvWalConfig::Kafka(MetasrvKafkaConfig {
broker_endpoints: endpoints,
num_topics: 3,
topic_name_prefix: Uuid::new_v4().to_string(),
@@ -645,7 +645,7 @@ pub async fn test_region_migration_incorrect_from_peer(
.build()
.await;
let logical_timer = 1685508715000;
- let table_metadata_manager = cluster.meta_srv.table_metadata_manager().clone();
+ let table_metadata_manager = cluster.metasrv.table_metadata_manager().clone();
// Prepares test table.
let table_id = prepare_testing_table(&cluster).await;
@@ -659,7 +659,7 @@ pub async fn test_region_migration_incorrect_from_peer(
// The region distribution
let distribution = find_region_distribution(&table_metadata_manager, table_id).await;
assert_eq!(distribution.len(), 3);
- let region_migration_manager = cluster.meta_srv.region_migration_manager();
+ let region_migration_manager = cluster.metasrv.region_migration_manager();
let region_id = RegionId::new(table_id, 1);
@@ -709,7 +709,7 @@ pub async fn test_region_migration_incorrect_region_id(
linger: Duration::from_millis(25),
..Default::default()
}))
- .with_metasrv_wal_config(MetaSrvWalConfig::Kafka(MetaSrvKafkaConfig {
+ .with_metasrv_wal_config(MetasrvWalConfig::Kafka(MetasrvKafkaConfig {
broker_endpoints: endpoints,
num_topics: 3,
topic_name_prefix: Uuid::new_v4().to_string(),
@@ -720,7 +720,7 @@ pub async fn test_region_migration_incorrect_region_id(
.build()
.await;
let logical_timer = 1685508715000;
- let table_metadata_manager = cluster.meta_srv.table_metadata_manager().clone();
+ let table_metadata_manager = cluster.metasrv.table_metadata_manager().clone();
// Prepares test table.
let table_id = prepare_testing_table(&cluster).await;
@@ -734,7 +734,7 @@ pub async fn test_region_migration_incorrect_region_id(
// The region distribution
let distribution = find_region_distribution(&table_metadata_manager, table_id).await;
assert_eq!(distribution.len(), 3);
- let region_migration_manager = cluster.meta_srv.region_migration_manager();
+ let region_migration_manager = cluster.metasrv.region_migration_manager();
let region_id = RegionId::new(table_id, 5);
| chore | unify name metasrv (#3671) |
| 9c1f0234de54d976ddaf5de426218293c6f687b8 | 2023-07-25 11:41:34 | shuiyisong | refactor: query context (#2022) | false |
diff --git a/Cargo.lock b/Cargo.lock
index 9adb2e139bbb..51dd95a12273 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -8874,6 +8874,7 @@ dependencies = [
"common-catalog",
"common-telemetry",
"common-time",
+ "derive_builder 0.12.0",
"sql",
]
@@ -9436,7 +9437,7 @@ dependencies = [
"common-recordbatch",
"common-time",
"datatypes",
- "derive_builder 0.11.2",
+ "derive_builder 0.12.0",
"futures",
"serde",
"serde_json",
@@ -9714,7 +9715,7 @@ dependencies = [
"datafusion-expr",
"datafusion-physical-expr",
"datatypes",
- "derive_builder 0.11.2",
+ "derive_builder 0.12.0",
"futures",
"humantime",
"humantime-serde",
diff --git a/Cargo.toml b/Cargo.toml
index dab2da2303fd..6fa7e5ce5068 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -71,6 +71,7 @@ datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git
datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323" }
datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323" }
datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323" }
+derive_builder = "0.12"
etcd-client = "0.11"
futures = "0.3"
futures-util = "0.3"
diff --git a/src/cmd/src/cli/repl.rs b/src/cmd/src/cli/repl.rs
index 470cbc8181bc..ce8e0fcc3aa8 100644
--- a/src/cmd/src/cli/repl.rs
+++ b/src/cmd/src/cli/repl.rs
@@ -165,10 +165,7 @@ impl Repl {
let stmt = QueryLanguageParser::parse_sql(&sql)
.with_context(|_| ParseSqlSnafu { sql: sql.clone() })?;
- let query_ctx = Arc::new(QueryContext::with(
- self.database.catalog(),
- self.database.schema(),
- ));
+ let query_ctx = QueryContext::with(self.database.catalog(), self.database.schema());
let plan = query_engine
.planner()
diff --git a/src/common/datasource/Cargo.toml b/src/common/datasource/Cargo.toml
index f99f4a624c8d..d96670a46f46 100644
--- a/src/common/datasource/Cargo.toml
+++ b/src/common/datasource/Cargo.toml
@@ -21,7 +21,7 @@ common-base = { path = "../base" }
common-error = { path = "../error" }
common-runtime = { path = "../runtime" }
datafusion.workspace = true
-derive_builder = "0.12"
+derive_builder.workspace = true
futures.workspace = true
object-store = { path = "../../object-store" }
orc-rust = "0.2"
diff --git a/src/datanode/src/instance/sql.rs b/src/datanode/src/instance/sql.rs
index 809bf2462ba6..1118bae3436e 100644
--- a/src/datanode/src/instance/sql.rs
+++ b/src/datanode/src/instance/sql.rs
@@ -287,7 +287,6 @@ impl SqlStatementExecutor for Instance {
#[cfg(test)]
mod test {
- use std::sync::Arc;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use session::context::QueryContext;
@@ -305,8 +304,8 @@ mod test {
let bare = ObjectName(vec![my_table.into()]);
let using_schema = "foo";
- let query_ctx = Arc::new(QueryContext::with(DEFAULT_CATALOG_NAME, using_schema));
- let empty_ctx = Arc::new(QueryContext::new());
+ let query_ctx = QueryContext::with(DEFAULT_CATALOG_NAME, using_schema);
+ let empty_ctx = QueryContext::arc();
assert_eq!(
table_idents_to_full_name(&full, query_ctx.clone()).unwrap(),
diff --git a/src/frontend/src/expr_factory.rs b/src/frontend/src/expr_factory.rs
index af12ab745330..cf528578b588 100644
--- a/src/frontend/src/expr_factory.rs
+++ b/src/frontend/src/expr_factory.rs
@@ -342,7 +342,7 @@ mod tests {
.unwrap();
let Statement::CreateTable(create_table) = stmt else { unreachable!() };
- let expr = create_to_expr(&create_table, Arc::new(QueryContext::default())).unwrap();
+ let expr = create_to_expr(&create_table, QueryContext::arc()).unwrap();
assert_eq!("3days", expr.table_options.get("ttl").unwrap());
assert_eq!(
"1.0MiB",
diff --git a/src/frontend/src/instance.rs b/src/frontend/src/instance.rs
index 4bd51c51a97b..8dc34ae89f91 100644
--- a/src/frontend/src/instance.rs
+++ b/src/frontend/src/instance.rs
@@ -623,7 +623,7 @@ pub fn check_permission(
// These are executed by query engine, and will be checked there.
Statement::Query(_) | Statement::Explain(_) | Statement::Tql(_) | Statement::Delete(_) => {}
// database ops won't be checked
- Statement::CreateDatabase(_) | Statement::ShowDatabases(_) | Statement::Use(_) => {}
+ Statement::CreateDatabase(_) | Statement::ShowDatabases(_) => {}
// show create table and alter are not supported yet
Statement::ShowCreateTable(_) | Statement::CreateExternalTable(_) | Statement::Alter(_) => {
}
@@ -777,7 +777,7 @@ mod tests {
#[test]
fn test_exec_validation() {
- let query_ctx = Arc::new(QueryContext::new());
+ let query_ctx = QueryContext::arc();
let plugins = Plugins::new();
plugins.insert(QueryOptions {
disallow_cross_schema_query: true,
@@ -808,11 +808,6 @@ mod tests {
re.unwrap();
}
- let sql = "USE randomschema";
- let stmts = parse_stmt(sql, &GreptimeDbDialect {}).unwrap();
- let re = check_permission(plugins.clone(), &stmts[0], &query_ctx);
- re.unwrap();
-
fn replace_test(template_sql: &str, plugins: Arc<Plugins>, query_ctx: &QueryContextRef) {
// test right
let right = vec![("", ""), ("", "public."), ("greptime.", "public.")];
diff --git a/src/frontend/src/statement.rs b/src/frontend/src/statement.rs
index fea6fdf3a792..56817a501aae 100644
--- a/src/frontend/src/statement.rs
+++ b/src/frontend/src/statement.rs
@@ -25,7 +25,6 @@ use std::str::FromStr;
use catalog::CatalogManagerRef;
use common_error::ext::BoxedError;
use common_query::Output;
-use common_recordbatch::RecordBatches;
use common_time::range::TimestampRange;
use common_time::Timestamp;
use datanode::instance::sql::{idents_to_full_database_name, table_idents_to_full_name};
@@ -33,7 +32,7 @@ use query::parser::QueryStatement;
use query::query_engine::SqlStatementExecutorRef;
use query::QueryEngineRef;
use session::context::QueryContextRef;
-use snafu::{ensure, OptionExt, ResultExt};
+use snafu::{OptionExt, ResultExt};
use sql::statements::copy::{CopyDatabaseArgument, CopyTable, CopyTableArgument};
use sql::statements::statement::Statement;
use table::engine::TableReference;
@@ -43,7 +42,7 @@ use table::TableRef;
use crate::error;
use crate::error::{
CatalogSnafu, ExecLogicalPlanSnafu, ExecuteStatementSnafu, ExternalSnafu, PlanStatementSnafu,
- Result, SchemaNotFoundSnafu, TableNotFoundSnafu,
+ Result, TableNotFoundSnafu,
};
use crate::statement::backup::{COPY_DATABASE_TIME_END_KEY, COPY_DATABASE_TIME_START_KEY};
@@ -102,8 +101,6 @@ impl StatementExecutor {
Statement::DescribeTable(stmt) => self.describe_table(stmt, query_ctx).await,
- Statement::Use(db) => self.handle_use(db, query_ctx).await,
-
Statement::ShowDatabases(stmt) => self.show_databases(stmt, query_ctx).await,
Statement::ShowTables(stmt) => self.show_tables(stmt, query_ctx).await,
@@ -151,21 +148,6 @@ impl StatementExecutor {
.context(ExecLogicalPlanSnafu)
}
- async fn handle_use(&self, db: String, query_ctx: QueryContextRef) -> Result<Output> {
- let catalog = &query_ctx.current_catalog();
- ensure!(
- self.catalog_manager
- .schema_exist(catalog, &db)
- .await
- .context(CatalogSnafu)?,
- SchemaNotFoundSnafu { schema_info: &db }
- );
-
- query_ctx.set_current_schema(&db);
-
- Ok(Output::RecordBatches(RecordBatches::empty()))
- }
-
async fn get_table(&self, table_ref: &TableReference<'_>) -> Result<TableRef> {
let TableReference {
catalog,
diff --git a/src/meta-srv/Cargo.toml b/src/meta-srv/Cargo.toml
index 883840e6b761..5e39e941405f 100644
--- a/src/meta-srv/Cargo.toml
+++ b/src/meta-srv/Cargo.toml
@@ -26,7 +26,7 @@ common-telemetry = { path = "../common/telemetry" }
common-time = { path = "../common/time" }
dashmap = "5.4"
datatypes = { path = "../datatypes" }
-derive_builder = "0.12"
+derive_builder.workspace = true
etcd-client.workspace = true
futures.workspace = true
h2 = "0.3"
diff --git a/src/promql/src/planner.rs b/src/promql/src/planner.rs
index f79bf95debdf..28318b66dfc3 100644
--- a/src/promql/src/planner.rs
+++ b/src/promql/src/planner.rs
@@ -1375,7 +1375,7 @@ mod test {
})
.await
.is_ok());
- DfTableSourceProvider::new(catalog_list, false, &QueryContext::new())
+ DfTableSourceProvider::new(catalog_list, false, QueryContext::arc().as_ref())
}
// {
diff --git a/src/query/Cargo.toml b/src/query/Cargo.toml
index 3a4b3eb623a9..b4dc6b85ffd8 100644
--- a/src/query/Cargo.toml
+++ b/src/query/Cargo.toml
@@ -59,6 +59,7 @@ num = "0.4"
num-traits = "0.2"
paste = "1.0"
rand.workspace = true
+session = { path = "../session", features = ["testing"] }
statrs = "0.16"
stats-cli = "3.0"
store-api = { path = "../store-api" }
diff --git a/src/query/src/query_engine/options.rs b/src/query/src/query_engine/options.rs
index 8cb411ca2569..6b1f3d9b7bf4 100644
--- a/src/query/src/query_engine/options.rs
+++ b/src/query/src/query_engine/options.rs
@@ -47,7 +47,6 @@ pub fn validate_catalog_and_schema(
#[cfg(test)]
mod tests {
- use std::sync::Arc;
use session::context::QueryContext;
@@ -55,7 +54,7 @@ mod tests {
#[test]
fn test_validate_catalog_and_schema() {
- let context = Arc::new(QueryContext::with("greptime", "public"));
+ let context = QueryContext::with("greptime", "public");
validate_catalog_and_schema("greptime", "public", &context).unwrap();
let re = validate_catalog_and_schema("greptime", "wrong_schema", &context);
diff --git a/src/script/src/python/ffi_types/copr.rs b/src/script/src/python/ffi_types/copr.rs
index 8f2629f15232..8df5e8994d5d 100644
--- a/src/script/src/python/ffi_types/copr.rs
+++ b/src/script/src/python/ffi_types/copr.rs
@@ -34,7 +34,7 @@ use rustpython_compiler_core::CodeObject;
use rustpython_vm as vm;
#[cfg(test)]
use serde::Deserialize;
-use session::context::QueryContext;
+use session::context::{QueryContext, QueryContextBuilder};
use snafu::{OptionExt, ResultExt};
use vm::convert::ToPyObject;
use vm::{pyclass as rspyclass, PyObjectRef, PyPayload, PyResult, VirtualMachine};
@@ -381,7 +381,7 @@ impl PyQueryEngine {
let res = handle.block_on(async {
let plan = engine
.planner()
- .plan(stmt, Default::default())
+ .plan(stmt, QueryContextBuilder::default().build())
.await
.map_err(|e| e.to_string())?;
let res = engine
diff --git a/src/servers/Cargo.toml b/src/servers/Cargo.toml
index a4f41eba5f39..857423660765 100644
--- a/src/servers/Cargo.toml
+++ b/src/servers/Cargo.toml
@@ -37,7 +37,7 @@ datafusion-common.workspace = true
datafusion-expr.workspace = true
datatypes = { path = "../datatypes" }
-derive_builder = "0.12"
+derive_builder.workspace = true
digest = "0.10"
futures = "0.3"
headers = "0.3"
@@ -103,6 +103,7 @@ rand.workspace = true
rustls = { version = "0.21", features = ["dangerous_configuration"] }
script = { path = "../script", features = ["python"] }
serde_json = "1.0"
+session = { path = "../session", features = ["testing"] }
table = { path = "../table" }
tokio-postgres = "0.7"
tokio-postgres-rustls = "0.10"
diff --git a/src/servers/src/grpc/handler.rs b/src/servers/src/grpc/handler.rs
index c4baa911c7f2..097294bcd541 100644
--- a/src/servers/src/grpc/handler.rs
+++ b/src/servers/src/grpc/handler.rs
@@ -168,12 +168,11 @@ pub(crate) fn create_query_context(header: Option<&RequestHeader>) -> QueryConte
})
.unwrap_or((DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME));
- QueryContextBuilder::new()
- .catalog(catalog.to_string())
- .schema(schema.to_string())
+ QueryContextBuilder::default()
+ .current_catalog(catalog.to_string())
+ .current_schema(schema.to_string())
.try_trace_id(header.and_then(|h: &RequestHeader| h.trace_id))
.build()
- .to_arc()
}
/// Histogram timer for handling gRPC request.
diff --git a/src/servers/src/http.rs b/src/servers/src/http.rs
index f84fcc4ee868..ed77848d13ac 100644
--- a/src/servers/src/http.rs
+++ b/src/servers/src/http.rs
@@ -90,7 +90,7 @@ pub(crate) async fn query_context_from_db(
let (catalog, schema) = super::parse_catalog_and_schema_from_client_database_name(db);
match query_handler.is_valid_schema(catalog, schema).await {
- Ok(true) => Ok(Arc::new(QueryContext::with(catalog, schema))),
+ Ok(true) => Ok(QueryContext::with(catalog, schema)),
Ok(false) => Err(JsonResponse::with_error(
format!("Database not found: {db}"),
StatusCode::DatabaseNotFound,
diff --git a/src/servers/src/http/influxdb.rs b/src/servers/src/http/influxdb.rs
index 51fcf1b9b68a..df5131188ab5 100644
--- a/src/servers/src/http/influxdb.rs
+++ b/src/servers/src/http/influxdb.rs
@@ -13,7 +13,6 @@
// limitations under the License.
use std::collections::HashMap;
-use std::sync::Arc;
use axum::extract::{Query, State};
use axum::http::StatusCode;
@@ -88,7 +87,7 @@ pub async fn influxdb_write(
);
let (catalog, schema) = parse_catalog_and_schema_from_client_database_name(db);
- let ctx = Arc::new(QueryContext::with(catalog, schema));
+ let ctx = QueryContext::with(catalog, schema);
let request = InfluxdbRequest { precision, lines };
diff --git a/src/servers/src/http/opentsdb.rs b/src/servers/src/http/opentsdb.rs
index 566253dfb9ff..c4f81030645e 100644
--- a/src/servers/src/http/opentsdb.rs
+++ b/src/servers/src/http/opentsdb.rs
@@ -13,7 +13,6 @@
// limitations under the License.
use std::collections::HashMap;
-use std::sync::Arc;
use axum::extract::{Query, RawBody, State};
use axum::http::StatusCode as HttpStatusCode;
@@ -91,7 +90,7 @@ pub async fn put(
.unwrap_or(DEFAULT_SCHEMA_NAME);
let (catalog, schema) = parse_catalog_and_schema_from_client_database_name(db);
- let ctx = Arc::new(QueryContext::with(catalog, schema));
+ let ctx = QueryContext::with(catalog, schema);
let data_points = parse_data_points(body).await?;
diff --git a/src/servers/src/http/otlp.rs b/src/servers/src/http/otlp.rs
index 5ddc34806c55..05d29951a2c0 100644
--- a/src/servers/src/http/otlp.rs
+++ b/src/servers/src/http/otlp.rs
@@ -12,8 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::sync::Arc;
-
use axum::extract::{RawBody, State};
use axum::http::header;
use axum::response::IntoResponse;
@@ -40,7 +38,7 @@ pub async fn metrics(
) -> Result<OtlpResponse> {
let ctx = if let Some(db) = db.value() {
let (catalog, schema) = parse_catalog_and_schema_from_client_database_name(db);
- Arc::new(QueryContext::with(catalog, schema))
+ QueryContext::with(catalog, schema)
} else {
QueryContext::arc()
};
diff --git a/src/servers/src/http/prom_store.rs b/src/servers/src/http/prom_store.rs
index c050124b1093..22ab0c5f5da2 100644
--- a/src/servers/src/http/prom_store.rs
+++ b/src/servers/src/http/prom_store.rs
@@ -12,8 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::sync::Arc;
-
use api::prom_store::remote::{ReadRequest, WriteRequest};
use axum::extract::{Query, RawBody, State};
use axum::http::{header, StatusCode};
@@ -62,7 +60,7 @@ pub async fn remote_write(
);
let ctx = if let Some(db) = params.db {
let (catalog, schema) = parse_catalog_and_schema_from_client_database_name(&db);
- Arc::new(QueryContext::with(catalog, schema))
+ QueryContext::with(catalog, schema)
} else {
QueryContext::arc()
};
@@ -102,7 +100,7 @@ pub async fn remote_read(
);
let ctx = if let Some(db) = params.db {
let (catalog, schema) = parse_catalog_and_schema_from_client_database_name(&db);
- Arc::new(QueryContext::with(catalog, schema))
+ QueryContext::with(catalog, schema)
} else {
QueryContext::arc()
};
diff --git a/src/servers/src/mysql/federated.rs b/src/servers/src/mysql/federated.rs
index f6c0909e119e..2ed84d54d842 100644
--- a/src/servers/src/mysql/federated.rs
+++ b/src/servers/src/mysql/federated.rs
@@ -333,15 +333,15 @@ mod test {
#[test]
fn test_check() {
let query = "select 1";
- let result = check(query, Arc::new(QueryContext::new()));
+ let result = check(query, QueryContext::arc());
assert!(result.is_none());
let query = "select versiona";
- let output = check(query, Arc::new(QueryContext::new()));
+ let output = check(query, QueryContext::arc());
assert!(output.is_none());
fn test(query: &str, expected: &str) {
- let output = check(query, Arc::new(QueryContext::new()));
+ let output = check(query, QueryContext::arc());
match output.unwrap() {
Output::RecordBatches(r) => {
assert_eq!(&r.pretty_print().unwrap(), expected)
@@ -428,7 +428,7 @@ mod test {
#[test]
fn test_set_time_zone() {
- let query_context = Arc::new(QueryContext::new());
+ let query_context = QueryContext::arc();
let output = check("set time_zone = 'UTC'", query_context.clone());
match output.unwrap() {
Output::AffectedRows(rows) => {
diff --git a/src/servers/src/opentsdb/handler.rs b/src/servers/src/opentsdb/handler.rs
index bc28f18be035..fd41297e487f 100644
--- a/src/servers/src/opentsdb/handler.rs
+++ b/src/servers/src/opentsdb/handler.rs
@@ -15,7 +15,7 @@
//! Modified from Tokio's mini-redis example.
use common_telemetry::timer;
-use session::context::QueryContext;
+use session::context::QueryContextBuilder;
use tokio::io::{AsyncRead, AsyncWrite};
use crate::error::Result;
@@ -62,7 +62,7 @@ impl<S: AsyncWrite + AsyncRead + Unpin> Handler<S> {
pub(crate) async fn run(&mut self) -> Result<()> {
// TODO(shuiyisong): figure out how to auth in tcp connection.
- let ctx = QueryContext::arc();
+ let ctx = QueryContextBuilder::default().build();
while !self.shutdown.is_shutdown() {
// While reading a request, also listen for the shutdown signal.
let maybe_line = tokio::select! {
diff --git a/src/servers/src/prometheus.rs b/src/servers/src/prometheus.rs
index 9c65a243cad2..94121eb6ee82 100644
--- a/src/servers/src/prometheus.rs
+++ b/src/servers/src/prometheus.rs
@@ -446,7 +446,7 @@ pub async fn instant_query(
let query_ctx = QueryContext::with(catalog, schema);
- let result = handler.do_query(&prom_query, Arc::new(query_ctx)).await;
+ let result = handler.do_query(&prom_query, query_ctx).await;
let (metric_name, result_type) = match retrieve_metric_name_and_result_type(&prom_query.query) {
Ok((metric_name, result_type)) => (metric_name.unwrap_or_default(), result_type),
Err(err) => {
@@ -485,7 +485,7 @@ pub async fn range_query(
let query_ctx = QueryContext::with(catalog, schema);
- let result = handler.do_query(&prom_query, Arc::new(query_ctx)).await;
+ let result = handler.do_query(&prom_query, query_ctx).await;
let metric_name = match retrieve_metric_name_and_result_type(&prom_query.query) {
Err(err) => {
return PrometheusJsonResponse::error(err.status_code().to_string(), err.to_string())
@@ -565,7 +565,7 @@ pub async fn labels_query(
let db = ¶ms.db.unwrap_or(DEFAULT_SCHEMA_NAME.to_string());
let (catalog, schema) = crate::parse_catalog_and_schema_from_client_database_name(db);
- let query_ctx = Arc::new(QueryContext::with(catalog, schema));
+ let query_ctx = QueryContext::with(catalog, schema);
let mut labels = HashSet::new();
let _ = labels.insert(METRIC_NAME.to_string());
@@ -792,7 +792,7 @@ pub async fn label_values_query(
let end = params.end.unwrap_or_else(current_time_rfc3339);
let db = ¶ms.db.unwrap_or(DEFAULT_SCHEMA_NAME.to_string());
let (catalog, schema) = crate::parse_catalog_and_schema_from_client_database_name(db);
- let query_ctx = Arc::new(QueryContext::with(catalog, schema));
+ let query_ctx = QueryContext::with(catalog, schema);
let mut label_values = HashSet::new();
@@ -914,7 +914,7 @@ pub async fn series_query(
    let db = &params.db.unwrap_or(DEFAULT_SCHEMA_NAME.to_string());
let (catalog, schema) = super::parse_catalog_and_schema_from_client_database_name(db);
- let query_ctx = Arc::new(QueryContext::with(catalog, schema));
+ let query_ctx = QueryContext::with(catalog, schema);
let mut series = Vec::new();
for query in queries {
diff --git a/src/servers/tests/interceptor.rs b/src/servers/tests/interceptor.rs
index ab721e03eb77..92fcace38f10 100644
--- a/src/servers/tests/interceptor.rs
+++ b/src/servers/tests/interceptor.rs
@@ -13,7 +13,6 @@
// limitations under the License.
use std::borrow::Cow;
-use std::sync::Arc;
use api::v1::greptime_request::Request;
use api::v1::{InsertRequest, InsertRequests};
@@ -38,7 +37,7 @@ impl SqlQueryInterceptor for NoopInterceptor {
#[test]
fn test_default_interceptor_behaviour() {
let di = NoopInterceptor;
- let ctx = Arc::new(QueryContext::new());
+ let ctx = QueryContext::arc();
let query = "SELECT 1";
assert_eq!("SELECT 1;", di.pre_parsing(query, ctx).unwrap());
@@ -72,7 +71,7 @@ impl GrpcQueryInterceptor for NoopInterceptor {
#[test]
fn test_grpc_interceptor() {
let di = NoopInterceptor;
- let ctx = Arc::new(QueryContext::new());
+ let ctx = QueryContext::arc();
let req = Request::Inserts(InsertRequests {
inserts: vec![InsertRequest {
@@ -117,7 +116,7 @@ impl PromQueryInterceptor for NoopInterceptor {
#[test]
fn test_prom_interceptor() {
let di = NoopInterceptor;
- let ctx = Arc::new(QueryContext::new());
+ let ctx = QueryContext::arc();
let query = PromQuery {
query: "up".to_string(),
diff --git a/src/servers/tests/py_script/mod.rs b/src/servers/tests/py_script/mod.rs
index fcd3317b0f4b..49dcdd7aa404 100644
--- a/src/servers/tests/py_script/mod.rs
+++ b/src/servers/tests/py_script/mod.rs
@@ -12,8 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::sync::Arc;
-
use servers::error::Result;
use servers::query_handler::sql::SqlQueryHandler;
use servers::query_handler::ScriptHandler;
@@ -24,7 +22,7 @@ use crate::create_testing_instance;
#[tokio::test]
async fn test_insert_py_udf_and_query() -> Result<()> {
- let query_ctx = Arc::new(QueryContext::new());
+ let query_ctx = QueryContext::arc();
let table = MemTable::default_numbers_table();
let instance = create_testing_instance(table);
diff --git a/src/session/Cargo.toml b/src/session/Cargo.toml
index bdfc4bc7adc6..681fe4498444 100644
--- a/src/session/Cargo.toml
+++ b/src/session/Cargo.toml
@@ -4,9 +4,13 @@ version.workspace = true
edition.workspace = true
license.workspace = true
+[features]
+testing = []
+
[dependencies]
arc-swap = "1.5"
common-catalog = { path = "../common/catalog" }
common-telemetry = { path = "../common/telemetry" }
common-time = { path = "../common/time" }
+derive_builder.workspace = true
sql = { path = "../sql" }
diff --git a/src/session/src/context.rs b/src/session/src/context.rs
index 909db64d4b37..3159774309f9 100644
--- a/src/session/src/context.rs
+++ b/src/session/src/context.rs
@@ -19,28 +19,24 @@ use std::sync::Arc;
use arc_swap::ArcSwap;
use common_catalog::build_db_string;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
-use common_telemetry::debug;
use common_time::TimeZone;
+use derive_builder::Builder;
use sql::dialect::{Dialect, GreptimeDbDialect, MySqlDialect, PostgreSqlDialect};
pub type QueryContextRef = Arc<QueryContext>;
pub type ConnInfoRef = Arc<ConnInfo>;
-#[derive(Debug)]
+#[derive(Debug, Builder)]
+#[builder(pattern = "owned")]
+#[builder(build_fn(skip))]
pub struct QueryContext {
- current_catalog: ArcSwap<String>,
- current_schema: ArcSwap<String>,
+ current_catalog: String,
+ current_schema: String,
time_zone: ArcSwap<Option<TimeZone>>,
sql_dialect: Box<dyn Dialect + Send + Sync>,
trace_id: u64,
}
-impl Default for QueryContext {
- fn default() -> Self {
- Self::new()
- }
-}
-
impl Display for QueryContext {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(
@@ -53,39 +49,26 @@ impl Display for QueryContext {
}
impl QueryContext {
+ #[cfg(any(test, feature = "testing"))]
pub fn arc() -> QueryContextRef {
- Arc::new(QueryContext::new())
- }
-
- pub fn to_arc(self) -> QueryContextRef {
- Arc::new(self)
- }
-
- pub fn new() -> Self {
- Self {
- current_catalog: ArcSwap::new(Arc::new(DEFAULT_CATALOG_NAME.to_string())),
- current_schema: ArcSwap::new(Arc::new(DEFAULT_SCHEMA_NAME.to_string())),
- time_zone: ArcSwap::new(Arc::new(None)),
- sql_dialect: Box::new(GreptimeDbDialect {}),
- trace_id: common_telemetry::gen_trace_id(),
- }
+ QueryContextBuilder::default().build()
}
- pub fn with(catalog: &str, schema: &str) -> Self {
- QueryContextBuilder::new()
- .catalog(catalog.to_string())
- .schema(schema.to_string())
+ pub fn with(catalog: &str, schema: &str) -> QueryContextRef {
+ QueryContextBuilder::default()
+ .current_catalog(catalog.to_string())
+ .current_schema(schema.to_string())
.build()
}
#[inline]
pub fn current_schema(&self) -> String {
- self.current_schema.load().as_ref().clone()
+ self.current_schema.clone()
}
#[inline]
pub fn current_catalog(&self) -> String {
- self.current_catalog.load().as_ref().clone()
+ self.current_catalog.clone()
}
#[inline]
@@ -93,26 +76,6 @@ impl QueryContext {
&*self.sql_dialect
}
- pub fn set_current_schema(&self, schema: &str) {
- let last = self.current_schema.swap(Arc::new(schema.to_string()));
- if schema != last.as_str() {
- debug!(
- "set new session default schema: {:?}, swap old: {:?}",
- schema, last
- )
- }
- }
-
- pub fn set_current_catalog(&self, catalog: &str) {
- let last = self.current_catalog.swap(Arc::new(catalog.to_string()));
- if catalog != last.as_str() {
- debug!(
- "set new session default catalog: {:?}, swap old: {:?}",
- catalog, last
- )
- }
- }
-
pub fn get_db_string(&self) -> String {
let catalog = self.current_catalog();
let schema = self.current_schema();
@@ -135,63 +98,29 @@ impl QueryContext {
}
}
-#[derive(Default)]
-pub struct QueryContextBuilder {
- catalog: Option<String>,
- schema: Option<String>,
- time_zone: Option<TimeZone>,
- sql_dialect: Option<Box<dyn Dialect + Send + Sync>>,
- trace_id: Option<u64>,
-}
-
impl QueryContextBuilder {
- pub fn new() -> Self {
- Self::default()
- }
-
- pub fn catalog(mut self, catalog: String) -> Self {
- self.catalog = Some(catalog);
- self
- }
-
- pub fn schema(mut self, schema: String) -> Self {
- self.schema = Some(schema);
- self
- }
-
- pub fn time_zone(mut self, tz: TimeZone) -> Self {
- self.time_zone = Some(tz);
- self
- }
-
- pub fn sql_dialect(mut self, sql_dialect: Box<dyn Dialect + Send + Sync>) -> Self {
- self.sql_dialect = Some(sql_dialect);
- self
- }
-
- pub fn trace_id(mut self, trace_id: u64) -> Self {
- self.trace_id = Some(trace_id);
- self
+ pub fn build(self) -> QueryContextRef {
+ Arc::new(QueryContext {
+ current_catalog: self
+ .current_catalog
+ .unwrap_or_else(|| DEFAULT_CATALOG_NAME.to_string()),
+ current_schema: self
+ .current_schema
+ .unwrap_or_else(|| DEFAULT_SCHEMA_NAME.to_string()),
+ time_zone: self
+ .time_zone
+ .unwrap_or_else(|| ArcSwap::new(Arc::new(None))),
+ sql_dialect: self
+ .sql_dialect
+ .unwrap_or_else(|| Box::new(GreptimeDbDialect {})),
+ trace_id: self.trace_id.unwrap_or_else(common_telemetry::gen_trace_id),
+ })
}
pub fn try_trace_id(mut self, trace_id: Option<u64>) -> Self {
self.trace_id = trace_id;
self
}
-
- pub fn build(self) -> QueryContext {
- QueryContext {
- current_catalog: ArcSwap::new(Arc::new(
- self.catalog.unwrap_or(DEFAULT_CATALOG_NAME.to_string()),
- )),
- current_schema: ArcSwap::new(Arc::new(
- self.schema.unwrap_or(DEFAULT_SCHEMA_NAME.to_string()),
- )),
- time_zone: ArcSwap::new(Arc::new(self.time_zone)),
- sql_dialect: self.sql_dialect.unwrap_or(Box::new(GreptimeDbDialect {})),
- trace_id: self.trace_id.unwrap_or(common_telemetry::gen_trace_id()),
- }
- }
}
pub const DEFAULT_USERNAME: &str = "greptime";
@@ -227,8 +156,8 @@ pub struct ConnInfo {
pub channel: Channel,
}
-impl std::fmt::Display for ConnInfo {
- fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
+impl Display for ConnInfo {
+ fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
write!(
f,
"{}[{}]",
@@ -265,8 +194,8 @@ impl Channel {
}
}
-impl std::fmt::Display for Channel {
- fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
+impl Display for Channel {
+ fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
match self {
Channel::Mysql => write!(f, "mysql"),
Channel::Postgres => write!(f, "postgres"),
@@ -276,6 +205,8 @@ impl std::fmt::Display for Channel {
#[cfg(test)]
mod test {
+ use common_catalog::consts::DEFAULT_CATALOG_NAME;
+
use super::*;
use crate::context::{Channel, UserInfo};
use crate::Session;
@@ -299,16 +230,10 @@ mod test {
#[test]
fn test_context_db_string() {
- let context = QueryContext::new();
-
- context.set_current_catalog("a0b1c2d3");
- context.set_current_schema("test");
-
+ let context = QueryContext::with("a0b1c2d3", "test");
assert_eq!("a0b1c2d3-test", context.get_db_string());
- context.set_current_catalog(DEFAULT_CATALOG_NAME);
- context.set_current_schema("test");
-
+ let context = QueryContext::with(DEFAULT_CATALOG_NAME, "test");
assert_eq!("test", context.get_db_string());
}
}
diff --git a/src/session/src/lib.rs b/src/session/src/lib.rs
index 11591980bdbc..2fa8b09f5075 100644
--- a/src/session/src/lib.rs
+++ b/src/session/src/lib.rs
@@ -47,12 +47,11 @@ impl Session {
#[inline]
pub fn new_query_context(&self) -> QueryContextRef {
- QueryContextBuilder::new()
- .catalog(self.catalog.load().to_string())
- .schema(self.schema.load().to_string())
+ QueryContextBuilder::default()
+ .current_catalog(self.catalog.load().to_string())
+ .current_schema(self.schema.load().to_string())
.sql_dialect(self.conn_info.channel.dialect())
.build()
- .to_arc()
}
#[inline]
diff --git a/src/sql/src/parser.rs b/src/sql/src/parser.rs
index aa4c18200d5d..9b9409b2f562 100644
--- a/src/sql/src/parser.rs
+++ b/src/sql/src/parser.rs
@@ -107,20 +107,6 @@ impl<'a> ParserContext<'a> {
Keyword::DROP => self.parse_drop(),
- Keyword::USE => {
- let _ = self.parser.next_token();
-
- let database_name =
- self.parser
- .parse_identifier()
- .context(error::UnexpectedSnafu {
- sql: self.sql,
- expected: "a database name",
- actual: self.peek_token_as_string(),
- })?;
- Ok(Statement::Use(database_name.value))
- }
-
Keyword::COPY => self.parse_copy(),
Keyword::TRUNCATE => self.parse_truncate(),
diff --git a/src/sql/src/statements/statement.rs b/src/sql/src/statements/statement.rs
index 4b8d2799526f..ee8072331d28 100644
--- a/src/sql/src/statements/statement.rs
+++ b/src/sql/src/statements/statement.rs
@@ -58,7 +58,6 @@ pub enum Statement {
DescribeTable(DescribeTable),
// EXPLAIN QUERY
Explain(Explain),
- Use(String),
// COPY
Copy(crate::statements::copy::Copy),
Tql(Tql),
diff --git a/src/store-api/Cargo.toml b/src/store-api/Cargo.toml
index 4834af37bb94..1657fbbfded8 100644
--- a/src/store-api/Cargo.toml
+++ b/src/store-api/Cargo.toml
@@ -13,7 +13,7 @@ common-query = { path = "../common/query" }
common-recordbatch = { path = "../common/recordbatch" }
common-time = { path = "../common/time" }
datatypes = { path = "../datatypes" }
-derive_builder = "0.11"
+derive_builder.workspace = true
futures.workspace = true
serde.workspace = true
snafu.workspace = true
diff --git a/src/table/Cargo.toml b/src/table/Cargo.toml
index 77ae09e1df13..9c240b314b45 100644
--- a/src/table/Cargo.toml
+++ b/src/table/Cargo.toml
@@ -24,7 +24,7 @@ datafusion-common.workspace = true
datafusion-expr.workspace = true
datafusion-physical-expr.workspace = true
datatypes = { path = "../datatypes" }
-derive_builder = "0.11"
+derive_builder.workspace = true
futures.workspace = true
humantime = "2.1"
humantime-serde = "1.1"
diff --git a/tests-integration/Cargo.toml b/tests-integration/Cargo.toml
index acf06e38cc77..bd3db3fb82cf 100644
--- a/tests-integration/Cargo.toml
+++ b/tests-integration/Cargo.toml
@@ -68,5 +68,6 @@ partition = { path = "../src/partition" }
paste.workspace = true
prost.workspace = true
script = { path = "../src/script" }
+session = { path = "../src/session", features = ["testing"] }
store-api = { path = "../src/store-api" }
opentelemetry-proto.workspace = true
diff --git a/tests-integration/src/instance.rs b/tests-integration/src/instance.rs
index c73dfdc85ab9..af2d6db39d61 100644
--- a/tests-integration/src/instance.rs
+++ b/tests-integration/src/instance.rs
@@ -356,7 +356,7 @@ mod tests {
}
}
- let query_ctx = Arc::new(QueryContext::new());
+ let query_ctx = QueryContext::arc();
let standalone = tests::create_standalone_instance("test_db_hook").await;
let mut instance = standalone.instance;
diff --git a/tests-integration/src/opentsdb.rs b/tests-integration/src/opentsdb.rs
index 122e667429ad..59c473d7e3ff 100644
--- a/tests-integration/src/opentsdb.rs
+++ b/tests-integration/src/opentsdb.rs
@@ -75,7 +75,7 @@ mod tests {
let output = instance
.do_query(
"select * from my_metric_1 order by greptime_timestamp",
- Arc::new(QueryContext::new()),
+ QueryContext::arc(),
)
.await
.remove(0)
diff --git a/tests-integration/src/otlp.rs b/tests-integration/src/otlp.rs
index e1fcf03cbba6..89ff45ab4bca 100644
--- a/tests-integration/src/otlp.rs
+++ b/tests-integration/src/otlp.rs
@@ -50,7 +50,7 @@ mod test {
async fn test_otlp(instance: &Arc<Instance>) {
let req = build_request();
let db = "otlp";
- let ctx = Arc::new(QueryContext::with(DEFAULT_CATALOG_NAME, db));
+ let ctx = QueryContext::with(DEFAULT_CATALOG_NAME, db);
assert!(SqlQueryHandler::do_query(
instance.as_ref(),
diff --git a/tests-integration/src/prom_store.rs b/tests-integration/src/prom_store.rs
index 3625a36ef3d5..048501c6f38e 100644
--- a/tests-integration/src/prom_store.rs
+++ b/tests-integration/src/prom_store.rs
@@ -53,7 +53,7 @@ mod tests {
};
let db = "prometheus";
- let ctx = Arc::new(QueryContext::with(DEFAULT_CATALOG_NAME, db));
+ let ctx = QueryContext::with(DEFAULT_CATALOG_NAME, db);
assert!(SqlQueryHandler::do_query(
instance.as_ref(),
diff --git a/tests-integration/src/tests/instance_test.rs b/tests-integration/src/tests/instance_test.rs
index 3570ab533fc5..2ccaa7b88cd1 100644
--- a/tests-integration/src/tests/instance_test.rs
+++ b/tests-integration/src/tests/instance_test.rs
@@ -844,7 +844,7 @@ async fn test_rename_table(instance: Arc<dyn MockInstance>) {
let output = execute_sql(&instance, "create database db").await;
assert!(matches!(output, Output::AffectedRows(1)));
- let query_ctx = Arc::new(QueryContext::with(DEFAULT_CATALOG_NAME, "db"));
+ let query_ctx = QueryContext::with(DEFAULT_CATALOG_NAME, "db");
let output = execute_sql_with(
&instance,
"create table demo(host string, cpu double, memory double, ts timestamp, time index(ts))",
@@ -912,7 +912,7 @@ async fn test_create_table_after_rename_table(instance: Arc<dyn MockInstance>) {
// create test table
let table_name = "demo";
- let query_ctx = Arc::new(QueryContext::with(DEFAULT_CATALOG_NAME, "db"));
+ let query_ctx = QueryContext::with(DEFAULT_CATALOG_NAME, "db");
let output = execute_sql_with(
&instance,
&format!("create table {table_name}(host string, cpu double, memory double, ts timestamp, time index(ts))"),
@@ -1096,7 +1096,7 @@ async fn test_use_database(instance: Arc<dyn MockInstance>) {
let output = execute_sql(&instance, "create database db1").await;
assert!(matches!(output, Output::AffectedRows(1)));
- let query_ctx = Arc::new(QueryContext::with(DEFAULT_CATALOG_NAME, "db1"));
+ let query_ctx = QueryContext::with(DEFAULT_CATALOG_NAME, "db1");
let output = execute_sql_with(
&instance,
"create table tb1(col_i32 int, ts bigint, TIME INDEX(ts))",
@@ -1410,7 +1410,7 @@ async fn test_information_schema_dot_tables(instance: Arc<dyn MockInstance>) {
let instance = instance.frontend();
let sql = "create table another_table(i bigint time index)";
- let query_ctx = Arc::new(QueryContext::with("another_catalog", "another_schema"));
+ let query_ctx = QueryContext::with("another_catalog", "another_schema");
let output = execute_sql_with(&instance, sql, query_ctx.clone()).await;
assert!(matches!(output, Output::AffectedRows(0)));
@@ -1469,7 +1469,7 @@ async fn test_information_schema_dot_columns(instance: Arc<dyn MockInstance>) {
let instance = instance.frontend();
let sql = "create table another_table(i bigint time index)";
- let query_ctx = Arc::new(QueryContext::with("another_catalog", "another_schema"));
+ let query_ctx = QueryContext::with("another_catalog", "another_schema");
let output = execute_sql_with(&instance, sql, query_ctx.clone()).await;
assert!(matches!(output, Output::AffectedRows(0)));
diff --git a/tests/cases/standalone/common/catalog/schema.result b/tests/cases/standalone/common/catalog/schema.result
index f0bcac620aa5..9fa7b2b1a8e1 100644
--- a/tests/cases/standalone/common/catalog/schema.result
+++ b/tests/cases/standalone/common/catalog/schema.result
@@ -29,8 +29,7 @@ SHOW DATABASES WHERE Schemas='test_public_schema';
USE test_public_schema;
-++
-++
+Affected Rows: 0
CREATE TABLE hello(i BIGINT TIME INDEX);
@@ -125,6 +124,5 @@ Error: 3000(PlanQuery), Error during planning: Table not found: greptime.test_pu
USE public;
-++
-++
+Affected Rows: 0
diff --git a/tests/cases/standalone/common/create/upper_case_table_name.result b/tests/cases/standalone/common/create/upper_case_table_name.result
index e517358991cb..eed6c1e6f46d 100644
--- a/tests/cases/standalone/common/create/upper_case_table_name.result
+++ b/tests/cases/standalone/common/create/upper_case_table_name.result
@@ -4,8 +4,7 @@ Affected Rows: 1
use upper_case_table_name;
-++
-++
+Affected Rows: 0
create table system_Metric(ts timestamp time index);
@@ -34,6 +33,5 @@ Affected Rows: 1
use public;
-++
-++
+Affected Rows: 0
diff --git a/tests/cases/standalone/common/system/information_schema.result b/tests/cases/standalone/common/system/information_schema.result
index 826fbbc77279..d6ad9c60e0ae 100644
--- a/tests/cases/standalone/common/system/information_schema.result
+++ b/tests/cases/standalone/common/system/information_schema.result
@@ -5,8 +5,7 @@ Affected Rows: 1
use my_db;
-++
-++
+Affected Rows: 0
create table foo
(
@@ -52,6 +51,5 @@ order by table_schema, table_name;
use public;
-++
-++
+Affected Rows: 0
diff --git a/tests/runner/src/env.rs b/tests/runner/src/env.rs
index f5168760b0a6..e0011996859f 100644
--- a/tests/runner/src/env.rs
+++ b/tests/runner/src/env.rs
@@ -331,10 +331,13 @@ impl Database for GreptimeDB {
.expect("Illegal `USE` statement: expecting a database.")
.trim_end_matches(';');
client.set_schema(database);
+ Box::new(ResultDisplayer {
+ result: Ok(Output::AffectedRows(0)),
+ }) as _
+ } else {
+ let result = client.sql(&query).await;
+ Box::new(ResultDisplayer { result }) as _
}
-
- let result = client.sql(&query).await;
- Box::new(ResultDisplayer { result }) as _
}
}
type: refactor
masked_commit_message: query context (#2022)

hash: f8500e54c1b98210cddf04c77a5b064776fbe551
date: 2022-12-26 10:41:24
author: Yingwen
commit_message: refactor: Remove PutOperation and Simplify WriteRequest API (#775)
is_merge: false
git_diff:
diff --git a/Cargo.lock b/Cargo.lock
index 77ee0412bfed..fb9a777a9ce5 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -6784,6 +6784,7 @@ dependencies = [
"common-base",
"common-error",
"common-query",
+ "common-recordbatch",
"common-runtime",
"common-telemetry",
"common-time",
diff --git a/src/mito/src/error.rs b/src/mito/src/error.rs
index ff29e72a818b..ff92512028ad 100644
--- a/src/mito/src/error.rs
+++ b/src/mito/src/error.rs
@@ -153,13 +153,6 @@ pub enum Error {
table_name: String,
},
- #[snafu(display("Columns {} not exist in table {}", column_names.join(","), table_name))]
- ColumnsNotExist {
- backtrace: Backtrace,
- column_names: Vec<String>,
- table_name: String,
- },
-
#[snafu(display("Failed to alter table {}, source: {}", table_name, source))]
AlterTable {
table_name: String,
@@ -176,12 +169,6 @@ pub enum Error {
column_qualified_name: String,
},
- #[snafu(display("Unsupported column default constraint, source: {}", source))]
- UnsupportedDefaultConstraint {
- #[snafu(backtrace)]
- source: datatypes::error::Error,
- },
-
#[snafu(display(
"Failed to convert metadata from deserialized data, source: {}",
source
@@ -219,11 +206,8 @@ impl ErrorExt for Error {
| ProjectedColumnNotFound { .. }
| InvalidPrimaryKey { .. }
| MissingTimestampIndex { .. }
- | UnsupportedDefaultConstraint { .. }
| TableNotFound { .. } => StatusCode::InvalidArguments,
- ColumnsNotExist { .. } => StatusCode::TableColumnNotFound,
-
TableInfoNotFound { .. } | ConvertRaw { .. } => StatusCode::Unexpected,
ScanTableManifest { .. } | UpdateTableManifest { .. } => StatusCode::StorageUnavailable,
diff --git a/src/mito/src/table.rs b/src/mito/src/table.rs
index 74488d3f409e..e0dd91055b65 100644
--- a/src/mito/src/table.rs
+++ b/src/mito/src/table.rs
@@ -27,18 +27,16 @@ use common_query::physical_plan::PhysicalPlanRef;
use common_recordbatch::error::{ExternalSnafu, Result as RecordBatchResult};
use common_recordbatch::{RecordBatch, RecordBatchStream};
use common_telemetry::logging;
-use datatypes::schema::ColumnSchema;
-use datatypes::vectors::VectorRef;
use futures::task::{Context, Poll};
use futures::Stream;
use object_store::ObjectStore;
-use snafu::{ensure, OptionExt, ResultExt};
+use snafu::{OptionExt, ResultExt};
use store_api::manifest::{self, Manifest, ManifestVersion, MetaActionIterator};
use store_api::storage::{
- AddColumn, AlterOperation, AlterRequest, ChunkReader, PutOperation, ReadContext, Region,
- RegionMeta, ScanRequest, SchemaRef, Snapshot, WriteContext, WriteRequest,
+ AddColumn, AlterOperation, AlterRequest, ChunkReader, ReadContext, Region, RegionMeta,
+ ScanRequest, SchemaRef, Snapshot, WriteContext, WriteRequest,
};
-use table::error::{Error as TableError, MissingColumnSnafu, Result as TableResult};
+use table::error::{Error as TableError, Result as TableResult};
use table::metadata::{
FilterPushDownType, RawTableInfo, TableInfo, TableInfoRef, TableMeta, TableType,
};
@@ -48,8 +46,8 @@ use table::table::Table;
use tokio::sync::Mutex;
use crate::error::{
- self, ColumnsNotExistSnafu, ProjectedColumnNotFoundSnafu, Result, ScanTableManifestSnafu,
- TableInfoNotFoundSnafu, UnsupportedDefaultConstraintSnafu, UpdateTableManifestSnafu,
+ self, ProjectedColumnNotFoundSnafu, Result, ScanTableManifestSnafu, TableInfoNotFoundSnafu,
+ UpdateTableManifestSnafu,
};
use crate::manifest::action::*;
use crate::manifest::TableManifest;
@@ -86,66 +84,17 @@ impl<R: Region> Table for MitoTable<R> {
let mut write_request = self.region.write_request();
- let mut put_op = write_request.put_op();
- let mut columns_values = request.columns_values;
-
- let table_info = self.table_info();
- let schema = self.schema();
- let key_columns = table_info.meta.row_key_column_names();
- let value_columns = table_info.meta.value_column_names();
+ let columns_values = request.columns_values;
// columns_values is not empty, it's safe to unwrap
let rows_num = columns_values.values().next().unwrap().len();
- // Add row key columns
- for name in key_columns {
- let column_schema = schema
- .column_schema_by_name(name)
- .expect("column schema not found");
-
- let vector = match columns_values.remove(name) {
- Some(v) => v,
- None => Self::try_get_column_default_constraint_vector(column_schema, rows_num)?,
- };
-
- put_op
- .add_key_column(name, vector)
- .map_err(TableError::new)?;
- }
-
- // Add value columns
- for name in value_columns {
- let column_schema = schema
- .column_schema_by_name(name)
- .expect("column schema not found");
-
- let vector = match columns_values.remove(name) {
- Some(v) => v,
- None => Self::try_get_column_default_constraint_vector(column_schema, rows_num)?,
- };
- put_op
- .add_value_column(name, vector)
- .map_err(TableError::new)?;
- }
-
- ensure!(
- columns_values.is_empty(),
- ColumnsNotExistSnafu {
- table_name: &table_info.name,
- column_names: columns_values
- .keys()
- .into_iter()
- .map(|s| s.to_string())
- .collect::<Vec<_>>(),
- }
- );
-
logging::trace!(
- "Insert into table {} with put_op: {:?}",
- table_info.name,
- put_op
+ "Insert into table {} with data: {:?}",
+ self.table_info().name,
+ columns_values
);
- write_request.put(put_op).map_err(TableError::new)?;
+ write_request.put(columns_values).map_err(TableError::new)?;
let _resp = self
.region
@@ -375,21 +324,6 @@ impl<R: Region> MitoTable<R> {
Ok(MitoTable::new(table_info, region, manifest))
}
- fn try_get_column_default_constraint_vector(
- column_schema: &ColumnSchema,
- rows_num: usize,
- ) -> TableResult<VectorRef> {
- // TODO(dennis): when we support altering schema, we should check the schemas difference between table and region
- let vector = column_schema
- .create_default_vector(rows_num)
- .context(UnsupportedDefaultConstraintSnafu)?
- .context(MissingColumnSnafu {
- name: &column_schema.name,
- })?;
-
- Ok(vector)
- }
-
pub async fn open(
table_name: &str,
table_dir: &str,
diff --git a/src/mito/src/table/test_util/mock_engine.rs b/src/mito/src/table/test_util/mock_engine.rs
index 54b845bc514b..f3b859c82c47 100644
--- a/src/mito/src/table/test_util/mock_engine.rs
+++ b/src/mito/src/table/test_util/mock_engine.rs
@@ -24,7 +24,7 @@ use common_telemetry::logging;
use datatypes::prelude::{DataType, Value, VectorRef};
use datatypes::schema::{ColumnSchema, Schema};
use storage::metadata::{RegionMetaImpl, RegionMetadata};
-use storage::write_batch::{Mutation, WriteBatch};
+use storage::write_batch::WriteBatch;
use store_api::storage::{
AlterRequest, Chunk, ChunkReader, CreateOptions, EngineContext, GetRequest, GetResponse,
OpenOptions, ReadContext, Region, RegionDescriptor, RegionId, RegionMeta, ScanRequest,
@@ -219,10 +219,10 @@ impl MockRegionInner {
let mut memtable = self.memtable.write().unwrap();
- for Mutation::Put(put) in request.iter() {
+ for mutation in &request.payload().mutations {
for ColumnSchema { name, .. } in metadata.user_schema().column_schemas() {
let column = memtable.get_mut(name).unwrap();
- if let Some(data) = put.column_by_name(name) {
+ if let Some(data) = mutation.record_batch.column_by_name(name) {
(0..data.len()).for_each(|i| column.push(data.get(i)));
}
}
diff --git a/src/storage/Cargo.toml b/src/storage/Cargo.toml
index 1847dac9ae13..aa68e2082494 100644
--- a/src/storage/Cargo.toml
+++ b/src/storage/Cargo.toml
@@ -13,6 +13,7 @@ bytes = "1.1"
common-base = { path = "../common/base" }
common-error = { path = "../common/error" }
common-query = { path = "../common/query" }
+common-recordbatch = { path = "../common/recordbatch" }
common-runtime = { path = "../common/runtime" }
common-telemetry = { path = "../common/telemetry" }
common-time = { path = "../common/time" }
diff --git a/src/storage/benches/wal/bench_decode.rs b/src/storage/benches/wal/bench_decode.rs
index f451407e9789..911836061243 100644
--- a/src/storage/benches/wal/bench_decode.rs
+++ b/src/storage/benches/wal/bench_decode.rs
@@ -33,25 +33,13 @@ rows | protobuf | arrow |
*/
fn encode_arrow(batch: &WriteBatch, dst: &mut Vec<u8>) {
- let encoder = codec::WriteBatchArrowEncoder::new();
- let result = encoder.encode(batch, dst);
- assert!(result.is_ok());
-}
-
-fn encode_protobuf(batch: &WriteBatch, dst: &mut Vec<u8>) {
- let encoder = codec::WriteBatchProtobufEncoder {};
- let result = encoder.encode(batch, dst);
+ let encoder = codec::PayloadEncoder::new();
+ let result = encoder.encode(batch.payload(), dst);
assert!(result.is_ok());
}
fn decode_arrow(dst: &[u8], mutation_types: &[i32]) {
- let decoder = codec::WriteBatchArrowDecoder::new(mutation_types.to_vec());
- let result = decoder.decode(dst);
- assert!(result.is_ok());
-}
-
-fn decode_protobuf(dst: &[u8], mutation_types: &[i32]) {
- let decoder = codec::WriteBatchProtobufDecoder::new(mutation_types.to_vec());
+ let decoder = codec::PayloadDecoder::new(mutation_types);
let result = decoder.decode(dst);
assert!(result.is_ok());
}
@@ -60,32 +48,16 @@ fn bench_wal_decode(c: &mut Criterion) {
let (batch_10, types_10) = gen_new_batch_and_types(1);
let (batch_100, types_100) = gen_new_batch_and_types(10);
let (batch_10000, types_10000) = gen_new_batch_and_types(100);
- let mut dst_protobuf_10 = vec![];
- let mut dst_protobuf_100 = vec![];
- let mut dst_protobuf_10000 = vec![];
let mut dst_arrow_10 = vec![];
let mut dst_arrow_100 = vec![];
let mut dst_arrow_10000 = vec![];
- encode_protobuf(&batch_10, &mut dst_protobuf_10);
- encode_protobuf(&batch_100, &mut dst_protobuf_100);
- encode_protobuf(&batch_10000, &mut dst_protobuf_10000);
-
encode_arrow(&batch_10, &mut dst_arrow_10);
encode_arrow(&batch_100, &mut dst_arrow_100);
encode_arrow(&batch_10000, &mut dst_arrow_10000);
let mut group = c.benchmark_group("wal_decode");
- group.bench_function("protobuf_decode_with_10_num_rows", |b| {
- b.iter(|| decode_protobuf(&dst_protobuf_10, &types_10))
- });
- group.bench_function("protobuf_decode_with_100_num_rows", |b| {
- b.iter(|| decode_protobuf(&dst_protobuf_100, &types_100))
- });
- group.bench_function("protobuf_decode_with_10000_num_rows", |b| {
- b.iter(|| decode_protobuf(&dst_protobuf_10000, &types_10000))
- });
group.bench_function("arrow_decode_with_10_num_rows", |b| {
b.iter(|| decode_arrow(&dst_arrow_10, &types_10))
});
diff --git a/src/storage/benches/wal/bench_encode.rs b/src/storage/benches/wal/bench_encode.rs
index 1abc46002f70..b1897e85e9f5 100644
--- a/src/storage/benches/wal/bench_encode.rs
+++ b/src/storage/benches/wal/bench_encode.rs
@@ -33,16 +33,9 @@ rows | protobuf | arrow |
*/
fn encode_arrow(batch: &WriteBatch) {
- let encoder = codec::WriteBatchArrowEncoder::new();
+ let encoder = codec::PayloadEncoder::new();
let mut dst = vec![];
- let result = encoder.encode(batch, &mut dst);
- assert!(result.is_ok());
-}
-
-fn encode_protobuf(batch: &WriteBatch) {
- let encoder = codec::WriteBatchProtobufEncoder {};
- let mut dst = vec![];
- let result = encoder.encode(batch, &mut dst);
+ let result = encoder.encode(batch.payload(), &mut dst);
assert!(result.is_ok());
}
@@ -52,15 +45,6 @@ fn bench_wal_encode(c: &mut Criterion) {
let (batch_10000, _) = gen_new_batch_and_types(100);
let mut group = c.benchmark_group("wal_encode");
- group.bench_function("protobuf_encode_with_10_num_rows", |b| {
- b.iter(|| encode_protobuf(&batch_10))
- });
- group.bench_function("protobuf_encode_with_100_num_rows", |b| {
- b.iter(|| encode_protobuf(&batch_100))
- });
- group.bench_function("protobuf_encode_with_10000_num_rows", |b| {
- b.iter(|| encode_protobuf(&batch_10000))
- });
group.bench_function("arrow_encode_with_10_num_rows", |b| {
b.iter(|| encode_arrow(&batch_10))
});
diff --git a/src/storage/benches/wal/bench_wal.rs b/src/storage/benches/wal/bench_wal.rs
index 67e76f2527b7..0499f6ae4bab 100644
--- a/src/storage/benches/wal/bench_wal.rs
+++ b/src/storage/benches/wal/bench_wal.rs
@@ -33,22 +33,12 @@ rows | protobuf | arrow |
*/
fn codec_arrow(batch: &WriteBatch, mutation_types: &[i32]) {
- let encoder = codec::WriteBatchArrowEncoder::new();
+ let encoder = codec::PayloadEncoder::new();
let mut dst = vec![];
- let result = encoder.encode(batch, &mut dst);
+ let result = encoder.encode(batch.payload(), &mut dst);
assert!(result.is_ok());
- let decoder = codec::WriteBatchArrowDecoder::new(mutation_types.to_vec());
- let result = decoder.decode(&dst);
- assert!(result.is_ok());
-}
-fn codec_protobuf(batch: &WriteBatch, mutation_types: &[i32]) {
- let encoder = codec::WriteBatchProtobufEncoder {};
- let mut dst = vec![];
- let result = encoder.encode(batch, &mut dst);
- assert!(result.is_ok());
-
- let decoder = codec::WriteBatchProtobufDecoder::new(mutation_types.to_vec());
+ let decoder = codec::PayloadDecoder::new(mutation_types);
let result = decoder.decode(&dst);
assert!(result.is_ok());
}
@@ -59,15 +49,6 @@ fn bench_wal_encode_decode(c: &mut Criterion) {
let (batch_10000, types_10000) = gen_new_batch_and_types(100);
let mut group = c.benchmark_group("wal_encode_decode");
- group.bench_function("protobuf_encode_decode_with_10_num_rows", |b| {
- b.iter(|| codec_protobuf(&batch_10, &types_10))
- });
- group.bench_function("protobuf_encode_decode_with_100_num_rows", |b| {
- b.iter(|| codec_protobuf(&batch_100, &types_100))
- });
- group.bench_function("protobuf_encode_decode_with_10000_num_rows", |b| {
- b.iter(|| codec_protobuf(&batch_10000, &types_10000))
- });
group.bench_function("arrow_encode_decode_with_10_num_rows", |b| {
b.iter(|| codec_arrow(&batch_10, &types_10))
});
diff --git a/src/storage/benches/wal/util/mod.rs b/src/storage/benches/wal/util/mod.rs
index 477297074afc..b9df47710df8 100644
--- a/src/storage/benches/wal/util/mod.rs
+++ b/src/storage/benches/wal/util/mod.rs
@@ -14,17 +14,18 @@
pub mod write_batch_util;
+use std::collections::HashMap;
use std::sync::Arc;
use datatypes::prelude::ScalarVector;
use datatypes::type_id::LogicalTypeId;
use datatypes::vectors::{
- BooleanVector, Float64Vector, StringVector, TimestampMillisecondVector, UInt64Vector,
+ BooleanVector, Float64Vector, StringVector, TimestampMillisecondVector, UInt64Vector, VectorRef,
};
use rand::Rng;
use storage::proto;
-use storage::write_batch::{PutData, WriteBatch};
-use store_api::storage::{consts, PutOperation, WriteRequest};
+use storage::write_batch::WriteBatch;
+use store_api::storage::{consts, WriteRequest};
pub fn new_test_batch() -> WriteBatch {
write_batch_util::new_write_batch(
@@ -69,25 +70,25 @@ pub fn gen_new_batch_and_types(putdate_nums: usize) -> (WriteBatch, Vec<i32>) {
rng.fill(&mut boolvs[..]);
rng.fill(&mut tsvs[..]);
rng.fill(&mut fvs[..]);
- let intv = Arc::new(UInt64Vector::from_slice(&intvs));
- let boolv = Arc::new(BooleanVector::from(boolvs.to_vec()));
- let tsv = Arc::new(TimestampMillisecondVector::from_values(tsvs));
- let fvs = Arc::new(Float64Vector::from_slice(&fvs));
- let svs = Arc::new(StringVector::from_slice(&svs));
- let mut put_data = PutData::default();
- put_data.add_key_column("k1", intv.clone()).unwrap();
- put_data.add_version_column(intv).unwrap();
- put_data.add_value_column("v1", boolv).unwrap();
- put_data.add_key_column("ts", tsv.clone()).unwrap();
- put_data.add_key_column("4", fvs.clone()).unwrap();
- put_data.add_key_column("5", fvs.clone()).unwrap();
- put_data.add_key_column("6", fvs.clone()).unwrap();
- put_data.add_key_column("7", fvs.clone()).unwrap();
- put_data.add_key_column("8", fvs.clone()).unwrap();
- put_data.add_key_column("9", fvs.clone()).unwrap();
- put_data.add_key_column("10", svs.clone()).unwrap();
+ let intv = Arc::new(UInt64Vector::from_slice(&intvs)) as VectorRef;
+ let boolv = Arc::new(BooleanVector::from(boolvs.to_vec())) as VectorRef;
+ let tsv = Arc::new(TimestampMillisecondVector::from_values(tsvs)) as VectorRef;
+ let fvs = Arc::new(Float64Vector::from_slice(&fvs)) as VectorRef;
+ let svs = Arc::new(StringVector::from_slice(&svs)) as VectorRef;
+ let mut put_data = HashMap::with_capacity(11);
+ put_data.insert("k1".to_string(), intv.clone());
+ put_data.insert(consts::VERSION_COLUMN_NAME.to_string(), intv);
+ put_data.insert("v1".to_string(), boolv);
+ put_data.insert("ts".to_string(), tsv.clone());
+ put_data.insert("4".to_string(), fvs.clone());
+ put_data.insert("5".to_string(), fvs.clone());
+ put_data.insert("6".to_string(), fvs.clone());
+ put_data.insert("7".to_string(), fvs.clone());
+ put_data.insert("8".to_string(), fvs.clone());
+ put_data.insert("9".to_string(), fvs.clone());
+ put_data.insert("10".to_string(), svs.clone());
batch.put(put_data).unwrap();
}
- let types = proto::wal::gen_mutation_types(&batch);
+ let types = proto::wal::gen_mutation_types(batch.payload());
(batch, types)
}
diff --git a/src/storage/build.rs b/src/storage/build.rs
index 7397154783e3..b8140ee3955c 100644
--- a/src/storage/build.rs
+++ b/src/storage/build.rs
@@ -14,6 +14,6 @@
fn main() {
tonic_build::configure()
- .compile(&["proto/wal.proto", "proto/write_batch.proto"], &["."])
+ .compile(&["proto/wal.proto"], &["."])
.expect("compile proto");
}
diff --git a/src/storage/proto/wal.proto b/src/storage/proto/wal.proto
index c8cfb75d5047..94bb14cda1b2 100644
--- a/src/storage/proto/wal.proto
+++ b/src/storage/proto/wal.proto
@@ -3,23 +3,12 @@ syntax = "proto3";
package greptime.storage.wal.v1;
message WalHeader {
- PayloadType payload_type = 1;
- uint64 last_manifest_version = 2;
+ uint64 last_manifest_version = 1;
// Type of each mutation in payload, now only arrow payload uses this field.
- repeated MutationType mutation_types = 3;
-}
-
-enum PayloadType {
- NONE = 0;
- WRITE_BATCH_ARROW = 1;
- WRITE_BATCH_PROTO = 2;
-}
-
-message MutationExtra {
- MutationType mutation_type = 1;
+ repeated MutationType mutation_types = 2;
}
enum MutationType {
- PUT = 0;
- DELETE = 1;
+ DELETE = 0;
+ PUT = 1;
}
diff --git a/src/storage/proto/write_batch.proto b/src/storage/proto/write_batch.proto
deleted file mode 100644
index ed1813aa556d..000000000000
--- a/src/storage/proto/write_batch.proto
+++ /dev/null
@@ -1,95 +0,0 @@
-syntax = "proto3";
-
-package greptime.storage.write_batch.v1;
-
-message WriteBatch {
- Schema schema = 1;
- repeated Mutation mutations = 2;
-}
-
-message Schema {
- repeated ColumnSchema column_schemas = 1;
- TimestampIndex timestamp_index = 2;
-}
-
-message TimestampIndex {
- uint64 value = 1;
-}
-
-message ColumnSchema {
- string name = 1;
- DataType data_type = 2;
- bool is_nullable = 3;
- bool is_time_index = 4;
-}
-
-message Mutation {
- oneof mutation {
- Put put = 1;
- Delete delete = 2;
- }
-}
-
-message Put {
- repeated Column columns = 1;
-}
-
-message Delete {
- // TODO(zxy)
-}
-
-message Column {
- Values values = 1;
- bytes value_null_mask = 2;
- uint64 num_rows = 3;
-}
-
-// TODO(jiachun): Enum might be insufficient to represent some composite data type such as list, struct.
-// In the future, may be we can refer to https://github.com/apache/arrow/blob/master/format/Schema.fbs#L398
-enum DataType {
- NULL = 0;
- BOOLEAN = 1;
- INT8 = 2;
- INT16 = 3;
- INT32 = 4;
- INT64 = 5;
- UINT8 = 6;
- UINT16 = 7;
- UINT32 = 8;
- UINT64 = 9;
- FLOAT32 = 10;
- FLOAT64 = 11;
- STRING = 12;
- BINARY = 13;
- DATE = 14;
- DATETIME = 15;
- TIMESTAMP_SECOND = 16;
- TIMESTAMP_MILLISECOND = 17;
- TIMESTAMP_MICROSECOND = 18;
- TIMESTAMP_NANOSECOND = 19;
-}
-
-message Values {
- repeated int32 i8_values = 1;
- repeated int32 i16_values = 2;
- repeated int32 i32_values = 3;
- repeated int64 i64_values = 4;
-
- repeated uint32 u8_values = 5;
- repeated uint32 u16_values = 6;
- repeated uint32 u32_values = 7;
- repeated uint64 u64_values = 8;
-
- repeated float f32_values = 9;
- repeated double f64_values = 10;
-
- repeated bool bool_values = 11;
- repeated bytes binary_values = 12;
- repeated string string_values = 13;
- repeated int32 date_values = 14;
- repeated int64 datetime_values = 15;
- repeated int64 ts_second_values = 16;
- repeated int64 ts_millisecond_values = 17;
- repeated int64 ts_microsecond_values = 18;
- repeated int64 ts_nanosecond_values = 19;
-}
diff --git a/src/storage/src/error.rs b/src/storage/src/error.rs
index 53c34f8ecc83..e26f42db2c71 100644
--- a/src/storage/src/error.rs
+++ b/src/storage/src/error.rs
@@ -17,7 +17,7 @@ use std::io::Error as IoError;
use std::str::Utf8Error;
use common_error::prelude::*;
-use datatypes::arrow;
+use datatypes::arrow::error::ArrowError;
use datatypes::prelude::ConcreteDataType;
use serde_json::error::Error as JsonError;
use store_api::manifest::action::ProtocolVersion;
@@ -25,6 +25,7 @@ use store_api::manifest::ManifestVersion;
use store_api::storage::{RegionId, SequenceNumber};
use crate::metadata::Error as MetadataError;
+use crate::write_batch;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
@@ -60,7 +61,7 @@ pub enum Error {
#[snafu(display("Failed to create RecordBatch from vectors, source: {}", source))]
NewRecordBatch {
backtrace: Backtrace,
- source: arrow::error::ArrowError,
+ source: ArrowError,
},
#[snafu(display("Fail to read object from path: {}, source: {}", path, source))]
@@ -145,12 +146,6 @@ pub enum Error {
backtrace: Backtrace,
},
- #[snafu(display("Invalid timestamp in write batch, source: {}", source))]
- InvalidTimestamp {
- #[snafu(backtrace)]
- source: crate::write_batch::Error,
- },
-
#[snafu(display("Task already cancelled"))]
Cancelled { backtrace: Backtrace },
@@ -293,13 +288,14 @@ pub enum Error {
},
#[snafu(display(
- "Failed to add default value for column {}, source: {}",
- column,
+ "Failed to create default value for column {}, source: {}",
+ name,
source
))]
- AddDefault {
- column: String,
- source: crate::write_batch::Error,
+ CreateDefault {
+ name: String,
+ #[snafu(backtrace)]
+ source: datatypes::error::Error,
},
#[snafu(display(
@@ -366,6 +362,78 @@ pub enum Error {
#[snafu(backtrace)]
source: datatypes::error::Error,
},
+
+ #[snafu(display("Unknown column {}", name))]
+ UnknownColumn { name: String, backtrace: Backtrace },
+
+ #[snafu(display("Failed to create record batch for write batch, source:{}", source))]
+ CreateRecordBatch {
+ #[snafu(backtrace)]
+ source: common_recordbatch::error::Error,
+ },
+
+ #[snafu(display(
+ "Request is too large, max is {}, current is {}",
+ write_batch::MAX_BATCH_SIZE,
+ num_rows
+ ))]
+ RequestTooLarge {
+ num_rows: usize,
+ backtrace: Backtrace,
+ },
+
+ #[snafu(display(
+ "Type of column {} does not match type in schema, expect {:?}, given {:?}",
+ name,
+ expect,
+ given
+ ))]
+ TypeMismatch {
+ name: String,
+ expect: ConcreteDataType,
+ given: ConcreteDataType,
+ backtrace: Backtrace,
+ },
+
+ #[snafu(display("Column {} is not null but input has null", name))]
+ HasNull { name: String, backtrace: Backtrace },
+
+ #[snafu(display(
+ "Length of column {} not equals to other columns, expect {}, given {}",
+ name,
+ expect,
+ given
+ ))]
+ LenNotEquals {
+ name: String,
+ expect: usize,
+ given: usize,
+ backtrace: Backtrace,
+ },
+
+ #[snafu(display("Failed to decode write batch, corrupted data {}", message))]
+ BatchCorrupted {
+ message: String,
+ backtrace: Backtrace,
+ },
+
+ #[snafu(display("Failed to decode arrow data, source: {}", source))]
+ DecodeArrow {
+ backtrace: Backtrace,
+ source: ArrowError,
+ },
+
+ #[snafu(display("Failed to encode arrow data, source: {}", source))]
+ EncodeArrow {
+ backtrace: Backtrace,
+ source: ArrowError,
+ },
+
+ #[snafu(display("Failed to parse schema, source: {}", source))]
+ ParseSchema {
+ backtrace: Backtrace,
+ source: datatypes::error::Error,
+ },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -378,12 +446,16 @@ impl ErrorExt for Error {
InvalidScanIndex { .. }
| BatchMissingColumn { .. }
| BatchMissingTimestamp { .. }
- | InvalidTimestamp { .. }
| InvalidProjection { .. }
| BuildBatch { .. }
| NotInSchemaToCompat { .. }
| WriteToOldVersion { .. }
- | IllegalTimestampColumnType { .. } => StatusCode::InvalidArguments,
+ | IllegalTimestampColumnType { .. }
+ | CreateRecordBatch { .. }
+ | RequestTooLarge { .. }
+ | TypeMismatch { .. }
+ | HasNull { .. }
+ | LenNotEquals { .. } => StatusCode::InvalidArguments,
Utf8 { .. }
| EncodeJson { .. }
@@ -402,7 +474,11 @@ impl ErrorExt for Error {
| CompatRead { .. }
| CreateDefaultToRead { .. }
| NoDefaultToRead { .. }
- | NewRecordBatch { .. } => StatusCode::Unexpected,
+ | NewRecordBatch { .. }
+ | BatchCorrupted { .. }
+ | DecodeArrow { .. }
+ | EncodeArrow { .. }
+ | ParseSchema { .. } => StatusCode::Unexpected,
FlushIo { .. }
| WriteParquet { .. }
@@ -420,11 +496,13 @@ impl ErrorExt for Error {
| InvalidRegionState { .. }
| ReadWal { .. } => StatusCode::StorageUnavailable,
+ UnknownColumn { .. } => StatusCode::TableColumnNotFound,
+
InvalidAlterRequest { source, .. }
| InvalidRegionDesc { source, .. }
| ConvertColumnSchema { source, .. } => source.status_code(),
PushBatch { source, .. } => source.status_code(),
- AddDefault { source, .. } => source.status_code(),
+ CreateDefault { source, .. } => source.status_code(),
ConvertChunk { source, .. } => source.status_code(),
MarkWalStable { source, .. } => source.status_code(),
}
@@ -441,9 +519,7 @@ impl ErrorExt for Error {
#[cfg(test)]
mod tests {
-
use common_error::prelude::StatusCode::*;
- use datatypes::arrow::error::ArrowError;
use snafu::GenerateImplicitData;
use super::*;
diff --git a/src/storage/src/memtable/inserter.rs b/src/storage/src/memtable/inserter.rs
index a876f7c4c4f9..24af9e8c6878 100644
--- a/src/storage/src/memtable/inserter.rs
+++ b/src/storage/src/memtable/inserter.rs
@@ -12,14 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use datatypes::vectors::VectorRef;
-use snafu::OptionExt;
-use store_api::storage::{ColumnDescriptor, OpType, SequenceNumber};
+use store_api::storage::{OpType, SequenceNumber};
use super::MemtableRef;
-use crate::error::{self, Result};
+use crate::error::Result;
use crate::memtable::KeyValues;
-use crate::write_batch::{Mutation, PutData, WriteBatch};
+use crate::write_batch::{Mutation, Payload};
/// Wraps logic of inserting key/values in [WriteBatch] to [Memtable].
pub struct Inserter {
@@ -37,21 +35,20 @@ impl Inserter {
}
}
- // TODO(yingwen): Can we take the WriteBatch?
- /// Insert write batch into memtable.
+ /// Insert write batch payload into memtable.
///
/// Won't do schema validation if not configured. Caller (mostly the [`RegionWriter`]) should ensure the
- /// schemas of `memtable` are consistent with `batch`'s.
- pub fn insert_memtable(&mut self, batch: &WriteBatch, memtable: &MemtableRef) -> Result<()> {
- if batch.is_empty() {
+ /// schemas of `memtable` are consistent with `payload`'s.
+ pub fn insert_memtable(&mut self, payload: &Payload, memtable: &MemtableRef) -> Result<()> {
+ if payload.is_empty() {
return Ok(());
}
// This function only makes effect in debug mode.
- validate_input_and_memtable_schemas(batch, memtable);
+ validate_input_and_memtable_schemas(payload, memtable);
// Enough to hold all key or value columns.
- let total_column_num = batch.schema().num_columns();
+ let total_column_num = payload.schema.num_columns();
// Reusable KeyValues buffer.
let mut kvs = KeyValues {
sequence: self.sequence,
@@ -61,12 +58,8 @@ impl Inserter {
values: Vec::with_capacity(total_column_num),
};
- for mutation in batch {
- match mutation {
- Mutation::Put(put_data) => {
- self.write_one_mutation(put_data, memtable, &mut kvs)?;
- }
- }
+ for mutation in &payload.mutations {
+ self.write_one_mutation(mutation, memtable, &mut kvs)?;
}
Ok(())
@@ -74,21 +67,22 @@ impl Inserter {
fn write_one_mutation(
&mut self,
- put_data: &PutData,
+ mutation: &Mutation,
memtable: &MemtableRef,
kvs: &mut KeyValues,
) -> Result<()> {
let schema = memtable.schema();
- let num_rows = put_data.num_rows();
+ let num_rows = mutation.record_batch.num_rows();
- kvs.reset(OpType::Put, self.index_in_batch);
+ kvs.reset(mutation.op_type, self.index_in_batch);
- for key_col in schema.row_key_columns() {
- clone_put_data_column_to(put_data, &key_col.desc, &mut kvs.keys)?;
+ for key_idx in schema.row_key_indices() {
+ kvs.keys.push(mutation.record_batch.column(key_idx).clone());
}
- for value_col in schema.value_columns() {
- clone_put_data_column_to(put_data, &value_col.desc, &mut kvs.values)?;
+ for value_idx in schema.value_indices() {
+ kvs.values
+ .push(mutation.record_batch.column(value_idx).clone());
}
memtable.write(kvs)?;
@@ -99,30 +93,20 @@ impl Inserter {
}
}
-fn validate_input_and_memtable_schemas(batch: &WriteBatch, memtable: &MemtableRef) {
+fn validate_input_and_memtable_schemas(payload: &Payload, memtable: &MemtableRef) {
if cfg!(debug_assertions) {
- let batch_schema = batch.schema();
+ let payload_schema = &payload.schema;
let memtable_schema = memtable.schema();
let user_schema = memtable_schema.user_schema();
- debug_assert_eq!(batch_schema.version(), user_schema.version());
+ debug_assert_eq!(payload_schema.version(), user_schema.version());
// Only validate column schemas.
- debug_assert_eq!(batch_schema.column_schemas(), user_schema.column_schemas());
+ debug_assert_eq!(
+ payload_schema.column_schemas(),
+ user_schema.column_schemas()
+ );
}
}
-fn clone_put_data_column_to(
- put_data: &PutData,
- desc: &ColumnDescriptor,
- target: &mut Vec<VectorRef>,
-) -> Result<()> {
- let vector = put_data
- .column_by_name(&desc.name)
- .context(error::BatchMissingColumnSnafu { column: &desc.name })?;
- target.push(vector.clone());
-
- Ok(())
-}
-
/// Holds `start` and `end` indexes to get a slice `[start, end)` from the vector whose
/// timestamps belong to same time range at `range_index`.
#[derive(Debug, PartialEq)]
@@ -135,13 +119,14 @@ struct SliceIndex {
#[cfg(test)]
mod tests {
+ use std::collections::HashMap;
use std::sync::Arc;
use common_time::timestamp::Timestamp;
use datatypes::type_id::LogicalTypeId;
use datatypes::value::Value;
- use datatypes::vectors::{Int64Vector, TimestampMillisecondVector};
- use store_api::storage::{PutOperation, WriteRequest};
+ use datatypes::vectors::{Int64Vector, TimestampMillisecondVector, VectorRef};
+ use store_api::storage::WriteRequest;
use super::*;
use crate::memtable::{DefaultMemtableBuilder, IterContext, MemtableBuilder};
@@ -149,6 +134,7 @@ mod tests {
use crate::schema::RegionSchemaRef;
use crate::test_util::descriptor_util::RegionDescBuilder;
use crate::test_util::write_batch_util;
+ use crate::write_batch::WriteBatch;
fn new_test_write_batch() -> WriteBatch {
write_batch_util::new_write_batch(
@@ -172,11 +158,11 @@ mod tests {
}
fn put_batch(batch: &mut WriteBatch, data: &[(i64, Option<i64>)]) {
- let mut put_data = PutData::with_num_columns(2);
+ let mut put_data = HashMap::with_capacity(2);
let ts = TimestampMillisecondVector::from_values(data.iter().map(|v| v.0));
- put_data.add_key_column("ts", Arc::new(ts)).unwrap();
+ put_data.insert("ts".to_string(), Arc::new(ts) as VectorRef);
let value = Int64Vector::from(data.iter().map(|v| v.1).collect::<Vec<_>>());
- put_data.add_value_column("value", Arc::new(value)).unwrap();
+ put_data.insert("value".to_string(), Arc::new(value) as VectorRef);
batch.put(put_data).unwrap();
}
@@ -232,7 +218,9 @@ mod tests {
],
);
- inserter.insert_memtable(&batch, &mutable_memtable).unwrap();
+ inserter
+ .insert_memtable(batch.payload(), &mutable_memtable)
+ .unwrap();
check_memtable_content(
&mutable_memtable,
sequence,
diff --git a/src/storage/src/proto.rs b/src/storage/src/proto.rs
index 6aa66c03a0bf..72e922055412 100644
--- a/src/storage/src/proto.rs
+++ b/src/storage/src/proto.rs
@@ -13,4 +13,3 @@
// limitations under the License.
pub mod wal;
-pub mod write_batch;
diff --git a/src/storage/src/proto/wal.rs b/src/storage/src/proto/wal.rs
index f410fb6639b9..b1876312cd62 100644
--- a/src/storage/src/proto/wal.rs
+++ b/src/storage/src/proto/wal.rs
@@ -15,13 +15,16 @@
#![allow(clippy::all)]
tonic::include_proto!("greptime.storage.wal.v1");
-use crate::write_batch::{Mutation, WriteBatch};
+use store_api::storage::OpType;
-pub fn gen_mutation_types(write_batch: &WriteBatch) -> Vec<i32> {
- write_batch
+use crate::write_batch::Payload;
+
+pub fn gen_mutation_types(payload: &Payload) -> Vec<i32> {
+ payload
+ .mutations
.iter()
- .map(|m| match m {
- Mutation::Put(_) => MutationType::Put.into(),
+ .map(|m| match m.op_type {
+ OpType::Put => MutationType::Put.into(),
})
.collect::<Vec<_>>()
}
diff --git a/src/storage/src/proto/write_batch.rs b/src/storage/src/proto/write_batch.rs
deleted file mode 100644
index d710df9dc2a0..000000000000
--- a/src/storage/src/proto/write_batch.rs
+++ /dev/null
@@ -1,391 +0,0 @@
-// Copyright 2022 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#![allow(clippy::all)]
-tonic::include_proto!("greptime.storage.write_batch.v1");
-
-use std::sync::Arc;
-
-use common_base::BitVec;
-use common_error::prelude::*;
-use datatypes::data_type::ConcreteDataType;
-use datatypes::prelude::{ScalarVector, ScalarVectorBuilder};
-use datatypes::schema;
-use datatypes::types::TimestampType;
-use datatypes::vectors::{
- BinaryVector, BinaryVectorBuilder, BooleanVector, BooleanVectorBuilder, DateTimeVector,
- DateTimeVectorBuilder, DateVector, DateVectorBuilder, Float32Vector, Float32VectorBuilder,
- Float64Vector, Float64VectorBuilder, Int16Vector, Int16VectorBuilder, Int32Vector,
- Int32VectorBuilder, Int64Vector, Int64VectorBuilder, Int8Vector, Int8VectorBuilder,
- StringVector, StringVectorBuilder, TimestampMicrosecondVector,
- TimestampMicrosecondVectorBuilder, TimestampMillisecondVector,
- TimestampMillisecondVectorBuilder, TimestampNanosecondVector, TimestampNanosecondVectorBuilder,
- TimestampSecondVector, TimestampSecondVectorBuilder, UInt16Vector, UInt16VectorBuilder,
- UInt32Vector, UInt32VectorBuilder, UInt64Vector, UInt64VectorBuilder, UInt8Vector,
- UInt8VectorBuilder, Vector, VectorRef,
-};
-use paste::paste;
-use snafu::OptionExt;
-
-#[derive(Debug, Snafu)]
-pub enum Error {
- #[snafu(display("Failed to convert datafusion type: {}", from))]
- Conversion { from: String, backtrace: Backtrace },
-
- #[snafu(display("Empty column values read"))]
- EmptyColumnValues { backtrace: Backtrace },
-
- #[snafu(display("Invalid data type: {}", data_type))]
- InvalidDataType {
- data_type: i32,
- backtrace: Backtrace,
- },
-
- #[snafu(display("Failed to convert schema, source: {}", source))]
- ConvertSchema {
- #[snafu(backtrace)]
- source: datatypes::error::Error,
- },
-}
-
-pub type Result<T> = std::result::Result<T, Error>;
-
-impl TimestampIndex {
- pub fn new(value: u64) -> Self {
- Self { value }
- }
-}
-
-impl From<&schema::SchemaRef> for Schema {
- fn from(schema: &schema::SchemaRef) -> Self {
- let column_schemas = schema
- .column_schemas()
- .iter()
- .map(|column_schema| column_schema.into())
- .collect();
-
- Schema {
- column_schemas,
- timestamp_index: schema
- .timestamp_index()
- .map(|index| TimestampIndex::new(index as u64)),
- }
- }
-}
-
-impl TryFrom<Schema> for schema::SchemaRef {
- type Error = Error;
-
- fn try_from(schema: Schema) -> Result<Self> {
- let column_schemas = schema
- .column_schemas
- .iter()
- .map(schema::ColumnSchema::try_from)
- .collect::<Result<Vec<_>>>()?;
-
- let schema = Arc::new(
- schema::SchemaBuilder::try_from(column_schemas)
- .context(ConvertSchemaSnafu)?
- .build()
- .context(ConvertSchemaSnafu)?,
- );
-
- Ok(schema)
- }
-}
-
-impl From<&schema::ColumnSchema> for ColumnSchema {
- fn from(cs: &schema::ColumnSchema) -> Self {
- Self {
- name: cs.name.clone(),
- data_type: DataType::from(&cs.data_type).into(),
- is_nullable: cs.is_nullable(),
- is_time_index: cs.is_time_index(),
- }
- }
-}
-
-impl TryFrom<&ColumnSchema> for schema::ColumnSchema {
- type Error = Error;
-
- fn try_from(column_schema: &ColumnSchema) -> Result<Self> {
- if let Some(data_type) = DataType::from_i32(column_schema.data_type) {
- Ok(schema::ColumnSchema::new(
- column_schema.name.clone(),
- data_type.into(),
- column_schema.is_nullable,
- )
- .with_time_index(column_schema.is_time_index))
- } else {
- InvalidDataTypeSnafu {
- data_type: column_schema.data_type,
- }
- .fail()
- }
- }
-}
-
-impl From<&ConcreteDataType> for DataType {
- fn from(data_type: &ConcreteDataType) -> Self {
- match data_type {
- ConcreteDataType::Boolean(_) => DataType::Boolean,
- ConcreteDataType::Int8(_) => DataType::Int8,
- ConcreteDataType::Int16(_) => DataType::Int16,
- ConcreteDataType::Int32(_) => DataType::Int32,
- ConcreteDataType::Int64(_) => DataType::Int64,
- ConcreteDataType::UInt8(_) => DataType::Uint8,
- ConcreteDataType::UInt16(_) => DataType::Uint16,
- ConcreteDataType::UInt32(_) => DataType::Uint32,
- ConcreteDataType::UInt64(_) => DataType::Uint64,
- ConcreteDataType::Float32(_) => DataType::Float64,
- ConcreteDataType::Float64(_) => DataType::Float64,
- ConcreteDataType::String(_) => DataType::String,
- ConcreteDataType::Null(_) => DataType::Null,
- ConcreteDataType::Binary(_) => DataType::Binary,
- ConcreteDataType::Timestamp(unit) => match unit {
- TimestampType::Second(_) => DataType::TimestampSecond,
- TimestampType::Millisecond(_) => DataType::TimestampMillisecond,
- TimestampType::Microsecond(_) => DataType::TimestampMicrosecond,
- TimestampType::Nanosecond(_) => DataType::TimestampNanosecond,
- },
- ConcreteDataType::Date(_)
- | ConcreteDataType::DateTime(_)
- | ConcreteDataType::List(_) => {
- // TODO(jiachun): Maybe support some composite types in the future , such as list, struct, etc.
- unimplemented!("data type {:?} is not supported", data_type)
- }
- }
- }
-}
-
-impl From<DataType> for ConcreteDataType {
- fn from(data_type: DataType) -> Self {
- match data_type {
- DataType::Boolean => ConcreteDataType::boolean_datatype(),
- DataType::Int8 => ConcreteDataType::int8_datatype(),
- DataType::Int16 => ConcreteDataType::int16_datatype(),
- DataType::Int32 => ConcreteDataType::int32_datatype(),
- DataType::Int64 => ConcreteDataType::int64_datatype(),
- DataType::Uint8 => ConcreteDataType::uint8_datatype(),
- DataType::Uint16 => ConcreteDataType::uint16_datatype(),
- DataType::Uint32 => ConcreteDataType::uint32_datatype(),
- DataType::Uint64 => ConcreteDataType::uint64_datatype(),
- DataType::Float32 => ConcreteDataType::float32_datatype(),
- DataType::Float64 => ConcreteDataType::float64_datatype(),
- DataType::String => ConcreteDataType::string_datatype(),
- DataType::Binary => ConcreteDataType::binary_datatype(),
- DataType::Null => ConcreteDataType::null_datatype(),
- DataType::Date => ConcreteDataType::date_datatype(),
- DataType::Datetime => ConcreteDataType::datetime_datatype(),
- DataType::TimestampSecond => ConcreteDataType::timestamp_second_datatype(),
- DataType::TimestampMillisecond => ConcreteDataType::timestamp_millisecond_datatype(),
- DataType::TimestampMicrosecond => ConcreteDataType::timestamp_microsecond_datatype(),
- DataType::TimestampNanosecond => ConcreteDataType::timestamp_nanosecond_datatype(),
- }
- }
-}
-
-#[macro_export]
-macro_rules! gen_columns {
- ($key: tt, $vec_ty: ty, $vari: ident, $cast: expr) => {
- paste! {
- pub fn [<gen_columns_ $key>](vector: &VectorRef) -> Result<Column> {
- let mut column = Column::default();
- let mut values = Values::default();
- let vector_ref =
- vector
- .as_any()
- .downcast_ref::<$vec_ty>()
- .with_context(|| ConversionSnafu {
- from: std::format!("{:?}", vector.as_ref().data_type()),
- })?;
- let mut bits: Option<BitVec> = None;
-
- vector_ref
- .iter_data()
- .enumerate()
- .for_each(|(i, value)| match value {
- Some($vari) => values.[<$key _values>].push($cast),
- None => {
- if (bits.is_none()) {
- bits = Some(BitVec::repeat(false, vector_ref.len()));
- }
- bits.as_mut().map(|x| x.set(i, true));
- }
- });
-
- let null_mask = if let Some(bits) = bits {
- bits.into_vec()
- } else {
- Default::default()
- };
-
- column.values = Some(values);
- column.value_null_mask = null_mask;
- column.num_rows = vector_ref.len() as u64;
-
- Ok(column)
- }
- }
- };
-}
-
-gen_columns!(i8, Int8Vector, v, v as i32);
-gen_columns!(i16, Int16Vector, v, v as i32);
-gen_columns!(i32, Int32Vector, v, v as i32);
-gen_columns!(i64, Int64Vector, v, v as i64);
-gen_columns!(u8, UInt8Vector, v, v as u32);
-gen_columns!(u16, UInt16Vector, v, v as u32);
-gen_columns!(u32, UInt32Vector, v, v as u32);
-gen_columns!(u64, UInt64Vector, v, v as u64);
-gen_columns!(f32, Float32Vector, v, v);
-gen_columns!(f64, Float64Vector, v, v);
-gen_columns!(bool, BooleanVector, v, v);
-gen_columns!(binary, BinaryVector, v, v.to_vec());
-gen_columns!(string, StringVector, v, v.to_string());
-gen_columns!(date, DateVector, v, v.val());
-gen_columns!(datetime, DateTimeVector, v, v.val());
-gen_columns!(ts_second, TimestampSecondVector, v, v.into());
-gen_columns!(ts_millisecond, TimestampMillisecondVector, v, v.into());
-gen_columns!(ts_microsecond, TimestampMicrosecondVector, v, v.into());
-gen_columns!(ts_nanosecond, TimestampNanosecondVector, v, v.into());
-
-#[macro_export]
-macro_rules! gen_put_data {
- ($key: tt, $builder_type: ty, $vari: ident, $cast: expr) => {
- paste! {
- pub fn [<gen_put_data_ $key>](column: Column) -> Result<VectorRef> {
- let values = column.values.context(EmptyColumnValuesSnafu {})?;
- let mut vector_iter = values.[<$key _values>].iter();
- let num_rows = column.num_rows as usize;
- let mut builder = <$builder_type>::with_capacity(num_rows);
-
- if column.value_null_mask.is_empty() {
- (0..num_rows)
- .for_each(|_| builder.push(vector_iter.next().map(|$vari| $cast)));
- } else {
- BitVec::from_vec(column.value_null_mask)
- .into_iter()
- .take(num_rows)
- .for_each(|is_null| {
- if is_null {
- builder.push(None);
- } else {
- builder.push(vector_iter.next().map(|$vari| $cast));
- }
- });
- }
-
-
- Ok(Arc::new(builder.finish()))
- }
- }
- };
-}
-
-gen_put_data!(i8, Int8VectorBuilder, v, *v as i8);
-gen_put_data!(i16, Int16VectorBuilder, v, *v as i16);
-gen_put_data!(i32, Int32VectorBuilder, v, *v);
-gen_put_data!(i64, Int64VectorBuilder, v, *v);
-gen_put_data!(u8, UInt8VectorBuilder, v, *v as u8);
-gen_put_data!(u16, UInt16VectorBuilder, v, *v as u16);
-gen_put_data!(u32, UInt32VectorBuilder, v, *v as u32);
-gen_put_data!(u64, UInt64VectorBuilder, v, *v as u64);
-gen_put_data!(f32, Float32VectorBuilder, v, *v as f32);
-gen_put_data!(f64, Float64VectorBuilder, v, *v as f64);
-gen_put_data!(bool, BooleanVectorBuilder, v, *v);
-gen_put_data!(binary, BinaryVectorBuilder, v, v.as_slice());
-gen_put_data!(string, StringVectorBuilder, v, v.as_str());
-gen_put_data!(date, DateVectorBuilder, v, (*v).into());
-gen_put_data!(datetime, DateTimeVectorBuilder, v, (*v).into());
-gen_put_data!(ts_second, TimestampSecondVectorBuilder, v, (*v).into());
-gen_put_data!(
- ts_millisecond,
- TimestampMillisecondVectorBuilder,
- v,
- (*v).into()
-);
-gen_put_data!(
- ts_microsecond,
- TimestampMicrosecondVectorBuilder,
- v,
- (*v).into()
-);
-gen_put_data!(
- ts_nanosecond,
- TimestampNanosecondVectorBuilder,
- v,
- (*v).into()
-);
-
-pub fn gen_columns(vector: &VectorRef) -> Result<Column> {
- let data_type = vector.data_type();
- match data_type {
- ConcreteDataType::Boolean(_) => gen_columns_bool(vector),
- ConcreteDataType::Int8(_) => gen_columns_i8(vector),
- ConcreteDataType::Int16(_) => gen_columns_i16(vector),
- ConcreteDataType::Int32(_) => gen_columns_i32(vector),
- ConcreteDataType::Int64(_) => gen_columns_i64(vector),
- ConcreteDataType::UInt8(_) => gen_columns_u8(vector),
- ConcreteDataType::UInt16(_) => gen_columns_u16(vector),
- ConcreteDataType::UInt32(_) => gen_columns_u32(vector),
- ConcreteDataType::UInt64(_) => gen_columns_u64(vector),
- ConcreteDataType::Float32(_) => gen_columns_f32(vector),
- ConcreteDataType::Float64(_) => gen_columns_f64(vector),
- ConcreteDataType::Binary(_) => gen_columns_binary(vector),
- ConcreteDataType::String(_) => gen_columns_string(vector),
- ConcreteDataType::Date(_) => gen_columns_date(vector),
- ConcreteDataType::DateTime(_) => gen_columns_datetime(vector),
- ConcreteDataType::Timestamp(t) => match t {
- TimestampType::Second(_) => gen_columns_ts_second(vector),
- TimestampType::Millisecond(_) => gen_columns_ts_millisecond(vector),
- TimestampType::Microsecond(_) => gen_columns_ts_microsecond(vector),
- TimestampType::Nanosecond(_) => gen_columns_ts_nanosecond(vector),
- },
- ConcreteDataType::Null(_) | ConcreteDataType::List(_) => {
- // TODO(jiachun): Maybe support some composite types in the future, such as list, struct, etc.
- unimplemented!("data type {:?} is not supported", data_type)
- }
- }
-}
-
-pub fn gen_put_data_vector(data_type: ConcreteDataType, column: Column) -> Result<VectorRef> {
- match data_type {
- ConcreteDataType::Boolean(_) => gen_put_data_bool(column),
- ConcreteDataType::Int8(_) => gen_put_data_i8(column),
- ConcreteDataType::Int16(_) => gen_put_data_i16(column),
- ConcreteDataType::Int32(_) => gen_put_data_i32(column),
- ConcreteDataType::Int64(_) => gen_put_data_i64(column),
- ConcreteDataType::UInt8(_) => gen_put_data_u8(column),
- ConcreteDataType::UInt16(_) => gen_put_data_u16(column),
- ConcreteDataType::UInt32(_) => gen_put_data_u32(column),
- ConcreteDataType::UInt64(_) => gen_put_data_u64(column),
- ConcreteDataType::Float32(_) => gen_put_data_f32(column),
- ConcreteDataType::Float64(_) => gen_put_data_f64(column),
- ConcreteDataType::Binary(_) => gen_put_data_binary(column),
- ConcreteDataType::String(_) => gen_put_data_string(column),
- ConcreteDataType::Date(_) => gen_put_data_date(column),
- ConcreteDataType::DateTime(_) => gen_put_data_datetime(column),
- ConcreteDataType::Timestamp(t) => match t {
- TimestampType::Second(_) => gen_put_data_ts_second(column),
- TimestampType::Millisecond(_) => gen_put_data_ts_millisecond(column),
- TimestampType::Microsecond(_) => gen_put_data_ts_microsecond(column),
- TimestampType::Nanosecond(_) => gen_put_data_ts_nanosecond(column),
- },
- ConcreteDataType::Null(_) | ConcreteDataType::List(_) => {
- // TODO(jiachun): Maybe support some composite types in the future, such as list, struct, etc.
- unimplemented!("data type {:?} is not supported", data_type)
- }
- }
-}
diff --git a/src/storage/src/region/tests.rs b/src/storage/src/region/tests.rs
index 798eca19c10b..d14040ecdfae 100644
--- a/src/storage/src/region/tests.rs
+++ b/src/storage/src/region/tests.rs
@@ -19,17 +19,19 @@ mod basic;
mod flush;
mod projection;
+use std::collections::HashMap;
+
use common_telemetry::logging;
use datatypes::prelude::{ScalarVector, WrapperType};
use datatypes::timestamp::TimestampMillisecond;
use datatypes::type_id::LogicalTypeId;
-use datatypes::vectors::{Int64Vector, TimestampMillisecondVector};
+use datatypes::vectors::{Int64Vector, TimestampMillisecondVector, VectorRef};
use log_store::fs::log::LocalFileLogStore;
use log_store::fs::noop::NoopLogStore;
use object_store::backend::fs;
use object_store::ObjectStore;
use store_api::storage::{
- consts, Chunk, ChunkReader, PutOperation, ScanRequest, SequenceNumber, Snapshot, WriteRequest,
+ consts, Chunk, ChunkReader, ScanRequest, SequenceNumber, Snapshot, WriteRequest,
};
use tempdir::TempDir;
@@ -39,7 +41,6 @@ use crate::manifest::test_utils::*;
use crate::memtable::DefaultMemtableBuilder;
use crate::test_util::descriptor_util::RegionDescBuilder;
use crate::test_util::{self, config_util, schema_util, write_batch_util};
-use crate::write_batch::PutData;
/// Create metadata of a region with schema: (timestamp, v0).
pub fn new_metadata(region_name: &str, enable_version_column: bool) -> RegionMetadata {
@@ -156,17 +157,18 @@ fn new_write_batch_for_test(enable_version_column: bool) -> WriteBatch {
}
}
-fn new_put_data(data: &[(TimestampMillisecond, Option<i64>)]) -> PutData {
- let mut put_data = PutData::with_num_columns(2);
+fn new_put_data(data: &[(TimestampMillisecond, Option<i64>)]) -> HashMap<String, VectorRef> {
+ let mut put_data = HashMap::with_capacity(2);
let timestamps =
TimestampMillisecondVector::from_vec(data.iter().map(|v| v.0.into()).collect());
let values = Int64Vector::from_owned_iterator(data.iter().map(|kv| kv.1));
- put_data
- .add_key_column(test_util::TIMESTAMP_NAME, Arc::new(timestamps))
- .unwrap();
- put_data.add_value_column("v0", Arc::new(values)).unwrap();
+ put_data.insert(
+ test_util::TIMESTAMP_NAME.to_string(),
+ Arc::new(timestamps) as VectorRef,
+ );
+ put_data.insert("v0".to_string(), Arc::new(values) as VectorRef);
put_data
}
diff --git a/src/storage/src/region/tests/alter.rs b/src/storage/src/region/tests/alter.rs
index 4372e96c9506..238f2bd09471 100644
--- a/src/storage/src/region/tests/alter.rs
+++ b/src/storage/src/region/tests/alter.rs
@@ -12,17 +12,17 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::collections::BTreeMap;
+use std::collections::{BTreeMap, HashMap};
use std::sync::Arc;
use datatypes::prelude::*;
use datatypes::timestamp::TimestampMillisecond;
-use datatypes::vectors::{Int64Vector, TimestampMillisecondVector};
+use datatypes::vectors::{Int64Vector, TimestampMillisecondVector, VectorRef};
use log_store::fs::log::LocalFileLogStore;
use store_api::storage::{
AddColumn, AlterOperation, AlterRequest, Chunk, ChunkReader, ColumnDescriptor,
- ColumnDescriptorBuilder, ColumnId, PutOperation, Region, RegionMeta, ScanRequest, SchemaRef,
- Snapshot, WriteRequest, WriteResponse,
+ ColumnDescriptorBuilder, ColumnId, Region, RegionMeta, ScanRequest, SchemaRef, Snapshot,
+ WriteRequest, WriteResponse,
};
use tempdir::TempDir;
@@ -31,7 +31,6 @@ use crate::region::{OpenOptions, RawRegionMetadata, RegionImpl, RegionMetadata};
use crate::test_util;
use crate::test_util::config_util;
use crate::test_util::descriptor_util::RegionDescBuilder;
-use crate::write_batch::PutData;
const REGION_NAME: &str = "region-alter-0";
@@ -69,8 +68,8 @@ impl DataRow {
}
}
-fn new_put_data(data: &[DataRow]) -> PutData {
- let mut put_data = PutData::with_num_columns(4);
+fn new_put_data(data: &[DataRow]) -> HashMap<String, VectorRef> {
+ let mut put_data = HashMap::with_capacity(4);
let keys = Int64Vector::from(data.iter().map(|v| v.key).collect::<Vec<_>>());
let timestamps = TimestampMillisecondVector::from(
data.iter()
@@ -80,13 +79,14 @@ fn new_put_data(data: &[DataRow]) -> PutData {
let values1 = Int64Vector::from(data.iter().map(|kv| kv.v0).collect::<Vec<_>>());
let values2 = Int64Vector::from(data.iter().map(|kv| kv.v1).collect::<Vec<_>>());
- put_data.add_key_column("k0", Arc::new(keys)).unwrap();
- put_data
- .add_key_column(test_util::TIMESTAMP_NAME, Arc::new(timestamps))
- .unwrap();
+ put_data.insert("k0".to_string(), Arc::new(keys) as VectorRef);
+ put_data.insert(
+ test_util::TIMESTAMP_NAME.to_string(),
+ Arc::new(timestamps) as VectorRef,
+ );
- put_data.add_value_column("v0", Arc::new(values1)).unwrap();
- put_data.add_value_column("v1", Arc::new(values2)).unwrap();
+ put_data.insert("v0".to_string(), Arc::new(values1) as VectorRef);
+ put_data.insert("v1".to_string(), Arc::new(values2) as VectorRef);
put_data
}
diff --git a/src/storage/src/region/tests/projection.rs b/src/storage/src/region/tests/projection.rs
index 98d0e5026adc..8a9536f5c2bb 100644
--- a/src/storage/src/region/tests/projection.rs
+++ b/src/storage/src/region/tests/projection.rs
@@ -12,23 +12,23 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::collections::HashMap;
use std::sync::Arc;
use datatypes::data_type::ConcreteDataType;
use datatypes::prelude::ScalarVector;
use datatypes::type_id::LogicalTypeId;
-use datatypes::vectors::{Int64Vector, TimestampMillisecondVector};
+use datatypes::vectors::{Int64Vector, TimestampMillisecondVector, VectorRef};
use log_store::fs::log::LocalFileLogStore;
use store_api::logstore::LogStore;
use store_api::storage::{
- Chunk, ChunkReader, PutOperation, ReadContext, Region, ScanRequest, Snapshot, WriteContext,
- WriteRequest,
+ Chunk, ChunkReader, ReadContext, Region, ScanRequest, Snapshot, WriteContext, WriteRequest,
};
use tempdir::TempDir;
use crate::region::{RegionImpl, RegionMetadata};
use crate::test_util::{self, config_util, descriptor_util, write_batch_util};
-use crate::write_batch::{PutData, WriteBatch};
+use crate::write_batch::WriteBatch;
/// Create metadata with schema (k0, timestamp, v0, v1)
fn new_metadata(region_name: &str) -> RegionMetadata {
@@ -60,20 +60,31 @@ fn new_write_batch_for_test() -> WriteBatch {
/// v0: [initial_value, ...., initial_value]
/// v1: [initial_value, ..., initial_value + len - 1]
/// ```
-fn new_put_data(len: usize, key_start: i64, ts_start: i64, initial_value: i64) -> PutData {
- let mut put_data = PutData::with_num_columns(4);
-
- let k0 = Int64Vector::from_values((0..len).map(|v| key_start + v as i64));
- let ts = TimestampMillisecondVector::from_values((0..len).map(|v| ts_start + v as i64));
- let v0 = Int64Vector::from_values(std::iter::repeat(initial_value).take(len));
- let v1 = Int64Vector::from_values((0..len).map(|v| initial_value + v as i64));
-
- put_data.add_key_column("k0", Arc::new(k0)).unwrap();
- put_data
- .add_key_column(test_util::TIMESTAMP_NAME, Arc::new(ts))
- .unwrap();
- put_data.add_value_column("v0", Arc::new(v0)).unwrap();
- put_data.add_value_column("v1", Arc::new(v1)).unwrap();
+fn new_put_data(
+ len: usize,
+ key_start: i64,
+ ts_start: i64,
+ initial_value: i64,
+) -> HashMap<String, VectorRef> {
+ let mut put_data = HashMap::with_capacity(4);
+
+ let k0 = Arc::new(Int64Vector::from_values(
+ (0..len).map(|v| key_start + v as i64),
+ )) as VectorRef;
+ let ts = Arc::new(TimestampMillisecondVector::from_values(
+ (0..len).map(|v| ts_start + v as i64),
+ )) as VectorRef;
+ let v0 = Arc::new(Int64Vector::from_values(
+ std::iter::repeat(initial_value).take(len),
+ )) as VectorRef;
+ let v1 = Arc::new(Int64Vector::from_values(
+ (0..len).map(|v| initial_value + v as i64),
+ )) as VectorRef;
+
+ put_data.insert("k0".to_string(), k0);
+ put_data.insert(test_util::TIMESTAMP_NAME.to_string(), ts);
+ put_data.insert("v0".to_string(), v0);
+ put_data.insert("v1".to_string(), v1);
put_data
}
diff --git a/src/storage/src/region/writer.rs b/src/storage/src/region/writer.rs
index c0a8cdc46493..f40f53fa325b 100644
--- a/src/storage/src/region/writer.rs
+++ b/src/storage/src/region/writer.rs
@@ -35,7 +35,7 @@ use crate::region::{RecoverdMetadata, RecoveredMetadataMap, RegionManifest, Shar
use crate::schema::compat::CompatWrite;
use crate::sst::AccessLayerRef;
use crate::version::{VersionControl, VersionControlRef, VersionEdit};
-use crate::wal::{Payload, Wal};
+use crate::wal::Wal;
use crate::write_batch::WriteBatch;
pub type RegionWriterRef = Arc<RegionWriter>;
@@ -216,8 +216,7 @@ impl RegionWriter {
version_control.set_committed_sequence(next_sequence);
let header = WalHeader::with_last_manifest_version(manifest_version);
- wal.write_to_wal(next_sequence, header, Payload::None)
- .await?;
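+        // Passing `None` writes a header-only WAL entry; it records the new manifest version without any mutation payload.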
+ wal.write_to_wal(next_sequence, header, None).await?;
Ok(())
}
@@ -311,16 +310,12 @@ impl WriterInner {
let wal_header = WalHeader::with_last_manifest_version(version.manifest_version());
writer_ctx
.wal
- .write_to_wal(
- next_sequence,
- wal_header,
- Payload::WriteBatchArrow(&request),
- )
+ .write_to_wal(next_sequence, wal_header, Some(request.payload()))
.await?;
// Insert batch into memtable.
let mut inserter = Inserter::new(next_sequence);
- inserter.insert_memtable(&request, version.mutable_memtable())?;
+ inserter.insert_memtable(request.payload(), version.mutable_memtable())?;
// Update committed_sequence to make current batch visible. The `&mut self` of WriterInner
// guarantees the writer is exclusive.
@@ -350,7 +345,7 @@ impl WriterInner {
// Read starts from the first entry after last flushed entry, so the start sequence
// should be flushed_sequence + 1.
let mut stream = writer_ctx.wal.read_from_wal(flushed_sequence + 1).await?;
- while let Some((req_sequence, _header, request)) = stream.try_next().await? {
+ while let Some((req_sequence, _header, payload)) = stream.try_next().await? {
while let Some((sequence_before_alter, _)) = next_apply_metadata {
// There might be multiple metadata changes to be applied, so a loop is necessary.
if req_sequence > sequence_before_alter {
@@ -370,7 +365,7 @@ impl WriterInner {
}
}
- if let Some(request) = request {
+ if let Some(payload) = payload {
num_requests += 1;
// Note that memtables of `Version` may be updated during replay.
let version = version_control.current();
@@ -398,7 +393,7 @@ impl WriterInner {
// TODO(yingwen): Trigger flush if the size of memtables reach the flush threshold to avoid
// out of memory during replay, but we need to do it carefully to avoid dead lock.
let mut inserter = Inserter::new(last_sequence);
- inserter.insert_memtable(&request, version.mutable_memtable())?;
+ inserter.insert_memtable(&payload, version.mutable_memtable())?;
}
}
diff --git a/src/storage/src/schema/region.rs b/src/storage/src/schema/region.rs
index b6c0ef2a4e96..3b1d51203514 100644
--- a/src/storage/src/schema/region.rs
+++ b/src/storage/src/schema/region.rs
@@ -121,6 +121,11 @@ impl RegionSchema {
self.store_schema.row_key_indices()
}
+ #[inline]
+ pub(crate) fn value_indices(&self) -> impl Iterator<Item = usize> {
+ self.store_schema.value_indices()
+ }
+
#[inline]
pub(crate) fn column_metadata(&self, idx: usize) -> &ColumnMetadata {
self.columns.column_metadata(idx)
diff --git a/src/storage/src/schema/store.rs b/src/storage/src/schema/store.rs
index e20a5c177059..6dc86ef9104d 100644
--- a/src/storage/src/schema/store.rs
+++ b/src/storage/src/schema/store.rs
@@ -151,6 +151,11 @@ impl StoreSchema {
0..self.row_key_end
}
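+    /// Returns the indices of the value (field) columns, which are laid out right after the row key columns.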
+ #[inline]
+ pub(crate) fn value_indices(&self) -> impl Iterator<Item = usize> {
+ self.row_key_end..self.user_column_end
+ }
+
#[inline]
pub(crate) fn column_name(&self, idx: usize) -> &str {
&self.schema.column_schemas()[idx].name
@@ -288,6 +293,8 @@ mod tests {
assert_eq!(4, store_schema.op_type_index());
let row_key_indices: Vec<_> = store_schema.row_key_indices().collect();
assert_eq!([0, 1], &row_key_indices[..]);
+ let value_indices: Vec<_> = store_schema.value_indices().collect();
+ assert_eq!([2], &value_indices[..]);
// Test batch and chunk conversion.
let batch = tests::new_batch();
diff --git a/src/storage/src/wal.rs b/src/storage/src/wal.rs
index 10d3882cc858..575bd2380820 100644
--- a/src/storage/src/wal.rs
+++ b/src/storage/src/wal.rs
@@ -25,12 +25,9 @@ use store_api::storage::{RegionId, SequenceNumber};
use crate::codec::{Decoder, Encoder};
use crate::error::{self, Error, MarkWalStableSnafu, Result};
-use crate::proto::wal::{self, PayloadType, WalHeader};
-use crate::write_batch::codec::{
- WriteBatchArrowDecoder, WriteBatchArrowEncoder, WriteBatchProtobufDecoder,
- WriteBatchProtobufEncoder,
-};
-use crate::write_batch::WriteBatch;
+use crate::proto::wal::{self, WalHeader};
+use crate::write_batch::codec::{PayloadDecoder, PayloadEncoder};
+use crate::write_batch::Payload;
#[derive(Debug)]
pub struct Wal<S: LogStore> {
@@ -39,9 +36,8 @@ pub struct Wal<S: LogStore> {
store: Arc<S>,
}
-pub type WriteBatchStream<'a> = Pin<
- Box<dyn Stream<Item = Result<(SequenceNumber, WalHeader, Option<WriteBatch>)>> + Send + 'a>,
->;
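+/// Stream of WAL entries, yielding the sequence number, decoded header and optional payload of each entry.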
+pub type PayloadStream<'a> =
+ Pin<Box<dyn Stream<Item = Result<(SequenceNumber, WalHeader, Option<Payload>)>> + Send + 'a>>;
// Wal should be cheap to clone, so avoid holding things like String, Vec.
impl<S: LogStore> Clone for Wal<S> {
@@ -85,12 +81,12 @@ impl<S: LogStore> Wal<S> {
///
/// ```text
/// | |
- /// |--------------------------> Header Len <-----------------------------| Arrow/Protobuf/... encoded
+ /// |--------------------------> Header Len <-----------------------------| Arrow IPC format
/// | |
/// v v
/// +---------------------+----------------------------------------------------+--------------+-------------+--------------+
/// | | Header | | | |
- /// | Header Len(varint) | (last_manifest_version + mutation_types + ...) | Data Chunk0 | Data Chunk1 | ... |
+ /// | Header Len(varint) | (last_manifest_version + mutation_types + ...) | Payload 0 | Payload 1 | ... |
/// | | | | | |
/// +---------------------+----------------------------------------------------+--------------+-------------+--------------+
/// ```
@@ -99,35 +95,24 @@ impl<S: LogStore> Wal<S> {
&self,
seq: SequenceNumber,
mut header: WalHeader,
- payload: Payload<'_>,
+ payload: Option<&Payload>,
) -> Result<(u64, usize)> {
- header.payload_type = payload.payload_type();
- if let Payload::WriteBatchArrow(batch) = payload {
- header.mutation_types = wal::gen_mutation_types(batch);
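+        // Record the mutation types in the header; the decoder uses them to tell whether an entry carries a payload.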
+ if let Some(p) = payload {
+ header.mutation_types = wal::gen_mutation_types(p);
}
let mut buf = vec![];
- // header
+ // Encode header
let wal_header_encoder = WalHeaderEncoder {};
wal_header_encoder.encode(&header, &mut buf)?;
- if let Payload::WriteBatchArrow(batch) = payload {
- // entry
- let encoder = WriteBatchArrowEncoder::new();
+ // Encode payload
+ if let Some(p) = payload {
+ let encoder = PayloadEncoder::new();
// TODO(jiachun): provide some way to compute data size before encode, so we can preallocate an exactly sized buf.
encoder
- .encode(batch, &mut buf)
- .map_err(BoxedError::new)
- .context(error::WriteWalSnafu {
- region_id: self.region_id(),
- })?;
- } else if let Payload::WriteBatchProto(batch) = payload {
- // entry
- let encoder = WriteBatchProtobufEncoder {};
- // TODO(jiachun): provide some way to compute data size before encode, so we can preallocate an exactly sized buf.
- encoder
- .encode(batch, &mut buf)
+ .encode(p, &mut buf)
.map_err(BoxedError::new)
.context(error::WriteWalSnafu {
region_id: self.region_id(),
@@ -138,7 +123,7 @@ impl<S: LogStore> Wal<S> {
self.write(seq, &buf).await
}
- pub async fn read_from_wal(&self, start_seq: SequenceNumber) -> Result<WriteBatchStream<'_>> {
+ pub async fn read_from_wal(&self, start_seq: SequenceNumber) -> Result<PayloadStream<'_>> {
let stream = self
.store
.read(&self.namespace, start_seq)
@@ -180,12 +165,12 @@ impl<S: LogStore> Wal<S> {
fn decode_entry<E: Entry>(
&self,
entry: E,
- ) -> Result<(SequenceNumber, WalHeader, Option<WriteBatch>)> {
+ ) -> Result<(SequenceNumber, WalHeader, Option<Payload>)> {
let seq_num = entry.id();
let input = entry.data();
let wal_header_decoder = WalHeaderDecoder {};
- let (data_pos, mut header) = wal_header_decoder.decode(input)?;
+ let (data_pos, header) = wal_header_decoder.decode(input)?;
ensure!(
data_pos <= input.len(),
@@ -199,55 +184,19 @@ impl<S: LogStore> Wal<S> {
}
);
- match PayloadType::from_i32(header.payload_type) {
- Some(PayloadType::None) => Ok((seq_num, header, None)),
- Some(PayloadType::WriteBatchArrow) => {
- let mutation_types = std::mem::take(&mut header.mutation_types);
- let decoder = WriteBatchArrowDecoder::new(mutation_types);
- let write_batch = decoder
- .decode(&input[data_pos..])
- .map_err(BoxedError::new)
- .context(error::ReadWalSnafu {
- region_id: self.region_id(),
- })?;
-
- Ok((seq_num, header, Some(write_batch)))
- }
- Some(PayloadType::WriteBatchProto) => {
- let mutation_types = std::mem::take(&mut header.mutation_types);
- let decoder = WriteBatchProtobufDecoder::new(mutation_types);
- let write_batch = decoder
- .decode(&input[data_pos..])
- .map_err(BoxedError::new)
- .context(error::ReadWalSnafu {
- region_id: self.region_id(),
- })?;
-
- Ok((seq_num, header, Some(write_batch)))
- }
- _ => error::WalDataCorruptedSnafu {
- region_id: self.region_id(),
- message: format!("invalid payload type={}", header.payload_type),
- }
- .fail(),
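+        // An entry without mutation types carries only a header, so there is no payload to decode.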
+ if header.mutation_types.is_empty() {
+ return Ok((seq_num, header, None));
}
- }
-}
-pub enum Payload<'a> {
- None, // only header
- WriteBatchArrow(&'a WriteBatch),
- #[allow(dead_code)]
- WriteBatchProto(&'a WriteBatch),
-}
+ let decoder = PayloadDecoder::new(&header.mutation_types);
+ let payload = decoder
+ .decode(&input[data_pos..])
+ .map_err(BoxedError::new)
+ .context(error::ReadWalSnafu {
+ region_id: self.region_id(),
+ })?;
-impl<'a> Payload<'a> {
- pub fn payload_type(&self) -> i32 {
- match self {
- Payload::None => PayloadType::None.into(),
- Payload::WriteBatchArrow(_) => PayloadType::WriteBatchArrow.into(),
- Payload::WriteBatchProto(_) => PayloadType::WriteBatchProto.into(),
- }
+ Ok((seq_num, header, Some(payload)))
}
}
@@ -314,7 +263,7 @@ mod tests {
test_util::log_store_util::create_tmp_local_file_log_store("wal_test").await;
let wal = Wal::new(0, Arc::new(log_store));
let header = WalHeader::with_last_manifest_version(111);
- let (seq_num, _) = wal.write_to_wal(3, header, Payload::None).await?;
+ let (seq_num, _) = wal.write_to_wal(3, header, None).await?;
assert_eq!(3, seq_num);
@@ -335,7 +284,6 @@ mod tests {
#[test]
pub fn test_wal_header_codec() {
let wal_header = WalHeader {
- payload_type: 1,
last_manifest_version: 99999999,
mutation_types: vec![],
};
diff --git a/src/storage/src/write_batch.rs b/src/storage/src/write_batch.rs
index eaaeef6fe721..49eba13101e2 100644
--- a/src/storage/src/write_batch.rs
+++ b/src/storage/src/write_batch.rs
@@ -15,420 +15,162 @@
pub mod codec;
mod compat;
-use std::any::Any;
-use std::collections::{BTreeSet, HashMap};
-use std::slice;
-use std::time::Duration;
-
-use common_error::prelude::*;
-use common_time::timestamp_millis::BucketAligned;
-use common_time::RangeMillis;
-use datatypes::arrow::error::ArrowError;
-use datatypes::data_type::ConcreteDataType;
-use datatypes::prelude::{ScalarVector, Value};
+use std::collections::HashMap;
+
+use common_recordbatch::RecordBatch;
use datatypes::schema::{ColumnSchema, SchemaRef};
-use datatypes::vectors::{Int64Vector, TimestampMillisecondVector, VectorRef};
-use prost::{DecodeError, EncodeError};
+use datatypes::vectors::VectorRef;
use snafu::{ensure, OptionExt, ResultExt};
-use store_api::storage::{consts, PutOperation, WriteRequest};
-
-use crate::proto;
-
-#[derive(Debug, Snafu)]
-pub enum Error {
- #[snafu(display("Duplicate column {} in same request", name))]
- DuplicateColumn { name: String, backtrace: Backtrace },
-
- #[snafu(display("Missing column {} in request", name))]
- MissingColumn { name: String, backtrace: Backtrace },
-
- #[snafu(display(
- "Type of column {} does not match type in schema, expect {:?}, given {:?}",
- name,
- expect,
- given
- ))]
- TypeMismatch {
- name: String,
- expect: ConcreteDataType,
- given: ConcreteDataType,
- backtrace: Backtrace,
- },
-
- #[snafu(display("Column {} is not null but input has null", name))]
- HasNull { name: String, backtrace: Backtrace },
-
- #[snafu(display("Unknown column {}", name))]
- UnknownColumn { name: String, backtrace: Backtrace },
-
- #[snafu(display(
- "Length of column {} not equals to other columns, expect {}, given {}",
- name,
- expect,
- given
- ))]
- LenNotEquals {
- name: String,
- expect: usize,
- given: usize,
- backtrace: Backtrace,
- },
-
- #[snafu(display(
- "Request is too large, max is {}, current is {}",
- MAX_BATCH_SIZE,
- num_rows
- ))]
- RequestTooLarge {
- num_rows: usize,
- backtrace: Backtrace,
- },
-
- #[snafu(display("Cannot align timestamp: {}", ts))]
- TimestampOverflow { ts: i64 },
-
- #[snafu(display("Failed to encode, source: {}", source))]
- EncodeArrow {
- backtrace: Backtrace,
- source: ArrowError,
- },
-
- #[snafu(display("Failed to decode, source: {}", source))]
- DecodeArrow {
- backtrace: Backtrace,
- source: ArrowError,
- },
-
- #[snafu(display("Failed to encode into protobuf, source: {}", source))]
- EncodeProtobuf {
- backtrace: Backtrace,
- source: EncodeError,
- },
-
- #[snafu(display("Failed to decode from protobuf, source: {}", source))]
- DecodeProtobuf {
- backtrace: Backtrace,
- source: DecodeError,
- },
-
- #[snafu(display("Failed to parse schema, source: {}", source))]
- ParseSchema {
- backtrace: Backtrace,
- source: datatypes::error::Error,
- },
-
- #[snafu(display("Failed to decode, corrupted data {}", message))]
- DataCorrupted {
- message: String,
- backtrace: Backtrace,
- },
-
- #[snafu(display("Failed to decode vector, source {}", source))]
- DecodeVector {
- backtrace: Backtrace,
- source: datatypes::error::Error,
- },
-
- #[snafu(display("Failed to convert into protobuf struct, source {}", source))]
- ToProtobuf {
- source: proto::write_batch::Error,
- backtrace: Backtrace,
- },
-
- #[snafu(display("Failed to convert from protobuf struct, source {}", source))]
- FromProtobuf {
- source: proto::write_batch::Error,
- backtrace: Backtrace,
- },
-
- #[snafu(display(
- "Failed to create default value for column {}, source: {}",
- name,
- source
- ))]
- CreateDefault {
- name: String,
- #[snafu(backtrace)]
- source: datatypes::error::Error,
- },
-}
+use store_api::storage::{OpType, WriteRequest};
-pub type Result<T> = std::result::Result<T, Error>;
+use crate::error::{
+ BatchMissingColumnSnafu, CreateDefaultSnafu, CreateRecordBatchSnafu, Error, HasNullSnafu,
+ LenNotEqualsSnafu, RequestTooLargeSnafu, Result, TypeMismatchSnafu, UnknownColumnSnafu,
+};
/// Max number of updates of a write batch.
-const MAX_BATCH_SIZE: usize = 1_000_000;
+pub(crate) const MAX_BATCH_SIZE: usize = 1_000_000;
+
+/// Data of [WriteBatch].
+///
+/// We serialize this struct to the WAL instead of the whole `WriteBatch` to avoid
+/// storing unnecessary information.
+#[derive(Debug, PartialEq)]
+pub struct Payload {
+ /// Schema of the payload.
+ ///
+ /// This schema doesn't contain internal columns.
+ pub schema: SchemaRef,
+ pub mutations: Vec<Mutation>,
+}
-impl ErrorExt for Error {
- fn status_code(&self) -> StatusCode {
- StatusCode::InvalidArguments
+impl Payload {
+ /// Creates a new payload with given `schema`.
+ fn new(schema: SchemaRef) -> Payload {
+ Payload {
+ schema,
+ mutations: Vec::new(),
+ }
}
- fn backtrace_opt(&self) -> Option<&Backtrace> {
- ErrorCompat::backtrace(self)
+ /// Returns true if there is no mutation in the payload.
+ #[inline]
+ pub fn is_empty(&self) -> bool {
+ self.mutations.is_empty()
}
+}
- fn as_any(&self) -> &dyn Any {
- self
- }
+/// A write operation to the region.
+#[derive(Debug, PartialEq)]
+pub struct Mutation {
+ /// Type of the mutation.
+ pub op_type: OpType,
+ /// Data of the mutation.
+ pub record_batch: RecordBatch,
}
/// Implementation of [WriteRequest].
+#[derive(Debug)]
pub struct WriteBatch {
- schema: SchemaRef,
- mutations: Vec<Mutation>,
- num_rows: usize,
+ payload: Payload,
+    /// Number of rows this batch needs to mutate (put, delete, etc.).
+ ///
+ /// We use it to check whether this batch is too large.
+ num_rows_to_mutate: usize,
}
impl WriteRequest for WriteBatch {
type Error = Error;
- type PutOp = PutData;
- fn put(&mut self, mut data: PutData) -> Result<()> {
+ fn put(&mut self, data: HashMap<String, VectorRef>) -> Result<()> {
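+        // NameToVector::new() validates that all provided vectors have the same length.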
+ let data = NameToVector::new(data)?;
if data.is_empty() {
return Ok(());
}
- self.preprocess_put_data(&mut data)?;
+ let record_batch = self.process_put_data(data)?;
- self.add_num_rows(data.num_rows())?;
- self.mutations.push(Mutation::Put(data));
+ self.add_num_rows_to_mutate(record_batch.num_rows())?;
+ self.payload.mutations.push(Mutation {
+ op_type: OpType::Put,
+ record_batch,
+ });
Ok(())
}
-
- /// Aligns timestamps in write batch specified by schema to durations.
- ///
- /// A negative timestamp means "before Unix epoch".
- /// Valid timestamp range is `[i64::MIN + duration, i64::MAX-(i64::MAX%duration))`.
- fn time_ranges(&self, duration: Duration) -> Result<Vec<RangeMillis>> {
- let ts_col_name = match self.schema.timestamp_column() {
- None => {
- // write batch does not have a timestamp column
- return Ok(Vec::new());
- }
- Some(ts_col) => &ts_col.name,
- };
- let durations_millis = duration.as_millis() as i64;
- let mut aligned_timestamps: BTreeSet<i64> = BTreeSet::new();
- for m in &self.mutations {
- match m {
- Mutation::Put(put_data) => {
- let column = put_data
- .column_by_name(ts_col_name)
- .unwrap_or_else(|| panic!("Cannot find column by name: {ts_col_name}"));
- if column.is_const() {
- let ts = match column.get(0) {
- Value::Timestamp(ts) => ts,
- _ => unreachable!(),
- };
- let aligned = align_timestamp(ts.value(), durations_millis)
- .context(TimestampOverflowSnafu { ts: ts.value() })?;
-
- aligned_timestamps.insert(aligned);
- } else {
- match column.data_type() {
- ConcreteDataType::Timestamp(_) => {
- let ts_vector = column
- .as_any()
- .downcast_ref::<TimestampMillisecondVector>()
- .unwrap();
- for ts in ts_vector.iter_data().flatten() {
- let aligned = align_timestamp(ts.into(), durations_millis)
- .context(TimestampOverflowSnafu { ts: i64::from(ts) })?;
- aligned_timestamps.insert(aligned);
- }
- }
- ConcreteDataType::Int64(_) => {
- let ts_vector =
- column.as_any().downcast_ref::<Int64Vector>().unwrap();
- for ts in ts_vector.iter_data().flatten() {
- let aligned = align_timestamp(ts, durations_millis)
- .context(TimestampOverflowSnafu { ts })?;
- aligned_timestamps.insert(aligned);
- }
- }
- _ => unreachable!(),
- }
- }
- }
- }
- }
-
- let ranges = aligned_timestamps
- .iter()
- .map(|t| RangeMillis::new(*t, *t + durations_millis).unwrap())
- .collect::<Vec<_>>();
-
- Ok(ranges)
- }
-
- fn put_op(&self) -> Self::PutOp {
- PutData::new()
- }
-
- fn put_op_with_columns(num_columns: usize) -> Self::PutOp {
- PutData::with_num_columns(num_columns)
- }
-}
-
-/// Aligns timestamp to nearest time interval.
-/// Negative ts means a timestamp before Unix epoch.
-/// If arithmetic overflows, this function returns None.
-/// So timestamp within `[i64::MIN, i64::MIN + duration)` or
-/// `[i64::MAX-(i64::MAX%duration), i64::MAX]` is not a valid input.
-fn align_timestamp(ts: i64, duration: i64) -> Option<i64> {
- let aligned = ts.align_by_bucket(duration)?.as_i64();
- // Also ensure end timestamp won't overflow.
- aligned.checked_add(duration)?;
- Some(aligned)
}
// WriteBatch pub methods.
impl WriteBatch {
pub fn new(schema: SchemaRef) -> Self {
Self {
- schema,
- mutations: Vec::new(),
- num_rows: 0,
+ payload: Payload::new(schema),
+ num_rows_to_mutate: 0,
}
}
+ #[inline]
pub fn schema(&self) -> &SchemaRef {
- &self.schema
- }
-
- pub fn iter(&self) -> slice::Iter<'_, Mutation> {
- self.mutations.iter()
+ &self.payload.schema
}
- pub fn is_empty(&self) -> bool {
- self.mutations.is_empty()
+ #[inline]
+ pub fn payload(&self) -> &Payload {
+ &self.payload
}
}
-/// Enum to wrap different operations.
-pub enum Mutation {
- Put(PutData),
-}
-
-#[derive(Default, Debug)]
-pub struct PutData {
- columns: HashMap<String, VectorRef>,
-}
-
-impl PutData {
- pub(crate) fn new() -> PutData {
- PutData::default()
- }
+impl WriteBatch {
+ /// Validates `data` and converts it into a [RecordBatch].
+ ///
+    /// It fills missing columns with the schema's default values.
+ fn process_put_data(&self, data: NameToVector) -> Result<RecordBatch> {
+ let num_rows = data.num_rows();
+ let mut columns = Vec::with_capacity(self.schema().num_columns());
- pub(crate) fn with_num_columns(num_columns: usize) -> PutData {
- PutData {
- columns: HashMap::with_capacity(num_columns),
+ for column_schema in self.schema().column_schemas() {
+ match data.0.get(&column_schema.name) {
+ Some(col) => {
+ validate_column(column_schema, col)?;
+ columns.push(col.clone());
+ }
+ None => {
+                    // If the column is not provided, fill it with its default value.
+ let col = new_column_with_default_value(column_schema, num_rows)?;
+ columns.push(col);
+ }
+ }
}
- }
- fn add_column_by_name(&mut self, name: &str, vector: VectorRef) -> Result<()> {
- ensure!(
- !self.columns.contains_key(name),
- DuplicateColumnSnafu { name }
- );
-
- if let Some(col) = self.columns.values().next() {
+        // Check all columns in data also exist in the schema, which means we
+ // are not inserting unknown columns.
+ for name in data.0.keys() {
ensure!(
- col.len() == vector.len(),
- LenNotEqualsSnafu {
- name,
- expect: col.len(),
- given: vector.len(),
- }
+ self.schema().contains_column(name),
+ UnknownColumnSnafu { name }
);
}
- self.columns.insert(name.to_string(), vector);
-
- Ok(())
- }
-
- /// Add columns by its default value.
- fn add_default_by_name(&mut self, column_schema: &ColumnSchema) -> Result<()> {
- let num_rows = self.num_rows();
-
- // If column is not provided, fills it by default value.
- let vector = column_schema
- .create_default_vector(num_rows)
- .context(CreateDefaultSnafu {
- name: &column_schema.name,
- })?
- .context(MissingColumnSnafu {
- name: &column_schema.name,
- })?;
-
- validate_column(column_schema, &vector)?;
-
- self.add_column_by_name(&column_schema.name, vector)
+ RecordBatch::new(self.schema().clone(), columns).context(CreateRecordBatchSnafu)
}
-}
-impl PutOperation for PutData {
- type Error = Error;
-
- fn add_key_column(&mut self, name: &str, vector: VectorRef) -> Result<()> {
- self.add_column_by_name(name, vector)
- }
-
- fn add_version_column(&mut self, vector: VectorRef) -> Result<()> {
- self.add_column_by_name(consts::VERSION_COLUMN_NAME, vector)
- }
-
- fn add_value_column(&mut self, name: &str, vector: VectorRef) -> Result<()> {
- self.add_column_by_name(name, vector)
+ fn add_num_rows_to_mutate(&mut self, len: usize) -> Result<()> {
+ let num_rows = self.num_rows_to_mutate + len;
+ ensure!(
+ num_rows <= MAX_BATCH_SIZE,
+ RequestTooLargeSnafu { num_rows }
+ );
+ self.num_rows_to_mutate = num_rows;
+ Ok(())
}
}
-// PutData pub methods.
-impl PutData {
- pub fn column_by_name(&self, name: &str) -> Option<&VectorRef> {
- self.columns.get(name)
- }
-
- /// Returns number of columns in data.
- pub fn num_columns(&self) -> usize {
- self.columns.len()
- }
-
- /// Returns number of rows in data.
- pub fn num_rows(&self) -> usize {
- self.columns
- .values()
- .next()
- .map(|col| col.len())
- .unwrap_or(0)
- }
-
- /// Returns true if no rows in data.
- ///
- /// `PutData` with empty column will also be considered as empty.
- pub fn is_empty(&self) -> bool {
- self.num_rows() == 0
- }
-
- /// Returns slice of [PutData] in range `[start, end)`.
- ///
- /// # Panics
- /// Panics if `start > end`.
- pub fn slice(&self, start: usize, end: usize) -> PutData {
- assert!(start <= end);
-
- let columns = self
- .columns
- .iter()
- .map(|(k, v)| (k.clone(), v.slice(start, end - start)))
- .collect();
-
- PutData { columns }
- }
+/// Returns the length of the first vector in `data`.
+fn first_vector_len(data: &HashMap<String, VectorRef>) -> usize {
+ data.values().next().map(|col| col.len()).unwrap_or(0)
}
+/// Checks whether `col` matches given `column_schema`.
fn validate_column(column_schema: &ColumnSchema, col: &VectorRef) -> Result<()> {
if !col.data_type().is_null() {
// This allow us to use NullVector for columns that only have null value.
@@ -454,55 +196,63 @@ fn validate_column(column_schema: &ColumnSchema, col: &VectorRef) -> Result<()>
Ok(())
}
-impl WriteBatch {
- /// Validate [PutData] and fill missing columns by default value.
- fn preprocess_put_data(&self, data: &mut PutData) -> Result<()> {
- for column_schema in self.schema.column_schemas() {
- match data.column_by_name(&column_schema.name) {
- Some(col) => {
- validate_column(column_schema, col)?;
- }
- None => {
- // If column is not provided, fills it by default value.
- data.add_default_by_name(column_schema)?;
- }
- }
- }
+/// Creates a new column and fills it with the default value.
+///
+/// `num_rows` MUST be greater than 0. This function will also validate the schema.
+pub(crate) fn new_column_with_default_value(
+ column_schema: &ColumnSchema,
+ num_rows: usize,
+) -> Result<VectorRef> {
+    // If the column is not provided, fill it with its default value.
+ let vector = column_schema
+ .create_default_vector(num_rows)
+ .context(CreateDefaultSnafu {
+ name: &column_schema.name,
+ })?
+ .context(BatchMissingColumnSnafu {
+ column: &column_schema.name,
+ })?;
+
+ validate_column(column_schema, &vector)?;
+
+ Ok(vector)
+}
- // Check all columns in data also exists in schema.
- for name in data.columns.keys() {
+/// Vectors in [NameToVector] have the same length.
+///
+/// MUST construct it via [`NameToVector::new()`] to ensure the vector lengths are validated.
+struct NameToVector(HashMap<String, VectorRef>);
+
+impl NameToVector {
+ fn new(data: HashMap<String, VectorRef>) -> Result<NameToVector> {
+ let num_rows = first_vector_len(&data);
+ for (name, vector) in &data {
ensure!(
- self.schema.column_schema_by_name(name).is_some(),
- UnknownColumnSnafu { name }
+ num_rows == vector.len(),
+ LenNotEqualsSnafu {
+ name,
+ expect: num_rows,
+ given: vector.len(),
+ }
);
}
- Ok(())
+ Ok(NameToVector(data))
}
- fn add_num_rows(&mut self, len: usize) -> Result<()> {
- let num_rows = self.num_rows + len;
- ensure!(
- num_rows <= MAX_BATCH_SIZE,
- RequestTooLargeSnafu { num_rows }
- );
- self.num_rows = num_rows;
- Ok(())
+ fn num_rows(&self) -> usize {
+ first_vector_len(&self.0)
}
-}
-
-impl<'a> IntoIterator for &'a WriteBatch {
- type Item = &'a Mutation;
- type IntoIter = slice::Iter<'a, Mutation>;
- fn into_iter(self) -> slice::Iter<'a, Mutation> {
- self.iter()
+ fn is_empty(&self) -> bool {
+ self.num_rows() == 0
}
}
#[cfg(test)]
pub(crate) fn new_test_batch() -> WriteBatch {
use datatypes::type_id::LogicalTypeId;
+ use store_api::storage::consts;
use crate::test_util::write_batch_util;
@@ -522,251 +272,165 @@ mod tests {
use std::iter;
use std::sync::Arc;
+ use common_error::prelude::*;
+ use datatypes::prelude::ScalarVector;
use datatypes::type_id::LogicalTypeId;
use datatypes::vectors::{
- BooleanVector, ConstantVector, Int32Vector, Int64Vector, TimestampMillisecondVector,
- UInt64Vector,
+ BooleanVector, Int32Vector, Int64Vector, TimestampMillisecondVector, UInt64Vector,
};
+ use store_api::storage::consts;
use super::*;
use crate::test_util::write_batch_util;
#[test]
- fn test_put_data_basic() {
- let mut put_data = PutData::new();
- assert!(put_data.is_empty());
-
- let vector1 = Arc::new(Int32Vector::from_slice(&[1, 2, 3, 4, 5]));
- let vector2 = Arc::new(UInt64Vector::from_slice(&[0, 2, 4, 6, 8]));
-
- put_data.add_key_column("k1", vector1.clone()).unwrap();
- put_data.add_version_column(vector2).unwrap();
- put_data.add_value_column("v1", vector1).unwrap();
-
- assert_eq!(5, put_data.num_rows());
- assert!(!put_data.is_empty());
-
- assert!(put_data.column_by_name("no such column").is_none());
- assert!(put_data.column_by_name("k1").is_some());
- assert!(put_data.column_by_name("v1").is_some());
- assert!(put_data
- .column_by_name(consts::VERSION_COLUMN_NAME)
- .is_some());
+ fn test_name_to_vector_basic() {
+ let columns = NameToVector::new(HashMap::new()).unwrap();
+ assert!(columns.is_empty());
+
+ let vector1 = Arc::new(Int32Vector::from_slice(&[1, 2, 3, 4, 5])) as VectorRef;
+ let vector2 = Arc::new(UInt64Vector::from_slice(&[0, 2, 4, 6, 8])) as VectorRef;
+
+ let mut put_data = HashMap::with_capacity(3);
+ put_data.insert("k1".to_string(), vector1.clone());
+ put_data.insert(consts::VERSION_COLUMN_NAME.to_string(), vector2);
+ put_data.insert("v1".to_string(), vector1);
+
+ let columns = NameToVector::new(put_data).unwrap();
+ assert_eq!(5, columns.num_rows());
+ assert!(!columns.is_empty());
}
#[test]
- fn test_put_data_empty_vector() {
- let mut put_data = PutData::with_num_columns(1);
- assert!(put_data.is_empty());
-
- let vector1 = Arc::new(Int32Vector::from_slice(&[]));
- put_data.add_key_column("k1", vector1).unwrap();
+ fn test_name_to_vector_empty_vector() {
+ let vector1 = Arc::new(Int32Vector::from_slice(&[])) as VectorRef;
+ let mut put_data = HashMap::new();
+ put_data.insert("k1".to_string(), vector1);
- assert_eq!(0, put_data.num_rows());
- assert!(put_data.is_empty());
+ let columns = NameToVector::new(put_data).unwrap();
+ assert_eq!(0, columns.num_rows());
+ assert!(columns.is_empty());
}
#[test]
fn test_write_batch_put() {
- let intv = Arc::new(UInt64Vector::from_slice(&[1, 2, 3]));
- let boolv = Arc::new(BooleanVector::from(vec![true, false, true]));
- let tsv = Arc::new(TimestampMillisecondVector::from_vec(vec![0, 0, 0]));
+ let intv = Arc::new(UInt64Vector::from_slice(&[1, 2, 3])) as VectorRef;
+ let boolv = Arc::new(BooleanVector::from(vec![true, false, true])) as VectorRef;
+ let tsv = Arc::new(TimestampMillisecondVector::from_slice(&[0, 0, 0])) as VectorRef;
- let mut put_data = PutData::new();
- put_data.add_key_column("k1", intv.clone()).unwrap();
- put_data.add_version_column(intv).unwrap();
- put_data.add_value_column("v1", boolv).unwrap();
- put_data.add_key_column("ts", tsv).unwrap();
+ let mut put_data = HashMap::with_capacity(4);
+ put_data.insert("k1".to_string(), intv.clone());
+ put_data.insert(consts::VERSION_COLUMN_NAME.to_string(), intv);
+ put_data.insert("v1".to_string(), boolv);
+ put_data.insert("ts".to_string(), tsv);
let mut batch = new_test_batch();
- assert!(batch.is_empty());
+ assert!(batch.payload().is_empty());
batch.put(put_data).unwrap();
- assert!(!batch.is_empty());
+ assert!(!batch.payload().is_empty());
- let mut iter = batch.iter();
- let Mutation::Put(put_data) = iter.next().unwrap();
- assert_eq!(3, put_data.num_rows());
+ let mutation = &batch.payload().mutations[0];
+ assert_eq!(3, mutation.record_batch.num_rows());
}
fn check_err(err: Error, msg: &str) {
assert_eq!(StatusCode::InvalidArguments, err.status_code());
assert!(err.backtrace_opt().is_some());
- assert!(err.to_string().contains(msg));
+ assert!(
+ err.to_string().contains(msg),
+ "<{err}> does not contain {msg}",
+ );
}
#[test]
fn test_write_batch_too_large() {
let boolv = Arc::new(BooleanVector::from_iterator(
iter::repeat(true).take(MAX_BATCH_SIZE + 1),
- ));
+ )) as VectorRef;
- let mut put_data = PutData::new();
- put_data.add_key_column("k1", boolv).unwrap();
+ let mut put_data = HashMap::new();
+ put_data.insert("k1".to_string(), boolv);
let mut batch =
write_batch_util::new_write_batch(&[("k1", LogicalTypeId::Boolean, false)], None);
- let err = batch.put(put_data).err().unwrap();
+ let err = batch.put(put_data).unwrap_err();
check_err(err, "Request is too large");
}
- #[test]
- fn test_put_data_duplicate() {
- let intv = Arc::new(UInt64Vector::from_slice(&[1, 2, 3]));
-
- let mut put_data = PutData::new();
- put_data.add_key_column("k1", intv.clone()).unwrap();
- let err = put_data.add_key_column("k1", intv).err().unwrap();
- check_err(err, "Duplicate column k1");
- }
-
#[test]
fn test_put_data_different_len() {
- let intv = Arc::new(UInt64Vector::from_slice(&[1, 2, 3]));
- let boolv = Arc::new(BooleanVector::from(vec![true, false]));
+ let intv = Arc::new(UInt64Vector::from_slice(&[1, 2, 3])) as VectorRef;
+ let tsv = Arc::new(TimestampMillisecondVector::from_slice(&[0, 0])) as VectorRef;
+ let boolv = Arc::new(BooleanVector::from(vec![true, false, true])) as VectorRef;
- let mut put_data = PutData::new();
- put_data.add_key_column("k1", intv).unwrap();
- let err = put_data.add_value_column("v1", boolv).err().unwrap();
- check_err(err, "Length of column v1 not equals");
+ let mut put_data = HashMap::new();
+ put_data.insert("k1".to_string(), intv.clone());
+ put_data.insert(consts::VERSION_COLUMN_NAME.to_string(), intv);
+ put_data.insert("v1".to_string(), boolv.clone());
+ put_data.insert("ts".to_string(), tsv);
+
+ let mut batch = new_test_batch();
+ let err = batch.put(put_data).unwrap_err();
+ check_err(err, "not equals to other columns");
}
#[test]
fn test_put_type_mismatch() {
- let boolv = Arc::new(BooleanVector::from(vec![true, false, true]));
- let tsv = Arc::new(Int64Vector::from_vec(vec![0, 0, 0]));
+ let boolv = Arc::new(BooleanVector::from(vec![true, false, true])) as VectorRef;
+ let tsv = Arc::new(Int64Vector::from_slice(&[0, 0, 0])) as VectorRef;
- let mut put_data = PutData::new();
- put_data.add_key_column("k1", boolv).unwrap();
- put_data.add_key_column("ts", tsv).unwrap();
+ let mut put_data = HashMap::new();
+ put_data.insert("k1".to_string(), boolv);
+ put_data.insert("ts".to_string(), tsv);
let mut batch = new_test_batch();
- let err = batch.put(put_data).err().unwrap();
+ let err = batch.put(put_data).unwrap_err();
check_err(err, "Type of column k1 does not match");
}
#[test]
fn test_put_type_has_null() {
- let intv = Arc::new(UInt64Vector::from(vec![Some(1), None, Some(3)]));
- let tsv = Arc::new(Int64Vector::from_vec(vec![0, 0, 0]));
+ let intv = Arc::new(UInt64Vector::from(vec![Some(1), None, Some(3)])) as VectorRef;
+ let tsv = Arc::new(Int64Vector::from_slice(&[0, 0, 0])) as VectorRef;
- let mut put_data = PutData::new();
- put_data.add_key_column("k1", intv).unwrap();
- put_data.add_key_column("ts", tsv).unwrap();
+ let mut put_data = HashMap::new();
+ put_data.insert("k1".to_string(), intv);
+ put_data.insert("ts".to_string(), tsv);
let mut batch = new_test_batch();
- let err = batch.put(put_data).err().unwrap();
+ let err = batch.put(put_data).unwrap_err();
check_err(err, "Column k1 is not null");
}
#[test]
fn test_put_missing_column() {
- let boolv = Arc::new(BooleanVector::from(vec![true, false, true]));
- let tsv = Arc::new(Int64Vector::from_vec(vec![0, 0, 0]));
+ let boolv = Arc::new(BooleanVector::from(vec![true, false, true])) as VectorRef;
+ let tsv = Arc::new(Int64Vector::from_slice(&[0, 0, 0])) as VectorRef;
+
+ let mut put_data = HashMap::new();
+ put_data.insert("v1".to_string(), boolv);
+ put_data.insert("ts".to_string(), tsv);
- let mut put_data = PutData::new();
- put_data.add_key_column("v1", boolv).unwrap();
- put_data.add_key_column("ts", tsv).unwrap();
let mut batch = new_test_batch();
- let err = batch.put(put_data).err().unwrap();
+ let err = batch.put(put_data).unwrap_err();
check_err(err, "Missing column k1");
}
#[test]
fn test_put_unknown_column() {
- let intv = Arc::new(UInt64Vector::from_slice(&[1, 2, 3]));
- let tsv = Arc::new(TimestampMillisecondVector::from_vec(vec![0, 0, 0]));
- let boolv = Arc::new(BooleanVector::from(vec![true, false, true]));
-
- let mut put_data = PutData::new();
- put_data.add_key_column("k1", intv.clone()).unwrap();
- put_data.add_version_column(intv).unwrap();
- put_data.add_value_column("v1", boolv.clone()).unwrap();
- put_data.add_key_column("ts", tsv).unwrap();
- put_data.add_value_column("v2", boolv).unwrap();
- let mut batch = new_test_batch();
- let err = batch.put(put_data).err().unwrap();
- check_err(err, "Unknown column v2");
- }
-
- #[test]
- fn test_align_timestamp() {
- let duration_millis = 20;
- let ts = [-21, -20, -19, -1, 0, 5, 15, 19, 20, 21];
- let res = ts.map(|t| align_timestamp(t, duration_millis));
- assert_eq!(res, [-40, -20, -20, -20, 0, 0, 0, 0, 20, 20].map(Some));
- }
-
- #[test]
- fn test_align_timestamp_overflow() {
- assert_eq!(Some(i64::MIN), align_timestamp(i64::MIN, 1));
- assert_eq!(Some(-9223372036854775808), align_timestamp(i64::MIN, 2));
- assert_eq!(
- Some(((i64::MIN + 20) / 20 - 1) * 20),
- align_timestamp(i64::MIN + 20, 20)
- );
- assert_eq!(None, align_timestamp(i64::MAX - (i64::MAX % 23), 23));
- assert_eq!(
- Some(9223372036854775780),
- align_timestamp(i64::MAX / 20 * 20 - 1, 20)
- );
- }
+ let intv = Arc::new(UInt64Vector::from_slice(&[1, 2, 3])) as VectorRef;
+ let tsv = Arc::new(TimestampMillisecondVector::from_slice(&[0, 0, 0])) as VectorRef;
+ let boolv = Arc::new(BooleanVector::from(vec![true, false, true])) as VectorRef;
- #[test]
- fn test_write_batch_time_range() {
- let intv = Arc::new(UInt64Vector::from_slice(&[1, 2, 3, 4, 5, 6]));
- let tsv = Arc::new(TimestampMillisecondVector::from_vec(vec![
- -21, -20, -1, 0, 1, 20,
- ]));
- let boolv = Arc::new(BooleanVector::from(vec![
- true, false, true, false, false, false,
- ]));
-
- let mut put_data = PutData::new();
- put_data.add_key_column("k1", intv.clone()).unwrap();
- put_data.add_version_column(intv).unwrap();
- put_data.add_value_column("v1", boolv).unwrap();
- put_data.add_key_column("ts", tsv).unwrap();
+ let mut put_data = HashMap::new();
+ put_data.insert("k1".to_string(), intv.clone());
+ put_data.insert(consts::VERSION_COLUMN_NAME.to_string(), intv);
+ put_data.insert("v1".to_string(), boolv.clone());
+ put_data.insert("ts".to_string(), tsv);
+ put_data.insert("v2".to_string(), boolv);
let mut batch = new_test_batch();
- batch.put(put_data).unwrap();
-
- let duration_millis = 20i64;
- let ranges = batch
- .time_ranges(Duration::from_millis(duration_millis as u64))
- .unwrap();
- assert_eq!(
- [-40, -20, 0, 20].map(|v| RangeMillis::new(v, v + duration_millis).unwrap()),
- ranges.as_slice()
- )
- }
-
- #[test]
- fn test_write_batch_time_range_const_vector() {
- let intv = Arc::new(UInt64Vector::from_slice(&[1, 2, 3, 4, 5, 6]));
- let tsv = Arc::new(ConstantVector::new(
- Arc::new(TimestampMillisecondVector::from_vec(vec![20])),
- 6,
- ));
- let boolv = Arc::new(BooleanVector::from(vec![
- true, false, true, false, false, false,
- ]));
-
- let mut put_data = PutData::new();
- put_data.add_key_column("k1", intv.clone()).unwrap();
- put_data.add_version_column(intv).unwrap();
- put_data.add_value_column("v1", boolv).unwrap();
- put_data.add_key_column("ts", tsv).unwrap();
-
- let mut batch = new_test_batch();
- batch.put(put_data).unwrap();
-
- let duration_millis = 20i64;
- let ranges = batch
- .time_ranges(Duration::from_millis(duration_millis as u64))
- .unwrap();
- assert_eq!(
- [20].map(|v| RangeMillis::new(v, v + duration_millis).unwrap()),
- ranges.as_slice()
- )
+ let err = batch.put(put_data).unwrap_err();
+ assert_eq!(StatusCode::TableColumnNotFound, err.status_code());
}
}
diff --git a/src/storage/src/write_batch/codec.rs b/src/storage/src/write_batch/codec.rs
index 15dad0ada2a4..961d0d09accf 100644
--- a/src/storage/src/write_batch/codec.rs
+++ b/src/storage/src/write_batch/codec.rs
@@ -15,298 +15,150 @@
use std::io::Cursor;
use std::sync::Arc;
+use common_recordbatch::RecordBatch;
use datatypes::arrow::ipc::reader::StreamReader;
use datatypes::arrow::ipc::writer::{IpcWriteOptions, StreamWriter};
-use datatypes::arrow::record_batch::RecordBatch;
-use datatypes::schema::{Schema, SchemaRef};
-use datatypes::vectors::Helper;
-use prost::Message;
-use snafu::{ensure, OptionExt, ResultExt};
-use store_api::storage::WriteRequest;
+use datatypes::schema::Schema;
+use snafu::{ensure, ResultExt};
+use store_api::storage::OpType;
use crate::codec::{Decoder, Encoder};
-use crate::proto::wal::MutationType;
-use crate::proto::write_batch::{self, gen_columns, gen_put_data_vector};
-use crate::write_batch::{
- DataCorruptedSnafu, DecodeArrowSnafu, DecodeProtobufSnafu, DecodeVectorSnafu, EncodeArrowSnafu,
- EncodeProtobufSnafu, Error as WriteBatchError, FromProtobufSnafu, MissingColumnSnafu, Mutation,
- ParseSchemaSnafu, PutData, Result, ToProtobufSnafu, WriteBatch,
+use crate::error::{
+ BatchCorruptedSnafu, CreateRecordBatchSnafu, DecodeArrowSnafu, EncodeArrowSnafu, Error,
+ ParseSchemaSnafu, Result,
};
+use crate::proto::wal::MutationType;
+use crate::write_batch::{Mutation, Payload};
-// TODO(jiachun): We can make a comparison with protobuf, including performance, storage cost,
-// CPU consumption, etc
#[derive(Default)]
-pub struct WriteBatchArrowEncoder {}
+pub struct PayloadEncoder {}
-impl WriteBatchArrowEncoder {
+impl PayloadEncoder {
pub fn new() -> Self {
Self::default()
}
}
-impl Encoder for WriteBatchArrowEncoder {
- type Item = WriteBatch;
- type Error = WriteBatchError;
+impl Encoder for PayloadEncoder {
+ type Item = Payload;
+ type Error = Error;
- fn encode(&self, item: &WriteBatch, dst: &mut Vec<u8>) -> Result<()> {
- let item_schema = item.schema();
- let arrow_schema = item_schema.arrow_schema();
+ fn encode(&self, item: &Payload, dst: &mut Vec<u8>) -> Result<()> {
+ let arrow_schema = item.schema.arrow_schema();
let opts = IpcWriteOptions::default();
let mut writer = StreamWriter::try_new_with_options(dst, arrow_schema, opts)
.context(EncodeArrowSnafu)?;
- for mutation in item.iter() {
- let rb = match mutation {
- Mutation::Put(put) => {
- let arrays = item_schema
- .column_schemas()
- .iter()
- .map(|column_schema| {
- let vector = put.column_by_name(&column_schema.name).context(
- MissingColumnSnafu {
- name: &column_schema.name,
- },
- )?;
- Ok(vector.to_arrow_array())
- })
- .collect::<Result<Vec<_>>>()?;
-
- RecordBatch::try_new(arrow_schema.clone(), arrays).context(EncodeArrowSnafu)?
- }
- };
- writer.write(&rb).context(EncodeArrowSnafu)?;
+ for mutation in &item.mutations {
+ let record_batch = mutation.record_batch.df_record_batch();
+ writer.write(record_batch).context(EncodeArrowSnafu)?;
}
writer.finish().context(EncodeArrowSnafu)?;
+
Ok(())
}
}
-pub struct WriteBatchArrowDecoder {
- mutation_types: Vec<i32>,
+pub struct PayloadDecoder<'a> {
+ mutation_types: &'a [i32],
}
-impl WriteBatchArrowDecoder {
- pub fn new(mutation_types: Vec<i32>) -> Self {
+impl<'a> PayloadDecoder<'a> {
+ pub fn new(mutation_types: &'a [i32]) -> Self {
Self { mutation_types }
}
}
-impl Decoder for WriteBatchArrowDecoder {
- type Item = WriteBatch;
- type Error = WriteBatchError;
+impl<'a> Decoder for PayloadDecoder<'a> {
+ type Item = Payload;
+ type Error = Error;
- fn decode(&self, src: &[u8]) -> Result<WriteBatch> {
+ fn decode(&self, src: &[u8]) -> Result<Payload> {
let reader = Cursor::new(src);
let mut reader = StreamReader::try_new(reader, None).context(DecodeArrowSnafu)?;
let arrow_schema = reader.schema();
- let mut chunks = Vec::with_capacity(self.mutation_types.len());
-
- for maybe_record_batch in reader.by_ref() {
- let record_batch = maybe_record_batch.context(DecodeArrowSnafu)?;
- chunks.push(record_batch);
- }
-
- // check if exactly finished
- ensure!(
- reader.is_finished(),
- DataCorruptedSnafu {
- message: "Impossible, the num of data chunks is different than expected."
- }
- );
-
- ensure!(
- chunks.len() == self.mutation_types.len(),
- DataCorruptedSnafu {
- message: format!(
- "expected {} mutations, but got {}",
- self.mutation_types.len(),
- chunks.len()
- )
- }
- );
+        // We could let the decoder take a schema as input if possible, then we don't
+ // need to rebuild the schema here.
let schema = Arc::new(Schema::try_from(arrow_schema).context(ParseSchemaSnafu)?);
- let mut write_batch = WriteBatch::new(schema.clone());
-
- for (mutation_type, record_batch) in self.mutation_types.iter().zip(chunks.into_iter()) {
- match MutationType::from_i32(*mutation_type) {
- Some(MutationType::Put) => {
- let mut put_data = PutData::with_num_columns(schema.num_columns());
- for (column_schema, array) in schema
- .column_schemas()
- .iter()
- .zip(record_batch.columns().iter())
- {
- let vector = Helper::try_into_vector(array).context(DecodeVectorSnafu)?;
- put_data.add_column_by_name(&column_schema.name, vector)?;
- }
+ let mut mutations = Vec::with_capacity(self.mutation_types.len());
- write_batch.put(put_data)?;
- }
+ for (record_batch, mutation_type) in reader.by_ref().zip(self.mutation_types) {
+ let record_batch = record_batch.context(DecodeArrowSnafu)?;
+ let record_batch = RecordBatch::try_from_df_record_batch(schema.clone(), record_batch)
+ .context(CreateRecordBatchSnafu)?;
+ let op_type = match MutationType::from_i32(*mutation_type) {
Some(MutationType::Delete) => {
unimplemented!("delete mutation is not implemented")
}
- _ => {
- return DataCorruptedSnafu {
- message: format!("Unexpected mutation type: {mutation_type}"),
+ Some(MutationType::Put) => OpType::Put,
+ None => {
+ return BatchCorruptedSnafu {
+                        message: format!("Unexpected mutation type: {mutation_type}"),
}
.fail()
}
- }
+ };
+ mutations.push(Mutation {
+ op_type,
+ record_batch,
+ });
}
- Ok(write_batch)
- }
-}
-
-pub struct WriteBatchProtobufEncoder {}
-
-impl Encoder for WriteBatchProtobufEncoder {
- type Item = WriteBatch;
- type Error = WriteBatchError;
-
- fn encode(&self, item: &WriteBatch, dst: &mut Vec<u8>) -> Result<()> {
- let schema = item.schema().into();
-
- let mutations = item
- .iter()
- .map(|mtn| match mtn {
- Mutation::Put(put_data) => item
- .schema()
- .column_schemas()
- .iter()
- .map(|cs| {
- let vector = put_data
- .column_by_name(&cs.name)
- .context(MissingColumnSnafu { name: &cs.name })?;
- gen_columns(vector).context(ToProtobufSnafu)
- })
- .collect::<Result<Vec<_>>>(),
- })
- .collect::<Result<Vec<_>>>()?
- .into_iter()
- .map(|columns| write_batch::Mutation {
- mutation: Some(write_batch::mutation::Mutation::Put(write_batch::Put {
- columns,
- })),
- })
- .collect();
-
- let write_batch = write_batch::WriteBatch {
- schema: Some(schema),
- mutations,
- };
-
- write_batch.encode(dst).context(EncodeProtobufSnafu)
- }
-}
-
-pub struct WriteBatchProtobufDecoder {
- mutation_types: Vec<i32>,
-}
-
-impl WriteBatchProtobufDecoder {
- #[allow(dead_code)]
- pub fn new(mutation_types: Vec<i32>) -> Self {
- Self { mutation_types }
- }
-}
-impl Decoder for WriteBatchProtobufDecoder {
- type Item = WriteBatch;
- type Error = WriteBatchError;
-
- fn decode(&self, src: &[u8]) -> Result<WriteBatch> {
- let write_batch = write_batch::WriteBatch::decode(src).context(DecodeProtobufSnafu)?;
-
- let schema = write_batch.schema.context(DataCorruptedSnafu {
- message: "schema required",
- })?;
-
- let schema = SchemaRef::try_from(schema).context(FromProtobufSnafu {})?;
+ // check if exactly finished
+ ensure!(
+ reader.is_finished(),
+ BatchCorruptedSnafu {
+ message: "The num of data chunks is different than expected."
+ }
+ );
ensure!(
- write_batch.mutations.len() == self.mutation_types.len(),
- DataCorruptedSnafu {
- message: &format!(
+ mutations.len() == self.mutation_types.len(),
+ BatchCorruptedSnafu {
+ message: format!(
"expected {} mutations, but got {}",
self.mutation_types.len(),
- write_batch.mutations.len()
+ mutations.len()
)
}
);
- let mutations = write_batch
- .mutations
- .into_iter()
- .map(|mtn| match mtn.mutation {
- Some(write_batch::mutation::Mutation::Put(put)) => {
- let mut put_data = PutData::with_num_columns(put.columns.len());
-
- let res = schema
- .column_schemas()
- .iter()
- .map(|column| (column.name.clone(), column.data_type.clone()))
- .zip(put.columns.into_iter())
- .map(|((name, data_type), column)| {
- gen_put_data_vector(data_type, column)
- .map(|vector| (name, vector))
- .context(FromProtobufSnafu)
- })
- .collect::<Result<Vec<_>>>()?
- .into_iter()
- .map(|(name, vector)| put_data.add_column_by_name(&name, vector))
- .collect::<Result<Vec<_>>>();
-
- res.map(|_| Mutation::Put(put_data))
- }
- Some(write_batch::mutation::Mutation::Delete(_)) => todo!(),
- _ => DataCorruptedSnafu {
- message: "invalid mutation type",
- }
- .fail(),
- })
- .collect::<Result<Vec<_>>>()?;
-
- let mut write_batch = WriteBatch::new(schema);
-
- mutations
- .into_iter()
- .try_for_each(|mutation| match mutation {
- Mutation::Put(put_data) => write_batch.put(put_data),
- })?;
-
- Ok(write_batch)
+ Ok(Payload { schema, mutations })
}
}
#[cfg(test)]
mod tests {
+ use std::collections::HashMap;
use std::sync::Arc;
- use datatypes::vectors::{BooleanVector, TimestampMillisecondVector, UInt64Vector};
- use store_api::storage::PutOperation;
+ use datatypes::vectors::{BooleanVector, TimestampMillisecondVector, UInt64Vector, VectorRef};
+ use store_api::storage::{consts, WriteRequest};
use super::*;
+ use crate::write_batch::WriteBatch;
use crate::{proto, write_batch};
fn gen_new_batch_and_types() -> (WriteBatch, Vec<i32>) {
let mut batch = write_batch::new_test_batch();
for i in 0..10 {
- let intv = Arc::new(UInt64Vector::from_slice(&[1, 2, 3]));
- let boolv = Arc::new(BooleanVector::from(vec![Some(true), Some(false), None]));
- let tsv = Arc::new(TimestampMillisecondVector::from_vec(vec![i, i, i]));
+ let intv = Arc::new(UInt64Vector::from_slice(&[1, 2, 3])) as VectorRef;
+ let boolv =
+ Arc::new(BooleanVector::from(vec![Some(true), Some(false), None])) as VectorRef;
+ let tsv = Arc::new(TimestampMillisecondVector::from_vec(vec![i, i, i])) as VectorRef;
- let mut put_data = PutData::new();
- put_data.add_key_column("k1", intv.clone()).unwrap();
- put_data.add_version_column(intv).unwrap();
- put_data.add_value_column("v1", boolv).unwrap();
- put_data.add_key_column("ts", tsv).unwrap();
+ let mut put_data = HashMap::new();
+ put_data.insert("k1".to_string(), intv.clone());
+ put_data.insert(consts::VERSION_COLUMN_NAME.to_string(), intv);
+ put_data.insert("v1".to_string(), boolv);
+ put_data.insert("ts".to_string(), tsv);
batch.put(put_data).unwrap();
}
- let types = proto::wal::gen_mutation_types(&batch);
+ let types = proto::wal::gen_mutation_types(batch.payload());
(batch, types)
}
@@ -315,32 +167,15 @@ mod tests {
fn test_codec_arrow() -> Result<()> {
let (batch, mutation_types) = gen_new_batch_and_types();
- let encoder = WriteBatchArrowEncoder::new();
+ let encoder = PayloadEncoder::new();
let mut dst = vec![];
- let result = encoder.encode(&batch, &mut dst);
+ let result = encoder.encode(batch.payload(), &mut dst);
assert!(result.is_ok());
- let decoder = WriteBatchArrowDecoder::new(mutation_types);
+ let decoder = PayloadDecoder::new(&mutation_types);
let result = decoder.decode(&dst);
- let batch2 = result?;
- assert_eq!(batch.num_rows, batch2.num_rows);
-
- Ok(())
- }
-
- #[test]
- fn test_codec_protobuf() -> Result<()> {
- let (batch, mutation_types) = gen_new_batch_and_types();
-
- let encoder = WriteBatchProtobufEncoder {};
- let mut dst = vec![];
- let result = encoder.encode(&batch, &mut dst);
- assert!(result.is_ok());
-
- let decoder = WriteBatchProtobufDecoder::new(mutation_types);
- let result = decoder.decode(&dst);
- let batch2 = result?;
- assert_eq!(batch.num_rows, batch2.num_rows);
+ let payload = result?;
+ assert_eq!(*batch.payload(), payload);
Ok(())
}
@@ -348,18 +183,18 @@ mod tests {
fn gen_new_batch_and_types_with_none_column() -> (WriteBatch, Vec<i32>) {
let mut batch = write_batch::new_test_batch();
for _ in 0..10 {
- let intv = Arc::new(UInt64Vector::from_slice(&[1, 2, 3]));
- let tsv = Arc::new(TimestampMillisecondVector::from_vec(vec![0, 0, 0]));
+ let intv = Arc::new(UInt64Vector::from_slice(&[1, 2, 3])) as VectorRef;
+ let tsv = Arc::new(TimestampMillisecondVector::from_vec(vec![0, 0, 0])) as VectorRef;
- let mut put_data = PutData::new();
- put_data.add_key_column("k1", intv.clone()).unwrap();
- put_data.add_version_column(intv).unwrap();
- put_data.add_key_column("ts", tsv).unwrap();
+ let mut put_data = HashMap::with_capacity(3);
+ put_data.insert("k1".to_string(), intv.clone());
+ put_data.insert(consts::VERSION_COLUMN_NAME.to_string(), intv);
+ put_data.insert("ts".to_string(), tsv);
batch.put(put_data).unwrap();
}
- let types = proto::wal::gen_mutation_types(&batch);
+ let types = proto::wal::gen_mutation_types(batch.payload());
(batch, types)
}
@@ -368,31 +203,15 @@ mod tests {
fn test_codec_with_none_column_arrow() -> Result<()> {
let (batch, mutation_types) = gen_new_batch_and_types_with_none_column();
- let encoder = WriteBatchArrowEncoder::new();
+ let encoder = PayloadEncoder::new();
let mut dst = vec![];
- let result = encoder.encode(&batch, &mut dst);
+ let result = encoder.encode(batch.payload(), &mut dst);
assert!(result.is_ok());
- let decoder = WriteBatchArrowDecoder::new(mutation_types);
- let result = decoder.decode(&dst);
- let batch2 = result?;
- assert_eq!(batch.num_rows, batch2.num_rows);
-
- Ok(())
- }
-
- #[test]
- fn test_codec_with_none_column_protobuf() -> Result<()> {
- let (batch, mutation_types) = gen_new_batch_and_types_with_none_column();
-
- let encoder = WriteBatchProtobufEncoder {};
- let mut dst = vec![];
- encoder.encode(&batch, &mut dst).unwrap();
-
- let decoder = WriteBatchProtobufDecoder::new(mutation_types);
+ let decoder = PayloadDecoder::new(&mutation_types);
let result = decoder.decode(&dst);
- let batch2 = result?;
- assert_eq!(batch.num_rows, batch2.num_rows);
+ let payload = result?;
+ assert_eq!(*batch.payload(), payload);
Ok(())
}
diff --git a/src/storage/src/write_batch/compat.rs b/src/storage/src/write_batch/compat.rs
index b7d6758bb7a4..b7089825d3b8 100644
--- a/src/storage/src/write_batch/compat.rs
+++ b/src/storage/src/write_batch/compat.rs
@@ -12,20 +12,22 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use common_recordbatch::RecordBatch;
use datatypes::schema::{ColumnSchema, SchemaRef};
use snafu::{ensure, ResultExt};
use crate::error::{self, Result};
use crate::schema::compat::CompatWrite;
-use crate::write_batch::{Mutation, PutData, WriteBatch};
+use crate::write_batch::{self, Mutation, WriteBatch};
impl CompatWrite for WriteBatch {
fn compat_write(&mut self, dest_schema: &SchemaRef) -> Result<()> {
- let (data_version, schema_version) = (dest_schema.version(), self.schema.version());
+ let data_version = dest_schema.version();
+ let schema_version = self.schema().version();
// Fast path, nothing to do if schema version of the write batch is equal to version
// of destination.
if data_version == schema_version {
- debug_assert_eq!(dest_schema.column_schemas(), self.schema.column_schemas());
+ debug_assert_eq!(dest_schema.column_schemas(), self.schema().column_schemas());
return Ok(());
}
@@ -39,7 +41,7 @@ impl CompatWrite for WriteBatch {
);
// For columns not in schema, returns error instead of discarding the column silently.
- let column_not_in = column_not_in_schema(dest_schema, self.schema.column_schemas());
+ let column_not_in = column_not_in_schema(dest_schema, self.schema().column_schemas());
ensure!(
column_not_in.is_none(),
error::NotInSchemaToCompatSnafu {
@@ -48,37 +50,39 @@ impl CompatWrite for WriteBatch {
}
);
- for m in &mut self.mutations {
- match m {
- Mutation::Put(put_data) => {
- put_data.compat_write(dest_schema)?;
- }
- }
+ for mutation in &mut self.payload.mutations {
+ mutation.compat_write(dest_schema)?;
}
// Change schema to `dest_schema`.
- self.schema = dest_schema.clone();
+ self.payload.schema = dest_schema.clone();
Ok(())
}
}
-impl CompatWrite for PutData {
+impl CompatWrite for Mutation {
fn compat_write(&mut self, dest_schema: &SchemaRef) -> Result<()> {
- if self.is_empty() {
+ if self.record_batch.num_rows() == 0 {
return Ok(());
}
+ let num_rows = self.record_batch.num_rows();
+ let mut columns = Vec::with_capacity(dest_schema.num_columns());
for column_schema in dest_schema.column_schemas() {
- if self.column_by_name(&column_schema.name).is_none() {
+ if let Some(vector) = self.record_batch.column_by_name(&column_schema.name) {
+ columns.push(vector.clone());
+ } else {
// We need to fill the column by null or its default value.
- self.add_default_by_name(column_schema)
- .context(error::AddDefaultSnafu {
- column: &column_schema.name,
- })?;
+ let vector = write_batch::new_column_with_default_value(column_schema, num_rows)?;
+ columns.push(vector);
}
}
+ // Using dest schema to build RecordBatch.
+ self.record_batch = RecordBatch::new(dest_schema.clone(), columns)
+ .context(error::CreateRecordBatchSnafu)?;
+
Ok(())
}
}
@@ -95,12 +99,13 @@ fn column_not_in_schema(schema: &SchemaRef, column_schemas: &[ColumnSchema]) ->
#[cfg(test)]
mod tests {
+ use std::collections::HashMap;
use std::sync::Arc;
use datatypes::data_type::ConcreteDataType;
use datatypes::schema::{ColumnDefaultConstraint, SchemaBuilder};
- use datatypes::vectors::{Int32Vector, TimestampMillisecondVector};
- use store_api::storage::{PutOperation, WriteRequest};
+ use datatypes::vectors::{Int32Vector, TimestampMillisecondVector, VectorRef};
+ use store_api::storage::WriteRequest;
use super::*;
use crate::error::Error;
@@ -135,23 +140,31 @@ mod tests {
Arc::new(schema)
}
- fn new_put_data() -> PutData {
- let mut put_data = PutData::new();
- let k0 = Arc::new(Int32Vector::from_slice(&[1, 2, 3]));
- let ts = Arc::new(TimestampMillisecondVector::from_values([11, 12, 13]));
+ fn new_put_data() -> HashMap<String, VectorRef> {
+ let mut put_data = HashMap::new();
+ let k0 = Arc::new(Int32Vector::from_slice(&[1, 2, 3])) as VectorRef;
+ let ts = Arc::new(TimestampMillisecondVector::from_values([11, 12, 13])) as VectorRef;
- put_data.add_key_column("k0", k0).unwrap();
- put_data.add_key_column("ts", ts).unwrap();
+ put_data.insert("k0".to_string(), k0);
+ put_data.insert("ts".to_string(), ts);
put_data
}
#[test]
- fn test_put_data_compat_write() {
- let mut put_data = new_put_data();
+ fn test_mutation_compat_write() {
+ let put_data = new_put_data();
+ let schema_old = new_test_schema(None);
+ // Mutation doesn't check schema version, so we don't have to bump the version here.
let schema = new_test_schema(Some(Some(ColumnDefaultConstraint::null_value())));
- put_data.compat_write(&schema).unwrap();
- let v0 = put_data.column_by_name("v0").unwrap();
+ // Use WriteBatch to build a payload and its mutation.
+ let mut batch = WriteBatch::new(schema_old);
+ batch.put(put_data).unwrap();
+
+ let mutation = &mut batch.payload.mutations[0];
+ mutation.compat_write(&schema).unwrap();
+
+ let v0 = mutation.record_batch.column_by_name("v0").unwrap();
assert!(v0.only_null());
}
@@ -170,8 +183,9 @@ mod tests {
);
batch.compat_write(&schema_new).unwrap();
assert_eq!(schema_new, *batch.schema());
- let Mutation::Put(put_data) = batch.iter().next().unwrap();
- put_data.column_by_name("v0").unwrap();
+
+ let mutation = &batch.payload().mutations[0];
+ mutation.record_batch.column_by_name("v0").unwrap();
}
#[test]
diff --git a/src/store-api/src/storage.rs b/src/store-api/src/storage.rs
index 154c99b385b9..bea711ea6552 100644
--- a/src/store-api/src/storage.rs
+++ b/src/store-api/src/storage.rs
@@ -36,7 +36,7 @@ pub use self::engine::{CreateOptions, EngineContext, OpenOptions, StorageEngine}
pub use self::metadata::RegionMeta;
pub use self::region::{Region, WriteContext};
pub use self::requests::{
- AddColumn, AlterOperation, AlterRequest, GetRequest, PutOperation, ScanRequest, WriteRequest,
+ AddColumn, AlterOperation, AlterRequest, GetRequest, ScanRequest, WriteRequest,
};
pub use self::responses::{GetResponse, ScanResponse, WriteResponse};
pub use self::snapshot::{ReadContext, Snapshot};
diff --git a/src/store-api/src/storage/requests.rs b/src/store-api/src/storage/requests.rs
index 6e7184810a5e..7d9870f0c937 100644
--- a/src/store-api/src/storage/requests.rs
+++ b/src/store-api/src/storage/requests.rs
@@ -12,12 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::collections::HashSet;
-use std::time::Duration;
+use std::collections::{HashMap, HashSet};
use common_error::ext::ErrorExt;
use common_query::logical_plan::Expr;
-use common_time::RangeMillis;
use datatypes::vectors::VectorRef;
use crate::storage::{ColumnDescriptor, RegionDescriptor, SequenceNumber};
@@ -28,35 +26,11 @@ use crate::storage::{ColumnDescriptor, RegionDescriptor, SequenceNumber};
/// the request follows the same schema restriction.
pub trait WriteRequest: Send {
type Error: ErrorExt + Send + Sync;
- type PutOp: PutOperation;
/// Add put operation to the request.
- fn put(&mut self, put: Self::PutOp) -> Result<(), Self::Error>;
-
- /// Returns all possible time ranges that contain the timestamp in this batch.
///
- /// Each time range is aligned to given `duration`.
- fn time_ranges(&self, duration: Duration) -> Result<Vec<RangeMillis>, Self::Error>;
-
- /// Create a new put operation.
- fn put_op(&self) -> Self::PutOp;
-
- /// Create a new put operation with capacity reserved for `num_columns`.
- fn put_op_with_columns(num_columns: usize) -> Self::PutOp;
-}
-
-/// Put multiple rows.
-pub trait PutOperation: Send + std::fmt::Debug {
- type Error: ErrorExt + Send + Sync;
-
- /// Put data to the key column.
- fn add_key_column(&mut self, name: &str, vector: VectorRef) -> Result<(), Self::Error>;
-
- /// Put data to the version column.
- fn add_version_column(&mut self, vector: VectorRef) -> Result<(), Self::Error>;
-
- /// Put data to the value column.
- fn add_value_column(&mut self, name: &str, vector: VectorRef) -> Result<(), Self::Error>;
+ /// `data` is the columnar format of the data to put.
+ fn put(&mut self, data: HashMap<String, VectorRef>) -> Result<(), Self::Error>;
}
#[derive(Default)]
diff --git a/src/table/src/error.rs b/src/table/src/error.rs
index 3605ab0a1ae5..eba0a5269338 100644
--- a/src/table/src/error.rs
+++ b/src/table/src/error.rs
@@ -39,9 +39,6 @@ pub enum InnerError {
backtrace: Backtrace,
},
- #[snafu(display("Missing column when insert, column: {}", name))]
- MissingColumn { name: String, backtrace: Backtrace },
-
#[snafu(display("Poll stream failed, source: {}", source))]
PollStream {
source: ArrowError,
@@ -119,9 +116,9 @@ impl ErrorExt for InnerError {
| InnerError::PollStream { .. }
| InnerError::SchemaConversion { .. }
| InnerError::TableProjection { .. } => StatusCode::EngineExecuteQuery,
- InnerError::MissingColumn { .. }
- | InnerError::RemoveColumnInIndex { .. }
- | InnerError::BuildColumnDescriptor { .. } => StatusCode::InvalidArguments,
+ InnerError::RemoveColumnInIndex { .. } | InnerError::BuildColumnDescriptor { .. } => {
+ StatusCode::InvalidArguments
+ }
InnerError::TablesRecordBatch { .. } => StatusCode::Unexpected,
InnerError::ColumnExists { .. } => StatusCode::TableColumnExists,
InnerError::SchemaBuild { source, .. } => source.status_code(),
@@ -166,12 +163,16 @@ mod tests {
Err(DataFusionError::NotImplemented("table test".to_string())).context(DatafusionSnafu)?
}
- fn throw_missing_column_inner() -> std::result::Result<(), InnerError> {
- MissingColumnSnafu { name: "test" }.fail()
+ fn throw_column_exists_inner() -> std::result::Result<(), InnerError> {
+ ColumnExistsSnafu {
+ column_name: "col",
+ table_name: "test",
+ }
+ .fail()
}
fn throw_missing_column() -> Result<()> {
- Ok(throw_missing_column_inner()?)
+ Ok(throw_column_exists_inner()?)
}
fn throw_arrow() -> Result<()> {
@@ -186,7 +187,7 @@ mod tests {
let err = throw_missing_column().err().unwrap();
assert!(err.backtrace_opt().is_some());
- assert_eq!(StatusCode::InvalidArguments, err.status_code());
+ assert_eq!(StatusCode::TableColumnExists, err.status_code());
let err = throw_arrow().err().unwrap();
assert!(err.backtrace_opt().is_some());
@@ -195,15 +196,15 @@ mod tests {
#[test]
fn test_into_record_batch_error() {
- let err = throw_missing_column_inner().err().unwrap();
+ let err = throw_column_exists_inner().err().unwrap();
let err: RecordBatchError = err.into();
assert!(err.backtrace_opt().is_some());
- assert_eq!(StatusCode::InvalidArguments, err.status_code());
+ assert_eq!(StatusCode::TableColumnExists, err.status_code());
}
#[test]
fn test_into_df_error() {
- let err = throw_missing_column_inner().err().unwrap();
+ let err = throw_column_exists_inner().err().unwrap();
let err: DataFusionError = err.into();
assert!(matches!(err, DataFusionError::External(_)));
}
|
refactor
|
Remove PutOperation and Simplify WriteRequest API (#775)
|
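A quick orientation for the refactor record above: callers no longer assemble a PutData object column by column; WriteRequest::put now takes the columnar data as a plain HashMap<String, VectorRef>, and unknown column names are rejected inside put() with StatusCode::TableColumnNotFound. Below is a minimal sketch pieced together from the test code in the diff; it assumes the storage crate's internal WriteBatch, the VectorRef alias and consts::VERSION_COLUMN_NAME shown above, while the helper name put_three_rows is illustrative only.

use std::collections::HashMap;
use std::sync::Arc;

use datatypes::vectors::{BooleanVector, TimestampMillisecondVector, UInt64Vector, VectorRef};
use store_api::storage::{consts, WriteRequest};

use crate::write_batch::WriteBatch;

/// Adds one put mutation with three rows; `batch` must be built from a schema
/// that contains the k1, v1 and ts columns plus the internal version column.
fn put_three_rows(batch: &mut WriteBatch) {
    let key = Arc::new(UInt64Vector::from_slice(&[1, 2, 3])) as VectorRef;
    let value = Arc::new(BooleanVector::from(vec![Some(true), Some(false), None])) as VectorRef;
    let ts = Arc::new(TimestampMillisecondVector::from_vec(vec![0, 1, 2])) as VectorRef;

    // Columnar data keyed by column name; every vector must have the same length.
    let mut put_data = HashMap::new();
    put_data.insert("k1".to_string(), key.clone());
    put_data.insert(consts::VERSION_COLUMN_NAME.to_string(), key);
    put_data.insert("v1".to_string(), value);
    put_data.insert("ts".to_string(), ts);

    // A name that is not in the schema would make this call fail with
    // StatusCode::TableColumnNotFound, as exercised by the test above.
    batch.put(put_data).unwrap();
}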
573d369f77b59c707fe17c9c68af3db66a051944
|
2024-05-01 09:18:51
|
Yohan Wal
|
feat(fuzz): add insert logical table target (#3842)
| false
|
diff --git a/.github/workflows/develop.yml b/.github/workflows/develop.yml
index a02ab2e59560..8b904daff4b5 100644
--- a/.github/workflows/develop.yml
+++ b/.github/workflows/develop.yml
@@ -130,7 +130,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
- target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert" ]
+ target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ]
steps:
- uses: actions/checkout@v4
- uses: arduino/setup-protoc@v3
diff --git a/tests-fuzz/Cargo.toml b/tests-fuzz/Cargo.toml
index 03b708b9feb7..8bf5a34fb1ba 100644
--- a/tests-fuzz/Cargo.toml
+++ b/tests-fuzz/Cargo.toml
@@ -64,6 +64,13 @@ test = false
bench = false
doc = false
+[[bin]]
+name = "fuzz_insert_logical_table"
+path = "targets/fuzz_insert_logical_table.rs"
+test = false
+bench = false
+doc = false
+
[[bin]]
name = "fuzz_alter_table"
path = "targets/fuzz_alter_table.rs"
diff --git a/tests-fuzz/src/generator/insert_expr.rs b/tests-fuzz/src/generator/insert_expr.rs
index f3f0dba11646..5af3289c0436 100644
--- a/tests-fuzz/src/generator/insert_expr.rs
+++ b/tests-fuzz/src/generator/insert_expr.rs
@@ -31,6 +31,8 @@ use crate::ir::{generate_random_value, Ident};
#[builder(pattern = "owned")]
pub struct InsertExprGenerator<R: Rng + 'static> {
table_ctx: TableContextRef,
+ // Whether to omit all columns, i.e. INSERT INTO table_name VALUES (...)
+ omit_column_list: bool,
#[builder(default = "1")]
rows: usize,
#[builder(default = "Box::new(WordGenerator)")]
@@ -44,11 +46,8 @@ impl<R: Rng + 'static> Generator<InsertIntoExpr, R> for InsertExprGenerator<R> {
/// Generates the [InsertIntoExpr].
fn generate(&self, rng: &mut R) -> Result<InsertIntoExpr> {
- // Whether to omit all columns, i.e. INSERT INTO table_name VALUES (...)
- let omit_column_list = rng.gen_bool(0.2);
-
let mut values_columns = vec![];
- if omit_column_list {
+ if self.omit_column_list {
// If omit column list, then all columns are required in the values list
values_columns.clone_from(&self.table_ctx.columns);
} else {
@@ -94,7 +93,7 @@ impl<R: Rng + 'static> Generator<InsertIntoExpr, R> for InsertExprGenerator<R> {
Ok(InsertIntoExpr {
table_name: self.table_ctx.name.to_string(),
- columns: if omit_column_list {
+ columns: if self.omit_column_list {
vec![]
} else {
values_columns
diff --git a/tests-fuzz/src/translator/mysql/insert_expr.rs b/tests-fuzz/src/translator/mysql/insert_expr.rs
index 49ff192afb14..0e2252cbc54a 100644
--- a/tests-fuzz/src/translator/mysql/insert_expr.rs
+++ b/tests-fuzz/src/translator/mysql/insert_expr.rs
@@ -71,7 +71,7 @@ impl InsertIntoExprTranslator {
mod tests {
use std::sync::Arc;
- use rand::SeedableRng;
+ use rand::{Rng, SeedableRng};
use super::*;
use crate::generator::insert_expr::InsertExprGeneratorBuilder;
@@ -82,10 +82,12 @@ mod tests {
#[test]
fn test_insert_into_translator() {
let mut rng = rand_chacha::ChaCha8Rng::seed_from_u64(0);
+ let omit_column_list = rng.gen_bool(0.2);
let test_ctx = test_utils::new_test_ctx();
let insert_expr_generator = InsertExprGeneratorBuilder::default()
.table_ctx(Arc::new(test_ctx))
+ .omit_column_list(omit_column_list)
.rows(2)
.build()
.unwrap();
@@ -100,16 +102,16 @@ mod tests {
let insert_expr = insert_expr_generator.generate(&mut rng).unwrap();
let output = InsertIntoExprTranslator.translate(&insert_expr).unwrap();
- let expected = r#"INSERT INTO test (cpu_util, disk_util, ts) VALUES
-(0.7074194466620976, 0.661288102315126, '-47252-05-08 07:33:49.567+0000'),
-(0.8266101224213618, 0.7947724277743285, '-224292-12-07 02:51:53.371+0000');"#;
+ let expected = r#"INSERT INTO test (ts, memory_util) VALUES
+('+22606-05-02 04:44:02.976+0000', 0.7074194466620976),
+('+33689-06-12 08:42:11.037+0000', 0.40987428386535585);"#;
assert_eq!(output, expected);
let insert_expr = insert_expr_generator.generate(&mut rng).unwrap();
let output = InsertIntoExprTranslator.translate(&insert_expr).unwrap();
- let expected = r#"INSERT INTO test VALUES
-('odio', NULL, 0.48809950435391647, 0.5228925709595407, 0.9091528874275897, '+241156-12-16 20:52:15.185+0000'),
-('dignissimos', 'labore', NULL, 0.12983559048685023, 0.6362040919831425, '-30691-06-17 23:41:09.938+0000');"#;
+ let expected = r#"INSERT INTO test (ts, disk_util, cpu_util, host) VALUES
+('+200107-10-22 01:36:36.924+0000', 0.9082597320638828, 0.020853190804573818, 'voluptates'),
+('+241156-12-16 20:52:15.185+0000', 0.6492772846116915, 0.18078027701087784, 'repellat');"#;
assert_eq!(output, expected);
}
}
diff --git a/tests-fuzz/targets/fuzz_insert.rs b/tests-fuzz/targets/fuzz_insert.rs
index 7fc6d30a237a..e6a24dba9dc5 100644
--- a/tests-fuzz/targets/fuzz_insert.rs
+++ b/tests-fuzz/targets/fuzz_insert.rs
@@ -90,8 +90,11 @@ fn generate_insert_expr<R: Rng + 'static>(
rng: &mut R,
table_ctx: TableContextRef,
) -> Result<InsertIntoExpr> {
+ let omit_column_list = rng.gen_bool(0.2);
+
let insert_generator = InsertExprGeneratorBuilder::default()
.table_ctx(table_ctx)
+ .omit_column_list(omit_column_list)
.rows(input.rows)
.build()
.unwrap();
diff --git a/tests-fuzz/targets/fuzz_insert_logical_table.rs b/tests-fuzz/targets/fuzz_insert_logical_table.rs
new file mode 100644
index 000000000000..97f0a8b82575
--- /dev/null
+++ b/tests-fuzz/targets/fuzz_insert_logical_table.rs
@@ -0,0 +1,202 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#![no_main]
+
+use std::sync::Arc;
+
+use common_telemetry::info;
+use libfuzzer_sys::arbitrary::{Arbitrary, Unstructured};
+use libfuzzer_sys::fuzz_target;
+use rand::{Rng, SeedableRng};
+use rand_chacha::ChaChaRng;
+use snafu::{ensure, ResultExt};
+use sqlx::{Executor, MySql, Pool};
+use tests_fuzz::context::{TableContext, TableContextRef};
+use tests_fuzz::error::{self, Result};
+use tests_fuzz::fake::{
+ merge_two_word_map_fn, random_capitalize_map, uppercase_and_keyword_backtick_map,
+ MappedGenerator, WordGenerator,
+};
+use tests_fuzz::generator::create_expr::{
+ CreateLogicalTableExprGeneratorBuilder, CreatePhysicalTableExprGeneratorBuilder,
+};
+use tests_fuzz::generator::insert_expr::InsertExprGeneratorBuilder;
+use tests_fuzz::generator::Generator;
+use tests_fuzz::ir::{CreateTableExpr, InsertIntoExpr};
+use tests_fuzz::translator::mysql::create_expr::CreateTableExprTranslator;
+use tests_fuzz::translator::mysql::insert_expr::InsertIntoExprTranslator;
+use tests_fuzz::translator::DslTranslator;
+use tests_fuzz::utils::{init_greptime_connections, Connections};
+
+struct FuzzContext {
+ greptime: Pool<MySql>,
+}
+
+impl FuzzContext {
+ async fn close(self) {
+ self.greptime.close().await;
+ }
+}
+
+#[derive(Copy, Clone, Debug)]
+struct FuzzInput {
+ seed: u64,
+ rows: usize,
+}
+
+impl Arbitrary<'_> for FuzzInput {
+ fn arbitrary(u: &mut Unstructured<'_>) -> arbitrary::Result<Self> {
+ let seed = u.int_in_range(u64::MIN..=u64::MAX)?;
+ let mut rng = ChaChaRng::seed_from_u64(seed);
+ let rows = rng.gen_range(1..4096);
+ Ok(FuzzInput { rows, seed })
+ }
+}
+
+fn generate_create_physical_table_expr<R: Rng + 'static>(rng: &mut R) -> Result<CreateTableExpr> {
+ let physical_table_if_not_exists = rng.gen_bool(0.5);
+ let create_physical_table_expr = CreatePhysicalTableExprGeneratorBuilder::default()
+ .name_generator(Box::new(MappedGenerator::new(
+ WordGenerator,
+ merge_two_word_map_fn(random_capitalize_map, uppercase_and_keyword_backtick_map),
+ )))
+ .if_not_exists(physical_table_if_not_exists)
+ .build()
+ .unwrap();
+ create_physical_table_expr.generate(rng)
+}
+
+fn generate_create_logical_table_expr<R: Rng + 'static>(
+ physical_table_ctx: TableContextRef,
+ rng: &mut R,
+) -> Result<CreateTableExpr> {
+ let labels = rng.gen_range(1..=5);
+ let logical_table_if_not_exists = rng.gen_bool(0.5);
+
+ let create_logical_table_expr = CreateLogicalTableExprGeneratorBuilder::default()
+ .name_generator(Box::new(MappedGenerator::new(
+ WordGenerator,
+ merge_two_word_map_fn(random_capitalize_map, uppercase_and_keyword_backtick_map),
+ )))
+ .physical_table_ctx(physical_table_ctx)
+ .labels(labels)
+ .if_not_exists(logical_table_if_not_exists)
+ .build()
+ .unwrap();
+ create_logical_table_expr.generate(rng)
+}
+
+fn generate_insert_expr<R: Rng + 'static>(
+ input: FuzzInput,
+ rng: &mut R,
+ table_ctx: TableContextRef,
+) -> Result<InsertIntoExpr> {
+ let insert_generator = InsertExprGeneratorBuilder::default()
+ .omit_column_list(false)
+ .table_ctx(table_ctx)
+ .rows(input.rows)
+ .build()
+ .unwrap();
+ insert_generator.generate(rng)
+}
+
+async fn execute_insert(ctx: FuzzContext, input: FuzzInput) -> Result<()> {
+ info!("input: {input:?}");
+ let mut rng = ChaChaRng::seed_from_u64(input.seed);
+
+ // Create a physical table and a logical table on top of it
+ let create_physical_table_expr = generate_create_physical_table_expr(&mut rng).unwrap();
+ let translator = CreateTableExprTranslator;
+ let sql = translator.translate(&create_physical_table_expr)?;
+ let result = sqlx::query(&sql)
+ .execute(&ctx.greptime)
+ .await
+ .context(error::ExecuteQuerySnafu { sql: &sql })?;
+ info!("Create physical table: {sql}, result: {result:?}");
+
+ let physical_table_ctx = Arc::new(TableContext::from(&create_physical_table_expr));
+
+ let create_logical_table_expr =
+ generate_create_logical_table_expr(physical_table_ctx, &mut rng).unwrap();
+ let sql = translator.translate(&create_logical_table_expr)?;
+ let result = sqlx::query(&sql)
+ .execute(&ctx.greptime)
+ .await
+ .context(error::ExecuteQuerySnafu { sql: &sql })?;
+ info!("Create logical table: {sql}, result: {result:?}");
+
+ let logical_table_ctx = Arc::new(TableContext::from(&create_logical_table_expr));
+
+ let insert_expr = generate_insert_expr(input, &mut rng, logical_table_ctx)?;
+ let translator = InsertIntoExprTranslator;
+ let sql = translator.translate(&insert_expr)?;
+ let result = ctx
+ .greptime
+ // unprepared query, see <https://github.com/GreptimeTeam/greptimedb/issues/3500>
+ .execute(sql.as_str())
+ .await
+ .context(error::ExecuteQuerySnafu { sql: &sql })?;
+
+ ensure!(
+ result.rows_affected() == input.rows as u64,
+ error::AssertSnafu {
+ reason: format!(
+ "expected rows affected: {}, actual: {}",
+ input.rows,
+ result.rows_affected(),
+ )
+ }
+ );
+
+ // TODO: Validate inserted rows
+
+ // Clean up logical table
+ let sql = format!("DROP TABLE {}", create_logical_table_expr.table_name);
+ let result = sqlx::query(&sql)
+ .execute(&ctx.greptime)
+ .await
+ .context(error::ExecuteQuerySnafu { sql: &sql })?;
+ info!(
+ "Drop table: {}, result: {result:?}",
+ create_logical_table_expr.table_name
+ );
+
+ // Clean up physical table
+ let sql = format!("DROP TABLE {}", create_physical_table_expr.table_name);
+ let result = sqlx::query(&sql)
+ .execute(&ctx.greptime)
+ .await
+ .context(error::ExecuteQuerySnafu { sql })?;
+ info!(
+ "Drop table: {}, result: {result:?}",
+ create_physical_table_expr.table_name
+ );
+ ctx.close().await;
+
+ Ok(())
+}
+
+fuzz_target!(|input: FuzzInput| {
+ common_telemetry::init_default_ut_logging();
+ common_runtime::block_on_write(async {
+ let Connections { mysql } = init_greptime_connections().await;
+ let ctx = FuzzContext {
+            greptime: mysql.expect("mysql connection init must succeed"),
+ };
+ execute_insert(ctx, input)
+ .await
+            .unwrap_or_else(|err| panic!("fuzz test must succeed: {err:?}"));
+ })
+});
|
feat
|
add insert logical table target (#3842)
|
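On the mechanics of the fuzz target added above: every random choice is derived from a ChaCha RNG seeded by the libfuzzer input, so a failing INSERT can be replayed from the logged seed, and the logical-table variant always emits an explicit column list (omit_column_list(false)). A condensed, non-authoritative sketch of the generation and translation step follows; the wrapper function name and its seed/rows parameters are illustrative, and the CreateTableExpr is assumed to come from the earlier create-logical-table step shown in the diff.

use std::sync::Arc;

use rand::SeedableRng;
use rand_chacha::ChaChaRng;
use tests_fuzz::context::{TableContext, TableContextRef};
use tests_fuzz::generator::insert_expr::InsertExprGeneratorBuilder;
use tests_fuzz::generator::Generator;
use tests_fuzz::ir::CreateTableExpr;
use tests_fuzz::translator::mysql::insert_expr::InsertIntoExprTranslator;
use tests_fuzz::translator::DslTranslator;

/// Builds the SQL for one fuzz iteration against a logical table.
fn build_insert_sql(create_logical_table_expr: &CreateTableExpr, seed: u64, rows: usize) -> String {
    // Same seed => same generated statement, which is what makes failures reproducible.
    let mut rng = ChaChaRng::seed_from_u64(seed);
    let logical_table_ctx: TableContextRef =
        Arc::new(TableContext::from(create_logical_table_expr));

    let insert_generator = InsertExprGeneratorBuilder::default()
        // The logical-table target always spells out the column list.
        .omit_column_list(false)
        .table_ctx(logical_table_ctx)
        .rows(rows)
        .build()
        .unwrap();
    let insert_expr = insert_generator.generate(&mut rng).unwrap();

    // Translate the IR into a MySQL-dialect INSERT ... VALUES statement; the target
    // then runs it as an unprepared query and checks rows_affected == rows.
    InsertIntoExprTranslator.translate(&insert_expr).unwrap()
}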
45d4065fd68803d3a28f6ddc3c30b3f18878b674
|
2025-01-10 14:07:02
|
ZonaHe
|
feat: update dashboard to v0.7.4 (#5336)
| false
|
diff --git a/src/servers/dashboard/VERSION b/src/servers/dashboard/VERSION
index 3d105a6fd8ce..520c9c28b7fa 100644
--- a/src/servers/dashboard/VERSION
+++ b/src/servers/dashboard/VERSION
@@ -1 +1 @@
-v0.7.3
+v0.7.4
|
feat
|
update dashboard to v0.7.4 (#5336)
|
f5829364a2626f559eebe962e70863ed8c47ee79
|
2025-01-14 13:49:33
|
Ning Sun
|
fix: security fix, sqlx, hashbrown, idna and CI updates (#5330)
| false
|
diff --git a/.github/workflows/develop.yml b/.github/workflows/develop.yml
index 4fada893ca30..f3799e8df037 100644
--- a/.github/workflows/develop.yml
+++ b/.github/workflows/develop.yml
@@ -1,4 +1,6 @@
on:
+ schedule:
+ - cron: "0 15 * * 1-5"
merge_group:
pull_request:
types: [ opened, synchronize, reopened, ready_for_review ]
@@ -51,12 +53,6 @@ jobs:
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- # - name: Rust Cache
- # uses: Swatinem/rust-cache@v2
- # with:
- # # Shares across multiple jobs
- # # Shares with `Clippy` job
- # shared-key: "check-lint"
- name: Run cargo check
run: cargo check --locked --workspace --all-targets
@@ -67,11 +63,6 @@ jobs:
steps:
- uses: actions/checkout@v4
- uses: actions-rust-lang/setup-rust-toolchain@v1
- # - name: Rust Cache
- # uses: Swatinem/rust-cache@v2
- # with:
- # # Shares across multiple jobs
- # shared-key: "check-toml"
- name: Install taplo
run: cargo +stable install taplo-cli --version ^0.9 --locked --force
- name: Run taplo
@@ -94,6 +85,8 @@ jobs:
with:
# Shares across multiple jobs
shared-key: "build-binaries"
+ cache-all-crates: "true"
+ save-if: ${{ github.ref == 'refs/heads/main' }}
- name: Install cargo-gc-bin
shell: bash
run: cargo install cargo-gc-bin --force
@@ -142,11 +135,6 @@ jobs:
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- # - name: Rust Cache
- # uses: Swatinem/rust-cache@v2
- # with:
- # # Shares across multiple jobs
- # shared-key: "fuzz-test-targets"
- name: Set Rust Fuzz
shell: bash
run: |
@@ -200,11 +188,6 @@ jobs:
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- # - name: Rust Cache
- # uses: Swatinem/rust-cache@v2
- # with:
- # # Shares across multiple jobs
- # shared-key: "fuzz-test-targets"
- name: Set Rust Fuzz
shell: bash
run: |
@@ -255,6 +238,8 @@ jobs:
with:
# Shares across multiple jobs
shared-key: "build-greptime-ci"
+ cache-all-crates: "true"
+ save-if: ${{ github.ref == 'refs/heads/main' }}
- name: Install cargo-gc-bin
shell: bash
run: cargo install cargo-gc-bin --force
@@ -317,11 +302,6 @@ jobs:
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- # - name: Rust Cache
- # uses: Swatinem/rust-cache@v2
- # with:
- # # Shares across multiple jobs
- # shared-key: "fuzz-test-targets"
- name: Set Rust Fuzz
shell: bash
run: |
@@ -466,11 +446,6 @@ jobs:
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions-rust-lang/setup-rust-toolchain@v1
- # - name: Rust Cache
- # uses: Swatinem/rust-cache@v2
- # with:
- # # Shares across multiple jobs
- # shared-key: "fuzz-test-targets"
- name: Set Rust Fuzz
shell: bash
run: |
@@ -612,11 +587,6 @@ jobs:
- uses: actions-rust-lang/setup-rust-toolchain@v1
with:
components: rustfmt
- # - name: Rust Cache
- # uses: Swatinem/rust-cache@v2
- # with:
- # # Shares across multiple jobs
- # shared-key: "check-rust-fmt"
- name: Check format
run: make fmt-check
@@ -638,6 +608,8 @@ jobs:
# Shares across multiple jobs
# Shares with `Check` job
shared-key: "check-lint"
+ cache-all-crates: "true"
+ save-if: ${{ github.ref == 'refs/heads/main' }}
- name: Run cargo clippy
run: make clippy
@@ -670,6 +642,7 @@ jobs:
with:
# Shares cross multiple jobs
shared-key: "coverage-test"
+ cache-all-crates: "true"
save-if: ${{ github.ref == 'refs/heads/main' }}
# Disabled temporarily to see performance
# - name: Docker Cache
diff --git a/.github/workflows/nightly-ci.yml b/.github/workflows/nightly-ci.yml
index bcdb31eaf644..b8a41a0a8e46 100644
--- a/.github/workflows/nightly-ci.yml
+++ b/.github/workflows/nightly-ci.yml
@@ -108,51 +108,6 @@ jobs:
GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
UNITTEST_LOG_DIR: "__unittest_logs"
- ## this is designed for generating cache that usable for pull requests
- test-on-linux:
- name: Run tests on Linux
- if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
- runs-on: ubuntu-20.04-8-cores
- timeout-minutes: 60
- steps:
- - uses: actions/checkout@v4
- - uses: arduino/setup-protoc@v3
- with:
- repo-token: ${{ secrets.GITHUB_TOKEN }}
- - uses: rui314/setup-mold@v1
- - name: Install Rust toolchain
- uses: actions-rust-lang/setup-rust-toolchain@v1
- - name: Rust Cache
- uses: Swatinem/rust-cache@v2
- with:
- # Shares cross multiple jobs
- shared-key: "coverage-test"
- - name: Install Cargo Nextest
- uses: taiki-e/install-action@nextest
- - name: Setup external services
- working-directory: tests-integration/fixtures
- run: docker compose up -d --wait
- - name: Running tests
- run: cargo nextest run -F dashboard -F pg_kvbackend
- env:
- CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=mold"
- RUST_BACKTRACE: 1
- CARGO_INCREMENTAL: 0
- GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
- GT_S3_ACCESS_KEY_ID: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
- GT_S3_ACCESS_KEY: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
- GT_S3_REGION: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
- GT_MINIO_BUCKET: greptime
- GT_MINIO_ACCESS_KEY_ID: superpower_ci_user
- GT_MINIO_ACCESS_KEY: superpower_password
- GT_MINIO_REGION: us-west-2
- GT_MINIO_ENDPOINT_URL: http://127.0.0.1:9000
- GT_ETCD_ENDPOINTS: http://127.0.0.1:2379
- GT_POSTGRES_ENDPOINTS: postgres://greptimedb:[email protected]:5432/postgres
- GT_KAFKA_ENDPOINTS: 127.0.0.1:9092
- GT_KAFKA_SASL_ENDPOINTS: 127.0.0.1:9093
- UNITTEST_LOG_DIR: "__unittest_logs"
-
cleanbuild-linux-nix:
name: Run clean build on Linux
runs-on: ubuntu-latest-8-cores
diff --git a/Cargo.lock b/Cargo.lock
index 2f36838a7b6f..5b3291408d66 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -62,6 +62,15 @@ dependencies = [
"memchr",
]
+[[package]]
+name = "aligned-vec"
+version = "0.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7e0966165eaf052580bd70eb1b32cb3d6245774c0104d1b2793e9650bf83b52a"
+dependencies = [
+ "equator",
+]
+
[[package]]
name = "alloc-no-stdlib"
version = "2.0.4"
@@ -332,7 +341,7 @@ dependencies = [
"arrow-data",
"arrow-schema",
"arrow-select",
- "atoi 2.0.0",
+ "atoi",
"base64 0.22.1",
"chrono",
"comfy-table",
@@ -674,15 +683,6 @@ dependencies = [
"pin-project-lite",
]
-[[package]]
-name = "atoi"
-version = "1.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d7c57d12312ff59c811c0643f4d80830505833c9ffaebd193d819392b265be8e"
-dependencies = [
- "num-traits",
-]
-
[[package]]
name = "atoi"
version = "2.0.0"
@@ -957,6 +957,9 @@ name = "bitflags"
version = "2.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de"
+dependencies = [
+ "serde",
+]
[[package]]
name = "bitpacking"
@@ -2139,7 +2142,7 @@ version = "0.12.0"
dependencies = [
"common-error",
"common-macro",
- "pprof",
+ "pprof 0.14.0",
"prost 0.12.6",
"snafu 0.8.5",
"tokio",
@@ -2422,12 +2425,6 @@ dependencies = [
"tracing-subscriber",
]
-[[package]]
-name = "const-oid"
-version = "0.7.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e4c78c047431fee22c1a7bb92e00ad095a02a983affe4d8a72e2a2c62c1b94f3"
-
[[package]]
name = "const-oid"
version = "0.9.6"
@@ -2686,16 +2683,6 @@ version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7"
-[[package]]
-name = "crypto-bigint"
-version = "0.3.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "03c6a1d5fa1de37e071642dfa44ec552ca5b299adb128fab16138e24b548fd21"
-dependencies = [
- "generic-array",
- "subtle",
-]
-
[[package]]
name = "crypto-common"
version = "0.1.6"
@@ -3259,25 +3246,14 @@ dependencies = [
"num-traits",
]
-[[package]]
-name = "der"
-version = "0.5.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6919815d73839e7ad218de758883aae3a257ba6759ce7a9992501efbb53d705c"
-dependencies = [
- "const-oid 0.7.1",
- "crypto-bigint",
- "pem-rfc7468 0.3.1",
-]
-
[[package]]
name = "der"
version = "0.7.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f55bf8e7b65898637379c1b74eb1551107c8294ed26d855ceb9fd1a09cfc9bc0"
dependencies = [
- "const-oid 0.9.6",
- "pem-rfc7468 0.7.0",
+ "const-oid",
+ "pem-rfc7468",
"zeroize",
]
@@ -3474,7 +3450,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292"
dependencies = [
"block-buffer",
- "const-oid 0.9.6",
+ "const-oid",
"crypto-common",
"subtle",
]
@@ -3488,15 +3464,6 @@ dependencies = [
"dirs-sys",
]
-[[package]]
-name = "dirs"
-version = "4.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ca3aa72a6f96ea37bbc5aa912f6788242832f75369bdfdadcb0e38423f100059"
-dependencies = [
- "dirs-sys",
-]
-
[[package]]
name = "dirs-next"
version = "2.0.0"
@@ -3529,6 +3496,17 @@ dependencies = [
"winapi",
]
+[[package]]
+name = "displaydoc"
+version = "0.2.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.90",
+]
+
[[package]]
name = "dlv-list"
version = "0.3.0"
@@ -3609,6 +3587,9 @@ name = "either"
version = "1.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0"
+dependencies = [
+ "serde",
+]
[[package]]
name = "encode_unicode"
@@ -3655,6 +3636,26 @@ dependencies = [
"syn 2.0.90",
]
+[[package]]
+name = "equator"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c35da53b5a021d2484a7cc49b2ac7f2d840f8236a286f84202369bd338d761ea"
+dependencies = [
+ "equator-macro",
+]
+
+[[package]]
+name = "equator-macro"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3bf679796c0322556351f287a51b49e48f7c4986e727b5dd78c972d30e2e16cc"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.90",
+]
+
[[package]]
name = "equivalent"
version = "1.0.1"
@@ -3707,6 +3708,17 @@ dependencies = [
"tower-service",
]
+[[package]]
+name = "etcetera"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "136d1b5283a1ab77bd9257427ffd09d8667ced0570b6f938942bc7568ed5b943"
+dependencies = [
+ "cfg-if",
+ "home",
+ "windows-sys 0.48.0",
+]
+
[[package]]
name = "event-listener"
version = "2.5.3"
@@ -3983,6 +3995,17 @@ dependencies = [
"bitflags 1.3.2",
]
+[[package]]
+name = "flume"
+version = "0.11.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "da0e4dd2a88388a1f4ccc7c9ce104604dab68d9f408dc34cd45823d5a9069095"
+dependencies = [
+ "futures-core",
+ "futures-sink",
+ "spin",
+]
+
[[package]]
name = "fnv"
version = "1.0.7"
@@ -4214,13 +4237,13 @@ dependencies = [
[[package]]
name = "futures-intrusive"
-version = "0.4.2"
+version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a604f7a68fbf8103337523b1fadc8ade7361ee3f112f7c680ad179651616aed5"
+checksum = "1d930c203dd0b6ff06e0201a4a2fe9149b43c684fd4420555b26d21b1a02956f"
dependencies = [
"futures-core",
"lock_api",
- "parking_lot 0.11.2",
+ "parking_lot 0.12.3",
]
[[package]]
@@ -4515,9 +4538,9 @@ dependencies = [
[[package]]
name = "hashbrown"
-version = "0.15.0"
+version = "0.15.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1e087f84d4f86bf4b218b927129862374b72199ae7d8657835f1e89000eea4fb"
+checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289"
dependencies = [
"allocator-api2",
"equivalent",
@@ -4526,11 +4549,11 @@ dependencies = [
[[package]]
name = "hashlink"
-version = "0.8.4"
+version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7"
+checksum = "7382cf6263419f2d8df38c55d7da83da5c18aef87fc7a7fc1fb1e344edfe14c1"
dependencies = [
- "hashbrown 0.14.5",
+ "hashbrown 0.15.2",
]
[[package]]
@@ -4609,9 +4632,6 @@ name = "heck"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8"
-dependencies = [
- "unicode-segmentation",
-]
[[package]]
name = "heck"
@@ -5090,6 +5110,124 @@ dependencies = [
"cc",
]
+[[package]]
+name = "icu_collections"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526"
+dependencies = [
+ "displaydoc",
+ "yoke",
+ "zerofrom",
+ "zerovec",
+]
+
+[[package]]
+name = "icu_locid"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637"
+dependencies = [
+ "displaydoc",
+ "litemap",
+ "tinystr",
+ "writeable",
+ "zerovec",
+]
+
+[[package]]
+name = "icu_locid_transform"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e"
+dependencies = [
+ "displaydoc",
+ "icu_locid",
+ "icu_locid_transform_data",
+ "icu_provider",
+ "tinystr",
+ "zerovec",
+]
+
+[[package]]
+name = "icu_locid_transform_data"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e"
+
+[[package]]
+name = "icu_normalizer"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f"
+dependencies = [
+ "displaydoc",
+ "icu_collections",
+ "icu_normalizer_data",
+ "icu_properties",
+ "icu_provider",
+ "smallvec",
+ "utf16_iter",
+ "utf8_iter",
+ "write16",
+ "zerovec",
+]
+
+[[package]]
+name = "icu_normalizer_data"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516"
+
+[[package]]
+name = "icu_properties"
+version = "1.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5"
+dependencies = [
+ "displaydoc",
+ "icu_collections",
+ "icu_locid_transform",
+ "icu_properties_data",
+ "icu_provider",
+ "tinystr",
+ "zerovec",
+]
+
+[[package]]
+name = "icu_properties_data"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569"
+
+[[package]]
+name = "icu_provider"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9"
+dependencies = [
+ "displaydoc",
+ "icu_locid",
+ "icu_provider_macros",
+ "stable_deref_trait",
+ "tinystr",
+ "writeable",
+ "yoke",
+ "zerofrom",
+ "zerovec",
+]
+
+[[package]]
+name = "icu_provider_macros"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.90",
+]
+
[[package]]
name = "ident_case"
version = "1.0.1"
@@ -5098,12 +5236,23 @@ checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39"
[[package]]
name = "idna"
-version = "0.5.0"
+version = "1.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6"
+checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e"
dependencies = [
- "unicode-bidi",
- "unicode-normalization",
+ "idna_adapter",
+ "smallvec",
+ "utf8_iter",
+]
+
+[[package]]
+name = "idna_adapter"
+version = "1.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71"
+dependencies = [
+ "icu_normalizer",
+ "icu_properties",
]
[[package]]
@@ -5184,7 +5333,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da"
dependencies = [
"equivalent",
- "hashbrown 0.15.0",
+ "hashbrown 0.15.2",
"serde",
]
@@ -5517,7 +5666,7 @@ dependencies = [
"base64 0.21.7",
"js-sys",
"pem",
- "ring 0.17.8",
+ "ring",
"serde",
"serde_json",
"simple_asn1",
@@ -5713,7 +5862,7 @@ version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe"
dependencies = [
- "spin 0.9.8",
+ "spin",
]
[[package]]
@@ -5852,6 +6001,16 @@ dependencies = [
"redox_syscall 0.5.7",
]
+[[package]]
+name = "libsqlite3-sys"
+version = "0.30.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2e99fb7a497b1e3339bc746195567ed8d3e24945ecd636e3619d20b9de9e9149"
+dependencies = [
+ "pkg-config",
+ "vcpkg",
+]
+
[[package]]
name = "libz-sys"
version = "1.1.20"
@@ -5876,6 +6035,12 @@ version = "0.4.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89"
+[[package]]
+name = "litemap"
+version = "0.7.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4ee93343901ab17bd981295f2cf0026d4ad018c7c31ba84549a4ddbb47a45104"
+
[[package]]
name = "local-ip-address"
version = "0.6.3"
@@ -6528,7 +6693,7 @@ dependencies = [
"log",
"memchr",
"mime",
- "spin 0.9.8",
+ "spin",
"version_check",
]
@@ -7725,15 +7890,6 @@ dependencies = [
"serde",
]
-[[package]]
-name = "pem-rfc7468"
-version = "0.3.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "01de5d978f34aa4b2296576379fcc416034702fd94117c56ffd8a1a767cefb30"
-dependencies = [
- "base64ct",
-]
-
[[package]]
name = "pem-rfc7468"
version = "0.7.0"
@@ -7826,7 +7982,7 @@ dependencies = [
"md5",
"postgres-types",
"rand",
- "ring 0.17.8",
+ "ring",
"rust_decimal",
"thiserror 2.0.6",
"tokio",
@@ -7972,26 +8128,15 @@ dependencies = [
"futures-io",
]
-[[package]]
-name = "pkcs1"
-version = "0.3.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a78f66c04ccc83dd4486fd46c33896f4e17b24a7a3a6400dedc48ed0ddd72320"
-dependencies = [
- "der 0.5.1",
- "pkcs8 0.8.0",
- "zeroize",
-]
-
[[package]]
name = "pkcs1"
version = "0.7.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c8ffb9f10fa047879315e6625af03c164b16962a5368d724ed16323b68ace47f"
dependencies = [
- "der 0.7.9",
- "pkcs8 0.10.2",
- "spki 0.7.3",
+ "der",
+ "pkcs8",
+ "spki",
]
[[package]]
@@ -8002,22 +8147,11 @@ checksum = "e847e2c91a18bfa887dd028ec33f2fe6f25db77db3619024764914affe8b69a6"
dependencies = [
"aes",
"cbc",
- "der 0.7.9",
+ "der",
"pbkdf2",
"scrypt",
"sha2",
- "spki 0.7.3",
-]
-
-[[package]]
-name = "pkcs8"
-version = "0.8.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7cabda3fb821068a9a4fab19a683eac3af12edf0f34b94a8be53c4972b8149d0"
-dependencies = [
- "der 0.5.1",
- "spki 0.5.4",
- "zeroize",
+ "spki",
]
[[package]]
@@ -8026,10 +8160,10 @@ version = "0.10.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7"
dependencies = [
- "der 0.7.9",
+ "der",
"pkcs5",
"rand_core",
- "spki 0.7.3",
+ "spki",
]
[[package]]
@@ -8142,6 +8276,28 @@ dependencies = [
"nix 0.26.4",
"once_cell",
"parking_lot 0.12.3",
+ "smallvec",
+ "symbolic-demangle",
+ "tempfile",
+ "thiserror 1.0.64",
+]
+
+[[package]]
+name = "pprof"
+version = "0.14.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ebbe2f8898beba44815fdc9e5a4ae9c929e21c5dc29b0c774a15555f7f58d6d0"
+dependencies = [
+ "aligned-vec",
+ "backtrace",
+ "cfg-if",
+ "findshlibs",
+ "inferno",
+ "libc",
+ "log",
+ "nix 0.26.4",
+ "once_cell",
+ "parking_lot 0.12.3",
"prost 0.12.6",
"prost-build 0.12.6",
"prost-derive 0.12.6",
@@ -8746,7 +8902,7 @@ checksum = "fadfaed2cd7f389d0161bb73eeb07b7b78f8691047a6f3e73caaeae55310a4a6"
dependencies = [
"bytes",
"rand",
- "ring 0.17.8",
+ "ring",
"rustc-hash 2.0.0",
"rustls 0.23.20",
"slab",
@@ -9073,7 +9229,7 @@ dependencies = [
"quick-xml 0.35.0",
"rand",
"reqwest",
- "rsa 0.9.6",
+ "rsa",
"rust-ini 0.21.1",
"serde",
"serde_json",
@@ -9149,21 +9305,6 @@ dependencies = [
"bytemuck",
]
-[[package]]
-name = "ring"
-version = "0.16.20"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc"
-dependencies = [
- "cc",
- "libc",
- "once_cell",
- "spin 0.5.2",
- "untrusted 0.7.1",
- "web-sys",
- "winapi",
-]
-
[[package]]
name = "ring"
version = "0.17.8"
@@ -9174,8 +9315,8 @@ dependencies = [
"cfg-if",
"getrandom",
"libc",
- "spin 0.9.8",
- "untrusted 0.9.0",
+ "spin",
+ "untrusted",
"windows-sys 0.52.0",
]
@@ -9225,43 +9366,23 @@ dependencies = [
"serde",
]
-[[package]]
-name = "rsa"
-version = "0.6.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4cf22754c49613d2b3b119f0e5d46e34a2c628a937e3024b8762de4e7d8c710b"
-dependencies = [
- "byteorder",
- "digest",
- "num-bigint-dig",
- "num-integer",
- "num-iter",
- "num-traits",
- "pkcs1 0.3.3",
- "pkcs8 0.8.0",
- "rand_core",
- "smallvec",
- "subtle",
- "zeroize",
-]
-
[[package]]
name = "rsa"
version = "0.9.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5d0e5124fcb30e76a7e79bfee683a2746db83784b86289f6251b54b7950a0dfc"
dependencies = [
- "const-oid 0.9.6",
+ "const-oid",
"digest",
"num-bigint-dig",
"num-integer",
"num-traits",
- "pkcs1 0.7.5",
- "pkcs8 0.10.2",
+ "pkcs1",
+ "pkcs8",
"rand_core",
"sha2",
"signature",
- "spki 0.7.3",
+ "spki",
"subtle",
"zeroize",
]
@@ -9533,25 +9654,13 @@ dependencies = [
[[package]]
name = "rustls"
-version = "0.20.9"
+version = "0.21.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1b80e3dec595989ea8510028f30c408a4630db12c9cbb8de34203b89d6577e99"
+checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e"
dependencies = [
"log",
- "ring 0.16.20",
- "sct",
- "webpki",
-]
-
-[[package]]
-name = "rustls"
-version = "0.21.12"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e"
-dependencies = [
- "log",
- "ring 0.17.8",
- "rustls-webpki 0.101.7",
+ "ring",
+ "rustls-webpki 0.101.7",
"sct",
]
@@ -9562,7 +9671,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bf4ef73721ac7bcd79b2b315da7779d8fc09718c6b3d2d1b2d94850eb8c18432"
dependencies = [
"log",
- "ring 0.17.8",
+ "ring",
"rustls-pki-types",
"rustls-webpki 0.102.8",
"subtle",
@@ -9576,7 +9685,7 @@ source = "git+https://github.com/GreptimeTeam/rustls?rev=34fd0c6#34fd0c6244af150
dependencies = [
"log",
"once_cell",
- "ring 0.17.8",
+ "ring",
"rustls-pki-types",
"rustls-webpki 0.102.8",
"subtle",
@@ -9639,8 +9748,8 @@ version = "0.101.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765"
dependencies = [
- "ring 0.17.8",
- "untrusted 0.9.0",
+ "ring",
+ "untrusted",
]
[[package]]
@@ -9649,9 +9758,9 @@ version = "0.102.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9"
dependencies = [
- "ring 0.17.8",
+ "ring",
"rustls-pki-types",
- "untrusted 0.9.0",
+ "untrusted",
]
[[package]]
@@ -9839,8 +9948,8 @@ version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414"
dependencies = [
- "ring 0.17.8",
- "untrusted 0.9.0",
+ "ring",
+ "untrusted",
]
[[package]]
@@ -10105,7 +10214,7 @@ dependencies = [
"derive_builder 0.12.0",
"futures",
"futures-util",
- "hashbrown 0.15.0",
+ "hashbrown 0.15.2",
"headers 0.3.9",
"hostname",
"http 0.2.12",
@@ -10135,7 +10244,7 @@ dependencies = [
"pin-project",
"pipeline",
"postgres-types",
- "pprof",
+ "pprof 0.13.0",
"prometheus",
"promql-parser",
"prost 0.12.6",
@@ -10463,26 +10572,13 @@ dependencies = [
"vob",
]
-[[package]]
-name = "spin"
-version = "0.5.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d"
-
[[package]]
name = "spin"
version = "0.9.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67"
-
-[[package]]
-name = "spki"
-version = "0.5.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "44d01ac02a6ccf3e07db148d2be087da624fea0221a16152ed01f0496a6b0a27"
dependencies = [
- "base64ct",
- "der 0.5.1",
+ "lock_api",
]
[[package]]
@@ -10492,7 +10588,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d"
dependencies = [
"base64ct",
- "der 0.7.9",
+ "der",
]
[[package]]
@@ -10531,16 +10627,6 @@ dependencies = [
"table",
]
-[[package]]
-name = "sqlformat"
-version = "0.2.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7bba3a93db0cc4f7bdece8bb09e77e2e785c20bfebf79eb8340ed80708048790"
-dependencies = [
- "nom",
- "unicode_categories",
-]
-
[[package]]
name = "sqlness"
version = "0.6.1"
@@ -10645,100 +10731,196 @@ dependencies = [
[[package]]
name = "sqlx"
-version = "0.6.3"
+version = "0.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f8de3b03a925878ed54a954f621e64bf55a3c1bd29652d0d1a17830405350188"
+checksum = "4410e73b3c0d8442c5f99b425d7a435b5ee0ae4167b3196771dd3f7a01be745f"
dependencies = [
"sqlx-core",
"sqlx-macros",
+ "sqlx-mysql",
+ "sqlx-postgres",
+ "sqlx-sqlite",
]
[[package]]
name = "sqlx-core"
-version = "0.6.3"
+version = "0.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fa8241483a83a3f33aa5fff7e7d9def398ff9990b2752b6c6112b83c6d246029"
+checksum = "6a007b6936676aa9ab40207cde35daab0a04b823be8ae004368c0793b96a61e0"
dependencies = [
- "ahash 0.7.8",
- "atoi 1.0.0",
- "base64 0.13.1",
- "bitflags 1.3.2",
- "byteorder",
"bytes",
"chrono",
"crc",
"crossbeam-queue",
+ "either",
+ "event-listener 5.3.1",
+ "futures-core",
+ "futures-intrusive",
+ "futures-io",
+ "futures-util",
+ "hashbrown 0.15.2",
+ "hashlink",
+ "indexmap 2.6.0",
+ "log",
+ "memchr",
+ "once_cell",
+ "percent-encoding",
+ "rustls 0.23.20",
+ "rustls-pemfile 2.2.0",
+ "serde",
+ "serde_json",
+ "sha2",
+ "smallvec",
+ "thiserror 2.0.6",
+ "tokio",
+ "tokio-stream",
+ "tracing",
+ "url",
+ "webpki-roots 0.26.6",
+]
+
+[[package]]
+name = "sqlx-macros"
+version = "0.8.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3112e2ad78643fef903618d78cf0aec1cb3134b019730edb039b69eaf531f310"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "sqlx-core",
+ "sqlx-macros-core",
+ "syn 2.0.90",
+]
+
+[[package]]
+name = "sqlx-macros-core"
+version = "0.8.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4e9f90acc5ab146a99bf5061a7eb4976b573f560bc898ef3bf8435448dd5e7ad"
+dependencies = [
+ "dotenvy",
+ "either",
+ "heck 0.5.0",
+ "hex",
+ "once_cell",
+ "proc-macro2",
+ "quote",
+ "serde",
+ "serde_json",
+ "sha2",
+ "sqlx-core",
+ "sqlx-mysql",
+ "sqlx-postgres",
+ "sqlx-sqlite",
+ "syn 2.0.90",
+ "tempfile",
+ "tokio",
+ "url",
+]
+
+[[package]]
+name = "sqlx-mysql"
+version = "0.8.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4560278f0e00ce64938540546f59f590d60beee33fffbd3b9cd47851e5fff233"
+dependencies = [
+ "atoi",
+ "base64 0.22.1",
+ "bitflags 2.6.0",
+ "byteorder",
+ "bytes",
+ "chrono",
+ "crc",
"digest",
- "dirs 4.0.0",
"dotenvy",
"either",
- "event-listener 2.5.3",
"futures-channel",
"futures-core",
- "futures-intrusive",
+ "futures-io",
"futures-util",
"generic-array",
- "hashlink",
"hex",
"hkdf",
"hmac",
- "indexmap 1.9.3",
"itoa",
- "libc",
"log",
"md-5",
"memchr",
- "num-bigint",
"once_cell",
- "paste",
"percent-encoding",
"rand",
- "rsa 0.6.1",
- "rustls 0.20.9",
- "rustls-pemfile 1.0.4",
+ "rsa",
"serde",
- "serde_json",
"sha1",
"sha2",
"smallvec",
- "sqlformat",
- "sqlx-rt",
+ "sqlx-core",
"stringprep",
- "thiserror 1.0.64",
- "tokio-stream",
- "url",
- "webpki-roots 0.22.6",
+ "thiserror 2.0.6",
+ "tracing",
"whoami",
]
[[package]]
-name = "sqlx-macros"
-version = "0.6.3"
+name = "sqlx-postgres"
+version = "0.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9966e64ae989e7e575b19d7265cb79d7fc3cbbdf179835cb0d716f294c2049c9"
+checksum = "c5b98a57f363ed6764d5b3a12bfedf62f07aa16e1856a7ddc2a0bb190a959613"
dependencies = [
+ "atoi",
+ "base64 0.22.1",
+ "bitflags 2.6.0",
+ "byteorder",
+ "chrono",
+ "crc",
"dotenvy",
- "either",
- "heck 0.4.1",
+ "etcetera",
+ "futures-channel",
+ "futures-core",
+ "futures-util",
+ "hex",
+ "hkdf",
+ "hmac",
+ "home",
+ "itoa",
+ "log",
+ "md-5",
+ "memchr",
"once_cell",
- "proc-macro2",
- "quote",
+ "rand",
+ "serde",
+ "serde_json",
"sha2",
+ "smallvec",
"sqlx-core",
- "sqlx-rt",
- "syn 1.0.109",
- "url",
+ "stringprep",
+ "thiserror 2.0.6",
+ "tracing",
+ "whoami",
]
[[package]]
-name = "sqlx-rt"
-version = "0.6.3"
+name = "sqlx-sqlite"
+version = "0.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "804d3f245f894e61b1e6263c84b23ca675d96753b5abfd5cc8597d86806e8024"
+checksum = "f85ca71d3a5b24e64e1d08dd8fe36c6c95c339a896cc33068148906784620540"
dependencies = [
- "once_cell",
- "tokio",
- "tokio-rustls 0.23.4",
+ "atoi",
+ "chrono",
+ "flume",
+ "futures-channel",
+ "futures-core",
+ "futures-executor",
+ "futures-intrusive",
+ "futures-util",
+ "libsqlite3-sys",
+ "log",
+ "percent-encoding",
+ "serde",
+ "serde_urlencoded",
+ "sqlx-core",
+ "tracing",
+ "url",
]
[[package]]
@@ -11050,6 +11232,17 @@ dependencies = [
"futures-core",
]
+[[package]]
+name = "synstructure"
+version = "0.13.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.90",
+]
+
[[package]]
name = "sysinfo"
version = "0.30.13"
@@ -11610,6 +11803,16 @@ dependencies = [
"log",
]
+[[package]]
+name = "tinystr"
+version = "0.7.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f"
+dependencies = [
+ "displaydoc",
+ "zerovec",
+]
+
[[package]]
name = "tinytemplate"
version = "1.2.1"
@@ -11732,7 +11935,7 @@ version = "0.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "04fb792ccd6bbcd4bba408eb8a292f70fc4a3589e5d793626f45190e6454b6ab"
dependencies = [
- "ring 0.17.8",
+ "ring",
"rustls 0.23.20",
"tokio",
"tokio-postgres",
@@ -11740,17 +11943,6 @@ dependencies = [
"x509-certificate",
]
-[[package]]
-name = "tokio-rustls"
-version = "0.23.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59"
-dependencies = [
- "rustls 0.20.9",
- "tokio",
- "webpki",
-]
-
[[package]]
name = "tokio-rustls"
version = "0.24.1"
@@ -12192,7 +12384,7 @@ dependencies = [
"atty",
"clap 2.34.0",
"difference",
- "dirs 3.0.2",
+ "dirs",
"glob",
"html-escape",
"indexmap 1.9.3",
@@ -12224,7 +12416,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f5fec4cb27f052ead2246631b332dba0cb6af9a54ce012badee59c4b0ded5e03"
dependencies = [
"anyhow",
- "dirs 3.0.2",
+ "dirs",
"serde",
"serde_json",
]
@@ -12248,7 +12440,7 @@ checksum = "d0b17eef4833c7c139abed66d562dfa23228e97e647597baf246fd56c21bbfaf"
dependencies = [
"anyhow",
"cc",
- "dirs 3.0.2",
+ "dirs",
"libloading 0.7.4",
"once_cell",
"regex",
@@ -12472,24 +12664,12 @@ version = "0.2.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853"
-[[package]]
-name = "unicode_categories"
-version = "0.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "39ec24b3121d976906ece63c9daad25b85969647682eee313cb5779fdd69e14e"
-
[[package]]
name = "unsafe-libyaml"
version = "0.2.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861"
-[[package]]
-name = "untrusted"
-version = "0.7.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a"
-
[[package]]
name = "untrusted"
version = "0.9.0"
@@ -12498,9 +12678,9 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1"
[[package]]
name = "url"
-version = "2.5.2"
+version = "2.5.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c"
+checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60"
dependencies = [
"form_urlencoded",
"idna",
@@ -12513,6 +12693,12 @@ version = "2.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da"
+[[package]]
+name = "utf16_iter"
+version = "1.0.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246"
+
[[package]]
name = "utf8-ranges"
version = "1.0.5"
@@ -12525,6 +12711,12 @@ version = "0.1.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "86bd8d4e895da8537e5315b8254664e6b769c4ff3db18321b297a1e7004392e3"
+[[package]]
+name = "utf8_iter"
+version = "1.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be"
+
[[package]]
name = "utf8parse"
version = "0.2.2"
@@ -12750,17 +12942,8 @@ version = "0.22.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ed63aea5ce73d0ff405984102c42de94fc55a6b75765d621c65262469b3c9b53"
dependencies = [
- "ring 0.17.8",
- "untrusted 0.9.0",
-]
-
-[[package]]
-name = "webpki-roots"
-version = "0.22.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b6c71e40d7d2c34a5106301fb632274ca37242cd0c9d3e64dbece371a40a2d87"
-dependencies = [
- "webpki",
+ "ring",
+ "untrusted",
]
[[package]]
@@ -13135,6 +13318,18 @@ dependencies = [
"thiserror 1.0.64",
]
+[[package]]
+name = "write16"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936"
+
+[[package]]
+name = "writeable"
+version = "0.5.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51"
+
[[package]]
name = "wyz"
version = "0.5.1"
@@ -13153,12 +13348,12 @@ dependencies = [
"bcder",
"bytes",
"chrono",
- "der 0.7.9",
+ "der",
"hex",
"pem",
- "ring 0.17.8",
+ "ring",
"signature",
- "spki 0.7.3",
+ "spki",
"thiserror 1.0.64",
"zeroize",
]
@@ -13198,6 +13393,30 @@ version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049"
+[[package]]
+name = "yoke"
+version = "0.7.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "120e6aef9aa629e3d4f52dc8cc43a015c7724194c97dfaf45180d2daf2b77f40"
+dependencies = [
+ "serde",
+ "stable_deref_trait",
+ "yoke-derive",
+ "zerofrom",
+]
+
+[[package]]
+name = "yoke-derive"
+version = "0.7.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.90",
+ "synstructure",
+]
+
[[package]]
name = "zerocopy"
version = "0.7.35"
@@ -13219,6 +13438,27 @@ dependencies = [
"syn 2.0.90",
]
+[[package]]
+name = "zerofrom"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cff3ee08c995dee1859d998dea82f7374f2826091dd9cd47def953cae446cd2e"
+dependencies = [
+ "zerofrom-derive",
+]
+
+[[package]]
+name = "zerofrom-derive"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.90",
+ "synstructure",
+]
+
[[package]]
name = "zeroize"
version = "1.8.1"
@@ -13239,6 +13479,28 @@ dependencies = [
"syn 2.0.90",
]
+[[package]]
+name = "zerovec"
+version = "0.10.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079"
+dependencies = [
+ "yoke",
+ "zerofrom",
+ "zerovec-derive",
+]
+
+[[package]]
+name = "zerovec-derive"
+version = "0.10.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.90",
+]
+
[[package]]
name = "zstd"
version = "0.11.2+zstd.1.5.2"
diff --git a/src/common/pprof/Cargo.toml b/src/common/pprof/Cargo.toml
index 1657244d21f1..1e7d00803414 100644
--- a/src/common/pprof/Cargo.toml
+++ b/src/common/pprof/Cargo.toml
@@ -12,7 +12,7 @@ snafu.workspace = true
tokio.workspace = true
[target.'cfg(unix)'.dependencies]
-pprof = { version = "0.13", features = [
+pprof = { version = "0.14", features = [
"flamegraph",
"prost-codec",
"protobuf",
diff --git a/tests-fuzz/Cargo.toml b/tests-fuzz/Cargo.toml
index 3b7b41e7a5db..eab760c60d2c 100644
--- a/tests-fuzz/Cargo.toml
+++ b/tests-fuzz/Cargo.toml
@@ -53,7 +53,7 @@ serde_yaml = "0.9"
snafu = { workspace = true }
sql = { workspace = true }
sqlparser.workspace = true
-sqlx = { version = "0.6", features = [
+sqlx = { version = "0.8", features = [
"runtime-tokio-rustls",
"mysql",
"postgres",
diff --git a/tests-fuzz/src/utils.rs b/tests-fuzz/src/utils.rs
index 8e5daef4eb1d..f52d75f4da25 100644
--- a/tests-fuzz/src/utils.rs
+++ b/tests-fuzz/src/utils.rs
@@ -59,8 +59,11 @@ pub async fn init_greptime_connections_via_env() -> Connections {
/// Connects to GreptimeDB.
pub async fn init_greptime_connections(mysql: Option<String>) -> Connections {
let mysql = if let Some(addr) = mysql {
- let mut opts: MySqlConnectOptions = format!("mysql://{addr}/public").parse().unwrap();
- opts.log_statements(LevelFilter::Off);
+ let opts = format!("mysql://{addr}/public")
+ .parse::<MySqlConnectOptions>()
+ .unwrap()
+ .log_statements(LevelFilter::Off);
+
Some(MySqlPoolOptions::new().connect_with(opts).await.unwrap())
} else {
None
diff --git a/tests-fuzz/src/utils/cluster_info.rs b/tests-fuzz/src/utils/cluster_info.rs
index fa4bbbc5404f..9f43711ca4df 100644
--- a/tests-fuzz/src/utils/cluster_info.rs
+++ b/tests-fuzz/src/utils/cluster_info.rs
@@ -17,8 +17,7 @@ use std::time::Duration;
use common_telemetry::info;
use humantime::parse_duration;
use snafu::ResultExt;
-use sqlx::database::HasArguments;
-use sqlx::{ColumnIndex, Database, Decode, Encode, Executor, IntoArguments, MySql, Pool, Type};
+use sqlx::MySqlPool;
use super::wait::wait_condition_fn;
use crate::error::{self, Result};
@@ -34,19 +33,10 @@ pub struct NodeInfo {
}
/// Returns all [NodeInfo] in the cluster.
-pub async fn fetch_nodes<'a, DB, E>(e: E) -> Result<Vec<NodeInfo>>
-where
- DB: Database,
- <DB as HasArguments<'a>>::Arguments: IntoArguments<'a, DB>,
- for<'c> E: 'a + Executor<'c, Database = DB>,
- for<'c> i64: Decode<'c, DB> + Type<DB>,
- for<'c> String: Decode<'c, DB> + Type<DB>,
- for<'c> String: Encode<'c, DB> + Type<DB>,
- for<'c> &'c str: ColumnIndex<<DB as Database>::Row>,
-{
- let sql = "select * from information_schema.cluster_info;";
+pub async fn fetch_nodes(db: &MySqlPool) -> Result<Vec<NodeInfo>> {
+ let sql = "select * from information_schema.cluster_info";
sqlx::query_as::<_, NodeInfo>(sql)
- .fetch_all(e)
+ .fetch_all(db)
.await
.context(error::ExecuteQuerySnafu { sql })
}
@@ -55,7 +45,7 @@ where
///
/// This function repeatedly checks the status of all datanodes and waits until all of them are online
/// or the timeout period elapses. A datanode is considered online if its `active_time` is less than 3 seconds.
-pub async fn wait_for_all_datanode_online(greptime: Pool<MySql>, timeout: Duration) {
+pub async fn wait_for_all_datanode_online(greptime: MySqlPool, timeout: Duration) {
wait_condition_fn(
timeout,
|| {
diff --git a/tests-fuzz/src/utils/partition.rs b/tests-fuzz/src/utils/partition.rs
index 26b95e68f99e..197a51d00adc 100644
--- a/tests-fuzz/src/utils/partition.rs
+++ b/tests-fuzz/src/utils/partition.rs
@@ -17,8 +17,7 @@ use std::time::Duration;
use common_telemetry::info;
use snafu::ResultExt;
-use sqlx::database::HasArguments;
-use sqlx::{ColumnIndex, Database, Decode, Encode, Executor, IntoArguments, MySql, Pool, Type};
+use sqlx::MySqlPool;
use store_api::storage::RegionId;
use super::wait::wait_condition_fn;
@@ -36,61 +35,33 @@ pub struct PartitionCount {
pub count: i64,
}
-pub async fn count_partitions<'a, DB, E>(e: E, datanode_id: u64) -> Result<PartitionCount>
-where
- DB: Database,
- <DB as HasArguments<'a>>::Arguments: IntoArguments<'a, DB>,
- for<'c> E: 'a + Executor<'c, Database = DB>,
- for<'c> i64: Decode<'c, DB> + Type<DB>,
- for<'c> String: Decode<'c, DB> + Type<DB>,
- for<'c> u64: Encode<'c, DB> + Type<DB>,
- for<'c> &'c str: ColumnIndex<<DB as Database>::Row>,
-{
+pub async fn count_partitions(db: &MySqlPool, datanode_id: u64) -> Result<PartitionCount> {
let sql = "select count(1) as count from information_schema.region_peers where peer_id == ?";
- Ok(sqlx::query_as::<_, PartitionCount>(sql)
+ sqlx::query_as::<_, PartitionCount>(sql)
.bind(datanode_id)
- .fetch_all(e)
+ .fetch_one(db)
.await
- .context(error::ExecuteQuerySnafu { sql })?
- .remove(0))
+ .context(error::ExecuteQuerySnafu { sql })
}
/// Returns the [Partition] of the specific `region_id`
-pub async fn fetch_partition<'a, DB, E>(e: E, region_id: u64) -> Result<Partition>
-where
- DB: Database,
- <DB as HasArguments<'a>>::Arguments: IntoArguments<'a, DB>,
- for<'c> E: 'a + Executor<'c, Database = DB>,
- for<'c> u64: Decode<'c, DB> + Type<DB>,
- for<'c> String: Decode<'c, DB> + Type<DB>,
- for<'c> u64: Encode<'c, DB> + Type<DB>,
- for<'c> &'c str: ColumnIndex<<DB as Database>::Row>,
-{
+pub async fn fetch_partition(db: &MySqlPool, region_id: u64) -> Result<Partition> {
let sql = "select region_id, peer_id as datanode_id from information_schema.region_peers where region_id = ?;";
sqlx::query_as::<_, Partition>(sql)
.bind(region_id)
- .fetch_one(e)
+ .fetch_one(db)
.await
.context(error::ExecuteQuerySnafu { sql })
}
/// Returns all [Partition] of the specific `table`
-pub async fn fetch_partitions<'a, DB, E>(e: E, table_name: Ident) -> Result<Vec<Partition>>
-where
- DB: Database,
- <DB as HasArguments<'a>>::Arguments: IntoArguments<'a, DB>,
- for<'c> E: 'a + Executor<'c, Database = DB>,
- for<'c> u64: Decode<'c, DB> + Type<DB>,
- for<'c> String: Decode<'c, DB> + Type<DB>,
- for<'c> String: Encode<'c, DB> + Type<DB>,
- for<'c> &'c str: ColumnIndex<<DB as Database>::Row>,
-{
+pub async fn fetch_partitions(db: &MySqlPool, table_name: Ident) -> Result<Vec<Partition>> {
let sql = "select b.peer_id as datanode_id, a.greptime_partition_id as region_id
from information_schema.partitions a left join information_schema.region_peers b
on a.greptime_partition_id = b.region_id where a.table_name= ? order by datanode_id asc;";
sqlx::query_as::<_, Partition>(sql)
.bind(table_name.value.to_string())
- .fetch_all(e)
+ .fetch_all(db)
.await
.context(error::ExecuteQuerySnafu { sql })
}
@@ -124,7 +95,7 @@ pub fn pretty_print_region_distribution(distribution: &BTreeMap<u64, Vec<RegionI
/// This function repeatedly checks the number of partitions on the specified datanode and waits until
/// the count reaches zero or the timeout period elapses. It logs the number of partitions on each check.
pub async fn wait_for_all_regions_evicted(
- greptime: Pool<MySql>,
+ greptime: MySqlPool,
selected_datanode: u64,
timeout: Duration,
) {
diff --git a/tests-fuzz/src/validator/column.rs b/tests-fuzz/src/validator/column.rs
index 349057817bf8..8642a275a438 100644
--- a/tests-fuzz/src/validator/column.rs
+++ b/tests-fuzz/src/validator/column.rs
@@ -15,8 +15,7 @@
use common_telemetry::debug;
use datatypes::data_type::DataType;
use snafu::{ensure, ResultExt};
-use sqlx::database::HasArguments;
-use sqlx::{ColumnIndex, Database, Decode, Encode, Executor, IntoArguments, Type};
+use sqlx::MySqlPool;
use crate::error::{self, Result};
use crate::ir::create_expr::ColumnOption;
@@ -198,24 +197,16 @@ pub fn assert_eq(fetched_columns: &[ColumnEntry], columns: &[Column]) -> Result<
}
/// Returns all [ColumnEntry] of the `table_name` from `information_schema`.
-pub async fn fetch_columns<'a, DB, E>(
- e: E,
+pub async fn fetch_columns(
+ db: &MySqlPool,
schema_name: Ident,
table_name: Ident,
-) -> Result<Vec<ColumnEntry>>
-where
- DB: Database,
- <DB as HasArguments<'a>>::Arguments: IntoArguments<'a, DB>,
- for<'c> E: 'a + Executor<'c, Database = DB>,
- for<'c> String: Decode<'c, DB> + Type<DB>,
- for<'c> String: Encode<'c, DB> + Type<DB>,
- for<'c> &'c str: ColumnIndex<<DB as Database>::Row>,
-{
+) -> Result<Vec<ColumnEntry>> {
let sql = "SELECT table_schema, table_name, column_name, greptime_data_type as data_type, semantic_type, column_default, is_nullable FROM information_schema.columns WHERE table_schema = ? AND table_name = ?";
sqlx::query_as::<_, ColumnEntry>(sql)
.bind(schema_name.value.to_string())
.bind(table_name.value.to_string())
- .fetch_all(e)
+ .fetch_all(db)
.await
.context(error::ExecuteQuerySnafu { sql })
}
diff --git a/tests-fuzz/src/validator/row.rs b/tests-fuzz/src/validator/row.rs
index 1e9535d6677f..5648a24f48b8 100644
--- a/tests-fuzz/src/validator/row.rs
+++ b/tests-fuzz/src/validator/row.rs
@@ -17,11 +17,8 @@ use common_time::date::Date;
use common_time::{DateTime, Timestamp};
use datatypes::value::Value;
use snafu::{ensure, ResultExt};
-use sqlx::database::HasArguments;
-use sqlx::{
- Column, ColumnIndex, Database, Decode, Encode, Executor, IntoArguments, Row, Type, TypeInfo,
- ValueRef,
-};
+use sqlx::mysql::MySqlRow;
+use sqlx::{Column, ColumnIndex, Database, MySqlPool, Row, TypeInfo, ValueRef};
use crate::error::{self, Result};
use crate::ir::insert_expr::{RowValue, RowValues};
@@ -151,33 +148,17 @@ pub struct ValueCount {
pub count: i64,
}
-pub async fn count_values<'a, DB, E>(e: E, sql: &'a str) -> Result<ValueCount>
-where
- DB: Database,
- <DB as HasArguments<'a>>::Arguments: IntoArguments<'a, DB>,
- for<'c> E: 'a + Executor<'c, Database = DB>,
- for<'c> i64: Decode<'c, DB> + Type<DB>,
- for<'c> String: Encode<'c, DB> + Type<DB>,
- for<'c> &'c str: ColumnIndex<<DB as Database>::Row>,
-{
- Ok(sqlx::query_as::<_, ValueCount>(sql)
- .fetch_all(e)
+pub async fn count_values(db: &MySqlPool, sql: &str) -> Result<ValueCount> {
+ sqlx::query_as::<_, ValueCount>(sql)
+ .fetch_one(db)
.await
- .context(error::ExecuteQuerySnafu { sql })?
- .remove(0))
+ .context(error::ExecuteQuerySnafu { sql })
}
/// Returns all [RowEntry] of the `table_name`.
-pub async fn fetch_values<'a, DB, E>(e: E, sql: &'a str) -> Result<Vec<<DB as Database>::Row>>
-where
- DB: Database,
- <DB as HasArguments<'a>>::Arguments: IntoArguments<'a, DB>,
- for<'c> E: 'a + Executor<'c, Database = DB>,
- for<'c> String: Decode<'c, DB> + Type<DB>,
- for<'c> String: Encode<'c, DB> + Type<DB>,
-{
+pub async fn fetch_values(db: &MySqlPool, sql: &str) -> Result<Vec<MySqlRow>> {
sqlx::query(sql)
- .fetch_all(e)
+ .fetch_all(db)
.await
.context(error::ExecuteQuerySnafu { sql })
}
diff --git a/tests-fuzz/src/validator/table.rs b/tests-fuzz/src/validator/table.rs
index 406719b2d660..3e45930f2954 100644
--- a/tests-fuzz/src/validator/table.rs
+++ b/tests-fuzz/src/validator/table.rs
@@ -13,8 +13,7 @@
// limitations under the License.
use snafu::{ensure, ResultExt};
-use sqlx::database::HasArguments;
-use sqlx::{ColumnIndex, Database, Decode, Encode, Executor, IntoArguments, Row, Type};
+use sqlx::{MySqlPool, Row};
use crate::error::{self, Result, UnexpectedSnafu};
use crate::ir::alter_expr::AlterTableOption;
@@ -47,17 +46,9 @@ fn parse_show_create(show_create: &str) -> Result<Vec<AlterTableOption>> {
}
/// Fetches table options from the context
-pub async fn fetch_table_options<'a, DB, E>(e: E, sql: &'a str) -> Result<Vec<AlterTableOption>>
-where
- DB: Database,
- <DB as HasArguments<'a>>::Arguments: IntoArguments<'a, DB>,
- for<'c> E: 'a + Executor<'c, Database = DB>,
- for<'c> String: Decode<'c, DB> + Type<DB>,
- for<'c> String: Encode<'c, DB> + Type<DB>,
- usize: ColumnIndex<<DB as Database>::Row>,
-{
+pub async fn fetch_table_options(db: &MySqlPool, sql: &str) -> Result<Vec<AlterTableOption>> {
let fetched_rows = sqlx::query(sql)
- .fetch_all(e)
+ .fetch_all(db)
.await
.context(error::ExecuteQuerySnafu { sql })?;
ensure!(
diff --git a/tests-integration/Cargo.toml b/tests-integration/Cargo.toml
index 057890b6395d..d3434908b7e5 100644
--- a/tests-integration/Cargo.toml
+++ b/tests-integration/Cargo.toml
@@ -64,7 +64,7 @@ session.workspace = true
similar-asserts.workspace = true
snafu.workspace = true
sql.workspace = true
-sqlx = { version = "0.6", features = [
+sqlx = { version = "0.8", features = [
"runtime-tokio-rustls",
"mysql",
"postgres",
| fix | security fix, sqlx, hashbrown, idna and CI updates (#5330) |
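The tests-fuzz changes in the diff above are the visible part of the sqlx 0.6 -> 0.8 migration: helpers that used to be generic over `Executor` with long `where` clauses now take a concrete `&MySqlPool`, single-row queries use `fetch_one` instead of `fetch_all(...).remove(0)`, and `MySqlConnectOptions` is configured through its consuming builder API. A minimal sketch of that pattern, assuming sqlx 0.8 with the `runtime-tokio-rustls` and `mysql` features plus the `log` and `tokio` crates, and a MySQL-compatible endpoint at 127.0.0.1:4002; the `NodeInfo` row type only mirrors the shape used in the diff:

```rust
use sqlx::mysql::{MySqlConnectOptions, MySqlPoolOptions};
use sqlx::{ConnectOptions, MySqlPool};

// Row type for the columns selected below; sqlx maps them by field name.
#[derive(Debug, sqlx::FromRow)]
struct NodeInfo {
    peer_id: i64,
    peer_type: String,
    peer_addr: String,
}

// sqlx 0.8 style: take a concrete pool instead of a generic Executor,
// which removes the `where` bounds seen on the old 0.6 helpers.
async fn fetch_nodes(db: &MySqlPool) -> Result<Vec<NodeInfo>, sqlx::Error> {
    sqlx::query_as::<_, NodeInfo>(
        "select peer_id, peer_type, peer_addr from information_schema.cluster_info",
    )
    .fetch_all(db)
    .await
}

#[tokio::main]
async fn main() -> Result<(), sqlx::Error> {
    // Builder-style options: `log_statements` consumes and returns the options,
    // so no mutable binding is needed (the same change as in tests-fuzz/src/utils.rs).
    let opts = "mysql://127.0.0.1:4002/public"
        .parse::<MySqlConnectOptions>()?
        .log_statements(log::LevelFilter::Off);
    let pool = MySqlPoolOptions::new().connect_with(opts).await?;

    for node in fetch_nodes(&pool).await? {
        println!("{node:?}");
    }
    Ok(())
}
```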
| 63acc30ce744283b105db2274d2e110a5d71a63e | 2024-07-12 12:26:13 | Ruihang Xia | perf: fine-tuned plan steps (#4258) | false |
diff --git a/Cargo.lock b/Cargo.lock
index 0acae30c2cab..fdf1952380d1 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -739,12 +739,9 @@ dependencies = [
[[package]]
name = "atomic"
-version = "0.6.0"
+version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8d818003e740b63afc82337e3160717f4f63078720a810b7b903e70a5d1d2994"
-dependencies = [
- "bytemuck",
-]
+checksum = "c59bdb34bc650a32731b31bd8f0829cc15d24a708ee31559e0bb34f2bc320cba"
[[package]]
name = "atomic-waker"
@@ -2766,7 +2763,7 @@ checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2"
[[package]]
name = "datafusion"
version = "38.0.0"
-source = "git+https://github.com/apache/datafusion.git?rev=729b356ef543ffcda6813c7b5373507a04ae0109#729b356ef543ffcda6813c7b5373507a04ae0109"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=d7bda5c9b762426e81f144296deadc87e5f4a0b8#d7bda5c9b762426e81f144296deadc87e5f4a0b8"
dependencies = [
"ahash 0.8.11",
"arrow",
@@ -2818,7 +2815,7 @@ dependencies = [
[[package]]
name = "datafusion-common"
version = "38.0.0"
-source = "git+https://github.com/apache/datafusion.git?rev=729b356ef543ffcda6813c7b5373507a04ae0109#729b356ef543ffcda6813c7b5373507a04ae0109"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=d7bda5c9b762426e81f144296deadc87e5f4a0b8#d7bda5c9b762426e81f144296deadc87e5f4a0b8"
dependencies = [
"ahash 0.8.11",
"arrow",
@@ -2839,7 +2836,7 @@ dependencies = [
[[package]]
name = "datafusion-common-runtime"
version = "38.0.0"
-source = "git+https://github.com/apache/datafusion.git?rev=729b356ef543ffcda6813c7b5373507a04ae0109#729b356ef543ffcda6813c7b5373507a04ae0109"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=d7bda5c9b762426e81f144296deadc87e5f4a0b8#d7bda5c9b762426e81f144296deadc87e5f4a0b8"
dependencies = [
"tokio",
]
@@ -2847,7 +2844,7 @@ dependencies = [
[[package]]
name = "datafusion-execution"
version = "38.0.0"
-source = "git+https://github.com/apache/datafusion.git?rev=729b356ef543ffcda6813c7b5373507a04ae0109#729b356ef543ffcda6813c7b5373507a04ae0109"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=d7bda5c9b762426e81f144296deadc87e5f4a0b8#d7bda5c9b762426e81f144296deadc87e5f4a0b8"
dependencies = [
"arrow",
"chrono",
@@ -2867,7 +2864,7 @@ dependencies = [
[[package]]
name = "datafusion-expr"
version = "38.0.0"
-source = "git+https://github.com/apache/datafusion.git?rev=729b356ef543ffcda6813c7b5373507a04ae0109#729b356ef543ffcda6813c7b5373507a04ae0109"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=d7bda5c9b762426e81f144296deadc87e5f4a0b8#d7bda5c9b762426e81f144296deadc87e5f4a0b8"
dependencies = [
"ahash 0.8.11",
"arrow",
@@ -2884,7 +2881,7 @@ dependencies = [
[[package]]
name = "datafusion-functions"
version = "38.0.0"
-source = "git+https://github.com/apache/datafusion.git?rev=729b356ef543ffcda6813c7b5373507a04ae0109#729b356ef543ffcda6813c7b5373507a04ae0109"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=d7bda5c9b762426e81f144296deadc87e5f4a0b8#d7bda5c9b762426e81f144296deadc87e5f4a0b8"
dependencies = [
"arrow",
"base64 0.22.1",
@@ -2910,7 +2907,7 @@ dependencies = [
[[package]]
name = "datafusion-functions-aggregate"
version = "38.0.0"
-source = "git+https://github.com/apache/datafusion.git?rev=729b356ef543ffcda6813c7b5373507a04ae0109#729b356ef543ffcda6813c7b5373507a04ae0109"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=d7bda5c9b762426e81f144296deadc87e5f4a0b8#d7bda5c9b762426e81f144296deadc87e5f4a0b8"
dependencies = [
"ahash 0.8.11",
"arrow",
@@ -2927,7 +2924,7 @@ dependencies = [
[[package]]
name = "datafusion-functions-array"
version = "38.0.0"
-source = "git+https://github.com/apache/datafusion.git?rev=729b356ef543ffcda6813c7b5373507a04ae0109#729b356ef543ffcda6813c7b5373507a04ae0109"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=d7bda5c9b762426e81f144296deadc87e5f4a0b8#d7bda5c9b762426e81f144296deadc87e5f4a0b8"
dependencies = [
"arrow",
"arrow-array",
@@ -2946,7 +2943,7 @@ dependencies = [
[[package]]
name = "datafusion-optimizer"
version = "38.0.0"
-source = "git+https://github.com/apache/datafusion.git?rev=729b356ef543ffcda6813c7b5373507a04ae0109#729b356ef543ffcda6813c7b5373507a04ae0109"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=d7bda5c9b762426e81f144296deadc87e5f4a0b8#d7bda5c9b762426e81f144296deadc87e5f4a0b8"
dependencies = [
"arrow",
"async-trait",
@@ -2964,7 +2961,7 @@ dependencies = [
[[package]]
name = "datafusion-physical-expr"
version = "38.0.0"
-source = "git+https://github.com/apache/datafusion.git?rev=729b356ef543ffcda6813c7b5373507a04ae0109#729b356ef543ffcda6813c7b5373507a04ae0109"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=d7bda5c9b762426e81f144296deadc87e5f4a0b8#d7bda5c9b762426e81f144296deadc87e5f4a0b8"
dependencies = [
"ahash 0.8.11",
"arrow",
@@ -2994,7 +2991,7 @@ dependencies = [
[[package]]
name = "datafusion-physical-expr-common"
version = "38.0.0"
-source = "git+https://github.com/apache/datafusion.git?rev=729b356ef543ffcda6813c7b5373507a04ae0109#729b356ef543ffcda6813c7b5373507a04ae0109"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=d7bda5c9b762426e81f144296deadc87e5f4a0b8#d7bda5c9b762426e81f144296deadc87e5f4a0b8"
dependencies = [
"arrow",
"datafusion-common",
@@ -3005,7 +3002,7 @@ dependencies = [
[[package]]
name = "datafusion-physical-plan"
version = "38.0.0"
-source = "git+https://github.com/apache/datafusion.git?rev=729b356ef543ffcda6813c7b5373507a04ae0109#729b356ef543ffcda6813c7b5373507a04ae0109"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=d7bda5c9b762426e81f144296deadc87e5f4a0b8#d7bda5c9b762426e81f144296deadc87e5f4a0b8"
dependencies = [
"ahash 0.8.11",
"arrow",
@@ -3038,7 +3035,7 @@ dependencies = [
[[package]]
name = "datafusion-sql"
version = "38.0.0"
-source = "git+https://github.com/apache/datafusion.git?rev=729b356ef543ffcda6813c7b5373507a04ae0109#729b356ef543ffcda6813c7b5373507a04ae0109"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=d7bda5c9b762426e81f144296deadc87e5f4a0b8#d7bda5c9b762426e81f144296deadc87e5f4a0b8"
dependencies = [
"arrow",
"arrow-array",
@@ -3054,7 +3051,7 @@ dependencies = [
[[package]]
name = "datafusion-substrait"
version = "38.0.0"
-source = "git+https://github.com/apache/datafusion.git?rev=729b356ef543ffcda6813c7b5373507a04ae0109#729b356ef543ffcda6813c7b5373507a04ae0109"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=d7bda5c9b762426e81f144296deadc87e5f4a0b8#d7bda5c9b762426e81f144296deadc87e5f4a0b8"
dependencies = [
"async-recursion",
"chrono",
@@ -6410,9 +6407,9 @@ dependencies = [
[[package]]
name = "multimap"
-version = "0.8.3"
+version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a"
+checksum = "defc4c55412d89136f966bbb339008b474350e5e6e78d2714439c386b3137a03"
[[package]]
name = "mur3"
@@ -12900,9 +12897,9 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821"
[[package]]
name = "uuid"
-version = "1.9.1"
+version = "1.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5de17fd2f7da591098415cff336e12965a28061ddace43b59cb3c430179c9439"
+checksum = "a183cf7feeba97b4dd1c0d46788634f6221d87fa961b305bed08c851829efcc0"
dependencies = [
"atomic",
"getrandom",
@@ -12913,9 +12910,9 @@ dependencies = [
[[package]]
name = "uuid-macro-internal"
-version = "1.9.1"
+version = "1.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a3ff64d5cde1e2cb5268bdb497235b6bd255ba8244f910dbc3574e59593de68c"
+checksum = "9881bea7cbe687e36c9ab3b778c36cd0487402e270304e8b1296d5085303c1a2"
dependencies = [
"proc-macro2",
"quote",
diff --git a/Cargo.toml b/Cargo.toml
index 26d2baf2b7cd..3985bb525534 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -104,15 +104,15 @@ clap = { version = "4.4", features = ["derive"] }
config = "0.13.0"
crossbeam-utils = "0.8"
dashmap = "5.4"
-datafusion = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
-datafusion-common = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
-datafusion-expr = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
-datafusion-functions = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
-datafusion-optimizer = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
-datafusion-physical-expr = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
-datafusion-physical-plan = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
-datafusion-sql = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
-datafusion-substrait = { git = "https://github.com/apache/datafusion.git", rev = "729b356ef543ffcda6813c7b5373507a04ae0109" }
+datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
+datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
+datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
+datafusion-functions = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
+datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
+datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
+datafusion-physical-plan = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
+datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
+datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "d7bda5c9b762426e81f144296deadc87e5f4a0b8" }
derive_builder = "0.12"
dotenv = "0.15"
etcd-client = { version = "0.13" }
diff --git a/src/query/src/datafusion.rs b/src/query/src/datafusion.rs
index 14c01b05d389..fe57987a5c7c 100644
--- a/src/query/src/datafusion.rs
+++ b/src/query/src/datafusion.rs
@@ -48,6 +48,7 @@ use table::TableRef;
use crate::analyze::DistAnalyzeExec;
use crate::dataframe::DataFrame;
pub use crate::datafusion::planner::DfContextProviderAdapter;
+use crate::dist_plan::MergeScanLogicalPlan;
use crate::error::{
CatalogSnafu, CreateRecordBatchSnafu, DataFusionSnafu, MissingTableMutationHandlerSnafu,
MissingTimestampColumnSnafu, QueryExecutionSnafu, Result, TableMutationSnafu,
@@ -373,8 +374,41 @@ impl PhysicalPlanner for DatafusionQueryEngine {
match logical_plan {
LogicalPlan::DfPlan(df_plan) => {
let state = ctx.state();
+
+ // special handle EXPLAIN plan
+ if matches!(df_plan, DfLogicalPlan::Explain(_)) {
+ return state
+ .create_physical_plan(df_plan)
+ .await
+ .context(error::DatafusionSnafu)
+ .map_err(BoxedError::new)
+ .context(QueryExecutionSnafu);
+ }
+
+ // analyze first
+ let analyzed_plan = state
+ .analyzer()
+ .execute_and_check(df_plan.clone(), state.config_options(), |_, _| {})
+ .context(error::DatafusionSnafu)
+ .map_err(BoxedError::new)
+ .context(QueryExecutionSnafu)?;
+ // skip optimize for MergeScan
+ let optimized_plan = if let DfLogicalPlan::Extension(ext) = &analyzed_plan
+ && ext.node.name() == MergeScanLogicalPlan::name()
+ {
+ analyzed_plan.clone()
+ } else {
+ state
+ .optimizer()
+ .optimize(analyzed_plan, state, |_, _| {})
+ .context(error::DatafusionSnafu)
+ .map_err(BoxedError::new)
+ .context(QueryExecutionSnafu)?
+ };
+
let physical_plan = state
- .create_physical_plan(df_plan)
+ .query_planner()
+ .create_physical_plan(&optimized_plan, state)
.await
.context(error::DatafusionSnafu)
.map_err(BoxedError::new)
diff --git a/src/query/src/dist_plan/planner.rs b/src/query/src/dist_plan/planner.rs
index 41227e868730..73168ff1bda8 100644
--- a/src/query/src/dist_plan/planner.rs
+++ b/src/query/src/dist_plan/planner.rs
@@ -27,7 +27,6 @@ use datafusion::physical_planner::{ExtensionPlanner, PhysicalPlanner};
use datafusion_common::tree_node::{TreeNode, TreeNodeRecursion, TreeNodeVisitor};
use datafusion_common::TableReference;
use datafusion_expr::{LogicalPlan, UserDefinedLogicalNode};
-use datafusion_optimizer::analyzer::Analyzer;
use session::context::QueryContext;
use snafu::{OptionExt, ResultExt};
use store_api::storage::RegionId;
@@ -72,8 +71,9 @@ impl ExtensionPlanner for DistExtensionPlanner {
let input_plan = merge_scan.input();
let fallback = |logical_plan| async move {
+ let optimized_plan = self.optimize_input_logical_plan(session_state, logical_plan)?;
planner
- .create_physical_plan(logical_plan, session_state)
+ .create_physical_plan(&optimized_plan, session_state)
.await
.map(Some)
};
@@ -83,15 +83,15 @@ impl ExtensionPlanner for DistExtensionPlanner {
return fallback(input_plan).await;
}
- let optimized_plan = self.optimize_input_logical_plan(session_state, input_plan)?;
+ let optimized_plan = input_plan;
let Some(table_name) = Self::extract_full_table_name(input_plan)? else {
// no relation found in input plan, going to execute them locally
- return fallback(&optimized_plan).await;
+ return fallback(optimized_plan).await;
};
let Ok(regions) = self.get_regions(&table_name).await else {
// no peers found, going to execute them locally
- return fallback(&optimized_plan).await;
+ return fallback(optimized_plan).await;
};
// TODO(ruihang): generate different execution plans for different variant merge operation
@@ -137,16 +137,14 @@ impl DistExtensionPlanner {
Ok(table.table_info().region_ids())
}
- // TODO(ruihang): find a more elegant way to optimize input logical plan
+ /// Input logical plan is analyzed. Thus only call logical optimizer to optimize it.
fn optimize_input_logical_plan(
&self,
session_state: &SessionState,
plan: &LogicalPlan,
) -> Result<LogicalPlan> {
let state = session_state.clone();
- let analyzer = Analyzer::default();
- let state = state.with_analyzer_rules(analyzer.rules);
- state.optimize(plan)
+ state.optimizer().optimize(plan.clone(), &state, |_, _| {})
}
}
| perf | fine-tuned plan steps (#4258) |
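The datafusion.rs change above replaces the single `SessionState::create_physical_plan` call with explicit stages: run the analyzer, skip the logical optimizer when the plan is already a `MergeScan` extension (its inner plan was optimized while building the distributed plan), and hand the result straight to the query planner, so nothing gets analyzed or optimized twice. A rough sketch of that control flow against the DataFusion API, using DataFusion 38-era module paths and reducing the `MergeScanLogicalPlan::name()` check to a plain string comparison; this is illustrative, not the actual GreptimeDB implementation:

```rust
use std::sync::Arc;

use datafusion::error::Result;
use datafusion::execution::context::SessionState;
use datafusion::logical_expr::LogicalPlan;
use datafusion::physical_plan::ExecutionPlan;

async fn plan_in_steps(state: &SessionState, plan: &LogicalPlan) -> Result<Arc<dyn ExecutionPlan>> {
    // EXPLAIN keeps the one-shot path so its output still reports every stage.
    if matches!(plan, LogicalPlan::Explain(_)) {
        return state.create_physical_plan(plan).await;
    }

    // 1. Analyzer rules (type coercion and friends) always run.
    let analyzed = state
        .analyzer()
        .execute_and_check(plan.clone(), state.config_options(), |_, _| {})?;

    // 2. The logical optimizer is skipped for MergeScan extension plans,
    //    whose inner plan was already optimized during distributed planning.
    let is_merge_scan = matches!(
        &analyzed,
        LogicalPlan::Extension(ext) if ext.node.name() == "MergeScan"
    );
    let optimized = if is_merge_scan {
        analyzed
    } else {
        state.optimizer().optimize(analyzed, state, |_, _| {})?
    };

    // 3. Physical planning only; no second analyze/optimize pass.
    state
        .query_planner()
        .create_physical_plan(&optimized, state)
        .await
}
```

Compared with calling `create_physical_plan` on the session state directly, the logical optimizer now runs at most once per plan, which is the point of the commit.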
| 6e776d5f988c54ce8481064c5179c387985ec6cd | 2024-09-30 13:58:51 | Weny Xu | feat: support to reject write after flushing (#4759) | false |
diff --git a/.github/workflows/develop.yml b/.github/workflows/develop.yml
index 073c40748618..13c155d6171d 100644
--- a/.github/workflows/develop.yml
+++ b/.github/workflows/develop.yml
@@ -442,6 +442,13 @@ jobs:
minio: true
kafka: true
values: "with-remote-wal.yaml"
+ include:
+ - target: "fuzz_migrate_mito_regions"
+ mode:
+ name: "Local WAL"
+ minio: true
+ kafka: false
+ values: "with-minio.yaml"
steps:
- name: Remove unused software
run: |
@@ -530,7 +537,7 @@ jobs:
with:
image-registry: localhost:5001
values-filename: ${{ matrix.mode.values }}
- enable-region-failover: true
+ enable-region-failover: ${{ matrix.mode.kafka }}
- name: Port forward (mysql)
run: |
kubectl port-forward service/my-greptimedb-frontend 4002:4002 -n my-greptimedb&
diff --git a/Cargo.lock b/Cargo.lock
index 7aebe2e719eb..ab5043939a94 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4414,7 +4414,7 @@ dependencies = [
[[package]]
name = "greptime-proto"
version = "0.1.0"
-source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=36334744c7020734dcb4a6b8d24d52ae7ed53fe1#36334744c7020734dcb4a6b8d24d52ae7ed53fe1"
+source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=0b4f7c8ab06399f6b90e1626e8d5b9697cb33bb9#0b4f7c8ab06399f6b90e1626e8d5b9697cb33bb9"
dependencies = [
"prost 0.12.6",
"serde",
diff --git a/Cargo.toml b/Cargo.toml
index 44b2cda1227f..63d7ad3ba739 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -120,7 +120,7 @@ etcd-client = { version = "0.13" }
fst = "0.4.7"
futures = "0.3"
futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "36334744c7020734dcb4a6b8d24d52ae7ed53fe1" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "0b4f7c8ab06399f6b90e1626e8d5b9697cb33bb9" }
humantime = "2.1"
humantime-serde = "1.1"
itertools = "0.10"
diff --git a/src/catalog/src/system_schema/information_schema/region_peers.rs b/src/catalog/src/system_schema/information_schema/region_peers.rs
index 5496879af0fb..50c2593f8621 100644
--- a/src/catalog/src/system_schema/information_schema/region_peers.rs
+++ b/src/catalog/src/system_schema/information_schema/region_peers.rs
@@ -224,8 +224,8 @@ impl InformationSchemaRegionPeersBuilder {
let region_id = RegionId::new(table_id, route.region.id.region_number()).as_u64();
let peer_id = route.leader_peer.clone().map(|p| p.id);
let peer_addr = route.leader_peer.clone().map(|p| p.addr);
- let status = if let Some(status) = route.leader_status {
- Some(status.as_ref().to_string())
+ let state = if let Some(state) = route.leader_state {
+ Some(state.as_ref().to_string())
} else {
// Alive by default
Some("ALIVE".to_string())
@@ -242,7 +242,7 @@ impl InformationSchemaRegionPeersBuilder {
self.peer_ids.push(peer_id);
self.peer_addrs.push(peer_addr.as_deref());
self.is_leaders.push(Some("Yes"));
- self.statuses.push(status.as_deref());
+ self.statuses.push(state.as_deref());
self.down_seconds
.push(route.leader_down_millis().map(|m| m / 1000));
}
diff --git a/src/cmd/src/cli/bench.rs b/src/cmd/src/cli/bench.rs
index bf5a6825f014..f3d1d0f8097f 100644
--- a/src/cmd/src/cli/bench.rs
+++ b/src/cmd/src/cli/bench.rs
@@ -158,7 +158,7 @@ fn create_region_routes(regions: Vec<RegionNumber>) -> Vec<RegionRoute> {
addr: String::new(),
}),
follower_peers: vec![],
- leader_status: None,
+ leader_state: None,
leader_down_since: None,
});
}
diff --git a/src/common/meta/src/ddl/alter_table/region_request.rs b/src/common/meta/src/ddl/alter_table/region_request.rs
index b4223b8ea05d..07563603954f 100644
--- a/src/common/meta/src/ddl/alter_table/region_request.rs
+++ b/src/common/meta/src/ddl/alter_table/region_request.rs
@@ -187,7 +187,7 @@ mod tests {
region: Region::new_test(region_id),
leader_peer: Some(Peer::empty(1)),
follower_peers: vec![],
- leader_status: None,
+ leader_state: None,
leader_down_since: None,
}]),
HashMap::new(),
diff --git a/src/common/meta/src/ddl/tests/alter_table.rs b/src/common/meta/src/ddl/tests/alter_table.rs
index 06654cfe0f3d..36a1ff0ecece 100644
--- a/src/common/meta/src/ddl/tests/alter_table.rs
+++ b/src/common/meta/src/ddl/tests/alter_table.rs
@@ -107,21 +107,21 @@ async fn test_on_submit_alter_request() {
region: Region::new_test(RegionId::new(table_id, 1)),
leader_peer: Some(Peer::empty(1)),
follower_peers: vec![Peer::empty(5)],
- leader_status: None,
+ leader_state: None,
leader_down_since: None,
},
RegionRoute {
region: Region::new_test(RegionId::new(table_id, 2)),
leader_peer: Some(Peer::empty(2)),
follower_peers: vec![Peer::empty(4)],
- leader_status: None,
+ leader_state: None,
leader_down_since: None,
},
RegionRoute {
region: Region::new_test(RegionId::new(table_id, 3)),
leader_peer: Some(Peer::empty(3)),
follower_peers: vec![],
- leader_status: None,
+ leader_state: None,
leader_down_since: None,
},
]),
@@ -193,21 +193,21 @@ async fn test_on_submit_alter_request_with_outdated_request() {
region: Region::new_test(RegionId::new(table_id, 1)),
leader_peer: Some(Peer::empty(1)),
follower_peers: vec![Peer::empty(5)],
- leader_status: None,
+ leader_state: None,
leader_down_since: None,
},
RegionRoute {
region: Region::new_test(RegionId::new(table_id, 2)),
leader_peer: Some(Peer::empty(2)),
follower_peers: vec![Peer::empty(4)],
- leader_status: None,
+ leader_state: None,
leader_down_since: None,
},
RegionRoute {
region: Region::new_test(RegionId::new(table_id, 3)),
leader_peer: Some(Peer::empty(3)),
follower_peers: vec![],
- leader_status: None,
+ leader_state: None,
leader_down_since: None,
},
]),
diff --git a/src/common/meta/src/ddl/tests/drop_table.rs b/src/common/meta/src/ddl/tests/drop_table.rs
index aff123747223..c3a5f5875cad 100644
--- a/src/common/meta/src/ddl/tests/drop_table.rs
+++ b/src/common/meta/src/ddl/tests/drop_table.rs
@@ -119,21 +119,21 @@ async fn test_on_datanode_drop_regions() {
region: Region::new_test(RegionId::new(table_id, 1)),
leader_peer: Some(Peer::empty(1)),
follower_peers: vec![Peer::empty(5)],
- leader_status: None,
+ leader_state: None,
leader_down_since: None,
},
RegionRoute {
region: Region::new_test(RegionId::new(table_id, 2)),
leader_peer: Some(Peer::empty(2)),
follower_peers: vec![Peer::empty(4)],
- leader_status: None,
+ leader_state: None,
leader_down_since: None,
},
RegionRoute {
region: Region::new_test(RegionId::new(table_id, 3)),
leader_peer: Some(Peer::empty(3)),
follower_peers: vec![],
- leader_status: None,
+ leader_state: None,
leader_down_since: None,
},
]),
diff --git a/src/common/meta/src/instruction.rs b/src/common/meta/src/instruction.rs
index 61e2811e72b2..4864f7562d10 100644
--- a/src/common/meta/src/instruction.rs
+++ b/src/common/meta/src/instruction.rs
@@ -137,14 +137,16 @@ pub struct DowngradeRegion {
/// `None` stands for don't flush before downgrading the region.
#[serde(default)]
pub flush_timeout: Option<Duration>,
+ /// Rejects all write requests after flushing.
+ pub reject_write: bool,
}
impl Display for DowngradeRegion {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(
f,
- "DowngradeRegion(region_id={}, flush_timeout={:?})",
- self.region_id, self.flush_timeout,
+ "DowngradeRegion(region_id={}, flush_timeout={:?}, reject_write={})",
+ self.region_id, self.flush_timeout, self.reject_write
)
}
}
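The `DowngradeRegion` instruction above gains a `reject_write` flag so that, once the flush completes, the region can be told to reject further write requests. A small stand-alone sketch of how such a payload serializes, assuming `serde` (with derive) and `serde_json`; the struct is a simplified stand-in for the real `common_meta` type, with `region_id` reduced to a bare `u64`:

```rust
use std::time::Duration;

use serde::{Deserialize, Serialize};

// Simplified stand-in for the DowngradeRegion instruction in the diff above.
#[derive(Debug, Serialize, Deserialize)]
struct DowngradeRegion {
    region_id: u64,
    // `default` lets senders omit the flush timeout, as in the diff.
    #[serde(default)]
    flush_timeout: Option<Duration>,
    // New in this change: reject all write requests after flushing.
    reject_write: bool,
}

fn main() -> serde_json::Result<()> {
    let instruction = DowngradeRegion {
        region_id: 4398046511105, // e.g. RegionId::new(1024, 1) as a raw u64
        flush_timeout: Some(Duration::from_secs(1)),
        reject_write: true,
    };
    println!("{}", serde_json::to_string_pretty(&instruction)?);

    // Note: a payload produced before this change has no `reject_write` key, so
    // deserializing it into this struct fails unless that field also gets a
    // serde default.
    Ok(())
}
```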
diff --git a/src/common/meta/src/key.rs b/src/common/meta/src/key.rs
index d864882da6dd..0f703b9430a3 100644
--- a/src/common/meta/src/key.rs
+++ b/src/common/meta/src/key.rs
@@ -140,7 +140,7 @@ use crate::key::table_route::TableRouteKey;
use crate::key::txn_helper::TxnOpGetResponseSet;
use crate::kv_backend::txn::{Txn, TxnOp};
use crate::kv_backend::KvBackendRef;
-use crate::rpc::router::{region_distribution, RegionRoute, RegionStatus};
+use crate::rpc::router::{region_distribution, LeaderState, RegionRoute};
use crate::rpc::store::BatchDeleteRequest;
use crate::DatanodeId;
@@ -1126,14 +1126,14 @@ impl TableMetadataManager {
next_region_route_status: F,
) -> Result<()>
where
- F: Fn(&RegionRoute) -> Option<Option<RegionStatus>>,
+ F: Fn(&RegionRoute) -> Option<Option<LeaderState>>,
{
let mut new_region_routes = current_table_route_value.region_routes()?.clone();
let mut updated = 0;
for route in &mut new_region_routes {
- if let Some(status) = next_region_route_status(route) {
- if route.set_leader_status(status) {
+ if let Some(state) = next_region_route_status(route) {
+ if route.set_leader_state(state) {
updated += 1;
}
}
@@ -1280,7 +1280,7 @@ mod tests {
use crate::key::{DeserializedValueWithBytes, TableMetadataManager, ViewInfoValue};
use crate::kv_backend::memory::MemoryKvBackend;
use crate::peer::Peer;
- use crate::rpc::router::{region_distribution, Region, RegionRoute, RegionStatus};
+ use crate::rpc::router::{region_distribution, LeaderState, Region, RegionRoute};
#[test]
fn test_deserialized_value_with_bytes() {
@@ -1324,7 +1324,7 @@ mod tests {
},
leader_peer: Some(Peer::new(datanode, "a2")),
follower_peers: vec![],
- leader_status: None,
+ leader_state: None,
leader_down_since: None,
}
}
@@ -1715,7 +1715,7 @@ mod tests {
attrs: BTreeMap::new(),
},
leader_peer: Some(Peer::new(datanode, "a2")),
- leader_status: Some(RegionStatus::Downgraded),
+ leader_state: Some(LeaderState::Downgrading),
follower_peers: vec![],
leader_down_since: Some(current_time_millis()),
},
@@ -1727,7 +1727,7 @@ mod tests {
attrs: BTreeMap::new(),
},
leader_peer: Some(Peer::new(datanode, "a1")),
- leader_status: None,
+ leader_state: None,
follower_peers: vec![],
leader_down_since: None,
},
@@ -1750,10 +1750,10 @@ mod tests {
table_metadata_manager
.update_leader_region_status(table_id, ¤t_table_route_value, |region_route| {
- if region_route.leader_status.is_some() {
+ if region_route.leader_state.is_some() {
None
} else {
- Some(Some(RegionStatus::Downgraded))
+ Some(Some(LeaderState::Downgrading))
}
})
.await
@@ -1768,8 +1768,8 @@ mod tests {
.unwrap();
assert_eq!(
- updated_route_value.region_routes().unwrap()[0].leader_status,
- Some(RegionStatus::Downgraded)
+ updated_route_value.region_routes().unwrap()[0].leader_state,
+ Some(LeaderState::Downgrading)
);
assert!(updated_route_value.region_routes().unwrap()[0]
@@ -1777,8 +1777,8 @@ mod tests {
.is_some());
assert_eq!(
- updated_route_value.region_routes().unwrap()[1].leader_status,
- Some(RegionStatus::Downgraded)
+ updated_route_value.region_routes().unwrap()[1].leader_state,
+ Some(LeaderState::Downgrading)
);
assert!(updated_route_value.region_routes().unwrap()[1]
.leader_down_since
@@ -1943,21 +1943,21 @@ mod tests {
region: Region::new_test(RegionId::new(table_id, 1)),
leader_peer: Some(Peer::empty(1)),
follower_peers: vec![Peer::empty(5)],
- leader_status: None,
+ leader_state: None,
leader_down_since: None,
},
RegionRoute {
region: Region::new_test(RegionId::new(table_id, 2)),
leader_peer: Some(Peer::empty(2)),
follower_peers: vec![Peer::empty(4)],
- leader_status: None,
+ leader_state: None,
leader_down_since: None,
},
RegionRoute {
region: Region::new_test(RegionId::new(table_id, 3)),
leader_peer: Some(Peer::empty(3)),
follower_peers: vec![],
- leader_status: None,
+ leader_state: None,
leader_down_since: None,
},
]),
@@ -1996,21 +1996,21 @@ mod tests {
region: Region::new_test(RegionId::new(table_id, 1)),
leader_peer: Some(Peer::empty(1)),
follower_peers: vec![Peer::empty(5)],
- leader_status: None,
+ leader_state: None,
leader_down_since: None,
},
RegionRoute {
region: Region::new_test(RegionId::new(table_id, 2)),
leader_peer: Some(Peer::empty(2)),
follower_peers: vec![Peer::empty(4)],
- leader_status: None,
+ leader_state: None,
leader_down_since: None,
},
RegionRoute {
region: Region::new_test(RegionId::new(table_id, 3)),
leader_peer: Some(Peer::empty(3)),
follower_peers: vec![],
- leader_status: None,
+ leader_state: None,
leader_down_since: None,
},
]),
diff --git a/src/common/meta/src/key/table_route.rs b/src/common/meta/src/key/table_route.rs
index 0be0aab3aae0..c9990ab12129 100644
--- a/src/common/meta/src/key/table_route.rs
+++ b/src/common/meta/src/key/table_route.rs
@@ -744,6 +744,7 @@ mod tests {
use crate::kv_backend::memory::MemoryKvBackend;
use crate::kv_backend::{KvBackend, TxnService};
use crate::peer::Peer;
+ use crate::rpc::router::Region;
use crate::rpc::store::PutRequest;
#[test]
@@ -751,11 +752,43 @@ mod tests {
let old_raw_v = r#"{"region_routes":[{"region":{"id":1,"name":"r1","partition":null,"attrs":{}},"leader_peer":{"id":2,"addr":"a2"},"follower_peers":[]},{"region":{"id":1,"name":"r1","partition":null,"attrs":{}},"leader_peer":{"id":2,"addr":"a2"},"follower_peers":[]}],"version":0}"#;
let v = TableRouteValue::try_from_raw_value(old_raw_v.as_bytes()).unwrap();
- let new_raw_v = format!("{:?}", v);
- assert_eq!(
- new_raw_v,
- r#"Physical(PhysicalTableRouteValue { region_routes: [RegionRoute { region: Region { id: 1(0, 1), name: "r1", partition: None, attrs: {} }, leader_peer: Some(Peer { id: 2, addr: "a2" }), follower_peers: [], leader_status: None, leader_down_since: None }, RegionRoute { region: Region { id: 1(0, 1), name: "r1", partition: None, attrs: {} }, leader_peer: Some(Peer { id: 2, addr: "a2" }), follower_peers: [], leader_status: None, leader_down_since: None }], version: 0 })"#
- );
+ let expected_table_route = TableRouteValue::Physical(PhysicalTableRouteValue {
+ region_routes: vec![
+ RegionRoute {
+ region: Region {
+ id: RegionId::new(0, 1),
+ name: "r1".to_string(),
+ partition: None,
+ attrs: Default::default(),
+ },
+ leader_peer: Some(Peer {
+ id: 2,
+ addr: "a2".to_string(),
+ }),
+ follower_peers: vec![],
+ leader_state: None,
+ leader_down_since: None,
+ },
+ RegionRoute {
+ region: Region {
+ id: RegionId::new(0, 1),
+ name: "r1".to_string(),
+ partition: None,
+ attrs: Default::default(),
+ },
+ leader_peer: Some(Peer {
+ id: 2,
+ addr: "a2".to_string(),
+ }),
+ follower_peers: vec![],
+ leader_state: None,
+ leader_down_since: None,
+ },
+ ],
+ version: 0,
+ });
+
+ assert_eq!(v, expected_table_route);
}
#[test]
diff --git a/src/common/meta/src/region_keeper.rs b/src/common/meta/src/region_keeper.rs
index a0d53b847752..54d5d6cc11d3 100644
--- a/src/common/meta/src/region_keeper.rs
+++ b/src/common/meta/src/region_keeper.rs
@@ -58,7 +58,7 @@ impl MemoryRegionKeeper {
Default::default()
}
- /// Returns [OpeningRegionGuard] if Region(`region_id`) on Peer(`datanode_id`) does not exist.
+ /// Returns [OperatingRegionGuard] if Region(`region_id`) on Peer(`datanode_id`) does not exist.
pub fn register(
&self,
datanode_id: DatanodeId,
diff --git a/src/common/meta/src/rpc/router.rs b/src/common/meta/src/rpc/router.rs
index 3e609e4af4d8..8dc409c8be31 100644
--- a/src/common/meta/src/rpc/router.rs
+++ b/src/common/meta/src/rpc/router.rs
@@ -108,16 +108,16 @@ pub fn convert_to_region_peer_map(
.collect::<HashMap<_, _>>()
}
-/// Returns the HashMap<[RegionNumber], [RegionStatus]>;
-pub fn convert_to_region_leader_status_map(
+/// Returns the HashMap<[RegionNumber], [LeaderState]>;
+pub fn convert_to_region_leader_state_map(
region_routes: &[RegionRoute],
-) -> HashMap<RegionNumber, RegionStatus> {
+) -> HashMap<RegionNumber, LeaderState> {
region_routes
.iter()
.filter_map(|x| {
- x.leader_status
+ x.leader_state
.as_ref()
- .map(|status| (x.region.id.region_number(), *status))
+ .map(|state| (x.region.id.region_number(), *state))
})
.collect::<HashMap<_, _>>()
}
@@ -205,7 +205,7 @@ impl TableRoute {
region,
leader_peer,
follower_peers,
- leader_status: None,
+ leader_state: None,
leader_down_since: None,
});
}
@@ -259,9 +259,13 @@ pub struct RegionRoute {
pub follower_peers: Vec<Peer>,
/// `None` by default.
#[builder(setter(into, strip_option), default)]
- #[serde(default, skip_serializing_if = "Option::is_none")]
- pub leader_status: Option<RegionStatus>,
- /// The start time when the leader is in `Downgraded` status.
+ #[serde(
+ default,
+ alias = "leader_status",
+ skip_serializing_if = "Option::is_none"
+ )]
+ pub leader_state: Option<LeaderState>,
+    /// The start time when the leader entered the `Downgrading` state.
#[serde(default)]
#[builder(default = "self.default_leader_down_since()")]
pub leader_down_since: Option<i64>,
@@ -269,76 +273,78 @@ pub struct RegionRoute {
impl RegionRouteBuilder {
fn default_leader_down_since(&self) -> Option<i64> {
- match self.leader_status {
- Some(Some(RegionStatus::Downgraded)) => Some(current_time_millis()),
+ match self.leader_state {
+ Some(Some(LeaderState::Downgrading)) => Some(current_time_millis()),
_ => None,
}
}
}
-/// The Status of the [Region].
+/// The State of the [`Region`] Leader.
/// TODO(dennis): It's better to add more fine-grained statuses such as `PENDING` etc.
#[derive(Debug, Clone, Copy, Deserialize, Serialize, PartialEq, AsRefStr)]
#[strum(serialize_all = "UPPERCASE")]
-pub enum RegionStatus {
- /// The following cases in which the [Region] will be downgraded.
+pub enum LeaderState {
+    /// The [`Region`] will be downgraded in the following cases:
///
- /// - The [Region] is unavailable(e.g., Crashed, Network disconnected).
- /// - The [Region] was planned to migrate to another [Peer].
- Downgraded,
+ /// - The [`Region`] may be unavailable (e.g., Crashed, Network disconnected).
+ /// - The [`Region`] was planned to migrate to another [`Peer`].
+ Downgrading,
}
impl RegionRoute {
- /// Returns true if the Leader [Region] is downgraded.
+    /// Returns true if the Leader [`Region`] is downgrading.
///
- /// The following cases in which the [Region] will be downgraded.
+    /// The [`Region`] will be downgraded in the following cases:
///
- /// - The [Region] is unavailable(e.g., Crashed, Network disconnected).
- /// - The [Region] was planned to migrate to another [Peer].
+    /// - The [`Region`] is unavailable (e.g., Crashed, Network disconnected).
+ /// - The [`Region`] was planned to migrate to another [`Peer`].
///
- pub fn is_leader_downgraded(&self) -> bool {
- matches!(self.leader_status, Some(RegionStatus::Downgraded))
+ pub fn is_leader_downgrading(&self) -> bool {
+ matches!(self.leader_state, Some(LeaderState::Downgrading))
}
- /// Marks the Leader [Region] as downgraded.
+    /// Marks the Leader [`Region`] as [`LeaderState::Downgrading`].
///
- /// We should downgrade a [Region] before deactivating it:
+ /// We should downgrade a [`Region`] before deactivating it:
///
- /// - During the [Region] Failover Procedure.
- /// - Migrating a [Region].
+ /// - During the [`Region`] Failover Procedure.
+ /// - Migrating a [`Region`].
///
- /// **Notes:** Meta Server will stop renewing the lease for the downgraded [Region].
+    /// **Notes:** Meta Server will renew a special lease (`Downgrading`) for the downgrading [`Region`].
+ ///
+    /// A downgrading region will reject any write requests and only allow the memtable to be flushed to object storage.
///
pub fn downgrade_leader(&mut self) {
self.leader_down_since = Some(current_time_millis());
- self.leader_status = Some(RegionStatus::Downgraded)
+ self.leader_state = Some(LeaderState::Downgrading)
}
- /// Returns how long since the leader is in `Downgraded` status.
+    /// Returns how long the leader has been in the `Downgrading` state.
pub fn leader_down_millis(&self) -> Option<i64> {
self.leader_down_since
.map(|start| current_time_millis() - start)
}
- /// Sets the leader status.
+ /// Sets the leader state.
///
/// Returns true if updated.
- pub fn set_leader_status(&mut self, status: Option<RegionStatus>) -> bool {
- let updated = self.leader_status != status;
+ pub fn set_leader_state(&mut self, state: Option<LeaderState>) -> bool {
+ let updated = self.leader_state != state;
- match (status, updated) {
- (Some(RegionStatus::Downgraded), true) => {
+ match (state, updated) {
+ (Some(LeaderState::Downgrading), true) => {
self.leader_down_since = Some(current_time_millis());
}
- (Some(RegionStatus::Downgraded), false) => {
- // Do nothing if leader is still in `Downgraded` status.
+ (Some(LeaderState::Downgrading), false) => {
+            // Do nothing if the leader is still in the `Downgrading` state.
}
_ => {
self.leader_down_since = None;
}
}
- self.leader_status = status;
+ self.leader_state = state;
updated
}
}
@@ -477,15 +483,15 @@ mod tests {
},
leader_peer: Some(Peer::new(1, "a1")),
follower_peers: vec![Peer::new(2, "a2"), Peer::new(3, "a3")],
- leader_status: None,
+ leader_state: None,
leader_down_since: None,
};
- assert!(!region_route.is_leader_downgraded());
+ assert!(!region_route.is_leader_downgrading());
region_route.downgrade_leader();
- assert!(region_route.is_leader_downgraded());
+ assert!(region_route.is_leader_downgrading());
}
#[test]
@@ -499,7 +505,7 @@ mod tests {
},
leader_peer: Some(Peer::new(1, "a1")),
follower_peers: vec![Peer::new(2, "a2"), Peer::new(3, "a3")],
- leader_status: None,
+ leader_state: None,
leader_down_since: None,
};
diff --git a/src/datanode/src/alive_keeper.rs b/src/datanode/src/alive_keeper.rs
index c6ef6cb3f6db..a0ea2c0188bb 100644
--- a/src/datanode/src/alive_keeper.rs
+++ b/src/datanode/src/alive_keeper.rs
@@ -129,8 +129,10 @@ impl RegionAliveKeeper {
let request = RegionRequest::Close(RegionCloseRequest {});
if let Err(e) = self.region_server.handle_request(region_id, request).await {
if e.status_code() != StatusCode::RegionNotFound {
- let _ = self.region_server.set_writable(region_id, false);
- error!(e; "Failed to close staled region {}, set region to readonly.",region_id);
+ let _ = self
+ .region_server
+ .set_region_role(region_id, RegionRole::Follower);
+                error!(e; "Failed to close staled region {}, converted region to follower.", region_id);
}
}
}
@@ -378,7 +380,7 @@ impl CountdownTask {
}
},
Some(CountdownCommand::Reset((role, deadline))) => {
- let _ = self.region_server.set_writable(self.region_id, role.writable());
+ let _ = self.region_server.set_region_role(self.region_id, role);
trace!(
"Reset deadline of region {region_id} to approximately {} seconds later.",
(deadline - Instant::now()).as_secs_f32(),
@@ -399,8 +401,8 @@ impl CountdownTask {
}
}
() = &mut countdown => {
- warn!("The region {region_id} lease is expired, set region to readonly.");
- let _ = self.region_server.set_writable(self.region_id, false);
+                    warn!("The region {region_id} lease is expired, converting region to follower.");
+ let _ = self.region_server.set_region_role(self.region_id, RegionRole::Follower);
// resets the countdown.
let far_future = Instant::now() + Duration::from_secs(86400 * 365 * 30);
countdown.as_mut().reset(far_future);
@@ -436,7 +438,9 @@ mod test {
.handle_request(region_id, RegionRequest::Create(builder.build()))
.await
.unwrap();
- region_server.set_writable(region_id, true).unwrap();
+ region_server
+ .set_region_role(region_id, RegionRole::Leader)
+ .unwrap();
// Register a region before starting.
alive_keeper.register_region(region_id).await;
diff --git a/src/datanode/src/datanode.rs b/src/datanode/src/datanode.rs
index f5d7bd9fc627..128a60ab9b7b 100644
--- a/src/datanode/src/datanode.rs
+++ b/src/datanode/src/datanode.rs
@@ -47,7 +47,7 @@ use servers::server::ServerHandlers;
use servers::Mode;
use snafu::{ensure, OptionExt, ResultExt};
use store_api::path_utils::{region_dir, WAL_DIR};
-use store_api::region_engine::RegionEngineRef;
+use store_api::region_engine::{RegionEngineRef, RegionRole};
use store_api::region_request::RegionOpenRequest;
use store_api::storage::RegionId;
use tokio::fs;
@@ -546,9 +546,9 @@ async fn open_all_regions(
for region_id in open_regions {
if open_with_writable {
- if let Err(e) = region_server.set_writable(region_id, true) {
+ if let Err(e) = region_server.set_region_role(region_id, RegionRole::Leader) {
error!(
- e; "failed to set writable for region {region_id}"
+ e; "failed to convert region {region_id} to leader"
);
}
}
diff --git a/src/datanode/src/heartbeat.rs b/src/datanode/src/heartbeat.rs
index d84552a8d215..ef9af0acdd02 100644
--- a/src/datanode/src/heartbeat.rs
+++ b/src/datanode/src/heartbeat.rs
@@ -126,7 +126,9 @@ impl HeartbeatTask {
let mut follower_region_lease_count = 0;
for lease in &lease.regions {
match lease.role() {
- RegionRole::Leader => leader_region_lease_count += 1,
+ RegionRole::Leader | RegionRole::DowngradingLeader => {
+ leader_region_lease_count += 1
+ }
RegionRole::Follower => follower_region_lease_count += 1,
}
}
diff --git a/src/datanode/src/heartbeat/handler.rs b/src/datanode/src/heartbeat/handler.rs
index d23615eb13d8..89b6991788cc 100644
--- a/src/datanode/src/heartbeat/handler.rs
+++ b/src/datanode/src/heartbeat/handler.rs
@@ -153,6 +153,7 @@ mod tests {
use mito2::engine::MITO_ENGINE_NAME;
use mito2::test_util::{CreateRequestBuilder, TestEnv};
use store_api::path_utils::region_dir;
+ use store_api::region_engine::RegionRole;
use store_api::region_request::{RegionCloseRequest, RegionRequest};
use store_api::storage::RegionId;
use tokio::sync::mpsc::{self, Receiver};
@@ -213,6 +214,7 @@ mod tests {
let instruction = Instruction::DowngradeRegion(DowngradeRegion {
region_id: RegionId::new(2048, 1),
flush_timeout: Some(Duration::from_secs(1)),
+ reject_write: false,
});
assert!(heartbeat_handler
.is_acceptable(&heartbeat_env.create_handler_ctx((meta.clone(), instruction))));
@@ -295,7 +297,9 @@ mod tests {
}
assert_matches!(
- region_server.set_writable(region_id, true).unwrap_err(),
+ region_server
+ .set_region_role(region_id, RegionRole::Leader)
+ .unwrap_err(),
error::Error::RegionNotFound { .. }
);
}
@@ -411,6 +415,7 @@ mod tests {
let instruction = Instruction::DowngradeRegion(DowngradeRegion {
region_id,
flush_timeout: Some(Duration::from_secs(1)),
+ reject_write: false,
});
let mut ctx = heartbeat_env.create_handler_ctx((meta, instruction));
@@ -433,6 +438,7 @@ mod tests {
let instruction = Instruction::DowngradeRegion(DowngradeRegion {
region_id: RegionId::new(2048, 1),
flush_timeout: Some(Duration::from_secs(1)),
+ reject_write: false,
});
let mut ctx = heartbeat_env.create_handler_ctx((meta, instruction));
let control = heartbeat_handler.handle(&mut ctx).await.unwrap();
diff --git a/src/datanode/src/heartbeat/handler/downgrade_region.rs b/src/datanode/src/heartbeat/handler/downgrade_region.rs
index ac1179280376..fd85c75ba2dd 100644
--- a/src/datanode/src/heartbeat/handler/downgrade_region.rs
+++ b/src/datanode/src/heartbeat/handler/downgrade_region.rs
@@ -16,7 +16,7 @@ use common_meta::instruction::{DowngradeRegion, DowngradeRegionReply, Instructio
use common_telemetry::tracing::info;
use common_telemetry::warn;
use futures_util::future::BoxFuture;
-use store_api::region_engine::SetReadonlyResponse;
+use store_api::region_engine::{SetRegionRoleStateResponse, SettableRegionRoleState};
use store_api::region_request::{RegionFlushRequest, RegionRequest};
use store_api::storage::RegionId;
@@ -24,16 +24,20 @@ use crate::heartbeat::handler::HandlerContext;
use crate::heartbeat::task_tracker::WaitResult;
impl HandlerContext {
- async fn set_readonly_gracefully(&self, region_id: RegionId) -> InstructionReply {
- match self.region_server.set_readonly_gracefully(region_id).await {
- Ok(SetReadonlyResponse::Success { last_entry_id }) => {
+ async fn downgrade_to_follower_gracefully(&self, region_id: RegionId) -> InstructionReply {
+ match self
+ .region_server
+ .set_region_role_state_gracefully(region_id, SettableRegionRoleState::Follower)
+ .await
+ {
+ Ok(SetRegionRoleStateResponse::Success { last_entry_id }) => {
InstructionReply::DowngradeRegion(DowngradeRegionReply {
last_entry_id,
exists: true,
error: None,
})
}
- Ok(SetReadonlyResponse::NotFound) => {
+ Ok(SetRegionRoleStateResponse::NotFound) => {
InstructionReply::DowngradeRegion(DowngradeRegionReply {
last_entry_id: None,
exists: false,
@@ -53,10 +57,12 @@ impl HandlerContext {
DowngradeRegion {
region_id,
flush_timeout,
+ reject_write,
}: DowngradeRegion,
) -> BoxFuture<'static, InstructionReply> {
Box::pin(async move {
- let Some(writable) = self.region_server.is_writable(region_id) else {
+ let Some(writable) = self.region_server.is_region_leader(region_id) else {
+ warn!("Region: {region_id} is not found");
return InstructionReply::DowngradeRegion(DowngradeRegionReply {
last_entry_id: None,
exists: false,
@@ -64,61 +70,89 @@ impl HandlerContext {
});
};
+ let region_server_moved = self.region_server.clone();
+
// Ignores flush request
if !writable {
- return self.set_readonly_gracefully(region_id).await;
+ return self.downgrade_to_follower_gracefully(region_id).await;
}
- let region_server_moved = self.region_server.clone();
- if let Some(flush_timeout) = flush_timeout {
- let register_result = self
- .downgrade_tasks
- .try_register(
+ // If flush_timeout is not set, directly convert region to follower.
+ let Some(flush_timeout) = flush_timeout else {
+ return self.downgrade_to_follower_gracefully(region_id).await;
+ };
+
+ if reject_write {
+            // Sets the region to downgrading; a downgrading region rejects all write requests.
+ match self
+ .region_server
+ .set_region_role_state_gracefully(
region_id,
- Box::pin(async move {
- info!("Flush region: {region_id} before downgrading region");
- region_server_moved
- .handle_request(
- region_id,
- RegionRequest::Flush(RegionFlushRequest {
- row_group_size: None,
- }),
- )
- .await?;
-
- Ok(())
- }),
+ SettableRegionRoleState::DowngradingLeader,
)
- .await;
-
- if register_result.is_busy() {
- warn!("Another flush task is running for the region: {region_id}");
- }
-
- let mut watcher = register_result.into_watcher();
- let result = self.catchup_tasks.wait(&mut watcher, flush_timeout).await;
-
- match result {
- WaitResult::Timeout => {
- InstructionReply::DowngradeRegion(DowngradeRegionReply {
+ .await
+ {
+ Ok(SetRegionRoleStateResponse::Success { .. }) => {}
+ Ok(SetRegionRoleStateResponse::NotFound) => {
+ warn!("Region: {region_id} is not found");
+ return InstructionReply::DowngradeRegion(DowngradeRegionReply {
last_entry_id: None,
- exists: true,
- error: Some(format!(
- "Flush region: {region_id} before downgrading region is timeout"
- )),
- })
+ exists: false,
+ error: None,
+ });
}
- WaitResult::Finish(Ok(_)) => self.set_readonly_gracefully(region_id).await,
- WaitResult::Finish(Err(err)) => {
- InstructionReply::DowngradeRegion(DowngradeRegionReply {
+ Err(err) => {
+ warn!(err; "Failed to convert region to downgrading leader");
+ return InstructionReply::DowngradeRegion(DowngradeRegionReply {
last_entry_id: None,
exists: true,
error: Some(format!("{err:?}")),
- })
+ });
}
}
- } else {
- self.set_readonly_gracefully(region_id).await
+ }
+
+ let register_result = self
+ .downgrade_tasks
+ .try_register(
+ region_id,
+ Box::pin(async move {
+ info!("Flush region: {region_id} before converting region to follower");
+ region_server_moved
+ .handle_request(
+ region_id,
+ RegionRequest::Flush(RegionFlushRequest {
+ row_group_size: None,
+ }),
+ )
+ .await?;
+
+ Ok(())
+ }),
+ )
+ .await;
+
+ if register_result.is_busy() {
+ warn!("Another flush task is running for the region: {region_id}");
+ }
+
+ let mut watcher = register_result.into_watcher();
+ let result = self.catchup_tasks.wait(&mut watcher, flush_timeout).await;
+
+ match result {
+ WaitResult::Timeout => InstructionReply::DowngradeRegion(DowngradeRegionReply {
+ last_entry_id: None,
+ exists: true,
+                error: Some(format!("Flush region: {region_id} timed out")),
+ }),
+ WaitResult::Finish(Ok(_)) => self.downgrade_to_follower_gracefully(region_id).await,
+ WaitResult::Finish(Err(err)) => {
+ InstructionReply::DowngradeRegion(DowngradeRegionReply {
+ last_entry_id: None,
+ exists: true,
+ error: Some(format!("{err:?}")),
+ })
+ }
}
})
}
@@ -131,7 +165,7 @@ mod tests {
use common_meta::instruction::{DowngradeRegion, InstructionReply};
use mito2::engine::MITO_ENGINE_NAME;
- use store_api::region_engine::{RegionRole, SetReadonlyResponse};
+ use store_api::region_engine::{RegionRole, SetRegionRoleStateResponse};
use store_api::region_request::RegionRequest;
use store_api::storage::RegionId;
use tokio::time::Instant;
@@ -155,6 +189,7 @@ mod tests {
.handle_downgrade_region_instruction(DowngradeRegion {
region_id,
flush_timeout,
+ reject_write: false,
})
.await;
assert_matches!(reply, InstructionReply::DowngradeRegion(_));
@@ -182,8 +217,9 @@ mod tests {
Ok(0)
}));
- region_engine.handle_set_readonly_gracefully_mock_fn =
- Some(Box::new(|_| Ok(SetReadonlyResponse::success(Some(1024)))))
+ region_engine.handle_set_readonly_gracefully_mock_fn = Some(Box::new(|_| {
+ Ok(SetRegionRoleStateResponse::success(Some(1024)))
+ }))
});
mock_region_server.register_test_region(region_id, mock_engine);
let handler_context = HandlerContext::new_for_test(mock_region_server);
@@ -195,6 +231,7 @@ mod tests {
.handle_downgrade_region_instruction(DowngradeRegion {
region_id,
flush_timeout,
+ reject_write: false,
})
.await;
assert_matches!(reply, InstructionReply::DowngradeRegion(_));
@@ -215,8 +252,9 @@ mod tests {
MockRegionEngine::with_custom_apply_fn(MITO_ENGINE_NAME, |region_engine| {
region_engine.mock_role = Some(Some(RegionRole::Leader));
region_engine.handle_request_delay = Some(Duration::from_secs(100));
- region_engine.handle_set_readonly_gracefully_mock_fn =
- Some(Box::new(|_| Ok(SetReadonlyResponse::success(Some(1024)))))
+ region_engine.handle_set_readonly_gracefully_mock_fn = Some(Box::new(|_| {
+ Ok(SetRegionRoleStateResponse::success(Some(1024)))
+ }))
});
mock_region_server.register_test_region(region_id, mock_engine);
let handler_context = HandlerContext::new_for_test(mock_region_server);
@@ -227,6 +265,7 @@ mod tests {
.handle_downgrade_region_instruction(DowngradeRegion {
region_id,
flush_timeout: Some(flush_timeout),
+ reject_write: false,
})
.await;
assert_matches!(reply, InstructionReply::DowngradeRegion(_));
@@ -246,8 +285,9 @@ mod tests {
MockRegionEngine::with_custom_apply_fn(MITO_ENGINE_NAME, |region_engine| {
region_engine.mock_role = Some(Some(RegionRole::Leader));
region_engine.handle_request_delay = Some(Duration::from_millis(300));
- region_engine.handle_set_readonly_gracefully_mock_fn =
- Some(Box::new(|_| Ok(SetReadonlyResponse::success(Some(1024)))))
+ region_engine.handle_set_readonly_gracefully_mock_fn = Some(Box::new(|_| {
+ Ok(SetRegionRoleStateResponse::success(Some(1024)))
+ }))
});
mock_region_server.register_test_region(region_id, mock_engine);
let handler_context = HandlerContext::new_for_test(mock_region_server);
@@ -263,6 +303,7 @@ mod tests {
.handle_downgrade_region_instruction(DowngradeRegion {
region_id,
flush_timeout,
+ reject_write: false,
})
.await;
assert_matches!(reply, InstructionReply::DowngradeRegion(_));
@@ -277,6 +318,7 @@ mod tests {
.handle_downgrade_region_instruction(DowngradeRegion {
region_id,
flush_timeout: Some(Duration::from_millis(500)),
+ reject_write: false,
})
.await;
assert_matches!(reply, InstructionReply::DowngradeRegion(_));
@@ -304,8 +346,9 @@ mod tests {
}
.fail()
}));
- region_engine.handle_set_readonly_gracefully_mock_fn =
- Some(Box::new(|_| Ok(SetReadonlyResponse::success(Some(1024)))))
+ region_engine.handle_set_readonly_gracefully_mock_fn = Some(Box::new(|_| {
+ Ok(SetRegionRoleStateResponse::success(Some(1024)))
+ }))
});
mock_region_server.register_test_region(region_id, mock_engine);
let handler_context = HandlerContext::new_for_test(mock_region_server);
@@ -321,6 +364,7 @@ mod tests {
.handle_downgrade_region_instruction(DowngradeRegion {
region_id,
flush_timeout,
+ reject_write: false,
})
.await;
assert_matches!(reply, InstructionReply::DowngradeRegion(_));
@@ -335,6 +379,7 @@ mod tests {
.handle_downgrade_region_instruction(DowngradeRegion {
region_id,
flush_timeout: Some(Duration::from_millis(500)),
+ reject_write: false,
})
.await;
assert_matches!(reply, InstructionReply::DowngradeRegion(_));
@@ -356,7 +401,7 @@ mod tests {
MockRegionEngine::with_custom_apply_fn(MITO_ENGINE_NAME, |region_engine| {
region_engine.mock_role = Some(Some(RegionRole::Leader));
region_engine.handle_set_readonly_gracefully_mock_fn =
- Some(Box::new(|_| Ok(SetReadonlyResponse::NotFound)));
+ Some(Box::new(|_| Ok(SetRegionRoleStateResponse::NotFound)));
});
mock_region_server.register_test_region(region_id, mock_engine);
let handler_context = HandlerContext::new_for_test(mock_region_server);
@@ -365,6 +410,7 @@ mod tests {
.handle_downgrade_region_instruction(DowngradeRegion {
region_id,
flush_timeout: None,
+ reject_write: false,
})
.await;
assert_matches!(reply, InstructionReply::DowngradeRegion(_));
@@ -396,6 +442,7 @@ mod tests {
.handle_downgrade_region_instruction(DowngradeRegion {
region_id,
flush_timeout: None,
+ reject_write: false,
})
.await;
assert_matches!(reply, InstructionReply::DowngradeRegion(_));
diff --git a/src/datanode/src/heartbeat/handler/upgrade_region.rs b/src/datanode/src/heartbeat/handler/upgrade_region.rs
index 0d1ef0476c95..9acb3da9c348 100644
--- a/src/datanode/src/heartbeat/handler/upgrade_region.rs
+++ b/src/datanode/src/heartbeat/handler/upgrade_region.rs
@@ -31,7 +31,7 @@ impl HandlerContext {
}: UpgradeRegion,
) -> BoxFuture<'static, InstructionReply> {
Box::pin(async move {
- let Some(writable) = self.region_server.is_writable(region_id) else {
+ let Some(writable) = self.region_server.is_region_leader(region_id) else {
return InstructionReply::UpgradeRegion(UpgradeRegionReply {
ready: false,
exists: false,
diff --git a/src/datanode/src/region_server.rs b/src/datanode/src/region_server.rs
index aa80f52a5c24..0bac53e4d6a2 100644
--- a/src/datanode/src/region_server.rs
+++ b/src/datanode/src/region_server.rs
@@ -54,7 +54,10 @@ use snafu::{ensure, OptionExt, ResultExt};
use store_api::metric_engine_consts::{
FILE_ENGINE_NAME, LOGICAL_TABLE_METADATA_KEY, METRIC_ENGINE_NAME,
};
-use store_api::region_engine::{RegionEngineRef, RegionRole, RegionStatistic, SetReadonlyResponse};
+use store_api::region_engine::{
+ RegionEngineRef, RegionRole, RegionStatistic, SetRegionRoleStateResponse,
+ SettableRegionRoleState,
+};
use store_api::region_request::{
AffectedRows, RegionCloseRequest, RegionOpenRequest, RegionRequest,
};
@@ -274,37 +277,47 @@ impl RegionServer {
.collect()
}
- pub fn is_writable(&self, region_id: RegionId) -> Option<bool> {
- // TODO(weny): Finds a better way.
+ pub fn is_region_leader(&self, region_id: RegionId) -> Option<bool> {
        self.inner.region_map.get(&region_id).and_then(|engine| {
engine.role(region_id).map(|role| match role {
RegionRole::Follower => false,
RegionRole::Leader => true,
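+                // A downgrading leader is still treated as the leader of the region.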
+ RegionRole::DowngradingLeader => true,
})
})
}
- pub fn set_writable(&self, region_id: RegionId, writable: bool) -> Result<()> {
+ pub fn set_region_role(&self, region_id: RegionId, role: RegionRole) -> Result<()> {
let engine = self
.inner
.region_map
            .get(&region_id)
.with_context(|| RegionNotFoundSnafu { region_id })?;
engine
- .set_writable(region_id, writable)
+ .set_region_role(region_id, role)
.with_context(|_| HandleRegionRequestSnafu { region_id })
}
- pub async fn set_readonly_gracefully(
+ /// Set region role state gracefully.
+ ///
+ /// For [SettableRegionRoleState::Follower]:
+ /// After the call returns, the engine ensures that
+ /// no **further** write or flush operations will succeed in this region.
+ ///
+ /// For [SettableRegionRoleState::DowngradingLeader]:
+ /// After the call returns, the engine ensures that
+ /// no **further** write operations will succeed in this region.
+ pub async fn set_region_role_state_gracefully(
&self,
region_id: RegionId,
- ) -> Result<SetReadonlyResponse> {
+ state: SettableRegionRoleState,
+ ) -> Result<SetRegionRoleStateResponse> {
        match self.inner.region_map.get(&region_id) {
Some(engine) => Ok(engine
- .set_readonly_gracefully(region_id)
+ .set_region_role_state_gracefully(region_id, state)
.await
.with_context(|_| HandleRegionRequestSnafu { region_id })?),
- None => Ok(SetReadonlyResponse::NotFound),
+ None => Ok(SetRegionRoleStateResponse::NotFound),
}
}
@@ -842,7 +855,7 @@ impl RegionServerInner {
info!("Region {region_id} is deregistered from engine {engine_type}");
self.region_map
            .remove(&region_id)
- .map(|(id, engine)| engine.set_writable(id, false));
+ .map(|(id, engine)| engine.set_region_role(id, RegionRole::Follower));
self.event_listener.on_region_deregistered(region_id);
}
RegionChange::Catchup => {
diff --git a/src/datanode/src/tests.rs b/src/datanode/src/tests.rs
index 35f513bc8348..2acc66a5927d 100644
--- a/src/datanode/src/tests.rs
+++ b/src/datanode/src/tests.rs
@@ -32,7 +32,8 @@ use query::{QueryEngine, QueryEngineContext};
use session::context::QueryContextRef;
use store_api::metadata::RegionMetadataRef;
use store_api::region_engine::{
- RegionEngine, RegionRole, RegionScannerRef, RegionStatistic, SetReadonlyResponse,
+ RegionEngine, RegionRole, RegionScannerRef, RegionStatistic, SetRegionRoleStateResponse,
+ SettableRegionRoleState,
};
use store_api::region_request::{AffectedRows, RegionRequest};
use store_api::storage::{RegionId, ScanRequest};
@@ -106,7 +107,7 @@ pub type MockRequestHandler =
Box<dyn Fn(RegionId, RegionRequest) -> Result<AffectedRows, Error> + Send + Sync>;
pub type MockSetReadonlyGracefullyHandler =
- Box<dyn Fn(RegionId) -> Result<SetReadonlyResponse, Error> + Send + Sync>;
+ Box<dyn Fn(RegionId) -> Result<SetRegionRoleStateResponse, Error> + Send + Sync>;
pub struct MockRegionEngine {
sender: Sender<(RegionId, RegionRequest)>,
@@ -220,14 +221,15 @@ impl RegionEngine for MockRegionEngine {
Ok(())
}
- fn set_writable(&self, _region_id: RegionId, _writable: bool) -> Result<(), BoxedError> {
+ fn set_region_role(&self, _region_id: RegionId, _role: RegionRole) -> Result<(), BoxedError> {
Ok(())
}
- async fn set_readonly_gracefully(
+ async fn set_region_role_state_gracefully(
&self,
region_id: RegionId,
- ) -> Result<SetReadonlyResponse, BoxedError> {
+ _region_role_state: SettableRegionRoleState,
+ ) -> Result<SetRegionRoleStateResponse, BoxedError> {
if let Some(mock_fn) = &self.handle_set_readonly_gracefully_mock_fn {
return mock_fn(region_id).map_err(BoxedError::new);
};
diff --git a/src/file-engine/src/engine.rs b/src/file-engine/src/engine.rs
index 32e1a1d58d0d..e6313f4322cc 100644
--- a/src/file-engine/src/engine.rs
+++ b/src/file-engine/src/engine.rs
@@ -26,8 +26,8 @@ use object_store::ObjectStore;
use snafu::{ensure, OptionExt};
use store_api::metadata::RegionMetadataRef;
use store_api::region_engine::{
- RegionEngine, RegionRole, RegionScannerRef, RegionStatistic, SetReadonlyResponse,
- SinglePartitionScanner,
+ RegionEngine, RegionRole, RegionScannerRef, RegionStatistic, SetRegionRoleStateResponse,
+ SettableRegionRoleState, SinglePartitionScanner,
};
use store_api::region_request::{
AffectedRows, RegionCloseRequest, RegionCreateRequest, RegionDropRequest, RegionOpenRequest,
@@ -113,22 +113,23 @@ impl RegionEngine for FileRegionEngine {
None
}
- fn set_writable(&self, region_id: RegionId, writable: bool) -> Result<(), BoxedError> {
+ fn set_region_role(&self, region_id: RegionId, role: RegionRole) -> Result<(), BoxedError> {
self.inner
- .set_writable(region_id, writable)
+ .set_region_role(region_id, role)
.map_err(BoxedError::new)
}
- async fn set_readonly_gracefully(
+ async fn set_region_role_state_gracefully(
&self,
region_id: RegionId,
- ) -> Result<SetReadonlyResponse, BoxedError> {
+ _region_role_state: SettableRegionRoleState,
+ ) -> Result<SetRegionRoleStateResponse, BoxedError> {
let exists = self.inner.get_region(region_id).await.is_some();
if exists {
- Ok(SetReadonlyResponse::success(None))
+ Ok(SetRegionRoleStateResponse::success(None))
} else {
- Ok(SetReadonlyResponse::NotFound)
+ Ok(SetRegionRoleStateResponse::NotFound)
}
}
@@ -189,7 +190,7 @@ impl EngineInner {
Ok(())
}
- fn set_writable(&self, _region_id: RegionId, _writable: bool) -> EngineResult<()> {
+ fn set_region_role(&self, _region_id: RegionId, _region_role: RegionRole) -> EngineResult<()> {
// TODO(zhongzc): Improve the semantics and implementation of this API.
Ok(())
}
diff --git a/src/meta-srv/src/handler/region_lease_handler.rs b/src/meta-srv/src/handler/region_lease_handler.rs
index 06cf818d237e..de491da37150 100644
--- a/src/meta-srv/src/handler/region_lease_handler.rs
+++ b/src/meta-srv/src/handler/region_lease_handler.rs
@@ -111,7 +111,7 @@ mod test {
use common_meta::kv_backend::memory::MemoryKvBackend;
use common_meta::peer::Peer;
use common_meta::region_keeper::MemoryRegionKeeper;
- use common_meta::rpc::router::{Region, RegionRoute, RegionStatus};
+ use common_meta::rpc::router::{LeaderState, Region, RegionRoute};
use store_api::region_engine::RegionRole;
use store_api::storage::RegionId;
@@ -297,7 +297,7 @@ mod test {
region: Region::new_test(region_id),
leader_peer: Some(peer.clone()),
follower_peers: vec![follower_peer.clone()],
- leader_status: Some(RegionStatus::Downgraded),
+ leader_state: Some(LeaderState::Downgrading),
leader_down_since: Some(1),
},
RegionRoute {
@@ -352,7 +352,7 @@ mod test {
assert_region_lease(
acc,
vec![
- GrantedRegion::new(region_id, RegionRole::Follower),
+ GrantedRegion::new(region_id, RegionRole::DowngradingLeader),
GrantedRegion::new(another_region_id, RegionRole::Leader),
],
);
diff --git a/src/meta-srv/src/procedure/region_migration/downgrade_leader_region.rs b/src/meta-srv/src/procedure/region_migration/downgrade_leader_region.rs
index 836ca4c53212..ec5114b9eb6a 100644
--- a/src/meta-srv/src/procedure/region_migration/downgrade_leader_region.rs
+++ b/src/meta-srv/src/procedure/region_migration/downgrade_leader_region.rs
@@ -22,8 +22,10 @@ use common_meta::instruction::{
};
use common_procedure::Status;
use common_telemetry::{error, info, warn};
+use common_wal::options::WalOptions;
use serde::{Deserialize, Serialize};
use snafu::{OptionExt, ResultExt};
+use store_api::storage::RegionId;
use tokio::time::{sleep, Instant};
use super::update_metadata::UpdateMetadata;
@@ -95,15 +97,32 @@ impl DowngradeLeaderRegion {
&self,
ctx: &Context,
flush_timeout: Duration,
+ reject_write: bool,
) -> Instruction {
let pc = &ctx.persistent_ctx;
let region_id = pc.region_id;
Instruction::DowngradeRegion(DowngradeRegion {
region_id,
flush_timeout: Some(flush_timeout),
+ reject_write,
})
}
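+    /// Returns whether the downgrading leader should reject writes before the downgrade:
+    /// true for regions using the local RaftEngine WAL (or when no WAL options are found),
+    /// false for regions backed by a remote WAL.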
+ async fn should_reject_write(ctx: &mut Context, region_id: RegionId) -> Result<bool> {
+ let datanode_table_value = ctx.get_from_peer_datanode_table_value().await?;
+ if let Some(wal_option) = datanode_table_value
+ .region_info
+ .region_wal_options
+ .get(®ion_id.region_number())
+ {
+ let options: WalOptions = serde_json::from_str(wal_option)
+ .with_context(|_| error::DeserializeFromJsonSnafu { input: wal_option })?;
+ return Ok(matches!(options, WalOptions::RaftEngine));
+ }
+
+ Ok(true)
+ }
+
/// Tries to downgrade a leader region.
///
/// Retry:
@@ -118,16 +137,17 @@ impl DowngradeLeaderRegion {
/// - [ExceededDeadline](error::Error::ExceededDeadline)
/// - Invalid JSON.
async fn downgrade_region(&self, ctx: &mut Context) -> Result<()> {
- let pc = &ctx.persistent_ctx;
- let region_id = pc.region_id;
- let leader = &pc.from_peer;
+ let region_id = ctx.persistent_ctx.region_id;
let operation_timeout =
ctx.next_operation_timeout()
.context(error::ExceededDeadlineSnafu {
operation: "Downgrade region",
})?;
- let downgrade_instruction = self.build_downgrade_region_instruction(ctx, operation_timeout);
+ let reject_write = Self::should_reject_write(ctx, region_id).await?;
+ let downgrade_instruction =
+ self.build_downgrade_region_instruction(ctx, operation_timeout, reject_write);
+ let leader = &ctx.persistent_ctx.from_peer;
let msg = MailboxMessage::json_message(
&format!("Downgrade leader region: {}", region_id),
&format!("Meta@{}", ctx.server_addr()),
@@ -240,8 +260,13 @@ impl DowngradeLeaderRegion {
#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
+ use std::collections::HashMap;
+ use common_meta::key::table_route::TableRouteValue;
+ use common_meta::key::test_utils::new_test_table_info;
use common_meta::peer::Peer;
+ use common_meta::rpc::router::{Region, RegionRoute};
+ use common_wal::options::KafkaWalOptions;
use store_api::storage::RegionId;
use tokio::time::Instant;
@@ -264,19 +289,73 @@ mod tests {
}
}
+ async fn prepare_table_metadata(ctx: &Context, wal_options: HashMap<u32, String>) {
+ let table_info =
+ new_test_table_info(ctx.persistent_ctx.region_id.table_id(), vec![1]).into();
+ let region_routes = vec![RegionRoute {
+ region: Region::new_test(ctx.persistent_ctx.region_id),
+ leader_peer: Some(ctx.persistent_ctx.from_peer.clone()),
+ follower_peers: vec![ctx.persistent_ctx.to_peer.clone()],
+ ..Default::default()
+ }];
+ ctx.table_metadata_manager
+ .create_table_metadata(
+ table_info,
+ TableRouteValue::physical(region_routes),
+ wal_options,
+ )
+ .await
+ .unwrap();
+ }
+
#[tokio::test]
async fn test_datanode_is_unreachable() {
let state = DowngradeLeaderRegion::default();
let persistent_context = new_persistent_context();
let env = TestingEnv::new();
let mut ctx = env.context_factory().new_context(persistent_context);
-
+ prepare_table_metadata(&ctx, HashMap::default()).await;
let err = state.downgrade_region(&mut ctx).await.unwrap_err();
assert_matches!(err, Error::PusherNotFound { .. });
assert!(!err.is_retryable());
}
+ #[tokio::test]
+ async fn test_should_reject_writes() {
+ let persistent_context = new_persistent_context();
+ let region_id = persistent_context.region_id;
+ let env = TestingEnv::new();
+ let mut ctx = env.context_factory().new_context(persistent_context);
+ let wal_options =
+ HashMap::from([(1, serde_json::to_string(&WalOptions::RaftEngine).unwrap())]);
+ prepare_table_metadata(&ctx, wal_options).await;
+
+ let reject_write = DowngradeLeaderRegion::should_reject_write(&mut ctx, region_id)
+ .await
+ .unwrap();
+ assert!(reject_write);
+
+ // Remote WAL
+ let persistent_context = new_persistent_context();
+ let region_id = persistent_context.region_id;
+ let env = TestingEnv::new();
+ let mut ctx = env.context_factory().new_context(persistent_context);
+ let wal_options = HashMap::from([(
+ 1,
+ serde_json::to_string(&WalOptions::Kafka(KafkaWalOptions {
+ topic: "my_topic".to_string(),
+ }))
+ .unwrap(),
+ )]);
+ prepare_table_metadata(&ctx, wal_options).await;
+
+ let reject_write = DowngradeLeaderRegion::should_reject_write(&mut ctx, region_id)
+ .await
+ .unwrap();
+ assert!(!reject_write);
+ }
+
#[tokio::test]
async fn test_pusher_dropped() {
let state = DowngradeLeaderRegion::default();
@@ -285,6 +364,7 @@ mod tests {
let mut env = TestingEnv::new();
let mut ctx = env.context_factory().new_context(persistent_context);
+ prepare_table_metadata(&ctx, HashMap::default()).await;
let mailbox_ctx = env.mailbox_context();
let (tx, rx) = tokio::sync::mpsc::channel(1);
@@ -307,6 +387,7 @@ mod tests {
let persistent_context = new_persistent_context();
let env = TestingEnv::new();
let mut ctx = env.context_factory().new_context(persistent_context);
+ prepare_table_metadata(&ctx, HashMap::default()).await;
ctx.volatile_ctx.operations_elapsed = ctx.persistent_ctx.timeout + Duration::from_secs(1);
let err = state.downgrade_region(&mut ctx).await.unwrap_err();
@@ -330,6 +411,7 @@ mod tests {
let mut env = TestingEnv::new();
let mut ctx = env.context_factory().new_context(persistent_context);
+ prepare_table_metadata(&ctx, HashMap::default()).await;
let mailbox_ctx = env.mailbox_context();
let mailbox = mailbox_ctx.mailbox().clone();
@@ -356,6 +438,7 @@ mod tests {
let mut env = TestingEnv::new();
let mut ctx = env.context_factory().new_context(persistent_context);
+ prepare_table_metadata(&ctx, HashMap::default()).await;
let mailbox_ctx = env.mailbox_context();
let mailbox = mailbox_ctx.mailbox().clone();
@@ -383,6 +466,7 @@ mod tests {
let mut env = TestingEnv::new();
let mut ctx = env.context_factory().new_context(persistent_context);
+ prepare_table_metadata(&ctx, HashMap::default()).await;
let mailbox_ctx = env.mailbox_context();
let mailbox = mailbox_ctx.mailbox().clone();
@@ -416,6 +500,7 @@ mod tests {
let mut env = TestingEnv::new();
let mut ctx = env.context_factory().new_context(persistent_context);
+ prepare_table_metadata(&ctx, HashMap::default()).await;
let mailbox_ctx = env.mailbox_context();
let mailbox = mailbox_ctx.mailbox().clone();
@@ -508,6 +593,7 @@ mod tests {
let mut env = TestingEnv::new();
let mut ctx = env.context_factory().new_context(persistent_context);
+ prepare_table_metadata(&ctx, HashMap::default()).await;
let mailbox_ctx = env.mailbox_context();
let mailbox = mailbox_ctx.mailbox().clone();
diff --git a/src/meta-srv/src/procedure/region_migration/manager.rs b/src/meta-srv/src/procedure/region_migration/manager.rs
index bb3eff80c0b3..01ea887ca9fd 100644
--- a/src/meta-srv/src/procedure/region_migration/manager.rs
+++ b/src/meta-srv/src/procedure/region_migration/manager.rs
@@ -246,7 +246,7 @@ impl RegionMigrationManager {
region_route: &RegionRoute,
task: &RegionMigrationProcedureTask,
) -> Result<bool> {
- if region_route.is_leader_downgraded() {
+ if region_route.is_leader_downgrading() {
return Ok(false);
}
diff --git a/src/meta-srv/src/procedure/region_migration/test_util.rs b/src/meta-srv/src/procedure/region_migration/test_util.rs
index 65e33ab3d99e..cb3b5a3dc3ab 100644
--- a/src/meta-srv/src/procedure/region_migration/test_util.rs
+++ b/src/meta-srv/src/procedure/region_migration/test_util.rs
@@ -449,7 +449,7 @@ impl ProcedureMigrationTestSuite {
.find(|route| route.region.id == region_id)
.unwrap();
- assert!(!region_route.is_leader_downgraded());
+ assert!(!region_route.is_leader_downgrading());
assert_eq!(
region_route.leader_peer.as_ref().unwrap().id,
expected_leader_id
diff --git a/src/meta-srv/src/procedure/region_migration/update_metadata/downgrade_leader_region.rs b/src/meta-srv/src/procedure/region_migration/update_metadata/downgrade_leader_region.rs
index 3b3f6a6c0c3b..d8bad44871d6 100644
--- a/src/meta-srv/src/procedure/region_migration/update_metadata/downgrade_leader_region.rs
+++ b/src/meta-srv/src/procedure/region_migration/update_metadata/downgrade_leader_region.rs
@@ -13,7 +13,7 @@
// limitations under the License.
use common_error::ext::BoxedError;
-use common_meta::rpc::router::RegionStatus;
+use common_meta::rpc::router::LeaderState;
use snafu::ResultExt;
use crate::error::{self, Result};
@@ -53,7 +53,7 @@ impl UpdateMetadata {
.as_ref()
.is_some_and(|leader_peer| leader_peer.id == from_peer_id)
{
- Some(Some(RegionStatus::Downgraded))
+ Some(Some(LeaderState::Downgrading))
} else {
None
}
@@ -81,7 +81,7 @@ mod tests {
use common_meta::key::test_utils::new_test_table_info;
use common_meta::peer::Peer;
- use common_meta::rpc::router::{Region, RegionRoute, RegionStatus};
+ use common_meta::rpc::router::{LeaderState, Region, RegionRoute};
use store_api::storage::RegionId;
use crate::error::Error;
@@ -155,7 +155,7 @@ mod tests {
table_metadata_manager
.update_leader_region_status(table_id, &original_table_route, |route| {
if route.region.id == RegionId::new(1024, 2) {
- Some(Some(RegionStatus::Downgraded))
+ Some(Some(LeaderState::Downgrading))
} else {
None
}
@@ -210,7 +210,7 @@ mod tests {
// It should remain unchanged.
assert_eq!(latest_table_route.version().unwrap(), 0);
- assert!(!latest_table_route.region_routes().unwrap()[0].is_leader_downgraded());
+ assert!(!latest_table_route.region_routes().unwrap()[0].is_leader_downgrading());
assert!(ctx.volatile_ctx.table_route.is_none());
}
@@ -251,7 +251,7 @@ mod tests {
.unwrap()
.unwrap();
- assert!(latest_table_route.region_routes().unwrap()[0].is_leader_downgraded());
+ assert!(latest_table_route.region_routes().unwrap()[0].is_leader_downgrading());
assert!(ctx.volatile_ctx.table_route.is_none());
}
}
diff --git a/src/meta-srv/src/procedure/region_migration/update_metadata/rollback_downgraded_region.rs b/src/meta-srv/src/procedure/region_migration/update_metadata/rollback_downgraded_region.rs
index 4e6f20ef195f..0d568ab7b0bb 100644
--- a/src/meta-srv/src/procedure/region_migration/update_metadata/rollback_downgraded_region.rs
+++ b/src/meta-srv/src/procedure/region_migration/update_metadata/rollback_downgraded_region.rs
@@ -65,7 +65,7 @@ mod tests {
use common_meta::key::test_utils::new_test_table_info;
use common_meta::peer::Peer;
- use common_meta::rpc::router::{Region, RegionRoute, RegionStatus};
+ use common_meta::rpc::router::{LeaderState, Region, RegionRoute};
use store_api::storage::RegionId;
use crate::error::Error;
@@ -110,13 +110,13 @@ mod tests {
RegionRoute {
region: Region::new_test(RegionId::new(1024, 1)),
leader_peer: Some(from_peer.clone()),
- leader_status: Some(RegionStatus::Downgraded),
+ leader_state: Some(LeaderState::Downgrading),
..Default::default()
},
RegionRoute {
region: Region::new_test(RegionId::new(1024, 2)),
leader_peer: Some(Peer::empty(4)),
- leader_status: Some(RegionStatus::Downgraded),
+ leader_state: Some(LeaderState::Downgrading),
..Default::default()
},
RegionRoute {
@@ -128,8 +128,8 @@ mod tests {
let expected_region_routes = {
let mut region_routes = region_routes.clone();
- region_routes[0].leader_status = None;
- region_routes[1].leader_status = None;
+ region_routes[0].leader_state = None;
+ region_routes[1].leader_state = None;
region_routes
};
@@ -207,13 +207,13 @@ mod tests {
RegionRoute {
region: Region::new_test(RegionId::new(1024, 1)),
leader_peer: Some(from_peer.clone()),
- leader_status: Some(RegionStatus::Downgraded),
+ leader_state: Some(LeaderState::Downgrading),
..Default::default()
},
RegionRoute {
region: Region::new_test(RegionId::new(1024, 2)),
leader_peer: Some(Peer::empty(4)),
- leader_status: Some(RegionStatus::Downgraded),
+ leader_state: Some(LeaderState::Downgrading),
..Default::default()
},
RegionRoute {
@@ -225,7 +225,7 @@ mod tests {
let expected_region_routes = {
let mut region_routes = region_routes.clone();
- region_routes[0].leader_status = None;
+ region_routes[0].leader_state = None;
region_routes
};
diff --git a/src/meta-srv/src/procedure/region_migration/update_metadata/upgrade_candidate_region.rs b/src/meta-srv/src/procedure/region_migration/update_metadata/upgrade_candidate_region.rs
index 75f93f760e75..b710a0e1f3e0 100644
--- a/src/meta-srv/src/procedure/region_migration/update_metadata/upgrade_candidate_region.rs
+++ b/src/meta-srv/src/procedure/region_migration/update_metadata/upgrade_candidate_region.rs
@@ -43,7 +43,7 @@ impl UpdateMetadata {
.context(error::RegionRouteNotFoundSnafu { region_id })?;
// Removes downgraded status.
- region_route.set_leader_status(None);
+ region_route.set_leader_state(None);
let candidate = &ctx.persistent_ctx.to_peer;
let expected_old_leader = &ctx.persistent_ctx.from_peer;
@@ -106,7 +106,7 @@ impl UpdateMetadata {
if leader_peer.id == candidate_peer_id {
ensure!(
- !region_route.is_leader_downgraded(),
+ !region_route.is_leader_downgrading(),
error::UnexpectedSnafu {
violated: format!("Unexpected intermediate state is found during the update metadata for upgrading region {region_id}"),
}
@@ -190,7 +190,7 @@ mod tests {
use common_meta::key::test_utils::new_test_table_info;
use common_meta::peer::Peer;
use common_meta::region_keeper::MemoryRegionKeeper;
- use common_meta::rpc::router::{Region, RegionRoute, RegionStatus};
+ use common_meta::rpc::router::{LeaderState, Region, RegionRoute};
use common_time::util::current_time_millis;
use store_api::storage::RegionId;
@@ -286,7 +286,7 @@ mod tests {
region: Region::new_test(RegionId::new(1024, 1)),
leader_peer: Some(Peer::empty(1)),
follower_peers: vec![Peer::empty(2), Peer::empty(3)],
- leader_status: Some(RegionStatus::Downgraded),
+ leader_state: Some(LeaderState::Downgrading),
leader_down_since: Some(current_time_millis()),
}];
@@ -298,7 +298,7 @@ mod tests {
.await
.unwrap();
- assert!(!new_region_routes[0].is_leader_downgraded());
+ assert!(!new_region_routes[0].is_leader_downgrading());
assert!(new_region_routes[0].leader_down_since.is_none());
assert_eq!(new_region_routes[0].follower_peers, vec![Peer::empty(3)]);
assert_eq!(new_region_routes[0].leader_peer.as_ref().unwrap().id, 2);
@@ -319,13 +319,13 @@ mod tests {
region: Region::new_test(RegionId::new(table_id, 1)),
leader_peer: Some(Peer::empty(1)),
follower_peers: vec![Peer::empty(5), Peer::empty(3)],
- leader_status: Some(RegionStatus::Downgraded),
+ leader_state: Some(LeaderState::Downgrading),
leader_down_since: Some(current_time_millis()),
},
RegionRoute {
region: Region::new_test(RegionId::new(table_id, 2)),
leader_peer: Some(Peer::empty(4)),
- leader_status: Some(RegionStatus::Downgraded),
+ leader_state: Some(LeaderState::Downgrading),
..Default::default()
},
];
@@ -382,7 +382,7 @@ mod tests {
region: Region::new_test(RegionId::new(1024, 1)),
leader_peer: Some(leader_peer),
follower_peers: vec![Peer::empty(2), Peer::empty(3)],
- leader_status: None,
+ leader_state: None,
leader_down_since: None,
}];
@@ -406,7 +406,7 @@ mod tests {
region: Region::new_test(RegionId::new(1024, 1)),
leader_peer: Some(candidate_peer),
follower_peers: vec![Peer::empty(2), Peer::empty(3)],
- leader_status: None,
+ leader_state: None,
leader_down_since: None,
}];
@@ -430,7 +430,7 @@ mod tests {
region: Region::new_test(RegionId::new(1024, 1)),
leader_peer: Some(candidate_peer),
follower_peers: vec![Peer::empty(2), Peer::empty(3)],
- leader_status: Some(RegionStatus::Downgraded),
+ leader_state: Some(LeaderState::Downgrading),
leader_down_since: None,
}];
@@ -455,7 +455,7 @@ mod tests {
let region_routes = vec![RegionRoute {
region: Region::new_test(RegionId::new(table_id, 1)),
leader_peer: Some(Peer::empty(1)),
- leader_status: Some(RegionStatus::Downgraded),
+ leader_state: Some(LeaderState::Downgrading),
..Default::default()
}];
@@ -485,7 +485,7 @@ mod tests {
assert!(ctx.volatile_ctx.table_route.is_none());
assert!(ctx.volatile_ctx.opening_region_guard.is_none());
assert_eq!(region_routes.len(), 1);
- assert!(!region_routes[0].is_leader_downgraded());
+ assert!(!region_routes[0].is_leader_downgrading());
assert!(region_routes[0].follower_peers.is_empty());
assert_eq!(region_routes[0].leader_peer.as_ref().unwrap().id, 2);
}
diff --git a/src/meta-srv/src/region/lease_keeper.rs b/src/meta-srv/src/region/lease_keeper.rs
index a1065d4cbbc2..194f3710c853 100644
--- a/src/meta-srv/src/region/lease_keeper.rs
+++ b/src/meta-srv/src/region/lease_keeper.rs
@@ -62,8 +62,8 @@ fn renew_region_lease_via_region_route(
// If it's a leader region on this datanode.
    if let Some(leader) = &region_route.leader_peer {
if leader.id == datanode_id {
- let region_role = if region_route.is_leader_downgraded() {
- RegionRole::Follower
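+            // A downgrading leader is granted a special lease: it keeps the region open but rejects further writes.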
+ let region_role = if region_route.is_leader_downgrading() {
+ RegionRole::DowngradingLeader
} else {
RegionRole::Leader
};
@@ -220,7 +220,7 @@ mod tests {
use common_meta::kv_backend::memory::MemoryKvBackend;
use common_meta::peer::Peer;
use common_meta::region_keeper::MemoryRegionKeeper;
- use common_meta::rpc::router::{Region, RegionRouteBuilder, RegionStatus};
+ use common_meta::rpc::router::{LeaderState, Region, RegionRouteBuilder};
use store_api::region_engine::RegionRole;
use store_api::storage::RegionId;
use table::metadata::RawTableInfo;
@@ -265,11 +265,11 @@ mod tests {
Some((region_id, RegionRole::Follower))
);
- region_route.leader_status = Some(RegionStatus::Downgraded);
+ region_route.leader_state = Some(LeaderState::Downgrading);
// The downgraded leader region on the datanode.
assert_eq!(
            renew_region_lease_via_region_route(&region_route, leader_peer_id, region_id),
- Some((region_id, RegionRole::Follower))
+ Some((region_id, RegionRole::DowngradingLeader))
);
}
@@ -492,7 +492,7 @@ mod tests {
.region(Region::new_test(region_id))
.leader_peer(Peer::empty(leader_peer_id))
.follower_peers(vec![Peer::empty(follower_peer_id)])
- .leader_status(RegionStatus::Downgraded)
+ .leader_state(LeaderState::Downgrading)
.build()
.unwrap();
diff --git a/src/meta-srv/src/test_util.rs b/src/meta-srv/src/test_util.rs
index 576d1aa92365..6b9ccc99a0fa 100644
--- a/src/meta-srv/src/test_util.rs
+++ b/src/meta-srv/src/test_util.rs
@@ -36,7 +36,7 @@ pub(crate) fn new_region_route(region_id: u64, peers: &[Peer], leader_node: u64)
region,
leader_peer,
follower_peers: vec![],
- leader_status: None,
+ leader_state: None,
leader_down_since: None,
}
}
diff --git a/src/metric-engine/src/engine.rs b/src/metric-engine/src/engine.rs
index 358da1d2167a..42948aa6cd7d 100644
--- a/src/metric-engine/src/engine.rs
+++ b/src/metric-engine/src/engine.rs
@@ -37,7 +37,8 @@ use snafu::ResultExt;
use store_api::metadata::RegionMetadataRef;
use store_api::metric_engine_consts::METRIC_ENGINE_NAME;
use store_api::region_engine::{
- RegionEngine, RegionRole, RegionScannerRef, RegionStatistic, SetReadonlyResponse,
+ RegionEngine, RegionRole, RegionScannerRef, RegionStatistic, SetRegionRoleStateResponse,
+ SettableRegionRoleState,
};
use store_api::region_request::RegionRequest;
use store_api::storage::{RegionId, ScanRequest};
@@ -201,14 +202,14 @@ impl RegionEngine for MetricEngine {
Ok(())
}
- fn set_writable(&self, region_id: RegionId, writable: bool) -> Result<(), BoxedError> {
+ fn set_region_role(&self, region_id: RegionId, role: RegionRole) -> Result<(), BoxedError> {
// ignore the region not found error
for x in [
utils::to_metadata_region_id(region_id),
utils::to_data_region_id(region_id),
region_id,
] {
- if let Err(e) = self.inner.mito.set_writable(x, writable)
+ if let Err(e) = self.inner.mito.set_region_role(x, role)
&& e.status_code() != StatusCode::RegionNotFound
{
return Err(e);
@@ -217,11 +218,15 @@ impl RegionEngine for MetricEngine {
Ok(())
}
- async fn set_readonly_gracefully(
+ async fn set_region_role_state_gracefully(
&self,
region_id: RegionId,
- ) -> std::result::Result<SetReadonlyResponse, BoxedError> {
- self.inner.mito.set_readonly_gracefully(region_id).await
+ region_role_state: SettableRegionRoleState,
+ ) -> std::result::Result<SetRegionRoleStateResponse, BoxedError> {
+ self.inner
+ .mito
+ .set_region_role_state_gracefully(region_id, region_role_state)
+ .await
}
/// Returns the physical region role.
diff --git a/src/mito2/src/compaction/compactor.rs b/src/mito2/src/compaction/compactor.rs
index 12b9dd5fefb6..d919633ba964 100644
--- a/src/mito2/src/compaction/compactor.rs
+++ b/src/mito2/src/compaction/compactor.rs
@@ -39,8 +39,7 @@ use crate::read::Source;
use crate::region::opener::new_manifest_dir;
use crate::region::options::RegionOptions;
use crate::region::version::{VersionBuilder, VersionRef};
-use crate::region::ManifestContext;
-use crate::region::RegionState::Writable;
+use crate::region::{ManifestContext, RegionLeaderState, RegionRoleState};
use crate::schedule::scheduler::LocalScheduler;
use crate::sst::file::{FileMeta, IndexType};
use crate::sst::file_purger::LocalFilePurger;
@@ -129,7 +128,10 @@ pub async fn open_compaction_region(
let manifest = manifest_manager.manifest();
let region_metadata = manifest.metadata.clone();
- let manifest_ctx = Arc::new(ManifestContext::new(manifest_manager, Writable));
+ let manifest_ctx = Arc::new(ManifestContext::new(
+ manifest_manager,
+ RegionRoleState::Leader(RegionLeaderState::Writable),
+ ));
let file_purger = {
let purge_scheduler = Arc::new(LocalScheduler::new(mito_config.max_background_jobs));
@@ -379,7 +381,7 @@ impl Compactor for DefaultCompactor {
// TODO: We might leak files if we fail to update manifest. We can add a cleanup task to remove them later.
compaction_region
.manifest_ctx
- .update_manifest(Writable, action_list)
+ .update_manifest(RegionLeaderState::Writable, action_list)
.await?;
Ok(edit)
diff --git a/src/mito2/src/engine.rs b/src/mito2/src/engine.rs
index e9177d40bf24..ed8cc9290906 100644
--- a/src/mito2/src/engine.rs
+++ b/src/mito2/src/engine.rs
@@ -53,7 +53,7 @@ mod prune_test;
#[cfg(test)]
mod row_selector_test;
#[cfg(test)]
-mod set_readonly_test;
+mod set_role_state_test;
#[cfg(test)]
mod truncate_test;
@@ -77,7 +77,7 @@ use store_api::logstore::LogStore;
use store_api::metadata::RegionMetadataRef;
use store_api::region_engine::{
BatchResponses, RegionEngine, RegionRole, RegionScannerRef, RegionStatistic,
- SetReadonlyResponse,
+ SetRegionRoleStateResponse, SettableRegionRoleState,
};
use store_api::region_request::{AffectedRows, RegionOpenRequest, RegionRequest};
use store_api::storage::{RegionId, ScanRequest};
@@ -436,22 +436,27 @@ impl EngineInner {
Ok(scan_region)
}
- /// Set writable mode for a region.
- fn set_writable(&self, region_id: RegionId, writable: bool) -> Result<()> {
+    /// Sets the [`RegionRole`] of the region.
+ fn set_region_role(&self, region_id: RegionId, role: RegionRole) -> Result<()> {
let region = self
.workers
.get_region(region_id)
.context(RegionNotFoundSnafu { region_id })?;
- region.set_writable(writable);
+ region.set_role(role);
Ok(())
}
/// Sets read-only for a region and ensures no more writes in the region after it returns.
- async fn set_readonly_gracefully(&self, region_id: RegionId) -> Result<SetReadonlyResponse> {
+ async fn set_region_role_state_gracefully(
+ &self,
+ region_id: RegionId,
+ region_role_state: SettableRegionRoleState,
+ ) -> Result<SetRegionRoleStateResponse> {
// Notes: It acquires the mutable ownership to ensure no other threads,
// Therefore, we submit it to the worker.
- let (request, receiver) = WorkerRequest::new_set_readonly_gracefully(region_id);
+ let (request, receiver) =
+ WorkerRequest::new_set_readonly_gracefully(region_id, region_role_state);
self.workers.submit_to_worker(region_id, request).await?;
receiver.await.context(RecvSnafu)
@@ -459,7 +464,7 @@ impl EngineInner {
fn role(&self, region_id: RegionId) -> Option<RegionRole> {
self.workers.get_region(region_id).map(|region| {
- if region.is_readonly() {
+ if region.is_follower() {
RegionRole::Follower
} else {
RegionRole::Leader
@@ -547,22 +552,23 @@ impl RegionEngine for MitoEngine {
self.get_region_statistic(region_id)
}
- fn set_writable(&self, region_id: RegionId, writable: bool) -> Result<(), BoxedError> {
+ fn set_region_role(&self, region_id: RegionId, role: RegionRole) -> Result<(), BoxedError> {
self.inner
- .set_writable(region_id, writable)
+ .set_region_role(region_id, role)
.map_err(BoxedError::new)
}
- async fn set_readonly_gracefully(
+ async fn set_region_role_state_gracefully(
&self,
region_id: RegionId,
- ) -> Result<SetReadonlyResponse, BoxedError> {
+ region_role_state: SettableRegionRoleState,
+ ) -> Result<SetRegionRoleStateResponse, BoxedError> {
let _timer = HANDLE_REQUEST_ELAPSED
- .with_label_values(&["set_readonly_gracefully"])
+ .with_label_values(&["set_region_role_state_gracefully"])
.start_timer();
self.inner
- .set_readonly_gracefully(region_id)
+ .set_region_role_state_gracefully(region_id, region_role_state)
.await
.map_err(BoxedError::new)
}
diff --git a/src/mito2/src/engine/alter_test.rs b/src/mito2/src/engine/alter_test.rs
index b48dc2ccfb08..2e75bf19faa0 100644
--- a/src/mito2/src/engine/alter_test.rs
+++ b/src/mito2/src/engine/alter_test.rs
@@ -24,7 +24,7 @@ use common_recordbatch::RecordBatches;
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::ColumnSchema;
use store_api::metadata::ColumnMetadata;
-use store_api::region_engine::RegionEngine;
+use store_api::region_engine::{RegionEngine, RegionRole};
use store_api::region_request::{
AddColumn, AddColumnLocation, AlterKind, RegionAlterRequest, RegionOpenRequest, RegionRequest,
};
@@ -213,8 +213,10 @@ async fn test_put_after_alter() {
)
.await
.unwrap();
- // Set writable.
- engine.set_writable(region_id, true).unwrap();
+ // Convert region to leader.
+ engine
+ .set_region_role(region_id, RegionRole::Leader)
+ .unwrap();
// Put with old schema.
let rows = Rows {
diff --git a/src/mito2/src/engine/catchup_test.rs b/src/mito2/src/engine/catchup_test.rs
index 5f4dd3b15acf..a9de0d6008ff 100644
--- a/src/mito2/src/engine/catchup_test.rs
+++ b/src/mito2/src/engine/catchup_test.rs
@@ -22,7 +22,7 @@ use common_recordbatch::RecordBatches;
use common_wal::options::{KafkaWalOptions, WalOptions, WAL_OPTIONS_KEY};
use rstest::rstest;
use rstest_reuse::{self, apply};
-use store_api::region_engine::{RegionEngine, SetReadonlyResponse};
+use store_api::region_engine::{RegionEngine, RegionRole, SetRegionRoleStateResponse};
use store_api::region_request::{RegionCatchupRequest, RegionOpenRequest, RegionRequest};
use store_api::storage::{RegionId, ScanRequest};
@@ -34,8 +34,8 @@ use crate::test_util::{
};
use crate::wal::EntryId;
-fn get_last_entry_id(resp: SetReadonlyResponse) -> Option<EntryId> {
- if let SetReadonlyResponse::Success { last_entry_id } = resp {
+fn get_last_entry_id(resp: SetRegionRoleStateResponse) -> Option<EntryId> {
+ if let SetRegionRoleStateResponse::Success { last_entry_id } = resp {
last_entry_id
} else {
unreachable!();
@@ -45,6 +45,8 @@ fn get_last_entry_id(resp: SetReadonlyResponse) -> Option<EntryId> {
#[apply(single_kafka_log_store_factory)]
async fn test_catchup_with_last_entry_id(factory: Option<LogStoreFactory>) {
+ use store_api::region_engine::SettableRegionRoleState;
+
common_telemetry::init_default_ut_logging();
let Some(factory) = factory else {
return;
@@ -102,7 +104,7 @@ async fn test_catchup_with_last_entry_id(factory: Option<LogStoreFactory>) {
put_rows(&leader_engine, region_id, rows).await;
let resp = leader_engine
- .set_readonly_gracefully(region_id)
+ .set_region_role_state_gracefully(region_id, SettableRegionRoleState::Follower)
.await
.unwrap();
@@ -159,6 +161,8 @@ async fn test_catchup_with_last_entry_id(factory: Option<LogStoreFactory>) {
#[apply(single_kafka_log_store_factory)]
async fn test_catchup_with_incorrect_last_entry_id(factory: Option<LogStoreFactory>) {
+ use store_api::region_engine::SettableRegionRoleState;
+
common_telemetry::init_default_ut_logging();
let Some(factory) = factory else {
return;
@@ -217,7 +221,7 @@ async fn test_catchup_with_incorrect_last_entry_id(factory: Option<LogStoreFacto
put_rows(&leader_engine, region_id, rows).await;
let resp = leader_engine
- .set_readonly_gracefully(region_id)
+ .set_region_role_state_gracefully(region_id, SettableRegionRoleState::Follower)
.await
.unwrap();
@@ -243,7 +247,7 @@ async fn test_catchup_with_incorrect_last_entry_id(factory: Option<LogStoreFacto
assert_matches!(err, error::Error::UnexpectedReplay { .. });
// It should ignore requests to writable regions.
- region.set_writable(true);
+ region.set_role(RegionRole::Leader);
let resp = follower_engine
.handle_request(
region_id,
diff --git a/src/mito2/src/engine/compaction_test.rs b/src/mito2/src/engine/compaction_test.rs
index e19f95088c46..7951331b20d9 100644
--- a/src/mito2/src/engine/compaction_test.rs
+++ b/src/mito2/src/engine/compaction_test.rs
@@ -19,7 +19,7 @@ use api::v1::{ColumnSchema, Rows};
use common_recordbatch::{RecordBatches, SendableRecordBatchStream};
use datatypes::prelude::ScalarVector;
use datatypes::vectors::TimestampMillisecondVector;
-use store_api::region_engine::RegionEngine;
+use store_api::region_engine::{RegionEngine, RegionRole};
use store_api::region_request::{
RegionCompactRequest, RegionDeleteRequest, RegionFlushRequest, RegionRequest,
};
@@ -302,8 +302,10 @@ async fn test_readonly_during_compaction() {
// Waits until the engine receives compaction finished request.
listener.wait_handle_finished().await;
- // Sets the region to read only mode.
- engine.set_writable(region_id, false).unwrap();
+ // Converts region to follower.
+ engine
+ .set_region_role(region_id, RegionRole::Follower)
+ .unwrap();
// Wakes up the listener.
listener.wake();
diff --git a/src/mito2/src/engine/open_test.rs b/src/mito2/src/engine/open_test.rs
index 9e9a9e3486c0..c7ad47535c81 100644
--- a/src/mito2/src/engine/open_test.rs
+++ b/src/mito2/src/engine/open_test.rs
@@ -53,7 +53,9 @@ async fn test_engine_open_empty() {
.await
.unwrap_err();
assert_eq!(StatusCode::RegionNotFound, err.status_code());
- let err = engine.set_writable(region_id, true).unwrap_err();
+ let err = engine
+ .set_region_role(region_id, RegionRole::Leader)
+ .unwrap_err();
assert_eq!(StatusCode::RegionNotFound, err.status_code());
let role = engine.role(region_id);
assert_eq!(role, None);
@@ -134,8 +136,10 @@ async fn test_engine_open_readonly() {
assert_eq!(StatusCode::RegionNotReady, err.status_code());
assert_eq!(Some(RegionRole::Follower), engine.role(region_id));
- // Set writable and write.
- engine.set_writable(region_id, true).unwrap();
+ // Converts region to leader.
+ engine
+ .set_region_role(region_id, RegionRole::Leader)
+ .unwrap();
assert_eq!(Some(RegionRole::Leader), engine.role(region_id));
put_rows(&engine, region_id, rows).await;
diff --git a/src/mito2/src/engine/set_readonly_test.rs b/src/mito2/src/engine/set_readonly_test.rs
deleted file mode 100644
index 9de3f0a83288..000000000000
--- a/src/mito2/src/engine/set_readonly_test.rs
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use api::v1::Rows;
-use common_error::ext::ErrorExt;
-use common_error::status_code::StatusCode;
-use store_api::region_engine::{RegionEngine, SetReadonlyResponse};
-use store_api::region_request::{RegionPutRequest, RegionRequest};
-use store_api::storage::RegionId;
-
-use crate::config::MitoConfig;
-use crate::test_util::{build_rows, put_rows, rows_schema, CreateRequestBuilder, TestEnv};
-
-#[tokio::test]
-async fn test_set_readonly_gracefully() {
- let mut env = TestEnv::new();
- let engine = env.create_engine(MitoConfig::default()).await;
-
- let region_id = RegionId::new(1, 1);
- let request = CreateRequestBuilder::new().build();
-
- let column_schemas = rows_schema(&request);
- engine
- .handle_request(region_id, RegionRequest::Create(request))
- .await
- .unwrap();
-
- let result = engine.set_readonly_gracefully(region_id).await.unwrap();
- assert_eq!(
- SetReadonlyResponse::Success {
- last_entry_id: Some(0)
- },
- result
- );
-
- // set readonly again.
- let result = engine.set_readonly_gracefully(region_id).await.unwrap();
- assert_eq!(
- SetReadonlyResponse::Success {
- last_entry_id: Some(0)
- },
- result
- );
-
- let rows = Rows {
- schema: column_schemas,
- rows: build_rows(0, 3),
- };
-
- let error = engine
- .handle_request(
- region_id,
- RegionRequest::Put(RegionPutRequest { rows: rows.clone() }),
- )
- .await
- .unwrap_err();
-
- assert_eq!(error.status_code(), StatusCode::RegionNotReady);
-
- engine.set_writable(region_id, true).unwrap();
-
- put_rows(&engine, region_id, rows).await;
-
- let result = engine.set_readonly_gracefully(region_id).await.unwrap();
-
- assert_eq!(
- SetReadonlyResponse::Success {
- last_entry_id: Some(1)
- },
- result
- );
-}
-
-#[tokio::test]
-async fn test_set_readonly_gracefully_not_exist() {
- let mut env = TestEnv::new();
- let engine = env.create_engine(MitoConfig::default()).await;
-
- let non_exist_region_id = RegionId::new(1, 1);
-
- // For fast-path.
- let result = engine
- .set_readonly_gracefully(non_exist_region_id)
- .await
- .unwrap();
- assert_eq!(SetReadonlyResponse::NotFound, result);
-}
diff --git a/src/mito2/src/engine/set_role_state_test.rs b/src/mito2/src/engine/set_role_state_test.rs
new file mode 100644
index 000000000000..1d7a46f43647
--- /dev/null
+++ b/src/mito2/src/engine/set_role_state_test.rs
@@ -0,0 +1,159 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use api::v1::Rows;
+use common_error::ext::ErrorExt;
+use common_error::status_code::StatusCode;
+use store_api::region_engine::{
+ RegionEngine, RegionRole, SetRegionRoleStateResponse, SettableRegionRoleState,
+};
+use store_api::region_request::{RegionPutRequest, RegionRequest};
+use store_api::storage::RegionId;
+
+use crate::config::MitoConfig;
+use crate::test_util::{build_rows, put_rows, rows_schema, CreateRequestBuilder, TestEnv};
+
+#[tokio::test]
+async fn test_set_role_state_gracefully() {
+ let settable_role_states = [
+ SettableRegionRoleState::Follower,
+ SettableRegionRoleState::DowngradingLeader,
+ ];
+ for settable_role_state in settable_role_states {
+ let mut env = TestEnv::new();
+ let engine = env.create_engine(MitoConfig::default()).await;
+
+ let region_id = RegionId::new(1, 1);
+ let request = CreateRequestBuilder::new().build();
+
+ let column_schemas = rows_schema(&request);
+ engine
+ .handle_request(region_id, RegionRequest::Create(request))
+ .await
+ .unwrap();
+
+ let result = engine
+ .set_region_role_state_gracefully(region_id, settable_role_state)
+ .await
+ .unwrap();
+ assert_eq!(
+ SetRegionRoleStateResponse::Success {
+ last_entry_id: Some(0)
+ },
+ result
+ );
+
+ // set Follower again.
+ let result = engine
+ .set_region_role_state_gracefully(region_id, settable_role_state)
+ .await
+ .unwrap();
+ assert_eq!(
+ SetRegionRoleStateResponse::Success {
+ last_entry_id: Some(0)
+ },
+ result
+ );
+
+ let rows = Rows {
+ schema: column_schemas,
+ rows: build_rows(0, 3),
+ };
+
+ let error = engine
+ .handle_request(
+ region_id,
+ RegionRequest::Put(RegionPutRequest { rows: rows.clone() }),
+ )
+ .await
+ .unwrap_err();
+
+ assert_eq!(error.status_code(), StatusCode::RegionNotReady);
+
+ engine
+ .set_region_role(region_id, RegionRole::Leader)
+ .unwrap();
+
+ put_rows(&engine, region_id, rows).await;
+
+ let result = engine
+ .set_region_role_state_gracefully(region_id, settable_role_state)
+ .await
+ .unwrap();
+
+ assert_eq!(
+ SetRegionRoleStateResponse::Success {
+ last_entry_id: Some(1)
+ },
+ result
+ );
+ }
+}
+
+#[tokio::test]
+async fn test_set_role_state_gracefully_not_exist() {
+ let mut env = TestEnv::new();
+ let engine = env.create_engine(MitoConfig::default()).await;
+
+ let non_exist_region_id = RegionId::new(1, 1);
+
+ // For fast-path.
+ let result = engine
+ .set_region_role_state_gracefully(non_exist_region_id, SettableRegionRoleState::Follower)
+ .await
+ .unwrap();
+ assert_eq!(SetRegionRoleStateResponse::NotFound, result);
+}
+
+#[tokio::test]
+async fn test_write_downgrading_region() {
+ let mut env = TestEnv::with_prefix("write-to-downgrading-region");
+ let engine = env.create_engine(MitoConfig::default()).await;
+
+ let region_id = RegionId::new(1, 1);
+ let request = CreateRequestBuilder::new().build();
+
+ let column_schemas = rows_schema(&request);
+ engine
+ .handle_request(region_id, RegionRequest::Create(request))
+ .await
+ .unwrap();
+
+ let rows = Rows {
+ schema: column_schemas.clone(),
+ rows: build_rows(0, 42),
+ };
+ put_rows(&engine, region_id, rows).await;
+
+ let result = engine
+ .set_region_role_state_gracefully(region_id, SettableRegionRoleState::DowngradingLeader)
+ .await
+ .unwrap();
+ assert_eq!(
+ SetRegionRoleStateResponse::Success {
+ last_entry_id: Some(1)
+ },
+ result
+ );
+
+ let rows = Rows {
+ schema: column_schemas,
+ rows: build_rows(0, 42),
+ };
+ let err = engine
+ .handle_request(region_id, RegionRequest::Put(RegionPutRequest { rows }))
+ .await
+ .unwrap_err();
+ assert_eq!(err.status_code(), StatusCode::RegionNotReady)
+}
diff --git a/src/mito2/src/error.rs b/src/mito2/src/error.rs
index 2d045e8c579a..8aa799cbb913 100644
--- a/src/mito2/src/error.rs
+++ b/src/mito2/src/error.rs
@@ -32,7 +32,7 @@ use store_api::manifest::ManifestVersion;
use store_api::storage::RegionId;
use crate::cache::file_cache::FileType;
-use crate::region::RegionState;
+use crate::region::{RegionLeaderState, RegionRoleState};
use crate::schedule::remote_job_scheduler::JobId;
use crate::sst::file::FileId;
use crate::worker::WorkerId;
@@ -483,10 +483,22 @@ pub enum Error {
},
#[snafu(display("Region {} is in {:?} state, expect: {:?}", region_id, state, expect))]
- RegionState {
+ RegionLeaderState {
region_id: RegionId,
- state: RegionState,
- expect: RegionState,
+ state: RegionRoleState,
+ expect: RegionLeaderState,
+ #[snafu(implicit)]
+ location: Location,
+ },
+
+ #[snafu(display(
+ "Region {} is in {:?} state, expect: Leader or Leader(Downgrading)",
+ region_id,
+ state
+ ))]
+ FlushableRegionState {
+ region_id: RegionId,
+ state: RegionRoleState,
#[snafu(implicit)]
location: Location,
},
@@ -954,7 +966,8 @@ impl ErrorExt for Error {
CompactRegion { source, .. } => source.status_code(),
CompatReader { .. } => StatusCode::Unexpected,
InvalidRegionRequest { source, .. } => source.status_code(),
- RegionState { .. } => StatusCode::RegionNotReady,
+ RegionLeaderState { .. } => StatusCode::RegionNotReady,
+ &FlushableRegionState { .. } => StatusCode::RegionNotReady,
JsonOptions { .. } => StatusCode::InvalidArguments,
EmptyRegionDir { .. } | EmptyManifestDir { .. } => StatusCode::RegionNotFound,
ArrowReader { .. } => StatusCode::StorageUnavailable,
diff --git a/src/mito2/src/flush.rs b/src/mito2/src/flush.rs
index 05561b6080ff..9606e92d04db 100644
--- a/src/mito2/src/flush.rs
+++ b/src/mito2/src/flush.rs
@@ -36,7 +36,7 @@ use crate::metrics::{FLUSH_BYTES_TOTAL, FLUSH_ELAPSED, FLUSH_ERRORS_TOTAL, FLUSH
use crate::read::Source;
use crate::region::options::IndexOptions;
use crate::region::version::{VersionControlData, VersionControlRef};
-use crate::region::{ManifestContextRef, RegionState};
+use crate::region::{ManifestContextRef, RegionLeaderState};
use crate::request::{
BackgroundNotify, FlushFailed, FlushFinished, OptionOutputTx, OutputTx, SenderDdlRequest,
SenderWriteRequest, WorkerRequest,
@@ -195,6 +195,8 @@ pub enum FlushReason {
Alter,
/// Flush periodically.
Periodically,
+ /// Flush memtable during downgrading state.
+ Downgrading,
}
impl FlushReason {
@@ -407,11 +409,23 @@ impl RegionFlushTask {
info!("Applying {edit:?} to region {}", self.region_id);
let action_list = RegionMetaActionList::with_action(RegionMetaAction::Edit(edit.clone()));
+
+ let expected_state = if matches!(self.reason, FlushReason::Downgrading) {
+ RegionLeaderState::Downgrading
+ } else {
+ RegionLeaderState::Writable
+ };
// We will leak files if the manifest update fails, but we ignore them for simplicity. We can
// add a cleanup job to remove them later.
- self.manifest_ctx
- .update_manifest(RegionState::Writable, action_list)
+ let version = self
+ .manifest_ctx
+ .update_manifest(expected_state, action_list)
.await?;
+ info!(
+ "Successfully update manifest version to {version}, region: {}, reason: {}",
+ self.region_id,
+ self.reason.as_str()
+ );
Ok(edit)
}
diff --git a/src/mito2/src/region.rs b/src/mito2/src/region.rs
index 8fc9095ae5fb..b05daf3da076 100644
--- a/src/mito2/src/region.rs
+++ b/src/mito2/src/region.rs
@@ -27,12 +27,16 @@ use common_telemetry::{error, info, warn};
use crossbeam_utils::atomic::AtomicCell;
use snafu::{ensure, OptionExt};
use store_api::logstore::provider::Provider;
+use store_api::manifest::ManifestVersion;
use store_api::metadata::RegionMetadataRef;
-use store_api::region_engine::RegionStatistic;
+use store_api::region_engine::{RegionRole, RegionStatistic, SettableRegionRoleState};
use store_api::storage::RegionId;
use crate::access_layer::AccessLayerRef;
-use crate::error::{RegionNotFoundSnafu, RegionStateSnafu, RegionTruncatedSnafu, Result};
+use crate::error::{
+ FlushableRegionStateSnafu, RegionLeaderStateSnafu, RegionNotFoundSnafu, RegionTruncatedSnafu,
+ Result,
+};
use crate::manifest::action::{RegionMetaAction, RegionMetaActionList};
use crate::manifest::manager::RegionManifestManager;
use crate::memtable::MemtableBuilderRef;
@@ -59,11 +63,8 @@ impl RegionUsage {
}
}
-/// State of the region.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
-pub enum RegionState {
- /// The region is opened but is still read-only.
- ReadOnly,
+pub enum RegionLeaderState {
/// The region is opened and is writable.
Writable,
/// The region is altering.
@@ -74,6 +75,14 @@ pub enum RegionState {
Truncating,
/// The region is handling a region edit.
Editing,
+ /// The region is stepping down.
+ Downgrading,
+}
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum RegionRoleState {
+ Leader(RegionLeaderState),
+ Follower,
}
/// Metadata and runtime status of a region.
@@ -172,83 +181,91 @@ impl MitoRegion {
/// Returns whether the region is writable.
pub(crate) fn is_writable(&self) -> bool {
- self.manifest_ctx.state.load() == RegionState::Writable
+ self.manifest_ctx.state.load() == RegionRoleState::Leader(RegionLeaderState::Writable)
+ }
+
+ /// Returns whether the region is flushable.
+ pub(crate) fn is_flushable(&self) -> bool {
+ matches!(
+ self.manifest_ctx.state.load(),
+ RegionRoleState::Leader(RegionLeaderState::Writable)
+ | RegionRoleState::Leader(RegionLeaderState::Downgrading)
+ )
+ }
+
+ /// Returns whether the region is downgrading.
+ pub(crate) fn is_downgrading(&self) -> bool {
+ matches!(
+ self.manifest_ctx.state.load(),
+ RegionRoleState::Leader(RegionLeaderState::Downgrading)
+ )
}
/// Returns whether the region is readonly.
- pub(crate) fn is_readonly(&self) -> bool {
- self.manifest_ctx.state.load() == RegionState::ReadOnly
+ pub(crate) fn is_follower(&self) -> bool {
+ self.manifest_ctx.state.load() == RegionRoleState::Follower
}
/// Returns the state of the region.
- pub(crate) fn state(&self) -> RegionState {
+ pub(crate) fn state(&self) -> RegionRoleState {
self.manifest_ctx.state.load()
}
- /// Sets the writable state.
- pub(crate) fn set_writable(&self, writable: bool) {
- if writable {
- // Only sets the region to writable if it is read only.
- // This prevents others updating the manifest.
- match self
- .manifest_ctx
- .state
- .compare_exchange(RegionState::ReadOnly, RegionState::Writable)
- {
- Ok(state) => info!(
- "Set region {} to writable, previous state: {:?}",
- self.region_id, state
- ),
- Err(state) => {
- if state != RegionState::Writable {
- warn!(
- "Failed to set region {} to writable, current state: {:?}",
- self.region_id, state
- )
- }
- }
- }
- } else {
- self.manifest_ctx.state.store(RegionState::ReadOnly);
- }
+ /// Sets the region role state.
+ pub(crate) fn set_role(&self, next_role: RegionRole) {
+ self.manifest_ctx.set_role(next_role, self.region_id);
}
/// Sets the altering state.
/// You should call this method in the worker loop.
pub(crate) fn set_altering(&self) -> Result<()> {
- self.compare_exchange_state(RegionState::Writable, RegionState::Altering)
+ self.compare_exchange_state(
+ RegionLeaderState::Writable,
+ RegionRoleState::Leader(RegionLeaderState::Altering),
+ )
}
/// Sets the dropping state.
/// You should call this method in the worker loop.
pub(crate) fn set_dropping(&self) -> Result<()> {
- self.compare_exchange_state(RegionState::Writable, RegionState::Dropping)
+ self.compare_exchange_state(
+ RegionLeaderState::Writable,
+ RegionRoleState::Leader(RegionLeaderState::Dropping),
+ )
}
/// Sets the truncating state.
/// You should call this method in the worker loop.
pub(crate) fn set_truncating(&self) -> Result<()> {
- self.compare_exchange_state(RegionState::Writable, RegionState::Truncating)
+ self.compare_exchange_state(
+ RegionLeaderState::Writable,
+ RegionRoleState::Leader(RegionLeaderState::Truncating),
+ )
}
/// Sets the editing state.
/// You should call this method in the worker loop.
pub(crate) fn set_editing(&self) -> Result<()> {
- self.compare_exchange_state(RegionState::Writable, RegionState::Editing)
+ self.compare_exchange_state(
+ RegionLeaderState::Writable,
+ RegionRoleState::Leader(RegionLeaderState::Editing),
+ )
}
/// Sets the region to readonly gracefully. This acquires the manifest write lock.
- pub(crate) async fn set_readonly_gracefully(&self) {
+ pub(crate) async fn set_role_state_gracefully(&self, state: SettableRegionRoleState) {
let _manager = self.manifest_ctx.manifest_manager.write().await;
// We acquires the write lock of the manifest manager to ensure that no one is updating the manifest.
// Then we change the state.
- self.set_writable(false);
+ self.set_role(state.into());
}
- /// Switches the region state to `RegionState::Writable` if the current state is `expect`.
+ /// Switches the region state to `RegionRoleState::Leader(RegionLeaderState::Writable)` if the current state is `expect`.
/// Otherwise, logs an error.
- pub(crate) fn switch_state_to_writable(&self, expect: RegionState) {
- if let Err(e) = self.compare_exchange_state(expect, RegionState::Writable) {
+ pub(crate) fn switch_state_to_writable(&self, expect: RegionLeaderState) {
+ if let Err(e) = self
+ .compare_exchange_state(expect, RegionRoleState::Leader(RegionLeaderState::Writable))
+ {
error!(e; "failed to switch region state to writable, expect state is {:?}", expect);
}
}
@@ -280,12 +297,16 @@ impl MitoRegion {
/// Sets the state of the region to given state if the current state equals to
/// the expected.
- fn compare_exchange_state(&self, expect: RegionState, state: RegionState) -> Result<()> {
+ fn compare_exchange_state(
+ &self,
+ expect: RegionLeaderState,
+ state: RegionRoleState,
+ ) -> Result<()> {
self.manifest_ctx
.state
- .compare_exchange(expect, state)
+ .compare_exchange(RegionRoleState::Leader(expect), state)
.map_err(|actual| {
- RegionStateSnafu {
+ RegionLeaderStateSnafu {
region_id: self.region_id,
state: actual,
expect,
@@ -303,17 +324,25 @@ pub(crate) struct ManifestContext {
manifest_manager: tokio::sync::RwLock<RegionManifestManager>,
/// The state of the region. The region checks the state before updating
/// manifest.
- state: AtomicCell<RegionState>,
+ state: AtomicCell<RegionRoleState>,
}
impl ManifestContext {
- pub(crate) fn new(manager: RegionManifestManager, state: RegionState) -> Self {
+ pub(crate) fn new(manager: RegionManifestManager, state: RegionRoleState) -> Self {
ManifestContext {
manifest_manager: tokio::sync::RwLock::new(manager),
state: AtomicCell::new(state),
}
}
+ pub(crate) async fn manifest_version(&self) -> ManifestVersion {
+ self.manifest_manager
+ .read()
+ .await
+ .manifest()
+ .manifest_version
+ }
+
pub(crate) async fn has_update(&self) -> Result<bool> {
self.manifest_manager.read().await.has_update().await
}
@@ -321,9 +350,9 @@ impl ManifestContext {
/// Updates the manifest if current state is `expect_state`.
pub(crate) async fn update_manifest(
&self,
- expect_state: RegionState,
+ expect_state: RegionLeaderState,
action_list: RegionMetaActionList,
- ) -> Result<()> {
+ ) -> Result<ManifestVersion> {
// Acquires the write lock of the manifest manager.
let mut manager = self.manifest_manager.write().await;
// Gets current manifest.
@@ -332,8 +361,8 @@ impl ManifestContext {
// after `set_readonly_gracefully()` is called.
let current_state = self.state.load();
ensure!(
- current_state == expect_state,
- RegionStateSnafu {
+ current_state == RegionRoleState::Leader(expect_state),
+ RegionLeaderStateSnafu {
region_id: manifest.metadata.region_id,
state: current_state,
expect: expect_state,
@@ -376,18 +405,92 @@ impl ManifestContext {
}
// Now we can update the manifest.
- manager.update(action_list).await.inspect_err(
+ let version = manager.update(action_list).await.inspect_err(
|e| error!(e; "Failed to update manifest, region_id: {}", manifest.metadata.region_id),
)?;
- if self.state.load() == RegionState::ReadOnly {
+ if self.state.load() == RegionRoleState::Follower {
warn!(
- "Region {} becomes read-only while updating manifest which may cause inconsistency",
+ "Region {} becomes follower while updating manifest which may cause inconsistency, manifest version: {version}",
manifest.metadata.region_id
);
}
- Ok(())
+ Ok(version)
+ }
+
+ /// Sets the [`RegionRole`].
+ ///
+ /// ```
+ /// +------------------------------------------+
+ /// | +-----------------+ |
+ /// | | | |
+ /// +---+------+ +-------+-----+ +--v-v---+
+ /// | Follower | | Downgrading | | Leader |
+ /// +---^-^----+ +-----+-^-----+ +--+-+---+
+ /// | | | | | |
+ /// | +------------------+ +-----------------+ |
+ /// +------------------------------------------+
+ ///
+ /// Transition:
+ /// - Follower -> Leader
+ /// - Downgrading Leader -> Leader
+ /// - Leader -> Follower
+ /// - Downgrading Leader -> Follower
+ /// - Leader -> Downgrading Leader
+ ///
+ /// ```
+ pub(crate) fn set_role(&self, next_role: RegionRole, region_id: RegionId) {
+ match next_role {
+ RegionRole::Follower => {
+ self.state.store(RegionRoleState::Follower);
+ }
+ RegionRole::Leader => {
+ match self.state.fetch_update(|state| {
+ if matches!(
+ state,
+ RegionRoleState::Follower
+ | RegionRoleState::Leader(RegionLeaderState::Downgrading)
+ ) {
+ Some(RegionRoleState::Leader(RegionLeaderState::Writable))
+ } else {
+ None
+ }
+ }) {
+ Ok(state) => info!(
+ "Convert region {} to leader, previous role state: {:?}",
+ region_id, state
+ ),
+ Err(state) => {
+ if state != RegionRoleState::Leader(RegionLeaderState::Writable) {
+ warn!(
+ "Failed to convert region {} to leader, current role state: {:?}",
+ region_id, state
+ )
+ }
+ }
+ }
+ }
+ RegionRole::DowngradingLeader => {
+ match self.state.compare_exchange(
+ RegionRoleState::Leader(RegionLeaderState::Writable),
+ RegionRoleState::Leader(RegionLeaderState::Downgrading),
+ ) {
+ Ok(state) => info!(
+ "Convert region {} to downgrading region, previous role state: {:?}",
+ region_id, state
+ ),
+ Err(state) => {
+ if state != RegionRoleState::Leader(RegionLeaderState::Downgrading) {
+ warn!(
+ "Failed to convert region {} to downgrading leader, current role state: {:?}",
+ region_id, state
+ )
+ }
+ }
+ }
+ }
+ }
}
}
@@ -434,10 +537,10 @@ impl RegionMap {
.context(RegionNotFoundSnafu { region_id })?;
ensure!(
region.is_writable(),
- RegionStateSnafu {
+ RegionLeaderStateSnafu {
region_id,
state: region.state(),
- expect: RegionState::Writable,
+ expect: RegionLeaderState::Writable,
}
);
Ok(region)
@@ -460,6 +563,40 @@ impl RegionMap {
}
}
+ /// Gets flushable region by region id.
+ ///
+ /// Returns error if the region does not exist or is not operable.
+ fn flushable_region(&self, region_id: RegionId) -> Result<MitoRegionRef> {
+ let region = self
+ .get_region(region_id)
+ .context(RegionNotFoundSnafu { region_id })?;
+ ensure!(
+ region.is_flushable(),
+ FlushableRegionStateSnafu {
+ region_id,
+ state: region.state(),
+ }
+ );
+ Ok(region)
+ }
+
+ /// Gets flushable region by region id.
+ ///
+ /// Calls the callback if the region does not exist or is not operable.
+ pub(crate) fn flushable_region_or<F: OnFailure>(
+ &self,
+ region_id: RegionId,
+ cb: &mut F,
+ ) -> Option<MitoRegionRef> {
+ match self.flushable_region(region_id) {
+ Ok(region) => Some(region),
+ Err(e) => {
+ cb.on_failure(e);
+ None
+ }
+ }
+ }
+
/// Remove region by id.
pub(crate) fn remove_region(&self, region_id: RegionId) {
let mut regions = self.regions.write().unwrap();
@@ -548,12 +685,70 @@ impl ManifestStats {
#[cfg(test)]
mod tests {
+ use std::sync::Arc;
+
use crossbeam_utils::atomic::AtomicCell;
+ use store_api::region_engine::RegionRole;
+ use store_api::storage::RegionId;
- use crate::region::RegionState;
+ use crate::region::{RegionLeaderState, RegionRoleState};
+ use crate::test_util::scheduler_util::SchedulerEnv;
+ use crate::test_util::version_util::VersionControlBuilder;
#[test]
fn test_region_state_lock_free() {
- assert!(AtomicCell::<RegionState>::is_lock_free());
+ assert!(AtomicCell::<RegionRoleState>::is_lock_free());
+ }
+
+ #[tokio::test]
+ async fn test_set_region_state() {
+ let env = SchedulerEnv::new().await;
+ let builder = VersionControlBuilder::new();
+ let version_control = Arc::new(builder.build());
+ let manifest_ctx = env
+ .mock_manifest_context(version_control.current().version.metadata.clone())
+ .await;
+
+ let region_id = RegionId::new(1024, 0);
+ // Leader -> Follower
+ manifest_ctx.set_role(RegionRole::Follower, region_id);
+ assert_eq!(manifest_ctx.state.load(), RegionRoleState::Follower);
+
+ // Follower -> Leader
+ manifest_ctx.set_role(RegionRole::Leader, region_id);
+ assert_eq!(
+ manifest_ctx.state.load(),
+ RegionRoleState::Leader(RegionLeaderState::Writable)
+ );
+
+ // Leader -> Downgrading Leader
+ manifest_ctx.set_role(RegionRole::DowngradingLeader, region_id);
+ assert_eq!(
+ manifest_ctx.state.load(),
+ RegionRoleState::Leader(RegionLeaderState::Downgrading)
+ );
+
+ // Downgrading Leader -> Follower
+ manifest_ctx.set_role(RegionRole::Follower, region_id);
+ assert_eq!(manifest_ctx.state.load(), RegionRoleState::Follower);
+
+ // Can't downgrade from follower (Follower -> Downgrading Leader)
+ manifest_ctx.set_role(RegionRole::DowngradingLeader, region_id);
+ assert_eq!(manifest_ctx.state.load(), RegionRoleState::Follower);
+
+    // Set region role to Downgrading Leader
+ manifest_ctx.set_role(RegionRole::Leader, region_id);
+ manifest_ctx.set_role(RegionRole::DowngradingLeader, region_id);
+ assert_eq!(
+ manifest_ctx.state.load(),
+ RegionRoleState::Leader(RegionLeaderState::Downgrading)
+ );
+
+ // Downgrading Leader -> Leader
+ manifest_ctx.set_role(RegionRole::Leader, region_id);
+ assert_eq!(
+ manifest_ctx.state.load(),
+ RegionRoleState::Leader(RegionLeaderState::Writable)
+ );
}
}
diff --git a/src/mito2/src/region/opener.rs b/src/mito2/src/region/opener.rs
index 64272a183bc9..b2a76490cc27 100644
--- a/src/mito2/src/region/opener.rs
+++ b/src/mito2/src/region/opener.rs
@@ -28,6 +28,7 @@ use snafu::{ensure, OptionExt};
use store_api::logstore::provider::Provider;
use store_api::logstore::LogStore;
use store_api::metadata::{ColumnMetadata, RegionMetadata};
+use store_api::region_engine::RegionRole;
use store_api::storage::{ColumnId, RegionId};
use crate::access_layer::AccessLayer;
@@ -42,7 +43,9 @@ use crate::memtable::time_partition::TimePartitions;
use crate::memtable::MemtableBuilderProvider;
use crate::region::options::RegionOptions;
use crate::region::version::{VersionBuilder, VersionControl, VersionControlRef};
-use crate::region::{ManifestContext, ManifestStats, MitoRegion, RegionState};
+use crate::region::{
+ ManifestContext, ManifestStats, MitoRegion, RegionLeaderState, RegionRoleState,
+};
use crate::region_write_ctx::RegionWriteCtx;
use crate::request::OptionOutputTx;
use crate::schedule::scheduler::SchedulerRef;
@@ -169,8 +172,8 @@ impl RegionOpener {
&expect.column_metadatas,
&expect.primary_key,
)?;
- // To keep consistence with Create behavior, set the opened Region writable.
- region.set_writable(true);
+        // To keep consistency with Create behavior, set the opened Region to RegionRole::Leader.
+ region.set_role(RegionRole::Leader);
return Ok(region);
}
Ok(None) => {
@@ -235,7 +238,7 @@ impl RegionOpener {
// Region is writable after it is created.
manifest_ctx: Arc::new(ManifestContext::new(
manifest_manager,
- RegionState::Writable,
+ RegionRoleState::Leader(RegionLeaderState::Writable),
)),
file_purger: Arc::new(LocalFilePurger::new(
self.purge_scheduler,
@@ -362,9 +365,10 @@ impl RegionOpener {
let version_control = Arc::new(VersionControl::new(version));
if !self.skip_wal_replay {
info!(
- "Start replaying memtable at flushed_entry_id + 1 {} for region {}",
+ "Start replaying memtable at flushed_entry_id + 1: {} for region {}, manifest version: {}",
flushed_entry_id + 1,
- region_id
+ region_id,
+ manifest.manifest_version
);
replay_memtable(
&provider,
@@ -377,7 +381,10 @@ impl RegionOpener {
)
.await?;
} else {
- info!("Skip the WAL replay for region: {}", region_id);
+ info!(
+ "Skip the WAL replay for region: {}, manifest version: {}",
+ region_id, manifest.manifest_version
+ );
}
let now = self.time_provider.current_time_millis();
@@ -388,7 +395,7 @@ impl RegionOpener {
// Region is always opened in read only mode.
manifest_ctx: Arc::new(ManifestContext::new(
manifest_manager,
- RegionState::ReadOnly,
+ RegionRoleState::Follower,
)),
file_purger,
provider: provider.clone(),
diff --git a/src/mito2/src/request.rs b/src/mito2/src/request.rs
index d88bc994e97e..1e4c6b8dc986 100644
--- a/src/mito2/src/request.rs
+++ b/src/mito2/src/request.rs
@@ -31,7 +31,7 @@ use prost::Message;
use smallvec::SmallVec;
use snafu::{ensure, OptionExt, ResultExt};
use store_api::metadata::{ColumnMetadata, RegionMetadata, RegionMetadataRef};
-use store_api::region_engine::SetReadonlyResponse;
+use store_api::region_engine::{SetRegionRoleStateResponse, SettableRegionRoleState};
use store_api::region_request::{
AffectedRows, RegionAlterRequest, RegionCatchupRequest, RegionCloseRequest,
RegionCompactRequest, RegionCreateRequest, RegionDropRequest, RegionFlushRequest,
@@ -483,11 +483,13 @@ pub(crate) enum WorkerRequest {
},
/// The internal commands.
- SetReadonlyGracefully {
+ SetRegionRoleStateGracefully {
/// Id of the region to send.
region_id: RegionId,
+ /// The [SettableRegionRoleState].
+ region_role_state: SettableRegionRoleState,
/// The sender of [SetReadonlyResponse].
- sender: Sender<SetReadonlyResponse>,
+ sender: Sender<SetRegionRoleStateResponse>,
},
/// Notify a worker to stop.
@@ -587,11 +589,16 @@ impl WorkerRequest {
pub(crate) fn new_set_readonly_gracefully(
region_id: RegionId,
- ) -> (WorkerRequest, Receiver<SetReadonlyResponse>) {
+ region_role_state: SettableRegionRoleState,
+ ) -> (WorkerRequest, Receiver<SetRegionRoleStateResponse>) {
let (sender, receiver) = oneshot::channel();
(
- WorkerRequest::SetReadonlyGracefully { region_id, sender },
+ WorkerRequest::SetRegionRoleStateGracefully {
+ region_id,
+ region_role_state,
+ sender,
+ },
receiver,
)
}
diff --git a/src/mito2/src/test_util.rs b/src/mito2/src/test_util.rs
index cd449e53fae6..0bd85747c0f1 100644
--- a/src/mito2/src/test_util.rs
+++ b/src/mito2/src/test_util.rs
@@ -52,7 +52,7 @@ use rskafka::client::{Client, ClientBuilder};
use rskafka::record::Record;
use rstest_reuse::template;
use store_api::metadata::{ColumnMetadata, RegionMetadataRef};
-use store_api::region_engine::RegionEngine;
+use store_api::region_engine::{RegionEngine, RegionRole};
use store_api::region_request::{
RegionCloseRequest, RegionCreateRequest, RegionDeleteRequest, RegionFlushRequest,
RegionOpenRequest, RegionPutRequest, RegionRequest,
@@ -1114,6 +1114,8 @@ pub async fn reopen_region(
.unwrap();
if writable {
- engine.set_writable(region_id, true).unwrap();
+ engine
+ .set_region_role(region_id, RegionRole::Leader)
+ .unwrap();
}
}
diff --git a/src/mito2/src/test_util/scheduler_util.rs b/src/mito2/src/test_util/scheduler_util.rs
index c1b85279deda..ba777b157fc3 100644
--- a/src/mito2/src/test_util/scheduler_util.rs
+++ b/src/mito2/src/test_util/scheduler_util.rs
@@ -31,7 +31,7 @@ use crate::config::MitoConfig;
use crate::error::Result;
use crate::flush::FlushScheduler;
use crate::manifest::manager::{RegionManifestManager, RegionManifestOptions};
-use crate::region::{ManifestContext, ManifestContextRef, RegionState};
+use crate::region::{ManifestContext, ManifestContextRef, RegionLeaderState, RegionRoleState};
use crate::request::WorkerRequest;
use crate::schedule::scheduler::{Job, LocalScheduler, Scheduler, SchedulerRef};
use crate::sst::index::intermediate::IntermediateManager;
@@ -124,7 +124,7 @@ impl SchedulerEnv {
)
.await
.unwrap(),
- RegionState::Writable,
+ RegionRoleState::Leader(RegionLeaderState::Writable),
))
}
diff --git a/src/mito2/src/worker.rs b/src/mito2/src/worker.rs
index c2fbc8098203..e790ed08c1a9 100644
--- a/src/mito2/src/worker.rs
+++ b/src/mito2/src/worker.rs
@@ -39,7 +39,7 @@ use prometheus::IntGauge;
use rand::{thread_rng, Rng};
use snafu::{ensure, ResultExt};
use store_api::logstore::LogStore;
-use store_api::region_engine::SetReadonlyResponse;
+use store_api::region_engine::{SetRegionRoleStateResponse, SettableRegionRoleState};
use store_api::storage::RegionId;
use tokio::sync::mpsc::{Receiver, Sender};
use tokio::sync::{mpsc, oneshot, watch, Mutex};
@@ -734,8 +734,13 @@ impl<S: LogStore> RegionWorkerLoop<S> {
// For background notify, we handle it directly.
self.handle_background_notify(region_id, notify).await;
}
- WorkerRequest::SetReadonlyGracefully { region_id, sender } => {
- self.set_readonly_gracefully(region_id, sender).await;
+ WorkerRequest::SetRegionRoleStateGracefully {
+ region_id,
+ region_role_state,
+ sender,
+ } => {
+ self.set_role_state_gracefully(region_id, region_role_state, sender)
+ .await;
}
WorkerRequest::EditRegion(request) => {
self.handle_region_edit(request).await;
@@ -834,22 +839,23 @@ impl<S: LogStore> RegionWorkerLoop<S> {
}
}
- /// Handles `set_readonly_gracefully`.
- async fn set_readonly_gracefully(
+    /// Handles `set_region_role_state_gracefully`.
+ async fn set_role_state_gracefully(
&mut self,
region_id: RegionId,
- sender: oneshot::Sender<SetReadonlyResponse>,
+ region_role_state: SettableRegionRoleState,
+ sender: oneshot::Sender<SetRegionRoleStateResponse>,
) {
if let Some(region) = self.regions.get_region(region_id) {
// We need to do this in background as we need the manifest lock.
common_runtime::spawn_global(async move {
- region.set_readonly_gracefully().await;
+ region.set_role_state_gracefully(region_role_state).await;
let last_entry_id = region.version_control.current().last_entry_id;
- let _ = sender.send(SetReadonlyResponse::success(Some(last_entry_id)));
+ let _ = sender.send(SetRegionRoleStateResponse::success(Some(last_entry_id)));
});
} else {
- let _ = sender.send(SetReadonlyResponse::NotFound);
+ let _ = sender.send(SetRegionRoleStateResponse::NotFound);
}
}
}
diff --git a/src/mito2/src/worker/handle_catchup.rs b/src/mito2/src/worker/handle_catchup.rs
index 505c994d3607..cacd563ed78e 100644
--- a/src/mito2/src/worker/handle_catchup.rs
+++ b/src/mito2/src/worker/handle_catchup.rs
@@ -20,6 +20,7 @@ use common_telemetry::info;
use common_telemetry::tracing::warn;
use snafu::ensure;
use store_api::logstore::LogStore;
+use store_api::region_engine::RegionRole;
use store_api::region_request::{AffectedRows, RegionCatchupRequest};
use store_api::storage::RegionId;
use tokio::time::Instant;
@@ -47,7 +48,8 @@ impl<S: LogStore> RegionWorkerLoop<S> {
// Utilizes the short circuit evaluation.
let region = if !is_mutable_empty || region.manifest_ctx.has_update().await? {
- info!("Reopening the region: {region_id}, empty mutable: {is_mutable_empty}");
+ let manifest_version = region.manifest_ctx.manifest_version().await;
+ info!("Reopening the region: {region_id}, empty mutable: {is_mutable_empty}, manifest version: {manifest_version}");
let reopened_region = Arc::new(
RegionOpener::new(
region_id,
@@ -112,7 +114,7 @@ impl<S: LogStore> RegionWorkerLoop<S> {
}
if request.set_writable {
- region.set_writable(true);
+ region.set_role(RegionRole::Leader);
}
Ok(0)
diff --git a/src/mito2/src/worker/handle_drop.rs b/src/mito2/src/worker/handle_drop.rs
index 51b42acb406f..a569f2236029 100644
--- a/src/mito2/src/worker/handle_drop.rs
+++ b/src/mito2/src/worker/handle_drop.rs
@@ -28,7 +28,7 @@ use store_api::storage::RegionId;
use tokio::time::sleep;
use crate::error::{OpenDalSnafu, Result};
-use crate::region::{RegionMapRef, RegionState};
+use crate::region::{RegionLeaderState, RegionMapRef};
use crate::worker::{RegionWorkerLoop, DROPPING_MARKER_FILE};
const GC_TASK_INTERVAL_SEC: u64 = 5 * 60; // 5 minutes
@@ -62,7 +62,7 @@ where
// Sets the state back to writable. It's possible that the marker file has been written.
// We set the state back to writable so we can retry the drop operation.
- region.switch_state_to_writable(RegionState::Dropping);
+ region.switch_state_to_writable(RegionLeaderState::Dropping);
})?;
region.stop().await;
diff --git a/src/mito2/src/worker/handle_flush.rs b/src/mito2/src/worker/handle_flush.rs
index 14a70225bbe1..b2bc5fd2e865 100644
--- a/src/mito2/src/worker/handle_flush.rs
+++ b/src/mito2/src/worker/handle_flush.rs
@@ -36,16 +36,18 @@ impl<S> RegionWorkerLoop<S> {
request: RegionFlushRequest,
mut sender: OptionOutputTx,
) {
- let Some(region) = self.regions.writable_region_or(region_id, &mut sender) else {
+ let Some(region) = self.regions.flushable_region_or(region_id, &mut sender) else {
return;
};
- let mut task = self.new_flush_task(
- ®ion,
- FlushReason::Manual,
- request.row_group_size,
- self.config.clone(),
- );
+ let reason = if region.is_downgrading() {
+ FlushReason::Downgrading
+ } else {
+ FlushReason::Manual
+ };
+
+ let mut task =
+ self.new_flush_task(®ion, reason, request.row_group_size, self.config.clone());
task.push_sender(sender);
if let Err(e) =
self.flush_scheduler
diff --git a/src/mito2/src/worker/handle_manifest.rs b/src/mito2/src/worker/handle_manifest.rs
index de5f4e563d43..e97b30afec76 100644
--- a/src/mito2/src/worker/handle_manifest.rs
+++ b/src/mito2/src/worker/handle_manifest.rs
@@ -27,7 +27,7 @@ use crate::error::{RegionBusySnafu, RegionNotFoundSnafu, Result};
use crate::manifest::action::{
RegionChange, RegionEdit, RegionMetaAction, RegionMetaActionList, RegionTruncate,
};
-use crate::region::{MitoRegionRef, RegionState};
+use crate::region::{MitoRegionRef, RegionLeaderState, RegionRoleState};
use crate::request::{
BackgroundNotify, OptionOutputTx, RegionChangeResult, RegionEditRequest, RegionEditResult,
TruncateResult, WorkerRequest,
@@ -84,7 +84,7 @@ impl<S> RegionWorkerLoop<S> {
};
if !region.is_writable() {
- if region.state() == RegionState::Editing {
+ if region.state() == RegionRoleState::Leader(RegionLeaderState::Editing) {
self.region_edit_queues
.entry(region_id)
.or_insert_with(|| RegionEditQueue::new(region_id))
@@ -159,7 +159,7 @@ impl<S> RegionWorkerLoop<S> {
}
// Sets the region as writable.
- region.switch_state_to_writable(RegionState::Editing);
+ region.switch_state_to_writable(RegionLeaderState::Editing);
let _ = edit_result.sender.send(edit_result.result);
@@ -199,8 +199,9 @@ impl<S> RegionWorkerLoop<S> {
RegionMetaActionList::with_action(RegionMetaAction::Truncate(truncate.clone()));
let result = manifest_ctx
- .update_manifest(RegionState::Truncating, action_list)
- .await;
+ .update_manifest(RegionLeaderState::Truncating, action_list)
+ .await
+ .map(|_| ());
// Sends the result back to the request sender.
let truncate_result = TruncateResult {
@@ -241,8 +242,9 @@ impl<S> RegionWorkerLoop<S> {
let result = region
.manifest_ctx
- .update_manifest(RegionState::Altering, action_list)
- .await;
+ .update_manifest(RegionLeaderState::Altering, action_list)
+ .await
+ .map(|_| ());
let notify = WorkerRequest::Background {
region_id: region.region_id,
notify: BackgroundNotify::RegionChange(RegionChangeResult {
@@ -291,7 +293,7 @@ impl<S> RegionWorkerLoop<S> {
}
// Sets the region as writable.
- region.switch_state_to_writable(RegionState::Altering);
+ region.switch_state_to_writable(RegionLeaderState::Altering);
change_result.sender.send(change_result.result.map(|_| 0));
}
@@ -338,6 +340,7 @@ async fn edit_region(
let action_list = RegionMetaActionList::with_action(RegionMetaAction::Edit(edit));
region
.manifest_ctx
- .update_manifest(RegionState::Editing, action_list)
+ .update_manifest(RegionLeaderState::Editing, action_list)
.await
+ .map(|_| ())
}
diff --git a/src/mito2/src/worker/handle_truncate.rs b/src/mito2/src/worker/handle_truncate.rs
index da5b74e511f3..863b1961a34f 100644
--- a/src/mito2/src/worker/handle_truncate.rs
+++ b/src/mito2/src/worker/handle_truncate.rs
@@ -20,7 +20,7 @@ use store_api::storage::RegionId;
use crate::error::RegionNotFoundSnafu;
use crate::manifest::action::RegionTruncate;
-use crate::region::RegionState;
+use crate::region::RegionLeaderState;
use crate::request::{OptionOutputTx, TruncateResult};
use crate::worker::RegionWorkerLoop;
@@ -63,7 +63,7 @@ impl<S: LogStore> RegionWorkerLoop<S> {
};
// We are already in the worker loop so we can set the state first.
- region.switch_state_to_writable(RegionState::Truncating);
+ region.switch_state_to_writable(RegionLeaderState::Truncating);
match truncate_result.result {
Ok(()) => {
diff --git a/src/operator/src/tests/partition_manager.rs b/src/operator/src/tests/partition_manager.rs
index bc7907903681..2f1ffeffb587 100644
--- a/src/operator/src/tests/partition_manager.rs
+++ b/src/operator/src/tests/partition_manager.rs
@@ -142,7 +142,7 @@ pub(crate) async fn create_partition_rule_manager(
},
leader_peer: Some(Peer::new(3, "")),
follower_peers: vec![],
- leader_status: None,
+ leader_state: None,
leader_down_since: None,
},
RegionRoute {
@@ -173,7 +173,7 @@ pub(crate) async fn create_partition_rule_manager(
},
leader_peer: Some(Peer::new(2, "")),
follower_peers: vec![],
- leader_status: None,
+ leader_state: None,
leader_down_since: None,
},
RegionRoute {
@@ -196,7 +196,7 @@ pub(crate) async fn create_partition_rule_manager(
},
leader_peer: Some(Peer::new(1, "")),
follower_peers: vec![],
- leader_status: None,
+ leader_state: None,
leader_down_since: None,
},
]),
diff --git a/src/query/src/optimizer/test_util.rs b/src/query/src/optimizer/test_util.rs
index 6b14a5af52e4..58e014affff7 100644
--- a/src/query/src/optimizer/test_util.rs
+++ b/src/query/src/optimizer/test_util.rs
@@ -28,7 +28,8 @@ use store_api::metadata::{
ColumnMetadata, RegionMetadata, RegionMetadataBuilder, RegionMetadataRef,
};
use store_api::region_engine::{
- RegionEngine, RegionRole, RegionScannerRef, RegionStatistic, SetReadonlyResponse,
+ RegionEngine, RegionRole, RegionScannerRef, RegionStatistic, SetRegionRoleStateResponse,
+ SettableRegionRoleState,
};
use store_api::region_request::RegionRequest;
use store_api::storage::{ConcreteDataType, RegionId, ScanRequest};
@@ -89,14 +90,15 @@ impl RegionEngine for MetaRegionEngine {
Ok(())
}
- fn set_writable(&self, _region_id: RegionId, _writable: bool) -> Result<(), BoxedError> {
+ fn set_region_role(&self, _region_id: RegionId, _role: RegionRole) -> Result<(), BoxedError> {
unimplemented!()
}
- async fn set_readonly_gracefully(
+ async fn set_region_role_state_gracefully(
&self,
_region_id: RegionId,
- ) -> Result<SetReadonlyResponse, BoxedError> {
+ _region_role_state: SettableRegionRoleState,
+ ) -> Result<SetRegionRoleStateResponse, BoxedError> {
unimplemented!()
}
diff --git a/src/store-api/src/region_engine.rs b/src/store-api/src/region_engine.rs
index 483d3cc1adbe..850b9ad3e2d6 100644
--- a/src/store-api/src/region_engine.rs
+++ b/src/store-api/src/region_engine.rs
@@ -36,9 +36,32 @@ use crate::metadata::RegionMetadataRef;
use crate::region_request::{RegionOpenRequest, RegionRequest};
use crate::storage::{RegionId, ScanRequest};
-/// The result of setting readonly for the region.
+/// The settable region role state.
+#[derive(Debug, PartialEq, Eq, Clone, Copy)]
+pub enum SettableRegionRoleState {
+ Follower,
+ DowngradingLeader,
+}
+
+impl From<SettableRegionRoleState> for RegionRole {
+ fn from(value: SettableRegionRoleState) -> Self {
+ match value {
+ SettableRegionRoleState::Follower => RegionRole::Follower,
+ SettableRegionRoleState::DowngradingLeader => RegionRole::DowngradingLeader,
+ }
+ }
+}
+
+/// The request to set region role state.
+#[derive(Debug, PartialEq, Eq)]
+pub struct SetRegionRoleStateRequest {
+ region_id: RegionId,
+ region_role_state: SettableRegionRoleState,
+}
+
+/// The response of setting region role state.
#[derive(Debug, PartialEq, Eq)]
-pub enum SetReadonlyResponse {
+pub enum SetRegionRoleStateResponse {
Success {
/// Returns `last_entry_id` of the region if available(e.g., It's not available in file engine).
last_entry_id: Option<entry::Id>,
@@ -46,8 +69,8 @@ pub enum SetReadonlyResponse {
NotFound,
}
-impl SetReadonlyResponse {
- /// Returns a [SetReadonlyResponse::Success] with the `last_entry_id`.
+impl SetRegionRoleStateResponse {
+ /// Returns a [SetRegionRoleStateResponse::Success] with the `last_entry_id`.
pub fn success(last_entry_id: Option<entry::Id>) -> Self {
Self::Success { last_entry_id }
}
@@ -58,6 +81,7 @@ pub struct GrantedRegion {
pub region_id: RegionId,
pub region_role: RegionRole,
}
+
impl GrantedRegion {
pub fn new(region_id: RegionId, region_role: RegionRole) -> Self {
Self {
@@ -85,12 +109,18 @@ impl From<PbGrantedRegion> for GrantedRegion {
}
}
+/// The role of the region.
+/// TODO(weny): rename it to `RegionRoleState`
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum RegionRole {
// Readonly region(mito2)
Follower,
// Writable region(mito2), Readonly region(file).
Leader,
+ // Leader is downgrading to follower.
+ //
+ // This state is used to prevent new write requests.
+ DowngradingLeader,
}
impl Display for RegionRole {
@@ -98,6 +128,7 @@ impl Display for RegionRole {
match self {
RegionRole::Follower => write!(f, "Follower"),
RegionRole::Leader => write!(f, "Leader"),
+ RegionRole::DowngradingLeader => write!(f, "Leader(Downgrading)"),
}
}
}
@@ -113,6 +144,7 @@ impl From<RegionRole> for PbRegionRole {
match value {
RegionRole::Follower => PbRegionRole::Follower,
RegionRole::Leader => PbRegionRole::Leader,
+ RegionRole::DowngradingLeader => PbRegionRole::DowngradingLeader,
}
}
}
@@ -122,6 +154,7 @@ impl From<PbRegionRole> for RegionRole {
match value {
PbRegionRole::Leader => RegionRole::Leader,
PbRegionRole::Follower => RegionRole::Follower,
+ PbRegionRole::DowngradingLeader => RegionRole::DowngradingLeader,
}
}
}
@@ -331,20 +364,21 @@ pub trait RegionEngine: Send + Sync {
/// Stops the engine
async fn stop(&self) -> Result<(), BoxedError>;
- /// Sets writable mode for a region.
+ /// Sets [RegionRole] for a region.
///
/// The engine checks whether the region is writable before writing to the region. Setting
/// the region as readonly doesn't guarantee that write operations in progress will not
/// take effect.
- fn set_writable(&self, region_id: RegionId, writable: bool) -> Result<(), BoxedError>;
+ fn set_region_role(&self, region_id: RegionId, role: RegionRole) -> Result<(), BoxedError>;
- /// Sets readonly for a region gracefully.
+ /// Sets region role state gracefully.
///
/// After the call returns, the engine ensures no more write operations will succeed in the region.
- async fn set_readonly_gracefully(
+ async fn set_region_role_state_gracefully(
&self,
region_id: RegionId,
- ) -> Result<SetReadonlyResponse, BoxedError>;
+ region_role_state: SettableRegionRoleState,
+ ) -> Result<SetRegionRoleStateResponse, BoxedError>;
/// Indicates region role.
///
|
feat
|
support to reject write after flushing (#4759)
|
a8f2e4468d928e775d62301f593c483809fe3e17
|
2023-08-15 13:52:46
|
Niwaka
|
feat: handle multiple grpc deletes (#2150)
|
false
|
diff --git a/Cargo.lock b/Cargo.lock
index 77c54cd39222..1fbceaba80a0 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -64,9 +64,9 @@ dependencies = [
[[package]]
name = "aho-corasick"
-version = "1.0.2"
+version = "1.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "43f6cb1bf222025340178f382c426f13757b2960e89779dfcb319c32542a5a41"
+checksum = "86b8f9420f797f2d9e935edf629310eb938a0d839f984e25327f3c7eed22300c"
dependencies = [
"memchr",
]
@@ -183,9 +183,9 @@ dependencies = [
[[package]]
name = "anstyle-wincon"
-version = "1.0.1"
+version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "180abfa45703aebe0093f79badacc01b8fd4ea2e35118747e5811127f926e188"
+checksum = "c677ab05e09154296dd37acecd46420c17b9713e8366facafa8fc0885167cf4c"
dependencies = [
"anstyle",
"windows-sys 0.48.0",
@@ -469,7 +469,7 @@ version = "43.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2bebcb57eef570b15afbcf2d07d813eb476fde9f6dd69c81004d6476c197e87e"
dependencies = [
- "bitflags 2.3.3",
+ "bitflags 2.4.0",
"serde",
]
@@ -589,9 +589,9 @@ dependencies = [
[[package]]
name = "async-lock"
-version = "2.7.0"
+version = "2.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fa24f727524730b077666307f2734b4a1a1c57acb79193127dcc8914d5242dd7"
+checksum = "287272293e9d8c41773cec55e365490fe034813a2f172f502d6ddcf75b2f582b"
dependencies = [
"event-listener",
]
@@ -631,9 +631,9 @@ dependencies = [
[[package]]
name = "async-trait"
-version = "0.1.72"
+version = "0.1.73"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cc6dde6e4ed435a4c1ee4e73592f5ba9da2151af10076cc04858746af9352d09"
+checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0"
dependencies = [
"proc-macro2",
"quote",
@@ -712,9 +712,9 @@ dependencies = [
[[package]]
name = "axum"
-version = "0.6.19"
+version = "0.6.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a6a1de45611fdb535bfde7b7de4fd54f4fd2b17b1737c0a59b69bf9b92074b8c"
+checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf"
dependencies = [
"async-trait",
"axum-core",
@@ -849,7 +849,7 @@ name = "benchmarks"
version = "0.3.2"
dependencies = [
"arrow",
- "clap 4.3.19",
+ "clap 4.3.21",
"client",
"indicatif",
"itertools 0.10.5",
@@ -903,7 +903,7 @@ version = "0.66.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f2b84e06fc203107bfbad243f4aba2af864eb7db3b1cf46ea0a023b0b433d2a7"
dependencies = [
- "bitflags 2.3.3",
+ "bitflags 2.4.0",
"cexpr",
"clang-sys",
"lazy_static",
@@ -940,9 +940,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]]
name = "bitflags"
-version = "2.3.3"
+version = "2.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "630be753d4e58660abd17930c71b647fe46c27ea6b63cc59e1e3851406972e42"
+checksum = "b4682ae6287fcf752ecaabbfcc7b6f9b72aa33933dc23a554d853aea8eea8635"
[[package]]
name = "bitvec"
@@ -1072,7 +1072,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6798148dccfbff0fae41c7574d2fa8f1ef3492fba0face179de5d8d447d67b05"
dependencies = [
"memchr",
- "regex-automata 0.3.4",
+ "regex-automata 0.3.6",
"serde",
]
@@ -1204,7 +1204,7 @@ checksum = "4acbb09d9ee8e23699b9634375c72795d095bf268439da88562cf9b501f181fa"
dependencies = [
"camino",
"cargo-platform",
- "semver 1.0.18",
+ "semver",
"serde",
"serde_json",
]
@@ -1256,7 +1256,7 @@ dependencies = [
"meta-client",
"metrics",
"mito",
- "moka 0.11.2",
+ "moka 0.11.3",
"object-store",
"parking_lot 0.12.1",
"regex",
@@ -1272,11 +1272,12 @@ dependencies = [
[[package]]
name = "cc"
-version = "1.0.79"
+version = "1.0.82"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f"
+checksum = "305fe645edc1442a0fa8b6726ba61d422798d37a52e12eaecf4b022ebbb88f01"
dependencies = [
"jobserver",
+ "libc",
]
[[package]]
@@ -1446,9 +1447,9 @@ dependencies = [
[[package]]
name = "clap"
-version = "4.3.19"
+version = "4.3.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5fd304a20bff958a57f04c4e96a2e7594cc4490a0e809cbd48bb6437edaa452d"
+checksum = "c27cdf28c0f604ba3f512b0c9a409f8de8513e4816705deb0498b627e7c3a3fd"
dependencies = [
"clap_builder",
"clap_derive 4.3.12",
@@ -1457,9 +1458,9 @@ dependencies = [
[[package]]
name = "clap_builder"
-version = "4.3.19"
+version = "4.3.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "01c6a3f08f1fe5662a35cfe393aec09c4df95f60ee93b7556505260f75eee9e1"
+checksum = "08a9f1ab5e9f01a9b81f202e8562eb9a10de70abf9eaeac1be465c28b75aa4aa"
dependencies = [
"anstream",
"anstyle",
@@ -1530,7 +1531,7 @@ dependencies = [
"derive-new",
"enum_dispatch",
"futures-util",
- "moka 0.9.8",
+ "moka 0.9.9",
"parking_lot 0.12.1",
"prost",
"rand",
@@ -2070,9 +2071,9 @@ checksum = "e4c78c047431fee22c1a7bb92e00ad095a02a983affe4d8a72e2a2c62c1b94f3"
[[package]]
name = "const-oid"
-version = "0.9.4"
+version = "0.9.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "795bc6e66a8e340f075fcf6227e417a2dc976b92b91f3cdc778bb858778b6747"
+checksum = "28c122c3980598d243d63d9a704629a2d748d101f278052ff068be5a4423ab6f"
[[package]]
name = "const-random"
@@ -2521,7 +2522,7 @@ dependencies = [
"lazy_static",
"sqlparser 0.35.0",
"strum 0.25.0",
- "strum_macros 0.25.1",
+ "strum_macros 0.25.2",
]
[[package]]
@@ -2612,7 +2613,7 @@ dependencies = [
"object_store",
"prost",
"prost-types",
- "substrait 0.12.3",
+ "substrait 0.12.4",
"tokio",
]
@@ -2731,20 +2732,20 @@ dependencies = [
[[package]]
name = "der"
-version = "0.7.7"
+version = "0.7.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0c7ed52955ce76b1554f509074bb357d3fb8ac9b51288a65a3fd480d1dfba946"
+checksum = "fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c"
dependencies = [
- "const-oid 0.9.4",
+ "const-oid 0.9.5",
"pem-rfc7468 0.7.0",
"zeroize",
]
[[package]]
name = "deranged"
-version = "0.3.6"
+version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8810e7e2cf385b1e9b50d68264908ec367ba642c96d02edfe61c39e88e2a3c01"
+checksum = "7684a49fb1af197853ef7b2ee694bc1f5b4179556f1e5710e1760c5db6f5e929"
[[package]]
name = "derive-new"
@@ -2832,7 +2833,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292"
dependencies = [
"block-buffer",
- "const-oid 0.9.4",
+ "const-oid 0.9.5",
"crypto-common",
"subtle",
]
@@ -3142,7 +3143,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ef033ed5e9bad94e55838ca0ca906db0e043f517adda0c8b79c7a8c66c93c1b5"
dependencies = [
"cfg-if 1.0.0",
- "rustix 0.38.4",
+ "rustix 0.38.8",
"windows-sys 0.48.0",
]
@@ -3175,13 +3176,13 @@ dependencies = [
[[package]]
name = "filetime"
-version = "0.2.21"
+version = "0.2.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5cbc844cecaee9d4443931972e1289c8ff485cb4cc2767cb03ca139ed6885153"
+checksum = "d4029edd3e734da6fe05b6cd7bd2960760a616bd2ddd0d59a0124746d6272af0"
dependencies = [
"cfg-if 1.0.0",
"libc",
- "redox_syscall 0.2.16",
+ "redox_syscall 0.3.5",
"windows-sys 0.48.0",
]
@@ -3216,7 +3217,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4dac53e22462d78c16d64a1cd22371b54cc3fe94aa15e7886a2fa6e5d1ab8640"
dependencies = [
"bitflags 1.3.2",
- "rustc_version 0.4.0",
+ "rustc_version",
]
[[package]]
@@ -3296,7 +3297,7 @@ dependencies = [
"meter-macros",
"metrics",
"mito",
- "moka 0.9.8",
+ "moka 0.9.9",
"object-store",
"openmetrics-parser",
"opentelemetry-proto",
@@ -3744,7 +3745,7 @@ dependencies = [
"bstr 1.6.0",
"itoa",
"thiserror",
- "time 0.3.24",
+ "time 0.3.25",
]
[[package]]
@@ -4135,7 +4136,7 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"
[[package]]
name = "greptime-proto"
version = "0.1.0"
-source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=9b68af55c050a010f202fcccb22d58f080f0a868#9b68af55c050a010f202fcccb22d58f080f0a868"
+source = "git+https://github.com/NiwakaDev/greptime-proto.git?rev=ec402b6500f908a0acfab6c889225cd4dc2228a4#ec402b6500f908a0acfab6c889225cd4dc2228a4"
dependencies = [
"prost",
"serde",
@@ -4374,9 +4375,9 @@ checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904"
[[package]]
name = "httpdate"
-version = "1.0.2"
+version = "1.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421"
+checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9"
[[package]]
name = "humantime"
@@ -4427,7 +4428,7 @@ dependencies = [
"futures-util",
"http",
"hyper",
- "rustls 0.21.5",
+ "rustls 0.21.6",
"tokio",
"tokio-rustls 0.24.1",
]
@@ -4541,9 +4542,9 @@ dependencies = [
[[package]]
name = "indicatif"
-version = "0.17.5"
+version = "0.17.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8ff8cc23a7393a397ed1d7f56e6365cba772aba9f9912ab968b03043c395d057"
+checksum = "0b297dc40733f23a0e52728a58fa9489a5b7638a324932de16b41adc3ef80730"
dependencies = [
"console",
"instant",
@@ -4664,7 +4665,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b"
dependencies = [
"hermit-abi 0.3.2",
- "rustix 0.38.4",
+ "rustix 0.38.8",
"windows-sys 0.48.0",
]
@@ -4976,9 +4977,9 @@ dependencies = [
[[package]]
name = "log"
-version = "0.4.19"
+version = "0.4.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4"
+checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f"
[[package]]
name = "log-store"
@@ -5174,9 +5175,9 @@ checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5"
[[package]]
name = "matchit"
-version = "0.7.1"
+version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "67827e6ea8ee8a7c4a72227ef4fc08957040acffdb5f122733b24fa12daff41b"
+checksum = "ed1202b2a6f884ae56f04cff409ab315c5ce26b5e58d7412e484f01fd52f52ef"
[[package]]
name = "matrixmultiply"
@@ -5563,9 +5564,9 @@ dependencies = [
[[package]]
name = "moka"
-version = "0.9.8"
+version = "0.9.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "19ca9b167ed904bc89a2f64c4be5014615c26fd9c4ddd2042c6094744c7df11a"
+checksum = "b28455ac4363046076054a7e9cfbd7f168019c29dba32a625f59fc0aeffaaea4"
dependencies = [
"async-io",
"async-lock",
@@ -5577,7 +5578,7 @@ dependencies = [
"once_cell",
"parking_lot 0.12.1",
"quanta 0.11.1",
- "rustc_version 0.4.0",
+ "rustc_version",
"scheduled-thread-pool",
"skeptic",
"smallvec",
@@ -5589,9 +5590,9 @@ dependencies = [
[[package]]
name = "moka"
-version = "0.11.2"
+version = "0.11.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "206bf83f415b0579fd885fe0804eb828e727636657dc1bf73d80d2f1218e14a1"
+checksum = "fa6e72583bf6830c956235bff0d5afec8cf2952f579ebad18ae7821a917d950f"
dependencies = [
"async-io",
"async-lock",
@@ -5602,7 +5603,7 @@ dependencies = [
"once_cell",
"parking_lot 0.12.1",
"quanta 0.11.1",
- "rustc_version 0.4.0",
+ "rustc_version",
"scheduled-thread-pool",
"skeptic",
"smallvec",
@@ -5665,7 +5666,7 @@ dependencies = [
"percent-encoding",
"pin-project",
"priority-queue",
- "rustls 0.21.5",
+ "rustls 0.21.6",
"rustls-pemfile",
"serde",
"serde_json",
@@ -5689,7 +5690,7 @@ dependencies = [
"base64 0.21.2",
"bigdecimal",
"bindgen 0.66.1",
- "bitflags 2.3.3",
+ "bitflags 2.4.0",
"bitvec",
"byteorder",
"bytes",
@@ -5715,7 +5716,7 @@ dependencies = [
"smallvec",
"subprocess",
"thiserror",
- "time 0.3.24",
+ "time 0.3.25",
"uuid",
]
@@ -5868,9 +5869,9 @@ dependencies = [
[[package]]
name = "num-complex"
-version = "0.4.3"
+version = "0.4.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "02e0d21255c828d6f128a1e41534206671e8c3ea0c62f32291e808dc82cff17d"
+checksum = "1ba157ca0885411de85d6ca030ba7e2a83a28636056c7c699b07c8b6f7383214"
dependencies = [
"num-traits",
]
@@ -6452,7 +6453,7 @@ dependencies = [
"datafusion-expr",
"datatypes",
"meta-client",
- "moka 0.9.8",
+ "moka 0.9.9",
"serde",
"serde_json",
"snafu",
@@ -6523,9 +6524,9 @@ checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94"
[[package]]
name = "pest"
-version = "2.7.1"
+version = "2.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0d2d1d55045829d65aad9d389139882ad623b33b904e7c9f1b10c5b8927298e5"
+checksum = "1acb4a4365a13f749a93f1a094a7805e5cfa0955373a9de860d962eaa3a5fe5a"
dependencies = [
"thiserror",
"ucd-trie",
@@ -6533,9 +6534,9 @@ dependencies = [
[[package]]
name = "pest_derive"
-version = "2.7.1"
+version = "2.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5f94bca7e7a599d89dea5dfa309e217e7906c3c007fb9c3299c40b10d6a315d3"
+checksum = "666d00490d4ac815001da55838c500eafb0320019bbaa44444137c48b443a853"
dependencies = [
"pest",
"pest_generator",
@@ -6543,9 +6544,9 @@ dependencies = [
[[package]]
name = "pest_generator"
-version = "2.7.1"
+version = "2.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "99d490fe7e8556575ff6911e45567ab95e71617f43781e5c05490dc8d75c965c"
+checksum = "68ca01446f50dbda87c1786af8770d535423fa8a53aec03b8f4e3d7eb10e0929"
dependencies = [
"pest",
"pest_meta",
@@ -6556,9 +6557,9 @@ dependencies = [
[[package]]
name = "pest_meta"
-version = "2.7.1"
+version = "2.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2674c66ebb4b4d9036012091b537aae5878970d6999f81a265034d85b136b341"
+checksum = "56af0a30af74d0445c0bf6d9d051c979b516a1a5af790d251daee76005420a48"
dependencies = [
"once_cell",
"pest",
@@ -6596,7 +6597,7 @@ dependencies = [
"ring",
"stringprep",
"thiserror",
- "time 0.3.24",
+ "time 0.3.25",
"tokio",
"tokio-rustls 0.24.1",
"tokio-util",
@@ -6653,18 +6654,18 @@ dependencies = [
[[package]]
name = "pin-project"
-version = "1.1.2"
+version = "1.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "030ad2bc4db10a8944cb0d837f158bdfec4d4a4873ab701a95046770d11f8842"
+checksum = "fda4ed1c6c173e3fc7a83629421152e01d7b1f9b7f65fb301e490e8cfc656422"
dependencies = [
"pin-project-internal",
]
[[package]]
name = "pin-project-internal"
-version = "1.1.2"
+version = "1.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ec2e072ecce94ec471b13398d5402c188e76ac03cf74dd1a975161b23a3f6d9c"
+checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405"
dependencies = [
"proc-macro2",
"quote",
@@ -6673,9 +6674,9 @@ dependencies = [
[[package]]
name = "pin-project-lite"
-version = "0.2.10"
+version = "0.2.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4c40d25201921e5ff0c862a505c6557ea88568a4e3ace775ab55e93f2f4f9d57"
+checksum = "12cc1b0bf1727a77a54b6654e7b5f1af8604923edc8b81885f8ec92f9e3f0a05"
[[package]]
name = "pin-utils"
@@ -6700,7 +6701,7 @@ version = "0.7.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c8ffb9f10fa047879315e6625af03c164b16962a5368d724ed16323b68ace47f"
dependencies = [
- "der 0.7.7",
+ "der 0.7.8",
"pkcs8 0.10.2",
"spki 0.7.2",
]
@@ -6722,7 +6723,7 @@ version = "0.10.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7"
dependencies = [
- "der 0.7.7",
+ "der 0.7.8",
"spki 0.7.2",
]
@@ -7185,9 +7186,9 @@ checksum = "3b7e158a385023d209d6d5f2585c4b468f6dcb3dd5aca9b75c4f1678c05bb375"
[[package]]
name = "pyo3"
-version = "0.19.1"
+version = "0.19.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ffb88ae05f306b4bfcde40ac4a51dc0b05936a9207a4b75b798c7729c4258a59"
+checksum = "e681a6cfdc4adcc93b4d3cf993749a4552018ee0a9b65fc0ccfad74352c72a38"
dependencies = [
"cfg-if 1.0.0",
"indoc",
@@ -7202,9 +7203,9 @@ dependencies = [
[[package]]
name = "pyo3-build-config"
-version = "0.19.1"
+version = "0.19.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "554db24f0b3c180a9c0b1268f91287ab3f17c162e15b54caaae5a6b3773396b0"
+checksum = "076c73d0bc438f7a4ef6fdd0c3bb4732149136abd952b110ac93e4edb13a6ba5"
dependencies = [
"once_cell",
"target-lexicon",
@@ -7212,9 +7213,9 @@ dependencies = [
[[package]]
name = "pyo3-ffi"
-version = "0.19.1"
+version = "0.19.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "922ede8759e8600ad4da3195ae41259654b9c55da4f7eec84a0ccc7d067a70a4"
+checksum = "e53cee42e77ebe256066ba8aa77eff722b3bb91f3419177cf4cd0f304d3284d9"
dependencies = [
"libc",
"pyo3-build-config",
@@ -7222,9 +7223,9 @@ dependencies = [
[[package]]
name = "pyo3-macros"
-version = "0.19.1"
+version = "0.19.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8a5caec6a1dd355964a841fcbeeb1b89fe4146c87295573f94228911af3cc5a2"
+checksum = "dfeb4c99597e136528c6dd7d5e3de5434d1ceaf487436a3f03b2d56b6fc9efd1"
dependencies = [
"proc-macro2",
"pyo3-macros-backend",
@@ -7234,9 +7235,9 @@ dependencies = [
[[package]]
name = "pyo3-macros-backend"
-version = "0.19.1"
+version = "0.19.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e0b78ccbb160db1556cdb6fd96c50334c5d4ec44dc5e0a968d0a1208fa0efa8b"
+checksum = "947dc12175c254889edc0c02e399476c2f652b4b9ebd123aa655c224de259536"
dependencies = [
"proc-macro2",
"quote",
@@ -7536,13 +7537,13 @@ dependencies = [
[[package]]
name = "regex"
-version = "1.9.1"
+version = "1.9.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b2eae68fc220f7cf2532e4494aded17545fce192d59cd996e0fe7887f4ceb575"
+checksum = "81bc1d4caf89fac26a70747fe603c130093b53c773888797a6329091246d651a"
dependencies = [
- "aho-corasick 1.0.2",
+ "aho-corasick 1.0.3",
"memchr",
- "regex-automata 0.3.4",
+ "regex-automata 0.3.6",
"regex-syntax 0.7.4",
]
@@ -7557,11 +7558,11 @@ dependencies = [
[[package]]
name = "regex-automata"
-version = "0.3.4"
+version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b7b6d6190b7594385f61bd3911cd1be99dfddcfc365a4160cc2ab5bff4aed294"
+checksum = "fed1ceff11a1dddaee50c9dc8e4938bd106e9d89ae372f192311e7da498e3b69"
dependencies = [
- "aho-corasick 1.0.2",
+ "aho-corasick 1.0.3",
"memchr",
"regex-syntax 0.7.4",
]
@@ -7662,7 +7663,7 @@ dependencies = [
"once_cell",
"percent-encoding",
"pin-project-lite",
- "rustls 0.21.5",
+ "rustls 0.21.6",
"rustls-native-certs",
"rustls-pemfile",
"serde",
@@ -7847,7 +7848,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6ab43bb47d23c1a631b4b680199a45255dce26fa9ab2fa902581f624ff13e6a8"
dependencies = [
"byteorder",
- "const-oid 0.9.4",
+ "const-oid 0.9.5",
"digest",
"num-bigint-dig",
"num-integer",
@@ -7871,7 +7872,7 @@ dependencies = [
"futures",
"futures-timer",
"rstest_macros",
- "rustc_version 0.4.0",
+ "rustc_version",
]
[[package]]
@@ -7883,7 +7884,7 @@ dependencies = [
"cfg-if 1.0.0",
"proc-macro2",
"quote",
- "rustc_version 0.4.0",
+ "rustc_version",
"syn 1.0.109",
"unicode-ident",
]
@@ -7896,7 +7897,7 @@ checksum = "45f80dcc84beab3a327bbe161f77db25f336a1452428176787c8c79ac79d7073"
dependencies = [
"quote",
"rand",
- "rustc_version 0.4.0",
+ "rustc_version",
"syn 1.0.109",
]
@@ -7983,22 +7984,13 @@ version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2"
-[[package]]
-name = "rustc_version"
-version = "0.3.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f0dfe2087c51c460008730de8b57e6a320782fbfb312e1f4d520e6c6fae155ee"
-dependencies = [
- "semver 0.11.0",
-]
-
[[package]]
name = "rustc_version"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366"
dependencies = [
- "semver 1.0.18",
+ "semver",
]
[[package]]
@@ -8031,11 +8023,11 @@ dependencies = [
[[package]]
name = "rustix"
-version = "0.38.4"
+version = "0.38.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0a962918ea88d644592894bc6dc55acc6c0956488adcebbfb6e273506b7fd6e5"
+checksum = "19ed4fa021d81c8392ce04db050a3da9a60299050b7ae1cf482d862b54a7218f"
dependencies = [
- "bitflags 2.3.3",
+ "bitflags 2.4.0",
"errno 0.3.2",
"libc",
"linux-raw-sys 0.4.5",
@@ -8056,13 +8048,13 @@ dependencies = [
[[package]]
name = "rustls"
-version = "0.21.5"
+version = "0.21.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "79ea77c539259495ce8ca47f53e66ae0330a8819f67e23ac96ca02f50e7b7d36"
+checksum = "1d1feddffcfcc0b33f5c6ce9a29e341e4cd59c3f78e7ee45f4a40c038b1d6cbb"
dependencies = [
"log",
"ring",
- "rustls-webpki 0.101.2",
+ "rustls-webpki 0.101.3",
"sct",
]
@@ -8099,9 +8091,9 @@ dependencies = [
[[package]]
name = "rustls-webpki"
-version = "0.101.2"
+version = "0.101.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "513722fd73ad80a71f72b61009ea1b584bcfa1483ca93949c8f290298837fa59"
+checksum = "261e9e0888cba427c3316e6322805653c9425240b6fd96cee7cb671ab70ab8d0"
dependencies = [
"ring",
"untrusted",
@@ -8359,7 +8351,7 @@ dependencies = [
"paste",
"rand",
"result-like",
- "rustc_version 0.4.0",
+ "rustc_version",
"rustpython-ast",
"rustpython-codegen",
"rustpython-common",
@@ -8676,15 +8668,6 @@ dependencies = [
"libc",
]
-[[package]]
-name = "semver"
-version = "0.11.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6"
-dependencies = [
- "semver-parser",
-]
-
[[package]]
name = "semver"
version = "1.0.18"
@@ -8694,15 +8677,6 @@ dependencies = [
"serde",
]
-[[package]]
-name = "semver-parser"
-version = "0.10.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "00b0bef5b7f9e0df16536d3961cfb6e84331c065b4066afb39768d0e319411f7"
-dependencies = [
- "pest",
-]
-
[[package]]
name = "seq-macro"
version = "0.3.5"
@@ -8711,9 +8685,9 @@ checksum = "a3f0bf26fd526d2a95683cd0f87bf103b8539e2ca1ef48ce002d67aad59aa0b4"
[[package]]
name = "serde"
-version = "1.0.180"
+version = "1.0.183"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0ea67f183f058fe88a4e3ec6e2788e003840893b91bac4559cabedd00863b3ed"
+checksum = "32ac8da02677876d532745a130fc9d8e6edfa81a269b107c5b00829b91d8eb3c"
dependencies = [
"serde_derive",
]
@@ -8730,9 +8704,9 @@ dependencies = [
[[package]]
name = "serde_derive"
-version = "1.0.180"
+version = "1.0.183"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "24e744d7782b686ab3b73267ef05697159cc0e5abbed3f47f9933165e5219036"
+checksum = "aafe972d60b0b9bee71a91b92fee2d4fb3c9d7e8f6b179aa99f27203d99a4816"
dependencies = [
"proc-macro2",
"quote",
@@ -8904,7 +8878,7 @@ dependencies = [
"rand",
"regex",
"rust-embed",
- "rustls 0.21.5",
+ "rustls 0.21.6",
"rustls-pemfile",
"schemars",
"script",
@@ -9065,7 +9039,7 @@ dependencies = [
"num-bigint",
"num-traits",
"thiserror",
- "time 0.3.24",
+ "time 0.3.25",
]
[[package]]
@@ -9205,7 +9179,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9d1e996ef02c474957d681f1b05213dfb0abab947b446a62d37770b23500184a"
dependencies = [
"base64ct",
- "der 0.7.7",
+ "der 0.7.8",
]
[[package]]
@@ -9619,7 +9593,7 @@ version = "0.25.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "290d54ea6f91c969195bdbcd7442c8c2a2ba87da8bf60a7ee86a235d4bc1e125"
dependencies = [
- "strum_macros 0.25.1",
+ "strum_macros 0.25.2",
]
[[package]]
@@ -9649,9 +9623,9 @@ dependencies = [
[[package]]
name = "strum_macros"
-version = "0.25.1"
+version = "0.25.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6069ca09d878a33f883cc06aaa9718ede171841d3832450354410b718b097232"
+checksum = "ad8d03b598d3d0fff69bf533ee3ef19b8eeb342729596df84bcc7e1f96ec4059"
dependencies = [
"heck 0.4.1",
"proc-macro2",
@@ -9691,7 +9665,7 @@ dependencies = [
"prost",
"session",
"snafu",
- "substrait 0.12.3",
+ "substrait 0.12.4",
"table",
"tokio",
]
@@ -9709,7 +9683,7 @@ dependencies = [
"prost-build",
"prost-types",
"schemars",
- "semver 1.0.18",
+ "semver",
"serde",
"serde_json",
"serde_yaml",
@@ -9720,9 +9694,9 @@ dependencies = [
[[package]]
name = "substrait"
-version = "0.12.3"
+version = "0.12.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2ac1ce8315086b127ca0abf162c62279550942bb26ebf7946fe17fe114446472"
+checksum = "658f6cbbd29a250869b87e1bb5a4b42db534cacfc1c03284f2536cd36b6c1617"
dependencies = [
"git2",
"heck 0.4.1",
@@ -9731,7 +9705,7 @@ dependencies = [
"prost-build",
"prost-types",
"schemars",
- "semver 1.0.18",
+ "semver",
"serde",
"serde_json",
"serde_yaml",
@@ -9915,14 +9889,14 @@ dependencies = [
[[package]]
name = "tempfile"
-version = "3.7.0"
+version = "3.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5486094ee78b2e5038a6382ed7645bc084dc2ec433426ca4c3cb61e2007b8998"
+checksum = "dc02fddf48964c42031a0b3fe0428320ecf3a73c401040fc0096f97794310651"
dependencies = [
"cfg-if 1.0.0",
"fastrand 2.0.0",
"redox_syscall 0.3.5",
- "rustix 0.38.4",
+ "rustix 0.38.8",
"windows-sys 0.48.0",
]
@@ -10156,9 +10130,9 @@ dependencies = [
[[package]]
name = "time"
-version = "0.3.24"
+version = "0.3.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b79eabcd964882a646b3584543ccabeae7869e9ac32a46f6f22b7a5bd405308b"
+checksum = "b0fdd63d58b18d663fbdf70e049f00a22c8e42be082203be7f26589213cd75ea"
dependencies = [
"deranged",
"itoa",
@@ -10226,11 +10200,10 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
[[package]]
name = "tokio"
-version = "1.29.1"
+version = "1.31.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "532826ff75199d5833b9d2c5fe410f29235e25704ee5f0ef599fb51c21f4a4da"
+checksum = "40de3a2ba249dcb097e01be5e67a5ff53cf250397715a071a81543e8a832a920"
dependencies = [
- "autocfg",
"backtrace",
"bytes",
"libc",
@@ -10239,7 +10212,7 @@ dependencies = [
"parking_lot 0.12.1",
"pin-project-lite",
"signal-hook-registry",
- "socket2 0.4.9",
+ "socket2 0.5.3",
"tokio-macros",
"tracing",
"windows-sys 0.48.0",
@@ -10298,7 +10271,7 @@ checksum = "dd5831152cb0d3f79ef5523b357319ba154795d64c7078b2daa95a803b54057f"
dependencies = [
"futures",
"ring",
- "rustls 0.21.5",
+ "rustls 0.21.6",
"tokio",
"tokio-postgres",
"tokio-rustls 0.24.1",
@@ -10321,7 +10294,7 @@ version = "0.24.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081"
dependencies = [
- "rustls 0.21.5",
+ "rustls 0.21.6",
"tokio",
]
@@ -10579,7 +10552,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09d48f71a791638519505cefafe162606f706c25592e4bde4d97600c0195312e"
dependencies = [
"crossbeam-channel",
- "time 0.3.24",
+ "time 0.3.25",
"tracing-subscriber",
]
@@ -10605,7 +10578,7 @@ dependencies = [
"log",
"serde",
"serde_json",
- "time 0.3.24",
+ "time 0.3.25",
"tracing",
"tracing-core",
"tracing-log",
@@ -11142,7 +11115,7 @@ dependencies = [
"getset",
"rustversion",
"thiserror",
- "time 0.3.24",
+ "time 0.3.25",
]
[[package]]
@@ -11153,12 +11126,12 @@ checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
[[package]]
name = "vob"
-version = "3.0.2"
+version = "3.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cbdb3eee5dd38a27129832bca4a3171888e699a6ac36de86547975466997986f"
+checksum = "c058f4c41e71a043c67744cb76dcc1ae63ece328c1732a72489ccccc2dec23e6"
dependencies = [
"num-traits",
- "rustc_version 0.3.3",
+ "rustc_version",
"serde",
]
@@ -11591,9 +11564,9 @@ checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a"
[[package]]
name = "winnow"
-version = "0.5.2"
+version = "0.5.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8bd122eb777186e60c3fdf765a58ac76e41c582f1f535fbf3314434c6b58f3f7"
+checksum = "5504cc7644f4b593cbc05c4a55bf9bd4e94b867c3c0bd440934174d50482427d"
dependencies = [
"memchr",
]
@@ -11625,7 +11598,7 @@ dependencies = [
"bcder",
"bytes",
"chrono",
- "der 0.7.7",
+ "der 0.7.8",
"hex",
"pem 2.0.1",
"ring",
diff --git a/Cargo.toml b/Cargo.toml
index 6cc91e9c2f6f..7e5f5268d449 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -77,7 +77,7 @@ datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git
derive_builder = "0.12"
futures = "0.3"
futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "9b68af55c050a010f202fcccb22d58f080f0a868" }
+greptime-proto = { git = "https://github.com/NiwakaDev/greptime-proto.git", rev = "ec402b6500f908a0acfab6c889225cd4dc2228a4" }
itertools = "0.10"
lazy_static = "1.4"
once_cell = "1.18"
diff --git a/src/api/src/helper.rs b/src/api/src/helper.rs
index 98107438a69a..c894a4c72350 100644
--- a/src/api/src/helper.rs
+++ b/src/api/src/helper.rs
@@ -300,9 +300,9 @@ pub fn request_type(request: &Request) -> &'static str {
Request::Inserts(_) => "inserts",
Request::Query(query_req) => query_request_type(query_req),
Request::Ddl(ddl_req) => ddl_request_type(ddl_req),
- Request::Delete(_) => "delete",
Request::RowInserts(_) => "row_inserts",
Request::RowDelete(_) => "row_delete",
+ Request::Deletes(_) => "deletes",
}
}
diff --git a/src/client/src/database.rs b/src/client/src/database.rs
index 80452a0cfd6c..e0c702228227 100644
--- a/src/client/src/database.rs
+++ b/src/client/src/database.rs
@@ -17,7 +17,7 @@ use api::v1::ddl_request::Expr as DdlExpr;
use api::v1::greptime_request::Request;
use api::v1::query_request::Query;
use api::v1::{
- AlterExpr, AuthHeader, CompactTableExpr, CreateTableExpr, DdlRequest, DeleteRequest,
+ AlterExpr, AuthHeader, CompactTableExpr, CreateTableExpr, DdlRequest, DeleteRequests,
DropTableExpr, FlushTableExpr, GreptimeRequest, InsertRequests, PromRangeQuery, QueryRequest,
RequestHeader, TruncateTableExpr,
};
@@ -132,9 +132,9 @@ impl Database {
Ok(stream_inserter)
}
- pub async fn delete(&self, request: DeleteRequest) -> Result<u32> {
+ pub async fn delete(&self, request: DeleteRequests) -> Result<u32> {
let _timer = timer!(metrics::METRIC_GRPC_DELETE);
- self.handle(Request::Delete(request)).await
+ self.handle(Request::Deletes(request)).await
}
async fn handle(&self, request: Request) -> Result<u32> {
diff --git a/src/common/grpc-expr/src/delete.rs b/src/common/grpc-expr/src/delete.rs
index cd228857edf5..18a480e8a4c3 100644
--- a/src/common/grpc-expr/src/delete.rs
+++ b/src/common/grpc-expr/src/delete.rs
@@ -23,7 +23,11 @@ use table::requests::DeleteRequest;
use crate::error::{ColumnDataTypeSnafu, IllegalDeleteRequestSnafu, Result};
use crate::insert::add_values_to_builder;
-pub fn to_table_delete_request(request: GrpcDeleteRequest) -> Result<DeleteRequest> {
+pub fn to_table_delete_request(
+ catalog_name: &str,
+ schema_name: &str,
+ request: GrpcDeleteRequest,
+) -> Result<DeleteRequest> {
let row_count = request.row_count as usize;
let mut key_column_values = HashMap::with_capacity(request.key_columns.len());
@@ -52,7 +56,12 @@ pub fn to_table_delete_request(request: GrpcDeleteRequest) -> Result<DeleteReque
);
}
- Ok(DeleteRequest { key_column_values })
+ Ok(DeleteRequest {
+ catalog_name: catalog_name.to_string(),
+ schema_name: schema_name.to_string(),
+ table_name: request.table_name,
+ key_column_values,
+ })
}
#[cfg(test)]
@@ -94,8 +103,12 @@ mod tests {
row_count: 3,
};
- let mut request = to_table_delete_request(grpc_request).unwrap();
+ let mut request =
+ to_table_delete_request("foo_catalog", "foo_schema", grpc_request).unwrap();
+ assert_eq!(request.catalog_name, "foo_catalog");
+ assert_eq!(request.schema_name, "foo_schema");
+ assert_eq!(request.table_name, "foo");
assert_eq!(
Arc::new(Int32Vector::from_slice(vec![1, 2, 3])) as VectorRef,
request.key_column_values.remove("id").unwrap()
diff --git a/src/datanode/src/instance/grpc.rs b/src/datanode/src/instance/grpc.rs
index 0b8d3f0a4697..f8dcff589175 100644
--- a/src/datanode/src/instance/grpc.rs
+++ b/src/datanode/src/instance/grpc.rs
@@ -18,7 +18,7 @@ use std::sync::Arc;
use api::v1::ddl_request::Expr as DdlExpr;
use api::v1::greptime_request::Request;
use api::v1::query_request::Query;
-use api::v1::{CreateDatabaseExpr, DdlRequest, DeleteRequest, InsertRequests};
+use api::v1::{CreateDatabaseExpr, DdlRequest, DeleteRequests, InsertRequests};
use async_trait::async_trait;
use catalog::CatalogManagerRef;
use common_grpc_expr::insert::to_table_insert_request;
@@ -164,27 +164,38 @@ impl Instance {
Ok(Output::AffectedRows(affected_rows))
}
- async fn handle_delete(&self, request: DeleteRequest, ctx: QueryContextRef) -> Result<Output> {
- let catalog = ctx.current_catalog();
- let schema = ctx.current_schema();
- let table_name = &request.table_name.clone();
- let table_ref = TableReference::full(catalog, schema, table_name);
-
- let table = self
- .catalog_manager
- .table(catalog, schema, table_name)
- .await
- .context(CatalogSnafu)?
- .with_context(|| TableNotFoundSnafu {
- table_name: table_ref.to_string(),
- })?;
+ async fn handle_deletes(
+ &self,
+ request: DeleteRequests,
+ ctx: QueryContextRef,
+ ) -> Result<Output> {
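+        // Handle each delete concurrently on the write runtime, then sum up the affected rows.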
+ let results = future::try_join_all(request.deletes.into_iter().map(|delete| {
+ let catalog_manager = self.catalog_manager.clone();
+ let catalog = ctx.current_catalog().to_string();
+ let schema = ctx.current_schema().to_string();
+ common_runtime::spawn_write(async move {
+ let table_name = delete.table_name.clone();
+ let table_ref = TableReference::full(&catalog, &schema, &table_name);
+ let table = catalog_manager
+ .table(&catalog, &schema, &table_name)
+ .await
+ .context(CatalogSnafu)?
+ .with_context(|| TableNotFoundSnafu {
+ table_name: table_ref.to_string(),
+ })?;
- let request = common_grpc_expr::delete::to_table_delete_request(request)
- .context(DeleteExprToRequestSnafu)?;
+ let request =
+ common_grpc_expr::delete::to_table_delete_request(&catalog, &schema, delete)
+ .context(DeleteExprToRequestSnafu)?;
- let affected_rows = table.delete(request).await.with_context(|_| DeleteSnafu {
- table_name: table_ref.to_string(),
- })?;
+ table.delete(request).await.with_context(|_| DeleteSnafu {
+ table_name: table_ref.to_string(),
+ })
+ })
+ }))
+ .await
+ .context(JoinTaskSnafu)?;
+ let affected_rows = results.into_iter().sum::<Result<usize>>()?;
Ok(Output::AffectedRows(affected_rows))
}
@@ -211,7 +222,7 @@ impl GrpcQueryHandler for Instance {
async fn do_query(&self, request: Request, ctx: QueryContextRef) -> Result<Output> {
match request {
Request::Inserts(requests) => self.handle_inserts(requests, &ctx).await,
- Request::Delete(request) => self.handle_delete(request, ctx).await,
+ Request::Deletes(request) => self.handle_deletes(request, ctx).await,
Request::Query(query_request) => {
let query = query_request
.query
@@ -310,8 +321,8 @@ mod test {
use api::v1::column::Values;
use api::v1::{
alter_expr, AddColumn, AddColumns, AlterExpr, Column, ColumnDataType, ColumnDef,
- CreateDatabaseExpr, CreateTableExpr, DropTableExpr, InsertRequest, InsertRequests,
- QueryRequest, RenameTable, SemanticType, TableId, TruncateTableExpr,
+ CreateDatabaseExpr, CreateTableExpr, DeleteRequest, DropTableExpr, InsertRequest,
+ InsertRequests, QueryRequest, RenameTable, SemanticType, TableId, TruncateTableExpr,
};
use common_catalog::consts::MITO_ENGINE;
use common_error::ext::ErrorExt;
@@ -903,7 +914,7 @@ mod test {
let output = instance.do_query(query, QueryContext::arc()).await.unwrap();
assert!(matches!(output, Output::AffectedRows(3)));
- let request = DeleteRequest {
+ let request1 = DeleteRequest {
table_name: "demo".to_string(),
region_number: 0,
key_columns: vec![
@@ -928,13 +939,39 @@ mod test {
],
row_count: 1,
};
-
- let request = Request::Delete(request);
+ let request2 = DeleteRequest {
+ table_name: "demo".to_string(),
+ region_number: 0,
+ key_columns: vec![
+ Column {
+ column_name: "host".to_string(),
+ values: Some(Values {
+ string_values: vec!["host3".to_string()],
+ ..Default::default()
+ }),
+ datatype: ColumnDataType::String as i32,
+ ..Default::default()
+ },
+ Column {
+ column_name: "ts".to_string(),
+ values: Some(Values {
+ ts_millisecond_values: vec![1672201026000],
+ ..Default::default()
+ }),
+ datatype: ColumnDataType::TimestampMillisecond as i32,
+ ..Default::default()
+ },
+ ],
+ row_count: 1,
+ };
+ let request = Request::Deletes(DeleteRequests {
+ deletes: vec![request1, request2],
+ });
let output = instance
.do_query(request, QueryContext::arc())
.await
.unwrap();
- assert!(matches!(output, Output::AffectedRows(1)));
+ assert!(matches!(output, Output::AffectedRows(2)));
let output = exec_selection(instance, "SELECT ts, host, cpu FROM demo").await;
let Output::Stream(stream) = output else {
@@ -946,7 +983,6 @@ mod test {
| ts | host | cpu |
+---------------------+-------+------+
| 2022-12-28T04:17:05 | host1 | 66.6 |
-| 2022-12-28T04:17:06 | host3 | 88.8 |
+---------------------+-------+------+";
assert_eq!(recordbatches.pretty_print().unwrap(), expected);
}
diff --git a/src/frontend/src/error.rs b/src/frontend/src/error.rs
index 1121a2a0a7a6..e5c4d03c1a9c 100644
--- a/src/frontend/src/error.rs
+++ b/src/frontend/src/error.rs
@@ -209,6 +209,12 @@ pub enum Error {
location: Location,
},
+ #[snafu(display("Failed to split delete request, source: {}", source))]
+ SplitDelete {
+ source: partition::error::Error,
+ location: Location,
+ },
+
#[snafu(display("Failed to create table info, source: {}", source))]
CreateTableInfo {
#[snafu(backtrace)]
@@ -409,6 +415,12 @@ pub enum Error {
source: table::error::Error,
},
+ #[snafu(display("Missing time index column: {}", source))]
+ MissingTimeIndexColumn {
+ location: Location,
+ source: table::error::Error,
+ },
+
#[snafu(display("Failed to start script manager, source: {}", source))]
StartScriptManager {
#[snafu(backtrace)]
@@ -644,6 +656,8 @@ impl ErrorExt for Error {
source.status_code()
}
+ Error::MissingTimeIndexColumn { source, .. } => source.status_code(),
+
Error::FindDatanode { .. }
| Error::CreateTableRoute { .. }
| Error::FindRegionRoute { .. }
@@ -693,7 +707,8 @@ impl ErrorExt for Error {
Error::DeserializePartition { source, .. }
| Error::FindTablePartitionRule { source, .. }
| Error::FindTableRoute { source, .. }
- | Error::SplitInsert { source, .. } => source.status_code(),
+ | Error::SplitInsert { source, .. }
+ | Error::SplitDelete { source, .. } => source.status_code(),
Error::UnrecognizedTableOption { .. } => StatusCode::InvalidArguments,
diff --git a/src/frontend/src/instance/distributed.rs b/src/frontend/src/instance/distributed.rs
index 99452fb9c755..67f8abd4afc2 100644
--- a/src/frontend/src/instance/distributed.rs
+++ b/src/frontend/src/instance/distributed.rs
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+pub mod deleter;
pub(crate) mod inserter;
use std::collections::HashMap;
@@ -21,7 +22,7 @@ use api::helper::ColumnDataTypeWrapper;
use api::v1::ddl_request::Expr as DdlExpr;
use api::v1::greptime_request::Request;
use api::v1::{
- column_def, AlterExpr, CompactTableExpr, CreateDatabaseExpr, CreateTableExpr, DeleteRequest,
+ column_def, AlterExpr, CompactTableExpr, CreateDatabaseExpr, CreateTableExpr, DeleteRequests,
FlushTableExpr, InsertRequests, TruncateTableExpr,
};
use async_trait::async_trait;
@@ -56,7 +57,6 @@ use sql::statements::create::{PartitionEntry, Partitions};
use sql::statements::statement::Statement;
use sql::statements::{self, sql_value_to_value};
use store_api::storage::RegionNumber;
-use table::engine::TableReference;
use table::metadata::{RawTableInfo, RawTableMeta, TableId, TableIdent, TableInfo, TableType};
use table::requests::{AlterTableRequest, TableOptions};
use table::TableRef;
@@ -66,9 +66,10 @@ use crate::error::{
self, AlterExprToRequestSnafu, CatalogSnafu, ColumnDataTypeSnafu, ColumnNotFoundSnafu,
DeserializePartitionSnafu, InvokeDatanodeSnafu, NotSupportedSnafu, ParseSqlSnafu,
RequestDatanodeSnafu, RequestMetaSnafu, Result, SchemaExistsSnafu, TableAlreadyExistSnafu,
- TableNotFoundSnafu, TableSnafu, ToTableDeleteRequestSnafu, UnrecognizedTableOptionSnafu,
+ TableNotFoundSnafu, TableSnafu, UnrecognizedTableOptionSnafu,
};
use crate::expr_factory;
+use crate::instance::distributed::deleter::DistDeleter;
use crate::instance::distributed::inserter::DistInserter;
use crate::table::DistTable;
@@ -626,27 +627,15 @@ impl DistInstance {
async fn handle_dist_delete(
&self,
- request: DeleteRequest,
+ request: DeleteRequests,
ctx: QueryContextRef,
) -> Result<Output> {
- let catalog = ctx.current_catalog();
- let schema = ctx.current_schema();
- let table_name = &request.table_name;
- let table_ref = TableReference::full(catalog, schema, table_name);
-
- let table = self
- .catalog_manager
- .table(catalog, schema, table_name)
- .await
- .context(CatalogSnafu)?
- .with_context(|| TableNotFoundSnafu {
- table_name: table_ref.to_string(),
- })?;
-
- let request = common_grpc_expr::delete::to_table_delete_request(request)
- .context(ToTableDeleteRequestSnafu)?;
-
- let affected_rows = table.delete(request).await.context(TableSnafu)?;
+ let deleter = DistDeleter::new(
+ ctx.current_catalog().to_string(),
+ ctx.current_schema().to_string(),
+ self.catalog_manager(),
+ );
+ let affected_rows = deleter.grpc_delete(request).await?;
Ok(Output::AffectedRows(affected_rows))
}
@@ -676,11 +665,11 @@ impl GrpcQueryHandler for DistInstance {
async fn do_query(&self, request: Request, ctx: QueryContextRef) -> Result<Output> {
match request {
Request::Inserts(requests) => self.handle_dist_insert(requests, ctx).await,
- Request::Delete(request) => self.handle_dist_delete(request, ctx).await,
Request::RowInserts(_) | Request::RowDelete(_) => NotSupportedSnafu {
feat: "row insert/delete",
}
.fail(),
+ Request::Deletes(requests) => self.handle_dist_delete(requests, ctx).await,
Request::Query(_) => {
unreachable!("Query should have been handled directly in Frontend Instance!")
}
diff --git a/src/frontend/src/instance/distributed/deleter.rs b/src/frontend/src/instance/distributed/deleter.rs
new file mode 100644
index 000000000000..3ebba9827bc5
--- /dev/null
+++ b/src/frontend/src/instance/distributed/deleter.rs
@@ -0,0 +1,386 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::collections::HashMap;
+use std::iter;
+use std::sync::Arc;
+
+use api::v1::DeleteRequests;
+use catalog::CatalogManager;
+use client::Database;
+use common_grpc_expr::delete::to_table_delete_request;
+use common_meta::peer::Peer;
+use common_meta::table_name::TableName;
+use futures::future;
+use snafu::{OptionExt, ResultExt};
+use table::requests::DeleteRequest;
+
+use crate::catalog::FrontendCatalogManager;
+use crate::error::{
+ CatalogSnafu, FindDatanodeSnafu, FindTableRouteSnafu, JoinTaskSnafu,
+ MissingTimeIndexColumnSnafu, RequestDatanodeSnafu, Result, SplitDeleteSnafu,
+ TableNotFoundSnafu, ToTableDeleteRequestSnafu,
+};
+use crate::table::delete::to_grpc_delete_request;
+
+/// A distributed deleter. It ingests GRPC [DeleteRequests] or table [DeleteRequest] (so it can be
+/// used in protocol handlers or in the table deletion API).
+///
+/// Table data partitioning and Datanode request batching are handled inside.
+///
+/// Note that the deleter is confined to a single catalog and schema. I.e., it cannot handle
+/// multiple delete requests with different catalogs or schemas (it will throw a "NotSupported" error).
+/// This is because we currently do not have this kind of requirement. Let's keep it simple for now.
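+///
+/// A minimal usage sketch (the `catalog`, `schema`, `catalog_manager` and `requests` bindings are
+/// assumed to be in scope; see `handle_dist_delete` in `distributed.rs` for the actual call site):
+///
+/// ```ignore
+/// let deleter = DistDeleter::new(catalog, schema, catalog_manager);
+/// let affected_rows = deleter.grpc_delete(requests).await?;
+/// ```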
+pub(crate) struct DistDeleter {
+ catalog: String,
+ schema: String,
+ catalog_manager: Arc<FrontendCatalogManager>,
+}
+
+impl DistDeleter {
+ pub(crate) fn new(
+ catalog: String,
+ schema: String,
+ catalog_manager: Arc<FrontendCatalogManager>,
+ ) -> Self {
+ Self {
+ catalog,
+ schema,
+ catalog_manager,
+ }
+ }
+
+ pub async fn grpc_delete(&self, requests: DeleteRequests) -> Result<usize> {
+ let deletes = requests
+ .deletes
+ .into_iter()
+ .map(|delete| {
+ to_table_delete_request(&self.catalog, &self.schema, delete)
+ .context(ToTableDeleteRequestSnafu)
+ })
+ .collect::<Result<Vec<_>>>()?;
+ self.delete(deletes).await
+ }
+
+ pub(crate) async fn delete(&self, requests: Vec<DeleteRequest>) -> Result<usize> {
+ debug_assert!(requests
+ .iter()
+ .all(|x| x.catalog_name == self.catalog && x.schema_name == self.schema));
+ let deletes = self.split_deletes(requests).await?;
+ self.request_datanodes(deletes).await
+ }
+
+ async fn split_deletes(
+ &self,
+ requests: Vec<DeleteRequest>,
+ ) -> Result<HashMap<Peer, DeleteRequests>> {
+ let partition_manager = self.catalog_manager.partition_manager();
+
+ let mut deletes = HashMap::new();
+
+ for request in requests {
+ let table_name = &request.table_name;
+ let table = self
+ .catalog_manager
+ .table(&self.catalog, &self.schema, table_name)
+ .await
+ .context(CatalogSnafu)?
+ .with_context(|| TableNotFoundSnafu {
+ table_name: table_name.to_string(),
+ })?;
+ let table_info = table.table_info();
+ let table_meta = &table_info.meta;
+
+ let table_id = table_info.table_id();
+ let table_name = &request.table_name;
+ let schema = table.schema();
+ let time_index = &schema
+ .timestamp_column()
+ .with_context(|| table::error::MissingTimeIndexColumnSnafu {
+ table_name: table_name.to_string(),
+ })
+ .context(MissingTimeIndexColumnSnafu)?
+ .name;
+ let primary_key_column_names = table_info
+ .meta
+ .row_key_column_names()
+ .chain(iter::once(time_index))
+ .collect::<Vec<_>>();
+ let table_name = request.table_name.clone();
+ let split = partition_manager
+ .split_delete_request(table_id, request, primary_key_column_names)
+ .await
+ .context(SplitDeleteSnafu)?;
+ let table_route = partition_manager
+ .find_table_route(table_id)
+ .await
+ .with_context(|_| FindTableRouteSnafu {
+ table_name: table_name.to_string(),
+ })?;
+
+ for (region_number, delete) in split {
+ let datanode =
+ table_route
+ .find_region_leader(region_number)
+ .context(FindDatanodeSnafu {
+ region: region_number,
+ })?;
+ let table_name = TableName::new(&self.catalog, &self.schema, &table_name);
+ let delete =
+ to_grpc_delete_request(table_meta, &table_name, region_number, delete)?;
+ deletes
+ .entry(datanode.clone())
+ .or_insert_with(|| DeleteRequests { deletes: vec![] })
+ .deletes
+ .push(delete);
+ }
+ }
+ Ok(deletes)
+ }
+
+ async fn request_datanodes(&self, deletes: HashMap<Peer, DeleteRequests>) -> Result<usize> {
+ let results = future::try_join_all(deletes.into_iter().map(|(peer, deletes)| {
+ let datanode_clients = self.catalog_manager.datanode_clients();
+ let catalog = self.catalog.clone();
+ let schema = self.schema.clone();
+
+ common_runtime::spawn_write(async move {
+ let client = datanode_clients.get_client(&peer).await;
+ let database = Database::new(&catalog, &schema, client);
+ database.delete(deletes).await.context(RequestDatanodeSnafu)
+ })
+ }))
+ .await
+ .context(JoinTaskSnafu)?;
+
+ let affected_rows = results.into_iter().sum::<Result<u32>>()?;
+ Ok(affected_rows as usize)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use api::v1::column::Values;
+ use api::v1::{Column, ColumnDataType, DeleteRequest as GrpcDeleteRequest, SemanticType};
+ use client::client_manager::DatanodeClients;
+ use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
+ use common_meta::helper::{CatalogValue, SchemaValue};
+ use common_meta::key::catalog_name::CatalogNameKey;
+ use common_meta::key::schema_name::SchemaNameKey;
+ use common_meta::key::table_name::TableNameKey;
+ use common_meta::key::table_region::RegionDistribution;
+ use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
+ use common_meta::kv_backend::memory::MemoryKvBackend;
+ use common_meta::kv_backend::{KvBackend, KvBackendRef};
+ use common_meta::rpc::store::PutRequest;
+ use datatypes::prelude::{ConcreteDataType, VectorRef};
+ use datatypes::schema::{ColumnDefaultConstraint, ColumnSchema, Schema};
+ use datatypes::vectors::Int32Vector;
+ use table::metadata::{RawTableInfo, TableInfoBuilder, TableMetaBuilder};
+
+ use super::*;
+ use crate::heartbeat::handler::tests::MockKvCacheInvalidator;
+ use crate::table::test::create_partition_rule_manager;
+
+ async fn prepare_mocked_backend() -> KvBackendRef {
+ let backend = Arc::new(MemoryKvBackend::default());
+
+ let default_catalog = CatalogNameKey {
+ catalog: DEFAULT_CATALOG_NAME,
+ }
+ .to_string();
+ let req = PutRequest::new()
+ .with_key(default_catalog.as_bytes())
+ .with_value(CatalogValue.as_bytes().unwrap());
+ backend.put(req).await.unwrap();
+
+ let default_schema = SchemaNameKey {
+ catalog: DEFAULT_CATALOG_NAME,
+ schema: DEFAULT_SCHEMA_NAME,
+ }
+ .to_string();
+ let req = PutRequest::new()
+ .with_key(default_schema.as_bytes())
+ .with_value(SchemaValue.as_bytes().unwrap());
+ backend.put(req).await.unwrap();
+
+ backend
+ }
+
+ async fn create_testing_table(
+ table_name: &str,
+ table_metadata_manager: &TableMetadataManagerRef,
+ ) {
+ let schema = Arc::new(Schema::new(vec![
+ ColumnSchema::new("ts", ConcreteDataType::int64_datatype(), false)
+ .with_time_index(true)
+ .with_default_constraint(Some(ColumnDefaultConstraint::Function(
+ "current_timestamp()".to_string(),
+ )))
+ .unwrap(),
+ ColumnSchema::new("a", ConcreteDataType::int32_datatype(), true),
+ ColumnSchema::new("value", ConcreteDataType::int32_datatype(), false),
+ ]));
+
+ let table_meta = TableMetaBuilder::default()
+ .schema(schema)
+ .primary_key_indices(vec![1])
+ .next_column_id(1)
+ .build()
+ .unwrap();
+
+ let table_id = 1;
+ let table_info: RawTableInfo = TableInfoBuilder::new(table_name, table_meta)
+ .table_id(table_id)
+ .build()
+ .unwrap()
+ .into();
+
+ let key = TableNameKey::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name);
+ assert!(table_metadata_manager
+ .table_name_manager()
+ .create(&key, table_id)
+ .await
+ .is_ok());
+
+ assert!(table_metadata_manager
+ .table_info_manager()
+ .compare_and_put(table_id, None, table_info)
+ .await
+ .is_ok());
+
+ let _ = table_metadata_manager
+ .table_region_manager()
+ .compare_and_put(
+ 1,
+ None,
+ RegionDistribution::from([(1, vec![1]), (2, vec![2]), (3, vec![3])]),
+ )
+ .await
+ .unwrap();
+ }
+
+ #[tokio::test]
+ async fn test_split_deletes() {
+ let backend = prepare_mocked_backend().await;
+
+ let table_metadata_manager = Arc::new(TableMetadataManager::new(backend.clone()));
+ let table_name = "one_column_partitioning_table";
+ create_testing_table(table_name, &table_metadata_manager).await;
+
+ let catalog_manager = Arc::new(FrontendCatalogManager::new(
+ backend,
+ Arc::new(MockKvCacheInvalidator::default()),
+ create_partition_rule_manager().await,
+ Arc::new(DatanodeClients::default()),
+ table_metadata_manager,
+ ));
+
+ let new_delete_request = |vector: VectorRef| -> DeleteRequest {
+ DeleteRequest {
+ catalog_name: DEFAULT_CATALOG_NAME.to_string(),
+ schema_name: DEFAULT_SCHEMA_NAME.to_string(),
+ table_name: table_name.to_string(),
+ key_column_values: HashMap::from([("a".to_string(), vector)]),
+ }
+ };
+ let requests = vec![
+ new_delete_request(Arc::new(Int32Vector::from(vec![
+ Some(1),
+ Some(11),
+ Some(50),
+ ]))),
+ new_delete_request(Arc::new(Int32Vector::from(vec![
+ Some(2),
+ Some(12),
+ Some(102),
+ ]))),
+ ];
+
+ let deleter = DistDeleter::new(
+ DEFAULT_CATALOG_NAME.to_string(),
+ DEFAULT_SCHEMA_NAME.to_string(),
+ catalog_manager,
+ );
+ let mut deletes = deleter.split_deletes(requests).await.unwrap();
+
+ assert_eq!(deletes.len(), 3);
+
+ let new_grpc_delete_request = |column_values: Vec<i32>,
+ null_mask: Vec<u8>,
+ row_count: u32,
+ region_number: u32|
+ -> GrpcDeleteRequest {
+ GrpcDeleteRequest {
+ table_name: table_name.to_string(),
+ key_columns: vec![Column {
+ column_name: "a".to_string(),
+ semantic_type: SemanticType::Tag as i32,
+ values: Some(Values {
+ i32_values: column_values,
+ ..Default::default()
+ }),
+ null_mask,
+ datatype: ColumnDataType::Int32 as i32,
+ }],
+ row_count,
+ region_number,
+ }
+ };
+
+ // region to datanode placement:
+ // 1 -> 1
+ // 2 -> 2
+ // 3 -> 3
+ //
+ // region value ranges:
+ // 1 -> [50, max)
+ // 2 -> [10, 50)
+ // 3 -> (min, 10)
+
+ let datanode_deletes = deletes.remove(&Peer::new(1, "")).unwrap().deletes;
+ assert_eq!(datanode_deletes.len(), 2);
+
+ assert_eq!(
+ datanode_deletes[0],
+ new_grpc_delete_request(vec![50], vec![0], 1, 1)
+ );
+ assert_eq!(
+ datanode_deletes[1],
+ new_grpc_delete_request(vec![102], vec![0], 1, 1)
+ );
+
+ let datanode_deletes = deletes.remove(&Peer::new(2, "")).unwrap().deletes;
+ assert_eq!(datanode_deletes.len(), 2);
+ assert_eq!(
+ datanode_deletes[0],
+ new_grpc_delete_request(vec![11], vec![0], 1, 2)
+ );
+ assert_eq!(
+ datanode_deletes[1],
+ new_grpc_delete_request(vec![12], vec![0], 1, 2)
+ );
+
+ let datanode_deletes = deletes.remove(&Peer::new(3, "")).unwrap().deletes;
+ assert_eq!(datanode_deletes.len(), 2);
+ assert_eq!(
+ datanode_deletes[0],
+ new_grpc_delete_request(vec![1], vec![0], 1, 3)
+ );
+ assert_eq!(
+ datanode_deletes[1],
+ new_grpc_delete_request(vec![2], vec![0], 1, 3)
+ );
+ }
+}
diff --git a/src/frontend/src/instance/distributed/inserter.rs b/src/frontend/src/instance/distributed/inserter.rs
index dbbaf4b51488..fa8b6307203f 100644
--- a/src/frontend/src/instance/distributed/inserter.rs
+++ b/src/frontend/src/instance/distributed/inserter.rs
@@ -312,6 +312,7 @@ mod tests {
];
let mut inserts = inserter.split_inserts(requests).await.unwrap();
+
assert_eq!(inserts.len(), 3);
let new_grpc_insert_request = |column_values: Vec<i32>,
diff --git a/src/frontend/src/instance/grpc.rs b/src/frontend/src/instance/grpc.rs
index 148a36ea0e02..93254b9312e3 100644
--- a/src/frontend/src/instance/grpc.rs
+++ b/src/frontend/src/instance/grpc.rs
@@ -90,7 +90,7 @@ impl GrpcQueryHandler for Instance {
}
}
}
- Request::Ddl(_) | Request::Delete(_) => {
+ Request::Ddl(_) | Request::Deletes(_) => {
GrpcQueryHandler::do_query(self.grpc_query_handler.as_ref(), request, ctx.clone())
.await?
}
diff --git a/src/frontend/src/table.rs b/src/frontend/src/table.rs
index 52758ad64ade..281dcdab26ad 100644
--- a/src/frontend/src/table.rs
+++ b/src/frontend/src/table.rs
@@ -13,7 +13,6 @@
// limitations under the License.
use std::any::Any;
-use std::iter;
use std::pin::Pin;
use std::sync::Arc;
@@ -24,7 +23,6 @@ use common_meta::table_name::TableName;
use common_query::error::Result as QueryResult;
use common_query::logical_plan::Expr;
use common_query::physical_plan::{PhysicalPlan, PhysicalPlanRef};
-use common_query::Output;
use common_recordbatch::adapter::AsyncRecordBatchStreamAdapter;
use common_recordbatch::error::{
InitRecordbatchStreamSnafu, PollStreamSnafu, Result as RecordBatchResult,
@@ -39,9 +37,8 @@ use datafusion::physical_plan::{
use datafusion_common::DataFusionError;
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use futures_util::{Stream, StreamExt};
-use partition::splitter::WriteSplitter;
use snafu::prelude::*;
-use store_api::storage::{RegionNumber, ScanRequest};
+use store_api::storage::ScanRequest;
use table::error::TableOperationSnafu;
use table::metadata::{FilterPushDownType, TableInfoRef, TableType};
use table::requests::{DeleteRequest, InsertRequest};
@@ -49,12 +46,12 @@ use table::Table;
use tokio::sync::RwLock;
use crate::catalog::FrontendCatalogManager;
-use crate::error::{FindDatanodeSnafu, FindTableRouteSnafu, Result};
+use crate::error::Result;
+use crate::instance::distributed::deleter::DistDeleter;
use crate::instance::distributed::inserter::DistInserter;
-use crate::table::delete::to_grpc_delete_request;
use crate::table::scan::{DatanodeInstance, TableScanPlan};
-mod delete;
+pub mod delete;
pub mod insert;
pub(crate) mod scan;
@@ -174,58 +171,17 @@ impl Table for DistTable {
}
async fn delete(&self, request: DeleteRequest) -> table::Result<usize> {
- let partition_manager = self.catalog_manager.partition_manager();
-
- let table_id = self.table_info.table_id();
- let partition_rule = partition_manager
- .find_table_partition_rule(table_id)
- .await
- .map_err(BoxedError::new)
- .context(TableOperationSnafu)?;
-
- let schema = self.schema();
- let time_index = &schema
- .timestamp_column()
- .with_context(|| table::error::MissingTimeIndexColumnSnafu {
- table_name: self.table_name.to_string(),
- })?
- .name;
-
- let table_info = self.table_info();
- let key_column_names = table_info
- .meta
- .row_key_column_names()
- .chain(iter::once(time_index))
- .collect::<Vec<_>>();
-
- let requests = WriteSplitter::with_partition_rule(partition_rule)
- .split_delete(request, key_column_names)
- .map_err(BoxedError::new)
- .and_then(|requests| {
- requests
- .into_iter()
- .map(|(region_number, request)| {
- to_grpc_delete_request(
- &table_info.meta,
- &self.table_name,
- region_number,
- request,
- )
- })
- .collect::<Result<Vec<_>>>()
- .map_err(BoxedError::new)
- })
- .context(TableOperationSnafu)?;
-
- let output = self
- .dist_delete(requests)
+ let deleter = DistDeleter::new(
+ request.catalog_name.clone(),
+ request.schema_name.clone(),
+ self.catalog_manager.clone(),
+ );
+ let affected_rows = deleter
+ .delete(vec![request])
.await
.map_err(BoxedError::new)
.context(TableOperationSnafu)?;
- let Output::AffectedRows(rows) = output else {
- unreachable!()
- };
- Ok(rows)
+ Ok(affected_rows)
}
}
@@ -241,39 +197,6 @@ impl DistTable {
catalog_manager,
}
}
-
- async fn find_datanode_instances(
- &self,
- regions: &[RegionNumber],
- ) -> Result<Vec<DatanodeInstance>> {
- let table_name = &self.table_name;
- let route = self
- .catalog_manager
- .partition_manager()
- .find_table_route(self.table_info.table_id())
- .await
- .with_context(|_| FindTableRouteSnafu {
- table_name: table_name.to_string(),
- })?;
-
- let datanodes = regions
- .iter()
- .map(|&n| {
- route
- .find_region_leader(n)
- .context(FindDatanodeSnafu { region: n })
- })
- .collect::<Result<Vec<_>>>()?;
-
- let datanode_clients = self.catalog_manager.datanode_clients();
- let mut instances = Vec::with_capacity(datanodes.len());
- for datanode in datanodes {
- let client = datanode_clients.get_client(datanode).await;
- let db = Database::new(&table_name.catalog_name, &table_name.schema_name, client);
- instances.push(DatanodeInstance::new(Arc::new(self.clone()) as _, db));
- }
- Ok(instances)
- }
}
fn project_schema(table_schema: SchemaRef, projection: Option<&Vec<usize>>) -> SchemaRef {
diff --git a/src/frontend/src/table/delete.rs b/src/frontend/src/table/delete.rs
index c8a3a709ecae..9a6b3056db1c 100644
--- a/src/frontend/src/table/delete.rs
+++ b/src/frontend/src/table/delete.rs
@@ -14,41 +14,14 @@
use api::v1::DeleteRequest as GrpcDeleteRequest;
use common_meta::table_name::TableName;
-use common_query::Output;
-use futures::future;
-use snafu::ResultExt;
use store_api::storage::RegionNumber;
use table::metadata::TableMeta;
use table::requests::DeleteRequest;
-use crate::error::{JoinTaskSnafu, RequestDatanodeSnafu, Result};
+use crate::error::Result;
use crate::table::insert::to_grpc_columns;
-use crate::table::DistTable;
-impl DistTable {
- pub(super) async fn dist_delete(&self, requests: Vec<GrpcDeleteRequest>) -> Result<Output> {
- let regions = requests.iter().map(|x| x.region_number).collect::<Vec<_>>();
- let instances = self.find_datanode_instances(®ions).await?;
-
- let results = future::try_join_all(instances.into_iter().zip(requests).map(
- |(instance, request)| {
- common_runtime::spawn_write(async move {
- instance
- .grpc_delete(request)
- .await
- .context(RequestDatanodeSnafu)
- })
- },
- ))
- .await
- .context(JoinTaskSnafu)?;
-
- let affected_rows = results.into_iter().sum::<Result<u32>>()?;
- Ok(Output::AffectedRows(affected_rows as _))
- }
-}
-
-pub(super) fn to_grpc_delete_request(
+pub fn to_grpc_delete_request(
table_meta: &TableMeta,
table_name: &TableName,
region_number: RegionNumber,
@@ -103,7 +76,12 @@ mod tests {
"id".to_string(),
Arc::new(Int32Vector::from_slice(vec![1, 2, 3])) as VectorRef,
)]);
- let request = DeleteRequest { key_column_values };
+ let request = DeleteRequest {
+ catalog_name: table_name.catalog_name.to_string(),
+ schema_name: table_name.schema_name.to_string(),
+ table_name: table_name.table_name.to_string(),
+ key_column_values,
+ };
let result =
to_grpc_delete_request(&table_meta, &table_name, region_number, request).unwrap();
diff --git a/src/frontend/src/table/scan.rs b/src/frontend/src/table/scan.rs
index 55b001518526..979eb6ca6402 100644
--- a/src/frontend/src/table/scan.rs
+++ b/src/frontend/src/table/scan.rs
@@ -15,7 +15,6 @@
use std::fmt::Formatter;
use std::sync::Arc;
-use api::v1::DeleteRequest;
use client::Database;
use common_meta::table_name::TableName;
use common_query::prelude::Expr;
@@ -47,10 +46,6 @@ impl DatanodeInstance {
Self { table, db }
}
- pub(crate) async fn grpc_delete(&self, request: DeleteRequest) -> client::Result<u32> {
- self.db.delete(request).await
- }
-
pub(crate) async fn grpc_table_scan(&self, plan: TableScanPlan) -> Result<RecordBatches> {
let logical_plan = self.build_logical_plan(&plan)?;
diff --git a/src/mito/src/engine/tests.rs b/src/mito/src/engine/tests.rs
index 7ef3946d87f9..be533a8ca9c1 100644
--- a/src/mito/src/engine/tests.rs
+++ b/src/mito/src/engine/tests.rs
@@ -843,7 +843,12 @@ async fn test_table_delete_rows() {
let del_tss: VectorRef = Arc::new(TimestampMillisecondVector::from_vec(vec![1, 2]));
let key_column_values =
HashMap::from([("host".to_string(), del_hosts), ("ts".to_string(), del_tss)]);
- let del_req = DeleteRequest { key_column_values };
+ let del_req = DeleteRequest {
+ catalog_name: "foo_catalog".to_string(),
+ schema_name: "foo_schema".to_string(),
+ table_name: "demo".to_string(),
+ key_column_values,
+ };
let _ = table.delete(del_req).await.unwrap();
let stream = table.scan_to_stream(ScanRequest::default()).await.unwrap();
diff --git a/src/mito/src/table.rs b/src/mito/src/table.rs
index 80fbc19d4ee1..60ac8e8648f1 100644
--- a/src/mito/src/table.rs
+++ b/src/mito/src/table.rs
@@ -278,7 +278,6 @@ impl<R: Region> Table for MitoTable<R> {
let key_column_values = request.key_column_values.clone();
// Safety: key_column_values isn't empty.
let rows_num = key_column_values.values().next().unwrap().len();
-
logging::trace!(
"Delete from table {} where key_columns are: {:?}",
self.table_info().name,
diff --git a/src/partition/src/manager.rs b/src/partition/src/manager.rs
index 60cea2f8fa9b..e6f3021d5f6a 100644
--- a/src/partition/src/manager.rs
+++ b/src/partition/src/manager.rs
@@ -24,14 +24,14 @@ use datatypes::schema::Schema;
use snafu::{ensure, OptionExt, ResultExt};
use store_api::storage::{RegionId, RegionNumber};
use table::metadata::TableId;
-use table::requests::InsertRequest;
+use table::requests::{DeleteRequest, InsertRequest};
use crate::columns::RangeColumnsPartitionRule;
use crate::error::{FindLeaderSnafu, Result};
use crate::partition::{PartitionBound, PartitionDef, PartitionExpr};
use crate::range::RangePartitionRule;
use crate::route::TableRoutes;
-use crate::splitter::{InsertRequestSplit, WriteSplitter};
+use crate::splitter::{DeleteRequestSplit, InsertRequestSplit, WriteSplitter};
use crate::{error, PartitionRuleRef};
#[async_trait::async_trait]
@@ -242,10 +242,21 @@ impl PartitionRuleManager {
req: InsertRequest,
schema: &Schema,
) -> Result<InsertRequestSplit> {
- let partition_rule = self.find_table_partition_rule(table).await.unwrap();
+ let partition_rule = self.find_table_partition_rule(table).await?;
let splitter = WriteSplitter::with_partition_rule(partition_rule);
splitter.split_insert(req, schema)
}
+
+ pub async fn split_delete_request(
+ &self,
+ table: TableId,
+ req: DeleteRequest,
+ primary_key_column_names: Vec<&String>,
+ ) -> Result<DeleteRequestSplit> {
+ let partition_rule = self.find_table_partition_rule(table).await?;
+ let splitter = WriteSplitter::with_partition_rule(partition_rule);
+ splitter.split_delete(req, primary_key_column_names)
+ }
}
fn find_regions0(partition_rule: PartitionRuleRef, filter: &Expr) -> Result<HashSet<RegionNumber>> {
diff --git a/src/partition/src/splitter.rs b/src/partition/src/splitter.rs
index 7ff422b25793..ceeac2e1da60 100644
--- a/src/partition/src/splitter.rs
+++ b/src/partition/src/splitter.rs
@@ -121,7 +121,15 @@ impl WriteSplitter {
(column_name.to_string(), builder.to_vector())
})
.collect();
- (region_id, DeleteRequest { key_column_values })
+ (
+ region_id,
+ DeleteRequest {
+ catalog_name: request.catalog_name.clone(),
+ schema_name: request.schema_name.clone(),
+ table_name: request.table_name.clone(),
+ key_column_values,
+ },
+ )
})
.collect();
Ok(requests)
@@ -439,7 +447,12 @@ mod tests {
"host".to_string(),
Arc::new(StringVector::from(vec!["localhost"])) as _,
);
- let delete = DeleteRequest { key_column_values };
+ let delete = DeleteRequest {
+ catalog_name: "foo_catalog".to_string(),
+ schema_name: "foo_schema".to_string(),
+ table_name: "foo_table".to_string(),
+ key_column_values,
+ };
let rule = Arc::new(EmptyPartitionRule) as PartitionRuleRef;
let spliter = WriteSplitter::with_partition_rule(rule);
let ret = spliter
diff --git a/src/query/src/datafusion.rs b/src/query/src/datafusion.rs
index 7f387d259d64..3bccb75054fc 100644
--- a/src/query/src/datafusion.rs
+++ b/src/query/src/datafusion.rs
@@ -146,6 +146,9 @@ impl DatafusionQueryEngine {
table: &TableRef,
column_vectors: HashMap<String, VectorRef>,
) -> Result<usize> {
+ let catalog_name = table_name.catalog.to_string();
+ let schema_name = table_name.schema.to_string();
+ let table_name = table_name.table.to_string();
let table_schema = table.schema();
let ts_column = table_schema
.timestamp_column()
@@ -165,6 +168,9 @@ impl DatafusionQueryEngine {
.collect::<HashMap<_, _>>();
let request = DeleteRequest {
+ catalog_name,
+ schema_name,
+ table_name,
key_column_values: column_vectors,
};
diff --git a/src/servers/tests/mod.rs b/src/servers/tests/mod.rs
index e0674204a1d2..350280764470 100644
--- a/src/servers/tests/mod.rs
+++ b/src/servers/tests/mod.rs
@@ -160,7 +160,7 @@ impl GrpcQueryHandler for DummyInstance {
) -> std::result::Result<Output, Self::Error> {
let output = match request {
Request::Inserts(_)
- | Request::Delete(_)
+ | Request::Deletes(_)
| Request::RowInserts(_)
| Request::RowDelete(_) => unimplemented!(),
Request::Query(query_request) => {
diff --git a/src/table/src/requests.rs b/src/table/src/requests.rs
index 879a46b9e935..7ca269c68d82 100644
--- a/src/table/src/requests.rs
+++ b/src/table/src/requests.rs
@@ -251,6 +251,9 @@ pub struct InsertRequest {
/// Delete (by primary key) request
#[derive(Debug)]
pub struct DeleteRequest {
+ pub catalog_name: String,
+ pub schema_name: String,
+ pub table_name: String,
/// Values of each column in this table's primary key and time index.
///
/// The key is the column name, and the value is the column value.
diff --git a/tests-integration/src/grpc.rs b/tests-integration/src/grpc.rs
index 1ea5573afa68..7447fdfeb84c 100644
--- a/tests-integration/src/grpc.rs
+++ b/tests-integration/src/grpc.rs
@@ -22,8 +22,8 @@ mod test {
use api::v1::query_request::Query;
use api::v1::{
alter_expr, AddColumn, AddColumns, AlterExpr, Column, ColumnDataType, ColumnDef,
- CreateDatabaseExpr, CreateTableExpr, DdlRequest, DeleteRequest, DropTableExpr,
- FlushTableExpr, InsertRequest, InsertRequests, QueryRequest, SemanticType,
+ CreateDatabaseExpr, CreateTableExpr, DdlRequest, DeleteRequest, DeleteRequests,
+ DropTableExpr, FlushTableExpr, InsertRequest, InsertRequests, QueryRequest, SemanticType,
};
use common_catalog::consts::MITO_ENGINE;
use common_query::Output;
@@ -216,7 +216,6 @@ CREATE TABLE {table_name} (
| ts | a | b |
+---------------------+---+-------------------+
| 2023-01-01T07:26:12 | 1 | ts: 1672557972000 |
-| 2023-01-01T07:26:14 | 3 | ts: 1672557974000 |
| 2023-01-01T07:26:15 | 4 | ts: 1672557975000 |
| 2023-01-01T07:26:16 | 5 | ts: 1672557976000 |
| 2023-01-01T07:26:17 | | ts: 1672557977000 |
@@ -250,7 +249,6 @@ CREATE TABLE {table_name} (
+---------------------+----+-------------------+
| 2023-01-01T07:26:24 | 50 | ts: 1672557984000 |
| 2023-01-01T07:26:25 | 51 | ts: 1672557985000 |
-| 2023-01-01T07:26:27 | 53 | ts: 1672557987000 |
+---------------------+----+-------------------+",
),
]),
@@ -527,7 +525,7 @@ CREATE TABLE {table_name} (
+---------------------+----+-------------------+";
assert_eq!(recordbatches.pretty_print().unwrap(), expected);
- let delete = DeleteRequest {
+ let new_grpc_delete_request = |a, b, ts, row_count| DeleteRequest {
table_name: table_name.to_string(),
region_number: 0,
key_columns: vec![
@@ -535,7 +533,7 @@ CREATE TABLE {table_name} (
column_name: "a".to_string(),
semantic_type: SemanticType::Field as i32,
values: Some(Values {
- i32_values: vec![2, 12, 22, 52],
+ i32_values: a,
..Default::default()
}),
datatype: ColumnDataType::Int32 as i32,
@@ -545,12 +543,7 @@ CREATE TABLE {table_name} (
column_name: "b".to_string(),
semantic_type: SemanticType::Tag as i32,
values: Some(Values {
- string_values: vec![
- "ts: 1672557973000".to_string(),
- "ts: 1672557979000".to_string(),
- "ts: 1672557982000".to_string(),
- "ts: 1672557986000".to_string(),
- ],
+ string_values: b,
..Default::default()
}),
datatype: ColumnDataType::String as i32,
@@ -560,22 +553,43 @@ CREATE TABLE {table_name} (
column_name: "ts".to_string(),
semantic_type: SemanticType::Timestamp as i32,
values: Some(Values {
- ts_millisecond_values: vec![
- 1672557973000,
- 1672557979000,
- 1672557982000,
- 1672557986000,
- ],
+ ts_millisecond_values: ts,
..Default::default()
}),
datatype: ColumnDataType::TimestampMillisecond as i32,
..Default::default()
},
],
- row_count: 4,
+ row_count,
};
- let output = query(instance, Request::Delete(delete)).await;
- assert!(matches!(output, Output::AffectedRows(4)));
+ let delete1 = new_grpc_delete_request(
+ vec![2, 12, 22, 52],
+ vec![
+ "ts: 1672557973000".to_string(),
+ "ts: 1672557979000".to_string(),
+ "ts: 1672557982000".to_string(),
+ "ts: 1672557986000".to_string(),
+ ],
+ vec![1672557973000, 1672557979000, 1672557982000, 1672557986000],
+ 4,
+ );
+ let delete2 = new_grpc_delete_request(
+ vec![3, 53],
+ vec![
+ "ts: 1672557974000".to_string(),
+ "ts: 1672557987000".to_string(),
+ ],
+ vec![1672557974000, 1672557987000],
+ 2,
+ );
+ let output = query(
+ instance,
+ Request::Deletes(DeleteRequests {
+ deletes: vec![delete1, delete2],
+ }),
+ )
+ .await;
+ assert!(matches!(output, Output::AffectedRows(6)));
let output = query(instance, request).await;
let Output::Stream(stream) = output else {
@@ -587,7 +601,6 @@ CREATE TABLE {table_name} (
| ts | a | b |
+---------------------+----+-------------------+
| 2023-01-01T07:26:12 | 1 | ts: 1672557972000 |
-| 2023-01-01T07:26:14 | 3 | ts: 1672557974000 |
| 2023-01-01T07:26:15 | 4 | ts: 1672557975000 |
| 2023-01-01T07:26:16 | 5 | ts: 1672557976000 |
| 2023-01-01T07:26:17 | | ts: 1672557977000 |
@@ -597,7 +610,6 @@ CREATE TABLE {table_name} (
| 2023-01-01T07:26:23 | 23 | ts: 1672557983000 |
| 2023-01-01T07:26:24 | 50 | ts: 1672557984000 |
| 2023-01-01T07:26:25 | 51 | ts: 1672557985000 |
-| 2023-01-01T07:26:27 | 53 | ts: 1672557987000 |
+---------------------+----+-------------------+";
assert_eq!(recordbatches.pretty_print().unwrap(), expected);
}
@@ -765,7 +777,13 @@ CREATE TABLE {table_name} (
row_count: 2,
};
- let output = query(instance, Request::Delete(delete)).await;
+ let output = query(
+ instance,
+ Request::Deletes(DeleteRequests {
+ deletes: vec![delete],
+ }),
+ )
+ .await;
assert!(matches!(output, Output::AffectedRows(2)));
let output = query(instance, request).await;
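The commit above replaces DistTable's ad-hoc datanode fan-out with a split-then-sum flow: a `table::requests::DeleteRequest` now carries its catalog, schema, and table names, `PartitionRuleManager::split_delete_request` breaks it into one request per region, and the per-region gRPC deletes are summed into a single affected-rows count. Below is a minimal, self-contained sketch of that pattern using toy types and a toy partition rule; names such as `split_by_region` are hypothetical and only mirror the call order, not the real crates.

use std::collections::HashMap;

#[derive(Clone, Debug)]
struct DeleteRequest {
    catalog_name: String,
    schema_name: String,
    table_name: String,
    // key column name -> values to delete (simplified to strings)
    key_column_values: HashMap<String, Vec<String>>,
}

type RegionNumber = u32;

// Toy stand-in for the partition rule: route each row to a region.
fn split_by_region(req: &DeleteRequest) -> HashMap<RegionNumber, DeleteRequest> {
    let mut split: HashMap<RegionNumber, DeleteRequest> = HashMap::new();
    for (i, host) in req.key_column_values["host"].iter().enumerate() {
        let region = (i % 2) as RegionNumber;
        split
            .entry(region)
            .or_insert_with(|| DeleteRequest {
                catalog_name: req.catalog_name.clone(),
                schema_name: req.schema_name.clone(),
                table_name: req.table_name.clone(),
                key_column_values: HashMap::new(),
            })
            .key_column_values
            .entry("host".to_string())
            .or_default()
            .push(host.clone());
    }
    split
}

fn main() {
    let req = DeleteRequest {
        catalog_name: "greptime".to_string(),
        schema_name: "public".to_string(),
        table_name: "demo".to_string(),
        key_column_values: HashMap::from([(
            "host".to_string(),
            vec!["a".to_string(), "b".to_string(), "c".to_string()],
        )]),
    };
    // One delete per region; affected rows are summed, as in the replaced dist_delete.
    let affected: usize = split_by_region(&req)
        .values()
        .map(|r| r.key_column_values["host"].len())
        .sum();
    assert_eq!(affected, 3);
}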
|
feat
|
handle multiple grpc deletes (#2150)
|
76732d65060f3b86465f2aef7aff22bc6456cb4b
|
2022-11-14 19:25:26
|
Ning Sun
|
fix: add more parameters to postgresql for python client (#493)
| false
|
diff --git a/src/servers/src/postgres/auth_handler.rs b/src/servers/src/postgres/auth_handler.rs
index 46cf8f6ac896..ab94e4eb4fb1 100644
--- a/src/servers/src/postgres/auth_handler.rs
+++ b/src/servers/src/postgres/auth_handler.rs
@@ -36,8 +36,11 @@ impl ServerParameterProvider for GreptimeDBStartupParameters {
where
C: ClientInfo,
{
- let mut params = HashMap::with_capacity(1);
+ let mut params = HashMap::with_capacity(4);
params.insert("server_version".to_owned(), self.version.to_owned());
+ params.insert("server_encoding".to_owned(), "UTF8".to_owned());
+ params.insert("client_encoding".to_owned(), "UTF8".to_owned());
+ params.insert("DateStyle".to_owned(), "ISO YMD".to_owned());
Some(params)
}
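The three added parameters matter because PostgreSQL drivers (including common Python clients) read them from the startup message to configure text decoding and date parsing; advertising `server_encoding`, `client_encoding`, and `DateStyle` alongside `server_version` keeps those clients from failing or misparsing values. A small standalone sketch of the resulting parameter map, assuming a hypothetical helper and an illustrative version string:

use std::collections::HashMap;

fn default_startup_parameters(server_version: &str) -> HashMap<String, String> {
    let mut params = HashMap::with_capacity(4);
    params.insert("server_version".to_owned(), server_version.to_owned());
    params.insert("server_encoding".to_owned(), "UTF8".to_owned());
    params.insert("client_encoding".to_owned(), "UTF8".to_owned());
    params.insert("DateStyle".to_owned(), "ISO YMD".to_owned());
    params
}

fn main() {
    // The version string here is illustrative, not the server's real value.
    let params = default_startup_parameters("16.3-greptime");
    assert_eq!(params["client_encoding"], "UTF8");
    assert_eq!(params["DateStyle"], "ISO YMD");
}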
|
fix
|
add more parameters to postgresql for python client (#493)
|
f035a7c79cc9441e7909b049340505b0a8b67596
|
2024-07-01 15:26:15
|
discord9
|
feat: flow cli for distributed (#4226)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 9d5a9a6ab2a4..594cf82088bc 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3955,10 +3955,12 @@ dependencies = [
"catalog",
"common-base",
"common-catalog",
+ "common-config",
"common-decimal",
"common-error",
"common-frontend",
"common-function",
+ "common-grpc",
"common-macro",
"common-meta",
"common-query",
diff --git a/src/cmd/src/bin/greptime.rs b/src/cmd/src/bin/greptime.rs
index ac5a62a46546..d5a35c6837e8 100644
--- a/src/cmd/src/bin/greptime.rs
+++ b/src/cmd/src/bin/greptime.rs
@@ -17,7 +17,7 @@
use clap::{Parser, Subcommand};
use cmd::error::Result;
use cmd::options::GlobalOptions;
-use cmd::{cli, datanode, frontend, metasrv, standalone, App};
+use cmd::{cli, datanode, flownode, frontend, metasrv, standalone, App};
use common_version::version;
#[derive(Parser)]
@@ -37,6 +37,10 @@ enum SubCommand {
#[clap(name = "datanode")]
Datanode(datanode::Command),
+ /// Start flownode service.
+ #[clap(name = "flownode")]
+ Flownode(flownode::Command),
+
/// Start frontend service.
#[clap(name = "frontend")]
Frontend(frontend::Command),
@@ -72,6 +76,12 @@ async fn start(cli: Command) -> Result<()> {
.run()
.await
}
+ SubCommand::Flownode(cmd) => {
+ cmd.build(cmd.load_options(&cli.global_options)?)
+ .await?
+ .run()
+ .await
+ }
SubCommand::Frontend(cmd) => {
cmd.build(cmd.load_options(&cli.global_options)?)
.await?
diff --git a/src/cmd/src/error.rs b/src/cmd/src/error.rs
index fa5371545fcb..d11e2421435c 100644
--- a/src/cmd/src/error.rs
+++ b/src/cmd/src/error.rs
@@ -87,6 +87,20 @@ pub enum Error {
source: datanode::error::Error,
},
+ #[snafu(display("Failed to start flownode"))]
+ StartFlownode {
+ #[snafu(implicit)]
+ location: Location,
+ source: flow::Error,
+ },
+
+ #[snafu(display("Failed to shutdown flownode"))]
+ ShutdownFlownode {
+ #[snafu(implicit)]
+ location: Location,
+ source: flow::Error,
+ },
+
#[snafu(display("Failed to start frontend"))]
StartFrontend {
#[snafu(implicit)]
@@ -380,6 +394,9 @@ impl ErrorExt for Error {
Error::BuildRuntime { source, .. } => source.status_code(),
Error::CacheRequired { .. } | Error::BuildCacheRegistry { .. } => StatusCode::Internal,
+ Self::StartFlownode { source, .. } | Self::ShutdownFlownode { source, .. } => {
+ source.status_code()
+ }
}
}
diff --git a/src/cmd/src/flownode.rs b/src/cmd/src/flownode.rs
new file mode 100644
index 000000000000..9c7cd6695d59
--- /dev/null
+++ b/src/cmd/src/flownode.rs
@@ -0,0 +1,301 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::sync::Arc;
+
+use cache::{build_fundamental_cache_registry, with_default_composite_cache_registry};
+use catalog::kvbackend::{CachedMetaKvBackendBuilder, KvBackendCatalogManager, MetaKvBackend};
+use clap::Parser;
+use common_base::Plugins;
+use common_config::Configurable;
+use common_meta::cache::{CacheRegistryBuilder, LayeredCacheRegistryBuilder};
+use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler;
+use common_meta::heartbeat::handler::HandlerGroupExecutor;
+use common_meta::key::TableMetadataManager;
+use common_telemetry::info;
+use common_telemetry::logging::TracingOptions;
+use common_version::{short_version, version};
+use flow::{FlownodeBuilder, FlownodeInstance};
+use frontend::heartbeat::handler::invalidate_table_cache::InvalidateTableCacheHandler;
+use meta_client::MetaClientOptions;
+use servers::Mode;
+use snafu::{OptionExt, ResultExt};
+use tracing_appender::non_blocking::WorkerGuard;
+
+use crate::error::{
+ BuildCacheRegistrySnafu, InitMetadataSnafu, LoadLayeredConfigSnafu, MissingConfigSnafu, Result,
+ ShutdownFlownodeSnafu, StartFlownodeSnafu,
+};
+use crate::options::{GlobalOptions, GreptimeOptions};
+use crate::{log_versions, App};
+
+pub const APP_NAME: &str = "greptime-flownode";
+
+type FlownodeOptions = GreptimeOptions<flow::FlownodeOptions>;
+
+pub struct Instance {
+ flownode: FlownodeInstance,
+
+ // Keep the logging guard to prevent the worker from being dropped.
+ _guard: Vec<WorkerGuard>,
+}
+
+impl Instance {
+ pub fn new(flownode: FlownodeInstance, guard: Vec<WorkerGuard>) -> Self {
+ Self {
+ flownode,
+ _guard: guard,
+ }
+ }
+
+ pub fn flownode_mut(&mut self) -> &mut FlownodeInstance {
+ &mut self.flownode
+ }
+
+ pub fn flownode(&self) -> &FlownodeInstance {
+ &self.flownode
+ }
+}
+
+#[async_trait::async_trait]
+impl App for Instance {
+ fn name(&self) -> &str {
+ APP_NAME
+ }
+
+ async fn start(&mut self) -> Result<()> {
+ self.flownode.start().await.context(StartFlownodeSnafu)
+ }
+
+ async fn stop(&self) -> Result<()> {
+ self.flownode
+ .shutdown()
+ .await
+ .context(ShutdownFlownodeSnafu)
+ }
+}
+
+#[derive(Parser)]
+pub struct Command {
+ #[clap(subcommand)]
+ subcmd: SubCommand,
+}
+
+impl Command {
+ pub async fn build(&self, opts: FlownodeOptions) -> Result<Instance> {
+ self.subcmd.build(opts).await
+ }
+
+ pub fn load_options(&self, global_options: &GlobalOptions) -> Result<FlownodeOptions> {
+ match &self.subcmd {
+ SubCommand::Start(cmd) => cmd.load_options(global_options),
+ }
+ }
+}
+
+#[derive(Parser)]
+enum SubCommand {
+ Start(StartCommand),
+}
+
+impl SubCommand {
+ async fn build(&self, opts: FlownodeOptions) -> Result<Instance> {
+ match self {
+ SubCommand::Start(cmd) => cmd.build(opts).await,
+ }
+ }
+}
+
+#[derive(Debug, Parser, Default)]
+struct StartCommand {
+ #[clap(long)]
+ node_id: Option<u64>,
+ #[clap(long)]
+ rpc_addr: Option<String>,
+ #[clap(long)]
+ rpc_hostname: Option<String>,
+ #[clap(long, value_delimiter = ',', num_args = 1..)]
+ metasrv_addrs: Option<Vec<String>>,
+ #[clap(short, long)]
+ config_file: Option<String>,
+ #[clap(long, default_value = "GREPTIMEDB_FLOWNODE")]
+ env_prefix: String,
+}
+
+impl StartCommand {
+ fn load_options(&self, global_options: &GlobalOptions) -> Result<FlownodeOptions> {
+ let mut opts = FlownodeOptions::load_layered_options(
+ self.config_file.as_deref(),
+ self.env_prefix.as_ref(),
+ )
+ .context(LoadLayeredConfigSnafu)?;
+
+ self.merge_with_cli_options(global_options, &mut opts)?;
+
+ Ok(opts)
+ }
+
+ // The precedence order is: cli > config file > environment variables > default values.
+ fn merge_with_cli_options(
+ &self,
+ global_options: &GlobalOptions,
+ opts: &mut FlownodeOptions,
+ ) -> Result<()> {
+ let opts = &mut opts.component;
+
+ if let Some(dir) = &global_options.log_dir {
+ opts.logging.dir.clone_from(dir);
+ }
+
+ if global_options.log_level.is_some() {
+ opts.logging.level.clone_from(&global_options.log_level);
+ }
+
+ opts.tracing = TracingOptions {
+ #[cfg(feature = "tokio-console")]
+ tokio_console_addr: global_options.tokio_console_addr.clone(),
+ };
+
+ if let Some(addr) = &self.rpc_addr {
+ opts.grpc.addr.clone_from(addr);
+ }
+
+ if let Some(hostname) = &self.rpc_hostname {
+ opts.grpc.hostname.clone_from(hostname);
+ }
+
+ if let Some(node_id) = self.node_id {
+ opts.node_id = Some(node_id);
+ }
+
+ if let Some(metasrv_addrs) = &self.metasrv_addrs {
+ opts.meta_client
+ .get_or_insert_with(MetaClientOptions::default)
+ .metasrv_addrs
+ .clone_from(metasrv_addrs);
+ opts.mode = Mode::Distributed;
+ }
+
+ if let (Mode::Distributed, None) = (&opts.mode, &opts.node_id) {
+ return MissingConfigSnafu {
+ msg: "Missing node id option",
+ }
+ .fail();
+ }
+
+ Ok(())
+ }
+
+ async fn build(&self, opts: FlownodeOptions) -> Result<Instance> {
+ common_runtime::init_global_runtimes(&opts.runtime);
+
+ let guard = common_telemetry::init_global_logging(
+ APP_NAME,
+ &opts.component.logging,
+ &opts.component.tracing,
+ opts.component.node_id.map(|x| x.to_string()),
+ );
+ log_versions(version!(), short_version!());
+
+ info!("Flownode start command: {:#?}", self);
+ info!("Flownode options: {:#?}", opts);
+
+ let opts = opts.component;
+
+ let cluster_id = opts.cluster_id.context(MissingConfigSnafu {
+ msg: "'cluster_id'",
+ })?;
+
+ let node_id = opts
+ .node_id
+ .context(MissingConfigSnafu { msg: "'node_id'" })?;
+
+ let meta_config = opts.meta_client.as_ref().context(MissingConfigSnafu {
+ msg: "'meta_client_options'",
+ })?;
+
+ let meta_client = Arc::new(
+ flow::heartbeat::new_metasrv_client(cluster_id, node_id, meta_config)
+ .await
+ .context(StartFlownodeSnafu)?,
+ );
+
+ let cache_max_capacity = meta_config.metadata_cache_max_capacity;
+ let cache_ttl = meta_config.metadata_cache_ttl;
+ let cache_tti = meta_config.metadata_cache_tti;
+
+ // TODO(discord9): add helper function to ease the creation of cache registry&such
+ let cached_meta_backend = CachedMetaKvBackendBuilder::new(meta_client.clone())
+ .cache_max_capacity(cache_max_capacity)
+ .cache_ttl(cache_ttl)
+ .cache_tti(cache_tti)
+ .build();
+ let cached_meta_backend = Arc::new(cached_meta_backend);
+
+ // Builds cache registry
+ let layered_cache_builder = LayeredCacheRegistryBuilder::default().add_cache_registry(
+ CacheRegistryBuilder::default()
+ .add_cache(cached_meta_backend.clone())
+ .build(),
+ );
+ let fundamental_cache_registry =
+ build_fundamental_cache_registry(Arc::new(MetaKvBackend::new(meta_client.clone())));
+ let layered_cache_registry = Arc::new(
+ with_default_composite_cache_registry(
+ layered_cache_builder.add_cache_registry(fundamental_cache_registry),
+ )
+ .context(BuildCacheRegistrySnafu)?
+ .build(),
+ );
+
+ let catalog_manager = KvBackendCatalogManager::new(
+ opts.mode,
+ Some(meta_client.clone()),
+ cached_meta_backend.clone(),
+ layered_cache_registry.clone(),
+ );
+
+ let table_metadata_manager = Arc::new(TableMetadataManager::new(cached_meta_backend));
+ table_metadata_manager
+ .init()
+ .await
+ .context(InitMetadataSnafu)?;
+
+ let executor = HandlerGroupExecutor::new(vec![
+ Arc::new(ParseMailboxMessageHandler),
+ Arc::new(InvalidateTableCacheHandler::new(
+ layered_cache_registry.clone(),
+ )),
+ ]);
+
+ let heartbeat_task = flow::heartbeat::HeartbeatTask::new(
+ &opts,
+ meta_client.clone(),
+ opts.heartbeat.clone(),
+ Arc::new(executor),
+ );
+
+ let flownode_builder = FlownodeBuilder::new(
+ opts,
+ Plugins::new(),
+ table_metadata_manager,
+ catalog_manager,
+ )
+ .with_heartbeat_task(heartbeat_task);
+
+ let flownode = flownode_builder.build().await.context(StartFlownodeSnafu)?;
+
+ Ok(Instance::new(flownode, guard))
+ }
+}
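The new `flownode` subcommand above follows the same layered-options convention as the other roles: CLI flags override the config file, which overrides `GREPTIMEDB_FLOWNODE_*` environment variables, which override the defaults, and a node id is required in distributed mode. Assuming clap's default kebab-case flag names for the fields shown in `StartCommand`, an illustrative invocation (addresses and file name are placeholders) would look like:

    greptime flownode start --node-id 1 --metasrv-addrs 127.0.0.1:3002 -c flownode.toml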
diff --git a/src/cmd/src/frontend.rs b/src/cmd/src/frontend.rs
index 31f4a108481b..3081f4b75f77 100644
--- a/src/cmd/src/frontend.rs
+++ b/src/cmd/src/frontend.rs
@@ -283,6 +283,7 @@ impl StartCommand {
.await
.context(StartFrontendSnafu)?;
+ // TODO(discord9): add helper function to ease the creation of cache registry&such
let cached_meta_backend = CachedMetaKvBackendBuilder::new(meta_client.clone())
.cache_max_capacity(cache_max_capacity)
.cache_ttl(cache_ttl)
diff --git a/src/cmd/src/lib.rs b/src/cmd/src/lib.rs
index f9f9e699650b..85b848c62835 100644
--- a/src/cmd/src/lib.rs
+++ b/src/cmd/src/lib.rs
@@ -22,6 +22,7 @@ use crate::error::Result;
pub mod cli;
pub mod datanode;
pub mod error;
+pub mod flownode;
pub mod frontend;
pub mod metasrv;
pub mod options;
diff --git a/src/cmd/src/standalone.rs b/src/cmd/src/standalone.rs
index 316aa6db7f25..75be49a6233d 100644
--- a/src/cmd/src/standalone.rs
+++ b/src/cmd/src/standalone.rs
@@ -21,6 +21,7 @@ use catalog::kvbackend::KvBackendCatalogManager;
use clap::Parser;
use common_catalog::consts::{MIN_USER_FLOW_ID, MIN_USER_TABLE_ID};
use common_config::{metadata_store_dir, Configurable, KvBackendConfig};
+use common_error::ext::BoxedError;
use common_meta::cache::LayeredCacheRegistryBuilder;
use common_meta::cache_invalidator::CacheInvalidatorRef;
use common_meta::ddl::flow_meta::{FlowMetadataAllocator, FlowMetadataAllocatorRef};
@@ -65,9 +66,9 @@ use tracing_appender::non_blocking::WorkerGuard;
use crate::error::{
BuildCacheRegistrySnafu, CreateDirSnafu, IllegalConfigSnafu, InitDdlManagerSnafu,
- InitMetadataSnafu, InitTimezoneSnafu, LoadLayeredConfigSnafu, Result, ShutdownDatanodeSnafu,
- ShutdownFrontendSnafu, StartDatanodeSnafu, StartFrontendSnafu, StartProcedureManagerSnafu,
- StartWalOptionsAllocatorSnafu, StopProcedureManagerSnafu,
+ InitMetadataSnafu, InitTimezoneSnafu, LoadLayeredConfigSnafu, OtherSnafu, Result,
+ ShutdownDatanodeSnafu, ShutdownFrontendSnafu, StartDatanodeSnafu, StartFrontendSnafu,
+ StartProcedureManagerSnafu, StartWalOptionsAllocatorSnafu, StopProcedureManagerSnafu,
};
use crate::options::{GlobalOptions, GreptimeOptions};
use crate::{log_versions, App};
@@ -448,13 +449,18 @@ impl StartCommand {
Self::create_table_metadata_manager(kv_backend.clone()).await?;
let flow_builder = FlownodeBuilder::new(
- 1,
Default::default(),
fe_plugins.clone(),
table_metadata_manager.clone(),
catalog_manager.clone(),
);
- let flownode = Arc::new(flow_builder.build().await);
+ let flownode = Arc::new(
+ flow_builder
+ .build()
+ .await
+ .map_err(BoxedError::new)
+ .context(OtherSnafu)?,
+ );
let datanode = DatanodeBuilder::new(dn_opts, fe_plugins.clone())
.with_kv_backend(kv_backend.clone())
@@ -464,7 +470,7 @@ impl StartCommand {
let node_manager = Arc::new(StandaloneDatanodeManager {
region_server: datanode.region_server(),
- flow_server: flownode.clone(),
+ flow_server: flownode.flow_worker_manager(),
});
let table_id_sequence = Arc::new(
@@ -516,11 +522,12 @@ impl StartCommand {
.context(StartFrontendSnafu)?;
// flow server needs to be able to use frontend to write insert requests back
- flownode
+ let flow_worker_manager = flownode.flow_worker_manager();
+ flow_worker_manager
.set_frontend_invoker(Box::new(frontend.clone()))
.await;
// TODO(discord9): unify with adding `start` and `shutdown` method to flownode too.
- let _handle = flownode.clone().run_background();
+ let _handle = flow_worker_manager.run_background();
let servers = Services::new(fe_opts.clone(), Arc::new(frontend.clone()), fe_plugins)
.build()
diff --git a/src/flow/Cargo.toml b/src/flow/Cargo.toml
index ebf276069502..395f9cf07510 100644
--- a/src/flow/Cargo.toml
+++ b/src/flow/Cargo.toml
@@ -15,10 +15,12 @@ async-trait.workspace = true
bytes.workspace = true
catalog.workspace = true
common-base.workspace = true
+common-config.workspace = true
common-decimal.workspace = true
common-error.workspace = true
common-frontend.workspace = true
common-function.workspace = true
+common-grpc.workspace = true
common-macro.workspace = true
common-meta.workspace = true
common-query.workspace = true
diff --git a/src/flow/src/adapter.rs b/src/flow/src/adapter.rs
index 373f1fb35558..c9d1ca570a76 100644
--- a/src/flow/src/adapter.rs
+++ b/src/flow/src/adapter.rs
@@ -21,40 +21,41 @@ use std::sync::Arc;
use std::time::{Instant, SystemTime};
use api::v1::{RowDeleteRequest, RowDeleteRequests, RowInsertRequest, RowInsertRequests};
-use catalog::CatalogManagerRef;
-use common_base::Plugins;
+use common_config::Configurable;
use common_error::ext::BoxedError;
use common_frontend::handler::FrontendInvoker;
use common_meta::key::TableMetadataManagerRef;
use common_runtime::JoinHandle;
+use common_telemetry::logging::{LoggingOptions, TracingOptions};
use common_telemetry::{debug, info};
use datatypes::schema::ColumnSchema;
use datatypes::value::Value;
use greptime_proto::v1;
use itertools::Itertools;
-use query::{QueryEngine, QueryEngineFactory};
+use meta_client::MetaClientOptions;
+use query::QueryEngine;
use serde::{Deserialize, Serialize};
use servers::grpc::GrpcOptions;
+use servers::heartbeat_options::HeartbeatOptions;
+use servers::Mode;
use session::context::QueryContext;
use snafu::{ensure, OptionExt, ResultExt};
use store_api::storage::{ConcreteDataType, RegionId};
use table::metadata::TableId;
-use tokio::sync::{oneshot, watch, Mutex, RwLock};
+use tokio::sync::{watch, Mutex, RwLock};
-use crate::adapter::error::{ExternalSnafu, InternalSnafu, TableNotFoundSnafu, UnexpectedSnafu};
pub(crate) use crate::adapter::node_context::FlownodeContext;
use crate::adapter::table_source::TableSource;
use crate::adapter::util::column_schemas_to_proto;
use crate::adapter::worker::{create_worker, Worker, WorkerHandle};
use crate::compute::ErrCollector;
+use crate::error::{ExternalSnafu, InternalSnafu, TableNotFoundSnafu, UnexpectedSnafu};
use crate::expr::GlobalId;
use crate::repr::{self, DiffRow, Row};
-use crate::transform::{register_function_to_query_engine, sql_to_flow_plan};
+use crate::transform::sql_to_flow_plan;
-pub(crate) mod error;
mod flownode_impl;
mod parse_expr;
-mod server;
#[cfg(test)]
mod tests;
mod util;
@@ -63,7 +64,7 @@ mod worker;
pub(crate) mod node_context;
mod table_source;
-use error::Error;
+use crate::error::Error;
// TODO(discord9): replace this with `GREPTIME_TIMESTAMP` before v0.9
pub const AUTO_CREATED_PLACEHOLDER_TS_COL: &str = "__ts_placeholder";
@@ -76,79 +77,43 @@ pub type FlowId = u64;
pub type TableName = [String; 3];
/// Options for flow node
-#[derive(Clone, Default, Debug, Serialize, Deserialize)]
+#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(default)]
pub struct FlownodeOptions {
+ pub mode: Mode,
+ pub cluster_id: Option<u64>,
pub node_id: Option<u64>,
pub grpc: GrpcOptions,
+ pub meta_client: Option<MetaClientOptions>,
+ pub logging: LoggingOptions,
+ pub tracing: TracingOptions,
+ pub heartbeat: HeartbeatOptions,
}
-/// Flownode Builder
-pub struct FlownodeBuilder {
- flow_node_id: u32,
- opts: FlownodeOptions,
- plugins: Plugins,
- table_meta: TableMetadataManagerRef,
- catalog_manager: CatalogManagerRef,
-}
-
-impl FlownodeBuilder {
- /// init flownode builder
- pub fn new(
- flow_node_id: u32,
- opts: FlownodeOptions,
- plugins: Plugins,
- table_meta: TableMetadataManagerRef,
- catalog_manager: CatalogManagerRef,
- ) -> Self {
+impl Default for FlownodeOptions {
+ fn default() -> Self {
Self {
- flow_node_id,
- opts,
- plugins,
- table_meta,
- catalog_manager,
+ mode: servers::Mode::Standalone,
+ cluster_id: None,
+ node_id: None,
+ grpc: GrpcOptions::default().with_addr("127.0.0.1:3004"),
+ meta_client: None,
+ logging: LoggingOptions::default(),
+ tracing: TracingOptions::default(),
+ heartbeat: HeartbeatOptions::default(),
}
}
-
- /// TODO(discord9): error handling
- pub async fn build(self) -> FlownodeManager {
- let query_engine_factory = QueryEngineFactory::new_with_plugins(
- // query engine in flownode only translate plan with resolved table source.
- self.catalog_manager.clone(),
- None,
- None,
- None,
- false,
- self.plugins.clone(),
- );
- let query_engine = query_engine_factory.query_engine();
-
- register_function_to_query_engine(&query_engine);
-
- let (tx, rx) = oneshot::channel();
-
- let node_id = Some(self.flow_node_id);
-
- let _handle = std::thread::spawn(move || {
- let (flow_node_manager, mut worker) =
- FlownodeManager::new_with_worker(node_id, query_engine, self.table_meta.clone());
- let _ = tx.send(flow_node_manager);
- info!("Flow Worker started in new thread");
- worker.run();
- });
- let man = rx.await.unwrap();
- info!("Flow Node Manager started");
- man
- }
}
+impl Configurable for FlownodeOptions {}
+
/// Arc-ed FlowNodeManager, cheaper to clone
-pub type FlownodeManagerRef = Arc<FlownodeManager>;
+pub type FlowWorkerManagerRef = Arc<FlowWorkerManager>;
/// FlowNodeManager manages the state of all tasks in the flow node, which should be run on the same thread
///
/// The choice of timestamp is just using current system timestamp for now
-pub struct FlownodeManager {
+pub struct FlowWorkerManager {
/// The handler to the worker that will run the dataflow
/// which is `!Send` so a handle is used
pub worker_handles: Vec<Mutex<WorkerHandle>>,
@@ -166,7 +131,7 @@ pub struct FlownodeManager {
}
/// Building FlownodeManager
-impl FlownodeManager {
+impl FlowWorkerManager {
/// set frontend invoker
pub async fn set_frontend_invoker(
self: &Arc<Self>,
@@ -188,7 +153,7 @@ impl FlownodeManager {
let node_context = FlownodeContext::default();
let tick_manager = FlowTickManager::new();
let worker_handles = Vec::new();
- FlownodeManager {
+ FlowWorkerManager {
worker_handles,
query_engine,
table_info_source: srv_map,
@@ -248,7 +213,7 @@ pub fn diff_row_to_request(rows: Vec<DiffRow>) -> Vec<DiffRequest> {
}
/// This impl block contains methods to send writeback requests to frontend
-impl FlownodeManager {
+impl FlowWorkerManager {
/// TODO(discord9): merge all diff rows of the same type into one request
///
/// Return the number of requests it made
@@ -494,7 +459,7 @@ impl FlownodeManager {
}
/// Flow Runtime related methods
-impl FlownodeManager {
+impl FlowWorkerManager {
/// run in common_runtime background runtime
pub fn run_background(self: Arc<Self>) -> JoinHandle<()> {
info!("Starting flownode manager's background task");
@@ -604,7 +569,7 @@ impl FlownodeManager {
}
/// Create&Remove flow
-impl FlownodeManager {
+impl FlowWorkerManager {
/// remove a flow by it's id
pub async fn remove_flow(&self, flow_id: FlowId) -> Result<(), Error> {
for handle in self.worker_handles.iter() {
diff --git a/src/flow/src/adapter/flownode_impl.rs b/src/flow/src/adapter/flownode_impl.rs
index e337e96a0ab3..f780745f621e 100644
--- a/src/flow/src/adapter/flownode_impl.rs
+++ b/src/flow/src/adapter/flownode_impl.rs
@@ -26,11 +26,11 @@ use itertools::Itertools;
use snafu::{OptionExt, ResultExt};
use store_api::storage::RegionId;
-use crate::adapter::error::InternalSnafu;
-use crate::adapter::FlownodeManager;
+use crate::adapter::FlowWorkerManager;
+use crate::error::InternalSnafu;
use crate::repr::{self, DiffRow};
-fn to_meta_err(err: crate::adapter::error::Error) -> common_meta::error::Error {
+fn to_meta_err(err: crate::error::Error) -> common_meta::error::Error {
// TODO(discord9): refactor this
Err::<(), _>(BoxedError::new(err))
.with_context(|_| ExternalSnafu)
@@ -38,7 +38,7 @@ fn to_meta_err(err: crate::adapter::error::Error) -> common_meta::error::Error {
}
#[async_trait::async_trait]
-impl Flownode for FlownodeManager {
+impl Flownode for FlowWorkerManager {
async fn handle(&self, request: FlowRequest) -> Result<FlowResponse> {
let query_ctx = request
.header
diff --git a/src/flow/src/adapter/node_context.rs b/src/flow/src/adapter/node_context.rs
index 40c5169f5ed8..e8defc7652a6 100644
--- a/src/flow/src/adapter/node_context.rs
+++ b/src/flow/src/adapter/node_context.rs
@@ -23,8 +23,8 @@ use snafu::{OptionExt, ResultExt};
use table::metadata::TableId;
use tokio::sync::{broadcast, mpsc, RwLock};
-use crate::adapter::error::{Error, EvalSnafu, TableNotFoundSnafu};
use crate::adapter::{FlowId, TableName, TableSource};
+use crate::error::{Error, EvalSnafu, TableNotFoundSnafu};
use crate::expr::error::InternalSnafu;
use crate::expr::GlobalId;
use crate::repr::{DiffRow, RelationDesc, BROADCAST_CAP};
@@ -317,7 +317,6 @@ impl FlownodeContext {
/// Assign a schema to a table
///
- /// TODO(discord9): error handling
pub fn assign_table_schema(
&mut self,
table_name: &TableName,
@@ -327,7 +326,10 @@ impl FlownodeContext {
.table_repr
.get_by_name(table_name)
.map(|(_, gid)| gid)
- .unwrap();
+ .context(TableNotFoundSnafu {
+ name: format!("Table not found: {:?} in flownode cache", table_name),
+ })?;
+
self.schema.insert(gid, schema);
Ok(())
}
diff --git a/src/flow/src/adapter/table_source.rs b/src/flow/src/adapter/table_source.rs
index 24cf05c4b649..0454ab16b1d3 100644
--- a/src/flow/src/adapter/table_source.rs
+++ b/src/flow/src/adapter/table_source.rs
@@ -20,10 +20,10 @@ use common_meta::key::table_name::{TableNameKey, TableNameManager};
use snafu::{OptionExt, ResultExt};
use table::metadata::TableId;
-use crate::adapter::error::{
+use crate::adapter::TableName;
+use crate::error::{
Error, ExternalSnafu, TableNotFoundMetaSnafu, TableNotFoundSnafu, UnexpectedSnafu,
};
-use crate::adapter::TableName;
use crate::repr::{self, ColumnType, RelationDesc, RelationType};
/// mapping of table name <-> table id should be query from tableinfo manager
diff --git a/src/flow/src/adapter/util.rs b/src/flow/src/adapter/util.rs
index 1946d4265d3f..0a23a86167aa 100644
--- a/src/flow/src/adapter/util.rs
+++ b/src/flow/src/adapter/util.rs
@@ -19,7 +19,7 @@ use datatypes::schema::ColumnSchema;
use itertools::Itertools;
use snafu::ResultExt;
-use crate::adapter::error::{Error, ExternalSnafu};
+use crate::error::{Error, ExternalSnafu};
/// convert `ColumnSchema` lists to it's corresponding proto type
pub fn column_schemas_to_proto(
diff --git a/src/flow/src/adapter/worker.rs b/src/flow/src/adapter/worker.rs
index f69a396cda27..e5819a7f0437 100644
--- a/src/flow/src/adapter/worker.rs
+++ b/src/flow/src/adapter/worker.rs
@@ -24,9 +24,9 @@ use hydroflow::scheduled::graph::Hydroflow;
use snafu::ensure;
use tokio::sync::{broadcast, mpsc, oneshot, Mutex};
-use crate::adapter::error::{Error, FlowAlreadyExistSnafu, InternalSnafu, UnexpectedSnafu};
use crate::adapter::FlowId;
use crate::compute::{Context, DataflowState, ErrCollector};
+use crate::error::{Error, FlowAlreadyExistSnafu, InternalSnafu, UnexpectedSnafu};
use crate::expr::GlobalId;
use crate::plan::TypedPlan;
use crate::repr::{self, DiffRow};
diff --git a/src/flow/src/compute/render.rs b/src/flow/src/compute/render.rs
index 4a4704d28e57..618f9654257d 100644
--- a/src/flow/src/compute/render.rs
+++ b/src/flow/src/compute/render.rs
@@ -32,9 +32,9 @@ use itertools::Itertools;
use snafu::{ensure, OptionExt, ResultExt};
use super::state::Scheduler;
-use crate::adapter::error::{Error, EvalSnafu, InvalidQuerySnafu, NotImplementedSnafu, PlanSnafu};
use crate::compute::state::DataflowState;
use crate::compute::types::{Arranged, Collection, CollectionBundle, ErrCollector, Toff};
+use crate::error::{Error, EvalSnafu, InvalidQuerySnafu, NotImplementedSnafu, PlanSnafu};
use crate::expr::error::{DataTypeSnafu, InternalSnafu};
use crate::expr::{
self, EvalError, GlobalId, LocalId, MapFilterProject, MfpPlan, SafeMfpPlan, ScalarExpr,
diff --git a/src/flow/src/compute/render/map.rs b/src/flow/src/compute/render/map.rs
index 2a6b49cb7c75..d2278dc3b358 100644
--- a/src/flow/src/compute/render/map.rs
+++ b/src/flow/src/compute/render/map.rs
@@ -19,10 +19,10 @@ use hydroflow::scheduled::port::{PortCtx, SEND};
use itertools::Itertools;
use snafu::OptionExt;
-use crate::adapter::error::{Error, PlanSnafu};
use crate::compute::render::Context;
use crate::compute::state::Scheduler;
use crate::compute::types::{Arranged, Collection, CollectionBundle, ErrCollector, Toff};
+use crate::error::{Error, PlanSnafu};
use crate::expr::{EvalError, MapFilterProject, MfpPlan, ScalarExpr};
use crate::plan::{Plan, TypedPlan};
use crate::repr::{self, DiffRow, KeyValDiffRow, Row};
diff --git a/src/flow/src/compute/render/reduce.rs b/src/flow/src/compute/render/reduce.rs
index b55ed58f900a..d44c290d9474 100644
--- a/src/flow/src/compute/render/reduce.rs
+++ b/src/flow/src/compute/render/reduce.rs
@@ -22,10 +22,10 @@ use hydroflow::scheduled::port::{PortCtx, SEND};
use itertools::Itertools;
use snafu::{ensure, OptionExt, ResultExt};
-use crate::adapter::error::{Error, PlanSnafu};
use crate::compute::render::{Context, SubgraphArg};
use crate::compute::state::Scheduler;
use crate::compute::types::{Arranged, Collection, CollectionBundle, ErrCollector, Toff};
+use crate::error::{Error, PlanSnafu};
use crate::expr::error::{DataAlreadyExpiredSnafu, DataTypeSnafu, InternalSnafu};
use crate::expr::{AggregateExpr, EvalError, ScalarExpr};
use crate::plan::{AccumulablePlan, AggrWithIndex, KeyValPlan, Plan, ReducePlan, TypedPlan};
diff --git a/src/flow/src/compute/render/src_sink.rs b/src/flow/src/compute/render/src_sink.rs
index 2b9fd5e601f0..fd757852ca70 100644
--- a/src/flow/src/compute/render/src_sink.rs
+++ b/src/flow/src/compute/render/src_sink.rs
@@ -23,9 +23,9 @@ use snafu::OptionExt;
use tokio::sync::broadcast::error::TryRecvError;
use tokio::sync::{broadcast, mpsc};
-use crate::adapter::error::{Error, PlanSnafu};
use crate::compute::render::Context;
use crate::compute::types::{Arranged, Collection, CollectionBundle, Toff};
+use crate::error::{Error, PlanSnafu};
use crate::expr::error::InternalSnafu;
use crate::expr::{EvalError, GlobalId};
use crate::repr::{DiffRow, Row, BROADCAST_CAP};
diff --git a/src/flow/src/adapter/error.rs b/src/flow/src/error.rs
similarity index 82%
rename from src/flow/src/adapter/error.rs
rename to src/flow/src/error.rs
index 9d5692aa1ab4..6d84b6be05b1 100644
--- a/src/flow/src/adapter/error.rs
+++ b/src/flow/src/error.rs
@@ -162,6 +162,34 @@ pub enum Error {
#[snafu(implicit)]
location: Location,
},
+
+ #[snafu(display("Failed to start server"))]
+ StartServer {
+ #[snafu(implicit)]
+ location: Location,
+ source: servers::error::Error,
+ },
+
+ #[snafu(display("Failed to shutdown server"))]
+ ShutdownServer {
+ #[snafu(implicit)]
+ location: Location,
+ source: servers::error::Error,
+ },
+
+ #[snafu(display("Failed to initialize meta client"))]
+ MetaClientInit {
+ #[snafu(implicit)]
+ location: Location,
+ source: meta_client::error::Error,
+ },
+
+ #[snafu(display("Failed to parse address {}", addr))]
+ ParseAddr {
+ addr: String,
+ #[snafu(source)]
+ error: std::net::AddrParseError,
+ },
}
/// Result type for flow module
@@ -184,11 +212,16 @@ impl ErrorExt for Error {
| &Self::Plan { .. }
| &Self::Datatypes { .. } => StatusCode::PlanQuery,
Self::NoProtoType { .. } | Self::Unexpected { .. } => StatusCode::Unexpected,
- &Self::NotImplemented { .. } | Self::UnsupportedTemporalFilter { .. } => {
+ Self::NotImplemented { .. } | Self::UnsupportedTemporalFilter { .. } => {
StatusCode::Unsupported
}
- &Self::External { .. } => StatusCode::Unknown,
+ Self::External { source, .. } => source.status_code(),
Self::Internal { .. } => StatusCode::Internal,
+ Self::StartServer { source, .. } | Self::ShutdownServer { source, .. } => {
+ source.status_code()
+ }
+ Self::MetaClientInit { source, .. } => source.status_code(),
+ Self::ParseAddr { .. } => StatusCode::InvalidArguments,
}
}
diff --git a/src/flow/src/expr/func.rs b/src/flow/src/expr/func.rs
index c30b67dbffa4..39b469207169 100644
--- a/src/flow/src/expr/func.rs
+++ b/src/flow/src/expr/func.rs
@@ -32,7 +32,7 @@ use snafu::{ensure, OptionExt, ResultExt};
use strum::{EnumIter, IntoEnumIterator};
use substrait::df_logical_plan::consumer::name_to_op;
-use crate::adapter::error::{Error, ExternalSnafu, InvalidQuerySnafu, PlanSnafu};
+use crate::error::{Error, ExternalSnafu, InvalidQuerySnafu, PlanSnafu};
use crate::expr::error::{
CastValueSnafu, DivisionByZeroSnafu, EvalError, InternalSnafu, OverflowSnafu,
TryFromValueSnafu, TypeMismatchSnafu,
diff --git a/src/flow/src/expr/linear.rs b/src/flow/src/expr/linear.rs
index b0e32c94d87b..5eaf3ebd3547 100644
--- a/src/flow/src/expr/linear.rs
+++ b/src/flow/src/expr/linear.rs
@@ -21,7 +21,7 @@ use itertools::Itertools;
use serde::{Deserialize, Serialize};
use snafu::{ensure, OptionExt};
-use crate::adapter::error::{Error, InvalidQuerySnafu};
+use crate::error::{Error, InvalidQuerySnafu};
use crate::expr::error::EvalError;
use crate::expr::{Id, InvalidArgumentSnafu, LocalId, ScalarExpr};
use crate::repr::{self, value_to_internal_ts, Diff, Row};
diff --git a/src/flow/src/expr/relation/func.rs b/src/flow/src/expr/relation/func.rs
index 6aa53c80ca9d..7923f8149353 100644
--- a/src/flow/src/expr/relation/func.rs
+++ b/src/flow/src/expr/relation/func.rs
@@ -24,7 +24,7 @@ use smallvec::smallvec;
use snafu::{OptionExt, ResultExt};
use strum::{EnumIter, IntoEnumIterator};
-use crate::adapter::error::{DatafusionSnafu, Error, InvalidQuerySnafu};
+use crate::error::{DatafusionSnafu, Error, InvalidQuerySnafu};
use crate::expr::error::{EvalError, TryFromValueSnafu, TypeMismatchSnafu};
use crate::expr::relation::accum::{Accum, Accumulator};
use crate::expr::signature::{GenericFn, Signature};
diff --git a/src/flow/src/expr/scalar.rs b/src/flow/src/expr/scalar.rs
index 591d2c246fc1..0bf7c4dea4f0 100644
--- a/src/flow/src/expr/scalar.rs
+++ b/src/flow/src/expr/scalar.rs
@@ -32,7 +32,7 @@ use substrait::error::{DecodeRelSnafu, EncodeRelSnafu};
use substrait::substrait_proto_df::proto::expression::{RexType, ScalarFunction};
use substrait::substrait_proto_df::proto::Expression;
-use crate::adapter::error::{
+use crate::error::{
DatafusionSnafu, Error, InvalidQuerySnafu, UnexpectedSnafu, UnsupportedTemporalFilterSnafu,
};
use crate::expr::error::{
@@ -284,7 +284,7 @@ impl RawDfScalarFn {
f.encode(&mut buf)
.context(EncodeRelSnafu)
.map_err(BoxedError::new)
- .context(crate::adapter::error::ExternalSnafu)?;
+ .context(crate::error::ExternalSnafu)?;
Ok(Self {
f: buf,
input_schema,
@@ -295,7 +295,7 @@ impl RawDfScalarFn {
let f = ScalarFunction::decode(&mut self.f.as_ref())
.context(DecodeRelSnafu)
.map_err(BoxedError::new)
- .context(crate::adapter::error::ExternalSnafu)?;
+ .context(crate::error::ExternalSnafu)?;
let input_schema = &self.input_schema;
let extensions = &self.extensions;
@@ -371,7 +371,7 @@ impl ScalarExpr {
})?;
let typ = ConcreteDataType::try_from(&arrow_typ)
.map_err(BoxedError::new)
- .context(crate::adapter::error::ExternalSnafu)?;
+ .context(crate::error::ExternalSnafu)?;
Ok(ColumnType::new_nullable(typ))
}
}
diff --git a/src/flow/src/heartbeat.rs b/src/flow/src/heartbeat.rs
index ed3fe66a8651..339f53520d0d 100644
--- a/src/flow/src/heartbeat.rs
+++ b/src/flow/src/heartbeat.rs
@@ -14,25 +14,29 @@
//! Send heartbeat from flownode to metasrv
+use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use api::v1::meta::{HeartbeatRequest, Peer};
use common_error::ext::BoxedError;
+use common_grpc::channel_manager::{ChannelConfig, ChannelManager};
+use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler;
use common_meta::heartbeat::handler::{
- HeartbeatResponseHandlerContext, HeartbeatResponseHandlerExecutorRef,
+ HandlerGroupExecutor, HeartbeatResponseHandlerContext, HeartbeatResponseHandlerExecutorRef,
};
use common_meta::heartbeat::mailbox::{HeartbeatMailbox, MailboxRef, OutgoingMessage};
use common_meta::heartbeat::utils::outgoing_message_to_mailbox_message;
-use common_telemetry::{debug, error, info};
+use common_telemetry::{debug, error, info, warn};
use greptime_proto::v1::meta::NodeInfo;
-use meta_client::client::{HeartbeatSender, HeartbeatStream, MetaClient};
+use meta_client::client::{HeartbeatSender, HeartbeatStream, MetaClient, MetaClientBuilder};
+use meta_client::MetaClientOptions;
use servers::addrs;
use servers::heartbeat_options::HeartbeatOptions;
use snafu::ResultExt;
use tokio::sync::mpsc;
use tokio::time::{Duration, Instant};
-use crate::adapter::error::ExternalSnafu;
+use crate::error::{ExternalSnafu, MetaClientInitSnafu};
use crate::{Error, FlownodeOptions};
/// The flownode heartbeat task, which sends `[HeartbeatRequest]` to Metasrv periodically in the background.
@@ -45,6 +49,7 @@ pub struct HeartbeatTask {
retry_interval: Duration,
resp_handler_executor: HeartbeatResponseHandlerExecutorRef,
start_time_ms: u64,
+ running: Arc<AtomicBool>,
}
impl HeartbeatTask {
@@ -62,10 +67,19 @@ impl HeartbeatTask {
retry_interval: heartbeat_opts.retry_interval,
resp_handler_executor,
start_time_ms: common_time::util::current_time_millis() as u64,
+ running: Arc::new(AtomicBool::new(false)),
}
}
pub async fn start(&self) -> Result<(), Error> {
+ if self
+ .running
+ .compare_exchange(false, true, Ordering::AcqRel, Ordering::Acquire)
+ .is_err()
+ {
+ warn!("Heartbeat task started multiple times");
+ return Ok(());
+ }
info!("Start to establish the heartbeat connection to metasrv.");
let (req_sender, resp_stream) = self
.meta_client
@@ -86,6 +100,17 @@ impl HeartbeatTask {
Ok(())
}
+ pub fn shutdown(&self) {
+ info!("Close heartbeat task for flownode");
+ if self
+ .running
+ .compare_exchange(true, false, Ordering::AcqRel, Ordering::Acquire)
+ .is_err()
+ {
+ warn!("Call close heartbeat task multiple times");
+ }
+ }
+
fn create_heartbeat_request(
message: Option<OutgoingMessage>,
peer: Option<Peer>,
@@ -208,3 +233,38 @@ impl HeartbeatTask {
}
}
}
+
+/// Create metasrv client instance and spawn heartbeat loop.
+pub async fn new_metasrv_client(
+ cluster_id: u64,
+ node_id: u64,
+ meta_config: &MetaClientOptions,
+) -> Result<MetaClient, Error> {
+ let member_id = node_id;
+ let config = ChannelConfig::new()
+ .timeout(meta_config.timeout)
+ .connect_timeout(meta_config.connect_timeout)
+ .tcp_nodelay(meta_config.tcp_nodelay);
+ let channel_manager = ChannelManager::with_config(config.clone());
+ let heartbeat_channel_manager = ChannelManager::with_config(
+ config
+ .timeout(meta_config.timeout)
+ .connect_timeout(meta_config.connect_timeout),
+ );
+
+ let mut meta_client = MetaClientBuilder::flownode_default_options(cluster_id, member_id)
+ .channel_manager(channel_manager)
+ .heartbeat_channel_manager(heartbeat_channel_manager)
+ .build();
+ meta_client
+ .start(&meta_config.metasrv_addrs)
+ .await
+ .context(MetaClientInitSnafu)?;
+
+ // required only when the heartbeat_client is enabled
+ meta_client
+ .ask_leader()
+ .await
+ .context(MetaClientInitSnafu)?;
+ Ok(meta_client)
+}
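Two small patterns are worth calling out in the heartbeat changes: `new_metasrv_client` builds and starts a flownode-role meta client before the heartbeat loop runs, and the new `running` flag makes `start`/`shutdown` idempotent via `compare_exchange`. The latter is generic enough to show in isolation; this is a standalone sketch of that guard, not the actual `HeartbeatTask`:

use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;

struct Task {
    running: Arc<AtomicBool>,
}

impl Task {
    // Only the first caller flips false -> true and actually starts work.
    fn start(&self) -> bool {
        self.running
            .compare_exchange(false, true, Ordering::AcqRel, Ordering::Acquire)
            .is_ok()
    }

    fn shutdown(&self) -> bool {
        self.running
            .compare_exchange(true, false, Ordering::AcqRel, Ordering::Acquire)
            .is_ok()
    }
}

fn main() {
    let task = Task {
        running: Arc::new(AtomicBool::new(false)),
    };
    assert!(task.start());
    assert!(!task.start()); // a second start is a no-op, mirroring the warn! branch above
    assert!(task.shutdown());
    assert!(!task.shutdown());
}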
diff --git a/src/flow/src/lib.rs b/src/flow/src/lib.rs
index 606ae65144ed..636a722b04c2 100644
--- a/src/flow/src/lib.rs
+++ b/src/flow/src/lib.rs
@@ -25,12 +25,15 @@
// allow unused for now because it should be use later
mod adapter;
mod compute;
+mod error;
mod expr;
-mod heartbeat;
+pub mod heartbeat;
mod plan;
mod repr;
+mod server;
mod transform;
mod utils;
-pub use adapter::error::{Error, Result};
-pub use adapter::{FlownodeBuilder, FlownodeManager, FlownodeManagerRef, FlownodeOptions};
+pub use adapter::{FlowWorkerManager, FlowWorkerManagerRef, FlownodeOptions};
+pub use error::{Error, Result};
+pub use server::{FlownodeBuilder, FlownodeInstance, FlownodeServer};
diff --git a/src/flow/src/plan.rs b/src/flow/src/plan.rs
index 95816b17cb03..c31ddb652e3b 100644
--- a/src/flow/src/plan.rs
+++ b/src/flow/src/plan.rs
@@ -23,7 +23,7 @@ use std::collections::BTreeSet;
use datatypes::arrow::ipc::Map;
use serde::{Deserialize, Serialize};
-use crate::adapter::error::Error;
+use crate::error::Error;
use crate::expr::{
AggregateExpr, EvalError, GlobalId, Id, LocalId, MapFilterProject, SafeMfpPlan, ScalarExpr,
TypedExpr,
diff --git a/src/flow/src/repr/relation.rs b/src/flow/src/repr/relation.rs
index 43947dc47236..382c7a63c3df 100644
--- a/src/flow/src/repr/relation.rs
+++ b/src/flow/src/repr/relation.rs
@@ -21,9 +21,7 @@ use itertools::Itertools;
use serde::{Deserialize, Serialize};
use snafu::{ensure, OptionExt, ResultExt};
-use crate::adapter::error::{
- DatafusionSnafu, InternalSnafu, InvalidQuerySnafu, Result, UnexpectedSnafu,
-};
+use crate::error::{DatafusionSnafu, InternalSnafu, InvalidQuerySnafu, Result, UnexpectedSnafu};
use crate::expr::{MapFilterProject, SafeMfpPlan, ScalarExpr};
/// a set of column indices that are "keys" for the collection.
diff --git a/src/flow/src/adapter/server.rs b/src/flow/src/server.rs
similarity index 50%
rename from src/flow/src/adapter/server.rs
rename to src/flow/src/server.rs
index c0d0854572c7..166f6b5f5f63 100644
--- a/src/flow/src/adapter/server.rs
+++ b/src/flow/src/server.rs
@@ -15,26 +15,48 @@
//! Implementation of grpc service for flow node
use std::net::SocketAddr;
+use std::sync::Arc;
+use catalog::CatalogManagerRef;
+use common_base::Plugins;
+use common_meta::ddl::table_meta;
+use common_meta::heartbeat::handler::HandlerGroupExecutor;
+use common_meta::key::TableMetadataManagerRef;
+use common_meta::kv_backend::KvBackendRef;
use common_meta::node_manager::Flownode;
use common_telemetry::tracing::info;
use futures::FutureExt;
use greptime_proto::v1::flow::{flow_server, FlowRequest, FlowResponse, InsertRequests};
use itertools::Itertools;
+use meta_client::client::MetaClient;
+use query::QueryEngineFactory;
+use serde::de::Unexpected;
use servers::error::{AlreadyStartedSnafu, StartGrpcSnafu, TcpBindSnafu, TcpIncomingSnafu};
+use servers::heartbeat_options::HeartbeatOptions;
+use servers::server::Server;
use snafu::{ensure, ResultExt};
use tokio::net::TcpListener;
use tokio::sync::{oneshot, Mutex};
use tonic::transport::server::TcpIncoming;
use tonic::{Request, Response, Status};
-use crate::adapter::FlownodeManagerRef;
+use crate::adapter::FlowWorkerManagerRef;
+use crate::error::{ParseAddrSnafu, ShutdownServerSnafu, StartServerSnafu, UnexpectedSnafu};
+use crate::heartbeat::HeartbeatTask;
+use crate::transform::register_function_to_query_engine;
+use crate::{Error, FlowWorkerManager, FlownodeOptions};
pub const FLOW_NODE_SERVER_NAME: &str = "FLOW_NODE_SERVER";
/// wrapping flow node manager to avoid orphan rule with Arc<...>
#[derive(Clone)]
pub struct FlowService {
- pub manager: FlownodeManagerRef,
+ pub manager: FlowWorkerManagerRef,
+}
+
+impl FlowService {
+ pub fn new(manager: FlowWorkerManagerRef) -> Self {
+ Self { manager }
+ }
}
#[async_trait::async_trait]
@@ -82,8 +104,17 @@ impl flow_server::Flow for FlowService {
}
pub struct FlownodeServer {
- pub shutdown_tx: Mutex<Option<oneshot::Sender<()>>>,
- pub flow_service: FlowService,
+ shutdown_tx: Mutex<Option<oneshot::Sender<()>>>,
+ flow_service: FlowService,
+}
+
+impl FlownodeServer {
+ pub fn new(flow_service: FlowService) -> Self {
+ Self {
+ flow_service,
+ shutdown_tx: Mutex::new(None),
+ }
+ }
}
impl FlownodeServer {
@@ -134,7 +165,6 @@ impl servers::server::Server for FlownodeServer {
.context(StartGrpcSnafu);
});
- // TODO(discord9): better place for dataflow to run per second
let manager_ref = self.flow_service.manager.clone();
let _handle = manager_ref.clone().run_background();
@@ -145,3 +175,126 @@ impl servers::server::Server for FlownodeServer {
FLOW_NODE_SERVER_NAME
}
}
+
+/// The flownode server instance.
+pub struct FlownodeInstance {
+ server: FlownodeServer,
+ addr: SocketAddr,
+ heartbeat_task: Option<HeartbeatTask>,
+}
+
+impl FlownodeInstance {
+ pub async fn start(&mut self) -> Result<(), crate::Error> {
+ if let Some(task) = &self.heartbeat_task {
+ task.start().await?;
+ }
+
+ self.addr = self
+ .server
+ .start(self.addr)
+ .await
+ .context(StartServerSnafu)?;
+ Ok(())
+ }
+ pub async fn shutdown(&self) -> Result<(), crate::Error> {
+ self.server.shutdown().await.context(ShutdownServerSnafu)?;
+
+ if let Some(task) = &self.heartbeat_task {
+ task.shutdown();
+ }
+
+ Ok(())
+ }
+
+ pub fn flow_worker_manager(&self) -> FlowWorkerManagerRef {
+ self.server.flow_service.manager.clone()
+ }
+}
+
+/// [`FlownodeInstance`] Builder
+pub struct FlownodeBuilder {
+ opts: FlownodeOptions,
+ plugins: Plugins,
+ table_meta: TableMetadataManagerRef,
+ catalog_manager: CatalogManagerRef,
+ heartbeat_task: Option<HeartbeatTask>,
+}
+
+impl FlownodeBuilder {
+ /// init flownode builder
+ pub fn new(
+ opts: FlownodeOptions,
+ plugins: Plugins,
+ table_meta: TableMetadataManagerRef,
+ catalog_manager: CatalogManagerRef,
+ ) -> Self {
+ Self {
+ opts,
+ plugins,
+ table_meta,
+ catalog_manager,
+ heartbeat_task: None,
+ }
+ }
+
+ pub fn with_heartbeat_task(self, heartbeat_task: HeartbeatTask) -> Self {
+ Self {
+ heartbeat_task: Some(heartbeat_task),
+ ..self
+ }
+ }
+
+ pub async fn build(self) -> Result<FlownodeInstance, Error> {
+ let manager = Arc::new(self.build_manager().await?);
+ let server = FlownodeServer::new(FlowService::new(manager.clone()));
+
+ let heartbeat_task = self.heartbeat_task;
+
+ let addr = self.opts.grpc.addr;
+ let instance = FlownodeInstance {
+ server,
+ addr: addr.parse().context(ParseAddrSnafu { addr })?,
+ heartbeat_task,
+ };
+ Ok(instance)
+ }
+
+ /// build [`FlowWorkerManager`], note this doesn't take ownership of `self`,
+ /// nor does it actually start running the worker.
+ async fn build_manager(&self) -> Result<FlowWorkerManager, Error> {
+ let catalog_manager = self.catalog_manager.clone();
+ let table_meta = self.table_meta.clone();
+
+ let query_engine_factory = QueryEngineFactory::new_with_plugins(
+ // query engine in flownode is only used to translate plans with resolved table sources.
+ catalog_manager,
+ None,
+ None,
+ None,
+ false,
+ self.plugins.clone(),
+ );
+ let query_engine = query_engine_factory.query_engine();
+
+ register_function_to_query_engine(&query_engine);
+
+ let (tx, rx) = oneshot::channel();
+
+ let node_id = self.opts.node_id.map(|id| id as u32);
+ let _handle = std::thread::spawn(move || {
+ let (flow_node_manager, mut worker) =
+ FlowWorkerManager::new_with_worker(node_id, query_engine, table_meta);
+ let _ = tx.send(flow_node_manager);
+ info!("Flow Worker started in new thread");
+ worker.run();
+ });
+ let man = rx.await.map_err(|_e| {
+ UnexpectedSnafu {
+ reason: "sender is dropped, failed to create flow node manager",
+ }
+ .build()
+ })?;
+ info!("Flow Node Manager started");
+ Ok(man)
+ }
+}
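`build_manager` above has to cope with a worker that is not `Send`: the worker is created and driven on a dedicated OS thread, and only a cheap handle travels back to the async side through a oneshot channel. A standalone sketch of that hand-off, using a plain mpsc sender as the stand-in handle and requiring only tokio (this is not the real `FlowWorkerManager` API):

use std::sync::mpsc;
use std::thread;
use tokio::sync::oneshot;

#[tokio::main]
async fn main() {
    let (tx, rx) = oneshot::channel();
    let _worker = thread::spawn(move || {
        // The (possibly !Send) worker state lives and runs on this thread only.
        let (handle, work_rx) = mpsc::channel::<u64>();
        let _ = tx.send(handle); // hand the Send handle back to the async side
        while let Ok(item) = work_rx.recv() {
            println!("worker got {item}");
        }
    });
    let handle = rx.await.expect("worker thread dropped the sender");
    handle.send(42).unwrap();
}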
diff --git a/src/flow/src/transform.rs b/src/flow/src/transform.rs
index e86dac85fb7f..ab3fdd87c001 100644
--- a/src/flow/src/transform.rs
+++ b/src/flow/src/transform.rs
@@ -37,11 +37,11 @@ use substrait::{
use substrait_proto::proto::extensions::simple_extension_declaration::MappingType;
use substrait_proto::proto::extensions::SimpleExtensionDeclaration;
-use crate::adapter::error::{
+use crate::adapter::FlownodeContext;
+use crate::error::{
Error, ExternalSnafu, InvalidQueryProstSnafu, NotImplementedSnafu, TableNotFoundSnafu,
UnexpectedSnafu,
};
-use crate::adapter::FlownodeContext;
use crate::expr::GlobalId;
use crate::plan::TypedPlan;
use crate::repr::RelationType;
diff --git a/src/flow/src/transform/aggr.rs b/src/flow/src/transform/aggr.rs
index 6456f00a5c75..64ecc3eec506 100644
--- a/src/flow/src/transform/aggr.rs
+++ b/src/flow/src/transform/aggr.rs
@@ -45,7 +45,7 @@ use substrait_proto::proto::read_rel::ReadType;
use substrait_proto::proto::rel::RelType;
use substrait_proto::proto::{self, plan_rel, Expression, Plan as SubPlan, Rel};
-use crate::adapter::error::{
+use crate::error::{
DatatypesSnafu, Error, EvalSnafu, InvalidQuerySnafu, NotImplementedSnafu, PlanSnafu,
TableNotFoundSnafu,
};
diff --git a/src/flow/src/transform/expr.rs b/src/flow/src/transform/expr.rs
index a10e9b121f8c..524ab9b546cc 100644
--- a/src/flow/src/transform/expr.rs
+++ b/src/flow/src/transform/expr.rs
@@ -25,7 +25,7 @@ use substrait_proto::proto::expression::{IfThen, RexType, ScalarFunction};
use substrait_proto::proto::function_argument::ArgType;
use substrait_proto::proto::Expression;
-use crate::adapter::error::{
+use crate::error::{
DatafusionSnafu, DatatypesSnafu, Error, EvalSnafu, InvalidQuerySnafu, NotImplementedSnafu,
PlanSnafu,
};
diff --git a/src/flow/src/transform/literal.rs b/src/flow/src/transform/literal.rs
index 1fa5bc86a81c..bd0f041dd825 100644
--- a/src/flow/src/transform/literal.rs
+++ b/src/flow/src/transform/literal.rs
@@ -26,7 +26,7 @@ use substrait_proto::proto::expression::literal::LiteralType;
use substrait_proto::proto::expression::Literal;
use substrait_proto::proto::r#type::Kind;
-use crate::adapter::error::{Error, NotImplementedSnafu, PlanSnafu};
+use crate::error::{Error, NotImplementedSnafu, PlanSnafu};
use crate::transform::substrait_proto;
/// Convert a Substrait literal into a Value and its ConcreteDataType (So that we can know type even if the value is null)
diff --git a/src/flow/src/transform/plan.rs b/src/flow/src/transform/plan.rs
index f1f6ba53dd35..200226fb352a 100644
--- a/src/flow/src/transform/plan.rs
+++ b/src/flow/src/transform/plan.rs
@@ -22,7 +22,7 @@ use substrait_proto::proto::read_rel::ReadType;
use substrait_proto::proto::rel::RelType;
use substrait_proto::proto::{plan_rel, Plan as SubPlan, ProjectRel, Rel};
-use crate::adapter::error::{
+use crate::error::{
Error, InternalSnafu, InvalidQuerySnafu, NotImplementedSnafu, PlanSnafu, UnexpectedSnafu,
};
use crate::expr::{MapFilterProject, ScalarExpr, TypedExpr, UnaryFunc};
diff --git a/src/meta-client/src/client.rs b/src/meta-client/src/client.rs
index 93b999635d62..d43840e73ad2 100644
--- a/src/meta-client/src/client.rs
+++ b/src/meta-client/src/client.rs
@@ -100,6 +100,13 @@ impl MetaClientBuilder {
.enable_heartbeat()
}
+    /// Returns a builder preconfigured with the default options for the `Flownode` role.
+ pub fn flownode_default_options(cluster_id: ClusterId, member_id: u64) -> Self {
+ Self::new(cluster_id, member_id, Role::Flownode)
+ .enable_store()
+ .enable_heartbeat()
+ }
+
pub fn enable_heartbeat(self) -> Self {
Self {
enable_heartbeat: true,
diff --git a/src/meta-srv/src/cache_invalidator.rs b/src/meta-srv/src/cache_invalidator.rs
index c14a20965a02..86b11ae007db 100644
--- a/src/meta-srv/src/cache_invalidator.rs
+++ b/src/meta-srv/src/cache_invalidator.rs
@@ -57,6 +57,21 @@ impl MetasrvCacheInvalidator {
.broadcast(&BroadcastChannel::Frontend, msg)
.await
.map_err(BoxedError::new)
+ .context(meta_error::ExternalSnafu)?;
+
+ let msg = &MailboxMessage::json_message(
+ subject,
+ &format!("Metasrv@{}", self.info.server_addr),
+ "Flownode broadcast",
+ common_time::util::current_time_millis(),
+ &instruction,
+ )
+ .with_context(|_| meta_error::SerdeJsonSnafu)?;
+
+ self.mailbox
+ .broadcast(&BroadcastChannel::Flownode, msg)
+ .await
+ .map_err(BoxedError::new)
.context(meta_error::ExternalSnafu)
}
}
diff --git a/src/meta-srv/src/service/mailbox.rs b/src/meta-srv/src/service/mailbox.rs
index 97aad7427a29..6d34401d8da3 100644
--- a/src/meta-srv/src/service/mailbox.rs
+++ b/src/meta-srv/src/service/mailbox.rs
@@ -33,6 +33,7 @@ pub type MessageId = u64;
pub enum Channel {
Datanode(u64),
Frontend(u64),
+ Flownode(u64),
}
impl Display for Channel {
@@ -44,6 +45,9 @@ impl Display for Channel {
Channel::Frontend(id) => {
write!(f, "Frontend-{}", id)
}
+ Channel::Flownode(id) => {
+ write!(f, "Flownode-{}", id)
+ }
}
}
}
@@ -53,12 +57,14 @@ impl Channel {
match self {
Channel::Datanode(id) => format!("{}-{}", Role::Datanode as i32, id),
Channel::Frontend(id) => format!("{}-{}", Role::Frontend as i32, id),
+ Channel::Flownode(id) => format!("{}-{}", Role::Flownode as i32, id),
}
}
}
pub enum BroadcastChannel {
Datanode,
Frontend,
+ Flownode,
}
impl BroadcastChannel {
@@ -70,7 +76,11 @@ impl BroadcastChannel {
},
BroadcastChannel::Frontend => Range {
start: format!("{}-", Role::Frontend as i32),
- end: format!("{}-", Role::Frontend as i32 + 1),
+ end: format!("{}-", Role::Flownode as i32),
+ },
+ BroadcastChannel::Flownode => Range {
+ start: format!("{}-", Role::Flownode as i32),
+ end: format!("{}-", Role::Flownode as i32 + 1),
},
}
}
@@ -144,5 +154,9 @@ mod tests {
BroadcastChannel::Frontend.pusher_range(),
("1-".to_string().."2-".to_string())
);
+ assert_eq!(
+ BroadcastChannel::Flownode.pusher_range(),
+ ("2-".to_string().."3-".to_string())
+ );
}
}
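
The pusher_range change above works because the Role discriminants are consecutive integers, so each channel's pushers fall into one lexicographic key range. A small self-contained sketch of that keying scheme follows; the numeric role ids (Datanode = 0, Frontend = 1, Flownode = 2) are assumptions taken from the test expectations, not imported constants.

use std::ops::Range;

// Assumed role ids mirroring the test above: Datanode = 0, Frontend = 1, Flownode = 2.
fn pusher_range(role: i32) -> Range<String> {
    Range {
        start: format!("{}-", role),
        end: format!("{}-", role + 1),
    }
}

fn main() {
    let flownode = pusher_range(2);
    // A pusher key is "<role>-<member id>", so "2-42" belongs to the Flownode range.
    assert!(flownode.contains(&"2-42".to_string()));
    assert!(!flownode.contains(&"1-42".to_string()));
    assert_eq!(flownode, "2-".to_string().."3-".to_string());
}
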
diff --git a/tests-integration/src/standalone.rs b/tests-integration/src/standalone.rs
index 458f51b0948a..77a31fb75fbf 100644
--- a/tests-integration/src/standalone.rs
+++ b/tests-integration/src/standalone.rs
@@ -151,17 +151,16 @@ impl GreptimeDbStandaloneBuilder {
);
let flow_builder = FlownodeBuilder::new(
- 1, // for standalone mode this value is default to one
Default::default(),
plugins.clone(),
table_metadata_manager.clone(),
catalog_manager.clone(),
);
- let flownode = Arc::new(flow_builder.build().await);
+ let flownode = Arc::new(flow_builder.build().await.unwrap());
let node_manager = Arc::new(StandaloneDatanodeManager {
region_server: datanode.region_server(),
- flow_server: flownode.clone(),
+ flow_server: flownode.flow_worker_manager(),
});
let table_id_sequence = Arc::new(
@@ -219,10 +218,11 @@ impl GreptimeDbStandaloneBuilder {
.await
.unwrap();
- flownode
+ let flow_manager = flownode.flow_worker_manager();
+ flow_manager
.set_frontend_invoker(Box::new(instance.clone()))
.await;
- let _node_handle = flownode.run_background();
+ let _node_handle = flow_manager.run_background();
procedure_manager.start().await.unwrap();
wal_options_allocator.start().await.unwrap();
type: feat
masked_commit_message: flow cli for distributed (#4226)

hash: 7c16a4a17b49290746c42306ab1444223ba054ba
date: 2022-12-16 13:02:59
author: Yingwen
commit_message: refactor(storage): Move write_batch::codec to a separate file (#757)
is_merge: false
git_diff:
diff --git a/src/storage/src/write_batch.rs b/src/storage/src/write_batch.rs
index 7f8768298fbf..25c51fef9b08 100644
--- a/src/storage/src/write_batch.rs
+++ b/src/storage/src/write_batch.rs
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+pub mod codec;
mod compat;
use std::any::Any;
@@ -499,280 +500,23 @@ impl<'a> IntoIterator for &'a WriteBatch {
}
}
-pub mod codec {
-
- use std::io::Cursor;
- use std::sync::Arc;
-
- use datatypes::arrow::ipc::reader::StreamReader;
- use datatypes::arrow::ipc::writer::{IpcWriteOptions, StreamWriter};
- use datatypes::arrow::record_batch::RecordBatch;
- use datatypes::schema::{Schema, SchemaRef};
- use datatypes::vectors::Helper;
- use prost::Message;
- use snafu::{ensure, OptionExt, ResultExt};
- use store_api::storage::WriteRequest;
-
- use crate::codec::{Decoder, Encoder};
- use crate::proto::wal::MutationType;
- use crate::proto::write_batch::{self, gen_columns, gen_put_data_vector};
- use crate::write_batch::{
- DataCorruptedSnafu, DecodeArrowSnafu, DecodeProtobufSnafu, DecodeVectorSnafu,
- EncodeArrowSnafu, EncodeProtobufSnafu, Error as WriteBatchError, FromProtobufSnafu,
- MissingColumnSnafu, Mutation, ParseSchemaSnafu, PutData, Result, ToProtobufSnafu,
- WriteBatch,
- };
-
- // TODO(jiachun): We can make a comparison with protobuf, including performance, storage cost,
- // CPU consumption, etc
- #[derive(Default)]
- pub struct WriteBatchArrowEncoder {}
-
- impl WriteBatchArrowEncoder {
- pub fn new() -> Self {
- Self::default()
- }
- }
-
- impl Encoder for WriteBatchArrowEncoder {
- type Item = WriteBatch;
- type Error = WriteBatchError;
-
- fn encode(&self, item: &WriteBatch, dst: &mut Vec<u8>) -> Result<()> {
- let item_schema = item.schema();
- let arrow_schema = item_schema.arrow_schema();
-
- let opts = IpcWriteOptions::default();
- let mut writer = StreamWriter::try_new_with_options(dst, arrow_schema, opts)
- .context(EncodeArrowSnafu)?;
-
- for mutation in item.iter() {
- let rb = match mutation {
- Mutation::Put(put) => {
- let arrays = item_schema
- .column_schemas()
- .iter()
- .map(|column_schema| {
- let vector = put.column_by_name(&column_schema.name).context(
- MissingColumnSnafu {
- name: &column_schema.name,
- },
- )?;
- Ok(vector.to_arrow_array())
- })
- .collect::<Result<Vec<_>>>()?;
-
- RecordBatch::try_new(arrow_schema.clone(), arrays)
- .context(EncodeArrowSnafu)?
- }
- };
- writer.write(&rb).context(EncodeArrowSnafu)?;
- }
- writer.finish().context(EncodeArrowSnafu)?;
- Ok(())
- }
- }
-
- pub struct WriteBatchArrowDecoder {
- mutation_types: Vec<i32>,
- }
-
- impl WriteBatchArrowDecoder {
- pub fn new(mutation_types: Vec<i32>) -> Self {
- Self { mutation_types }
- }
- }
-
- impl Decoder for WriteBatchArrowDecoder {
- type Item = WriteBatch;
- type Error = WriteBatchError;
-
- fn decode(&self, src: &[u8]) -> Result<WriteBatch> {
- let reader = Cursor::new(src);
- let mut reader = StreamReader::try_new(reader, None).context(DecodeArrowSnafu)?;
- let arrow_schema = reader.schema();
- let mut chunks = Vec::with_capacity(self.mutation_types.len());
-
- for maybe_record_batch in reader.by_ref() {
- let record_batch = maybe_record_batch.context(DecodeArrowSnafu)?;
- chunks.push(record_batch);
- }
-
- // check if exactly finished
- ensure!(
- reader.is_finished(),
- DataCorruptedSnafu {
- message: "Impossible, the num of data chunks is different than expected."
- }
- );
-
- ensure!(
- chunks.len() == self.mutation_types.len(),
- DataCorruptedSnafu {
- message: format!(
- "expected {} mutations, but got {}",
- self.mutation_types.len(),
- chunks.len()
- )
- }
- );
-
- let schema = Arc::new(Schema::try_from(arrow_schema).context(ParseSchemaSnafu)?);
- let mut write_batch = WriteBatch::new(schema.clone());
-
- for (mutation_type, record_batch) in self.mutation_types.iter().zip(chunks.into_iter())
- {
- match MutationType::from_i32(*mutation_type) {
- Some(MutationType::Put) => {
- let mut put_data = PutData::with_num_columns(schema.num_columns());
- for (column_schema, array) in schema
- .column_schemas()
- .iter()
- .zip(record_batch.columns().iter())
- {
- let vector =
- Helper::try_into_vector(array).context(DecodeVectorSnafu)?;
- put_data.add_column_by_name(&column_schema.name, vector)?;
- }
-
- write_batch.put(put_data)?;
- }
- Some(MutationType::Delete) => {
- unimplemented!("delete mutation is not implemented")
- }
- _ => {
- return DataCorruptedSnafu {
-                            message: format!("Unexpected mutation type: {}", mutation_type),
- }
- .fail()
- }
- }
- }
- Ok(write_batch)
- }
- }
-
- pub struct WriteBatchProtobufEncoder {}
-
- impl Encoder for WriteBatchProtobufEncoder {
- type Item = WriteBatch;
- type Error = WriteBatchError;
-
- fn encode(&self, item: &WriteBatch, dst: &mut Vec<u8>) -> Result<()> {
- let schema = item.schema().into();
-
- let mutations = item
- .iter()
- .map(|mtn| match mtn {
- Mutation::Put(put_data) => item
- .schema()
- .column_schemas()
- .iter()
- .map(|cs| {
- let vector = put_data
- .column_by_name(&cs.name)
- .context(MissingColumnSnafu { name: &cs.name })?;
- gen_columns(vector).context(ToProtobufSnafu)
- })
- .collect::<Result<Vec<_>>>(),
- })
- .collect::<Result<Vec<_>>>()?
- .into_iter()
- .map(|columns| write_batch::Mutation {
- mutation: Some(write_batch::mutation::Mutation::Put(write_batch::Put {
- columns,
- })),
- })
- .collect();
-
- let write_batch = write_batch::WriteBatch {
- schema: Some(schema),
- mutations,
- };
-
- write_batch.encode(dst).context(EncodeProtobufSnafu)
- }
- }
-
- pub struct WriteBatchProtobufDecoder {
- mutation_types: Vec<i32>,
- }
-
- impl WriteBatchProtobufDecoder {
- #[allow(dead_code)]
- pub fn new(mutation_types: Vec<i32>) -> Self {
- Self { mutation_types }
- }
- }
-
- impl Decoder for WriteBatchProtobufDecoder {
- type Item = WriteBatch;
- type Error = WriteBatchError;
-
- fn decode(&self, src: &[u8]) -> Result<WriteBatch> {
- let write_batch = write_batch::WriteBatch::decode(src).context(DecodeProtobufSnafu)?;
-
- let schema = write_batch.schema.context(DataCorruptedSnafu {
- message: "schema required",
- })?;
-
- let schema = SchemaRef::try_from(schema).context(FromProtobufSnafu {})?;
-
- ensure!(
- write_batch.mutations.len() == self.mutation_types.len(),
- DataCorruptedSnafu {
- message: &format!(
- "expected {} mutations, but got {}",
- self.mutation_types.len(),
- write_batch.mutations.len()
- )
- }
- );
-
- let mutations = write_batch
- .mutations
- .into_iter()
- .map(|mtn| match mtn.mutation {
- Some(write_batch::mutation::Mutation::Put(put)) => {
- let mut put_data = PutData::with_num_columns(put.columns.len());
-
- let res = schema
- .column_schemas()
- .iter()
- .map(|column| (column.name.clone(), column.data_type.clone()))
- .zip(put.columns.into_iter())
- .map(|((name, data_type), column)| {
- gen_put_data_vector(data_type, column)
- .map(|vector| (name, vector))
- .context(FromProtobufSnafu)
- })
- .collect::<Result<Vec<_>>>()?
- .into_iter()
- .map(|(name, vector)| put_data.add_column_by_name(&name, vector))
- .collect::<Result<Vec<_>>>();
-
- res.map(|_| Mutation::Put(put_data))
- }
- Some(write_batch::mutation::Mutation::Delete(_)) => todo!(),
- _ => DataCorruptedSnafu {
- message: "invalid mutation type",
- }
- .fail(),
- })
- .collect::<Result<Vec<_>>>()?;
-
- let mut write_batch = WriteBatch::new(schema);
+#[cfg(test)]
+pub(crate) fn new_test_batch() -> WriteBatch {
+ use datatypes::type_id::LogicalTypeId;
- mutations
- .into_iter()
- .try_for_each(|mutation| match mutation {
- Mutation::Put(put_data) => write_batch.put(put_data),
- })?;
+ use crate::test_util::write_batch_util;
- Ok(write_batch)
- }
- }
+ write_batch_util::new_write_batch(
+ &[
+ ("k1", LogicalTypeId::UInt64, false),
+ (consts::VERSION_COLUMN_NAME, LogicalTypeId::UInt64, false),
+ ("ts", LogicalTypeId::TimestampMillisecond, false),
+ ("v1", LogicalTypeId::Boolean, true),
+ ],
+ Some(2),
+ )
}
+
#[cfg(test)]
mod tests {
use std::iter;
@@ -785,8 +529,6 @@ mod tests {
};
use super::*;
- use crate::codec::{Decoder, Encoder};
- use crate::proto;
use crate::test_util::write_batch_util;
#[test]
@@ -824,18 +566,6 @@ mod tests {
assert!(put_data.is_empty());
}
- fn new_test_batch() -> WriteBatch {
- write_batch_util::new_write_batch(
- &[
- ("k1", LogicalTypeId::UInt64, false),
- (consts::VERSION_COLUMN_NAME, LogicalTypeId::UInt64, false),
- ("ts", LogicalTypeId::TimestampMillisecond, false),
- ("v1", LogicalTypeId::Boolean, true),
- ],
- Some(2),
- )
- }
-
#[test]
fn test_write_batch_put() {
let intv = Arc::new(UInt64Vector::from_slice(&[1, 2, 3]));
@@ -959,7 +689,7 @@ mod tests {
}
#[test]
- pub fn test_align_timestamp() {
+ fn test_align_timestamp() {
let duration_millis = 20;
let ts = [-21, -20, -19, -1, 0, 5, 15, 19, 20, 21];
let res = ts.map(|t| align_timestamp(t, duration_millis));
@@ -967,7 +697,7 @@ mod tests {
}
#[test]
- pub fn test_align_timestamp_overflow() {
+ fn test_align_timestamp_overflow() {
assert_eq!(Some(i64::MIN), align_timestamp(i64::MIN, 1));
assert_eq!(Some(-9223372036854775808), align_timestamp(i64::MIN, 2));
assert_eq!(
@@ -982,7 +712,7 @@ mod tests {
}
#[test]
- pub fn test_write_batch_time_range() {
+ fn test_write_batch_time_range() {
let intv = Arc::new(UInt64Vector::from_slice(&[1, 2, 3, 4, 5, 6]));
let tsv = Arc::new(TimestampMillisecondVector::from_vec(vec![
-21, -20, -1, 0, 1, 20,
@@ -1011,7 +741,7 @@ mod tests {
}
#[test]
- pub fn test_write_batch_time_range_const_vector() {
+ fn test_write_batch_time_range_const_vector() {
let intv = Arc::new(UInt64Vector::from_slice(&[1, 2, 3, 4, 5, 6]));
let tsv = Arc::new(ConstantVector::new(
Arc::new(TimestampMillisecondVector::from_vec(vec![20])),
@@ -1039,111 +769,4 @@ mod tests {
ranges.as_slice()
)
}
-
- fn gen_new_batch_and_types() -> (WriteBatch, Vec<i32>) {
- let mut batch = new_test_batch();
- for i in 0..10 {
- let intv = Arc::new(UInt64Vector::from_slice(&[1, 2, 3]));
- let boolv = Arc::new(BooleanVector::from(vec![Some(true), Some(false), None]));
- let tsv = Arc::new(TimestampMillisecondVector::from_vec(vec![i, i, i]));
-
- let mut put_data = PutData::new();
- put_data.add_key_column("k1", intv.clone()).unwrap();
- put_data.add_version_column(intv).unwrap();
- put_data.add_value_column("v1", boolv).unwrap();
- put_data.add_key_column("ts", tsv).unwrap();
-
- batch.put(put_data).unwrap();
- }
-
- let types = proto::wal::gen_mutation_types(&batch);
-
- (batch, types)
- }
-
- #[test]
- fn test_codec_arrow() -> Result<()> {
- let (batch, mutation_types) = gen_new_batch_and_types();
-
- let encoder = codec::WriteBatchArrowEncoder::new();
- let mut dst = vec![];
- let result = encoder.encode(&batch, &mut dst);
- assert!(result.is_ok());
-
- let decoder = codec::WriteBatchArrowDecoder::new(mutation_types);
- let result = decoder.decode(&dst);
- let batch2 = result?;
- assert_eq!(batch.num_rows, batch2.num_rows);
-
- Ok(())
- }
-
- #[test]
- fn test_codec_protobuf() -> Result<()> {
- let (batch, mutation_types) = gen_new_batch_and_types();
-
- let encoder = codec::WriteBatchProtobufEncoder {};
- let mut dst = vec![];
- let result = encoder.encode(&batch, &mut dst);
- assert!(result.is_ok());
-
- let decoder = codec::WriteBatchProtobufDecoder::new(mutation_types);
- let result = decoder.decode(&dst);
- let batch2 = result?;
- assert_eq!(batch.num_rows, batch2.num_rows);
-
- Ok(())
- }
-
- fn gen_new_batch_and_types_with_none_column() -> (WriteBatch, Vec<i32>) {
- let mut batch = new_test_batch();
- for _ in 0..10 {
- let intv = Arc::new(UInt64Vector::from_slice(&[1, 2, 3]));
- let tsv = Arc::new(TimestampMillisecondVector::from_vec(vec![0, 0, 0]));
-
- let mut put_data = PutData::new();
- put_data.add_key_column("k1", intv.clone()).unwrap();
- put_data.add_version_column(intv).unwrap();
- put_data.add_key_column("ts", tsv).unwrap();
-
- batch.put(put_data).unwrap();
- }
-
- let types = proto::wal::gen_mutation_types(&batch);
-
- (batch, types)
- }
-
- #[test]
- fn test_codec_with_none_column_arrow() -> Result<()> {
- let (batch, mutation_types) = gen_new_batch_and_types_with_none_column();
-
- let encoder = codec::WriteBatchArrowEncoder::new();
- let mut dst = vec![];
- let result = encoder.encode(&batch, &mut dst);
- assert!(result.is_ok());
-
- let decoder = codec::WriteBatchArrowDecoder::new(mutation_types);
- let result = decoder.decode(&dst);
- let batch2 = result?;
- assert_eq!(batch.num_rows, batch2.num_rows);
-
- Ok(())
- }
-
- #[test]
- fn test_codec_with_none_column_protobuf() -> Result<()> {
- let (batch, mutation_types) = gen_new_batch_and_types_with_none_column();
-
- let encoder = codec::WriteBatchProtobufEncoder {};
- let mut dst = vec![];
- encoder.encode(&batch, &mut dst).unwrap();
-
- let decoder = codec::WriteBatchProtobufDecoder::new(mutation_types);
- let result = decoder.decode(&dst);
- let batch2 = result?;
- assert_eq!(batch.num_rows, batch2.num_rows);
-
- Ok(())
- }
}
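
What remains in write_batch.rs after the move is a new_test_batch fixture marked #[cfg(test)] but pub(crate), so tests that now live in the codec module can keep reusing it. A minimal sketch of that visibility pattern, with a hypothetical Batch type standing in for the real WriteBatch:

pub struct Batch {
    pub num_rows: usize,
}

// Compiled only in test builds, but crate-visible so tests in sibling modules
// (such as a codec submodule) can share the fixture.
#[cfg(test)]
pub(crate) fn new_test_batch() -> Batch {
    Batch { num_rows: 0 }
}

#[cfg(test)]
mod tests {
    use super::new_test_batch;

    #[test]
    fn build_fixture() {
        assert_eq!(new_test_batch().num_rows, 0);
    }
}
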
diff --git a/src/storage/src/write_batch/codec.rs b/src/storage/src/write_batch/codec.rs
new file mode 100644
index 000000000000..56bc387bb277
--- /dev/null
+++ b/src/storage/src/write_batch/codec.rs
@@ -0,0 +1,399 @@
+// Copyright 2022 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::io::Cursor;
+use std::sync::Arc;
+
+use datatypes::arrow::ipc::reader::StreamReader;
+use datatypes::arrow::ipc::writer::{IpcWriteOptions, StreamWriter};
+use datatypes::arrow::record_batch::RecordBatch;
+use datatypes::schema::{Schema, SchemaRef};
+use datatypes::vectors::Helper;
+use prost::Message;
+use snafu::{ensure, OptionExt, ResultExt};
+use store_api::storage::WriteRequest;
+
+use crate::codec::{Decoder, Encoder};
+use crate::proto::wal::MutationType;
+use crate::proto::write_batch::{self, gen_columns, gen_put_data_vector};
+use crate::write_batch::{
+ DataCorruptedSnafu, DecodeArrowSnafu, DecodeProtobufSnafu, DecodeVectorSnafu, EncodeArrowSnafu,
+ EncodeProtobufSnafu, Error as WriteBatchError, FromProtobufSnafu, MissingColumnSnafu, Mutation,
+ ParseSchemaSnafu, PutData, Result, ToProtobufSnafu, WriteBatch,
+};
+
+// TODO(jiachun): We can make a comparison with protobuf, including performance, storage cost,
+// CPU consumption, etc
+#[derive(Default)]
+pub struct WriteBatchArrowEncoder {}
+
+impl WriteBatchArrowEncoder {
+ pub fn new() -> Self {
+ Self::default()
+ }
+}
+
+impl Encoder for WriteBatchArrowEncoder {
+ type Item = WriteBatch;
+ type Error = WriteBatchError;
+
+ fn encode(&self, item: &WriteBatch, dst: &mut Vec<u8>) -> Result<()> {
+ let item_schema = item.schema();
+ let arrow_schema = item_schema.arrow_schema();
+
+ let opts = IpcWriteOptions::default();
+ let mut writer = StreamWriter::try_new_with_options(dst, arrow_schema, opts)
+ .context(EncodeArrowSnafu)?;
+
+ for mutation in item.iter() {
+ let rb = match mutation {
+ Mutation::Put(put) => {
+ let arrays = item_schema
+ .column_schemas()
+ .iter()
+ .map(|column_schema| {
+ let vector = put.column_by_name(&column_schema.name).context(
+ MissingColumnSnafu {
+ name: &column_schema.name,
+ },
+ )?;
+ Ok(vector.to_arrow_array())
+ })
+ .collect::<Result<Vec<_>>>()?;
+
+ RecordBatch::try_new(arrow_schema.clone(), arrays).context(EncodeArrowSnafu)?
+ }
+ };
+ writer.write(&rb).context(EncodeArrowSnafu)?;
+ }
+ writer.finish().context(EncodeArrowSnafu)?;
+ Ok(())
+ }
+}
+
+pub struct WriteBatchArrowDecoder {
+ mutation_types: Vec<i32>,
+}
+
+impl WriteBatchArrowDecoder {
+ pub fn new(mutation_types: Vec<i32>) -> Self {
+ Self { mutation_types }
+ }
+}
+
+impl Decoder for WriteBatchArrowDecoder {
+ type Item = WriteBatch;
+ type Error = WriteBatchError;
+
+ fn decode(&self, src: &[u8]) -> Result<WriteBatch> {
+ let reader = Cursor::new(src);
+ let mut reader = StreamReader::try_new(reader, None).context(DecodeArrowSnafu)?;
+ let arrow_schema = reader.schema();
+ let mut chunks = Vec::with_capacity(self.mutation_types.len());
+
+ for maybe_record_batch in reader.by_ref() {
+ let record_batch = maybe_record_batch.context(DecodeArrowSnafu)?;
+ chunks.push(record_batch);
+ }
+
+ // check if exactly finished
+ ensure!(
+ reader.is_finished(),
+ DataCorruptedSnafu {
+ message: "Impossible, the num of data chunks is different than expected."
+ }
+ );
+
+ ensure!(
+ chunks.len() == self.mutation_types.len(),
+ DataCorruptedSnafu {
+ message: format!(
+ "expected {} mutations, but got {}",
+ self.mutation_types.len(),
+ chunks.len()
+ )
+ }
+ );
+
+ let schema = Arc::new(Schema::try_from(arrow_schema).context(ParseSchemaSnafu)?);
+ let mut write_batch = WriteBatch::new(schema.clone());
+
+ for (mutation_type, record_batch) in self.mutation_types.iter().zip(chunks.into_iter()) {
+ match MutationType::from_i32(*mutation_type) {
+ Some(MutationType::Put) => {
+ let mut put_data = PutData::with_num_columns(schema.num_columns());
+ for (column_schema, array) in schema
+ .column_schemas()
+ .iter()
+ .zip(record_batch.columns().iter())
+ {
+ let vector = Helper::try_into_vector(array).context(DecodeVectorSnafu)?;
+ put_data.add_column_by_name(&column_schema.name, vector)?;
+ }
+
+ write_batch.put(put_data)?;
+ }
+ Some(MutationType::Delete) => {
+ unimplemented!("delete mutation is not implemented")
+ }
+ _ => {
+ return DataCorruptedSnafu {
+                        message: format!("Unexpected mutation type: {}", mutation_type),
+ }
+ .fail()
+ }
+ }
+ }
+ Ok(write_batch)
+ }
+}
+
+pub struct WriteBatchProtobufEncoder {}
+
+impl Encoder for WriteBatchProtobufEncoder {
+ type Item = WriteBatch;
+ type Error = WriteBatchError;
+
+ fn encode(&self, item: &WriteBatch, dst: &mut Vec<u8>) -> Result<()> {
+ let schema = item.schema().into();
+
+ let mutations = item
+ .iter()
+ .map(|mtn| match mtn {
+ Mutation::Put(put_data) => item
+ .schema()
+ .column_schemas()
+ .iter()
+ .map(|cs| {
+ let vector = put_data
+ .column_by_name(&cs.name)
+ .context(MissingColumnSnafu { name: &cs.name })?;
+ gen_columns(vector).context(ToProtobufSnafu)
+ })
+ .collect::<Result<Vec<_>>>(),
+ })
+ .collect::<Result<Vec<_>>>()?
+ .into_iter()
+ .map(|columns| write_batch::Mutation {
+ mutation: Some(write_batch::mutation::Mutation::Put(write_batch::Put {
+ columns,
+ })),
+ })
+ .collect();
+
+ let write_batch = write_batch::WriteBatch {
+ schema: Some(schema),
+ mutations,
+ };
+
+ write_batch.encode(dst).context(EncodeProtobufSnafu)
+ }
+}
+
+pub struct WriteBatchProtobufDecoder {
+ mutation_types: Vec<i32>,
+}
+
+impl WriteBatchProtobufDecoder {
+ #[allow(dead_code)]
+ pub fn new(mutation_types: Vec<i32>) -> Self {
+ Self { mutation_types }
+ }
+}
+
+impl Decoder for WriteBatchProtobufDecoder {
+ type Item = WriteBatch;
+ type Error = WriteBatchError;
+
+ fn decode(&self, src: &[u8]) -> Result<WriteBatch> {
+ let write_batch = write_batch::WriteBatch::decode(src).context(DecodeProtobufSnafu)?;
+
+ let schema = write_batch.schema.context(DataCorruptedSnafu {
+ message: "schema required",
+ })?;
+
+ let schema = SchemaRef::try_from(schema).context(FromProtobufSnafu {})?;
+
+ ensure!(
+ write_batch.mutations.len() == self.mutation_types.len(),
+ DataCorruptedSnafu {
+ message: &format!(
+ "expected {} mutations, but got {}",
+ self.mutation_types.len(),
+ write_batch.mutations.len()
+ )
+ }
+ );
+
+ let mutations = write_batch
+ .mutations
+ .into_iter()
+ .map(|mtn| match mtn.mutation {
+ Some(write_batch::mutation::Mutation::Put(put)) => {
+ let mut put_data = PutData::with_num_columns(put.columns.len());
+
+ let res = schema
+ .column_schemas()
+ .iter()
+ .map(|column| (column.name.clone(), column.data_type.clone()))
+ .zip(put.columns.into_iter())
+ .map(|((name, data_type), column)| {
+ gen_put_data_vector(data_type, column)
+ .map(|vector| (name, vector))
+ .context(FromProtobufSnafu)
+ })
+ .collect::<Result<Vec<_>>>()?
+ .into_iter()
+ .map(|(name, vector)| put_data.add_column_by_name(&name, vector))
+ .collect::<Result<Vec<_>>>();
+
+ res.map(|_| Mutation::Put(put_data))
+ }
+ Some(write_batch::mutation::Mutation::Delete(_)) => todo!(),
+ _ => DataCorruptedSnafu {
+ message: "invalid mutation type",
+ }
+ .fail(),
+ })
+ .collect::<Result<Vec<_>>>()?;
+
+ let mut write_batch = WriteBatch::new(schema);
+
+ mutations
+ .into_iter()
+ .try_for_each(|mutation| match mutation {
+ Mutation::Put(put_data) => write_batch.put(put_data),
+ })?;
+
+ Ok(write_batch)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::sync::Arc;
+
+ use datatypes::vectors::{BooleanVector, TimestampMillisecondVector, UInt64Vector};
+ use store_api::storage::PutOperation;
+
+ use super::*;
+ use crate::{proto, write_batch};
+
+ fn gen_new_batch_and_types() -> (WriteBatch, Vec<i32>) {
+ let mut batch = write_batch::new_test_batch();
+ for i in 0..10 {
+ let intv = Arc::new(UInt64Vector::from_slice(&[1, 2, 3]));
+ let boolv = Arc::new(BooleanVector::from(vec![Some(true), Some(false), None]));
+ let tsv = Arc::new(TimestampMillisecondVector::from_vec(vec![i, i, i]));
+
+ let mut put_data = PutData::new();
+ put_data.add_key_column("k1", intv.clone()).unwrap();
+ put_data.add_version_column(intv).unwrap();
+ put_data.add_value_column("v1", boolv).unwrap();
+ put_data.add_key_column("ts", tsv).unwrap();
+
+ batch.put(put_data).unwrap();
+ }
+
+ let types = proto::wal::gen_mutation_types(&batch);
+
+ (batch, types)
+ }
+
+ #[test]
+ fn test_codec_arrow() -> Result<()> {
+ let (batch, mutation_types) = gen_new_batch_and_types();
+
+ let encoder = WriteBatchArrowEncoder::new();
+ let mut dst = vec![];
+ let result = encoder.encode(&batch, &mut dst);
+ assert!(result.is_ok());
+
+ let decoder = WriteBatchArrowDecoder::new(mutation_types);
+ let result = decoder.decode(&dst);
+ let batch2 = result?;
+ assert_eq!(batch.num_rows, batch2.num_rows);
+
+ Ok(())
+ }
+
+ #[test]
+ fn test_codec_protobuf() -> Result<()> {
+ let (batch, mutation_types) = gen_new_batch_and_types();
+
+ let encoder = WriteBatchProtobufEncoder {};
+ let mut dst = vec![];
+ let result = encoder.encode(&batch, &mut dst);
+ assert!(result.is_ok());
+
+ let decoder = WriteBatchProtobufDecoder::new(mutation_types);
+ let result = decoder.decode(&dst);
+ let batch2 = result?;
+ assert_eq!(batch.num_rows, batch2.num_rows);
+
+ Ok(())
+ }
+
+ fn gen_new_batch_and_types_with_none_column() -> (WriteBatch, Vec<i32>) {
+ let mut batch = write_batch::new_test_batch();
+ for _ in 0..10 {
+ let intv = Arc::new(UInt64Vector::from_slice(&[1, 2, 3]));
+ let tsv = Arc::new(TimestampMillisecondVector::from_vec(vec![0, 0, 0]));
+
+ let mut put_data = PutData::new();
+ put_data.add_key_column("k1", intv.clone()).unwrap();
+ put_data.add_version_column(intv).unwrap();
+ put_data.add_key_column("ts", tsv).unwrap();
+
+ batch.put(put_data).unwrap();
+ }
+
+ let types = proto::wal::gen_mutation_types(&batch);
+
+ (batch, types)
+ }
+
+ #[test]
+ fn test_codec_with_none_column_arrow() -> Result<()> {
+ let (batch, mutation_types) = gen_new_batch_and_types_with_none_column();
+
+ let encoder = WriteBatchArrowEncoder::new();
+ let mut dst = vec![];
+ let result = encoder.encode(&batch, &mut dst);
+ assert!(result.is_ok());
+
+ let decoder = WriteBatchArrowDecoder::new(mutation_types);
+ let result = decoder.decode(&dst);
+ let batch2 = result?;
+ assert_eq!(batch.num_rows, batch2.num_rows);
+
+ Ok(())
+ }
+
+ #[test]
+ fn test_codec_with_none_column_protobuf() -> Result<()> {
+ let (batch, mutation_types) = gen_new_batch_and_types_with_none_column();
+
+ let encoder = WriteBatchProtobufEncoder {};
+ let mut dst = vec![];
+ encoder.encode(&batch, &mut dst).unwrap();
+
+ let decoder = WriteBatchProtobufDecoder::new(mutation_types);
+ let result = decoder.decode(&dst);
+ let batch2 = result?;
+ assert_eq!(batch.num_rows, batch2.num_rows);
+
+ Ok(())
+ }
+}
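
The encoder and decoder above are a thin layer over Arrow's IPC stream format. For reference, this is a minimal, self-contained round trip through arrow's StreamWriter and StreamReader; the schema and values are invented for illustration, and the snippet uses the upstream arrow crate directly rather than the datatypes re-exports used in the diff.

use std::io::Cursor;
use std::sync::Arc;

use arrow::array::{ArrayRef, Int64Array};
use arrow::datatypes::{DataType, Field, Schema};
use arrow::ipc::reader::StreamReader;
use arrow::ipc::writer::StreamWriter;
use arrow::record_batch::RecordBatch;

fn main() -> Result<(), arrow::error::ArrowError> {
    let schema = Arc::new(Schema::new(vec![Field::new("ts", DataType::Int64, false)]));
    let batch = RecordBatch::try_new(
        schema.clone(),
        vec![Arc::new(Int64Array::from(vec![1, 2, 3])) as ArrayRef],
    )?;

    // Encode: stream the batch into an in-memory buffer.
    let mut buf = Vec::new();
    {
        let mut writer = StreamWriter::try_new(&mut buf, &schema)?;
        writer.write(&batch)?;
        writer.finish()?;
    }

    // Decode: read the batches back and check the row count survived.
    let reader = StreamReader::try_new(Cursor::new(buf), None)?;
    let decoded: Vec<RecordBatch> = reader.collect::<Result<_, _>>()?;
    assert_eq!(decoded[0].num_rows(), 3);
    Ok(())
}
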
type: refactor
masked_commit_message: Move write_batch::codec to a separate file (#757)

hash: 60f8dbf7f01dc08e43b1145f7444ff467d741e38
date: 2024-12-11 19:03:54
author: Ruihang Xia
commit_message: feat: implement `v1/sql/parse` endpoint to parse GreptimeDB's SQL dialect (#5144)
is_merge: false
git_diff:
diff --git a/Cargo.lock b/Cargo.lock
index 628c6a582418..311caafcb2fe 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -11295,6 +11295,7 @@ dependencies = [
"jsonb",
"lazy_static",
"regex",
+ "serde",
"serde_json",
"snafu 0.8.5",
"sqlparser 0.45.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=54a267ac89c09b11c0c88934690530807185d3e7)",
@@ -11371,6 +11372,7 @@ dependencies = [
"lazy_static",
"log",
"regex",
+ "serde",
"sqlparser 0.45.0 (registry+https://github.com/rust-lang/crates.io-index)",
"sqlparser_derive 0.2.2 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=54a267ac89c09b11c0c88934690530807185d3e7)",
]
diff --git a/Cargo.toml b/Cargo.toml
index d1d360850e70..990bc71a907b 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -180,6 +180,7 @@ sysinfo = "0.30"
# on branch v0.44.x
sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "54a267ac89c09b11c0c88934690530807185d3e7", features = [
"visitor",
+ "serde",
] }
strum = { version = "0.25", features = ["derive"] }
tempfile = "3"
diff --git a/src/servers/src/error.rs b/src/servers/src/error.rs
index 6682a1c78967..071de93683cc 100644
--- a/src/servers/src/error.rs
+++ b/src/servers/src/error.rs
@@ -189,6 +189,13 @@ pub enum Error {
location: Location,
},
+ #[snafu(display("Failed to parse query"))]
+ FailedToParseQuery {
+ #[snafu(implicit)]
+ location: Location,
+ source: sql::error::Error,
+ },
+
#[snafu(display("Failed to parse InfluxDB line protocol"))]
InfluxdbLineProtocol {
#[snafu(implicit)]
@@ -651,7 +658,8 @@ impl ErrorExt for Error {
| OpenTelemetryLog { .. }
| UnsupportedJsonDataTypeForTag { .. }
| InvalidTableName { .. }
- | PrepareStatementNotFound { .. } => StatusCode::InvalidArguments,
+ | PrepareStatementNotFound { .. }
+ | FailedToParseQuery { .. } => StatusCode::InvalidArguments,
Catalog { source, .. } => source.status_code(),
RowWriter { source, .. } => source.status_code(),
diff --git a/src/servers/src/http.rs b/src/servers/src/http.rs
index d8d07ed31fa0..1107870c9a25 100644
--- a/src/servers/src/http.rs
+++ b/src/servers/src/http.rs
@@ -755,6 +755,10 @@ impl HttpServer {
fn route_sql<S>(api_state: ApiState) -> Router<S> {
Router::new()
.route("/sql", routing::get(handler::sql).post(handler::sql))
+ .route(
+ "/sql/parse",
+ routing::get(handler::sql_parse).post(handler::sql_parse),
+ )
.route(
"/promql",
routing::get(handler::promql).post(handler::promql),
diff --git a/src/servers/src/http/handler.rs b/src/servers/src/http/handler.rs
index 15a1a0e16c73..153b824d6ef1 100644
--- a/src/servers/src/http/handler.rs
+++ b/src/servers/src/http/handler.rs
@@ -30,8 +30,13 @@ use query::parser::{PromQuery, DEFAULT_LOOKBACK_STRING};
use serde::{Deserialize, Serialize};
use serde_json::Value;
use session::context::{Channel, QueryContext, QueryContextRef};
+use snafu::ResultExt;
+use sql::dialect::GreptimeDbDialect;
+use sql::parser::{ParseOptions, ParserContext};
+use sql::statements::statement::Statement;
use super::header::collect_plan_metrics;
+use crate::error::{FailedToParseQuerySnafu, InvalidQuerySnafu, Result};
use crate::http::result::arrow_result::ArrowResponse;
use crate::http::result::csv_result::CsvResponse;
use crate::http::result::error_result::ErrorResponse;
@@ -146,10 +151,31 @@ pub async fn sql(
resp.with_execution_time(start.elapsed().as_millis() as u64)
}
+/// Handler to parse sql
+#[axum_macros::debug_handler]
+#[tracing::instrument(skip_all, fields(protocol = "http", request_type = "sql"))]
+pub async fn sql_parse(
+ Query(query_params): Query<SqlQuery>,
+ Form(form_params): Form<SqlQuery>,
+) -> Result<Json<Vec<Statement>>> {
+ let Some(sql) = query_params.sql.or(form_params.sql) else {
+ return InvalidQuerySnafu {
+ reason: "sql parameter is required.",
+ }
+ .fail();
+ };
+
+ let stmts =
+ ParserContext::create_with_dialect(&sql, &GreptimeDbDialect {}, ParseOptions::default())
+ .context(FailedToParseQuerySnafu)?;
+
+ Ok(stmts.into())
+}
+
/// Create a response from query result
pub async fn from_output(
outputs: Vec<crate::error::Result<Output>>,
-) -> Result<(Vec<GreptimeQueryOutput>, HashMap<String, Value>), ErrorResponse> {
+) -> std::result::Result<(Vec<GreptimeQueryOutput>, HashMap<String, Value>), ErrorResponse> {
// TODO(sunng87): this api response structure cannot represent error well.
// It hides successful execution results from error response
let mut results = Vec::with_capacity(outputs.len());
diff --git a/src/sql/Cargo.toml b/src/sql/Cargo.toml
index e3340a8f6c90..3cb81d6dd494 100644
--- a/src/sql/Cargo.toml
+++ b/src/sql/Cargo.toml
@@ -30,6 +30,7 @@ itertools.workspace = true
jsonb.workspace = true
lazy_static.workspace = true
regex.workspace = true
+serde.workspace = true
serde_json.workspace = true
snafu.workspace = true
sqlparser.workspace = true
diff --git a/src/sql/src/statements/admin.rs b/src/sql/src/statements/admin.rs
index bbe805a4c163..ed068ea47510 100644
--- a/src/sql/src/statements/admin.rs
+++ b/src/sql/src/statements/admin.rs
@@ -14,12 +14,13 @@
use std::fmt::Display;
+use serde::Serialize;
use sqlparser_derive::{Visit, VisitMut};
use crate::ast::Function;
/// `ADMIN` statement to execute some administration commands.
-#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut, Serialize)]
pub enum Admin {
     /// Run an admin function.
Func(Function),
diff --git a/src/sql/src/statements/alter.rs b/src/sql/src/statements/alter.rs
index cf59257e8931..174bdbbdc310 100644
--- a/src/sql/src/statements/alter.rs
+++ b/src/sql/src/statements/alter.rs
@@ -18,10 +18,11 @@ use api::v1;
use common_query::AddColumnLocation;
use datatypes::schema::FulltextOptions;
use itertools::Itertools;
+use serde::Serialize;
use sqlparser::ast::{ColumnDef, DataType, Ident, ObjectName, TableConstraint};
use sqlparser_derive::{Visit, VisitMut};
-#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut, Serialize)]
pub struct AlterTable {
pub table_name: ObjectName,
pub alter_operation: AlterTableOperation,
@@ -56,7 +57,7 @@ impl Display for AlterTable {
}
}
-#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut, Serialize)]
pub enum AlterTableOperation {
/// `ADD <table_constraint>`
AddConstraint(TableConstraint),
@@ -151,7 +152,7 @@ impl Display for AlterTableOperation {
}
}
-#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut, Serialize)]
pub struct KeyValueOption {
pub key: String,
pub value: String,
@@ -166,7 +167,7 @@ impl From<KeyValueOption> for v1::Option {
}
}
-#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut, Serialize)]
pub struct AlterDatabase {
pub database_name: ObjectName,
pub alter_operation: AlterDatabaseOperation,
@@ -197,7 +198,7 @@ impl Display for AlterDatabase {
}
}
-#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut, Serialize)]
pub enum AlterDatabaseOperation {
SetDatabaseOption { options: Vec<KeyValueOption> },
UnsetDatabaseOption { keys: Vec<String> },
diff --git a/src/sql/src/statements/copy.rs b/src/sql/src/statements/copy.rs
index c68b9d8c0321..436d86d3abaf 100644
--- a/src/sql/src/statements/copy.rs
+++ b/src/sql/src/statements/copy.rs
@@ -14,12 +14,13 @@
use std::fmt::Display;
+use serde::Serialize;
use sqlparser::ast::ObjectName;
use sqlparser_derive::{Visit, VisitMut};
use crate::statements::OptionMap;
-#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut, Serialize)]
pub enum Copy {
CopyTable(CopyTable),
CopyDatabase(CopyDatabase),
@@ -34,7 +35,7 @@ impl Display for Copy {
}
}
-#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut, Serialize)]
pub enum CopyTable {
To(CopyTableArgument),
From(CopyTableArgument),
@@ -65,7 +66,7 @@ impl Display for CopyTable {
}
}
-#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut, Serialize)]
pub enum CopyDatabase {
To(CopyDatabaseArgument),
From(CopyDatabaseArgument),
@@ -96,7 +97,7 @@ impl Display for CopyDatabase {
}
}
-#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut, Serialize)]
pub struct CopyDatabaseArgument {
pub database_name: ObjectName,
pub with: OptionMap,
@@ -104,7 +105,7 @@ pub struct CopyDatabaseArgument {
pub location: String,
}
-#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut, Serialize)]
pub struct CopyTableArgument {
pub table_name: ObjectName,
pub with: OptionMap,
diff --git a/src/sql/src/statements/create.rs b/src/sql/src/statements/create.rs
index 20ed7b555965..e4ea46572e5f 100644
--- a/src/sql/src/statements/create.rs
+++ b/src/sql/src/statements/create.rs
@@ -18,6 +18,7 @@ use std::fmt::{Display, Formatter};
use common_catalog::consts::FILE_ENGINE;
use datatypes::schema::FulltextOptions;
use itertools::Itertools;
+use serde::Serialize;
use snafu::ResultExt;
use sqlparser::ast::{ColumnOptionDef, DataType, Expr, Query};
use sqlparser_derive::{Visit, VisitMut};
@@ -58,7 +59,7 @@ fn format_table_constraint(constraints: &[TableConstraint]) -> String {
}
/// Table constraint for create table statement.
-#[derive(Debug, PartialEq, Eq, Clone, Visit, VisitMut)]
+#[derive(Debug, PartialEq, Eq, Clone, Visit, VisitMut, Serialize)]
pub enum TableConstraint {
/// Primary key constraint.
PrimaryKey { columns: Vec<Ident> },
@@ -84,7 +85,7 @@ impl Display for TableConstraint {
}
}
-#[derive(Debug, PartialEq, Eq, Clone, Visit, VisitMut)]
+#[derive(Debug, PartialEq, Eq, Clone, Visit, VisitMut, Serialize)]
pub struct CreateTable {
/// Create if not exists
pub if_not_exists: bool,
@@ -100,7 +101,7 @@ pub struct CreateTable {
}
/// Column definition in `CREATE TABLE` statement.
-#[derive(Debug, PartialEq, Eq, Clone, Visit, VisitMut)]
+#[derive(Debug, PartialEq, Eq, Clone, Visit, VisitMut, Serialize)]
pub struct Column {
/// `ColumnDef` from `sqlparser::ast`
pub column_def: ColumnDef,
@@ -109,7 +110,7 @@ pub struct Column {
}
/// Column extensions for greptimedb dialect.
-#[derive(Debug, PartialEq, Eq, Clone, Visit, VisitMut, Default)]
+#[derive(Debug, PartialEq, Eq, Clone, Visit, VisitMut, Default, Serialize)]
pub struct ColumnExtensions {
/// Fulltext options.
pub fulltext_options: Option<OptionMap>,
@@ -172,7 +173,7 @@ impl ColumnExtensions {
}
}
-#[derive(Debug, PartialEq, Eq, Clone, Visit, VisitMut)]
+#[derive(Debug, PartialEq, Eq, Clone, Visit, VisitMut, Serialize)]
pub struct Partitions {
pub column_list: Vec<Ident>,
pub exprs: Vec<Expr>,
@@ -244,7 +245,7 @@ impl Display for CreateTable {
}
}
-#[derive(Debug, PartialEq, Eq, Clone, Visit, VisitMut)]
+#[derive(Debug, PartialEq, Eq, Clone, Visit, VisitMut, Serialize)]
pub struct CreateDatabase {
pub name: ObjectName,
/// Create if not exists
@@ -278,7 +279,7 @@ impl Display for CreateDatabase {
}
}
-#[derive(Debug, PartialEq, Eq, Clone, Visit, VisitMut)]
+#[derive(Debug, PartialEq, Eq, Clone, Visit, VisitMut, Serialize)]
pub struct CreateExternalTable {
/// Table name
pub name: ObjectName,
@@ -309,7 +310,7 @@ impl Display for CreateExternalTable {
}
}
-#[derive(Debug, PartialEq, Eq, Clone, Visit, VisitMut)]
+#[derive(Debug, PartialEq, Eq, Clone, Visit, VisitMut, Serialize)]
pub struct CreateTableLike {
/// Table name
pub table_name: ObjectName,
@@ -325,7 +326,7 @@ impl Display for CreateTableLike {
}
}
-#[derive(Debug, PartialEq, Eq, Clone, Visit, VisitMut)]
+#[derive(Debug, PartialEq, Eq, Clone, Visit, VisitMut, Serialize)]
pub struct CreateFlow {
/// Flow name
pub flow_name: ObjectName,
@@ -367,7 +368,7 @@ impl Display for CreateFlow {
}
/// Create SQL view statement.
-#[derive(Debug, PartialEq, Eq, Clone, Visit, VisitMut)]
+#[derive(Debug, PartialEq, Eq, Clone, Visit, VisitMut, Serialize)]
pub struct CreateView {
/// View name
pub name: ObjectName,
diff --git a/src/sql/src/statements/cursor.rs b/src/sql/src/statements/cursor.rs
index 72ef4cdcae98..4381cc5e7be5 100644
--- a/src/sql/src/statements/cursor.rs
+++ b/src/sql/src/statements/cursor.rs
@@ -14,6 +14,7 @@
use std::fmt::Display;
+use serde::Serialize;
use sqlparser::ast::ObjectName;
use sqlparser_derive::{Visit, VisitMut};
@@ -22,7 +23,7 @@ use super::query::Query;
/// Represents a DECLARE CURSOR statement
///
/// This statement will carry a SQL query
-#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut, Serialize)]
pub struct DeclareCursor {
pub cursor_name: ObjectName,
pub query: Box<Query>,
@@ -35,7 +36,7 @@ impl Display for DeclareCursor {
}
/// Represents a FETCH FROM cursor statement
-#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut, Serialize)]
pub struct FetchCursor {
pub cursor_name: ObjectName,
pub fetch_size: u64,
@@ -48,7 +49,7 @@ impl Display for FetchCursor {
}
/// Represents a CLOSE cursor statement
-#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut, Serialize)]
pub struct CloseCursor {
pub cursor_name: ObjectName,
}
diff --git a/src/sql/src/statements/delete.rs b/src/sql/src/statements/delete.rs
index 4346610b7d19..dc8f5d69014e 100644
--- a/src/sql/src/statements/delete.rs
+++ b/src/sql/src/statements/delete.rs
@@ -12,10 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use serde::Serialize;
use sqlparser::ast::Statement;
use sqlparser_derive::{Visit, VisitMut};
-#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut, Serialize)]
pub struct Delete {
pub inner: Statement,
}
diff --git a/src/sql/src/statements/describe.rs b/src/sql/src/statements/describe.rs
index 743f2b0123c2..1a7bba24e5d3 100644
--- a/src/sql/src/statements/describe.rs
+++ b/src/sql/src/statements/describe.rs
@@ -14,11 +14,12 @@
use std::fmt::Display;
+use serde::Serialize;
use sqlparser::ast::ObjectName;
use sqlparser_derive::{Visit, VisitMut};
/// SQL structure for `DESCRIBE TABLE`.
-#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut, Serialize)]
pub struct DescribeTable {
name: ObjectName,
}
diff --git a/src/sql/src/statements/drop.rs b/src/sql/src/statements/drop.rs
index a46450db78f7..799722904dab 100644
--- a/src/sql/src/statements/drop.rs
+++ b/src/sql/src/statements/drop.rs
@@ -14,11 +14,12 @@
use std::fmt::Display;
+use serde::Serialize;
use sqlparser::ast::ObjectName;
use sqlparser_derive::{Visit, VisitMut};
/// DROP TABLE statement.
-#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut, Serialize)]
pub struct DropTable {
table_names: Vec<ObjectName>,
@@ -62,7 +63,7 @@ impl Display for DropTable {
}
/// DROP DATABASE statement.
-#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut, Serialize)]
pub struct DropDatabase {
name: ObjectName,
/// drop table if exists
@@ -99,7 +100,7 @@ impl Display for DropDatabase {
}
/// DROP FLOW statement.
-#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut, Serialize)]
pub struct DropFlow {
flow_name: ObjectName,
/// drop flow if exists
@@ -138,7 +139,7 @@ impl Display for DropFlow {
}
/// `DROP VIEW` statement.
-#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut, Serialize)]
pub struct DropView {
// The view name
pub view_name: ObjectName,
diff --git a/src/sql/src/statements/explain.rs b/src/sql/src/statements/explain.rs
index 5b3a2671f939..96a12c7a41c6 100644
--- a/src/sql/src/statements/explain.rs
+++ b/src/sql/src/statements/explain.rs
@@ -14,13 +14,14 @@
use std::fmt::{Display, Formatter};
+use serde::Serialize;
use sqlparser::ast::Statement as SpStatement;
use sqlparser_derive::{Visit, VisitMut};
use crate::error::Error;
/// Explain statement.
-#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut, Serialize)]
pub struct Explain {
pub inner: SpStatement,
}
diff --git a/src/sql/src/statements/insert.rs b/src/sql/src/statements/insert.rs
index 4eae7f1e1874..f1c0b7144441 100644
--- a/src/sql/src/statements/insert.rs
+++ b/src/sql/src/statements/insert.rs
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use serde::Serialize;
use sqlparser::ast::{ObjectName, Query, SetExpr, Statement, UnaryOperator, Values};
use sqlparser::parser::ParserError;
use sqlparser_derive::{Visit, VisitMut};
@@ -20,7 +21,7 @@ use crate::ast::{Expr, Value};
use crate::error::Result;
use crate::statements::query::Query as GtQuery;
-#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut, Serialize)]
pub struct Insert {
// Can only be sqlparser::ast::Statement::Insert variant
pub inner: Statement,
diff --git a/src/sql/src/statements/option_map.rs b/src/sql/src/statements/option_map.rs
index 9ff8d94312fd..d66cadf16461 100644
--- a/src/sql/src/statements/option_map.rs
+++ b/src/sql/src/statements/option_map.rs
@@ -16,14 +16,16 @@ use std::collections::{BTreeMap, HashMap};
use std::ops::ControlFlow;
use common_base::secrets::{ExposeSecret, ExposeSecretMut, SecretString};
+use serde::Serialize;
use sqlparser::ast::{Visit, VisitMut, Visitor, VisitorMut};
const REDACTED_OPTIONS: [&str; 2] = ["access_key_id", "secret_access_key"];
/// Options hashmap.
-#[derive(Clone, Debug, Default)]
+#[derive(Clone, Debug, Default, Serialize)]
pub struct OptionMap {
options: BTreeMap<String, String>,
+ #[serde(skip_serializing)]
secrets: BTreeMap<String, SecretString>,
}
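
The #[serde(skip_serializing)] attribute above keeps the secrets map out of anything serialized from a parsed statement (for example by the new parse endpoint) while the rest of OptionMap derives Serialize normally. A small self-contained illustration, with plain string maps standing in for SecretString:

use std::collections::BTreeMap;

use serde::Serialize;

#[derive(Serialize, Default)]
struct Options {
    options: BTreeMap<String, String>,
    // Never written out, even though the struct as a whole is Serialize.
    #[serde(skip_serializing)]
    secrets: BTreeMap<String, String>,
}

fn main() {
    let mut opts = Options::default();
    opts.options.insert("format".into(), "parquet".into());
    opts.secrets.insert("secret_access_key".into(), "do-not-print".into());
    // Prints {"options":{"format":"parquet"}}; the secrets key is absent.
    println!("{}", serde_json::to_string(&opts).unwrap());
}
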
diff --git a/src/sql/src/statements/query.rs b/src/sql/src/statements/query.rs
index 3b571a1a0ba1..b5221a226356 100644
--- a/src/sql/src/statements/query.rs
+++ b/src/sql/src/statements/query.rs
@@ -14,13 +14,14 @@
use std::fmt;
+use serde::Serialize;
use sqlparser::ast::Query as SpQuery;
use sqlparser_derive::{Visit, VisitMut};
use crate::error::Error;
/// Query statement instance.
-#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut, Serialize)]
pub struct Query {
pub inner: SpQuery,
}
diff --git a/src/sql/src/statements/set_variables.rs b/src/sql/src/statements/set_variables.rs
index 7a2a94a531df..748d077d84ce 100644
--- a/src/sql/src/statements/set_variables.rs
+++ b/src/sql/src/statements/set_variables.rs
@@ -14,11 +14,12 @@
use std::fmt::Display;
+use serde::Serialize;
use sqlparser::ast::{Expr, ObjectName};
use sqlparser_derive::{Visit, VisitMut};
/// SET variables statement.
-#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut, Serialize)]
pub struct SetVariables {
pub variable: ObjectName,
pub value: Vec<Expr>,
diff --git a/src/sql/src/statements/show.rs b/src/sql/src/statements/show.rs
index f6a8dab72897..055cd7768f02 100644
--- a/src/sql/src/statements/show.rs
+++ b/src/sql/src/statements/show.rs
@@ -14,12 +14,13 @@
use std::fmt::{self, Display};
+use serde::Serialize;
use sqlparser_derive::{Visit, VisitMut};
use crate::ast::{Expr, Ident, ObjectName};
/// Show kind for SQL expressions like `SHOW DATABASE` or `SHOW TABLE`
-#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut, Serialize)]
pub enum ShowKind {
All,
Like(Ident),
@@ -46,14 +47,14 @@ macro_rules! format_kind {
}
/// SQL structure for `SHOW DATABASES`.
-#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut, Serialize)]
pub struct ShowDatabases {
pub kind: ShowKind,
pub full: bool,
}
/// The SQL `SHOW COLUMNS` statement
-#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut, Serialize)]
pub struct ShowColumns {
pub kind: ShowKind,
pub table: String,
@@ -77,7 +78,7 @@ impl Display for ShowColumns {
}
/// The SQL `SHOW INDEX` statement
-#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut, Serialize)]
pub struct ShowIndex {
pub kind: ShowKind,
pub table: String,
@@ -118,7 +119,7 @@ impl Display for ShowDatabases {
}
/// SQL structure for `SHOW TABLES`.
-#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut, Serialize)]
pub struct ShowTables {
pub kind: ShowKind,
pub database: Option<String>,
@@ -142,7 +143,7 @@ impl Display for ShowTables {
}
/// SQL structure for `SHOW TABLE STATUS`.
-#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut, Serialize)]
pub struct ShowTableStatus {
pub kind: ShowKind,
pub database: Option<String>,
@@ -162,7 +163,7 @@ impl Display for ShowTableStatus {
}
/// SQL structure for `SHOW CREATE DATABASE`.
-#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut, Serialize)]
pub struct ShowCreateDatabase {
pub database_name: ObjectName,
}
@@ -175,7 +176,7 @@ impl Display for ShowCreateDatabase {
}
/// SQL structure for `SHOW CREATE TABLE`.
-#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut, Serialize)]
pub struct ShowCreateTable {
pub table_name: ObjectName,
}
@@ -188,7 +189,7 @@ impl Display for ShowCreateTable {
}
/// SQL structure for `SHOW CREATE FLOW`.
-#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut, Serialize)]
pub struct ShowCreateFlow {
pub flow_name: ObjectName,
}
@@ -201,7 +202,7 @@ impl Display for ShowCreateFlow {
}
/// SQL structure for `SHOW FLOWS`.
-#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut, Serialize)]
pub struct ShowFlows {
pub kind: ShowKind,
pub database: Option<String>,
@@ -220,7 +221,7 @@ impl Display for ShowFlows {
}
/// SQL structure for `SHOW CREATE VIEW`.
-#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut, Serialize)]
pub struct ShowCreateView {
pub view_name: ObjectName,
}
@@ -233,7 +234,7 @@ impl Display for ShowCreateView {
}
/// SQL structure for `SHOW VIEWS`.
-#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut, Serialize)]
pub struct ShowViews {
pub kind: ShowKind,
pub database: Option<String>,
@@ -252,7 +253,7 @@ impl Display for ShowViews {
}
/// SQL structure for `SHOW VARIABLES xxx`.
-#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut, Serialize)]
pub struct ShowVariables {
pub variable: ObjectName,
}
@@ -265,7 +266,7 @@ impl Display for ShowVariables {
}
/// SQL structure for "SHOW STATUS"
-#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut, Serialize)]
pub struct ShowStatus {}
impl Display for ShowStatus {
diff --git a/src/sql/src/statements/statement.rs b/src/sql/src/statements/statement.rs
index 8ad391a00dd2..2870f2b64a6a 100644
--- a/src/sql/src/statements/statement.rs
+++ b/src/sql/src/statements/statement.rs
@@ -15,12 +15,14 @@
use std::fmt::Display;
use datafusion_sql::parser::Statement as DfStatement;
+use serde::Serialize;
use sqlparser::ast::Statement as SpStatement;
use sqlparser_derive::{Visit, VisitMut};
use crate::error::{ConvertToDfStatementSnafu, Error};
use crate::statements::admin::Admin;
use crate::statements::alter::{AlterDatabase, AlterTable};
+use crate::statements::copy::Copy;
use crate::statements::create::{
CreateDatabase, CreateExternalTable, CreateFlow, CreateTable, CreateTableLike, CreateView,
};
@@ -42,7 +44,7 @@ use crate::statements::truncate::TruncateTable;
/// Tokens parsed by `DFParser` are converted into these values.
#[allow(clippy::large_enum_variant)]
-#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut, Serialize)]
pub enum Statement {
// Query
Query(Box<Query>),
@@ -107,7 +109,8 @@ pub enum Statement {
// EXPLAIN QUERY
Explain(Explain),
// COPY
- Copy(crate::statements::copy::Copy),
+ Copy(Copy),
+ // Telemetry Query Language
Tql(Tql),
// TRUNCATE TABLE
TruncateTable(TruncateTable),
diff --git a/src/sql/src/statements/tql.rs b/src/sql/src/statements/tql.rs
index 0f7a85f95ab8..7980103431ef 100644
--- a/src/sql/src/statements/tql.rs
+++ b/src/sql/src/statements/tql.rs
@@ -14,9 +14,10 @@
use std::fmt::Display;
+use serde::Serialize;
use sqlparser_derive::{Visit, VisitMut};
-#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut, Serialize)]
pub enum Tql {
Eval(TqlEval),
Explain(TqlExplain),
@@ -49,7 +50,7 @@ fn format_tql(
}
/// TQL EVAL (<start>, <end>, <step>, [lookback]) <promql>
-#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut, Serialize)]
pub struct TqlEval {
pub start: String,
pub end: String,
@@ -74,7 +75,7 @@ impl Display for TqlEval {
/// TQL EXPLAIN [VERBOSE] [<start>, <end>, <step>, [lookback]] <promql>
/// doesn't execute the query but tells how the query would be executed (similar to SQL EXPLAIN).
-#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut, Serialize)]
pub struct TqlExplain {
pub start: String,
pub end: String,
@@ -103,7 +104,7 @@ impl Display for TqlExplain {
/// TQL ANALYZE [VERBOSE] (<start>, <end>, <step>, [lookback]) <promql>
/// executes the plan and tells the detailed per-step execution time (similar to SQL ANALYZE).
-#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut, Serialize)]
pub struct TqlAnalyze {
pub start: String,
pub end: String,
diff --git a/src/sql/src/statements/truncate.rs b/src/sql/src/statements/truncate.rs
index c1a063f959ce..710b5f72df3c 100644
--- a/src/sql/src/statements/truncate.rs
+++ b/src/sql/src/statements/truncate.rs
@@ -14,11 +14,12 @@
use std::fmt::Display;
+use serde::Serialize;
use sqlparser::ast::ObjectName;
use sqlparser_derive::{Visit, VisitMut};
/// TRUNCATE TABLE statement.
-#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut, Serialize)]
pub struct TruncateTable {
table_name: ObjectName,
}
diff --git a/tests-integration/tests/http.rs b/tests-integration/tests/http.rs
index 4da65f0b21f5..5a48fef39e43 100644
--- a/tests-integration/tests/http.rs
+++ b/tests-integration/tests/http.rs
@@ -361,6 +361,14 @@ pub async fn test_sql_api(store_type: StorageType) {
let body = serde_json::from_str::<ErrorResponse>(&res.text().await).unwrap();
assert_eq!(body.code(), ErrorCode::DatabaseNotFound as u32);
+ // test parse method
+ let res = client.get("/v1/sql/parse?sql=desc table t").send().await;
+ assert_eq!(res.status(), StatusCode::OK);
+ assert_eq!(
+ res.text().await,
+ "[{\"DescribeTable\":{\"name\":[{\"value\":\"t\",\"quote_style\":null}]}}]"
+ );
+
// test timezone header
let res = client
.get("/v1/sql?&sql=show variables system_time_zone")
|
feat
|
implement `v1/sql/parse` endpoint to parse GreptimeDB's SQL dialect (#5144)
|
b9661818f297d88f1e45d976c5b119410e5fa39b
|
2023-05-08 19:24:24
|
localhost
|
chore: remove useless Option type in plugins (#1544)
| false
| null |
chore
|
remove useless Option type in plugins (#1544)
|
0aceebf0a390e1af45dd4f62f19724b2233847b4
|
2024-06-17 12:26:31
|
yuanbohan
|
feat(pipeline): transform support on_failure (#4123)
| false
|
diff --git a/src/pipeline/src/etl/transform/mod.rs b/src/pipeline/src/etl/transform/mod.rs
index 991aa05df644..7b09bca965a6 100644
--- a/src/pipeline/src/etl/transform/mod.rs
+++ b/src/pipeline/src/etl/transform/mod.rs
@@ -27,6 +27,7 @@ const TRANSFORM_FIELDS: &str = "fields";
const TRANSFORM_TYPE: &str = "type";
const TRANSFORM_INDEX: &str = "index";
const TRANSFORM_DEFAULT: &str = "default";
+const TRANSFORM_ON_FAILURE: &str = "on_failure";
pub use transformer::greptime::GreptimeTransformer;
// pub use transformer::noop::NoopTransformer;
@@ -38,6 +39,38 @@ pub trait Transformer: std::fmt::Display + Sized + Send + Sync + 'static {
fn transform(&self, val: crate::etl::value::Value) -> Result<Self::Output, String>;
}
+/// On Failure behavior when transform fails
+#[derive(Debug, Clone, Default)]
+pub enum OnFailure {
+ // Return None if transform fails
+ #[default]
+ Ignore,
+ // Return default value of the field if transform fails
+ // Default value depends on the type of the field, or explicitly set by user
+ Default,
+}
+
+impl std::str::FromStr for OnFailure {
+ type Err = String;
+
+ fn from_str(s: &str) -> Result<Self, Self::Err> {
+ match s {
+ "ignore" => Ok(OnFailure::Ignore),
+ "default" => Ok(OnFailure::Default),
+ _ => Err(format!("invalid transform on_failure value: {}", s)),
+ }
+ }
+}
+
+impl std::fmt::Display for OnFailure {
+ fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
+ match self {
+ OnFailure::Ignore => write!(f, "ignore"),
+ OnFailure::Default => write!(f, "default"),
+ }
+ }
+}
+
#[derive(Debug, Default, Clone)]
pub struct Transforms {
transforms: Vec<Transform>,
@@ -97,6 +130,8 @@ pub struct Transform {
pub default: Option<Value>,
pub index: Option<Index>,
+
+ pub on_failure: Option<OnFailure>,
}
impl std::fmt::Display for Transform {
@@ -107,10 +142,21 @@ impl std::fmt::Display for Transform {
"".to_string()
};
- let fields = format!("field(s): {}", self.fields);
let type_ = format!("type: {}", self.type_);
+ let fields = format!("field(s): {}", self.fields);
+ let default = if let Some(default) = &self.default {
+ format!(", default: {}", default)
+ } else {
+ "".to_string()
+ };
+
+ let on_failure = if let Some(on_failure) = &self.on_failure {
+ format!(", on_failure: {}", on_failure)
+ } else {
+ "".to_string()
+ };
- write!(f, "{type_}{index}, {fields}")
+ write!(f, "{type_}{index}, {fields}{default}{on_failure}",)
}
}
@@ -121,6 +167,7 @@ impl Default for Transform {
type_: Value::Null,
default: None,
index: None,
+ on_failure: None,
}
}
}
@@ -155,9 +202,17 @@ impl Transform {
self.index = Some(index);
}
+ fn with_on_failure(&mut self, on_failure: OnFailure) {
+ self.on_failure = Some(on_failure);
+ }
+
pub(crate) fn get_default(&self) -> Option<&Value> {
self.default.as_ref()
}
+
+ pub(crate) fn get_type_matched_default_val(&self) -> &Value {
+ &self.type_
+ }
}
impl TryFrom<&yaml_rust::yaml::Hash> for Transform {
@@ -192,6 +247,12 @@ impl TryFrom<&yaml_rust::yaml::Hash> for Transform {
TRANSFORM_DEFAULT => {
default_opt = Some(Value::try_from(v)?);
}
+
+ TRANSFORM_ON_FAILURE => {
+ let on_failure = yaml_string(v, TRANSFORM_ON_FAILURE)?;
+ transform.with_on_failure(on_failure.parse()?);
+ }
+
_ => {}
}
}
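
A minimal sketch (not from the patch) of the parsing behavior added above: the `on_failure` key is read as a YAML string and converted through the FromStr impl, so only "ignore" and "default" are accepted; anything else produces the error shown in the impl.

// Assumes OnFailure is in scope as defined above; "retry" is a made-up invalid value.
fn on_failure_parsing_examples() {
    assert!(matches!("ignore".parse::<OnFailure>(), Ok(OnFailure::Ignore)));
    assert!(matches!("default".parse::<OnFailure>(), Ok(OnFailure::Default)));
    // Any other string is rejected with "invalid transform on_failure value: ...".
    assert!("retry".parse::<OnFailure>().is_err());
}
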
diff --git a/src/pipeline/src/etl/transform/transformer/greptime/coerce.rs b/src/pipeline/src/etl/transform/transformer/greptime/coerce.rs
index 6b077a22dca5..49e008e438ea 100644
--- a/src/pipeline/src/etl/transform/transformer/greptime/coerce.rs
+++ b/src/pipeline/src/etl/transform/transformer/greptime/coerce.rs
@@ -16,7 +16,7 @@ use greptime_proto::v1::value::ValueData;
use greptime_proto::v1::{ColumnDataType, ColumnSchema, SemanticType};
use crate::etl::transform::index::Index;
-use crate::etl::transform::Transform;
+use crate::etl::transform::{OnFailure, Transform};
use crate::etl::value::{Epoch, Time, Value};
impl TryFrom<Value> for ValueData {
@@ -177,8 +177,20 @@ fn coerce_bool_value(b: bool, transform: &Transform) -> Result<Option<ValueData>
Value::Boolean(_) => ValueData::BoolValue(b),
Value::String(_) => ValueData::StringValue(b.to_string()),
- Value::Time(_) => return Err("Boolean type not supported for Time".to_string()),
- Value::Epoch(_) => return Err("Boolean type not supported for Epoch".to_string()),
+ Value::Time(_) => match transform.on_failure {
+ Some(OnFailure::Ignore) => return Ok(None),
+ Some(OnFailure::Default) => {
+ return Err("default value not supported for Time".to_string())
+ }
+ None => return Err("Boolean type not supported for Time".to_string()),
+ },
+ Value::Epoch(_) => match transform.on_failure {
+ Some(OnFailure::Ignore) => return Ok(None),
+ Some(OnFailure::Default) => {
+ return Err("default value not supported for Epoch".to_string())
+ }
+ None => return Err("Boolean type not supported for Epoch".to_string()),
+ },
Value::Array(_) => unimplemented!("Array type not supported"),
Value::Map(_) => unimplemented!("Object type not supported"),
@@ -207,8 +219,21 @@ fn coerce_i64_value(n: i64, transform: &Transform) -> Result<Option<ValueData>,
Value::Boolean(_) => ValueData::BoolValue(n != 0),
Value::String(_) => ValueData::StringValue(n.to_string()),
- Value::Time(_) => return Err("Integer type not supported for Time".to_string()),
- Value::Epoch(_) => return Err("Integer type not supported for Epoch".to_string()),
+ Value::Time(_) => match transform.on_failure {
+ Some(OnFailure::Ignore) => return Ok(None),
+ Some(OnFailure::Default) => {
+ return Err("default value not supported for Time".to_string())
+ }
+ None => return Err("Integer type not supported for Time".to_string()),
+ },
+
+ Value::Epoch(_) => match transform.on_failure {
+ Some(OnFailure::Ignore) => return Ok(None),
+ Some(OnFailure::Default) => {
+ return Err("default value not supported for Epoch".to_string())
+ }
+ None => return Err("Integer type not supported for Epoch".to_string()),
+ },
Value::Array(_) => unimplemented!("Array type not supported"),
Value::Map(_) => unimplemented!("Object type not supported"),
@@ -237,8 +262,21 @@ fn coerce_u64_value(n: u64, transform: &Transform) -> Result<Option<ValueData>,
Value::Boolean(_) => ValueData::BoolValue(n != 0),
Value::String(_) => ValueData::StringValue(n.to_string()),
- Value::Time(_) => return Err("Integer type not supported for Time".to_string()),
- Value::Epoch(_) => return Err("Integer type not supported for Epoch".to_string()),
+ Value::Time(_) => match transform.on_failure {
+ Some(OnFailure::Ignore) => return Ok(None),
+ Some(OnFailure::Default) => {
+ return Err("default value not supported for Time".to_string())
+ }
+ None => return Err("Integer type not supported for Time".to_string()),
+ },
+
+ Value::Epoch(_) => match transform.on_failure {
+ Some(OnFailure::Ignore) => return Ok(None),
+ Some(OnFailure::Default) => {
+ return Err("default value not supported for Epoch".to_string())
+ }
+ None => return Err("Integer type not supported for Epoch".to_string()),
+ },
Value::Array(_) => unimplemented!("Array type not supported"),
Value::Map(_) => unimplemented!("Object type not supported"),
@@ -267,8 +305,21 @@ fn coerce_f64_value(n: f64, transform: &Transform) -> Result<Option<ValueData>,
Value::Boolean(_) => ValueData::BoolValue(n != 0.0),
Value::String(_) => ValueData::StringValue(n.to_string()),
- Value::Time(_) => return Err("Float type not supported for Time".to_string()),
- Value::Epoch(_) => return Err("Float type not supported for Epoch".to_string()),
+ Value::Time(_) => match transform.on_failure {
+ Some(OnFailure::Ignore) => return Ok(None),
+ Some(OnFailure::Default) => {
+ return Err("default value not supported for Time".to_string())
+ }
+ None => return Err("Float type not supported for Time".to_string()),
+ },
+
+ Value::Epoch(_) => match transform.on_failure {
+ Some(OnFailure::Ignore) => return Ok(None),
+ Some(OnFailure::Default) => {
+ return Err("default value not supported for Epoch".to_string())
+ }
+ None => return Err("Float type not supported for Epoch".to_string()),
+ },
Value::Array(_) => unimplemented!("Array type not supported"),
Value::Map(_) => unimplemented!("Object type not supported"),
@@ -280,31 +331,156 @@ fn coerce_f64_value(n: f64, transform: &Transform) -> Result<Option<ValueData>,
}
fn coerce_string_value(s: &str, transform: &Transform) -> Result<Option<ValueData>, String> {
- let val = match transform.type_ {
- Value::Int8(_) => ValueData::I8Value(s.parse::<i32>().map_err(|e| e.to_string())?),
- Value::Int16(_) => ValueData::I16Value(s.parse::<i32>().map_err(|e| e.to_string())?),
- Value::Int32(_) => ValueData::I32Value(s.parse::<i32>().map_err(|e| e.to_string())?),
- Value::Int64(_) => ValueData::I64Value(s.parse::<i64>().map_err(|e| e.to_string())?),
+ match transform.type_ {
+ Value::Int8(_) if s.parse::<i32>().is_ok() => {
+ Ok(Some(ValueData::I8Value(s.parse().unwrap())))
+ }
+ Value::Int16(_) if s.parse::<i32>().is_ok() => {
+ Ok(Some(ValueData::I16Value(s.parse().unwrap())))
+ }
+ Value::Int32(_) if s.parse::<i32>().is_ok() => {
+ Ok(Some(ValueData::I32Value(s.parse().unwrap())))
+ }
+ Value::Int64(_) if s.parse::<i64>().is_ok() => {
+ Ok(Some(ValueData::I64Value(s.parse().unwrap())))
+ }
- Value::Uint8(_) => ValueData::U8Value(s.parse::<u32>().map_err(|e| e.to_string())?),
- Value::Uint16(_) => ValueData::U16Value(s.parse::<u32>().map_err(|e| e.to_string())?),
- Value::Uint32(_) => ValueData::U32Value(s.parse::<u32>().map_err(|e| e.to_string())?),
- Value::Uint64(_) => ValueData::U64Value(s.parse::<u64>().map_err(|e| e.to_string())?),
+ Value::Uint8(_) if s.parse::<u32>().is_ok() => {
+ Ok(Some(ValueData::U8Value(s.parse().unwrap())))
+ }
+ Value::Uint16(_) if s.parse::<u32>().is_ok() => {
+ Ok(Some(ValueData::U16Value(s.parse().unwrap())))
+ }
+ Value::Uint32(_) if s.parse::<u32>().is_ok() => {
+ Ok(Some(ValueData::U32Value(s.parse().unwrap())))
+ }
+ Value::Uint64(_) if s.parse::<u64>().is_ok() => {
+ Ok(Some(ValueData::U64Value(s.parse().unwrap())))
+ }
- Value::Float32(_) => ValueData::F32Value(s.parse::<f32>().map_err(|e| e.to_string())?),
- Value::Float64(_) => ValueData::F64Value(s.parse::<f64>().map_err(|e| e.to_string())?),
+ Value::Float32(_) if s.parse::<f32>().is_ok() => {
+ Ok(Some(ValueData::F32Value(s.parse().unwrap())))
+ }
+ Value::Float64(_) if s.parse::<f64>().is_ok() => {
+ Ok(Some(ValueData::F64Value(s.parse().unwrap())))
+ }
- Value::Boolean(_) => ValueData::BoolValue(s.parse::<bool>().map_err(|e| e.to_string())?),
- Value::String(_) => ValueData::StringValue(s.to_string()),
+ Value::Boolean(_) if s.parse::<bool>().is_ok() => {
+ Ok(Some(ValueData::BoolValue(s.parse().unwrap())))
+ }
- Value::Time(_) => return Err("String type not supported for Time".to_string()),
- Value::Epoch(_) => return Err("String type not supported for Epoch".to_string()),
+ // on_failure
+ Value::Int8(_)
+ | Value::Int16(_)
+ | Value::Int32(_)
+ | Value::Int64(_)
+ | Value::Uint8(_)
+ | Value::Uint16(_)
+ | Value::Uint32(_)
+ | Value::Uint64(_)
+ | Value::Float32(_)
+ | Value::Float64(_)
+ | Value::Boolean(_) => match transform.on_failure {
+ Some(OnFailure::Ignore) => Ok(None),
+ Some(OnFailure::Default) => match transform.get_default() {
+ Some(default) => coerce_value(default, transform),
+ None => coerce_value(transform.get_type_matched_default_val(), transform),
+ },
+ None => Err(format!(
+ "failed to coerce string value '{s}' to type '{}'",
+ transform.type_.to_str_type()
+ )),
+ },
+
+ Value::String(_) => Ok(Some(ValueData::StringValue(s.to_string()))),
+
+ Value::Time(_) => match transform.on_failure {
+ Some(OnFailure::Ignore) => Ok(None),
+ Some(OnFailure::Default) => Err("default value not supported for Time".to_string()),
+ None => Err("String type not supported for Time".to_string()),
+ },
+
+ Value::Epoch(_) => match transform.on_failure {
+ Some(OnFailure::Ignore) => Ok(None),
+ Some(OnFailure::Default) => Err("default value not supported for Epoch".to_string()),
+ None => Err("String type not supported for Epoch".to_string()),
+ },
Value::Array(_) => unimplemented!("Array type not supported"),
Value::Map(_) => unimplemented!("Object type not supported"),
- Value::Null => return Ok(None),
- };
+ Value::Null => Ok(None),
+ }
+}
- Ok(Some(val))
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::etl::field::Fields;
+
+ #[test]
+ fn test_coerce_string_without_on_failure() {
+ let transform = Transform {
+ fields: Fields::default(),
+ type_: Value::Int32(0),
+ default: None,
+ index: None,
+ on_failure: None,
+ };
+
+ // valid string
+ {
+ let val = Value::String("123".to_string());
+ let result = coerce_value(&val, &transform).unwrap();
+ assert_eq!(result, Some(ValueData::I32Value(123)));
+ }
+
+ // invalid string
+ {
+ let val = Value::String("hello".to_string());
+ let result = coerce_value(&val, &transform);
+ assert!(result.is_err());
+ }
+ }
+
+ #[test]
+ fn test_coerce_string_with_on_failure_ignore() {
+ let transform = Transform {
+ fields: Fields::default(),
+ type_: Value::Int32(0),
+ default: None,
+ index: None,
+ on_failure: Some(OnFailure::Ignore),
+ };
+
+ let val = Value::String("hello".to_string());
+ let result = coerce_value(&val, &transform).unwrap();
+ assert_eq!(result, None);
+ }
+
+ #[test]
+ fn test_coerce_string_with_on_failure_default() {
+ let mut transform = Transform {
+ fields: Fields::default(),
+ type_: Value::Int32(0),
+ default: None,
+ index: None,
+ on_failure: Some(OnFailure::Default),
+ };
+
+ // with no explicit default value
+ {
+ let val = Value::String("hello".to_string());
+ let result = coerce_value(&val, &transform).unwrap();
+ assert_eq!(result, Some(ValueData::I32Value(0)));
+ }
+
+ // with explicit default value
+ {
+ transform.default = Some(Value::Int32(42));
+ let val = Value::String("hello".to_string());
+ let result = coerce_value(&val, &transform).unwrap();
+ assert_eq!(result, Some(ValueData::I32Value(42)));
+ }
+ }
}
diff --git a/src/pipeline/src/etl/transform/transformer/greptime/mod.rs b/src/pipeline/src/etl/transform/transformer/greptime/mod.rs
index bbbfa0e9104b..6134657c44b6 100644
--- a/src/pipeline/src/etl/transform/transformer/greptime/mod.rs
+++ b/src/pipeline/src/etl/transform/transformer/greptime/mod.rs
@@ -47,6 +47,7 @@ impl GreptimeTransformer {
type_,
default,
index: Some(Index::Timestamp),
+ on_failure: None,
}
}
diff --git a/src/pipeline/src/etl/value/mod.rs b/src/pipeline/src/etl/value/mod.rs
index a8daa5fa6149..01b7c43fd98a 100644
--- a/src/pipeline/src/etl/value/mod.rs
+++ b/src/pipeline/src/etl/value/mod.rs
@@ -193,6 +193,34 @@ impl Value {
v => v.to_string(),
}
}
+
+ pub fn to_str_type(&self) -> &str {
+ match self {
+ Value::Int8(_) => "int8",
+ Value::Int16(_) => "int16",
+ Value::Int32(_) => "int32",
+ Value::Int64(_) => "int64",
+
+ Value::Uint8(_) => "uint8",
+ Value::Uint16(_) => "uint16",
+ Value::Uint32(_) => "uint32",
+ Value::Uint64(_) => "uint64",
+
+ Value::Float32(_) => "float32",
+ Value::Float64(_) => "float64",
+
+ Value::Boolean(_) => "boolean",
+ Value::String(_) => "string",
+
+ Value::Time(_) => "time",
+ Value::Epoch(_) => "epoch",
+
+ Value::Array(_) => "array",
+ Value::Map(_) => "map",
+
+ Value::Null => "null",
+ }
+ }
}
impl std::fmt::Display for Value {
diff --git a/src/pipeline/tests/on_failure.rs b/src/pipeline/tests/on_failure.rs
new file mode 100644
index 000000000000..4934048e1989
--- /dev/null
+++ b/src/pipeline/tests/on_failure.rs
@@ -0,0 +1,224 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use greptime_proto::v1::value::ValueData::{U16Value, U8Value};
+use greptime_proto::v1::{ColumnDataType, ColumnSchema, SemanticType};
+use pipeline::{parse, Content, GreptimeTransformer, Pipeline, Value};
+
+#[test]
+fn test_on_failure_with_ignore() {
+ let input_value_str = r#"
+ [
+ {
+ "version": "-"
+ }
+ ]
+"#;
+ let input_value: Value = serde_json::from_str::<serde_json::Value>(input_value_str)
+ .expect("failed to parse input value")
+ .try_into()
+ .expect("failed to convert input value");
+
+ let pipeline_yaml = r#"
+---
+description: Pipeline for Testing on-failure
+
+transform:
+ - fields:
+ - version
+ type: uint8
+ on_failure: ignore
+"#;
+
+ let yaml_content = Content::Yaml(pipeline_yaml.into());
+ let pipeline: Pipeline<GreptimeTransformer> =
+ parse(&yaml_content).expect("failed to parse pipeline");
+ let output = pipeline.exec(input_value).expect("failed to exec pipeline");
+
+ let expected_schema = vec![
+ ColumnSchema {
+ column_name: "version".to_string(),
+ datatype: ColumnDataType::Uint8.into(),
+ semantic_type: SemanticType::Field.into(),
+ datatype_extension: None,
+ },
+ ColumnSchema {
+ column_name: "greptime_timestamp".to_string(),
+ datatype: ColumnDataType::TimestampNanosecond.into(),
+ semantic_type: SemanticType::Timestamp.into(),
+ datatype_extension: None,
+ },
+ ];
+
+ assert_eq!(output.schema, expected_schema);
+ assert_eq!(output.rows[0].values[0].value_data, None);
+}
+
+#[test]
+fn test_on_failure_with_default() {
+ let input_value_str = r#"
+ [
+ {
+ "version": "-"
+ }
+ ]
+"#;
+ let input_value: Value = serde_json::from_str::<serde_json::Value>(input_value_str)
+ .expect("failed to parse input value")
+ .try_into()
+ .expect("failed to convert input value");
+
+ let pipeline_yaml = r#"
+---
+description: Pipeline for Testing on-failure
+
+transform:
+ - fields:
+ - version
+ type: uint8
+ default: 0
+ on_failure: default
+"#;
+
+ let yaml_content = Content::Yaml(pipeline_yaml.into());
+ let pipeline: Pipeline<GreptimeTransformer> =
+ parse(&yaml_content).expect("failed to parse pipeline");
+ let output = pipeline.exec(input_value).expect("failed to exec pipeline");
+
+ let expected_schema = vec![
+ ColumnSchema {
+ column_name: "version".to_string(),
+ datatype: ColumnDataType::Uint8.into(),
+ semantic_type: SemanticType::Field.into(),
+ datatype_extension: None,
+ },
+ ColumnSchema {
+ column_name: "greptime_timestamp".to_string(),
+ datatype: ColumnDataType::TimestampNanosecond.into(),
+ semantic_type: SemanticType::Timestamp.into(),
+ datatype_extension: None,
+ },
+ ];
+
+ assert_eq!(output.schema, expected_schema);
+ assert_eq!(output.rows[0].values[0].value_data, Some(U8Value(0)));
+}
+
+#[test]
+fn test_default() {
+ let input_value_str = r#"
+ [{}]
+"#;
+ let input_value: Value = serde_json::from_str::<serde_json::Value>(input_value_str)
+ .expect("failed to parse input value")
+ .try_into()
+ .expect("failed to convert input value");
+
+ let pipeline_yaml = r#"
+---
+description: Pipeline for Testing on-failure
+
+transform:
+ - fields:
+ - version
+ type: uint8
+ default: 0
+"#;
+
+ let yaml_content = Content::Yaml(pipeline_yaml.into());
+ let pipeline: Pipeline<GreptimeTransformer> =
+ parse(&yaml_content).expect("failed to parse pipeline");
+ let output = pipeline.exec(input_value).expect("failed to exec pipeline");
+
+ let expected_schema = vec![
+ ColumnSchema {
+ column_name: "version".to_string(),
+ datatype: ColumnDataType::Uint8.into(),
+ semantic_type: SemanticType::Field.into(),
+ datatype_extension: None,
+ },
+ ColumnSchema {
+ column_name: "greptime_timestamp".to_string(),
+ datatype: ColumnDataType::TimestampNanosecond.into(),
+ semantic_type: SemanticType::Timestamp.into(),
+ datatype_extension: None,
+ },
+ ];
+
+ assert_eq!(output.schema, expected_schema);
+ assert_eq!(output.rows[0].values[0].value_data, Some(U8Value(0)));
+}
+
+#[test]
+fn test_multiple_on_failure() {
+ let input_value_str = r#"
+ [
+ {
+ "version": "-",
+ "spec_version": "-"
+ }
+ ]
+"#;
+ let input_value: Value = serde_json::from_str::<serde_json::Value>(input_value_str)
+ .expect("failed to parse input value")
+ .try_into()
+ .expect("failed to convert input value");
+
+ let pipeline_yaml = r#"
+---
+description: Pipeline for Testing on-failure
+
+transform:
+ - fields:
+ - version
+ type: uint8
+ default: 0
+ on_failure: default
+ - fields:
+ - spec_version
+ type: uint16
+ default: 0
+ on_failure: default
+"#;
+
+ let yaml_content = Content::Yaml(pipeline_yaml.into());
+ let pipeline: Pipeline<GreptimeTransformer> =
+ parse(&yaml_content).expect("failed to parse pipeline");
+ let output = pipeline.exec(input_value).expect("failed to exec pipeline");
+
+ let expected_schema = vec![
+ ColumnSchema {
+ column_name: "version".to_string(),
+ datatype: ColumnDataType::Uint8.into(),
+ semantic_type: SemanticType::Field.into(),
+ datatype_extension: None,
+ },
+ ColumnSchema {
+ column_name: "spec_version".to_string(),
+ datatype: ColumnDataType::Uint16.into(),
+ semantic_type: SemanticType::Field.into(),
+ datatype_extension: None,
+ },
+ ColumnSchema {
+ column_name: "greptime_timestamp".to_string(),
+ datatype: ColumnDataType::TimestampNanosecond.into(),
+ semantic_type: SemanticType::Timestamp.into(),
+ datatype_extension: None,
+ },
+ ];
+
+ assert_eq!(output.schema, expected_schema);
+ assert_eq!(output.rows[0].values[0].value_data, Some(U8Value(0)));
+ assert_eq!(output.rows[0].values[1].value_data, Some(U16Value(0)));
+}
diff --git a/src/pipeline/tests/pipeline.rs b/src/pipeline/tests/pipeline.rs
index 869bd13c78f3..ff9cad1bdea1 100644
--- a/src/pipeline/tests/pipeline.rs
+++ b/src/pipeline/tests/pipeline.rs
@@ -19,12 +19,8 @@ use greptime_proto::v1::value::ValueData::{
use greptime_proto::v1::Value as GreptimeValue;
use pipeline::{parse, Content, GreptimeTransformer, Pipeline, Value};
-// use pipeline::transform::GreptimeTransformer;
-// use pipeline::value::Value;
-// use pipeline::{parse, Content, Pipeline};
-
#[test]
-fn main() {
+fn test_complex_data() {
let input_value_str = r#"
[
{
|
feat
|
transform support on_failure (#4123)
|
f4b9eac465798caf0b765d313b4bf5963edbd7eb
|
2024-11-13 16:45:37
|
Yohan Wal
|
build(deps): switch to upstream jsonb (#4986)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 13e5ed111f95..c42187922c2a 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -5650,7 +5650,7 @@ dependencies = [
[[package]]
name = "jsonb"
version = "0.4.3"
-source = "git+https://github.com/CookiePieWw/jsonb.git?rev=ed2d4f8575419ed434a4ae09dee18ca900915d9c#ed2d4f8575419ed434a4ae09dee18ca900915d9c"
+source = "git+https://github.com/databendlabs/jsonb.git?rev=8c8d2fc294a39f3ff08909d60f718639cfba3875#8c8d2fc294a39f3ff08909d60f718639cfba3875"
dependencies = [
"byteorder",
"fast-float",
diff --git a/Cargo.toml b/Cargo.toml
index c2a89ea0c318..0a06bf16aba5 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -127,7 +127,7 @@ hex = "0.4"
humantime = "2.1"
humantime-serde = "1.1"
itertools = "0.10"
-jsonb = { git = "https://github.com/CookiePieWw/jsonb.git", rev = "ed2d4f8575419ed434a4ae09dee18ca900915d9c", default-features = false }
+jsonb = { git = "https://github.com/databendlabs/jsonb.git", rev = "8c8d2fc294a39f3ff08909d60f718639cfba3875", default-features = false }
lazy_static = "1.4"
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "a10facb353b41460eeb98578868ebf19c2084fac" }
mockall = "0.11.4"
|
build
|
switch to upstream jsonb (#4986)
|
afe4633320d17945ed431c90ccd1b018e9ffdcfe
|
2024-02-24 19:20:49
|
Lei, HUANG
|
feat: merge tree dedup reader (#3375)
| false
|
diff --git a/src/mito2/src/memtable/merge_tree.rs b/src/mito2/src/memtable/merge_tree.rs
index dd0511b1ec7d..d075a9deb380 100644
--- a/src/mito2/src/memtable/merge_tree.rs
+++ b/src/mito2/src/memtable/merge_tree.rs
@@ -14,7 +14,8 @@
//! Memtable implementation based on a merge tree.
-mod data;
+pub(crate) mod data;
+mod dedup;
mod dict;
mod merger;
mod metrics;
@@ -59,6 +60,8 @@ pub struct MergeTreeConfig {
pub index_max_keys_per_shard: usize,
/// Number of rows to freeze a data part.
pub data_freeze_threshold: usize,
+ /// Whether to delete duplicates rows.
+ pub dedup: bool,
}
impl Default for MergeTreeConfig {
@@ -66,6 +69,7 @@ impl Default for MergeTreeConfig {
Self {
index_max_keys_per_shard: 8192,
data_freeze_threshold: 102400,
+ dedup: true,
}
}
}
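
A minimal sketch, assuming the MergeTreeConfig shape shown above: dedup defaults to true, and a caller that wants to keep duplicate rows can override only that field while leaving the other thresholds at their defaults.

// Illustrative only; field names follow the struct above.
fn merge_tree_config_without_dedup() -> MergeTreeConfig {
    MergeTreeConfig {
        dedup: false, // keep duplicate rows instead of removing them on read/freeze
        ..Default::default()
    }
}
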
diff --git a/src/mito2/src/memtable/merge_tree/data.rs b/src/mito2/src/memtable/merge_tree/data.rs
index 9497791d2d3c..418784fc71e0 100644
--- a/src/mito2/src/memtable/merge_tree/data.rs
+++ b/src/mito2/src/memtable/merge_tree/data.rs
@@ -63,11 +63,11 @@ pub(crate) struct DataBatchRange {
impl DataBatchRange {
pub(crate) fn len(&self) -> usize {
- (self.start..self.end).len()
+ self.end - self.start
}
pub(crate) fn is_empty(&self) -> bool {
- (self.start..self.end).is_empty()
+ self.len() == 0
}
}
@@ -163,6 +163,10 @@ impl<'a> DataBatch<'a> {
},
}
}
+
+ pub(crate) fn num_rows(&self) -> usize {
+ self.range.len()
+ }
}
/// Buffer for the value part (pk_index, ts, sequence, op_type, field columns) in a shard.
@@ -180,11 +184,13 @@ pub struct DataBuffer {
op_type_builder: UInt8VectorBuilder,
/// Builders for field columns.
field_builders: Vec<LazyMutableVectorBuilder>,
+
+ dedup: bool,
}
impl DataBuffer {
/// Creates a `DataBuffer` instance with given schema and capacity.
- pub fn with_capacity(metadata: RegionMetadataRef, init_capacity: usize) -> Self {
+ pub fn with_capacity(metadata: RegionMetadataRef, init_capacity: usize, dedup: bool) -> Self {
let ts_builder = metadata
.time_index_column()
.column_schema
@@ -209,6 +215,7 @@ impl DataBuffer {
sequence_builder,
op_type_builder,
field_builders,
+ dedup,
}
}
@@ -237,7 +244,13 @@ impl DataBuffer {
pk_weights: Option<&[u16]>,
replace_pk_index: bool,
) -> Result<DataPart> {
- let encoder = DataPartEncoder::new(&self.metadata, pk_weights, None, replace_pk_index);
+ let encoder = DataPartEncoder::new(
+ &self.metadata,
+ pk_weights,
+ None,
+ replace_pk_index,
+ self.dedup,
+ );
let parts = encoder.write(self)?;
Ok(parts)
}
@@ -246,13 +259,12 @@ impl DataBuffer {
/// If pk_weights is present, yielded rows are sorted according to weights,
/// otherwise rows are sorted by "pk_weights" values as they are actually weights.
pub fn read(&mut self, pk_weights: Option<&[u16]>) -> Result<DataBufferReader> {
- // todo(hl): control whether to dedup while invoking `read`.
let batch = data_buffer_to_record_batches(
self.data_part_schema.clone(),
self,
pk_weights,
true,
- true,
+ self.dedup,
// replace_pk_index is always set to false since:
// - for DataBuffer in ShardBuilder, pk dict is not frozen
// - for DataBuffer in Shard, values in pk_index column has already been replaced during `freeze`.
@@ -629,6 +641,7 @@ struct DataPartEncoder<'a> {
pk_weights: Option<&'a [u16]>,
row_group_size: Option<usize>,
replace_pk_index: bool,
+ dedup: bool,
}
impl<'a> DataPartEncoder<'a> {
@@ -637,6 +650,7 @@ impl<'a> DataPartEncoder<'a> {
pk_weights: Option<&'a [u16]>,
row_group_size: Option<usize>,
replace_pk_index: bool,
+ dedup: bool,
) -> DataPartEncoder<'a> {
let schema = memtable_schema_to_encoded_schema(metadata);
Self {
@@ -644,6 +658,7 @@ impl<'a> DataPartEncoder<'a> {
pk_weights,
row_group_size,
replace_pk_index,
+ dedup,
}
}
@@ -663,7 +678,7 @@ impl<'a> DataPartEncoder<'a> {
source,
self.pk_weights,
false,
- true,
+ self.dedup,
self.replace_pk_index,
)?;
writer.write(&rb).context(error::EncodeMemtableSnafu)?;
@@ -803,9 +818,9 @@ pub struct DataParts {
}
impl DataParts {
- pub(crate) fn new(metadata: RegionMetadataRef, capacity: usize) -> Self {
+ pub(crate) fn new(metadata: RegionMetadataRef, capacity: usize, dedup: bool) -> Self {
Self {
- active: DataBuffer::with_capacity(metadata, capacity),
+ active: DataBuffer::with_capacity(metadata, capacity, dedup),
frozen: Vec::new(),
}
}
@@ -868,6 +883,29 @@ impl DataPartsReader {
}
}
+#[cfg(test)]
+pub(crate) fn write_rows_to_buffer(
+ buffer: &mut DataBuffer,
+ schema: &RegionMetadataRef,
+ pk_index: u16,
+ ts: Vec<i64>,
+ v0: Vec<Option<f64>>,
+ sequence: u64,
+) {
+ let kvs = crate::test_util::memtable_util::build_key_values_with_ts_seq_values(
+ schema,
+ "whatever".to_string(),
+ 1,
+ ts.into_iter(),
+ v0.into_iter(),
+ sequence,
+ );
+
+ for kv in kvs.iter() {
+ buffer.write_row(pk_index, kv);
+ }
+}
+
#[cfg(test)]
mod tests {
use datafusion::arrow::array::Float64Array;
@@ -876,7 +914,7 @@ mod tests {
use parquet::data_type::AsBytes;
use super::*;
- use crate::test_util::memtable_util::{build_key_values_with_ts_seq_values, metadata_for_test};
+ use crate::test_util::memtable_util::{extract_data_batch, metadata_for_test};
#[test]
fn test_lazy_mutable_vector_builder() {
@@ -900,7 +938,7 @@ mod tests {
fn check_test_data_buffer_to_record_batches(keep_data: bool) {
let meta = metadata_for_test();
- let mut buffer = DataBuffer::with_capacity(meta.clone(), 10);
+ let mut buffer = DataBuffer::with_capacity(meta.clone(), 10, true);
write_rows_to_buffer(&mut buffer, &meta, 0, vec![1, 2], vec![Some(0.1), None], 1);
write_rows_to_buffer(&mut buffer, &meta, 1, vec![1, 2], vec![Some(1.1), None], 2);
@@ -968,10 +1006,50 @@ mod tests {
check_test_data_buffer_to_record_batches(false);
}
+ fn check_data_buffer_dedup(dedup: bool) {
+ let metadata = metadata_for_test();
+ let mut buffer = DataBuffer::with_capacity(metadata.clone(), 10, dedup);
+ write_rows_to_buffer(
+ &mut buffer,
+ &metadata,
+ 0,
+ vec![2, 3],
+ vec![Some(1.0), Some(2.0)],
+ 0,
+ );
+ write_rows_to_buffer(
+ &mut buffer,
+ &metadata,
+ 0,
+ vec![1, 2],
+ vec![Some(1.1), Some(2.1)],
+ 2,
+ );
+
+ let mut reader = buffer.read(Some(&[0])).unwrap();
+ let mut res = vec![];
+ while reader.is_valid() {
+ let batch = reader.current_data_batch();
+ res.push(extract_data_batch(&batch));
+ reader.next().unwrap();
+ }
+ if dedup {
+ assert_eq!(vec![(0, vec![(1, 2), (2, 3), (3, 1)])], res);
+ } else {
+ assert_eq!(vec![(0, vec![(1, 2), (2, 3), (2, 0), (3, 1)])], res);
+ }
+ }
+
+ #[test]
+ fn test_data_buffer_dedup() {
+ check_data_buffer_dedup(true);
+ check_data_buffer_dedup(false);
+ }
+
#[test]
fn test_data_buffer_to_record_batches_with_dedup() {
let meta = metadata_for_test();
- let mut buffer = DataBuffer::with_capacity(meta.clone(), 10);
+ let mut buffer = DataBuffer::with_capacity(meta.clone(), 10, true);
write_rows_to_buffer(&mut buffer, &meta, 0, vec![1, 2], vec![Some(0.1), None], 1);
write_rows_to_buffer(&mut buffer, &meta, 1, vec![2], vec![Some(1.1)], 2);
@@ -1026,7 +1104,7 @@ mod tests {
#[test]
fn test_data_buffer_to_record_batches_without_dedup() {
let meta = metadata_for_test();
- let mut buffer = DataBuffer::with_capacity(meta.clone(), 10);
+ let mut buffer = DataBuffer::with_capacity(meta.clone(), 10, true);
write_rows_to_buffer(&mut buffer, &meta, 0, vec![1, 2], vec![Some(0.1), None], 1);
write_rows_to_buffer(&mut buffer, &meta, 1, vec![1, 2], vec![Some(1.1), None], 2);
@@ -1064,35 +1142,13 @@ mod tests {
);
}
- fn write_rows_to_buffer(
- buffer: &mut DataBuffer,
- schema: &RegionMetadataRef,
- pk_index: u16,
- ts: Vec<i64>,
- v0: Vec<Option<f64>>,
- sequence: u64,
- ) {
- let kvs = build_key_values_with_ts_seq_values(
- schema,
- "whatever".to_string(),
- 1,
- ts.into_iter(),
- v0.into_iter(),
- sequence,
- );
-
- for kv in kvs.iter() {
- buffer.write_row(pk_index, kv);
- }
- }
-
fn check_data_buffer_freeze(
pk_weights: Option<&[u16]>,
replace_pk_weights: bool,
expected: &[(u16, Vec<(i64, u64)>)],
) {
let meta = metadata_for_test();
- let mut buffer = DataBuffer::with_capacity(meta.clone(), 10);
+ let mut buffer = DataBuffer::with_capacity(meta.clone(), 10, true);
// write rows with null values.
write_rows_to_buffer(
@@ -1113,21 +1169,7 @@ mod tests {
.unwrap();
while reader.is_valid() {
let batch = reader.current_data_batch();
- let rb = batch.slice_record_batch();
- let ts = timestamp_array_to_i64_slice(rb.column(1));
- let sequence = rb
- .column(2)
- .as_any()
- .downcast_ref::<UInt64Array>()
- .unwrap()
- .values();
- let ts_and_seq = ts
- .iter()
- .zip(sequence.iter())
- .map(|(ts, seq)| (*ts, *seq))
- .collect::<Vec<_>>();
- res.push((batch.pk_index(), ts_and_seq));
-
+ res.push(extract_data_batch(&batch));
reader.next().unwrap();
}
assert_eq!(expected, res);
@@ -1163,7 +1205,7 @@ mod tests {
#[test]
fn test_encode_data_buffer() {
let meta = metadata_for_test();
- let mut buffer = DataBuffer::with_capacity(meta.clone(), 10);
+ let mut buffer = DataBuffer::with_capacity(meta.clone(), 10, true);
// write rows with null values.
write_rows_to_buffer(
@@ -1181,7 +1223,7 @@ mod tests {
assert_eq!(4, buffer.num_rows());
- let encoder = DataPartEncoder::new(&meta, Some(&[0, 1, 2]), None, true);
+ let encoder = DataPartEncoder::new(&meta, Some(&[0, 1, 2]), None, true, true);
let encoded = match encoder.write(&mut buffer).unwrap() {
DataPart::Parquet(data) => data.data,
};
@@ -1228,7 +1270,7 @@ mod tests {
fn check_iter_data_buffer(pk_weights: Option<&[u16]>, expected: &[Vec<f64>]) {
let meta = metadata_for_test();
- let mut buffer = DataBuffer::with_capacity(meta.clone(), 10);
+ let mut buffer = DataBuffer::with_capacity(meta.clone(), 10, true);
write_rows_to_buffer(
&mut buffer,
@@ -1268,7 +1310,7 @@ mod tests {
#[test]
fn test_iter_empty_data_buffer() {
let meta = metadata_for_test();
- let mut buffer = DataBuffer::with_capacity(meta.clone(), 10);
+ let mut buffer = DataBuffer::with_capacity(meta.clone(), 10, true);
let mut iter = buffer.read(Some(&[0, 1, 3, 2])).unwrap();
check_buffer_values_equal(&mut iter, &[]);
}
@@ -1294,7 +1336,7 @@ mod tests {
fn check_iter_data_part(weights: &[u16], expected_values: &[Vec<f64>]) {
let meta = metadata_for_test();
- let mut buffer = DataBuffer::with_capacity(meta.clone(), 10);
+ let mut buffer = DataBuffer::with_capacity(meta.clone(), 10, true);
write_rows_to_buffer(
&mut buffer,
@@ -1323,7 +1365,7 @@ mod tests {
4,
);
- let encoder = DataPartEncoder::new(&meta, Some(weights), Some(4), true);
+ let encoder = DataPartEncoder::new(&meta, Some(weights), Some(4), true, true);
let encoded = encoder.write(&mut buffer).unwrap();
let mut iter = encoded.read().unwrap();
diff --git a/src/mito2/src/memtable/merge_tree/dedup.rs b/src/mito2/src/memtable/merge_tree/dedup.rs
new file mode 100644
index 000000000000..889db134debd
--- /dev/null
+++ b/src/mito2/src/memtable/merge_tree/dedup.rs
@@ -0,0 +1,235 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::ops::Range;
+
+use crate::error::Result;
+use crate::memtable::merge_tree::data::DataBatch;
+use crate::memtable::merge_tree::PkId;
+
+pub trait DedupSource {
+ /// Returns whether current source is still valid.
+ fn is_valid(&self) -> bool;
+
+ /// Advances source to next data batch.
+ fn next(&mut self) -> Result<()>;
+
+ /// Returns current pk id.
+ /// # Panics
+ /// If source is not valid.
+ fn current_pk_id(&self) -> PkId;
+
+ /// Returns the current primary key bytes.
+ /// # Panics
+ /// If source is not valid.
+ fn current_key(&self) -> &[u8];
+
+ /// Returns the data part.
+ /// # Panics
+ /// If source is not valid.
+ fn current_data_batch(&self) -> DataBatch;
+}
+
+struct DedupReader<T> {
+ prev_batch_last_row: Option<(PkId, i64)>,
+ current_batch_range: Option<Range<usize>>,
+ inner: T,
+}
+
+impl<T: DedupSource> DedupReader<T> {
+ fn try_new(inner: T) -> Result<Self> {
+ let mut res = Self {
+ prev_batch_last_row: None,
+ current_batch_range: None,
+ inner,
+ };
+ res.next()?;
+ Ok(res)
+ }
+
+ fn is_valid(&self) -> bool {
+ self.current_batch_range.is_some()
+ }
+
+ /// Returns current encoded primary key.
+ /// # Panics
+ /// If inner reader is exhausted.
+ fn current_key(&self) -> &[u8] {
+ self.inner.current_key()
+ }
+
+ fn current_data_batch(&self) -> DataBatch {
+ let range = self.current_batch_range.as_ref().unwrap();
+ let data_batch = self.inner.current_data_batch();
+ data_batch.slice(range.start, range.len())
+ }
+
+ fn next(&mut self) -> Result<()> {
+ loop {
+ match &mut self.prev_batch_last_row {
+ None => {
+ // First shot, fill prev_batch_last_row and current_batch_range with first batch.
+ let current_batch = self.inner.current_data_batch();
+ let pk_id = self.inner.current_pk_id();
+ let (last_ts, _) = current_batch.last_row();
+ self.prev_batch_last_row = Some((pk_id, last_ts));
+ self.current_batch_range = Some(0..current_batch.num_rows());
+ break;
+ }
+ Some(prev_last_row) => {
+ self.inner.next()?;
+ if !self.inner.is_valid() {
+ // Resets current_batch_range if inner reader is exhausted.
+ self.current_batch_range = None;
+ break;
+ }
+ let current_batch = self.inner.current_data_batch();
+ let current_pk_id = self.inner.current_pk_id();
+ let (first_ts, _) = current_batch.first_row();
+ let rows_in_batch = current_batch.num_rows();
+
+ let (start, end) = if &(current_pk_id, first_ts) == prev_last_row {
+ // First row in this batch duplicated with the last row in previous batch
+ if rows_in_batch == 1 {
+ // If batch is exhausted, move to next batch.
+ continue;
+ } else {
+ // Skip the first row, start from offset 1.
+ (1, rows_in_batch)
+ }
+ } else {
+ // No duplicates found, yield whole batch.
+ (0, rows_in_batch)
+ };
+
+ let (last_ts, _) = current_batch.last_row();
+ *prev_last_row = (current_pk_id, last_ts);
+ self.current_batch_range = Some(start..end);
+ break;
+ }
+ }
+ }
+ Ok(())
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use store_api::metadata::RegionMetadataRef;
+
+ use super::*;
+ use crate::memtable::merge_tree::data::{
+ write_rows_to_buffer, DataBuffer, DataParts, DataPartsReader,
+ };
+ use crate::test_util::memtable_util::{extract_data_batch, metadata_for_test};
+
+ impl DedupSource for DataPartsReader {
+ fn is_valid(&self) -> bool {
+ self.is_valid()
+ }
+
+ fn next(&mut self) -> Result<()> {
+ self.next()
+ }
+
+ fn current_pk_id(&self) -> PkId {
+ PkId {
+ shard_id: 0,
+ pk_index: self.current_data_batch().pk_index(),
+ }
+ }
+
+ fn current_key(&self) -> &[u8] {
+ b"abcf"
+ }
+
+ fn current_data_batch(&self) -> DataBatch {
+ self.current_data_batch()
+ }
+ }
+
+ fn build_data_buffer(
+ meta: RegionMetadataRef,
+ rows: Vec<(u16, Vec<i64>)>,
+ seq: &mut u64,
+ ) -> DataBuffer {
+ let mut buffer = DataBuffer::with_capacity(meta.clone(), 10, true);
+
+ for row in rows {
+ let (pk_index, timestamps) = row;
+ let num_rows = timestamps.len() as u64;
+ let v = timestamps.iter().map(|v| Some(*v as f64)).collect();
+
+ write_rows_to_buffer(&mut buffer, &meta, pk_index, timestamps, v, *seq);
+ *seq += num_rows;
+ }
+ buffer
+ }
+
+ fn check_data_parts_reader_dedup(
+ parts: Vec<Vec<(u16, Vec<i64>)>>,
+ expected: Vec<(u16, Vec<(i64, u64)>)>,
+ ) {
+ let meta = metadata_for_test();
+ let mut seq = 0;
+
+ let mut frozens = Vec::with_capacity(parts.len());
+ for part in parts {
+ let mut buffer1 = build_data_buffer(meta.clone(), part, &mut seq);
+ let part1 = buffer1.freeze(None, false).unwrap();
+ frozens.push(part1);
+ }
+
+ let mut parts = DataParts::new(meta, 10, true).with_frozen(frozens);
+
+ let mut res = Vec::with_capacity(expected.len());
+ let mut reader = DedupReader::try_new(parts.read().unwrap()).unwrap();
+ while reader.is_valid() {
+ let batch = reader.current_data_batch();
+ res.push(extract_data_batch(&batch));
+ reader.next().unwrap();
+ }
+
+ assert_eq!(expected, res);
+ }
+
+ #[test]
+ fn test_data_parts_reader_dedup() {
+ check_data_parts_reader_dedup(vec![vec![(0, vec![1, 2])]], vec![(0, vec![(1, 0), (2, 1)])]);
+
+ check_data_parts_reader_dedup(
+ vec![
+ vec![(0, vec![1, 2])],
+ vec![(0, vec![1, 2])],
+ vec![(0, vec![2, 3])],
+ ],
+ vec![(0, vec![(1, 2)]), (0, vec![(2, 4)]), (0, vec![(3, 5)])],
+ );
+
+ check_data_parts_reader_dedup(
+ vec![vec![(0, vec![1])], vec![(0, vec![2])], vec![(0, vec![3])]],
+ vec![(0, vec![(1, 0)]), (0, vec![(2, 1)]), (0, vec![(3, 2)])],
+ );
+
+ check_data_parts_reader_dedup(
+ vec![vec![(0, vec![1])], vec![(0, vec![1])], vec![(0, vec![1])]],
+ vec![(0, vec![(1, 2)])],
+ );
+
+ check_data_parts_reader_dedup(
+ vec![vec![(0, vec![1])], vec![(1, vec![1])], vec![(2, vec![1])]],
+ vec![(0, vec![(1, 0)]), (1, vec![(1, 1)]), (2, vec![(1, 2)])],
+ );
+ }
+}
diff --git a/src/mito2/src/memtable/merge_tree/merger.rs b/src/mito2/src/memtable/merge_tree/merger.rs
index a6394ea924fe..c5012e5ee86e 100644
--- a/src/mito2/src/memtable/merge_tree/merger.rs
+++ b/src/mito2/src/memtable/merge_tree/merger.rs
@@ -357,7 +357,7 @@ mod tests {
#[test]
fn test_merger() {
let metadata = metadata_for_test();
- let mut buffer1 = DataBuffer::with_capacity(metadata.clone(), 10);
+ let mut buffer1 = DataBuffer::with_capacity(metadata.clone(), 10, true);
let weight = &[2, 1, 0];
let mut seq = 0;
write_rows_to_buffer(&mut buffer1, &metadata, 1, vec![2, 3], &mut seq);
@@ -366,7 +366,7 @@ mod tests {
buffer1.freeze(Some(weight), true).unwrap().read().unwrap(),
));
- let mut buffer2 = DataBuffer::with_capacity(metadata.clone(), 10);
+ let mut buffer2 = DataBuffer::with_capacity(metadata.clone(), 10, true);
write_rows_to_buffer(&mut buffer2, &metadata, 1, vec![3], &mut seq);
write_rows_to_buffer(&mut buffer2, &metadata, 0, vec![1], &mut seq);
let node2 = DataNode::new(DataSource::Part(
@@ -388,7 +388,7 @@ mod tests {
#[test]
fn test_merger2() {
let metadata = metadata_for_test();
- let mut buffer1 = DataBuffer::with_capacity(metadata.clone(), 10);
+ let mut buffer1 = DataBuffer::with_capacity(metadata.clone(), 10, true);
let weight = &[2, 1, 0];
let mut seq = 0;
write_rows_to_buffer(&mut buffer1, &metadata, 1, vec![2, 3], &mut seq);
@@ -397,13 +397,13 @@ mod tests {
buffer1.freeze(Some(weight), true).unwrap().read().unwrap(),
));
- let mut buffer2 = DataBuffer::with_capacity(metadata.clone(), 10);
+ let mut buffer2 = DataBuffer::with_capacity(metadata.clone(), 10, true);
write_rows_to_buffer(&mut buffer2, &metadata, 1, vec![3], &mut seq);
let node2 = DataNode::new(DataSource::Part(
buffer2.freeze(Some(weight), true).unwrap().read().unwrap(),
));
- let mut buffer3 = DataBuffer::with_capacity(metadata.clone(), 10);
+ let mut buffer3 = DataBuffer::with_capacity(metadata.clone(), 10, true);
write_rows_to_buffer(&mut buffer3, &metadata, 0, vec![2, 3], &mut seq);
let node3 = DataNode::new(DataSource::Part(
buffer3.freeze(Some(weight), true).unwrap().read().unwrap(),
@@ -426,7 +426,7 @@ mod tests {
#[test]
fn test_merger_overlapping() {
let metadata = metadata_for_test();
- let mut buffer1 = DataBuffer::with_capacity(metadata.clone(), 10);
+ let mut buffer1 = DataBuffer::with_capacity(metadata.clone(), 10, true);
let weight = &[0, 1, 2];
let mut seq = 0;
write_rows_to_buffer(&mut buffer1, &metadata, 0, vec![1, 2, 3], &mut seq);
@@ -434,13 +434,13 @@ mod tests {
buffer1.freeze(Some(weight), true).unwrap().read().unwrap(),
));
- let mut buffer2 = DataBuffer::with_capacity(metadata.clone(), 10);
+ let mut buffer2 = DataBuffer::with_capacity(metadata.clone(), 10, true);
write_rows_to_buffer(&mut buffer2, &metadata, 1, vec![2, 3], &mut seq);
let node2 = DataNode::new(DataSource::Part(
buffer2.freeze(Some(weight), true).unwrap().read().unwrap(),
));
- let mut buffer3 = DataBuffer::with_capacity(metadata.clone(), 10);
+ let mut buffer3 = DataBuffer::with_capacity(metadata.clone(), 10, true);
write_rows_to_buffer(&mut buffer3, &metadata, 0, vec![2, 3], &mut seq);
let node3 = DataNode::new(DataSource::Part(
buffer3.freeze(Some(weight), true).unwrap().read().unwrap(),
@@ -462,19 +462,19 @@ mod tests {
#[test]
fn test_merger_parts_and_buffer() {
let metadata = metadata_for_test();
- let mut buffer1 = DataBuffer::with_capacity(metadata.clone(), 10);
+ let mut buffer1 = DataBuffer::with_capacity(metadata.clone(), 10, true);
let weight = &[0, 1, 2];
let mut seq = 0;
write_rows_to_buffer(&mut buffer1, &metadata, 0, vec![1, 2, 3], &mut seq);
let node1 = DataNode::new(DataSource::Buffer(buffer1.read(Some(weight)).unwrap()));
- let mut buffer2 = DataBuffer::with_capacity(metadata.clone(), 10);
+ let mut buffer2 = DataBuffer::with_capacity(metadata.clone(), 10, true);
write_rows_to_buffer(&mut buffer2, &metadata, 1, vec![2, 3], &mut seq);
let node2 = DataNode::new(DataSource::Part(
buffer2.freeze(Some(weight), true).unwrap().read().unwrap(),
));
- let mut buffer3 = DataBuffer::with_capacity(metadata.clone(), 10);
+ let mut buffer3 = DataBuffer::with_capacity(metadata.clone(), 10, true);
write_rows_to_buffer(&mut buffer3, &metadata, 0, vec![2, 3], &mut seq);
let node3 = DataNode::new(DataSource::Part(
buffer3.freeze(Some(weight), true).unwrap().read().unwrap(),
@@ -496,7 +496,7 @@ mod tests {
#[test]
fn test_merger_overlapping_2() {
let metadata = metadata_for_test();
- let mut buffer1 = DataBuffer::with_capacity(metadata.clone(), 10);
+ let mut buffer1 = DataBuffer::with_capacity(metadata.clone(), 10, true);
let weight = &[0, 1, 2];
let mut seq = 0;
write_rows_to_buffer(&mut buffer1, &metadata, 0, vec![1, 2, 2], &mut seq);
@@ -504,13 +504,13 @@ mod tests {
buffer1.freeze(Some(weight), true).unwrap().read().unwrap(),
));
- let mut buffer2 = DataBuffer::with_capacity(metadata.clone(), 10);
+ let mut buffer2 = DataBuffer::with_capacity(metadata.clone(), 10, true);
write_rows_to_buffer(&mut buffer2, &metadata, 0, vec![2], &mut seq);
let node2 = DataNode::new(DataSource::Part(
buffer2.freeze(Some(weight), true).unwrap().read().unwrap(),
));
- let mut buffer3 = DataBuffer::with_capacity(metadata.clone(), 10);
+ let mut buffer3 = DataBuffer::with_capacity(metadata.clone(), 10, true);
write_rows_to_buffer(&mut buffer3, &metadata, 0, vec![2], &mut seq);
let node3 = DataNode::new(DataSource::Part(
buffer3.freeze(Some(weight), true).unwrap().read().unwrap(),
@@ -530,7 +530,7 @@ mod tests {
#[test]
fn test_merger_overlapping_3() {
let metadata = metadata_for_test();
- let mut buffer1 = DataBuffer::with_capacity(metadata.clone(), 10);
+ let mut buffer1 = DataBuffer::with_capacity(metadata.clone(), 10, true);
let weight = &[0, 1, 2];
let mut seq = 0;
write_rows_to_buffer(&mut buffer1, &metadata, 0, vec![0, 1], &mut seq);
@@ -538,7 +538,7 @@ mod tests {
buffer1.freeze(Some(weight), true).unwrap().read().unwrap(),
));
- let mut buffer2 = DataBuffer::with_capacity(metadata.clone(), 10);
+ let mut buffer2 = DataBuffer::with_capacity(metadata.clone(), 10, true);
write_rows_to_buffer(&mut buffer2, &metadata, 0, vec![1], &mut seq);
let node2 = DataNode::new(DataSource::Part(
buffer2.freeze(Some(weight), true).unwrap().read().unwrap(),
diff --git a/src/mito2/src/memtable/merge_tree/partition.rs b/src/mito2/src/memtable/merge_tree/partition.rs
index dc817d134ded..89302906b27e 100644
--- a/src/mito2/src/memtable/merge_tree/partition.rs
+++ b/src/mito2/src/memtable/merge_tree/partition.rs
@@ -46,7 +46,7 @@ impl Partition {
let shard_builder = ShardBuilder::new(metadata.clone(), config);
Partition {
- inner: RwLock::new(Inner::new(metadata, shard_builder)),
+ inner: RwLock::new(Inner::new(metadata, shard_builder, config.dedup)),
}
}
@@ -128,6 +128,7 @@ impl Partition {
active_shard_id: inner.active_shard_id,
shards,
num_rows: 0,
+ dedup: config.dedup,
}),
}
}
@@ -194,21 +195,23 @@ struct Inner {
/// Shards with frozen dictionary.
shards: Vec<Shard>,
num_rows: usize,
+ dedup: bool,
}
impl Inner {
- fn new(metadata: RegionMetadataRef, shard_builder: ShardBuilder) -> Self {
+ fn new(metadata: RegionMetadataRef, shard_builder: ShardBuilder, dedup: bool) -> Self {
let mut inner = Self {
metadata,
shard_builder,
active_shard_id: 0,
shards: Vec::new(),
num_rows: 0,
+ dedup,
};
if inner.metadata.primary_key.is_empty() {
- let data_parts = DataParts::new(inner.metadata.clone(), DATA_INIT_CAP);
- inner.shards.push(Shard::new(0, None, data_parts));
+ let data_parts = DataParts::new(inner.metadata.clone(), DATA_INIT_CAP, dedup);
+ inner.shards.push(Shard::new(0, None, data_parts, dedup));
inner.active_shard_id = 1;
}
diff --git a/src/mito2/src/memtable/merge_tree/shard.rs b/src/mito2/src/memtable/merge_tree/shard.rs
index 86c5ea18f1a2..a9ad6e30b822 100644
--- a/src/mito2/src/memtable/merge_tree/shard.rs
+++ b/src/mito2/src/memtable/merge_tree/shard.rs
@@ -28,15 +28,22 @@ pub struct Shard {
key_dict: Option<KeyDictRef>,
/// Data in the shard.
data_parts: DataParts,
+ dedup: bool,
}
impl Shard {
/// Returns a new shard.
- pub fn new(shard_id: ShardId, key_dict: Option<KeyDictRef>, data_parts: DataParts) -> Shard {
+ pub fn new(
+ shard_id: ShardId,
+ key_dict: Option<KeyDictRef>,
+ data_parts: DataParts,
+ dedup: bool,
+ ) -> Shard {
Shard {
shard_id,
key_dict,
data_parts,
+ dedup,
}
}
@@ -77,7 +84,8 @@ impl Shard {
Shard {
shard_id: self.shard_id,
key_dict: self.key_dict.clone(),
- data_parts: DataParts::new(metadata, DATA_INIT_CAP),
+ data_parts: DataParts::new(metadata, DATA_INIT_CAP, self.dedup),
+ dedup: self.dedup,
}
}
}
@@ -144,9 +152,9 @@ mod tests {
}
let dict = dict_builder.finish().unwrap();
- let data_parts = DataParts::new(metadata, DATA_INIT_CAP);
+ let data_parts = DataParts::new(metadata, DATA_INIT_CAP, true);
- Shard::new(shard_id, Some(Arc::new(dict)), data_parts)
+ Shard::new(shard_id, Some(Arc::new(dict)), data_parts, true)
}
#[test]
diff --git a/src/mito2/src/memtable/merge_tree/shard_builder.rs b/src/mito2/src/memtable/merge_tree/shard_builder.rs
index 96e33ce0698a..68ebac37a2f5 100644
--- a/src/mito2/src/memtable/merge_tree/shard_builder.rs
+++ b/src/mito2/src/memtable/merge_tree/shard_builder.rs
@@ -38,15 +38,18 @@ pub struct ShardBuilder {
data_buffer: DataBuffer,
/// Number of rows to freeze a data part.
data_freeze_threshold: usize,
+ dedup: bool,
}
impl ShardBuilder {
/// Returns a new builder.
pub fn new(metadata: RegionMetadataRef, config: &MergeTreeConfig) -> ShardBuilder {
+ let dedup = config.dedup;
ShardBuilder {
dict_builder: KeyDictBuilder::new(config.index_max_keys_per_shard),
- data_buffer: DataBuffer::with_capacity(metadata, DATA_INIT_CAP),
+ data_buffer: DataBuffer::with_capacity(metadata, DATA_INIT_CAP, dedup),
data_freeze_threshold: config.data_freeze_threshold,
+ dedup,
}
}
@@ -87,10 +90,11 @@ impl ShardBuilder {
};
// build data parts.
- let data_parts = DataParts::new(metadata, DATA_INIT_CAP).with_frozen(vec![data_part]);
+ let data_parts =
+ DataParts::new(metadata, DATA_INIT_CAP, self.dedup).with_frozen(vec![data_part]);
let key_dict = key_dict.map(Arc::new);
- Ok(Some(Shard::new(shard_id, key_dict, data_parts)))
+ Ok(Some(Shard::new(shard_id, key_dict, data_parts, self.dedup)))
}
/// Scans the shard builder.
@@ -165,9 +169,9 @@ mod tests {
}
let dict = dict_builder.finish().unwrap();
- let data_parts = DataParts::new(metadata, DATA_INIT_CAP);
+ let data_parts = DataParts::new(metadata, DATA_INIT_CAP, true);
- Shard::new(shard_id, Some(Arc::new(dict)), data_parts)
+ Shard::new(shard_id, Some(Arc::new(dict)), data_parts, true)
}
#[test]
diff --git a/src/mito2/src/test_util/memtable_util.rs b/src/mito2/src/test_util/memtable_util.rs
index a640d3c20edf..26d110415b60 100644
--- a/src/mito2/src/test_util/memtable_util.rs
+++ b/src/mito2/src/test_util/memtable_util.rs
@@ -20,6 +20,7 @@ use std::sync::Arc;
use api::helper::ColumnDataTypeWrapper;
use api::v1::value::ValueData;
use api::v1::{Row, Rows, SemanticType};
+use datatypes::arrow::array::UInt64Array;
use datatypes::data_type::ConcreteDataType;
use datatypes::schema::ColumnSchema;
use datatypes::value::ValueRef;
@@ -29,6 +30,7 @@ use table::predicate::Predicate;
use crate::error::Result;
use crate::memtable::key_values::KeyValue;
+use crate::memtable::merge_tree::data::{timestamp_array_to_i64_slice, DataBatch, DataBuffer};
use crate::memtable::{
BoxedBatchIterator, KeyValues, Memtable, MemtableBuilder, MemtableId, MemtableRef,
MemtableStats,
@@ -177,6 +179,46 @@ pub(crate) fn build_key_values(
)
}
+pub(crate) fn write_rows_to_buffer(
+ buffer: &mut DataBuffer,
+ schema: &RegionMetadataRef,
+ pk_index: u16,
+ ts: Vec<i64>,
+ v0: Vec<Option<f64>>,
+ sequence: u64,
+) {
+ let kvs = crate::test_util::memtable_util::build_key_values_with_ts_seq_values(
+ schema,
+ "whatever".to_string(),
+ 1,
+ ts.into_iter(),
+ v0.into_iter(),
+ sequence,
+ );
+
+ for kv in kvs.iter() {
+ buffer.write_row(pk_index, kv);
+ }
+}
+
+/// Extracts pk index, timestamps and sequences from [DataBatch].
+pub(crate) fn extract_data_batch(batch: &DataBatch) -> (u16, Vec<(i64, u64)>) {
+ let rb = batch.slice_record_batch();
+ let ts = timestamp_array_to_i64_slice(rb.column(1));
+ let seq = rb
+ .column(2)
+ .as_any()
+ .downcast_ref::<UInt64Array>()
+ .unwrap()
+ .values();
+ let ts_and_seq = ts
+ .iter()
+ .zip(seq.iter())
+ .map(|(ts, seq)| (*ts, *seq))
+ .collect::<Vec<_>>();
+ (batch.pk_index(), ts_and_seq)
+}
+
/// Builds key values with timestamps (ms) and sequences for test.
pub(crate) fn build_key_values_with_ts_seq_values(
schema: &RegionMetadataRef,
|
feat
|
merge tree dedup reader (#3375)
|
214fd38f693ce1edbebdcc1c3ce4bfcdfe645612
|
2024-07-01 08:49:25
|
Jeremyhi
|
feat: add build info for flow heartbeat task (#4228)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 68663ae65c5a..9d5a9a6ab2a4 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3966,6 +3966,7 @@ dependencies = [
"common-runtime",
"common-telemetry",
"common-time",
+ "common-version",
"datafusion 38.0.0",
"datafusion-common 38.0.0",
"datafusion-expr 38.0.0",
diff --git a/src/datanode/src/datanode.rs b/src/datanode/src/datanode.rs
index 04dc6c196a61..fa07c00adfb3 100644
--- a/src/datanode/src/datanode.rs
+++ b/src/datanode/src/datanode.rs
@@ -136,7 +136,6 @@ impl Datanode {
if let Some(heartbeat_task) = &self.heartbeat_task {
heartbeat_task
.close()
- .await
.map_err(BoxedError::new)
.context(ShutdownInstanceSnafu)?;
}
diff --git a/src/datanode/src/heartbeat.rs b/src/datanode/src/heartbeat.rs
index ff872b7959dd..20eac6f8f5a8 100644
--- a/src/datanode/src/heartbeat.rs
+++ b/src/datanode/src/heartbeat.rs
@@ -158,7 +158,7 @@ impl HeartbeatTask {
ctx: HeartbeatResponseHandlerContext,
handler_executor: HeartbeatResponseHandlerExecutorRef,
) -> Result<()> {
- trace!("heartbeat response: {:?}", ctx.response);
+ trace!("Heartbeat response: {:?}", ctx.response);
handler_executor
.handle(ctx)
.await
@@ -245,7 +245,7 @@ impl HeartbeatTask {
}
_ = &mut sleep => {
let build_info = common_version::build_info();
- let region_stats = Self::load_region_stats(®ion_server_clone).await;
+ let region_stats = Self::load_region_stats(®ion_server_clone);
let now = Instant::now();
let duration_since_epoch = (now - epoch).as_millis() as u64;
let req = HeartbeatRequest {
@@ -313,30 +313,23 @@ impl HeartbeatTask {
Ok(())
}
- async fn load_region_stats(region_server: &RegionServer) -> Vec<RegionStat> {
- let regions = region_server.reportable_regions();
-
- let mut region_stats = Vec::new();
- for stat in regions {
- let approximate_bytes = region_server
- .region_disk_usage(stat.region_id)
- .await
- .unwrap_or(0);
- let region_stat = RegionStat {
+ fn load_region_stats(region_server: &RegionServer) -> Vec<RegionStat> {
+ region_server
+ .reportable_regions()
+ .into_iter()
+ .map(|stat| RegionStat {
region_id: stat.region_id.as_u64(),
engine: stat.engine,
role: RegionRole::from(stat.role).into(),
- approximate_bytes,
- // TODO(ruihang): scratch more info
+ // TODO(jeremy): w/rcus
rcus: 0,
wcus: 0,
- };
- region_stats.push(region_stat);
- }
- region_stats
+ approximate_bytes: region_server.region_disk_usage(stat.region_id).unwrap_or(0),
+ })
+ .collect()
}
- pub async fn close(&self) -> Result<()> {
+ pub fn close(&self) -> Result<()> {
let running = self.running.clone();
if running
.compare_exchange(true, false, Ordering::AcqRel, Ordering::Acquire)
diff --git a/src/datanode/src/region_server.rs b/src/datanode/src/region_server.rs
index 8fa2eae383e5..83225334eaf9 100644
--- a/src/datanode/src/region_server.rs
+++ b/src/datanode/src/region_server.rs
@@ -290,7 +290,7 @@ impl RegionServer {
self.inner.runtime.clone()
}
- pub async fn region_disk_usage(&self, region_id: RegionId) -> Option<i64> {
+ pub fn region_disk_usage(&self, region_id: RegionId) -> Option<i64> {
match self.inner.region_map.get(®ion_id) {
Some(e) => e.region_disk_usage(region_id),
None => None,
diff --git a/src/flow/Cargo.toml b/src/flow/Cargo.toml
index fcf33e45fe44..ebf276069502 100644
--- a/src/flow/Cargo.toml
+++ b/src/flow/Cargo.toml
@@ -26,6 +26,7 @@ common-recordbatch.workspace = true
common-runtime.workspace = true
common-telemetry.workspace = true
common-time.workspace = true
+common-version.workspace = true
datafusion.workspace = true
datafusion-common.workspace = true
datafusion-expr.workspace = true
diff --git a/src/flow/src/heartbeat.rs b/src/flow/src/heartbeat.rs
index e46769aeb699..ed3fe66a8651 100644
--- a/src/flow/src/heartbeat.rs
+++ b/src/flow/src/heartbeat.rs
@@ -24,6 +24,7 @@ use common_meta::heartbeat::handler::{
use common_meta::heartbeat::mailbox::{HeartbeatMailbox, MailboxRef, OutgoingMessage};
use common_meta::heartbeat::utils::outgoing_message_to_mailbox_message;
use common_telemetry::{debug, error, info};
+use greptime_proto::v1::meta::NodeInfo;
use meta_client::client::{HeartbeatSender, HeartbeatStream, MetaClient};
use servers::addrs;
use servers::heartbeat_options::HeartbeatOptions;
@@ -43,6 +44,7 @@ pub struct HeartbeatTask {
report_interval: Duration,
retry_interval: Duration,
resp_handler_executor: HeartbeatResponseHandlerExecutorRef,
+ start_time_ms: u64,
}
impl HeartbeatTask {
@@ -59,6 +61,7 @@ impl HeartbeatTask {
report_interval: heartbeat_opts.interval,
retry_interval: heartbeat_opts.retry_interval,
resp_handler_executor,
+ start_time_ms: common_time::util::current_time_millis() as u64,
}
}
@@ -84,23 +87,34 @@ impl HeartbeatTask {
}
fn create_heartbeat_request(
- message: OutgoingMessage,
- self_peer: &Option<Peer>,
+ message: Option<OutgoingMessage>,
+ peer: Option<Peer>,
+ start_time_ms: u64,
) -> Option<HeartbeatRequest> {
- match outgoing_message_to_mailbox_message(message) {
- Ok(message) => {
- let req = HeartbeatRequest {
- mailbox_message: Some(message),
- peer: self_peer.clone(),
- ..Default::default()
- };
- Some(req)
- }
- Err(e) => {
+ let mailbox_message = match message.map(outgoing_message_to_mailbox_message) {
+ Some(Ok(message)) => Some(message),
+ Some(Err(e)) => {
error!(e; "Failed to encode mailbox messages");
- None
+ return None;
}
- }
+ None => None,
+ };
+
+ Some(HeartbeatRequest {
+ mailbox_message,
+ peer,
+ info: Self::build_node_info(start_time_ms),
+ ..Default::default()
+ })
+ }
+
+ fn build_node_info(start_time_ms: u64) -> Option<NodeInfo> {
+ let build_info = common_version::build_info();
+ Some(NodeInfo {
+ version: build_info.version.to_string(),
+ git_commit: build_info.commit_short.to_string(),
+ start_time_ms,
+ })
}
fn start_heartbeat_report(
@@ -109,6 +123,7 @@ impl HeartbeatTask {
mut outgoing_rx: mpsc::Receiver<OutgoingMessage>,
) {
let report_interval = self.report_interval;
+ let start_time_ms = self.start_time_ms;
let self_peer = Some(Peer {
id: self.node_id,
addr: self.peer_addr.clone(),
@@ -124,18 +139,14 @@ impl HeartbeatTask {
let req = tokio::select! {
message = outgoing_rx.recv() => {
if let Some(message) = message {
- Self::create_heartbeat_request(message, &self_peer)
+ Self::create_heartbeat_request(Some(message), self_peer.clone(), start_time_ms)
} else {
// Receives None that means Sender was dropped, we need to break the current loop
break
}
}
_ = interval.tick() => {
- let req = HeartbeatRequest {
- peer: self_peer.clone(),
- ..Default::default()
- };
- Some(req)
+ Self::create_heartbeat_request(None, self_peer.clone(), start_time_ms)
}
};
diff --git a/src/frontend/src/heartbeat.rs b/src/frontend/src/heartbeat.rs
index 48c09bacd5ce..1bea71e87e3e 100644
--- a/src/frontend/src/heartbeat.rs
+++ b/src/frontend/src/heartbeat.rs
@@ -110,14 +110,36 @@ impl HeartbeatTask {
});
}
- fn build_node_info(start_time_ms: u64) -> NodeInfo {
+ fn create_heartbeat_request(
+ message: Option<OutgoingMessage>,
+ peer: Option<Peer>,
+ start_time_ms: u64,
+ ) -> Option<HeartbeatRequest> {
+ let mailbox_message = match message.map(outgoing_message_to_mailbox_message) {
+ Some(Ok(message)) => Some(message),
+ Some(Err(e)) => {
+ error!(e; "Failed to encode mailbox messages");
+ return None;
+ }
+ None => None,
+ };
+
+ Some(HeartbeatRequest {
+ mailbox_message,
+ peer,
+ info: Self::build_node_info(start_time_ms),
+ ..Default::default()
+ })
+ }
+
+ fn build_node_info(start_time_ms: u64) -> Option<NodeInfo> {
let build_info = common_version::build_info();
- NodeInfo {
+ Some(NodeInfo {
version: build_info.version.to_string(),
git_commit: build_info.commit_short.to_string(),
start_time_ms,
- }
+ })
}
fn start_heartbeat_report(
@@ -141,21 +163,7 @@ impl HeartbeatTask {
let req = tokio::select! {
message = outgoing_rx.recv() => {
if let Some(message) = message {
- match outgoing_message_to_mailbox_message(message) {
- Ok(message) => {
- let req = HeartbeatRequest {
- mailbox_message: Some(message),
- peer: self_peer.clone(),
- info: Some(Self::build_node_info(start_time_ms)),
- ..Default::default()
- };
- Some(req)
- }
- Err(e) => {
- error!(e; "Failed to encode mailbox messages");
- None
- }
- }
+ Self::create_heartbeat_request(Some(message), self_peer.clone(), start_time_ms)
} else {
// Receives None that means Sender was dropped, we need to break the current loop
break
@@ -163,12 +171,7 @@ impl HeartbeatTask {
}
_ = &mut sleep => {
sleep.as_mut().reset(Instant::now() + Duration::from_millis(report_interval));
- let req = HeartbeatRequest {
- peer: self_peer.clone(),
- info: Some(Self::build_node_info(start_time_ms)),
- ..Default::default()
- };
- Some(req)
+ Self::create_heartbeat_request(None, self_peer.clone(), start_time_ms)
}
};
|
feat
|
add build info for flow heartbeat task (#4228)
|
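A minimal sketch, with hypothetical placeholder types, of the select-loop pattern the heartbeat change above relies on: forward an outgoing mailbox message when one arrives, otherwise send a periodic report when the interval ticks. This is not the GreptimeDB implementation, only the shape of the loop.

```rust
use tokio::sync::mpsc;
use tokio::time::{interval, Duration};

// Hypothetical stand-ins for OutgoingMessage / HeartbeatRequest in the diff above.
struct OutgoingMessage(String);
struct HeartbeatRequest(Option<String>);

async fn report_loop(mut outgoing_rx: mpsc::Receiver<OutgoingMessage>) {
    let mut tick = interval(Duration::from_secs(5));
    loop {
        let req = tokio::select! {
            message = outgoing_rx.recv() => match message {
                // Wrap the mailbox message into the next heartbeat request.
                Some(OutgoingMessage(m)) => Some(HeartbeatRequest(Some(m))),
                // Sender dropped: stop reporting.
                None => break,
            },
            // Interval tick: send an empty periodic report.
            _ = tick.tick() => Some(HeartbeatRequest(None)),
        };
        if let Some(_req) = req {
            // send `_req` over the heartbeat stream here
        }
    }
}
```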
3150f4b22e6163571e50c4f451f164d95f992de1
|
2023-08-18 15:15:46
|
Ruihang Xia
|
fix: specify input ordering and distribution for prom plan (#2204)
| false
|
diff --git a/src/promql/src/extension_plan/normalize.rs b/src/promql/src/extension_plan/normalize.rs
index 28977ef4d506..968e8a6d3782 100644
--- a/src/promql/src/extension_plan/normalize.rs
+++ b/src/promql/src/extension_plan/normalize.rs
@@ -26,7 +26,7 @@ use datafusion::logical_expr::{EmptyRelation, Expr, LogicalPlan, UserDefinedLogi
use datafusion::physical_expr::PhysicalSortExpr;
use datafusion::physical_plan::metrics::{BaselineMetrics, ExecutionPlanMetricsSet, MetricsSet};
use datafusion::physical_plan::{
- DisplayAs, DisplayFormatType, ExecutionPlan, Partitioning, RecordBatchStream,
+ DisplayAs, DisplayFormatType, Distribution, ExecutionPlan, Partitioning, RecordBatchStream,
SendableRecordBatchStream,
};
use datatypes::arrow::array::TimestampMillisecondArray;
@@ -166,6 +166,10 @@ impl ExecutionPlan for SeriesNormalizeExec {
self.input.schema()
}
+ fn required_input_distribution(&self) -> Vec<Distribution> {
+ vec![Distribution::SinglePartition]
+ }
+
fn output_partitioning(&self) -> Partitioning {
self.input.output_partitioning()
}
diff --git a/src/promql/src/extension_plan/series_divide.rs b/src/promql/src/extension_plan/series_divide.rs
index 502d08ce0b32..55be29ab1804 100644
--- a/src/promql/src/extension_plan/series_divide.rs
+++ b/src/promql/src/extension_plan/series_divide.rs
@@ -24,7 +24,8 @@ use datafusion::common::{DFSchema, DFSchemaRef};
use datafusion::error::Result as DataFusionResult;
use datafusion::execution::context::TaskContext;
use datafusion::logical_expr::{EmptyRelation, Expr, LogicalPlan, UserDefinedLogicalNodeCore};
-use datafusion::physical_expr::PhysicalSortExpr;
+use datafusion::physical_expr::{PhysicalSortExpr, PhysicalSortRequirement};
+use datafusion::physical_plan::expressions::Column as ColumnExpr;
use datafusion::physical_plan::metrics::{BaselineMetrics, ExecutionPlanMetricsSet, MetricsSet};
use datafusion::physical_plan::{
DisplayAs, DisplayFormatType, Distribution, ExecutionPlan, Partitioning, RecordBatchStream,
@@ -136,7 +137,19 @@ impl ExecutionPlan for SeriesDivideExec {
vec![Distribution::SinglePartition]
}
- // TODO(ruihang): specify required input ordering
+ fn required_input_ordering(&self) -> Vec<Option<Vec<PhysicalSortRequirement>>> {
+ let input_schema = self.input.schema();
+ let exprs = self
+ .tag_columns
+ .iter()
+ .map(|tag| PhysicalSortRequirement {
+ // Safety: the tag column names is verified in the planning phase
+ expr: Arc::new(ColumnExpr::new_with_schema(tag, &input_schema).unwrap()),
+ options: None,
+ })
+ .collect();
+ vec![Some(exprs)]
+ }
fn output_ordering(&self) -> Option<&[PhysicalSortExpr]> {
self.input.output_ordering()
diff --git a/tests/cases/distributed/tql-explain-analyze/analyze.result b/tests/cases/distributed/tql-explain-analyze/analyze.result
index c75c9b00f4df..c8e8a785f964 100644
--- a/tests/cases/distributed/tql-explain-analyze/analyze.result
+++ b/tests/cases/distributed/tql-explain-analyze/analyze.result
@@ -19,10 +19,10 @@ TQL ANALYZE (0, 10, '5s') test;
| plan_type_| plan_|
+-+-+
| Plan with Metrics | PromInstantManipulateExec: range=[0..10000], lookback=[300000], interval=[5000], time index=[j], REDACTED
-|_|_PromSeriesNormalizeExec: offset=[0], time index=[j], filter NaN: [false], REDACTED
-|_|_RepartitionExec: partitioning=REDACTED
|_|_RepartitionExec: partitioning=REDACTED
+|_|_PromSeriesNormalizeExec: offset=[0], time index=[j], filter NaN: [false], REDACTED
|_|_PromSeriesDivideExec: tags=["k"], REDACTED
+|_|_SortExec: expr=[k@2 DESC NULLS LAST,j@1 DESC NULLS LAST], REDACTED
|_|_MergeScanExec: peers=[REDACTED
|_|_|
+-+-+
diff --git a/tests/cases/distributed/tql-explain-analyze/explain.result b/tests/cases/distributed/tql-explain-analyze/explain.result
index 5f924aafde1b..9c6bf36e3fc3 100644
--- a/tests/cases/distributed/tql-explain-analyze/explain.result
+++ b/tests/cases/distributed/tql-explain-analyze/explain.result
@@ -22,8 +22,8 @@ TQL EXPLAIN (0, 10, '5s') test;
| | MergeScan [is_placeholder=false] |
| physical_plan | PromInstantManipulateExec: range=[0..0], lookback=[300000], interval=[300000], time index=[j] |
| | PromSeriesNormalizeExec: offset=[0], time index=[j], filter NaN: [false] |
-| | RepartitionExec: partitioning=REDACTED
-| | PromSeriesDivideExec: tags=["k"] |
+| | PromSeriesDivideExec: tags=["k"] |
+| | SortExec: expr=[k@2 DESC NULLS LAST,j@1 DESC NULLS LAST] |
| | MergeScanExec: peers=[REDACTED
| | |
+---------------+-----------------------------------------------------------------------------------------------+
@@ -57,8 +57,8 @@ TQL EXPLAIN host_load1{__field__="val"};
| | MergeScan [is_placeholder=false] |
| physical_plan | PromInstantManipulateExec: range=[0..0], lookback=[300000], interval=[300000], time index=[ts] |
| | PromSeriesNormalizeExec: offset=[0], time index=[ts], filter NaN: [false] |
-| | RepartitionExec: partitioning=REDACTED
-| | PromSeriesDivideExec: tags=["collector", "host"] |
+| | PromSeriesDivideExec: tags=["collector", "host"] |
+| | SortExec: expr=[collector@1 DESC NULLS LAST,host@2 DESC NULLS LAST,ts@3 DESC NULLS LAST] |
| | MergeScanExec: peers=[REDACTED
| | |
+---------------+------------------------------------------------------------------------------------------------------------------+
diff --git a/tests/cases/standalone/tql-explain-analyze/analyze.result b/tests/cases/standalone/tql-explain-analyze/analyze.result
index 38d7b58777c2..46e4b3b4e7a3 100644
--- a/tests/cases/standalone/tql-explain-analyze/analyze.result
+++ b/tests/cases/standalone/tql-explain-analyze/analyze.result
@@ -18,10 +18,10 @@ TQL ANALYZE (0, 10, '5s') test;
| plan_type_| plan_|
+-+-+
| Plan with Metrics | PromInstantManipulateExec: range=[0..10000], lookback=[300000], interval=[5000], time index=[j], REDACTED
-|_|_PromSeriesNormalizeExec: offset=[0], time index=[j], filter NaN: [false], REDACTED
-|_|_RepartitionExec: partitioning=REDACTED
|_|_RepartitionExec: partitioning=REDACTED
+|_|_PromSeriesNormalizeExec: offset=[0], time index=[j], filter NaN: [false], REDACTED
|_|_PromSeriesDivideExec: tags=["k"], REDACTED
+|_|_SortExec: expr=[k@2 DESC NULLS LAST,j@1 DESC NULLS LAST], REDACTED
|_|_StreamScanAdapter { stream: "<SendableRecordBatchStream>", schema: [Field { name: "i", data_type: Float64, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }, Field { name: "j", data_type: Timestamp(Millisecond, None), nullable: false, dict_id: 0, dict_is_ordered: false, metadata: {"greptime:time_index": "true"} }, Field { name: "k", data_type: Utf8, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }] }, REDACTED
|_|_|
+-+-+
diff --git a/tests/cases/standalone/tql-explain-analyze/explain.result b/tests/cases/standalone/tql-explain-analyze/explain.result
index 4c00bbeef142..ae1b4bb9368d 100644
--- a/tests/cases/standalone/tql-explain-analyze/explain.result
+++ b/tests/cases/standalone/tql-explain-analyze/explain.result
@@ -21,8 +21,8 @@ TQL EXPLAIN (0, 10, '5s') test;
| | TableScan: test projection=[i, j, k], partial_filters=[j >= TimestampMillisecond(-300000, None), j <= TimestampMillisecond(300000, None)] |
| physical_plan | PromInstantManipulateExec: range=[0..0], lookback=[300000], interval=[300000], time index=[j] |
| | PromSeriesNormalizeExec: offset=[0], time index=[j], filter NaN: [false] |
-| | RepartitionExec: partitioning=REDACTED
-| | PromSeriesDivideExec: tags=["k"] |
+| | PromSeriesDivideExec: tags=["k"] |
+| | SortExec: expr=[k@2 DESC NULLS LAST,j@1 DESC NULLS LAST] |
| | StreamScanAdapter { stream: "<SendableRecordBatchStream>", schema: [Field { name: "i", data_type: Float64, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }, Field { name: "j", data_type: Timestamp(Millisecond, None), nullable: false, dict_id: 0, dict_is_ordered: false, metadata: {"greptime:time_index": "true"} }, Field { name: "k", data_type: Utf8, nullable: true, dict_id: 0, dict_is_ordered: false, metadata: {} }] } |
| | |
+---------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
|
fix
|
specify input ordering and distribution for prom plan (#2204)
|
bd98a26ccaa24e3d26681a1054d2d21972755bd3
|
2023-03-07 08:22:42
|
Weny Xu
|
chore: bump greptime-proto to latest(ad01872) (#1102)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 6439d25f14ca..7049f98ff9ca 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3043,7 +3043,7 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"
[[package]]
name = "greptime-proto"
version = "0.1.0"
-source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=1599ae2a0d1d8f42ee23ed26e4ad7a7b34134c60#1599ae2a0d1d8f42ee23ed26e4ad7a7b34134c60"
+source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=ad0187295035e83f76272da553453e649b7570de#ad0187295035e83f76272da553453e649b7570de"
dependencies = [
"prost",
"tonic",
diff --git a/src/api/Cargo.toml b/src/api/Cargo.toml
index 14c14546dfc1..bc4f957659b6 100644
--- a/src/api/Cargo.toml
+++ b/src/api/Cargo.toml
@@ -10,7 +10,7 @@ common-base = { path = "../common/base" }
common-error = { path = "../common/error" }
common-time = { path = "../common/time" }
datatypes = { path = "../datatypes" }
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "1599ae2a0d1d8f42ee23ed26e4ad7a7b34134c60" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "ad0187295035e83f76272da553453e649b7570de" }
prost.workspace = true
snafu = { version = "0.7", features = ["backtraces"] }
tonic.workspace = true
diff --git a/src/datanode/src/instance/grpc.rs b/src/datanode/src/instance/grpc.rs
index 78e3e181945e..8c229520fb84 100644
--- a/src/datanode/src/instance/grpc.rs
+++ b/src/datanode/src/instance/grpc.rs
@@ -98,6 +98,7 @@ impl Instance {
DdlExpr::Alter(expr) => self.handle_alter(expr).await,
DdlExpr::CreateDatabase(expr) => self.handle_create_database(expr, query_ctx).await,
DdlExpr::DropTable(expr) => self.handle_drop_table(expr).await,
+ DdlExpr::FlushTable(_) => todo!(),
}
}
}
diff --git a/src/frontend/src/instance/distributed/grpc.rs b/src/frontend/src/instance/distributed/grpc.rs
index 76888ed63b44..513e4cac84ac 100644
--- a/src/frontend/src/instance/distributed/grpc.rs
+++ b/src/frontend/src/instance/distributed/grpc.rs
@@ -57,6 +57,7 @@ impl GrpcQueryHandler for DistInstance {
TableName::new(&expr.catalog_name, &expr.schema_name, &expr.table_name);
self.drop_table(table_name).await
}
+ DdlExpr::FlushTable(_) => todo!(),
}
}
}
|
chore
|
bump greptime-proto to latest(ad01872) (#1102)
|
c915916b621c4024da607dcf735a33f4ee457de7
|
2024-05-16 12:00:20
|
Weny Xu
|
feat(cli): export metric physical tables first (#3949)
| false
|
diff --git a/src/cmd/src/cli.rs b/src/cmd/src/cli.rs
index a5f089bfe705..b47293240c98 100644
--- a/src/cmd/src/cli.rs
+++ b/src/cmd/src/cli.rs
@@ -64,6 +64,10 @@ impl App for Instance {
self.tool.do_work().await
}
+ fn wait_signal(&self) -> bool {
+ false
+ }
+
async fn stop(&self) -> Result<()> {
Ok(())
}
diff --git a/src/cmd/src/cli/export.rs b/src/cmd/src/cli/export.rs
index 70ca80d11db3..d653889dae68 100644
--- a/src/cmd/src/cli/export.rs
+++ b/src/cmd/src/cli/export.rs
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::collections::HashSet;
use std::path::Path;
use std::sync::Arc;
@@ -28,6 +29,7 @@ use snafu::{OptionExt, ResultExt};
use tokio::fs::File;
use tokio::io::{AsyncWriteExt, BufWriter};
use tokio::sync::Semaphore;
+use tokio::time::Instant;
use crate::cli::{Instance, Tool};
use crate::error::{
@@ -176,6 +178,28 @@ impl Export {
/// Return a list of [`TableReference`] to be exported.
/// Includes all tables under the given `catalog` and `schema`
async fn get_table_list(&self, catalog: &str, schema: &str) -> Result<Vec<TableReference>> {
+ // Put all metric physical tables first
+ let sql = format!(
+ "select table_catalog, table_schema, table_name from \
+ information_schema.columns where column_name = '__tsid' \
+ and table_catalog = \'{catalog}\' and table_schema = \'{schema}\'"
+ );
+ let result = self.sql(&sql).await?;
+ let Some(records) = result else {
+ EmptyResultSnafu.fail()?
+ };
+ let mut metric_physical_tables = HashSet::with_capacity(records.len());
+ for value in records {
+ let mut t = Vec::with_capacity(3);
+ for v in &value {
+ let serde_json::Value::String(value) = v else {
+ unreachable!()
+ };
+ t.push(value);
+ }
+ metric_physical_tables.insert((t[0].clone(), t[1].clone(), t[2].clone()));
+ }
+
// TODO: SQL injection hurts
let sql = format!(
"select table_catalog, table_schema, table_name from \
@@ -193,7 +217,7 @@ impl Export {
return Ok(vec![]);
}
- let mut result = Vec::with_capacity(records.len());
+ let mut remaining_tables = Vec::with_capacity(records.len());
for value in records {
let mut t = Vec::with_capacity(3);
for v in &value {
@@ -202,10 +226,17 @@ impl Export {
};
t.push(value);
}
- result.push((t[0].clone(), t[1].clone(), t[2].clone()));
+ let table = (t[0].clone(), t[1].clone(), t[2].clone());
+ // Skip metric physical tables; they are already collected above
+ if !metric_physical_tables.contains(&table) {
+ remaining_tables.push(table);
+ }
}
+ let mut tables = Vec::with_capacity(metric_physical_tables.len() + remaining_tables.len());
+ tables.extend(metric_physical_tables.into_iter());
+ tables.extend(remaining_tables);
- Ok(result)
+ Ok(tables)
}
async fn show_create_table(&self, catalog: &str, schema: &str, table: &str) -> Result<String> {
@@ -225,6 +256,7 @@ impl Export {
}
async fn export_create_table(&self) -> Result<()> {
+ let timer = Instant::now();
let semaphore = Arc::new(Semaphore::new(self.parallelism));
let db_names = self.iter_db_names().await?;
let db_count = db_names.len();
@@ -270,12 +302,14 @@ impl Export {
})
.count();
- info!("success {success}/{db_count} jobs");
+ let elapsed = timer.elapsed();
+ info!("Success {success}/{db_count} jobs, cost: {:?}", elapsed);
Ok(())
}
async fn export_table_data(&self) -> Result<()> {
+ let timer = Instant::now();
let semaphore = Arc::new(Semaphore::new(self.parallelism));
let db_names = self.iter_db_names().await?;
let db_count = db_names.len();
@@ -351,8 +385,8 @@ impl Export {
}
})
.count();
-
- info!("success {success}/{db_count} jobs");
+ let elapsed = timer.elapsed();
+ info!("Success {success}/{db_count} jobs, costs: {:?}", elapsed);
Ok(())
}
diff --git a/src/cmd/src/lib.rs b/src/cmd/src/lib.rs
index 7a5aa44ff488..715bd9fe3fb5 100644
--- a/src/cmd/src/lib.rs
+++ b/src/cmd/src/lib.rs
@@ -41,6 +41,11 @@ pub trait App: Send {
async fn start(&mut self) -> error::Result<()>;
+ /// Waits for the quit signal by default.
+ fn wait_signal(&self) -> bool {
+ true
+ }
+
async fn stop(&self) -> error::Result<()>;
}
@@ -51,11 +56,13 @@ pub async fn start_app(mut app: Box<dyn App>) -> error::Result<()> {
app.start().await?;
- if let Err(e) = tokio::signal::ctrl_c().await {
- error!("Failed to listen for ctrl-c signal: {}", e);
- // It's unusual to fail to listen for ctrl-c signal, maybe there's something unexpected in
- // the underlying system. So we stop the app instead of running nonetheless to let people
- // investigate the issue.
+ if app.wait_signal() {
+ if let Err(e) = tokio::signal::ctrl_c().await {
+ error!("Failed to listen for ctrl-c signal: {}", e);
+ // It's unusual to fail to listen for ctrl-c signal, maybe there's something unexpected in
+ // the underlying system. So we stop the app instead of running nonetheless to let people
+ // investigate the issue.
+ }
}
app.stop().await?;
|
feat
|
export metric physical tables first (#3949)
|
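A small sketch of the ordering idea in the export change above, under simplified types: metric physical tables are emitted first, then the remaining tables with the physical ones filtered out so nothing is exported twice.

```rust
use std::collections::HashSet;

/// Order tables so that physical tables come before the rest, without duplicates.
fn order_tables(physical: HashSet<String>, all: Vec<String>) -> Vec<String> {
    let mut tables: Vec<String> = physical.iter().cloned().collect();
    tables.extend(all.into_iter().filter(|t| !physical.contains(t)));
    tables
}

fn main() {
    let physical: HashSet<_> = ["metrics_phy".to_string()].into_iter().collect();
    let all = vec!["metrics_phy".to_string(), "cpu".to_string(), "mem".to_string()];
    assert_eq!(order_tables(physical, all), vec!["metrics_phy", "cpu", "mem"]);
}
```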
69420793e2c74c91a1395b33400392f27f0e2780
|
2024-10-30 17:46:22
|
Ning Sun
|
feat: implement parse_query api (#4860)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 26554d7f81c9..53ab681cd6a5 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -8627,9 +8627,9 @@ dependencies = [
[[package]]
name = "promql-parser"
-version = "0.4.1"
+version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0c1ad4a4cfa84ec4aa5831c82e57af0a3faf3f0af83bee13fa1390b2d0a32dc9"
+checksum = "7fe99e6f80a79abccf1e8fb48dd63473a36057e600cc6ea36147c8318698ae6f"
dependencies = [
"cfgrammar",
"chrono",
@@ -8637,6 +8637,8 @@ dependencies = [
"lrlex",
"lrpar",
"regex",
+ "serde",
+ "serde_json",
]
[[package]]
diff --git a/Cargo.toml b/Cargo.toml
index 2e7f70d2ab94..e45236758f25 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -145,7 +145,7 @@ parquet = { version = "51.0.0", default-features = false, features = ["arrow", "
paste = "1.0"
pin-project = "1.0"
prometheus = { version = "0.13.3", features = ["process"] }
-promql-parser = { version = "0.4.1" }
+promql-parser = { version = "0.4.3", features = ["ser"] }
prost = "0.12"
raft-engine = { version = "0.4.1", default-features = false }
rand = "0.8"
diff --git a/src/servers/src/http.rs b/src/servers/src/http.rs
index de4f2cf5ff44..12ac06db9070 100644
--- a/src/servers/src/http.rs
+++ b/src/servers/src/http.rs
@@ -62,8 +62,8 @@ use crate::http::influxdb::{influxdb_health, influxdb_ping, influxdb_write_v1, i
use crate::http::influxdb_result_v1::InfluxdbV1Response;
use crate::http::json_result::JsonResponse;
use crate::http::prometheus::{
- build_info_query, format_query, instant_query, label_values_query, labels_query, range_query,
- series_query,
+ build_info_query, format_query, instant_query, label_values_query, labels_query, parse_query,
+ range_query, series_query,
};
use crate::interceptor::LogIngestInterceptorRef;
use crate::metrics::http_metrics_layer;
@@ -819,6 +819,7 @@ impl HttpServer {
.route("/query_range", routing::post(range_query).get(range_query))
.route("/labels", routing::post(labels_query).get(labels_query))
.route("/series", routing::post(series_query).get(series_query))
+ .route("/parse_query", routing::post(parse_query).get(parse_query))
.route(
"/label/:label_name/values",
routing::get(label_values_query),
diff --git a/src/servers/src/http/prometheus.rs b/src/servers/src/http/prometheus.rs
index 941cac253972..6f749f259508 100644
--- a/src/servers/src/http/prometheus.rs
+++ b/src/servers/src/http/prometheus.rs
@@ -101,6 +101,9 @@ pub enum PrometheusResponse {
LabelValues(Vec<String>),
FormatQuery(String),
BuildInfo(OwnedBuildInfo),
+ #[schemars(skip)]
+ #[serde(skip_deserializing)]
+ ParseResult(promql_parser::parser::Expr),
}
impl Default for PrometheusResponse {
@@ -1014,3 +1017,33 @@ pub async fn series_query(
resp.resp_metrics = merge_map;
resp
}
+
+#[derive(Debug, Default, Serialize, Deserialize, JsonSchema)]
+pub struct ParseQuery {
+ query: Option<String>,
+ db: Option<String>,
+}
+
+#[axum_macros::debug_handler]
+#[tracing::instrument(
+ skip_all,
+ fields(protocol = "prometheus", request_type = "parse_query")
+)]
+pub async fn parse_query(
+ State(_handler): State<PrometheusHandlerRef>,
+ Query(params): Query<ParseQuery>,
+ Extension(_query_ctx): Extension<QueryContext>,
+ Form(form_params): Form<ParseQuery>,
+) -> PrometheusJsonResponse {
+ if let Some(query) = params.query.or(form_params.query) {
+ match promql_parser::parser::parse(&query) {
+ Ok(ast) => PrometheusJsonResponse::success(PrometheusResponse::ParseResult(ast)),
+ Err(err) => {
+ let msg = err.to_string();
+ PrometheusJsonResponse::error(StatusCode::InvalidArguments, msg)
+ }
+ }
+ } else {
+ PrometheusJsonResponse::error(StatusCode::InvalidArguments, "query is required")
+ }
+}
diff --git a/tests-integration/tests/http.rs b/tests-integration/tests/http.rs
index 823de40d1124..638734faba4f 100644
--- a/tests-integration/tests/http.rs
+++ b/tests-integration/tests/http.rs
@@ -660,6 +660,29 @@ pub async fn test_prom_http_api(store_type: StorageType) {
let body = serde_json::from_str::<PrometheusJsonResponse>(&res.text().await).unwrap();
assert_eq!(body.status, "success");
+ // parse_query
+ let res = client
+ .get("/v1/prometheus/api/v1/parse_query?query=http_requests")
+ .send()
+ .await;
+ assert_eq!(res.status(), StatusCode::OK);
+ let data = res.text().await;
+ // We don't have deserialization for the AST, so keep the test simple and
+ // compare the JSON output directly. Correctness is covered by the parser;
+ // this test only checks the response format.
+ let expected = "{\"status\":\"success\",\"data\":{\"type\":\"vectorSelector\",\"name\":\"http_requests\",\"matchers\":[],\"offset\":0,\"startOrEnd\":null,\"timestamp\":null}}";
+ assert_eq!(expected, data);
+
+ let res = client
+ .get("/v1/prometheus/api/v1/parse_query?query=not http_requests")
+ .send()
+ .await;
+ assert_eq!(res.status(), StatusCode::BAD_REQUEST);
+ let data = res.text().await;
+ let expected = "{\"status\":\"error\",\"data\":{\"resultType\":\"\",\"result\":[]},\"error\":\"invalid promql query\",\"errorType\":\"InvalidArguments\"}";
+ assert_eq!(expected, data);
+
guard.remove_all().await;
}
|
feat
|
implement parse_query api (#4860)
|
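A hedged sketch of what the new endpoint does under the hood, assuming the promql-parser 0.4 API with the "ser" feature enabled as in the diff above: parse the query into an AST and serialize it to JSON, or surface the parser error.

```rust
// Deps assumed: promql-parser = { version = "0.4", features = ["ser"] }, serde_json = "1".
fn parse_query_to_json(query: &str) -> Result<String, String> {
    // `parse` returns the AST on success or a human-readable error string.
    let ast = promql_parser::parser::parse(query)?;
    serde_json::to_string(&ast).map_err(|e| e.to_string())
}

fn main() {
    // Mirrors the integration test above: a plain selector parses, `not http_requests` does not.
    assert!(parse_query_to_json("http_requests").is_ok());
    assert!(parse_query_to_json("not http_requests").is_err());
}
```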
8ed5bc5305cd65d840db19458aafaaec22416aed
|
2024-10-29 21:16:24
|
Yohan Wal
|
refactor: json conversion (#4893)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 40b5948bcc7d..0d8058f6a9ab 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -5524,7 +5524,7 @@ dependencies = [
[[package]]
name = "jsonb"
version = "0.4.1"
-source = "git+https://github.com/datafuselabs/jsonb.git?rev=46ad50fc71cf75afbf98eec455f7892a6387c1fc#46ad50fc71cf75afbf98eec455f7892a6387c1fc"
+source = "git+https://github.com/databendlabs/jsonb.git?rev=46ad50fc71cf75afbf98eec455f7892a6387c1fc#46ad50fc71cf75afbf98eec455f7892a6387c1fc"
dependencies = [
"byteorder",
"fast-float",
diff --git a/Cargo.toml b/Cargo.toml
index 6795e8927f76..f06c83a458ba 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -125,7 +125,7 @@ greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", r
humantime = "2.1"
humantime-serde = "1.1"
itertools = "0.10"
-jsonb = { git = "https://github.com/datafuselabs/jsonb.git", rev = "46ad50fc71cf75afbf98eec455f7892a6387c1fc", default-features = false }
+jsonb = { git = "https://github.com/databendlabs/jsonb.git", rev = "46ad50fc71cf75afbf98eec455f7892a6387c1fc", default-features = false }
lazy_static = "1.4"
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "a10facb353b41460eeb98578868ebf19c2084fac" }
mockall = "0.11.4"
diff --git a/src/datatypes/src/error.rs b/src/datatypes/src/error.rs
index 5a255dc0a644..aca9b883a952 100644
--- a/src/datatypes/src/error.rs
+++ b/src/datatypes/src/error.rs
@@ -189,6 +189,13 @@ pub enum Error {
location: Location,
},
+ #[snafu(display("Invalid JSON text: {}", value))]
+ InvalidJson {
+ value: String,
+ #[snafu(implicit)]
+ location: Location,
+ },
+
#[snafu(display("Value exceeds the precision {} bound", precision))]
ValueExceedsPrecision {
precision: u8,
@@ -222,7 +229,8 @@ impl ErrorExt for Error {
| DefaultValueType { .. }
| DuplicateMeta { .. }
| InvalidTimestampPrecision { .. }
- | InvalidPrecisionOrScale { .. } => StatusCode::InvalidArguments,
+ | InvalidPrecisionOrScale { .. }
+ | InvalidJson { .. } => StatusCode::InvalidArguments,
ValueExceedsPrecision { .. }
| CastType { .. }
diff --git a/src/datatypes/src/lib.rs b/src/datatypes/src/lib.rs
index 3766cebf8755..3ce78322fe97 100644
--- a/src/datatypes/src/lib.rs
+++ b/src/datatypes/src/lib.rs
@@ -13,6 +13,7 @@
// limitations under the License.
#![feature(let_chains)]
+#![feature(assert_matches)]
pub mod arrow_array;
pub mod data_type;
diff --git a/src/datatypes/src/vectors/binary.rs b/src/datatypes/src/vectors/binary.rs
index e2074f949c2b..9a690ea4114b 100644
--- a/src/datatypes/src/vectors/binary.rs
+++ b/src/datatypes/src/vectors/binary.rs
@@ -36,6 +36,36 @@ impl BinaryVector {
pub(crate) fn as_arrow(&self) -> &dyn Array {
&self.array
}
+
+ /// Creates a new binary vector of JSONB from a binary vector.
+ /// The binary vector must contain valid JSON strings.
+ pub fn convert_binary_to_json(&self) -> Result<BinaryVector> {
+ let arrow_array = self.to_arrow_array();
+ let mut vector = vec![];
+ for binary in arrow_array
+ .as_any()
+ .downcast_ref::<BinaryArray>()
+ .unwrap()
+ .iter()
+ {
+ let jsonb = if let Some(binary) = binary {
+ match jsonb::from_slice(binary) {
+ Ok(jsonb) => Some(jsonb.to_vec()),
+ Err(_) => {
+ let s = String::from_utf8_lossy(binary);
+ return error::InvalidJsonSnafu {
+ value: s.to_string(),
+ }
+ .fail();
+ }
+ }
+ } else {
+ None
+ };
+ vector.push(jsonb);
+ }
+ Ok(BinaryVector::from(vector))
+ }
}
impl From<BinaryArray> for BinaryVector {
@@ -233,6 +263,8 @@ vectors::impl_try_from_arrow_array_for_vector!(BinaryArray, BinaryVector);
#[cfg(test)]
mod tests {
+ use std::assert_matches::assert_matches;
+
use arrow::datatypes::DataType as ArrowDataType;
use common_base::bytes::Bytes;
use serde_json;
@@ -383,4 +415,52 @@ mod tests {
assert_eq!(b"four", vector.get_data(3).unwrap());
assert_eq!(builder.len(), 4);
}
+
+ #[test]
+ fn test_binary_json_conversion() {
+ // json strings
+ let json_strings = vec![
+ b"{\"hello\": \"world\"}".to_vec(),
+ b"{\"foo\": 1}".to_vec(),
+ b"123".to_vec(),
+ ];
+ let json_vector = BinaryVector::from(json_strings.clone())
+ .convert_binary_to_json()
+ .unwrap();
+ let jsonbs = json_strings
+ .iter()
+ .map(|v| jsonb::parse_value(v).unwrap().to_vec())
+ .collect::<Vec<_>>();
+ for i in 0..3 {
+ assert_eq!(
+ json_vector.get_ref(i).as_binary().unwrap().unwrap(),
+ jsonbs.get(i).unwrap().as_slice()
+ );
+ }
+
+ // jsonb
+ let json_vector = BinaryVector::from(jsonbs.clone())
+ .convert_binary_to_json()
+ .unwrap();
+ for i in 0..3 {
+ assert_eq!(
+ json_vector.get_ref(i).as_binary().unwrap().unwrap(),
+ jsonbs.get(i).unwrap().as_slice()
+ );
+ }
+
+ // binary with jsonb header (0x80, 0x40, 0x20)
+ let binary_with_jsonb_header: Vec<u8> = [0x80, 0x23, 0x40, 0x22].to_vec();
+ let error = BinaryVector::from(vec![binary_with_jsonb_header])
+ .convert_binary_to_json()
+ .unwrap_err();
+ assert_matches!(error, error::Error::InvalidJson { .. });
+
+ // invalid json string
+ let json_strings = vec![b"{\"hello\": \"world\"".to_vec()];
+ let error = BinaryVector::from(json_strings)
+ .convert_binary_to_json()
+ .unwrap_err();
+ assert_matches!(error, error::Error::InvalidJson { .. });
+ }
}
diff --git a/src/datatypes/src/vectors/operations.rs b/src/datatypes/src/vectors/operations.rs
index b2de83c6e6f3..caa0730f7298 100644
--- a/src/datatypes/src/vectors/operations.rs
+++ b/src/datatypes/src/vectors/operations.rs
@@ -18,6 +18,8 @@ mod find_unique;
mod replicate;
mod take;
+use std::sync::Arc;
+
use common_base::BitVec;
use crate::error::{self, Result};
@@ -89,6 +91,12 @@ macro_rules! impl_scalar_vector_op {
}
fn cast(&self, to_type: &ConcreteDataType) -> Result<VectorRef> {
+ if to_type == &ConcreteDataType::json_datatype() {
+ if let Some(vector) = self.as_any().downcast_ref::<BinaryVector>() {
+ let json_vector = vector.convert_binary_to_json()?;
+ return Ok(Arc::new(json_vector) as VectorRef);
+ }
+ }
cast::cast_non_constant!(self, to_type)
}
diff --git a/src/servers/src/postgres/types.rs b/src/servers/src/postgres/types.rs
index c268b893d847..a7ce2252b955 100644
--- a/src/servers/src/postgres/types.rs
+++ b/src/servers/src/postgres/types.rs
@@ -961,7 +961,7 @@ pub(super) fn parameters_to_scalar_values(
if let Some(server_type) = &server_type {
match server_type {
ConcreteDataType::Binary(_) => {
- ScalarValue::Binary(data.map(|d| jsonb::Value::from(d).to_vec()))
+ ScalarValue::Binary(data.map(|d| d.to_string().into_bytes()))
}
_ => {
return Err(invalid_parameter_error(
@@ -971,7 +971,7 @@ pub(super) fn parameters_to_scalar_values(
}
}
} else {
- ScalarValue::Binary(data.map(|d| jsonb::Value::from(d).to_vec()))
+ ScalarValue::Binary(data.map(|d| d.to_string().into_bytes()))
}
}
_ => Err(invalid_parameter_error(
diff --git a/tests-integration/tests/sql.rs b/tests-integration/tests/sql.rs
index 19acc37ea6c6..af9374a2a826 100644
--- a/tests-integration/tests/sql.rs
+++ b/tests-integration/tests/sql.rs
@@ -145,7 +145,7 @@ pub async fn test_mysql_crud(store_type: StorageType) {
.unwrap();
sqlx::query(
- "create table demo(i bigint, ts timestamp time index default current_timestamp, d date default null, dt datetime default null, b blob default null)",
+ "create table demo(i bigint, ts timestamp time index default current_timestamp, d date default null, dt datetime default null, b blob default null, j json default null)",
)
.execute(&pool)
.await
@@ -158,18 +158,30 @@ pub async fn test_mysql_crud(store_type: StorageType) {
let d = NaiveDate::from_yo_opt(2015, 100).unwrap();
let hello = format!("hello{i}");
let bytes = hello.as_bytes();
- sqlx::query("insert into demo values(?, ?, ?, ?, ?)")
+ let jsons = serde_json::json!({
+ "code": i,
+ "success": true,
+ "payload": {
+ "features": [
+ "serde",
+ "json"
+ ],
+ "homepage": null
+ }
+ });
+ sqlx::query("insert into demo values(?, ?, ?, ?, ?, ?)")
.bind(i)
.bind(i)
.bind(d)
.bind(dt)
.bind(bytes)
+ .bind(jsons)
.execute(&pool)
.await
.unwrap();
}
- let rows = sqlx::query("select i, d, dt, b from demo")
+ let rows = sqlx::query("select i, d, dt, b, j from demo")
.fetch_all(&pool)
.await
.unwrap();
@@ -180,6 +192,7 @@ pub async fn test_mysql_crud(store_type: StorageType) {
let d: NaiveDate = row.get("d");
let dt: DateTime<Utc> = row.get("dt");
let bytes: Vec<u8> = row.get("b");
+ let json: serde_json::Value = row.get("j");
assert_eq!(ret, i as i64);
let expected_d = NaiveDate::from_yo_opt(2015, 100).unwrap();
assert_eq!(expected_d, d);
@@ -194,6 +207,18 @@ pub async fn test_mysql_crud(store_type: StorageType) {
format!("{}", dt.format("%Y-%m-%d %H:%M:%S"))
);
assert_eq!(format!("hello{i}"), String::from_utf8_lossy(&bytes));
+ let expected_j = serde_json::json!({
+ "code": i,
+ "success": true,
+ "payload": {
+ "features": [
+ "serde",
+ "json"
+ ],
+ "homepage": null
+ }
+ });
+ assert_eq!(json, expected_j);
}
let rows = sqlx::query("select i from demo where i=?")
@@ -396,7 +421,7 @@ pub async fn test_postgres_crud(store_type: StorageType) {
let dt = d.and_hms_opt(0, 0, 0).unwrap().and_utc().timestamp_millis();
let bytes = "hello".as_bytes();
let json = serde_json::json!({
- "code": 200,
+ "code": i,
"success": true,
"payload": {
"features": [
@@ -444,7 +469,7 @@ pub async fn test_postgres_crud(store_type: StorageType) {
assert_eq!("hello".as_bytes(), bytes);
let expected_j = serde_json::json!({
- "code": 200,
+ "code": i,
"success": true,
"payload": {
"features": [
|
refactor
|
json conversion (#4893)
|
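A minimal sketch, assuming the jsonb 0.4 API used in the diff above, of the round-trip the new conversion relies on: JSON text is parsed into a jsonb value and re-encoded as JSONB bytes, which is the form `convert_binary_to_json` stores.

```rust
// Dep assumed: the jsonb crate revision pinned in the workspace above.
fn json_text_to_jsonb(text: &[u8]) -> Result<Vec<u8>, String> {
    // parse_value accepts JSON text; to_vec() produces the JSONB binary encoding.
    jsonb::parse_value(text)
        .map(|v| v.to_vec())
        .map_err(|e| e.to_string())
}

fn main() {
    let encoded = json_text_to_jsonb(b"{\"hello\": \"world\"}").unwrap();
    // Already-encoded JSONB is accepted by from_slice, mirroring the vector test above.
    assert!(jsonb::from_slice(&encoded).is_ok());
    // Truncated JSON text is rejected, as in the InvalidJson test case.
    assert!(json_text_to_jsonb(b"{\"hello\": \"world\"").is_err());
}
```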
fbea07ea83804706e9555c5de512c238719988b5
|
2022-10-19 11:38:54
|
Ruihang Xia
|
chore: remove unused dependencies (#319)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index fbdeae5405fe..b6ff256ba847 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -790,7 +790,6 @@ version = "0.1.0"
dependencies = [
"api",
"async-stream",
- "catalog",
"common-base",
"common-error",
"common-grpc",
@@ -950,7 +949,6 @@ name = "common-recordbatch"
version = "0.1.0"
dependencies = [
"common-error",
- "datafusion",
"datafusion-common",
"datatypes",
"futures",
@@ -1002,7 +1000,6 @@ name = "common-time"
version = "0.1.0"
dependencies = [
"chrono",
- "common-error",
"serde",
"serde_json",
"snafu",
@@ -1733,7 +1730,6 @@ dependencies = [
"arrow2",
"async-stream",
"async-trait",
- "catalog",
"client",
"common-base",
"common-error",
@@ -1747,7 +1743,6 @@ dependencies = [
"datanode",
"datatypes",
"futures",
- "query",
"serde",
"servers",
"snafu",
@@ -4549,7 +4544,6 @@ dependencies = [
"axum-test-helper",
"bytes",
"catalog",
- "client",
"common-base",
"common-error",
"common-grpc",
@@ -4574,7 +4568,6 @@ dependencies = [
"serde",
"serde_json",
"snafu",
- "table",
"test-util",
"tokio",
"tokio-postgres",
@@ -4870,7 +4863,6 @@ dependencies = [
"datatypes",
"derive_builder",
"futures",
- "object-store",
"serde",
"serde_json",
"snafu",
diff --git a/src/client/Cargo.toml b/src/client/Cargo.toml
index 49854595db1e..82bb407f9c67 100644
--- a/src/client/Cargo.toml
+++ b/src/client/Cargo.toml
@@ -7,7 +7,6 @@ edition = "2021"
[dependencies]
api = { path = "../api" }
async-stream = "0.3"
-catalog = { path = "../catalog" }
common-base = { path = "../common/base" }
common-error = { path = "../common/error" }
common-grpc = { path = "../common/grpc" }
diff --git a/src/common/function-macro/Cargo.toml b/src/common/function-macro/Cargo.toml
index 2bbb4e2d67cd..51749b388cf1 100644
--- a/src/common/function-macro/Cargo.toml
+++ b/src/common/function-macro/Cargo.toml
@@ -7,12 +7,12 @@ edition = "2021"
proc-macro = true
[dependencies]
-common-query = { path = "../query" }
-datatypes = { path = "../../datatypes" }
quote = "1.0"
-snafu = { version = "0.7", features = ["backtraces"] }
syn = "1.0"
[dev-dependencies]
arc-swap = "1.0"
+common-query = { path = "../query" }
+datatypes = { path = "../../datatypes" }
+snafu = { version = "0.7", features = ["backtraces"] }
static_assertions = "1.1.0"
diff --git a/src/common/recordbatch/Cargo.toml b/src/common/recordbatch/Cargo.toml
index aa6aa7dfab3f..69e5f889ce2d 100644
--- a/src/common/recordbatch/Cargo.toml
+++ b/src/common/recordbatch/Cargo.toml
@@ -5,7 +5,6 @@ edition = "2021"
[dependencies]
common-error = { path = "../error" }
-datafusion = { git = "https://github.com/apache/arrow-datafusion.git", branch = "arrow2", features = ["simd"] }
datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", branch = "arrow2" }
datatypes = { path = "../../datatypes" }
futures = "0.3"
diff --git a/src/common/time/Cargo.toml b/src/common/time/Cargo.toml
index 86bfaef05926..37c0fa89fc64 100644
--- a/src/common/time/Cargo.toml
+++ b/src/common/time/Cargo.toml
@@ -6,7 +6,6 @@ edition = "2021"
[dependencies]
chrono = "0.4"
-common-error = { path = "../error" }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
snafu = { version = "0.7", features = ["backtraces"] }
diff --git a/src/frontend/Cargo.toml b/src/frontend/Cargo.toml
index 3fea2cf4f2c5..274ef66c77e8 100644
--- a/src/frontend/Cargo.toml
+++ b/src/frontend/Cargo.toml
@@ -7,7 +7,6 @@ edition = "2021"
api = { path = "../api" }
async-stream = "0.3"
async-trait = "0.1"
-catalog = { path = "../catalog" }
client = { path = "../client" }
common-base = { path = "../common/base" }
common-error = { path = "../common/error" }
@@ -17,7 +16,6 @@ common-runtime = { path = "../common/runtime" }
common-telemetry = { path = "../common/telemetry" }
common-time = { path = "../common/time" }
datatypes = { path = "../datatypes" }
-query = { path = "../query" }
serde = "1.0"
servers = { path = "../servers" }
snafu = { version = "0.7", features = ["backtraces"] }
diff --git a/src/servers/Cargo.toml b/src/servers/Cargo.toml
index ef6e47a9e43b..4d2df5d0bb87 100644
--- a/src/servers/Cargo.toml
+++ b/src/servers/Cargo.toml
@@ -9,7 +9,6 @@ async-trait = "0.1"
axum = "0.6.0-rc.2"
axum-macros = "0.3.0-rc.1"
bytes = "1.2"
-client = { path = "../client" }
common-base = { path = "../common/base" }
common-error = { path = "../common/error" }
common-grpc = { path = "../common/grpc" }
@@ -27,11 +26,9 @@ metrics = "0.20"
num_cpus = "1.13"
opensrv-mysql = "0.1"
pgwire = { version = "0.4" }
-query = { path = "../query" }
serde = "1.0"
serde_json = "1.0"
snafu = { version = "0.7", features = ["backtraces"] }
-table = { path = "../table" }
tokio = { version = "1.20", features = ["full"] }
tokio-stream = { version = "0.1", features = ["net"] }
tonic = "0.8"
@@ -45,6 +42,7 @@ common-base = { path = "../common/base" }
mysql_async = { git = "https://github.com/Morranto/mysql_async.git", rev = "127b538" }
rand = "0.8"
script = { path = "../script", features = ["python"] }
+query = { path = "../query" }
test-util = { path = "../../test-util" }
tokio-postgres = "0.7"
tokio-test = "0.4"
diff --git a/src/storage/Cargo.toml b/src/storage/Cargo.toml
index 2826b1be9039..ccc0a48d0a89 100644
--- a/src/storage/Cargo.toml
+++ b/src/storage/Cargo.toml
@@ -20,7 +20,6 @@ datatypes = { path = "../datatypes" }
futures = "0.3"
futures-util = "0.3"
lazy_static = "1.4"
-log-store = { path = "../log-store" }
object-store = { path = "../object-store" }
paste = "1.0"
planus = "0.2"
@@ -40,6 +39,7 @@ uuid = { version = "1.1", features = ["v4"] }
atomic_float = "0.1"
criterion = "0.3"
datatypes = { path = "../datatypes", features = ["test"] }
+log-store = { path = "../log-store" }
rand = "0.8"
tempdir = "0.3"
diff --git a/src/store-api/Cargo.toml b/src/store-api/Cargo.toml
index 0258a660eb09..666b0814077b 100644
--- a/src/store-api/Cargo.toml
+++ b/src/store-api/Cargo.toml
@@ -14,7 +14,6 @@ common-time = { path = "../common/time" }
datatypes = { path = "../datatypes" }
derive_builder = "0.11"
futures = "0.3"
-object-store = { path = "../object-store" }
serde = { version = "1.0", features = ["derive"] }
snafu = { version = "0.7", features = ["backtraces"] }
|
chore
|
remove unused dependencies (#319)
|
aaa9b329085de638e5122bf132c239359dba65d6
|
2024-10-11 23:27:54
|
Ning Sun
|
feat: add more h3 functions (#4770)
| false
|
diff --git a/src/common/function/src/scalars/geo.rs b/src/common/function/src/scalars/geo.rs
index 37b6c0704b06..e47a1de9f2fa 100644
--- a/src/common/function/src/scalars/geo.rs
+++ b/src/common/function/src/scalars/geo.rs
@@ -29,18 +29,31 @@ impl GeoFunctions {
// geohash
registry.register(Arc::new(GeohashFunction));
registry.register(Arc::new(GeohashNeighboursFunction));
- // h3 family
+
+ // h3 index
registry.register(Arc::new(h3::H3LatLngToCell));
registry.register(Arc::new(h3::H3LatLngToCellString));
+
+ // h3 index inspection
registry.register(Arc::new(h3::H3CellBase));
- registry.register(Arc::new(h3::H3CellCenterChild));
- registry.register(Arc::new(h3::H3CellCenterLat));
- registry.register(Arc::new(h3::H3CellCenterLng));
registry.register(Arc::new(h3::H3CellIsPentagon));
- registry.register(Arc::new(h3::H3CellParent));
- registry.register(Arc::new(h3::H3CellResolution));
- registry.register(Arc::new(h3::H3CellToString));
- registry.register(Arc::new(h3::H3IsNeighbour));
registry.register(Arc::new(h3::H3StringToCell));
+ registry.register(Arc::new(h3::H3CellToString));
+ registry.register(Arc::new(h3::H3CellCenterLatLng));
+ registry.register(Arc::new(h3::H3CellResolution));
+
+ // h3 hierarchical grid
+ registry.register(Arc::new(h3::H3CellCenterChild));
+ registry.register(Arc::new(h3::H3CellParent));
+ registry.register(Arc::new(h3::H3CellToChildren));
+ registry.register(Arc::new(h3::H3CellToChildrenSize));
+ registry.register(Arc::new(h3::H3CellToChildPos));
+ registry.register(Arc::new(h3::H3ChildPosToCell));
+
+ // h3 grid traversal
+ registry.register(Arc::new(h3::H3GridDisk));
+ registry.register(Arc::new(h3::H3GridDiskDistances));
+ registry.register(Arc::new(h3::H3GridDistance));
+ registry.register(Arc::new(h3::H3GridPathCells));
}
}
diff --git a/src/common/function/src/scalars/geo/h3.rs b/src/common/function/src/scalars/geo/h3.rs
index 672fbfd7145d..00c567d8d727 100644
--- a/src/common/function/src/scalars/geo/h3.rs
+++ b/src/common/function/src/scalars/geo/h3.rs
@@ -20,18 +20,71 @@ use common_query::error::{self, InvalidFuncArgsSnafu, Result};
use common_query::prelude::{Signature, TypeSignature};
use datafusion::logical_expr::Volatility;
use datatypes::prelude::ConcreteDataType;
-use datatypes::scalars::ScalarVectorBuilder;
-use datatypes::value::Value;
+use datatypes::scalars::{Scalar, ScalarVectorBuilder};
+use datatypes::value::{ListValue, Value};
use datatypes::vectors::{
- BooleanVectorBuilder, Float64VectorBuilder, MutableVector, StringVectorBuilder,
- UInt64VectorBuilder, UInt8VectorBuilder, VectorRef,
+ BooleanVectorBuilder, Int32VectorBuilder, ListVectorBuilder, MutableVector,
+ StringVectorBuilder, UInt64VectorBuilder, UInt8VectorBuilder, VectorRef,
};
use derive_more::Display;
use h3o::{CellIndex, LatLng, Resolution};
+use once_cell::sync::Lazy;
use snafu::{ensure, ResultExt};
+use super::helpers::{ensure_columns_len, ensure_columns_n};
use crate::function::{Function, FunctionContext};
+static CELL_TYPES: Lazy<Vec<ConcreteDataType>> = Lazy::new(|| {
+ vec![
+ ConcreteDataType::int64_datatype(),
+ ConcreteDataType::uint64_datatype(),
+ ]
+});
+
+static COORDINATE_TYPES: Lazy<Vec<ConcreteDataType>> = Lazy::new(|| {
+ vec![
+ ConcreteDataType::float32_datatype(),
+ ConcreteDataType::float64_datatype(),
+ ]
+});
+static RESOLUTION_TYPES: Lazy<Vec<ConcreteDataType>> = Lazy::new(|| {
+ vec![
+ ConcreteDataType::int8_datatype(),
+ ConcreteDataType::int16_datatype(),
+ ConcreteDataType::int32_datatype(),
+ ConcreteDataType::int64_datatype(),
+ ConcreteDataType::uint8_datatype(),
+ ConcreteDataType::uint16_datatype(),
+ ConcreteDataType::uint32_datatype(),
+ ConcreteDataType::uint64_datatype(),
+ ]
+});
+static DISTANCE_TYPES: Lazy<Vec<ConcreteDataType>> = Lazy::new(|| {
+ vec![
+ ConcreteDataType::int8_datatype(),
+ ConcreteDataType::int16_datatype(),
+ ConcreteDataType::int32_datatype(),
+ ConcreteDataType::int64_datatype(),
+ ConcreteDataType::uint8_datatype(),
+ ConcreteDataType::uint16_datatype(),
+ ConcreteDataType::uint32_datatype(),
+ ConcreteDataType::uint64_datatype(),
+ ]
+});
+
+static POSITION_TYPES: Lazy<Vec<ConcreteDataType>> = Lazy::new(|| {
+ vec![
+ ConcreteDataType::int8_datatype(),
+ ConcreteDataType::int16_datatype(),
+ ConcreteDataType::int32_datatype(),
+ ConcreteDataType::int64_datatype(),
+ ConcreteDataType::uint8_datatype(),
+ ConcreteDataType::uint16_datatype(),
+ ConcreteDataType::uint32_datatype(),
+ ConcreteDataType::uint64_datatype(),
+ ]
+});
+
/// Function that returns [h3] encoding cellid for a given geospatial coordinate.
///
/// [h3]: https://h3geo.org/
@@ -50,20 +103,8 @@ impl Function for H3LatLngToCell {
fn signature(&self) -> Signature {
let mut signatures = Vec::new();
- for coord_type in &[
- ConcreteDataType::float32_datatype(),
- ConcreteDataType::float64_datatype(),
- ] {
- for resolution_type in &[
- ConcreteDataType::int8_datatype(),
- ConcreteDataType::int16_datatype(),
- ConcreteDataType::int32_datatype(),
- ConcreteDataType::int64_datatype(),
- ConcreteDataType::uint8_datatype(),
- ConcreteDataType::uint16_datatype(),
- ConcreteDataType::uint32_datatype(),
- ConcreteDataType::uint64_datatype(),
- ] {
+ for coord_type in COORDINATE_TYPES.as_slice() {
+ for resolution_type in RESOLUTION_TYPES.as_slice() {
signatures.push(TypeSignature::Exact(vec![
// latitude
coord_type.clone(),
@@ -78,15 +119,7 @@ impl Function for H3LatLngToCell {
}
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
- ensure!(
- columns.len() == 3,
- InvalidFuncArgsSnafu {
- err_msg: format!(
- "The length of the args is not correct, expect 3, provided : {}",
- columns.len()
- ),
- }
- );
+ ensure_columns_n!(columns, 3);
let lat_vec = &columns[0];
let lon_vec = &columns[1];
@@ -142,20 +175,8 @@ impl Function for H3LatLngToCellString {
fn signature(&self) -> Signature {
let mut signatures = Vec::new();
- for coord_type in &[
- ConcreteDataType::float32_datatype(),
- ConcreteDataType::float64_datatype(),
- ] {
- for resolution_type in &[
- ConcreteDataType::int8_datatype(),
- ConcreteDataType::int16_datatype(),
- ConcreteDataType::int32_datatype(),
- ConcreteDataType::int64_datatype(),
- ConcreteDataType::uint8_datatype(),
- ConcreteDataType::uint16_datatype(),
- ConcreteDataType::uint32_datatype(),
- ConcreteDataType::uint64_datatype(),
- ] {
+ for coord_type in COORDINATE_TYPES.as_slice() {
+ for resolution_type in RESOLUTION_TYPES.as_slice() {
signatures.push(TypeSignature::Exact(vec![
// latitude
coord_type.clone(),
@@ -170,15 +191,7 @@ impl Function for H3LatLngToCellString {
}
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
- ensure!(
- columns.len() == 3,
- InvalidFuncArgsSnafu {
- err_msg: format!(
- "The length of the args is not correct, expect 3, provided : {}",
- columns.len()
- ),
- }
- );
+ ensure_columns_n!(columns, 3);
let lat_vec = &columns[0];
let lon_vec = &columns[1];
@@ -234,15 +247,7 @@ impl Function for H3CellToString {
}
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
- ensure!(
- columns.len() == 1,
- InvalidFuncArgsSnafu {
- err_msg: format!(
- "The length of the args is not correct, expect 1, provided : {}",
- columns.len()
- ),
- }
- );
+ ensure_columns_n!(columns, 1);
let cell_vec = &columns[0];
let size = cell_vec.len();
@@ -280,15 +285,7 @@ impl Function for H3StringToCell {
}
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
- ensure!(
- columns.len() == 1,
- InvalidFuncArgsSnafu {
- err_msg: format!(
- "The length of the args is not correct, expect 1, provided : {}",
- columns.len()
- ),
- }
- );
+ ensure_columns_n!(columns, 1);
let string_vec = &columns[0];
let size = string_vec.len();
@@ -319,18 +316,20 @@ impl Function for H3StringToCell {
}
}
-/// Function that returns centroid latitude of given cell id
+/// Function that returns centroid latitude and longitude of given cell id
#[derive(Clone, Debug, Default, Display)]
#[display("{}", self.name())]
-pub struct H3CellCenterLat;
+pub struct H3CellCenterLatLng;
-impl Function for H3CellCenterLat {
+impl Function for H3CellCenterLatLng {
fn name(&self) -> &str {
- "h3_cell_center_lat"
+ "h3_cell_center_latlng"
}
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
- Ok(ConcreteDataType::float64_datatype())
+ Ok(ConcreteDataType::list_datatype(
+ ConcreteDataType::float64_datatype(),
+ ))
}
fn signature(&self) -> Signature {
@@ -338,69 +337,26 @@ impl Function for H3CellCenterLat {
}
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
- ensure!(
- columns.len() == 1,
- InvalidFuncArgsSnafu {
- err_msg: format!(
- "The length of the args is not correct, expect 1, provided : {}",
- columns.len()
- ),
- }
- );
+ ensure_columns_n!(columns, 1);
let cell_vec = &columns[0];
let size = cell_vec.len();
- let mut results = Float64VectorBuilder::with_capacity(size);
+ let mut results =
+ ListVectorBuilder::with_type_capacity(ConcreteDataType::float64_datatype(), size);
for i in 0..size {
let cell = cell_from_value(cell_vec.get(i))?;
- let lat = cell.map(|cell| LatLng::from(cell).lat());
-
- results.push(lat);
- }
-
- Ok(results.to_vector())
- }
-}
-
-/// Function that returns centroid longitude of given cell id
-#[derive(Clone, Debug, Default, Display)]
-#[display("{}", self.name())]
-pub struct H3CellCenterLng;
-
-impl Function for H3CellCenterLng {
- fn name(&self) -> &str {
- "h3_cell_center_lng"
- }
-
- fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
- Ok(ConcreteDataType::float64_datatype())
- }
-
- fn signature(&self) -> Signature {
- signature_of_cell()
- }
-
- fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
- ensure!(
- columns.len() == 1,
- InvalidFuncArgsSnafu {
- err_msg: format!(
- "The length of the args is not correct, expect 1, provided : {}",
- columns.len()
- ),
+ let latlng = cell.map(LatLng::from);
+
+ if let Some(latlng) = latlng {
+ let result = ListValue::new(
+ vec![latlng.lat().into(), latlng.lng().into()],
+ ConcreteDataType::float64_datatype(),
+ );
+ results.push(Some(result.as_scalar_ref()));
+ } else {
+ results.push(None);
}
- );
-
- let cell_vec = &columns[0];
- let size = cell_vec.len();
- let mut results = Float64VectorBuilder::with_capacity(size);
-
- for i in 0..size {
- let cell = cell_from_value(cell_vec.get(i))?;
- let lat = cell.map(|cell| LatLng::from(cell).lng());
-
- results.push(lat);
}
Ok(results.to_vector())
@@ -470,15 +426,7 @@ impl Function for H3CellBase {
}
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
- ensure!(
- columns.len() == 1,
- InvalidFuncArgsSnafu {
- err_msg: format!(
- "The length of the args is not correct, expect 1, provided : {}",
- columns.len()
- ),
- }
- );
+ ensure_columns_n!(columns, 1);
let cell_vec = &columns[0];
let size = cell_vec.len();
@@ -514,15 +462,7 @@ impl Function for H3CellIsPentagon {
}
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
- ensure!(
- columns.len() == 1,
- InvalidFuncArgsSnafu {
- err_msg: format!(
- "The length of the args is not correct, expect 1, provided : {}",
- columns.len()
- ),
- }
- );
+ ensure_columns_n!(columns, 1);
let cell_vec = &columns[0];
let size = cell_vec.len();
@@ -558,15 +498,7 @@ impl Function for H3CellCenterChild {
}
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
- ensure!(
- columns.len() == 2,
- InvalidFuncArgsSnafu {
- err_msg: format!(
- "The length of the args is not correct, expect 2, provided : {}",
- columns.len()
- ),
- }
- );
+ ensure_columns_n!(columns, 2);
let cell_vec = &columns[0];
let res_vec = &columns[1];
@@ -606,15 +538,7 @@ impl Function for H3CellParent {
}
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
- ensure!(
- columns.len() == 2,
- InvalidFuncArgsSnafu {
- err_msg: format!(
- "The length of the args is not correct, expect 2, provided : {}",
- columns.len()
- ),
- }
- );
+ ensure_columns_n!(columns, 2);
let cell_vec = &columns[0];
let res_vec = &columns[1];
@@ -633,48 +557,323 @@ impl Function for H3CellParent {
}
}
-/// Function that checks if two cells are neighbour
+/// Function that returns children cell list
#[derive(Clone, Debug, Default, Display)]
#[display("{}", self.name())]
-pub struct H3IsNeighbour;
+pub struct H3CellToChildren;
-impl Function for H3IsNeighbour {
+impl Function for H3CellToChildren {
fn name(&self) -> &str {
- "h3_is_neighbour"
+ "h3_cell_to_children"
}
fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
- Ok(ConcreteDataType::boolean_datatype())
+ Ok(ConcreteDataType::list_datatype(
+ ConcreteDataType::uint64_datatype(),
+ ))
}
fn signature(&self) -> Signature {
- signature_of_double_cell()
+ signature_of_cell_and_resolution()
}
fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
- ensure!(
- columns.len() == 2,
- InvalidFuncArgsSnafu {
- err_msg: format!(
- "The length of the args is not correct, expect 2, provided : {}",
- columns.len()
- ),
+ ensure_columns_n!(columns, 2);
+
+ let cell_vec = &columns[0];
+ let res_vec = &columns[1];
+ let size = cell_vec.len();
+ let mut results =
+ ListVectorBuilder::with_type_capacity(ConcreteDataType::uint64_datatype(), size);
+
+ for i in 0..size {
+ let cell = cell_from_value(cell_vec.get(i))?;
+ let res = value_to_resolution(res_vec.get(i))?;
+ let result = cell.map(|cell| {
+ let children: Vec<Value> = cell
+ .children(res)
+ .map(|child| Value::from(u64::from(child)))
+ .collect();
+ ListValue::new(children, ConcreteDataType::uint64_datatype())
+ });
+
+ if let Some(list_value) = result {
+ results.push(Some(list_value.as_scalar_ref()));
+ } else {
+ results.push(None);
}
- );
+ }
+
+ Ok(results.to_vector())
+ }
+}
+
+/// Function that returns the number of child cells at the given resolution
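+///
+/// e.g. `h3_cell_to_children_size(cell, 10)` (see the geo.result test below).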
+#[derive(Clone, Debug, Default, Display)]
+#[display("{}", self.name())]
+pub struct H3CellToChildrenSize;
+
+impl Function for H3CellToChildrenSize {
+ fn name(&self) -> &str {
+ "h3_cell_to_children_size"
+ }
+
+ fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
+ Ok(ConcreteDataType::uint64_datatype())
+ }
+
+ fn signature(&self) -> Signature {
+ signature_of_cell_and_resolution()
+ }
+
+ fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
+ ensure_columns_n!(columns, 2);
let cell_vec = &columns[0];
- let cell2_vec = &columns[1];
+ let res_vec = &columns[1];
let size = cell_vec.len();
- let mut results = BooleanVectorBuilder::with_capacity(size);
+ let mut results = UInt64VectorBuilder::with_capacity(size);
+
+ for i in 0..size {
+ let cell = cell_from_value(cell_vec.get(i))?;
+ let res = value_to_resolution(res_vec.get(i))?;
+ let result = cell.map(|cell| cell.children_count(res));
+ results.push(result);
+ }
+
+ Ok(results.to_vector())
+ }
+}
+
+/// Function that returns the cell's position within its parent at the given resolution
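+///
+/// e.g. `h3_cell_to_child_pos(cell, 6)` (see the geo.result test below).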
+#[derive(Clone, Debug, Default, Display)]
+#[display("{}", self.name())]
+pub struct H3CellToChildPos;
+
+impl Function for H3CellToChildPos {
+ fn name(&self) -> &str {
+ "h3_cell_to_child_pos"
+ }
+
+ fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
+ Ok(ConcreteDataType::uint64_datatype())
+ }
+
+ fn signature(&self) -> Signature {
+ signature_of_cell_and_resolution()
+ }
+
+ fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
+ ensure_columns_n!(columns, 2);
+
+ let cell_vec = &columns[0];
+ let res_vec = &columns[1];
+ let size = cell_vec.len();
+ let mut results = UInt64VectorBuilder::with_capacity(size);
+
+ for i in 0..size {
+ let cell = cell_from_value(cell_vec.get(i))?;
+ let res = value_to_resolution(res_vec.get(i))?;
+ let result = cell.and_then(|cell| cell.child_position(res));
+ results.push(result);
+ }
+
+ Ok(results.to_vector())
+ }
+}
+
+/// Function that returns the child cell at the given position of the parent at the given resolution
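+///
+/// e.g. `h3_child_pos_to_cell(25, cell, 11)` (see the geo.result test below).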
+#[derive(Clone, Debug, Default, Display)]
+#[display("{}", self.name())]
+pub struct H3ChildPosToCell;
+
+impl Function for H3ChildPosToCell {
+ fn name(&self) -> &str {
+ "h3_child_pos_to_cell"
+ }
+
+ fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
+ Ok(ConcreteDataType::uint64_datatype())
+ }
+
+ fn signature(&self) -> Signature {
+ let mut signatures =
+ Vec::with_capacity(POSITION_TYPES.len() * CELL_TYPES.len() * RESOLUTION_TYPES.len());
+ for position_type in POSITION_TYPES.as_slice() {
+ for cell_type in CELL_TYPES.as_slice() {
+ for resolution_type in RESOLUTION_TYPES.as_slice() {
+ signatures.push(TypeSignature::Exact(vec![
+ position_type.clone(),
+ cell_type.clone(),
+ resolution_type.clone(),
+ ]));
+ }
+ }
+ }
+ Signature::one_of(signatures, Volatility::Stable)
+ }
+
+ fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
+ ensure_columns_n!(columns, 3);
+
+ let pos_vec = &columns[0];
+ let cell_vec = &columns[1];
+ let res_vec = &columns[2];
+ let size = cell_vec.len();
+ let mut results = UInt64VectorBuilder::with_capacity(size);
+
+ for i in 0..size {
+ let cell = cell_from_value(cell_vec.get(i))?;
+ let pos = value_to_position(pos_vec.get(i))?;
+ let res = value_to_resolution(res_vec.get(i))?;
+ let result = cell.and_then(|cell| cell.child_at(pos, res).map(u64::from));
+ results.push(result);
+ }
+
+ Ok(results.to_vector())
+ }
+}
+
+/// Function that returns cells within k grid distance of the given cell
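+///
+/// e.g. `h3_grid_disk(cell, 3)` (see the geo.result test below).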
+#[derive(Clone, Debug, Default, Display)]
+#[display("{}", self.name())]
+pub struct H3GridDisk;
+
+impl Function for H3GridDisk {
+ fn name(&self) -> &str {
+ "h3_grid_disk"
+ }
+
+ fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
+ Ok(ConcreteDataType::list_datatype(
+ ConcreteDataType::uint64_datatype(),
+ ))
+ }
+
+ fn signature(&self) -> Signature {
+ signature_of_cell_and_distance()
+ }
+
+ fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
+ ensure_columns_n!(columns, 2);
+
+ let cell_vec = &columns[0];
+ let k_vec = &columns[1];
+ let size = cell_vec.len();
+ let mut results =
+ ListVectorBuilder::with_type_capacity(ConcreteDataType::uint64_datatype(), size);
+
+ for i in 0..size {
+ let cell = cell_from_value(cell_vec.get(i))?;
+ let k = value_to_distance(k_vec.get(i))?;
+
+ let result = cell.map(|cell| {
+ let children: Vec<Value> = cell
+ .grid_disk::<Vec<_>>(k)
+ .into_iter()
+ .map(|child| Value::from(u64::from(child)))
+ .collect();
+ ListValue::new(children, ConcreteDataType::uint64_datatype())
+ });
+
+ if let Some(list_value) = result {
+ results.push(Some(list_value.as_scalar_ref()));
+ } else {
+ results.push(None);
+ }
+ }
+
+ Ok(results.to_vector())
+ }
+}
+
+/// Function that returns all cells within k grid distance of the given cell
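+///
+/// e.g. `h3_grid_disk_distances(cell, 3)` (see the geo.result test below).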
+#[derive(Clone, Debug, Default, Display)]
+#[display("{}", self.name())]
+pub struct H3GridDiskDistances;
+
+impl Function for H3GridDiskDistances {
+ fn name(&self) -> &str {
+ "h3_grid_disk_distances"
+ }
+
+ fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
+ Ok(ConcreteDataType::list_datatype(
+ ConcreteDataType::uint64_datatype(),
+ ))
+ }
+
+ fn signature(&self) -> Signature {
+ signature_of_cell_and_distance()
+ }
+
+ fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
+ ensure_columns_n!(columns, 2);
+
+ let cell_vec = &columns[0];
+ let k_vec = &columns[1];
+ let size = cell_vec.len();
+ let mut results =
+ ListVectorBuilder::with_type_capacity(ConcreteDataType::uint64_datatype(), size);
+
+ for i in 0..size {
+ let cell = cell_from_value(cell_vec.get(i))?;
+ let k = value_to_distance(k_vec.get(i))?;
+
+ let result = cell.map(|cell| {
+ let children: Vec<Value> = cell
+ .grid_disk_distances::<Vec<_>>(k)
+ .into_iter()
+ .map(|(child, _distance)| Value::from(u64::from(child)))
+ .collect();
+ ListValue::new(children, ConcreteDataType::uint64_datatype())
+ });
+
+ if let Some(list_value) = result {
+ results.push(Some(list_value.as_scalar_ref()));
+ } else {
+ results.push(None);
+ }
+ }
+
+ Ok(results.to_vector())
+ }
+}
+
+/// Function that returns the grid distance between two cells
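+///
+/// e.g. `h3_grid_distance(cell1, cell2)` (see the geo.result test below).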
+#[derive(Clone, Debug, Default, Display)]
+#[display("{}", self.name())]
+pub struct H3GridDistance;
+
+impl Function for H3GridDistance {
+ fn name(&self) -> &str {
+ "h3_grid_distance"
+ }
+ fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
+ Ok(ConcreteDataType::int32_datatype())
+ }
+
+ fn signature(&self) -> Signature {
+ signature_of_double_cells()
+ }
+
+ fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
+ ensure_columns_n!(columns, 2);
+
+ let cell_this_vec = &columns[0];
+ let cell_that_vec = &columns[1];
+ let size = cell_this_vec.len();
+
+ let mut results = Int32VectorBuilder::with_capacity(size);
for i in 0..size {
let result = match (
- cell_from_value(cell_vec.get(i))?,
- cell_from_value(cell2_vec.get(i))?,
+ cell_from_value(cell_this_vec.get(i))?,
+ cell_from_value(cell_that_vec.get(i))?,
) {
(Some(cell_this), Some(cell_that)) => {
- let is_neighbour = cell_this
- .is_neighbor_with(cell_that)
+ let dist = cell_this
+ .grid_distance(cell_that)
.map_err(|e| {
BoxedError::new(PlainError::new(
format!("H3 error: {}", e),
@@ -682,7 +881,7 @@ impl Function for H3IsNeighbour {
))
})
.context(error::ExecuteSnafu)?;
- Some(is_neighbour)
+ Some(dist)
}
_ => None,
};
@@ -694,6 +893,73 @@ impl Function for H3IsNeighbour {
}
}
+/// Function that returns the cells along the grid path between two cells
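+///
+/// e.g. `h3_grid_path_cells(cell1, cell2)` (see the geo.result test below).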
+#[derive(Clone, Debug, Default, Display)]
+#[display("{}", self.name())]
+pub struct H3GridPathCells;
+
+impl Function for H3GridPathCells {
+ fn name(&self) -> &str {
+ "h3_grid_path_cells"
+ }
+
+ fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
+ Ok(ConcreteDataType::list_datatype(
+ ConcreteDataType::uint64_datatype(),
+ ))
+ }
+
+ fn signature(&self) -> Signature {
+ signature_of_double_cells()
+ }
+
+ fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
+ ensure_columns_n!(columns, 2);
+
+ let cell_this_vec = &columns[0];
+ let cell_that_vec = &columns[1];
+ let size = cell_this_vec.len();
+ let mut results =
+ ListVectorBuilder::with_type_capacity(ConcreteDataType::uint64_datatype(), size);
+
+ for i in 0..size {
+ let result = match (
+ cell_from_value(cell_this_vec.get(i))?,
+ cell_from_value(cell_that_vec.get(i))?,
+ ) {
+ (Some(cell_this), Some(cell_that)) => {
+ let cells = cell_this
+ .grid_path_cells(cell_that)
+ .and_then(|x| x.collect::<std::result::Result<Vec<CellIndex>, _>>())
+ .map_err(|e| {
+ BoxedError::new(PlainError::new(
+ format!("H3 error: {}", e),
+ StatusCode::EngineExecuteQuery,
+ ))
+ })
+ .context(error::ExecuteSnafu)?;
+ Some(ListValue::new(
+ cells
+ .into_iter()
+ .map(|c| Value::from(u64::from(c)))
+ .collect(),
+ ConcreteDataType::uint64_datatype(),
+ ))
+ }
+ _ => None,
+ };
+
+ if let Some(list_value) = result {
+ results.push(Some(list_value.as_scalar_ref()));
+ } else {
+ results.push(None);
+ }
+ }
+
+ Ok(results.to_vector())
+ }
+}
+
fn value_to_resolution(v: Value) -> Result<Resolution> {
let r = match v {
Value::Int8(v) => v as u8,
@@ -716,26 +982,59 @@ fn value_to_resolution(v: Value) -> Result<Resolution> {
.context(error::ExecuteSnafu)
}
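+// Shared guard used by the value_to_* conversions below: ensures the given
+// predicate holds (e.g. a signed input is non-negative) before returning the
+// coerced value, and raises an InvalidFuncArgs error otherwise.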
+macro_rules! ensure_and_coerce {
+ ($compare:expr, $coerce:expr) => {{
+ ensure!(
+ $compare,
+ InvalidFuncArgsSnafu {
+                err_msg: "Argument was outside of the acceptable range"
+ }
+ );
+ Ok($coerce)
+ }};
+}
+
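+// Converts an integer SQL value into a u64 child position.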
+fn value_to_position(v: Value) -> Result<u64> {
+ match v {
+ Value::Int8(v) => ensure_and_coerce!(v >= 0, v as u64),
+ Value::Int16(v) => ensure_and_coerce!(v >= 0, v as u64),
+ Value::Int32(v) => ensure_and_coerce!(v >= 0, v as u64),
+ Value::Int64(v) => ensure_and_coerce!(v >= 0, v as u64),
+ Value::UInt8(v) => Ok(v as u64),
+ Value::UInt16(v) => Ok(v as u64),
+ Value::UInt32(v) => Ok(v as u64),
+ Value::UInt64(v) => Ok(v),
+ _ => unreachable!(),
+ }
+}
+
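+// Converts an integer SQL value into a u32 grid distance (k).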
+fn value_to_distance(v: Value) -> Result<u32> {
+ match v {
+ Value::Int8(v) => ensure_and_coerce!(v >= 0, v as u32),
+ Value::Int16(v) => ensure_and_coerce!(v >= 0, v as u32),
+ Value::Int32(v) => ensure_and_coerce!(v >= 0, v as u32),
+ Value::Int64(v) => ensure_and_coerce!(v >= 0, v as u32),
+ Value::UInt8(v) => Ok(v as u32),
+ Value::UInt16(v) => Ok(v as u32),
+ Value::UInt32(v) => Ok(v),
+ Value::UInt64(v) => Ok(v as u32),
+ _ => unreachable!(),
+ }
+}
+
fn signature_of_cell() -> Signature {
- let mut signatures = Vec::new();
- for cell_type in &[
- ConcreteDataType::uint64_datatype(),
- ConcreteDataType::int64_datatype(),
- ] {
+ let mut signatures = Vec::with_capacity(CELL_TYPES.len());
+ for cell_type in CELL_TYPES.as_slice() {
signatures.push(TypeSignature::Exact(vec![cell_type.clone()]));
}
Signature::one_of(signatures, Volatility::Stable)
}
-fn signature_of_double_cell() -> Signature {
- let mut signatures = Vec::new();
- let cell_types = &[
- ConcreteDataType::uint64_datatype(),
- ConcreteDataType::int64_datatype(),
- ];
- for cell_type in cell_types {
- for cell_type2 in cell_types {
+fn signature_of_double_cells() -> Signature {
+ let mut signatures = Vec::with_capacity(CELL_TYPES.len() * CELL_TYPES.len());
+ for cell_type in CELL_TYPES.as_slice() {
+ for cell_type2 in CELL_TYPES.as_slice() {
signatures.push(TypeSignature::Exact(vec![
cell_type.clone(),
cell_type2.clone(),
@@ -747,21 +1046,9 @@ fn signature_of_double_cell() -> Signature {
}
fn signature_of_cell_and_resolution() -> Signature {
- let mut signatures = Vec::new();
- for cell_type in &[
- ConcreteDataType::uint64_datatype(),
- ConcreteDataType::int64_datatype(),
- ] {
- for resolution_type in &[
- ConcreteDataType::int8_datatype(),
- ConcreteDataType::int16_datatype(),
- ConcreteDataType::int32_datatype(),
- ConcreteDataType::int64_datatype(),
- ConcreteDataType::uint8_datatype(),
- ConcreteDataType::uint16_datatype(),
- ConcreteDataType::uint32_datatype(),
- ConcreteDataType::uint64_datatype(),
- ] {
+ let mut signatures = Vec::with_capacity(CELL_TYPES.len() * RESOLUTION_TYPES.len());
+ for cell_type in CELL_TYPES.as_slice() {
+ for resolution_type in RESOLUTION_TYPES.as_slice() {
signatures.push(TypeSignature::Exact(vec![
cell_type.clone(),
resolution_type.clone(),
@@ -771,6 +1058,19 @@ fn signature_of_cell_and_resolution() -> Signature {
Signature::one_of(signatures, Volatility::Stable)
}
+fn signature_of_cell_and_distance() -> Signature {
+ let mut signatures = Vec::with_capacity(CELL_TYPES.len() * DISTANCE_TYPES.len());
+ for cell_type in CELL_TYPES.as_slice() {
+ for distance_type in DISTANCE_TYPES.as_slice() {
+ signatures.push(TypeSignature::Exact(vec![
+ cell_type.clone(),
+ distance_type.clone(),
+ ]));
+ }
+ }
+ Signature::one_of(signatures, Volatility::Stable)
+}
+
fn cell_from_value(v: Value) -> Result<Option<CellIndex>> {
let cell = match v {
Value::Int64(v) => Some(
diff --git a/tests/cases/standalone/common/function/geo.result b/tests/cases/standalone/common/function/geo.result
index 4f9d168ac05a..75caeb886b5b 100644
--- a/tests/cases/standalone/common/function/geo.result
+++ b/tests/cases/standalone/common/function/geo.result
@@ -102,35 +102,58 @@ SELECT h3_cell_to_string(h3_latlng_to_cell(37.76938, -122.3889, 8::UInt64)) AS c
| 88283082e7fffff | 613196570438926335 |
+-----------------+--------------------+
-SELECT h3_cell_center_lat(h3_latlng_to_cell(37.76938, -122.3889, 8::UInt64)) AS cell_center_lat, h3_cell_center_lng(h3_latlng_to_cell(37.76938, -122.3889, 8::UInt64)) AS cell_center_lng;
+SELECT h3_cell_center_latlng(h3_latlng_to_cell(37.76938, -122.3889, 8::UInt64)) AS cell_center;
-+-------------------+---------------------+
-| cell_center_lat | cell_center_lng |
-+-------------------+---------------------+
-| 37.77246152245501 | -122.39010997087324 |
-+-------------------+---------------------+
++------------------------------------------+
+| cell_center |
++------------------------------------------+
+| [37.77246152245501, -122.39010997087324] |
++------------------------------------------+
SELECT
h3_cell_resolution(cell) AS resolution,
h3_cell_base(cell) AS base,
h3_cell_is_pentagon(cell) AS pentagon,
h3_cell_parent(cell, 6::UInt64) AS parent,
+ h3_cell_to_children(cell, 10::UInt64) AS children,
+ h3_cell_to_children_size(cell, 10) AS children_count,
+ h3_cell_to_child_pos(cell, 6) AS child_pos,
+ h3_child_pos_to_cell(25, cell, 11) AS child
FROM (SELECT h3_latlng_to_cell(37.76938, -122.3889, 8::UInt64) AS cell);
-+------------+------+----------+--------------------+
-| resolution | base | pentagon | parent |
-+------------+------+----------+--------------------+
-| 8 | 20 | false | 604189371209351167 |
-+------------+------+----------+--------------------+
++------------+------+----------+--------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------+-----------+--------------------+
+| resolution | base | pentagon | parent | children | children_count | child_pos | child |
++------------+------+----------+--------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------+-----------+--------------------+
+| 8 | 20 | false | 604189371209351167 | [622203769691602943, 622203769691635711, 622203769691668479, 622203769691701247, 622203769691734015, 622203769691766783, 622203769691799551, 622203769691865087, 622203769691897855, 622203769691930623, 622203769691963391, 622203769691996159, 622203769692028927, 622203769692061695, 622203769692127231, 622203769692159999, 622203769692192767, 622203769692225535, 622203769692258303, 622203769692291071, 622203769692323839, 622203769692389375, 622203769692422143, 622203769692454911, 622203769692487679, 622203769692520447, 622203769692553215, 622203769692585983, 622203769692651519, 622203769692684287, 622203769692717055, 622203769692749823, 622203769692782591, 622203769692815359, 622203769692848127, 622203769692913663, 622203769692946431, 622203769692979199, 622203769693011967, 622203769693044735, 622203769693077503, 622203769693110271, 622203769693175807, 622203769693208575, 622203769693241343, 622203769693274111, 622203769693306879, 622203769693339647, 622203769693372415] | 49 | 45 | 626707369319059455 |
++------------+------+----------+--------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------+-----------+--------------------+
-SELECT h3_is_neighbour(cell1, cell2)
-FROM (SELECT h3_latlng_to_cell(37.76938, -122.3889, 8::UInt64) AS cell1, h3_latlng_to_cell(36.76938, -122.3889, 8::UInt64) AS cell2);
+SELECT
+ h3_grid_disk(cell, 0) AS current_cell,
+ h3_grid_disk(cell, 3) AS grids,
+ h3_grid_disk_distances(cell, 3) AS all_grids,
+FROM (SELECT h3_latlng_to_cell(37.76938, -122.3889, 8::UInt64) AS cell);
-+------------------------------+
-| h3_is_neighbour(cell1,cell2) |
-+------------------------------+
-| false |
-+------------------------------+
++----------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| current_cell | grids | all_grids |
++----------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| [613196570438926335] | [613196570438926335, 613196570436829183, 613196569755254783, 613196570378108927, 613196570373914623, 613196570434732031, 613196570432634879, 613196570445217791, 613196570250182655, 613196569753157631, 613196569744769023, 613196569746866175, 613196570369720319, 613196570365526015, 613196570376011775, 613196570336165887, 613196570344554495, 613196570443120639, 613196570441023487, 613196570220822527, 613196570258571263, 613196570248085503, 613196570254376959, 613196569757351935, 613196569748963327, 613196569751060479, 613196569686048767, 613196569688145919, 613196570371817471, 613196570367623167, 613196570394886143, 613196570338263039, 613196570331971583, 613196570340360191, 613196570405371903, 613196570403274751, 613196570216628223] | [613196570438926335, 613196570436829183, 613196569755254783, 613196570378108927, 613196570373914623, 613196570434732031, 613196570432634879, 613196570445217791, 613196570250182655, 613196569753157631, 613196569744769023, 613196569746866175, 613196570369720319, 613196570365526015, 613196570376011775, 613196570336165887, 613196570344554495, 613196570443120639, 613196570441023487, 613196570220822527, 613196570258571263, 613196570248085503, 613196570254376959, 613196569757351935, 613196569748963327, 613196569751060479, 613196569686048767, 613196569688145919, 613196570371817471, 613196570367623167, 613196570394886143, 613196570338263039, 613196570331971583, 613196570340360191, 613196570405371903, 613196570403274751, 613196570216628223] |
++----------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+
+SELECT
+ h3_grid_distance(cell1, cell2) AS distance,
+ h3_grid_path_cells(cell1, cell2) AS path_cells,
+FROM
+ (
+ SELECT
+ h3_latlng_to_cell(37.76938, -122.3889, 8::UInt64) AS cell1,
+ h3_latlng_to_cell(39.634, -104.999, 8::UInt64) AS cell2
+ );
+
++----------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| distance | path_cells |
++----------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| 1612 | [613196570438926335, 613196569755254783, 613196569744769023, 613196569748963327, 613196569669271551, 613196569673465855, 613196569763643391, 613196569767837695, 613196570023690239, 613196570021593087, 613196570025787391, 613196569998524415, 613196570002718719, 613196570040467455, 613196570029981695, 613196570034175999, 613196572437512191, 613196572441706495, 613196572414443519, 613196572418637823, 613196572456386559, 613196572445900799, 613196572450095103, 613196572705947647, 613196572710141951, 613196572691267583, 613196572680781823, 613196572684976127, 613196572722724863, 613196572726919167, 613196592932978687, 613196592937172991, 613196592421273599, 613196592410787839, 613196592414982143, 613196592452730879, 613196592456925183, 613196592261890047, 613196592266084351, 613196592689709055, 613196592679223295, 613196592683417599, 613196592496771071, 613196592500965375, 613196592538714111, 613196592528228351, 613196592532422655, 613196587587338239, 613196587591532543, 613196587396497407, 613196587400691711, 613196587438440447, 613196587427954687, 613196587432148991, 613196586916249599, 613196586920443903, 613196587664932863, 613196587669127167, 613196587706875903, 613196587704778751, 613196587708973055, 613196587681710079, 613196587685904383, 613196593444683775, 613196593434198015, 613196593438392319, 613196593476141055, 613196593480335359, 613196593453072383, 613196593457266687, 613196593713119231, 613196593702633471, 613196593706827775, 613196593744576511, 613196593748770815, 613196593729896447, 613196593719410687, 613196593723604991, 613196962272903167, 613196962277097471, 613196962249834495, 613196962254028799, 613196962291777535, 613196962281291775, 613196962285486079, 613196961601814527, 613196961606008831, 613196961578745855, 613196961582940159, 613196961620688895, 613196961610203135, 613196961614397439, 613196961931067391, 613196961935261695, 613196961855569919, 613196961845084159, 613196961849278463, 613196961887027199, 613196961891221503, 613196956830793727, 613196956834988031, 613196956755296255, 613196956744810495, 613196956749004799, 613196956786753535, 613196956790947839, 613196957099229183, 613196957103423487, 613196957023731711, 613196957021634559, 613196957025828863, 613196957116006399, 613196957120200703, 613196962878980095, 613196962868494335, 613196962872688639, 613196962792996863, 613196962797191167, 613196962887368703, 613196962891563007, 613196963147415551, 613196963136929791, 613196963141124095, 613196963061432319, 613196963065626623, 613196963164192767, 613196963153707007, 613196963157901311, 613196979519881215, 613196979524075519, 613196979496812543, 613196979501006847, 613196979538755583, 613196979528269823, 613196979532464127, 613196978848792575, 613196978852986879, 613196978825723903, 613196978829918207, 613196978867666943, 613196978857181183, 613196978861375487, 613196979614253055, 613196979618447359, 613196979102547967, 613196979092062207, 613196979096256511, 613196979134005247, 613196979138199551, 613196977869422591, 613196977873616895, 613196974002274303, 613196973991788543, 613196973995982847, 613196974033731583, 613196974037925887, 613196978137858047, 613196978142052351, 613196978139955199, 613196974268612607, 613196974272806911, 613196980520222719, 613196980524417023, 613196980562165759, 613196980551679999, 613196980555874303, 613196980039974911, 613196980044169215, 613196979849134079, 613196979853328383, 613196979891077119, 613196979880591359, 613196979884785663, 613196980308410367, 613196980312604671, 613196980125958143, 613196980115472383, 
613196980119666687, 613196980157415423, 613196980161609727, 613196980134346751, 613196980138541055, 613220567281041407, 613220567270555647, 613220567274749951, 613220567312498687, 613220567316692991, 613220567289430015, 613220567293624319, 613220567549476863, 613220567538991103, 613220567543185407, 613220567524311039, 613220567528505343, 613220567566254079, 613220567555768319, 613220567559962623, 613220565802549247, 613220565806743551, 613220565779480575, 613220565783674879, 613220565821423615, 613220565810937855, 613220565815132159, 613220566070984703, 613220566075179007, 613220566047916031, 613220566052110335, 613220566050013183, 613220566087761919, 613220566091956223, 613220568547721215, 613220568551915519, 613220568472223743, 613220568461737983, 613220568465932287, 613220568503681023, 613220568507875327, 613220567876632575, 613220567880826879, 613220567801135103, 613220567790649343, 613220567794843647, 613220567832592383, 613220567836786687, 613220568153456639, 613220568142970879, 613220568147165183, 613220568067473407, 613220568071667711, 613220568161845247, 613220568166039551, 613220524398477311, 613220524387991551, 613220524392185855, 613220524312494079, 613220524316688383, 613220524406865919, 613220524411060223, 613220524666912767, 613220524656427007, 613220524669009919, 613220524641746943, 613220524645941247, 613220524683689983, 613220524673204223, 613220524677398527, 613220522919985151, 613220522924179455, 613220522896916479, 613220522901110783, 613220522938859519, 613220522928373759, 613220522932568063, 613220523188420607, 613220523192614911, 613220523165351935, 613220523169546239, 613220523167449087, 613220523205197823, 613220523209392127, 613220523014356991, 613220523018551295, 613220525589659647, 613220525579173887, 613220525583368191, 613220525621116927, 613220525625311231, 613220525430276095, 613220525434470399, 613220524918571007, 613220524908085247, 613220524912279551, 613220524950028287, 613220524954222591, 613220525707100159, 613220525696614399, 613220525700808703, 613220525184909311, 613220525189103615, 613220573312450559, 613220573316644863, 613220573354393599, 613220573343907839, 613220573348102143, 613220541559472127, 613220541563666431, 613220573580886015, 613220573585080319, 613220573622829055, 613220573612343295, 613220573624926207, 613220573597663231, 613220573601857535, 613220540571713535, 613220540561227775, 613220540565422079, 613220540603170815, 613220540607365119, 613220540580102143, 613220540584296447, 613220539900624895, 613220539890139135, 613220539894333439, 613220539932082175, 613220539936276479, 613220539909013503, 613220539913207807, 613220539911110655, 613220540166963199, 613220540171157503, 613220540143894527, 613220540148088831, 613220540185837567, 613220540175351807, 613220540179546111, 613220542582882303, 613220542587076607, 613220542559813631, 613220542564007935, 613220542601756671, 613220542591270911, 613220542595465215, 613220542851317759, 613220542855512063, 613220542836637695, 613220542826151935, 613220542830346239, 613220542868094975, 613220542872289279, 613220461710409727, 613220461714604031, 613220461634912255, 613220461624426495, 613220461628620799, 613220461666369535, 613220461670563839, 613220461978845183, 613220461983039487, 613220461903347711, 613220461892861951, 613220461905444863, 613220461995622399, 613220461999816703, 613220214221307903, 613220214210822143, 613220214215016447, 613220214135324671, 613220214139518975, 613220214229696511, 613220214233890815, 613220213550219263, 613220213539733503, 613220213543927807, 
613220213464236031, 613220213468430335, 613220213558607871, 613220213562802175, 613220213560705023, 613220213816557567, 613220213820751871, 613220213793488895, 613220213797683199, 613220213835431935, 613220213824946175, 613220213829140479, 613220216232476671, 613220216236670975, 613220216209407999, 613220216213602303, 613220216251351039, 613220216240865279, 613220216245059583, 613220216500912127, 613220476981870591, 613220216486232063, 613220216475746303, 613220216479940607, 613220216517689343, 613220216521883647, 613220479393595391, 613220479397789695, 613220478881890303, 613220478871404543, 613220478875598847, 613220478913347583, 613220478917541887, 613220478722506751, 613220478726701055, 613220479150325759, 613220479139839999, 613220479152422911, 613220478957387775, 613220478961582079, 613220478999330815, 613220478988845055, 613220478993039359, 613220231382302719, 613220231386497023, 613220231191461887, 613220231195656191, 613220231233404927, 613220231222919167, 613220231227113471, 613220230711214079, 613220230715408383, 613220231459897343, 613220231464091647, 613220231461994495, 613220231499743231, 613220231503937535, 613220231476674559, 613220231480868863, 613220631302897663, 613220631292411903, 613220631296606207, 613220631334354943, 613220631338549247, 613220631311286271, 613220631315480575, 613220631571333119, 613220631560847359, 613220631565041663, 613220631602790399, 613220631550361599, 613220631588110335, 613220631577624575, 613220631581818879, 613220633985155071, 613220633989349375, 613220633962086399, 613220633966280703, 613220634004029439, 613220633993543679, 613220633997737983, 613220633314066431, 613220633318260735, 613220633290997759, 613220633295192063, 613220633332940799, 613220633322455039, 613220633335037951, 613220633643319295, 613220633647513599, 613220633567821823, 613220633557336063, 613220633561530367, 613220633599279103, 613220633603473407, 613220600625758207, 613220600629952511, 613220600550260735, 613220600539774975, 613220600543969279, 613220600581718015, 613220600585912319, 613220600894193663, 613220600898387967, 613220600896290815, 613220600816599039, 613220600820793343, 613220600910970879, 613220600915165183, 613220588420333567, 613220588409847807, 613220588414042111, 613220588334350335, 613220588338544639, 613220588428722175, 613220588432916479, 613220588688769023, 613220588678283263, 613220588682477567, 613220588602785791, 613220588667797503, 613220588705546239, 613220588695060479, 613220588699254783, 613220591102590975, 613220591106785279, 613220591079522303, 613220591083716607, 613220591121465343, 613220591110979583, 613220591115173887, 613220590431502335, 613220590435696639, 613220590408433663, 613220590412627967, 613220590450376703, 613220590439890943, 613220590452473855, 613220591196962815, 613220591201157119, 613220590685257727, 613220590674771967, 613220590678966271, 613220590716715007, 613220590720909311, 613220589452132351, 613220589456326655, 613220617797238783, 613220617786753023, 613220617790947327, 613220617828696063, 613220617832890367, 613220589720567807, 613220589718470655, 613220589722664959, 613220618063577087, 613220618067771391, 613220606061576191, 613220606065770495, 613220606103519231, 613220606093033471, 613220606097227775, 613220605581328383, 613220605585522687, 613220605390487551, 613220605394681855, 613220605432430591, 613220605421944831, 613220605426139135, 613220605849763839, 613220605411459071, 613220605667311615, 613220605656825855, 613220605661020159, 613220605698768895, 613220605702963199, 613220605675700223, 
613220605679894527, 613220608083230719, 613220608072744959, 613220608076939263, 613220608114687999, 613220608118882303, 613220608091619327, 613220608095813631, 613220608351666175, 613220608341180415, 613220608353763327, 613220608326500351, 613220608330694655, 613220608368443391, 613220608357957631, 613220608362151935, 613220606604738559, 613220606608932863, 613220606581669887, 613220606585864191, 613220606623612927, 613220606613127167, 613220606617321471, 613220606873174015, 613220606877368319, 613220606850105343, 613220606848008191, 613220606852202495, 613220606889951231, 613220606894145535, 613221654100705279, 613221654104899583, 613221654025207807, 613221654014722047, 613221654018916351, 613221654056665087, 613221654060859391, 613221653429616639, 613221653433810943, 613221653354119167, 613221653343633407, 613221653347827711, 613221653385576447, 613221653450588159, 613221653706440703, 613221653695954943, 613221653700149247, 613221653620457471, 613221653624651775, 613221653714829311, 613221653719023615, 613221656122359807, 613221656111874047, 613221656116068351, 613221656036376575, 613221656040570879, 613221656130748415, 613221656134942719, 613221656390795263, 613221656388698111, 613221656392892415, 613221656365629439, 613221656369823743, 613221656407572479, 613221656397086719, 613221656401281023, 613221654643867647, 613221654648061951, 613221654620798975, 613221654624993279, 613221654662742015, 613221654652256255, 613221654656450559, 613221654912303103, 613221654916497407, 613221654889234431, 613221654887137279, 613221654891331583, 613221654929080319, 613221654933274623, 613221654738239487, 613221654742433791, 613221671272185855, 613221671261700095, 613221671265894399, 613221671303643135, 613221671307837439, 613221671112802303, 613221671116996607, 613221670601097215, 613221670590611455, 613221670594805759, 613221670632554495, 613221671351877631, 613221671389626367, 613221671379140607, 613221671383334911, 613221670867435519, 613221670871629823, 613221669602852863, 613221669607047167, 613221669644795903, 613221669634310143, 613221669638504447, 613221673283354623, 613221673287548927, 613221669871288319, 613221669875482623, 613221669913231359, 613221669911134207, 613221669915328511, 613221669888065535, 613221669892259839, 613221672295596031, 613221672285110271, 613221672289304575, 613221672327053311, 613221672331247615, 613221672303984639, 613221672308178943, 613221671624507391, 613221671614021631, 613221671618215935, 613221671655964671, 613221671660158975, 613221671632895999, 613221671630798847, 613221671634993151, 613221671890845695, 613221671895039999, 613221671867777023, 613221671871971327, 613221671909720063, 613221671899234303, 613221671903428607, 613221559416389631, 613221559420583935, 613221559393320959, 613221559397515263, 613221559435263999, 613221559424778239, 613221559428972543, 613221559684825087, 613221559749836799, 613221559670145023, 613221559659659263, 613221559663853567, 613221559701602303, 613221559705796607, 613221558000812031, 613221558005006335, 613221557925314559, 613221557914828799, 613221557919023103, 613221557956771839, 613221557960966143, 613221558269247487, 613221558273441791, 613221558193750015, 613221558191652863, 613221558195847167, 613221558286024703, 613221558290219007, 613221560693555199, 613221560683069439, 613221560687263743, 613221560607571967, 613221560611766271, 613221560701943807, 613221560706138111, 613221560022466559, 613221560011980799, 613221560016175103, 613221559936483327, 613221559940677631, 613221560030855167, 613221560028758015, 
613221560032952319, 613221560288804863, 613221560292999167, 613221560265736191, 613221560269930495, 613221560307679231, 613221560297193471, 613221560301387775, 613221576663367679, 613221576667561983, 613221576640299007, 613221576644493311, 613221576682242047, 613221576671756287, 613221576675950591, 613221580784271359, 613221580788465663, 613221576917123071, 613221576906637311, 613221576910831615, 613221576948580351, 613221576952774655, 613221575683997695, 613221575688191999, 613221575172292607, 613221575161806847, 613221575166001151, 613221575203749887, 613221575207944191, 613221575012909055, 613221575017103359, 613221575440728063, 613221575438630911, 613221575442825215, 613221575247790079, 613221575251984383, 613221575289733119, 613221575279247359, 613221575283441663, 613221577854550015, 613221577858744319, 613221577663709183, 613221577667903487, 613221577705652223, 613221577695166463, 613221577699360767, 613221577183461375, 613221577187655679, 613221577932144639, 613221577930047487, 613221577934241791, 613221577971990527, 613221577976184831, 613221577948921855, 613221577953116159, 613221735109492735, 613221735099006975, 613221735103201279, 613221735140950015, 613221735145144319, 613221735117881343, 613221735122075647, 613221735377928191, 613221735367442431, 613221735371636735, 613221735352762367, 613221735356956671, 613221735394705407, 613221735384219647, 613221735388413951, 613221730275557375, 613221730279751679, 613221730252488703, 613221730256683007, 613221730294431743, 613221730283945983, 613221730288140287, 613221729604468735, 613221729608663039, 613221729581400063, 613221729585594367, 613221729623343103, 613221729621245951, 613221729625440255, 613221729933721599, 613221729937915903, 613221729858224127, 613221729847738367, 613221729851932671, 613221729889681407, 613221729893875711, 613221732349640703, 613221732353835007, 613221732274143231, 613221732263657471, 613221732267851775, 613221732305600511, 613221732309794815, 613221732626464767, 613221732615979007, 613221732620173311, 613221732540481535, 613221732544675839, 613221732634853375, 613221732639047679, 613221692226928639, 613221692216442879, 613221692220637183, 613221692140945407, 613221692145139711, 613221692235317247, 613221692239511551, 613221692495364095, 613221692484878335, 613221692489072639, 613221692470198271, 613221692474392575, 613221692512141311, 613221692501655551, 613221692505849855, 613221687392993279, 613221687397187583, 613221687369924607, 613221687374118911, 613221687411867647, 613221687401381887, 613221687405576191, 613221686721904639, 613221686726098943, 613221686698835967, 613221686703030271, 613221686740779007, 613221686738681855, 613221686742876159, 613221687487365119, 613221687491559423, 613221686975660031, 613221686965174271, 613221686969368575, 613221687007117311, 613221687011311615, 613221693258727423, 613221693262921727, 613221689391579135, 613221689381093375, 613221689385287679, 613221689423036415, 613221689427230719, 613221693535551487, 613221693525065727, 613221693529260031, 613221689657917439, 613221689662111743, 613221709868171263, 613221709872365567, 613221709910114303, 613221709899628543, 613221709903822847, 613221709387923455, 613221709392117759, 613221709197082623, 613221709201276927, 613221709239025663, 613221709228539903, 613221709232734207, 613221709213859839, 613221709218054143, 613221709473906687, 613221709463420927, 613221709467615231, 613221709505363967, 613221709509558271, 613221709482295295, 613221709486489599, 613221704373633023, 613221704363147263, 613221704367341567, 
613221704405090303, 613221704409284607, 613221704382021631, 613221704386215935, 613221704642068479, 613221704639971327, 613221704644165631, 613221704616902655, 613221704621096959, 613221704658845695, 613221704648359935, 613221704652554239, 613221710411333631, 613221710415527935, 613221710388264959, 613221710392459263, 613221710430207999, 613221710419722239, 613221710423916543, 613221710679769087, 613221710683963391, 613221710665089023, 613221710654603263, 613221710658797567, 613221710696546303, 613221710700740607, 613168113669636095, 613168113665441791, 613168113627693055, 613168113623498751, 613168113633984511, 613168113713676287, 613168113709481983, 613168113401200639, 613168113397006335, 613168113359257599, 613168113355063295, 613168113365549055, 613168113384423423, 613168113380229119, 613168114063900671, 613168114059706367, 613168114070192127, 613168114032443391, 613168114028249087, 613168114055512063, 613168114051317759, 613168111647981567, 613168111643787263, 613168111654273023, 613168111616524287, 613168111612329983, 613168111639592959, 613168111635398655, 613168111637495807, 613168111381643263, 613168111377448959, 613168111404711935, 613168111400517631, 613168111362768895, 613168111358574591, 613168111369060351, 613168113126473727, 613168113122279423, 613168113149542399, 613168113145348095, 613168113107599359, 613168113103405055, 613168113113890815, 613168112858038271, 613168112853843967, 613168112872718335, 613168112868524031, 613168112879009791, 613168112841261055, 613168112837066751, 613168113032101887, 613168113027907583, 613168096498155519, 613168096493961215, 613168096504446975, 613168096466698239, 613168096462503935, 613168096657539071, 613168096653344767, 613168096229720063, 613168096225525759, 613168096236011519, 613168096422658047, 613168096418463743, 613168096380715007, 613168096376520703, 613168096387006463, 613168096902905855, 613168096898711551, 613168098167488511, 613168098163294207, 613168098125545471, 613168098121351167, 613168098131836927, 613168094486986751, 613168094482792447, 613168097899053055, 613168097894858751, 613168097896955903, 613168097859207167, 613168097855012863, 613168097882275839, 613168097878081535, 613168095474745343, 613168095470551039, 613168095481036799, 613168095443288063, 613168095439093759, 613168095466356735, 613168095462162431, 613168095206309887, 613168095202115583, 613168095212601343, 613168095174852607, 613168095170658303, 613168095189532671, 613168095185338367, 613168095195824127, 613168095879495679, 613168095875301375, 613168095785123839, 613168095780929535, 613168095860621311, 613168095856427007, 613168095866912767, 613168208353951743, 613168208349757439, 613168208259579903, 613168208255385599, 613168208335077375, 613168208330883071, 613168208341368831, 613168208024698879, 613168208020504575, 613168207982755839, 613168207978561535, 613168207989047295, 613168208068739071, 613168208064544767, 613168209769529343, 613168209765335039, 613168209727586303, 613168209723391999, 613168209733877759, 613168209813569535, 613168209809375231, 613168209501093887, 613168209496899583, 613168209498996735, 613168209461247999, 613168209457053695, 613168209484316671, 613168209480122367, 613168207076786175, 613168207072591871, 613168207083077631, 613168207045328895, 613168207041134591, 613168207068397567, 613168207064203263, 613168206808350719, 613168206804156415, 613168206814642175, 613168206776893439, 613168206772699135, 613168206791573503, 613168206787379199, 613168206797864959, 613168207481536511, 613168207477342207, 613168207504605183, 
613168207500410879, 613168207462662143, 613168207458467839, 613168207468953599, 613168191106973695, 613168191102779391, 613168191130042367, 613168191125848063, 613168191088099327, 613168191083905023, 613168191086002175, 613168159068782591, 613168159064588287, 613168190853218303, 613168190849023999, 613168190859509759, 613168190821761023, 613168190817566719, 613168192086343679, 613168192082149375, 613168192598048767, 613168192593854463, 613168192604340223, 613168192566591487, 613168192562397183, 613168191817908223, 613168191813713919, 613168191815811071, 613168192331710463, 613168192327516159, 613168192522551295, 613168192518356991, 613168192480608255, 613168192476413951, 613168192486899711, 613168189915791359, 613168189911597055, 613168190106632191, 613168190102437887, 613168190064689151, 613168190060494847, 613168190070980607, 613168189647355903, 613168189643161599, 613168189829808127, 613168189825613823, 613168189836099583, 613168189798350847, 613168189794156543, 613168189821419519, 613168189817225215, 613168202312056831, 613168202307862527, 613168202318348287, 613168202280599551, 613168202276405247, 613168202303668223, 613168202299473919, 613168202043621375, 613168202039427071, 613168202041524223, 613168201951346687, 613168201947152383, 613168202026844159, 613168202022649855, 613168202033135615, 613168037494783999, 613168037490589695, 613168037400412159, 613168037396217855, 613168037475909631, 613168037471715327, 613168037482201087, 613168037226348543, 613168037222154239, 613168037131976703, 613168037127782399, 613168037129879551, 613168037209571327, 613168037205377023, 613168037836619775, 613168037832425471, 613168037794676735, 613168037790482431, 613168037800968191, 613168037880659967, 613168037876465663, 613168035420700671, 613168035416506367, 613168035378757631, 613168035374563327, 613168035385049087, 613168035464740863, 613168035399729151, 613168035143876607, 613168035139682303, 613168035150168063, 613168035112419327, 613168035108225023, 613168035135487999, 613168035131293695, 613168047626125311, 613168047621931007, 613168047632416767, 613168047594668031, 613168047590473727, 613168047617736703, 613168047613542399, 613168047357689855, 613168047353495551, 613168047355592703, 613168047382855679, 613168047378661375, 613168047340912639, 613168047336718335, 613168047347204095, 613168020247805951, 613168020243611647, 613168020270874623, 613168020266680319, 613168020228931583, 613168020224737279, 613168020235223039, 613168019979370495, 613168019975176191, 613168020002439167, 613168019998244863, 613168020000342015, 613168019962593279, 613168019958398975, 613168020153434111, 613168020149239807, 613168020665139199, 613168020660944895, 613168020671430655, 613168020633681919, 613168020629487615, 613168032635682815, 613168032631488511, 613168018249220095, 613168018245025791, 613168018255511551, 613168018217762815, 613168032396607487, 613168032358858751, 613168032354664447, 613168032365150207, 613168017982881791, 613168017978687487, 613168029984882687, 613168029980688383, 613168029942939647, 613168029938745343, 613168029949231103, 613168030465130495, 613168030460936191, 613168029716447231, 613168029712252927, 613168029674504191, 613168029670309887, 613168029672407039, 613168029699670015, 613168029695475711, 613168030379147263, 613168030374952959, 613168030385438719, 613168030347689983, 613168030343495679, 613168030370758655, 613168030366564351, 613168063396708351, 613168063392514047, 613168063402999807, 613168063365251071, 613168063361056767, 613168063388319743, 613168063384125439, 
613168063386222591, 613168063130370047, 613168063126175743, 613168063035998207, 613168063031803903, 613168063111495679, 613168063107301375, 613168063117787135, 613168831526862847, 613168831522668543, 613168831432491007, 613168831428296703, 613168831507988479, 613168831503794175, 613168831514279935, 613168831258427391, 613168831193415679, 613168831155666943, 613168831151472639, 613168831161958399, 613168831241650175, 613168831237455871, 613168828781690879, 613168828777496575, 613168828739747839, 613168828735553535, 613168828746039295, 613168828825731071, 613168828821536767, 613168828513255423, 613168828509061119, 613168828471312383, 613168828467118079, 613168828469215231, 613168828496478207, 613168828492283903, 613168829175955455, 613168829171761151, 613168829182246911, 613168829144498175, 613168829140303871, 613168829167566847, 613168829163372543, 613167015357579263, 613167015353384959, 613167015363870719, 613167015326121983, 613167015321927679, 613167015349190655, 613167015336607743, 613167015347093503, 613167015091240959, 613167015087046655, 613167015114309631, 613167015110115327, 613167015072366591, 613167015068172287, 613167015078658047, 613168814279884799, 613168814275690495, 613168814302953471, 613168814298759167, 613168814261010431, 613168814256816127, 613168814267301887, 613168814011449343, 613168813510230015, 613168814026129407, 613168814021935103, 613168814032420863, 613168813994672127, 613168813990477823, 613168814185512959, 613168814181318655, 613168811610210303, 613168811606015999, 613168811616501759, 613168811578753023, 613168811574558719, 613168811769593855, 613168811765399551, 613168811341774847, 613168811337580543, 613168811339677695, 613168811534712831, 613168811530518527, 613168811492769791, 613168811488575487, 613168811499061247, 613168812014960639, 613168812010766335, 613168813279543295, 613168813275348991, 613168813237600255, 613168813233405951, 613168813243891711, 613166998196584447, 613166998192390143, 613168813011107839, 613168812998524927, 613168813009010687, 613168812971261951, 613168812967067647, 613168812994330623, 613168812990136319, 613167078641238015, 613167078637043711, 613167078647529471, 613167078609780735, 613167078605586431, 613167078632849407, 613167078628655103, 613167078372802559, 613167078368608255, 613167078379094015, 613167078341345279, 613167078276333567, 613167078356025343, 613167078351831039, 613167078362316799, 613167079045988351, 613167079041794047, 613167078951616511, 613167078947422207, 613167079027113983, 613167079022919679, 613167079033405439, 613167076630069247, 613167076625874943, 613167076535697407, 613167076531503103, 613167076611194879, 613167076607000575, 613167076609097727, 613167076300816383, 613167076296622079, 613167076258873343, 613167076254679039, 613167076265164799, 613167076344856575, 613167076340662271, 613167078045646847, 613167078041452543, 613167078003703807, 613167077999509503, 613167078009995263, 613167078089687039, 613167078085492735, 613167077777211391, 613167077764628479, 613167077775114239, 613167077737365503, 613167077733171199, 613167077760434175, 613167077756239871, 613167061394259967, 613167061390065663, 613167061400551423, 613167061362802687, 613167061358608383, 613167061385871359, 613167061381677055, 613167061125824511, 613167061121630207, 613167061132115967, 613167061094367231, 613167061146796031, 613167061109047295, 613167061104852991, 613167061115338751, 613167061799010303, 613167061794815999, 613167061822078975, 613167061817884671, 613167061780135935, 613167061775941631, 613167061786427391, 
613167059383091199, 613167059378896895, 613167059406159871, 613167059401965567, 613167059364216831, 613167059366313983, 613167059362119679, 613167055262187519, 613167055257993215, 613167059129335807, 613167059125141503, 613167059135627263, 613167059097878527, 613167059093684223, 613167060362461183, 613167060358266879, 613167060874166271, 613167060869971967, 613167060880457727, 613167060842708991, 613167060838514687, 613167060094025727, 613167060081442815, 613167060091928575, 613167060607827967, 613167060603633663, 613167060798668799, 613167060794474495, 613167060756725759, 613167060752531455, 613167060763017215, 613167104362807295, 613167104358612991, 613167104553648127, 613167104549453823, 613167104511705087, 613167104507510783, 613167104517996543, 613167104094371839, 613167104532676607, 613167104276824063, 613167104272629759, 613167104283115519, 613167104245366783, 613167104241172479, 613167104268435455, 613167104264241151, 613167098505461759, 613167098501267455, 613167098511753215, 613167098474004479, 613167098469810175, 613167098497073151, 613167098492878847, 613167098237026303, 613167098239123455, 613167098234929151, 613167098144751615, 613167098140557311, 613167098220249087, 613167098216054783, 613167098226540543, 613167103339397119, 613167103335202815, 613167103245025279, 613167103240830975, 613167103320522751, 613167103316328447, 613167103326814207, 613167103070961663, 613167103066767359, 613167102976589823, 613167102964006911, 613167102974492671, 613167103054184447, 613167103049990143, 613167103681232895, 613167103677038591, 613167103639289855, 613167103635095551, 613167103645581311, 613167103725273087, 613167103721078783, 613166949867716607, 613166949863522303, 613166949825773567, 613166949821579263, 613166949832065023, 613166949850939391, 613166949846745087, 613166949590892543, 613166949586698239, 613166949597183999, 613166949559435263, 613166949555240959, 613166949582503935, 613166949578309631, 613166943819530239, 613166943815335935, 613166943825821695, 613166943788072959, 613166943783878655, 613166943811141631, 613166943806947327, 613166943551094783, 613166943553191935, 613166943548997631, 613166943576260607, 613166943572066303, 613166943534317567, 613166943530123263, 613166943540609023, 613166948653465599, 613166948649271295, 613166948676534271, 613166948672339967, 613166948634591231, 613166948630396927, 613166948640882687, 613166948385030143, 613166948380835839, 613166948408098815, 613166948395515903, 613166948406001663, 613166948368252927, 613166948364058623, 613166948559093759, 613166948554899455, 613166949070798847, 613166949066604543, 613166949077090303, 613166949039341567, 613166949035147263, 613166928829087743, 613166928824893439, 613166932696236031, 613166932692041727, 613166932702527487, 613166928594206719, 613166928590012415, 613166928552263679, 613166928548069375, 613166928558555135, 613166932429897727, 613166932425703423, 613166926178287615, 613166926174093311, 613166926136344575, 613166926132150271, 613166926142636031, 613166926658535423, 613166926654341119, 613166925909852159, 613166925905657855, 613166925867909119, 613166925870006271, 613166925865811967, 613166925893074943, 613166925888880639, 613166926572552191, 613166926568357887, 613166926578843647, 613166926541094911, 613166926536900607, 613166926564163583, 613166926559969279, 613166931672825855, 613166931668631551, 613166931679117311, 613166931641368575, 613166931637174271, 613166931664437247, 613166931651854335, 613166931662340095, 613166931406487551, 613166931402293247, 613166931312115711, 
613166931307921407, 613166931387613183, 613166931383418879, 613166931393904639, 613167727720267775, 613167727716073471, 613167727625895935, 613167727621701631, 613167727701393407, 613167727697199103, 613167727707684863, 613167727391014911, 613167727386820607, 613167727349071871, 613167727344877567, 613167727355363327, 613167727435055103, 613167727430860799, 613167724975095807, 613167724970901503] |
++----------+------------------------------------------------------------------------------------------------+ (border of this very wide result table truncated)
SELECT geohash(37.76938, -122.3889, 9);
diff --git a/tests/cases/standalone/common/function/geo.sql b/tests/cases/standalone/common/function/geo.sql
index cd9d403e6e2a..af2a16517df1 100644
--- a/tests/cases/standalone/common/function/geo.sql
+++ b/tests/cases/standalone/common/function/geo.sql
@@ -26,18 +26,34 @@ SELECT h3_latlng_to_cell(37.76938, -122.3889, 8::UInt64), h3_latlng_to_cell_stri
SELECT h3_cell_to_string(h3_latlng_to_cell(37.76938, -122.3889, 8::UInt64)) AS cell_str, h3_string_to_cell(h3_latlng_to_cell_string(37.76938, -122.3889, 8::UInt64)) AS cell_index;
-SELECT h3_cell_center_lat(h3_latlng_to_cell(37.76938, -122.3889, 8::UInt64)) AS cell_center_lat, h3_cell_center_lng(h3_latlng_to_cell(37.76938, -122.3889, 8::UInt64)) AS cell_center_lng;
+SELECT h3_cell_center_latlng(h3_latlng_to_cell(37.76938, -122.3889, 8::UInt64)) AS cell_center;
SELECT
h3_cell_resolution(cell) AS resolution,
h3_cell_base(cell) AS base,
h3_cell_is_pentagon(cell) AS pentagon,
h3_cell_parent(cell, 6::UInt64) AS parent,
+ h3_cell_to_children(cell, 10::UInt64) AS children,
+ h3_cell_to_children_size(cell, 10) AS children_count,
+ h3_cell_to_child_pos(cell, 6) AS child_pos,
+ h3_child_pos_to_cell(25, cell, 11) AS child
FROM (SELECT h3_latlng_to_cell(37.76938, -122.3889, 8::UInt64) AS cell);
-SELECT h3_is_neighbour(cell1, cell2)
-FROM (SELECT h3_latlng_to_cell(37.76938, -122.3889, 8::UInt64) AS cell1, h3_latlng_to_cell(36.76938, -122.3889, 8::UInt64) AS cell2);
+SELECT
+ h3_grid_disk(cell, 0) AS current_cell,
+ h3_grid_disk(cell, 3) AS grids,
+ h3_grid_disk_distances(cell, 3) AS all_grids,
+FROM (SELECT h3_latlng_to_cell(37.76938, -122.3889, 8::UInt64) AS cell);
+SELECT
+ h3_grid_distance(cell1, cell2) AS distance,
+ h3_grid_path_cells(cell1, cell2) AS path_cells,
+FROM
+ (
+ SELECT
+ h3_latlng_to_cell(37.76938, -122.3889, 8::UInt64) AS cell1,
+ h3_latlng_to_cell(39.634, -104.999, 8::UInt64) AS cell2
+ );
SELECT geohash(37.76938, -122.3889, 9);
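As a quick aside on the `h3_cell_to_children_size(cell, 10)` call exercised above: H3 is an aperture-7 hierarchy, so for a hexagon cell the number of descendants at a finer resolution is just 7 raised to the resolution difference. The sketch below is illustrative Rust arithmetic, not GreptimeDB code, and assumes the common hexagon case (pentagon cells have slightly fewer children).

```rust
// Expected child counts for H3's aperture-7 hierarchy (hexagon cells only;
// pentagon cells are the exception and have slightly fewer descendants).
fn hexagon_children_size(parent_res: u32, child_res: u32) -> u64 {
    assert!(
        parent_res <= child_res && child_res <= 15,
        "H3 resolutions run from 0 to 15"
    );
    7u64.pow(child_res - parent_res)
}

fn main() {
    // The test's parent cell sits at resolution 8, so its resolution-10
    // children number 7^2 = 49.
    assert_eq!(hexagon_children_size(8, 10), 49);
    // A cell is its own sole "child" at its own resolution.
    assert_eq!(hexagon_children_size(8, 8), 1);
}
```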
|
feat
|
add more h3 functions (#4770)
|
dc83b0aa152cc2d2a9f00b3ca5616c4ca78da2e5
|
2024-12-06 13:52:25
|
Ning Sun
|
feat: add more transaction related statement for postgres interface (#5081)
| false
|
diff --git a/src/servers/src/postgres/fixtures.rs b/src/servers/src/postgres/fixtures.rs
index 18c3661b9334..895f5c03e4a9 100644
--- a/src/servers/src/postgres/fixtures.rs
+++ b/src/servers/src/postgres/fixtures.rs
@@ -51,22 +51,40 @@ static VAR_VALUES: Lazy<HashMap<&str, &str>> = Lazy::new(|| {
static SHOW_PATTERN: Lazy<Regex> = Lazy::new(|| Regex::new("(?i)^SHOW (.*?);?$").unwrap());
static SET_TRANSACTION_PATTERN: Lazy<Regex> =
Lazy::new(|| Regex::new("(?i)^SET TRANSACTION (.*?);?$").unwrap());
-static TRANSACTION_PATTERN: Lazy<Regex> =
- Lazy::new(|| Regex::new("(?i)^(BEGIN|ROLLBACK|COMMIT);?").unwrap());
+static START_TRANSACTION_PATTERN: Lazy<Regex> =
+ Lazy::new(|| Regex::new("(?i)^(START TRANSACTION.*|BEGIN);?").unwrap());
+static COMMIT_TRANSACTION_PATTERN: Lazy<Regex> =
+ Lazy::new(|| Regex::new("(?i)^(COMMIT TRANSACTION|COMMIT);?").unwrap());
+static ABORT_TRANSACTION_PATTERN: Lazy<Regex> =
+ Lazy::new(|| Regex::new("(?i)^(ABORT TRANSACTION|ROLLBACK);?").unwrap());
/// Test if given query statement matches the patterns
pub(crate) fn matches(query: &str) -> bool {
- TRANSACTION_PATTERN.captures(query).is_some()
+ START_TRANSACTION_PATTERN.is_match(query)
+ || COMMIT_TRANSACTION_PATTERN.is_match(query)
+ || ABORT_TRANSACTION_PATTERN.is_match(query)
|| SHOW_PATTERN.captures(query).is_some()
|| SET_TRANSACTION_PATTERN.is_match(query)
}
+fn set_transaction_warning(query_ctx: QueryContextRef) {
+ query_ctx.set_warning("Please note transaction is not supported in GreptimeDB.".to_string());
+}
+
/// Process unsupported SQL and return fixed result as a compatibility solution
-pub(crate) fn process<'a>(query: &str, _query_ctx: QueryContextRef) -> Option<Vec<Response<'a>>> {
+pub(crate) fn process<'a>(query: &str, query_ctx: QueryContextRef) -> Option<Vec<Response<'a>>> {
// Transaction directives:
- if let Some(tx) = TRANSACTION_PATTERN.captures(query) {
- let tx_tag = &tx[1];
- Some(vec![Response::Execution(Tag::new(&tx_tag.to_uppercase()))])
+ if START_TRANSACTION_PATTERN.is_match(query) {
+ set_transaction_warning(query_ctx);
+ if query.to_lowercase().starts_with("begin") {
+ Some(vec![Response::Execution(Tag::new("BEGIN"))])
+ } else {
+ Some(vec![Response::Execution(Tag::new("START TRANSACTION"))])
+ }
+ } else if ABORT_TRANSACTION_PATTERN.is_match(query) {
+ Some(vec![Response::Execution(Tag::new("ROLLBACK"))])
+ } else if COMMIT_TRANSACTION_PATTERN.is_match(query) {
+ Some(vec![Response::Execution(Tag::new("COMMIT"))])
} else if let Some(show_var) = SHOW_PATTERN.captures(query) {
let show_var = show_var[1].to_lowercase();
if let Some(value) = VAR_VALUES.get(&show_var.as_ref()) {
@@ -150,6 +168,19 @@ mod test {
"SET",
query_context.clone(),
);
+ assert_tag(
+ "START TRANSACTION isolation level READ COMMITTED;",
+ "START TRANSACTION",
+ query_context.clone(),
+ );
+ assert_tag(
+ "start transaction isolation level READ COMMITTED;",
+ "START TRANSACTION",
+ query_context.clone(),
+ );
+ assert_tag("abort transaction;", "ROLLBACK", query_context.clone());
+ assert_tag("commit transaction;", "COMMIT", query_context.clone());
+ assert_tag("COMMIT transaction;", "COMMIT", query_context.clone());
let resp = get_data("SHOW transaction isolation level", query_context.clone());
assert_eq!(1, resp.row_schema().len());
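For reference, a minimal self-contained sketch of the matching scheme this diff introduces: three case-insensitive regexes recognize the transaction statements and map them to the command tag a Postgres client expects back. The regex patterns are copied from the diff; the `classify` helper, `main`, and the `regex`/`once_cell` crate dependencies are illustrative assumptions, not part of the actual `fixtures.rs` module.

```rust
use once_cell::sync::Lazy;
use regex::Regex;

// Patterns as introduced in the diff above.
static START_TRANSACTION_PATTERN: Lazy<Regex> =
    Lazy::new(|| Regex::new("(?i)^(START TRANSACTION.*|BEGIN);?").unwrap());
static COMMIT_TRANSACTION_PATTERN: Lazy<Regex> =
    Lazy::new(|| Regex::new("(?i)^(COMMIT TRANSACTION|COMMIT);?").unwrap());
static ABORT_TRANSACTION_PATTERN: Lazy<Regex> =
    Lazy::new(|| Regex::new("(?i)^(ABORT TRANSACTION|ROLLBACK);?").unwrap());

/// Maps a transaction-control statement to the tag reported to the client.
fn classify(query: &str) -> Option<&'static str> {
    if START_TRANSACTION_PATTERN.is_match(query) {
        // `BEGIN` keeps its own tag; long-form statements report `START TRANSACTION`.
        if query.to_lowercase().starts_with("begin") {
            Some("BEGIN")
        } else {
            Some("START TRANSACTION")
        }
    } else if COMMIT_TRANSACTION_PATTERN.is_match(query) {
        Some("COMMIT")
    } else if ABORT_TRANSACTION_PATTERN.is_match(query) {
        Some("ROLLBACK")
    } else {
        None
    }
}

fn main() {
    assert_eq!(
        classify("START TRANSACTION isolation level READ COMMITTED;"),
        Some("START TRANSACTION")
    );
    assert_eq!(classify("begin;"), Some("BEGIN"));
    assert_eq!(classify("abort transaction;"), Some("ROLLBACK"));
    assert_eq!(classify("COMMIT transaction;"), Some("COMMIT"));
    assert_eq!(classify("SELECT 1;"), None);
}
```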
|
feat
|
add more transaction related statement for postgres interface (#5081)
|
2c3ff90dbce3380337922131832ec9642c18d384
|
2023-09-18 14:19:26
|
Weny Xu
|
feat: start services after first heartbeat response processed (#2424)
| false
|
diff --git a/config/datanode.example.toml b/config/datanode.example.toml
index c1baa7eb9217..808cc855d5a1 100644
--- a/config/datanode.example.toml
+++ b/config/datanode.example.toml
@@ -8,6 +8,9 @@ rpc_addr = "127.0.0.1:3001"
rpc_hostname = "127.0.0.1"
# The number of gRPC server worker threads, 8 by default.
rpc_runtime_size = 8
+# Start services after regions are coordinated.
+# It will block the datanode start if it can't receive the heartbeat from metasrv.
+coordination = false
[heartbeat]
# Interval for sending heartbeat messages to the Metasrv in milliseconds, 5000 by default.
diff --git a/src/datanode/src/alive_keeper.rs b/src/datanode/src/alive_keeper.rs
index 5f5fe9be1395..aed0abd5bd2a 100644
--- a/src/datanode/src/alive_keeper.rs
+++ b/src/datanode/src/alive_keeper.rs
@@ -34,6 +34,8 @@ use tokio::sync::{mpsc, Mutex};
use tokio::task::JoinHandle;
use tokio::time::{Duration, Instant};
+use crate::error::{self, Result};
+use crate::event_listener::{RegionServerEvent, RegionServerEventReceiver};
use crate::region_server::RegionServer;
const MAX_CLOSE_RETRY_TIMES: usize = 10;
@@ -54,7 +56,7 @@ pub struct RegionAliveKeeper {
region_server: RegionServer,
tasks: Arc<Mutex<HashMap<RegionId, Arc<CountdownTaskHandle>>>>,
heartbeat_interval_millis: u64,
- started: AtomicBool,
+ started: Arc<AtomicBool>,
/// The epoch when [RegionAliveKeeper] is created. It's used to get a monotonically non-decreasing
/// elapsed time when submitting heartbeats to Metasrv (because [Instant] is monotonically
@@ -69,7 +71,7 @@ impl RegionAliveKeeper {
region_server,
tasks: Arc::new(Mutex::new(HashMap::new())),
heartbeat_interval_millis,
- started: AtomicBool::new(false),
+ started: Arc::new(AtomicBool::new(false)),
epoch: Instant::now(),
}
}
@@ -141,17 +143,72 @@ impl RegionAliveKeeper {
deadline
}
- pub async fn start(&self) {
+ pub async fn start(
+ self: &Arc<Self>,
+ event_receiver: Option<RegionServerEventReceiver>,
+ ) -> Result<()> {
+ self.started.store(true, Ordering::Relaxed);
+
+ if let Some(mut event_receiver) = event_receiver {
+ let keeper = self.clone();
+            // Initializes the region alive keeper.
+            // It makes sure all opened regions are registered to `RegionAliveKeeper`.
+ loop {
+ match event_receiver.0.try_recv() {
+ Ok(RegionServerEvent::Registered(region_id)) => {
+ keeper.register_region(region_id).await;
+ }
+ Ok(RegionServerEvent::Deregistered(region_id)) => {
+ keeper.deregister_region(region_id).await;
+ }
+ Err(mpsc::error::TryRecvError::Disconnected) => {
+ return error::UnexpectedSnafu {
+ violated: "RegionServerEventSender closed",
+ }
+ .fail()
+ }
+ Err(mpsc::error::TryRecvError::Empty) => {
+ break;
+ }
+ }
+ }
+ let running = self.started.clone();
+
+ // Watches changes
+ common_runtime::spawn_bg(async move {
+ loop {
+ if !running.load(Ordering::Relaxed) {
+ info!("RegionAliveKeeper stopped! Quits the watch loop!");
+ break;
+ }
+
+ match event_receiver.0.recv().await {
+ Some(RegionServerEvent::Registered(region_id)) => {
+ keeper.register_region(region_id).await;
+ }
+ Some(RegionServerEvent::Deregistered(region_id)) => {
+ keeper.deregister_region(region_id).await;
+ }
+ None => {
+ info!("RegionServerEventSender closed! Quits the watch loop!");
+ break;
+ }
+ }
+ }
+ });
+ }
+
let tasks = self.tasks.lock().await;
for task in tasks.values() {
task.start(self.heartbeat_interval_millis).await;
}
- self.started.store(true, Ordering::Relaxed);
info!(
"RegionAliveKeeper is started with region {:?}",
tasks.keys().map(|x| x.to_string()).collect::<Vec<_>>(),
);
+
+ Ok(())
}
pub fn epoch(&self) -> Instant {
@@ -383,14 +440,14 @@ mod test {
#[tokio::test(flavor = "multi_thread")]
async fn region_alive_keeper() {
let region_server = mock_region_server();
- let alive_keeper = RegionAliveKeeper::new(region_server, 300);
+ let alive_keeper = Arc::new(RegionAliveKeeper::new(region_server, 300));
let region_id = RegionId::new(1, 2);
// register a region before starting
alive_keeper.register_region(region_id).await;
assert!(alive_keeper.find_handle(region_id).await.is_some());
- alive_keeper.start().await;
+ alive_keeper.start(None).await.unwrap();
// started alive keeper should assign deadline to this region
let deadline = alive_keeper.deadline(region_id).await.unwrap();
diff --git a/src/datanode/src/config.rs b/src/datanode/src/config.rs
index d21608dfd9fd..dbcf944cffca 100644
--- a/src/datanode/src/config.rs
+++ b/src/datanode/src/config.rs
@@ -320,6 +320,7 @@ impl From<&DatanodeOptions> for StorageEngineConfig {
pub struct DatanodeOptions {
pub mode: Mode,
pub node_id: Option<u64>,
+ pub coordination: bool,
pub rpc_addr: String,
pub rpc_hostname: Option<String>,
pub rpc_runtime_size: usize,
@@ -339,6 +340,7 @@ impl Default for DatanodeOptions {
Self {
mode: Mode::Standalone,
node_id: None,
+ coordination: false,
rpc_addr: "127.0.0.1:3001".to_string(),
rpc_hostname: None,
rpc_runtime_size: 8,
diff --git a/src/datanode/src/datanode.rs b/src/datanode/src/datanode.rs
index 9903b9d39ed7..ab567121e7b8 100644
--- a/src/datanode/src/datanode.rs
+++ b/src/datanode/src/datanode.rs
@@ -44,6 +44,7 @@ use store_api::region_engine::RegionEngineRef;
use store_api::region_request::{RegionOpenRequest, RegionRequest};
use store_api::storage::RegionId;
use tokio::fs;
+use tokio::sync::Notify;
use crate::config::{DatanodeOptions, RegionEngineConfig};
use crate::error::{
@@ -71,6 +72,7 @@ pub struct Datanode {
region_event_receiver: Option<RegionServerEventReceiver>,
region_server: RegionServer,
greptimedb_telemetry_task: Arc<GreptimeDBTelemetryTask>,
+ coordinated_notifier: Option<Arc<Notify>>,
}
impl Datanode {
@@ -78,6 +80,7 @@ impl Datanode {
info!("Starting datanode instance...");
self.start_heartbeat().await?;
+ self.wait_coordinated().await;
let _ = self.greptimedb_telemetry_task.start();
self.start_services().await
@@ -87,11 +90,20 @@ impl Datanode {
if let Some(task) = &self.heartbeat_task {
// Safety: The event_receiver must exist.
let receiver = self.region_event_receiver.take().unwrap();
- task.start(receiver).await?;
+
+ task.start(receiver, self.coordinated_notifier.clone())
+ .await?;
}
Ok(())
}
+ /// If `coordinated_notifier` exists, it waits for all regions to be coordinated.
+ pub async fn wait_coordinated(&mut self) {
+ if let Some(notifier) = self.coordinated_notifier.take() {
+ notifier.notified().await;
+ }
+ }
+
/// Start services of datanode. This method call will block until services are shutdown.
pub async fn start_services(&mut self) -> Result<()> {
if let Some(service) = self.services.as_mut() {
@@ -237,6 +249,12 @@ impl DatanodeBuilder {
)
.await;
+ let coordinated_notifier = if self.opts.coordination && matches!(mode, Mode::Distributed) {
+ Some(Arc::new(Notify::new()))
+ } else {
+ None
+ };
+
Ok(Datanode {
opts: self.opts,
services,
@@ -244,6 +262,7 @@ impl DatanodeBuilder {
region_server,
greptimedb_telemetry_task,
region_event_receiver,
+ coordinated_notifier,
})
}
diff --git a/src/datanode/src/heartbeat.rs b/src/datanode/src/heartbeat.rs
index 39e4310f9bd0..2b7cb04c5cbc 100644
--- a/src/datanode/src/heartbeat.rs
+++ b/src/datanode/src/heartbeat.rs
@@ -28,14 +28,14 @@ use common_telemetry::{debug, error, info, trace, warn};
use meta_client::client::{HeartbeatSender, MetaClient, MetaClientBuilder};
use meta_client::MetaClientOptions;
use snafu::ResultExt;
-use tokio::sync::mpsc;
+use tokio::sync::{mpsc, Notify};
use tokio::time::Instant;
use self::handler::RegionHeartbeatResponseHandler;
use crate::alive_keeper::RegionAliveKeeper;
use crate::config::DatanodeOptions;
use crate::error::{self, MetaClientInitSnafu, Result};
-use crate::event_listener::{RegionServerEvent, RegionServerEventReceiver};
+use crate::event_listener::RegionServerEventReceiver;
use crate::region_server::RegionServer;
pub(crate) mod handler;
@@ -96,6 +96,7 @@ impl HeartbeatTask {
running: Arc<AtomicBool>,
handler_executor: HeartbeatResponseHandlerExecutorRef,
mailbox: MailboxRef,
+ mut notify: Option<Arc<Notify>>,
) -> Result<HeartbeatSender> {
let client_id = meta_client.id();
@@ -111,11 +112,13 @@ impl HeartbeatTask {
if let Some(msg) = res.mailbox_message.as_ref() {
info!("Received mailbox message: {msg:?}, meta_client id: {client_id:?}");
}
-
let ctx = HeartbeatResponseHandlerContext::new(mailbox.clone(), res);
if let Err(e) = Self::handle_response(ctx, handler_executor.clone()).await {
error!(e; "Error while handling heartbeat response");
}
+ if let Some(notify) = notify.take() {
+ notify.notify_one();
+ }
if !running.load(Ordering::Acquire) {
info!("Heartbeat task shutdown");
}
@@ -137,7 +140,11 @@ impl HeartbeatTask {
}
/// Start heartbeat task, spawn background task.
- pub async fn start(&self, mut event_receiver: RegionServerEventReceiver) -> Result<()> {
+ pub async fn start(
+ &self,
+ event_receiver: RegionServerEventReceiver,
+ notify: Option<Arc<Notify>>,
+ ) -> Result<()> {
let running = self.running.clone();
if running
.compare_exchange(false, true, Ordering::AcqRel, Ordering::Acquire)
@@ -152,8 +159,6 @@ impl HeartbeatTask {
let addr = resolve_addr(&self.server_addr, &self.server_hostname);
info!("Starting heartbeat to Metasrv with interval {interval}. My node id is {node_id}, address is {addr}.");
- self.region_alive_keeper.start().await;
-
let meta_client = self.meta_client.clone();
let region_server_clone = self.region_server.clone();
@@ -167,6 +172,7 @@ impl HeartbeatTask {
running.clone(),
handler_executor.clone(),
mailbox.clone(),
+ notify,
)
.await?;
@@ -176,31 +182,7 @@ impl HeartbeatTask {
});
let epoch = self.region_alive_keeper.epoch();
- let keeper = self.region_alive_keeper.clone();
-
- common_runtime::spawn_bg(async move {
- loop {
- if !running.load(Ordering::Relaxed) {
- info!("shutdown heartbeat task");
- break;
- }
-
- match event_receiver.0.recv().await {
- Some(RegionServerEvent::Registered(region_id)) => {
- keeper.register_region(region_id).await;
- }
- Some(RegionServerEvent::Deregistered(region_id)) => {
- keeper.deregister_region(region_id).await;
- }
- None => {
- info!("region server event sender closed!");
- break;
- }
- }
- }
- });
-
- let running = self.running.clone();
+ self.region_alive_keeper.start(Some(event_receiver)).await?;
common_runtime::spawn_bg(async move {
let sleep = tokio::time::sleep(Duration::from_millis(0));
@@ -256,6 +238,7 @@ impl HeartbeatTask {
running.clone(),
handler_executor.clone(),
mailbox.clone(),
+ None,
)
.await
{
diff --git a/tests/conf/datanode-test.toml.template b/tests/conf/datanode-test.toml.template
index 6280ad48b430..4a692e97423c 100644
--- a/tests/conf/datanode-test.toml.template
+++ b/tests/conf/datanode-test.toml.template
@@ -3,6 +3,7 @@ mode = 'distributed'
rpc_addr = '127.0.0.1:4100'
rpc_hostname = '127.0.0.1'
rpc_runtime_size = 8
+coordination = true
[wal]
file_size = '1GB'
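To make the control flow of this change easier to follow, here is a minimal sketch of the startup gate it introduces, written against plain tokio: the heartbeat loop calls `notify_one` once after the first response has been handled, and the datanode side blocks in `notified().await` before starting services. Only the `tokio::sync::Notify` usage mirrors the diff; every other name below is illustrative, not an actual GreptimeDB type.

```rust
use std::sync::Arc;
use std::time::Duration;

use tokio::sync::Notify;

// Stand-in for the heartbeat response loop: after the first response is
// handled, the optional notifier is taken and fired exactly once.
async fn heartbeat_loop(mut coordinated: Option<Arc<Notify>>) {
    loop {
        // Pretend a heartbeat response was received and handled here.
        tokio::time::sleep(Duration::from_millis(50)).await;

        if let Some(notify) = coordinated.take() {
            notify.notify_one();
        }
    }
}

#[tokio::main]
async fn main() {
    // `coordination = true` in the datanode config corresponds to creating
    // the notifier; `false` (the default) leaves it as `None` and skips the wait.
    let coordinated = Some(Arc::new(Notify::new()));

    tokio::spawn(heartbeat_loop(coordinated.clone()));

    // Equivalent of `Datanode::wait_coordinated`: block until the first
    // heartbeat response has been processed, then continue with startup.
    if let Some(notifier) = coordinated {
        notifier.notified().await;
    }
    println!("regions coordinated, starting services");
}
```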
|
feat
|
start services after first heartbeat response processed (#2424)
|
9af41600687ebaee837694d50bf1668bae5ffb1f
|
2025-01-23 17:18:37
|
Ruihang Xia
|
fix(log-query): panic on prometheus (#5429)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 98ac14afd248..ea055ec03d24 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -11762,6 +11762,7 @@ dependencies = [
"hex",
"hyper-util",
"itertools 0.10.5",
+ "log-query",
"loki-api",
"meta-client",
"meta-srv",
diff --git a/src/servers/src/http/logs.rs b/src/servers/src/http/logs.rs
index 0375865b31da..ffdfdb034b83 100644
--- a/src/servers/src/http/logs.rs
+++ b/src/servers/src/http/logs.rs
@@ -38,7 +38,7 @@ pub async fn logs(
query_ctx.set_channel(Channel::Http);
let query_ctx = Arc::new(query_ctx);
- let _timer = crate::metrics::METRIC_HTTP_LOGS_INGESTION_ELAPSED
+ let _timer = crate::metrics::METRIC_HTTP_LOGS_ELAPSED
.with_label_values(&[db.as_str()])
.start_timer();
diff --git a/tests-integration/Cargo.toml b/tests-integration/Cargo.toml
index 9f70eafc465b..e6b6a8e166ea 100644
--- a/tests-integration/Cargo.toml
+++ b/tests-integration/Cargo.toml
@@ -46,6 +46,7 @@ frontend = { workspace = true, features = ["testing"] }
futures.workspace = true
futures-util.workspace = true
hyper-util = { workspace = true, features = ["tokio"] }
+log-query = { workspace = true }
loki-api.workspace = true
meta-client.workspace = true
meta-srv = { workspace = true, features = ["mock"] }
diff --git a/tests-integration/src/test_util.rs b/tests-integration/src/test_util.rs
index 7235833a3bb5..a326681e0f52 100644
--- a/tests-integration/src/test_util.rs
+++ b/tests-integration/src/test_util.rs
@@ -393,6 +393,7 @@ pub async fn setup_test_http_app(store_type: StorageType, name: &str) -> (Router
};
let http_server = HttpServerBuilder::new(http_opts)
.with_sql_handler(ServerSqlQueryHandlerAdapter::arc(instance.instance.clone()))
+ .with_logs_handler(instance.instance.clone())
.with_metrics_handler(MetricsHandler)
.with_greptime_config_options(instance.opts.datanode_options().to_toml().unwrap())
.build();
@@ -425,6 +426,7 @@ pub async fn setup_test_http_app_with_frontend_and_user_provider(
http_server = http_server
.with_sql_handler(ServerSqlQueryHandlerAdapter::arc(instance.instance.clone()))
.with_log_ingest_handler(instance.instance.clone(), None, None)
+ .with_logs_handler(instance.instance.clone())
.with_otlp_handler(instance.instance.clone())
.with_greptime_config_options(instance.opts.to_toml().unwrap());
@@ -474,6 +476,7 @@ pub async fn setup_test_prom_app_with_frontend(
let is_strict_mode = true;
let http_server = HttpServerBuilder::new(http_opts)
.with_sql_handler(ServerSqlQueryHandlerAdapter::arc(frontend_ref.clone()))
+ .with_logs_handler(instance.instance.clone())
.with_prom_handler(frontend_ref.clone(), true, is_strict_mode)
.with_prometheus_handler(frontend_ref)
.with_greptime_config_options(instance.opts.datanode_options().to_toml().unwrap())
diff --git a/tests-integration/tests/http.rs b/tests-integration/tests/http.rs
index 76c17ac5cdcf..714f19a972ca 100644
--- a/tests-integration/tests/http.rs
+++ b/tests-integration/tests/http.rs
@@ -22,6 +22,7 @@ use axum::http::{HeaderName, HeaderValue, StatusCode};
use common_error::status_code::StatusCode as ErrorCode;
use flate2::write::GzEncoder;
use flate2::Compression;
+use log_query::{Context, Limit, LogQuery, TimeFilter};
use loki_api::logproto::{EntryAdapter, PushRequest, StreamAdapter};
use loki_api::prost_types::Timestamp;
use opentelemetry_proto::tonic::collector::logs::v1::ExportLogsServiceRequest;
@@ -39,6 +40,7 @@ use servers::http::result::influxdb_result_v1::{InfluxdbOutput, InfluxdbV1Respon
use servers::http::test_helpers::{TestClient, TestResponse};
use servers::http::GreptimeQueryOutput;
use servers::prom_store;
+use table::table_name::TableName;
use tests_integration::test_util::{
setup_test_http_app, setup_test_http_app_with_frontend,
setup_test_http_app_with_frontend_and_user_provider, setup_test_prom_app_with_frontend,
@@ -99,6 +101,7 @@ macro_rules! http_tests {
test_loki_json_logs,
test_elasticsearch_logs,
test_elasticsearch_logs_with_index,
+ test_log_query,
);
)*
};
@@ -2140,6 +2143,61 @@ pub async fn test_elasticsearch_logs_with_index(store_type: StorageType) {
guard.remove_all().await;
}
+pub async fn test_log_query(store_type: StorageType) {
+ common_telemetry::init_default_ut_logging();
+ let (app, mut guard) = setup_test_http_app_with_frontend(store_type, "test_log_query").await;
+
+ let client = TestClient::new(app).await;
+
+ // prepare data with SQL API
+ let res = client
+ .get("/v1/sql?sql=create table logs (`ts` timestamp time index, message string);")
+ .send()
+ .await;
+ assert_eq!(res.status(), StatusCode::OK, "{:?}", res.text().await);
+ let res = client
+ .post("/v1/sql?sql=insert into logs values ('2024-11-07 10:53:50', 'hello');")
+ .header("Content-Type", "application/x-www-form-urlencoded")
+ .send()
+ .await;
+ assert_eq!(res.status(), StatusCode::OK, "{:?}", res.text().await);
+
+ // test log query
+ let log_query = LogQuery {
+ table: TableName {
+ catalog_name: "greptime".to_string(),
+ schema_name: "public".to_string(),
+ table_name: "logs".to_string(),
+ },
+ time_filter: TimeFilter {
+ start: Some("2024-11-07".to_string()),
+ end: None,
+ span: None,
+ },
+ limit: Limit {
+ skip: None,
+ fetch: Some(1),
+ },
+ columns: vec!["ts".to_string(), "message".to_string()],
+ filters: vec![],
+ context: Context::None,
+ exprs: vec![],
+ };
+ let res = client
+ .post("/v1/logs")
+ .header("Content-Type", "application/json")
+ .body(serde_json::to_string(&log_query).unwrap())
+ .send()
+ .await;
+
+ assert_eq!(res.status(), StatusCode::OK, "{:?}", res.text().await);
+ let resp = res.text().await;
+ let v = get_rows_from_output(&resp);
+ assert_eq!(v, "[[1730976830000,\"hello\"]]");
+
+ guard.remove_all().await;
+}
+
async fn validate_data(test_name: &str, client: &TestClient, sql: &str, expected: &str) {
let res = client
.get(format!("/v1/sql?sql={sql}").as_str())
|
fix
|
panic on prometheus (#5429)
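The one-line metric rename in the diff above relies on the usual prometheus-crate scoped-timer pattern: with_label_values selects the labelled child histogram and start_timer records the elapsed time when the guard is dropped. A minimal sketch follows, assuming the prometheus and lazy_static crates; the metric name below is illustrative, not the one defined in servers::metrics.

use lazy_static::lazy_static;
use prometheus::{register_histogram_vec, HistogramVec};

lazy_static! {
    // Histogram keyed by database name; the metric name is illustrative.
    static ref HTTP_LOGS_ELAPSED: HistogramVec = register_histogram_vec!(
        "example_http_logs_elapsed",
        "Elapsed time of the /v1/logs handler",
        &["db"]
    )
    .unwrap();
}

fn handle_logs(db: &str) {
    // The guard records the elapsed time when it is dropped, i.e. when
    // this scope ends, including early returns and `?` propagation.
    let _timer = HTTP_LOGS_ELAPSED.with_label_values(&[db]).start_timer();
    // ... handler body ...
}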
|
56fc77e573517fc3263f2237a2edc196ae383da1
|
2023-11-23 08:29:49
|
Ruihang Xia
|
fix: add missing error display message (#2791)
| false
|
diff --git a/src/catalog/src/error.rs b/src/catalog/src/error.rs
index 05c675d29566..6602aecbd4ac 100644
--- a/src/catalog/src/error.rs
+++ b/src/catalog/src/error.rs
@@ -180,7 +180,7 @@ pub enum Error {
source: table::error::Error,
},
- #[snafu(display(""))]
+ #[snafu(display("Internal error"))]
Internal {
location: Location,
source: BoxedError,
@@ -216,7 +216,7 @@ pub enum Error {
#[snafu(display("Illegal access to catalog: {} and schema: {}", catalog, schema))]
QueryAccessDenied { catalog: String, schema: String },
- #[snafu(display(""))]
+ #[snafu(display("DataFusion error"))]
Datafusion {
#[snafu(source)]
error: DataFusionError,
diff --git a/src/frontend/src/error.rs b/src/frontend/src/error.rs
index 5f851768b2b8..b55cd398444d 100644
--- a/src/frontend/src/error.rs
+++ b/src/frontend/src/error.rs
@@ -44,7 +44,7 @@ pub enum Error {
source: common_meta::error::Error,
},
- #[snafu(display(""))]
+ #[snafu(display("External error"))]
External {
location: Location,
source: BoxedError,
@@ -170,7 +170,7 @@ pub enum Error {
source: query::error::Error,
},
- #[snafu(display(""))]
+ #[snafu(display("Operation to region server failed"))]
InvokeRegionServer {
location: Location,
source: servers::error::Error,
diff --git a/src/query/src/datafusion/error.rs b/src/query/src/datafusion/error.rs
index d94fb0d89006..9d52c8fa9d97 100644
--- a/src/query/src/datafusion/error.rs
+++ b/src/query/src/datafusion/error.rs
@@ -25,7 +25,7 @@ use snafu::{Location, Snafu};
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum InnerError {
- #[snafu(display(""))]
+ #[snafu(display("DataFusion error"))]
Datafusion {
#[snafu(source)]
error: DataFusionError,
diff --git a/src/query/src/error.rs b/src/query/src/error.rs
index 7999b4b49871..cbe8256215be 100644
--- a/src/query/src/error.rs
+++ b/src/query/src/error.rs
@@ -121,7 +121,7 @@ pub enum Error {
location: Location,
},
- #[snafu(display(""))]
+ #[snafu(display("DataFusion error"))]
DataFusion {
#[snafu(source)]
error: DataFusionError,
@@ -140,7 +140,7 @@ pub enum Error {
source: sql::error::Error,
},
- #[snafu(display(""))]
+ #[snafu(display("Failed to plan SQL"))]
PlanSql {
#[snafu(source)]
error: DataFusionError,
diff --git a/src/script/src/python/error.rs b/src/script/src/python/error.rs
index 376f734af693..ad00998f3a4c 100644
--- a/src/script/src/python/error.rs
+++ b/src/script/src/python/error.rs
@@ -76,7 +76,7 @@ pub enum Error {
error: ArrowError,
},
- #[snafu(display(""))]
+ #[snafu(display("DataFusion error"))]
DataFusion {
location: SnafuLocation,
#[snafu(source)]
diff --git a/src/servers/src/error.rs b/src/servers/src/error.rs
index 2cf9cf8b89ae..35e8dc301679 100644
--- a/src/servers/src/error.rs
+++ b/src/servers/src/error.rs
@@ -339,8 +339,12 @@ pub enum Error {
source: crate::http::pprof::nix::Error,
},
- #[snafu(display(""))]
- Metrics { source: BoxedError },
+ #[snafu(display("Failed to update jemalloc metrics"))]
+ UpdateJemallocMetrics {
+ #[snafu(source)]
+ error: tikv_jemalloc_ctl::Error,
+ location: Location,
+ },
#[snafu(display("DataFrame operation error"))]
DataFrame {
@@ -408,7 +412,8 @@ impl ErrorExt for Error {
| TcpIncoming { .. }
| CatalogError { .. }
| GrpcReflectionService { .. }
- | BuildHttpResponse { .. } => StatusCode::Internal,
+ | BuildHttpResponse { .. }
+ | UpdateJemallocMetrics { .. } => StatusCode::Internal,
CollectRecordbatch { .. } => StatusCode::EngineExecuteQuery,
@@ -479,8 +484,6 @@ impl ErrorExt for Error {
#[cfg(feature = "pprof")]
DumpPprof { source, .. } => source.status_code(),
- Metrics { source } => source.status_code(),
-
ConvertScalarValue { source, .. } => source.status_code(),
ToJson { .. } => StatusCode::Internal,
diff --git a/src/servers/src/metrics/jemalloc.rs b/src/servers/src/metrics/jemalloc.rs
index 26cf5a21ac50..6b977301360f 100644
--- a/src/servers/src/metrics/jemalloc.rs
+++ b/src/servers/src/metrics/jemalloc.rs
@@ -12,10 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-mod error;
-
use common_telemetry::error;
-use error::UpdateJemallocMetricsSnafu;
use lazy_static::lazy_static;
use once_cell::sync::Lazy;
use prometheus::*;
@@ -23,6 +20,8 @@ use snafu::ResultExt;
use tikv_jemalloc_ctl::stats::{allocated_mib, resident_mib};
use tikv_jemalloc_ctl::{epoch, epoch_mib, stats};
+use crate::error::UpdateJemallocMetricsSnafu;
+
lazy_static! {
pub static ref SYS_JEMALLOC_RESIDEN: IntGauge = register_int_gauge!(
"sys_jemalloc_resident",
diff --git a/src/servers/src/metrics/jemalloc/error.rs b/src/servers/src/metrics/jemalloc/error.rs
deleted file mode 100644
index 14c125ce6f86..000000000000
--- a/src/servers/src/metrics/jemalloc/error.rs
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use std::any::Any;
-
-use common_error::ext::{BoxedError, ErrorExt};
-use common_error::status_code::StatusCode;
-use common_macro::stack_trace_debug;
-use snafu::{Location, Snafu};
-
-#[derive(Snafu)]
-#[snafu(visibility(pub))]
-#[stack_trace_debug]
-pub enum Error {
- #[snafu(display("Failed to update jemalloc metrics"))]
- UpdateJemallocMetrics {
- #[snafu(source)]
- error: tikv_jemalloc_ctl::Error,
- location: Location,
- },
-}
-
-impl ErrorExt for Error {
- fn status_code(&self) -> StatusCode {
- match self {
- Error::UpdateJemallocMetrics { .. } => StatusCode::Internal,
- }
- }
-
- fn as_any(&self) -> &dyn Any {
- self
- }
-}
-
-impl From<Error> for crate::error::Error {
- fn from(e: Error) -> Self {
- Self::Metrics {
- source: BoxedError::new(e),
- }
- }
-}
diff --git a/src/table/src/error.rs b/src/table/src/error.rs
index 6ac4c970fe9e..5d034d09e2c3 100644
--- a/src/table/src/error.rs
+++ b/src/table/src/error.rs
@@ -30,7 +30,7 @@ pub type Result<T> = std::result::Result<T, Error>;
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
- #[snafu(display(""))]
+ #[snafu(display("DataFusion error"))]
Datafusion {
#[snafu(source)]
error: DataFusionError,
diff --git a/tests/cases/distributed/optimizer/filter_push_down.result b/tests/cases/distributed/optimizer/filter_push_down.result
index 2b2d7af35550..67370b04e4a6 100644
--- a/tests/cases/distributed/optimizer/filter_push_down.result
+++ b/tests/cases/distributed/optimizer/filter_push_down.result
@@ -187,7 +187,7 @@ SELECT * FROM integers i1 WHERE NOT EXISTS(SELECT i FROM integers WHERE i=i1.i)
SELECT i1.i,i2.i FROM integers i1, integers i2 WHERE i1.i=(SELECT i FROM integers WHERE i1.i=i) AND i1.i=i2.i ORDER BY i1.i;
-Error: 3001(EngineExecuteQuery), Error during planning: Correlated scalar subquery must be aggregated to return at most one row
+Error: 3001(EngineExecuteQuery), DataFusion error: Error during planning: Correlated scalar subquery must be aggregated to return at most one row
SELECT * FROM (SELECT i1.i AS a, i2.i AS b FROM integers i1, integers i2) a1 WHERE a=b ORDER BY 1;
@@ -241,7 +241,7 @@ Error: 3001(EngineExecuteQuery), Invalid argument error: must either specify a r
SELECT * FROM (SELECT 0=1 AS cond FROM integers i1, integers i2 GROUP BY 1) a1 WHERE cond ORDER BY 1;
-Error: 3001(EngineExecuteQuery), Error during planning: Attempted to create Filter predicate with expression `Boolean(false)` aliased as 'Int64(0) = Int64(1)'. Filter predicates should not be aliased.
+Error: 3001(EngineExecuteQuery), DataFusion error: Error during planning: Attempted to create Filter predicate with expression `Boolean(false)` aliased as 'Int64(0) = Int64(1)'. Filter predicates should not be aliased.
DROP TABLE integers;
diff --git a/tests/cases/standalone/common/aggregate/distinct_order_by.result b/tests/cases/standalone/common/aggregate/distinct_order_by.result
index a3fc6a8698d0..996048bed8e1 100644
--- a/tests/cases/standalone/common/aggregate/distinct_order_by.result
+++ b/tests/cases/standalone/common/aggregate/distinct_order_by.result
@@ -25,11 +25,11 @@ SELECT DISTINCT i%2 FROM integers ORDER BY 1;
-- +-----------------------+
SELECT DISTINCT i % 2 FROM integers WHERE i<3 ORDER BY i;
-Error: 3000(PlanQuery), Error during planning: For SELECT DISTINCT, ORDER BY expressions i must appear in select list
+Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: For SELECT DISTINCT, ORDER BY expressions i must appear in select list
SELECT DISTINCT ON (1) i % 2, i FROM integers WHERE i<3 ORDER BY i;
-Error: 3000(PlanQuery), This feature is not implemented: DISTINCT ON Exprs not supported
+Error: 3000(PlanQuery), Failed to plan SQL: This feature is not implemented: DISTINCT ON Exprs not supported
SELECT DISTINCT integers.i FROM integers ORDER BY i DESC;
diff --git a/tests/cases/standalone/common/alter/rename_table.result b/tests/cases/standalone/common/alter/rename_table.result
index 19015c91b258..bffd78ad94ab 100644
--- a/tests/cases/standalone/common/alter/rename_table.result
+++ b/tests/cases/standalone/common/alter/rename_table.result
@@ -35,7 +35,7 @@ Error: 4001(TableNotFound), Table not found: t
SELECT * FROM t;
-Error: 3000(PlanQuery), Error during planning: Table not found: greptime.public.t
+Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: Table not found: greptime.public.t
CREATE TABLE t(i INTEGER, j TIMESTAMP TIME INDEX);
diff --git a/tests/cases/standalone/common/catalog/schema.result b/tests/cases/standalone/common/catalog/schema.result
index d62cd3d02d69..17e4a68536b4 100644
--- a/tests/cases/standalone/common/catalog/schema.result
+++ b/tests/cases/standalone/common/catalog/schema.result
@@ -118,7 +118,7 @@ Error: 1001(Unsupported), SQL statement is not supported: DROP SCHEMA test_publi
SELECT * FROM test_public_schema.hello;
-Error: 3000(PlanQuery), Error during planning: Table not found: greptime.test_public_schema.hello
+Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: Table not found: greptime.test_public_schema.hello
USE public;
diff --git a/tests/cases/standalone/common/create/upper_case_table_name.result b/tests/cases/standalone/common/create/upper_case_table_name.result
index f7d434f85d0c..c2957f0496cc 100644
--- a/tests/cases/standalone/common/create/upper_case_table_name.result
+++ b/tests/cases/standalone/common/create/upper_case_table_name.result
@@ -16,7 +16,7 @@ Affected Rows: 2
select * from system_Metric;
-Error: 3000(PlanQuery), Error during planning: Table not found: greptime.upper_case_table_name.system_metric
+Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: Table not found: greptime.upper_case_table_name.system_metric
select * from "system_Metric";
diff --git a/tests/cases/standalone/common/cte/cte.result b/tests/cases/standalone/common/cte/cte.result
index cd42b15566dc..2f143c964af4 100644
--- a/tests/cases/standalone/common/cte/cte.result
+++ b/tests/cases/standalone/common/cte/cte.result
@@ -57,12 +57,12 @@ with cte1 as (select i as j from a), cte2 as (select ref.j as k from cte1 as ref
with cte1 as (select 42), cte1 as (select 42) select * FROM cte1;
-Error: 3000(PlanQuery), sql parser error: WITH query name "cte1" specified more than once
+Error: 3000(PlanQuery), Failed to plan SQL: sql parser error: WITH query name "cte1" specified more than once
-- reference to CTE before its actually defined, it's not supported by datafusion
with cte3 as (select ref2.j as i from cte1 as ref2), cte1 as (Select i as j from a), cte2 as (select ref.j+1 as k from cte1 as ref) select * from cte2 union all select * FROM cte3;
-Error: 3000(PlanQuery), Error during planning: Table not found: greptime.public.cte1
+Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: Table not found: greptime.public.cte1
with cte1 as (Select i as j from a) select * from cte1 cte11, cte1 cte12;
@@ -109,7 +109,7 @@ WITH RECURSIVE cte(d) AS (
)
SELECT max(d) FROM cte;
-Error: 3000(PlanQuery), This feature is not implemented: Recursive CTEs are not supported
+Error: 3000(PlanQuery), Failed to plan SQL: This feature is not implemented: Recursive CTEs are not supported
-- Nested aliases is not supported in datafusion
with cte (a) as (
@@ -121,7 +121,7 @@ select
from cte
where alias2 > 0;
-Error: 3000(PlanQuery), No field named alias2. Valid fields are cte.a.
+Error: 3000(PlanQuery), Failed to plan SQL: No field named alias2. Valid fields are cte.a.
drop table a;
diff --git a/tests/cases/standalone/common/cte/cte_in_cte.result b/tests/cases/standalone/common/cte/cte_in_cte.result
index 9e50caef2a25..b2380be7fc3f 100644
--- a/tests/cases/standalone/common/cte/cte_in_cte.result
+++ b/tests/cases/standalone/common/cte/cte_in_cte.result
@@ -40,7 +40,7 @@ with cte1 as (with b as (Select i as j from a) select j from b), cte2 as (with c
with cte1 as (select 42), cte1 as (select 42) select * FROM cte1;
-Error: 3000(PlanQuery), sql parser error: WITH query name "cte1" specified more than once
+Error: 3000(PlanQuery), Failed to plan SQL: sql parser error: WITH query name "cte1" specified more than once
with cte1 as (Select i as j from a) select * from (with cte2 as (select max(j) as j from cte1) select * from cte2) f;
@@ -64,12 +64,12 @@ with cte1 as (Select i as j from a) select * from cte1 where j = (with cte2 as (
-- this feature is not implemented in datafusion
with cte as (Select i as j from a) select * from cte where j = (with cte as (select max(j) as j from cte) select j from cte);
-Error: 3000(PlanQuery), sql parser error: WITH query name "cte" specified more than once
+Error: 3000(PlanQuery), Failed to plan SQL: sql parser error: WITH query name "cte" specified more than once
-- self-refer to non-existent cte-
with cte as (select * from cte) select * from cte;
-Error: 3000(PlanQuery), Error during planning: Table not found: greptime.public.cte
+Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: Table not found: greptime.public.cte
drop table a;
diff --git a/tests/cases/standalone/common/insert/insert_select.result b/tests/cases/standalone/common/insert/insert_select.result
index 5fd5efbe61da..5bb2bb97fb55 100644
--- a/tests/cases/standalone/common/insert/insert_select.result
+++ b/tests/cases/standalone/common/insert/insert_select.result
@@ -12,15 +12,15 @@ Affected Rows: 2
insert into demo2(host) select * from demo1;
-Error: 3000(PlanQuery), Error during planning: Column count doesn't match insert query!
+Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: Column count doesn't match insert query!
insert into demo2 select cpu,memory from demo1;
-Error: 3000(PlanQuery), Error during planning: Column count doesn't match insert query!
+Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: Column count doesn't match insert query!
insert into demo2(ts) select memory from demo1;
-Error: 3000(PlanQuery), Error during planning: Cannot automatically convert Float64 to Timestamp(Millisecond, None)
+Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: Cannot automatically convert Float64 to Timestamp(Millisecond, None)
insert into demo2 select * from demo1;
diff --git a/tests/cases/standalone/common/insert/special_value.result b/tests/cases/standalone/common/insert/special_value.result
index e4b030e64a21..33048217d496 100644
--- a/tests/cases/standalone/common/insert/special_value.result
+++ b/tests/cases/standalone/common/insert/special_value.result
@@ -23,7 +23,7 @@ select * from data;
insert into data values (4, 'infinityyyy'::double);
-Error: 3001(EngineExecuteQuery), Cast error: Cannot cast string 'infinityyyy' to value of Float64 type
+Error: 3001(EngineExecuteQuery), DataFusion error: Cast error: Cannot cast string 'infinityyyy' to value of Float64 type
drop table data;
diff --git a/tests/cases/standalone/common/order/limit.result b/tests/cases/standalone/common/order/limit.result
index a12352ee8410..364846075837 100644
--- a/tests/cases/standalone/common/order/limit.result
+++ b/tests/cases/standalone/common/order/limit.result
@@ -16,27 +16,27 @@ SELECT a FROM test LIMIT 1;
SELECT a FROM test LIMIT 1.25;
-Error: 3000(PlanQuery), Error during planning: LIMIT must not be negative
+Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: LIMIT must not be negative
SELECT a FROM test LIMIT 2-1;
-Error: 3000(PlanQuery), Error during planning: LIMIT must not be negative
+Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: LIMIT must not be negative
SELECT a FROM test LIMIT a;
-Error: 3000(PlanQuery), Error during planning: LIMIT must not be negative
+Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: LIMIT must not be negative
SELECT a FROM test LIMIT a+1;
-Error: 3000(PlanQuery), Error during planning: LIMIT must not be negative
+Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: LIMIT must not be negative
SELECT a FROM test LIMIT SUM(42);
-Error: 3000(PlanQuery), Error during planning: LIMIT must not be negative
+Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: LIMIT must not be negative
SELECT a FROM test LIMIT row_number() OVER ();
-Error: 3000(PlanQuery), Error during planning: LIMIT must not be negative
+Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: LIMIT must not be negative
CREATE TABLE test2 (a STRING, ts TIMESTAMP TIME INDEX);
@@ -56,7 +56,7 @@ SELECT * FROM test2 LIMIT 3;
select 1 limit date '1992-01-01';
-Error: 3000(PlanQuery), Error during planning: LIMIT must not be negative
+Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: LIMIT must not be negative
CREATE TABLE integers(i TIMESTAMP TIME INDEX);
@@ -89,35 +89,35 @@ SELECT * FROM integers LIMIT 4;
SELECT * FROM integers as int LIMIT (SELECT MIN(integers.i) FROM integers);
-Error: 3000(PlanQuery), Error during planning: LIMIT must not be negative
+Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: LIMIT must not be negative
SELECT * FROM integers as int OFFSET (SELECT MIN(integers.i) FROM integers);
-Error: 3000(PlanQuery), Error during planning: Unexpected expression in OFFSET clause
+Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: Unexpected expression in OFFSET clause
SELECT * FROM integers as int LIMIT (SELECT MAX(integers.i) FROM integers) OFFSET (SELECT MIN(integers.i) FROM integers);
-Error: 3000(PlanQuery), Error during planning: Unexpected expression in OFFSET clause
+Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: Unexpected expression in OFFSET clause
SELECT * FROM integers as int LIMIT (SELECT max(integers.i) FROM integers where i > 5);
-Error: 3000(PlanQuery), Error during planning: LIMIT must not be negative
+Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: LIMIT must not be negative
SELECT * FROM integers as int LIMIT (SELECT max(integers.i) FROM integers where i > 5);
-Error: 3000(PlanQuery), Error during planning: LIMIT must not be negative
+Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: LIMIT must not be negative
SELECT * FROM integers as int LIMIT (SELECT NULL);
-Error: 3000(PlanQuery), Error during planning: LIMIT must not be negative
+Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: LIMIT must not be negative
SELECT * FROM integers as int LIMIT (SELECT -1);
-Error: 3000(PlanQuery), Error during planning: LIMIT must not be negative
+Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: LIMIT must not be negative
SELECT * FROM integers as int LIMIT (SELECT 'ab');
-Error: 3000(PlanQuery), Error during planning: LIMIT must not be negative
+Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: LIMIT must not be negative
DROP TABLE integers;
diff --git a/tests/cases/standalone/common/order/order_by.result b/tests/cases/standalone/common/order/order_by.result
index 289da619b6e7..36cb023e3d44 100644
--- a/tests/cases/standalone/common/order/order_by.result
+++ b/tests/cases/standalone/common/order/order_by.result
@@ -196,7 +196,7 @@ SELECT a-10 AS k FROM test UNION SELECT a-10 AS l FROM test ORDER BY k;
-- CONTROVERSIAL: SQLite allows both "k" and "l" to be referenced here, Postgres and MonetDB give an error.
SELECT a-10 AS k FROM test UNION SELECT a-10 AS l FROM test ORDER BY l;
-Error: 3000(PlanQuery), No field named l. Valid fields are k.
+Error: 3000(PlanQuery), Failed to plan SQL: No field named l. Valid fields are k.
-- Not compatible with duckdb, work in gretimedb
SELECT a-10 AS k FROM test UNION SELECT a-10 AS l FROM test ORDER BY 1-k;
@@ -215,7 +215,7 @@ SELECT a-10 AS k FROM test UNION SELECT a-10 AS l FROM test ORDER BY 1-k;
-- Error: 3000(PlanQuery), Schema error: No field named 'a'. Valid fields are 'k'.
SELECT a-10 AS k FROM test UNION SELECT a-10 AS l FROM test ORDER BY a-10;
-Error: 3000(PlanQuery), Error during planning: For SELECT DISTINCT, ORDER BY expressions a must appear in select list
+Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: For SELECT DISTINCT, ORDER BY expressions a must appear in select list
-- Not compatible with duckdb, give an error in greptimedb
-- TODO(LFC): Failed to meet the expected error:
@@ -223,7 +223,7 @@ Error: 3000(PlanQuery), Error during planning: For SELECT DISTINCT, ORDER BY exp
-- Error: 3000(PlanQuery), Schema error: No field named 'a'. Valid fields are 'k'.
SELECT a-10 AS k FROM test UNION SELECT a-11 AS l FROM test ORDER BY a-11;
-Error: 3000(PlanQuery), Error during planning: For SELECT DISTINCT, ORDER BY expressions a must appear in select list
+Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: For SELECT DISTINCT, ORDER BY expressions a must appear in select list
DROP TABLE test;
diff --git a/tests/cases/standalone/common/order/order_by_exceptions.result b/tests/cases/standalone/common/order/order_by_exceptions.result
index f5d049f0f495..9edcddfc88d6 100644
--- a/tests/cases/standalone/common/order/order_by_exceptions.result
+++ b/tests/cases/standalone/common/order/order_by_exceptions.result
@@ -8,7 +8,7 @@ Affected Rows: 3
SELECT a FROM test ORDER BY 2;
-Error: 3000(PlanQuery), Error during planning: Order by column out of bounds, specified: 2, max: 1
+Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: Order by column out of bounds, specified: 2, max: 1
-- Not work in greptimedb
SELECT a FROM test ORDER BY 'hello', a;
@@ -38,7 +38,7 @@ SELECT a AS k, b FROM test UNION SELECT a AS k, b FROM test ORDER BY k;
SELECT a % 2, b FROM test UNION SELECT b, a % 2 AS k ORDER BY a % 2;
-Error: 3000(PlanQuery), No field named b.
+Error: 3000(PlanQuery), Failed to plan SQL: No field named b.
-- Works duckdb, but not work in greptimedb
-- TODO(LFC): Failed to meet the expected error:
@@ -46,11 +46,11 @@ Error: 3000(PlanQuery), No field named b.
-- Error: 3000(PlanQuery), Schema error: No field named 'a'. Valid fields are 'test.a % Int64(2)', 'b'.
SELECT a % 2, b FROM test UNION SELECT a % 2 AS k, b FROM test ORDER BY a % 2;
-Error: 3000(PlanQuery), Error during planning: For SELECT DISTINCT, ORDER BY expressions a must appear in select list
+Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: For SELECT DISTINCT, ORDER BY expressions a must appear in select list
SELECT a % 2, b FROM test UNION SELECT a % 2 AS k, b FROM test ORDER BY 3;
-Error: 3000(PlanQuery), Error during planning: Order by column out of bounds, specified: 3, max: 2
+Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: Order by column out of bounds, specified: 3, max: 2
SELECT a % 2, b FROM test UNION SELECT a % 2 AS k, b FROM test ORDER BY -1;
@@ -58,7 +58,7 @@ Error: 3001(EngineExecuteQuery), Error during planning: Sort operation is not ap
SELECT a % 2, b FROM test UNION SELECT a % 2 AS k FROM test ORDER BY -1;
-Error: 3000(PlanQuery), Error during planning: Union queries must have the same number of columns, (left is 2, right is 1)
+Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: Union queries must have the same number of columns, (left is 2, right is 1)
DROP TABLE test;
diff --git a/tests/cases/standalone/common/range/by.result b/tests/cases/standalone/common/range/by.result
index 3c1d83d0bc37..0da499a30fe4 100644
--- a/tests/cases/standalone/common/range/by.result
+++ b/tests/cases/standalone/common/range/by.result
@@ -63,7 +63,7 @@ SELECT ts, CAST(length(host) as INT64) + 2, max(val) RANGE '5s' FROM host ALIGN
-- project non-aggregation key
SELECT ts, host, max(val) RANGE '5s' FROM host ALIGN '20s' BY () ORDER BY ts;
-Error: 3001(EngineExecuteQuery), No field named host.host. Valid fields are "MAX(host.val) RANGE 5s FILL NULL", host.ts, "Int64(1)".
+Error: 3001(EngineExecuteQuery), DataFusion error: No field named host.host. Valid fields are "MAX(host.val) RANGE 5s FILL NULL", host.ts, "Int64(1)".
DROP TABLE host;
diff --git a/tests/cases/standalone/common/range/error.result b/tests/cases/standalone/common/range/error.result
index eeead0c8b2d8..556e14846aff 100644
--- a/tests/cases/standalone/common/range/error.result
+++ b/tests/cases/standalone/common/range/error.result
@@ -41,7 +41,7 @@ Error: 2000(InvalidSyntax), sql parser error: Illegal Range select, no RANGE key
SELECT min(val) RANGE '10s', max(val) FROM host ALIGN '5s';
-Error: 3001(EngineExecuteQuery), No field named "MAX(host.val)". Valid fields are "MIN(host.val) RANGE 10s FILL NULL", host.ts, host.host.
+Error: 3001(EngineExecuteQuery), DataFusion error: No field named "MAX(host.val)". Valid fields are "MIN(host.val) RANGE 10s FILL NULL", host.ts, host.host.
SELECT min(val) * 2 RANGE '10s' FROM host ALIGN '5s';
@@ -54,7 +54,7 @@ Error: 2000(InvalidSyntax), sql parser error: Can't use the RANGE keyword in Exp
-- 2.2 no align param
SELECT min(val) RANGE '5s' FROM host;
-Error: 3000(PlanQuery), Error during planning: Missing argument in range select query
+Error: 3000(PlanQuery), DataFusion error: Error during planning: Missing argument in range select query
-- 2.3 type mismatch
SELECT covar(ceil(val), floor(val)) RANGE '20s' FROM host ALIGN '10s';
@@ -75,11 +75,11 @@ Error: 2000(InvalidSyntax), Range Query: Window functions is not allowed in Rang
-- 2.6 invalid fill
SELECT min(val) RANGE '5s', min(val) RANGE '5s' FILL NULL FROM host ALIGN '5s';
-Error: 3001(EngineExecuteQuery), Schema contains duplicate unqualified field name "MIN(host.val) RANGE 5s FILL NULL"
+Error: 3001(EngineExecuteQuery), DataFusion error: Schema contains duplicate unqualified field name "MIN(host.val) RANGE 5s FILL NULL"
SELECT min(val) RANGE '5s' FROM host ALIGN '5s' FILL 3.0;
-Error: 3000(PlanQuery), Error during planning: 3.0 is not a valid fill option, fail to convert to a const value. { Arrow error: Cast error: Cannot cast string '3.0' to value of Int64 type }
+Error: 3000(PlanQuery), DataFusion error: Error during planning: 3.0 is not a valid fill option, fail to convert to a const value. { Arrow error: Cast error: Cannot cast string '3.0' to value of Int64 type }
DROP TABLE host;
diff --git a/tests/cases/standalone/common/select/dummy.result b/tests/cases/standalone/common/select/dummy.result
index fbe9191f0742..ccc8e8c7173f 100644
--- a/tests/cases/standalone/common/select/dummy.result
+++ b/tests/cases/standalone/common/select/dummy.result
@@ -24,15 +24,15 @@ select 4 + 0.5;
select "a";
-Error: 3000(PlanQuery), No field named a.
+Error: 3000(PlanQuery), Failed to plan SQL: No field named a.
select "A";
-Error: 3000(PlanQuery), No field named "A".
+Error: 3000(PlanQuery), Failed to plan SQL: No field named "A".
select * where "a" = "A";
-Error: 3000(PlanQuery), No field named a.
+Error: 3000(PlanQuery), Failed to plan SQL: No field named a.
select TO_UNIXTIME('2023-03-01T06:35:02Z');
diff --git a/tests/cases/standalone/common/types/float/nan_cast.result b/tests/cases/standalone/common/types/float/nan_cast.result
index 927aabf071ff..585981f44906 100644
--- a/tests/cases/standalone/common/types/float/nan_cast.result
+++ b/tests/cases/standalone/common/types/float/nan_cast.result
@@ -11,23 +11,23 @@ SELECT 'nan'::FLOAT;
-- cannot cast nan, inf or -inf to these types
SELECT 'nan'::FLOAT::INT;
-Error: 3001(EngineExecuteQuery), Cast error: Can't cast value NaN to type Int32
+Error: 3001(EngineExecuteQuery), DataFusion error: Cast error: Can't cast value NaN to type Int32
SELECT 'nan'::FLOAT::DECIMAL(4,1);
-Error: 3001(EngineExecuteQuery), Cast error: Cannot cast to Decimal128(4, 1). Overflowing on NaN
+Error: 3001(EngineExecuteQuery), DataFusion error: Cast error: Cannot cast to Decimal128(4, 1). Overflowing on NaN
SELECT 'nan'::FLOAT::DECIMAL(9,1);
-Error: 3001(EngineExecuteQuery), Cast error: Cannot cast to Decimal128(9, 1). Overflowing on NaN
+Error: 3001(EngineExecuteQuery), DataFusion error: Cast error: Cannot cast to Decimal128(9, 1). Overflowing on NaN
SELECT 'nan'::FLOAT::DECIMAL(18,1);
-Error: 3001(EngineExecuteQuery), Cast error: Cannot cast to Decimal128(18, 1). Overflowing on NaN
+Error: 3001(EngineExecuteQuery), DataFusion error: Cast error: Cannot cast to Decimal128(18, 1). Overflowing on NaN
SELECT 'nan'::FLOAT::DECIMAL(38,1);
-Error: 3001(EngineExecuteQuery), Cast error: Cannot cast to Decimal128(38, 1). Overflowing on NaN
+Error: 3001(EngineExecuteQuery), DataFusion error: Cast error: Cannot cast to Decimal128(38, 1). Overflowing on NaN
SELECT 'inf'::FLOAT;
@@ -40,23 +40,23 @@ SELECT 'inf'::FLOAT;
-- cannot cast nan, inf or -inf to these types
SELECT 'inf'::FLOAT::INT;
-Error: 3001(EngineExecuteQuery), Cast error: Can't cast value inf to type Int32
+Error: 3001(EngineExecuteQuery), DataFusion error: Cast error: Can't cast value inf to type Int32
SELECT 'inf'::FLOAT::DECIMAL(4,1);
-Error: 3001(EngineExecuteQuery), Cast error: Cannot cast to Decimal128(4, 1). Overflowing on inf
+Error: 3001(EngineExecuteQuery), DataFusion error: Cast error: Cannot cast to Decimal128(4, 1). Overflowing on inf
SELECT 'inf'::FLOAT::DECIMAL(9,1);
-Error: 3001(EngineExecuteQuery), Cast error: Cannot cast to Decimal128(9, 1). Overflowing on inf
+Error: 3001(EngineExecuteQuery), DataFusion error: Cast error: Cannot cast to Decimal128(9, 1). Overflowing on inf
SELECT 'inf'::FLOAT::DECIMAL(18,1);
-Error: 3001(EngineExecuteQuery), Cast error: Cannot cast to Decimal128(18, 1). Overflowing on inf
+Error: 3001(EngineExecuteQuery), DataFusion error: Cast error: Cannot cast to Decimal128(18, 1). Overflowing on inf
SELECT 'inf'::FLOAT::DECIMAL(38,1);
-Error: 3001(EngineExecuteQuery), Cast error: Cannot cast to Decimal128(38, 1). Overflowing on inf
+Error: 3001(EngineExecuteQuery), DataFusion error: Cast error: Cannot cast to Decimal128(38, 1). Overflowing on inf
SELECT '-inf'::FLOAT;
@@ -69,23 +69,23 @@ SELECT '-inf'::FLOAT;
-- cannot cast nan, inf or -inf to these types
SELECT '-inf'::FLOAT::INT;
-Error: 3001(EngineExecuteQuery), Cast error: Can't cast value -inf to type Int32
+Error: 3001(EngineExecuteQuery), DataFusion error: Cast error: Can't cast value -inf to type Int32
SELECT '-inf'::FLOAT::DECIMAL(4,1);
-Error: 3001(EngineExecuteQuery), Cast error: Cannot cast to Decimal128(4, 1). Overflowing on -inf
+Error: 3001(EngineExecuteQuery), DataFusion error: Cast error: Cannot cast to Decimal128(4, 1). Overflowing on -inf
SELECT '-inf'::FLOAT::DECIMAL(9,1);
-Error: 3001(EngineExecuteQuery), Cast error: Cannot cast to Decimal128(9, 1). Overflowing on -inf
+Error: 3001(EngineExecuteQuery), DataFusion error: Cast error: Cannot cast to Decimal128(9, 1). Overflowing on -inf
SELECT '-inf'::FLOAT::DECIMAL(18,1);
-Error: 3001(EngineExecuteQuery), Cast error: Cannot cast to Decimal128(18, 1). Overflowing on -inf
+Error: 3001(EngineExecuteQuery), DataFusion error: Cast error: Cannot cast to Decimal128(18, 1). Overflowing on -inf
SELECT '-inf'::FLOAT::DECIMAL(38,1);
-Error: 3001(EngineExecuteQuery), Cast error: Cannot cast to Decimal128(38, 1). Overflowing on -inf
+Error: 3001(EngineExecuteQuery), DataFusion error: Cast error: Cannot cast to Decimal128(38, 1). Overflowing on -inf
SELECT 'nan'::DOUBLE;
@@ -98,23 +98,23 @@ SELECT 'nan'::DOUBLE;
-- cannot cast nan, inf or -inf to these types
SELECT 'nan'::DOUBLE::INT;
-Error: 3001(EngineExecuteQuery), Cast error: Can't cast value NaN to type Int32
+Error: 3001(EngineExecuteQuery), DataFusion error: Cast error: Can't cast value NaN to type Int32
SELECT 'nan'::DOUBLE::DECIMAL(4,1);
-Error: 3001(EngineExecuteQuery), Cast error: Cannot cast to Decimal128(4, 1). Overflowing on NaN
+Error: 3001(EngineExecuteQuery), DataFusion error: Cast error: Cannot cast to Decimal128(4, 1). Overflowing on NaN
SELECT 'nan'::DOUBLE::DECIMAL(9,1);
-Error: 3001(EngineExecuteQuery), Cast error: Cannot cast to Decimal128(9, 1). Overflowing on NaN
+Error: 3001(EngineExecuteQuery), DataFusion error: Cast error: Cannot cast to Decimal128(9, 1). Overflowing on NaN
SELECT 'nan'::DOUBLE::DECIMAL(18,1);
-Error: 3001(EngineExecuteQuery), Cast error: Cannot cast to Decimal128(18, 1). Overflowing on NaN
+Error: 3001(EngineExecuteQuery), DataFusion error: Cast error: Cannot cast to Decimal128(18, 1). Overflowing on NaN
SELECT 'nan'::DOUBLE::DECIMAL(38,1);
-Error: 3001(EngineExecuteQuery), Cast error: Cannot cast to Decimal128(38, 1). Overflowing on NaN
+Error: 3001(EngineExecuteQuery), DataFusion error: Cast error: Cannot cast to Decimal128(38, 1). Overflowing on NaN
SELECT 'inf'::DOUBLE;
@@ -127,23 +127,23 @@ SELECT 'inf'::DOUBLE;
-- cannot cast nan, inf or -inf to these types
SELECT 'inf'::DOUBLE::INT;
-Error: 3001(EngineExecuteQuery), Cast error: Can't cast value inf to type Int32
+Error: 3001(EngineExecuteQuery), DataFusion error: Cast error: Can't cast value inf to type Int32
SELECT 'inf'::DOUBLE::DECIMAL(4,1);
-Error: 3001(EngineExecuteQuery), Cast error: Cannot cast to Decimal128(4, 1). Overflowing on inf
+Error: 3001(EngineExecuteQuery), DataFusion error: Cast error: Cannot cast to Decimal128(4, 1). Overflowing on inf
SELECT 'inf'::DOUBLE::DECIMAL(9,1);
-Error: 3001(EngineExecuteQuery), Cast error: Cannot cast to Decimal128(9, 1). Overflowing on inf
+Error: 3001(EngineExecuteQuery), DataFusion error: Cast error: Cannot cast to Decimal128(9, 1). Overflowing on inf
SELECT 'inf'::DOUBLE::DECIMAL(18,1);
-Error: 3001(EngineExecuteQuery), Cast error: Cannot cast to Decimal128(18, 1). Overflowing on inf
+Error: 3001(EngineExecuteQuery), DataFusion error: Cast error: Cannot cast to Decimal128(18, 1). Overflowing on inf
SELECT 'inf'::DOUBLE::DECIMAL(38,1);
-Error: 3001(EngineExecuteQuery), Cast error: Cannot cast to Decimal128(38, 1). Overflowing on inf
+Error: 3001(EngineExecuteQuery), DataFusion error: Cast error: Cannot cast to Decimal128(38, 1). Overflowing on inf
SELECT '-inf'::DOUBLE;
@@ -156,23 +156,23 @@ SELECT '-inf'::DOUBLE;
-- cannot cast nan, inf or -inf to these types
SELECT '-inf'::DOUBLE::INT;
-Error: 3001(EngineExecuteQuery), Cast error: Can't cast value -inf to type Int32
+Error: 3001(EngineExecuteQuery), DataFusion error: Cast error: Can't cast value -inf to type Int32
SELECT '-inf'::DOUBLE::DECIMAL(4,1);
-Error: 3001(EngineExecuteQuery), Cast error: Cannot cast to Decimal128(4, 1). Overflowing on -inf
+Error: 3001(EngineExecuteQuery), DataFusion error: Cast error: Cannot cast to Decimal128(4, 1). Overflowing on -inf
SELECT '-inf'::DOUBLE::DECIMAL(9,1);
-Error: 3001(EngineExecuteQuery), Cast error: Cannot cast to Decimal128(9, 1). Overflowing on -inf
+Error: 3001(EngineExecuteQuery), DataFusion error: Cast error: Cannot cast to Decimal128(9, 1). Overflowing on -inf
SELECT '-inf'::DOUBLE::DECIMAL(18,1);
-Error: 3001(EngineExecuteQuery), Cast error: Cannot cast to Decimal128(18, 1). Overflowing on -inf
+Error: 3001(EngineExecuteQuery), DataFusion error: Cast error: Cannot cast to Decimal128(18, 1). Overflowing on -inf
SELECT '-inf'::DOUBLE::DECIMAL(38,1);
-Error: 3001(EngineExecuteQuery), Cast error: Cannot cast to Decimal128(38, 1). Overflowing on -inf
+Error: 3001(EngineExecuteQuery), DataFusion error: Cast error: Cannot cast to Decimal128(38, 1). Overflowing on -inf
-- we can cast nan, inf and -inf between floats and doubles, as well as to/from strings
SELECT 'nan'::FLOAT::VARCHAR;
diff --git a/tests/cases/standalone/common/types/interval/interval.result b/tests/cases/standalone/common/types/interval/interval.result
index 3bb9f46ea5ff..90d1d318fe69 100644
--- a/tests/cases/standalone/common/types/interval/interval.result
+++ b/tests/cases/standalone/common/types/interval/interval.result
@@ -41,15 +41,15 @@ SELECT INTERVAL '1 year 2 months 3 days 4 hours' - INTERVAL '1 year';
SELECT INTERVAL '6 years' * 2;
-Error: 3000(PlanQuery), This feature is not implemented: Unsupported interval operator: Multiply
+Error: 3000(PlanQuery), Failed to plan SQL: This feature is not implemented: Unsupported interval operator: Multiply
SELECT INTERVAL '6 years' / 2;
-Error: 3000(PlanQuery), This feature is not implemented: Unsupported interval operator: Divide
+Error: 3000(PlanQuery), Failed to plan SQL: This feature is not implemented: Unsupported interval operator: Divide
SELECT INTERVAL '6 years' = INTERVAL '72 months';
-Error: 3000(PlanQuery), This feature is not implemented: Unsupported interval operator: Eq
+Error: 3000(PlanQuery), Failed to plan SQL: This feature is not implemented: Unsupported interval operator: Eq
SELECT arrow_typeof(INTERVAL '1 month');
@@ -62,7 +62,7 @@ SELECT arrow_typeof(INTERVAL '1 month');
-- INTERVAL + TIME CONSTANT
SELECT current_time() + INTERVAL '1 hour';
-Error: 3000(PlanQuery), Error during planning: Cannot coerce arithmetic expression Time64(Nanosecond) + Interval(MonthDayNano) to valid types
+Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: Cannot coerce arithmetic expression Time64(Nanosecond) + Interval(MonthDayNano) to valid types
-- table with interval type test
CREATE TABLE IF NOT EXISTS intervals(
@@ -259,13 +259,13 @@ This was likely caused by a bug in DataFusion's code and we would welcome that y
SELECT SUM(interval_value) from intervals;
-Error: 3000(PlanQuery), Error during planning: No function matches the given name and argument types 'SUM(Interval(MonthDayNano))'. You might need to add explicit type casts.
+Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: No function matches the given name and argument types 'SUM(Interval(MonthDayNano))'. You might need to add explicit type casts.
Candidate functions:
SUM(Int8/Int16/Int32/Int64/UInt8/UInt16/UInt32/UInt64/Float32/Float64)
SELECT AVG(interval_value) from intervals;
-Error: 3000(PlanQuery), Error during planning: No function matches the given name and argument types 'AVG(Interval(MonthDayNano))'. You might need to add explicit type casts.
+Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: No function matches the given name and argument types 'AVG(Interval(MonthDayNano))'. You might need to add explicit type casts.
Candidate functions:
AVG(Int8/Int16/Int32/Int64/UInt8/UInt16/UInt32/UInt64/Float32/Float64)
diff --git a/tests/cases/standalone/common/types/timestamp/timestamp.result b/tests/cases/standalone/common/types/timestamp/timestamp.result
index 80173a778f0b..6eaeb2e7a4f1 100644
--- a/tests/cases/standalone/common/types/timestamp/timestamp.result
+++ b/tests/cases/standalone/common/types/timestamp/timestamp.result
@@ -32,15 +32,15 @@ SELECT timestamp ' 2017-07-23 13:10:11 ';
SELECT timestamp ' 2017-07-23 13:10:11 AA';
-Error: 3001(EngineExecuteQuery), Parser error: Invalid timezone "AA": 'AA' is not a valid timezone
+Error: 3001(EngineExecuteQuery), DataFusion error: Parser error: Invalid timezone "AA": 'AA' is not a valid timezone
SELECT timestamp 'AA2017-07-23 13:10:11';
-Error: 3001(EngineExecuteQuery), Parser error: Error parsing timestamp from 'AA2017-07-23 13:10:11': error parsing date
+Error: 3001(EngineExecuteQuery), DataFusion error: Parser error: Error parsing timestamp from 'AA2017-07-23 13:10:11': error parsing date
SELECT timestamp '2017-07-23A13:10:11';
-Error: 3001(EngineExecuteQuery), Parser error: Error parsing timestamp from '2017-07-23A13:10:11': invalid timestamp separator
+Error: 3001(EngineExecuteQuery), DataFusion error: Parser error: Error parsing timestamp from '2017-07-23A13:10:11': invalid timestamp separator
SELECT t FROM timestamp ORDER BY t;
@@ -75,31 +75,31 @@ SELECT MAX(t) FROM timestamp;
SELECT SUM(t) FROM timestamp;
-Error: 3000(PlanQuery), Error during planning: No function matches the given name and argument types 'SUM(Timestamp(Millisecond, None))'. You might need to add explicit type casts.
+Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: No function matches the given name and argument types 'SUM(Timestamp(Millisecond, None))'. You might need to add explicit type casts.
Candidate functions:
SUM(Int8/Int16/Int32/Int64/UInt8/UInt16/UInt32/UInt64/Float32/Float64)
SELECT AVG(t) FROM timestamp;
-Error: 3000(PlanQuery), Error during planning: No function matches the given name and argument types 'AVG(Timestamp(Millisecond, None))'. You might need to add explicit type casts.
+Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: No function matches the given name and argument types 'AVG(Timestamp(Millisecond, None))'. You might need to add explicit type casts.
Candidate functions:
AVG(Int8/Int16/Int32/Int64/UInt8/UInt16/UInt32/UInt64/Float32/Float64)
SELECT t+t FROM timestamp;
-Error: 3000(PlanQuery), Error during planning: Cannot get result type for temporal operation Timestamp(Millisecond, None) + Timestamp(Millisecond, None): Invalid argument error: Invalid timestamp arithmetic operation: Timestamp(Millisecond, None) + Timestamp(Millisecond, None)
+Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: Cannot get result type for temporal operation Timestamp(Millisecond, None) + Timestamp(Millisecond, None): Invalid argument error: Invalid timestamp arithmetic operation: Timestamp(Millisecond, None) + Timestamp(Millisecond, None)
SELECT t*t FROM timestamp;
-Error: 3000(PlanQuery), Error during planning: Cannot get result type for temporal operation Timestamp(Millisecond, None) * Timestamp(Millisecond, None): Invalid argument error: Invalid timestamp arithmetic operation: Timestamp(Millisecond, None) * Timestamp(Millisecond, None)
+Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: Cannot get result type for temporal operation Timestamp(Millisecond, None) * Timestamp(Millisecond, None): Invalid argument error: Invalid timestamp arithmetic operation: Timestamp(Millisecond, None) * Timestamp(Millisecond, None)
SELECT t/t FROM timestamp;
-Error: 3000(PlanQuery), Error during planning: Cannot get result type for temporal operation Timestamp(Millisecond, None) / Timestamp(Millisecond, None): Invalid argument error: Invalid timestamp arithmetic operation: Timestamp(Millisecond, None) / Timestamp(Millisecond, None)
+Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: Cannot get result type for temporal operation Timestamp(Millisecond, None) / Timestamp(Millisecond, None): Invalid argument error: Invalid timestamp arithmetic operation: Timestamp(Millisecond, None) / Timestamp(Millisecond, None)
SELECT t%t FROM timestamp;
-Error: 3000(PlanQuery), Error during planning: Cannot get result type for temporal operation Timestamp(Millisecond, None) % Timestamp(Millisecond, None): Invalid argument error: Invalid timestamp arithmetic operation: Timestamp(Millisecond, None) % Timestamp(Millisecond, None)
+Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: Cannot get result type for temporal operation Timestamp(Millisecond, None) % Timestamp(Millisecond, None): Invalid argument error: Invalid timestamp arithmetic operation: Timestamp(Millisecond, None) % Timestamp(Millisecond, None)
-- TODO(dennis): It can't run on distributed mode, uncomment it when the issue is fixed: https://github.com/GreptimeTeam/greptimedb/issues/2071 --
-- SELECT t-t FROM timestamp; --
@@ -169,11 +169,11 @@ SELECT TIMESTAMP '2008-01-01 00:00:01.5'::VARCHAR;
SELECT TIMESTAMP '-8-01-01 00:00:01.5'::VARCHAR;
-Error: 3001(EngineExecuteQuery), Parser error: Error parsing timestamp from '-8-01-01 00:00:01.5': error parsing date
+Error: 3001(EngineExecuteQuery), DataFusion error: Parser error: Error parsing timestamp from '-8-01-01 00:00:01.5': error parsing date
SELECT TIMESTAMP '100000-01-01 00:00:01.5'::VARCHAR;
-Error: 3001(EngineExecuteQuery), Parser error: Error parsing timestamp from '100000-01-01 00:00:01.5': error parsing date
+Error: 3001(EngineExecuteQuery), DataFusion error: Parser error: Error parsing timestamp from '100000-01-01 00:00:01.5': error parsing date
DROP TABLE timestamp;
diff --git a/tests/cases/standalone/common/types/timestamp/timestamp_limits.result b/tests/cases/standalone/common/types/timestamp/timestamp_limits.result
index a3c8c619b452..4680d298f030 100644
--- a/tests/cases/standalone/common/types/timestamp/timestamp_limits.result
+++ b/tests/cases/standalone/common/types/timestamp/timestamp_limits.result
@@ -9,73 +9,73 @@ select timestamp '1970-01-01';
select '290309-12-22 (BC) 00:00:00'::timestamp;
-Error: 3001(EngineExecuteQuery), Parser error: Error parsing timestamp from '290309-12-22 (BC) 00:00:00': error parsing date
+Error: 3001(EngineExecuteQuery), DataFusion error: Parser error: Error parsing timestamp from '290309-12-22 (BC) 00:00:00': error parsing date
select '290309-12-21 (BC) 12:59:59.999999'::timestamp;
-Error: 3001(EngineExecuteQuery), Parser error: Error parsing timestamp from '290309-12-21 (BC) 12:59:59.999999': error parsing date
+Error: 3001(EngineExecuteQuery), DataFusion error: Parser error: Error parsing timestamp from '290309-12-21 (BC) 12:59:59.999999': error parsing date
select '290309-12-22 (BC) 00:00:00'::timestamp + interval '1 day';
-Error: 3001(EngineExecuteQuery), Parser error: Error parsing timestamp from '290309-12-22 (BC) 00:00:00': error parsing date
+Error: 3001(EngineExecuteQuery), DataFusion error: Parser error: Error parsing timestamp from '290309-12-22 (BC) 00:00:00': error parsing date
select '290309-12-22 (BC) 00:00:00'::timestamp - interval '1 microsecond';
-Error: 3001(EngineExecuteQuery), Parser error: Error parsing timestamp from '290309-12-22 (BC) 00:00:00': error parsing date
+Error: 3001(EngineExecuteQuery), DataFusion error: Parser error: Error parsing timestamp from '290309-12-22 (BC) 00:00:00': error parsing date
select '290309-12-22 (BC) 00:00:00'::timestamp - interval '1 second';
-Error: 3001(EngineExecuteQuery), Parser error: Error parsing timestamp from '290309-12-22 (BC) 00:00:00': error parsing date
+Error: 3001(EngineExecuteQuery), DataFusion error: Parser error: Error parsing timestamp from '290309-12-22 (BC) 00:00:00': error parsing date
select '290309-12-22 (BC) 00:00:00'::timestamp - interval '1 day';
-Error: 3001(EngineExecuteQuery), Parser error: Error parsing timestamp from '290309-12-22 (BC) 00:00:00': error parsing date
+Error: 3001(EngineExecuteQuery), DataFusion error: Parser error: Error parsing timestamp from '290309-12-22 (BC) 00:00:00': error parsing date
select '290309-12-22 (BC) 00:00:00'::timestamp - interval '1 month';
-Error: 3001(EngineExecuteQuery), Parser error: Error parsing timestamp from '290309-12-22 (BC) 00:00:00': error parsing date
+Error: 3001(EngineExecuteQuery), DataFusion error: Parser error: Error parsing timestamp from '290309-12-22 (BC) 00:00:00': error parsing date
select '290309-12-22 (BC) 00:00:00'::timestamp - interval '1 year';
-Error: 3001(EngineExecuteQuery), Parser error: Error parsing timestamp from '290309-12-22 (BC) 00:00:00': error parsing date
+Error: 3001(EngineExecuteQuery), DataFusion error: Parser error: Error parsing timestamp from '290309-12-22 (BC) 00:00:00': error parsing date
select timestamp '294247-01-10 04:00:54.775806';
-Error: 3001(EngineExecuteQuery), Parser error: Error parsing timestamp from '294247-01-10 04:00:54.775806': error parsing date
+Error: 3001(EngineExecuteQuery), DataFusion error: Parser error: Error parsing timestamp from '294247-01-10 04:00:54.775806': error parsing date
select timestamp '294247-01-10 04:00:54.775807';
-Error: 3001(EngineExecuteQuery), Parser error: Error parsing timestamp from '294247-01-10 04:00:54.775807': error parsing date
+Error: 3001(EngineExecuteQuery), DataFusion error: Parser error: Error parsing timestamp from '294247-01-10 04:00:54.775807': error parsing date
select timestamp '294247-01-10 04:00:54.775806' + interval '1 microsecond';
-Error: 3001(EngineExecuteQuery), Parser error: Error parsing timestamp from '294247-01-10 04:00:54.775806': error parsing date
+Error: 3001(EngineExecuteQuery), DataFusion error: Parser error: Error parsing timestamp from '294247-01-10 04:00:54.775806': error parsing date
select timestamp '294247-01-10 04:00:54.775806' + interval '1 second';
-Error: 3001(EngineExecuteQuery), Parser error: Error parsing timestamp from '294247-01-10 04:00:54.775806': error parsing date
+Error: 3001(EngineExecuteQuery), DataFusion error: Parser error: Error parsing timestamp from '294247-01-10 04:00:54.775806': error parsing date
select timestamp '294247-01-10 04:00:54.775806' + interval '1 hour';
-Error: 3001(EngineExecuteQuery), Parser error: Error parsing timestamp from '294247-01-10 04:00:54.775806': error parsing date
+Error: 3001(EngineExecuteQuery), DataFusion error: Parser error: Error parsing timestamp from '294247-01-10 04:00:54.775806': error parsing date
select timestamp '294247-01-10 04:00:54.775806' + interval '1 day';
-Error: 3001(EngineExecuteQuery), Parser error: Error parsing timestamp from '294247-01-10 04:00:54.775806': error parsing date
+Error: 3001(EngineExecuteQuery), DataFusion error: Parser error: Error parsing timestamp from '294247-01-10 04:00:54.775806': error parsing date
select timestamp '294247-01-10 04:00:54.775806' + interval '1 month';
-Error: 3001(EngineExecuteQuery), Parser error: Error parsing timestamp from '294247-01-10 04:00:54.775806': error parsing date
+Error: 3001(EngineExecuteQuery), DataFusion error: Parser error: Error parsing timestamp from '294247-01-10 04:00:54.775806': error parsing date
select timestamp '294247-01-10 04:00:54.775806' + interval '1 year';
-Error: 3001(EngineExecuteQuery), Parser error: Error parsing timestamp from '294247-01-10 04:00:54.775806': error parsing date
+Error: 3001(EngineExecuteQuery), DataFusion error: Parser error: Error parsing timestamp from '294247-01-10 04:00:54.775806': error parsing date
SELECT '290309-12-22 (BC) 00:00:00+07:00'::TIMESTAMP;
-Error: 3001(EngineExecuteQuery), Parser error: Error parsing timestamp from '290309-12-22 (BC) 00:00:00+07:00': error parsing date
+Error: 3001(EngineExecuteQuery), DataFusion error: Parser error: Error parsing timestamp from '290309-12-22 (BC) 00:00:00+07:00': error parsing date
SELECT '294247-01-10 04:00:54-07:00'::TIMESTAMP;
-Error: 3001(EngineExecuteQuery), Parser error: Error parsing timestamp from '294247-01-10 04:00:54-07:00': error parsing date
+Error: 3001(EngineExecuteQuery), DataFusion error: Parser error: Error parsing timestamp from '294247-01-10 04:00:54-07:00': error parsing date
diff --git a/tests/cases/standalone/common/types/timestamp/timestamp_types.result b/tests/cases/standalone/common/types/timestamp/timestamp_types.result
index 02d024216769..e78b11dec393 100644
--- a/tests/cases/standalone/common/types/timestamp/timestamp_types.result
+++ b/tests/cases/standalone/common/types/timestamp/timestamp_types.result
@@ -49,7 +49,7 @@ Affected Rows: 1
select '90000-01-19 03:14:07.999999'::TIMESTAMP_US::TIMESTAMP_NS;
-Error: 3001(EngineExecuteQuery), Parser error: Error parsing timestamp from '90000-01-19 03:14:07.999999': error parsing date
+Error: 3001(EngineExecuteQuery), DataFusion error: Parser error: Error parsing timestamp from '90000-01-19 03:14:07.999999': error parsing date
select sec::DATE from timestamp;
diff --git a/tests/cases/standalone/optimizer/filter_push_down.result b/tests/cases/standalone/optimizer/filter_push_down.result
index 5deb4641b926..7e7cb8ca30e6 100644
--- a/tests/cases/standalone/optimizer/filter_push_down.result
+++ b/tests/cases/standalone/optimizer/filter_push_down.result
@@ -140,7 +140,7 @@ SELECT * FROM integers i1 WHERE NOT EXISTS(SELECT i FROM integers WHERE i=i1.i)
SELECT i1.i,i2.i FROM integers i1, integers i2 WHERE i1.i=(SELECT i FROM integers WHERE i1.i=i) AND i1.i=i2.i ORDER BY i1.i;
-Error: 3001(EngineExecuteQuery), Error during planning: Correlated scalar subquery must be aggregated to return at most one row
+Error: 3001(EngineExecuteQuery), DataFusion error: Error during planning: Correlated scalar subquery must be aggregated to return at most one row
SELECT * FROM (SELECT i1.i AS a, i2.i AS b FROM integers i1, integers i2) a1 WHERE a=b ORDER BY 1;
@@ -192,7 +192,7 @@ SELECT i FROM (SELECT * FROM integers i1 UNION SELECT * FROM integers i2) a WHER
-- SELECT * FROM (SELECT 0=1 AS cond FROM integers i1, integers i2) a1 WHERE cond ORDER BY 1;
SELECT * FROM (SELECT 0=1 AS cond FROM integers i1, integers i2 GROUP BY 1) a1 WHERE cond ORDER BY 1;
-Error: 3001(EngineExecuteQuery), Error during planning: Attempted to create Filter predicate with expression `Boolean(false)` aliased as 'Int64(0) = Int64(1)'. Filter predicates should not be aliased.
+Error: 3001(EngineExecuteQuery), DataFusion error: Error during planning: Attempted to create Filter predicate with expression `Boolean(false)` aliased as 'Int64(0) = Int64(1)'. Filter predicates should not be aliased.
DROP TABLE integers;
|
fix
|
add missing error display message (#2791)
|
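The updated expectations above all gain a "DataFusion error:" prefix, presumably because the inner DataFusion error is now rendered through the wrapping query-engine error's Display chain instead of being flattened away. A hypothetical, self-contained Rust sketch of that wrapping effect — the DataFusionError struct and the 3001(EngineExecuteQuery) literal below are illustrative stand-ins, not GreptimeDB's real types:

use std::fmt;

// Stand-in for the inner error; the real type lives in DataFusion.
#[derive(Debug)]
struct DataFusionError(String);

impl fmt::Display for DataFusionError {
    // Prepending a label here is what produces the extra prefix in the
    // rendered message, mirroring the new expected outputs above.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "DataFusion error: {}", self.0)
    }
}

fn main() {
    let inner = "Parser error: Error parsing timestamp from '294247-01-10 04:00:54.775806': error parsing date";
    let wrapped = DataFusionError(inner.to_string());
    // Renders the same shape as the updated .result expectations.
    println!("Error: 3001(EngineExecuteQuery), {wrapped}");
}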
98ef74bff4507b730369b5a15ba69a31d3206903
|
2023-02-23 08:50:45
|
Xuanwo
|
chore: Bump OpenDAL to v0.27 (#1057)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index c2c15a6a99d8..b93ffab82429 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -653,6 +653,18 @@ dependencies = [
"tokio",
]
+[[package]]
+name = "backon"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f34fac4d7cdaefa2deded0eda2d5d59dbfd43370ff3f856209e72340ae84c294"
+dependencies = [
+ "futures",
+ "pin-project",
+ "rand 0.8.5",
+ "tokio",
+]
+
[[package]]
name = "backtrace"
version = "0.3.67"
@@ -729,25 +741,6 @@ dependencies = [
"serde",
]
-[[package]]
-name = "bincode"
-version = "2.0.0-rc.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7bb50c5a2ef4b9b1e7ae73e3a73b52ea24b20312d629f9c4df28260b7ad2c3c4"
-dependencies = [
- "bincode_derive",
- "serde",
-]
-
-[[package]]
-name = "bincode_derive"
-version = "2.0.0-rc.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0a45a23389446d2dd25dc8e73a7a3b3c43522b630cac068927f0649d43d719d2"
-dependencies = [
- "virtue",
-]
-
[[package]]
name = "bindgen"
version = "0.59.2"
@@ -2172,7 +2165,7 @@ dependencies = [
"axum",
"axum-macros",
"axum-test-helper",
- "backon",
+ "backon 0.2.0",
"catalog",
"client",
"common-base",
@@ -2248,6 +2241,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f1a467a65c5e759bce6e65eaf91cc29f466cdc57cb65777bd646872a8a1fd4de"
dependencies = [
"const-oid",
+ "pem-rfc7468",
+ "zeroize",
]
[[package]]
@@ -2336,6 +2331,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f"
dependencies = [
"block-buffer",
+ "const-oid",
"crypto-common",
"subtle",
]
@@ -3464,6 +3460,9 @@ name = "lazy_static"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
+dependencies = [
+ "spin",
+]
[[package]]
name = "lazycell"
@@ -3662,7 +3661,7 @@ version = "0.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2f270b952b07995fe874b10a5ed7dd28c80aa2130e37a7de7ed667d034e0a521"
dependencies = [
- "bincode 1.3.3",
+ "bincode",
"cactus",
"cfgrammar",
"filetime",
@@ -4321,6 +4320,23 @@ dependencies = [
"serde",
]
+[[package]]
+name = "num-bigint-dig"
+version = "0.8.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2399c9463abc5f909349d8aa9ba080e0b88b3ce2885389b60b993f39b1a56905"
+dependencies = [
+ "byteorder",
+ "lazy_static",
+ "libm",
+ "num-integer",
+ "num-iter",
+ "num-traits",
+ "rand 0.8.5",
+ "smallvec",
+ "zeroize",
+]
+
[[package]]
name = "num-complex"
version = "0.4.3"
@@ -4480,16 +4496,15 @@ checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575"
[[package]]
name = "opendal"
-version = "0.25.1"
+version = "0.27.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "73829d3a057542556dc2c2d2b70700a44dda913cdb5483094c20ef9673ca283c"
+checksum = "ef6f7b936f2f8483e19643357cb50d9ec9a49c506971ef69ca676913cf5afd91"
dependencies = [
"anyhow",
"async-compat",
"async-trait",
- "backon",
+ "backon 0.4.0",
"base64 0.21.0",
- "bincode 2.0.0-rc.2",
"bytes",
"flagset",
"futures",
@@ -4795,6 +4810,15 @@ dependencies = [
"base64 0.13.1",
]
+[[package]]
+name = "pem-rfc7468"
+version = "0.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "24d159833a9105500e0398934e205e0773f0b27529557134ecfc51c27646adac"
+dependencies = [
+ "base64ct",
+]
+
[[package]]
name = "percent-encoding"
version = "2.2.0"
@@ -4996,6 +5020,28 @@ version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
+[[package]]
+name = "pkcs1"
+version = "0.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "eff33bdbdfc54cc98a2eca766ebdec3e1b8fb7387523d5c9c9a2891da856f719"
+dependencies = [
+ "der",
+ "pkcs8",
+ "spki",
+ "zeroize",
+]
+
+[[package]]
+name = "pkcs8"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9eca2c590a5f85da82668fa685c09ce2888b9430e83299debf1f34b65fd4a4ba"
+dependencies = [
+ "der",
+ "spki",
+]
+
[[package]]
name = "pkg-config"
version = "0.3.26"
@@ -5763,12 +5809,12 @@ dependencies = [
[[package]]
name = "reqsign"
-version = "0.8.1"
+version = "0.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3f446438814fde3785305a59a85a6d1b361ce2c9d29e58dd87c9103a242c40b6"
+checksum = "ef4d5fefeaaa1e64f4aabb79da4ea68bf6d0e7935ad927728280d2a8e95735fc"
dependencies = [
"anyhow",
- "backon",
+ "backon 0.4.0",
"base64 0.21.0",
"bytes",
"dirs",
@@ -5781,6 +5827,8 @@ dependencies = [
"once_cell",
"percent-encoding",
"quick-xml",
+ "rand 0.8.5",
+ "rsa",
"rust-ini",
"serde",
"serde_json",
@@ -5905,6 +5953,27 @@ dependencies = [
"serde",
]
+[[package]]
+name = "rsa"
+version = "0.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "89b3896c9b7790b70a9aa314a30e4ae114200992a19c96cbe0ca6070edd32ab8"
+dependencies = [
+ "byteorder",
+ "digest",
+ "num-bigint-dig",
+ "num-integer",
+ "num-iter",
+ "num-traits",
+ "pkcs1",
+ "pkcs8",
+ "rand_core 0.6.4",
+ "sha2",
+ "signature",
+ "subtle",
+ "zeroize",
+]
+
[[package]]
name = "rust-ini"
version = "0.18.0"
@@ -6076,7 +6145,7 @@ name = "rustpython-compiler-core"
version = "0.1.2"
source = "git+https://github.com/discord9/RustPython?rev=2e126345#2e12634569d01674724490193eb9638f056e51ca"
dependencies = [
- "bincode 1.3.3",
+ "bincode",
"bitflags",
"bstr",
"itertools",
@@ -6826,6 +6895,10 @@ name = "signature"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8fe458c98333f9c8152221191a77e2a44e8325d0193484af2e9421a53019e57d"
+dependencies = [
+ "digest",
+ "rand_core 0.6.4",
+]
[[package]]
name = "simba"
@@ -8368,12 +8441,6 @@ version = "0.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
-[[package]]
-name = "virtue"
-version = "0.0.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7b60dcd6a64dd45abf9bd426970c9843726da7fc08f44cd6fcebf68c21220a63"
-
[[package]]
name = "vob"
version = "3.0.2"
@@ -8756,6 +8823,12 @@ dependencies = [
"lzma-sys",
]
+[[package]]
+name = "zeroize"
+version = "1.5.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f"
+
[[package]]
name = "zstd"
version = "0.12.2+zstd.1.5.2"
diff --git a/src/catalog/src/system.rs b/src/catalog/src/system.rs
index 71d96ba45efd..cd55446d28d2 100644
--- a/src/catalog/src/system.rs
+++ b/src/catalog/src/system.rs
@@ -399,7 +399,7 @@ mod tests {
use log_store::NoopLogStore;
use mito::config::EngineConfig;
use mito::engine::MitoEngine;
- use object_store::ObjectStore;
+ use object_store::{ObjectStore, ObjectStoreBuilder};
use storage::compaction::noop::NoopCompactionScheduler;
use storage::config::EngineConfig as StorageEngineConfig;
use storage::EngineImpl;
@@ -486,7 +486,7 @@ mod tests {
.root(&store_dir)
.build()
.unwrap();
- let object_store = ObjectStore::new(accessor);
+ let object_store = ObjectStore::new(accessor).finish();
let noop_compaction_scheduler = Arc::new(NoopCompactionScheduler::default());
let table_engine = Arc::new(MitoEngine::new(
EngineConfig::default(),
diff --git a/src/common/procedure/src/local.rs b/src/common/procedure/src/local.rs
index 468c8f6dcdc3..7022231351af 100644
--- a/src/common/procedure/src/local.rs
+++ b/src/common/procedure/src/local.rs
@@ -411,7 +411,8 @@ impl ProcedureManager for LocalManager {
/// Create a new [ProcedureMeta] for test purpose.
#[cfg(test)]
mod test_util {
- use object_store::services::fs::Builder;
+ use object_store::services::Fs as Builder;
+ use object_store::ObjectStoreBuilder;
use tempdir::TempDir;
use super::*;
@@ -423,7 +424,7 @@ mod test_util {
pub(crate) fn new_object_store(dir: &TempDir) -> ObjectStore {
let store_dir = dir.path().to_str().unwrap();
let accessor = Builder::default().root(store_dir).build().unwrap();
- ObjectStore::new(accessor)
+ ObjectStore::new(accessor).finish()
}
}
diff --git a/src/common/procedure/src/store.rs b/src/common/procedure/src/store.rs
index 680c2ecf9dd1..5198e1c1102f 100644
--- a/src/common/procedure/src/store.rs
+++ b/src/common/procedure/src/store.rs
@@ -246,7 +246,8 @@ impl ParsedKey {
#[cfg(test)]
mod tests {
use async_trait::async_trait;
- use object_store::services::fs::Builder;
+ use object_store::services::Fs as Builder;
+ use object_store::ObjectStoreBuilder;
use tempdir::TempDir;
use super::*;
@@ -255,7 +256,7 @@ mod tests {
fn procedure_store_for_test(dir: &TempDir) -> ProcedureStore {
let store_dir = dir.path().to_str().unwrap();
let accessor = Builder::default().root(store_dir).build().unwrap();
- let object_store = ObjectStore::new(accessor);
+ let object_store = ObjectStore::new(accessor).finish();
ProcedureStore::from(object_store)
}
diff --git a/src/common/procedure/src/store/state_store.rs b/src/common/procedure/src/store/state_store.rs
index 052d26a45ad3..97cebdbd5abc 100644
--- a/src/common/procedure/src/store/state_store.rs
+++ b/src/common/procedure/src/store/state_store.rs
@@ -20,9 +20,7 @@ use futures::{Stream, TryStreamExt};
use object_store::{ObjectMode, ObjectStore};
use snafu::ResultExt;
-use crate::error::{
- DeleteStateSnafu, Error, ListStateSnafu, PutStateSnafu, ReadStateSnafu, Result,
-};
+use crate::error::{DeleteStateSnafu, Error, PutStateSnafu, Result};
/// Key value from state store.
type KeyValue = (String, Vec<u8>);
@@ -72,22 +70,23 @@ impl StateStore for ObjectStateStore {
async fn walk_top_down(&self, path: &str) -> Result<KeyValueStream> {
let path_string = path.to_string();
- let op = self.store.batch();
- // Note that there is no guarantee about the order between files and dirs
- // at the same level.
- // See https://docs.rs/opendal/0.25.2/opendal/raw/struct.TopDownWalker.html#note
- let stream = op
- .walk_top_down(path)
- .context(ListStateSnafu { path })?
- .map_err(move |e| Error::ListState {
+
+ let lister = self
+ .store
+ .object(path)
+ .scan()
+ .await
+ .map_err(|e| Error::ListState {
path: path_string.clone(),
source: e,
- })
+ })?;
+
+ let stream = lister
.try_filter_map(|entry| async move {
let key = entry.path();
- let key_value = match entry.mode().await.context(ReadStateSnafu { key })? {
+ let key_value = match entry.mode().await? {
ObjectMode::FILE => {
- let value = entry.read().await.context(ReadStateSnafu { key })?;
+ let value = entry.read().await?;
Some((key.to_string(), value))
}
@@ -95,6 +94,10 @@ impl StateStore for ObjectStateStore {
};
Ok(key_value)
+ })
+ .map_err(move |e| Error::ListState {
+ path: path_string.clone(),
+ source: e,
});
Ok(Box::pin(stream))
@@ -112,7 +115,8 @@ impl StateStore for ObjectStateStore {
#[cfg(test)]
mod tests {
- use object_store::services::fs::Builder;
+ use object_store::services::Fs as Builder;
+ use object_store::ObjectStoreBuilder;
use tempdir::TempDir;
use super::*;
@@ -122,7 +126,7 @@ mod tests {
let dir = TempDir::new("state_store").unwrap();
let store_dir = dir.path().to_str().unwrap();
let accessor = Builder::default().root(store_dir).build().unwrap();
- let object_store = ObjectStore::new(accessor);
+ let object_store = ObjectStore::new(accessor).finish();
let state_store = ObjectStateStore::new(object_store);
let data: Vec<_> = state_store
diff --git a/src/datanode/src/instance.rs b/src/datanode/src/instance.rs
index 4caa01b26901..a684dd42c211 100644
--- a/src/datanode/src/instance.rs
+++ b/src/datanode/src/instance.rs
@@ -16,7 +16,6 @@ use std::sync::Arc;
use std::time::Duration;
use std::{fs, path};
-use backon::ExponentialBackoff;
use catalog::remote::MetaKvBackend;
use catalog::{CatalogManager, CatalogManagerRef, RegisterTableRequest};
use common_base::readable_size::ReadableSize;
@@ -29,12 +28,10 @@ use meta_client::client::{MetaClient, MetaClientBuilder};
use meta_client::MetaClientOpts;
use mito::config::EngineConfig as TableEngineConfig;
use mito::engine::MitoEngine;
-use object_store::cache_policy::LruCachePolicy;
-use object_store::layers::{CacheLayer, LoggingLayer, MetricsLayer, RetryLayer, TracingLayer};
-use object_store::services::fs::Builder as FsBuilder;
-use object_store::services::oss::Builder as OSSBuilder;
-use object_store::services::s3::Builder as S3Builder;
-use object_store::{util, ObjectStore};
+use object_store::cache_policy::LruCacheLayer;
+use object_store::layers::{LoggingLayer, MetricsLayer, RetryLayer, TracingLayer};
+use object_store::services::{Fs as FsBuilder, Oss as OSSBuilder, S3 as S3Builder};
+use object_store::{util, ObjectStore, ObjectStoreBuilder};
use query::query_engine::{QueryEngineFactory, QueryEngineRef};
use servers::Mode;
use snafu::prelude::*;
@@ -227,7 +224,7 @@ pub(crate) async fn new_object_store(store_config: &ObjectStoreConfig) -> Result
object_store.map(|object_store| {
object_store
- .layer(RetryLayer::new(ExponentialBackoff::default().with_jitter()))
+ .layer(RetryLayer::new().with_jitter())
.layer(MetricsLayer)
.layer(LoggingLayer::default())
.layer(TracingLayer)
@@ -258,7 +255,7 @@ pub(crate) async fn new_oss_object_store(store_config: &ObjectStoreConfig) -> Re
config: store_config.clone(),
})?;
- create_object_store_with_cache(ObjectStore::new(accessor), store_config)
+ create_object_store_with_cache(ObjectStore::new(accessor).finish(), store_config)
}
fn create_object_store_with_cache(
@@ -285,13 +282,13 @@ fn create_object_store_with_cache(
if let Some(path) = cache_path {
let cache_store =
- ObjectStore::new(FsBuilder::default().root(path).build().with_context(|_| {
- error::InitBackendSnafu {
+ FsBuilder::default()
+ .root(path)
+ .build()
+ .with_context(|_| error::InitBackendSnafu {
config: store_config.clone(),
- }
- })?);
- let policy = LruCachePolicy::new(cache_capacity.0 as usize);
- let cache_layer = CacheLayer::new(cache_store).with_policy(policy);
+ })?;
+ let cache_layer = LruCacheLayer::new(Arc::new(cache_store), cache_capacity.0 as usize);
Ok(object_store.layer(cache_layer))
} else {
Ok(object_store)
@@ -328,7 +325,7 @@ pub(crate) async fn new_s3_object_store(store_config: &ObjectStoreConfig) -> Res
config: store_config.clone(),
})?;
- create_object_store_with_cache(ObjectStore::new(accessor), store_config)
+ create_object_store_with_cache(ObjectStore::new(accessor).finish(), store_config)
}
pub(crate) async fn new_fs_object_store(store_config: &ObjectStoreConfig) -> Result<ObjectStore> {
@@ -351,7 +348,7 @@ pub(crate) async fn new_fs_object_store(store_config: &ObjectStoreConfig) -> Res
config: store_config.clone(),
})?;
- Ok(ObjectStore::new(accessor))
+ Ok(ObjectStore::new(accessor).finish())
}
/// Create metasrv client instance and spawn heartbeat loop.
diff --git a/src/datanode/src/sql.rs b/src/datanode/src/sql.rs
index 34667260d9ac..72b4342dee14 100644
--- a/src/datanode/src/sql.rs
+++ b/src/datanode/src/sql.rs
@@ -147,8 +147,8 @@ mod tests {
use log_store::NoopLogStore;
use mito::config::EngineConfig as TableEngineConfig;
use mito::engine::MitoEngine;
- use object_store::services::fs::Builder;
- use object_store::ObjectStore;
+ use object_store::services::Fs as Builder;
+ use object_store::{ObjectStore, ObjectStoreBuilder};
use query::parser::{QueryLanguageParser, QueryStatement};
use query::QueryEngineFactory;
use session::context::QueryContext;
@@ -213,7 +213,7 @@ mod tests {
let dir = TempDir::new("setup_test_engine_and_table").unwrap();
let store_dir = dir.path().to_string_lossy();
let accessor = Builder::default().root(&store_dir).build().unwrap();
- let object_store = ObjectStore::new(accessor);
+ let object_store = ObjectStore::new(accessor).finish();
let compaction_scheduler = Arc::new(NoopCompactionScheduler::default());
let sql = r#"insert into demo(host, cpu, memory, ts) values
('host1', 66.6, 1024, 1655276557000),
diff --git a/src/datanode/src/sql/copy_table.rs b/src/datanode/src/sql/copy_table.rs
index 6159e33f010d..8131604111c6 100644
--- a/src/datanode/src/sql/copy_table.rs
+++ b/src/datanode/src/sql/copy_table.rs
@@ -22,8 +22,8 @@ use datafusion::parquet::basic::{Compression, Encoding};
use datafusion::parquet::file::properties::WriterProperties;
use datafusion::physical_plan::RecordBatchStream;
use futures::TryStreamExt;
-use object_store::services::fs::Builder;
-use object_store::ObjectStore;
+use object_store::services::Fs as Builder;
+use object_store::{ObjectStore, ObjectStoreBuilder};
use snafu::ResultExt;
use table::engine::TableReference;
use table::requests::CopyTableRequest;
@@ -53,7 +53,7 @@ impl SqlHandler {
let stream = Box::pin(DfRecordBatchStreamAdapter::new(stream));
let accessor = Builder::default().build().unwrap();
- let object_store = ObjectStore::new(accessor);
+ let object_store = ObjectStore::new(accessor).finish();
let mut parquet_writer = ParquetWriter::new(req.file_name, stream, object_store);
// TODO(jiachun):
diff --git a/src/mito/src/table/test_util.rs b/src/mito/src/table/test_util.rs
index 4a21e520f264..78eba284c313 100644
--- a/src/mito/src/table/test_util.rs
+++ b/src/mito/src/table/test_util.rs
@@ -21,8 +21,8 @@ use datatypes::prelude::ConcreteDataType;
use datatypes::schema::{ColumnSchema, RawSchema, Schema, SchemaBuilder, SchemaRef};
use datatypes::vectors::VectorRef;
use log_store::NoopLogStore;
-use object_store::services::fs::Builder;
-use object_store::ObjectStore;
+use object_store::services::Fs as Builder;
+use object_store::{ObjectStore, ObjectStoreBuilder};
use storage::compaction::noop::NoopCompactionScheduler;
use storage::config::EngineConfig as StorageEngineConfig;
use storage::EngineImpl;
@@ -99,7 +99,7 @@ pub async fn new_test_object_store(prefix: &str) -> (TempDir, ObjectStore) {
let dir = TempDir::new(prefix).unwrap();
let store_dir = dir.path().to_string_lossy();
let accessor = Builder::default().root(&store_dir).build().unwrap();
- (dir, ObjectStore::new(accessor))
+ (dir, ObjectStore::new(accessor).finish())
}
pub fn new_create_request(schema: SchemaRef) -> CreateTableRequest {
diff --git a/src/object-store/Cargo.toml b/src/object-store/Cargo.toml
index f18e1e9f7f82..9390492e0358 100644
--- a/src/object-store/Cargo.toml
+++ b/src/object-store/Cargo.toml
@@ -8,10 +8,7 @@ license.workspace = true
lru = "0.9"
async-trait = "0.1"
futures = { version = "0.3" }
-opendal = { version = "0.25.1", features = [
- "layers-tracing",
- "layers-metrics",
-] }
+opendal = { version = "0.27", features = ["layers-tracing", "layers-metrics"] }
tokio.workspace = true
[dev-dependencies]
diff --git a/src/object-store/src/backend.rs b/src/object-store/src/backend.rs
index c4689d79d814..fb84212dfaa4 100644
--- a/src/object-store/src/backend.rs
+++ b/src/object-store/src/backend.rs
@@ -15,4 +15,5 @@
pub mod azblob;
pub mod fs;
pub mod memory;
+pub mod oss;
pub mod s3;
diff --git a/src/object-store/src/backend/azblob.rs b/src/object-store/src/backend/azblob.rs
index 755c77d60a4e..e4545682a150 100644
--- a/src/object-store/src/backend/azblob.rs
+++ b/src/object-store/src/backend/azblob.rs
@@ -12,4 +12,4 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-pub use opendal::services::azblob::Builder;
+pub use opendal::services::Azblob as Builder;
diff --git a/src/object-store/src/backend/fs.rs b/src/object-store/src/backend/fs.rs
index bc1cebe5b2e4..20dadc0e46e3 100644
--- a/src/object-store/src/backend/fs.rs
+++ b/src/object-store/src/backend/fs.rs
@@ -12,4 +12,4 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-pub use opendal::services::fs::Builder;
+pub use opendal::services::Fs as Builder;
diff --git a/src/object-store/src/backend/memory.rs b/src/object-store/src/backend/memory.rs
index 22fd16186a9f..456938bc98f2 100644
--- a/src/object-store/src/backend/memory.rs
+++ b/src/object-store/src/backend/memory.rs
@@ -12,4 +12,4 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-pub use opendal::services::memory::Builder;
+pub use opendal::services::Memory as Builder;
diff --git a/src/object-store/src/backend/oss.rs b/src/object-store/src/backend/oss.rs
new file mode 100644
index 000000000000..c68791ad9ee6
--- /dev/null
+++ b/src/object-store/src/backend/oss.rs
@@ -0,0 +1,15 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+pub use opendal::services::Oss as Builder;
diff --git a/src/object-store/src/backend/s3.rs b/src/object-store/src/backend/s3.rs
index 46faa2659d0c..4912b1ebc492 100644
--- a/src/object-store/src/backend/s3.rs
+++ b/src/object-store/src/backend/s3.rs
@@ -12,4 +12,4 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-pub use opendal::services::s3::Builder;
+pub use opendal::services::S3 as Builder;
diff --git a/src/object-store/src/cache_policy.rs b/src/object-store/src/cache_policy.rs
index 5c60b5ad5040..1ef6004c7e85 100644
--- a/src/object-store/src/cache_policy.rs
+++ b/src/object-store/src/cache_policy.rs
@@ -12,112 +12,177 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::io;
use std::num::NonZeroUsize;
use std::ops::DerefMut;
+use std::pin::Pin;
use std::sync::Arc;
+use std::task::{Context, Poll};
use async_trait::async_trait;
-use futures::future::BoxFuture;
+use futures::AsyncRead;
use lru::LruCache;
-use opendal::layers::CachePolicy;
-use opendal::raw::output::Reader;
-use opendal::raw::{Accessor, RpDelete, RpRead};
-use opendal::{ErrorKind, OpDelete, OpRead, OpWrite, Result};
+use opendal::ops::*;
+use opendal::raw::*;
+use opendal::{ErrorKind, Result};
use tokio::sync::Mutex;
-#[derive(Debug)]
-pub struct LruCachePolicy {
+pub struct LruCacheLayer<C> {
+ cache: Arc<C>,
lru_cache: Arc<Mutex<LruCache<String, ()>>>,
}
-impl LruCachePolicy {
- pub fn new(capacity: usize) -> Self {
+impl<C: Accessor> LruCacheLayer<C> {
+ pub fn new(cache: Arc<C>, capacity: usize) -> Self {
Self {
+ cache,
lru_cache: Arc::new(Mutex::new(LruCache::new(
NonZeroUsize::new(capacity).unwrap(),
))),
}
}
+}
+
+impl<I: Accessor, C: Accessor> Layer<I> for LruCacheLayer<C> {
+ type LayeredAccessor = LruCacheAccessor<I, C>;
+
+ fn layer(&self, inner: I) -> Self::LayeredAccessor {
+ LruCacheAccessor {
+ inner: Arc::new(inner),
+ cache: self.cache.clone(),
+ lru_cache: self.lru_cache.clone(),
+ }
+ }
+}
+#[derive(Debug)]
+pub struct LruCacheAccessor<I, C> {
+ inner: Arc<I>,
+ cache: Arc<C>,
+ lru_cache: Arc<Mutex<LruCache<String, ()>>>,
+}
+
+impl<I, C> LruCacheAccessor<I, C> {
fn cache_path(&self, path: &str, args: &OpRead) -> String {
format!("{}.cache-{}", path, args.range().to_header())
}
}
#[async_trait]
-impl CachePolicy for LruCachePolicy {
- fn on_read(
- &self,
- inner: Arc<dyn Accessor>,
- cache: Arc<dyn Accessor>,
- path: &str,
- args: OpRead,
- ) -> BoxFuture<'static, Result<(RpRead, Reader)>> {
+impl<I: Accessor, C: Accessor> LayeredAccessor for LruCacheAccessor<I, C> {
+ type Inner = I;
+ type Reader = output::Reader;
+ type BlockingReader = I::BlockingReader;
+ type Pager = I::Pager;
+ type BlockingPager = I::BlockingPager;
+
+ fn inner(&self) -> &Self::Inner {
+ &self.inner
+ }
+
+ async fn read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::Reader)> {
let path = path.to_string();
let cache_path = self.cache_path(&path, &args);
let lru_cache = self.lru_cache.clone();
- Box::pin(async move {
- match cache.read(&cache_path, OpRead::default()).await {
- Ok(v) => {
- // update lru when cache hit
- let mut lru_cache = lru_cache.lock().await;
- lru_cache.get_or_insert(cache_path.clone(), || ());
- Ok(v)
- }
- Err(err) if err.kind() == ErrorKind::ObjectNotFound => {
- let (rp, reader) = inner.read(&path, args.clone()).await?;
- let size = rp.clone().into_metadata().content_length();
- let _ = cache
- .write(&cache_path, OpWrite::new(size), Box::new(reader))
- .await?;
- match cache.read(&cache_path, OpRead::default()).await {
- Ok(v) => {
- let r = {
- // push new cache file name to lru
- let mut lru_cache = lru_cache.lock().await;
- lru_cache.push(cache_path.clone(), ())
- };
- // delete the evicted cache file
- if let Some((k, _v)) = r {
- let _ = cache.delete(&k, OpDelete::new()).await;
- }
- Ok(v)
+
+ match self.cache.read(&cache_path, OpRead::default()).await {
+ Ok((rp, r)) => {
+ // update lru when cache hit
+ let mut lru_cache = lru_cache.lock().await;
+ lru_cache.get_or_insert(cache_path.clone(), || ());
+ Ok(to_output_reader((rp, r)))
+ }
+ Err(err) if err.kind() == ErrorKind::ObjectNotFound => {
+ let (rp, reader) = self.inner.read(&path, args.clone()).await?;
+ let size = rp.clone().into_metadata().content_length();
+ let _ = self
+ .cache
+ .write(
+ &cache_path,
+ OpWrite::new(size),
+ Box::new(ReadWrapper(reader)),
+ )
+ .await?;
+ match self.cache.read(&cache_path, OpRead::default()).await {
+ Ok((rp, reader)) => {
+ let r = {
+ // push new cache file name to lru
+ let mut lru_cache = lru_cache.lock().await;
+ lru_cache.push(cache_path.clone(), ())
+ };
+ // delete the evicted cache file
+ if let Some((k, _v)) = r {
+ let _ = self.cache.delete(&k, OpDelete::new()).await;
}
- Err(_) => inner.read(&path, args).await,
+ return Ok(to_output_reader((rp, reader)));
}
+ Err(_) => return self.inner.read(&path, args).await.map(to_output_reader),
}
- Err(_) => inner.read(&path, args).await,
}
- })
+ Err(_) => return self.inner.read(&path, args).await.map(to_output_reader),
+ }
}
- fn on_delete(
- &self,
- inner: Arc<dyn Accessor>,
- cache: Arc<dyn Accessor>,
- path: &str,
- args: OpDelete,
- ) -> BoxFuture<'static, Result<RpDelete>> {
+ fn blocking_read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::BlockingReader)> {
+ self.inner.blocking_read(path, args)
+ }
+
+ async fn delete(&self, path: &str, args: OpDelete) -> Result<RpDelete> {
let path = path.to_string();
let lru_cache = self.lru_cache.clone();
- Box::pin(async move {
- let cache_files: Vec<String> = {
- let mut guard = lru_cache.lock().await;
- let lru = guard.deref_mut();
- let cache_files = lru
- .iter()
- .filter(|(k, _v)| k.starts_with(format!("{path}.cache-").as_str()))
- .map(|(k, _v)| k.clone())
- .collect::<Vec<_>>();
- for k in &cache_files {
- lru.pop(k);
- }
- cache_files
- };
- for file in cache_files {
- let _ = cache.delete(&file, OpDelete::new()).await;
+
+ let cache_files: Vec<String> = {
+ let mut guard = lru_cache.lock().await;
+ let lru = guard.deref_mut();
+ let cache_files = lru
+ .iter()
+ .filter(|(k, _v)| k.starts_with(format!("{path}.cache-").as_str()))
+ .map(|(k, _v)| k.clone())
+ .collect::<Vec<_>>();
+ for k in &cache_files {
+ lru.pop(k);
}
- inner.delete(&path, args).await
- })
+ cache_files
+ };
+ for file in cache_files {
+ let _ = self.cache.delete(&file, OpDelete::new()).await;
+ }
+ return self.inner.delete(&path, args).await;
+ }
+
+ async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Pager)> {
+ self.inner.list(path, args).await
+ }
+
+ async fn scan(&self, path: &str, args: OpScan) -> Result<(RpScan, Self::Pager)> {
+ self.inner.scan(path, args).await
+ }
+
+ fn blocking_list(&self, path: &str, args: OpList) -> Result<(RpList, Self::BlockingPager)> {
+ self.inner.blocking_list(path, args)
+ }
+
+ fn blocking_scan(&self, path: &str, args: OpScan) -> Result<(RpScan, Self::BlockingPager)> {
+ self.inner.blocking_scan(path, args)
}
}
+
+/// TODO: Workaround for output::Read doesn't implement input::Read
+///
+/// Should be remove after opendal fixed it.
+struct ReadWrapper<R>(R);
+
+impl<R: output::Read> AsyncRead for ReadWrapper<R> {
+ fn poll_read(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &mut [u8],
+ ) -> Poll<io::Result<usize>> {
+ self.0.poll_read(cx, buf)
+ }
+}
+
+#[inline]
+fn to_output_reader<R: output::Read + 'static>(input: (RpRead, R)) -> (RpRead, output::Reader) {
+ (input.0, Box::new(input.1))
+}
diff --git a/src/object-store/src/lib.rs b/src/object-store/src/lib.rs
index cd2d3b1d798b..32f315ee6313 100644
--- a/src/object-store/src/lib.rs
+++ b/src/object-store/src/lib.rs
@@ -13,8 +13,8 @@
// limitations under the License.
pub use opendal::{
- layers, services, Error, ErrorKind, Layer, Object, ObjectLister, ObjectMetadata, ObjectMode,
- Operator as ObjectStore, Result,
+ layers, services, Builder as ObjectStoreBuilder, Error, ErrorKind, Object, ObjectLister,
+ ObjectMetadata, ObjectMode, Operator as ObjectStore, Result,
};
pub mod backend;
pub mod cache_policy;
diff --git a/src/object-store/tests/object_store_test.rs b/src/object-store/tests/object_store_test.rs
index 60a779122612..046b37cf928d 100644
--- a/src/object-store/tests/object_store_test.rs
+++ b/src/object-store/tests/object_store_test.rs
@@ -13,15 +13,15 @@
// limitations under the License.
use std::env;
+use std::sync::Arc;
use anyhow::Result;
use common_telemetry::logging;
use object_store::backend::{fs, s3};
-use object_store::cache_policy::LruCachePolicy;
+use object_store::cache_policy::LruCacheLayer;
use object_store::test_util::TempFolder;
-use object_store::{util, Object, ObjectLister, ObjectMode, ObjectStore};
-use opendal::layers::CacheLayer;
-use opendal::services::oss;
+use object_store::{util, Object, ObjectLister, ObjectMode, ObjectStore, ObjectStoreBuilder};
+use opendal::services::Oss;
use opendal::Operator;
use tempdir::TempDir;
@@ -100,7 +100,8 @@ async fn test_fs_backend() -> Result<()> {
.root(&data_dir.path().to_string_lossy())
.atomic_write_dir(&tmp_dir.path().to_string_lossy())
.build()?,
- );
+ )
+ .finish();
test_object_crud(&store).await?;
test_object_list(&store).await?;
@@ -124,7 +125,7 @@ async fn test_s3_backend() -> Result<()> {
.bucket(&bucket)
.build()?;
- let store = ObjectStore::new(accessor);
+ let store = ObjectStore::new(accessor).finish();
let mut guard = TempFolder::new(&store, "/");
test_object_crud(&store).await?;
@@ -145,14 +146,14 @@ async fn test_oss_backend() -> Result<()> {
let root = uuid::Uuid::new_v4().to_string();
- let accessor = oss::Builder::default()
+ let accessor = Oss::default()
.root(&root)
.access_key_id(&env::var("GT_OSS_ACCESS_KEY_ID")?)
.access_key_secret(&env::var("GT_OSS_ACCESS_KEY")?)
.bucket(&bucket)
.build()?;
- let store = ObjectStore::new(accessor);
+ let store = ObjectStore::new(accessor).finish();
let mut guard = TempFolder::new(&store, "/");
test_object_crud(&store).await?;
@@ -204,16 +205,15 @@ async fn test_object_store_cache_policy() -> Result<()> {
// create file cache layer
let cache_dir = TempDir::new("test_fs_cache")?;
- let cache_op = ObjectStore::new(
- fs::Builder::default()
- .root(&cache_dir.path().to_string_lossy())
- .atomic_write_dir(&cache_dir.path().to_string_lossy())
- .build()?,
- );
+ let cache_acc = fs::Builder::default()
+ .root(&cache_dir.path().to_string_lossy())
+ .atomic_write_dir(&cache_dir.path().to_string_lossy())
+ .build()?;
+ let cache_store = ObjectStore::new(cache_acc.clone()).finish();
// create operator for cache dir to verify cache file
- let cache_store = ObjectStore::from(cache_op.inner());
- let policy = LruCachePolicy::new(3);
- let store = store.layer(CacheLayer::new(cache_op).with_policy(policy));
+ let store = store
+ .layer(LruCacheLayer::new(Arc::new(cache_acc), 3))
+ .finish();
// create several object handler.
let o1 = store.object("test_file1");
diff --git a/src/storage/src/compaction/writer.rs b/src/storage/src/compaction/writer.rs
index 61a891ff1a11..43a392469f68 100644
--- a/src/storage/src/compaction/writer.rs
+++ b/src/storage/src/compaction/writer.rs
@@ -92,7 +92,7 @@ mod tests {
TimestampMillisecondVector, TimestampMillisecondVectorBuilder, UInt64VectorBuilder,
};
use object_store::backend::fs::Builder;
- use object_store::ObjectStore;
+ use object_store::{ObjectStore, ObjectStoreBuilder};
use store_api::storage::{ChunkReader, OpType, SequenceNumber};
use tempdir::TempDir;
@@ -273,7 +273,7 @@ mod tests {
let dir = TempDir::new("write_parquet").unwrap();
let path = dir.path().to_str().unwrap();
let backend = Builder::default().root(path).build().unwrap();
- let object_store = ObjectStore::new(backend);
+ let object_store = ObjectStore::new(backend).finish();
let seq = AtomicU64::new(0);
let schema = schema_for_test();
@@ -350,7 +350,7 @@ mod tests {
let dir = TempDir::new("write_parquet").unwrap();
let path = dir.path().to_str().unwrap();
let backend = Builder::default().root(path).build().unwrap();
- let object_store = ObjectStore::new(backend);
+ let object_store = ObjectStore::new(backend).finish();
let schema = schema_for_test();
let seq = AtomicU64::new(0);
diff --git a/src/storage/src/engine.rs b/src/storage/src/engine.rs
index b11beed5b27c..068f1078cfde 100644
--- a/src/storage/src/engine.rs
+++ b/src/storage/src/engine.rs
@@ -380,6 +380,7 @@ mod tests {
use datatypes::type_id::LogicalTypeId;
use log_store::test_util::log_store_util;
use object_store::backend::fs::Builder;
+ use object_store::ObjectStoreBuilder;
use store_api::storage::Region;
use tempdir::TempDir;
@@ -395,7 +396,7 @@ mod tests {
let store_dir = dir.path().to_string_lossy();
let accessor = Builder::default().root(&store_dir).build().unwrap();
- let object_store = ObjectStore::new(accessor);
+ let object_store = ObjectStore::new(accessor).finish();
let config = EngineConfig::default();
diff --git a/src/storage/src/file_purger.rs b/src/storage/src/file_purger.rs
index f395e379148d..d5bec3c3540c 100644
--- a/src/storage/src/file_purger.rs
+++ b/src/storage/src/file_purger.rs
@@ -107,7 +107,7 @@ pub mod noop {
#[cfg(test)]
mod tests {
use object_store::backend::fs::Builder;
- use object_store::ObjectStore;
+ use object_store::{ObjectStore, ObjectStoreBuilder};
use store_api::storage::OpType;
use tempdir::TempDir;
@@ -172,7 +172,8 @@ mod tests {
.root(dir.path().to_str().unwrap())
.build()
.unwrap(),
- );
+ )
+ .finish();
let sst_file_name = "test-file-purge-handler.parquet";
let noop_file_purger = Arc::new(LocalScheduler::new(
@@ -209,7 +210,8 @@ mod tests {
.root(dir.path().to_str().unwrap())
.build()
.unwrap(),
- );
+ )
+ .finish();
let sst_file_name = "test-file-purger.parquet";
let scheduler = Arc::new(LocalScheduler::new(
SchedulerConfig::default(),
diff --git a/src/storage/src/manifest/region.rs b/src/storage/src/manifest/region.rs
index 5831ac6f2f4f..97def6f40a33 100644
--- a/src/storage/src/manifest/region.rs
+++ b/src/storage/src/manifest/region.rs
@@ -23,7 +23,7 @@ mod tests {
use std::sync::Arc;
use object_store::backend::fs;
- use object_store::ObjectStore;
+ use object_store::{ObjectStore, ObjectStoreBuilder};
use store_api::manifest::action::ProtocolAction;
use store_api::manifest::{Manifest, MetaActionIterator, MAX_VERSION};
use tempdir::TempDir;
@@ -41,7 +41,8 @@ mod tests {
.root(&tmp_dir.path().to_string_lossy())
.build()
.unwrap(),
- );
+ )
+ .finish();
let manifest = RegionManifest::new("/manifest/", object_store);
diff --git a/src/storage/src/manifest/storage.rs b/src/storage/src/manifest/storage.rs
index 04634bcd574f..8cf498ff3f5e 100644
--- a/src/storage/src/manifest/storage.rs
+++ b/src/storage/src/manifest/storage.rs
@@ -278,7 +278,7 @@ impl ManifestLogStorage for ManifestObjectStore {
#[cfg(test)]
mod tests {
use object_store::backend::fs;
- use object_store::ObjectStore;
+ use object_store::{ObjectStore, ObjectStoreBuilder};
use tempdir::TempDir;
use super::*;
@@ -292,7 +292,8 @@ mod tests {
.root(&tmp_dir.path().to_string_lossy())
.build()
.unwrap(),
- );
+ )
+ .finish();
let log_store = ManifestObjectStore::new("/", object_store);
diff --git a/src/storage/src/region/tests.rs b/src/storage/src/region/tests.rs
index a48fac33e88f..c6d8083d2be9 100644
--- a/src/storage/src/region/tests.rs
+++ b/src/storage/src/region/tests.rs
@@ -30,7 +30,7 @@ use datatypes::vectors::{Int64Vector, TimestampMillisecondVector, VectorRef};
use log_store::raft_engine::log_store::RaftEngineLogStore;
use log_store::NoopLogStore;
use object_store::backend::fs;
-use object_store::ObjectStore;
+use object_store::{ObjectStore, ObjectStoreBuilder};
use store_api::storage::{
consts, Chunk, ChunkReader, RegionMeta, ScanRequest, SequenceNumber, Snapshot, WriteRequest,
};
@@ -286,7 +286,8 @@ async fn test_recover_region_manifets() {
.root(&tmp_dir.path().to_string_lossy())
.build()
.unwrap(),
- );
+ )
+ .finish();
let manifest = RegionManifest::new("/manifest/", object_store.clone());
let region_meta = Arc::new(build_region_meta());
diff --git a/src/storage/src/sst/parquet.rs b/src/storage/src/sst/parquet.rs
index 0d6effa6538f..f79c5558fb2f 100644
--- a/src/storage/src/sst/parquet.rs
+++ b/src/storage/src/sst/parquet.rs
@@ -524,6 +524,7 @@ mod tests {
use datatypes::types::{TimestampMillisecondType, TimestampType};
use datatypes::vectors::TimestampMillisecondVector;
use object_store::backend::fs::Builder;
+ use object_store::ObjectStoreBuilder;
use store_api::storage::OpType;
use tempdir::TempDir;
@@ -563,7 +564,7 @@ mod tests {
let dir = TempDir::new("write_parquet").unwrap();
let path = dir.path().to_str().unwrap();
let backend = Builder::default().root(path).build().unwrap();
- let object_store = ObjectStore::new(backend);
+ let object_store = ObjectStore::new(backend).finish();
let sst_file_name = "test-flush.parquet";
let iter = memtable.iter(&IterContext::default()).unwrap();
let writer = ParquetWriter::new(sst_file_name, Source::Iter(iter), object_store.clone());
@@ -661,7 +662,7 @@ mod tests {
let dir = TempDir::new("write_parquet").unwrap();
let path = dir.path().to_str().unwrap();
let backend = Builder::default().root(path).build().unwrap();
- let object_store = ObjectStore::new(backend);
+ let object_store = ObjectStore::new(backend).finish();
let sst_file_name = "test-read-large.parquet";
let iter = memtable.iter(&IterContext::default()).unwrap();
let writer = ParquetWriter::new(sst_file_name, Source::Iter(iter), object_store.clone());
@@ -683,7 +684,8 @@ mod tests {
.root(dir.path().to_str().unwrap())
.build()
.unwrap(),
- );
+ )
+ .finish();
let projected_schema = Arc::new(ProjectedSchema::new(schema, Some(vec![1])).unwrap());
let reader = ParquetReader::new(
@@ -733,7 +735,7 @@ mod tests {
let dir = TempDir::new("write_parquet").unwrap();
let path = dir.path().to_str().unwrap();
let backend = Builder::default().root(path).build().unwrap();
- let object_store = ObjectStore::new(backend);
+ let object_store = ObjectStore::new(backend).finish();
let sst_file_name = "test-read.parquet";
let iter = memtable.iter(&IterContext::default()).unwrap();
let writer = ParquetWriter::new(sst_file_name, Source::Iter(iter), object_store.clone());
@@ -755,7 +757,8 @@ mod tests {
.root(dir.path().to_str().unwrap())
.build()
.unwrap(),
- );
+ )
+ .finish();
let projected_schema = Arc::new(ProjectedSchema::new(schema, Some(vec![1])).unwrap());
let reader = ParquetReader::new(
@@ -845,7 +848,7 @@ mod tests {
let dir = TempDir::new("read-parquet-by-range").unwrap();
let path = dir.path().to_str().unwrap();
let backend = Builder::default().root(path).build().unwrap();
- let object_store = ObjectStore::new(backend);
+ let object_store = ObjectStore::new(backend).finish();
let sst_file_name = "test-read.parquet";
let iter = memtable.iter(&IterContext::default()).unwrap();
let writer = ParquetWriter::new(sst_file_name, Source::Iter(iter), object_store.clone());
diff --git a/src/storage/src/test_util/config_util.rs b/src/storage/src/test_util/config_util.rs
index 31459cecee95..dcf71515180e 100644
--- a/src/storage/src/test_util/config_util.rs
+++ b/src/storage/src/test_util/config_util.rs
@@ -16,8 +16,8 @@ use std::sync::Arc;
use log_store::raft_engine::log_store::RaftEngineLogStore;
use log_store::LogConfig;
-use object_store::backend::fs::Builder;
-use object_store::ObjectStore;
+use object_store::services::Fs as Builder;
+use object_store::{ObjectStore, ObjectStoreBuilder};
use crate::background::JobPoolImpl;
use crate::compaction::noop::NoopCompactionScheduler;
@@ -44,7 +44,7 @@ pub async fn new_store_config(
let manifest_dir = engine::region_manifest_dir(parent_dir, region_name);
let accessor = Builder::default().root(store_dir).build().unwrap();
- let object_store = ObjectStore::new(accessor);
+ let object_store = ObjectStore::new(accessor).finish();
let sst_layer = Arc::new(FsAccessLayer::new(&sst_dir, object_store.clone()));
let manifest = RegionManifest::new(&manifest_dir, object_store);
let job_pool = Arc::new(JobPoolImpl {});
diff --git a/tests-integration/src/test_util.rs b/tests-integration/src/test_util.rs
index 517050e1eb50..e9e15890289a 100644
--- a/tests-integration/src/test_util.rs
+++ b/tests-integration/src/test_util.rs
@@ -31,10 +31,9 @@ use datanode::sql::SqlHandler;
use datatypes::data_type::ConcreteDataType;
use datatypes::schema::{ColumnSchema, RawSchema};
use frontend::instance::Instance as FeInstance;
-use object_store::backend::s3;
-use object_store::services::oss;
+use object_store::backend::{oss, s3};
use object_store::test_util::TempFolder;
-use object_store::ObjectStore;
+use object_store::{ObjectStore, ObjectStoreBuilder};
use once_cell::sync::OnceCell;
use rand::Rng;
use servers::grpc::GrpcServer;
@@ -116,7 +115,7 @@ fn get_test_store_config(
let config = ObjectStoreConfig::Oss(oss_config);
- let store = ObjectStore::new(accessor);
+ let store = ObjectStore::new(accessor).finish();
(
config,
@@ -145,7 +144,7 @@ fn get_test_store_config(
let config = ObjectStoreConfig::S3(s3_config);
- let store = ObjectStore::new(accessor);
+ let store = ObjectStore::new(accessor).finish();
(config, Some(TempDirGuard::S3(TempFolder::new(&store, "/"))))
}
|
chore
|
Bump OpenDAL to v0.27 (#1057)
|
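The diff above repeats one mechanical migration for OpenDAL v0.27: service builders are used as object_store::services::Fs / S3 / Oss, and ObjectStore::new(accessor) now yields a builder that must be completed with .finish(). A minimal Rust sketch of that v0.27-era pattern, assuming only the re-exports shown in this diff (ObjectStore, ObjectStoreBuilder, Result, services::Fs); later OpenDAL releases changed this surface again:

use object_store::services::Fs;
use object_store::{ObjectStore, ObjectStoreBuilder, Result};

fn build_fs_store(root: &str) -> Result<ObjectStore> {
    // Builder now comes from services::Fs instead of backend::fs::Builder;
    // the ObjectStoreBuilder trait must be in scope for .build() to resolve.
    let accessor = Fs::default().root(root).build()?;
    // Operator construction is now two-step: new(..) then finish().
    Ok(ObjectStore::new(accessor).finish())
}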
fdd17c6eeb9ff0afc33a0212f76518634e37ffea
|
2023-02-23 11:42:34
|
Xuanwo
|
refactor: Clean up re-export of opendal services (#1067)
| false
|
diff --git a/src/catalog/src/system.rs b/src/catalog/src/system.rs
index cd55446d28d2..88a722ed3e92 100644
--- a/src/catalog/src/system.rs
+++ b/src/catalog/src/system.rs
@@ -482,7 +482,7 @@ mod tests {
pub async fn prepare_table_engine() -> (TempDir, TableEngineRef) {
let dir = TempDir::new("system-table-test").unwrap();
let store_dir = dir.path().to_string_lossy();
- let accessor = object_store::backend::fs::Builder::default()
+ let accessor = object_store::services::Fs::default()
.root(&store_dir)
.build()
.unwrap();
diff --git a/src/object-store/src/backend.rs b/src/object-store/src/backend.rs
deleted file mode 100644
index fb84212dfaa4..000000000000
--- a/src/object-store/src/backend.rs
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-pub mod azblob;
-pub mod fs;
-pub mod memory;
-pub mod oss;
-pub mod s3;
diff --git a/src/object-store/src/backend/azblob.rs b/src/object-store/src/backend/azblob.rs
deleted file mode 100644
index e4545682a150..000000000000
--- a/src/object-store/src/backend/azblob.rs
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-pub use opendal::services::Azblob as Builder;
diff --git a/src/object-store/src/backend/fs.rs b/src/object-store/src/backend/fs.rs
deleted file mode 100644
index 20dadc0e46e3..000000000000
--- a/src/object-store/src/backend/fs.rs
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-pub use opendal::services::Fs as Builder;
diff --git a/src/object-store/src/backend/memory.rs b/src/object-store/src/backend/memory.rs
deleted file mode 100644
index 456938bc98f2..000000000000
--- a/src/object-store/src/backend/memory.rs
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-pub use opendal::services::Memory as Builder;
diff --git a/src/object-store/src/backend/oss.rs b/src/object-store/src/backend/oss.rs
deleted file mode 100644
index c68791ad9ee6..000000000000
--- a/src/object-store/src/backend/oss.rs
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-pub use opendal::services::Oss as Builder;
diff --git a/src/object-store/src/backend/s3.rs b/src/object-store/src/backend/s3.rs
deleted file mode 100644
index 4912b1ebc492..000000000000
--- a/src/object-store/src/backend/s3.rs
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-pub use opendal::services::S3 as Builder;
diff --git a/src/object-store/src/lib.rs b/src/object-store/src/lib.rs
index 32f315ee6313..78d475be3a1d 100644
--- a/src/object-store/src/lib.rs
+++ b/src/object-store/src/lib.rs
@@ -16,7 +16,6 @@ pub use opendal::{
layers, services, Builder as ObjectStoreBuilder, Error, ErrorKind, Object, ObjectLister,
ObjectMetadata, ObjectMode, Operator as ObjectStore, Result,
};
-pub mod backend;
pub mod cache_policy;
pub mod test_util;
pub mod util;
diff --git a/src/object-store/tests/object_store_test.rs b/src/object-store/tests/object_store_test.rs
index 046b37cf928d..81d4dd5ec642 100644
--- a/src/object-store/tests/object_store_test.rs
+++ b/src/object-store/tests/object_store_test.rs
@@ -17,8 +17,8 @@ use std::sync::Arc;
use anyhow::Result;
use common_telemetry::logging;
-use object_store::backend::{fs, s3};
use object_store::cache_policy::LruCacheLayer;
+use object_store::services::{Fs, S3};
use object_store::test_util::TempFolder;
use object_store::{util, Object, ObjectLister, ObjectMode, ObjectStore, ObjectStoreBuilder};
use opendal::services::Oss;
@@ -96,7 +96,7 @@ async fn test_fs_backend() -> Result<()> {
let data_dir = TempDir::new("test_fs_backend")?;
let tmp_dir = TempDir::new("test_fs_backend")?;
let store = ObjectStore::new(
- fs::Builder::default()
+ Fs::default()
.root(&data_dir.path().to_string_lossy())
.atomic_write_dir(&tmp_dir.path().to_string_lossy())
.build()?,
@@ -118,7 +118,7 @@ async fn test_s3_backend() -> Result<()> {
let root = uuid::Uuid::new_v4().to_string();
- let accessor = s3::Builder::default()
+ let accessor = S3::default()
.root(&root)
.access_key_id(&env::var("GT_S3_ACCESS_KEY_ID")?)
.secret_access_key(&env::var("GT_S3_ACCESS_KEY")?)
@@ -197,7 +197,7 @@ async fn test_object_store_cache_policy() -> Result<()> {
// create file storage
let root_dir = TempDir::new("test_fs_backend")?;
let store = ObjectStore::new(
- fs::Builder::default()
+ Fs::default()
.root(&root_dir.path().to_string_lossy())
.atomic_write_dir(&root_dir.path().to_string_lossy())
.build()?,
@@ -205,7 +205,7 @@ async fn test_object_store_cache_policy() -> Result<()> {
// create file cache layer
let cache_dir = TempDir::new("test_fs_cache")?;
- let cache_acc = fs::Builder::default()
+ let cache_acc = Fs::default()
.root(&cache_dir.path().to_string_lossy())
.atomic_write_dir(&cache_dir.path().to_string_lossy())
.build()?;
diff --git a/src/storage/src/compaction/writer.rs b/src/storage/src/compaction/writer.rs
index 43a392469f68..b8fa8cb80470 100644
--- a/src/storage/src/compaction/writer.rs
+++ b/src/storage/src/compaction/writer.rs
@@ -91,7 +91,7 @@ mod tests {
use datatypes::vectors::{
TimestampMillisecondVector, TimestampMillisecondVectorBuilder, UInt64VectorBuilder,
};
- use object_store::backend::fs::Builder;
+ use object_store::services::Fs;
use object_store::{ObjectStore, ObjectStoreBuilder};
use store_api::storage::{ChunkReader, OpType, SequenceNumber};
use tempdir::TempDir;
@@ -272,7 +272,7 @@ mod tests {
async fn test_sst_reader() {
let dir = TempDir::new("write_parquet").unwrap();
let path = dir.path().to_str().unwrap();
- let backend = Builder::default().root(path).build().unwrap();
+ let backend = Fs::default().root(path).build().unwrap();
let object_store = ObjectStore::new(backend).finish();
let seq = AtomicU64::new(0);
@@ -349,7 +349,7 @@ mod tests {
async fn test_sst_split() {
let dir = TempDir::new("write_parquet").unwrap();
let path = dir.path().to_str().unwrap();
- let backend = Builder::default().root(path).build().unwrap();
+ let backend = Fs::default().root(path).build().unwrap();
let object_store = ObjectStore::new(backend).finish();
let schema = schema_for_test();
diff --git a/src/storage/src/engine.rs b/src/storage/src/engine.rs
index 068f1078cfde..5122c8e91873 100644
--- a/src/storage/src/engine.rs
+++ b/src/storage/src/engine.rs
@@ -379,7 +379,7 @@ impl<S: LogStore> EngineInner<S> {
mod tests {
use datatypes::type_id::LogicalTypeId;
use log_store::test_util::log_store_util;
- use object_store::backend::fs::Builder;
+ use object_store::services::Fs;
use object_store::ObjectStoreBuilder;
use store_api::storage::Region;
use tempdir::TempDir;
@@ -395,7 +395,7 @@ mod tests {
let dir = TempDir::new("test_create_new_region").unwrap();
let store_dir = dir.path().to_string_lossy();
- let accessor = Builder::default().root(&store_dir).build().unwrap();
+ let accessor = Fs::default().root(&store_dir).build().unwrap();
let object_store = ObjectStore::new(accessor).finish();
let config = EngineConfig::default();
diff --git a/src/storage/src/file_purger.rs b/src/storage/src/file_purger.rs
index d5bec3c3540c..78334396703a 100644
--- a/src/storage/src/file_purger.rs
+++ b/src/storage/src/file_purger.rs
@@ -106,7 +106,7 @@ pub mod noop {
#[cfg(test)]
mod tests {
- use object_store::backend::fs::Builder;
+ use object_store::services::Fs;
use object_store::{ObjectStore, ObjectStoreBuilder};
use store_api::storage::OpType;
use tempdir::TempDir;
@@ -168,7 +168,7 @@ mod tests {
async fn test_file_purger_handler() {
let dir = TempDir::new("file-purge").unwrap();
let object_store = ObjectStore::new(
- Builder::default()
+ Fs::default()
.root(dir.path().to_str().unwrap())
.build()
.unwrap(),
@@ -206,7 +206,7 @@ mod tests {
common_telemetry::init_default_ut_logging();
let dir = TempDir::new("file-purge").unwrap();
let object_store = ObjectStore::new(
- Builder::default()
+ Fs::default()
.root(dir.path().to_str().unwrap())
.build()
.unwrap(),
diff --git a/src/storage/src/manifest/region.rs b/src/storage/src/manifest/region.rs
index 97def6f40a33..e6e5678c2de5 100644
--- a/src/storage/src/manifest/region.rs
+++ b/src/storage/src/manifest/region.rs
@@ -22,7 +22,7 @@ pub type RegionManifest = ManifestImpl<RegionMetaActionList>;
mod tests {
use std::sync::Arc;
- use object_store::backend::fs;
+ use object_store::services::Fs;
use object_store::{ObjectStore, ObjectStoreBuilder};
use store_api::manifest::action::ProtocolAction;
use store_api::manifest::{Manifest, MetaActionIterator, MAX_VERSION};
@@ -37,7 +37,7 @@ mod tests {
common_telemetry::init_default_ut_logging();
let tmp_dir = TempDir::new("test_region_manifest").unwrap();
let object_store = ObjectStore::new(
- fs::Builder::default()
+ Fs::default()
.root(&tmp_dir.path().to_string_lossy())
.build()
.unwrap(),
diff --git a/src/storage/src/manifest/storage.rs b/src/storage/src/manifest/storage.rs
index 8cf498ff3f5e..6061dba6173f 100644
--- a/src/storage/src/manifest/storage.rs
+++ b/src/storage/src/manifest/storage.rs
@@ -277,7 +277,7 @@ impl ManifestLogStorage for ManifestObjectStore {
#[cfg(test)]
mod tests {
- use object_store::backend::fs;
+ use object_store::services::Fs;
use object_store::{ObjectStore, ObjectStoreBuilder};
use tempdir::TempDir;
@@ -288,7 +288,7 @@ mod tests {
common_telemetry::init_default_ut_logging();
let tmp_dir = TempDir::new("test_manifest_log_store").unwrap();
let object_store = ObjectStore::new(
- fs::Builder::default()
+ Fs::default()
.root(&tmp_dir.path().to_string_lossy())
.build()
.unwrap(),
diff --git a/src/storage/src/region/tests.rs b/src/storage/src/region/tests.rs
index c6d8083d2be9..f99210414753 100644
--- a/src/storage/src/region/tests.rs
+++ b/src/storage/src/region/tests.rs
@@ -29,7 +29,7 @@ use datatypes::type_id::LogicalTypeId;
use datatypes::vectors::{Int64Vector, TimestampMillisecondVector, VectorRef};
use log_store::raft_engine::log_store::RaftEngineLogStore;
use log_store::NoopLogStore;
-use object_store::backend::fs;
+use object_store::services::Fs;
use object_store::{ObjectStore, ObjectStoreBuilder};
use store_api::storage::{
consts, Chunk, ChunkReader, RegionMeta, ScanRequest, SequenceNumber, Snapshot, WriteRequest,
@@ -282,7 +282,7 @@ async fn test_recover_region_manifets() {
let memtable_builder = Arc::new(DefaultMemtableBuilder::default()) as _;
let object_store = ObjectStore::new(
- fs::Builder::default()
+ Fs::default()
.root(&tmp_dir.path().to_string_lossy())
.build()
.unwrap(),
diff --git a/src/storage/src/sst/parquet.rs b/src/storage/src/sst/parquet.rs
index f79c5558fb2f..556a83ff3a8f 100644
--- a/src/storage/src/sst/parquet.rs
+++ b/src/storage/src/sst/parquet.rs
@@ -523,7 +523,7 @@ mod tests {
use datatypes::prelude::{ScalarVector, Vector};
use datatypes::types::{TimestampMillisecondType, TimestampType};
use datatypes::vectors::TimestampMillisecondVector;
- use object_store::backend::fs::Builder;
+ use object_store::services::Fs;
use object_store::ObjectStoreBuilder;
use store_api::storage::OpType;
use tempdir::TempDir;
@@ -563,7 +563,7 @@ mod tests {
let dir = TempDir::new("write_parquet").unwrap();
let path = dir.path().to_str().unwrap();
- let backend = Builder::default().root(path).build().unwrap();
+ let backend = Fs::default().root(path).build().unwrap();
let object_store = ObjectStore::new(backend).finish();
let sst_file_name = "test-flush.parquet";
let iter = memtable.iter(&IterContext::default()).unwrap();
@@ -661,7 +661,7 @@ mod tests {
let dir = TempDir::new("write_parquet").unwrap();
let path = dir.path().to_str().unwrap();
- let backend = Builder::default().root(path).build().unwrap();
+ let backend = Fs::default().root(path).build().unwrap();
let object_store = ObjectStore::new(backend).finish();
let sst_file_name = "test-read-large.parquet";
let iter = memtable.iter(&IterContext::default()).unwrap();
@@ -680,7 +680,7 @@ mod tests {
time_range
);
let operator = ObjectStore::new(
- object_store::backend::fs::Builder::default()
+ Fs::default()
.root(dir.path().to_str().unwrap())
.build()
.unwrap(),
@@ -734,7 +734,7 @@ mod tests {
let dir = TempDir::new("write_parquet").unwrap();
let path = dir.path().to_str().unwrap();
- let backend = Builder::default().root(path).build().unwrap();
+ let backend = Fs::default().root(path).build().unwrap();
let object_store = ObjectStore::new(backend).finish();
let sst_file_name = "test-read.parquet";
let iter = memtable.iter(&IterContext::default()).unwrap();
@@ -753,7 +753,7 @@ mod tests {
time_range
);
let operator = ObjectStore::new(
- object_store::backend::fs::Builder::default()
+ Fs::default()
.root(dir.path().to_str().unwrap())
.build()
.unwrap(),
@@ -847,7 +847,7 @@ mod tests {
let dir = TempDir::new("read-parquet-by-range").unwrap();
let path = dir.path().to_str().unwrap();
- let backend = Builder::default().root(path).build().unwrap();
+ let backend = Fs::default().root(path).build().unwrap();
let object_store = ObjectStore::new(backend).finish();
let sst_file_name = "test-read.parquet";
let iter = memtable.iter(&IterContext::default()).unwrap();
diff --git a/tests-integration/src/test_util.rs b/tests-integration/src/test_util.rs
index e9e15890289a..11ddb3b7b05d 100644
--- a/tests-integration/src/test_util.rs
+++ b/tests-integration/src/test_util.rs
@@ -31,7 +31,7 @@ use datanode::sql::SqlHandler;
use datatypes::data_type::ConcreteDataType;
use datatypes::schema::{ColumnSchema, RawSchema};
use frontend::instance::Instance as FeInstance;
-use object_store::backend::{oss, s3};
+use object_store::services::{Oss, S3};
use object_store::test_util::TempFolder;
use object_store::{ObjectStore, ObjectStoreBuilder};
use once_cell::sync::OnceCell;
@@ -104,7 +104,7 @@ fn get_test_store_config(
cache_capacity: None,
};
- let accessor = oss::Builder::default()
+ let accessor = Oss::default()
.root(&oss_config.root)
.endpoint(&oss_config.endpoint)
.access_key_id(&oss_config.access_key_id)
@@ -134,7 +134,7 @@ fn get_test_store_config(
cache_capacity: None,
};
- let accessor = s3::Builder::default()
+ let accessor = S3::default()
.root(&s3_config.root)
.access_key_id(&s3_config.access_key_id)
.secret_access_key(&s3_config.secret_access_key)
|
refactor
|
Clean up re-export of opendal services (#1067)
|
d2e081c1f9da11584e6a5d7dbb8532092dfe8ae2
|
2024-04-16 12:56:20
|
Yingwen
|
docs: update memtable config example (#3712)
| false
|
diff --git a/config/config.md b/config/config.md
index 50c3f967b23b..54d09c247763 100644
--- a/config/config.md
+++ b/config/config.md
@@ -109,10 +109,10 @@
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `64M` | Memory threshold for performing an external sort during index creation.<br/>Setting to empty will disable external sorting, forcing all sorting operations to happen in memory. |
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | File system path to store intermediate files for external sorting (default `{data_home}/index_intermediate`). |
| `region_engine.mito.memtable` | -- | -- | -- |
-| `region_engine.mito.memtable.type` | String | `partition_tree` | Memtable type.<br/>- `partition_tree`: partition tree memtable<br/>- `time_series`: time-series memtable (deprecated) |
-| `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard. |
-| `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard. |
-| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes. |
+| `region_engine.mito.memtable.type` | String | `time_series` | Memtable type.<br/>- `time_series`: time-series memtable<br/>- `partition_tree`: partition tree memtable (experimental) |
+| `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard.<br/>Only available for `partition_tree` memtable. |
+| `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. |
+| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
| `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
@@ -354,10 +354,10 @@
| `region_engine.mito.inverted_index.mem_threshold_on_create` | String | `64M` | Memory threshold for performing an external sort during index creation.<br/>Setting to empty will disable external sorting, forcing all sorting operations to happen in memory. |
| `region_engine.mito.inverted_index.intermediate_path` | String | `""` | File system path to store intermediate files for external sorting (default `{data_home}/index_intermediate`). |
| `region_engine.mito.memtable` | -- | -- | -- |
-| `region_engine.mito.memtable.type` | String | `partition_tree` | Memtable type.<br/>- `partition_tree`: partition tree memtable<br/>- `time_series`: time-series memtable (deprecated) |
-| `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard. |
-| `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard. |
-| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes. |
+| `region_engine.mito.memtable.type` | String | `time_series` | Memtable type.<br/>- `time_series`: time-series memtable<br/>- `partition_tree`: partition tree memtable (experimental) |
+| `region_engine.mito.memtable.index_max_keys_per_shard` | Integer | `8192` | The max number of keys in one shard.<br/>Only available for `partition_tree` memtable. |
+| `region_engine.mito.memtable.data_freeze_threshold` | Integer | `32768` | The max rows of data inside the actively writing buffer in one shard.<br/>Only available for `partition_tree` memtable. |
+| `region_engine.mito.memtable.fork_dictionary_bytes` | String | `1GiB` | Max dictionary bytes.<br/>Only available for `partition_tree` memtable. |
| `logging` | -- | -- | The logging options. |
| `logging.dir` | String | `/tmp/greptimedb/logs` | The directory to store the log files. |
| `logging.level` | String | `None` | The log level. Can be `info`/`debug`/`warn`/`error`. |
diff --git a/config/datanode.example.toml b/config/datanode.example.toml
index 498b7282411a..58a2d8d23191 100644
--- a/config/datanode.example.toml
+++ b/config/datanode.example.toml
@@ -366,17 +366,20 @@ intermediate_path = ""
[region_engine.mito.memtable]
## Memtable type.
-## - `partition_tree`: partition tree memtable
-## - `time_series`: time-series memtable (deprecated)
-type = "partition_tree"
+## - `time_series`: time-series memtable
+## - `partition_tree`: partition tree memtable (experimental)
+type = "time_series"
## The max number of keys in one shard.
+## Only available for `partition_tree` memtable.
index_max_keys_per_shard = 8192
## The max rows of data inside the actively writing buffer in one shard.
+## Only available for `partition_tree` memtable.
data_freeze_threshold = 32768
## Max dictionary bytes.
+## Only available for `partition_tree` memtable.
fork_dictionary_bytes = "1GiB"
## The logging options.
diff --git a/config/standalone.example.toml b/config/standalone.example.toml
index 189e679e8624..55fc42f4b4f0 100644
--- a/config/standalone.example.toml
+++ b/config/standalone.example.toml
@@ -413,17 +413,20 @@ intermediate_path = ""
[region_engine.mito.memtable]
## Memtable type.
-## - `partition_tree`: partition tree memtable
-## - `time_series`: time-series memtable (deprecated)
-type = "partition_tree"
+## - `time_series`: time-series memtable
+## - `partition_tree`: partition tree memtable (experimental)
+type = "time_series"
## The max number of keys in one shard.
+## Only available for `partition_tree` memtable.
index_max_keys_per_shard = 8192
## The max rows of data inside the actively writing buffer in one shard.
+## Only available for `partition_tree` memtable.
data_freeze_threshold = 32768
## Max dictionary bytes.
+## Only available for `partition_tree` memtable.
fork_dictionary_bytes = "1GiB"
## The logging options.
|
docs
|
update memtable config example (#3712)
|
fb6153f7e0aee60bf9940e2f9811c73b687684d9
|
2022-09-15 16:02:55
|
LFC
|
feat: a new type for supplying `Ord` to `Primitive` (#255)
| false
|
diff --git a/src/common/function/src/scalars/aggregate/median.rs b/src/common/function/src/scalars/aggregate/median.rs
index ef2e1bf3f240..7cd3601a5c3d 100644
--- a/src/common/function/src/scalars/aggregate/median.rs
+++ b/src/common/function/src/scalars/aggregate/median.rs
@@ -9,9 +9,10 @@ use common_query::error::{
use common_query::logical_plan::{Accumulator, AggregateFunctionCreator};
use common_query::prelude::*;
use datatypes::prelude::*;
+use datatypes::types::OrdPrimitive;
use datatypes::value::ListValue;
use datatypes::vectors::{ConstantVector, ListVector};
-use datatypes::with_match_ordered_primitive_type_id;
+use datatypes::with_match_primitive_type_id;
use num::NumCast;
use snafu::{ensure, OptionExt, ResultExt};
@@ -36,17 +37,19 @@ use snafu::{ensure, OptionExt, ResultExt};
#[derive(Debug, Default)]
pub struct Median<T>
where
- T: Primitive + Ord,
+ T: Primitive,
{
- greater: BinaryHeap<Reverse<T>>,
- not_greater: BinaryHeap<T>,
+ greater: BinaryHeap<Reverse<OrdPrimitive<T>>>,
+ not_greater: BinaryHeap<OrdPrimitive<T>>,
}
impl<T> Median<T>
where
- T: Primitive + Ord,
+ T: Primitive,
{
fn push(&mut self, value: T) {
+ let value = OrdPrimitive::<T>(value);
+
if self.not_greater.is_empty() {
self.not_greater.push(value);
return;
@@ -70,7 +73,7 @@ where
// to use them.
impl<T> Accumulator for Median<T>
where
- T: Primitive + Ord,
+ T: Primitive,
for<'a> T: Scalar<RefType<'a> = T>,
{
// This function serializes our state to `ScalarValue`, which DataFusion uses to pass this
@@ -165,8 +168,8 @@ where
let greater = self.greater.peek().unwrap();
// the following three NumCast's `unwrap`s are safe because T is primitive
- let not_greater_v: f64 = NumCast::from(not_greater).unwrap();
- let greater_v: f64 = NumCast::from(greater.0).unwrap();
+ let not_greater_v: f64 = NumCast::from(not_greater.as_primitive()).unwrap();
+ let greater_v: f64 = NumCast::from(greater.0.as_primitive()).unwrap();
let median: T = NumCast::from((not_greater_v + greater_v) / 2.0).unwrap();
median.into()
};
@@ -182,7 +185,7 @@ impl AggregateFunctionCreator for MedianAccumulatorCreator {
fn creator(&self) -> AccumulatorCreatorFunction {
let creator: AccumulatorCreatorFunction = Arc::new(move |types: &[ConcreteDataType]| {
let input_type = &types[0];
- with_match_ordered_primitive_type_id!(
+ with_match_primitive_type_id!(
input_type.logical_type_id(),
|$S| {
Ok(Box::new(Median::<$S>::default()))
diff --git a/src/common/function/src/scalars/aggregate/percentile.rs b/src/common/function/src/scalars/aggregate/percentile.rs
index 3b79a4bdf1f8..fbf497edcc92 100644
--- a/src/common/function/src/scalars/aggregate/percentile.rs
+++ b/src/common/function/src/scalars/aggregate/percentile.rs
@@ -10,9 +10,10 @@ use common_query::error::{
use common_query::logical_plan::{Accumulator, AggregateFunctionCreator};
use common_query::prelude::*;
use datatypes::prelude::*;
+use datatypes::types::OrdPrimitive;
use datatypes::value::{ListValue, OrderedFloat};
use datatypes::vectors::{ConstantVector, Float64Vector, ListVector};
-use datatypes::with_match_ordered_primitive_type_id;
+use datatypes::with_match_primitive_type_id;
use num::NumCast;
use snafu::{ensure, OptionExt, ResultExt};
@@ -37,19 +38,21 @@ use snafu::{ensure, OptionExt, ResultExt};
#[derive(Debug, Default)]
pub struct Percentile<T>
where
- T: Primitive + Ord,
+ T: Primitive,
{
- greater: BinaryHeap<Reverse<T>>,
- not_greater: BinaryHeap<T>,
+ greater: BinaryHeap<Reverse<OrdPrimitive<T>>>,
+ not_greater: BinaryHeap<OrdPrimitive<T>>,
n: u64,
p: Option<f64>,
}
impl<T> Percentile<T>
where
- T: Primitive + Ord,
+ T: Primitive,
{
fn push(&mut self, value: T) {
+ let value = OrdPrimitive::<T>(value);
+
self.n += 1;
if self.not_greater.is_empty() {
self.not_greater.push(value);
@@ -76,7 +79,7 @@ where
impl<T> Accumulator for Percentile<T>
where
- T: Primitive + Ord,
+ T: Primitive,
for<'a> T: Scalar<RefType<'a> = T>,
{
fn state(&self) -> Result<Vec<Value>> {
@@ -212,7 +215,7 @@ where
if not_greater.is_none() {
return Ok(Value::Null);
}
- let not_greater = *self.not_greater.peek().unwrap();
+ let not_greater = (*self.not_greater.peek().unwrap()).as_primitive();
let percentile = if self.greater.is_empty() {
NumCast::from(not_greater).unwrap()
} else {
@@ -224,7 +227,7 @@ where
};
let fract = (((self.n - 1) as f64) * p / 100_f64).fract();
let not_greater_v: f64 = NumCast::from(not_greater).unwrap();
- let greater_v: f64 = NumCast::from(greater.0).unwrap();
+ let greater_v: f64 = NumCast::from(greater.0.as_primitive()).unwrap();
not_greater_v * (1.0 - fract) + greater_v * fract
};
Ok(Value::from(percentile))
@@ -239,7 +242,7 @@ impl AggregateFunctionCreator for PercentileAccumulatorCreator {
fn creator(&self) -> AccumulatorCreatorFunction {
let creator: AccumulatorCreatorFunction = Arc::new(move |types: &[ConcreteDataType]| {
let input_type = &types[0];
- with_match_ordered_primitive_type_id!(
+ with_match_primitive_type_id!(
input_type.logical_type_id(),
|$S| {
Ok(Box::new(Percentile::<$S>::default()))
diff --git a/src/datatypes/src/macros.rs b/src/datatypes/src/macros.rs
index da385cd4ced2..fb94195e3825 100644
--- a/src/datatypes/src/macros.rs
+++ b/src/datatypes/src/macros.rs
@@ -38,23 +38,6 @@ macro_rules! for_all_primitive_types {
};
}
-#[macro_export]
-macro_rules! for_all_ordered_primitive_types {
- ($macro:tt $(, $x:tt)*) => {
- $macro! {
- [$($x),*],
- { i8 },
- { i16 },
- { i32 },
- { i64 },
- { u8 },
- { u16 },
- { u32 },
- { u64 }
- }
- };
-}
-
#[macro_export]
macro_rules! with_match_primitive_type_id {
($key_type:expr, | $_:tt $T:ident | $body:tt, $nbody:tt) => {{
@@ -81,27 +64,3 @@ macro_rules! with_match_primitive_type_id {
}
}};
}
-
-#[macro_export]
-macro_rules! with_match_ordered_primitive_type_id {
- ($key_type:expr, | $_:tt $T:ident | $body:tt, $nbody:tt) => {{
- macro_rules! __with_ty__ {
- ( $_ $T:ident ) => {
- $body
- };
- }
-
- match $key_type {
- LogicalTypeId::Int8 => __with_ty__! { i8 },
- LogicalTypeId::Int16 => __with_ty__! { i16 },
- LogicalTypeId::Int32 => __with_ty__! { i32 },
- LogicalTypeId::Int64 => __with_ty__! { i64 },
- LogicalTypeId::UInt8 => __with_ty__! { u8 },
- LogicalTypeId::UInt16 => __with_ty__! { u16 },
- LogicalTypeId::UInt32 => __with_ty__! { u32 },
- LogicalTypeId::UInt64 => __with_ty__! { u64 },
-
- _ => $nbody,
- }
- }};
-}
diff --git a/src/datatypes/src/types.rs b/src/datatypes/src/types.rs
index a3bfda8aa5bd..3bb5bfdb93bf 100644
--- a/src/datatypes/src/types.rs
+++ b/src/datatypes/src/types.rs
@@ -15,7 +15,7 @@ pub use date::DateType;
pub use datetime::DateTimeType;
pub use list_type::ListType;
pub use null_type::NullType;
-pub use primitive_traits::Primitive;
+pub use primitive_traits::{OrdPrimitive, Primitive};
pub use primitive_type::{
Float32Type, Float64Type, Int16Type, Int32Type, Int64Type, Int8Type, PrimitiveElement,
PrimitiveType, UInt16Type, UInt32Type, UInt64Type, UInt8Type,
diff --git a/src/datatypes/src/types/primitive_traits.rs b/src/datatypes/src/types/primitive_traits.rs
index be031fe1caf1..941d85734904 100644
--- a/src/datatypes/src/types/primitive_traits.rs
+++ b/src/datatypes/src/types/primitive_traits.rs
@@ -1,3 +1,5 @@
+use std::cmp::Ordering;
+
use arrow::compute::arithmetics::basic::NativeArithmetics;
use arrow::types::NativeType;
use num::NumCast;
@@ -41,3 +43,82 @@ impl_primitive!(i32, i64);
impl_primitive!(i64, i64);
impl_primitive!(f32, f64);
impl_primitive!(f64, f64);
+
+/// A new type for [Primitive], complement the `Ord` feature for it. Wrapping not ordered
+/// primitive types like `f32` and `f64` in `OrdPrimitive` can make them be used in places that
+/// require `Ord`. For example, in `Median` or `Percentile` UDAFs.
+#[derive(Debug, Clone, Copy, PartialEq)]
+pub struct OrdPrimitive<T: Primitive>(pub T);
+
+impl<T: Primitive> OrdPrimitive<T> {
+ pub fn as_primitive(&self) -> T {
+ self.0
+ }
+}
+
+impl<T: Primitive> Eq for OrdPrimitive<T> {}
+
+impl<T: Primitive> PartialOrd for OrdPrimitive<T> {
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ Some(self.cmp(other))
+ }
+}
+
+impl<T: Primitive> Ord for OrdPrimitive<T> {
+ fn cmp(&self, other: &Self) -> Ordering {
+ self.0.into().cmp(&other.0.into())
+ }
+}
+
+impl<T: Primitive> From<OrdPrimitive<T>> for Value {
+ fn from(p: OrdPrimitive<T>) -> Self {
+ p.0.into()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::collections::BinaryHeap;
+
+ use super::*;
+
+ #[test]
+ fn test_ord_primitive() {
+ struct Foo<T>
+ where
+ T: Primitive,
+ {
+ heap: BinaryHeap<OrdPrimitive<T>>,
+ }
+
+ impl<T> Foo<T>
+ where
+ T: Primitive,
+ {
+ fn push(&mut self, value: T) {
+ let value = OrdPrimitive::<T>(value);
+ self.heap.push(value);
+ }
+ }
+
+ macro_rules! test {
+ ($Type:ident) => {
+ let mut foo = Foo::<$Type> {
+ heap: BinaryHeap::new(),
+ };
+ foo.push($Type::default());
+ };
+ }
+
+ test!(u8);
+ test!(u16);
+ test!(u32);
+ test!(u64);
+ test!(i8);
+ test!(i16);
+ test!(i32);
+ test!(i64);
+ test!(f32);
+ test!(f64);
+ }
+}
diff --git a/src/query/tests/percentile_test.rs b/src/query/tests/percentile_test.rs
index 550d9d9fe897..08bf7df8548a 100644
--- a/src/query/tests/percentile_test.rs
+++ b/src/query/tests/percentile_test.rs
@@ -9,7 +9,7 @@ use common_recordbatch::error::Result as RecordResult;
use common_recordbatch::{util, RecordBatch};
use datafusion::field_util::FieldExt;
use datafusion::field_util::SchemaExt;
-use datatypes::for_all_ordered_primitive_types;
+use datatypes::for_all_primitive_types;
use datatypes::prelude::*;
use datatypes::schema::{ColumnSchema, Schema};
use datatypes::types::PrimitiveElement;
@@ -25,9 +25,6 @@ async fn test_percentile_aggregator() -> Result<()> {
common_telemetry::init_default_ut_logging();
let engine = create_query_engine();
- test_percentile_failed::<f32>("f32_number", "numbers", engine.clone()).await?;
- test_percentile_failed::<f64>("f64_number", "numbers", engine.clone()).await?;
-
macro_rules! test_percentile {
([], $( { $T:ty } ),*) => {
$(
@@ -36,7 +33,7 @@ async fn test_percentile_aggregator() -> Result<()> {
)*
}
}
- for_all_ordered_primitive_types! { test_percentile }
+ for_all_primitive_types! { test_percentile }
Ok(())
}
@@ -114,24 +111,6 @@ async fn execute_percentile<'a>(
util::collect(recordbatch_stream).await
}
-async fn test_percentile_failed<T>(
- column_name: &str,
- table_name: &str,
- engine: Arc<dyn QueryEngine>,
-) -> Result<()>
-where
- T: PrimitiveElement,
-{
- let result = execute_percentile(column_name, table_name, engine).await;
- assert!(result.is_err());
- let error = result.unwrap_err();
- assert!(error.to_string().contains(&format!(
- "Failed to create accumulator: \"PERCENTILE\" aggregate function not support data type {}",
- T::type_name()
- )));
- Ok(())
-}
-
fn create_correctness_engine() -> Arc<dyn QueryEngine> {
// create engine
let schema_provider = Arc::new(MemorySchemaProvider::new());
diff --git a/src/query/tests/query_engine_test.rs b/src/query/tests/query_engine_test.rs
index 70703dd7770f..a1c9d7988250 100644
--- a/src/query/tests/query_engine_test.rs
+++ b/src/query/tests/query_engine_test.rs
@@ -14,11 +14,11 @@ use common_recordbatch::{util, RecordBatch};
use datafusion::field_util::FieldExt;
use datafusion::field_util::SchemaExt;
use datafusion::logical_plan::LogicalPlanBuilder;
-use datatypes::for_all_ordered_primitive_types;
+use datatypes::for_all_primitive_types;
use datatypes::prelude::*;
use datatypes::schema::{ColumnSchema, Schema};
-use datatypes::types::PrimitiveElement;
-use datatypes::vectors::{Float32Vector, Float64Vector, PrimitiveVector, UInt32Vector};
+use datatypes::types::{OrdPrimitive, PrimitiveElement};
+use datatypes::vectors::{PrimitiveVector, UInt32Vector};
use num::NumCast;
use query::error::Result;
use query::plan::LogicalPlan;
@@ -149,7 +149,7 @@ fn create_query_engine() -> Arc<dyn QueryEngine> {
let catalog_provider = Arc::new(MemoryCatalogProvider::new());
let catalog_list = Arc::new(MemoryCatalogList::default());
- // create table with ordered primitives, and all columns' length are even
+ // create table with primitives, and all columns' length are even
let mut column_schemas = vec![];
let mut columns = vec![];
macro_rules! create_even_number_table {
@@ -161,13 +161,13 @@ fn create_query_engine() -> Arc<dyn QueryEngine> {
let column_schema = ColumnSchema::new(column_name, Value::from(<$T>::default()).data_type(), true);
column_schemas.push(column_schema);
- let numbers = (1..=100).map(|_| rng.gen_range(<$T>::MIN..<$T>::MAX)).collect::<Vec<$T>>();
+ let numbers = (1..=100).map(|_| rng.gen::<$T>()).collect::<Vec<$T>>();
let column: VectorRef = Arc::new(PrimitiveVector::<$T>::from_vec(numbers.to_vec()));
columns.push(column);
)*
}
}
- for_all_ordered_primitive_types! { create_even_number_table }
+ for_all_primitive_types! { create_even_number_table }
let schema = Arc::new(Schema::new(column_schemas.clone()));
let recordbatch = RecordBatch::new(schema, columns).unwrap();
@@ -179,7 +179,7 @@ fn create_query_engine() -> Arc<dyn QueryEngine> {
)
.unwrap();
- // create table with ordered primitives, and all columns' length are odd
+ // create table with primitives, and all columns' length are odd
let mut column_schemas = vec![];
let mut columns = vec![];
macro_rules! create_odd_number_table {
@@ -191,13 +191,13 @@ fn create_query_engine() -> Arc<dyn QueryEngine> {
let column_schema = ColumnSchema::new(column_name, Value::from(<$T>::default()).data_type(), true);
column_schemas.push(column_schema);
- let numbers = (1..=99).map(|_| rng.gen_range(<$T>::MIN..<$T>::MAX)).collect::<Vec<$T>>();
+ let numbers = (1..=99).map(|_| rng.gen::<$T>()).collect::<Vec<$T>>();
let column: VectorRef = Arc::new(PrimitiveVector::<$T>::from_vec(numbers.to_vec()));
columns.push(column);
)*
}
}
- for_all_ordered_primitive_types! { create_odd_number_table }
+ for_all_primitive_types! { create_odd_number_table }
let schema = Arc::new(Schema::new(column_schemas.clone()));
let recordbatch = RecordBatch::new(schema, columns).unwrap();
@@ -206,24 +206,6 @@ fn create_query_engine() -> Arc<dyn QueryEngine> {
.register_table(odd_number_table.table_name().to_string(), odd_number_table)
.unwrap();
- // create table with floating numbers
- let column_schemas = vec![
- ColumnSchema::new("f32_number", ConcreteDataType::float32_datatype(), true),
- ColumnSchema::new("f64_number", ConcreteDataType::float64_datatype(), true),
- ];
- let f32_numbers: VectorRef = Arc::new(Float32Vector::from_vec(vec![1.0f32, 2.0, 3.0]));
- let f64_numbers: VectorRef = Arc::new(Float64Vector::from_vec(vec![1.0f64, 2.0, 3.0]));
- let columns = vec![f32_numbers, f64_numbers];
- let schema = Arc::new(Schema::new(column_schemas));
- let recordbatch = RecordBatch::new(schema, columns).unwrap();
- let float_number_table = Arc::new(MemTable::new("float_numbers", recordbatch));
- schema_provider
- .register_table(
- float_number_table.table_name().to_string(),
- float_number_table,
- )
- .unwrap();
-
catalog_provider.register_schema(DEFAULT_SCHEMA_NAME.to_string(), schema_provider);
catalog_list.register_catalog(DEFAULT_CATALOG_NAME.to_string(), catalog_provider);
@@ -235,7 +217,7 @@ async fn get_numbers_from_table<'s, T>(
column_name: &'s str,
table_name: &'s str,
engine: Arc<dyn QueryEngine>,
-) -> Vec<T>
+) -> Vec<OrdPrimitive<T>>
where
T: PrimitiveElement,
for<'a> T: Scalar<RefType<'a> = T>,
@@ -253,7 +235,11 @@ where
let columns = numbers[0].df_recordbatch.columns();
let column = VectorHelper::try_into_vector(&columns[0]).unwrap();
let column: &<T as Scalar>::VectorType = unsafe { VectorHelper::static_cast(&column) };
- column.iter_data().flatten().collect::<Vec<T>>()
+ column
+ .iter_data()
+ .flatten()
+ .map(|x| OrdPrimitive::<T>(x))
+ .collect::<Vec<OrdPrimitive<T>>>()
}
#[tokio::test]
@@ -262,9 +248,6 @@ async fn test_median_aggregator() -> Result<()> {
let engine = create_query_engine();
- test_median_failed::<f32>("f32_number", "float_numbers", engine.clone()).await?;
- test_median_failed::<f64>("f64_number", "float_numbers", engine.clone()).await?;
-
macro_rules! test_median {
([], $( { $T:ty } ),*) => {
$(
@@ -276,7 +259,7 @@ async fn test_median_aggregator() -> Result<()> {
)*
}
}
- for_all_ordered_primitive_types! { test_median }
+ for_all_primitive_types! { test_median }
Ok(())
}
@@ -286,7 +269,7 @@ async fn test_median_success<T>(
engine: Arc<dyn QueryEngine>,
) -> Result<()>
where
- T: PrimitiveElement + Ord,
+ T: PrimitiveElement,
for<'a> T: Scalar<RefType<'a> = T>,
{
let result = execute_median(column_name, table_name, engine.clone())
@@ -310,33 +293,15 @@ where
let expected_median: Value = if len % 2 == 1 {
numbers[len / 2]
} else {
- let a: f64 = NumCast::from(numbers[len / 2 - 1]).unwrap();
- let b: f64 = NumCast::from(numbers[len / 2]).unwrap();
- NumCast::from(a / 2.0 + b / 2.0).unwrap()
+ let a: f64 = NumCast::from(numbers[len / 2 - 1].as_primitive()).unwrap();
+ let b: f64 = NumCast::from(numbers[len / 2].as_primitive()).unwrap();
+ OrdPrimitive::<T>(NumCast::from(a / 2.0 + b / 2.0).unwrap())
}
.into();
assert_eq!(expected_median, median);
Ok(())
}
-async fn test_median_failed<T>(
- column_name: &str,
- table_name: &str,
- engine: Arc<dyn QueryEngine>,
-) -> Result<()>
-where
- T: PrimitiveElement,
-{
- let result = execute_median(column_name, table_name, engine).await;
- assert!(result.is_err());
- let error = result.unwrap_err();
- assert!(error.to_string().contains(&format!(
- "Failed to create accumulator: \"MEDIAN\" aggregate function not support data type {}",
- T::type_name()
- )));
- Ok(())
-}
-
async fn execute_median<'a>(
column_name: &'a str,
table_name: &'a str,
|
feat
|
a new type for supplying `Ord` to `Primitive` (#255)
|
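The `OrdPrimitive` record above wraps primitives that lack a total order (f32/f64) so they can sit in a `BinaryHeap` for the median/percentile accumulators. As a rough standalone illustration of the same trick — not GreptimeDB's actual type, and using `f64::total_cmp` instead of the crate's own `Primitive` conversion — a newtype can supply `Ord` like this:

// Minimal sketch: a wrapper that gives f64 a total order so it can live in
// Ord-requiring containers such as BinaryHeap. Names here are illustrative.
use std::cmp::Ordering;
use std::collections::BinaryHeap;

#[derive(Debug, Clone, Copy, PartialEq)]
struct OrdF64(f64);

impl Eq for OrdF64 {}

impl PartialOrd for OrdF64 {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl Ord for OrdF64 {
    fn cmp(&self, other: &Self) -> Ordering {
        // total_cmp defines a total order over f64, including NaN.
        self.0.total_cmp(&other.0)
    }
}

fn main() {
    let mut heap = BinaryHeap::new();
    for v in [3.5_f64, 1.0, 2.25] {
        heap.push(OrdF64(v));
    }
    // The heap can now hand back the maximum float.
    assert_eq!(heap.pop(), Some(OrdF64(3.5)));
}

The real `OrdPrimitive<T>` in the diff is generic over the crate's `Primitive` trait; the fixed `f64` here only keeps the sketch self-contained.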
a0144ffa61694ae991235d4344df770204f45721
|
2022-11-15 12:22:47
|
Jiachun Feng
|
fix: leader checker (#510)
| false
|
diff --git a/src/frontend/src/table.rs b/src/frontend/src/table.rs
index f137b1ac6d3c..2f32847a5d76 100644
--- a/src/frontend/src/table.rs
+++ b/src/frontend/src/table.rs
@@ -655,8 +655,6 @@ mod test {
}
#[tokio::test(flavor = "multi_thread")]
- // FIXME(LFC): Remove ignore.
- #[ignore]
async fn test_dist_table_scan() {
common_telemetry::init_default_ut_logging();
let table = Arc::new(new_dist_table().await);
diff --git a/src/meta-srv/src/handler/check_leader.rs b/src/meta-srv/src/handler/check_leader.rs
index b9f7c4e750c1..67ca01096245 100644
--- a/src/meta-srv/src/handler/check_leader.rs
+++ b/src/meta-srv/src/handler/check_leader.rs
@@ -20,13 +20,11 @@ impl HeartbeatHandler for CheckLeaderHandler {
if election.is_leader() {
return Ok(());
}
+ if let Some(header) = &mut acc.header {
+ header.error = Some(Error::is_not_leader());
+ ctx.set_skip_all();
+ }
}
-
- if let Some(header) = &mut acc.header {
- header.error = Some(Error::is_not_leader());
- ctx.set_skip_all();
- }
-
Ok(())
}
}
|
fix
|
leader checker (#510)
|
7d6f4cd88b2623f7d9729cbfc915d6a753761f92
|
2023-02-11 12:22:29
|
Eugene Tolbakov
|
feat: remove backtrace from sql::error::Error (#966)
| false
|
diff --git a/src/sql/src/error.rs b/src/sql/src/error.rs
index 2ce09c2c7e4f..dc140d3e5267 100644
--- a/src/sql/src/error.rs
+++ b/src/sql/src/error.rs
@@ -53,11 +53,7 @@ pub enum Error {
expr,
column_name
))]
- UnsupportedDefaultValue {
- column_name: String,
- expr: Expr,
- backtrace: Backtrace,
- },
+ UnsupportedDefaultValue { column_name: String, expr: Expr },
// Syntax error from sql parser.
#[snafu(display("Syntax error, sql: {}, source: {}", sql, source))]
@@ -67,29 +63,22 @@ pub enum Error {
Tokenizer { sql: String, source: TokenizerError },
#[snafu(display("Missing time index constraint"))]
- MissingTimeIndex { backtrace: Backtrace },
+ MissingTimeIndex {},
#[snafu(display("Invalid time index: {}", msg))]
- InvalidTimeIndex { msg: String, backtrace: Backtrace },
+ InvalidTimeIndex { msg: String },
#[snafu(display("Invalid SQL, error: {}", msg))]
- InvalidSql { msg: String, backtrace: Backtrace },
+ InvalidSql { msg: String },
#[snafu(display("Invalid column option, column name: {}, error: {}", name, msg))]
- InvalidColumnOption {
- name: String,
- msg: String,
- backtrace: Backtrace,
- },
+ InvalidColumnOption { name: String, msg: String },
#[snafu(display("SQL data type not supported yet: {:?}", t))]
- SqlTypeNotSupported {
- t: crate::ast::DataType,
- backtrace: Backtrace,
- },
+ SqlTypeNotSupported { t: crate::ast::DataType },
#[snafu(display("Failed to parse value: {}", msg))]
- ParseSqlValue { msg: String, backtrace: Backtrace },
+ ParseSqlValue { msg: String },
#[snafu(display(
"Column {} expect type: {:?}, actual: {:?}",
@@ -104,10 +93,10 @@ pub enum Error {
},
#[snafu(display("Invalid database name: {}", name))]
- InvalidDatabaseName { name: String, backtrace: Backtrace },
+ InvalidDatabaseName { name: String },
#[snafu(display("Invalid table name: {}", name))]
- InvalidTableName { name: String, backtrace: Backtrace },
+ InvalidTableName { name: String },
#[snafu(display("Invalid default constraint, column: {}, source: {}", column, source))]
InvalidDefault {
@@ -117,7 +106,7 @@ pub enum Error {
},
#[snafu(display("Unsupported ALTER TABLE statement: {}", msg))]
- UnsupportedAlterTableStatement { msg: String, backtrace: Backtrace },
+ UnsupportedAlterTableStatement { msg: String },
#[snafu(display("Failed to serialize column default constraint, source: {}", source))]
SerializeColumnDefaultConstraint {
@@ -135,7 +124,7 @@ pub enum Error {
},
#[snafu(display("Invalid sql value: {}", value))]
- InvalidSqlValue { value: String, backtrace: Backtrace },
+ InvalidSqlValue { value: String },
#[snafu(display(
"Converting timestamp {:?} to unit {:?} overflow",
@@ -145,7 +134,6 @@ pub enum Error {
TimestampOverflow {
timestamp: Timestamp,
target_unit: TimeUnit,
- backtrace: Backtrace,
},
}
|
feat
|
remove backtrace from sql::error::Error (#966)
|
51feec25796aeac743f35ab285b6f419dcc9d8ef
|
2024-02-04 17:05:55
|
Ruihang Xia
|
feat: use simple filter to prune memtable (#3269)
| false
|
diff --git a/src/metric-engine/src/engine.rs b/src/metric-engine/src/engine.rs
index bd81f17d6643..7cf5dc4e266f 100644
--- a/src/metric-engine/src/engine.rs
+++ b/src/metric-engine/src/engine.rs
@@ -28,7 +28,6 @@ use std::sync::{Arc, RwLock};
use async_trait::async_trait;
use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
-use common_query::Output;
use common_recordbatch::SendableRecordBatchStream;
use mito2::engine::MitoEngine;
use store_api::metadata::RegionMetadataRef;
@@ -133,7 +132,7 @@ impl RegionEngine for MetricEngine {
RegionRequest::Flush(_) => todo!(),
RegionRequest::Compact(_) => todo!(),
RegionRequest::Truncate(_) => todo!(),
- /// It always Ok(0), all data is latest.
+ // It always Ok(0), all data is the latest.
RegionRequest::Catchup(_) => Ok(0),
};
diff --git a/src/metric-engine/src/engine/close.rs b/src/metric-engine/src/engine/close.rs
index c708453edb9e..ad89fce36ac7 100644
--- a/src/metric-engine/src/engine/close.rs
+++ b/src/metric-engine/src/engine/close.rs
@@ -14,25 +14,15 @@
//! Close a metric region
-use mito2::engine::MITO_ENGINE_NAME;
-use object_store::util::join_dir;
-use snafu::{OptionExt, ResultExt};
-use store_api::metric_engine_consts::{
- DATA_REGION_SUBDIR, METADATA_REGION_SUBDIR, PHYSICAL_TABLE_METADATA_KEY,
-};
+use snafu::ResultExt;
use store_api::region_engine::RegionEngine;
-use store_api::region_request::{
- AffectedRows, RegionCloseRequest, RegionOpenRequest, RegionRequest,
-};
+use store_api::region_request::{AffectedRows, RegionCloseRequest, RegionRequest};
use store_api::storage::RegionId;
use super::MetricEngineInner;
-use crate::error::{
- CloseMitoRegionSnafu, Error, LogicalRegionNotFoundSnafu, OpenMitoRegionSnafu,
- PhysicalRegionNotFoundSnafu, Result,
-};
+use crate::error::{CloseMitoRegionSnafu, LogicalRegionNotFoundSnafu, Result};
use crate::metrics::PHYSICAL_REGION_COUNT;
-use crate::{metadata_region, utils};
+use crate::utils;
impl MetricEngineInner {
pub async fn close_region(
diff --git a/src/metric-engine/src/engine/drop.rs b/src/metric-engine/src/engine/drop.rs
index 9399844720b5..03c13d2109ee 100644
--- a/src/metric-engine/src/engine/drop.rs
+++ b/src/metric-engine/src/engine/drop.rs
@@ -14,25 +14,17 @@
//! Drop a metric region
-use mito2::engine::MITO_ENGINE_NAME;
-use object_store::util::join_dir;
-use snafu::{OptionExt, ResultExt};
-use store_api::metric_engine_consts::{
- DATA_REGION_SUBDIR, METADATA_REGION_SUBDIR, PHYSICAL_TABLE_METADATA_KEY,
-};
+use snafu::ResultExt;
use store_api::region_engine::RegionEngine;
-use store_api::region_request::{
- AffectedRows, RegionDropRequest, RegionOpenRequest, RegionRequest,
-};
+use store_api::region_request::{AffectedRows, RegionDropRequest, RegionRequest};
use store_api::storage::RegionId;
use super::MetricEngineInner;
use crate::error::{
- CloseMitoRegionSnafu, Error, LogicalRegionNotFoundSnafu, OpenMitoRegionSnafu,
- PhysicalRegionBusySnafu, PhysicalRegionNotFoundSnafu, Result,
+ CloseMitoRegionSnafu, LogicalRegionNotFoundSnafu, PhysicalRegionBusySnafu, Result,
};
use crate::metrics::PHYSICAL_REGION_COUNT;
-use crate::{metadata_region, utils};
+use crate::utils;
impl MetricEngineInner {
pub async fn drop_region(
diff --git a/src/metric-engine/src/engine/open.rs b/src/metric-engine/src/engine/open.rs
index fda8807a7b9c..df41d1cf12ef 100644
--- a/src/metric-engine/src/engine/open.rs
+++ b/src/metric-engine/src/engine/open.rs
@@ -26,7 +26,7 @@ use store_api::region_request::{AffectedRows, RegionOpenRequest, RegionRequest};
use store_api::storage::RegionId;
use super::MetricEngineInner;
-use crate::error::{Error, LogicalRegionNotFoundSnafu, OpenMitoRegionSnafu, Result};
+use crate::error::{OpenMitoRegionSnafu, Result};
use crate::metrics::{LOGICAL_REGION_COUNT, PHYSICAL_REGION_COUNT};
use crate::utils;
diff --git a/src/metric-engine/src/engine/put.rs b/src/metric-engine/src/engine/put.rs
index 6e9a32e18cce..5ce2bf227ba4 100644
--- a/src/metric-engine/src/engine/put.rs
+++ b/src/metric-engine/src/engine/put.rs
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::hash::{BuildHasher, Hash, Hasher};
+use std::hash::Hash;
use api::v1::value::ValueData;
use api::v1::{ColumnDataType, ColumnSchema, Row, Rows, SemanticType};
@@ -29,7 +29,7 @@ use crate::error::{
ColumnNotFoundSnafu, ForbiddenPhysicalAlterSnafu, LogicalRegionNotFoundSnafu, Result,
};
use crate::metrics::{FORBIDDEN_OPERATION_COUNT, MITO_OPERATION_ELAPSED};
-use crate::utils::{to_data_region_id, to_metadata_region_id};
+use crate::utils::to_data_region_id;
// A random number
const TSID_HASH_SEED: u32 = 846793005;
@@ -84,6 +84,7 @@ impl MetricEngineInner {
.await?;
// write to data region
+
// TODO: retrieve table name
self.modify_rows(logical_region_id.table_id(), &mut request.rows)?;
self.data_region.write_data(data_region_id, request).await
@@ -101,7 +102,6 @@ impl MetricEngineInner {
request: &RegionPutRequest,
) -> Result<()> {
// check if the region exists
- let metadata_region_id = to_metadata_region_id(physical_region_id);
let data_region_id = to_data_region_id(physical_region_id);
let state = self.state.read().unwrap();
if !state.is_logical_region_exist(logical_region_id) {
diff --git a/src/metric-engine/src/engine/read.rs b/src/metric-engine/src/engine/read.rs
index cba9fbd343b4..154a9a61b084 100644
--- a/src/metric-engine/src/engine/read.rs
+++ b/src/metric-engine/src/engine/read.rs
@@ -20,10 +20,9 @@ use common_recordbatch::SendableRecordBatchStream;
use common_telemetry::{error, info, tracing};
use datafusion::logical_expr;
use snafu::{OptionExt, ResultExt};
-use store_api::metadata::{RegionMetadata, RegionMetadataBuilder, RegionMetadataRef};
+use store_api::metadata::{RegionMetadataBuilder, RegionMetadataRef};
use store_api::metric_engine_consts::DATA_SCHEMA_TABLE_ID_COLUMN_NAME;
use store_api::region_engine::RegionEngine;
-use store_api::storage::consts::ReservedColumnId;
use store_api::storage::{RegionId, ScanRequest};
use crate::engine::MetricEngineInner;
@@ -259,7 +258,6 @@ mod test {
use store_api::region_request::RegionRequest;
use super::*;
- use crate::engine::alter;
use crate::test_util::{
alter_logical_region_add_tag_columns, create_logical_region_request, TestEnv,
};
@@ -271,7 +269,6 @@ mod test {
let logical_region_id = env.default_logical_region_id();
let physical_region_id = env.default_physical_region_id();
- let data_region_id = utils::to_data_region_id(physical_region_id);
// create another logical region
let logical_region_id2 = RegionId::new(1112345678, 999);
@@ -291,7 +288,7 @@ mod test {
.unwrap();
// check explicit projection
- let mut scan_req = ScanRequest {
+ let scan_req = ScanRequest {
projection: Some(vec![0, 1, 2, 3, 4, 5, 6]),
filters: vec![],
..Default::default()
@@ -314,7 +311,7 @@ mod test {
);
// check default projection
- let mut scan_req = ScanRequest::default();
+ let scan_req = ScanRequest::default();
let scan_req = env
.metric()
.inner
diff --git a/src/metric-engine/src/engine/region_metadata.rs b/src/metric-engine/src/engine/region_metadata.rs
index 5a3979f99c43..150d1a324c2e 100644
--- a/src/metric-engine/src/engine/region_metadata.rs
+++ b/src/metric-engine/src/engine/region_metadata.rs
@@ -14,11 +14,7 @@
//! Implementation of retrieving logical region's region metadata.
-use std::collections::{HashMap, HashSet};
-
-use api::v1::SemanticType;
-use store_api::metadata::{ColumnMetadata, RegionMetadata};
-use store_api::storage::consts::ReservedColumnId;
+use store_api::metadata::ColumnMetadata;
use store_api::storage::RegionId;
use crate::engine::MetricEngineInner;
diff --git a/src/metric-engine/src/lib.rs b/src/metric-engine/src/lib.rs
index 9c2e269b737e..3fe4640da4aa 100644
--- a/src/metric-engine/src/lib.rs
+++ b/src/metric-engine/src/lib.rs
@@ -53,7 +53,6 @@
#![feature(let_chains)]
mod data_region;
-#[allow(unused)]
pub mod engine;
pub mod error;
mod metadata_region;
diff --git a/src/metric-engine/src/metadata_region.rs b/src/metric-engine/src/metadata_region.rs
index 97b73d7a9bef..918f7f1023d7 100644
--- a/src/metric-engine/src/metadata_region.rs
+++ b/src/metric-engine/src/metadata_region.rs
@@ -229,13 +229,11 @@ impl MetadataRegion {
format!("{COLUMN_PREFIX}{}_", region_id.as_u64())
}
- #[allow(dead_code)]
pub fn parse_region_key(key: &str) -> Option<&str> {
key.strip_prefix(REGION_PREFIX)
}
/// Parse column key to (logical_region_id, column_name)
- #[allow(dead_code)]
pub fn parse_column_key(key: &str) -> Result<Option<(RegionId, String)>> {
if let Some(stripped) = key.strip_prefix(COLUMN_PREFIX) {
let mut iter = stripped.split('_');
@@ -271,7 +269,6 @@ impl MetadataRegion {
// simulate to `KvBackend`
//
// methods in this block assume the given region id is transformed.
-#[allow(unused_variables)]
impl MetadataRegion {
/// Put if not exist, return if this put operation is successful (error other
/// than "key already exist" will be wrapped in [Err]).
diff --git a/src/mito2/src/memtable/time_series.rs b/src/mito2/src/memtable/time_series.rs
index afebe927feff..3c86d5cd3bb5 100644
--- a/src/mito2/src/memtable/time_series.rs
+++ b/src/mito2/src/memtable/time_series.rs
@@ -20,17 +20,14 @@ use std::sync::{Arc, RwLock};
use std::time::{Duration, Instant};
use api::v1::OpType;
-use common_telemetry::{debug, error, trace};
+use common_recordbatch::filter::SimpleFilterEvaluator;
+use common_telemetry::{debug, error};
use common_time::Timestamp;
-use datafusion::physical_plan::PhysicalExpr;
-use datafusion_common::ScalarValue;
-use datafusion_expr::ColumnarValue;
use datatypes::arrow;
-use datatypes::arrow::array::{ArrayRef, BooleanArray};
-use datatypes::arrow::record_batch::RecordBatch;
+use datatypes::arrow::array::ArrayRef;
use datatypes::data_type::{ConcreteDataType, DataType};
use datatypes::prelude::{MutableVector, ScalarVectorBuilder, Vector, VectorRef};
-use datatypes::value::ValueRef;
+use datatypes::value::{Value, ValueRef};
use datatypes::vectors::{
Helper, UInt64Vector, UInt64VectorBuilder, UInt8Vector, UInt8VectorBuilder,
};
@@ -39,10 +36,7 @@ use store_api::metadata::RegionMetadataRef;
use store_api::storage::ColumnId;
use table::predicate::Predicate;
-use crate::error::{
- ComputeArrowSnafu, ConvertVectorSnafu, NewRecordBatchSnafu, PrimaryKeyLengthMismatchSnafu,
- Result,
-};
+use crate::error::{ComputeArrowSnafu, ConvertVectorSnafu, PrimaryKeyLengthMismatchSnafu, Result};
use crate::flush::WriteBufferManagerRef;
use crate::memtable::{
AllocTracker, BoxedBatchIterator, KeyValues, Memtable, MemtableBuilder, MemtableId,
@@ -305,48 +299,39 @@ impl SeriesSet {
/// Iterates all series in [SeriesSet].
fn iter_series(&self, projection: HashSet<ColumnId>, predicate: Option<Predicate>) -> Iter {
- let (primary_key_builders, primary_key_schema) =
- primary_key_builders(&self.region_metadata, 1);
-
- let physical_exprs: Vec<_> = predicate
- .and_then(|p| p.to_physical_exprs(&primary_key_schema).ok())
- .unwrap_or_default();
+ let primary_key_schema = primary_key_schema(&self.region_metadata);
+ let primary_key_datatypes = self
+ .region_metadata
+ .primary_key_columns()
+ .map(|pk| pk.column_schema.data_type.clone())
+ .collect();
- Iter {
- metadata: self.region_metadata.clone(),
- series: self.series.clone(),
+ Iter::new(
+ self.region_metadata.clone(),
+ self.series.clone(),
projection,
- last_key: None,
- predicate: physical_exprs,
- pk_schema: primary_key_schema,
- primary_key_builders,
- codec: self.codec.clone(),
- metrics: Metrics::default(),
- }
+ predicate,
+ primary_key_schema,
+ primary_key_datatypes,
+ self.codec.clone(),
+ )
}
}
-/// Creates primary key array builders and arrow's schema for primary keys of given region schema.
-fn primary_key_builders(
- region_metadata: &RegionMetadataRef,
- num_pk_rows: usize,
-) -> (Vec<Box<dyn MutableVector>>, arrow::datatypes::SchemaRef) {
- let (builders, fields): (_, Vec<_>) = region_metadata
+/// Creates an arrow [SchemaRef](arrow::datatypes::SchemaRef) that only contains primary keys
+/// of given region schema
+fn primary_key_schema(region_metadata: &RegionMetadataRef) -> arrow::datatypes::SchemaRef {
+ let fields = region_metadata
.primary_key_columns()
.map(|pk| {
- (
- pk.column_schema
- .data_type
- .create_mutable_vector(num_pk_rows),
- arrow::datatypes::Field::new(
- pk.column_schema.name.clone(),
- pk.column_schema.data_type.as_arrow_type(),
- pk.column_schema.is_nullable(),
- ),
+ arrow::datatypes::Field::new(
+ pk.column_schema.name.clone(),
+ pk.column_schema.data_type.as_arrow_type(),
+ pk.column_schema.is_nullable(),
)
})
- .unzip();
- (builders, Arc::new(arrow::datatypes::Schema::new(fields)))
+ .collect::<Vec<_>>();
+ Arc::new(arrow::datatypes::Schema::new(fields))
}
/// Metrics for reading the memtable.
@@ -369,13 +354,45 @@ struct Iter {
series: Arc<SeriesRwLockMap>,
projection: HashSet<ColumnId>,
last_key: Option<Vec<u8>>,
- predicate: Vec<Arc<dyn PhysicalExpr>>,
+ predicate: Vec<SimpleFilterEvaluator>,
pk_schema: arrow::datatypes::SchemaRef,
- primary_key_builders: Vec<Box<dyn MutableVector>>,
+ pk_datatypes: Vec<ConcreteDataType>,
codec: Arc<McmpRowCodec>,
metrics: Metrics,
}
+impl Iter {
+ pub(crate) fn new(
+ metadata: RegionMetadataRef,
+ series: Arc<SeriesRwLockMap>,
+ projection: HashSet<ColumnId>,
+ predicate: Option<Predicate>,
+ pk_schema: arrow::datatypes::SchemaRef,
+ pk_datatypes: Vec<ConcreteDataType>,
+ codec: Arc<McmpRowCodec>,
+ ) -> Self {
+ let simple_filters = predicate
+ .map(|p| {
+ p.exprs()
+ .iter()
+ .filter_map(|f| SimpleFilterEvaluator::try_new(f.df_expr()))
+ .collect::<Vec<_>>()
+ })
+ .unwrap_or_default();
+ Self {
+ metadata,
+ series,
+ projection,
+ last_key: None,
+ predicate: simple_filters,
+ pk_schema,
+ pk_datatypes,
+ codec,
+ metrics: Metrics::default(),
+ }
+ }
+}
+
impl Drop for Iter {
fn drop(&mut self) {
debug!(
@@ -415,7 +432,7 @@ impl Iterator for Iter {
&self.codec,
primary_key.as_slice(),
&mut series,
- &mut self.primary_key_builders,
+ &self.pk_datatypes,
self.pk_schema.clone(),
&self.predicate,
)
@@ -446,88 +463,48 @@ fn prune_primary_key(
codec: &Arc<McmpRowCodec>,
pk: &[u8],
series: &mut Series,
- builders: &mut [Box<dyn MutableVector>],
+ datatypes: &[ConcreteDataType],
pk_schema: arrow::datatypes::SchemaRef,
- predicate: &[Arc<dyn PhysicalExpr>],
+ predicates: &[SimpleFilterEvaluator],
) -> bool {
// no primary key, we simply return true.
if pk_schema.fields().is_empty() {
return true;
}
- if let Some(rb) = series.pk_cache.as_ref() {
- prune_inner(predicate, rb).unwrap_or(true)
+ // retrieve primary key values from cache or decode from bytes.
+ let pk_values = if let Some(pk_values) = series.pk_cache.as_ref() {
+ pk_values
} else {
- let rb = match pk_to_record_batch(codec, pk, builders, pk_schema) {
- Ok(rb) => rb,
- Err(e) => {
- error!(e; "Failed to build record batch from primary keys");
- return true;
- }
- };
- let res = prune_inner(predicate, &rb).unwrap_or(true);
- series.update_pk_cache(rb);
- res
- }
-}
-
-fn prune_inner(predicates: &[Arc<dyn PhysicalExpr>], primary_key: &RecordBatch) -> Result<bool> {
- for expr in predicates {
- // evaluate every filter against primary key
- let Ok(eva) = expr.evaluate(primary_key) else {
+ let pk_values = codec.decode(pk);
+ if let Err(e) = pk_values {
+ error!(e; "Failed to decode primary key");
+ return true;
+ }
+ series.update_pk_cache(pk_values.unwrap());
+ series.pk_cache.as_ref().unwrap()
+ };
+
+ // evaluate predicates against primary key values
+ let mut result = true;
+ for predicate in predicates {
+ // ignore predicates that are not referencing primary key columns
+ let Ok(index) = pk_schema.index_of(predicate.column_name()) else {
continue;
};
- let result = match eva {
- ColumnarValue::Array(array) => {
- let predicate_array = array.as_any().downcast_ref::<BooleanArray>().unwrap();
- predicate_array
- .into_iter()
- .map(|x| x.unwrap_or(true))
- .next()
- .unwrap_or(true)
- }
- // result was a column
- ColumnarValue::Scalar(ScalarValue::Boolean(v)) => v.unwrap_or(true),
- _ => {
- unreachable!("Unexpected primary key record batch evaluation result: {:?}, primary key: {:?}", eva, primary_key);
- }
- };
- trace!(
- "Evaluate primary key {:?} against filter: {:?}, result: {:?}",
- primary_key,
- expr,
- result
- );
- if !result {
- return Ok(false);
- }
+ // Safety: arrow schema and datatypes are constructed from the same source.
+ let scalar_value = pk_values[index]
+ .try_to_scalar_value(&datatypes[index])
+ .unwrap();
+ result &= predicate.evaluate_scalar(&scalar_value).unwrap_or(true);
}
- Ok(true)
-}
-
-fn pk_to_record_batch(
- codec: &Arc<McmpRowCodec>,
- bytes: &[u8],
- builders: &mut [Box<dyn MutableVector>],
- pk_schema: arrow::datatypes::SchemaRef,
-) -> Result<RecordBatch> {
- let pk_values = codec.decode(bytes).unwrap();
-
- let arrays = builders
- .iter_mut()
- .zip(pk_values.iter())
- .map(|(builder, pk_value)| {
- builder.push_value_ref(pk_value.as_value_ref());
- builder.to_vector().to_arrow_array()
- })
- .collect();
- RecordBatch::try_new(pk_schema, arrays).context(NewRecordBatchSnafu)
+ result
}
/// A `Series` holds a list of field values of some given primary key.
struct Series {
- pk_cache: Option<RecordBatch>,
+ pk_cache: Option<Vec<Value>>,
active: ValueBuilder,
frozen: Vec<Values>,
}
@@ -546,8 +523,8 @@ impl Series {
self.active.push(ts, sequence, op_type as u8, values);
}
- fn update_pk_cache(&mut self, pk_batch: RecordBatch) {
- self.pk_cache = Some(pk_batch);
+ fn update_pk_cache(&mut self, pk_values: Vec<Value>) {
+ self.pk_cache = Some(pk_values);
}
/// Freezes the active part and push it to `frozen`.
diff --git a/src/mito2/src/worker/handle_write.rs b/src/mito2/src/worker/handle_write.rs
index e10012d57447..495af6cb05b1 100644
--- a/src/mito2/src/worker/handle_write.rs
+++ b/src/mito2/src/worker/handle_write.rs
@@ -60,7 +60,13 @@ impl<S: LogStore> RegionWorkerLoop<S> {
return;
}
- let mut region_ctxs = self.prepare_region_write_ctx(write_requests);
+ // Prepare write context.
+ let mut region_ctxs = {
+ let _timer = WRITE_STAGE_ELAPSED
+ .with_label_values(&["prepare_ctx"])
+ .start_timer();
+ self.prepare_region_write_ctx(write_requests)
+ };
// Write to WAL.
{
|
feat
|
use simple filter to prune memtable (#3269)
|
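The memtable-pruning record above replaces per-row `PhysicalExpr` evaluation with `SimpleFilterEvaluator`s applied to decoded primary-key values. A heavily simplified sketch of that pruning flow — stand-in `Value` and `SimpleFilter` types, not the mito2 API — could look like:

// Hypothetical sketch: decode a series' primary-key values once, evaluate each
// simple `column = literal` filter against the matching key column, and skip
// the series when any filter rejects it.
use std::collections::HashMap;

#[derive(Debug, Clone, PartialEq)]
enum Value {
    String(String),
    Int(i64),
}

// Simplified stand-in for SimpleFilterEvaluator: one column, one literal.
struct SimpleFilter {
    column: String,
    equals: Value,
}

fn prune_series(pk_values: &HashMap<String, Value>, filters: &[SimpleFilter]) -> bool {
    for filter in filters {
        // Filters that reference non-key columns are ignored, mirroring the diff.
        let Some(value) = pk_values.get(&filter.column) else {
            continue;
        };
        if value != &filter.equals {
            return false; // prune this series
        }
    }
    true // keep this series
}

fn main() {
    let mut pk = HashMap::new();
    pk.insert("host".to_string(), Value::String("web-01".to_string()));
    pk.insert("idc".to_string(), Value::Int(3));

    let filters = vec![SimpleFilter {
        column: "host".to_string(),
        equals: Value::String("web-02".to_string()),
    }];
    assert!(!prune_series(&pk, &filters));
}

As in the diff, filters that do not reference a key column are skipped, and an undecidable filter should default to keeping the series rather than dropping data.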
bfd32571d9fd11228fb1a0a6529afe06ec1b74cf
|
2024-04-01 08:48:14
|
Yingwen
|
fix: run purge jobs in another scheduler (#3621)
| false
|
diff --git a/src/mito2/src/region/opener.rs b/src/mito2/src/region/opener.rs
index d0fa4ea708ac..5549e55c4477 100644
--- a/src/mito2/src/region/opener.rs
+++ b/src/mito2/src/region/opener.rs
@@ -56,7 +56,7 @@ pub(crate) struct RegionOpener {
memtable_builder_provider: MemtableBuilderProvider,
object_store_manager: ObjectStoreManagerRef,
region_dir: String,
- scheduler: SchedulerRef,
+ purge_scheduler: SchedulerRef,
options: Option<RegionOptions>,
cache_manager: Option<CacheManagerRef>,
skip_wal_replay: bool,
@@ -71,7 +71,7 @@ impl RegionOpener {
region_dir: &str,
memtable_builder_provider: MemtableBuilderProvider,
object_store_manager: ObjectStoreManagerRef,
- scheduler: SchedulerRef,
+ purge_scheduler: SchedulerRef,
intermediate_manager: IntermediateManager,
) -> RegionOpener {
RegionOpener {
@@ -80,7 +80,7 @@ impl RegionOpener {
memtable_builder_provider,
object_store_manager,
region_dir: normalize_dir(region_dir),
- scheduler,
+ purge_scheduler,
options: None,
cache_manager: None,
skip_wal_replay: false,
@@ -204,7 +204,7 @@ impl RegionOpener {
access_layer: access_layer.clone(),
manifest_manager,
file_purger: Arc::new(LocalFilePurger::new(
- self.scheduler,
+ self.purge_scheduler,
access_layer,
self.cache_manager,
)),
@@ -277,7 +277,7 @@ impl RegionOpener {
self.intermediate_manager.clone(),
));
let file_purger = Arc::new(LocalFilePurger::new(
- self.scheduler.clone(),
+ self.purge_scheduler.clone(),
access_layer.clone(),
self.cache_manager.clone(),
));
diff --git a/src/mito2/src/worker.rs b/src/mito2/src/worker.rs
index a9aa652d8673..c3fe64e83cd8 100644
--- a/src/mito2/src/worker.rs
+++ b/src/mito2/src/worker.rs
@@ -110,6 +110,8 @@ pub(crate) struct WorkerGroup {
workers: Vec<RegionWorker>,
/// Global background job scheduelr.
scheduler: SchedulerRef,
+ /// Scheduler for file purgers.
+ purge_scheduler: SchedulerRef,
/// Cache.
cache_manager: CacheManagerRef,
}
@@ -131,6 +133,9 @@ impl WorkerGroup {
.await?
.with_buffer_size(Some(config.inverted_index.write_buffer_size.as_bytes() as _));
let scheduler = Arc::new(LocalScheduler::new(config.max_background_jobs));
+ // We use another scheduler to avoid purge jobs blocking other jobs.
+ // A purge job is cheaper than other background jobs so they share the same job limit.
+ let purge_scheduler = Arc::new(LocalScheduler::new(config.max_background_jobs));
let write_cache = write_cache_from_config(
&config,
object_store_manager.clone(),
@@ -156,6 +161,7 @@ impl WorkerGroup {
object_store_manager: object_store_manager.clone(),
write_buffer_manager: write_buffer_manager.clone(),
scheduler: scheduler.clone(),
+ purge_scheduler: purge_scheduler.clone(),
listener: WorkerListener::default(),
cache_manager: cache_manager.clone(),
intermediate_manager: intermediate_manager.clone(),
@@ -168,6 +174,7 @@ impl WorkerGroup {
Ok(WorkerGroup {
workers,
scheduler,
+ purge_scheduler,
cache_manager,
})
}
@@ -178,6 +185,8 @@ impl WorkerGroup {
// Stops the scheduler gracefully.
self.scheduler.stop(true).await?;
+ // Stops the purge scheduler gracefully.
+ self.purge_scheduler.stop(true).await?;
try_join_all(self.workers.iter().map(|worker| worker.stop())).await?;
@@ -238,6 +247,7 @@ impl WorkerGroup {
))
});
let scheduler = Arc::new(LocalScheduler::new(config.max_background_jobs));
+ let purge_scheduler = Arc::new(LocalScheduler::new(config.max_background_jobs));
let intermediate_manager =
IntermediateManager::init_fs(&config.inverted_index.intermediate_path)
.await?
@@ -265,6 +275,7 @@ impl WorkerGroup {
object_store_manager: object_store_manager.clone(),
write_buffer_manager: write_buffer_manager.clone(),
scheduler: scheduler.clone(),
+ purge_scheduler: purge_scheduler.clone(),
listener: WorkerListener::new(listener.clone()),
cache_manager: cache_manager.clone(),
intermediate_manager: intermediate_manager.clone(),
@@ -277,6 +288,7 @@ impl WorkerGroup {
Ok(WorkerGroup {
workers,
scheduler,
+ purge_scheduler,
cache_manager,
})
}
@@ -323,6 +335,7 @@ struct WorkerStarter<S> {
object_store_manager: ObjectStoreManagerRef,
write_buffer_manager: WriteBufferManagerRef,
scheduler: SchedulerRef,
+ purge_scheduler: SchedulerRef,
listener: WorkerListener,
cache_manager: CacheManagerRef,
intermediate_manager: IntermediateManager,
@@ -351,7 +364,7 @@ impl<S: LogStore> WorkerStarter<S> {
Some(self.write_buffer_manager.clone()),
self.config,
),
- scheduler: self.scheduler.clone(),
+ purge_scheduler: self.purge_scheduler.clone(),
write_buffer_manager: self.write_buffer_manager,
flush_scheduler: FlushScheduler::new(self.scheduler.clone()),
compaction_scheduler: CompactionScheduler::new(
@@ -507,8 +520,8 @@ struct RegionWorkerLoop<S> {
running: Arc<AtomicBool>,
/// Memtable builder provider for each region.
memtable_builder_provider: MemtableBuilderProvider,
- /// Background job scheduler.
- scheduler: SchedulerRef,
+ /// Background purge job scheduler.
+ purge_scheduler: SchedulerRef,
/// Engine write buffer manager.
write_buffer_manager: WriteBufferManagerRef,
/// Schedules background flush requests.
diff --git a/src/mito2/src/worker/handle_catchup.rs b/src/mito2/src/worker/handle_catchup.rs
index 4a9730dd7a9b..ed2c52c2118a 100644
--- a/src/mito2/src/worker/handle_catchup.rs
+++ b/src/mito2/src/worker/handle_catchup.rs
@@ -53,7 +53,7 @@ impl<S: LogStore> RegionWorkerLoop<S> {
region.region_dir(),
self.memtable_builder_provider.clone(),
self.object_store_manager.clone(),
- self.scheduler.clone(),
+ self.purge_scheduler.clone(),
self.intermediate_manager.clone(),
)
.cache(Some(self.cache_manager.clone()))
diff --git a/src/mito2/src/worker/handle_create.rs b/src/mito2/src/worker/handle_create.rs
index 6b5428963753..f07a1f38a183 100644
--- a/src/mito2/src/worker/handle_create.rs
+++ b/src/mito2/src/worker/handle_create.rs
@@ -60,7 +60,7 @@ impl<S: LogStore> RegionWorkerLoop<S> {
&request.region_dir,
self.memtable_builder_provider.clone(),
self.object_store_manager.clone(),
- self.scheduler.clone(),
+ self.purge_scheduler.clone(),
self.intermediate_manager.clone(),
)
.metadata(metadata)
diff --git a/src/mito2/src/worker/handle_open.rs b/src/mito2/src/worker/handle_open.rs
index 884012473e1d..bcd050220e61 100644
--- a/src/mito2/src/worker/handle_open.rs
+++ b/src/mito2/src/worker/handle_open.rs
@@ -67,7 +67,7 @@ impl<S: LogStore> RegionWorkerLoop<S> {
&request.region_dir,
self.memtable_builder_provider.clone(),
self.object_store_manager.clone(),
- self.scheduler.clone(),
+ self.purge_scheduler.clone(),
self.intermediate_manager.clone(),
)
.skip_wal_replay(request.skip_wal_replay)
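
A minimal, self-contained sketch of the pattern this change introduces: a second scheduler dedicated to purge work so cheap purge jobs never queue behind flush or compaction jobs. `SimpleScheduler` and `Job` are stand-ins invented for illustration, not the mito2 `LocalScheduler` API.

use std::sync::mpsc;
use std::thread;

// A job is just a boxed closure in this sketch.
type Job = Box<dyn FnOnce() + Send>;

struct SimpleScheduler {
    tx: mpsc::Sender<Job>,
    handle: thread::JoinHandle<()>,
}

impl SimpleScheduler {
    fn new() -> Self {
        let (tx, rx) = mpsc::channel::<Job>();
        // One worker thread drains the queue until the sender is dropped.
        let handle = thread::spawn(move || {
            for job in rx {
                job();
            }
        });
        Self { tx, handle }
    }

    fn schedule(&self, job: Job) {
        self.tx.send(job).expect("scheduler stopped");
    }

    fn stop(self) {
        drop(self.tx); // close the channel so the worker thread exits
        self.handle.join().expect("worker thread panicked");
    }
}

fn main() {
    // One scheduler for heavy background jobs, a separate one for cheap purges,
    // mirroring the `scheduler` / `purge_scheduler` split in the diff above.
    let background = SimpleScheduler::new();
    let purge = SimpleScheduler::new();

    background.schedule(Box::new(|| println!("flush / compaction job")));
    purge.schedule(Box::new(|| println!("purge job")));

    background.stop();
    purge.stop();
}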
|
fix
|
run purge jobs in another scheduler (#3621)
|
420ae054b388026dc09d57d503f065554d8cf812
|
2023-06-13 13:13:26
|
Weny Xu
|
chore: add debug log for heartbeat (#1770)
| false
|
diff --git a/src/datanode/src/heartbeat.rs b/src/datanode/src/heartbeat.rs
index 8b193f84e406..fa3e362df0bf 100644
--- a/src/datanode/src/heartbeat.rs
+++ b/src/datanode/src/heartbeat.rs
@@ -23,7 +23,7 @@ use common_meta::heartbeat::handler::{
};
use common_meta::heartbeat::mailbox::{HeartbeatMailbox, MailboxRef};
use common_meta::heartbeat::utils::outgoing_message_to_mailbox_message;
-use common_telemetry::{error, info, trace, warn};
+use common_telemetry::{debug, error, info, trace, warn};
use meta_client::client::{HeartbeatSender, MetaClient};
use snafu::ResultExt;
use tokio::sync::mpsc;
@@ -199,6 +199,7 @@ impl HeartbeatTask {
}
};
if let Some(req) = req {
+ debug!("Sending heartbeat request: {:?}", req);
if let Err(e) = tx.send(req).await {
error!("Failed to send heartbeat to metasrv, error: {:?}", e);
match Self::create_streams(
diff --git a/src/frontend/src/heartbeat.rs b/src/frontend/src/heartbeat.rs
index c069e5c548ac..72644fb25e1f 100644
--- a/src/frontend/src/heartbeat.rs
+++ b/src/frontend/src/heartbeat.rs
@@ -20,8 +20,7 @@ use common_meta::heartbeat::handler::{
};
use common_meta::heartbeat::mailbox::{HeartbeatMailbox, MailboxRef, OutgoingMessage};
use common_meta::heartbeat::utils::outgoing_message_to_mailbox_message;
-use common_telemetry::tracing::trace;
-use common_telemetry::{error, info};
+use common_telemetry::{debug, error, info};
use meta_client::client::{HeartbeatSender, HeartbeatStream, MetaClient};
use snafu::ResultExt;
use tokio::sync::mpsc;
@@ -83,7 +82,7 @@ impl HeartbeatTask {
loop {
match resp_stream.message().await {
Ok(Some(resp)) => {
- trace!("Received a heartbeat response: {:?}", resp);
+ debug!("Receiving heartbeat response: {:?}", resp);
let ctx = HeartbeatResponseHandlerContext::new(mailbox.clone(), resp);
if let Err(e) = capture_self.handle_response(ctx) {
error!(e; "Error while handling heartbeat response");
@@ -92,7 +91,6 @@ impl HeartbeatTask {
Ok(None) => break,
Err(e) => {
error!(e; "Occur error while reading heartbeat response");
-
capture_self
.start_with_retry(Duration::from_secs(retry_interval))
.await;
@@ -148,7 +146,7 @@ impl HeartbeatTask {
error!(e; "Failed to send heartbeat to metasrv");
break;
} else {
- trace!("Send a heartbeat request to metasrv, content: {:?}", req);
+ debug!("Send a heartbeat request to metasrv, content: {:?}", req);
}
}
}
diff --git a/src/meta-srv/src/service/heartbeat.rs b/src/meta-srv/src/service/heartbeat.rs
index 1d0d9950338b..0da329fd14c5 100644
--- a/src/meta-srv/src/service/heartbeat.rs
+++ b/src/meta-srv/src/service/heartbeat.rs
@@ -20,7 +20,7 @@ use api::v1::meta::{
heartbeat_server, AskLeaderRequest, AskLeaderResponse, HeartbeatRequest, HeartbeatResponse,
Peer, RequestHeader, ResponseHeader, Role,
};
-use common_telemetry::{error, info, warn};
+use common_telemetry::{debug, error, info, warn};
use futures::StreamExt;
use once_cell::sync::OnceCell;
use tokio::sync::mpsc;
@@ -59,6 +59,7 @@ impl heartbeat_server::Heartbeat for MetaSrv {
break;
}
};
+ debug!("Receiving heartbeat request: {:?}", req);
if pusher_key.is_none() {
let node_id = get_node_id(header);
@@ -76,6 +77,7 @@ impl heartbeat_server::Heartbeat for MetaSrv {
is_not_leader = res.as_ref().map_or(false, |r| r.is_not_leader());
+ debug!("Sending heartbeat response: {:?}", res);
tx.send(res).await.expect("working rx");
}
Err(err) => {
|
chore
|
add debug log for heartbeat (#1770)
|
4fc173acf0267aeda2a8ddb77c03baf35d22b7b2
|
2023-05-13 20:07:47
|
zyy17
|
refactor: support layered configuration (#1535)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index d103e0181b0b..9013fb86ad7c 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1518,6 +1518,7 @@ dependencies = [
"common-recordbatch",
"common-telemetry",
"common-test-util",
+ "config",
"datanode",
"either",
"frontend",
@@ -1534,6 +1535,7 @@ dependencies = [
"session",
"snafu",
"substrait 0.2.0",
+ "temp-env",
"tikv-jemalloc-ctl",
"tikv-jemallocator",
"tokio",
@@ -1878,6 +1880,25 @@ dependencies = [
"crossbeam-utils",
]
+[[package]]
+name = "config"
+version = "0.13.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d379af7f68bfc21714c6c7dea883544201741d2ce8274bb12fa54f89507f52a7"
+dependencies = [
+ "async-trait",
+ "json5",
+ "lazy_static",
+ "nom",
+ "pathdiff",
+ "ron",
+ "rust-ini",
+ "serde",
+ "serde_json",
+ "toml",
+ "yaml-rust",
+]
+
[[package]]
name = "console"
version = "0.15.5"
@@ -4288,6 +4309,17 @@ dependencies = [
"wasm-bindgen",
]
+[[package]]
+name = "json5"
+version = "0.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "96b0db21af676c1ce64250b5f40f3ce2cf27e4e47cb91ed91eb6fe9350b430c1"
+dependencies = [
+ "pest",
+ "pest_derive",
+ "serde",
+]
+
[[package]]
name = "jsonwebtoken"
version = "8.3.0"
@@ -4493,6 +4525,12 @@ dependencies = [
"cc",
]
+[[package]]
+name = "linked-hash-map"
+version = "0.5.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f"
+
[[package]]
name = "linux-raw-sys"
version = "0.3.4"
@@ -5750,6 +5788,12 @@ version = "1.0.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9f746c4065a8fa3fe23974dd82f15431cc8d40779821001404d10d2e79ca7d79"
+[[package]]
+name = "pathdiff"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8835116a5c179084a830efb3adc117ab007512b535bc1a21c991d3b32a6b44dd"
+
[[package]]
name = "peeking_take_while"
version = "0.1.2"
@@ -8660,6 +8704,15 @@ version = "0.12.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ae9980cab1db3fceee2f6c6f643d5d8de2997c58ee8d25fb0cc8a9e9e7348e5"
+[[package]]
+name = "temp-env"
+version = "0.3.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9547444bfe52cbd79515c6c8087d8ae6ca8d64d2d31a27746320f5cb81d1a15c"
+dependencies = [
+ "parking_lot",
+]
+
[[package]]
name = "tempfile"
version = "3.5.0"
@@ -10282,6 +10335,15 @@ dependencies = [
"lzma-sys",
]
+[[package]]
+name = "yaml-rust"
+version = "0.4.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85"
+dependencies = [
+ "linked-hash-map",
+]
+
[[package]]
name = "zeroize"
version = "1.6.0"
diff --git a/src/cmd/Cargo.toml b/src/cmd/Cargo.toml
index ea80dde09555..1e82a0a9eef4 100644
--- a/src/cmd/Cargo.toml
+++ b/src/cmd/Cargo.toml
@@ -25,6 +25,7 @@ common-recordbatch = { path = "../common/recordbatch" }
common-telemetry = { path = "../common/telemetry", features = [
"deadlock_detection",
] }
+config = "0.13"
datanode = { path = "../datanode" }
either = "1.8"
frontend = { path = "../frontend" }
@@ -37,20 +38,19 @@ query = { path = "../query" }
rustyline = "10.1"
serde.workspace = true
servers = { path = "../servers" }
-
session = { path = "../session" }
snafu.workspace = true
substrait = { path = "../common/substrait" }
tikv-jemalloc-ctl = { version = "0.5", optional = true }
tikv-jemallocator = { version = "0.5", optional = true }
tokio.workspace = true
-toml = "0.5"
-
[dev-dependencies]
common-test-util = { path = "../common/test-util" }
rexpect = "0.5"
+temp-env = "0.3"
serde.workspace = true
+toml = "0.5"
[build-dependencies]
build-data = "0.1.3"
diff --git a/src/cmd/src/datanode.rs b/src/cmd/src/datanode.rs
index b3ce41450b47..2eada5261b91 100644
--- a/src/cmd/src/datanode.rs
+++ b/src/cmd/src/datanode.rs
@@ -23,7 +23,6 @@ use snafu::ResultExt;
use crate::error::{MissingConfigSnafu, Result, ShutdownDatanodeSnafu, StartDatanodeSnafu};
use crate::options::{Options, TopLevelOptions};
-use crate::toml_loader;
pub struct Instance {
datanode: Datanode,
@@ -99,15 +98,14 @@ struct StartCommand {
http_addr: Option<String>,
#[clap(long)]
http_timeout: Option<u64>,
+ #[clap(long, default_value = "GREPTIMEDB_DATANODE")]
+ env_prefix: String,
}
impl StartCommand {
fn load_options(&self, top_level_opts: TopLevelOptions) -> Result<Options> {
- let mut opts: DatanodeOptions = if let Some(path) = &self.config_file {
- toml_loader::from_file!(path)?
- } else {
- DatanodeOptions::default()
- };
+ let mut opts: DatanodeOptions =
+ Options::load_layered_options(self.config_file.as_deref(), self.env_prefix.as_ref())?;
if let Some(dir) = top_level_opts.log_dir {
opts.logging.dir = dir;
@@ -116,26 +114,27 @@ impl StartCommand {
opts.logging.level = level;
}
- if let Some(addr) = self.rpc_addr.clone() {
- opts.rpc_addr = addr;
+ if let Some(addr) = &self.rpc_addr {
+ opts.rpc_addr = addr.clone();
}
if self.rpc_hostname.is_some() {
opts.rpc_hostname = self.rpc_hostname.clone();
}
- if let Some(addr) = self.mysql_addr.clone() {
- opts.mysql_addr = addr;
+ if let Some(addr) = &self.mysql_addr {
+ opts.mysql_addr = addr.clone();
}
if let Some(node_id) = self.node_id {
opts.node_id = Some(node_id);
}
- if let Some(meta_addr) = self.metasrv_addr.clone() {
+ if let Some(meta_addr) = &self.metasrv_addr {
opts.meta_client_options
.get_or_insert_with(MetaClientOptions::default)
.metasrv_addrs = meta_addr
+ .clone()
.split(',')
.map(&str::trim)
.map(&str::to_string)
@@ -150,16 +149,20 @@ impl StartCommand {
.fail();
}
- if let Some(data_dir) = self.data_dir.clone() {
- opts.storage.store = ObjectStoreConfig::File(FileConfig { data_dir });
+ if let Some(data_dir) = &self.data_dir {
+ opts.storage.store = ObjectStoreConfig::File(FileConfig {
+ data_dir: data_dir.clone(),
+ });
}
- if let Some(wal_dir) = self.wal_dir.clone() {
- opts.wal.dir = wal_dir;
+ if let Some(wal_dir) = &self.wal_dir {
+ opts.wal.dir = wal_dir.clone();
}
- if let Some(http_addr) = self.http_addr.clone() {
- opts.http_opts.addr = http_addr
+
+ if let Some(http_addr) = &self.http_addr {
+ opts.http_opts.addr = http_addr.clone();
}
+
if let Some(http_timeout) = self.http_timeout {
opts.http_opts.timeout = Duration::from_secs(http_timeout)
}
@@ -191,6 +194,7 @@ mod tests {
use servers::Mode;
use super::*;
+ use crate::options::ENV_VAR_SEP;
#[test]
fn test_read_from_config_file() {
@@ -350,4 +354,110 @@ mod tests {
assert_eq!("/tmp/greptimedb/test/logs", logging_opt.dir);
assert_eq!("debug", logging_opt.level);
}
+
+ #[test]
+ fn test_config_precedence_order() {
+ let mut file = create_named_temp_file();
+ let toml_str = r#"
+ mode = "distributed"
+ enable_memory_catalog = false
+ node_id = 42
+ rpc_addr = "127.0.0.1:3001"
+ rpc_hostname = "127.0.0.1"
+ rpc_runtime_size = 8
+ mysql_addr = "127.0.0.1:4406"
+ mysql_runtime_size = 2
+
+ [meta_client_options]
+ metasrv_addrs = ["127.0.0.1:3002"]
+ timeout_millis = 3000
+ connect_timeout_millis = 5000
+ tcp_nodelay = true
+
+ [wal]
+ dir = "/tmp/greptimedb/wal"
+ file_size = "1GB"
+ purge_threshold = "50GB"
+ purge_interval = "10m"
+ read_batch_size = 128
+ sync_write = false
+
+ [storage]
+ type = "File"
+ data_dir = "/tmp/greptimedb/data/"
+
+ [storage.compaction]
+ max_inflight_tasks = 3
+ max_files_in_level0 = 7
+ max_purge_tasks = 32
+
+ [storage.manifest]
+ checkpoint_on_startup = true
+
+ [logging]
+ level = "debug"
+ dir = "/tmp/greptimedb/test/logs"
+ "#;
+ write!(file, "{}", toml_str).unwrap();
+
+ let env_prefix = "DATANODE_UT";
+ temp_env::with_vars(
+ vec![
+ (
+ // storage.manifest.gc_duration = 9s
+ vec![
+ env_prefix.to_string(),
+ "storage".to_uppercase(),
+ "manifest".to_uppercase(),
+ "gc_duration".to_uppercase(),
+ ]
+ .join(ENV_VAR_SEP),
+ Some("9s"),
+ ),
+ (
+ // storage.compaction.max_purge_tasks = 99
+ vec![
+ env_prefix.to_string(),
+ "storage".to_uppercase(),
+ "compaction".to_uppercase(),
+ "max_purge_tasks".to_uppercase(),
+ ]
+ .join(ENV_VAR_SEP),
+ Some("99"),
+ ),
+ ],
+ || {
+ let command = StartCommand {
+ config_file: Some(file.path().to_str().unwrap().to_string()),
+ wal_dir: Some("/other/wal/dir".to_string()),
+ env_prefix: env_prefix.to_string(),
+ ..Default::default()
+ };
+
+ let Options::Datanode(opts) =
+ command.load_options(TopLevelOptions::default()).unwrap() else {unreachable!()};
+
+ // Should be read from env, env > default values.
+ assert_eq!(
+ opts.storage.manifest.gc_duration,
+ Some(Duration::from_secs(9))
+ );
+
+ // Should be read from config file, config file > env > default values.
+ assert_eq!(opts.storage.compaction.max_purge_tasks, 32);
+
+ // Should be read from cli, cli > config file > env > default values.
+ assert_eq!(opts.wal.dir, "/other/wal/dir");
+
+ // Should be default value.
+ assert_eq!(
+ opts.storage.manifest.checkpoint_margin,
+ DatanodeOptions::default()
+ .storage
+ .manifest
+ .checkpoint_margin
+ );
+ },
+ );
+ }
}
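
The test above relies on `temp_env::with_vars` to scope environment variables to a single closure. A minimal sketch of that pattern, assuming `temp-env` 0.3 as a dev-dependency; the variable name is arbitrary and chosen only for illustration.

fn main() {
    // `with_vars` sets the variables, runs the closure, then restores
    // whatever values were present before the call.
    temp_env::with_vars(vec![("GREPTIME_UT_DEMO_VAR", Some("42"))], || {
        // Visible only inside the closure.
        assert_eq!(std::env::var("GREPTIME_UT_DEMO_VAR").unwrap(), "42");
    });

    // Restored afterwards (assuming it was not set before the call).
    assert!(std::env::var("GREPTIME_UT_DEMO_VAR").is_err());
}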
diff --git a/src/cmd/src/error.rs b/src/cmd/src/error.rs
index f680a33c7e7e..1a11dddae81a 100644
--- a/src/cmd/src/error.rs
+++ b/src/cmd/src/error.rs
@@ -15,6 +15,7 @@
use std::any::Any;
use common_error::prelude::*;
+use config::ConfigError;
use rustyline::error::ReadlineError;
use snafu::Location;
@@ -70,12 +71,6 @@ pub enum Error {
location: Location,
},
- #[snafu(display("Failed to parse config, source: {}", source))]
- ParseConfig {
- source: toml::de::Error,
- location: Location,
- },
-
#[snafu(display("Missing config, msg: {}", msg))]
MissingConfig { msg: String, location: Location },
@@ -153,6 +148,12 @@ pub enum Error {
#[snafu(backtrace)]
source: substrait::error::Error,
},
+
+ #[snafu(display("Failed to load layered config, source: {}", source))]
+ LoadLayeredConfig {
+ source: ConfigError,
+ location: Location,
+ },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -168,13 +169,12 @@ impl ErrorExt for Error {
Error::ShutdownMetaServer { source } => source.status_code(),
Error::BuildMetaServer { source } => source.status_code(),
Error::UnsupportedSelectorType { source, .. } => source.status_code(),
- Error::ReadConfig { .. } | Error::ParseConfig { .. } | Error::MissingConfig { .. } => {
- StatusCode::InvalidArguments
- }
- Error::IllegalConfig { .. } | Error::InvalidReplCommand { .. } => {
- StatusCode::InvalidArguments
- }
- Error::IllegalAuthConfig { .. } => StatusCode::InvalidArguments,
+ Error::ReadConfig { .. }
+ | Error::MissingConfig { .. }
+ | Error::LoadLayeredConfig { .. }
+ | Error::IllegalConfig { .. }
+ | Error::InvalidReplCommand { .. }
+ | Error::IllegalAuthConfig { .. } => StatusCode::InvalidArguments,
Error::ReplCreation { .. } | Error::Readline { .. } => StatusCode::Internal,
Error::RequestDatabase { source, .. } => source.status_code(),
Error::CollectRecordBatches { source } | Error::PrettyPrintRecordBatches { source } => {
diff --git a/src/cmd/src/frontend.rs b/src/cmd/src/frontend.rs
index ef58f0decac0..a082a2ab1fea 100644
--- a/src/cmd/src/frontend.rs
+++ b/src/cmd/src/frontend.rs
@@ -17,12 +17,8 @@ use std::sync::Arc;
use clap::Parser;
use common_base::Plugins;
use frontend::frontend::FrontendOptions;
-use frontend::grpc::GrpcOptions;
use frontend::influxdb::InfluxdbOptions;
use frontend::instance::{FrontendInstance, Instance as FeInstance};
-use frontend::mysql::MysqlOptions;
-use frontend::opentsdb::OpentsdbOptions;
-use frontend::postgres::PostgresOptions;
use frontend::prom::PromOptions;
use meta_client::MetaClientOptions;
use servers::auth::UserProviderRef;
@@ -32,7 +28,6 @@ use snafu::ResultExt;
use crate::error::{self, IllegalAuthConfigSnafu, Result};
use crate::options::{Options, TopLevelOptions};
-use crate::toml_loader;
pub struct Instance {
frontend: FeInstance,
@@ -89,7 +84,7 @@ impl SubCommand {
}
}
-#[derive(Debug, Parser)]
+#[derive(Debug, Default, Parser)]
pub struct StartCommand {
#[clap(long)]
http_addr: Option<String>,
@@ -119,15 +114,14 @@ pub struct StartCommand {
user_provider: Option<String>,
#[clap(long)]
disable_dashboard: Option<bool>,
+ #[clap(long, default_value = "GREPTIMEDB_FRONTEND")]
+ env_prefix: String,
}
impl StartCommand {
fn load_options(&self, top_level_opts: TopLevelOptions) -> Result<Options> {
- let mut opts: FrontendOptions = if let Some(path) = &self.config_file {
- toml_loader::from_file!(path)?
- } else {
- FrontendOptions::default()
- };
+ let mut opts: FrontendOptions =
+ Options::load_layered_options(self.config_file.as_deref(), self.env_prefix.as_ref())?;
if let Some(dir) = top_level_opts.log_dir {
opts.logging.dir = dir;
@@ -136,14 +130,16 @@ impl StartCommand {
opts.logging.level = level;
}
- let tls_option = TlsOption::new(
+ let tls_opts = TlsOption::new(
self.tls_mode.clone(),
self.tls_cert_path.clone(),
self.tls_key_path.clone(),
);
- if let Some(addr) = self.http_addr.clone() {
- opts.http_options.get_or_insert_with(Default::default).addr = addr;
+ if let Some(addr) = &self.http_addr {
+ if let Some(http_opts) = &mut opts.http_options {
+ http_opts.addr = addr.clone()
+ }
}
if let Some(disable_dashboard) = self.disable_dashboard {
@@ -152,43 +148,45 @@ impl StartCommand {
.disable_dashboard = disable_dashboard;
}
- if let Some(addr) = self.grpc_addr.clone() {
- opts.grpc_options = Some(GrpcOptions {
- addr,
- ..Default::default()
- });
+ if let Some(addr) = &self.grpc_addr {
+ if let Some(grpc_opts) = &mut opts.grpc_options {
+ grpc_opts.addr = addr.clone()
+ }
}
- if let Some(addr) = self.mysql_addr.clone() {
- opts.mysql_options = Some(MysqlOptions {
- addr,
- tls: tls_option.clone(),
- ..Default::default()
- });
+ if let Some(addr) = &self.mysql_addr {
+ if let Some(mysql_opts) = &mut opts.mysql_options {
+ mysql_opts.addr = addr.clone();
+ mysql_opts.tls = tls_opts.clone();
+ }
}
- if let Some(addr) = self.prom_addr.clone() {
- opts.prom_options = Some(PromOptions { addr });
+
+ if let Some(addr) = &self.prom_addr {
+ opts.prom_options = Some(PromOptions { addr: addr.clone() });
}
- if let Some(addr) = self.postgres_addr.clone() {
- opts.postgres_options = Some(PostgresOptions {
- addr,
- tls: tls_option,
- ..Default::default()
- });
+
+ if let Some(addr) = &self.postgres_addr {
+ if let Some(postgres_opts) = &mut opts.postgres_options {
+ postgres_opts.addr = addr.clone();
+ postgres_opts.tls = tls_opts;
+ }
}
- if let Some(addr) = self.opentsdb_addr.clone() {
- opts.opentsdb_options = Some(OpentsdbOptions {
- addr,
- ..Default::default()
- });
+
+ if let Some(addr) = &self.opentsdb_addr {
+ if let Some(opentsdb_addr) = &mut opts.opentsdb_options {
+ opentsdb_addr.addr = addr.clone();
+ }
}
+
if let Some(enable) = self.influxdb_enable {
opts.influxdb_options = Some(InfluxdbOptions { enable });
}
- if let Some(metasrv_addr) = self.metasrv_addr.clone() {
+
+ if let Some(metasrv_addr) = &self.metasrv_addr {
opts.meta_client_options
.get_or_insert_with(MetaClientOptions::default)
.metasrv_addrs = metasrv_addr
+ .clone()
.split(',')
.map(&str::trim)
.map(&str::to_string)
@@ -231,27 +229,23 @@ mod tests {
use std::time::Duration;
use common_test_util::temp_dir::create_named_temp_file;
+ use frontend::grpc::GrpcOptions;
use servers::auth::{Identity, Password, UserProviderRef};
use super::*;
+ use crate::options::ENV_VAR_SEP;
#[test]
fn test_try_from_start_command() {
let command = StartCommand {
http_addr: Some("127.0.0.1:1234".to_string()),
- grpc_addr: None,
prom_addr: Some("127.0.0.1:4444".to_string()),
mysql_addr: Some("127.0.0.1:5678".to_string()),
postgres_addr: Some("127.0.0.1:5432".to_string()),
opentsdb_addr: Some("127.0.0.1:4321".to_string()),
influxdb_enable: Some(false),
- config_file: None,
- metasrv_addr: None,
- tls_mode: None,
- tls_cert_path: None,
- tls_key_path: None,
- user_provider: None,
disable_dashboard: Some(false),
+ ..Default::default()
};
let Options::Frontend(opts) =
@@ -307,20 +301,9 @@ mod tests {
write!(file, "{}", toml_str).unwrap();
let command = StartCommand {
- http_addr: None,
- grpc_addr: None,
- mysql_addr: None,
- prom_addr: None,
- postgres_addr: None,
- opentsdb_addr: None,
- influxdb_enable: None,
config_file: Some(file.path().to_str().unwrap().to_string()),
- metasrv_addr: None,
- tls_mode: None,
- tls_cert_path: None,
- tls_key_path: None,
- user_provider: None,
disable_dashboard: Some(false),
+ ..Default::default()
};
let Options::Frontend(fe_opts) =
@@ -342,20 +325,9 @@ mod tests {
#[tokio::test]
async fn test_try_from_start_command_to_anymap() {
let command = StartCommand {
- http_addr: None,
- grpc_addr: None,
- mysql_addr: None,
- prom_addr: None,
- postgres_addr: None,
- opentsdb_addr: None,
- influxdb_enable: None,
- config_file: None,
- metasrv_addr: None,
- tls_mode: None,
- tls_cert_path: None,
- tls_key_path: None,
user_provider: Some("static_user_provider:cmd:test=test".to_string()),
disable_dashboard: Some(false),
+ ..Default::default()
};
let plugins = load_frontend_plugins(&command.user_provider);
@@ -377,20 +349,8 @@ mod tests {
#[test]
fn test_top_level_options() {
let cmd = StartCommand {
- http_addr: None,
- grpc_addr: None,
- mysql_addr: None,
- prom_addr: None,
- postgres_addr: None,
- opentsdb_addr: None,
- influxdb_enable: None,
- config_file: None,
- metasrv_addr: None,
- tls_mode: None,
- tls_cert_path: None,
- tls_key_path: None,
- user_provider: None,
disable_dashboard: Some(false),
+ ..Default::default()
};
let options = cmd
@@ -404,4 +364,91 @@ mod tests {
assert_eq!("/tmp/greptimedb/test/logs", logging_opt.dir);
assert_eq!("debug", logging_opt.level);
}
+
+ #[test]
+ fn test_config_precedence_order() {
+ let mut file = create_named_temp_file();
+ let toml_str = r#"
+ mode = "distributed"
+
+ [http_options]
+ addr = "127.0.0.1:4000"
+
+ [mysql_options]
+ addr = "127.0.0.1:4002"
+ "#;
+ write!(file, "{}", toml_str).unwrap();
+
+ let env_prefix = "FRONTEND_UT";
+ temp_env::with_vars(
+ vec![
+ (
+ // mysql_options.addr = 127.0.0.1:14002
+ vec![
+ env_prefix.to_string(),
+ "mysql_options".to_uppercase(),
+ "addr".to_uppercase(),
+ ]
+ .join(ENV_VAR_SEP),
+ Some("127.0.0.1:14002"),
+ ),
+ (
+ // mysql_options.runtime_size = 11
+ vec![
+ env_prefix.to_string(),
+ "mysql_options".to_uppercase(),
+ "runtime_size".to_uppercase(),
+ ]
+ .join(ENV_VAR_SEP),
+ Some("11"),
+ ),
+ (
+ // http_options.addr = 127.0.0.1:24000
+ vec![
+ env_prefix.to_string(),
+ "http_options".to_uppercase(),
+ "addr".to_uppercase(),
+ ]
+ .join(ENV_VAR_SEP),
+ Some("127.0.0.1:24000"),
+ ),
+ ],
+ || {
+ let command = StartCommand {
+ config_file: Some(file.path().to_str().unwrap().to_string()),
+ http_addr: Some("127.0.0.1:14000".to_string()),
+ env_prefix: env_prefix.to_string(),
+ ..Default::default()
+ };
+
+ let top_level_opts = TopLevelOptions {
+ log_dir: None,
+ log_level: Some("error".to_string()),
+ };
+ let Options::Frontend(fe_opts) =
+ command.load_options(top_level_opts).unwrap() else {unreachable!()};
+
+ // Should be read from env, env > default values.
+ assert_eq!(fe_opts.mysql_options.as_ref().unwrap().runtime_size, 11);
+
+ // Should be read from config file, config file > env > default values.
+ assert_eq!(
+ fe_opts.mysql_options.as_ref().unwrap().addr,
+ "127.0.0.1:4002"
+ );
+
+ // Should be read from cli, cli > config file > env > default values.
+ assert_eq!(
+ fe_opts.http_options.as_ref().unwrap().addr,
+ "127.0.0.1:14000"
+ );
+
+ // Should be default value.
+ assert_eq!(
+ fe_opts.grpc_options.as_ref().unwrap().addr,
+ GrpcOptions::default().addr
+ );
+ },
+ );
+ }
}
diff --git a/src/cmd/src/lib.rs b/src/cmd/src/lib.rs
index df2a46199b07..b10af430954e 100644
--- a/src/cmd/src/lib.rs
+++ b/src/cmd/src/lib.rs
@@ -21,4 +21,3 @@ pub mod frontend;
pub mod metasrv;
pub mod options;
pub mod standalone;
-mod toml_loader;
diff --git a/src/cmd/src/metasrv.rs b/src/cmd/src/metasrv.rs
index bc60c8da6442..59edf69b1a69 100644
--- a/src/cmd/src/metasrv.rs
+++ b/src/cmd/src/metasrv.rs
@@ -20,9 +20,8 @@ use meta_srv::bootstrap::MetaSrvInstance;
use meta_srv::metasrv::MetaSrvOptions;
use snafu::ResultExt;
-use crate::error::Result;
+use crate::error::{self, Result};
use crate::options::{Options, TopLevelOptions};
-use crate::{error, toml_loader};
pub struct Instance {
instance: MetaSrvInstance,
@@ -79,7 +78,7 @@ impl SubCommand {
}
}
-#[derive(Debug, Parser)]
+#[derive(Debug, Default, Parser)]
struct StartCommand {
#[clap(long)]
bind_addr: Option<String>,
@@ -97,15 +96,14 @@ struct StartCommand {
http_addr: Option<String>,
#[clap(long)]
http_timeout: Option<u64>,
+ #[clap(long, default_value = "GREPTIMEDB_METASRV")]
+ env_prefix: String,
}
impl StartCommand {
fn load_options(&self, top_level_opts: TopLevelOptions) -> Result<Options> {
- let mut opts: MetaSrvOptions = if let Some(path) = &self.config_file {
- toml_loader::from_file!(path)?
- } else {
- MetaSrvOptions::default()
- };
+ let mut opts: MetaSrvOptions =
+ Options::load_layered_options(self.config_file.as_deref(), self.env_prefix.as_ref())?;
if let Some(dir) = top_level_opts.log_dir {
opts.logging.dir = dir;
@@ -114,15 +112,18 @@ impl StartCommand {
opts.logging.level = level;
}
- if let Some(addr) = self.bind_addr.clone() {
- opts.bind_addr = addr;
+ if let Some(addr) = &self.bind_addr {
+ opts.bind_addr = addr.clone();
}
- if let Some(addr) = self.server_addr.clone() {
- opts.server_addr = addr;
+
+ if let Some(addr) = &self.server_addr {
+ opts.server_addr = addr.clone();
}
- if let Some(addr) = self.store_addr.clone() {
- opts.store_addr = addr;
+
+ if let Some(addr) = &self.store_addr {
+ opts.store_addr = addr.clone();
}
+
if let Some(selector_type) = &self.selector {
opts.selector = selector_type[..]
.try_into()
@@ -133,9 +134,10 @@ impl StartCommand {
opts.use_memory_store = true;
}
- if let Some(http_addr) = self.http_addr.clone() {
- opts.http_opts.addr = http_addr;
+ if let Some(http_addr) = &self.http_addr {
+ opts.http_opts.addr = http_addr.clone();
}
+
if let Some(http_timeout) = self.http_timeout {
opts.http_opts.timeout = Duration::from_secs(http_timeout);
}
@@ -167,6 +169,7 @@ mod tests {
use meta_srv::selector::SelectorType;
use super::*;
+ use crate::options::ENV_VAR_SEP;
#[test]
fn test_read_from_cmd() {
@@ -174,11 +177,8 @@ mod tests {
bind_addr: Some("127.0.0.1:3002".to_string()),
server_addr: Some("127.0.0.1:3002".to_string()),
store_addr: Some("127.0.0.1:2380".to_string()),
- config_file: None,
selector: Some("LoadBased".to_string()),
- use_memory_store: false,
- http_addr: None,
- http_timeout: None,
+ ..Default::default()
};
let Options::Metasrv(options) =
@@ -206,14 +206,8 @@ mod tests {
write!(file, "{}", toml_str).unwrap();
let cmd = StartCommand {
- bind_addr: None,
- server_addr: None,
- store_addr: None,
- selector: None,
config_file: Some(file.path().to_str().unwrap().to_string()),
- use_memory_store: false,
- http_addr: None,
- http_timeout: None,
+ ..Default::default()
};
let Options::Metasrv(options) =
@@ -233,11 +227,8 @@ mod tests {
bind_addr: Some("127.0.0.1:3002".to_string()),
server_addr: Some("127.0.0.1:3002".to_string()),
store_addr: Some("127.0.0.1:2380".to_string()),
- config_file: None,
selector: Some("LoadBased".to_string()),
- use_memory_store: false,
- http_addr: None,
- http_timeout: None,
+ ..Default::default()
};
let options = cmd
@@ -251,4 +242,72 @@ mod tests {
assert_eq!("/tmp/greptimedb/test/logs", logging_opt.dir);
assert_eq!("debug", logging_opt.level);
}
+
+ #[test]
+ fn test_config_precedence_order() {
+ let mut file = create_named_temp_file();
+ let toml_str = r#"
+ server_addr = "127.0.0.1:3002"
+ datanode_lease_secs = 15
+ selector = "LeaseBased"
+ use_memory_store = false
+
+ [http_options]
+ addr = "127.0.0.1:4000"
+
+ [logging]
+ level = "debug"
+ dir = "/tmp/greptimedb/test/logs"
+ "#;
+ write!(file, "{}", toml_str).unwrap();
+
+ let env_prefix = "METASRV_UT";
+ temp_env::with_vars(
+ vec![
+ (
+ // bind_addr = 127.0.0.1:14002
+ vec![env_prefix.to_string(), "bind_addr".to_uppercase()].join(ENV_VAR_SEP),
+ Some("127.0.0.1:14002"),
+ ),
+ (
+ // server_addr = 127.0.0.1:13002
+ vec![env_prefix.to_string(), "server_addr".to_uppercase()].join(ENV_VAR_SEP),
+ Some("127.0.0.1:13002"),
+ ),
+ (
+ // http_options.addr = 127.0.0.1:24000
+ vec![
+ env_prefix.to_string(),
+ "http_options".to_uppercase(),
+ "addr".to_uppercase(),
+ ]
+ .join(ENV_VAR_SEP),
+ Some("127.0.0.1:24000"),
+ ),
+ ],
+ || {
+ let command = StartCommand {
+ http_addr: Some("127.0.0.1:14000".to_string()),
+ config_file: Some(file.path().to_str().unwrap().to_string()),
+ env_prefix: env_prefix.to_string(),
+ ..Default::default()
+ };
+
+ let Options::Metasrv(opts) =
+ command.load_options(TopLevelOptions::default()).unwrap() else {unreachable!()};
+
+ // Should be read from env, env > default values.
+ assert_eq!(opts.bind_addr, "127.0.0.1:14002");
+
+ // Should be read from config file, config file > env > default values.
+ assert_eq!(opts.server_addr, "127.0.0.1:3002");
+
+ // Should be read from cli, cli > config file > env > default values.
+ assert_eq!(opts.http_opts.addr, "127.0.0.1:14000");
+
+ // Should be default value.
+ assert_eq!(opts.store_addr, "127.0.0.1:2379");
+ },
+ );
+ }
}
diff --git a/src/cmd/src/options.rs b/src/cmd/src/options.rs
index eeec11f2aaac..595cf099b934 100644
--- a/src/cmd/src/options.rs
+++ b/src/cmd/src/options.rs
@@ -11,10 +11,18 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+
use common_telemetry::logging::LoggingOptions;
+use config::{Config, Environment, File, FileFormat};
use datanode::datanode::DatanodeOptions;
use frontend::frontend::FrontendOptions;
use meta_srv::metasrv::MetaSrvOptions;
+use serde::{Deserialize, Serialize};
+use snafu::ResultExt;
+
+use crate::error::{LoadLayeredConfigSnafu, Result};
+
+pub const ENV_VAR_SEP: &str = "__";
pub struct MixOptions {
pub fe_opts: FrontendOptions,
@@ -30,6 +38,12 @@ pub enum Options {
Cli(Box<LoggingOptions>),
}
+#[derive(Clone, Debug, Default)]
+pub struct TopLevelOptions {
+ pub log_dir: Option<String>,
+ pub log_level: Option<String>,
+}
+
impl Options {
pub fn logging_options(&self) -> &LoggingOptions {
match self {
@@ -40,10 +54,190 @@ impl Options {
Options::Cli(opts) => opts,
}
}
+
+ /// Load the configuration from multiple sources and merge them.
+ /// The precedence order is: config file > environment variables > default values.
+ /// `env_prefix` is the prefix of environment variables, e.g. "FRONTEND__xxx".
+ /// The function will use dunder(double underscore) `__` as the separator for environment variables, for example:
+ /// `DATANODE__STORAGE__MANIFEST__CHECKPOINT_MARGIN` will be mapped to `DatanodeOptions.storage.manifest.checkpoint_margin` field in the configuration.
+ pub fn load_layered_options<'de, T: Serialize + Deserialize<'de> + Default>(
+ config_file: Option<&str>,
+ env_prefix: &str,
+ ) -> Result<T> {
+ let default_opts = T::default();
+
+ let env_source = {
+ let mut env = Environment::default();
+
+ if !env_prefix.is_empty() {
+ env = env.prefix(env_prefix);
+ }
+
+ env.try_parsing(true)
+ .separator(ENV_VAR_SEP)
+ .ignore_empty(true)
+ };
+
+ // Add default values and environment variables as the sources of the configuration.
+ let mut layered_config = Config::builder()
+ .add_source(Config::try_from(&default_opts).context(LoadLayeredConfigSnafu)?)
+ .add_source(env_source);
+
+ // Add config file as the source of the configuration if it is specified.
+ if let Some(config_file) = config_file {
+ layered_config = layered_config.add_source(File::new(config_file, FileFormat::Toml));
+ }
+
+ let opts = layered_config
+ .build()
+ .context(LoadLayeredConfigSnafu)?
+ .try_deserialize()
+ .context(LoadLayeredConfigSnafu)?;
+
+ Ok(opts)
+ }
}
-#[derive(Clone, Debug, Default)]
-pub struct TopLevelOptions {
- pub log_dir: Option<String>,
- pub log_level: Option<String>,
+#[cfg(test)]
+mod tests {
+ use std::io::Write;
+ use std::time::Duration;
+
+ use common_test_util::temp_dir::create_named_temp_file;
+ use datanode::datanode::{DatanodeOptions, ObjectStoreConfig};
+
+ use super::*;
+
+ #[test]
+ fn test_load_layered_options() {
+ let mut file = create_named_temp_file();
+ let toml_str = r#"
+ mode = "distributed"
+ enable_memory_catalog = false
+ rpc_addr = "127.0.0.1:3001"
+ rpc_hostname = "127.0.0.1"
+ rpc_runtime_size = 8
+ mysql_addr = "127.0.0.1:4406"
+ mysql_runtime_size = 2
+
+ [meta_client_options]
+ metasrv_addrs = ["127.0.0.1:3002"]
+ timeout_millis = 3000
+ connect_timeout_millis = 5000
+ tcp_nodelay = true
+
+ [wal]
+ dir = "/tmp/greptimedb/wal"
+ file_size = "1GB"
+ purge_threshold = "50GB"
+ purge_interval = "10m"
+ read_batch_size = 128
+ sync_write = false
+
+ [storage.compaction]
+ max_inflight_tasks = 3
+ max_files_in_level0 = 7
+ max_purge_tasks = 32
+
+ [logging]
+ level = "debug"
+ dir = "/tmp/greptimedb/test/logs"
+ "#;
+ write!(file, "{}", toml_str).unwrap();
+
+ let env_prefix = "DATANODE_UT";
+ temp_env::with_vars(
+ // The following environment variables will be used to override the values in the config file.
+ vec![
+ (
+ // storage.manifest.checkpoint_margin = 99
+ vec![
+ env_prefix.to_string(),
+ "storage".to_uppercase(),
+ "manifest".to_uppercase(),
+ "checkpoint_margin".to_uppercase(),
+ ]
+ .join(ENV_VAR_SEP),
+ Some("99"),
+ ),
+ (
+ // storage.type = S3
+ vec![
+ env_prefix.to_string(),
+ "storage".to_uppercase(),
+ "type".to_uppercase(),
+ ]
+ .join(ENV_VAR_SEP),
+ Some("S3"),
+ ),
+ (
+ // storage.bucket = mybucket
+ vec![
+ env_prefix.to_string(),
+ "storage".to_uppercase(),
+ "bucket".to_uppercase(),
+ ]
+ .join(ENV_VAR_SEP),
+ Some("mybucket"),
+ ),
+ (
+ // storage.manifest.gc_duration = 42s
+ vec![
+ env_prefix.to_string(),
+ "storage".to_uppercase(),
+ "manifest".to_uppercase(),
+ "gc_duration".to_uppercase(),
+ ]
+ .join(ENV_VAR_SEP),
+ Some("42s"),
+ ),
+ (
+ // storage.manifest.checkpoint_on_startup = true
+ vec![
+ env_prefix.to_string(),
+ "storage".to_uppercase(),
+ "manifest".to_uppercase(),
+ "checkpoint_on_startup".to_uppercase(),
+ ]
+ .join(ENV_VAR_SEP),
+ Some("true"),
+ ),
+ (
+ // wal.dir = /other/wal/dir
+ vec![
+ env_prefix.to_string(),
+ "wal".to_uppercase(),
+ "dir".to_uppercase(),
+ ]
+ .join(ENV_VAR_SEP),
+ Some("/other/wal/dir"),
+ ),
+ ],
+ || {
+ let opts: DatanodeOptions =
+ Options::load_layered_options(Some(file.path().to_str().unwrap()), env_prefix)
+ .unwrap();
+
+ // Check the configs from environment variables.
+ assert_eq!(opts.storage.manifest.checkpoint_margin, Some(99));
+ match opts.storage.store {
+ ObjectStoreConfig::S3(s3_config) => {
+ assert_eq!(s3_config.bucket, "mybucket".to_string());
+ }
+ _ => panic!("unexpected store type"),
+ }
+ assert_eq!(
+ opts.storage.manifest.gc_duration,
+ Some(Duration::from_secs(42))
+ );
+ assert!(opts.storage.manifest.checkpoint_on_startup);
+
+ // Should be the values from config file, not environment variables.
+ assert_eq!(opts.wal.dir, "/tmp/greptimedb/wal".to_string());
+
+ // Should be default values.
+ assert_eq!(opts.node_id, None);
+ },
+ );
+ }
}
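
A self-contained sketch of the layering and env-var mapping implemented by `load_layered_options`. The `MYAPP` prefix and the `Opts`/`Storage` structs are invented for illustration; only the `config` 0.13 calls mirror the ones above.

use config::{Config, Environment};
use serde::{Deserialize, Serialize};

#[derive(Debug, Default, Serialize, Deserialize)]
struct Storage {
    data_dir: String,
    checkpoint_margin: Option<u64>,
}

#[derive(Debug, Default, Serialize, Deserialize)]
struct Opts {
    storage: Storage,
}

fn main() -> Result<(), config::ConfigError> {
    // With the dunder separator, MYAPP__STORAGE__CHECKPOINT_MARGIN maps onto
    // Opts.storage.checkpoint_margin; unset fields keep their default values.
    std::env::set_var("MYAPP__STORAGE__CHECKPOINT_MARGIN", "99");

    let opts: Opts = Config::builder()
        .add_source(Config::try_from(&Opts::default())?) // defaults first
        .add_source(
            Environment::default()
                .prefix("MYAPP")
                .separator("__")
                .try_parsing(true),
        ) // env overrides defaults; a TOML `File` source added after this
          // would override both, giving file > env > default
        .build()?
        .try_deserialize()?;

    assert_eq!(opts.storage.checkpoint_margin, Some(99));
    assert_eq!(opts.storage.data_dir, ""); // untouched default
    Ok(())
}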
diff --git a/src/cmd/src/standalone.rs b/src/cmd/src/standalone.rs
index e29db68c7568..7016e0c5ad8a 100644
--- a/src/cmd/src/standalone.rs
+++ b/src/cmd/src/standalone.rs
@@ -41,7 +41,6 @@ use crate::error::{
};
use crate::frontend::load_frontend_plugins;
use crate::options::{MixOptions, Options, TopLevelOptions};
-use crate::toml_loader;
#[derive(Parser)]
pub struct Command {
@@ -184,7 +183,7 @@ impl Instance {
}
}
-#[derive(Debug, Parser)]
+#[derive(Debug, Default, Parser)]
struct StartCommand {
#[clap(long)]
http_addr: Option<String>,
@@ -212,43 +211,42 @@ struct StartCommand {
tls_key_path: Option<String>,
#[clap(long)]
user_provider: Option<String>,
+ #[clap(long, default_value = "GREPTIMEDB_STANDALONE")]
+ env_prefix: String,
}
impl StartCommand {
fn load_options(&self, top_level_options: TopLevelOptions) -> Result<Options> {
- let enable_memory_catalog = self.enable_memory_catalog;
- let config_file = &self.config_file;
- let mut opts: StandaloneOptions = if let Some(path) = config_file {
- toml_loader::from_file!(path)?
- } else {
- StandaloneOptions::default()
- };
+ let mut opts: StandaloneOptions =
+ Options::load_layered_options(self.config_file.as_deref(), self.env_prefix.as_ref())?;
- opts.enable_memory_catalog = enable_memory_catalog;
+ opts.enable_memory_catalog = self.enable_memory_catalog;
- let mut fe_opts = opts.clone().frontend_options();
- let mut logging = opts.logging.clone();
- let dn_opts = opts.datanode_options();
+ opts.mode = Mode::Standalone;
if let Some(dir) = top_level_options.log_dir {
- logging.dir = dir;
+ opts.logging.dir = dir;
}
if let Some(level) = top_level_options.log_level {
- logging.level = level;
+ opts.logging.level = level;
}
- fe_opts.mode = Mode::Standalone;
+ let tls_opts = TlsOption::new(
+ self.tls_mode.clone(),
+ self.tls_cert_path.clone(),
+ self.tls_key_path.clone(),
+ );
- if let Some(addr) = self.http_addr.clone() {
- fe_opts.http_options = Some(HttpOptions {
- addr,
- ..Default::default()
- });
+ if let Some(addr) = &self.http_addr {
+ if let Some(http_opts) = &mut opts.http_options {
+ http_opts.addr = addr.clone()
+ }
}
- if let Some(addr) = self.rpc_addr.clone() {
+
+ if let Some(addr) = &self.rpc_addr {
// frontend grpc addr conflict with datanode default grpc addr
let datanode_grpc_addr = DatanodeOptions::default().rpc_addr;
- if addr == datanode_grpc_addr {
+ if addr.eq(&datanode_grpc_addr) {
return IllegalConfigSnafu {
msg: format!(
"gRPC listen address conflicts with datanode reserved gRPC addr: {datanode_grpc_addr}",
@@ -256,56 +254,42 @@ impl StartCommand {
}
.fail();
}
- fe_opts.grpc_options = Some(GrpcOptions {
- addr,
- ..Default::default()
- });
+ if let Some(grpc_opts) = &mut opts.grpc_options {
+ grpc_opts.addr = addr.clone()
+ }
}
- if let Some(addr) = self.mysql_addr.clone() {
- fe_opts.mysql_options = Some(MysqlOptions {
- addr,
- ..Default::default()
- })
+ if let Some(addr) = &self.mysql_addr {
+ if let Some(mysql_opts) = &mut opts.mysql_options {
+ mysql_opts.addr = addr.clone();
+ mysql_opts.tls = tls_opts.clone();
+ }
}
- if let Some(addr) = self.prom_addr.clone() {
- fe_opts.prom_options = Some(PromOptions { addr })
+ if let Some(addr) = &self.prom_addr {
+ opts.prom_options = Some(PromOptions { addr: addr.clone() })
}
- if let Some(addr) = self.postgres_addr.clone() {
- fe_opts.postgres_options = Some(PostgresOptions {
- addr,
- ..Default::default()
- })
+ if let Some(addr) = &self.postgres_addr {
+ if let Some(postgres_opts) = &mut opts.postgres_options {
+ postgres_opts.addr = addr.clone();
+ postgres_opts.tls = tls_opts;
+ }
}
- if let Some(addr) = self.opentsdb_addr.clone() {
- fe_opts.opentsdb_options = Some(OpentsdbOptions {
- addr,
- ..Default::default()
- });
+ if let Some(addr) = &self.opentsdb_addr {
+ if let Some(opentsdb_addr) = &mut opts.opentsdb_options {
+ opentsdb_addr.addr = addr.clone();
+ }
}
if self.influxdb_enable {
- fe_opts.influxdb_options = Some(InfluxdbOptions { enable: true });
- }
-
- let tls_option = TlsOption::new(
- self.tls_mode.clone(),
- self.tls_cert_path.clone(),
- self.tls_key_path.clone(),
- );
-
- if let Some(mut mysql_options) = fe_opts.mysql_options {
- mysql_options.tls = tls_option.clone();
- fe_opts.mysql_options = Some(mysql_options);
+ opts.influxdb_options = Some(InfluxdbOptions { enable: true });
}
- if let Some(mut postgres_options) = fe_opts.postgres_options {
- postgres_options.tls = tls_option;
- fe_opts.postgres_options = Some(postgres_options);
- }
+ let fe_opts = opts.clone().frontend_options();
+ let logging = opts.logging.clone();
+ let dn_opts = opts.datanode_options();
Ok(Options::Standalone(Box::new(MixOptions {
fe_opts,
@@ -351,6 +335,7 @@ async fn build_frontend(
#[cfg(test)]
mod tests {
+ use std::default::Default;
use std::io::Write;
use std::time::Duration;
@@ -359,23 +344,13 @@ mod tests {
use servers::Mode;
use super::*;
+ use crate::options::ENV_VAR_SEP;
#[tokio::test]
async fn test_try_from_start_command_to_anymap() {
let command = StartCommand {
- http_addr: None,
- rpc_addr: None,
- prom_addr: None,
- mysql_addr: None,
- postgres_addr: None,
- opentsdb_addr: None,
- config_file: None,
- influxdb_enable: false,
- enable_memory_catalog: false,
- tls_mode: None,
- tls_cert_path: None,
- tls_key_path: None,
user_provider: Some("static_user_provider:cmd:test=test".to_string()),
+ ..Default::default()
};
let plugins = load_frontend_plugins(&command.user_provider);
@@ -441,19 +416,9 @@ mod tests {
"#;
write!(file, "{}", toml_str).unwrap();
let cmd = StartCommand {
- http_addr: None,
- rpc_addr: None,
- prom_addr: None,
- mysql_addr: None,
- postgres_addr: None,
- opentsdb_addr: None,
config_file: Some(file.path().to_str().unwrap().to_string()),
- influxdb_enable: false,
- enable_memory_catalog: false,
- tls_mode: None,
- tls_cert_path: None,
- tls_key_path: None,
user_provider: Some("static_user_provider:cmd:test=test".to_string()),
+ ..Default::default()
};
let Options::Standalone(options) = cmd.load_options(TopLevelOptions::default()).unwrap() else {unreachable!()};
@@ -504,19 +469,8 @@ mod tests {
#[test]
fn test_top_level_options() {
let cmd = StartCommand {
- http_addr: None,
- rpc_addr: None,
- prom_addr: None,
- mysql_addr: None,
- postgres_addr: None,
- opentsdb_addr: None,
- config_file: None,
- influxdb_enable: false,
- enable_memory_catalog: false,
- tls_mode: None,
- tls_cert_path: None,
- tls_key_path: None,
user_provider: Some("static_user_provider:cmd:test=test".to_string()),
+ ..Default::default()
};
let Options::Standalone(opts) = cmd
@@ -531,4 +485,88 @@ mod tests {
assert_eq!("/tmp/greptimedb/test/logs", opts.logging.dir);
assert_eq!("debug", opts.logging.level);
}
+
+ #[test]
+ fn test_config_precedence_order() {
+ let mut file = create_named_temp_file();
+ let toml_str = r#"
+ mode = "standalone"
+
+ [http_options]
+ addr = "127.0.0.1:4000"
+
+ [logging]
+ level = "debug"
+ "#;
+ write!(file, "{}", toml_str).unwrap();
+
+ let env_prefix = "STANDALONE_UT";
+ temp_env::with_vars(
+ vec![
+ (
+ // logging.dir = /other/log/dir
+ vec![
+ env_prefix.to_string(),
+ "logging".to_uppercase(),
+ "dir".to_uppercase(),
+ ]
+ .join(ENV_VAR_SEP),
+ Some("/other/log/dir"),
+ ),
+ (
+ // logging.level = info
+ vec![
+ env_prefix.to_string(),
+ "logging".to_uppercase(),
+ "level".to_uppercase(),
+ ]
+ .join(ENV_VAR_SEP),
+ Some("info"),
+ ),
+ (
+ // http_options.addr = 127.0.0.1:24000
+ vec![
+ env_prefix.to_string(),
+ "http_options".to_uppercase(),
+ "addr".to_uppercase(),
+ ]
+ .join(ENV_VAR_SEP),
+ Some("127.0.0.1:24000"),
+ ),
+ ],
+ || {
+ let command = StartCommand {
+ config_file: Some(file.path().to_str().unwrap().to_string()),
+ http_addr: Some("127.0.0.1:14000".to_string()),
+ env_prefix: env_prefix.to_string(),
+ ..Default::default()
+ };
+
+ let top_level_opts = TopLevelOptions {
+ log_dir: None,
+ log_level: None,
+ };
+ let Options::Standalone(opts) =
+ command.load_options(top_level_opts).unwrap() else {unreachable!()};
+
+ // Should be read from env, env > default values.
+ assert_eq!(opts.logging.dir, "/other/log/dir");
+
+ // Should be read from config file, config file > env > default values.
+ assert_eq!(opts.logging.level, "debug");
+
+ // Should be read from cli, cli > config file > env > default values.
+ assert_eq!(
+ opts.fe_opts.http_options.as_ref().unwrap().addr,
+ "127.0.0.1:14000"
+ );
+
+ // Should be default value.
+ assert_eq!(
+ opts.fe_opts.grpc_options.unwrap().addr,
+ GrpcOptions::default().addr
+ );
+ },
+ );
+ }
}
diff --git a/src/cmd/src/toml_loader.rs b/src/cmd/src/toml_loader.rs
deleted file mode 100644
index 9d689e50bc3c..000000000000
--- a/src/cmd/src/toml_loader.rs
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-macro_rules! from_file {
- ($path: expr) => {
- toml::from_str(
- &std::fs::read_to_string($path)
- .context(crate::error::ReadConfigSnafu { path: $path })?,
- )
- .context(crate::error::ParseConfigSnafu)
- };
-}
-
-pub(crate) use from_file;
-
-#[cfg(test)]
-mod tests {
- use std::fs::File;
- use std::io::Write;
-
- use common_test_util::temp_dir::create_temp_dir;
- use serde::{Deserialize, Serialize};
- use snafu::ResultExt;
-
- use super::*;
- use crate::error::Result;
-
- #[derive(Clone, PartialEq, Debug, Deserialize, Serialize)]
- #[serde(default)]
- struct MockConfig {
- path: String,
- port: u32,
- host: String,
- }
-
- impl Default for MockConfig {
- fn default() -> Self {
- Self {
- path: "test".to_string(),
- port: 0,
- host: "localhost".to_string(),
- }
- }
- }
-
- #[test]
- fn test_from_file() -> Result<()> {
- let config = MockConfig {
- path: "/tmp".to_string(),
- port: 999,
- host: "greptime.test".to_string(),
- };
-
- let dir = create_temp_dir("test_from_file");
- let test_file = format!("{}/test.toml", dir.path().to_str().unwrap());
-
- let s = toml::to_string(&config).unwrap();
- assert!(s.contains("host") && s.contains("path") && s.contains("port"));
-
- let mut file = File::create(&test_file).unwrap();
- file.write_all(s.as_bytes()).unwrap();
-
- let loaded_config: MockConfig = from_file!(&test_file)?;
- assert_eq!(loaded_config, config);
-
- // Only host in file
- let mut file = File::create(&test_file).unwrap();
- file.write_all("host='greptime.test'\n".as_bytes()).unwrap();
-
- let loaded_config: MockConfig = from_file!(&test_file)?;
- assert_eq!(loaded_config.host, "greptime.test");
- assert_eq!(loaded_config.port, 0);
- assert_eq!(loaded_config.path, "test");
-
- // Truncate the file.
- let file = File::create(&test_file).unwrap();
- file.set_len(0).unwrap();
- let loaded_config: MockConfig = from_file!(&test_file)?;
- assert_eq!(loaded_config, MockConfig::default());
-
- Ok(())
- }
-}
|
refactor
|
support layered configuration (#1535)
|
c595a56ac89bef78b19a76aa60d8c6bcac7354a5
|
2024-07-16 15:37:21
|
discord9
|
test(flow): ignore flow tests for now (#4377)
| false
|
diff --git a/tests/cases/standalone/common/flow/basic.result b/tests/cases/standalone/common/flow/basic.result
deleted file mode 100644
index 77f3b8683bfe..000000000000
--- a/tests/cases/standalone/common/flow/basic.result
+++ /dev/null
@@ -1,105 +0,0 @@
-CREATE TABLE numbers_input (
- number INT,
- ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
- PRIMARY KEY(number),
- TIME INDEX(ts)
-);
-
-Affected Rows: 0
-
-CREATE FLOW test_numbers
-SINK TO out_num_cnt
-AS
-SELECT sum(number) FROM numbers_input GROUP BY tumble(ts, '1 second', '2021-07-01 00:00:00');
-
-Affected Rows: 0
-
--- SQLNESS SLEEP 500ms
-INSERT INTO numbers_input
-VALUES
- (20, "2021-07-01 00:00:00.200"),
- (22, "2021-07-01 00:00:00.600");
-
-Affected Rows: 2
-
--- SQLNESS SLEEP 3s
-SELECT col_0, window_start, window_end FROM out_num_cnt;
-
-+-------+---------------------+---------------------+
-| col_0 | window_start | window_end |
-+-------+---------------------+---------------------+
-| 42 | 2021-07-01T00:00:00 | 2021-07-01T00:00:01 |
-+-------+---------------------+---------------------+
-
--- SQLNESS SLEEP 500ms
-INSERT INTO numbers_input
-VALUES
- (23,"2021-07-01 00:00:01.000"),
- (24,"2021-07-01 00:00:01.500");
-
-Affected Rows: 2
-
--- SQLNESS SLEEP 3s
-SELECT col_0, window_start, window_end FROM out_num_cnt;
-
-+-------+---------------------+---------------------+
-| col_0 | window_start | window_end |
-+-------+---------------------+---------------------+
-| 42 | 2021-07-01T00:00:00 | 2021-07-01T00:00:01 |
-| 47 | 2021-07-01T00:00:01 | 2021-07-01T00:00:02 |
-+-------+---------------------+---------------------+
-
-DROP FLOW test_numbers;
-
-Affected Rows: 0
-
-DROP TABLE numbers_input;
-
-Affected Rows: 0
-
-DROP TABLE out_num_cnt;
-
-Affected Rows: 0
-
--- test interprete interval
-CREATE TABLE numbers_input (
- number INT,
- ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
- PRIMARY KEY(number),
- TIME INDEX(ts)
-);
-
-Affected Rows: 0
-
-create table out_num_cnt (
- number INT,
- ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP TIME INDEX);
-
-Affected Rows: 0
-
-CREATE FLOW filter_numbers SINK TO out_num_cnt AS SELECT INTERVAL '1 day 1 second', INTERVAL '1 month 1 day 1 second', INTERVAL '1 year 1 month' FROM numbers_input where number > 10;
-
-Affected Rows: 0
-
-SHOW CREATE FLOW filter_numbers;
-
-+----------------+----------------------------------------------------------------------------------------------------------------------------------------+
-| Flow | Create Flow |
-+----------------+----------------------------------------------------------------------------------------------------------------------------------------+
-| filter_numbers | CREATE OR REPLACE FLOW IF NOT EXISTS filter_numbers |
-| | SINK TO out_num_cnt |
-| | AS SELECT INTERVAL '1 day 1 second', INTERVAL '1 month 1 day 1 second', INTERVAL '1 year 1 month' FROM numbers_input WHERE number > 10 |
-+----------------+----------------------------------------------------------------------------------------------------------------------------------------+
-
-drop flow filter_numbers;
-
-Affected Rows: 0
-
-drop table out_num_cnt;
-
-Affected Rows: 0
-
-drop table numbers_input;
-
-Affected Rows: 0
-
diff --git a/tests/cases/standalone/common/flow/basic.sql b/tests/cases/standalone/common/flow/basic.sql
deleted file mode 100644
index ddef6cff9f5d..000000000000
--- a/tests/cases/standalone/common/flow/basic.sql
+++ /dev/null
@@ -1,55 +0,0 @@
-CREATE TABLE numbers_input (
- number INT,
- ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
- PRIMARY KEY(number),
- TIME INDEX(ts)
-);
-
-CREATE FLOW test_numbers
-SINK TO out_num_cnt
-AS
-SELECT sum(number) FROM numbers_input GROUP BY tumble(ts, '1 second', '2021-07-01 00:00:00');
-
--- SQLNESS SLEEP 500ms
-INSERT INTO numbers_input
-VALUES
- (20, "2021-07-01 00:00:00.200"),
- (22, "2021-07-01 00:00:00.600");
-
--- SQLNESS SLEEP 3s
-SELECT col_0, window_start, window_end FROM out_num_cnt;
-
--- SQLNESS SLEEP 500ms
-INSERT INTO numbers_input
-VALUES
- (23,"2021-07-01 00:00:01.000"),
- (24,"2021-07-01 00:00:01.500");
-
--- SQLNESS SLEEP 3s
-SELECT col_0, window_start, window_end FROM out_num_cnt;
-
-DROP FLOW test_numbers;
-DROP TABLE numbers_input;
-DROP TABLE out_num_cnt;
-
--- test interprete interval
-
-CREATE TABLE numbers_input (
- number INT,
- ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
- PRIMARY KEY(number),
- TIME INDEX(ts)
-);
-create table out_num_cnt (
- number INT,
- ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP TIME INDEX);
-
-CREATE FLOW filter_numbers SINK TO out_num_cnt AS SELECT INTERVAL '1 day 1 second', INTERVAL '1 month 1 day 1 second', INTERVAL '1 year 1 month' FROM numbers_input where number > 10;
-
-SHOW CREATE FLOW filter_numbers;
-
-drop flow filter_numbers;
-
-drop table out_num_cnt;
-
-drop table numbers_input;
diff --git a/tests/cases/standalone/common/flow/df_func.result b/tests/cases/standalone/common/flow/df_func.result
deleted file mode 100644
index 9f209a33da20..000000000000
--- a/tests/cases/standalone/common/flow/df_func.result
+++ /dev/null
@@ -1,258 +0,0 @@
-CREATE TABLE numbers_input_df_func (
- number INT,
- ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
- PRIMARY KEY(number),
- TIME INDEX(ts)
-);
-
-Affected Rows: 0
-
--- call `sum(abs(number))` where `abs` is DataFusion Function and `sum` is flow function
-CREATE FLOW test_numbers_df_func
-SINK TO out_num_cnt_df_func
-AS
-SELECT sum(abs(number)) FROM numbers_input_df_func GROUP BY tumble(ts, '1 second', '2021-07-01 00:00:00');
-
-Affected Rows: 0
-
--- SQLNESS SLEEP 500ms
-INSERT INTO numbers_input_df_func
-VALUES
- (-20, "2021-07-01 00:00:00.200"),
- (22, "2021-07-01 00:00:00.600");
-
-Affected Rows: 2
-
--- sleep a little bit longer to make sure that table is created and data is inserted
--- SQLNESS SLEEP 3s
-SELECT col_0, window_start, window_end FROM out_num_cnt_df_func;
-
-+-------+---------------------+---------------------+
-| col_0 | window_start | window_end |
-+-------+---------------------+---------------------+
-| 42 | 2021-07-01T00:00:00 | 2021-07-01T00:00:01 |
-+-------+---------------------+---------------------+
-
--- SQLNESS SLEEP 500ms
-INSERT INTO numbers_input_df_func
-VALUES
- (23,"2021-07-01 00:00:01.000"),
- (-24,"2021-07-01 00:00:01.500");
-
-Affected Rows: 2
-
--- SQLNESS SLEEP 3s
-SELECT col_0, window_start, window_end FROM out_num_cnt_df_func;
-
-+-------+---------------------+---------------------+
-| col_0 | window_start | window_end |
-+-------+---------------------+---------------------+
-| 42 | 2021-07-01T00:00:00 | 2021-07-01T00:00:01 |
-| 47 | 2021-07-01T00:00:01 | 2021-07-01T00:00:02 |
-+-------+---------------------+---------------------+
-
-DROP FLOW test_numbers_df_func;
-
-Affected Rows: 0
-
-DROP TABLE numbers_input_df_func;
-
-Affected Rows: 0
-
-DROP TABLE out_num_cnt_df_func;
-
-Affected Rows: 0
-
-CREATE TABLE numbers_input_df_func (
- number INT,
- ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
- PRIMARY KEY(number),
- TIME INDEX(ts)
-);
-
-Affected Rows: 0
-
--- call `abs(sum(number))`to make sure that calling `abs` function(impl by datafusion) on `sum` function(impl by flow) is working
-CREATE FLOW test_numbers_df_func
-SINK TO out_num_cnt_df_func
-AS
-SELECT abs(sum(number)) FROM numbers_input_df_func GROUP BY tumble(ts, '1 second', '2021-07-01 00:00:00');
-
-Affected Rows: 0
-
--- SQLNESS SLEEP 500ms
-INSERT INTO numbers_input_df_func
-VALUES
- (-20, "2021-07-01 00:00:00.200"),
- (22, "2021-07-01 00:00:00.600");
-
-Affected Rows: 2
-
--- sleep a little bit longer to make sure that table is created and data is inserted
--- SQLNESS SLEEP 3s
-SELECT col_0, window_start, window_end FROM out_num_cnt_df_func;
-
-+-------+---------------------+---------------------+
-| col_0 | window_start | window_end |
-+-------+---------------------+---------------------+
-| 2 | 2021-07-01T00:00:00 | 2021-07-01T00:00:01 |
-+-------+---------------------+---------------------+
-
--- SQLNESS SLEEP 500ms
-INSERT INTO numbers_input_df_func
-VALUES
- (23,"2021-07-01 00:00:01.000"),
- (-24,"2021-07-01 00:00:01.500");
-
-Affected Rows: 2
-
--- SQLNESS SLEEP 3s
-SELECT col_0, window_start, window_end FROM out_num_cnt_df_func;
-
-+-------+---------------------+---------------------+
-| col_0 | window_start | window_end |
-+-------+---------------------+---------------------+
-| 2 | 2021-07-01T00:00:00 | 2021-07-01T00:00:01 |
-| 1 | 2021-07-01T00:00:01 | 2021-07-01T00:00:02 |
-+-------+---------------------+---------------------+
-
-DROP FLOW test_numbers_df_func;
-
-Affected Rows: 0
-
-DROP TABLE numbers_input_df_func;
-
-Affected Rows: 0
-
-DROP TABLE out_num_cnt_df_func;
-
-Affected Rows: 0
-
--- test date_bin
-CREATE TABLE numbers_input (
- number INT,
- ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
- PRIMARY KEY(number),
- TIME INDEX(ts)
-);
-
-Affected Rows: 0
-
-CREATE FLOW test_numbers
-SINK TO out_num_cnt
-AS
-SELECT max(number) - min(number), date_bin(INTERVAL '1 second', ts, '2021-07-01 00:00:00'::TimestampNanosecond) FROM numbers_input GROUP BY date_bin(INTERVAL '1 second', ts, '2021-07-01 00:00:00'::TimestampNanosecond);
-
-Affected Rows: 0
-
--- SQLNESS SLEEP 500ms
-INSERT INTO numbers_input
-VALUES
- (20, "2021-07-01 00:00:00.200"),
- (22, "2021-07-01 00:00:00.600");
-
-Affected Rows: 2
-
--- SQLNESS SLEEP 3s
-SELECT col_0, col_1 FROM out_num_cnt;
-
-+-------+---------------------+
-| col_0 | col_1 |
-+-------+---------------------+
-| 2 | 2021-07-01T00:00:00 |
-+-------+---------------------+
-
--- SQLNESS SLEEP 500ms
-INSERT INTO numbers_input
-VALUES
- (23,"2021-07-01 00:00:01.000"),
- (24,"2021-07-01 00:00:01.500");
-
-Affected Rows: 2
-
--- SQLNESS SLEEP 3s
-SELECT col_0, col_1 FROM out_num_cnt;
-
-+-------+---------------------+
-| col_0 | col_1 |
-+-------+---------------------+
-| 2 | 2021-07-01T00:00:00 |
-| 1 | 2021-07-01T00:00:01 |
-+-------+---------------------+
-
-DROP FLOW test_numbers;
-
-Affected Rows: 0
-
-DROP TABLE numbers_input;
-
-Affected Rows: 0
-
-DROP TABLE out_num_cnt;
-
-Affected Rows: 0
-
--- test date_trunc
-CREATE TABLE numbers_input (
- number INT,
- ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
- PRIMARY KEY(number),
- TIME INDEX(ts)
-);
-
-Affected Rows: 0
-
-CREATE FLOW test_numbers
-SINK TO out_num_cnt
-AS
-SELECT date_trunc('second', ts), sum(number) FROM numbers_input GROUP BY date_trunc('second', ts);
-
-Affected Rows: 0
-
--- SQLNESS SLEEP 500ms
-INSERT INTO numbers_input
-VALUES
- (20, "2021-07-01 00:00:00.200"),
- (22, "2021-07-01 00:00:00.600");
-
-Affected Rows: 2
-
--- SQLNESS SLEEP 3s
-SELECT col_0, col_1 FROM out_num_cnt;
-
-+---------------------+-------+
-| col_0 | col_1 |
-+---------------------+-------+
-| 2021-07-01T00:00:00 | 42 |
-+---------------------+-------+
-
--- SQLNESS SLEEP 500ms
-INSERT INTO numbers_input
-VALUES
- (23,"2021-07-01 00:00:01.000"),
- (24,"2021-07-01 00:00:01.500");
-
-Affected Rows: 2
-
--- SQLNESS SLEEP 3s
-SELECT col_0, col_1 FROM out_num_cnt;
-
-+---------------------+-------+
-| col_0 | col_1 |
-+---------------------+-------+
-| 2021-07-01T00:00:00 | 42 |
-| 2021-07-01T00:00:01 | 47 |
-+---------------------+-------+
-
-DROP FLOW test_numbers;
-
-Affected Rows: 0
-
-DROP TABLE numbers_input;
-
-Affected Rows: 0
-
-DROP TABLE out_num_cnt;
-
-Affected Rows: 0
-
diff --git a/tests/cases/standalone/common/flow/df_func.sql b/tests/cases/standalone/common/flow/df_func.sql
deleted file mode 100644
index b3c035a120f1..000000000000
--- a/tests/cases/standalone/common/flow/df_func.sql
+++ /dev/null
@@ -1,142 +0,0 @@
-CREATE TABLE numbers_input_df_func (
- number INT,
- ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
- PRIMARY KEY(number),
- TIME INDEX(ts)
-);
-
--- call `sum(abs(number))` where `abs` is DataFusion Function and `sum` is flow function
-CREATE FLOW test_numbers_df_func
-SINK TO out_num_cnt_df_func
-AS
-SELECT sum(abs(number)) FROM numbers_input_df_func GROUP BY tumble(ts, '1 second', '2021-07-01 00:00:00');
-
--- SQLNESS SLEEP 500ms
-INSERT INTO numbers_input_df_func
-VALUES
- (-20, "2021-07-01 00:00:00.200"),
- (22, "2021-07-01 00:00:00.600");
-
--- sleep a little bit longer to make sure that table is created and data is inserted
--- SQLNESS SLEEP 3s
-SELECT col_0, window_start, window_end FROM out_num_cnt_df_func;
-
--- SQLNESS SLEEP 500ms
-INSERT INTO numbers_input_df_func
-VALUES
- (23,"2021-07-01 00:00:01.000"),
- (-24,"2021-07-01 00:00:01.500");
-
--- SQLNESS SLEEP 3s
-SELECT col_0, window_start, window_end FROM out_num_cnt_df_func;
-
-DROP FLOW test_numbers_df_func;
-DROP TABLE numbers_input_df_func;
-DROP TABLE out_num_cnt_df_func;
-
-CREATE TABLE numbers_input_df_func (
- number INT,
- ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
- PRIMARY KEY(number),
- TIME INDEX(ts)
-);
-
--- call `abs(sum(number))`to make sure that calling `abs` function(impl by datafusion) on `sum` function(impl by flow) is working
-CREATE FLOW test_numbers_df_func
-SINK TO out_num_cnt_df_func
-AS
-SELECT abs(sum(number)) FROM numbers_input_df_func GROUP BY tumble(ts, '1 second', '2021-07-01 00:00:00');
-
--- SQLNESS SLEEP 500ms
-INSERT INTO numbers_input_df_func
-VALUES
- (-20, "2021-07-01 00:00:00.200"),
- (22, "2021-07-01 00:00:00.600");
-
--- sleep a little bit longer to make sure that table is created and data is inserted
--- SQLNESS SLEEP 3s
-SELECT col_0, window_start, window_end FROM out_num_cnt_df_func;
-
--- SQLNESS SLEEP 500ms
-INSERT INTO numbers_input_df_func
-VALUES
- (23,"2021-07-01 00:00:01.000"),
- (-24,"2021-07-01 00:00:01.500");
-
--- SQLNESS SLEEP 3s
-SELECT col_0, window_start, window_end FROM out_num_cnt_df_func;
-
-DROP FLOW test_numbers_df_func;
-DROP TABLE numbers_input_df_func;
-DROP TABLE out_num_cnt_df_func;
-
--- test date_bin
-CREATE TABLE numbers_input (
- number INT,
- ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
- PRIMARY KEY(number),
- TIME INDEX(ts)
-);
-
-CREATE FLOW test_numbers
-SINK TO out_num_cnt
-AS
-SELECT max(number) - min(number), date_bin(INTERVAL '1 second', ts, '2021-07-01 00:00:00'::TimestampNanosecond) FROM numbers_input GROUP BY date_bin(INTERVAL '1 second', ts, '2021-07-01 00:00:00'::TimestampNanosecond);
-
--- SQLNESS SLEEP 500ms
-INSERT INTO numbers_input
-VALUES
- (20, "2021-07-01 00:00:00.200"),
- (22, "2021-07-01 00:00:00.600");
-
--- SQLNESS SLEEP 3s
-SELECT col_0, col_1 FROM out_num_cnt;
-
--- SQLNESS SLEEP 500ms
-INSERT INTO numbers_input
-VALUES
- (23,"2021-07-01 00:00:01.000"),
- (24,"2021-07-01 00:00:01.500");
-
--- SQLNESS SLEEP 3s
-SELECT col_0, col_1 FROM out_num_cnt;
-
-DROP FLOW test_numbers;
-DROP TABLE numbers_input;
-DROP TABLE out_num_cnt;
-
-
--- test date_trunc
-CREATE TABLE numbers_input (
- number INT,
- ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
- PRIMARY KEY(number),
- TIME INDEX(ts)
-);
-
-CREATE FLOW test_numbers
-SINK TO out_num_cnt
-AS
-SELECT date_trunc('second', ts), sum(number) FROM numbers_input GROUP BY date_trunc('second', ts);
-
--- SQLNESS SLEEP 500ms
-INSERT INTO numbers_input
-VALUES
- (20, "2021-07-01 00:00:00.200"),
- (22, "2021-07-01 00:00:00.600");
-
--- SQLNESS SLEEP 3s
-SELECT col_0, col_1 FROM out_num_cnt;
-
--- SQLNESS SLEEP 500ms
-INSERT INTO numbers_input
-VALUES
- (23,"2021-07-01 00:00:01.000"),
- (24,"2021-07-01 00:00:01.500");
-
--- SQLNESS SLEEP 3s
-SELECT col_0, col_1 FROM out_num_cnt;
-
-DROP FLOW test_numbers;
-DROP TABLE numbers_input;
-DROP TABLE out_num_cnt;
diff --git a/tests/cases/standalone/optimizer/last_value.result b/tests/cases/standalone/optimizer/last_value.result
index 6824916d9b7c..aa9163f1d01d 100644
--- a/tests/cases/standalone/optimizer/last_value.result
+++ b/tests/cases/standalone/optimizer/last_value.result
@@ -20,10 +20,13 @@ insert into t values
Affected Rows: 9
+-- SQLNESS REPLACE (-+) -
+-- SQLNESS REPLACE (\s\s+) _
+-- SQLNESS REPLACE (elapsed_compute.*) REDACTED
+-- SQLNESS REPLACE (peers.*) REDACTED
+-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED
-- SQLNESS REPLACE (metrics.*) REDACTED
-- SQLNESS REPLACE (partitioning.*) REDACTED
--- SQLNESS REPLACE \-+
--- SQLNESS REPLACE (\s\s+) _
explain analyze
select
last_value(host order by ts),
@@ -32,10 +35,10 @@ explain analyze
from t
group by host;
-++++
++-+-+-+
| stage | node | plan_|
-++++
-| 0_| 0_|_MergeScanExec: peers=[5695126634496(1326, 0), ] REDACTED
++-+-+-+
+| 0_| 0_|_MergeScanExec: REDACTED
|_|_|_|
| 1_| 0_|_ProjectionExec: expr=[last_value(t.host) ORDER BY [t.ts ASC NULLS LAST]@1 as last_value(t.host) ORDER BY [t.ts ASC NULLS LAST], last_value(t.not_pk) ORDER BY [t.ts ASC NULLS LAST]@2 as last_value(t.not_pk) ORDER BY [t.ts ASC NULLS LAST], last_value(t.val) ORDER BY [t.ts ASC NULLS LAST]@3 as last_value(t.val) ORDER BY [t.ts ASC NULLS LAST]] REDACTED
|_|_|_AggregateExec: mode=FinalPartitioned, gby=[host@0 as host], aggr=[last_value(t.host) ORDER BY [t.ts ASC NULLS LAST], last_value(t.not_pk) ORDER BY [t.ts ASC NULLS LAST], last_value(t.val) ORDER BY [t.ts ASC NULLS LAST]] REDACTED
@@ -47,7 +50,7 @@ explain analyze
|_|_|_SeqScan: partition_count=1 (1 memtable ranges, 0 file ranges), selector=LastRow REDACTED
|_|_|_|
|_|_| Total rows: 4_|
-++++
++-+-+-+
drop table t;
diff --git a/tests/cases/standalone/optimizer/last_value.sql b/tests/cases/standalone/optimizer/last_value.sql
index 3b34c4de38b9..903551d3b967 100644
--- a/tests/cases/standalone/optimizer/last_value.sql
+++ b/tests/cases/standalone/optimizer/last_value.sql
@@ -16,10 +16,13 @@ insert into t values
(7, 'c', '🌔', 8.0),
(8, 'd', '🌕', 9.0);
+-- SQLNESS REPLACE (-+) -
+-- SQLNESS REPLACE (\s\s+) _
+-- SQLNESS REPLACE (elapsed_compute.*) REDACTED
+-- SQLNESS REPLACE (peers.*) REDACTED
+-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED
-- SQLNESS REPLACE (metrics.*) REDACTED
-- SQLNESS REPLACE (partitioning.*) REDACTED
--- SQLNESS REPLACE \-+
--- SQLNESS REPLACE (\s\s+) _
explain analyze
select
last_value(host order by ts),
|
test
|
ignore flow tests for now (#4377)
|
5af87baeb0b2fed644bde99a79d47fc79fa3d87f
|
2024-04-16 12:04:41
|
Lei, HUANG
|
feat: add `filter_deleted` option to avoid removing deletion markers (#3707)
| false
|
diff --git a/src/mito2/src/engine.rs b/src/mito2/src/engine.rs
index 54df1b1b6e32..955aa9c7ee4b 100644
--- a/src/mito2/src/engine.rs
+++ b/src/mito2/src/engine.rs
@@ -31,6 +31,8 @@ mod create_test;
#[cfg(test)]
mod drop_test;
#[cfg(test)]
+mod filter_deleted_test;
+#[cfg(test)]
mod flush_test;
#[cfg(any(test, feature = "test"))]
pub mod listener;
diff --git a/src/mito2/src/engine/append_mode_test.rs b/src/mito2/src/engine/append_mode_test.rs
index 05d7dad1d67b..d1fc41739034 100644
--- a/src/mito2/src/engine/append_mode_test.rs
+++ b/src/mito2/src/engine/append_mode_test.rs
@@ -16,14 +16,12 @@
use api::v1::Rows;
use common_recordbatch::RecordBatches;
-use datatypes::arrow::compute::{self, SortColumn};
-use datatypes::arrow::record_batch::RecordBatch;
-use datatypes::arrow::util::pretty;
use store_api::region_engine::RegionEngine;
use store_api::region_request::{RegionCompactRequest, RegionRequest};
use store_api::storage::{RegionId, ScanRequest};
use crate::config::MitoConfig;
+use crate::test_util::batch_util::sort_batches_and_print;
use crate::test_util::{
build_rows, build_rows_for_key, flush_region, put_rows, reopen_region, rows_schema,
CreateRequestBuilder, TestEnv,
@@ -191,31 +189,3 @@ async fn test_append_mode_compaction() {
let batches = RecordBatches::try_collect(stream).await.unwrap();
assert_eq!(expected, sort_batches_and_print(&batches, &["tag_0", "ts"]));
}
-
-/// Sorts `batches` by column `names`.
-fn sort_batches_and_print(batches: &RecordBatches, names: &[&str]) -> String {
- let schema = batches.schema();
- let record_batches = batches.iter().map(|batch| batch.df_record_batch());
- let record_batch = compute::concat_batches(schema.arrow_schema(), record_batches).unwrap();
- let columns: Vec<_> = names
- .iter()
- .map(|name| {
- let array = record_batch.column_by_name(name).unwrap();
- SortColumn {
- values: array.clone(),
- options: None,
- }
- })
- .collect();
- let indices = compute::lexsort_to_indices(&columns, None).unwrap();
- let columns = record_batch
- .columns()
- .iter()
- .map(|array| compute::take(&array, &indices, None).unwrap())
- .collect();
- let record_batch = RecordBatch::try_new(record_batch.schema(), columns).unwrap();
-
- pretty::pretty_format_batches(&[record_batch])
- .unwrap()
- .to_string()
-}
diff --git a/src/mito2/src/engine/filter_deleted_test.rs b/src/mito2/src/engine/filter_deleted_test.rs
new file mode 100644
index 000000000000..c3c35f9ba0c8
--- /dev/null
+++ b/src/mito2/src/engine/filter_deleted_test.rs
@@ -0,0 +1,102 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use api::v1::Rows;
+use common_recordbatch::RecordBatches;
+use store_api::region_engine::RegionEngine;
+use store_api::region_request::RegionRequest;
+use store_api::storage::{RegionId, ScanRequest};
+
+use crate::config::MitoConfig;
+use crate::test_util::batch_util::sort_batches_and_print;
+use crate::test_util::{
+ build_rows, delete_rows, flush_region, put_rows, rows_schema, CreateRequestBuilder, TestEnv,
+};
+
+#[tokio::test]
+async fn test_scan_without_filtering_deleted() {
+ common_telemetry::init_default_ut_logging();
+
+ let mut env = TestEnv::new();
+ let engine = env.create_engine(MitoConfig::default()).await;
+
+ let region_id = RegionId::new(1, 1);
+ let request = CreateRequestBuilder::new()
+ .insert_option("compaction.type", "twcs")
+ .insert_option("compaction.twcs.max_active_window_files", "10")
+ .build();
+
+ let column_schemas = rows_schema(&request);
+ engine
+ .handle_request(region_id, RegionRequest::Create(request))
+ .await
+ .unwrap();
+
+ // put 1, 2, 3, 4 and flush
+ put_rows(
+ &engine,
+ region_id,
+ Rows {
+ schema: column_schemas.clone(),
+ rows: build_rows(1, 5),
+ },
+ )
+ .await;
+ flush_region(&engine, region_id, None).await;
+
+ // delete 2, 3 and flush
+ delete_rows(
+ &engine,
+ region_id,
+ Rows {
+ schema: column_schemas.clone(),
+ rows: build_rows(2, 4),
+ },
+ )
+ .await;
+ flush_region(&engine, region_id, None).await;
+
+ // scan
+ let request = ScanRequest::default();
+ let stream = engine.handle_query(region_id, request).await.unwrap();
+ let batches = RecordBatches::try_collect(stream).await.unwrap();
+ let expected = "\
++-------+---------+---------------------+
+| tag_0 | field_0 | ts |
++-------+---------+---------------------+
+| 1 | 1.0 | 1970-01-01T00:00:01 |
+| 4 | 4.0 | 1970-01-01T00:00:04 |
++-------+---------+---------------------+";
+ assert_eq!(expected, sort_batches_and_print(&batches, &["tag_0", "ts"]));
+
+ // Tries to use seq scan to test it under append mode.
+ let scan = engine
+ .scan_region(region_id, ScanRequest::default())
+ .unwrap();
+
+ let seq_scan = scan.scan_without_filter_deleted().unwrap();
+
+ let stream = seq_scan.build_stream().await.unwrap();
+ let batches = RecordBatches::try_collect(stream).await.unwrap();
+ let expected = "\
++-------+---------+---------------------+
+| tag_0 | field_0 | ts |
++-------+---------+---------------------+
+| 1 | 1.0 | 1970-01-01T00:00:01 |
+| 2 | 2.0 | 1970-01-01T00:00:02 |
+| 3 | 3.0 | 1970-01-01T00:00:03 |
+| 4 | 4.0 | 1970-01-01T00:00:04 |
++-------+---------+---------------------+";
+ assert_eq!(expected, batches.pretty_print().unwrap());
+}
diff --git a/src/mito2/src/read/merge.rs b/src/mito2/src/read/merge.rs
index ca758d28253b..f8ba26064576 100644
--- a/src/mito2/src/read/merge.rs
+++ b/src/mito2/src/read/merge.rs
@@ -51,6 +51,8 @@ pub struct MergeReader {
output_batch: Option<Batch>,
/// Remove duplicate timestamps.
dedup: bool,
+ /// Remove deletion markers
+ filter_deleted: bool,
/// Local metrics.
metrics: Metrics,
}
@@ -101,7 +103,11 @@ impl Drop for MergeReader {
impl MergeReader {
/// Creates and initializes a new [MergeReader].
- pub async fn new(sources: Vec<Source>, dedup: bool) -> Result<MergeReader> {
+ pub async fn new(
+ sources: Vec<Source>,
+ dedup: bool,
+ filter_deleted: bool,
+ ) -> Result<MergeReader> {
let start = Instant::now();
let mut metrics = Metrics::default();
@@ -115,11 +121,15 @@ impl MergeReader {
}
}
+ // If dedup is false, we don't expect delete happens and we skip filtering deletion markers.
+ let filter_deleted = filter_deleted && dedup;
+
let mut reader = MergeReader {
hot,
cold,
output_batch: None,
dedup,
+ filter_deleted,
metrics,
};
// Initializes the reader.
@@ -154,7 +164,12 @@ impl MergeReader {
let mut hottest = self.hot.pop().unwrap();
let batch = hottest.fetch_batch(&mut self.metrics).await?;
- Self::maybe_output_batch(batch, &mut self.output_batch, self.dedup, &mut self.metrics)?;
+ Self::maybe_output_batch(
+ batch,
+ &mut self.output_batch,
+ self.filter_deleted,
+ &mut self.metrics,
+ )?;
self.reheap(hottest)
}
@@ -188,7 +203,7 @@ impl MergeReader {
Self::maybe_output_batch(
top.slice(0, pos),
&mut self.output_batch,
- self.dedup,
+ self.filter_deleted,
&mut self.metrics,
)?;
top_node.skip_rows(pos, &mut self.metrics).await?;
@@ -203,7 +218,7 @@ impl MergeReader {
Self::maybe_output_batch(
top.slice(0, duplicate_pos),
&mut self.output_batch,
- self.dedup,
+ self.filter_deleted,
&mut self.metrics,
)?;
// This keep the duplicate timestamp in the node.
@@ -228,7 +243,7 @@ impl MergeReader {
Self::maybe_output_batch(
top.slice(0, output_end),
&mut self.output_batch,
- self.dedup,
+ self.filter_deleted,
&mut self.metrics,
)?;
top_node.skip_rows(output_end, &mut self.metrics).await?;
@@ -318,21 +333,20 @@ impl MergeReader {
Ok(())
}
- /// Removeds deleted entries and sets the `batch` to the `output_batch`.
+ /// If `filter_deleted` is set to true, removes deleted entries and sets the `batch` to the `output_batch`.
///
/// Ignores the `batch` if it is empty.
fn maybe_output_batch(
mut batch: Batch,
output_batch: &mut Option<Batch>,
- dedup: bool,
+ filter_deleted: bool,
metrics: &mut Metrics,
) -> Result<()> {
debug_assert!(output_batch.is_none());
let num_rows = batch.num_rows();
- // If dedup is false, we don't expect delete happens and we skip checking whether there
- // is any deleted entry.
- if dedup {
+
+ if filter_deleted {
batch.filter_deleted()?;
}
// Update deleted rows metrics.
@@ -354,6 +368,8 @@ pub struct MergeReaderBuilder {
sources: Vec<Source>,
/// Remove duplicate timestamps. Default is true.
dedup: bool,
+ /// Remove deletion markers.
+ filter_deleted: bool,
}
impl MergeReaderBuilder {
@@ -363,8 +379,16 @@ impl MergeReaderBuilder {
}
/// Creates a builder from sources.
- pub fn from_sources(sources: Vec<Source>, dedup: bool) -> MergeReaderBuilder {
- MergeReaderBuilder { sources, dedup }
+ pub fn from_sources(
+ sources: Vec<Source>,
+ dedup: bool,
+ filter_deleted: bool,
+ ) -> MergeReaderBuilder {
+ MergeReaderBuilder {
+ sources,
+ dedup,
+ filter_deleted,
+ }
}
/// Pushes a batch reader to sources.
@@ -382,7 +406,7 @@ impl MergeReaderBuilder {
/// Builds and initializes the reader, then resets the builder.
pub async fn build(&mut self) -> Result<MergeReader> {
let sources = mem::take(&mut self.sources);
- MergeReader::new(sources, self.dedup).await
+ MergeReader::new(sources, self.dedup, self.filter_deleted).await
}
}
@@ -391,6 +415,7 @@ impl Default for MergeReaderBuilder {
MergeReaderBuilder {
sources: Vec::new(),
dedup: true,
+ filter_deleted: true,
}
}
}
@@ -964,7 +989,7 @@ mod tests {
Source::Reader(Box::new(reader1)),
Source::Iter(Box::new(reader2)),
];
- let mut reader = MergeReaderBuilder::from_sources(sources, false)
+ let mut reader = MergeReaderBuilder::from_sources(sources, false, true)
.build()
.await
.unwrap();
diff --git a/src/mito2/src/read/scan_region.rs b/src/mito2/src/read/scan_region.rs
index 111c737d5e8e..0ba6290c6950 100644
--- a/src/mito2/src/read/scan_region.rs
+++ b/src/mito2/src/read/scan_region.rs
@@ -204,7 +204,7 @@ impl ScanRegion {
/// Scan sequentially.
pub(crate) fn seq_scan(self) -> Result<SeqScan> {
- let input = self.scan_input()?;
+ let input = self.scan_input(true)?;
let seq_scan = SeqScan::new(input);
Ok(seq_scan)
@@ -212,14 +212,21 @@ impl ScanRegion {
/// Unordered scan.
pub(crate) fn unordered_scan(self) -> Result<UnorderedScan> {
- let input = self.scan_input()?;
+ let input = self.scan_input(true)?;
let scan = UnorderedScan::new(input);
Ok(scan)
}
+ #[cfg(test)]
+ pub(crate) fn scan_without_filter_deleted(self) -> Result<SeqScan> {
+ let input = self.scan_input(false)?;
+ let scan = SeqScan::new(input);
+ Ok(scan)
+ }
+
/// Creates a scan input.
- fn scan_input(self) -> Result<ScanInput> {
+ fn scan_input(self, filter_deleted: bool) -> Result<ScanInput> {
let time_range = self.build_time_range_predicate();
let ssts = &self.version.ssts;
@@ -278,7 +285,8 @@ impl ScanRegion {
.with_index_applier(index_applier)
.with_parallelism(self.parallelism)
.with_start_time(self.start_time)
- .with_append_mode(self.version.options.append_mode);
+ .with_append_mode(self.version.options.append_mode)
+ .with_filter_deleted(filter_deleted);
Ok(input)
}
@@ -383,6 +391,8 @@ pub(crate) struct ScanInput {
pub(crate) query_start: Option<Instant>,
/// The region is using append mode.
pub(crate) append_mode: bool,
+ /// Whether to remove deletion markers.
+ pub(crate) filter_deleted: bool,
}
impl ScanInput {
@@ -402,6 +412,7 @@ impl ScanInput {
index_applier: None,
query_start: None,
append_mode: false,
+ filter_deleted: true,
}
}
@@ -474,6 +485,14 @@ impl ScanInput {
self
}
+ /// Sets whether to remove deletion markers during scan.
+ #[allow(unused)]
+ #[must_use]
+ pub(crate) fn with_filter_deleted(mut self, filter_deleted: bool) -> Self {
+ self.filter_deleted = filter_deleted;
+ self
+ }
+
/// Builds and returns sources to read.
pub(crate) async fn build_sources(&self) -> Result<Vec<Source>> {
let mut sources = Vec::with_capacity(self.memtables.len() + self.files.len());
diff --git a/src/mito2/src/read/seq_scan.rs b/src/mito2/src/read/seq_scan.rs
index e77097dc42fc..2277a8df32f9 100644
--- a/src/mito2/src/read/seq_scan.rs
+++ b/src/mito2/src/read/seq_scan.rs
@@ -105,7 +105,8 @@ impl SeqScan {
// Scans all memtables and SSTs. Builds a merge reader to merge results.
let sources = self.input.build_sources().await?;
let dedup = !self.input.append_mode;
- let mut builder = MergeReaderBuilder::from_sources(sources, dedup);
+ let mut builder =
+ MergeReaderBuilder::from_sources(sources, dedup, self.input.filter_deleted);
let reader = builder.build().await?;
Ok(Box::new(reader))
}
@@ -114,7 +115,8 @@ impl SeqScan {
async fn build_parallel_reader(&self) -> Result<BoxedBatchReader> {
let sources = self.input.build_parallel_sources().await?;
let dedup = !self.input.append_mode;
- let mut builder = MergeReaderBuilder::from_sources(sources, dedup);
+ let mut builder =
+ MergeReaderBuilder::from_sources(sources, dedup, self.input.filter_deleted);
let reader = builder.build().await?;
Ok(Box::new(reader))
}
diff --git a/src/mito2/src/test_util.rs b/src/mito2/src/test_util.rs
index 5efa9dae8919..59debe15ac45 100644
--- a/src/mito2/src/test_util.rs
+++ b/src/mito2/src/test_util.rs
@@ -14,6 +14,7 @@
//! Utilities for testing.
+pub mod batch_util;
pub mod memtable_util;
pub mod meta_util;
pub mod scheduler_util;
diff --git a/src/mito2/src/test_util/batch_util.rs b/src/mito2/src/test_util/batch_util.rs
new file mode 100644
index 000000000000..3c4e89b98f47
--- /dev/null
+++ b/src/mito2/src/test_util/batch_util.rs
@@ -0,0 +1,47 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use common_recordbatch::RecordBatches;
+use datatypes::arrow::array::RecordBatch;
+use datatypes::arrow::compute;
+use datatypes::arrow::compute::SortColumn;
+use datatypes::arrow::util::pretty;
+
+/// Sorts `batches` by column `names`.
+pub fn sort_batches_and_print(batches: &RecordBatches, names: &[&str]) -> String {
+ let schema = batches.schema();
+ let record_batches = batches.iter().map(|batch| batch.df_record_batch());
+ let record_batch = compute::concat_batches(schema.arrow_schema(), record_batches).unwrap();
+ let columns: Vec<_> = names
+ .iter()
+ .map(|name| {
+ let array = record_batch.column_by_name(name).unwrap();
+ SortColumn {
+ values: array.clone(),
+ options: None,
+ }
+ })
+ .collect();
+ let indices = compute::lexsort_to_indices(&columns, None).unwrap();
+ let columns = record_batch
+ .columns()
+ .iter()
+ .map(|array| compute::take(&array, &indices, None).unwrap())
+ .collect();
+ let record_batch = RecordBatch::try_new(record_batch.schema(), columns).unwrap();
+
+ pretty::pretty_format_batches(&[record_batch])
+ .unwrap()
+ .to_string()
+}
|
feat
|
add `filter_deleted` option to avoid removing deletion markers (#3707)
|
dfd91a1bf829216f812c0569607186747033e176
|
2023-03-10 22:41:23
|
Yingwen
|
chore: Bump version to 0.1.1 (#1155)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index d4cb5f736b99..b668be39da68 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -135,7 +135,7 @@ checksum = "8f1f8f5a6f3d50d89e3797d7593a50f96bb2aaa20ca0cc7be1fb673232c91d72"
[[package]]
name = "api"
-version = "0.1.0"
+version = "0.1.1"
dependencies = [
"arrow-flight",
"common-base",
@@ -752,7 +752,7 @@ dependencies = [
[[package]]
name = "benchmarks"
-version = "0.1.0"
+version = "0.1.1"
dependencies = [
"arrow",
"clap 4.1.8",
@@ -1086,7 +1086,7 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
[[package]]
name = "catalog"
-version = "0.1.0"
+version = "0.1.1"
dependencies = [
"api",
"arc-swap",
@@ -1337,7 +1337,7 @@ dependencies = [
[[package]]
name = "client"
-version = "0.1.0"
+version = "0.1.1"
dependencies = [
"api",
"arrow-flight",
@@ -1360,7 +1360,7 @@ dependencies = [
"prost",
"rand",
"snafu",
- "substrait 0.1.0",
+ "substrait 0.1.1",
"substrait 0.4.1",
"tokio",
"tonic",
@@ -1390,7 +1390,7 @@ dependencies = [
[[package]]
name = "cmd"
-version = "0.1.0"
+version = "0.1.1"
dependencies = [
"anymap",
"build-data",
@@ -1418,7 +1418,7 @@ dependencies = [
"servers",
"session",
"snafu",
- "substrait 0.1.0",
+ "substrait 0.1.1",
"tikv-jemalloc-ctl",
"tikv-jemallocator",
"tokio",
@@ -1454,7 +1454,7 @@ checksum = "55b672471b4e9f9e95499ea597ff64941a309b2cdbffcc46f2cc5e2d971fd335"
[[package]]
name = "common-base"
-version = "0.1.0"
+version = "0.1.1"
dependencies = [
"anymap",
"bitvec",
@@ -1468,7 +1468,7 @@ dependencies = [
[[package]]
name = "common-catalog"
-version = "0.1.0"
+version = "0.1.1"
dependencies = [
"async-trait",
"chrono",
@@ -1485,7 +1485,7 @@ dependencies = [
[[package]]
name = "common-error"
-version = "0.1.0"
+version = "0.1.1"
dependencies = [
"snafu",
"strum",
@@ -1493,7 +1493,7 @@ dependencies = [
[[package]]
name = "common-function"
-version = "0.1.0"
+version = "0.1.1"
dependencies = [
"arc-swap",
"chrono-tz",
@@ -1516,7 +1516,7 @@ dependencies = [
[[package]]
name = "common-function-macro"
-version = "0.1.0"
+version = "0.1.1"
dependencies = [
"arc-swap",
"common-query",
@@ -1530,7 +1530,7 @@ dependencies = [
[[package]]
name = "common-grpc"
-version = "0.1.0"
+version = "0.1.1"
dependencies = [
"api",
"arrow-flight",
@@ -1556,7 +1556,7 @@ dependencies = [
[[package]]
name = "common-grpc-expr"
-version = "0.1.0"
+version = "0.1.1"
dependencies = [
"api",
"async-trait",
@@ -1574,7 +1574,7 @@ dependencies = [
[[package]]
name = "common-mem-prof"
-version = "0.1.0"
+version = "0.1.1"
dependencies = [
"common-error",
"snafu",
@@ -1587,7 +1587,7 @@ dependencies = [
[[package]]
name = "common-procedure"
-version = "0.1.0"
+version = "0.1.1"
dependencies = [
"async-trait",
"common-error",
@@ -1607,7 +1607,7 @@ dependencies = [
[[package]]
name = "common-query"
-version = "0.1.0"
+version = "0.1.1"
dependencies = [
"async-trait",
"common-base",
@@ -1625,7 +1625,7 @@ dependencies = [
[[package]]
name = "common-recordbatch"
-version = "0.1.0"
+version = "0.1.1"
dependencies = [
"common-error",
"datafusion",
@@ -1641,7 +1641,7 @@ dependencies = [
[[package]]
name = "common-runtime"
-version = "0.1.0"
+version = "0.1.1"
dependencies = [
"common-error",
"common-telemetry",
@@ -1655,7 +1655,7 @@ dependencies = [
[[package]]
name = "common-telemetry"
-version = "0.1.0"
+version = "0.1.1"
dependencies = [
"backtrace",
"common-error",
@@ -1677,14 +1677,14 @@ dependencies = [
[[package]]
name = "common-test-util"
-version = "0.1.0"
+version = "0.1.1"
dependencies = [
"tempfile",
]
[[package]]
name = "common-time"
-version = "0.1.0"
+version = "0.1.1"
dependencies = [
"chrono",
"common-error",
@@ -2246,7 +2246,7 @@ dependencies = [
[[package]]
name = "datanode"
-version = "0.1.0"
+version = "0.1.1"
dependencies = [
"api",
"async-compat",
@@ -2297,7 +2297,7 @@ dependencies = [
"sql",
"storage",
"store-api",
- "substrait 0.1.0",
+ "substrait 0.1.1",
"table",
"table-procedure",
"tokio",
@@ -2311,7 +2311,7 @@ dependencies = [
[[package]]
name = "datatypes"
-version = "0.1.0"
+version = "0.1.1"
dependencies = [
"arrow",
"arrow-schema",
@@ -2759,7 +2759,7 @@ dependencies = [
[[package]]
name = "frontend"
-version = "0.1.0"
+version = "0.1.1"
dependencies = [
"api",
"async-stream",
@@ -2801,7 +2801,7 @@ dependencies = [
"sql",
"store-api",
"strfmt",
- "substrait 0.1.0",
+ "substrait 0.1.1",
"table",
"tokio",
"toml",
@@ -3709,7 +3709,7 @@ dependencies = [
[[package]]
name = "log-store"
-version = "0.1.0"
+version = "0.1.1"
dependencies = [
"arc-swap",
"async-stream",
@@ -3952,7 +3952,7 @@ dependencies = [
[[package]]
name = "meta-client"
-version = "0.1.0"
+version = "0.1.1"
dependencies = [
"api",
"async-trait",
@@ -3979,7 +3979,7 @@ dependencies = [
[[package]]
name = "meta-srv"
-version = "0.1.0"
+version = "0.1.1"
dependencies = [
"anymap",
"api",
@@ -4114,7 +4114,7 @@ dependencies = [
[[package]]
name = "mito"
-version = "0.1.0"
+version = "0.1.1"
dependencies = [
"anymap",
"arc-swap",
@@ -4517,7 +4517,7 @@ dependencies = [
[[package]]
name = "object-store"
-version = "0.1.0"
+version = "0.1.1"
dependencies = [
"anyhow",
"async-trait",
@@ -4839,7 +4839,7 @@ dependencies = [
[[package]]
name = "partition"
-version = "0.1.0"
+version = "0.1.1"
dependencies = [
"common-catalog",
"common-error",
@@ -5360,7 +5360,7 @@ dependencies = [
[[package]]
name = "promql"
-version = "0.1.0"
+version = "0.1.1"
dependencies = [
"async-recursion",
"async-trait",
@@ -5592,7 +5592,7 @@ dependencies = [
[[package]]
name = "query"
-version = "0.1.0"
+version = "0.1.1"
dependencies = [
"approx_eq",
"arc-swap",
@@ -6605,7 +6605,7 @@ checksum = "1792db035ce95be60c3f8853017b3999209281c24e2ba5bc8e59bf97a0c590c1"
[[package]]
name = "script"
-version = "0.1.0"
+version = "0.1.1"
dependencies = [
"arrow",
"async-trait",
@@ -6835,7 +6835,7 @@ dependencies = [
[[package]]
name = "servers"
-version = "0.1.0"
+version = "0.1.1"
dependencies = [
"aide",
"api",
@@ -6911,7 +6911,7 @@ dependencies = [
[[package]]
name = "session"
-version = "0.1.0"
+version = "0.1.1"
dependencies = [
"arc-swap",
"common-catalog",
@@ -7148,7 +7148,7 @@ dependencies = [
[[package]]
name = "sql"
-version = "0.1.0"
+version = "0.1.1"
dependencies = [
"api",
"catalog",
@@ -7183,7 +7183,7 @@ dependencies = [
[[package]]
name = "sqlness-runner"
-version = "0.1.0"
+version = "0.1.1"
dependencies = [
"async-trait",
"client",
@@ -7261,7 +7261,7 @@ dependencies = [
[[package]]
name = "storage"
-version = "0.1.0"
+version = "0.1.1"
dependencies = [
"arc-swap",
"arrow",
@@ -7309,7 +7309,7 @@ dependencies = [
[[package]]
name = "store-api"
-version = "0.1.0"
+version = "0.1.1"
dependencies = [
"async-stream",
"async-trait",
@@ -7441,7 +7441,7 @@ dependencies = [
[[package]]
name = "substrait"
-version = "0.1.0"
+version = "0.1.1"
dependencies = [
"async-recursion",
"async-trait",
@@ -7535,7 +7535,7 @@ dependencies = [
[[package]]
name = "table"
-version = "0.1.0"
+version = "0.1.1"
dependencies = [
"anymap",
"async-trait",
@@ -7570,7 +7570,7 @@ dependencies = [
[[package]]
name = "table-procedure"
-version = "0.1.0"
+version = "0.1.1"
dependencies = [
"async-trait",
"catalog",
@@ -7652,7 +7652,7 @@ dependencies = [
[[package]]
name = "tests-integration"
-version = "0.1.0"
+version = "0.1.1"
dependencies = [
"api",
"axum",
diff --git a/Cargo.toml b/Cargo.toml
index 01eb2ad28938..b1ee1d0e6258 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -45,7 +45,7 @@ members = [
]
[workspace.package]
-version = "0.1.0"
+version = "0.1.1"
edition = "2021"
license = "Apache-2.0"
|
chore
|
Bump version to 0.1.1 (#1155)
|
e5f4ca2dab7fae0b0682efce397a3f469a68f6a7
|
2023-08-22 08:24:05
|
niebayes
|
feat: streaming do_get (#2171)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index bba4a449988a..aaa49b2af13a 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -9241,6 +9241,7 @@ dependencies = [
"common-error",
"common-grpc",
"common-query",
+ "common-recordbatch",
"common-time",
"serde",
"sqlness",
diff --git a/src/client/src/database.rs b/src/client/src/database.rs
index 5bd4fc79e1c3..90228ba55987 100644
--- a/src/client/src/database.rs
+++ b/src/client/src/database.rs
@@ -21,16 +21,19 @@ use api::v1::{
DropTableExpr, FlushTableExpr, GreptimeRequest, InsertRequests, PromRangeQuery, QueryRequest,
RequestHeader, TruncateTableExpr,
};
-use arrow_flight::{FlightData, Ticket};
+use arrow_flight::Ticket;
+use async_stream::stream;
use common_error::ext::{BoxedError, ErrorExt};
-use common_grpc::flight::{flight_messages_to_recordbatches, FlightDecoder, FlightMessage};
+use common_grpc::flight::{FlightDecoder, FlightMessage};
use common_query::Output;
+use common_recordbatch::error::ExternalSnafu;
+use common_recordbatch::RecordBatchStreamAdaptor;
use common_telemetry::{logging, timer};
-use futures_util::{TryFutureExt, TryStreamExt};
+use futures_util::StreamExt;
use prost::Message;
use snafu::{ensure, ResultExt};
-use crate::error::{ConvertFlightDataSnafu, IllegalFlightMessagesSnafu, ServerSnafu};
+use crate::error::{ConvertFlightDataSnafu, Error, IllegalFlightMessagesSnafu, ServerSnafu};
use crate::{error, from_grpc_response, metrics, Client, Result, StreamInserter};
#[derive(Clone, Debug, Default)]
@@ -283,55 +286,81 @@ impl Database {
let mut client = self.client.make_flight_client()?;
- let flight_data: Vec<FlightData> = client
- .mut_inner()
- .do_get(request)
- .and_then(|response| response.into_inner().try_collect())
- .await
- .map_err(|e| {
- let tonic_code = e.code();
- let e: error::Error = e.into();
- let code = e.status_code();
- let msg = e.to_string();
- ServerSnafu { code, msg }
- .fail::<()>()
- .map_err(BoxedError::new)
- .context(error::FlightGetSnafu {
- tonic_code,
- addr: client.addr(),
- })
- .map_err(|error| {
- logging::error!(
- "Failed to do Flight get, addr: {}, code: {}, source: {}",
- client.addr(),
- tonic_code,
- error
- );
- error
- })
- .unwrap_err()
- })?;
-
- let decoder = &mut FlightDecoder::default();
- let flight_messages = flight_data
- .into_iter()
- .map(|x| decoder.try_decode(x).context(ConvertFlightDataSnafu))
- .collect::<Result<Vec<_>>>()?;
-
- let output = if let Some(FlightMessage::AffectedRows(rows)) = flight_messages.get(0) {
- ensure!(
- flight_messages.len() == 1,
- IllegalFlightMessagesSnafu {
- reason: "Expect 'AffectedRows' Flight messages to be one and only!"
- }
+ let response = client.mut_inner().do_get(request).await.map_err(|e| {
+ let tonic_code = e.code();
+ let e: error::Error = e.into();
+ let code = e.status_code();
+ let msg = e.to_string();
+ let error = Error::FlightGet {
+ tonic_code,
+ addr: client.addr().to_string(),
+ source: BoxedError::new(ServerSnafu { code, msg }.build()),
+ };
+ logging::error!(
+ "Failed to do Flight get, addr: {}, code: {}, source: {}",
+ client.addr(),
+ tonic_code,
+ error
);
- Output::AffectedRows(*rows)
- } else {
- let recordbatches = flight_messages_to_recordbatches(flight_messages)
- .context(ConvertFlightDataSnafu)?;
- Output::RecordBatches(recordbatches)
+ error
+ })?;
+
+ let flight_data_stream = response.into_inner();
+ let mut decoder = FlightDecoder::default();
+
+ let mut flight_message_stream = flight_data_stream.map(move |flight_data| {
+ flight_data
+ .map_err(Error::from)
+ .and_then(|data| decoder.try_decode(data).context(ConvertFlightDataSnafu))
+ });
+
+ let Some(first_flight_message) = flight_message_stream.next().await else {
+ return IllegalFlightMessagesSnafu {
+ reason: "Expect the response not to be empty",
+ }
+ .fail();
};
- Ok(output)
+
+ let first_flight_message = first_flight_message?;
+
+ match first_flight_message {
+ FlightMessage::AffectedRows(rows) => {
+ ensure!(
+ flight_message_stream.next().await.is_none(),
+ IllegalFlightMessagesSnafu {
+ reason: "Expect 'AffectedRows' Flight messages to be the one and the only!"
+ }
+ );
+ Ok(Output::AffectedRows(rows))
+ }
+ FlightMessage::Recordbatch(_) => IllegalFlightMessagesSnafu {
+ reason: "The first flight message cannot be a RecordBatch message",
+ }
+ .fail(),
+ FlightMessage::Schema(schema) => {
+ let stream = Box::pin(stream!({
+ while let Some(flight_message) = flight_message_stream.next().await {
+ let flight_message = flight_message
+ .map_err(BoxedError::new)
+ .context(ExternalSnafu)?;
+ let FlightMessage::Recordbatch(record_batch) = flight_message else {
+ yield IllegalFlightMessagesSnafu {reason: "A Schema message must be succeeded exclusively by a set of RecordBatch messages"}
+ .fail()
+ .map_err(BoxedError::new)
+ .context(ExternalSnafu);
+ break;
+ };
+ yield Ok(record_batch);
+ }
+ }));
+ let record_batch_stream = RecordBatchStreamAdaptor {
+ schema,
+ stream,
+ output_ordering: None,
+ };
+ Ok(Output::Stream(Box::pin(record_batch_stream)))
+ }
+ }
}
}
diff --git a/src/common/recordbatch/src/adapter.rs b/src/common/recordbatch/src/adapter.rs
index 93fe9a3a5b3c..6c113562cddf 100644
--- a/src/common/recordbatch/src/adapter.rs
+++ b/src/common/recordbatch/src/adapter.rs
@@ -34,13 +34,8 @@ use crate::{
SendableRecordBatchStream, Stream,
};
-type FutureStream = Pin<
- Box<
- dyn std::future::Future<
- Output = std::result::Result<DfSendableRecordBatchStream, DataFusionError>,
- > + Send,
- >,
->;
+type FutureStream =
+ Pin<Box<dyn std::future::Future<Output = Result<SendableRecordBatchStream>> + Send>>;
/// ParquetRecordBatchStream -> DataFusion RecordBatchStream
pub struct ParquetRecordBatchStreamAdapter<T> {
@@ -223,7 +218,7 @@ impl Stream for RecordBatchStreamAdapter {
enum AsyncRecordBatchStreamAdapterState {
Uninit(FutureStream),
- Ready(DfSendableRecordBatchStream),
+ Ready(SendableRecordBatchStream),
Failed,
}
@@ -261,17 +256,12 @@ impl Stream for AsyncRecordBatchStreamAdapter {
}
Err(e) => {
self.state = AsyncRecordBatchStreamAdapterState::Failed;
- return Poll::Ready(Some(
- Err(e).context(error::InitRecordbatchStreamSnafu),
- ));
+ return Poll::Ready(Some(Err(e)));
}
};
}
AsyncRecordBatchStreamAdapterState::Ready(stream) => {
- return Poll::Ready(ready!(Pin::new(stream).poll_next(cx)).map(|x| {
- let df_record_batch = x.context(error::PollStreamSnafu)?;
- RecordBatch::try_from_df_record_batch(self.schema(), df_record_batch)
- }))
+ return Poll::Ready(ready!(Pin::new(stream).poll_next(cx)))
}
AsyncRecordBatchStreamAdapterState::Failed => return Poll::Ready(None),
}
@@ -330,12 +320,7 @@ mod test {
) -> FutureStream {
Box::pin(async move {
maybe_recordbatches
- .map(|items| {
- Box::pin(DfRecordBatchStreamAdapter::new(Box::pin(
- MaybeErrorRecordBatchStream { items },
- ))) as _
- })
- .map_err(|e| DataFusionError::External(Box::new(e)))
+ .map(|items| Box::pin(MaybeErrorRecordBatchStream { items }) as _)
})
}
@@ -372,7 +357,7 @@ mod test {
let result = RecordBatches::try_collect(Box::pin(adapter)).await;
assert_eq!(
result.unwrap_err().to_string(),
- "Failed to poll stream, source: External error: External error, source: Unknown"
+ "External error, source: Unknown",
);
let failed_to_init_stream =
@@ -382,7 +367,7 @@ mod test {
let result = RecordBatches::try_collect(Box::pin(adapter)).await;
assert_eq!(
result.unwrap_err().to_string(),
- "Failed to init Recordbatch stream, source: External error: External error, source: Internal"
+ "External error, source: Internal",
);
}
}
diff --git a/src/frontend/src/table.rs b/src/frontend/src/table.rs
index 281dcdab26ad..f6cad01041d7 100644
--- a/src/frontend/src/table.rs
+++ b/src/frontend/src/table.rs
@@ -13,7 +13,6 @@
// limitations under the License.
use std::any::Any;
-use std::pin::Pin;
use std::sync::Arc;
use async_trait::async_trait;
@@ -23,30 +22,24 @@ use common_meta::table_name::TableName;
use common_query::error::Result as QueryResult;
use common_query::logical_plan::Expr;
use common_query::physical_plan::{PhysicalPlan, PhysicalPlanRef};
+use common_query::Output;
use common_recordbatch::adapter::AsyncRecordBatchStreamAdapter;
use common_recordbatch::error::{
- InitRecordbatchStreamSnafu, PollStreamSnafu, Result as RecordBatchResult,
-};
-use common_recordbatch::{
- RecordBatch, RecordBatchStreamAdaptor, RecordBatches, SendableRecordBatchStream,
+ ExternalSnafu as RecordBatchExternalSnafu, Result as RecordBatchResult,
};
+use common_recordbatch::{RecordBatchStreamAdaptor, SendableRecordBatchStream};
use datafusion::execution::context::TaskContext;
-use datafusion::physical_plan::{
- Partitioning, SendableRecordBatchStream as DfSendableRecordBatchStream,
-};
-use datafusion_common::DataFusionError;
+use datafusion::physical_plan::Partitioning;
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
-use futures_util::{Stream, StreamExt};
+use futures_util::StreamExt;
use snafu::prelude::*;
use store_api::storage::ScanRequest;
use table::error::TableOperationSnafu;
use table::metadata::{FilterPushDownType, TableInfoRef, TableType};
use table::requests::{DeleteRequest, InsertRequest};
use table::Table;
-use tokio::sync::RwLock;
use crate::catalog::FrontendCatalogManager;
-use crate::error::Result;
use crate::instance::distributed::deleter::DistDeleter;
use crate::instance::distributed::inserter::DistInserter;
use crate::table::scan::{DatanodeInstance, TableScanPlan};
@@ -132,35 +125,26 @@ impl Table for DistTable {
projection: request.projection.clone(),
filters: request.filters.clone(),
limit: request.limit,
- batches: Arc::new(RwLock::new(None)),
}));
}
- let schema = project_schema(self.schema(), request.projection.as_ref());
- let schema_to_move = schema.clone();
- let stream: Pin<Box<dyn Stream<Item = RecordBatchResult<RecordBatch>> + Send>> = Box::pin(
- async_stream::try_stream! {
- for partition_exec in partition_execs {
- partition_exec
- .maybe_init()
- .await
- .map_err(|e| DataFusionError::External(Box::new(e)))
- .context(InitRecordbatchStreamSnafu)?;
- let mut stream = partition_exec.as_stream().await.context(InitRecordbatchStreamSnafu)?;
-
- while let Some(batch) = stream.next().await{
- yield RecordBatch::try_from_df_record_batch(schema_to_move.clone(),batch.context(PollStreamSnafu)?)?
- }
+ let stream = Box::pin(async_stream::stream!({
+ for partition_exec in partition_execs {
+ let mut stream = partition_exec.scan_to_stream().await?;
+ while let Some(record_batch) = stream.next().await {
+ yield record_batch;
}
- },
- );
- let record_batch_stream = RecordBatchStreamAdaptor {
+ }
+ }));
+
+ let schema = project_schema(self.schema(), request.projection.as_ref());
+ let stream = RecordBatchStreamAdaptor {
schema,
stream,
output_ordering: None,
};
- Ok(Box::pin(record_batch_stream))
+ Ok(Box::pin(stream))
}
fn supports_filters_pushdown(
@@ -245,12 +229,7 @@ impl PhysicalPlan for DistTableScan {
_context: Arc<TaskContext>,
) -> QueryResult<SendableRecordBatchStream> {
let exec = self.partition_execs[partition].clone();
- let stream = Box::pin(async move {
- exec.maybe_init()
- .await
- .map_err(|e| DataFusionError::External(Box::new(e)))?;
- exec.as_stream().await
- });
+ let stream = Box::pin(async move { exec.scan_to_stream().await });
let stream = AsyncRecordBatchStreamAdapter::new(self.schema(), stream);
Ok(Box::pin(stream))
}
@@ -263,38 +242,29 @@ struct PartitionExec {
projection: Option<Vec<usize>>,
filters: Vec<Expr>,
limit: Option<usize>,
- batches: Arc<RwLock<Option<RecordBatches>>>,
}
impl PartitionExec {
- async fn maybe_init(&self) -> Result<()> {
- if self.batches.read().await.is_some() {
- return Ok(());
- }
-
- let mut batches = self.batches.write().await;
- if batches.is_some() {
- return Ok(());
- }
-
+ async fn scan_to_stream(&self) -> RecordBatchResult<SendableRecordBatchStream> {
let plan: TableScanPlan = TableScanPlan {
table_name: self.table_name.clone(),
projection: self.projection.clone(),
filters: self.filters.clone(),
limit: self.limit,
};
- let result = self.datanode_instance.grpc_table_scan(plan).await?;
- let _ = batches.insert(result);
- Ok(())
- }
- /// Notice: the record batch will be consumed.
- async fn as_stream(&self) -> std::result::Result<DfSendableRecordBatchStream, DataFusionError> {
- let mut batches = self.batches.write().await;
- Ok(batches
- .take()
- .expect("should have been initialized in \"maybe_init\"")
- .into_df_stream())
+ let output = self
+ .datanode_instance
+ .grpc_table_scan(plan)
+ .await
+ .map_err(BoxedError::new)
+ .context(RecordBatchExternalSnafu)?;
+
+ let Output::Stream(stream) = output else {
+ unreachable!()
+ };
+
+ Ok(stream)
}
}
diff --git a/src/frontend/src/table/scan.rs b/src/frontend/src/table/scan.rs
index 979eb6ca6402..d149ef2805f0 100644
--- a/src/frontend/src/table/scan.rs
+++ b/src/frontend/src/table/scan.rs
@@ -19,7 +19,6 @@ use client::Database;
use common_meta::table_name::TableName;
use common_query::prelude::Expr;
use common_query::Output;
-use common_recordbatch::RecordBatches;
use datafusion::datasource::DefaultTableSource;
use datafusion_expr::{LogicalPlan, LogicalPlanBuilder};
use snafu::ResultExt;
@@ -46,22 +45,17 @@ impl DatanodeInstance {
Self { table, db }
}
- pub(crate) async fn grpc_table_scan(&self, plan: TableScanPlan) -> Result<RecordBatches> {
+ pub(crate) async fn grpc_table_scan(&self, plan: TableScanPlan) -> Result<Output> {
let logical_plan = self.build_logical_plan(&plan)?;
let substrait_plan = DFLogicalSubstraitConvertor
.encode(&logical_plan)
.context(error::EncodeSubstraitLogicalPlanSnafu)?;
- let result = self
- .db
+ self.db
.logical_plan(substrait_plan.to_vec(), None)
.await
- .context(error::RequestDatanodeSnafu)?;
- let Output::RecordBatches(record_batches) = result else {
- unreachable!()
- };
- Ok(record_batches)
+ .context(error::RequestDatanodeSnafu)
}
fn build_logical_plan(&self, table_scan: &TableScanPlan) -> Result<LogicalPlan> {
diff --git a/src/query/src/dist_plan/merge_scan.rs b/src/query/src/dist_plan/merge_scan.rs
index cebfd877f1f1..af53f446b260 100644
--- a/src/query/src/dist_plan/merge_scan.rs
+++ b/src/query/src/dist_plan/merge_scan.rs
@@ -16,7 +16,7 @@ use std::any::Any;
use std::sync::Arc;
use arrow_schema::{Schema as ArrowSchema, SchemaRef as ArrowSchemaRef};
-use async_stream::try_stream;
+use async_stream::stream;
use client::client_manager::DatanodeClients;
use client::Database;
use common_base::bytes::Bytes;
@@ -149,11 +149,14 @@ impl MergeScanExec {
let trace_id = context.task_id().and_then(|id| id.parse().ok());
let metric = MergeScanMetric::new(&self.metric);
- let stream = try_stream! {
+ let stream = Box::pin(stream!({
+ let _finish_timer = metric.finish_time().timer();
+ let mut ready_timer = metric.ready_time().timer();
+ let mut first_consume_timer = Some(metric.first_consume_time().timer());
+
for peer in peers {
let client = clients.get_client(&peer).await;
let database = Database::new(&table.catalog_name, &table.schema_name, client);
- let _timer = metric.grpc_time().timer();
let output: Output = database
.logical_plan(substrait_plan.clone(), trace_id)
.await
@@ -161,37 +164,34 @@ impl MergeScanExec {
.map_err(BoxedError::new)
.context(ExternalSnafu)?;
- match output {
- Output::AffectedRows(_) => {
- Err(BoxedError::new(
- UnexpectedOutputKindSnafu {
- expected: "RecordBatches or Stream",
- got: "AffectedRows",
- }
- .build(),
- ))
- .context(ExternalSnafu)?;
- }
- Output::RecordBatches(record_batches) => {
- for batch in record_batches.into_iter() {
- metric.record_output_batch_rows(batch.num_rows());
- yield Self::remove_metadata_from_record_batch(batch);
- }
+ let Output::Stream(mut stream) = output else {
+ yield UnexpectedOutputKindSnafu {
+ expected: "Stream",
+ got: "RecordBatches or AffectedRows",
}
- Output::Stream(mut stream) => {
- while let Some(batch) = stream.next().await {
- let batch = batch?;
- metric.record_output_batch_rows(batch.num_rows());
- yield Self::remove_metadata_from_record_batch(batch);
- }
+ .fail()
+ .map_err(BoxedError::new)
+ .context(ExternalSnafu);
+ return;
+ };
+
+ ready_timer.stop();
+
+ while let Some(batch) = stream.next().await {
+ let batch = batch?;
+ metric.record_output_batch_rows(batch.num_rows());
+ yield Ok(Self::remove_metadata_from_record_batch(batch));
+
+ if let Some(first_consume_timer) = first_consume_timer.as_mut().take() {
+ first_consume_timer.stop();
}
}
}
- };
+ }));
Ok(Box::pin(RecordBatchStreamAdaptor {
schema: self.schema.clone(),
- stream: Box::pin(stream),
+ stream,
output_ordering: None,
}))
}
@@ -285,8 +285,12 @@ impl DisplayAs for MergeScanExec {
#[derive(Debug, Clone)]
struct MergeScanMetric {
- /// Nanosecond spent on fetching data from remote
- grpc_time: Time,
+ /// Nanosecond elapsed till the scan operator is ready to emit data
+ ready_time: Time,
+ /// Nanosecond elapsed till the first record batch emitted from the scan operator gets consumed
+ first_consume_time: Time,
+ /// Nanosecond elapsed till the scan operator finished execution
+ finish_time: Time,
/// Count of rows fetched from remote
output_rows: Count,
}
@@ -294,13 +298,23 @@ struct MergeScanMetric {
impl MergeScanMetric {
pub fn new(metric: &ExecutionPlanMetricsSet) -> Self {
Self {
- grpc_time: MetricBuilder::new(metric).subset_time("gRPC", 1),
+ ready_time: MetricBuilder::new(metric).subset_time("ready_time", 1),
+ first_consume_time: MetricBuilder::new(metric).subset_time("first_consume_time", 1),
+ finish_time: MetricBuilder::new(metric).subset_time("finish_time", 1),
output_rows: MetricBuilder::new(metric).output_rows(1),
}
}
- pub fn grpc_time(&self) -> &Time {
- &self.grpc_time
+ pub fn ready_time(&self) -> &Time {
+ &self.ready_time
+ }
+
+ pub fn first_consume_time(&self) -> &Time {
+ &self.first_consume_time
+ }
+
+ pub fn finish_time(&self) -> &Time {
+ &self.finish_time
}
pub fn record_output_batch_rows(&self, num_rows: usize) {
diff --git a/src/servers/src/mysql/writer.rs b/src/servers/src/mysql/writer.rs
index 3a4ac348722e..973d3877ab43 100644
--- a/src/servers/src/mysql/writer.rs
+++ b/src/servers/src/mysql/writer.rs
@@ -14,10 +14,12 @@
use std::ops::Deref;
+use common_error::ext::BoxedError;
use common_query::Output;
-use common_recordbatch::{util, RecordBatch};
+use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use datatypes::prelude::{ConcreteDataType, Value};
use datatypes::schema::SchemaRef;
+use futures::StreamExt;
use metrics::increment_counter;
use opensrv_mysql::{
Column, ColumnFlags, ColumnType, ErrorKind, OkResponse, QueryResultWriter, RowWriter,
@@ -26,7 +28,7 @@ use session::context::QueryContextRef;
use snafu::prelude::*;
use tokio::io::AsyncWrite;
-use crate::error::{self, Error, Result};
+use crate::error::{self, Error, OtherSnafu, Result};
use crate::metrics::*;
/// Try to write multiple output to the writer if possible.
@@ -50,8 +52,8 @@ pub async fn write_output<W: AsyncWrite + Send + Sync + Unpin>(
}
struct QueryResult {
- recordbatches: Vec<RecordBatch>,
schema: SchemaRef,
+ stream: SendableRecordBatchStream,
}
pub struct MysqlResultWriter<'a, W: AsyncWrite + Unpin> {
@@ -80,20 +82,16 @@ impl<'a, W: AsyncWrite + Unpin> MysqlResultWriter<'a, W> {
match output {
Ok(output) => match output {
Output::Stream(stream) => {
- let schema = stream.schema().clone();
- let recordbatches = util::collect(stream)
- .await
- .context(error::CollectRecordbatchSnafu)?;
let query_result = QueryResult {
- recordbatches,
- schema,
+ schema: stream.schema(),
+ stream,
};
Self::write_query_result(query_result, self.writer, self.query_context).await?;
}
Output::RecordBatches(recordbatches) => {
let query_result = QueryResult {
schema: recordbatches.schema(),
- recordbatches: recordbatches.take(),
+ stream: recordbatches.as_stream(),
};
Self::write_query_result(query_result, self.writer, self.query_context).await?;
}
@@ -130,7 +128,7 @@ impl<'a, W: AsyncWrite + Unpin> MysqlResultWriter<'a, W> {
}
async fn write_query_result(
- query_result: QueryResult,
+ mut query_result: QueryResult,
writer: QueryResultWriter<'a, W>,
query_context: QueryContextRef,
) -> Result<()> {
@@ -139,9 +137,20 @@ impl<'a, W: AsyncWrite + Unpin> MysqlResultWriter<'a, W> {
// The RowWriter's lifetime is bound to `column_def` thus we can't use finish_one()
// to return a new QueryResultWriter.
let mut row_writer = writer.start(&column_def).await?;
- for recordbatch in &query_result.recordbatches {
- Self::write_recordbatch(&mut row_writer, recordbatch, query_context.clone())
- .await?;
+ while let Some(record_batch) = query_result.stream.next().await {
+ match record_batch {
+ Ok(record_batch) => {
+ Self::write_recordbatch(
+ &mut row_writer,
+ &record_batch,
+ query_context.clone(),
+ )
+ .await?
+ }
+ Err(e) => {
+ return Err(e).map_err(BoxedError::new).context(OtherSnafu);
+ }
+ }
}
row_writer.finish().await?;
Ok(())
diff --git a/src/table/src/test_util/memtable.rs b/src/table/src/test_util/memtable.rs
index 6df6e6cb237e..e1aa84b5a87d 100644
--- a/src/table/src/test_util/memtable.rs
+++ b/src/table/src/test_util/memtable.rs
@@ -17,6 +17,7 @@ use std::pin::Pin;
use std::sync::Arc;
use async_trait::async_trait;
+use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_error::ext::BoxedError;
use common_recordbatch::error::Result as RecordBatchResult;
use common_recordbatch::{RecordBatch, RecordBatchStream, SendableRecordBatchStream};
@@ -54,8 +55,8 @@ impl MemTable {
table_name,
recordbatch,
1,
- "greptime".to_string(),
- "public".to_string(),
+ DEFAULT_CATALOG_NAME.to_string(),
+ DEFAULT_SCHEMA_NAME.to_string(),
regions,
)
}
diff --git a/tests-integration/tests/grpc.rs b/tests-integration/tests/grpc.rs
index 184e03c3928f..5abe02f086e4 100644
--- a/tests-integration/tests/grpc.rs
+++ b/tests-integration/tests/grpc.rs
@@ -23,6 +23,7 @@ use auth::user_provider_from_option;
use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_catalog::consts::{MIN_USER_TABLE_ID, MITO_ENGINE};
use common_query::Output;
+use common_recordbatch::RecordBatches;
use servers::prometheus::{PromData, PromSeries, PrometheusJsonResponse, PrometheusResponse};
use servers::server::Server;
use tests_integration::test_util::{
@@ -310,14 +311,19 @@ async fn insert_and_assert(db: &Database) {
assert!(matches!(result, Output::AffectedRows(2)));
// select
- let result = db
+ let output = db
.sql("SELECT host, cpu, memory, ts FROM demo")
.await
.unwrap();
- match result {
- Output::RecordBatches(recordbatches) => {
- let pretty = recordbatches.pretty_print().unwrap();
- let expected = "\
+
+ let record_batches = match output {
+ Output::RecordBatches(record_batches) => record_batches,
+ Output::Stream(stream) => RecordBatches::try_collect(stream).await.unwrap(),
+ Output::AffectedRows(_) => unreachable!(),
+ };
+
+ let pretty = record_batches.pretty_print().unwrap();
+ let expected = "\
+-------+------+--------+-------------------------+
| host | cpu | memory | ts |
+-------+------+--------+-------------------------+
@@ -329,10 +335,7 @@ async fn insert_and_assert(db: &Database) {
| host6 | 88.8 | 333.3 | 2022-12-28T04:17:08 |
+-------+------+--------+-------------------------+\
";
- assert_eq!(pretty, expected);
- }
- _ => unreachable!(),
- }
+ assert_eq!(pretty, expected);
}
fn testing_create_expr() -> CreateTableExpr {
diff --git a/tests/runner/Cargo.toml b/tests/runner/Cargo.toml
index 7032fe8052a3..89ae0233a602 100644
--- a/tests/runner/Cargo.toml
+++ b/tests/runner/Cargo.toml
@@ -11,6 +11,7 @@ common-base = { workspace = true }
common-error = { workspace = true }
common-grpc = { workspace = true }
common-query = { workspace = true }
+common-recordbatch = { workspace = true }
common-time = { workspace = true }
serde.workspace = true
sqlness = { version = "0.5" }
diff --git a/tests/runner/src/env.rs b/tests/runner/src/env.rs
index af04eabe9eb6..94463078f284 100644
--- a/tests/runner/src/env.rs
+++ b/tests/runner/src/env.rs
@@ -23,12 +23,14 @@ use std::sync::{Arc, Mutex};
use std::time::Duration;
use async_trait::async_trait;
+use client::error::ServerSnafu;
use client::{
Client, Database as DB, Error as ClientError, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME,
};
use common_error::ext::ErrorExt;
use common_error::snafu::ErrorCompat;
use common_query::Output;
+use common_recordbatch::RecordBatches;
use serde::Serialize;
use sqlness::{Database, EnvController, QueryContext};
use tinytemplate::TinyTemplate;
@@ -358,7 +360,21 @@ impl Database for GreptimeDB {
result: Ok(Output::AffectedRows(0)),
}) as _
} else {
- let result = client.sql(&query).await;
+ let mut result = client.sql(&query).await;
+ if let Ok(Output::Stream(stream)) = result {
+ match RecordBatches::try_collect(stream).await {
+ Ok(recordbatches) => result = Ok(Output::RecordBatches(recordbatches)),
+ Err(e) => {
+ let status_code = e.status_code();
+ let source_error = e.iter_chain().last().unwrap();
+ result = ServerSnafu {
+ code: status_code,
+ msg: source_error.to_string(),
+ }
+ .fail();
+ }
+ }
+ }
Box::new(ResultDisplayer { result }) as _
}
}
|
feat
|
streaming do_get (#2171)
|
69acf32914b1db23ab4da1952fc3790667c06887
|
2023-04-25 07:48:41
|
shuiyisong
|
chore: add `len()` to Bytes and StringBytes (#1455)
| false
|
diff --git a/src/common/base/src/bytes.rs b/src/common/base/src/bytes.rs
index 78a872a5cbf8..96f47588f19f 100644
--- a/src/common/base/src/bytes.rs
+++ b/src/common/base/src/bytes.rs
@@ -20,6 +20,16 @@ use serde::{Deserialize, Deserializer, Serialize, Serializer};
#[derive(Debug, Default, Clone, PartialEq, Eq, PartialOrd, Ord, Deserialize, Serialize)]
pub struct Bytes(bytes::Bytes);
+impl Bytes {
+ pub fn len(&self) -> usize {
+ self.0.len()
+ }
+
+ pub fn is_empty(&self) -> bool {
+ self.0.is_empty()
+ }
+}
+
impl From<Bytes> for bytes::Bytes {
fn from(value: Bytes) -> Self {
value.0
@@ -92,6 +102,14 @@ impl StringBytes {
pub fn as_utf8(&self) -> &str {
unsafe { std::str::from_utf8_unchecked(&self.0) }
}
+
+ pub fn len(&self) -> usize {
+ self.0.len()
+ }
+
+ pub fn is_empty(&self) -> bool {
+ self.0.is_empty()
+ }
}
impl From<String> for StringBytes {
@@ -178,6 +196,17 @@ mod tests {
assert_eq!(world, &bytes);
}
+ #[test]
+ fn test_bytes_len() {
+ let hello = b"hello".to_vec();
+ let bytes = Bytes::from(hello.clone());
+ assert_eq!(bytes.len(), hello.len());
+
+ let zero = b"".to_vec();
+ let bytes = Bytes::from(zero);
+ assert!(bytes.is_empty());
+ }
+
#[test]
fn test_string_bytes_from() {
let hello = "hello".to_string();
@@ -191,6 +220,17 @@ mod tests {
assert_eq!(&bytes, world);
}
+ #[test]
+ fn test_string_bytes_len() {
+ let hello = "hello".to_string();
+ let bytes = StringBytes::from(hello.clone());
+ assert_eq!(bytes.len(), hello.len());
+
+ let zero = "".to_string();
+ let bytes = StringBytes::from(zero);
+ assert!(bytes.is_empty());
+ }
+
fn check_str(expect: &str, given: &str) {
assert_eq!(expect, given);
}
diff --git a/src/promql/src/engine.rs b/src/promql/src/engine.rs
deleted file mode 100644
index d21a421e2207..000000000000
--- a/src/promql/src/engine.rs
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use std::sync::Arc;
-
-use promql_parser::parser::Value;
-
-use crate::error::Result;
-
-mod evaluator;
-mod functions;
-
-pub use evaluator::*;
-
-pub struct Context {}
-
-pub struct Query {}
-
-pub struct Engine {}
-
-impl Engine {
- pub fn exec(_ctx: &Context, _q: Query) -> Result<Arc<dyn Value>> {
- unimplemented!();
- }
-}
|
chore
|
add `len()` to Bytes and StringBytes (#1455)
|
f4e22282a43d0ce2d8026d3c0ac08ffa6d14cef1
|
2022-10-31 09:12:07
|
Yingwen
|
feat: Region supports reading data with different schema (#342)
| false
|
diff --git a/src/storage/src/error.rs b/src/storage/src/error.rs
index 851f0d11b333..7de6a4e0b7db 100644
--- a/src/storage/src/error.rs
+++ b/src/storage/src/error.rs
@@ -177,12 +177,6 @@ pub enum Error {
backtrace: Backtrace,
},
- #[snafu(display("Parquet file schema is invalid, source: {}", source))]
- InvalidParquetSchema {
- #[snafu(backtrace)]
- source: MetadataError,
- },
-
#[snafu(display("Region is under {} state, cannot proceed operation", state))]
InvalidRegionState {
state: &'static str,
@@ -308,6 +302,40 @@ pub enum Error {
#[snafu(backtrace)]
source: MetadataError,
},
+
+ #[snafu(display("Incompatible schema to read, reason: {}", reason))]
+ CompatRead {
+ reason: String,
+ backtrace: Backtrace,
+ },
+
+ #[snafu(display(
+ "Failed to read column {}, could not create default value, source: {}",
+ column,
+ source
+ ))]
+ CreateDefaultToRead {
+ column: String,
+ #[snafu(backtrace)]
+ source: datatypes::error::Error,
+ },
+
+ #[snafu(display("Failed to read column {}, no proper default value for it", column))]
+ NoDefaultToRead {
+ column: String,
+ backtrace: Backtrace,
+ },
+
+ #[snafu(display(
+ "Failed to convert arrow chunk to batch, name: {}, source: {}",
+ name,
+ source
+ ))]
+ ConvertChunk {
+ name: String,
+ #[snafu(backtrace)]
+ source: datatypes::error::Error,
+ },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -334,14 +362,16 @@ impl ErrorExt for Error {
| Cancelled { .. }
| DecodeMetaActionList { .. }
| Readline { .. }
- | InvalidParquetSchema { .. }
| WalDataCorrupted { .. }
| VersionNotFound { .. }
| SequenceNotMonotonic { .. }
| ConvertStoreSchema { .. }
| InvalidRawRegion { .. }
| FilterColumn { .. }
- | AlterMetadata { .. } => StatusCode::Unexpected,
+ | AlterMetadata { .. }
+ | CompatRead { .. }
+ | CreateDefaultToRead { .. }
+ | NoDefaultToRead { .. } => StatusCode::Unexpected,
FlushIo { .. }
| WriteParquet { .. }
@@ -364,6 +394,7 @@ impl ErrorExt for Error {
| ConvertColumnSchema { source, .. } => source.status_code(),
PushBatch { source, .. } => source.status_code(),
AddDefault { source, .. } => source.status_code(),
+ ConvertChunk { source, .. } => source.status_code(),
}
}
diff --git a/src/storage/src/memtable/btree.rs b/src/storage/src/memtable/btree.rs
index 945d4e2df202..efd5fe1cd9c8 100644
--- a/src/storage/src/memtable/btree.rs
+++ b/src/storage/src/memtable/btree.rs
@@ -18,6 +18,7 @@ use crate::memtable::{
BatchIterator, BoxedBatchIterator, IterContext, KeyValues, Memtable, MemtableId, RowOrdering,
};
use crate::read::Batch;
+use crate::schema::compat::ReadAdapter;
use crate::schema::{ProjectedSchema, ProjectedSchemaRef, RegionSchemaRef};
type RwLockMap = RwLock<BTreeMap<InnerKey, RowValue>>;
@@ -69,7 +70,7 @@ impl Memtable for BTreeMemtable {
fn iter(&self, ctx: &IterContext) -> Result<BoxedBatchIterator> {
assert!(ctx.batch_size > 0);
- let iter = BTreeIterator::new(ctx.clone(), self.schema.clone(), self.map.clone());
+ let iter = BTreeIterator::new(ctx.clone(), self.schema.clone(), self.map.clone())?;
Ok(Box::new(iter))
}
@@ -85,6 +86,7 @@ struct BTreeIterator {
schema: RegionSchemaRef,
/// Projected schema that user expect to read.
projected_schema: ProjectedSchemaRef,
+ adapter: ReadAdapter,
map: Arc<RwLockMap>,
last_key: Option<InnerKey>,
}
@@ -103,27 +105,33 @@ impl Iterator for BTreeIterator {
type Item = Result<Batch>;
fn next(&mut self) -> Option<Result<Batch>> {
- self.next_batch().map(Ok)
+ self.next_batch().transpose()
}
}
impl BTreeIterator {
- fn new(ctx: IterContext, schema: RegionSchemaRef, map: Arc<RwLockMap>) -> BTreeIterator {
+ fn new(
+ ctx: IterContext,
+ schema: RegionSchemaRef,
+ map: Arc<RwLockMap>,
+ ) -> Result<BTreeIterator> {
let projected_schema = ctx
.projected_schema
.clone()
.unwrap_or_else(|| Arc::new(ProjectedSchema::no_projection(schema.clone())));
+ let adapter = ReadAdapter::new(schema.store_schema().clone(), projected_schema.clone())?;
- BTreeIterator {
+ Ok(BTreeIterator {
ctx,
schema,
projected_schema,
+ adapter,
map,
last_key: None,
- }
+ })
}
- fn next_batch(&mut self) -> Option<Batch> {
+ fn next_batch(&mut self) -> Result<Option<Batch>> {
let map = self.map.read().unwrap();
let iter = if let Some(last_key) = &self.last_key {
map.range((Bound::Excluded(last_key), Bound::Unbounded))
@@ -139,7 +147,7 @@ impl BTreeIterator {
};
if keys.is_empty() {
- return None;
+ return Ok(None);
}
self.last_key = keys.last().map(|k| {
let mut last_key = (*k).clone();
@@ -151,27 +159,30 @@ impl BTreeIterator {
.schema
.row_key_columns()
.map(|column_meta| column_meta.desc.data_type.clone());
- let key_needed = vec![true; self.schema.num_row_key_columns()];
let value_data_types = self
.schema
.value_columns()
.map(|column_meta| column_meta.desc.data_type.clone());
- let value_needed: Vec<_> = self
- .schema
- .value_columns()
- .map(|column_meta| self.projected_schema.is_needed(column_meta.id()))
- .collect();
- let key_columns = rows_to_vectors(key_data_types, &key_needed, keys.as_slice());
- let value_columns = rows_to_vectors(value_data_types, &value_needed, values.as_slice());
- let batch = self.projected_schema.batch_from_parts(
+ let key_columns = rows_to_vectors(
+ key_data_types,
+ self.adapter.source_key_needed(),
+ keys.as_slice(),
+ );
+ let value_columns = rows_to_vectors(
+ value_data_types,
+ self.adapter.source_value_needed(),
+ values.as_slice(),
+ );
+
+ let batch = self.adapter.batch_from_parts(
key_columns,
value_columns,
Arc::new(sequences),
Arc::new(op_types),
- );
+ )?;
- Some(batch)
+ Ok(Some(batch))
}
}
diff --git a/src/storage/src/region/tests/alter.rs b/src/storage/src/region/tests/alter.rs
index 94923546ac65..6f44f6ac96f7 100644
--- a/src/storage/src/region/tests/alter.rs
+++ b/src/storage/src/region/tests/alter.rs
@@ -1,16 +1,15 @@
use std::sync::Arc;
use common_time::Timestamp;
-use datatypes::prelude::ConcreteDataType;
-use datatypes::prelude::ScalarVector;
-use datatypes::vectors::Int64Vector;
-use datatypes::vectors::TimestampVector;
+use datatypes::prelude::*;
+use datatypes::vectors::{Int64Vector, TimestampVector};
use log_store::fs::log::LocalFileLogStore;
use store_api::storage::PutOperation;
use store_api::storage::WriteRequest;
use store_api::storage::{
- AddColumn, AlterOperation, AlterRequest, ColumnDescriptor, ColumnDescriptorBuilder, ColumnId,
- Region, RegionMeta, SchemaRef, WriteResponse,
+ AddColumn, AlterOperation, AlterRequest, Chunk, ChunkReader, ColumnDescriptor,
+ ColumnDescriptorBuilder, ColumnId, Region, RegionMeta, ScanRequest, SchemaRef, Snapshot,
+ WriteResponse,
};
use tempdir::TempDir;
@@ -38,6 +37,7 @@ struct AlterTester {
base: Option<FileTesterBase>,
}
+#[derive(Debug, Clone, PartialEq)]
struct DataRow {
key: Option<i64>,
ts: Timestamp,
@@ -145,9 +145,59 @@ impl AlterTester {
metadata.version()
}
- async fn full_scan(&self) -> Vec<(i64, Option<i64>)> {
+ async fn full_scan_with_init_schema(&self) -> Vec<(i64, Option<i64>)> {
self.base().full_scan().await
}
+
+ async fn full_scan(&self) -> Vec<DataRow> {
+ let read_ctx = &self.base().read_ctx;
+ let snapshot = self.base().region.snapshot(read_ctx).unwrap();
+
+ let resp = snapshot
+ .scan(read_ctx, ScanRequest::default())
+ .await
+ .unwrap();
+ let mut reader = resp.reader;
+
+ let metadata = self.base().region.in_memory_metadata();
+ assert_eq!(metadata.schema(), reader.schema());
+
+ let mut dst = Vec::new();
+ while let Some(chunk) = reader.next_chunk().await.unwrap() {
+ append_chunk_to(&chunk, &mut dst);
+ }
+
+ dst
+ }
+}
+
+fn append_chunk_to(chunk: &Chunk, dst: &mut Vec<DataRow>) {
+ assert_eq!(4, chunk.columns.len());
+
+ let k0_vector = chunk.columns[0]
+ .as_any()
+ .downcast_ref::<Int64Vector>()
+ .unwrap();
+ let ts_vector = chunk.columns[1]
+ .as_any()
+ .downcast_ref::<TimestampVector>()
+ .unwrap();
+ let v0_vector = chunk.columns[2]
+ .as_any()
+ .downcast_ref::<Int64Vector>()
+ .unwrap();
+ let v1_vector = chunk.columns[3]
+ .as_any()
+ .downcast_ref::<Int64Vector>()
+ .unwrap();
+ for i in 0..k0_vector.len() {
+ dst.push(DataRow::new(
+ k0_vector.get_data(i),
+ ts_vector.get_data(i).unwrap().value(),
+ v0_vector.get_data(i),
+ v1_vector.get_data(i),
+ ));
+ }
}
fn new_column_desc(id: ColumnId, name: &str) -> ColumnDescriptor {
@@ -200,12 +250,8 @@ async fn test_alter_region_with_reopen() {
let mut tester = AlterTester::new(store_dir).await;
let data = vec![(1000, Some(100)), (1001, Some(101)), (1002, Some(102))];
-
tester.put_with_init_schema(&data).await;
- assert_eq!(3, tester.full_scan().await.len());
-
- let schema = tester.schema();
- check_schema_names(&schema, &["timestamp", "v0"]);
+ assert_eq!(3, tester.full_scan_with_init_schema().await.len());
let req = add_column_req(&[
(new_column_desc(4, "k0"), true), // key column k0
@@ -216,6 +262,7 @@ async fn test_alter_region_with_reopen() {
let schema = tester.schema();
check_schema_names(&schema, &["k0", "timestamp", "v0", "v1"]);
+ // Put data after schema altered.
let data = vec![
DataRow::new(Some(10000), 1003, Some(103), Some(201)),
DataRow::new(Some(10001), 1004, Some(104), Some(202)),
@@ -223,14 +270,26 @@ async fn test_alter_region_with_reopen() {
];
tester.put(&data).await;
+ // Scan with new schema before reopen.
+ let mut expect = vec![
+ DataRow::new(None, 1000, Some(100), None),
+ DataRow::new(None, 1001, Some(101), None),
+ DataRow::new(None, 1002, Some(102), None),
+ ];
+ expect.extend_from_slice(&data);
+ let scanned = tester.full_scan().await;
+ assert_eq!(expect, scanned);
+
+ // Reopen and put more data.
tester.reopen().await;
let data = vec![
DataRow::new(Some(10003), 1006, Some(106), Some(204)),
DataRow::new(Some(10004), 1007, Some(107), Some(205)),
DataRow::new(Some(10005), 1008, Some(108), Some(206)),
];
-
tester.put(&data).await;
+ // Extend expected result.
+ expect.extend_from_slice(&data);
// add columns,then remove them without writing data.
let req = add_column_req(&[
@@ -248,8 +307,12 @@ async fn test_alter_region_with_reopen() {
check_schema_names(&schema, &["k0", "timestamp", "v0", "v1"]);
let data = vec![DataRow::new(Some(10006), 1009, Some(109), Some(207))];
-
tester.put(&data).await;
+ expect.extend_from_slice(&data);
+
+ // Scan with new schema after reopen and write.
+ let scanned = tester.full_scan().await;
+ assert_eq!(expect, scanned);
}
#[tokio::test]
@@ -308,11 +371,23 @@ async fn test_put_old_schema_after_alter() {
tester.alter(req).await;
// Put with old schema.
- let data = vec![(1003, Some(103)), (1004, Some(104))];
+ let data = vec![(1005, Some(105)), (1006, Some(106))];
tester.put_with_init_schema(&data).await;
// Put data with old schema directly to the inner writer, to check that the region
// writer could compat the schema of write batch.
let data = vec![(1003, Some(103)), (1004, Some(104))];
tester.put_inner_with_init_schema(&data).await;
+
+ let expect = vec![
+ DataRow::new(None, 1000, Some(100), None),
+ DataRow::new(None, 1001, Some(101), None),
+ DataRow::new(None, 1002, Some(102), None),
+ DataRow::new(None, 1003, Some(103), None),
+ DataRow::new(None, 1004, Some(104), None),
+ DataRow::new(None, 1005, Some(105), None),
+ DataRow::new(None, 1006, Some(106), None),
+ ];
+ let scanned = tester.full_scan().await;
+ assert_eq!(expect, scanned);
}
diff --git a/src/storage/src/schema.rs b/src/storage/src/schema.rs
index fdb3559ec71c..c7c240b79e25 100644
--- a/src/storage/src/schema.rs
+++ b/src/storage/src/schema.rs
@@ -11,32 +11,41 @@ pub use crate::schema::store::{StoreSchema, StoreSchemaRef};
mod tests {
use std::sync::Arc;
- use datatypes::vectors::{Int64Vector, UInt64Vector, UInt8Vector};
+ use datatypes::vectors::{Int64Vector, UInt64Vector, UInt8Vector, VectorRef};
use super::*;
use crate::metadata::RegionMetadata;
use crate::read::Batch;
use crate::test_util::descriptor_util;
+ pub const REGION_NAME: &str = "test";
+
pub(crate) fn new_batch() -> Batch {
+ new_batch_with_num_values(1)
+ }
+
+ pub(crate) fn new_batch_with_num_values(num_value_columns: usize) -> Batch {
let k0 = Int64Vector::from_slice(&[1, 2, 3]);
let timestamp = Int64Vector::from_slice(&[4, 5, 6]);
- let v0 = Int64Vector::from_slice(&[7, 8, 9]);
+ let mut columns: Vec<VectorRef> = vec![Arc::new(k0), Arc::new(timestamp)];
+
+ for i in 0..num_value_columns {
+ let vi = Int64Vector::from_slice(&[i as i64, i as i64, i as i64]);
+ columns.push(Arc::new(vi));
+ }
+
let sequences = UInt64Vector::from_slice(&[100, 100, 100]);
let op_types = UInt8Vector::from_slice(&[0, 0, 0]);
- Batch::new(vec![
- Arc::new(k0),
- Arc::new(timestamp),
- Arc::new(v0),
- Arc::new(sequences),
- Arc::new(op_types),
- ])
+ columns.push(Arc::new(sequences));
+ columns.push(Arc::new(op_types));
+
+ Batch::new(columns)
}
pub(crate) fn new_region_schema(version: u32, num_value_columns: usize) -> RegionSchema {
let metadata: RegionMetadata =
- descriptor_util::desc_with_value_columns("test", num_value_columns)
+ descriptor_util::desc_with_value_columns(REGION_NAME, num_value_columns)
.try_into()
.unwrap();
diff --git a/src/storage/src/schema/compat.rs b/src/storage/src/schema/compat.rs
index 2967a20a5427..e77a3bb9e096 100644
--- a/src/storage/src/schema/compat.rs
+++ b/src/storage/src/schema/compat.rs
@@ -1,8 +1,18 @@
//! Utilities for resolving schema compatibility problems.
+use std::sync::Arc;
+
+use datatypes::arrow::array::Array;
+use datatypes::arrow::chunk::Chunk;
+use datatypes::arrow::datatypes::Field;
use datatypes::schema::SchemaRef;
+use datatypes::vectors::{Helper, VectorRef};
+use snafu::{ensure, OptionExt, ResultExt};
-use crate::error::Result;
+use crate::error::{self, Result};
+use crate::metadata::ColumnMetadata;
+use crate::read::Batch;
+use crate::schema::{ProjectedSchemaRef, StoreSchemaRef};
/// Make schema compatible to write to target with another schema.
pub trait CompatWrite {
@@ -14,3 +24,631 @@ pub trait CompatWrite {
/// If there are columns not in `dest_schema`, an error would be returned.
fn compat_write(&mut self, dest_schema: &SchemaRef) -> Result<()>;
}
+
+/// Checks whether column with `source_column` could be read as a column with `dest_column`.
+///
+/// Returns
+/// - `Ok(true)` if `source_column` is compatible to read using `dest_column` as schema.
+/// - `Ok(false)` if they are considered different columns.
+/// - `Err` if there is incompatible issue that could not be resolved.
+fn is_source_column_compatible(
+ source_column: &ColumnMetadata,
+ dest_column: &ColumnMetadata,
+) -> Result<bool> {
+ ensure!(
+ source_column.name() == dest_column.name(),
+ error::CompatReadSnafu {
+ reason: format!(
+ "try to use column in {} for column {}",
+ source_column.name(),
+ dest_column.name()
+ ),
+ }
+ );
+
+ if source_column.id() != dest_column.id() {
+ return Ok(false);
+ }
+
+ ensure!(
+ source_column.desc.data_type == dest_column.desc.data_type,
+ error::CompatReadSnafu {
+ reason: format!(
+ "could not read column {} from {:?} type as {:?} type",
+ dest_column.name(),
+ source_column.desc.data_type,
+ dest_column.desc.data_type
+ ),
+ }
+ );
+
+ ensure!(
+ dest_column.desc.is_nullable() || !source_column.desc.is_nullable(),
+ error::CompatReadSnafu {
+ reason: format!(
+ "unable to read nullable data for non null column {}",
+ dest_column.name()
+ ),
+ }
+ );
+
+ Ok(true)
+}
+
+/// Adapter to help reading data with source schema as data with dest schema.
+#[derive(Debug)]
+pub struct ReadAdapter {
+ /// Schema of data source.
+ source_schema: StoreSchemaRef,
+ /// Schema user expects to read.
+ dest_schema: ProjectedSchemaRef,
+ /// For each column in dest schema, stores the index in read result for
+ /// this column, or None if the column is not in result.
+ ///
+ /// This vec would be left empty if `source_version == dest_version`.
+ indices_in_result: Vec<Option<usize>>,
+ /// For each column in source schema, stores whether we need to read that column. All
+ /// columns are needed by default.
+ is_source_needed: Vec<bool>,
+}
+
+impl ReadAdapter {
+ /// Creates a new [ReadAdapter] that could convert data with `source_schema` into data
+ /// with `dest_schema`.
+ pub fn new(
+ source_schema: StoreSchemaRef,
+ dest_schema: ProjectedSchemaRef,
+ ) -> Result<ReadAdapter> {
+ if source_schema.version() == dest_schema.schema_to_read().version() {
+ ReadAdapter::from_same_version(source_schema, dest_schema)
+ } else {
+ ReadAdapter::from_different_version(source_schema, dest_schema)
+ }
+ }
+
+ fn from_same_version(
+ source_schema: StoreSchemaRef,
+ dest_schema: ProjectedSchemaRef,
+ ) -> Result<ReadAdapter> {
+ let mut is_source_needed = vec![true; source_schema.num_columns()];
+ if source_schema.num_columns() != dest_schema.schema_to_read().num_columns() {
+            // `dest_schema` might be projected, so we need to find out value columns that will not be read
+ // by the `dest_schema`.
+
+ for (offset, value_column) in source_schema.value_columns().iter().enumerate() {
+ // Iterate value columns in source and mark those not in destination as unneeded.
+ if !dest_schema.is_needed(value_column.id()) {
+ is_source_needed[source_schema.value_column_index_by_offset(offset)] = false;
+ }
+ }
+ }
+
+ Ok(ReadAdapter {
+ source_schema,
+ dest_schema,
+ indices_in_result: Vec::new(),
+ is_source_needed,
+ })
+ }
+
+ fn from_different_version(
+ source_schema: StoreSchemaRef,
+ dest_schema: ProjectedSchemaRef,
+ ) -> Result<ReadAdapter> {
+ let schema_to_read = dest_schema.schema_to_read();
+ let mut indices_in_result = vec![None; schema_to_read.num_columns()];
+ let mut is_source_needed = vec![true; source_schema.num_columns()];
+ // Number of columns in result from source data.
+ let mut num_columns_in_result = 0;
+
+ for (idx, source_column) in source_schema.columns().iter().enumerate() {
+ // For each column in source schema, check whether we need to read it.
+ if let Some(dest_idx) = schema_to_read
+ .schema()
+ .column_index_by_name(source_column.name())
+ {
+ let dest_column = &schema_to_read.columns()[dest_idx];
+ // Check whether we could read this column.
+ if is_source_column_compatible(source_column, dest_column)? {
+ // Mark that this column could be read from source data, since some
+ // columns in source schema would be skipped, we should not use
+ // the source column's index directly.
+ indices_in_result[dest_idx] = Some(num_columns_in_result);
+ num_columns_in_result += 1;
+ } else {
+                    // This column is not the same column in dest schema, so it should be filled with the default value
+ // instead of reading from source data.
+ is_source_needed[idx] = false;
+ }
+ } else {
+ // The column is not in `dest_schema`, we don't need to read it.
+ is_source_needed[idx] = false;
+ }
+ }
+
+ Ok(ReadAdapter {
+ source_schema,
+ dest_schema,
+ indices_in_result,
+ is_source_needed,
+ })
+ }
+
+ /// Returns a bool slice to denote which key column in source is needed.
+ #[inline]
+ pub fn source_key_needed(&self) -> &[bool] {
+ &self.is_source_needed[..self.source_schema.row_key_end()]
+ }
+
+ /// Returns a bool slice to denote which value column in source is needed.
+ #[inline]
+ pub fn source_value_needed(&self) -> &[bool] {
+ &self.is_source_needed
+ [self.source_schema.row_key_end()..self.source_schema.user_column_end()]
+ }
+
+ /// Construct a new [Batch] from row key, value, sequence and op_type.
+ ///
+ /// # Panics
+ /// Panics if input `VectorRef` is empty.
+ pub fn batch_from_parts(
+ &self,
+ row_key_columns: Vec<VectorRef>,
+ mut value_columns: Vec<VectorRef>,
+ sequences: VectorRef,
+ op_types: VectorRef,
+ ) -> Result<Batch> {
+        // Each vector should have the same length, so here we just use the length of `sequences`.
+ let num_rows = sequences.len();
+
+ let mut source = row_key_columns;
+ // Reserve space for value, sequence and op_type
+ source.reserve(value_columns.len() + 2);
+ source.append(&mut value_columns);
+        // Internal columns are pushed in sequence, op_type order.
+ source.push(sequences);
+ source.push(op_types);
+
+ if !self.need_compat() {
+ return Ok(Batch::new(source));
+ }
+
+ self.source_columns_to_batch(source, num_rows)
+ }
+
+    /// Returns the list of fields that need to be read from the parquet file.
+ pub fn fields_to_read(&self) -> Vec<Field> {
+ if !self.need_compat() {
+ return self
+ .dest_schema
+ .schema_to_read()
+ .arrow_schema()
+ .fields
+ .clone();
+ }
+
+ self.source_schema
+ .arrow_schema()
+ .fields
+ .iter()
+ .zip(self.is_source_needed.iter())
+ .filter_map(|(field, is_needed)| {
+ if *is_needed {
+ Some(field.clone())
+ } else {
+ None
+ }
+ })
+ .collect()
+ }
+
+ /// Convert chunk read from the parquet file into [Batch].
+ ///
+ /// The chunk should have the same schema as [`ReadAdapter::fields_to_read()`].
+ pub fn arrow_chunk_to_batch(&self, chunk: &Chunk<Arc<dyn Array>>) -> Result<Batch> {
+ let names = self
+ .source_schema
+ .schema()
+ .column_schemas()
+ .iter()
+ .zip(self.is_source_needed.iter())
+ .filter_map(|(column_schema, is_needed)| {
+ if *is_needed {
+ Some(&column_schema.name)
+ } else {
+ None
+ }
+ });
+ let source = chunk
+ .iter()
+ .zip(names)
+ .map(|(column, name)| {
+ Helper::try_into_vector(column.clone()).context(error::ConvertChunkSnafu { name })
+ })
+ .collect::<Result<_>>()?;
+
+ if !self.need_compat() || chunk.is_empty() {
+ return Ok(Batch::new(source));
+ }
+
+ let num_rows = chunk.len();
+ self.source_columns_to_batch(source, num_rows)
+ }
+
+ #[inline]
+ fn need_compat(&self) -> bool {
+ self.source_schema.version() != self.dest_schema.schema_to_read().version()
+ }
+
+ fn source_columns_to_batch(&self, source: Vec<VectorRef>, num_rows: usize) -> Result<Batch> {
+ let column_schemas = self.dest_schema.schema_to_read().schema().column_schemas();
+ let columns = self
+ .indices_in_result
+ .iter()
+ .zip(column_schemas)
+ .map(|(index_opt, column_schema)| {
+ if let Some(idx) = index_opt {
+ Ok(source[*idx].clone())
+ } else {
+ let vector = column_schema
+ .create_default_vector(num_rows)
+ .context(error::CreateDefaultToReadSnafu {
+ column: &column_schema.name,
+ })?
+ .context(error::NoDefaultToReadSnafu {
+ column: &column_schema.name,
+ })?;
+ Ok(vector)
+ }
+ })
+ .collect::<Result<Vec<_>>>()?;
+
+ Ok(Batch::new(columns))
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use datatypes::data_type::ConcreteDataType;
+ use store_api::storage::consts;
+ use store_api::storage::ColumnDescriptorBuilder;
+
+ use super::*;
+ use crate::error::Error;
+ use crate::metadata::RegionMetadata;
+ use crate::schema::tests;
+ use crate::schema::{ProjectedSchema, RegionSchema};
+ use crate::test_util::descriptor_util;
+
+ fn check_fields(fields: &[Field], names: &[&str]) {
+ for (field, name) in fields.iter().zip(names) {
+ assert_eq!(&field.name, name);
+ }
+ }
+
+ fn call_batch_from_parts(
+ adapter: &ReadAdapter,
+ batch: &Batch,
+ num_value_columns: usize,
+ ) -> Batch {
+ let key = batch.columns()[0..2].to_vec();
+ let value = batch.columns()[2..2 + num_value_columns].to_vec();
+ let sequence = batch.column(2 + num_value_columns).clone();
+ let op_type = batch.column(2 + num_value_columns + 1).clone();
+
+ adapter
+ .batch_from_parts(key, value, sequence, op_type)
+ .unwrap()
+ }
+
+ fn check_batch_from_parts_without_padding(
+ adapter: &ReadAdapter,
+ batch: &Batch,
+ num_value_columns: usize,
+ ) {
+ let new_batch = call_batch_from_parts(adapter, batch, num_value_columns);
+ assert_eq!(*batch, new_batch);
+ }
+
+ fn call_arrow_chunk_to_batch(adapter: &ReadAdapter, batch: &Batch) -> Batch {
+ let arrays = batch.columns().iter().map(|v| v.to_arrow_array()).collect();
+ let chunk = Chunk::new(arrays);
+ adapter.arrow_chunk_to_batch(&chunk).unwrap()
+ }
+
+ fn check_arrow_chunk_to_batch_without_padding(adapter: &ReadAdapter, batch: &Batch) {
+ let new_batch = call_arrow_chunk_to_batch(adapter, batch);
+ assert_eq!(*batch, new_batch);
+ }
+
+ fn check_batch_with_null_padding(batch: &Batch, new_batch: &Batch, null_columns: &[usize]) {
+ assert_eq!(
+ batch.num_columns() + null_columns.len(),
+ new_batch.num_columns()
+ );
+
+ let columns_from_source = new_batch
+ .columns()
+ .iter()
+ .enumerate()
+ .filter_map(|(i, v)| {
+ if null_columns.contains(&i) {
+ None
+ } else {
+ Some(v.clone())
+ }
+ })
+ .collect::<Vec<_>>();
+
+ assert_eq!(batch.columns(), &columns_from_source);
+
+ for idx in null_columns {
+ assert!(new_batch.column(*idx).only_null());
+ }
+ }
+
+ #[test]
+ fn test_compat_same_schema() {
+ // (k0, timestamp, v0, v1) with version 0.
+ let region_schema = Arc::new(tests::new_region_schema(0, 2));
+ let projected_schema = Arc::new(ProjectedSchema::no_projection(region_schema.clone()));
+
+ let source_schema = region_schema.store_schema().clone();
+ let adapter = ReadAdapter::new(source_schema, projected_schema).unwrap();
+
+ assert_eq!(&[true, true], adapter.source_key_needed());
+ assert_eq!(&[true, true], adapter.source_value_needed());
+
+ let batch = tests::new_batch_with_num_values(2);
+ check_batch_from_parts_without_padding(&adapter, &batch, 2);
+
+ check_fields(
+ &adapter.fields_to_read(),
+ &[
+ "k0",
+ "timestamp",
+ "v0",
+ "v1",
+ consts::SEQUENCE_COLUMN_NAME,
+ consts::OP_TYPE_COLUMN_NAME,
+ ],
+ );
+
+ check_arrow_chunk_to_batch_without_padding(&adapter, &batch);
+ }
+
+ #[test]
+ fn test_compat_same_version_with_projection() {
+ // (k0, timestamp, v0, v1) with version 0.
+ let region_schema = Arc::new(tests::new_region_schema(0, 2));
+ // Just read v0, k0.
+ let projected_schema =
+ Arc::new(ProjectedSchema::new(region_schema.clone(), Some(vec![2, 0])).unwrap());
+
+ let source_schema = region_schema.store_schema().clone();
+ let adapter = ReadAdapter::new(source_schema, projected_schema).unwrap();
+
+ assert_eq!(&[true, true], adapter.source_key_needed());
+ assert_eq!(&[true, false], adapter.source_value_needed());
+
+        // One value column has been filtered out, so the result batch should only contain one value column.
+ let batch = tests::new_batch_with_num_values(1);
+ check_batch_from_parts_without_padding(&adapter, &batch, 1);
+
+ check_fields(
+ &adapter.fields_to_read(),
+ &[
+ "k0",
+ "timestamp",
+ "v0",
+ consts::SEQUENCE_COLUMN_NAME,
+ consts::OP_TYPE_COLUMN_NAME,
+ ],
+ );
+
+ check_arrow_chunk_to_batch_without_padding(&adapter, &batch);
+ }
+
+ #[test]
+ fn test_compat_old_column() {
+ // (k0, timestamp, v0) with version 0.
+ let region_schema_old = Arc::new(tests::new_region_schema(0, 1));
+ // (k0, timestamp, v0, v1) with version 1.
+ let region_schema_new = Arc::new(tests::new_region_schema(1, 1));
+
+ // Just read v0, k0
+ let projected_schema =
+ Arc::new(ProjectedSchema::new(region_schema_new, Some(vec![2, 0])).unwrap());
+
+ let source_schema = region_schema_old.store_schema().clone();
+ let adapter = ReadAdapter::new(source_schema, projected_schema).unwrap();
+
+ assert_eq!(&[true, true], adapter.source_key_needed());
+ assert_eq!(&[true], adapter.source_value_needed());
+
+ let batch = tests::new_batch_with_num_values(1);
+ check_batch_from_parts_without_padding(&adapter, &batch, 1);
+
+ check_fields(
+ &adapter.fields_to_read(),
+ &[
+ "k0",
+ "timestamp",
+ "v0",
+ consts::SEQUENCE_COLUMN_NAME,
+ consts::OP_TYPE_COLUMN_NAME,
+ ],
+ );
+
+ check_arrow_chunk_to_batch_without_padding(&adapter, &batch);
+ }
+
+ #[test]
+ fn test_compat_new_column() {
+ // (k0, timestamp, v0, v1) with version 0.
+ let region_schema_old = Arc::new(tests::new_region_schema(0, 2));
+ // (k0, timestamp, v0, v1, v2) with version 1.
+ let region_schema_new = Arc::new(tests::new_region_schema(1, 3));
+
+ // Just read v2, v0, k0
+ let projected_schema =
+ Arc::new(ProjectedSchema::new(region_schema_new, Some(vec![4, 2, 0])).unwrap());
+
+ let source_schema = region_schema_old.store_schema().clone();
+ let adapter = ReadAdapter::new(source_schema, projected_schema).unwrap();
+
+ assert_eq!(&[true, true], adapter.source_key_needed());
+ assert_eq!(&[true, false], adapter.source_value_needed());
+
+ // Only read one value column from source.
+ let batch = tests::new_batch_with_num_values(1);
+        // New batch should contain k0, timestamp, v0, sequence, op_type.
+ let new_batch = call_batch_from_parts(&adapter, &batch, 1);
+ // v2 is filled by null.
+ check_batch_with_null_padding(&batch, &new_batch, &[3]);
+
+ check_fields(
+ &adapter.fields_to_read(),
+ &[
+ "k0",
+ "timestamp",
+ "v0",
+ consts::SEQUENCE_COLUMN_NAME,
+ consts::OP_TYPE_COLUMN_NAME,
+ ],
+ );
+
+ let new_batch = call_arrow_chunk_to_batch(&adapter, &batch);
+ check_batch_with_null_padding(&batch, &new_batch, &[3]);
+ }
+
+ #[test]
+ fn test_compat_different_column() {
+ // (k0, timestamp, v0, v1) with version 0.
+ let region_schema_old = Arc::new(tests::new_region_schema(0, 2));
+
+ let mut descriptor = descriptor_util::desc_with_value_columns(tests::REGION_NAME, 2);
+ // Assign a much larger column id to v0.
+ descriptor.default_cf.columns[0].id = descriptor.default_cf.columns.last().unwrap().id + 10;
+ let metadata: RegionMetadata = descriptor.try_into().unwrap();
+ let columns = metadata.columns;
+ // (k0, timestamp, v0, v1) with version 2, and v0 has different column id.
+ let region_schema_new = Arc::new(RegionSchema::new(columns, 2).unwrap());
+
+ let projected_schema = Arc::new(ProjectedSchema::no_projection(region_schema_new));
+ let source_schema = region_schema_old.store_schema().clone();
+ let adapter = ReadAdapter::new(source_schema, projected_schema).unwrap();
+
+ assert_eq!(&[true, true], adapter.source_key_needed());
+        // v0 is discarded as it has a different column id than the new schema's.
+ assert_eq!(&[false, true], adapter.source_value_needed());
+
+        // New batch should contain k0, timestamp, v1, sequence, op_type, so we need to remove v0
+ // from the created batch.
+ let batch = tests::new_batch_with_num_values(2);
+ let mut columns = batch.columns().to_vec();
+ // Remove v0.
+ columns.remove(2);
+ let batch = Batch::new(columns);
+
+ let new_batch = call_batch_from_parts(&adapter, &batch, 1);
+ // v0 is filled by null.
+ check_batch_with_null_padding(&batch, &new_batch, &[2]);
+
+ check_fields(
+ &adapter.fields_to_read(),
+ &[
+ "k0",
+ "timestamp",
+ "v1",
+ consts::SEQUENCE_COLUMN_NAME,
+ consts::OP_TYPE_COLUMN_NAME,
+ ],
+ );
+
+ let new_batch = call_arrow_chunk_to_batch(&adapter, &batch);
+ check_batch_with_null_padding(&batch, &new_batch, &[2]);
+ }
+
+ #[inline]
+ fn new_column_desc_builder() -> ColumnDescriptorBuilder {
+ ColumnDescriptorBuilder::new(10, "test", ConcreteDataType::int32_datatype())
+ }
+
+ #[test]
+ fn test_is_source_column_compatible() {
+ let desc = new_column_desc_builder().build().unwrap();
+ let source = ColumnMetadata { cf_id: 1, desc };
+
+        // Same column is always compatible; this also tests reading a nullable
+        // column as a nullable column.
+ assert!(is_source_column_compatible(&source, &source).unwrap());
+
+ // Different id.
+ let desc = new_column_desc_builder()
+ .id(source.desc.id + 1)
+ .build()
+ .unwrap();
+ let dest = ColumnMetadata { cf_id: 1, desc };
+ assert!(!is_source_column_compatible(&source, &dest).unwrap());
+ }
+
+ #[test]
+ fn test_nullable_column_read_by_not_null() {
+ let desc = new_column_desc_builder().build().unwrap();
+ assert!(desc.is_nullable());
+ let source = ColumnMetadata { cf_id: 1, desc };
+
+ let desc = new_column_desc_builder()
+ .is_nullable(false)
+ .build()
+ .unwrap();
+ let dest = ColumnMetadata { cf_id: 1, desc };
+
+ let err = is_source_column_compatible(&source, &dest).unwrap_err();
+ assert!(
+ matches!(err, Error::CompatRead { .. }),
+ "{:?} is not CompatRead",
+ err
+ );
+ }
+
+ #[test]
+ fn test_read_not_null_column() {
+ let desc = new_column_desc_builder()
+ .is_nullable(false)
+ .build()
+ .unwrap();
+ let source = ColumnMetadata { cf_id: 1, desc };
+
+ let desc = new_column_desc_builder()
+ .is_nullable(false)
+ .build()
+ .unwrap();
+ let not_null_dest = ColumnMetadata { cf_id: 1, desc };
+ assert!(is_source_column_compatible(&source, ¬_null_dest).unwrap());
+
+ let desc = new_column_desc_builder().build().unwrap();
+ let null_dest = ColumnMetadata { cf_id: 1, desc };
+ assert!(is_source_column_compatible(&source, &null_dest).unwrap());
+ }
+
+ #[test]
+ fn test_read_column_with_different_name() {
+ let desc = new_column_desc_builder().build().unwrap();
+ let source = ColumnMetadata { cf_id: 1, desc };
+
+ let desc = new_column_desc_builder()
+ .name(format!("{}_other", source.desc.name))
+ .build()
+ .unwrap();
+ let dest = ColumnMetadata { cf_id: 1, desc };
+
+ let err = is_source_column_compatible(&source, &dest).unwrap_err();
+ assert!(
+ matches!(err, Error::CompatRead { .. }),
+ "{:?} is not CompatRead",
+ err
+ );
+ }
+}
diff --git a/src/storage/src/schema/projected.rs b/src/storage/src/schema/projected.rs
index 0bb5f6c19b18..81d92070fd3c 100644
--- a/src/storage/src/schema/projected.rs
+++ b/src/storage/src/schema/projected.rs
@@ -5,7 +5,7 @@ use std::sync::Arc;
use common_error::prelude::*;
use datatypes::arrow::bitmap::MutableBitmap;
use datatypes::schema::{SchemaBuilder, SchemaRef};
-use datatypes::vectors::{BooleanVector, VectorRef};
+use datatypes::vectors::BooleanVector;
use store_api::storage::{Chunk, ColumnId};
use crate::error;
@@ -184,36 +184,6 @@ impl ProjectedSchema {
.unwrap_or(true)
}
- /// Construct a new [Batch] from row key, value, sequence and op_type.
- ///
- /// # Panics
- /// Panics if number of columns are not the same as this schema.
- pub fn batch_from_parts(
- &self,
- row_key_columns: Vec<VectorRef>,
- mut value_columns: Vec<VectorRef>,
- sequences: VectorRef,
- op_types: VectorRef,
- ) -> Batch {
- // sequence and op_type
- let num_internal_columns = 2;
-
- assert_eq!(
- self.schema_to_read.num_columns(),
- row_key_columns.len() + value_columns.len() + num_internal_columns
- );
-
- let mut columns = row_key_columns;
- // Reserve space for value, sequence and op_type
- columns.reserve(value_columns.len() + num_internal_columns);
- columns.append(&mut value_columns);
- // Internal columns are push in sequence, op_type order.
- columns.push(sequences);
- columns.push(op_types);
-
- Batch::new(columns)
- }
-
fn build_schema_to_read(
region_schema: &RegionSchema,
projection: &Projection,
@@ -369,7 +339,7 @@ impl BatchOp for ProjectedSchema {
mod tests {
use datatypes::prelude::ScalarVector;
use datatypes::type_id::LogicalTypeId;
- use datatypes::vectors::TimestampVector;
+ use datatypes::vectors::{TimestampVector, VectorRef};
use store_api::storage::OpType;
use super::*;
@@ -474,17 +444,6 @@ mod tests {
assert_eq!(2, chunk.columns.len());
assert_eq!(&chunk.columns[0], batch.column(2));
assert_eq!(&chunk.columns[1], batch.column(1));
-
- // Test batch_from_parts
- let keys = batch.columns()[0..2].to_vec();
- let values = batch.columns()[2..3].to_vec();
- let created = projected_schema.batch_from_parts(
- keys,
- values,
- batch.column(3).clone(),
- batch.column(4).clone(),
- );
- assert_eq!(batch, created);
}
#[test]
diff --git a/src/storage/src/schema/store.rs b/src/storage/src/schema/store.rs
index e423888ae816..c65ac6781a37 100644
--- a/src/storage/src/schema/store.rs
+++ b/src/storage/src/schema/store.rs
@@ -5,7 +5,6 @@ use datatypes::arrow::array::Array;
use datatypes::arrow::chunk::Chunk as ArrowChunk;
use datatypes::arrow::datatypes::Schema as ArrowSchema;
use datatypes::schema::{Metadata, Schema, SchemaBuilder, SchemaRef};
-use datatypes::vectors::Helper;
use store_api::storage::consts;
use crate::metadata::{self, ColumnMetadata, ColumnsMetadata, Error, Result};
@@ -50,22 +49,6 @@ impl StoreSchema {
ArrowChunk::new(batch.columns().iter().map(|v| v.to_arrow_array()).collect())
}
- pub fn arrow_chunk_to_batch(&self, chunk: &ArrowChunk<Arc<dyn Array>>) -> Result<Batch> {
- assert_eq!(self.schema.num_columns(), chunk.columns().len());
-
- let columns = chunk
- .iter()
- .enumerate()
- .map(|(i, column)| {
- Helper::try_into_vector(column.clone()).context(metadata::ConvertChunkSnafu {
- name: self.column_name(i),
- })
- })
- .collect::<Result<_>>()?;
-
- Ok(Batch::new(columns))
- }
-
pub(crate) fn contains_column(&self, name: &str) -> bool {
self.schema.column_schema_by_name(name).is_some()
}
@@ -159,6 +142,32 @@ impl StoreSchema {
pub(crate) fn num_columns(&self) -> usize {
self.schema.num_columns()
}
+
+ #[inline]
+ pub(crate) fn row_key_end(&self) -> usize {
+ self.row_key_end
+ }
+
+ #[inline]
+ pub(crate) fn user_column_end(&self) -> usize {
+ self.user_column_end
+ }
+
+ #[inline]
+ pub(crate) fn value_columns(&self) -> &[ColumnMetadata] {
+ &self.columns[self.row_key_end..self.user_column_end]
+ }
+
+    /// Returns the index of the value column according to its `offset`.
+ #[inline]
+ pub(crate) fn value_column_index_by_offset(&self, offset: usize) -> usize {
+ self.row_key_end + offset
+ }
+
+ #[inline]
+ pub(crate) fn columns(&self) -> &[ColumnMetadata] {
+ &self.columns
+ }
}
impl TryFrom<ArrowSchema> for StoreSchema {
@@ -262,9 +271,5 @@ mod tests {
// Convert batch to chunk.
let chunk = store_schema.batch_to_arrow_chunk(&batch);
check_chunk_batch(&chunk, &batch);
-
- // Convert chunk to batch.
- let converted_batch = store_schema.arrow_chunk_to_batch(&chunk).unwrap();
- check_chunk_batch(&chunk, &converted_batch);
}
}
diff --git a/src/storage/src/sst/parquet.rs b/src/storage/src/sst/parquet.rs
index d48c760b6253..06bb34588b7c 100644
--- a/src/storage/src/sst/parquet.rs
+++ b/src/storage/src/sst/parquet.rs
@@ -9,7 +9,7 @@ use async_trait::async_trait;
use common_telemetry::debug;
use datatypes::arrow::array::Array;
use datatypes::arrow::chunk::Chunk;
-use datatypes::arrow::datatypes::{DataType, Field, Schema};
+use datatypes::arrow::datatypes::{DataType, Schema};
use datatypes::arrow::io::parquet::read::{
infer_schema, read_columns_many_async, read_metadata_async, RowGroupDeserializer,
};
@@ -27,6 +27,7 @@ use table::predicate::Predicate;
use crate::error::{self, Result};
use crate::memtable::BoxedBatchIterator;
use crate::read::{Batch, BatchReader};
+use crate::schema::compat::ReadAdapter;
use crate::schema::{ProjectedSchemaRef, StoreSchema};
use crate::sst;
@@ -213,17 +214,18 @@ impl<'a> ParquetReader<'a> {
let arrow_schema =
infer_schema(&metadata).context(error::ReadParquetSnafu { file: &file_path })?;
+ let store_schema = Arc::new(
+ StoreSchema::try_from(arrow_schema)
+ .context(error::ConvertStoreSchemaSnafu { file: &file_path })?,
+ );
- // Now the StoreSchema is only used to validate metadata of the parquet file, but this schema
- // would be useful once we support altering schema, as this is the actual schema of the SST.
- let store_schema = StoreSchema::try_from(arrow_schema)
- .context(error::ConvertStoreSchemaSnafu { file: &file_path })?;
+ let adapter = ReadAdapter::new(store_schema.clone(), self.projected_schema.clone())?;
let pruned_row_groups = self
.predicate
.prune_row_groups(store_schema.schema().clone(), &metadata.row_groups);
- let projected_fields = self.projected_fields().to_vec();
+ let projected_fields = adapter.fields_to_read();
let chunk_stream = try_stream!({
for (idx, valid) in pruned_row_groups.iter().enumerate() {
if !valid {
@@ -250,27 +252,20 @@ impl<'a> ParquetReader<'a> {
}
});
- ChunkStream::new(self.projected_schema.clone(), Box::pin(chunk_stream))
- }
-
- fn projected_fields(&self) -> &[Field] {
- &self.projected_schema.schema_to_read().arrow_schema().fields
+ ChunkStream::new(adapter, Box::pin(chunk_stream))
}
}
pub type SendableChunkStream = Pin<Box<dyn Stream<Item = Result<Chunk<Arc<dyn Array>>>> + Send>>;
pub struct ChunkStream {
- projected_schema: ProjectedSchemaRef,
+ adapter: ReadAdapter,
stream: SendableChunkStream,
}
impl ChunkStream {
- pub fn new(projected_schema: ProjectedSchemaRef, stream: SendableChunkStream) -> Result<Self> {
- Ok(Self {
- projected_schema,
- stream,
- })
+ pub fn new(adapter: ReadAdapter, stream: SendableChunkStream) -> Result<Self> {
+ Ok(Self { adapter, stream })
}
}
@@ -280,12 +275,7 @@ impl BatchReader for ChunkStream {
self.stream
.try_next()
.await?
- .map(|chunk| {
- self.projected_schema
- .schema_to_read()
- .arrow_chunk_to_batch(&chunk)
- .context(error::InvalidParquetSchemaSnafu)
- })
+ .map(|chunk| self.adapter.arrow_chunk_to_batch(&chunk))
.transpose()
}
}
diff --git a/src/table/src/table/adapter.rs b/src/table/src/table/adapter.rs
index 59a697d1d667..06b002f98ea4 100644
--- a/src/table/src/table/adapter.rs
+++ b/src/table/src/table/adapter.rs
@@ -6,7 +6,6 @@ use common_query::physical_plan::{DfPhysicalPlanAdapter, PhysicalPlanAdapter, Ph
use common_query::DfPhysicalPlan;
use common_telemetry::debug;
use datafusion::arrow::datatypes::SchemaRef as DfSchemaRef;
-/// Datafusion table adpaters
use datafusion::datasource::{
datasource::TableProviderFilterPushDown as DfTableProviderFilterPushDown, TableProvider,
TableType as DfTableType,
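
The padding logic in `ReadAdapter::source_columns_to_batch` boils down to: walk the destination schema, pick source columns by their remapped index when they were read, and synthesize a default-valued column otherwise. A self-contained toy model of that idea using plain vectors of optional integers (not the storage crate's types):

// Toy model of compat-read padding: `indices` has one entry per destination
// column, pointing into `source` when that column was read, or None when it
// must be filled with a default (here all-null) column of `num_rows` length.
fn pad_columns(
    source: Vec<Vec<Option<i64>>>,
    indices: &[Option<usize>],
    num_rows: usize,
) -> Vec<Vec<Option<i64>>> {
    indices
        .iter()
        .map(|idx| match idx {
            Some(i) => source[*i].clone(),
            None => vec![None; num_rows],
        })
        .collect()
}

fn main() {
    // Source SST has two columns; the destination schema expects three,
    // the middle one added after the SST was written.
    let source = vec![vec![Some(1), Some(2)], vec![Some(10), Some(20)]];
    let dest = pad_columns(source, &[Some(0), None, Some(1)], 2);
    assert_eq!(dest[1], vec![None, None]); // newly added column padded with nulls
}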
|
feat
|
Region supports reading data with different schema (#342)
|
72897a20e311a452fc31358ad64b956a8c744d80
|
2024-05-14 18:55:22
|
Jeremyhi
|
chore: minor refactor on etcd kvbackend (#3940)
| false
|
diff --git a/src/common/meta/src/kv_backend/etcd.rs b/src/common/meta/src/kv_backend/etcd.rs
index c437f90a25c7..1cdd45bc5c13 100644
--- a/src/common/meta/src/kv_backend/etcd.rs
+++ b/src/common/meta/src/kv_backend/etcd.rs
@@ -32,11 +32,6 @@ use crate::rpc::store::{
};
use crate::rpc::KeyValue;
-fn convert_key_value(kv: etcd_client::KeyValue) -> KeyValue {
- let (key, value) = kv.into_key_value();
- KeyValue { key, value }
-}
-
pub struct EtcdStore {
client: Client,
// Maximum number of operations permitted in a transaction.
@@ -123,7 +118,7 @@ impl KvBackend for EtcdStore {
let kvs = res
.take_kvs()
.into_iter()
- .map(convert_key_value)
+ .map(KeyValue::from)
.collect::<Vec<_>>();
Ok(RangeResponse {
@@ -146,7 +141,7 @@ impl KvBackend for EtcdStore {
.await
.context(error::EtcdFailedSnafu)?;
- let prev_kv = res.take_prev_key().map(convert_key_value);
+ let prev_kv = res.take_prev_key().map(KeyValue::from);
Ok(PutResponse { prev_kv })
}
@@ -165,7 +160,7 @@ impl KvBackend for EtcdStore {
for op_res in txn_res.op_responses() {
match op_res {
TxnOpResponse::Put(mut put_res) => {
- if let Some(prev_kv) = put_res.take_prev_key().map(convert_key_value) {
+ if let Some(prev_kv) = put_res.take_prev_key().map(KeyValue::from) {
prev_kvs.push(prev_kv);
}
}
@@ -194,7 +189,7 @@ impl KvBackend for EtcdStore {
TxnOpResponse::Get(get_res) => get_res,
_ => unreachable!(),
};
- kvs.extend(get_res.take_kvs().into_iter().map(convert_key_value));
+ kvs.extend(get_res.take_kvs().into_iter().map(KeyValue::from));
}
}
@@ -214,7 +209,7 @@ impl KvBackend for EtcdStore {
let prev_kvs = res
.take_prev_kvs()
.into_iter()
- .map(convert_key_value)
+ .map(KeyValue::from)
.collect::<Vec<_>>();
Ok(DeleteRangeResponse {
@@ -242,7 +237,7 @@ impl KvBackend for EtcdStore {
delete_res
.take_prev_kvs()
.into_iter()
- .map(convert_key_value)
+ .map(KeyValue::from)
.for_each(|kv| {
prev_kvs.push(kv);
});
diff --git a/src/common/meta/src/kv_backend/memory.rs b/src/common/meta/src/kv_backend/memory.rs
index b9c1dd00bbdb..256e31f93ed3 100644
--- a/src/common/meta/src/kv_backend/memory.rs
+++ b/src/common/meta/src/kv_backend/memory.rs
@@ -268,18 +268,15 @@ impl<T: ErrorExt + Send + Sync> TxnService for MemoryKvBackend<T> {
let do_txn = |txn_op| match txn_op {
TxnOp::Put(key, value) => {
- kvs.insert(key.clone(), value);
+ kvs.insert(key, value);
TxnOpResponse::ResponsePut(PutResponse { prev_kv: None })
}
TxnOp::Get(key) => {
- let value = kvs.get(&key);
+ let value = kvs.get(&key).cloned();
let kvs = value
+ .map(|value| KeyValue { key, value })
.into_iter()
- .map(|value| KeyValue {
- key: key.clone(),
- value: value.clone(),
- })
.collect();
TxnOpResponse::ResponseGet(RangeResponse { kvs, more: false })
}
diff --git a/src/common/meta/src/kv_backend/txn/etcd.rs b/src/common/meta/src/kv_backend/txn/etcd.rs
index ccee8b6062f4..cb516ed51a4c 100644
--- a/src/common/meta/src/kv_backend/txn/etcd.rs
+++ b/src/common/meta/src/kv_backend/txn/etcd.rs
@@ -20,6 +20,7 @@ use etcd_client::{
use super::{Compare, CompareOp, Txn, TxnOp, TxnOpResponse, TxnResponse};
use crate::error::{self, Result};
use crate::rpc::store::{DeleteRangeResponse, PutResponse, RangeResponse};
+use crate::rpc::KeyValue;
impl From<Txn> for EtcdTxn {
fn from(txn: Txn) -> Self {
@@ -65,7 +66,7 @@ impl From<Compare> for EtcdCompare {
};
match cmp.target {
Some(target) => EtcdCompare::value(cmp.key, etcd_cmp, target),
- // create revision 0 means key was not exist
+ // create revision 0 means key does not exist
None => EtcdCompare::create_revision(cmp.key, etcd_cmp, 0),
}
}
@@ -86,28 +87,28 @@ impl TryFrom<EtcdTxnOpResponse> for TxnOpResponse {
fn try_from(op_resp: EtcdTxnOpResponse) -> Result<Self> {
match op_resp {
- EtcdTxnOpResponse::Put(res) => {
- let prev_kv = res.prev_key().cloned().map(Into::into);
- let put_res = PutResponse { prev_kv };
- Ok(TxnOpResponse::ResponsePut(put_res))
+ EtcdTxnOpResponse::Put(mut res) => {
+ let prev_kv = res.take_prev_key().map(KeyValue::from);
+ Ok(TxnOpResponse::ResponsePut(PutResponse { prev_kv }))
}
- EtcdTxnOpResponse::Get(res) => {
- let kvs = res.kvs().iter().cloned().map(Into::into).collect();
- let range_res = RangeResponse { kvs, more: false };
- Ok(TxnOpResponse::ResponseGet(range_res))
+ EtcdTxnOpResponse::Get(mut res) => {
+ let kvs = res.take_kvs().into_iter().map(KeyValue::from).collect();
+ Ok(TxnOpResponse::ResponseGet(RangeResponse {
+ kvs,
+ more: false,
+ }))
}
- EtcdTxnOpResponse::Delete(res) => {
+ EtcdTxnOpResponse::Delete(mut res) => {
+ let deleted = res.deleted();
let prev_kvs = res
- .prev_kvs()
- .iter()
- .cloned()
- .map(Into::into)
+ .take_prev_kvs()
+ .into_iter()
+ .map(KeyValue::from)
.collect::<Vec<_>>();
- let delete_res = DeleteRangeResponse {
+ Ok(TxnOpResponse::ResponseDelete(DeleteRangeResponse {
+ deleted,
prev_kvs,
- deleted: res.deleted(),
- };
- Ok(TxnOpResponse::ResponseDelete(delete_res))
+ }))
}
EtcdTxnOpResponse::Txn(_) => error::EtcdTxnOpResponseSnafu {
err_msg: "nested txn is not supported",
diff --git a/src/log-store/src/raft_engine/backend.rs b/src/log-store/src/raft_engine/backend.rs
index fdc92cd49f19..e2cd65c8fa74 100644
--- a/src/log-store/src/raft_engine/backend.rs
+++ b/src/log-store/src/raft_engine/backend.rs
@@ -103,7 +103,7 @@ impl TxnService for RaftEngineBackend {
let do_txn = |txn_op| match txn_op {
TxnOp::Put(key, value) => {
batch
- .put(SYSTEM_NAMESPACE, key.clone(), value)
+ .put(SYSTEM_NAMESPACE, key, value)
.context(RaftEngineSnafu)
.map_err(BoxedError::new)
.context(meta_error::ExternalSnafu)?;
@@ -113,11 +113,8 @@ impl TxnService for RaftEngineBackend {
TxnOp::Get(key) => {
let value = engine_get(&engine, &key)?.map(|kv| kv.value);
let kvs = value
+ .map(|value| KeyValue { key, value })
.into_iter()
- .map(|value| KeyValue {
- key: key.clone(),
- value,
- })
.collect();
Ok(TxnOpResponse::ResponseGet(RangeResponse {
kvs,
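
The calls to `.map(KeyValue::from)` above presuppose a `From<etcd_client::KeyValue>` conversion on the meta crate's `KeyValue`. That impl is not part of this diff; a sketch of what it presumably looks like, mirroring the removed `convert_key_value` helper:

// Hypothetical sketch only: the conversion that lets `.map(KeyValue::from)`
// replace the old free function, built on `into_key_value()` exactly as the
// removed helper was.
impl From<etcd_client::KeyValue> for KeyValue {
    fn from(kv: etcd_client::KeyValue) -> Self {
        let (key, value) = kv.into_key_value();
        KeyValue { key, value }
    }
}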
|
chore
|
minor refactor on etcd kvbackend (#3940)
|
e99668092c31b2cd676c28e9c02b458a678ae88b
|
2022-10-12 09:22:43
|
Ruihang Xia
|
refactor: relax memory ordering of accessing VersionControl::submmitted_sequence (#305)
| false
|
diff --git a/src/storage/src/version.rs b/src/storage/src/version.rs
index cbb36ed2d524..c3587f9c9257 100644
--- a/src/storage/src/version.rs
+++ b/src/storage/src/version.rs
@@ -64,7 +64,7 @@ impl VersionControl {
#[inline]
pub fn committed_sequence(&self) -> SequenceNumber {
- self.committed_sequence.load(Ordering::Acquire)
+ self.committed_sequence.load(Ordering::Relaxed)
}
/// Set committed sequence to `value`.
@@ -73,8 +73,8 @@ impl VersionControl {
/// last sequence.
#[inline]
pub fn set_committed_sequence(&self, value: SequenceNumber) {
- // Release ordering should be enough to guarantee sequence is updated at last.
- self.committed_sequence.store(value, Ordering::Release);
+        // Relaxed ordering is enough for this update as this method requires external synchronization.
+ self.committed_sequence.store(value, Ordering::Relaxed);
}
/// Add mutable memtables and commit.
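
The rationale for the switch: `set_committed_sequence` is only called under the region writer's external synchronization, so the atomic does not need to order surrounding memory accesses — it only needs tear-free loads and stores of the sequence itself, which `Relaxed` already guarantees. A small standalone illustration of that pattern (not code from the repository):

use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Mutex;

struct Sequencer {
    // All writers take `write_lock` first, so this atomic never races with
    // another writer; readers that only want the latest value may use Relaxed.
    committed: AtomicU64,
    write_lock: Mutex<()>,
}

impl Sequencer {
    fn commit(&self, seq: u64) {
        let _guard = self.write_lock.lock().unwrap();
        self.committed.store(seq, Ordering::Relaxed);
    }

    fn committed(&self) -> u64 {
        self.committed.load(Ordering::Relaxed)
    }
}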
|
refactor
|
relax memory ordering of accessing VersionControl::submmitted_sequence (#305)
|
a50eea76a60e1cb215920e5d847d5685824a7af9
|
2024-10-21 13:48:30
|
Lei, HUANG
|
chore: bump greptime-meter (#4858)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index ea4bb4000753..fc57d670461d 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -6415,12 +6415,22 @@ dependencies = [
"parking_lot 0.12.3",
]
+[[package]]
+name = "meter-core"
+version = "0.1.0"
+source = "git+https://github.com/GreptimeTeam/greptime-meter.git?rev=a10facb353b41460eeb98578868ebf19c2084fac#a10facb353b41460eeb98578868ebf19c2084fac"
+dependencies = [
+ "anymap2",
+ "once_cell",
+ "parking_lot 0.12.3",
+]
+
[[package]]
name = "meter-macros"
version = "0.1.0"
source = "git+https://github.com/GreptimeTeam/greptime-meter.git?rev=80eb97c24c88af4dd9a86f8bbaf50e741d4eb8cd#80eb97c24c88af4dd9a86f8bbaf50e741d4eb8cd"
dependencies = [
- "meter-core",
+ "meter-core 0.1.0 (git+https://github.com/GreptimeTeam/greptime-meter.git?rev=80eb97c24c88af4dd9a86f8bbaf50e741d4eb8cd)",
]
[[package]]
@@ -7585,7 +7595,7 @@ dependencies = [
"futures-util",
"lazy_static",
"meta-client",
- "meter-core",
+ "meter-core 0.1.0 (git+https://github.com/GreptimeTeam/greptime-meter.git?rev=a10facb353b41460eeb98578868ebf19c2084fac)",
"meter-macros",
"moka",
"object-store",
@@ -8988,7 +8998,7 @@ dependencies = [
"humantime",
"itertools 0.10.5",
"lazy_static",
- "meter-core",
+ "meter-core 0.1.0 (git+https://github.com/GreptimeTeam/greptime-meter.git?rev=a10facb353b41460eeb98578868ebf19c2084fac)",
"meter-macros",
"num",
"num-traits",
@@ -10862,7 +10872,7 @@ dependencies = [
"common-telemetry",
"common-time",
"derive_builder 0.12.0",
- "meter-core",
+ "meter-core 0.1.0 (git+https://github.com/GreptimeTeam/greptime-meter.git?rev=a10facb353b41460eeb98578868ebf19c2084fac)",
"snafu 0.8.5",
"sql",
]
diff --git a/Cargo.toml b/Cargo.toml
index 846ab2d9383f..a2998ce7db20 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -127,7 +127,7 @@ humantime-serde = "1.1"
itertools = "0.10"
jsonb = { git = "https://github.com/datafuselabs/jsonb.git", rev = "46ad50fc71cf75afbf98eec455f7892a6387c1fc", default-features = false }
lazy_static = "1.4"
-meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "80eb97c24c88af4dd9a86f8bbaf50e741d4eb8cd" }
+meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "a10facb353b41460eeb98578868ebf19c2084fac" }
mockall = "0.11.4"
moka = "0.12"
notify = "6.1"
|
chore
|
bump greptime-meter (#4858)
|
6720bc5f7c68d1865aaa46be6fe0d66441fbf337
|
2022-12-05 08:31:43
|
dennis zhuang
|
fix: validate create table request in mito engine (#690)
| false
|
diff --git a/src/catalog/src/system.rs b/src/catalog/src/system.rs
index 0a2dac59cb7f..b6555b935339 100644
--- a/src/catalog/src/system.rs
+++ b/src/catalog/src/system.rs
@@ -43,7 +43,6 @@ use crate::error::{
pub const ENTRY_TYPE_INDEX: usize = 0;
pub const KEY_INDEX: usize = 1;
-pub const TIMESTAMP_INDEX: usize = 2;
pub const VALUE_INDEX: usize = 3;
pub struct SystemCatalogTable {
@@ -111,7 +110,7 @@ impl SystemCatalogTable {
desc: Some("System catalog table".to_string()),
schema: schema.clone(),
region_numbers: vec![0],
- primary_key_indices: vec![ENTRY_TYPE_INDEX, KEY_INDEX, TIMESTAMP_INDEX],
+ primary_key_indices: vec![ENTRY_TYPE_INDEX, KEY_INDEX],
create_if_not_exists: true,
table_options: HashMap::new(),
};
diff --git a/src/datanode/src/sql/create.rs b/src/datanode/src/sql/create.rs
index ba80682b6282..8b75bdef3f74 100644
--- a/src/datanode/src/sql/create.rs
+++ b/src/datanode/src/sql/create.rs
@@ -183,14 +183,6 @@ impl SqlHandler {
ensure!(ts_index != usize::MAX, error::MissingTimestampColumnSnafu);
- if primary_keys.is_empty() {
- info!(
- "Creating table: {} with time index column: {} upon primary keys absent",
- table_ref, ts_index
- );
- primary_keys.push(ts_index);
- }
-
let columns_schemas: Vec<_> = stmt
.columns
.iter()
@@ -288,7 +280,6 @@ mod tests {
assert_matches!(error, Error::MissingTimestampColumn { .. });
}
- /// If primary key is not specified, time index should be used as primary key.
#[tokio::test]
pub async fn test_primary_key_not_specified() {
let handler = create_mock_sql_handler().await;
@@ -304,11 +295,8 @@ mod tests {
let c = handler
.create_to_request(42, parsed_stmt, TableReference::bare("demo_table"))
.unwrap();
- assert_eq!(1, c.primary_key_indices.len());
- assert_eq!(
- c.schema.timestamp_index().unwrap(),
- c.primary_key_indices[0]
- );
+ assert!(c.primary_key_indices.is_empty());
+ assert_eq!(c.schema.timestamp_index(), Some(1));
}
/// Constraints specified, not column cannot be found.
diff --git a/src/datanode/src/tests/test_util.rs b/src/datanode/src/tests/test_util.rs
index 07a49f35ba71..a7cf8e1fe552 100644
--- a/src/datanode/src/tests/test_util.rs
+++ b/src/datanode/src/tests/test_util.rs
@@ -89,7 +89,7 @@ pub async fn create_test_table(
.expect("ts is expected to be timestamp column"),
),
create_if_not_exists: true,
- primary_key_indices: vec![3, 0], // "host" and "ts" are primary keys
+ primary_key_indices: vec![0], // "host" is in primary keys
table_options: HashMap::new(),
region_numbers: vec![0],
},
diff --git a/src/frontend/src/instance.rs b/src/frontend/src/instance.rs
index 1a3aee1deb7d..b1c04389a7ae 100644
--- a/src/frontend/src/instance.rs
+++ b/src/frontend/src/instance.rs
@@ -825,7 +825,7 @@ mod tests {
memory DOUBLE NULL,
disk_util DOUBLE DEFAULT 9.9,
TIME INDEX (ts),
- PRIMARY KEY(ts, host)
+ PRIMARY KEY(host)
) engine=mito with(regions=1);"#;
let output = SqlQueryHandler::do_query(&*instance, sql, query_ctx.clone())
.await
@@ -1077,7 +1077,7 @@ mod tests {
desc: None,
column_defs,
time_index: "ts".to_string(),
- primary_keys: vec!["ts".to_string(), "host".to_string()],
+ primary_keys: vec!["host".to_string()],
create_if_not_exists: true,
table_options: Default::default(),
table_id: None,
diff --git a/src/mito/src/engine.rs b/src/mito/src/engine.rs
index 36e0574b56ca..845493d745b3 100644
--- a/src/mito/src/engine.rs
+++ b/src/mito/src/engine.rs
@@ -21,7 +21,7 @@ use common_error::ext::BoxedError;
use common_telemetry::logging;
use datatypes::schema::SchemaRef;
use object_store::ObjectStore;
-use snafu::{OptionExt, ResultExt};
+use snafu::{ensure, OptionExt, ResultExt};
use store_api::storage::{
ColumnDescriptorBuilder, ColumnFamilyDescriptor, ColumnFamilyDescriptorBuilder, ColumnId,
CreateOptions, EngineContext as StorageEngineContext, OpenOptions, RegionDescriptorBuilder,
@@ -37,7 +37,8 @@ use tokio::sync::Mutex;
use crate::config::EngineConfig;
use crate::error::{
self, BuildColumnDescriptorSnafu, BuildColumnFamilyDescriptorSnafu, BuildRegionDescriptorSnafu,
- BuildRowKeyDescriptorSnafu, MissingTimestampIndexSnafu, Result, TableExistsSnafu,
+ BuildRowKeyDescriptorSnafu, InvalidPrimaryKeySnafu, MissingTimestampIndexSnafu, Result,
+ TableExistsSnafu,
};
use crate::table::MitoTable;
@@ -248,6 +249,27 @@ fn build_column_family(
))
}
+fn validate_create_table_request(request: &CreateTableRequest) -> Result<()> {
+ let ts_index = request
+ .schema
+ .timestamp_index()
+ .context(MissingTimestampIndexSnafu {
+ table_name: &request.table_name,
+ })?;
+
+ ensure!(
+ !request
+ .primary_key_indices
+ .iter()
+ .any(|index| *index == ts_index),
+ InvalidPrimaryKeySnafu {
+ msg: "time index column can't be included in primary key"
+ }
+ );
+
+ Ok(())
+}
+
impl<S: StorageEngine> MitoEngineInner<S> {
async fn create_table(
&self,
@@ -263,6 +285,8 @@ impl<S: StorageEngine> MitoEngineInner<S> {
table: table_name,
};
+ validate_create_table_request(&request)?;
+
if let Some(table) = self.get_table(&table_ref) {
if request.create_if_not_exists {
return Ok(table);
@@ -652,6 +676,49 @@ mod tests {
);
}
+ #[test]
+ fn test_validate_create_table_request() {
+ let table_name = "test_validate_create_table_request";
+ let column_schemas = vec![
+ ColumnSchema::new("name", ConcreteDataType::string_datatype(), false),
+ ColumnSchema::new(
+ "ts",
+ ConcreteDataType::timestamp_datatype(common_time::timestamp::TimeUnit::Millisecond),
+ true,
+ )
+ .with_time_index(true),
+ ];
+
+ let schema = Arc::new(
+ SchemaBuilder::try_from(column_schemas)
+ .unwrap()
+ .build()
+ .expect("ts must be timestamp column"),
+ );
+
+ let mut request = CreateTableRequest {
+ id: 1,
+ catalog_name: "greptime".to_string(),
+ schema_name: "public".to_string(),
+ table_name: table_name.to_string(),
+ desc: Some("a test table".to_string()),
+ schema,
+ create_if_not_exists: true,
+ // put ts into primary keys
+ primary_key_indices: vec![0, 1],
+ table_options: HashMap::new(),
+ region_numbers: vec![0],
+ };
+
+ let err = validate_create_table_request(&request).unwrap_err();
+ assert!(err
+ .to_string()
+ .contains("Invalid primary key: time index column can't be included in primary key"));
+
+ request.primary_key_indices = vec![0];
+ assert!(validate_create_table_request(&request).is_ok());
+ }
+
#[tokio::test]
async fn test_create_table_insert_scan() {
let (_engine, table, schema, _dir) = test_util::setup_test_engine_and_table().await;
diff --git a/src/mito/src/error.rs b/src/mito/src/error.rs
index ff3321ef7a26..ff29e72a818b 100644
--- a/src/mito/src/error.rs
+++ b/src/mito/src/error.rs
@@ -56,6 +56,9 @@ pub enum Error {
backtrace: Backtrace,
},
+ #[snafu(display("Invalid primary key: {}", msg))]
+ InvalidPrimaryKey { msg: String, backtrace: Backtrace },
+
#[snafu(display("Missing timestamp index for table: {}", table_name))]
MissingTimestampIndex {
table_name: String,
@@ -214,6 +217,7 @@ impl ErrorExt for Error {
| BuildRegionDescriptor { .. }
| TableExists { .. }
| ProjectedColumnNotFound { .. }
+ | InvalidPrimaryKey { .. }
| MissingTimestampIndex { .. }
| UnsupportedDefaultConstraint { .. }
| TableNotFound { .. } => StatusCode::InvalidArguments,
diff --git a/src/script/src/table.rs b/src/script/src/table.rs
index 224c98da9492..abc0279a3f23 100644
--- a/src/script/src/table.rs
+++ b/src/script/src/table.rs
@@ -60,9 +60,9 @@ impl ScriptsTable {
table_name: SCRIPTS_TABLE_NAME.to_string(),
desc: Some("Scripts table".to_string()),
schema,
- // name and timestamp as primary key
region_numbers: vec![0],
- primary_key_indices: vec![0, 3],
+ // name as primary key
+ primary_key_indices: vec![0],
create_if_not_exists: true,
table_options: HashMap::default(),
};
diff --git a/tests-integration/src/test_util.rs b/tests-integration/src/test_util.rs
index ea4d3e12dbc8..70a3355f3dc2 100644
--- a/tests-integration/src/test_util.rs
+++ b/tests-integration/src/test_util.rs
@@ -193,7 +193,7 @@ pub async fn create_test_table(
.expect("ts is expected to be timestamp column"),
),
create_if_not_exists: true,
- primary_key_indices: vec![3, 0], // "host" and "ts" are primary keys
+ primary_key_indices: vec![0], // "host" is in primary keys
table_options: HashMap::new(),
region_numbers: vec![0],
},
diff --git a/tests-integration/tests/grpc.rs b/tests-integration/tests/grpc.rs
index 026f3b53218a..cf6ba4b922f9 100644
--- a/tests-integration/tests/grpc.rs
+++ b/tests-integration/tests/grpc.rs
@@ -258,7 +258,7 @@ fn testing_create_expr() -> CreateExpr {
desc: Some("blabla".to_string()),
column_defs,
time_index: "ts".to_string(),
- primary_keys: vec!["ts".to_string(), "host".to_string()],
+ primary_keys: vec!["host".to_string()],
create_if_not_exists: true,
table_options: Default::default(),
table_id: Some(MIN_USER_TABLE_ID),
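Note: a minimal standalone sketch (not part of the commit above; the helper name is illustrative) of the rule enforced by validate_create_table_request: the time index column must not also appear in the primary key.

// Hypothetical helper distilled from the validation above: accept the request
// only when the primary key does not contain the time index column.
fn primary_key_excludes_time_index(primary_key_indices: &[usize], ts_index: usize) -> bool {
    !primary_key_indices.contains(&ts_index)
}

// e.g. with the time index at column 1: [0, 1] is rejected, [0] is accepted.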
|
fix
|
validate create table request in mito engine (#690)
|
dc9b5339bf21230cbd83ad8b23d488c582c99667
|
2023-01-29 11:51:13
|
Ruihang Xia
|
feat: impl `increase` and `irate`/`idelta` in PromQL (#880)
| false
|
diff --git a/src/promql/src/error.rs b/src/promql/src/error.rs
index d95d500f7ef9..de6a7285d716 100644
--- a/src/promql/src/error.rs
+++ b/src/promql/src/error.rs
@@ -15,6 +15,7 @@
use std::any::Any;
use common_error::prelude::*;
+use datafusion::error::DataFusionError;
use promql_parser::parser::{Expr as PromExpr, TokenType};
#[derive(Debug, Snafu)]
@@ -125,3 +126,20 @@ impl ErrorExt for Error {
}
pub type Result<T> = std::result::Result<T, Error>;
+
+impl From<Error> for DataFusionError {
+ fn from(err: Error) -> Self {
+ DataFusionError::External(Box::new(err))
+ }
+}
+
+pub(crate) fn ensure(
+ predicate: bool,
+ error: DataFusionError,
+) -> std::result::Result<(), DataFusionError> {
+ if predicate {
+ Ok(())
+ } else {
+ Err(error)
+ }
+}
diff --git a/src/promql/src/functions.rs b/src/promql/src/functions.rs
new file mode 100644
index 000000000000..aaa6aa025880
--- /dev/null
+++ b/src/promql/src/functions.rs
@@ -0,0 +1,32 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+mod idelta;
+mod increase;
+
+use datafusion::arrow::array::ArrayRef;
+use datafusion::error::DataFusionError;
+use datafusion::physical_plan::ColumnarValue;
+pub use idelta::IDelta;
+pub use increase::Increase;
+
+pub(crate) fn extract_array(columnar_value: &ColumnarValue) -> Result<ArrayRef, DataFusionError> {
+ if let ColumnarValue::Array(array) = columnar_value {
+ Ok(array.clone())
+ } else {
+ Err(DataFusionError::Execution(
+ "expect array as input, found scalar value".to_string(),
+ ))
+ }
+}
diff --git a/src/promql/src/functions/idelta.rs b/src/promql/src/functions/idelta.rs
new file mode 100644
index 000000000000..1a12cbff3829
--- /dev/null
+++ b/src/promql/src/functions/idelta.rs
@@ -0,0 +1,231 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::fmt::Display;
+use std::sync::Arc;
+
+use datafusion::arrow::array::{Float64Array, Int64Array};
+use datafusion::common::DataFusionError;
+use datafusion::logical_expr::{ScalarUDF, Signature, TypeSignature, Volatility};
+use datafusion::physical_plan::ColumnarValue;
+use datatypes::arrow::array::{Array, PrimitiveArray};
+use datatypes::arrow::datatypes::DataType;
+
+use crate::error;
+use crate::functions::extract_array;
+use crate::range_array::RangeArray;
+
+/// The `funcIdelta` in Promql,
+/// from https://github.com/prometheus/prometheus/blob/6bdecf377cea8e856509914f35234e948c4fcb80/promql/functions.go#L235
+#[derive(Debug)]
+pub struct IDelta<const IS_RATE: bool> {}
+
+impl<const IS_RATE: bool> IDelta<IS_RATE> {
+ pub const fn name() -> &'static str {
+ if IS_RATE {
+ "prom_irate"
+ } else {
+ "prom_idelta"
+ }
+ }
+
+ pub fn scalar_udf() -> ScalarUDF {
+ ScalarUDF {
+ name: Self::name().to_string(),
+ signature: Signature::new(
+ TypeSignature::Exact(Self::input_type()),
+ Volatility::Immutable,
+ ),
+ return_type: Arc::new(|_| Ok(Arc::new(Self::return_type()))),
+ fun: Arc::new(Self::calc),
+ }
+ }
+
+ // time index column and value column
+ fn input_type() -> Vec<DataType> {
+ vec![
+ RangeArray::convert_data_type(DataType::Int64),
+ RangeArray::convert_data_type(DataType::Float64),
+ ]
+ }
+
+ fn return_type() -> DataType {
+ DataType::Float64
+ }
+
+ fn calc(input: &[ColumnarValue]) -> Result<ColumnarValue, DataFusionError> {
+ // construct matrix from input
+ assert_eq!(input.len(), 2);
+ let ts_array = extract_array(&input[0])?;
+ let value_array = extract_array(&input[1])?;
+
+ let ts_range: RangeArray = RangeArray::try_new(ts_array.data().clone().into())?;
+ let value_range: RangeArray = RangeArray::try_new(value_array.data().clone().into())?;
+ error::ensure(
+ ts_range.len() == value_range.len(),
+ DataFusionError::Execution(format!(
+ "{}: input arrays should have the same length, found {} and {}",
+ Self::name(),
+ ts_range.len(),
+ value_range.len()
+ )),
+ )?;
+ error::ensure(
+ ts_range.value_type() == DataType::Int64,
+ DataFusionError::Execution(format!(
+ "{}: expect Int64 as time index array's type, found {}",
+ Self::name(),
+ ts_range.value_type()
+ )),
+ )?;
+ error::ensure(
+ value_range.value_type() == DataType::Float64,
+ DataFusionError::Execution(format!(
+ "{}: expect Int64 as time index array's type, found {}",
+ Self::name(),
+ value_range.value_type()
+ )),
+ )?;
+
+ // calculation
+ let mut result_array = Vec::with_capacity(ts_range.len());
+
+ for index in 0..ts_range.len() {
+ let timestamps = ts_range.get(index).unwrap();
+ let timestamps = timestamps
+ .as_any()
+ .downcast_ref::<Int64Array>()
+ .unwrap()
+ .values();
+
+ let values = value_range.get(index).unwrap();
+ let values = values
+ .as_any()
+ .downcast_ref::<Float64Array>()
+ .unwrap()
+ .values();
+ error::ensure(
+ timestamps.len() == values.len(),
+ DataFusionError::Execution(format!(
+ "{}: input arrays should have the same length, found {} and {}",
+ Self::name(),
+ timestamps.len(),
+ values.len()
+ )),
+ )?;
+
+ let len = timestamps.len();
+ if len < 2 {
+ result_array.push(0.0);
+ continue;
+ }
+
+ // if is delta
+ if !IS_RATE {
+ result_array.push(values[len - 1] - values[len - 2]);
+ continue;
+ }
+
+ // else is rate
+ // TODO(ruihang): "divide 1000" converts the timestamp from millisecond to second.
+ // it should consider other precisions.
+ let sampled_interval = (timestamps[len - 1] - timestamps[len - 2]) / 1000;
+ let last_value = values[len - 1];
+ let prev_value = values[len - 2];
+ let result_value = if last_value < prev_value {
+ // counter reset
+ last_value
+ } else {
+ last_value - prev_value
+ };
+
+ result_array.push(result_value / sampled_interval as f64);
+ }
+
+ let result = ColumnarValue::Array(Arc::new(PrimitiveArray::from_iter(result_array)));
+ Ok(result)
+ }
+}
+
+impl<const IS_RATE: bool> Display for IDelta<IS_RATE> {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(f, "PromQL Idelta Function (is_rate: {IS_RATE})",)
+ }
+}
+
+#[cfg(test)]
+mod test {
+
+ use super::*;
+
+ fn idelta_runner(input_ts: RangeArray, input_value: RangeArray, expected: Vec<f64>) {
+ let input = vec![
+ ColumnarValue::Array(Arc::new(input_ts.into_dict())),
+ ColumnarValue::Array(Arc::new(input_value.into_dict())),
+ ];
+ let output = extract_array(&IDelta::<false>::calc(&input).unwrap())
+ .unwrap()
+ .as_any()
+ .downcast_ref::<Float64Array>()
+ .unwrap()
+ .values()
+ .to_vec();
+ assert_eq!(output, expected);
+ }
+
+ fn irate_runner(input_ts: RangeArray, input_value: RangeArray, expected: Vec<f64>) {
+ let input = vec![
+ ColumnarValue::Array(Arc::new(input_ts.into_dict())),
+ ColumnarValue::Array(Arc::new(input_value.into_dict())),
+ ];
+ let output = extract_array(&IDelta::<true>::calc(&input).unwrap())
+ .unwrap()
+ .as_any()
+ .downcast_ref::<Float64Array>()
+ .unwrap()
+ .values()
+ .to_vec();
+ assert_eq!(output, expected);
+ }
+
+ #[test]
+ fn basic_idelta_and_irate() {
+ let ts_array = Arc::new(Int64Array::from_iter([
+ 1000, 3000, 5000, 7000, 9000, 11000, 13000, 15000, 17000,
+ ]));
+ let ts_ranges = [(0, 2), (0, 5), (1, 1), (3, 3), (8, 1), (9, 0)];
+
+ let values_array = Arc::new(Float64Array::from_iter([
+ 1.0, 2.0, 3.0, 5.0, 0.0, 6.0, 7.0, 8.0, 9.0,
+ ]));
+ let values_ranges = [(0, 2), (0, 5), (1, 1), (3, 3), (8, 1), (9, 0)];
+
+ let ts_range_array = RangeArray::from_ranges(ts_array.clone(), ts_ranges).unwrap();
+ let value_range_array =
+ RangeArray::from_ranges(values_array.clone(), values_ranges).unwrap();
+ idelta_runner(
+ ts_range_array,
+ value_range_array,
+ vec![1.0, -5.0, 0.0, 6.0, 0.0, 0.0],
+ );
+
+ let ts_range_array = RangeArray::from_ranges(ts_array, ts_ranges).unwrap();
+ let value_range_array = RangeArray::from_ranges(values_array, values_ranges).unwrap();
+ irate_runner(
+ ts_range_array,
+ value_range_array,
+ vec![0.5, 0.0, 0.0, 3.0, 0.0, 0.0],
+ );
+ }
+}
diff --git a/src/promql/src/functions/increase.rs b/src/promql/src/functions/increase.rs
new file mode 100644
index 000000000000..1c3dede2ca0f
--- /dev/null
+++ b/src/promql/src/functions/increase.rs
@@ -0,0 +1,188 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::fmt::Display;
+use std::sync::Arc;
+
+use datafusion::arrow::array::Float64Array;
+use datafusion::common::DataFusionError;
+use datafusion::logical_expr::{ScalarUDF, Signature, TypeSignature, Volatility};
+use datafusion::physical_plan::ColumnarValue;
+use datatypes::arrow::array::{Array, PrimitiveArray};
+use datatypes::arrow::datatypes::DataType;
+
+use crate::functions::extract_array;
+use crate::range_array::RangeArray;
+
+/// Part of the `extrapolatedRate` in Promql,
+/// from https://github.com/prometheus/prometheus/blob/6bdecf377cea8e856509914f35234e948c4fcb80/promql/functions.go#L66
+#[derive(Debug)]
+pub struct Increase {}
+
+impl Increase {
+ pub fn name() -> &'static str {
+ "prom_increase"
+ }
+
+ fn input_type() -> DataType {
+ RangeArray::convert_data_type(DataType::Float64)
+ }
+
+ fn return_type() -> DataType {
+ DataType::Float64
+ }
+
+ fn calc(input: &[ColumnarValue]) -> Result<ColumnarValue, DataFusionError> {
+ // construct matrix from input
+ assert_eq!(input.len(), 1);
+ let input_array = extract_array(input.first().unwrap())?;
+ let array_data = input_array.data().clone();
+ let range_array: RangeArray = RangeArray::try_new(array_data.into())?;
+
+ // calculation
+ let mut result_array = Vec::with_capacity(range_array.len());
+ for index in 0..range_array.len() {
+ let range = range_array.get(index).unwrap();
+ let range = range
+ .as_any()
+ .downcast_ref::<Float64Array>()
+ .unwrap()
+ .values();
+
+ if range.len() < 2 {
+ result_array.push(0.0);
+ continue;
+ }
+
+ // refer to functions.go L83-L110
+ let mut result_value = range.last().unwrap() - range.first().unwrap();
+ for window in range.windows(2) {
+ let prev = window[0];
+ let curr = window[1];
+ if curr < prev {
+ result_value += prev
+ }
+ }
+
+ result_array.push(result_value);
+ }
+
+ let result = ColumnarValue::Array(Arc::new(PrimitiveArray::from_iter(result_array)));
+ Ok(result)
+ }
+
+ pub fn scalar_udf() -> ScalarUDF {
+ ScalarUDF {
+ name: Self::name().to_string(),
+ signature: Signature::new(
+ TypeSignature::Exact(vec![Self::input_type()]),
+ Volatility::Immutable,
+ ),
+ return_type: Arc::new(|_| Ok(Arc::new(Self::return_type()))),
+ fun: Arc::new(Self::calc),
+ }
+ }
+}
+
+impl Display for Increase {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ f.write_str("PromQL Increase Function")
+ }
+}
+
+#[cfg(test)]
+mod test {
+
+ use super::*;
+
+ fn increase_runner(input: RangeArray, expected: Vec<f64>) {
+ let input = vec![ColumnarValue::Array(Arc::new(input.into_dict()))];
+ let output = extract_array(&Increase::calc(&input).unwrap())
+ .unwrap()
+ .as_any()
+ .downcast_ref::<Float64Array>()
+ .unwrap()
+ .values()
+ .to_vec();
+ assert_eq!(output, expected);
+ }
+
+ #[test]
+ fn abnormal_input() {
+ let values_array = Arc::new(Float64Array::from_iter([
+ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0,
+ ]));
+ let ranges = [(0, 2), (0, 5), (1, 1), (3, 3), (8, 1), (9, 0)];
+ let range_array = RangeArray::from_ranges(values_array, ranges).unwrap();
+ increase_runner(range_array, vec![1.0, 4.0, 0.0, 2.0, 0.0, 0.0]);
+ }
+
+ #[test]
+ fn normal_input() {
+ let values_array = Arc::new(Float64Array::from_iter([
+ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0,
+ ]));
+ let ranges = [
+ (0, 2),
+ (1, 2),
+ (2, 2),
+ (3, 2),
+ (4, 2),
+ (5, 2),
+ (6, 2),
+ (7, 2),
+ ];
+ let range_array = RangeArray::from_ranges(values_array, ranges).unwrap();
+ increase_runner(range_array, vec![1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]);
+ }
+
+ #[test]
+ fn short_input() {
+ let values_array = Arc::new(Float64Array::from_iter([
+ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0,
+ ]));
+ let ranges = [
+ (0, 1),
+ (1, 0),
+ (2, 1),
+ (3, 0),
+ (4, 3),
+ (5, 1),
+ (6, 0),
+ (7, 2),
+ ];
+ let range_array = RangeArray::from_ranges(values_array, ranges).unwrap();
+ increase_runner(range_array, vec![0.0, 0.0, 0.0, 0.0, 2.0, 0.0, 0.0, 1.0]);
+ }
+
+ #[test]
+ fn counter_reset() {
+ // this series should be treated [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
+ let values_array = Arc::new(Float64Array::from_iter([
+ 1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 4.0, 5.0,
+ ]));
+ let ranges = [
+ (0, 2),
+ (1, 2),
+ (2, 2),
+ (3, 2),
+ (4, 2),
+ (5, 2),
+ (6, 2),
+ (7, 2),
+ ];
+ let range_array = RangeArray::from_ranges(values_array, ranges).unwrap();
+ increase_runner(range_array, vec![1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]);
+ }
+}
diff --git a/src/promql/src/lib.rs b/src/promql/src/lib.rs
index 40cf3bbf0021..c4b6d796b102 100644
--- a/src/promql/src/lib.rs
+++ b/src/promql/src/lib.rs
@@ -15,5 +15,6 @@
pub mod engine;
pub mod error;
pub mod extension_plan;
+pub mod functions;
pub mod planner;
pub mod range_array;
diff --git a/src/promql/src/range_array.rs b/src/promql/src/range_array.rs
index e5a4fe7d8f67..a85a3fe64702 100644
--- a/src/promql/src/range_array.rs
+++ b/src/promql/src/range_array.rs
@@ -71,6 +71,10 @@ impl RangeArray {
DataType::Int64
}
+ pub fn value_type(&self) -> DataType {
+ self.array.value_type()
+ }
+
pub fn try_new(dict: DictionaryArray<Int64Type>) -> Result<Self> {
let ranges_iter = dict
.keys()
@@ -185,11 +189,16 @@ impl RangeArray {
let value_type = Box::new(field.data_type().clone());
Field::new(
field.name(),
- DataType::Dictionary(Box::new(Self::key_type()), value_type),
+ Self::convert_data_type(*value_type),
field.is_nullable(),
)
}
+ /// Builds the data type of a wrapped [RangeArray] for the given value type.
+ pub fn convert_data_type(value_type: DataType) -> DataType {
+ DataType::Dictionary(Box::new(Self::key_type()), Box::new(value_type))
+ }
+
pub fn values(&self) -> &ArrayRef {
self.array.values()
}
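Note: a hedged standalone sketch (not part of the commit above; the function name is made up) of the counter-reset handling that `Increase::calc` applies to each range:

// Start from the raw delta last - first, then add back the pre-reset value at
// every point where the counter dropped, mirroring the loop in Increase::calc.
fn increase_of_range(range: &[f64]) -> f64 {
    if range.len() < 2 {
        return 0.0;
    }
    let mut result = range.last().unwrap() - range.first().unwrap();
    for window in range.windows(2) {
        if window[1] < window[0] {
            // counter reset: re-add the value accumulated before the reset
            result += window[0];
        }
    }
    result
}

// e.g. [1.0, 2.0, 3.0, 4.0, 1.0] yields (1.0 - 1.0) + 4.0 = 4.0, i.e. the
// series is treated as if it had continued to 5.0 after the reset.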
|
feat
|
impl `increase` and `irate`/`idelta` in PromQL (#880)
|
4eb0771afe22959745f7f5c53d08d42a92efaff5
|
2025-03-21 10:49:23
|
Weny Xu
|
feat: introduce `install_manifest_to` for `RegionManifestManager` (#5742)
| false
|
diff --git a/src/mito2/src/error.rs b/src/mito2/src/error.rs
index e5d494f57f26..5df76f1e5f1d 100644
--- a/src/mito2/src/error.rs
+++ b/src/mito2/src/error.rs
@@ -781,6 +781,50 @@ pub enum Error {
#[snafu(display("checksum mismatch (actual: {}, expected: {})", actual, expected))]
ChecksumMismatch { actual: u32, expected: u32 },
+ #[snafu(display(
+ "No checkpoint found, region: {}, last_version: {}",
+ region_id,
+ last_version
+ ))]
+ NoCheckpoint {
+ region_id: RegionId,
+ last_version: ManifestVersion,
+ #[snafu(implicit)]
+ location: Location,
+ },
+
+ #[snafu(display(
+ "No manifests found in range: [{}..{}), region: {}, last_version: {}",
+ start_version,
+ end_version,
+ region_id,
+ last_version
+ ))]
+ NoManifests {
+ region_id: RegionId,
+ start_version: ManifestVersion,
+ end_version: ManifestVersion,
+ last_version: ManifestVersion,
+ #[snafu(implicit)]
+ location: Location,
+ },
+
+ #[snafu(display(
+ "Failed to install manifest to {}, region: {}, available manifest version: {}, last version: {}",
+ target_version,
+ region_id,
+ available_version,
+ last_version
+ ))]
+ InstallManifestTo {
+ region_id: RegionId,
+ target_version: ManifestVersion,
+ available_version: ManifestVersion,
+ #[snafu(implicit)]
+ location: Location,
+ last_version: ManifestVersion,
+ },
+
#[snafu(display("Region {} is stopped", region_id))]
RegionStopped {
region_id: RegionId,
@@ -1019,7 +1063,10 @@ impl ErrorExt for Error {
| OperateAbortedIndex { .. }
| UnexpectedReplay { .. }
| IndexEncodeNull { .. }
- | UnexpectedImpureDefault { .. } => StatusCode::Unexpected,
+ | UnexpectedImpureDefault { .. }
+ | NoCheckpoint { .. }
+ | NoManifests { .. }
+ | InstallManifestTo { .. } => StatusCode::Unexpected,
RegionNotFound { .. } => StatusCode::RegionNotFound,
ObjectStoreNotFound { .. }
| InvalidScanIndex { .. }
diff --git a/src/mito2/src/manifest/manager.rs b/src/mito2/src/manifest/manager.rs
index b77111d570a0..ead8ce7bfc0e 100644
--- a/src/mito2/src/manifest/manager.rs
+++ b/src/mito2/src/manifest/manager.rs
@@ -23,7 +23,9 @@ use snafu::{ensure, OptionExt, ResultExt};
use store_api::manifest::{ManifestVersion, MAX_VERSION, MIN_VERSION};
use store_api::metadata::RegionMetadataRef;
-use crate::error::{self, RegionStoppedSnafu, Result};
+use crate::error::{
+ self, InstallManifestToSnafu, NoCheckpointSnafu, NoManifestsSnafu, RegionStoppedSnafu, Result,
+};
use crate::manifest::action::{
RegionChange, RegionCheckpoint, RegionManifest, RegionManifestBuilder, RegionMetaAction,
RegionMetaActionList,
@@ -197,9 +199,9 @@ impl RegionManifestManager {
let checkpoint = Self::last_checkpoint(&mut store).await?;
let last_checkpoint_version = checkpoint
.as_ref()
- .map(|checkpoint| checkpoint.last_version)
+ .map(|(checkpoint, _)| checkpoint.last_version)
.unwrap_or(MIN_VERSION);
- let mut manifest_builder = if let Some(checkpoint) = checkpoint {
+ let mut manifest_builder = if let Some((checkpoint, _)) = checkpoint {
info!(
"Recover region manifest {} from checkpoint version {}",
options.manifest_dir, checkpoint.last_version
@@ -275,6 +277,153 @@ impl RegionManifestManager {
self.stopped = true;
}
+ /// Installs the manifest changes from the current version to the target version (inclusive).
+ ///
+ /// Returns the installed version.
+ /// **Note**: This function is not guaranteed to install the target version strictly.
+ /// The installed version may be greater than the target version.
+ pub async fn install_manifest_to(
+ &mut self,
+ target_version: ManifestVersion,
+ ) -> Result<ManifestVersion> {
+ let _t = MANIFEST_OP_ELAPSED
+ .with_label_values(&["install_manifest_to"])
+ .start_timer();
+
+ // Case 1: If the target version is less than the current version, return the current version.
+ if self.last_version >= target_version {
+ debug!(
+ "Target version {} is less than or equal to the current version {}, region: {}, skip install",
+ target_version, self.last_version, self.manifest.metadata.region_id
+ );
+ return Ok(self.last_version);
+ }
+
+ ensure!(
+ !self.stopped,
+ RegionStoppedSnafu {
+ region_id: self.manifest.metadata.region_id,
+ }
+ );
+
+ // Fetches manifests from the last version strictly.
+ let mut manifests = self
+ .store
+ // Invariant: last_version < target_version.
+ .fetch_manifests_strict_from(self.last_version + 1, target_version + 1)
+ .await?;
+
+ // Case 2: No manifests in range: [current_version+1, target_version+1)
+ //
+ // |---------Has been deleted------------| [Checkpoint Version]...[Latest Version]
+ // [Leader region]
+ // [Current Version]......[Target Version]
+ // [Follower region]
+ if manifests.is_empty() {
+ debug!(
+ "Manifests are not strict from {}, region: {}, tries to install the last checkpoint",
+ self.last_version, self.manifest.metadata.region_id
+ );
+ let last_version = self.install_last_checkpoint().await?;
+ // Case 2.1: If the installed checkpoint version is greater than or equal to the target version, return the last version.
+ if last_version >= target_version {
+ return Ok(last_version);
+ }
+
+ // Fetches manifests from the installed version strictly.
+ manifests = self
+ .store
+ // Invariant: last_version < target_version.
+ .fetch_manifests_strict_from(last_version + 1, target_version + 1)
+ .await?;
+ }
+
+ if manifests.is_empty() {
+ return NoManifestsSnafu {
+ region_id: self.manifest.metadata.region_id,
+ start_version: self.last_version + 1,
+ end_version: target_version + 1,
+ last_version: self.last_version,
+ }
+ .fail();
+ }
+
+ debug_assert_eq!(manifests.first().unwrap().0, self.last_version + 1);
+ let mut manifest_builder =
+ RegionManifestBuilder::with_checkpoint(Some(self.manifest.as_ref().clone()));
+
+ for (manifest_version, raw_action_list) in manifests {
+ self.store
+ .set_delta_file_size(manifest_version, raw_action_list.len() as u64);
+ let action_list = RegionMetaActionList::decode(&raw_action_list)?;
+ for action in action_list.actions {
+ match action {
+ RegionMetaAction::Change(action) => {
+ manifest_builder.apply_change(manifest_version, action);
+ }
+ RegionMetaAction::Edit(action) => {
+ manifest_builder.apply_edit(manifest_version, action);
+ }
+ RegionMetaAction::Remove(_) => {
+ debug!(
+ "Unhandled action for region {}, action: {:?}",
+ self.manifest.metadata.region_id, action
+ );
+ }
+ RegionMetaAction::Truncate(action) => {
+ manifest_builder.apply_truncate(manifest_version, action);
+ }
+ }
+ }
+ }
+
+ let new_manifest = manifest_builder.try_build()?;
+ ensure!(
+ new_manifest.manifest_version >= target_version,
+ InstallManifestToSnafu {
+ region_id: self.manifest.metadata.region_id,
+ target_version,
+ available_version: new_manifest.manifest_version,
+ last_version: self.last_version,
+ }
+ );
+
+ let version = self.last_version;
+ self.manifest = Arc::new(new_manifest);
+ self.last_version = self.manifest.manifest_version;
+ info!(
+ "Install manifest changes from {} to {}, region: {}",
+ version, self.last_version, self.manifest.metadata.region_id
+ );
+
+ Ok(self.last_version)
+ }
+
+ /// Installs the last checkpoint.
+ pub(crate) async fn install_last_checkpoint(&mut self) -> Result<ManifestVersion> {
+ let Some((checkpoint, checkpoint_size)) = Self::last_checkpoint(&mut self.store).await?
+ else {
+ return NoCheckpointSnafu {
+ region_id: self.manifest.metadata.region_id,
+ last_version: self.last_version,
+ }
+ .fail();
+ };
+ self.store.reset_manifest_size();
+ self.store
+ .set_checkpoint_file_size(checkpoint.last_version, checkpoint_size);
+ let builder = RegionManifestBuilder::with_checkpoint(checkpoint.checkpoint);
+ let manifest = builder.try_build()?;
+ self.last_version = manifest.manifest_version;
+ self.manifest = Arc::new(manifest);
+ info!(
+ "Installed region manifest from checkpoint: {}, region: {}",
+ checkpoint.last_version, self.manifest.metadata.region_id
+ );
+
+ Ok(self.last_version)
+ }
+
/// Updates the manifest. Returns the current manifest version number.
pub async fn update(&mut self, action_list: RegionMetaActionList) -> Result<ManifestVersion> {
let _t = MANIFEST_OP_ELAPSED
@@ -371,14 +520,17 @@ impl RegionManifestManager {
}
/// Fetches the last [RegionCheckpoint] from storage.
+ ///
+ /// If the checkpoint is not found, returns `None`.
+ /// Otherwise, returns the checkpoint and the size of the checkpoint.
pub(crate) async fn last_checkpoint(
store: &mut ManifestObjectStore,
- ) -> Result<Option<RegionCheckpoint>> {
+ ) -> Result<Option<(RegionCheckpoint, u64)>> {
let last_checkpoint = store.load_last_checkpoint().await?;
if let Some((_, bytes)) = last_checkpoint {
let checkpoint = RegionCheckpoint::decode(&bytes)?;
- Ok(Some(checkpoint))
+ Ok(Some((checkpoint, bytes.len() as u64)))
} else {
Ok(None)
}
diff --git a/src/mito2/src/manifest/storage.rs b/src/mito2/src/manifest/storage.rs
index be0ead8f8831..c0ee01ba60c6 100644
--- a/src/mito2/src/manifest/storage.rs
+++ b/src/mito2/src/manifest/storage.rs
@@ -236,7 +236,31 @@ impl ManifestObjectStore {
Ok(entries)
}
- /// Fetch all manifests in concurrent.
+ /// Fetches manifests in range [start_version, end_version).
+ ///
+ /// This function is guaranteed to return manifests from the `start_version` strictly (must contain `start_version`).
+ pub async fn fetch_manifests_strict_from(
+ &self,
+ start_version: ManifestVersion,
+ end_version: ManifestVersion,
+ ) -> Result<Vec<(ManifestVersion, Vec<u8>)>> {
+ let mut manifests = self.fetch_manifests(start_version, end_version).await?;
+ let start_index = manifests.iter().position(|(v, _)| *v == start_version);
+ debug!(
+ "fetches manifests in range [{},{}), start_index: {:?}",
+ start_version, end_version, start_index
+ );
+ if let Some(start_index) = start_index {
+ Ok(manifests.split_off(start_index))
+ } else {
+ Ok(vec![])
+ }
+ }
+
+ /// Fetches all manifests concurrently and returns those in range [start_version, end_version).
+ ///
+ /// **Notes**: This function is not guaranteed to return manifests strictly from the `start_version`.
+ /// Use [fetch_manifests_strict_from](ManifestObjectStore::fetch_manifests_strict_from) to get manifests strictly from the `start_version`.
pub async fn fetch_manifests(
&self,
start_version: ManifestVersion,
@@ -576,6 +600,12 @@ impl ManifestObjectStore {
self.manifest_size_map.read().unwrap().values().sum()
}
+ /// Resets the size of all files.
+ pub(crate) fn reset_manifest_size(&mut self) {
+ self.manifest_size_map.write().unwrap().clear();
+ self.total_manifest_size.store(0, Ordering::Relaxed);
+ }
+
/// Set the size of the delta file by delta version.
pub(crate) fn set_delta_file_size(&mut self, version: ManifestVersion, size: u64) {
let mut m = self.manifest_size_map.write().unwrap();
@@ -585,7 +615,7 @@ impl ManifestObjectStore {
}
/// Set the size of the checkpoint file by checkpoint version.
- fn set_checkpoint_file_size(&self, version: ManifestVersion, size: u64) {
+ pub(crate) fn set_checkpoint_file_size(&self, version: ManifestVersion, size: u64) {
let mut m = self.manifest_size_map.write().unwrap();
m.insert(FileKey::Checkpoint(version), size);
@@ -595,6 +625,7 @@ impl ManifestObjectStore {
fn unset_file_size(&self, key: &FileKey) {
let mut m = self.manifest_size_map.write().unwrap();
if let Some(val) = m.remove(key) {
+ debug!("Unset file size: {:?}, size: {}", key, val);
self.dec_total_manifest_size(val);
}
}
diff --git a/src/mito2/src/manifest/tests/checkpoint.rs b/src/mito2/src/manifest/tests/checkpoint.rs
index 332d94be12d3..2ebf7cd5bfec 100644
--- a/src/mito2/src/manifest/tests/checkpoint.rs
+++ b/src/mito2/src/manifest/tests/checkpoint.rs
@@ -44,6 +44,18 @@ async fn build_manager(
(env, manager)
}
+async fn build_manager_with_initial_metadata(
+ env: &TestEnv,
+ checkpoint_distance: u64,
+ compress_type: CompressionType,
+) -> RegionManifestManager {
+ let metadata = Arc::new(basic_region_metadata());
+ env.create_manifest_manager(compress_type, checkpoint_distance, Some(metadata.clone()))
+ .await
+ .unwrap()
+ .unwrap()
+}
+
async fn reopen_manager(
env: &TestEnv,
checkpoint_distance: u64,
@@ -265,4 +277,142 @@ async fn generate_checkpoint_with_compression_types(
.await
.unwrap()
.unwrap()
+ .0
+}
+
+fn generate_action_lists(num: usize) -> (Vec<FileId>, Vec<RegionMetaActionList>) {
+ let mut files = vec![];
+ let mut actions = vec![];
+ for _ in 0..num {
+ let file_id = FileId::random();
+ files.push(file_id);
+ let file_meta = FileMeta {
+ region_id: RegionId::new(123, 456),
+ file_id,
+ time_range: (0.into(), 10000000.into()),
+ level: 0,
+ file_size: 1024000,
+ available_indexes: Default::default(),
+ index_file_size: 0,
+ num_rows: 0,
+ num_row_groups: 0,
+ sequence: None,
+ };
+ let action = RegionMetaActionList::new(vec![RegionMetaAction::Edit(RegionEdit {
+ files_to_add: vec![file_meta],
+ files_to_remove: vec![],
+ compaction_time_window: None,
+ flushed_entry_id: None,
+ flushed_sequence: None,
+ })]);
+ actions.push(action);
+ }
+ (files, actions)
+}
+
+#[tokio::test]
+async fn manifest_install_manifest_to() {
+ common_telemetry::init_default_ut_logging();
+ let (env, mut manager) = build_manager(0, CompressionType::Uncompressed).await;
+ let (files, actions) = generate_action_lists(10);
+ for action in actions {
+ manager.update(action).await.unwrap();
+ }
+
+ // Nothing to install
+ let target_version = manager.manifest().manifest_version;
+ let installed_version = manager.install_manifest_to(target_version).await.unwrap();
+ assert_eq!(target_version, installed_version);
+
+ let mut another_manager =
+ build_manager_with_initial_metadata(&env, 0, CompressionType::Uncompressed).await;
+
+ // install manifest changes
+ let target_version = manager.manifest().manifest_version;
+ let installed_version = another_manager
+ .install_manifest_to(target_version - 1)
+ .await
+ .unwrap();
+ assert_eq!(target_version - 1, installed_version);
+ for file_id in files[0..9].iter() {
+ assert!(another_manager.manifest().files.contains_key(file_id));
+ }
+
+ let installed_version = another_manager
+ .install_manifest_to(target_version)
+ .await
+ .unwrap();
+ assert_eq!(target_version, installed_version);
+ for file_id in files.iter() {
+ assert!(another_manager.manifest().files.contains_key(file_id));
+ }
+}
+
+#[tokio::test]
+async fn manifest_install_manifest_to_with_checkpoint() {
+ common_telemetry::init_default_ut_logging();
+ let (env, mut manager) = build_manager(3, CompressionType::Uncompressed).await;
+ let (files, actions) = generate_action_lists(10);
+ for action in actions {
+ manager.update(action).await.unwrap();
+
+ while manager.checkpointer().is_doing_checkpoint() {
+ tokio::time::sleep(Duration::from_millis(10)).await;
+ }
+ }
+
+ // has checkpoint
+ assert!(manager
+ .store()
+ .load_last_checkpoint()
+ .await
+ .unwrap()
+ .is_some());
+
+ // check files
+ let mut expected = vec![
+ "/",
+ "00000000000000000006.checkpoint",
+ "00000000000000000007.json",
+ "00000000000000000008.json",
+ "00000000000000000009.checkpoint",
+ "00000000000000000009.json",
+ "00000000000000000010.json",
+ "_last_checkpoint",
+ ];
+ expected.sort_unstable();
+ let mut paths = manager
+ .store()
+ .get_paths(|e| Some(e.name().to_string()))
+ .await
+ .unwrap();
+
+ paths.sort_unstable();
+ assert_eq!(expected, paths);
+
+ let mut another_manager =
+ build_manager_with_initial_metadata(&env, 0, CompressionType::Uncompressed).await;
+
+ // Install 9 manifests
+ let target_version = manager.manifest().manifest_version;
+ let installed_version = another_manager
+ .install_manifest_to(target_version - 1)
+ .await
+ .unwrap();
+ assert_eq!(target_version - 1, installed_version);
+ for file_id in files[0..9].iter() {
+ assert!(another_manager.manifest().files.contains_key(file_id));
+ }
+
+ // Install all manifests
+ let target_version = manager.manifest().manifest_version;
+ let installed_version = another_manager
+ .install_manifest_to(target_version)
+ .await
+ .unwrap();
+ assert_eq!(target_version, installed_version);
+ for file_id in files.iter() {
+ assert!(another_manager.manifest().files.contains_key(file_id));
+ }
+ assert_eq!(4217, another_manager.store().total_manifest_size());
}
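Note: a hedged sketch (illustrative types, not part of the commit above) of the trimming rule behind `fetch_manifests_strict_from`: the returned list must start exactly at `start_version`, otherwise it is empty and `install_manifest_to` falls back to the last checkpoint.

// Keep the suffix starting at start_version; an empty result signals the
// caller to install the checkpoint first.
fn strict_from(manifests: Vec<(u64, Vec<u8>)>, start_version: u64) -> Vec<(u64, Vec<u8>)> {
    match manifests.iter().position(|(v, _)| *v == start_version) {
        Some(idx) => manifests.into_iter().skip(idx).collect(),
        None => Vec::new(),
    }
}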
|
feat
|
introduce `install_manifest_to` for `RegionManifestManager` (#5742)
|
97cfa3d6c97f4a4ad3413a20e29b5f0859e304fc
|
2023-07-14 15:27:17
|
Lei, HUANG
|
feat: support append entries from multiple regions at a time (#1959)
| false
|
diff --git a/src/log-store/src/noop.rs b/src/log-store/src/noop.rs
index 7d286ed08e71..330911e70ac4 100644
--- a/src/log-store/src/noop.rs
+++ b/src/log-store/src/noop.rs
@@ -25,7 +25,7 @@ pub struct NoopLogStore;
#[derive(Debug, Default, Clone, PartialEq)]
pub struct EntryImpl;
-#[derive(Debug, Clone, Default, Hash, PartialEq)]
+#[derive(Debug, Clone, Default, Eq, PartialEq, Hash)]
pub struct NamespaceImpl;
impl Namespace for NamespaceImpl {
@@ -65,8 +65,8 @@ impl LogStore for NoopLogStore {
Ok(AppendResponse { entry_id: 0 })
}
- async fn append_batch(&self, _ns: &Self::Namespace, _e: Vec<Self::Entry>) -> Result<Vec<Id>> {
- Ok(vec![])
+ async fn append_batch(&self, _e: Vec<Self::Entry>) -> Result<()> {
+ Ok(())
}
async fn read(
@@ -131,10 +131,7 @@ mod tests {
let store = NoopLogStore::default();
let e = store.entry("".as_bytes(), 1, NamespaceImpl::default());
let _ = store.append(e.clone()).await.unwrap();
- assert!(store
- .append_batch(&NamespaceImpl::default(), vec![e])
- .await
- .is_ok());
+ assert!(store.append_batch(vec![e]).await.is_ok());
store
.create_namespace(&NamespaceImpl::default())
.await
diff --git a/src/log-store/src/raft_engine.rs b/src/log-store/src/raft_engine.rs
index 104540066a0d..b053c491baf1 100644
--- a/src/log-store/src/raft_engine.rs
+++ b/src/log-store/src/raft_engine.rs
@@ -36,6 +36,7 @@ impl EntryImpl {
}
}
}
+
impl NamespaceImpl {
pub fn with_id(id: Id) -> Self {
Self {
@@ -52,6 +53,8 @@ impl Hash for NamespaceImpl {
}
}
+impl Eq for NamespaceImpl {}
+
impl Namespace for NamespaceImpl {
fn id(&self) -> store_api::logstore::namespace::Id {
self.id
diff --git a/src/log-store/src/raft_engine/log_store.rs b/src/log-store/src/raft_engine/log_store.rs
index cfa434d7ac46..af71baa26a86 100644
--- a/src/log-store/src/raft_engine/log_store.rs
+++ b/src/log-store/src/raft_engine/log_store.rs
@@ -20,7 +20,7 @@ use common_runtime::{RepeatedTask, TaskFunction};
use common_telemetry::{error, info};
use raft_engine::{Config, Engine, LogBatch, MessageExt, ReadableSize, RecoveryMode};
use snafu::{ensure, ResultExt};
-use store_api::logstore::entry::Id;
+use store_api::logstore::entry::{Entry, Id};
use store_api::logstore::entry_stream::SendableEntryStream;
use store_api::logstore::namespace::Namespace as NamespaceTrait;
use store_api::logstore::{AppendResponse, LogStore};
@@ -29,9 +29,9 @@ use crate::config::LogConfig;
use crate::error;
use crate::error::{
AddEntryLogBatchSnafu, Error, FetchEntrySnafu, IllegalNamespaceSnafu, IllegalStateSnafu,
- RaftEngineSnafu, StartGcTaskSnafu, StopGcTaskSnafu,
+ OverrideCompactedEntrySnafu, RaftEngineSnafu, Result, StartGcTaskSnafu, StopGcTaskSnafu,
};
-use crate::raft_engine::protos::logstore::{EntryImpl as Entry, NamespaceImpl as Namespace};
+use crate::raft_engine::protos::logstore::{EntryImpl, NamespaceImpl as Namespace};
const NAMESPACE_PREFIX: &str = "__sys_namespace_";
const SYSTEM_NAMESPACE: u64 = 0;
@@ -52,7 +52,7 @@ impl TaskFunction<Error> for PurgeExpiredFilesFunction {
"RaftEngineLogStore-gc-task"
}
- async fn call(&mut self) -> Result<(), Error> {
+ async fn call(&mut self) -> Result<()> {
match self.engine.purge_expired_files().context(RaftEngineSnafu) {
Ok(res) => {
// TODO(hl): the retval of purge_expired_files indicates the namespaces need to be compact,
@@ -72,8 +72,7 @@ impl TaskFunction<Error> for PurgeExpiredFilesFunction {
}
impl RaftEngineLogStore {
- pub async fn try_new(config: LogConfig) -> Result<Self, Error> {
- // TODO(hl): set according to available disk space
+ pub async fn try_new(config: LogConfig) -> Result<Self> {
let raft_engine_config = Config {
dir: config.log_file_dir.clone(),
purge_threshold: ReadableSize(config.purge_threshold),
@@ -103,7 +102,7 @@ impl RaftEngineLogStore {
self.gc_task.started()
}
- fn start(&self) -> Result<(), Error> {
+ fn start(&self) -> Result<()> {
self.gc_task
.start(common_runtime::bg_runtime())
.context(StartGcTaskSnafu)
@@ -115,6 +114,24 @@ impl RaftEngineLogStore {
self.engine.last_index(namespace.id()),
)
}
+
+ /// Checks that the entry does not override the min index of its namespace.
+ fn check_entry(&self, e: &EntryImpl) -> Result<()> {
+ if cfg!(debug_assertions) {
+ let ns_id = e.namespace_id;
+ if let Some(first_index) = self.engine.first_index(ns_id) {
+ ensure!(
+ e.id() >= first_index,
+ OverrideCompactedEntrySnafu {
+ namespace: ns_id,
+ first_index,
+ attempt_index: e.id(),
+ }
+ );
+ }
+ }
+ Ok(())
+ }
}
impl Debug for RaftEngineLogStore {
@@ -130,14 +147,14 @@ impl Debug for RaftEngineLogStore {
impl LogStore for RaftEngineLogStore {
type Error = Error;
type Namespace = Namespace;
- type Entry = Entry;
+ type Entry = EntryImpl;
- async fn stop(&self) -> Result<(), Self::Error> {
+ async fn stop(&self) -> Result<()> {
self.gc_task.stop().await.context(StopGcTaskSnafu)
}
/// Append an entry to logstore. Currently of existence of entry's namespace is not checked.
- async fn append(&self, e: Self::Entry) -> Result<AppendResponse, Self::Error> {
+ async fn append(&self, e: Self::Entry) -> Result<AppendResponse> {
ensure!(self.started(), IllegalStateSnafu);
let entry_id = e.id;
let namespace_id = e.namespace_id;
@@ -166,49 +183,27 @@ impl LogStore for RaftEngineLogStore {
/// Append a batch of entries to logstore. `RaftEngineLogStore` assures the atomicity of
/// batch append.
- async fn append_batch(
- &self,
- ns: &Self::Namespace,
- entries: Vec<Self::Entry>,
- ) -> Result<Vec<Id>, Self::Error> {
+ async fn append_batch(&self, entries: Vec<Self::Entry>) -> Result<()> {
ensure!(self.started(), IllegalStateSnafu);
if entries.is_empty() {
- return Ok(vec![]);
+ return Ok(());
}
- let mut min_entry_id = u64::MAX;
- let entry_ids = entries
- .iter()
- .map(|e| {
- let id = e.get_id();
- if id < min_entry_id {
- min_entry_id = id;
- }
- id
- })
- .collect::<Vec<_>>();
-
let mut batch = LogBatch::with_capacity(entries.len());
- batch
- .add_entries::<MessageType>(ns.id, &entries)
- .context(AddEntryLogBatchSnafu)?;
- if let Some(first_index) = self.engine.first_index(ns.id) {
- ensure!(
- min_entry_id >= first_index,
- error::OverrideCompactedEntrySnafu {
- namespace: ns.id,
- first_index,
- attempt_index: min_entry_id,
- }
- );
+ for e in entries {
+ self.check_entry(&e)?;
+ let ns_id = e.namespace_id;
+ batch
+ .add_entries::<MessageType>(ns_id, &[e])
+ .context(AddEntryLogBatchSnafu)?;
}
let _ = self
.engine
.write(&mut batch, self.config.sync_write)
.context(RaftEngineSnafu)?;
- Ok(entry_ids)
+ Ok(())
}
/// Create a stream of entries from logstore in the given namespace. The end of stream is
@@ -217,7 +212,7 @@ impl LogStore for RaftEngineLogStore {
&self,
ns: &Self::Namespace,
id: Id,
- ) -> Result<SendableEntryStream<'_, Self::Entry, Self::Error>, Self::Error> {
+ ) -> Result<SendableEntryStream<'_, Self::Entry, Self::Error>> {
ensure!(self.started(), IllegalStateSnafu);
let engine = self.engine.clone();
@@ -275,7 +270,7 @@ impl LogStore for RaftEngineLogStore {
Ok(Box::pin(s))
}
- async fn create_namespace(&self, ns: &Self::Namespace) -> Result<(), Self::Error> {
+ async fn create_namespace(&self, ns: &Self::Namespace) -> Result<()> {
ensure!(
ns.id != SYSTEM_NAMESPACE,
IllegalNamespaceSnafu { ns: ns.id }
@@ -293,7 +288,7 @@ impl LogStore for RaftEngineLogStore {
Ok(())
}
- async fn delete_namespace(&self, ns: &Self::Namespace) -> Result<(), Self::Error> {
+ async fn delete_namespace(&self, ns: &Self::Namespace) -> Result<()> {
ensure!(
ns.id != SYSTEM_NAMESPACE,
IllegalNamespaceSnafu { ns: ns.id }
@@ -309,7 +304,7 @@ impl LogStore for RaftEngineLogStore {
Ok(())
}
- async fn list_namespaces(&self) -> Result<Vec<Self::Namespace>, Self::Error> {
+ async fn list_namespaces(&self) -> Result<Vec<Self::Namespace>> {
ensure!(self.started(), IllegalStateSnafu);
let mut namespaces: Vec<Namespace> = vec![];
self.engine
@@ -328,7 +323,7 @@ impl LogStore for RaftEngineLogStore {
}
fn entry<D: AsRef<[u8]>>(&self, data: D, id: Id, ns: Self::Namespace) -> Self::Entry {
- Entry {
+ EntryImpl {
id,
data: data.as_ref().to_vec(),
namespace_id: ns.id(),
@@ -343,7 +338,7 @@ impl LogStore for RaftEngineLogStore {
}
}
- async fn obsolete(&self, namespace: Self::Namespace, id: Id) -> Result<(), Self::Error> {
+ async fn obsolete(&self, namespace: Self::Namespace, id: Id) -> Result<()> {
ensure!(self.started(), IllegalStateSnafu);
let obsoleted = self.engine.compact_to(namespace.id(), id + 1);
info!(
@@ -361,7 +356,7 @@ impl LogStore for RaftEngineLogStore {
struct MessageType;
impl MessageExt for MessageType {
- type Entry = Entry;
+ type Entry = EntryImpl;
fn index(e: &Self::Entry) -> u64 {
e.id
@@ -580,4 +575,64 @@ mod tests {
vec.sort_by(|a, b| a.id.partial_cmp(&b.id).unwrap());
assert_eq!(101, vec.first().unwrap().id);
}
+
+ #[tokio::test]
+ async fn test_append_batch() {
+ common_telemetry::init_default_ut_logging();
+ let dir = create_temp_dir("logstore-append-batch-test");
+
+ let config = LogConfig {
+ log_file_dir: dir.path().to_str().unwrap().to_string(),
+ file_size: ReadableSize::mb(2).0,
+ purge_threshold: ReadableSize::mb(4).0,
+ purge_interval: Duration::from_secs(5),
+ ..Default::default()
+ };
+
+ let logstore = RaftEngineLogStore::try_new(config).await.unwrap();
+
+ let entries = (0..8)
+ .flat_map(|ns_id| {
+ let data = [ns_id as u8].repeat(4096);
+ (0..16).map(move |idx| Entry::create(idx, ns_id, data.clone()))
+ })
+ .collect();
+
+ logstore.append_batch(entries).await.unwrap();
+ for ns_id in 0..8 {
+ let namespace = Namespace::with_id(ns_id);
+ let (first, last) = logstore.span(&namespace);
+ assert_eq!(0, first.unwrap());
+ assert_eq!(15, last.unwrap());
+ }
+ }
+
+ #[tokio::test]
+ async fn test_append_batch_interleaved() {
+ common_telemetry::init_default_ut_logging();
+ let dir = create_temp_dir("logstore-append-batch-test");
+
+ let config = LogConfig {
+ log_file_dir: dir.path().to_str().unwrap().to_string(),
+ file_size: ReadableSize::mb(2).0,
+ purge_threshold: ReadableSize::mb(4).0,
+ purge_interval: Duration::from_secs(5),
+ ..Default::default()
+ };
+
+ let logstore = RaftEngineLogStore::try_new(config).await.unwrap();
+
+ let entries = vec![
+ Entry::create(0, 0, [b'0'; 4096].to_vec()),
+ Entry::create(1, 0, [b'0'; 4096].to_vec()),
+ Entry::create(0, 1, [b'1'; 4096].to_vec()),
+ Entry::create(2, 0, [b'0'; 4096].to_vec()),
+ Entry::create(1, 1, [b'1'; 4096].to_vec()),
+ ];
+
+ logstore.append_batch(entries).await.unwrap();
+
+ assert_eq!((Some(0), Some(2)), logstore.span(&Namespace::with_id(0)));
+ assert_eq!((Some(0), Some(1)), logstore.span(&Namespace::with_id(1)));
+ }
}
diff --git a/src/store-api/src/logstore.rs b/src/store-api/src/logstore.rs
index f00914015c27..975715074ccc 100644
--- a/src/store-api/src/logstore.rs
+++ b/src/store-api/src/logstore.rs
@@ -36,14 +36,10 @@ pub trait LogStore: Send + Sync + 'static + std::fmt::Debug {
/// Append an `Entry` to WAL with given namespace and return append response containing
/// the entry id.
- async fn append(&self, mut e: Self::Entry) -> Result<AppendResponse, Self::Error>;
+ async fn append(&self, e: Self::Entry) -> Result<AppendResponse, Self::Error>;
/// Append a batch of entries atomically and return the offset of first entry.
- async fn append_batch(
- &self,
- ns: &Self::Namespace,
- e: Vec<Self::Entry>,
- ) -> Result<Vec<Id>, Self::Error>;
+ async fn append_batch(&self, e: Vec<Self::Entry>) -> Result<(), Self::Error>;
/// Create a new `EntryStream` to asynchronously generates `Entry` with ids
/// starting from `id`.
diff --git a/src/store-api/src/logstore/namespace.rs b/src/store-api/src/logstore/namespace.rs
index 8cecd47588da..35a136d809ac 100644
--- a/src/store-api/src/logstore/namespace.rs
+++ b/src/store-api/src/logstore/namespace.rs
@@ -16,6 +16,6 @@ use std::hash::Hash;
pub type Id = u64;
-pub trait Namespace: Send + Sync + Clone + std::fmt::Debug + Hash + PartialEq {
+pub trait Namespace: Send + Sync + Clone + std::fmt::Debug + Hash + PartialEq + Eq {
fn id(&self) -> Id;
}
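Note: a hedged sketch (names illustrative; the real check only runs under debug_assertions) of the guard `check_entry` applies to each entry: an entry must not target an index below the namespace's first, i.e. already-compacted, index.

// A None first index means the namespace has no compacted prefix yet.
fn entry_not_compacted(entry_id: u64, first_index: Option<u64>) -> bool {
    first_index.map_or(true, |first| entry_id >= first)
}

// e.g. first_index = Some(100): entry 99 is rejected, entry 100 is accepted.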
|
feat
|
support append entries from multiple regions at a time (#1959)
|
204b9433b8beecbb412f429a91b71bbcc1cbbf5f
|
2024-01-17 08:54:40
|
dennis zhuang
|
feat: adds date_format function (#3167)
| false
|
diff --git a/src/common/function/src/function.rs b/src/common/function/src/function.rs
index 3225183bd47f..3b6a51b0289f 100644
--- a/src/common/function/src/function.rs
+++ b/src/common/function/src/function.rs
@@ -15,21 +15,22 @@
use std::fmt;
use std::sync::Arc;
-use chrono_tz::Tz;
use common_query::error::Result;
use common_query::prelude::Signature;
+use common_time::timezone::get_timezone;
+use common_time::Timezone;
use datatypes::data_type::ConcreteDataType;
use datatypes::vectors::VectorRef;
#[derive(Clone)]
pub struct FunctionContext {
- pub tz: Tz,
+ pub timezone: Timezone,
}
impl Default for FunctionContext {
fn default() -> Self {
Self {
- tz: "UTC".parse::<Tz>().unwrap(),
+ timezone: get_timezone(None).clone(),
}
}
}
diff --git a/src/common/function/src/scalars/date.rs b/src/common/function/src/scalars/date.rs
index 86b0c7db6202..4b8e714ec5fa 100644
--- a/src/common/function/src/scalars/date.rs
+++ b/src/common/function/src/scalars/date.rs
@@ -14,9 +14,11 @@
use std::sync::Arc;
mod date_add;
+mod date_format;
mod date_sub;
use date_add::DateAddFunction;
+use date_format::DateFormatFunction;
use date_sub::DateSubFunction;
use crate::function_registry::FunctionRegistry;
@@ -27,5 +29,6 @@ impl DateFunction {
pub fn register(registry: &FunctionRegistry) {
registry.register(Arc::new(DateAddFunction));
registry.register(Arc::new(DateSubFunction));
+ registry.register(Arc::new(DateFormatFunction));
}
}
diff --git a/src/common/function/src/scalars/date/date_format.rs b/src/common/function/src/scalars/date/date_format.rs
new file mode 100644
index 000000000000..d94f115e54a9
--- /dev/null
+++ b/src/common/function/src/scalars/date/date_format.rs
@@ -0,0 +1,306 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::fmt;
+
+use common_error::ext::BoxedError;
+use common_query::error::{self, InvalidFuncArgsSnafu, Result, UnsupportedInputDataTypeSnafu};
+use common_query::prelude::Signature;
+use datatypes::prelude::{ConcreteDataType, MutableVector, ScalarVectorBuilder};
+use datatypes::vectors::{StringVectorBuilder, VectorRef};
+use snafu::{ensure, ResultExt};
+
+use crate::function::{Function, FunctionContext};
+use crate::helper;
+
+/// A function that formats a timestamp/date/datetime into a string according to the given format
+#[derive(Clone, Debug, Default)]
+pub struct DateFormatFunction;
+
+const NAME: &str = "date_format";
+
+impl Function for DateFormatFunction {
+ fn name(&self) -> &str {
+ NAME
+ }
+
+ fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
+ Ok(ConcreteDataType::string_datatype())
+ }
+
+ fn signature(&self) -> Signature {
+ helper::one_of_sigs2(
+ vec![
+ ConcreteDataType::date_datatype(),
+ ConcreteDataType::datetime_datatype(),
+ ConcreteDataType::timestamp_second_datatype(),
+ ConcreteDataType::timestamp_millisecond_datatype(),
+ ConcreteDataType::timestamp_microsecond_datatype(),
+ ConcreteDataType::timestamp_nanosecond_datatype(),
+ ],
+ vec![ConcreteDataType::string_datatype()],
+ )
+ }
+
+ fn eval(&self, func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
+ ensure!(
+ columns.len() == 2,
+ InvalidFuncArgsSnafu {
+ err_msg: format!(
+ "The length of the args is not correct, expect 2, have: {}",
+ columns.len()
+ ),
+ }
+ );
+
+ let left = &columns[0];
+ let formats = &columns[1];
+
+ let size = left.len();
+ let left_datatype = columns[0].data_type();
+ let mut results = StringVectorBuilder::with_capacity(size);
+
+ match left_datatype {
+ ConcreteDataType::Timestamp(_) => {
+ for i in 0..size {
+ let ts = left.get(i).as_timestamp();
+ let format = formats.get(i).as_string();
+
+ let result = match (ts, format) {
+ (Some(ts), Some(fmt)) => Some(
+ ts.as_formatted_string(&fmt, Some(&func_ctx.timezone))
+ .map_err(BoxedError::new)
+ .context(error::ExecuteSnafu)?,
+ ),
+ _ => None,
+ };
+
+ results.push(result.as_deref());
+ }
+ }
+ ConcreteDataType::Date(_) => {
+ for i in 0..size {
+ let date = left.get(i).as_date();
+ let format = formats.get(i).as_string();
+
+ let result = match (date, format) {
+ (Some(date), Some(fmt)) => date
+ .as_formatted_string(&fmt, Some(&func_ctx.timezone))
+ .map_err(BoxedError::new)
+ .context(error::ExecuteSnafu)?,
+ _ => None,
+ };
+
+ results.push(result.as_deref());
+ }
+ }
+ ConcreteDataType::DateTime(_) => {
+ for i in 0..size {
+ let datetime = left.get(i).as_datetime();
+ let format = formats.get(i).as_string();
+
+ let result = match (datetime, format) {
+ (Some(datetime), Some(fmt)) => datetime
+ .as_formatted_string(&fmt, Some(&func_ctx.timezone))
+ .map_err(BoxedError::new)
+ .context(error::ExecuteSnafu)?,
+ _ => None,
+ };
+
+ results.push(result.as_deref());
+ }
+ }
+ _ => {
+ return UnsupportedInputDataTypeSnafu {
+ function: NAME,
+ datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(),
+ }
+ .fail();
+ }
+ }
+
+ Ok(results.to_vector())
+ }
+}
+
+impl fmt::Display for DateFormatFunction {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(f, "DATE_FORMAT")
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::sync::Arc;
+
+ use common_query::prelude::{TypeSignature, Volatility};
+ use datatypes::prelude::{ConcreteDataType, ScalarVector};
+ use datatypes::value::Value;
+ use datatypes::vectors::{DateTimeVector, DateVector, StringVector, TimestampSecondVector};
+
+ use super::{DateFormatFunction, *};
+
+ #[test]
+ fn test_date_format_misc() {
+ let f = DateFormatFunction;
+ assert_eq!("date_format", f.name());
+ assert_eq!(
+ ConcreteDataType::string_datatype(),
+ f.return_type(&[ConcreteDataType::timestamp_microsecond_datatype()])
+ .unwrap()
+ );
+ assert_eq!(
+ ConcreteDataType::string_datatype(),
+ f.return_type(&[ConcreteDataType::timestamp_second_datatype()])
+ .unwrap()
+ );
+ assert_eq!(
+ ConcreteDataType::string_datatype(),
+ f.return_type(&[ConcreteDataType::date_datatype()]).unwrap()
+ );
+ assert_eq!(
+ ConcreteDataType::string_datatype(),
+ f.return_type(&[ConcreteDataType::datetime_datatype()])
+ .unwrap()
+ );
+ assert!(matches!(f.signature(),
+ Signature {
+ type_signature: TypeSignature::OneOf(sigs),
+ volatility: Volatility::Immutable
+ } if sigs.len() == 6));
+ }
+
+ #[test]
+ fn test_timestamp_date_format() {
+ let f = DateFormatFunction;
+
+ let times = vec![Some(123), None, Some(42), None];
+ let formats = vec![
+ "%Y-%m-%d %T.%3f",
+ "%Y-%m-%d %T.%3f",
+ "%Y-%m-%d %T.%3f",
+ "%Y-%m-%d %T.%3f",
+ ];
+ let results = [
+ Some("1970-01-01 00:02:03.000"),
+ None,
+ Some("1970-01-01 00:00:42.000"),
+ None,
+ ];
+
+ let time_vector = TimestampSecondVector::from(times.clone());
+ let interval_vector = StringVector::from_vec(formats);
+ let args: Vec<VectorRef> = vec![Arc::new(time_vector), Arc::new(interval_vector)];
+ let vector = f.eval(FunctionContext::default(), &args).unwrap();
+
+ assert_eq!(4, vector.len());
+ for (i, _t) in times.iter().enumerate() {
+ let v = vector.get(i);
+ let result = results.get(i).unwrap();
+
+ if result.is_none() {
+ assert_eq!(Value::Null, v);
+ continue;
+ }
+ match v {
+ Value::String(s) => {
+ assert_eq!(s.as_utf8(), result.unwrap());
+ }
+ _ => unreachable!(),
+ }
+ }
+ }
+
+ #[test]
+ fn test_date_date_format() {
+ let f = DateFormatFunction;
+
+ let dates = vec![Some(123), None, Some(42), None];
+ let formats = vec![
+ "%Y-%m-%d %T.%3f",
+ "%Y-%m-%d %T.%3f",
+ "%Y-%m-%d %T.%3f",
+ "%Y-%m-%d %T.%3f",
+ ];
+ let results = [
+ Some("1970-05-04 00:00:00.000"),
+ None,
+ Some("1970-02-12 00:00:00.000"),
+ None,
+ ];
+
+ let date_vector = DateVector::from(dates.clone());
+ let interval_vector = StringVector::from_vec(formats);
+ let args: Vec<VectorRef> = vec![Arc::new(date_vector), Arc::new(interval_vector)];
+ let vector = f.eval(FunctionContext::default(), &args).unwrap();
+
+ assert_eq!(4, vector.len());
+ for (i, _t) in dates.iter().enumerate() {
+ let v = vector.get(i);
+ let result = results.get(i).unwrap();
+
+ if result.is_none() {
+ assert_eq!(Value::Null, v);
+ continue;
+ }
+ match v {
+ Value::String(s) => {
+ assert_eq!(s.as_utf8(), result.unwrap());
+ }
+ _ => unreachable!(),
+ }
+ }
+ }
+
+ #[test]
+ fn test_datetime_date_format() {
+ let f = DateFormatFunction;
+
+ let dates = vec![Some(123), None, Some(42), None];
+ let formats = vec![
+ "%Y-%m-%d %T.%3f",
+ "%Y-%m-%d %T.%3f",
+ "%Y-%m-%d %T.%3f",
+ "%Y-%m-%d %T.%3f",
+ ];
+ let results = [
+ Some("1970-01-01 00:00:00.123"),
+ None,
+ Some("1970-01-01 00:00:00.042"),
+ None,
+ ];
+
+ let date_vector = DateTimeVector::from(dates.clone());
+ let interval_vector = StringVector::from_vec(formats);
+ let args: Vec<VectorRef> = vec![Arc::new(date_vector), Arc::new(interval_vector)];
+ let vector = f.eval(FunctionContext::default(), &args).unwrap();
+
+ assert_eq!(4, vector.len());
+ for (i, _t) in dates.iter().enumerate() {
+ let v = vector.get(i);
+ let result = results.get(i).unwrap();
+
+ if result.is_none() {
+ assert_eq!(Value::Null, v);
+ continue;
+ }
+ match v {
+ Value::String(s) => {
+ assert_eq!(s.as_utf8(), result.unwrap());
+ }
+ _ => unreachable!(),
+ }
+ }
+ }
+}
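A minimal usage sketch of the new function, mirroring the test module above (same crate imports assumed):

use std::sync::Arc;

use datatypes::vectors::{StringVector, TimestampSecondVector, VectorRef};

let f = DateFormatFunction;
// One timestamp column plus one format string per row.
let times: VectorRef = Arc::new(TimestampSecondVector::from(vec![Some(42)]));
let formats: VectorRef = Arc::new(StringVector::from_vec(vec!["%Y-%m-%d %T"]));
let out = f.eval(FunctionContext::default(), &[times, formats]).unwrap();
// With the default (UTC) function context this yields "1970-01-01 00:00:42".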
diff --git a/src/common/function/src/scalars/expression/ctx.rs b/src/common/function/src/scalars/expression/ctx.rs
index 65844548a297..362997ab24c5 100644
--- a/src/common/function/src/scalars/expression/ctx.rs
+++ b/src/common/function/src/scalars/expression/ctx.rs
@@ -12,20 +12,20 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use chrono_tz::Tz;
use common_query::error::Error;
+use common_time::timezone::get_timezone;
+use common_time::Timezone;
pub struct EvalContext {
- _tz: Tz,
+ pub timezone: Timezone,
pub error: Option<Error>,
}
impl Default for EvalContext {
fn default() -> Self {
- let tz = "UTC".parse::<Tz>().unwrap();
Self {
error: None,
- _tz: tz,
+ timezone: get_timezone(None).clone(),
}
}
}
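A small sketch of overriding the default timezone in an EvalContext (field names taken from the struct above; Timezone::from_tz_string as used elsewhere in this change):

use common_time::Timezone;

// Evaluate expressions under an explicit timezone instead of the server default.
let ctx = EvalContext {
    timezone: Timezone::from_tz_string("Asia/Shanghai").unwrap(),
    error: None,
};
// EvalContext::default() falls back to get_timezone(None), i.e. the server setting.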
diff --git a/src/common/function/src/scalars/udf.rs b/src/common/function/src/scalars/udf.rs
index da67f321bf8c..5b91ad1302ac 100644
--- a/src/common/function/src/scalars/udf.rs
+++ b/src/common/function/src/scalars/udf.rs
@@ -34,6 +34,8 @@ pub fn create_udf(func: FunctionRef) -> ScalarUdf {
let func_cloned = func.clone();
let fun: ScalarFunctionImplementation = Arc::new(move |args: &[ColumnarValue]| {
+ // FIXME(dennis): set the timezone into function context
+ // Question: how to get the timezone from the query context?
let func_ctx = FunctionContext::default();
let len = args
diff --git a/src/common/query/src/error.rs b/src/common/query/src/error.rs
index 25f9d3c71523..86a3d5e958c8 100644
--- a/src/common/query/src/error.rs
+++ b/src/common/query/src/error.rs
@@ -163,6 +163,12 @@ pub enum Error {
source: DataTypeError,
},
+ #[snafu(display("Failed to execute function: {source}"))]
+ Execute {
+ location: Location,
+ source: BoxedError,
+ },
+
#[snafu(display("Invalid function args: {}", err_msg))]
InvalidFuncArgs { err_msg: String, location: Location },
}
@@ -201,6 +207,7 @@ impl ErrorExt for Error {
Error::ConvertDfRecordBatchStream { source, .. } => source.status_code(),
Error::ExecutePhysicalPlan { source, .. } => source.status_code(),
+ Error::Execute { source, .. } => source.status_code(),
}
}
diff --git a/src/common/time/src/date.rs b/src/common/time/src/date.rs
index 7ff19145430a..d4182b7c1b6a 100644
--- a/src/common/time/src/date.rs
+++ b/src/common/time/src/date.rs
@@ -12,16 +12,18 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::fmt::{Display, Formatter};
+use std::fmt::{Display, Formatter, Write};
use std::str::FromStr;
-use chrono::{Datelike, Days, Months, NaiveDate};
+use chrono::{Datelike, Days, Months, NaiveDate, NaiveTime, TimeZone};
use serde::{Deserialize, Serialize};
use serde_json::Value;
use snafu::ResultExt;
use crate::error::{Error, ParseDateStrSnafu, Result};
use crate::interval::Interval;
+use crate::timezone::get_timezone;
+use crate::Timezone;
const UNIX_EPOCH_FROM_CE: i32 = 719_163;
@@ -84,6 +86,40 @@ impl Date {
NaiveDate::from_num_days_from_ce_opt(UNIX_EPOCH_FROM_CE + self.0)
}
+ /// Format Date for given format and timezone.
+ /// If `tz==None`, the server default timezone will be used.
+ pub fn as_formatted_string(
+ self,
+ pattern: &str,
+ timezone: Option<&Timezone>,
+ ) -> Result<Option<String>> {
+ if let Some(v) = self.to_chrono_date() {
+ // Safety: always success
+ let time = NaiveTime::from_hms_nano_opt(0, 0, 0, 0).unwrap();
+ let v = v.and_time(time);
+ let mut formatted = String::new();
+
+ match get_timezone(timezone) {
+ Timezone::Offset(offset) => {
+ write!(
+ formatted,
+ "{}",
+ offset.from_utc_datetime(&v).format(pattern)
+ )
+ .context(crate::error::FormatSnafu { pattern })?;
+ }
+ Timezone::Named(tz) => {
+ write!(formatted, "{}", tz.from_utc_datetime(&v).format(pattern))
+ .context(crate::error::FormatSnafu { pattern })?;
+ }
+ }
+
+ return Ok(Some(formatted));
+ }
+
+ Ok(None)
+ }
+
pub fn to_secs(&self) -> i64 {
(self.0 as i64) * 24 * 3600
}
@@ -170,6 +206,37 @@ mod tests {
assert_eq!(date, Date::from_str(&date.to_string()).unwrap());
}
+ #[test]
+ fn test_as_formatted_string() {
+ let d: Date = 42.into();
+
+ assert_eq!(
+ "1970-02-12",
+ d.as_formatted_string("%Y-%m-%d", None).unwrap().unwrap()
+ );
+ assert_eq!(
+ "1970-02-12 00:00:00",
+ d.as_formatted_string("%Y-%m-%d %H:%M:%S", None)
+ .unwrap()
+ .unwrap()
+ );
+ assert_eq!(
+ "1970-02-12T00:00:00:000",
+ d.as_formatted_string("%Y-%m-%dT%H:%M:%S:%3f", None)
+ .unwrap()
+ .unwrap()
+ );
+ assert_eq!(
+ "1970-02-12T08:00:00:000",
+ d.as_formatted_string(
+ "%Y-%m-%dT%H:%M:%S:%3f",
+ Some(&Timezone::from_tz_string("Asia/Shanghai").unwrap())
+ )
+ .unwrap()
+ .unwrap()
+ );
+ }
+
#[test]
pub fn test_from() {
let d: Date = 42.into();
diff --git a/src/common/time/src/datetime.rs b/src/common/time/src/datetime.rs
index 7dc872b8f2ac..722467cfab1f 100644
--- a/src/common/time/src/datetime.rs
+++ b/src/common/time/src/datetime.rs
@@ -12,15 +12,16 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::fmt::{Display, Formatter};
+use std::fmt::{Display, Formatter, Write};
use std::str::FromStr;
use std::time::Duration;
use chrono::{Days, LocalResult, Months, NaiveDateTime, TimeZone as ChronoTimeZone, Utc};
use serde::{Deserialize, Serialize};
+use snafu::ResultExt;
use crate::error::{Error, InvalidDateStrSnafu, Result};
-use crate::timezone::Timezone;
+use crate::timezone::{get_timezone, Timezone};
use crate::util::{format_utc_datetime, local_datetime_to_utc};
use crate::{Date, Interval};
@@ -110,7 +111,38 @@ impl DateTime {
NaiveDateTime::from_timestamp_millis(self.0)
}
- pub fn to_chrono_datetime_with_timezone(&self, tz: Option<Timezone>) -> Option<NaiveDateTime> {
+ /// Format DateTime for given format and timezone.
+ /// If `tz==None`, the server default timezone will be used.
+ pub fn as_formatted_string(
+ self,
+ pattern: &str,
+ timezone: Option<&Timezone>,
+ ) -> Result<Option<String>> {
+ if let Some(v) = self.to_chrono_datetime() {
+ let mut formatted = String::new();
+
+ match get_timezone(timezone) {
+ Timezone::Offset(offset) => {
+ write!(
+ formatted,
+ "{}",
+ offset.from_utc_datetime(&v).format(pattern)
+ )
+ .context(crate::error::FormatSnafu { pattern })?;
+ }
+ Timezone::Named(tz) => {
+ write!(formatted, "{}", tz.from_utc_datetime(&v).format(pattern))
+ .context(crate::error::FormatSnafu { pattern })?;
+ }
+ }
+
+ return Ok(Some(formatted));
+ }
+
+ Ok(None)
+ }
+
+ pub fn to_chrono_datetime_with_timezone(&self, tz: Option<&Timezone>) -> Option<NaiveDateTime> {
let datetime = self.to_chrono_datetime();
datetime.map(|v| match tz {
Some(Timezone::Offset(offset)) => offset.from_utc_datetime(&v).naive_local(),
@@ -211,6 +243,38 @@ mod tests {
assert_eq!(28800000, ts);
}
+ #[test]
+ fn test_as_formatted_string() {
+ let d: DateTime = DateTime::new(1000);
+
+ assert_eq!(
+ "1970-01-01",
+ d.as_formatted_string("%Y-%m-%d", None).unwrap().unwrap()
+ );
+ assert_eq!(
+ "1970-01-01 00:00:01",
+ d.as_formatted_string("%Y-%m-%d %H:%M:%S", None)
+ .unwrap()
+ .unwrap()
+ );
+ assert_eq!(
+ "1970-01-01T00:00:01:000",
+ d.as_formatted_string("%Y-%m-%dT%H:%M:%S:%3f", None)
+ .unwrap()
+ .unwrap()
+ );
+
+ assert_eq!(
+ "1970-01-01T08:00:01:000",
+ d.as_formatted_string(
+ "%Y-%m-%dT%H:%M:%S:%3f",
+ Some(&Timezone::from_tz_string("Asia/Shanghai").unwrap())
+ )
+ .unwrap()
+ .unwrap()
+ );
+ }
+
#[test]
fn test_from_max_date() {
let date = Date::new(i32::MAX);
diff --git a/src/common/time/src/error.rs b/src/common/time/src/error.rs
index a1d225610531..c4c025dc92cf 100644
--- a/src/common/time/src/error.rs
+++ b/src/common/time/src/error.rs
@@ -68,6 +68,14 @@ pub enum Error {
#[snafu(display("Invalid timezone string {raw}"))]
ParseTimezoneName { raw: String, location: Location },
+
+ #[snafu(display("Failed to format, pattern: {}", pattern))]
+ Format {
+ pattern: String,
+ #[snafu(source)]
+ error: std::fmt::Error,
+ location: Location,
+ },
}
impl ErrorExt for Error {
@@ -76,6 +84,7 @@ impl ErrorExt for Error {
Error::ParseDateStr { .. }
| Error::ParseTimestamp { .. }
| Error::InvalidTimezoneOffset { .. }
+ | Error::Format { .. }
| Error::ParseOffsetStr { .. }
| Error::ParseTimezoneName { .. } => StatusCode::InvalidArguments,
Error::TimestampOverflow { .. } => StatusCode::Internal,
@@ -93,6 +102,7 @@ impl ErrorExt for Error {
fn location_opt(&self) -> Option<common_error::snafu::Location> {
match self {
Error::ParseTimestamp { location, .. }
+ | Error::Format { location, .. }
| Error::TimestampOverflow { location, .. }
| Error::ArithmeticOverflow { location, .. } => Some(*location),
Error::ParseDateStr { .. }
diff --git a/src/common/time/src/time.rs b/src/common/time/src/time.rs
index fdcc9ee32ec2..8490195ff601 100644
--- a/src/common/time/src/time.rs
+++ b/src/common/time/src/time.rs
@@ -115,11 +115,11 @@ impl Time {
/// Format Time for given timezone.
/// When timezone is None, using system timezone by default.
- pub fn to_timezone_aware_string(&self, tz: Option<Timezone>) -> String {
+ pub fn to_timezone_aware_string(&self, tz: Option<&Timezone>) -> String {
self.as_formatted_string("%H:%M:%S%.f", tz)
}
- fn as_formatted_string(self, pattern: &str, timezone: Option<Timezone>) -> String {
+ fn as_formatted_string(self, pattern: &str, timezone: Option<&Timezone>) -> String {
if let Some(time) = self.to_chrono_time() {
let date = Utc::now().date_naive();
let datetime = NaiveDateTime::new(date, time);
@@ -380,37 +380,39 @@ mod tests {
assert_eq!(
"08:00:00.001",
Time::new(1, TimeUnit::Millisecond)
- .to_timezone_aware_string(Some(Timezone::from_tz_string("SYSTEM").unwrap()))
+ .to_timezone_aware_string(Some(&Timezone::from_tz_string("SYSTEM").unwrap()))
);
assert_eq!(
"08:00:00.001",
Time::new(1, TimeUnit::Millisecond)
- .to_timezone_aware_string(Some(Timezone::from_tz_string("+08:00").unwrap()))
+ .to_timezone_aware_string(Some(&Timezone::from_tz_string("+08:00").unwrap()))
);
assert_eq!(
"07:00:00.001",
Time::new(1, TimeUnit::Millisecond)
- .to_timezone_aware_string(Some(Timezone::from_tz_string("+07:00").unwrap()))
+ .to_timezone_aware_string(Some(&Timezone::from_tz_string("+07:00").unwrap()))
);
assert_eq!(
"23:00:00.001",
Time::new(1, TimeUnit::Millisecond)
- .to_timezone_aware_string(Some(Timezone::from_tz_string("-01:00").unwrap()))
+ .to_timezone_aware_string(Some(&Timezone::from_tz_string("-01:00").unwrap()))
);
assert_eq!(
"08:00:00.001",
- Time::new(1, TimeUnit::Millisecond)
- .to_timezone_aware_string(Some(Timezone::from_tz_string("Asia/Shanghai").unwrap()))
+ Time::new(1, TimeUnit::Millisecond).to_timezone_aware_string(Some(
+ &Timezone::from_tz_string("Asia/Shanghai").unwrap()
+ ))
);
assert_eq!(
"00:00:00.001",
Time::new(1, TimeUnit::Millisecond)
- .to_timezone_aware_string(Some(Timezone::from_tz_string("UTC").unwrap()))
+ .to_timezone_aware_string(Some(&Timezone::from_tz_string("UTC").unwrap()))
);
assert_eq!(
"03:00:00.001",
- Time::new(1, TimeUnit::Millisecond)
- .to_timezone_aware_string(Some(Timezone::from_tz_string("Europe/Moscow").unwrap()))
+ Time::new(1, TimeUnit::Millisecond).to_timezone_aware_string(Some(
+ &Timezone::from_tz_string("Europe/Moscow").unwrap()
+ ))
);
}
}
diff --git a/src/common/time/src/timestamp.rs b/src/common/time/src/timestamp.rs
index d09890544bd3..2f13b09a1503 100644
--- a/src/common/time/src/timestamp.rs
+++ b/src/common/time/src/timestamp.rs
@@ -14,7 +14,7 @@
use core::default::Default;
use std::cmp::Ordering;
-use std::fmt::{Display, Formatter};
+use std::fmt::{Display, Formatter, Write};
use std::hash::{Hash, Hasher};
use std::str::FromStr;
use std::time::Duration;
@@ -26,7 +26,9 @@ use chrono::{
use serde::{Deserialize, Serialize};
use snafu::{OptionExt, ResultExt};
-use crate::error::{ArithmeticOverflowSnafu, Error, ParseTimestampSnafu, TimestampOverflowSnafu};
+use crate::error::{
+ ArithmeticOverflowSnafu, Error, ParseTimestampSnafu, Result, TimestampOverflowSnafu,
+};
use crate::timezone::{get_timezone, Timezone};
use crate::util::div_ceil;
use crate::{error, Interval};
@@ -290,32 +292,50 @@ impl Timestamp {
/// Format timestamp to ISO8601 string. If the timestamp exceeds what chrono timestamp can
/// represent, this function simply prints the timestamp unit and value as a plain string.
pub fn to_iso8601_string(&self) -> String {
+ // Safety: the format is valid
self.as_formatted_string("%Y-%m-%d %H:%M:%S%.f%z", None)
+ .unwrap()
}
/// Format timestamp use **system timezone**.
pub fn to_local_string(&self) -> String {
+ // Safety: the format is valid
self.as_formatted_string("%Y-%m-%d %H:%M:%S%.f", None)
+ .unwrap()
}
/// Format timestamp for given timezone.
/// If `tz==None`, the server default timezone will be used.
- pub fn to_timezone_aware_string(&self, tz: Option<Timezone>) -> String {
+ pub fn to_timezone_aware_string(&self, tz: Option<&Timezone>) -> String {
+ // Safety: the format is valid
self.as_formatted_string("%Y-%m-%d %H:%M:%S%.f", tz)
+ .unwrap()
}
- fn as_formatted_string(self, pattern: &str, timezone: Option<Timezone>) -> String {
+ /// Format timestamp for given format and timezone.
+ /// If `tz==None`, the server default timezone will be used.
+ pub fn as_formatted_string(self, pattern: &str, timezone: Option<&Timezone>) -> Result<String> {
if let Some(v) = self.to_chrono_datetime() {
+ let mut formatted = String::new();
+
match get_timezone(timezone) {
Timezone::Offset(offset) => {
- format!("{}", offset.from_utc_datetime(&v).format(pattern))
+ write!(
+ formatted,
+ "{}",
+ offset.from_utc_datetime(&v).format(pattern)
+ )
+ .context(crate::error::FormatSnafu { pattern })?;
}
Timezone::Named(tz) => {
- format!("{}", tz.from_utc_datetime(&v).format(pattern))
+ write!(formatted, "{}", tz.from_utc_datetime(&v).format(pattern))
+ .context(crate::error::FormatSnafu { pattern })?;
}
}
+
+ Ok(formatted)
} else {
- format!("[Timestamp{}: {}]", self.unit, self.value)
+ Ok(format!("[Timestamp{}: {}]", self.unit, self.value))
}
}
@@ -324,7 +344,7 @@ impl Timestamp {
NaiveDateTime::from_timestamp_opt(sec, nsec)
}
- pub fn to_chrono_datetime_with_timezone(&self, tz: Option<Timezone>) -> Option<NaiveDateTime> {
+ pub fn to_chrono_datetime_with_timezone(&self, tz: Option<&Timezone>) -> Option<NaiveDateTime> {
let datetime = self.to_chrono_datetime();
datetime.map(|v| match tz {
Some(Timezone::Offset(offset)) => offset.from_utc_datetime(&v).naive_local(),
@@ -369,7 +389,7 @@ impl FromStr for Timestamp {
/// - `2022-09-20 14:16:43` (Zulu timezone, without T)
/// - `2022-09-20 14:16:43.012345` (Zulu timezone, without T)
#[allow(deprecated)]
- fn from_str(s: &str) -> Result<Self, Self::Err> {
+ fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
// RFC3339 timestamp (with a T)
let s = s.trim();
if let Ok(ts) = DateTime::parse_from_rfc3339(s) {
@@ -1113,47 +1133,77 @@ mod tests {
assert_eq!(
"1970-01-01 08:00:00.001",
Timestamp::new(1, TimeUnit::Millisecond)
- .to_timezone_aware_string(Some(Timezone::from_tz_string("SYSTEM").unwrap()))
+ .to_timezone_aware_string(Some(&Timezone::from_tz_string("SYSTEM").unwrap()))
);
assert_eq!(
"1970-01-01 08:00:00.001",
Timestamp::new(1, TimeUnit::Millisecond)
- .to_timezone_aware_string(Some(Timezone::from_tz_string("SYSTEM").unwrap()))
+ .to_timezone_aware_string(Some(&Timezone::from_tz_string("SYSTEM").unwrap()))
);
assert_eq!(
"1970-01-01 08:00:00.001",
Timestamp::new(1, TimeUnit::Millisecond)
- .to_timezone_aware_string(Some(Timezone::from_tz_string("+08:00").unwrap()))
+ .to_timezone_aware_string(Some(&Timezone::from_tz_string("+08:00").unwrap()))
);
assert_eq!(
"1970-01-01 07:00:00.001",
Timestamp::new(1, TimeUnit::Millisecond)
- .to_timezone_aware_string(Some(Timezone::from_tz_string("+07:00").unwrap()))
+ .to_timezone_aware_string(Some(&Timezone::from_tz_string("+07:00").unwrap()))
);
assert_eq!(
"1969-12-31 23:00:00.001",
Timestamp::new(1, TimeUnit::Millisecond)
- .to_timezone_aware_string(Some(Timezone::from_tz_string("-01:00").unwrap()))
+ .to_timezone_aware_string(Some(&Timezone::from_tz_string("-01:00").unwrap()))
);
assert_eq!(
"1970-01-01 08:00:00.001",
- Timestamp::new(1, TimeUnit::Millisecond)
- .to_timezone_aware_string(Some(Timezone::from_tz_string("Asia/Shanghai").unwrap()))
+ Timestamp::new(1, TimeUnit::Millisecond).to_timezone_aware_string(Some(
+ &Timezone::from_tz_string("Asia/Shanghai").unwrap()
+ ))
);
assert_eq!(
"1970-01-01 00:00:00.001",
Timestamp::new(1, TimeUnit::Millisecond)
- .to_timezone_aware_string(Some(Timezone::from_tz_string("UTC").unwrap()))
+ .to_timezone_aware_string(Some(&Timezone::from_tz_string("UTC").unwrap()))
);
assert_eq!(
"1970-01-01 01:00:00.001",
- Timestamp::new(1, TimeUnit::Millisecond)
- .to_timezone_aware_string(Some(Timezone::from_tz_string("Europe/Berlin").unwrap()))
+ Timestamp::new(1, TimeUnit::Millisecond).to_timezone_aware_string(Some(
+ &Timezone::from_tz_string("Europe/Berlin").unwrap()
+ ))
);
assert_eq!(
"1970-01-01 03:00:00.001",
- Timestamp::new(1, TimeUnit::Millisecond)
- .to_timezone_aware_string(Some(Timezone::from_tz_string("Europe/Moscow").unwrap()))
+ Timestamp::new(1, TimeUnit::Millisecond).to_timezone_aware_string(Some(
+ &Timezone::from_tz_string("Europe/Moscow").unwrap()
+ ))
+ );
+ }
+
+ #[test]
+ fn test_as_formatted_string() {
+ let ts = Timestamp::new(1, TimeUnit::Millisecond);
+
+ assert_eq!(
+ "1970-01-01",
+ ts.as_formatted_string("%Y-%m-%d", None).unwrap()
+ );
+ assert_eq!(
+ "1970-01-01 00:00:00",
+ ts.as_formatted_string("%Y-%m-%d %H:%M:%S", None).unwrap()
+ );
+ assert_eq!(
+ "1970-01-01T00:00:00:001",
+ ts.as_formatted_string("%Y-%m-%dT%H:%M:%S:%3f", None)
+ .unwrap()
+ );
+ assert_eq!(
+ "1970-01-01T08:00:00:001",
+ ts.as_formatted_string(
+ "%Y-%m-%dT%H:%M:%S:%3f",
+ Some(&Timezone::from_tz_string("Asia/Shanghai").unwrap())
+ )
+ .unwrap()
);
}
diff --git a/src/common/time/src/timezone.rs b/src/common/time/src/timezone.rs
index 0e597084943e..dda94eb6843a 100644
--- a/src/common/time/src/timezone.rs
+++ b/src/common/time/src/timezone.rs
@@ -43,13 +43,8 @@ pub fn set_default_timezone(tz_str: Option<&str>) -> Result<()> {
#[inline(always)]
/// If `tz` is `Some(timezone)`, return `timezone` directly,
/// otherwise return the current system timezone.
-pub fn get_timezone(tz: Option<Timezone>) -> Timezone {
- tz.unwrap_or_else(|| {
- DEFAULT_TIMEZONE
- .get()
- .cloned()
- .unwrap_or(Timezone::Named(Tz::UTC))
- })
+pub fn get_timezone(tz: Option<&Timezone>) -> &Timezone {
+ tz.unwrap_or_else(|| DEFAULT_TIMEZONE.get().unwrap_or(&Timezone::Named(Tz::UTC)))
}
#[inline(always)]
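A quick sketch of the new borrowing contract of get_timezone:

use common_time::timezone::get_timezone;
use common_time::Timezone;

// An explicit timezone is returned by reference, unchanged.
let tz = Timezone::from_tz_string("+08:00").unwrap();
assert_eq!(get_timezone(Some(&tz)), &tz);

// None falls back to the configured default (or UTC), also by reference, so
// callers that need an owned value now clone explicitly, as in the
// session/context changes below.
let _default = get_timezone(None);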
diff --git a/src/datatypes/src/value.rs b/src/datatypes/src/value.rs
index 943f563a0a64..fede1420bb06 100644
--- a/src/datatypes/src/value.rs
+++ b/src/datatypes/src/value.rs
@@ -218,6 +218,14 @@ impl Value {
}
}
+ /// Cast Value to utf8 String. Return None if value is not a valid string data type.
+ pub fn as_string(&self) -> Option<String> {
+ match self {
+ Value::String(bytes) => Some(bytes.as_utf8().to_string()),
+ _ => None,
+ }
+ }
+
/// Cast Value to Date. Return None if value is not a valid date data type.
pub fn as_date(&self) -> Option<Date> {
match self {
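A tiny sketch of the new Value::as_string helper (assuming Value::String can be built from a &str via StringBytes, as elsewhere in the crate):

use datatypes::value::Value;

let v = Value::String("hello".into());
assert_eq!(v.as_string(), Some("hello".to_string()));
// Non-string values return None rather than being stringified.
assert_eq!(Value::Int64(1).as_string(), None);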
diff --git a/src/servers/src/mysql/writer.rs b/src/servers/src/mysql/writer.rs
index 726daf061f75..fd5304ec3e76 100644
--- a/src/servers/src/mysql/writer.rs
+++ b/src/servers/src/mysql/writer.rs
@@ -194,10 +194,10 @@ impl<'a, W: AsyncWrite + Unpin> MysqlResultWriter<'a, W> {
Value::Date(v) => row_writer.write_col(v.to_chrono_date())?,
// convert datetime and timestamp to timezone of current connection
Value::DateTime(v) => row_writer.write_col(
- v.to_chrono_datetime_with_timezone(Some(query_context.timezone())),
+ v.to_chrono_datetime_with_timezone(Some(&query_context.timezone())),
)?,
Value::Timestamp(v) => row_writer.write_col(
- v.to_chrono_datetime_with_timezone(Some(query_context.timezone())),
+ v.to_chrono_datetime_with_timezone(Some(&query_context.timezone())),
)?,
Value::Interval(v) => row_writer.write_col(v.to_iso8601_string())?,
Value::Duration(v) => row_writer.write_col(v.to_std_duration())?,
@@ -210,7 +210,7 @@ impl<'a, W: AsyncWrite + Unpin> MysqlResultWriter<'a, W> {
})
}
Value::Time(v) => row_writer
- .write_col(v.to_timezone_aware_string(Some(query_context.timezone())))?,
+ .write_col(v.to_timezone_aware_string(Some(&query_context.timezone())))?,
Value::Decimal128(v) => row_writer.write_col(v.to_string())?,
}
}
diff --git a/src/session/src/context.rs b/src/session/src/context.rs
index 7446624ae52e..4e3314215c1d 100644
--- a/src/session/src/context.rs
+++ b/src/session/src/context.rs
@@ -69,7 +69,7 @@ impl From<&RegionRequestHeader> for QueryContext {
current_schema: schema.to_string(),
current_user: Default::default(),
// for request send to datanode, all timestamp have converted to UTC, so timezone is not important
- timezone: ArcSwap::new(Arc::new(get_timezone(None))),
+ timezone: ArcSwap::new(Arc::new(get_timezone(None).clone())),
sql_dialect: Box::new(GreptimeDbDialect {}),
}
}
@@ -123,8 +123,8 @@ impl QueryContext {
build_db_string(catalog, schema)
}
- pub fn timezone(&self) -> Timezone {
- self.timezone.load().as_ref().clone()
+ pub fn timezone(&self) -> Arc<Timezone> {
+ self.timezone.load().clone()
}
pub fn current_user(&self) -> Option<UserInfoRef> {
@@ -143,8 +143,8 @@ impl QueryContext {
/// We need persist these change in `Session`.
pub fn update_session(&self, session: &SessionRef) {
let tz = self.timezone();
- if session.timezone() != tz {
- session.set_timezone(tz)
+ if session.timezone() != *tz {
+ session.set_timezone(tz.as_ref().clone())
}
}
}
@@ -163,7 +163,7 @@ impl QueryContextBuilder {
.unwrap_or_else(|| ArcSwap::new(Arc::new(None))),
timezone: self
.timezone
- .unwrap_or(ArcSwap::new(Arc::new(get_timezone(None)))),
+ .unwrap_or(ArcSwap::new(Arc::new(get_timezone(None).clone()))),
sql_dialect: self
.sql_dialect
.unwrap_or_else(|| Box::new(GreptimeDbDialect {})),
diff --git a/src/session/src/lib.rs b/src/session/src/lib.rs
index 35035cda27a6..f64ac81885d5 100644
--- a/src/session/src/lib.rs
+++ b/src/session/src/lib.rs
@@ -46,7 +46,7 @@ impl Session {
schema: ArcSwap::new(Arc::new(DEFAULT_SCHEMA_NAME.into())),
user_info: ArcSwap::new(Arc::new(auth::userinfo_by_name(None))),
conn_info: ConnInfo::new(addr, channel),
- timezone: ArcSwap::new(Arc::new(get_timezone(None))),
+ timezone: ArcSwap::new(Arc::new(get_timezone(None).clone())),
}
}
diff --git a/tests/cases/standalone/common/function/date.result b/tests/cases/standalone/common/function/date.result
index 122866efac4d..a94cff3cd3d9 100644
--- a/tests/cases/standalone/common/function/date.result
+++ b/tests/cases/standalone/common/function/date.result
@@ -1,3 +1,4 @@
+--- date_add ---
SELECT date_add('2023-12-06 07:39:46.222'::TIMESTAMP_MS, INTERVAL '5 day');
+----------------------------------------------------------------------------------------+
@@ -30,6 +31,7 @@ SELECT date_add('2023-12-06'::DATE, '3 month 5 day');
| 2024-03-11 |
+----------------------------------------------------+
+--- date_sub ---
SELECT date_sub('2023-12-06 07:39:46.222'::TIMESTAMP_MS, INTERVAL '5 day');
+----------------------------------------------------------------------------------------+
@@ -62,6 +64,37 @@ SELECT date_sub('2023-12-06'::DATE, '3 month 5 day');
| 2023-09-01 |
+----------------------------------------------------+
+--- date_format ---
+SELECT date_format('2023-12-06 07:39:46.222'::TIMESTAMP_MS, '%Y-%m-%d %H:%M:%S:%3f');
+
++----------------------------------------------------------------------------+
+| date_format(Utf8("2023-12-06 07:39:46.222"),Utf8("%Y-%m-%d %H:%M:%S:%3f")) |
++----------------------------------------------------------------------------+
+| 2023-12-06 07:39:46:222 |
++----------------------------------------------------------------------------+
+
+SELECT date_format('2023-12-06 07:39:46.222'::TIMESTAMP_S, '%Y-%m-%d %H:%M:%S:%3f');
+
++----------------------------------------------------------------------------+
+| date_format(Utf8("2023-12-06 07:39:46.222"),Utf8("%Y-%m-%d %H:%M:%S:%3f")) |
++----------------------------------------------------------------------------+
+| 2023-12-06 07:39:46:000 |
++----------------------------------------------------------------------------+
+
+--- datetime not supported yet ---
+SELECT date_format('2023-12-06 07:39:46.222'::DATETIME, '%Y-%m-%d %H:%M:%S:%3f');
+
+Error: 3000(PlanQuery), Failed to plan SQL: This feature is not implemented: Unsupported SQL type Datetime(None)
+
+SELECT date_format('2023-12-06'::DATE, '%m-%d');
+
++-----------------------------------------------+
+| date_format(Utf8("2023-12-06"),Utf8("%m-%d")) |
++-----------------------------------------------+
+| 12-06 |
++-----------------------------------------------+
+
+--- test date functions with table rows ---
CREATE TABLE dates(d DATE, ts timestamp time index);
Affected Rows: 0
@@ -78,6 +111,7 @@ INSERT INTO dates VALUES ('2023-12-06'::DATE, 3);
Affected Rows: 1
+--- date_add ---
SELECT date_add(d, INTERVAL '1 year 2 month 3 day') from dates;
+---------------------------------------------------------------------------+
@@ -118,6 +152,7 @@ SELECT date_add(ts, '1 year 2 month 3 day') from dates;
| 1971-03-04T00:00:00.003 |
+-------------------------------------------------+
+--- date_sub ---
SELECT date_sub(d, INTERVAL '1 year 2 month 3 day') from dates;
+---------------------------------------------------------------------------+
@@ -158,6 +193,27 @@ SELECT date_sub(ts, '1 year 2 month 3 day') from dates;
| 1968-10-29T00:00:00.003 |
+-------------------------------------------------+
+--- date_format ---
+SELECT date_format(d, '%Y-%m-%d %H:%M:%S:%3f') from dates;
+
++----------------------------------------------------+
+| date_format(dates.d,Utf8("%Y-%m-%d %H:%M:%S:%3f")) |
++----------------------------------------------------+
+| 1992-01-01 00:00:00:000 |
+| 1993-12-30 00:00:00:000 |
+| 2023-12-06 00:00:00:000 |
++----------------------------------------------------+
+
+SELECT date_format(ts, '%Y-%m-%d %H:%M:%S:%3f') from dates;
+
++-----------------------------------------------------+
+| date_format(dates.ts,Utf8("%Y-%m-%d %H:%M:%S:%3f")) |
++-----------------------------------------------------+
+| 1970-01-01 00:00:00:001 |
+| 1970-01-01 00:00:00:002 |
+| 1970-01-01 00:00:00:003 |
++-----------------------------------------------------+
+
DROP TABLE dates;
Affected Rows: 0
diff --git a/tests/cases/standalone/common/function/date.sql b/tests/cases/standalone/common/function/date.sql
index a5f962749975..db661ca4ed44 100644
--- a/tests/cases/standalone/common/function/date.sql
+++ b/tests/cases/standalone/common/function/date.sql
@@ -1,3 +1,4 @@
+--- date_add ---
SELECT date_add('2023-12-06 07:39:46.222'::TIMESTAMP_MS, INTERVAL '5 day');
SELECT date_add('2023-12-06 07:39:46.222'::TIMESTAMP_MS, '5 day');
@@ -6,6 +7,7 @@ SELECT date_add('2023-12-06'::DATE, INTERVAL '3 month 5 day');
SELECT date_add('2023-12-06'::DATE, '3 month 5 day');
+--- date_sub ---
SELECT date_sub('2023-12-06 07:39:46.222'::TIMESTAMP_MS, INTERVAL '5 day');
SELECT date_sub('2023-12-06 07:39:46.222'::TIMESTAMP_MS, '5 day');
@@ -14,7 +16,17 @@ SELECT date_sub('2023-12-06'::DATE, INTERVAL '3 month 5 day');
SELECT date_sub('2023-12-06'::DATE, '3 month 5 day');
+--- date_format ---
+SELECT date_format('2023-12-06 07:39:46.222'::TIMESTAMP_MS, '%Y-%m-%d %H:%M:%S:%3f');
+SELECT date_format('2023-12-06 07:39:46.222'::TIMESTAMP_S, '%Y-%m-%d %H:%M:%S:%3f');
+
+--- datetime not supported yet ---
+SELECT date_format('2023-12-06 07:39:46.222'::DATETIME, '%Y-%m-%d %H:%M:%S:%3f');
+
+SELECT date_format('2023-12-06'::DATE, '%m-%d');
+
+--- test date functions with table rows ---
CREATE TABLE dates(d DATE, ts timestamp time index);
INSERT INTO dates VALUES ('1992-01-01'::DATE, 1);
@@ -23,6 +35,7 @@ INSERT INTO dates VALUES ('1993-12-30'::DATE, 2);
INSERT INTO dates VALUES ('2023-12-06'::DATE, 3);
+--- date_add ---
SELECT date_add(d, INTERVAL '1 year 2 month 3 day') from dates;
SELECT date_add(d, '1 year 2 month 3 day') from dates;
@@ -31,6 +44,7 @@ SELECT date_add(ts, INTERVAL '1 year 2 month 3 day') from dates;
SELECT date_add(ts, '1 year 2 month 3 day') from dates;
+--- date_sub ---
SELECT date_sub(d, INTERVAL '1 year 2 month 3 day') from dates;
SELECT date_sub(d, '1 year 2 month 3 day') from dates;
@@ -39,4 +53,9 @@ SELECT date_sub(ts, INTERVAL '1 year 2 month 3 day') from dates;
SELECT date_sub(ts, '1 year 2 month 3 day') from dates;
+--- date_format ---
+SELECT date_format(d, '%Y-%m-%d %H:%M:%S:%3f') from dates;
+
+SELECT date_format(ts, '%Y-%m-%d %H:%M:%S:%3f') from dates;
+
DROP TABLE dates;
|
feat
|
adds date_format function (#3167)
|
56258d6821a05961bcd9cf8dd46782cf9cd122ea
|
2022-05-07 13:15:46
|
evenyag
|
test: Add more test for opaque error
| false
|
diff --git a/src/common/error/src/ext.rs b/src/common/error/src/ext.rs
index d78e35326c19..31c8bf917a9d 100644
--- a/src/common/error/src/ext.rs
+++ b/src/common/error/src/ext.rs
@@ -97,6 +97,10 @@ mod tests {
}
impl ErrorExt for InnerError {
+ fn status_code(&self) -> StatusCode {
+ StatusCode::Internal
+ }
+
fn backtrace_opt(&self) -> Option<&snafu::Backtrace> {
ErrorCompat::backtrace(self)
}
@@ -133,19 +137,31 @@ mod tests {
#[test]
fn test_opaque_error() {
+ // Test leaf error.
let err: Error = throw_leaf().map_err(Into::into).err().unwrap();
let msg = format!("{:?}", err);
assert!(msg.contains("\nBacktrace:\n"));
- assert!(ErrorCompat::backtrace(&err).is_some());
+
let fmt_msg = format!("{:?}", DebugFormat::new(&err));
assert_eq!(msg, fmt_msg);
+ assert!(ErrorCompat::backtrace(&err).is_some());
+ assert!(err.backtrace_opt().is_some());
+ assert_eq!("This is a leaf error, val: 10", err.to_string());
+ assert_eq!(StatusCode::Internal, err.status_code());
+
+ // Test internal error.
let err: Error = throw_internal().map_err(Into::into).err().unwrap();
let msg = format!("{:?}", err);
assert!(msg.contains("\nBacktrace:\n"));
assert!(msg.contains("Caused by"));
- assert!(ErrorCompat::backtrace(&err).is_some());
+
let fmt_msg = format!("{:?}", DebugFormat::new(&err));
assert_eq!(msg, fmt_msg);
+
+ assert!(ErrorCompat::backtrace(&err).is_some());
+ assert!(err.backtrace_opt().is_some());
+ assert_eq!("This is an internal error", err.to_string());
+ assert_eq!(StatusCode::Internal, err.status_code());
}
}
diff --git a/src/common/error/src/status_code.rs b/src/common/error/src/status_code.rs
index 2b0a338b3139..e81cc349fc15 100644
--- a/src/common/error/src/status_code.rs
+++ b/src/common/error/src/status_code.rs
@@ -1,5 +1,5 @@
/// Common status code for public API.
-#[derive(Debug, Clone, Copy)]
+#[derive(Debug, Clone, Copy, PartialEq)]
pub enum StatusCode {
// ====== Begin of common status code ==============
/// Unknown error.
|
test
|
Add more test for opaque error
|
2c3fccb516addcfb082a7c12302b3fffafcbbab8
|
2024-08-14 16:59:30
|
discord9
|
feat(flow): add `eval_batch` for ScalarExpr (#4551)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 34da29c6727b..acd6a6bd8b73 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3798,6 +3798,7 @@ name = "flow"
version = "0.9.1"
dependencies = [
"api",
+ "arrow",
"arrow-schema",
"async-recursion",
"async-trait",
diff --git a/src/flow/Cargo.toml b/src/flow/Cargo.toml
index c4db341dbe7d..ed2a1dc1c474 100644
--- a/src/flow/Cargo.toml
+++ b/src/flow/Cargo.toml
@@ -9,6 +9,7 @@ workspace = true
[dependencies]
api.workspace = true
+arrow.workspace = true
arrow-schema.workspace = true
async-recursion = "1.0"
async-trait.workspace = true
diff --git a/src/flow/src/compute/render.rs b/src/flow/src/compute/render.rs
index 618f9654257d..444ef7a4ac8c 100644
--- a/src/flow/src/compute/render.rs
+++ b/src/flow/src/compute/render.rs
@@ -16,32 +16,21 @@
//!
//! And the [`Context`] is the environment for the render process; it contains all the information the render process needs
-use std::cell::RefCell;
-use std::collections::{BTreeMap, VecDeque};
-use std::ops::Range;
-use std::rc::Rc;
-
-use datatypes::data_type::ConcreteDataType;
-use datatypes::value::{ListValue, Value};
-use hydroflow::futures::SinkExt;
-use hydroflow::lattices::cc_traits::Get;
+use std::collections::BTreeMap;
+
use hydroflow::scheduled::graph::Hydroflow;
use hydroflow::scheduled::graph_ext::GraphExt;
use hydroflow::scheduled::port::{PortCtx, SEND};
use itertools::Itertools;
-use snafu::{ensure, OptionExt, ResultExt};
+use snafu::OptionExt;
use super::state::Scheduler;
use crate::compute::state::DataflowState;
-use crate::compute::types::{Arranged, Collection, CollectionBundle, ErrCollector, Toff};
-use crate::error::{Error, EvalSnafu, InvalidQuerySnafu, NotImplementedSnafu, PlanSnafu};
-use crate::expr::error::{DataTypeSnafu, InternalSnafu};
-use crate::expr::{
- self, EvalError, GlobalId, LocalId, MapFilterProject, MfpPlan, SafeMfpPlan, ScalarExpr,
-};
-use crate::plan::{AccumulablePlan, KeyValPlan, Plan, ReducePlan, TypedPlan};
-use crate::repr::{self, DiffRow, KeyValDiffRow, Row};
-use crate::utils::{ArrangeHandler, ArrangeReader, ArrangeWriter, Arrangement};
+use crate::compute::types::{Collection, CollectionBundle, ErrCollector, Toff};
+use crate::error::{Error, InvalidQuerySnafu, NotImplementedSnafu};
+use crate::expr::{self, GlobalId, LocalId};
+use crate::plan::{Plan, TypedPlan};
+use crate::repr::{self, DiffRow};
mod map;
mod reduce;
@@ -218,20 +207,17 @@ mod test {
use std::cell::RefCell;
use std::rc::Rc;
- use common_time::DateTime;
- use datatypes::data_type::ConcreteDataType;
use hydroflow::scheduled::graph::Hydroflow;
use hydroflow::scheduled::graph_ext::GraphExt;
use hydroflow::scheduled::handoff::VecHandoff;
- use pretty_assertions::{assert_eq, assert_ne};
+ use pretty_assertions::assert_eq;
use super::*;
- use crate::expr::BinaryFunc;
use crate::repr::Row;
pub fn run_and_check(
state: &mut DataflowState,
df: &mut Hydroflow,
- time_range: Range<i64>,
+ time_range: std::ops::Range<i64>,
expected: BTreeMap<i64, Vec<DiffRow>>,
output: Rc<RefCell<Vec<DiffRow>>>,
) {
diff --git a/src/flow/src/compute/render/map.rs b/src/flow/src/compute/render/map.rs
index 272be4acc684..c940b34ed144 100644
--- a/src/flow/src/compute/render/map.rs
+++ b/src/flow/src/compute/render/map.rs
@@ -24,7 +24,7 @@ use crate::compute::state::Scheduler;
use crate::compute::types::{Arranged, Collection, CollectionBundle, ErrCollector, Toff};
use crate::error::{Error, PlanSnafu};
use crate::expr::{EvalError, MapFilterProject, MfpPlan, ScalarExpr};
-use crate::plan::{Plan, TypedPlan};
+use crate::plan::TypedPlan;
use crate::repr::{self, DiffRow, KeyValDiffRow, Row};
use crate::utils::ArrangeHandler;
@@ -206,8 +206,6 @@ fn eval_mfp_core(
#[cfg(test)]
mod test {
- use std::cell::RefCell;
- use std::rc::Rc;
use datatypes::data_type::ConcreteDataType;
use hydroflow::scheduled::graph::Hydroflow;
@@ -216,6 +214,7 @@ mod test {
use crate::compute::render::test::{get_output_handle, harness_test_ctx, run_and_check};
use crate::compute::state::DataflowState;
use crate::expr::{self, BinaryFunc, GlobalId};
+ use crate::plan::Plan;
use crate::repr::{ColumnType, RelationType};
/// test if temporal filter works properly
diff --git a/src/flow/src/compute/render/reduce.rs b/src/flow/src/compute/render/reduce.rs
index 5d5761656c84..b41364ec4435 100644
--- a/src/flow/src/compute/render/reduce.rs
+++ b/src/flow/src/compute/render/reduce.rs
@@ -18,17 +18,15 @@ use std::ops::Range;
use datatypes::data_type::ConcreteDataType;
use datatypes::value::{ListValue, Value};
use hydroflow::scheduled::graph_ext::GraphExt;
-use hydroflow::scheduled::port::{PortCtx, SEND};
use itertools::Itertools;
use snafu::{ensure, OptionExt, ResultExt};
use crate::compute::render::{Context, SubgraphArg};
-use crate::compute::state::Scheduler;
use crate::compute::types::{Arranged, Collection, CollectionBundle, ErrCollector, Toff};
use crate::error::{Error, PlanSnafu};
use crate::expr::error::{DataAlreadyExpiredSnafu, DataTypeSnafu, InternalSnafu};
-use crate::expr::{AggregateExpr, EvalError, ScalarExpr};
-use crate::plan::{AccumulablePlan, AggrWithIndex, KeyValPlan, Plan, ReducePlan, TypedPlan};
+use crate::expr::{EvalError, ScalarExpr};
+use crate::plan::{AccumulablePlan, AggrWithIndex, KeyValPlan, ReducePlan, TypedPlan};
use crate::repr::{self, DiffRow, KeyValDiffRow, RelationType, Row};
use crate::utils::{ArrangeHandler, ArrangeReader, ArrangeWriter, KeyExpiryManager};
@@ -790,8 +788,6 @@ fn from_val_to_slice_idx(
// TODO(discord9): add tests for accum ser/de
#[cfg(test)]
mod test {
- use std::cell::RefCell;
- use std::rc::Rc;
use common_time::{DateTime, Interval, Timestamp};
use datatypes::data_type::{ConcreteDataType, ConcreteDataType as CDT};
@@ -800,7 +796,10 @@ mod test {
use super::*;
use crate::compute::render::test::{get_output_handle, harness_test_ctx, run_and_check};
use crate::compute::state::DataflowState;
- use crate::expr::{self, AggregateFunc, BinaryFunc, GlobalId, MapFilterProject, UnaryFunc};
+ use crate::expr::{
+ self, AggregateExpr, AggregateFunc, BinaryFunc, GlobalId, MapFilterProject, UnaryFunc,
+ };
+ use crate::plan::Plan;
use crate::repr::{ColumnType, RelationType};
/// SELECT sum(number) FROM numbers_with_ts GROUP BY tumble(ts, '1 second', '2021-07-01 00:00:00')
diff --git a/src/flow/src/compute/render/src_sink.rs b/src/flow/src/compute/render/src_sink.rs
index c8a8a901c75d..d984f4831191 100644
--- a/src/flow/src/compute/render/src_sink.rs
+++ b/src/flow/src/compute/render/src_sink.rs
@@ -16,7 +16,7 @@
use std::collections::{BTreeMap, VecDeque};
-use common_telemetry::{debug, info};
+use common_telemetry::debug;
use hydroflow::scheduled::graph_ext::GraphExt;
use itertools::Itertools;
use snafu::OptionExt;
@@ -27,7 +27,7 @@ use crate::compute::render::Context;
use crate::compute::types::{Arranged, Collection, CollectionBundle, Toff};
use crate::error::{Error, PlanSnafu};
use crate::expr::error::InternalSnafu;
-use crate::expr::{EvalError, GlobalId};
+use crate::expr::EvalError;
use crate::repr::{DiffRow, Row, BROADCAST_CAP};
#[allow(clippy::mutable_key_type)]
diff --git a/src/flow/src/compute/state.rs b/src/flow/src/compute/state.rs
index a9356005546c..d34b4a311d15 100644
--- a/src/flow/src/compute/state.rs
+++ b/src/flow/src/compute/state.rs
@@ -13,7 +13,7 @@
// limitations under the License.
use std::cell::RefCell;
-use std::collections::{BTreeMap, BTreeSet, VecDeque};
+use std::collections::{BTreeMap, VecDeque};
use std::rc::Rc;
use hydroflow::scheduled::graph::Hydroflow;
diff --git a/src/flow/src/compute/types.rs b/src/flow/src/compute/types.rs
index f2276ba755eb..9674163c2686 100644
--- a/src/flow/src/compute/types.rs
+++ b/src/flow/src/compute/types.rs
@@ -22,12 +22,11 @@ use hydroflow::scheduled::handoff::TeeingHandoff;
use hydroflow::scheduled::port::RecvPort;
use hydroflow::scheduled::SubgraphId;
use itertools::Itertools;
-use tokio::sync::{Mutex, RwLock};
+use tokio::sync::Mutex;
-use crate::compute::render::Context;
use crate::expr::{EvalError, ScalarExpr};
use crate::repr::DiffRow;
-use crate::utils::{ArrangeHandler, Arrangement};
+use crate::utils::ArrangeHandler;
pub type Toff<T = DiffRow> = TeeingHandoff<T>;
diff --git a/src/flow/src/expr.rs b/src/flow/src/expr.rs
index aefc4db3beef..35f937cdc136 100644
--- a/src/flow/src/expr.rs
+++ b/src/flow/src/expr.rs
@@ -14,6 +14,7 @@
//! for declare Expression in dataflow, including map, reduce, id and join(TODO!) etc.
+mod df_func;
pub(crate) mod error;
mod func;
mod id;
@@ -22,9 +23,92 @@ mod relation;
mod scalar;
mod signature;
-pub(crate) use error::{EvalError, InvalidArgumentSnafu, OptimizeSnafu};
+use datatypes::prelude::DataType;
+use datatypes::vectors::VectorRef;
+pub(crate) use df_func::{DfScalarFunction, RawDfScalarFn};
+pub(crate) use error::{EvalError, InvalidArgumentSnafu};
pub(crate) use func::{BinaryFunc, UnaryFunc, UnmaterializableFunc, VariadicFunc};
pub(crate) use id::{GlobalId, Id, LocalId};
+use itertools::Itertools;
pub(crate) use linear::{MapFilterProject, MfpPlan, SafeMfpPlan};
pub(crate) use relation::{AggregateExpr, AggregateFunc};
-pub(crate) use scalar::{DfScalarFunction, RawDfScalarFn, ScalarExpr, TypedExpr};
+pub(crate) use scalar::{ScalarExpr, TypedExpr};
+use snafu::{ensure, ResultExt};
+
+use crate::expr::error::DataTypeSnafu;
+
+/// A batch of vectors with the same length but without schema, only useful in dataflow
+pub struct Batch {
+ batch: Vec<VectorRef>,
+ row_count: usize,
+}
+
+impl Batch {
+ pub fn new(batch: Vec<VectorRef>, row_count: usize) -> Self {
+ Self { batch, row_count }
+ }
+
+ pub fn batch(&self) -> &[VectorRef] {
+ &self.batch
+ }
+
+ pub fn row_count(&self) -> usize {
+ self.row_count
+ }
+
+ /// Slices the `Batch`, returning a new `Batch`.
+ ///
+ /// # Panics
+ /// This function panics if `offset + length > self.row_count()`.
+ pub fn slice(&self, offset: usize, length: usize) -> Batch {
+ let batch = self
+ .batch()
+ .iter()
+ .map(|v| v.slice(offset, length))
+ .collect_vec();
+ Batch::new(batch, length)
+ }
+
+ /// Append another batch to `self`.
+ pub fn append_batch(&mut self, other: Batch) -> Result<(), EvalError> {
+ ensure!(
+ self.batch.len() == other.batch.len(),
+ InvalidArgumentSnafu {
+ reason: format!(
+ "Expect two batch to have same numbers of column, found {} and {} columns",
+ self.batch.len(),
+ other.batch.len()
+ )
+ }
+ );
+
+ let batch_builders = self
+ .batch
+ .iter()
+ .map(|v| {
+ v.data_type()
+ .create_mutable_vector(self.row_count() + other.row_count())
+ })
+ .collect_vec();
+
+ let mut result = vec![];
+ let zelf_row_count = self.row_count();
+ let other_row_count = other.row_count();
+ for (idx, mut builder) in batch_builders.into_iter().enumerate() {
+ builder
+ .extend_slice_of(self.batch()[idx].as_ref(), 0, zelf_row_count)
+ .context(DataTypeSnafu {
+ msg: "Failed to extend vector",
+ })?;
+ builder
+ .extend_slice_of(other.batch()[idx].as_ref(), 0, other_row_count)
+ .context(DataTypeSnafu {
+ msg: "Failed to extend vector",
+ })?;
+ result.push(builder.to_vector());
+ }
+ self.batch = result;
+ self.row_count = zelf_row_count + other_row_count;
+ Ok(())
+ }
+}
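A minimal sketch of how the new Batch container composes, assuming Int32Vector from datatypes::vectors:

use std::sync::Arc;

use datatypes::vectors::{Int32Vector, VectorRef};

let col: VectorRef = Arc::new(Int32Vector::from_vec(vec![1, 2, 3, 4]));
let mut batch = Batch::new(vec![col.clone()], 4);

// Take the first two rows, then append them back: 4 + 2 = 6 rows.
let head = batch.slice(0, 2);
batch.append_batch(head).unwrap();
assert_eq!(batch.row_count(), 6);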
diff --git a/src/flow/src/expr/df_func.rs b/src/flow/src/expr/df_func.rs
new file mode 100644
index 000000000000..b0a2648dd15e
--- /dev/null
+++ b/src/flow/src/expr/df_func.rs
@@ -0,0 +1,293 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Porting Datafusion scalar function to our scalar function to be used in dataflow
+
+use std::sync::Arc;
+
+use arrow::array::RecordBatchOptions;
+use bytes::BytesMut;
+use common_error::ext::BoxedError;
+use common_recordbatch::DfRecordBatch;
+use common_telemetry::debug;
+use datafusion_physical_expr::PhysicalExpr;
+use datatypes::data_type::DataType;
+use datatypes::value::Value;
+use datatypes::vectors::VectorRef;
+use prost::Message;
+use snafu::{IntoError, ResultExt};
+use substrait::error::{DecodeRelSnafu, EncodeRelSnafu};
+use substrait::substrait_proto_df::proto::expression::ScalarFunction;
+
+use crate::error::Error;
+use crate::expr::error::{
+ ArrowSnafu, DatafusionSnafu as EvalDatafusionSnafu, EvalError, ExternalSnafu,
+ InvalidArgumentSnafu,
+};
+use crate::expr::{Batch, ScalarExpr};
+use crate::repr::RelationDesc;
+use crate::transform::{from_scalar_fn_to_df_fn_impl, FunctionExtensions};
+
+/// A way to represent a scalar function that is implemented in Datafusion
+#[derive(Debug, Clone)]
+pub struct DfScalarFunction {
+ /// The raw bytes of the encoded datafusion scalar function
+ pub(crate) raw_fn: RawDfScalarFn,
+ // TODO(discord9): directly from datafusion expr
+ /// The implementation of the function
+ pub(crate) fn_impl: Arc<dyn PhysicalExpr>,
+ /// The input schema of the function
+ pub(crate) df_schema: Arc<datafusion_common::DFSchema>,
+}
+
+impl DfScalarFunction {
+ pub fn new(raw_fn: RawDfScalarFn, fn_impl: Arc<dyn PhysicalExpr>) -> Result<Self, Error> {
+ Ok(Self {
+ df_schema: Arc::new(raw_fn.input_schema.to_df_schema()?),
+ raw_fn,
+ fn_impl,
+ })
+ }
+
+ pub async fn try_from_raw_fn(raw_fn: RawDfScalarFn) -> Result<Self, Error> {
+ Ok(Self {
+ fn_impl: raw_fn.get_fn_impl().await?,
+ df_schema: Arc::new(raw_fn.input_schema.to_df_schema()?),
+ raw_fn,
+ })
+ }
+
+ /// Evaluate a batch of expressions using input values
+ pub fn eval_batch(&self, batch: &Batch, exprs: &[ScalarExpr]) -> Result<VectorRef, EvalError> {
+ let row_count = batch.row_count();
+ let batch: Vec<_> = exprs
+ .iter()
+ .map(|expr| expr.eval_batch(batch))
+ .collect::<Result<_, _>>()?;
+
+ let schema = self.df_schema.inner().clone();
+
+ let arrays = batch
+ .iter()
+ .map(|array| array.to_arrow_array())
+ .collect::<Vec<_>>();
+ let rb = DfRecordBatch::try_new_with_options(schema, arrays, &RecordBatchOptions::new().with_row_count(Some(row_count))).map_err(|err| {
+ ArrowSnafu {
+ context:
+ "Failed to create RecordBatch from values when eval_batch datafusion scalar function",
+ }
+ .into_error(err)
+ })?;
+
+ let len = rb.num_rows();
+
+ let res = self.fn_impl.evaluate(&rb).map_err(|err| {
+ EvalDatafusionSnafu {
+ raw: err,
+ context: "Failed to evaluate datafusion scalar function",
+ }
+ .build()
+ })?;
+ let res = common_query::columnar_value::ColumnarValue::try_from(&res)
+ .map_err(BoxedError::new)
+ .context(ExternalSnafu)?;
+ let res_vec = res
+ .try_into_vector(len)
+ .map_err(BoxedError::new)
+ .context(ExternalSnafu)?;
+
+ Ok(res_vec)
+ }
+
+ /// Evaluate a list of expressions using input values
+ fn eval_args(values: &[Value], exprs: &[ScalarExpr]) -> Result<Vec<Value>, EvalError> {
+ exprs
+ .iter()
+ .map(|expr| expr.eval(values))
+ .collect::<Result<_, _>>()
+ }
+
+ // TODO(discord9): add RecordBatch support
+ pub fn eval(&self, values: &[Value], exprs: &[ScalarExpr]) -> Result<Value, EvalError> {
+ // first eval exprs to construct values to feed to datafusion
+ let values: Vec<_> = Self::eval_args(values, exprs)?;
+ if values.is_empty() {
+ return InvalidArgumentSnafu {
+ reason: "values is empty".to_string(),
+ }
+ .fail();
+ }
+ // TODO(discord9): make cols all array length of one
+ let mut cols = vec![];
+ for (idx, typ) in self
+ .raw_fn
+ .input_schema
+ .typ()
+ .column_types
+ .iter()
+ .enumerate()
+ {
+ let typ = typ.scalar_type();
+ let mut array = typ.create_mutable_vector(1);
+ array.push_value_ref(values[idx].as_value_ref());
+ cols.push(array.to_vector().to_arrow_array());
+ }
+ let schema = self.df_schema.inner().clone();
+ let rb = DfRecordBatch::try_new_with_options(
+ schema,
+ cols,
+ &RecordBatchOptions::new().with_row_count(Some(1)),
+ )
+ .map_err(|err| {
+ ArrowSnafu {
+ context:
+ "Failed to create RecordBatch from values when eval datafusion scalar function",
+ }
+ .into_error(err)
+ })?;
+
+ let res = self.fn_impl.evaluate(&rb).map_err(|err| {
+ EvalDatafusionSnafu {
+ raw: err,
+ context: "Failed to evaluate datafusion scalar function",
+ }
+ .build()
+ })?;
+ let res = common_query::columnar_value::ColumnarValue::try_from(&res)
+ .map_err(BoxedError::new)
+ .context(ExternalSnafu)?;
+ let res_vec = res
+ .try_into_vector(1)
+ .map_err(BoxedError::new)
+ .context(ExternalSnafu)?;
+ let res_val = res_vec
+ .try_get(0)
+ .map_err(BoxedError::new)
+ .context(ExternalSnafu)?;
+ Ok(res_val)
+ }
+}
+
+#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
+pub struct RawDfScalarFn {
+ /// The raw bytes of the encoded datafusion scalar function
+ pub(crate) f: bytes::BytesMut,
+ /// The input schema of the function
+ pub(crate) input_schema: RelationDesc,
+ /// Extension contains mapping from function reference to function name
+ pub(crate) extensions: FunctionExtensions,
+}
+
+impl RawDfScalarFn {
+ pub fn from_proto(
+ f: &substrait::substrait_proto_df::proto::expression::ScalarFunction,
+ input_schema: RelationDesc,
+ extensions: FunctionExtensions,
+ ) -> Result<Self, Error> {
+ let mut buf = BytesMut::new();
+ f.encode(&mut buf)
+ .context(EncodeRelSnafu)
+ .map_err(BoxedError::new)
+ .context(crate::error::ExternalSnafu)?;
+ Ok(Self {
+ f: buf,
+ input_schema,
+ extensions,
+ })
+ }
+ async fn get_fn_impl(&self) -> Result<Arc<dyn PhysicalExpr>, Error> {
+ let f = ScalarFunction::decode(&mut self.f.as_ref())
+ .context(DecodeRelSnafu)
+ .map_err(BoxedError::new)
+ .context(crate::error::ExternalSnafu)?;
+ debug!("Decoded scalar function: {:?}", f);
+
+ let input_schema = &self.input_schema;
+ let extensions = &self.extensions;
+
+ from_scalar_fn_to_df_fn_impl(&f, input_schema, extensions).await
+ }
+}
+
+impl std::cmp::PartialEq for DfScalarFunction {
+ fn eq(&self, other: &Self) -> bool {
+ self.raw_fn.eq(&other.raw_fn)
+ }
+}
+
+// can't derive Eq because Arc<dyn PhysicalExpr> is not Eq, so implement it manually
+impl std::cmp::Eq for DfScalarFunction {}
+
+impl std::cmp::PartialOrd for DfScalarFunction {
+ fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+ Some(self.cmp(other))
+ }
+}
+impl std::cmp::Ord for DfScalarFunction {
+ fn cmp(&self, other: &Self) -> std::cmp::Ordering {
+ self.raw_fn.cmp(&other.raw_fn)
+ }
+}
+impl std::hash::Hash for DfScalarFunction {
+ fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
+ self.raw_fn.hash(state);
+ }
+}
+
+#[cfg(test)]
+mod test {
+
+ use datatypes::prelude::ConcreteDataType;
+ use substrait::substrait_proto_df::proto::expression::literal::LiteralType;
+ use substrait::substrait_proto_df::proto::expression::{Literal, RexType};
+ use substrait::substrait_proto_df::proto::function_argument::ArgType;
+ use substrait::substrait_proto_df::proto::{Expression, FunctionArgument};
+
+ use super::*;
+ use crate::repr::{ColumnType, RelationType};
+
+ #[tokio::test]
+ async fn test_df_scalar_function() {
+ let raw_scalar_func = ScalarFunction {
+ function_reference: 0,
+ arguments: vec![FunctionArgument {
+ arg_type: Some(ArgType::Value(Expression {
+ rex_type: Some(RexType::Literal(Literal {
+ nullable: false,
+ type_variation_reference: 0,
+ literal_type: Some(LiteralType::I64(-1)),
+ })),
+ })),
+ }],
+ output_type: None,
+ ..Default::default()
+ };
+ let input_schema = RelationDesc::try_new(
+ RelationType::new(vec![ColumnType::new_nullable(
+ ConcreteDataType::null_datatype(),
+ )]),
+ vec!["null_column".to_string()],
+ )
+ .unwrap();
+ let extensions = FunctionExtensions::from_iter(vec![(0, "abs")]);
+ let raw_fn = RawDfScalarFn::from_proto(&raw_scalar_func, input_schema, extensions).unwrap();
+ let df_func = DfScalarFunction::try_from_raw_fn(raw_fn).await.unwrap();
+ assert_eq!(
+ df_func
+ .eval(&[Value::Null], &[ScalarExpr::Column(0)])
+ .unwrap(),
+ Value::Int64(1)
+ );
+ }
+}
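A rough sketch of the batch path this commit adds, using a hypothetical df_func whose input schema is a single Int64 column (unlike the null-typed schema in the test above):

use std::sync::Arc;

use datatypes::vectors::{Int64Vector, VectorRef};

let col: VectorRef = Arc::new(Int64Vector::from_vec(vec![-1, -2, 3]));
let batch = Batch::new(vec![col], 3);

// The argument expressions are evaluated against the whole batch, assembled
// into one RecordBatch matching the function's input schema, and evaluated once.
let out = df_func.eval_batch(&batch, &[ScalarExpr::Column(0)]).unwrap();
assert_eq!(out.len(), 3);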
diff --git a/src/flow/src/expr/error.rs b/src/flow/src/expr/error.rs
index ff1765df49fd..6703ce240471 100644
--- a/src/flow/src/expr/error.rs
+++ b/src/flow/src/expr/error.rs
@@ -14,17 +14,12 @@
//! Error handling for expression evaluation.
-use std::any::Any;
-
use arrow_schema::ArrowError;
use common_error::ext::BoxedError;
use common_macro::stack_trace_debug;
-use common_telemetry::common_error::ext::ErrorExt;
-use common_telemetry::common_error::status_code::StatusCode;
use datafusion_common::DataFusionError;
use datatypes::data_type::ConcreteDataType;
-use serde::{Deserialize, Serialize};
-use snafu::{Location, ResultExt, Snafu};
+use snafu::{Location, Snafu};
fn is_send_sync() {
fn check<T: Send + Sync>() {}
@@ -113,6 +108,7 @@ pub enum EvalError {
#[snafu(display("Arrow error: {raw:?}, context: {context}"))]
Arrow {
+ #[snafu(source)]
raw: ArrowError,
context: String,
#[snafu(implicit)]
diff --git a/src/flow/src/expr/func.rs b/src/flow/src/expr/func.rs
index ba8cdba71c70..143f1a82dda3 100644
--- a/src/flow/src/expr/func.rs
+++ b/src/flow/src/expr/func.rs
@@ -15,17 +15,20 @@
//! This module contains the definition of functions that can be used in expressions.
use std::collections::HashMap;
-use std::sync::OnceLock;
+use std::sync::{Arc, OnceLock};
+use arrow::array::{ArrayRef, BooleanArray};
use common_error::ext::BoxedError;
-use common_telemetry::debug;
use common_time::timestamp::TimeUnit;
use common_time::{DateTime, Timestamp};
use datafusion_expr::Operator;
use datatypes::data_type::ConcreteDataType;
+use datatypes::prelude::DataType;
use datatypes::types::cast;
-use datatypes::types::cast::CastOption;
use datatypes::value::Value;
+use datatypes::vectors::{
+ BooleanVector, DateTimeVector, Helper, TimestampMillisecondVector, VectorRef,
+};
use serde::{Deserialize, Serialize};
use smallvec::smallvec;
use snafu::{ensure, OptionExt, ResultExt};
@@ -34,12 +37,12 @@ use substrait::df_logical_plan::consumer::name_to_op;
use crate::error::{Error, ExternalSnafu, InvalidQuerySnafu, PlanSnafu};
use crate::expr::error::{
- CastValueSnafu, DivisionByZeroSnafu, EvalError, InternalSnafu, OverflowSnafu,
+ ArrowSnafu, CastValueSnafu, DataTypeSnafu, DivisionByZeroSnafu, EvalError, OverflowSnafu,
TryFromValueSnafu, TypeMismatchSnafu,
};
use crate::expr::signature::{GenericFn, Signature};
-use crate::expr::{InvalidArgumentSnafu, ScalarExpr, TypedExpr};
-use crate::repr::{self, value_to_internal_ts, Row};
+use crate::expr::{Batch, InvalidArgumentSnafu, ScalarExpr, TypedExpr};
+use crate::repr::{self, value_to_internal_ts};
/// UnmaterializableFunc is a function that can't be eval independently,
/// and require special handling
@@ -221,6 +224,129 @@ impl UnaryFunc {
}
}
+ pub fn eval_batch(&self, batch: &Batch, expr: &ScalarExpr) -> Result<VectorRef, EvalError> {
+ let arg_col = expr.eval_batch(batch)?;
+ match self {
+ Self::Not => {
+ let arrow_array = arg_col.to_arrow_array();
+ let bool_array = arrow_array
+ .as_any()
+ .downcast_ref::<BooleanArray>()
+ .context({
+ TypeMismatchSnafu {
+ expected: ConcreteDataType::boolean_datatype(),
+ actual: arg_col.data_type(),
+ }
+ })?;
+ let ret = arrow::compute::not(bool_array).context(ArrowSnafu { context: "not" })?;
+ let ret = BooleanVector::from(ret);
+ Ok(Arc::new(ret))
+ }
+ Self::IsNull => {
+ let arrow_array = arg_col.to_arrow_array();
+ let ret = arrow::compute::is_null(&arrow_array)
+ .context(ArrowSnafu { context: "is_null" })?;
+ let ret = BooleanVector::from(ret);
+ Ok(Arc::new(ret))
+ }
+ Self::IsTrue | Self::IsFalse => {
+ let arrow_array = arg_col.to_arrow_array();
+ let bool_array = arrow_array
+ .as_any()
+ .downcast_ref::<BooleanArray>()
+ .context({
+ TypeMismatchSnafu {
+ expected: ConcreteDataType::boolean_datatype(),
+ actual: arg_col.data_type(),
+ }
+ })?;
+
+ if matches!(self, Self::IsTrue) {
+ Ok(Arc::new(BooleanVector::from(bool_array.clone())))
+ } else {
+ let ret =
+ arrow::compute::not(bool_array).context(ArrowSnafu { context: "not" })?;
+ Ok(Arc::new(BooleanVector::from(ret)))
+ }
+ }
+ Self::StepTimestamp => {
+ let datetime_array = get_datetime_array(&arg_col)?;
+ let date_array_ref = datetime_array
+ .as_any()
+ .downcast_ref::<arrow::array::Date64Array>()
+ .context({
+ TypeMismatchSnafu {
+ expected: ConcreteDataType::boolean_datatype(),
+ actual: ConcreteDataType::from_arrow_type(datetime_array.data_type()),
+ }
+ })?;
+
+ let ret = arrow::compute::unary(date_array_ref, |arr| arr + 1);
+ let ret = DateTimeVector::from(ret);
+ Ok(Arc::new(ret))
+ }
+ Self::Cast(to) => {
+ let arrow_array = arg_col.to_arrow_array();
+ let ret = arrow::compute::cast(&arrow_array, &to.as_arrow_type())
+ .context(ArrowSnafu { context: "cast" })?;
+ let vector = Helper::try_into_vector(ret).context(DataTypeSnafu {
+ msg: "Fail to convert to Vector",
+ })?;
+ Ok(vector)
+ }
+ Self::TumbleWindowFloor {
+ window_size,
+ start_time,
+ } => {
+ let datetime_array = get_datetime_array(&arg_col)?;
+ let date_array_ref = datetime_array
+ .as_any()
+ .downcast_ref::<arrow::array::Date64Array>()
+ .context({
+ TypeMismatchSnafu {
+                            expected: ConcreteDataType::datetime_datatype(),
+ actual: ConcreteDataType::from_arrow_type(datetime_array.data_type()),
+ }
+ })?;
+
+ let start_time = start_time.map(|t| t.val());
+ let window_size = (window_size.to_nanosecond() / 1_000_000) as repr::Duration; // nanosecond to millisecond
+
+ let ret = arrow::compute::unary(date_array_ref, |ts| {
+ get_window_start(ts, window_size, start_time)
+ });
+
+ let ret = TimestampMillisecondVector::from(ret);
+ Ok(Arc::new(ret))
+ }
+ Self::TumbleWindowCeiling {
+ window_size,
+ start_time,
+ } => {
+ let datetime_array = get_datetime_array(&arg_col)?;
+ let date_array_ref = datetime_array
+ .as_any()
+ .downcast_ref::<arrow::array::Date64Array>()
+ .context({
+ TypeMismatchSnafu {
+                            expected: ConcreteDataType::datetime_datatype(),
+ actual: ConcreteDataType::from_arrow_type(datetime_array.data_type()),
+ }
+ })?;
+
+ let start_time = start_time.map(|t| t.val());
+ let window_size = (window_size.to_nanosecond() / 1_000_000) as repr::Duration; // nanosecond to millisecond
+
+ let ret = arrow::compute::unary(date_array_ref, |ts| {
+ get_window_start(ts, window_size, start_time) + window_size
+ });
+
+ let ret = TimestampMillisecondVector::from(ret);
+ Ok(Arc::new(ret))
+ }
+ }
+ }
+
/// Evaluate the function with given values and expression
///
/// # Arguments
@@ -314,6 +440,23 @@ impl UnaryFunc {
}
}
+fn get_datetime_array(vector: &VectorRef) -> Result<arrow::array::ArrayRef, EvalError> {
+ let arrow_array = vector.to_arrow_array();
+ let datetime_array =
+ if *arrow_array.data_type() == ConcreteDataType::datetime_datatype().as_arrow_type() {
+ arrow_array
+ } else {
+ arrow::compute::cast(
+ &arrow_array,
+ &ConcreteDataType::datetime_datatype().as_arrow_type(),
+ )
+ .context(ArrowSnafu {
+ context: "Trying to cast to datetime in StepTimestamp",
+ })?
+ };
+ Ok(datetime_array)
+}
+
fn get_window_start(
ts: repr::Timestamp,
window_size: repr::Duration,
@@ -692,6 +835,98 @@ impl BinaryFunc {
Ok((spec_fn, signature))
}
+ pub fn eval_batch(
+ &self,
+ batch: &Batch,
+ expr1: &ScalarExpr,
+ expr2: &ScalarExpr,
+ ) -> Result<VectorRef, EvalError> {
+ let left = expr1.eval_batch(batch)?;
+ let left = left.to_arrow_array();
+ let right = expr2.eval_batch(batch)?;
+ let right = right.to_arrow_array();
+
+ let arrow_array: ArrayRef = match self {
+ Self::Eq => Arc::new(
+ arrow::compute::kernels::cmp::eq(&left, &right)
+ .context(ArrowSnafu { context: "eq" })?,
+ ),
+ Self::NotEq => Arc::new(
+ arrow::compute::kernels::cmp::neq(&left, &right)
+ .context(ArrowSnafu { context: "neq" })?,
+ ),
+ Self::Lt => Arc::new(
+ arrow::compute::kernels::cmp::lt(&left, &right)
+ .context(ArrowSnafu { context: "lt" })?,
+ ),
+ Self::Lte => Arc::new(
+ arrow::compute::kernels::cmp::lt_eq(&left, &right)
+ .context(ArrowSnafu { context: "lte" })?,
+ ),
+ Self::Gt => Arc::new(
+ arrow::compute::kernels::cmp::gt(&left, &right)
+ .context(ArrowSnafu { context: "gt" })?,
+ ),
+ Self::Gte => Arc::new(
+ arrow::compute::kernels::cmp::gt_eq(&left, &right)
+ .context(ArrowSnafu { context: "gte" })?,
+ ),
+
+ Self::AddInt16
+ | Self::AddInt32
+ | Self::AddInt64
+ | Self::AddUInt16
+ | Self::AddUInt32
+ | Self::AddUInt64
+ | Self::AddFloat32
+ | Self::AddFloat64 => arrow::compute::kernels::numeric::add(&left, &right)
+ .context(ArrowSnafu { context: "add" })?,
+
+ Self::SubInt16
+ | Self::SubInt32
+ | Self::SubInt64
+ | Self::SubUInt16
+ | Self::SubUInt32
+ | Self::SubUInt64
+ | Self::SubFloat32
+ | Self::SubFloat64 => arrow::compute::kernels::numeric::sub(&left, &right)
+ .context(ArrowSnafu { context: "sub" })?,
+
+ Self::MulInt16
+ | Self::MulInt32
+ | Self::MulInt64
+ | Self::MulUInt16
+ | Self::MulUInt32
+ | Self::MulUInt64
+ | Self::MulFloat32
+ | Self::MulFloat64 => arrow::compute::kernels::numeric::mul(&left, &right)
+ .context(ArrowSnafu { context: "mul" })?,
+
+ Self::DivInt16
+ | Self::DivInt32
+ | Self::DivInt64
+ | Self::DivUInt16
+ | Self::DivUInt32
+ | Self::DivUInt64
+ | Self::DivFloat32
+            | Self::DivFloat64 => arrow::compute::kernels::numeric::div(&left, &right)
+ .context(ArrowSnafu { context: "div" })?,
+
+ Self::ModInt16
+ | Self::ModInt32
+ | Self::ModInt64
+ | Self::ModUInt16
+ | Self::ModUInt32
+ | Self::ModUInt64 => arrow::compute::kernels::numeric::rem(&left, &right)
+ .context(ArrowSnafu { context: "rem" })?,
+ };
+
+ let vector = Helper::try_into_vector(arrow_array).context(DataTypeSnafu {
+ msg: "Fail to convert to Vector",
+ })?;
+ Ok(vector)
+ }
+
/// Evaluate the function with given values and expression
///
/// # Arguments
@@ -824,6 +1059,51 @@ impl VariadicFunc {
}
}
+ pub fn eval_batch(&self, batch: &Batch, exprs: &[ScalarExpr]) -> Result<VectorRef, EvalError> {
+ ensure!(
+ !exprs.is_empty(),
+ InvalidArgumentSnafu {
+                reason: format!("Variadic function {:?} requires at least 1 argument", self)
+ }
+ );
+ let args = exprs
+ .iter()
+ .map(|expr| expr.eval_batch(batch).map(|v| v.to_arrow_array()))
+ .collect::<Result<Vec<_>, _>>()?;
+ let mut iter = args.into_iter();
+
+ let first = iter.next().unwrap();
+ let mut left = first
+ .as_any()
+ .downcast_ref::<BooleanArray>()
+ .context({
+ TypeMismatchSnafu {
+ expected: ConcreteDataType::boolean_datatype(),
+ actual: ConcreteDataType::from_arrow_type(first.data_type()),
+ }
+ })?
+ .clone();
+
+ for right in iter {
+ let right = right.as_any().downcast_ref::<BooleanArray>().context({
+ TypeMismatchSnafu {
+ expected: ConcreteDataType::boolean_datatype(),
+ actual: ConcreteDataType::from_arrow_type(right.data_type()),
+ }
+ })?;
+ left = match self {
+ Self::And => {
+ arrow::compute::and(&left, right).context(ArrowSnafu { context: "and" })?
+ }
+ Self::Or => {
+ arrow::compute::or(&left, right).context(ArrowSnafu { context: "or" })?
+ }
+ }
+ }
+
+ Ok(Arc::new(BooleanVector::from(left)))
+ }
+
/// Evaluate the function with given values and expressions
pub fn eval(&self, values: &[Value], exprs: &[ScalarExpr]) -> Result<Value, EvalError> {
match self {
diff --git a/src/flow/src/expr/linear.rs b/src/flow/src/expr/linear.rs
index b61ff944daa2..234ae12cef14 100644
--- a/src/flow/src/expr/linear.rs
+++ b/src/flow/src/expr/linear.rs
@@ -14,17 +14,15 @@
//! define MapFilterProject which is a compound operator that can be applied row-by-row.
-use std::collections::{BTreeMap, BTreeSet, VecDeque};
+use std::collections::{BTreeMap, BTreeSet};
use common_telemetry::debug;
use datatypes::value::Value;
-use itertools::Itertools;
-use serde::{Deserialize, Serialize};
-use snafu::{ensure, OptionExt};
+use snafu::ensure;
use crate::error::{Error, InvalidQuerySnafu};
use crate::expr::error::{EvalError, InternalSnafu};
-use crate::expr::{Id, InvalidArgumentSnafu, LocalId, ScalarExpr};
+use crate::expr::{InvalidArgumentSnafu, ScalarExpr};
use crate::repr::{self, value_to_internal_ts, Diff, Row};
/// A compound operator that can be applied row-by-row.
@@ -738,7 +736,6 @@ impl MfpPlan {
#[cfg(test)]
mod test {
use datatypes::data_type::ConcreteDataType;
- use itertools::Itertools;
use super::*;
use crate::expr::{BinaryFunc, UnaryFunc, UnmaterializableFunc};
diff --git a/src/flow/src/expr/relation.rs b/src/flow/src/expr/relation.rs
index 661f716dcd29..3661db4ff0f2 100644
--- a/src/flow/src/expr/relation.rs
+++ b/src/flow/src/expr/relation.rs
@@ -15,7 +15,6 @@
//! Describes an aggregation function and it's input expression.
pub(crate) use func::AggregateFunc;
-use serde::{Deserialize, Serialize};
use crate::expr::ScalarExpr;
diff --git a/src/flow/src/expr/relation/accum.rs b/src/flow/src/expr/relation/accum.rs
index c9affae7601d..252913de56f6 100644
--- a/src/flow/src/expr/relation/accum.rs
+++ b/src/flow/src/expr/relation/accum.rs
@@ -24,11 +24,9 @@ use std::any::type_name;
use std::fmt::Display;
use common_decimal::Decimal128;
-use common_time::{Date, DateTime};
use datatypes::data_type::ConcreteDataType;
use datatypes::value::{OrderedF32, OrderedF64, OrderedFloat, Value};
use enum_dispatch::enum_dispatch;
-use hydroflow::futures::stream::Concat;
use serde::{Deserialize, Serialize};
use snafu::ensure;
@@ -761,7 +759,10 @@ fn ty_eq_without_precision(left: ConcreteDataType, right: ConcreteDataType) -> b
#[allow(clippy::too_many_lines)]
#[cfg(test)]
mod test {
+ use common_time::DateTime;
+
use super::*;
+
#[test]
fn test_accum() {
let testcases = vec![
diff --git a/src/flow/src/expr/relation/func.rs b/src/flow/src/expr/relation/func.rs
index 5307a6aedb3a..868d83b43f02 100644
--- a/src/flow/src/expr/relation/func.rs
+++ b/src/flow/src/expr/relation/func.rs
@@ -16,16 +16,15 @@ use std::collections::HashMap;
use std::str::FromStr;
use std::sync::OnceLock;
-use common_time::{Date, DateTime};
use datatypes::prelude::ConcreteDataType;
-use datatypes::value::{OrderedF32, OrderedF64, Value};
+use datatypes::value::Value;
use serde::{Deserialize, Serialize};
use smallvec::smallvec;
-use snafu::{IntoError, OptionExt, ResultExt};
+use snafu::{IntoError, OptionExt};
use strum::{EnumIter, IntoEnumIterator};
use crate::error::{DatafusionSnafu, Error, InvalidQuerySnafu};
-use crate::expr::error::{EvalError, TryFromValueSnafu, TypeMismatchSnafu};
+use crate::expr::error::EvalError;
use crate::expr::relation::accum::{Accum, Accumulator};
use crate::expr::signature::{GenericFn, Signature};
use crate::repr::Diff;
diff --git a/src/flow/src/expr/scalar.rs b/src/flow/src/expr/scalar.rs
index 8a3290a932f1..b582c75114a1 100644
--- a/src/flow/src/expr/scalar.rs
+++ b/src/flow/src/expr/scalar.rs
@@ -15,34 +15,22 @@
//! Scalar expressions.
use std::collections::{BTreeMap, BTreeSet};
-use std::sync::{Arc, Mutex};
-use bytes::BytesMut;
use common_error::ext::BoxedError;
-use common_recordbatch::DfRecordBatch;
-use common_telemetry::debug;
-use datafusion_physical_expr::PhysicalExpr;
-use datatypes::data_type::DataType;
-use datatypes::prelude::ConcreteDataType;
+use datatypes::prelude::{ConcreteDataType, DataType};
use datatypes::value::Value;
-use datatypes::{arrow_array, value};
-use prost::Message;
-use serde::{Deserialize, Serialize};
-use snafu::{ensure, ResultExt};
-use substrait::error::{DecodeRelSnafu, EncodeRelSnafu};
-use substrait::substrait_proto_df::proto::expression::{RexType, ScalarFunction};
-use substrait::substrait_proto_df::proto::Expression;
+use datatypes::vectors::{BooleanVector, Helper, NullVector, Vector, VectorRef};
+use snafu::{ensure, OptionExt, ResultExt};
use crate::error::{
DatafusionSnafu, Error, InvalidQuerySnafu, UnexpectedSnafu, UnsupportedTemporalFilterSnafu,
};
use crate::expr::error::{
- ArrowSnafu, DatafusionSnafu as EvalDatafusionSnafu, EvalError, ExternalSnafu,
- InvalidArgumentSnafu, OptimizeSnafu,
+ DataTypeSnafu, EvalError, InternalSnafu, InvalidArgumentSnafu, OptimizeSnafu, TypeMismatchSnafu,
};
use crate::expr::func::{BinaryFunc, UnaryFunc, UnmaterializableFunc, VariadicFunc};
-use crate::repr::{ColumnType, RelationDesc, RelationType};
-use crate::transform::{from_scalar_fn_to_df_fn_impl, FunctionExtensions};
+use crate::expr::{Batch, DfScalarFunction};
+use crate::repr::{ColumnType, RelationType};
/// A scalar expression with a known type.
#[derive(Ord, PartialOrd, Clone, Debug, Eq, PartialEq, Hash)]
pub struct TypedExpr {
@@ -174,163 +162,6 @@ pub enum ScalarExpr {
},
}
-/// A way to represent a scalar function that is implemented in Datafusion
-#[derive(Debug, Clone)]
-pub struct DfScalarFunction {
- raw_fn: RawDfScalarFn,
- // TODO(discord9): directly from datafusion expr
- fn_impl: Arc<dyn PhysicalExpr>,
- df_schema: Arc<datafusion_common::DFSchema>,
-}
-
-impl DfScalarFunction {
- pub fn new(raw_fn: RawDfScalarFn, fn_impl: Arc<dyn PhysicalExpr>) -> Result<Self, Error> {
- Ok(Self {
- df_schema: Arc::new(raw_fn.input_schema.to_df_schema()?),
- raw_fn,
- fn_impl,
- })
- }
-
- pub async fn try_from_raw_fn(raw_fn: RawDfScalarFn) -> Result<Self, Error> {
- Ok(Self {
- fn_impl: raw_fn.get_fn_impl().await?,
- df_schema: Arc::new(raw_fn.input_schema.to_df_schema()?),
- raw_fn,
- })
- }
-
- /// eval a list of expressions using input values
- fn eval_args(values: &[Value], exprs: &[ScalarExpr]) -> Result<Vec<Value>, EvalError> {
- exprs
- .iter()
- .map(|expr| expr.eval(values))
- .collect::<Result<_, _>>()
- }
-
- // TODO(discord9): add RecordBatch support
- pub fn eval(&self, values: &[Value], exprs: &[ScalarExpr]) -> Result<Value, EvalError> {
- // first eval exprs to construct values to feed to datafusion
- let values: Vec<_> = Self::eval_args(values, exprs)?;
- if values.is_empty() {
- return InvalidArgumentSnafu {
- reason: "values is empty".to_string(),
- }
- .fail();
- }
- // TODO(discord9): make cols all array length of one
- let mut cols = vec![];
- for (idx, typ) in self
- .raw_fn
- .input_schema
- .typ()
- .column_types
- .iter()
- .enumerate()
- {
- let typ = typ.scalar_type();
- let mut array = typ.create_mutable_vector(1);
- array.push_value_ref(values[idx].as_value_ref());
- cols.push(array.to_vector().to_arrow_array());
- }
- let schema = self.df_schema.inner().clone();
- let rb = DfRecordBatch::try_new(schema, cols).map_err(|err| {
- ArrowSnafu {
- raw: err,
- context:
- "Failed to create RecordBatch from values when eval datafusion scalar function",
- }
- .build()
- })?;
-
- let res = self.fn_impl.evaluate(&rb).map_err(|err| {
- EvalDatafusionSnafu {
- raw: err,
- context: "Failed to evaluate datafusion scalar function",
- }
- .build()
- })?;
- let res = common_query::columnar_value::ColumnarValue::try_from(&res)
- .map_err(BoxedError::new)
- .context(ExternalSnafu)?;
- let res_vec = res
- .try_into_vector(1)
- .map_err(BoxedError::new)
- .context(ExternalSnafu)?;
- let res_val = res_vec
- .try_get(0)
- .map_err(BoxedError::new)
- .context(ExternalSnafu)?;
- Ok(res_val)
- }
-}
-
-#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
-pub struct RawDfScalarFn {
- /// The raw bytes encoded datafusion scalar function
- pub(crate) f: bytes::BytesMut,
- /// The input schema of the function
- pub(crate) input_schema: RelationDesc,
- /// Extension contains mapping from function reference to function name
- pub(crate) extensions: FunctionExtensions,
-}
-
-impl RawDfScalarFn {
- pub fn from_proto(
- f: &substrait::substrait_proto_df::proto::expression::ScalarFunction,
- input_schema: RelationDesc,
- extensions: FunctionExtensions,
- ) -> Result<Self, Error> {
- let mut buf = BytesMut::new();
- f.encode(&mut buf)
- .context(EncodeRelSnafu)
- .map_err(BoxedError::new)
- .context(crate::error::ExternalSnafu)?;
- Ok(Self {
- f: buf,
- input_schema,
- extensions,
- })
- }
- async fn get_fn_impl(&self) -> Result<Arc<dyn PhysicalExpr>, Error> {
- let f = ScalarFunction::decode(&mut self.f.as_ref())
- .context(DecodeRelSnafu)
- .map_err(BoxedError::new)
- .context(crate::error::ExternalSnafu)?;
- debug!("Decoded scalar function: {:?}", f);
-
- let input_schema = &self.input_schema;
- let extensions = &self.extensions;
-
- from_scalar_fn_to_df_fn_impl(&f, input_schema, extensions).await
- }
-}
-
-impl std::cmp::PartialEq for DfScalarFunction {
- fn eq(&self, other: &Self) -> bool {
- self.raw_fn.eq(&other.raw_fn)
- }
-}
-
-// can't derive Eq because of Arc<dyn PhysicalExpr> not eq, so implement it manually
-impl std::cmp::Eq for DfScalarFunction {}
-
-impl std::cmp::PartialOrd for DfScalarFunction {
- fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
- Some(self.cmp(other))
- }
-}
-impl std::cmp::Ord for DfScalarFunction {
- fn cmp(&self, other: &Self) -> std::cmp::Ordering {
- self.raw_fn.cmp(&other.raw_fn)
- }
-}
-impl std::hash::Hash for DfScalarFunction {
- fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
- self.raw_fn.hash(state);
- }
-}
-
impl ScalarExpr {
pub fn with_type(self, typ: ColumnType) -> TypedExpr {
TypedExpr::new(self, typ)
@@ -428,6 +259,177 @@ impl ScalarExpr {
}
}
+ pub fn eval_batch(&self, batch: &Batch) -> Result<VectorRef, EvalError> {
+ match self {
+ ScalarExpr::Column(i) => Ok(batch.batch()[*i].clone()),
+ ScalarExpr::Literal(val, dt) => Ok(Helper::try_from_scalar_value(
+ val.try_to_scalar_value(dt).context(DataTypeSnafu {
+ msg: "Failed to convert literal to scalar value",
+ })?,
+ batch.row_count(),
+ )
+ .context(DataTypeSnafu {
+ msg: "Failed to convert scalar value to vector ref when parsing literal",
+ })?),
+ ScalarExpr::CallUnmaterializable(_) => OptimizeSnafu {
+ reason: "Can't eval unmaterializable function",
+ }
+ .fail()?,
+ ScalarExpr::CallUnary { func, expr } => func.eval_batch(batch, expr),
+ ScalarExpr::CallBinary { func, expr1, expr2 } => func.eval_batch(batch, expr1, expr2),
+ ScalarExpr::CallVariadic { func, exprs } => func.eval_batch(batch, exprs),
+ ScalarExpr::CallDf {
+ df_scalar_fn,
+ exprs,
+ } => df_scalar_fn.eval_batch(batch, exprs),
+ ScalarExpr::If { cond, then, els } => Self::eval_if_then(batch, cond, then, els),
+ }
+ }
+
+ fn eval_if_then(
+ batch: &Batch,
+ cond: &ScalarExpr,
+ then: &ScalarExpr,
+ els: &ScalarExpr,
+ ) -> Result<VectorRef, EvalError> {
+ let conds = cond.eval_batch(batch)?;
+ let bool_conds = conds
+ .as_any()
+ .downcast_ref::<BooleanVector>()
+ .context({
+ TypeMismatchSnafu {
+ expected: ConcreteDataType::boolean_datatype(),
+ actual: conds.data_type(),
+ }
+ })?
+ .as_boolean_array();
+
+ let mut then_input_batch = None;
+ let mut else_input_batch = None;
+ let mut null_input_batch = None;
+
+        // instructions for how to reassemble the result vector:
+        // iterate over (cond, offset, length) and append the matching slice to the result
+ let mut assembly_idx = vec![];
+
+        // append `to_be_append` to `batch`, returning the appended slice as (offset, length)
+ fn append_batch(
+ batch: &mut Option<Batch>,
+ to_be_append: Batch,
+ ) -> Result<(usize, usize), EvalError> {
+ let len = to_be_append.row_count();
+ if let Some(batch) = batch {
+ let offset = batch.row_count();
+ batch.append_batch(to_be_append)?;
+ Ok((offset, len))
+ } else {
+ *batch = Some(to_be_append);
+ Ok((0, len))
+ }
+ }
+
+ let mut prev_cond: Option<Option<bool>> = None;
+ let mut prev_start_idx: Option<usize> = None;
+        // first split the input batch into runs of rows that share the same cond value
+ for (idx, cond) in bool_conds.iter().enumerate() {
+            // if this row belongs to the same run as the previous one, keep scanning
+ if prev_cond == Some(cond) {
+ continue;
+ } else if let Some(prev_cond_idx) = prev_start_idx {
+ let prev_cond = prev_cond.unwrap();
+
+ // put a slice to corresponding batch
+ let slice_offset = prev_cond_idx;
+ let slice_length = idx - prev_cond_idx;
+ let to_be_append = batch.slice(slice_offset, slice_length);
+
+ let to_put_back = match prev_cond {
+ Some(true) => (
+ Some(true),
+ append_batch(&mut then_input_batch, to_be_append)?,
+ ),
+ Some(false) => (
+ Some(false),
+ append_batch(&mut else_input_batch, to_be_append)?,
+ ),
+ None => (None, append_batch(&mut null_input_batch, to_be_append)?),
+ };
+ assembly_idx.push(to_put_back);
+ }
+ prev_cond = Some(cond);
+ prev_start_idx = Some(idx);
+ }
+
+        // flush the last run (a no-op if the input batch was empty)
+ if let Some(slice_offset) = prev_start_idx {
+ let prev_cond = prev_cond.unwrap();
+ let slice_length = bool_conds.len() - slice_offset;
+ let to_be_append = batch.slice(slice_offset, slice_length);
+ let to_put_back = match prev_cond {
+ Some(true) => (
+ Some(true),
+ append_batch(&mut then_input_batch, to_be_append)?,
+ ),
+ Some(false) => (
+ Some(false),
+ append_batch(&mut else_input_batch, to_be_append)?,
+ ),
+ None => (None, append_batch(&mut null_input_batch, to_be_append)?),
+ };
+ assembly_idx.push(to_put_back);
+ }
+
+ let then_output_vec = then_input_batch
+ .map(|batch| then.eval_batch(&batch))
+ .transpose()?;
+ let else_output_vec = else_input_batch
+ .map(|batch| els.eval_batch(&batch))
+ .transpose()?;
+ let null_output_vec = null_input_batch
+ .map(|null| NullVector::new(null.row_count()).slice(0, null.row_count()));
+
+ let dt = then_output_vec
+ .as_ref()
+ .map(|v| v.data_type())
+ .or(else_output_vec.as_ref().map(|v| v.data_type()))
+ .unwrap_or(ConcreteDataType::null_datatype());
+ let mut builder = dt.create_mutable_vector(conds.len());
+ for (cond, (offset, length)) in assembly_idx {
+ let slice = match cond {
+ Some(true) => then_output_vec.as_ref(),
+ Some(false) => else_output_vec.as_ref(),
+ None => null_output_vec.as_ref(),
+ }
+ .context(InternalSnafu {
+ reason: "Expect corresponding output vector to exist",
+ })?;
+ // TODO(discord9): seems `extend_slice_of` doesn't support NullVector or ConstantVector
+ // consider adding it maybe?
+ if slice.data_type().is_null() {
+ builder.push_nulls(length);
+ } else if slice.is_const() {
+ let arr = slice.slice(offset, length).to_arrow_array();
+ let vector = Helper::try_into_vector(arr).context(DataTypeSnafu {
+ msg: "Failed to convert arrow array to vector",
+ })?;
+ builder
+ .extend_slice_of(vector.as_ref(), 0, vector.len())
+ .context(DataTypeSnafu {
+ msg: "Failed to build result vector for if-then expression",
+ })?;
+ } else {
+ builder
+ .extend_slice_of(slice.as_ref(), offset, length)
+ .context(DataTypeSnafu {
+ msg: "Failed to build result vector for if-then expression",
+ })?;
+ }
+ }
+ let result_vec = builder.to_vector();
+
+ Ok(result_vec)
+ }
+
/// Eval this expression with the given values.
pub fn eval(&self, values: &[Value]) -> Result<Value, EvalError> {
match self {
@@ -747,18 +749,11 @@ impl ScalarExpr {
#[cfg(test)]
mod test {
- use datatypes::arrow::array::Scalar;
- use query::parser::QueryLanguageParser;
- use query::QueryEngine;
- use session::context::QueryContext;
- use substrait::extension_serializer;
- use substrait::substrait_proto_df::proto::expression::literal::LiteralType;
- use substrait::substrait_proto_df::proto::expression::Literal;
- use substrait::substrait_proto_df::proto::function_argument::ArgType;
- use substrait::substrait_proto_df::proto::r#type::Kind;
- use substrait::substrait_proto_df::proto::{r#type, FunctionArgument, Type};
+ use datatypes::vectors::Int32Vector;
+ use pretty_assertions::assert_eq;
use super::*;
+
#[test]
fn test_extract_bound() {
let test_list: [(ScalarExpr, Result<_, EvalError>); 5] = [
@@ -849,37 +844,68 @@ mod test {
assert!(matches!(res, Err(Error::InvalidQuery { .. })));
}
- #[tokio::test]
- async fn test_df_scalar_function() {
- let raw_scalar_func = ScalarFunction {
- function_reference: 0,
- arguments: vec![FunctionArgument {
- arg_type: Some(ArgType::Value(Expression {
- rex_type: Some(RexType::Literal(Literal {
- nullable: false,
- type_variation_reference: 0,
- literal_type: Some(LiteralType::I64(-1)),
- })),
- })),
- }],
- output_type: None,
- ..Default::default()
- };
- let input_schema = RelationDesc::try_new(
- RelationType::new(vec![ColumnType::new_nullable(
- ConcreteDataType::null_datatype(),
- )]),
- vec!["null_column".to_string()],
- )
- .unwrap();
- let extensions = FunctionExtensions::from_iter(vec![(0, "abs")]);
- let raw_fn = RawDfScalarFn::from_proto(&raw_scalar_func, input_schema, extensions).unwrap();
- let df_func = DfScalarFunction::try_from_raw_fn(raw_fn).await.unwrap();
- assert_eq!(
- df_func
- .eval(&[Value::Null], &[ScalarExpr::Column(0)])
- .unwrap(),
- Value::Int64(1)
- );
+ #[test]
+ fn test_eval_batch() {
+ // TODO(discord9): add more tests
+ {
+ let expr = ScalarExpr::If {
+ cond: Box::new(ScalarExpr::Column(0).call_binary(
+ ScalarExpr::literal(Value::from(0), ConcreteDataType::int32_datatype()),
+ BinaryFunc::Eq,
+ )),
+ then: Box::new(ScalarExpr::literal(
+ Value::from(42),
+ ConcreteDataType::int32_datatype(),
+ )),
+ els: Box::new(ScalarExpr::literal(
+ Value::from(37),
+ ConcreteDataType::int32_datatype(),
+ )),
+ };
+ let raw = vec![
+ None,
+ Some(0),
+ Some(1),
+ None,
+ None,
+ Some(0),
+ Some(0),
+ Some(1),
+ Some(1),
+ ];
+ let raw_len = raw.len();
+ let vectors = vec![Int32Vector::from(raw).slice(0, raw_len)];
+
+ let batch = Batch::new(vectors, raw_len);
+ let expected = Int32Vector::from(vec![
+ None,
+ Some(42),
+ Some(37),
+ None,
+ None,
+ Some(42),
+ Some(42),
+ Some(37),
+ Some(37),
+ ])
+ .slice(0, raw_len);
+ assert_eq!(expr.eval_batch(&batch).unwrap(), expected);
+
+ let raw = vec![Some(0)];
+ let raw_len = raw.len();
+ let vectors = vec![Int32Vector::from(raw).slice(0, raw_len)];
+
+ let batch = Batch::new(vectors, raw_len);
+ let expected = Int32Vector::from(vec![Some(42)]).slice(0, raw_len);
+ assert_eq!(expr.eval_batch(&batch).unwrap(), expected);
+
+ let raw: Vec<Option<i32>> = vec![];
+ let raw_len = raw.len();
+ let vectors = vec![Int32Vector::from(raw).slice(0, raw_len)];
+
+ let batch = Batch::new(vectors, raw_len);
+ let expected = NullVector::new(raw_len).slice(0, raw_len);
+ assert_eq!(expr.eval_batch(&batch).unwrap(), expected);
+ }
}
}
diff --git a/src/flow/src/heartbeat.rs b/src/flow/src/heartbeat.rs
index a48230a89883..96635e350dde 100644
--- a/src/flow/src/heartbeat.rs
+++ b/src/flow/src/heartbeat.rs
@@ -19,24 +19,21 @@ use std::sync::Arc;
use api::v1::meta::{HeartbeatRequest, Peer};
use common_error::ext::BoxedError;
-use common_grpc::channel_manager::{ChannelConfig, ChannelManager};
-use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler;
use common_meta::heartbeat::handler::{
- HandlerGroupExecutor, HeartbeatResponseHandlerContext, HeartbeatResponseHandlerExecutorRef,
+ HeartbeatResponseHandlerContext, HeartbeatResponseHandlerExecutorRef,
};
use common_meta::heartbeat::mailbox::{HeartbeatMailbox, MailboxRef, OutgoingMessage};
use common_meta::heartbeat::utils::outgoing_message_to_mailbox_message;
use common_telemetry::{debug, error, info, warn};
use greptime_proto::v1::meta::NodeInfo;
-use meta_client::client::{HeartbeatSender, HeartbeatStream, MetaClient, MetaClientBuilder};
-use meta_client::{MetaClientOptions, MetaClientType};
+use meta_client::client::{HeartbeatSender, HeartbeatStream, MetaClient};
use servers::addrs;
use servers::heartbeat_options::HeartbeatOptions;
use snafu::ResultExt;
use tokio::sync::mpsc;
-use tokio::time::{Duration, Instant};
+use tokio::time::Duration;
-use crate::error::{ExternalSnafu, MetaClientInitSnafu};
+use crate::error::ExternalSnafu;
use crate::{Error, FlownodeOptions};
/// The flownode heartbeat task which sending `[HeartbeatRequest]` to Metasrv periodically in background.
diff --git a/src/flow/src/lib.rs b/src/flow/src/lib.rs
index 99d6773789a2..738ed524ba04 100644
--- a/src/flow/src/lib.rs
+++ b/src/flow/src/lib.rs
@@ -19,7 +19,6 @@
#![feature(let_chains)]
#![feature(duration_abs_diff)]
#![allow(dead_code)]
-#![allow(unused_imports)]
#![warn(clippy::missing_docs_in_private_items)]
#![warn(clippy::too_many_lines)]
// allow unused for now because it should be use later
diff --git a/src/flow/src/plan.rs b/src/flow/src/plan.rs
index c31ddb652e3b..dec70324f9fd 100644
--- a/src/flow/src/plan.rs
+++ b/src/flow/src/plan.rs
@@ -20,17 +20,11 @@ mod reduce;
use std::collections::BTreeSet;
-use datatypes::arrow::ipc::Map;
-use serde::{Deserialize, Serialize};
-
use crate::error::Error;
-use crate::expr::{
- AggregateExpr, EvalError, GlobalId, Id, LocalId, MapFilterProject, SafeMfpPlan, ScalarExpr,
- TypedExpr,
-};
+use crate::expr::{GlobalId, Id, LocalId, MapFilterProject, SafeMfpPlan, TypedExpr};
use crate::plan::join::JoinPlan;
pub(crate) use crate::plan::reduce::{AccumulablePlan, AggrWithIndex, KeyValPlan, ReducePlan};
-use crate::repr::{ColumnType, DiffRow, RelationDesc, RelationType};
+use crate::repr::{DiffRow, RelationDesc};
/// A plan for a dataflow component. But with type to indicate the output type of the relation.
#[derive(Debug, Clone, Eq, PartialEq, Ord, PartialOrd)]
diff --git a/src/flow/src/plan/join.rs b/src/flow/src/plan/join.rs
index 4acf0db2342e..1a437dd00d33 100644
--- a/src/flow/src/plan/join.rs
+++ b/src/flow/src/plan/join.rs
@@ -12,8 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use serde::{Deserialize, Serialize};
-
use crate::expr::ScalarExpr;
use crate::plan::SafeMfpPlan;
diff --git a/src/flow/src/plan/reduce.rs b/src/flow/src/plan/reduce.rs
index 3d0d8b356a37..1edd0c40dd55 100644
--- a/src/flow/src/plan/reduce.rs
+++ b/src/flow/src/plan/reduce.rs
@@ -12,9 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use serde::{Deserialize, Serialize};
-
-use crate::expr::{AggregateExpr, Id, LocalId, MapFilterProject, SafeMfpPlan, ScalarExpr};
+use crate::expr::{AggregateExpr, SafeMfpPlan};
/// Describe how to extract key-value pair from a `Row`
#[derive(Debug, Clone, Eq, PartialEq, Ord, PartialOrd)]
diff --git a/src/flow/src/repr.rs b/src/flow/src/repr.rs
index 188629e58db0..9ce1efa0a04f 100644
--- a/src/flow/src/repr.rs
+++ b/src/flow/src/repr.rs
@@ -17,14 +17,10 @@
mod relation;
-use std::borrow::Borrow;
-use std::slice::SliceIndex;
-
use api::helper::{pb_value_to_value_ref, value_to_grpc_value};
use api::v1::Row as ProtoRow;
use datatypes::data_type::ConcreteDataType;
use datatypes::types::cast;
-use datatypes::types::cast::CastOption;
use datatypes::value::Value;
use itertools::Itertools;
pub(crate) use relation::{ColumnType, Key, RelationDesc, RelationType};
diff --git a/src/flow/src/repr/relation.rs b/src/flow/src/repr/relation.rs
index e470ad9dbdbf..65b75ffdcef8 100644
--- a/src/flow/src/repr/relation.rs
+++ b/src/flow/src/repr/relation.rs
@@ -12,8 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::collections::{BTreeMap, HashMap};
-
use datafusion_common::DFSchema;
use datatypes::data_type::DataType;
use datatypes::prelude::ConcreteDataType;
@@ -22,7 +20,7 @@ use serde::{Deserialize, Serialize};
use snafu::{ensure, OptionExt, ResultExt};
use crate::error::{DatafusionSnafu, InternalSnafu, InvalidQuerySnafu, Result, UnexpectedSnafu};
-use crate::expr::{MapFilterProject, SafeMfpPlan, ScalarExpr};
+use crate::expr::{SafeMfpPlan, ScalarExpr};
/// a set of column indices that are "keys" for the collection.
#[derive(Default, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Serialize, Deserialize, Hash)]
diff --git a/src/flow/src/server.rs b/src/flow/src/server.rs
index a32ca197d289..d78f9219cb0c 100644
--- a/src/flow/src/server.rs
+++ b/src/flow/src/server.rs
@@ -20,35 +20,27 @@ use std::sync::Arc;
use api::v1::{RowDeleteRequests, RowInsertRequests};
use cache::{TABLE_FLOWNODE_SET_CACHE_NAME, TABLE_ROUTE_CACHE_NAME};
use catalog::CatalogManagerRef;
-use client::client_manager::NodeClients;
use common_base::Plugins;
use common_error::ext::BoxedError;
-use common_grpc::channel_manager::ChannelConfig;
-use common_meta::cache::{
- LayeredCacheRegistry, LayeredCacheRegistryRef, TableFlownodeSetCacheRef, TableRouteCacheRef,
-};
-use common_meta::ddl::{table_meta, ProcedureExecutorRef};
-use common_meta::heartbeat::handler::HandlerGroupExecutor;
+use common_meta::cache::{LayeredCacheRegistryRef, TableFlownodeSetCacheRef, TableRouteCacheRef};
+use common_meta::ddl::ProcedureExecutorRef;
use common_meta::key::flow::FlowMetadataManagerRef;
use common_meta::key::TableMetadataManagerRef;
use common_meta::kv_backend::KvBackendRef;
-use common_meta::node_manager::{self, Flownode, NodeManagerRef};
+use common_meta::node_manager::{Flownode, NodeManagerRef};
use common_query::Output;
use common_telemetry::tracing::info;
-use futures::{FutureExt, StreamExt, TryStreamExt};
+use futures::{FutureExt, TryStreamExt};
use greptime_proto::v1::flow::{flow_server, FlowRequest, FlowResponse, InsertRequests};
use itertools::Itertools;
-use meta_client::client::MetaClient;
use operator::delete::Deleter;
use operator::insert::Inserter;
use operator::statement::StatementExecutor;
use partition::manager::PartitionRuleManager;
use query::{QueryEngine, QueryEngineFactory};
-use serde::de::Unexpected;
use servers::error::{AlreadyStartedSnafu, StartGrpcSnafu, TcpBindSnafu, TcpIncomingSnafu};
-use servers::heartbeat_options::HeartbeatOptions;
use servers::server::Server;
-use session::context::{QueryContext, QueryContextBuilder, QueryContextRef};
+use session::context::{QueryContextBuilder, QueryContextRef};
use snafu::{ensure, OptionExt, ResultExt};
use tokio::net::TcpListener;
use tokio::sync::{broadcast, oneshot, Mutex};
diff --git a/src/flow/src/transform.rs b/src/flow/src/transform.rs
index f8075b5dc221..5441617b93ab 100644
--- a/src/flow/src/transform.rs
+++ b/src/flow/src/transform.rs
@@ -16,37 +16,25 @@
use std::collections::{BTreeMap, HashMap};
use std::sync::Arc;
-use bytes::buf::IntoIter;
use common_error::ext::BoxedError;
-use common_telemetry::info;
use datafusion::optimizer::simplify_expressions::SimplifyExpressions;
use datafusion::optimizer::{OptimizerContext, OptimizerRule};
use datatypes::data_type::ConcreteDataType as CDT;
-use literal::{from_substrait_literal, from_substrait_type};
-use prost::Message;
use query::parser::QueryLanguageParser;
use query::plan::LogicalPlan;
use query::query_engine::DefaultSerializer;
use query::QueryEngine;
use serde::{Deserialize, Serialize};
-use session::context::QueryContext;
-use snafu::{OptionExt, ResultExt};
+use snafu::ResultExt;
/// note here we are using the `substrait_proto_df` crate from the `substrait` module and
/// rename it to `substrait_proto`
-use substrait::{
- substrait_proto_df as substrait_proto, DFLogicalSubstraitConvertor, SubstraitPlan,
-};
+use substrait::{substrait_proto_df as substrait_proto, DFLogicalSubstraitConvertor};
use substrait_proto::proto::extensions::simple_extension_declaration::MappingType;
use substrait_proto::proto::extensions::SimpleExtensionDeclaration;
use crate::adapter::FlownodeContext;
-use crate::error::{
- DatafusionSnafu, Error, ExternalSnafu, InvalidQueryProstSnafu, NotImplementedSnafu,
- TableNotFoundSnafu, UnexpectedSnafu,
-};
-use crate::expr::GlobalId;
+use crate::error::{DatafusionSnafu, Error, ExternalSnafu, NotImplementedSnafu, UnexpectedSnafu};
use crate::plan::TypedPlan;
-use crate::repr::RelationType;
/// a simple macro to generate a not implemented error
macro_rules! not_impl_err {
($($arg:tt)*) => {
@@ -202,7 +190,7 @@ mod test {
use catalog::RegisterTableRequest;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, NUMBERS_TABLE_ID};
- use common_time::{Date, DateTime};
+ use common_time::DateTime;
use datatypes::prelude::*;
use datatypes::schema::Schema;
use datatypes::vectors::VectorRef;
@@ -219,7 +207,8 @@ mod test {
use super::*;
use crate::adapter::node_context::IdToNameMap;
- use crate::repr::ColumnType;
+ use crate::expr::GlobalId;
+ use crate::repr::{ColumnType, RelationType};
pub fn create_test_ctx() -> FlownodeContext {
let mut schemas = HashMap::new();
diff --git a/src/flow/src/transform/aggr.rs b/src/flow/src/transform/aggr.rs
index 64ecc3eec506..c07338047fe0 100644
--- a/src/flow/src/transform/aggr.rs
+++ b/src/flow/src/transform/aggr.rs
@@ -12,49 +12,23 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::collections::{BTreeMap, HashMap};
+use std::collections::BTreeMap;
-use common_decimal::Decimal128;
-use common_time::{Date, Timestamp};
-use datatypes::arrow::compute::kernels::window;
-use datatypes::arrow::ipc::Binary;
-use datatypes::data_type::{ConcreteDataType as CDT, DataType};
+use datatypes::data_type::DataType;
use datatypes::value::Value;
-use hydroflow::futures::future::Map;
use itertools::Itertools;
-use snafu::{OptionExt, ResultExt};
-use substrait::variation_const::{
- DATE_32_TYPE_VARIATION_REF, DATE_64_TYPE_VARIATION_REF, DEFAULT_TYPE_VARIATION_REF,
- TIMESTAMP_MICRO_TYPE_VARIATION_REF, TIMESTAMP_MILLI_TYPE_VARIATION_REF,
- TIMESTAMP_NANO_TYPE_VARIATION_REF, TIMESTAMP_SECOND_TYPE_VARIATION_REF,
- UNSIGNED_INTEGER_TYPE_VARIATION_REF,
-};
+use snafu::OptionExt;
use substrait_proto::proto::aggregate_function::AggregationInvocation;
use substrait_proto::proto::aggregate_rel::{Grouping, Measure};
-use substrait_proto::proto::expression::field_reference::ReferenceType::DirectReference;
-use substrait_proto::proto::expression::literal::LiteralType;
-use substrait_proto::proto::expression::reference_segment::ReferenceType::StructField;
-use substrait_proto::proto::expression::{
- IfThen, Literal, MaskExpression, RexType, ScalarFunction,
-};
-use substrait_proto::proto::extensions::simple_extension_declaration::MappingType;
-use substrait_proto::proto::extensions::SimpleExtensionDeclaration;
use substrait_proto::proto::function_argument::ArgType;
-use substrait_proto::proto::r#type::Kind;
-use substrait_proto::proto::read_rel::ReadType;
-use substrait_proto::proto::rel::RelType;
-use substrait_proto::proto::{self, plan_rel, Expression, Plan as SubPlan, Rel};
+use substrait_proto::proto::{self};
-use crate::error::{
- DatatypesSnafu, Error, EvalSnafu, InvalidQuerySnafu, NotImplementedSnafu, PlanSnafu,
- TableNotFoundSnafu,
-};
+use crate::error::{Error, NotImplementedSnafu, PlanSnafu};
use crate::expr::{
- AggregateExpr, AggregateFunc, BinaryFunc, GlobalId, MapFilterProject, SafeMfpPlan, ScalarExpr,
- TypedExpr, UnaryFunc, UnmaterializableFunc, VariadicFunc,
+ AggregateExpr, AggregateFunc, BinaryFunc, MapFilterProject, ScalarExpr, TypedExpr, UnaryFunc,
};
use crate::plan::{AccumulablePlan, AggrWithIndex, KeyValPlan, Plan, ReducePlan, TypedPlan};
-use crate::repr::{self, ColumnType, RelationDesc, RelationType};
+use crate::repr::{ColumnType, RelationDesc, RelationType};
use crate::transform::{substrait_proto, FlownodeContext, FunctionExtensions};
impl TypedExpr {
@@ -472,13 +446,14 @@ mod test {
use bytes::BytesMut;
use common_time::{DateTime, Interval};
use datatypes::prelude::ConcreteDataType;
- use pretty_assertions::{assert_eq, assert_ne};
+ use pretty_assertions::assert_eq;
use super::*;
- use crate::expr::{DfScalarFunction, RawDfScalarFn};
+ use crate::expr::{DfScalarFunction, GlobalId, RawDfScalarFn};
use crate::plan::{Plan, TypedPlan};
- use crate::repr::{self, ColumnType, RelationType};
+ use crate::repr::{ColumnType, RelationType};
use crate::transform::test::{create_test_ctx, create_test_query_engine, sql_to_substrait};
+ use crate::transform::CDT;
/// TODO(discord9): add more illegal sql tests
#[tokio::test]
async fn test_missing_key_check() {
diff --git a/src/flow/src/transform/literal.rs b/src/flow/src/transform/literal.rs
index 255ceadb54ca..01e06e96830e 100644
--- a/src/flow/src/transform/literal.rs
+++ b/src/flow/src/transform/literal.rs
@@ -34,8 +34,7 @@ use substrait::variation_const::{
};
use substrait_proto::proto::expression::literal::LiteralType;
use substrait_proto::proto::expression::Literal;
-use substrait_proto::proto::r#type::{self, parameter, Kind, Parameter};
-use substrait_proto::proto::Type;
+use substrait_proto::proto::r#type::Kind;
use crate::error::{Error, NotImplementedSnafu, PlanSnafu, UnexpectedSnafu};
use crate::transform::substrait_proto;
diff --git a/src/flow/src/transform/plan.rs b/src/flow/src/transform/plan.rs
index 200226fb352a..6841140989d9 100644
--- a/src/flow/src/transform/plan.rs
+++ b/src/flow/src/transform/plan.rs
@@ -22,11 +22,9 @@ use substrait_proto::proto::read_rel::ReadType;
use substrait_proto::proto::rel::RelType;
use substrait_proto::proto::{plan_rel, Plan as SubPlan, ProjectRel, Rel};
-use crate::error::{
- Error, InternalSnafu, InvalidQuerySnafu, NotImplementedSnafu, PlanSnafu, UnexpectedSnafu,
-};
+use crate::error::{Error, InvalidQuerySnafu, NotImplementedSnafu, PlanSnafu, UnexpectedSnafu};
use crate::expr::{MapFilterProject, ScalarExpr, TypedExpr, UnaryFunc};
-use crate::plan::{KeyValPlan, Plan, ReducePlan, TypedPlan};
+use crate::plan::{KeyValPlan, Plan, TypedPlan};
use crate::repr::{self, RelationDesc, RelationType};
use crate::transform::{substrait_proto, FlownodeContext, FunctionExtensions};
@@ -350,7 +348,7 @@ mod test {
use super::*;
use crate::expr::{GlobalId, ScalarExpr};
use crate::plan::{Plan, TypedPlan};
- use crate::repr::{self, ColumnType, RelationType};
+ use crate::repr::{ColumnType, RelationType};
use crate::transform::test::{create_test_ctx, create_test_query_engine, sql_to_substrait};
use crate::transform::CDT;
diff --git a/src/flow/src/utils.rs b/src/flow/src/utils.rs
index 69ff8fa2d248..778fde49c9a3 100644
--- a/src/flow/src/utils.rs
+++ b/src/flow/src/utils.rs
@@ -19,14 +19,11 @@ use std::ops::Bound;
use std::sync::Arc;
use common_telemetry::debug;
-use itertools::Itertools;
-use serde::{Deserialize, Serialize};
use smallvec::{smallvec, SmallVec};
-use tokio::sync::{Mutex, RwLock};
+use tokio::sync::RwLock;
-use crate::expr::error::InternalSnafu;
use crate::expr::{EvalError, ScalarExpr};
-use crate::repr::{value_to_internal_ts, Diff, DiffRow, Duration, KeyValDiffRow, Row, Timestamp};
+use crate::repr::{value_to_internal_ts, DiffRow, Duration, KeyValDiffRow, Row, Timestamp};
/// A batch of updates, arranged by key
pub type Batch = BTreeMap<Row, SmallVec<[DiffRow; 2]>>;
@@ -585,6 +582,7 @@ mod test {
use std::borrow::Borrow;
use datatypes::value::Value;
+ use itertools::Itertools;
use super::*;
|
feat
|
add `eval_batch` for ScalarExpr (#4551)
|
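
The `eval_batch` additions in the diff above move expression evaluation from row-at-a-time to column-at-a-time: every operator becomes a single Arrow compute-kernel call over whole arrays. Below is a minimal, hedged sketch of that pattern using only the public `arrow` crate; the column values are invented for illustration and this is not GreptimeDB code.

use std::sync::Arc;

use arrow::array::{ArrayRef, Int32Array};
use arrow::compute::kernels::{cmp, numeric};

fn main() -> Result<(), arrow::error::ArrowError> {
    // Two input "columns" of equal length, as a Batch would hold them.
    let left: ArrayRef = Arc::new(Int32Array::from(vec![Some(1), None, Some(3)]));
    let right: ArrayRef = Arc::new(Int32Array::from(vec![Some(1), Some(2), Some(0)]));

    // One kernel call evaluates the operator for every row at once,
    // which is what BinaryFunc::eval_batch does per operator.
    let sum = numeric::add(&left, &right)?; // [2, null, 3]
    let eq = cmp::eq(&left, &right)?;       // [true, null, false]

    println!("sum = {sum:?}");
    println!("eq  = {eq:?}");
    Ok(())
}

The per-operator kernel call is what replaces the per-row `match` of the old `eval` path; null propagation comes for free from the kernels.
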
15ad9f2f6fc4a5036052b0a3b1b161858e25aedc
|
2024-06-21 16:00:18
|
Weny Xu
|
fix: register logical regions after catching up (#4176)
| false
|
diff --git a/src/datanode/src/region_server.rs b/src/datanode/src/region_server.rs
index 427d95f1d361..8fa2eae383e5 100644
--- a/src/datanode/src/region_server.rs
+++ b/src/datanode/src/region_server.rs
@@ -536,7 +536,7 @@ impl RegionServerInner {
},
None => return Ok(CurrentEngine::EarlyReturn(0)),
},
- RegionChange::None => match current_region_status {
+ RegionChange::None | RegionChange::Catchup => match current_region_status {
Some(status) => match status.clone() {
RegionEngineWithStatus::Registering(_) => {
return error::RegionNotReadySnafu { region_id }.fail()
@@ -685,8 +685,8 @@ impl RegionServerInner {
| RegionRequest::Alter(_)
| RegionRequest::Flush(_)
| RegionRequest::Compact(_)
- | RegionRequest::Truncate(_)
- | RegionRequest::Catchup(_) => RegionChange::None,
+ | RegionRequest::Truncate(_) => RegionChange::None,
+ RegionRequest::Catchup(_) => RegionChange::Catchup,
};
        let engine = match self.get_engine(region_id, &region_change)? {
@@ -748,6 +748,7 @@ impl RegionServerInner {
RegionChange::Register(_) | RegionChange::Deregisters => {
self.region_map.remove(®ion_id);
}
+ RegionChange::Catchup => {}
}
}
@@ -790,6 +791,12 @@ impl RegionServerInner {
.map(|(id, engine)| engine.set_writable(id, false));
self.event_listener.on_region_deregistered(region_id);
}
+ RegionChange::Catchup => {
+ if is_metric_engine(engine.name()) {
+                    // Registers the logical regions belonging to the physical region (`region_id`).
+ self.register_logical_regions(&engine, region_id).await?;
+ }
+ }
}
Ok(())
}
@@ -891,6 +898,11 @@ enum RegionChange {
None,
Register(RegionAttribute),
Deregisters,
+ Catchup,
+}
+
+fn is_metric_engine(engine: &str) -> bool {
+ engine == METRIC_ENGINE_NAME
}
fn parse_region_attribute(
diff --git a/src/metric-engine/src/engine/catchup.rs b/src/metric-engine/src/engine/catchup.rs
index 0e6aee1e3883..faf9f1d83132 100644
--- a/src/metric-engine/src/engine/catchup.rs
+++ b/src/metric-engine/src/engine/catchup.rs
@@ -56,6 +56,9 @@ impl MetricEngineInner {
)
.await
.context(MitoCatchupOperationSnafu)
- .map(|response| response.affected_rows)
+ .map(|response| response.affected_rows)?;
+
+ self.recover_states(region_id).await?;
+ Ok(0)
}
}
diff --git a/src/metric-engine/src/engine/open.rs b/src/metric-engine/src/engine/open.rs
index c42e0376562c..815ffc1143f8 100644
--- a/src/metric-engine/src/engine/open.rs
+++ b/src/metric-engine/src/engine/open.rs
@@ -122,7 +122,7 @@ impl MetricEngineInner {
/// Includes:
/// - Record physical region's column names
/// - Record the mapping between logical region id and physical region id
- async fn recover_states(&self, physical_region_id: RegionId) -> Result<()> {
+ pub(crate) async fn recover_states(&self, physical_region_id: RegionId) -> Result<()> {
// load logical regions and physical column names
let logical_regions = self
.metadata_region
diff --git a/tests-integration/tests/region_migration.rs b/tests-integration/tests/region_migration.rs
index 2cef9287aeb0..24d77a834592 100644
--- a/tests-integration/tests/region_migration.rs
+++ b/tests-integration/tests/region_migration.rs
@@ -85,6 +85,7 @@ macro_rules! region_migration_tests {
test_region_migration_all_regions,
test_region_migration_incorrect_from_peer,
test_region_migration_incorrect_region_id,
+ test_metric_table_region_migration_by_sql,
);
)*
};
@@ -216,6 +217,131 @@ pub async fn test_region_migration(store_type: StorageType, endpoints: Vec<Strin
assert!(procedure.is_none());
}
+/// A naive metric table region migration test by SQL function
+pub async fn test_metric_table_region_migration_by_sql(
+ store_type: StorageType,
+ endpoints: Vec<String>,
+) {
+ let cluster_name = "test_region_migration";
+ let peer_factory = |id| Peer {
+ id,
+ addr: PEER_PLACEHOLDER_ADDR.to_string(),
+ };
+
+ // Prepares test cluster.
+ let (store_config, _guard) = get_test_store_config(&store_type);
+ let home_dir = create_temp_dir("test_migration_data_home");
+ let datanodes = 5u64;
+ let builder = GreptimeDbClusterBuilder::new(cluster_name).await;
+ let const_selector = Arc::new(ConstNodeSelector::new(vec![
+ peer_factory(1),
+ peer_factory(2),
+ peer_factory(3),
+ ]));
+ let cluster = builder
+ .with_datanodes(datanodes as u32)
+ .with_store_config(store_config)
+ .with_datanode_wal_config(DatanodeWalConfig::Kafka(DatanodeKafkaConfig {
+ broker_endpoints: endpoints.clone(),
+ ..Default::default()
+ }))
+ .with_metasrv_wal_config(MetasrvWalConfig::Kafka(MetasrvKafkaConfig {
+ broker_endpoints: endpoints,
+ num_topics: 3,
+ topic_name_prefix: Uuid::new_v4().to_string(),
+ ..Default::default()
+ }))
+ .with_shared_home_dir(Arc::new(home_dir))
+ .with_meta_selector(const_selector.clone())
+ .build()
+ .await;
+ // Prepares test metric tables.
+ let table_id = prepare_testing_metric_table(&cluster).await;
+ let query_ctx = QueryContext::arc();
+
+ // Inserts values
+ run_sql(
+ &cluster.frontend,
+ r#"INSERT INTO t1 VALUES ('host1',0, 0), ('host2', 1, 1);"#,
+ query_ctx.clone(),
+ )
+ .await
+ .unwrap();
+
+ run_sql(
+ &cluster.frontend,
+ r#"INSERT INTO t2 VALUES ('job1', 0, 0), ('job2', 1, 1);"#,
+ query_ctx.clone(),
+ )
+ .await
+ .unwrap();
+
+ // The region distribution
+ let mut distribution = find_region_distribution_by_sql(&cluster, "phy").await;
+ // Selecting target of region migration.
+ let (from_peer_id, from_regions) = distribution.pop_first().unwrap();
+ info!(
+ "Selecting from peer: {from_peer_id}, and regions: {:?}",
+ from_regions[0]
+ );
+ let to_peer_id = (from_peer_id + 1) % 3;
+ let region_id = RegionId::new(table_id, from_regions[0]);
+ // Trigger region migration.
+ let procedure_id =
+ trigger_migration_by_sql(&cluster, region_id.as_u64(), from_peer_id, to_peer_id).await;
+
+ info!("Started region procedure: {}!", procedure_id);
+
+    // Waits for the migration to finish by polling the procedure state
+ let frontend = cluster.frontend.clone();
+ wait_condition(
+ Duration::from_secs(10),
+ Box::pin(async move {
+ loop {
+ let state = query_procedure_by_sql(&frontend, &procedure_id).await;
+ if state == "{\"status\":\"Done\"}" {
+ info!("Migration done: {state}");
+ break;
+ } else {
+ info!("Migration not finished: {state}");
+ tokio::time::sleep(Duration::from_millis(200)).await;
+ }
+ }
+ }),
+ )
+ .await;
+
+ let result = cluster
+ .frontend
+ .do_query("select * from t1", query_ctx.clone())
+ .await
+ .remove(0);
+
+ let expected = "\
++-------+-------------------------+-----+
+| host | ts | val |
++-------+-------------------------+-----+
+| host2 | 1970-01-01T00:00:00.001 | 1.0 |
+| host1 | 1970-01-01T00:00:00 | 0.0 |
++-------+-------------------------+-----+";
+ check_output_stream(result.unwrap().data, expected).await;
+
+ let result = cluster
+ .frontend
+ .do_query("select * from t2", query_ctx)
+ .await
+ .remove(0);
+
+ let expected = "\
++------+-------------------------+-----+
+| job | ts | val |
++------+-------------------------+-----+
+| job2 | 1970-01-01T00:00:00.001 | 1.0 |
+| job1 | 1970-01-01T00:00:00 | 0.0 |
++------+-------------------------+-----+";
+ check_output_stream(result.unwrap().data, expected).await;
+}
+
/// A naive region migration test by SQL function
pub async fn test_region_migration_by_sql(store_type: StorageType, endpoints: Vec<String>) {
let cluster_name = "test_region_migration";
@@ -264,7 +390,7 @@ pub async fn test_region_migration_by_sql(store_type: StorageType, endpoints: Ve
}
// The region distribution
- let mut distribution = find_region_distribution_by_sql(&cluster).await;
+ let mut distribution = find_region_distribution_by_sql(&cluster, TEST_TABLE_NAME).await;
let old_distribution = distribution.clone();
@@ -329,7 +455,7 @@ pub async fn test_region_migration_by_sql(store_type: StorageType, endpoints: Ve
.unwrap();
assert!(procedure.is_none());
- let new_distribution = find_region_distribution_by_sql(&cluster).await;
+ let new_distribution = find_region_distribution_by_sql(&cluster, TEST_TABLE_NAME).await;
assert_ne!(old_distribution, new_distribution);
}
@@ -804,6 +930,32 @@ async fn assert_values(instance: &Arc<Instance>) {
check_output_stream(result.unwrap().data, expected).await;
}
+async fn prepare_testing_metric_table(cluster: &GreptimeDbCluster) -> TableId {
+ let sql = r#"CREATE TABLE phy (ts timestamp time index, val double) engine=metric with ("physical_metric_table" = "");"#;
+ let mut result = cluster.frontend.do_query(sql, QueryContext::arc()).await;
+ let output = result.remove(0).unwrap();
+ assert!(matches!(output.data, OutputData::AffectedRows(0)));
+
+ let sql = r#"CREATE TABLE t1 (ts timestamp time index, val double, host string primary key) engine = metric with ("on_physical_table" = "phy");"#;
+ let mut result = cluster.frontend.do_query(sql, QueryContext::arc()).await;
+ let output = result.remove(0).unwrap();
+ assert!(matches!(output.data, OutputData::AffectedRows(0)));
+
+ let sql = r#"CREATE TABLE t2 (ts timestamp time index, job string primary key, val double) engine = metric with ("on_physical_table" = "phy");"#;
+ let mut result = cluster.frontend.do_query(sql, QueryContext::arc()).await;
+ let output = result.remove(0).unwrap();
+ assert!(matches!(output.data, OutputData::AffectedRows(0)));
+
+ let table = cluster
+ .frontend
+ .catalog_manager()
+ .table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "phy")
+ .await
+ .unwrap()
+ .unwrap();
+ table.table_info().table_id()
+}
+
async fn prepare_testing_table(cluster: &GreptimeDbCluster) -> TableId {
let sql = format!(
r"
@@ -843,7 +995,10 @@ async fn find_region_distribution(
}
/// Find region distribution by SQL query
-async fn find_region_distribution_by_sql(cluster: &GreptimeDbCluster) -> RegionDistribution {
+async fn find_region_distribution_by_sql(
+ cluster: &GreptimeDbCluster,
+ table: &str,
+) -> RegionDistribution {
let query_ctx = QueryContext::arc();
let OutputData::Stream(stream) = run_sql(
@@ -853,7 +1008,7 @@ async fn find_region_distribution_by_sql(cluster: &GreptimeDbCluster) -> RegionD
a.greptime_partition_id as region_id
from information_schema.partitions a left join information_schema.region_peers b
on a.greptime_partition_id = b.region_id
- where a.table_name='{TEST_TABLE_NAME}' order by datanode_id asc"#
+ where a.table_name='{table}' order by datanode_id asc"#
),
query_ctx.clone(),
)
|
fix
|
register logical regions after catching up (#4176)
|
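
The fix in the diff above adds a dedicated `RegionChange::Catchup` path so that, once a metric-engine physical region has caught up, its logical regions are registered again. The sketch below shows only the dispatch pattern, with simplified and partly hypothetical names; it is not the actual region server code.

const METRIC_ENGINE_NAME: &str = "metric";

#[derive(Debug)]
enum RegionChange {
    None,
    Register,
    Deregister,
    Catchup,
}

fn is_metric_engine(engine: &str) -> bool {
    engine == METRIC_ENGINE_NAME
}

// Post-request hook: only a catchup on a metric physical region needs the
// extra step of re-registering the logical regions that map onto it.
fn after_request(change: &RegionChange, engine: &str) {
    match change {
        RegionChange::Catchup if is_metric_engine(engine) => {
            println!("re-register logical regions of the physical region");
        }
        RegionChange::Catchup | RegionChange::None => {}
        RegionChange::Register | RegionChange::Deregister => {
            println!("update the region map");
        }
    }
}

fn main() {
    after_request(&RegionChange::Catchup, "metric");
    after_request(&RegionChange::Catchup, "mito");
    after_request(&RegionChange::Register, "mito");
    after_request(&RegionChange::Deregister, "mito");
    after_request(&RegionChange::None, "mito");
}

Without the separate variant, catchup was folded into `RegionChange::None` and the logical-region mapping was never rebuilt after a migration, which is what the new integration test in the diff exercises.
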
421103c3367931345ccbadbe04e180eba17299d8
|
2023-07-12 07:36:05
|
Ruihang Xia
|
refactor: remove misdirectional alias "Request as GreptimeRequest" (#1940)
| false
|
diff --git a/src/datanode/src/instance/grpc.rs b/src/datanode/src/instance/grpc.rs
index 5cd04330032c..3a8e97ffa03e 100644
--- a/src/datanode/src/instance/grpc.rs
+++ b/src/datanode/src/instance/grpc.rs
@@ -16,7 +16,7 @@ use std::any::Any;
use std::sync::Arc;
use api::v1::ddl_request::{Expr as DdlExpr, Expr};
-use api::v1::greptime_request::Request as GrpcRequest;
+use api::v1::greptime_request::Request;
use api::v1::query_request::Query;
use api::v1::{CreateDatabaseExpr, DdlRequest, DeleteRequest, InsertRequests};
use async_trait::async_trait;
@@ -207,11 +207,11 @@ impl Instance {
impl GrpcQueryHandler for Instance {
type Error = error::Error;
- async fn do_query(&self, request: GrpcRequest, ctx: QueryContextRef) -> Result<Output> {
+ async fn do_query(&self, request: Request, ctx: QueryContextRef) -> Result<Output> {
match request {
- GrpcRequest::Inserts(requests) => self.handle_inserts(requests, &ctx).await,
- GrpcRequest::Delete(request) => self.handle_delete(request, ctx).await,
- GrpcRequest::Query(query_request) => {
+ Request::Inserts(requests) => self.handle_inserts(requests, &ctx).await,
+ Request::Delete(request) => self.handle_delete(request, ctx).await,
+ Request::Query(query_request) => {
let query = query_request
.query
.context(error::MissingRequiredFieldSnafu {
@@ -219,7 +219,7 @@ impl GrpcQueryHandler for Instance {
})?;
self.handle_query(query, ctx).await
}
- GrpcRequest::Ddl(request) => self.handle_ddl(request, ctx).await,
+ Request::Ddl(request) => self.handle_ddl(request, ctx).await,
}
}
}
@@ -332,7 +332,7 @@ mod test {
let instance = MockInstance::new("test_handle_ddl").await;
let instance = instance.inner();
- let query = GrpcRequest::Ddl(DdlRequest {
+ let query = Request::Ddl(DdlRequest {
expr: Some(DdlExpr::CreateDatabase(CreateDatabaseExpr {
database_name: "my_database".to_string(),
create_if_not_exists: true,
@@ -341,7 +341,7 @@ mod test {
let output = instance.do_query(query, QueryContext::arc()).await.unwrap();
assert!(matches!(output, Output::AffectedRows(1)));
- let query = GrpcRequest::Ddl(DdlRequest {
+ let query = Request::Ddl(DdlRequest {
expr: Some(DdlExpr::CreateTable(CreateTableExpr {
catalog_name: "greptime".to_string(),
schema_name: "my_database".to_string(),
@@ -369,7 +369,7 @@ mod test {
let output = instance.do_query(query, QueryContext::arc()).await.unwrap();
assert!(matches!(output, Output::AffectedRows(0)));
- let query = GrpcRequest::Ddl(DdlRequest {
+ let query = Request::Ddl(DdlRequest {
expr: Some(DdlExpr::Alter(AlterExpr {
catalog_name: "greptime".to_string(),
schema_name: "my_database".to_string(),
@@ -494,7 +494,7 @@ mod test {
..Default::default()
};
- let query = GrpcRequest::Inserts(InsertRequests {
+ let query = Request::Inserts(InsertRequests {
inserts: vec![insert],
});
let output = instance.do_query(query, QueryContext::arc()).await.unwrap();
@@ -525,7 +525,7 @@ mod test {
.await
.is_ok());
- let query = GrpcRequest::Query(QueryRequest {
+ let query = Request::Query(QueryRequest {
query: Some(Query::Sql(
"INSERT INTO demo(host, cpu, memory, ts) VALUES \
('host1', 66.6, 1024, 1672201025000),\
@@ -563,7 +563,7 @@ mod test {
row_count: 1,
};
- let request = GrpcRequest::Delete(request);
+ let request = Request::Delete(request);
let output = instance
.do_query(request, QueryContext::arc())
.await
@@ -594,7 +594,7 @@ mod test {
.await
.is_ok());
- let query = GrpcRequest::Query(QueryRequest {
+ let query = Request::Query(QueryRequest {
query: Some(Query::Sql(
"INSERT INTO demo(host, cpu, memory, ts) VALUES \
('host1', 66.6, 1024, 1672201025000),\
@@ -605,7 +605,7 @@ mod test {
let output = instance.do_query(query, QueryContext::arc()).await.unwrap();
assert!(matches!(output, Output::AffectedRows(2)));
- let query = GrpcRequest::Query(QueryRequest {
+ let query = Request::Query(QueryRequest {
query: Some(Query::Sql(
"SELECT ts, host, cpu, memory FROM demo".to_string(),
)),
diff --git a/src/frontend/src/instance/standalone.rs b/src/frontend/src/instance/standalone.rs
index 4e54d073f9f1..af5c224e7c11 100644
--- a/src/frontend/src/instance/standalone.rs
+++ b/src/frontend/src/instance/standalone.rs
@@ -14,7 +14,7 @@
use std::sync::Arc;
-use api::v1::greptime_request::Request as GreptimeRequest;
+use api::v1::greptime_request::Request;
use async_trait::async_trait;
use common_query::Output;
use datanode::error::Error as DatanodeError;
@@ -36,7 +36,7 @@ impl StandaloneGrpcQueryHandler {
impl GrpcQueryHandler for StandaloneGrpcQueryHandler {
type Error = error::Error;
- async fn do_query(&self, query: GreptimeRequest, ctx: QueryContextRef) -> Result<Output> {
+ async fn do_query(&self, query: Request, ctx: QueryContextRef) -> Result<Output> {
self.0
.do_query(query, ctx)
.await
diff --git a/src/meta-srv/src/lib.rs b/src/meta-srv/src/lib.rs
index 9d9e0c28f688..6034bbe11cbc 100644
--- a/src/meta-srv/src/lib.rs
+++ b/src/meta-srv/src/lib.rs
@@ -37,7 +37,7 @@ mod sequence;
pub mod service;
pub mod table_routes;
+pub use crate::error::Result;
+
#[cfg(test)]
mod test_util;
-
-pub use crate::error::Result;
diff --git a/src/servers/src/query_handler/grpc.rs b/src/servers/src/query_handler/grpc.rs
index b82ff4c3796b..c335b185df1a 100644
--- a/src/servers/src/query_handler/grpc.rs
+++ b/src/servers/src/query_handler/grpc.rs
@@ -14,7 +14,7 @@
use std::sync::Arc;
-use api::v1::greptime_request::Request as GreptimeRequest;
+use api::v1::greptime_request::Request;
use async_trait::async_trait;
use common_error::prelude::*;
use common_query::Output;
@@ -31,7 +31,7 @@ pub trait GrpcQueryHandler {
async fn do_query(
&self,
- query: GreptimeRequest,
+ query: Request,
ctx: QueryContextRef,
) -> std::result::Result<Output, Self::Error>;
}
@@ -51,7 +51,7 @@ where
{
type Error = error::Error;
- async fn do_query(&self, query: GreptimeRequest, ctx: QueryContextRef) -> Result<Output> {
+ async fn do_query(&self, query: Request, ctx: QueryContextRef) -> Result<Output> {
self.0
.do_query(query, ctx)
.await
diff --git a/src/servers/tests/mod.rs b/src/servers/tests/mod.rs
index 91be2ae62701..ee8f72600a4c 100644
--- a/src/servers/tests/mod.rs
+++ b/src/servers/tests/mod.rs
@@ -15,7 +15,7 @@
use std::collections::HashMap;
use std::sync::{Arc, RwLock};
-use api::v1::greptime_request::{Request as GreptimeRequest, Request};
+use api::v1::greptime_request::Request;
use api::v1::query_request::Query;
use async_trait::async_trait;
use catalog::local::MemoryCatalogManager;
@@ -156,7 +156,7 @@ impl GrpcQueryHandler for DummyInstance {
async fn do_query(
&self,
- request: GreptimeRequest,
+ request: Request,
ctx: QueryContextRef,
) -> std::result::Result<Output, Self::Error> {
let output = match request {
|
refactor
|
remove misdirectional alias "Request as GreptimeRequest" (#1940)
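
For context, the shape this refactor lands on -- a GrpcQueryHandler trait whose do_query takes the un-aliased Request enum, plus a thin standalone wrapper that only forwards to the inner handler -- is roughly the following. This is a minimal, self-contained sketch with simplified synchronous signatures and hypothetical stand-in names (DummyInstance, StandaloneHandler); it is not the actual GreptimeDB code.

use std::sync::Arc;

// Stand-ins for api::v1::greptime_request::Request and common_query::Output.
#[derive(Debug)]
enum Request {
    Query(String),
    Ddl(String),
}

#[derive(Debug)]
enum Output {
    AffectedRows(usize),
}

trait GrpcQueryHandler {
    type Error;
    fn do_query(&self, query: Request) -> Result<Output, Self::Error>;
}

struct DummyInstance;

impl GrpcQueryHandler for DummyInstance {
    type Error = String;

    fn do_query(&self, query: Request) -> Result<Output, Self::Error> {
        // Echo back a fixed row count for either request kind.
        match query {
            Request::Query(_) => Ok(Output::AffectedRows(2)),
            Request::Ddl(_) => Ok(Output::AffectedRows(0)),
        }
    }
}

// Forwards to the wrapped handler, like StandaloneGrpcQueryHandler in the diff above.
struct StandaloneHandler(Arc<dyn GrpcQueryHandler<Error = String>>);

impl StandaloneHandler {
    fn do_query(&self, query: Request) -> Result<Output, String> {
        self.0.do_query(query)
    }
}

fn main() {
    let handler = StandaloneHandler(Arc::new(DummyInstance));
    let output = handler
        .do_query(Request::Query("SELECT 1".to_string()))
        .unwrap();
    println!("{output:?}");
}

Dropping the alias changes nothing at runtime; call sites simply refer to the enum by its own name, as the test hunks above show.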
|
52697a9e66446f379802dba8009ec8ed840c0194
|
2025-01-17 13:54:26
|
yihong
|
fix: maybe double free from static str in Snafu (#5383)
| false
|
diff --git a/src/common/datasource/src/error.rs b/src/common/datasource/src/error.rs
index 8f062868a98e..cfaa5a19c041 100644
--- a/src/common/datasource/src/error.rs
+++ b/src/common/datasource/src/error.rs
@@ -180,7 +180,7 @@ pub enum Error {
#[snafu(display("Failed to parse format {} with value: {}", key, value))]
ParseFormat {
- key: &'static str,
+ key: String,
value: String,
#[snafu(implicit)]
location: Location,
diff --git a/src/pipeline/src/etl/error.rs b/src/pipeline/src/etl/error.rs
index 87743abaa394..45e485d04a44 100644
--- a/src/pipeline/src/etl/error.rs
+++ b/src/pipeline/src/etl/error.rs
@@ -58,7 +58,7 @@ pub enum Error {
#[snafu(display("Processor {processor}: unsupported value {val}"))]
ProcessorUnsupportedValue {
- processor: &'static str,
+ processor: String,
val: String,
#[snafu(implicit)]
location: Location,
@@ -180,7 +180,7 @@ pub enum Error {
#[snafu(display("Separator '{separator}' must be a single character, but got '{value}'"))]
CsvSeparatorName {
- separator: &'static str,
+ separator: String,
value: String,
#[snafu(implicit)]
location: Location,
@@ -188,7 +188,7 @@ pub enum Error {
#[snafu(display("Quote '{quote}' must be a single character, but got '{value}'"))]
CsvQuoteName {
- quote: &'static str,
+ quote: String,
value: String,
#[snafu(implicit)]
location: Location,
|
fix
|
maybe double free from static str in Snafu (#5383)
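
For context, the owned-String pattern this fix moves to looks roughly like the sketch below when the error is built through a Snafu context selector. It is a minimal stand-alone example (hypothetical parse_format helper, implicit location field omitted), not the real common_datasource code; the point is that the generated selector is generic over Into<String>, so call sites can keep passing string slices while the error variant owns its data.

use snafu::{ensure, Snafu};

#[derive(Debug, Snafu)]
enum Error {
    #[snafu(display("Failed to parse format {} with value: {}", key, value))]
    ParseFormat { key: String, value: String },
}

// Hypothetical caller: &str arguments are converted into owned String fields
// by the ParseFormatSnafu selector when the error is constructed.
fn parse_format(key: &str, value: &str) -> Result<u64, Error> {
    ensure!(
        value.chars().all(|c| c.is_ascii_digit()),
        ParseFormatSnafu { key, value }
    );
    Ok(value.parse().expect("digits only"))
}

fn main() {
    let err = parse_format("compression", "not-a-number").unwrap_err();
    println!("{err}");
}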
|
3112ced9c0ccae7afa6704d07c6ee5a7b966c3a3
|
2024-04-28 08:04:06
|
Weny Xu
|
chore: rename all `datanode_manager` to `node_manager` (#3813)
| false
|
diff --git a/src/client/src/client_manager.rs b/src/client/src/client_manager.rs
index f219dd264eb6..528b86fe1fec 100644
--- a/src/client/src/client_manager.rs
+++ b/src/client/src/client_manager.rs
@@ -17,7 +17,7 @@ use std::sync::Arc;
use std::time::Duration;
use common_grpc::channel_manager::{ChannelConfig, ChannelManager};
-use common_meta::datanode_manager::{DatanodeRef, FlownodeRef, NodeManager};
+use common_meta::node_manager::{DatanodeRef, FlownodeRef, NodeManager};
use common_meta::peer::Peer;
use moka::future::{Cache, CacheBuilder};
diff --git a/src/client/src/region.rs b/src/client/src/region.rs
index a401fa434803..e6c6e4af81a4 100644
--- a/src/client/src/region.rs
+++ b/src/client/src/region.rs
@@ -24,8 +24,8 @@ use async_trait::async_trait;
use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_grpc::flight::{FlightDecoder, FlightMessage};
-use common_meta::datanode_manager::Datanode;
use common_meta::error::{self as meta_error, Result as MetaResult};
+use common_meta::node_manager::Datanode;
use common_recordbatch::error::ExternalSnafu;
use common_recordbatch::{RecordBatchStreamWrapper, SendableRecordBatchStream};
use common_telemetry::error;
diff --git a/src/cmd/src/standalone.rs b/src/cmd/src/standalone.rs
index 7148606a0943..061c4f98e2bd 100644
--- a/src/cmd/src/standalone.rs
+++ b/src/cmd/src/standalone.rs
@@ -21,12 +21,12 @@ use clap::Parser;
use common_catalog::consts::MIN_USER_TABLE_ID;
use common_config::{metadata_store_dir, KvBackendConfig};
use common_meta::cache_invalidator::{CacheInvalidatorRef, MultiCacheInvalidator};
-use common_meta::datanode_manager::NodeManagerRef;
use common_meta::ddl::table_meta::{TableMetadataAllocator, TableMetadataAllocatorRef};
use common_meta::ddl::ProcedureExecutorRef;
use common_meta::ddl_manager::DdlManager;
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
use common_meta::kv_backend::KvBackendRef;
+use common_meta::node_manager::NodeManagerRef;
use common_meta::region_keeper::MemoryRegionKeeper;
use common_meta::sequence::SequenceBuilder;
use common_meta::wal_options_allocator::{WalOptionsAllocator, WalOptionsAllocatorRef};
@@ -408,7 +408,7 @@ impl StartCommand {
DatanodeBuilder::new(dn_opts, fe_plugins.clone()).with_kv_backend(kv_backend.clone());
let datanode = builder.build().await.context(StartDatanodeSnafu)?;
- let datanode_manager = Arc::new(StandaloneDatanodeManager(datanode.region_server()));
+ let node_manager = Arc::new(StandaloneDatanodeManager(datanode.region_server()));
let table_id_sequence = Arc::new(
SequenceBuilder::new("table_id", kv_backend.clone())
@@ -432,22 +432,18 @@ impl StartCommand {
let ddl_task_executor = Self::create_ddl_task_executor(
table_metadata_manager,
procedure_manager.clone(),
- datanode_manager.clone(),
+ node_manager.clone(),
multi_cache_invalidator,
table_meta_allocator,
)
.await?;
- let mut frontend = FrontendBuilder::new(
- kv_backend,
- catalog_manager,
- datanode_manager,
- ddl_task_executor,
- )
- .with_plugin(fe_plugins.clone())
- .try_build()
- .await
- .context(StartFrontendSnafu)?;
+ let mut frontend =
+ FrontendBuilder::new(kv_backend, catalog_manager, node_manager, ddl_task_executor)
+ .with_plugin(fe_plugins.clone())
+ .try_build()
+ .await
+ .context(StartFrontendSnafu)?;
let servers = Services::new(fe_opts.clone(), Arc::new(frontend.clone()), fe_plugins)
.build()
@@ -468,14 +464,14 @@ impl StartCommand {
pub async fn create_ddl_task_executor(
table_metadata_manager: TableMetadataManagerRef,
procedure_manager: ProcedureManagerRef,
- datanode_manager: NodeManagerRef,
+ node_manager: NodeManagerRef,
cache_invalidator: CacheInvalidatorRef,
table_meta_allocator: TableMetadataAllocatorRef,
) -> Result<ProcedureExecutorRef> {
let procedure_executor: ProcedureExecutorRef = Arc::new(
DdlManager::try_new(
procedure_manager,
- datanode_manager,
+ node_manager,
cache_invalidator,
table_metadata_manager,
table_meta_allocator,
diff --git a/src/common/meta/src/ddl.rs b/src/common/meta/src/ddl.rs
index c7abac1d6553..3feea55253ef 100644
--- a/src/common/meta/src/ddl.rs
+++ b/src/common/meta/src/ddl.rs
@@ -20,10 +20,10 @@ use store_api::storage::{RegionNumber, TableId};
use self::table_meta::TableMetadataAllocatorRef;
use crate::cache_invalidator::CacheInvalidatorRef;
-use crate::datanode_manager::NodeManagerRef;
use crate::error::Result;
use crate::key::table_route::PhysicalTableRouteValue;
use crate::key::TableMetadataManagerRef;
+use crate::node_manager::NodeManagerRef;
use crate::region_keeper::MemoryRegionKeeperRef;
use crate::rpc::ddl::{SubmitDdlTaskRequest, SubmitDdlTaskResponse};
use crate::rpc::procedure::{MigrateRegionRequest, MigrateRegionResponse, ProcedureStateResponse};
@@ -95,7 +95,7 @@ pub struct TableMetadata {
#[derive(Clone)]
pub struct DdlContext {
- pub datanode_manager: NodeManagerRef,
+ pub node_manager: NodeManagerRef,
pub cache_invalidator: CacheInvalidatorRef,
pub table_metadata_manager: TableMetadataManagerRef,
pub memory_region_keeper: MemoryRegionKeeperRef,
diff --git a/src/common/meta/src/ddl/alter_logical_tables.rs b/src/common/meta/src/ddl/alter_logical_tables.rs
index 6819f18941ee..abec47764780 100644
--- a/src/common/meta/src/ddl/alter_logical_tables.rs
+++ b/src/common/meta/src/ddl/alter_logical_tables.rs
@@ -116,7 +116,7 @@ impl AlterLogicalTablesProcedure {
let mut alter_region_tasks = Vec::with_capacity(leaders.len());
for peer in leaders {
- let requester = self.context.datanode_manager.datanode(&peer).await;
+ let requester = self.context.node_manager.datanode(&peer).await;
let request = self.make_request(&peer, &physical_table_route.region_routes)?;
alter_region_tasks.push(async move {
diff --git a/src/common/meta/src/ddl/alter_table.rs b/src/common/meta/src/ddl/alter_table.rs
index f286db47cd7a..31a4fd4af1eb 100644
--- a/src/common/meta/src/ddl/alter_table.rs
+++ b/src/common/meta/src/ddl/alter_table.rs
@@ -105,7 +105,7 @@ impl AlterTableProcedure {
let mut alter_region_tasks = Vec::with_capacity(leaders.len());
for datanode in leaders {
- let requester = self.context.datanode_manager.datanode(&datanode).await;
+ let requester = self.context.node_manager.datanode(&datanode).await;
let regions = find_leader_regions(&physical_table_route.region_routes, &datanode);
for region in regions {
diff --git a/src/common/meta/src/ddl/alter_table/region_request.rs b/src/common/meta/src/ddl/alter_table/region_request.rs
index ce82ef831dd5..265466b69442 100644
--- a/src/common/meta/src/ddl/alter_table/region_request.rs
+++ b/src/common/meta/src/ddl/alter_table/region_request.rs
@@ -138,8 +138,8 @@ mod tests {
#[tokio::test]
async fn test_make_alter_region_request() {
- let datanode_manager = Arc::new(MockDatanodeManager::new(()));
- let ddl_context = new_ddl_context(datanode_manager);
+ let node_manager = Arc::new(MockDatanodeManager::new(()));
+ let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let table_id = 1024;
let region_id = RegionId::new(table_id, 1);
diff --git a/src/common/meta/src/ddl/create_logical_tables.rs b/src/common/meta/src/ddl/create_logical_tables.rs
index d050e7e3e465..5095b7c32e1a 100644
--- a/src/common/meta/src/ddl/create_logical_tables.rs
+++ b/src/common/meta/src/ddl/create_logical_tables.rs
@@ -142,7 +142,7 @@ impl CreateLogicalTablesProcedure {
let mut create_region_tasks = Vec::with_capacity(leaders.len());
for peer in leaders {
- let requester = self.context.datanode_manager.datanode(&peer).await;
+ let requester = self.context.node_manager.datanode(&peer).await;
let request = self.make_request(&peer, region_routes)?;
create_region_tasks.push(async move {
diff --git a/src/common/meta/src/ddl/create_table.rs b/src/common/meta/src/ddl/create_table.rs
index 6204f168a55d..044715b32381 100644
--- a/src/common/meta/src/ddl/create_table.rs
+++ b/src/common/meta/src/ddl/create_table.rs
@@ -222,7 +222,7 @@ impl CreateTableProcedure {
let mut create_region_tasks = Vec::with_capacity(leaders.len());
for datanode in leaders {
- let requester = self.context.datanode_manager.datanode(&datanode).await;
+ let requester = self.context.node_manager.datanode(&datanode).await;
let regions = find_leader_regions(region_routes, &datanode);
let mut requests = Vec::with_capacity(regions.len());
diff --git a/src/common/meta/src/ddl/drop_database/cursor.rs b/src/common/meta/src/ddl/drop_database/cursor.rs
index ed21902e7508..7e1cb05bb98d 100644
--- a/src/common/meta/src/ddl/drop_database/cursor.rs
+++ b/src/common/meta/src/ddl/drop_database/cursor.rs
@@ -163,8 +163,8 @@ mod tests {
#[tokio::test]
async fn test_next_without_logical_tables() {
- let datanode_manager = Arc::new(MockDatanodeManager::new(()));
- let ddl_context = new_ddl_context(datanode_manager);
+ let node_manager = Arc::new(MockDatanodeManager::new(()));
+ let ddl_context = new_ddl_context(node_manager);
create_physical_table(&ddl_context, 0, "phy").await;
// It always starts from Logical
let mut state = DropDatabaseCursor::new(DropTableTarget::Logical);
@@ -197,8 +197,8 @@ mod tests {
#[tokio::test]
async fn test_next_with_logical_tables() {
- let datanode_manager = Arc::new(MockDatanodeManager::new(()));
- let ddl_context = new_ddl_context(datanode_manager);
+ let node_manager = Arc::new(MockDatanodeManager::new(()));
+ let ddl_context = new_ddl_context(node_manager);
let physical_table_id = create_physical_table(&ddl_context, 0, "phy").await;
create_logical_table(ddl_context.clone(), 0, physical_table_id, "metric_0").await;
// It always starts from Logical
@@ -228,8 +228,8 @@ mod tests {
#[tokio::test]
async fn test_reach_the_end() {
- let datanode_manager = Arc::new(MockDatanodeManager::new(()));
- let ddl_context = new_ddl_context(datanode_manager);
+ let node_manager = Arc::new(MockDatanodeManager::new(()));
+ let ddl_context = new_ddl_context(node_manager);
let mut state = DropDatabaseCursor::new(DropTableTarget::Physical);
let mut ctx = DropDatabaseContext {
catalog: DEFAULT_CATALOG_NAME.to_string(),
diff --git a/src/common/meta/src/ddl/drop_database/executor.rs b/src/common/meta/src/ddl/drop_database/executor.rs
index e3bcf0c004d6..acc2d6333156 100644
--- a/src/common/meta/src/ddl/drop_database/executor.rs
+++ b/src/common/meta/src/ddl/drop_database/executor.rs
@@ -159,8 +159,8 @@ mod tests {
#[tokio::test]
async fn test_next_with_physical_table() {
- let datanode_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
- let ddl_context = new_ddl_context(datanode_manager);
+ let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
+ let ddl_context = new_ddl_context(node_manager);
let physical_table_id = create_physical_table(&ddl_context, 0, "phy").await;
let (_, table_route) = ddl_context
.table_metadata_manager
@@ -209,8 +209,8 @@ mod tests {
#[tokio::test]
async fn test_next_logical_table() {
- let datanode_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
- let ddl_context = new_ddl_context(datanode_manager);
+ let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
+ let ddl_context = new_ddl_context(node_manager);
let physical_table_id = create_physical_table(&ddl_context, 0, "phy").await;
create_logical_table(ddl_context.clone(), 0, physical_table_id, "metric").await;
let logical_table_id = physical_table_id + 1;
@@ -313,8 +313,8 @@ mod tests {
#[tokio::test]
async fn test_next_retryable_err() {
- let datanode_manager = Arc::new(MockDatanodeManager::new(RetryErrorDatanodeHandler));
- let ddl_context = new_ddl_context(datanode_manager);
+ let node_manager = Arc::new(MockDatanodeManager::new(RetryErrorDatanodeHandler));
+ let ddl_context = new_ddl_context(node_manager);
let physical_table_id = create_physical_table(&ddl_context, 0, "phy").await;
let (_, table_route) = ddl_context
.table_metadata_manager
diff --git a/src/common/meta/src/ddl/drop_database/metadata.rs b/src/common/meta/src/ddl/drop_database/metadata.rs
index f06c51963a78..a9a64f9eca95 100644
--- a/src/common/meta/src/ddl/drop_database/metadata.rs
+++ b/src/common/meta/src/ddl/drop_database/metadata.rs
@@ -108,8 +108,8 @@ mod tests {
#[tokio::test]
async fn test_next() {
- let datanode_manager = Arc::new(MockDatanodeManager::new(()));
- let ddl_context = new_ddl_context(datanode_manager);
+ let node_manager = Arc::new(MockDatanodeManager::new(()));
+ let ddl_context = new_ddl_context(node_manager);
ddl_context
.table_metadata_manager
.schema_manager()
diff --git a/src/common/meta/src/ddl/drop_database/start.rs b/src/common/meta/src/ddl/drop_database/start.rs
index 7d71d1972d6b..792eeac8dda1 100644
--- a/src/common/meta/src/ddl/drop_database/start.rs
+++ b/src/common/meta/src/ddl/drop_database/start.rs
@@ -85,8 +85,8 @@ mod tests {
#[tokio::test]
async fn test_schema_not_exists_err() {
- let datanode_manager = Arc::new(MockDatanodeManager::new(()));
- let ddl_context = new_ddl_context(datanode_manager);
+ let node_manager = Arc::new(MockDatanodeManager::new(()));
+ let ddl_context = new_ddl_context(node_manager);
let mut step = DropDatabaseStart;
let mut ctx = DropDatabaseContext {
catalog: "foo".to_string(),
@@ -100,8 +100,8 @@ mod tests {
#[tokio::test]
async fn test_schema_not_exists() {
- let datanode_manager = Arc::new(MockDatanodeManager::new(()));
- let ddl_context = new_ddl_context(datanode_manager);
+ let node_manager = Arc::new(MockDatanodeManager::new(()));
+ let ddl_context = new_ddl_context(node_manager);
let mut state = DropDatabaseStart;
let mut ctx = DropDatabaseContext {
catalog: "foo".to_string(),
@@ -116,8 +116,8 @@ mod tests {
#[tokio::test]
async fn test_next() {
- let datanode_manager = Arc::new(MockDatanodeManager::new(()));
- let ddl_context = new_ddl_context(datanode_manager);
+ let node_manager = Arc::new(MockDatanodeManager::new(()));
+ let ddl_context = new_ddl_context(node_manager);
ddl_context
.table_metadata_manager
.schema_manager()
diff --git a/src/common/meta/src/ddl/drop_table/executor.rs b/src/common/meta/src/ddl/drop_table/executor.rs
index 6659ee238f61..0c0f2ddc9cb9 100644
--- a/src/common/meta/src/ddl/drop_table/executor.rs
+++ b/src/common/meta/src/ddl/drop_table/executor.rs
@@ -175,7 +175,7 @@ impl DropTableExecutor {
let table_id = self.table_id;
for datanode in leaders {
- let requester = ctx.datanode_manager.datanode(&datanode).await;
+ let requester = ctx.node_manager.datanode(&datanode).await;
let regions = find_leader_regions(region_routes, &datanode);
let region_ids = regions
.iter()
@@ -271,8 +271,8 @@ mod tests {
#[tokio::test]
async fn test_on_prepare() {
// Drops if exists
- let datanode_manager = Arc::new(MockDatanodeManager::new(()));
- let ctx = new_ddl_context(datanode_manager);
+ let node_manager = Arc::new(MockDatanodeManager::new(()));
+ let ctx = new_ddl_context(node_manager);
let executor = DropTableExecutor::new(
TableName::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "my_table"),
1024,
diff --git a/src/common/meta/src/ddl/tests/alter_logical_tables.rs b/src/common/meta/src/ddl/tests/alter_logical_tables.rs
index b970d62a0ebf..41de5ef4b10b 100644
--- a/src/common/meta/src/ddl/tests/alter_logical_tables.rs
+++ b/src/common/meta/src/ddl/tests/alter_logical_tables.rs
@@ -83,8 +83,8 @@ fn make_alter_logical_table_rename_task(
#[tokio::test]
async fn test_on_prepare_check_schema() {
- let datanode_manager = Arc::new(MockDatanodeManager::new(()));
- let ddl_context = new_ddl_context(datanode_manager);
+ let node_manager = Arc::new(MockDatanodeManager::new(()));
+ let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let tasks = vec![
make_alter_logical_table_add_column_task(
@@ -107,8 +107,8 @@ async fn test_on_prepare_check_schema() {
#[tokio::test]
async fn test_on_prepare_check_alter_kind() {
- let datanode_manager = Arc::new(MockDatanodeManager::new(()));
- let ddl_context = new_ddl_context(datanode_manager);
+ let node_manager = Arc::new(MockDatanodeManager::new(()));
+ let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let tasks = vec![make_alter_logical_table_rename_task(
"schema1",
@@ -125,8 +125,8 @@ async fn test_on_prepare_check_alter_kind() {
#[tokio::test]
async fn test_on_prepare_different_physical_table() {
let cluster_id = 1;
- let datanode_manager = Arc::new(MockDatanodeManager::new(()));
- let ddl_context = new_ddl_context(datanode_manager);
+ let node_manager = Arc::new(MockDatanodeManager::new(()));
+ let ddl_context = new_ddl_context(node_manager);
let phy1_id = create_physical_table(&ddl_context, cluster_id, "phy1").await;
create_logical_table(ddl_context.clone(), cluster_id, phy1_id, "table1").await;
@@ -146,8 +146,8 @@ async fn test_on_prepare_different_physical_table() {
#[tokio::test]
async fn test_on_prepare_logical_table_not_exists() {
let cluster_id = 1;
- let datanode_manager = Arc::new(MockDatanodeManager::new(()));
- let ddl_context = new_ddl_context(datanode_manager);
+ let node_manager = Arc::new(MockDatanodeManager::new(()));
+ let ddl_context = new_ddl_context(node_manager);
// Creates physical table
let phy_id = create_physical_table(&ddl_context, cluster_id, "phy").await;
@@ -168,8 +168,8 @@ async fn test_on_prepare_logical_table_not_exists() {
#[tokio::test]
async fn test_on_prepare() {
let cluster_id = 1;
- let datanode_manager = Arc::new(MockDatanodeManager::new(()));
- let ddl_context = new_ddl_context(datanode_manager);
+ let node_manager = Arc::new(MockDatanodeManager::new(()));
+ let ddl_context = new_ddl_context(node_manager);
// Creates physical table
let phy_id = create_physical_table(&ddl_context, cluster_id, "phy").await;
@@ -192,8 +192,8 @@ async fn test_on_prepare() {
#[tokio::test]
async fn test_on_update_metadata() {
let cluster_id = 1;
- let datanode_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
- let ddl_context = new_ddl_context(datanode_manager);
+ let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
+ let ddl_context = new_ddl_context(node_manager);
// Creates physical table
let phy_id = create_physical_table(&ddl_context, cluster_id, "phy").await;
@@ -229,8 +229,8 @@ async fn test_on_update_metadata() {
#[tokio::test]
async fn test_on_part_duplicate_alter_request() {
let cluster_id = 1;
- let datanode_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
- let ddl_context = new_ddl_context(datanode_manager);
+ let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
+ let ddl_context = new_ddl_context(node_manager);
// Creates physical table
let phy_id = create_physical_table(&ddl_context, cluster_id, "phy").await;
diff --git a/src/common/meta/src/ddl/tests/alter_table.rs b/src/common/meta/src/ddl/tests/alter_table.rs
index 2342aaf44e0d..06654cfe0f3d 100644
--- a/src/common/meta/src/ddl/tests/alter_table.rs
+++ b/src/common/meta/src/ddl/tests/alter_table.rs
@@ -55,8 +55,8 @@ fn test_rename_alter_table_task(table_name: &str, new_table_name: &str) -> Alter
#[tokio::test]
async fn test_on_prepare_table_exists_err() {
- let datanode_manager = Arc::new(MockDatanodeManager::new(()));
- let ddl_context = new_ddl_context(datanode_manager);
+ let node_manager = Arc::new(MockDatanodeManager::new(()));
+ let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let task = test_create_table_task("foo", 1024);
// Puts a value to table name key.
@@ -78,8 +78,8 @@ async fn test_on_prepare_table_exists_err() {
#[tokio::test]
async fn test_on_prepare_table_not_exists_err() {
- let datanode_manager = Arc::new(MockDatanodeManager::new(()));
- let ddl_context = new_ddl_context(datanode_manager);
+ let node_manager = Arc::new(MockDatanodeManager::new(()));
+ let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let task = test_rename_alter_table_task("non-exists", "foo");
let mut procedure = AlterTableProcedure::new(cluster_id, 1024, task, ddl_context).unwrap();
@@ -91,8 +91,8 @@ async fn test_on_prepare_table_not_exists_err() {
async fn test_on_submit_alter_request() {
let (tx, mut rx) = mpsc::channel(8);
let datanode_handler = DatanodeWatcher(tx);
- let datanode_manager = Arc::new(MockDatanodeManager::new(datanode_handler));
- let ddl_context = new_ddl_context(datanode_manager);
+ let node_manager = Arc::new(MockDatanodeManager::new(datanode_handler));
+ let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let table_id = 1024;
let table_name = "foo";
@@ -175,10 +175,10 @@ async fn test_on_submit_alter_request() {
#[tokio::test]
async fn test_on_submit_alter_request_with_outdated_request() {
- let datanode_manager = Arc::new(MockDatanodeManager::new(
+ let node_manager = Arc::new(MockDatanodeManager::new(
RequestOutdatedErrorDatanodeHandler,
));
- let ddl_context = new_ddl_context(datanode_manager);
+ let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let table_id = 1024;
let table_name = "foo";
@@ -236,8 +236,8 @@ async fn test_on_submit_alter_request_with_outdated_request() {
#[tokio::test]
async fn test_on_update_metadata_rename() {
- let datanode_manager = Arc::new(MockDatanodeManager::new(()));
- let ddl_context = new_ddl_context(datanode_manager);
+ let node_manager = Arc::new(MockDatanodeManager::new(()));
+ let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let table_name = "foo";
let new_table_name = "bar";
@@ -287,8 +287,8 @@ async fn test_on_update_metadata_rename() {
#[tokio::test]
async fn test_on_update_metadata_add_columns() {
- let datanode_manager = Arc::new(MockDatanodeManager::new(()));
- let ddl_context = new_ddl_context(datanode_manager);
+ let node_manager = Arc::new(MockDatanodeManager::new(()));
+ let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let table_name = "foo";
let table_id = 1024;
diff --git a/src/common/meta/src/ddl/tests/create_logical_tables.rs b/src/common/meta/src/ddl/tests/create_logical_tables.rs
index 74000cb557c3..c4f65bcac449 100644
--- a/src/common/meta/src/ddl/tests/create_logical_tables.rs
+++ b/src/common/meta/src/ddl/tests/create_logical_tables.rs
@@ -33,8 +33,8 @@ use crate::test_util::{new_ddl_context, MockDatanodeManager};
#[tokio::test]
async fn test_on_prepare_physical_table_not_found() {
- let datanode_manager = Arc::new(MockDatanodeManager::new(()));
- let ddl_context = new_ddl_context(datanode_manager);
+ let node_manager = Arc::new(MockDatanodeManager::new(()));
+ let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let tasks = vec![test_create_logical_table_task("foo")];
let physical_table_id = 1024u32;
@@ -46,8 +46,8 @@ async fn test_on_prepare_physical_table_not_found() {
#[tokio::test]
async fn test_on_prepare() {
- let datanode_manager = Arc::new(MockDatanodeManager::new(()));
- let ddl_context = new_ddl_context(datanode_manager);
+ let node_manager = Arc::new(MockDatanodeManager::new(()));
+ let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
// Prepares physical table metadata.
let mut create_physical_table_task = test_create_physical_table_task("phy_table");
@@ -81,8 +81,8 @@ async fn test_on_prepare() {
#[tokio::test]
async fn test_on_prepare_logical_table_exists_err() {
- let datanode_manager = Arc::new(MockDatanodeManager::new(()));
- let ddl_context = new_ddl_context(datanode_manager);
+ let node_manager = Arc::new(MockDatanodeManager::new(()));
+ let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
// Prepares physical table metadata.
let mut create_physical_table_task = test_create_physical_table_task("phy_table");
@@ -127,8 +127,8 @@ async fn test_on_prepare_logical_table_exists_err() {
#[tokio::test]
async fn test_on_prepare_with_create_if_table_exists() {
- let datanode_manager = Arc::new(MockDatanodeManager::new(()));
- let ddl_context = new_ddl_context(datanode_manager);
+ let node_manager = Arc::new(MockDatanodeManager::new(()));
+ let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
// Prepares physical table metadata.
let mut create_physical_table_task = test_create_physical_table_task("phy_table");
@@ -175,8 +175,8 @@ async fn test_on_prepare_with_create_if_table_exists() {
#[tokio::test]
async fn test_on_prepare_part_logical_tables_exist() {
- let datanode_manager = Arc::new(MockDatanodeManager::new(()));
- let ddl_context = new_ddl_context(datanode_manager);
+ let node_manager = Arc::new(MockDatanodeManager::new(()));
+ let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
// Prepares physical table metadata.
let mut create_physical_table_task = test_create_physical_table_task("phy_table");
@@ -227,8 +227,8 @@ async fn test_on_prepare_part_logical_tables_exist() {
#[tokio::test]
async fn test_on_create_metadata() {
- let datanode_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
- let ddl_context = new_ddl_context(datanode_manager);
+ let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
+ let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
// Prepares physical table metadata.
let mut create_physical_table_task = test_create_physical_table_task("phy_table");
@@ -277,8 +277,8 @@ async fn test_on_create_metadata() {
#[tokio::test]
async fn test_on_create_metadata_part_logical_tables_exist() {
- let datanode_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
- let ddl_context = new_ddl_context(datanode_manager);
+ let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
+ let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
// Prepares physical table metadata.
let mut create_physical_table_task = test_create_physical_table_task("phy_table");
@@ -338,8 +338,8 @@ async fn test_on_create_metadata_part_logical_tables_exist() {
#[tokio::test]
async fn test_on_create_metadata_err() {
- let datanode_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
- let ddl_context = new_ddl_context(datanode_manager);
+ let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
+ let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
// Prepares physical table metadata.
let mut create_physical_table_task = test_create_physical_table_task("phy_table");
diff --git a/src/common/meta/src/ddl/tests/create_table.rs b/src/common/meta/src/ddl/tests/create_table.rs
index 33e5fd55d594..f9c464be0ab0 100644
--- a/src/common/meta/src/ddl/tests/create_table.rs
+++ b/src/common/meta/src/ddl/tests/create_table.rs
@@ -85,8 +85,8 @@ fn test_create_table_task(name: &str) -> CreateTableTask {
#[tokio::test]
async fn test_on_prepare_table_exists_err() {
- let datanode_manager = Arc::new(MockDatanodeManager::new(()));
- let ddl_context = new_ddl_context(datanode_manager);
+ let node_manager = Arc::new(MockDatanodeManager::new(()));
+ let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let task = test_create_table_task("foo");
assert!(!task.create_table.create_if_not_exists);
@@ -108,8 +108,8 @@ async fn test_on_prepare_table_exists_err() {
#[tokio::test]
async fn test_on_prepare_with_create_if_table_exists() {
- let datanode_manager = Arc::new(MockDatanodeManager::new(()));
- let ddl_context = new_ddl_context(datanode_manager);
+ let node_manager = Arc::new(MockDatanodeManager::new(()));
+ let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let mut task = test_create_table_task("foo");
task.create_table.create_if_not_exists = true;
@@ -133,8 +133,8 @@ async fn test_on_prepare_with_create_if_table_exists() {
#[tokio::test]
async fn test_on_prepare_without_create_if_table_exists() {
- let datanode_manager = Arc::new(MockDatanodeManager::new(()));
- let ddl_context = new_ddl_context(datanode_manager);
+ let node_manager = Arc::new(MockDatanodeManager::new(()));
+ let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let mut task = test_create_table_task("foo");
task.create_table.create_if_not_exists = true;
@@ -146,8 +146,8 @@ async fn test_on_prepare_without_create_if_table_exists() {
#[tokio::test]
async fn test_on_prepare_with_no_partition_err() {
- let datanode_manager = Arc::new(MockDatanodeManager::new(()));
- let ddl_context = new_ddl_context(datanode_manager);
+ let node_manager = Arc::new(MockDatanodeManager::new(()));
+ let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let mut task = test_create_table_task("foo");
task.partitions = vec![];
@@ -163,8 +163,8 @@ async fn test_on_prepare_with_no_partition_err() {
#[tokio::test]
async fn test_on_datanode_create_regions_should_retry() {
common_telemetry::init_default_ut_logging();
- let datanode_manager = Arc::new(MockDatanodeManager::new(RetryErrorDatanodeHandler));
- let ddl_context = new_ddl_context(datanode_manager);
+ let node_manager = Arc::new(MockDatanodeManager::new(RetryErrorDatanodeHandler));
+ let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let task = test_create_table_task("foo");
assert!(!task.create_table.create_if_not_exists);
@@ -181,8 +181,8 @@ async fn test_on_datanode_create_regions_should_retry() {
#[tokio::test]
async fn test_on_datanode_create_regions_should_not_retry() {
common_telemetry::init_default_ut_logging();
- let datanode_manager = Arc::new(MockDatanodeManager::new(UnexpectedErrorDatanodeHandler));
- let ddl_context = new_ddl_context(datanode_manager);
+ let node_manager = Arc::new(MockDatanodeManager::new(UnexpectedErrorDatanodeHandler));
+ let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let task = test_create_table_task("foo");
assert!(!task.create_table.create_if_not_exists);
@@ -199,8 +199,8 @@ async fn test_on_datanode_create_regions_should_not_retry() {
#[tokio::test]
async fn test_on_create_metadata_error() {
common_telemetry::init_default_ut_logging();
- let datanode_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
- let ddl_context = new_ddl_context(datanode_manager);
+ let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
+ let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let task = test_create_table_task("foo");
assert!(!task.create_table.create_if_not_exists);
@@ -231,8 +231,8 @@ async fn test_on_create_metadata_error() {
#[tokio::test]
async fn test_on_create_metadata() {
common_telemetry::init_default_ut_logging();
- let datanode_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
- let ddl_context = new_ddl_context(datanode_manager);
+ let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
+ let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let task = test_create_table_task("foo");
assert!(!task.create_table.create_if_not_exists);
@@ -253,9 +253,9 @@ async fn test_on_create_metadata() {
async fn test_memory_region_keeper_guard_dropped_on_procedure_done() {
let cluster_id = 1;
- let datanode_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
+ let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let kv_backend = Arc::new(MemoryKvBackend::new());
- let ddl_context = new_ddl_context_with_kv_backend(datanode_manager, kv_backend);
+ let ddl_context = new_ddl_context_with_kv_backend(node_manager, kv_backend);
let task = test_create_table_task("foo");
let mut procedure = CreateTableProcedure::new(cluster_id, task, ddl_context.clone());
diff --git a/src/common/meta/src/ddl/tests/drop_database.rs b/src/common/meta/src/ddl/tests/drop_database.rs
index d4469195c8b6..656e6eb914e8 100644
--- a/src/common/meta/src/ddl/tests/drop_database.rs
+++ b/src/common/meta/src/ddl/tests/drop_database.rs
@@ -29,8 +29,8 @@ use crate::test_util::{new_ddl_context, MockDatanodeManager};
async fn test_drop_database_with_logical_tables() {
common_telemetry::init_default_ut_logging();
let cluster_id = 1;
- let datanode_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
- let ddl_context = new_ddl_context(datanode_manager);
+ let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
+ let ddl_context = new_ddl_context(node_manager);
ddl_context
.table_metadata_manager
.schema_manager()
@@ -78,8 +78,8 @@ async fn test_drop_database_with_logical_tables() {
async fn test_drop_database_retryable_error() {
common_telemetry::init_default_ut_logging();
let cluster_id = 1;
- let datanode_manager = Arc::new(MockDatanodeManager::new(RetryErrorDatanodeHandler));
- let ddl_context = new_ddl_context(datanode_manager);
+ let node_manager = Arc::new(MockDatanodeManager::new(RetryErrorDatanodeHandler));
+ let ddl_context = new_ddl_context(node_manager);
ddl_context
.table_metadata_manager
.schema_manager()
diff --git a/src/common/meta/src/ddl/tests/drop_table.rs b/src/common/meta/src/ddl/tests/drop_table.rs
index 26ad4580339d..20034fa06f97 100644
--- a/src/common/meta/src/ddl/tests/drop_table.rs
+++ b/src/common/meta/src/ddl/tests/drop_table.rs
@@ -45,8 +45,8 @@ use crate::test_util::{new_ddl_context, new_ddl_context_with_kv_backend, MockDat
#[tokio::test]
async fn test_on_prepare_table_not_exists_err() {
- let datanode_manager = Arc::new(MockDatanodeManager::new(()));
- let ddl_context = new_ddl_context(datanode_manager);
+ let node_manager = Arc::new(MockDatanodeManager::new(()));
+ let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let table_name = "foo";
let table_id = 1024;
@@ -70,8 +70,8 @@ async fn test_on_prepare_table_not_exists_err() {
#[tokio::test]
async fn test_on_prepare_table() {
- let datanode_manager = Arc::new(MockDatanodeManager::new(()));
- let ddl_context = new_ddl_context(datanode_manager);
+ let node_manager = Arc::new(MockDatanodeManager::new(()));
+ let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let table_name = "foo";
let table_id = 1024;
@@ -102,8 +102,8 @@ async fn test_on_prepare_table() {
async fn test_on_datanode_drop_regions() {
let (tx, mut rx) = mpsc::channel(8);
let datanode_handler = DatanodeWatcher(tx);
- let datanode_manager = Arc::new(MockDatanodeManager::new(datanode_handler));
- let ddl_context = new_ddl_context(datanode_manager);
+ let node_manager = Arc::new(MockDatanodeManager::new(datanode_handler));
+ let ddl_context = new_ddl_context(node_manager);
let cluster_id = 1;
let table_id = 1024;
let table_name = "foo";
@@ -175,9 +175,9 @@ async fn test_on_datanode_drop_regions() {
#[tokio::test]
async fn test_on_rollback() {
- let datanode_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
+ let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let kv_backend = Arc::new(MemoryKvBackend::new());
- let ddl_context = new_ddl_context_with_kv_backend(datanode_manager, kv_backend.clone());
+ let ddl_context = new_ddl_context_with_kv_backend(node_manager, kv_backend.clone());
let cluster_id = 1;
// Prepares physical table metadata.
let mut create_physical_table_task = test_create_physical_table_task("phy_table");
@@ -258,9 +258,9 @@ fn new_drop_table_task(table_name: &str, table_id: TableId, drop_if_exists: bool
async fn test_memory_region_keeper_guard_dropped_on_procedure_done() {
let cluster_id = 1;
- let datanode_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
+ let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let kv_backend = Arc::new(MemoryKvBackend::new());
- let ddl_context = new_ddl_context_with_kv_backend(datanode_manager, kv_backend);
+ let ddl_context = new_ddl_context_with_kv_backend(node_manager, kv_backend);
let physical_table_id = create_physical_table(&ddl_context, cluster_id, "t").await;
let logical_table_id =
diff --git a/src/common/meta/src/ddl/truncate_table.rs b/src/common/meta/src/ddl/truncate_table.rs
index 7890f70fbf99..de0316de5179 100644
--- a/src/common/meta/src/ddl/truncate_table.rs
+++ b/src/common/meta/src/ddl/truncate_table.rs
@@ -143,7 +143,7 @@ impl TruncateTableProcedure {
let mut truncate_region_tasks = Vec::with_capacity(leaders.len());
for datanode in leaders {
- let requester = self.context.datanode_manager.datanode(&datanode).await;
+ let requester = self.context.node_manager.datanode(&datanode).await;
let regions = find_leader_regions(region_routes, &datanode);
for region in regions {
diff --git a/src/common/meta/src/ddl_manager.rs b/src/common/meta/src/ddl_manager.rs
index 7201f9602d3a..8db6198bd609 100644
--- a/src/common/meta/src/ddl_manager.rs
+++ b/src/common/meta/src/ddl_manager.rs
@@ -23,7 +23,6 @@ use snafu::{ensure, OptionExt, ResultExt};
use store_api::storage::TableId;
use crate::cache_invalidator::CacheInvalidatorRef;
-use crate::datanode_manager::NodeManagerRef;
use crate::ddl::alter_logical_tables::AlterLogicalTablesProcedure;
use crate::ddl::alter_table::AlterTableProcedure;
use crate::ddl::create_database::CreateDatabaseProcedure;
@@ -43,6 +42,7 @@ use crate::error::{
use crate::key::table_info::TableInfoValue;
use crate::key::table_name::TableNameKey;
use crate::key::{DeserializedValueWithBytes, TableMetadataManagerRef};
+use crate::node_manager::NodeManagerRef;
use crate::region_keeper::MemoryRegionKeeperRef;
use crate::rpc::ddl::DdlTask::{
AlterLogicalTables, AlterTable, CreateDatabase, CreateLogicalTables, CreateTable, DropDatabase,
@@ -64,7 +64,7 @@ pub type BoxedProcedureLoaderFactory = dyn Fn(DdlContext) -> BoxedProcedureLoade
/// The [DdlManager] provides the ability to execute Ddl.
pub struct DdlManager {
procedure_manager: ProcedureManagerRef,
- datanode_manager: NodeManagerRef,
+ node_manager: NodeManagerRef,
cache_invalidator: CacheInvalidatorRef,
table_metadata_manager: TableMetadataManagerRef,
table_metadata_allocator: TableMetadataAllocatorRef,
@@ -84,7 +84,7 @@ impl DdlManager {
) -> Result<Self> {
let manager = Self {
procedure_manager,
- datanode_manager: datanode_clients,
+ node_manager: datanode_clients,
cache_invalidator,
table_metadata_manager,
table_metadata_allocator,
@@ -104,7 +104,7 @@ impl DdlManager {
/// Returns the [DdlContext]
pub fn create_context(&self) -> DdlContext {
DdlContext {
- datanode_manager: self.datanode_manager.clone(),
+ node_manager: self.node_manager.clone(),
cache_invalidator: self.cache_invalidator.clone(),
table_metadata_manager: self.table_metadata_manager.clone(),
memory_region_keeper: self.memory_region_keeper.clone(),
@@ -716,7 +716,6 @@ mod tests {
use super::DdlManager;
use crate::cache_invalidator::DummyCacheInvalidator;
- use crate::datanode_manager::{DatanodeRef, FlownodeRef, NodeManager};
use crate::ddl::alter_table::AlterTableProcedure;
use crate::ddl::create_table::CreateTableProcedure;
use crate::ddl::drop_table::DropTableProcedure;
@@ -724,6 +723,7 @@ mod tests {
use crate::ddl::truncate_table::TruncateTableProcedure;
use crate::key::TableMetadataManager;
use crate::kv_backend::memory::MemoryKvBackend;
+ use crate::node_manager::{DatanodeRef, FlownodeRef, NodeManager};
use crate::peer::Peer;
use crate::region_keeper::MemoryRegionKeeper;
use crate::sequence::SequenceBuilder;
diff --git a/src/common/meta/src/lib.rs b/src/common/meta/src/lib.rs
index 8aa8c8abecc4..385f46818a47 100644
--- a/src/common/meta/src/lib.rs
+++ b/src/common/meta/src/lib.rs
@@ -20,7 +20,6 @@
pub mod cache_invalidator;
pub mod cluster;
-pub mod datanode_manager;
pub mod ddl;
pub mod ddl_manager;
pub mod distributed_time_constants;
@@ -31,6 +30,7 @@ pub mod key;
pub mod kv_backend;
pub mod lock_key;
pub mod metrics;
+pub mod node_manager;
pub mod peer;
pub mod range_stream;
pub mod region_keeper;
diff --git a/src/common/meta/src/datanode_manager.rs b/src/common/meta/src/node_manager.rs
similarity index 100%
rename from src/common/meta/src/datanode_manager.rs
rename to src/common/meta/src/node_manager.rs
diff --git a/src/common/meta/src/test_util.rs b/src/common/meta/src/test_util.rs
index c4c64445d796..3d282e8caff3 100644
--- a/src/common/meta/src/test_util.rs
+++ b/src/common/meta/src/test_util.rs
@@ -20,13 +20,13 @@ pub use common_base::AffectedRows;
use common_recordbatch::SendableRecordBatchStream;
use crate::cache_invalidator::DummyCacheInvalidator;
-use crate::datanode_manager::{Datanode, DatanodeRef, FlownodeRef, NodeManager, NodeManagerRef};
use crate::ddl::table_meta::TableMetadataAllocator;
use crate::ddl::DdlContext;
use crate::error::Result;
use crate::key::TableMetadataManager;
use crate::kv_backend::memory::MemoryKvBackend;
use crate::kv_backend::KvBackendRef;
+use crate::node_manager::{Datanode, DatanodeRef, FlownodeRef, NodeManager, NodeManagerRef};
use crate::peer::Peer;
use crate::region_keeper::MemoryRegionKeeper;
use crate::sequence::SequenceBuilder;
@@ -88,20 +88,20 @@ impl<T: MockDatanodeHandler + 'static> NodeManager for MockDatanodeManager<T> {
}
/// Returns a test purpose [DdlContext].
-pub fn new_ddl_context(datanode_manager: NodeManagerRef) -> DdlContext {
+pub fn new_ddl_context(node_manager: NodeManagerRef) -> DdlContext {
let kv_backend = Arc::new(MemoryKvBackend::new());
- new_ddl_context_with_kv_backend(datanode_manager, kv_backend)
+ new_ddl_context_with_kv_backend(node_manager, kv_backend)
}
/// Returns a test purpose [DdlContext] with a specified [KvBackendRef].
pub fn new_ddl_context_with_kv_backend(
- datanode_manager: NodeManagerRef,
+ node_manager: NodeManagerRef,
kv_backend: KvBackendRef,
) -> DdlContext {
let table_metadata_manager = Arc::new(TableMetadataManager::new(kv_backend.clone()));
DdlContext {
- datanode_manager,
+ node_manager,
cache_invalidator: Arc::new(DummyCacheInvalidator),
memory_region_keeper: Arc::new(MemoryRegionKeeper::new()),
table_metadata_allocator: Arc::new(TableMetadataAllocator::new(
diff --git a/src/frontend/src/instance/builder.rs b/src/frontend/src/instance/builder.rs
index 2f39a0dd6795..719c5f33cc64 100644
--- a/src/frontend/src/instance/builder.rs
+++ b/src/frontend/src/instance/builder.rs
@@ -17,10 +17,10 @@ use std::sync::Arc;
use catalog::CatalogManagerRef;
use common_base::Plugins;
use common_meta::cache_invalidator::{CacheInvalidatorRef, DummyCacheInvalidator};
-use common_meta::datanode_manager::NodeManagerRef;
use common_meta::ddl::ProcedureExecutorRef;
use common_meta::key::TableMetadataManager;
use common_meta::kv_backend::KvBackendRef;
+use common_meta::node_manager::NodeManagerRef;
use operator::delete::Deleter;
use operator::insert::Inserter;
use operator::procedure::ProcedureServiceOperator;
@@ -42,7 +42,7 @@ pub struct FrontendBuilder {
kv_backend: KvBackendRef,
cache_invalidator: Option<CacheInvalidatorRef>,
catalog_manager: CatalogManagerRef,
- datanode_manager: NodeManagerRef,
+ node_manager: NodeManagerRef,
plugins: Option<Plugins>,
procedure_executor: ProcedureExecutorRef,
heartbeat_task: Option<HeartbeatTask>,
@@ -52,14 +52,14 @@ impl FrontendBuilder {
pub fn new(
kv_backend: KvBackendRef,
catalog_manager: CatalogManagerRef,
- datanode_manager: NodeManagerRef,
+ node_manager: NodeManagerRef,
procedure_executor: ProcedureExecutorRef,
) -> Self {
Self {
kv_backend,
cache_invalidator: None,
catalog_manager,
- datanode_manager,
+ node_manager,
plugins: None,
procedure_executor,
heartbeat_task: None,
@@ -89,7 +89,7 @@ impl FrontendBuilder {
pub async fn try_build(self) -> Result<Instance> {
let kv_backend = self.kv_backend;
- let datanode_manager = self.datanode_manager;
+ let node_manager = self.node_manager;
let plugins = self.plugins.unwrap_or_default();
let partition_manager = Arc::new(PartitionRuleManager::new(kv_backend.clone()));
@@ -99,22 +99,22 @@ impl FrontendBuilder {
.unwrap_or_else(|| Arc::new(DummyCacheInvalidator));
let region_query_handler =
- FrontendRegionQueryHandler::arc(partition_manager.clone(), datanode_manager.clone());
+ FrontendRegionQueryHandler::arc(partition_manager.clone(), node_manager.clone());
let inserter = Arc::new(Inserter::new(
self.catalog_manager.clone(),
partition_manager.clone(),
- datanode_manager.clone(),
+ node_manager.clone(),
));
let deleter = Arc::new(Deleter::new(
self.catalog_manager.clone(),
partition_manager.clone(),
- datanode_manager.clone(),
+ node_manager.clone(),
));
let requester = Arc::new(Requester::new(
self.catalog_manager.clone(),
partition_manager,
- datanode_manager.clone(),
+ node_manager.clone(),
));
let table_mutation_handler = Arc::new(TableMutationOperator::new(
inserter.clone(),
diff --git a/src/frontend/src/instance/region_query.rs b/src/frontend/src/instance/region_query.rs
index a6c21e35030a..3cbd07e75905 100644
--- a/src/frontend/src/instance/region_query.rs
+++ b/src/frontend/src/instance/region_query.rs
@@ -17,7 +17,7 @@ use std::sync::Arc;
use api::v1::region::QueryRequest;
use async_trait::async_trait;
use common_error::ext::BoxedError;
-use common_meta::datanode_manager::NodeManagerRef;
+use common_meta::node_manager::NodeManagerRef;
use common_recordbatch::SendableRecordBatchStream;
use partition::manager::PartitionRuleManagerRef;
use query::error::{RegionQuerySnafu, Result as QueryResult};
@@ -29,17 +29,17 @@ use crate::error::{FindTableRouteSnafu, RequestQuerySnafu, Result};
pub(crate) struct FrontendRegionQueryHandler {
partition_manager: PartitionRuleManagerRef,
- datanode_manager: NodeManagerRef,
+ node_manager: NodeManagerRef,
}
impl FrontendRegionQueryHandler {
pub fn arc(
partition_manager: PartitionRuleManagerRef,
- datanode_manager: NodeManagerRef,
+ node_manager: NodeManagerRef,
) -> Arc<Self> {
Arc::new(Self {
partition_manager,
- datanode_manager,
+ node_manager,
})
}
}
@@ -66,7 +66,7 @@ impl FrontendRegionQueryHandler {
table_id: region_id.table_id(),
})?;
- let client = self.datanode_manager.datanode(peer).await;
+ let client = self.node_manager.datanode(peer).await;
client
.handle_query(request)
diff --git a/src/frontend/src/instance/standalone.rs b/src/frontend/src/instance/standalone.rs
index fcb1bd61e42f..911c7fd30b11 100644
--- a/src/frontend/src/instance/standalone.rs
+++ b/src/frontend/src/instance/standalone.rs
@@ -19,8 +19,8 @@ use api::v1::region::{QueryRequest, RegionRequest, RegionResponse as RegionRespo
use async_trait::async_trait;
use client::region::check_response_header;
use common_error::ext::BoxedError;
-use common_meta::datanode_manager::{Datanode, DatanodeRef, FlownodeRef, NodeManager};
use common_meta::error::{self as meta_error, Result as MetaResult};
+use common_meta::node_manager::{Datanode, DatanodeRef, FlownodeRef, NodeManager};
use common_meta::peer::Peer;
use common_recordbatch::SendableRecordBatchStream;
use common_telemetry::tracing;
diff --git a/src/meta-srv/src/metasrv/builder.rs b/src/meta-srv/src/metasrv/builder.rs
index ea8613db4f0d..223ccf11d147 100644
--- a/src/meta-srv/src/metasrv/builder.rs
+++ b/src/meta-srv/src/metasrv/builder.rs
@@ -20,13 +20,13 @@ use client::client_manager::DatanodeClients;
use common_base::Plugins;
use common_catalog::consts::MIN_USER_TABLE_ID;
use common_grpc::channel_manager::ChannelConfig;
-use common_meta::datanode_manager::NodeManagerRef;
use common_meta::ddl::table_meta::{TableMetadataAllocator, TableMetadataAllocatorRef};
use common_meta::ddl_manager::{DdlManager, DdlManagerRef};
use common_meta::distributed_time_constants;
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
use common_meta::kv_backend::memory::MemoryKvBackend;
use common_meta::kv_backend::{KvBackendRef, ResettableKvBackendRef};
+use common_meta::node_manager::NodeManagerRef;
use common_meta::region_keeper::{MemoryRegionKeeper, MemoryRegionKeeperRef};
use common_meta::sequence::SequenceBuilder;
use common_meta::state_store::KvStateStore;
@@ -79,7 +79,7 @@ pub struct MetasrvBuilder {
election: Option<ElectionRef>,
meta_peer_client: Option<MetaPeerClientRef>,
lock: Option<DistLockRef>,
- datanode_manager: Option<NodeManagerRef>,
+ node_manager: Option<NodeManagerRef>,
plugins: Option<Plugins>,
table_metadata_allocator: Option<TableMetadataAllocatorRef>,
}
@@ -95,7 +95,7 @@ impl MetasrvBuilder {
election: None,
options: None,
lock: None,
- datanode_manager: None,
+ node_manager: None,
plugins: None,
table_metadata_allocator: None,
}
@@ -141,8 +141,8 @@ impl MetasrvBuilder {
self
}
- pub fn datanode_manager(mut self, datanode_manager: NodeManagerRef) -> Self {
- self.datanode_manager = Some(datanode_manager);
+ pub fn node_manager(mut self, node_manager: NodeManagerRef) -> Self {
+ self.node_manager = Some(node_manager);
self
}
@@ -171,7 +171,7 @@ impl MetasrvBuilder {
selector,
handler_group,
lock,
- datanode_manager,
+ node_manager,
plugins,
table_metadata_allocator,
} = self;
@@ -236,7 +236,7 @@ impl MetasrvBuilder {
let ddl_manager = build_ddl_manager(
&options,
- datanode_manager,
+ node_manager,
&procedure_manager,
&mailbox,
&table_metadata_manager,
diff --git a/src/meta-srv/src/mocks.rs b/src/meta-srv/src/mocks.rs
index 53e22ce6d4af..e9a3b58c8e63 100644
--- a/src/meta-srv/src/mocks.rs
+++ b/src/meta-srv/src/mocks.rs
@@ -70,7 +70,7 @@ pub async fn mock(
};
let builder = match datanode_clients {
- Some(clients) => builder.datanode_manager(clients),
+ Some(clients) => builder.node_manager(clients),
None => builder,
};
diff --git a/src/meta-srv/src/procedure/tests.rs b/src/meta-srv/src/procedure/tests.rs
index ce2e5cda4d9d..028df5411091 100644
--- a/src/meta-srv/src/procedure/tests.rs
+++ b/src/meta-srv/src/procedure/tests.rs
@@ -21,7 +21,6 @@ use api::v1::region::{CreateRequest as PbCreateRegionRequest, RegionColumnDef};
use api::v1::{ColumnDataType, ColumnDef as PbColumnDef, SemanticType};
use client::client_manager::DatanodeClients;
use common_catalog::consts::MITO2_ENGINE;
-use common_meta::datanode_manager::NodeManagerRef;
use common_meta::ddl::create_logical_tables::{CreateLogicalTablesProcedure, CreateTablesState};
use common_meta::ddl::create_table::*;
use common_meta::ddl::test_util::columns::TestColumnDefBuilder;
@@ -29,6 +28,7 @@ use common_meta::ddl::test_util::create_table::{
build_raw_table_info_from_expr, TestCreateTableExprBuilder,
};
use common_meta::key::table_route::{PhysicalTableRouteValue, TableRouteValue};
+use common_meta::node_manager::NodeManagerRef;
use common_meta::rpc::ddl::CreateTableTask;
use common_meta::rpc::router::{find_leaders, RegionRoute};
use common_procedure::Status;
@@ -170,7 +170,7 @@ fn test_region_request_builder() {
assert_eq!(template.template(), &expected);
}
-async fn new_datanode_manager(
+async fn new_node_manager(
region_server: &EchoRegionServer,
region_routes: &[RegionRoute],
) -> NodeManagerRef {
@@ -189,12 +189,12 @@ async fn new_datanode_manager(
async fn test_on_datanode_create_regions() {
let (region_server, mut rx) = EchoRegionServer::new();
let region_routes = test_data::new_region_routes();
- let datanode_manager = new_datanode_manager(&region_server, &region_routes).await;
+ let node_manager = new_node_manager(&region_server, &region_routes).await;
let mut procedure = CreateTableProcedure::new(
1,
create_table_task(None),
- test_data::new_ddl_context(datanode_manager),
+ test_data::new_ddl_context(node_manager),
);
procedure.set_allocated_metadata(
@@ -241,7 +241,7 @@ async fn test_on_datanode_create_regions() {
async fn test_on_datanode_create_logical_regions() {
let (region_server, mut rx) = EchoRegionServer::new();
let region_routes = test_data::new_region_routes();
- let datanode_manager = new_datanode_manager(&region_server, &region_routes).await;
+ let node_manager = new_node_manager(&region_server, &region_routes).await;
let physical_table_route = TableRouteValue::physical(region_routes);
let physical_table_id = 1;
@@ -249,7 +249,7 @@ async fn test_on_datanode_create_logical_regions() {
let task2 = create_table_task(Some("my_table2"));
let task3 = create_table_task(Some("my_table3"));
- let ctx = test_data::new_ddl_context(datanode_manager);
+ let ctx = test_data::new_ddl_context(node_manager);
let kv_backend = ctx.table_metadata_manager.kv_backend();
let physical_route_txn = ctx
.table_metadata_manager
diff --git a/src/meta-srv/src/procedure/utils.rs b/src/meta-srv/src/procedure/utils.rs
index a79138e7a7c8..f614f33b00d8 100644
--- a/src/meta-srv/src/procedure/utils.rs
+++ b/src/meta-srv/src/procedure/utils.rs
@@ -105,11 +105,11 @@ pub mod test_data {
use chrono::DateTime;
use common_catalog::consts::MITO2_ENGINE;
- use common_meta::datanode_manager::NodeManagerRef;
use common_meta::ddl::table_meta::TableMetadataAllocator;
use common_meta::ddl::DdlContext;
use common_meta::key::TableMetadataManager;
use common_meta::kv_backend::memory::MemoryKvBackend;
+ use common_meta::node_manager::NodeManagerRef;
use common_meta::peer::Peer;
use common_meta::region_keeper::MemoryRegionKeeper;
use common_meta::rpc::router::RegionRoute;
@@ -188,7 +188,7 @@ pub mod test_data {
}
}
- pub(crate) fn new_ddl_context(datanode_manager: NodeManagerRef) -> DdlContext {
+ pub(crate) fn new_ddl_context(node_manager: NodeManagerRef) -> DdlContext {
let kv_backend = Arc::new(MemoryKvBackend::new());
let mailbox_sequence =
@@ -197,7 +197,7 @@ pub mod test_data {
let table_metadata_manager = Arc::new(TableMetadataManager::new(kv_backend.clone()));
DdlContext {
- datanode_manager,
+ node_manager,
cache_invalidator: Arc::new(MetasrvCacheInvalidator::new(
mailbox,
MetasrvInfo {
diff --git a/src/operator/src/delete.rs b/src/operator/src/delete.rs
index 309edf4146b0..ecd3ec23c573 100644
--- a/src/operator/src/delete.rs
+++ b/src/operator/src/delete.rs
@@ -19,7 +19,7 @@ use std::{iter, mem};
use api::v1::region::{DeleteRequests as RegionDeleteRequests, RegionRequestHeader};
use api::v1::{DeleteRequests, RowDeleteRequests};
use catalog::CatalogManagerRef;
-use common_meta::datanode_manager::{AffectedRows, NodeManagerRef};
+use common_meta::node_manager::{AffectedRows, NodeManagerRef};
use common_meta::peer::Peer;
use common_query::Output;
use common_telemetry::tracing_context::TracingContext;
@@ -40,7 +40,7 @@ use crate::req_convert::delete::{ColumnToRow, RowToRegion, TableToRegion};
pub struct Deleter {
catalog_manager: CatalogManagerRef,
partition_manager: PartitionRuleManagerRef,
- datanode_manager: NodeManagerRef,
+ node_manager: NodeManagerRef,
}
pub type DeleterRef = Arc<Deleter>;
@@ -49,12 +49,12 @@ impl Deleter {
pub fn new(
catalog_manager: CatalogManagerRef,
partition_manager: PartitionRuleManagerRef,
- datanode_manager: NodeManagerRef,
+ node_manager: NodeManagerRef,
) -> Self {
Self {
catalog_manager,
partition_manager,
- datanode_manager,
+ node_manager,
}
}
@@ -133,9 +133,9 @@ impl Deleter {
.into_iter()
.map(|(peer, deletes)| {
let request = request_factory.build_delete(deletes);
- let datanode_manager = self.datanode_manager.clone();
+ let node_manager = self.node_manager.clone();
common_runtime::spawn_write(async move {
- datanode_manager
+ node_manager
.datanode(&peer)
.await
.handle(request)
diff --git a/src/operator/src/insert.rs b/src/operator/src/insert.rs
index 95ff84f13a1b..5b2ac304d96e 100644
--- a/src/operator/src/insert.rs
+++ b/src/operator/src/insert.rs
@@ -25,7 +25,7 @@ use catalog::CatalogManagerRef;
use client::{OutputData, OutputMeta};
use common_catalog::consts::default_engine;
use common_grpc_expr::util::{extract_new_columns, ColumnExpr};
-use common_meta::datanode_manager::{AffectedRows, NodeManagerRef};
+use common_meta::node_manager::{AffectedRows, NodeManagerRef};
use common_meta::peer::Peer;
use common_query::prelude::{GREPTIME_TIMESTAMP, GREPTIME_VALUE};
use common_query::Output;
@@ -57,7 +57,7 @@ use crate::statement::StatementExecutor;
pub struct Inserter {
catalog_manager: CatalogManagerRef,
partition_manager: PartitionRuleManagerRef,
- datanode_manager: NodeManagerRef,
+ node_manager: NodeManagerRef,
}
pub type InserterRef = Arc<Inserter>;
@@ -66,12 +66,12 @@ impl Inserter {
pub fn new(
catalog_manager: CatalogManagerRef,
partition_manager: PartitionRuleManagerRef,
- datanode_manager: NodeManagerRef,
+ node_manager: NodeManagerRef,
) -> Self {
Self {
catalog_manager,
partition_manager,
- datanode_manager,
+ node_manager,
}
}
@@ -205,9 +205,9 @@ impl Inserter {
.into_iter()
.map(|(peer, inserts)| {
let request = request_factory.build_insert(inserts);
- let datanode_manager = self.datanode_manager.clone();
+ let node_manager = self.node_manager.clone();
common_runtime::spawn_write(async move {
- datanode_manager
+ node_manager
.datanode(&peer)
.await
.handle(request)
diff --git a/src/operator/src/request.rs b/src/operator/src/request.rs
index 768a2850aac3..b25228b5d068 100644
--- a/src/operator/src/request.rs
+++ b/src/operator/src/request.rs
@@ -18,7 +18,7 @@ use api::v1::region::region_request::Body as RegionRequestBody;
use api::v1::region::{CompactRequest, FlushRequest, RegionRequestHeader};
use catalog::CatalogManagerRef;
use common_catalog::build_db_string;
-use common_meta::datanode_manager::{AffectedRows, NodeManagerRef};
+use common_meta::node_manager::{AffectedRows, NodeManagerRef};
use common_meta::peer::Peer;
use common_telemetry::logging::{error, info};
use common_telemetry::tracing_context::TracingContext;
@@ -39,7 +39,7 @@ use crate::region_req_factory::RegionRequestFactory;
pub struct Requester {
catalog_manager: CatalogManagerRef,
partition_manager: PartitionRuleManagerRef,
- datanode_manager: NodeManagerRef,
+ node_manager: NodeManagerRef,
}
pub type RequesterRef = Arc<Requester>;
@@ -48,12 +48,12 @@ impl Requester {
pub fn new(
catalog_manager: CatalogManagerRef,
partition_manager: PartitionRuleManagerRef,
- datanode_manager: NodeManagerRef,
+ node_manager: NodeManagerRef,
) -> Self {
Self {
catalog_manager,
partition_manager,
- datanode_manager,
+ node_manager,
}
}
@@ -168,11 +168,11 @@ impl Requester {
let tasks = requests.into_iter().map(|req_body| {
let request = request_factory.build_request(req_body.clone());
let partition_manager = self.partition_manager.clone();
- let datanode_manager = self.datanode_manager.clone();
+ let node_manager = self.node_manager.clone();
common_runtime::spawn_write(async move {
let peer =
Self::find_region_leader_by_request(partition_manager, &req_body).await?;
- datanode_manager
+ node_manager
.datanode(&peer)
.await
.handle(request)
diff --git a/tests-integration/src/standalone.rs b/tests-integration/src/standalone.rs
index 72b36dad5da1..79fbef604e24 100644
--- a/tests-integration/src/standalone.rs
+++ b/tests-integration/src/standalone.rs
@@ -130,7 +130,7 @@ impl GreptimeDbStandaloneBuilder {
let catalog_manager =
KvBackendCatalogManager::new(kv_backend.clone(), multi_cache_invalidator.clone()).await;
- let datanode_manager = Arc::new(StandaloneDatanodeManager(datanode.region_server()));
+ let node_manager = Arc::new(StandaloneDatanodeManager(datanode.region_server()));
let table_id_sequence = Arc::new(
SequenceBuilder::new("table_id", kv_backend.clone())
@@ -150,7 +150,7 @@ impl GreptimeDbStandaloneBuilder {
let ddl_task_executor = Arc::new(
DdlManager::try_new(
procedure_manager.clone(),
- datanode_manager.clone(),
+ node_manager.clone(),
multi_cache_invalidator,
table_metadata_manager,
table_meta_allocator,
@@ -163,7 +163,7 @@ impl GreptimeDbStandaloneBuilder {
let instance = FrontendBuilder::new(
kv_backend.clone(),
catalog_manager,
- datanode_manager,
+ node_manager,
ddl_task_executor,
)
.with_plugin(plugins)
|
chore
|
rename all `datanode_manager` to `node_manager` (#3813)
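For readers skimming the long diff above: the renamed `node_manager` field is only a lookup handle — writes are first grouped by the peer that hosts each region, then one request per datanode is dispatched through `node_manager.datanode(&peer).await.handle(request)`. Below is a compact, self-contained Rust sketch of that grouping step; `Peer`, `RegionWrite`, `group_by_peer`, and the route map are illustrative stand-ins, not GreptimeDB's real API.

use std::collections::HashMap;

#[derive(Clone, Debug, Hash, PartialEq, Eq)]
struct Peer(u64); // illustrative stand-in for a datanode peer

#[derive(Debug)]
struct RegionWrite {
    region_id: u64,
    rows: usize,
}

// Group pending writes by the peer that currently hosts each region, so that
// a single request per datanode can later be sent via the node_manager handle.
fn group_by_peer(
    writes: Vec<RegionWrite>,
    region_to_peer: &HashMap<u64, Peer>,
) -> HashMap<Peer, Vec<RegionWrite>> {
    let mut grouped: HashMap<Peer, Vec<RegionWrite>> = HashMap::new();
    for write in writes {
        if let Some(peer) = region_to_peer.get(&write.region_id) {
            grouped.entry(peer.clone()).or_default().push(write);
        }
    }
    grouped
}

fn main() {
    let routes = HashMap::from([(1, Peer(100)), (2, Peer(100)), (3, Peer(200))]);
    let writes = vec![
        RegionWrite { region_id: 1, rows: 10 },
        RegionWrite { region_id: 2, rows: 7 },
        RegionWrite { region_id: 3, rows: 5 },
    ];
    let grouped = group_by_peer(writes, &routes);
    assert_eq!(grouped[&Peer(100)].len(), 2);
    assert_eq!(grouped[&Peer(200)][0].rows, 5);
}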
|
4306cba8660aeec97c88ad326ebe34a7fa870806
|
2024-06-20 09:51:58
|
dennis zhuang
|
feat: show database options (#4174)
| false
|
diff --git a/src/auth/tests/mod.rs b/src/auth/tests/mod.rs
index 6f7bbb0fe068..310d476ba22d 100644
--- a/src/auth/tests/mod.rs
+++ b/src/auth/tests/mod.rs
@@ -52,7 +52,10 @@ fn test_permission_checker() {
let sql_result = checker.check_permission(
None,
- PermissionReq::SqlStatement(&Statement::ShowDatabases(ShowDatabases::new(ShowKind::All))),
+ PermissionReq::SqlStatement(&Statement::ShowDatabases(ShowDatabases::new(
+ ShowKind::All,
+ false,
+ ))),
);
assert_matches!(sql_result, Ok(PermissionResp::Reject));
diff --git a/src/catalog/src/information_schema/schemata.rs b/src/catalog/src/information_schema/schemata.rs
index 0d5196ca1a60..d4151e827916 100644
--- a/src/catalog/src/information_schema/schemata.rs
+++ b/src/catalog/src/information_schema/schemata.rs
@@ -17,6 +17,7 @@ use std::sync::{Arc, Weak};
use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::INFORMATION_SCHEMA_SCHEMATA_TABLE_ID;
use common_error::ext::BoxedError;
+use common_meta::key::schema_name::SchemaNameKey;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use datafusion::execution::TaskContext;
@@ -32,15 +33,18 @@ use store_api::storage::{ScanRequest, TableId};
use super::SCHEMATA;
use crate::error::{
- CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
+ CreateRecordBatchSnafu, InternalSnafu, Result, SchemaNotFoundSnafu, TableMetadataManagerSnafu,
+ UpgradeWeakCatalogManagerRefSnafu,
};
-use crate::information_schema::{InformationTable, Predicates};
+use crate::information_schema::{utils, InformationTable, Predicates};
use crate::CatalogManager;
pub const CATALOG_NAME: &str = "catalog_name";
pub const SCHEMA_NAME: &str = "schema_name";
const DEFAULT_CHARACTER_SET_NAME: &str = "default_character_set_name";
const DEFAULT_COLLATION_NAME: &str = "default_collation_name";
+/// The database options
+pub const SCHEMA_OPTS: &str = "options";
const INIT_CAPACITY: usize = 42;
/// The `information_schema.schemata` table implementation.
@@ -74,6 +78,7 @@ impl InformationSchemaSchemata {
false,
),
ColumnSchema::new("sql_path", ConcreteDataType::string_datatype(), true),
+ ColumnSchema::new(SCHEMA_OPTS, ConcreteDataType::string_datatype(), true),
]))
}
@@ -133,6 +138,7 @@ struct InformationSchemaSchemataBuilder {
charset_names: StringVectorBuilder,
collation_names: StringVectorBuilder,
sql_paths: StringVectorBuilder,
+ schema_options: StringVectorBuilder,
}
impl InformationSchemaSchemataBuilder {
@@ -150,6 +156,7 @@ impl InformationSchemaSchemataBuilder {
charset_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
collation_names: StringVectorBuilder::with_capacity(INIT_CAPACITY),
sql_paths: StringVectorBuilder::with_capacity(INIT_CAPACITY),
+ schema_options: StringVectorBuilder::with_capacity(INIT_CAPACITY),
}
}
@@ -160,21 +167,50 @@ impl InformationSchemaSchemataBuilder {
.catalog_manager
.upgrade()
.context(UpgradeWeakCatalogManagerRefSnafu)?;
+ let table_metadata_manager = utils::table_meta_manager(&self.catalog_manager)?;
let predicates = Predicates::from_scan_request(&request);
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
- self.add_schema(&predicates, &catalog_name, &schema_name);
+ let opts = if let Some(table_metadata_manager) = &table_metadata_manager {
+ let schema_opts = table_metadata_manager
+ .schema_manager()
+ .get(SchemaNameKey::new(&catalog_name, &schema_name))
+ .await
+ .context(TableMetadataManagerSnafu)?
+ .context(SchemaNotFoundSnafu {
+ catalog: &catalog_name,
+ schema: &schema_name,
+ })?;
+
+ Some(format!("{schema_opts}"))
+ } else {
+ None
+ };
+
+ self.add_schema(
+ &predicates,
+ &catalog_name,
+ &schema_name,
+ opts.as_deref().unwrap_or(""),
+ );
}
self.finish()
}
- fn add_schema(&mut self, predicates: &Predicates, catalog_name: &str, schema_name: &str) {
+ fn add_schema(
+ &mut self,
+ predicates: &Predicates,
+ catalog_name: &str,
+ schema_name: &str,
+ schema_options: &str,
+ ) {
let row = [
(CATALOG_NAME, &Value::from(catalog_name)),
(SCHEMA_NAME, &Value::from(schema_name)),
(DEFAULT_CHARACTER_SET_NAME, &Value::from("utf8")),
(DEFAULT_COLLATION_NAME, &Value::from("utf8_bin")),
+ (SCHEMA_OPTS, &Value::from(schema_options)),
];
if !predicates.eval(&row) {
@@ -186,6 +222,7 @@ impl InformationSchemaSchemataBuilder {
self.charset_names.push(Some("utf8"));
self.collation_names.push(Some("utf8_bin"));
self.sql_paths.push(None);
+ self.schema_options.push(Some(schema_options));
}
fn finish(&mut self) -> Result<RecordBatch> {
@@ -195,6 +232,7 @@ impl InformationSchemaSchemataBuilder {
Arc::new(self.charset_names.finish()),
Arc::new(self.collation_names.finish()),
Arc::new(self.sql_paths.finish()),
+ Arc::new(self.schema_options.finish()),
];
RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
}
diff --git a/src/catalog/src/information_schema/utils.rs b/src/catalog/src/information_schema/utils.rs
index e476e1fc9147..6a3594718203 100644
--- a/src/catalog/src/information_schema/utils.rs
+++ b/src/catalog/src/information_schema/utils.rs
@@ -15,6 +15,7 @@
use std::sync::{Arc, Weak};
use common_config::Mode;
+use common_meta::key::TableMetadataManagerRef;
use meta_client::client::MetaClient;
use snafu::OptionExt;
@@ -51,3 +52,17 @@ pub fn meta_client(catalog_manager: &Weak<dyn CatalogManager>) -> Result<Option<
Ok(meta_client)
}
+
+/// Try to get the `[TableMetadataManagerRef]` from `[CatalogManager]` weak reference.
+pub fn table_meta_manager(
+ catalog_manager: &Weak<dyn CatalogManager>,
+) -> Result<Option<TableMetadataManagerRef>> {
+ let catalog_manager = catalog_manager
+ .upgrade()
+ .context(UpgradeWeakCatalogManagerRefSnafu)?;
+
+ Ok(catalog_manager
+ .as_any()
+ .downcast_ref::<KvBackendCatalogManager>()
+ .map(|manager| manager.table_metadata_manager_ref().clone()))
+}
diff --git a/src/common/meta/src/key/schema_name.rs b/src/common/meta/src/key/schema_name.rs
index 91c4c74bc104..5d1e3b5324b8 100644
--- a/src/common/meta/src/key/schema_name.rs
+++ b/src/common/meta/src/key/schema_name.rs
@@ -57,6 +57,17 @@ pub struct SchemaNameValue {
pub ttl: Option<Duration>,
}
+impl Display for SchemaNameValue {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ if let Some(ttl) = self.ttl {
+ let ttl = humantime::format_duration(ttl);
+ write!(f, "ttl='{ttl}'")?;
+ }
+
+ Ok(())
+ }
+}
+
impl TryFrom<&HashMap<String, String>> for SchemaNameValue {
type Error = Error;
@@ -233,6 +244,17 @@ mod tests {
use super::*;
use crate::kv_backend::memory::MemoryKvBackend;
+ #[test]
+ fn test_display_schema_value() {
+ let schema_value = SchemaNameValue { ttl: None };
+ assert_eq!("", schema_value.to_string());
+
+ let schema_value = SchemaNameValue {
+ ttl: Some(Duration::from_secs(9)),
+ };
+ assert_eq!("ttl='9s'", schema_value.to_string());
+ }
+
#[test]
fn test_serialization() {
let key = SchemaNameKey::new("my-catalog", "my-schema");
diff --git a/src/query/src/sql.rs b/src/query/src/sql.rs
index 038e26572df9..9b9963797672 100644
--- a/src/query/src/sql.rs
+++ b/src/query/src/sql.rs
@@ -66,6 +66,7 @@ use crate::planner::DfLogicalPlanner;
use crate::QueryEngineRef;
const SCHEMAS_COLUMN: &str = "Database";
+const OPTIONS_COLUMN: &str = "Options";
const TABLES_COLUMN: &str = "Tables";
const FIELD_COLUMN: &str = "Field";
const TABLE_TYPE_COLUMN: &str = "Table_type";
@@ -155,7 +156,14 @@ pub async fn show_databases(
catalog_manager: &CatalogManagerRef,
query_ctx: QueryContextRef,
) -> Result<Output> {
- let projects = vec![(schemata::SCHEMA_NAME, SCHEMAS_COLUMN)];
+ let projects = if stmt.full {
+ vec![
+ (schemata::SCHEMA_NAME, SCHEMAS_COLUMN),
+ (schemata::SCHEMA_OPTS, OPTIONS_COLUMN),
+ ]
+ } else {
+ vec![(schemata::SCHEMA_NAME, SCHEMAS_COLUMN)]
+ };
let filters = vec![col(schemata::CATALOG_NAME).eq(lit(query_ctx.current_catalog()))];
let like_field = Some(schemata::SCHEMA_NAME);
diff --git a/src/sql/src/parsers/show_parser.rs b/src/sql/src/parsers/show_parser.rs
index 5cf8b5fe3c3d..6e680419d8a1 100644
--- a/src/sql/src/parsers/show_parser.rs
+++ b/src/sql/src/parsers/show_parser.rs
@@ -32,7 +32,7 @@ impl<'a> ParserContext<'a> {
/// todo(hl) support `show settings`/`show create`/`show users` etc.
pub(crate) fn parse_show(&mut self) -> Result<Statement> {
if self.consume_token("DATABASES") || self.consume_token("SCHEMAS") {
- self.parse_show_databases()
+ self.parse_show_databases(false)
} else if self.matches_keyword(Keyword::TABLES) {
self.parser.next_token();
self.parse_show_tables(false)
@@ -75,6 +75,8 @@ impl<'a> ParserContext<'a> {
} else if self.consume_token("COLUMNS") || self.consume_token("FIELDS") {
// SHOW {COLUMNS | FIELDS}
self.parse_show_columns(true)
+ } else if self.consume_token("DATABASES") || self.consume_token("SCHEMAS") {
+ self.parse_show_databases(true)
} else {
self.unsupported(self.peek_token_as_string())
}
@@ -341,12 +343,13 @@ impl<'a> ParserContext<'a> {
}
/// Parses `SHOW DATABASES` statement.
- pub fn parse_show_databases(&mut self) -> Result<Statement> {
+ pub fn parse_show_databases(&mut self, full: bool) -> Result<Statement> {
let tok = self.parser.next_token().token;
match &tok {
- Token::EOF | Token::SemiColon => {
- Ok(Statement::ShowDatabases(ShowDatabases::new(ShowKind::All)))
- }
+ Token::EOF | Token::SemiColon => Ok(Statement::ShowDatabases(ShowDatabases::new(
+ ShowKind::All,
+ full,
+ ))),
Token::Word(w) => match w.keyword {
Keyword::LIKE => Ok(Statement::ShowDatabases(ShowDatabases::new(
ShowKind::Like(self.parse_identifier().with_context(|_| {
@@ -356,6 +359,7 @@ impl<'a> ParserContext<'a> {
actual: tok.to_string(),
}
})?),
+ full,
))),
Keyword::WHERE => Ok(Statement::ShowDatabases(ShowDatabases::new(
ShowKind::Where(self.parser.parse_expr().with_context(|_| {
@@ -365,6 +369,7 @@ impl<'a> ParserContext<'a> {
actual: self.peek_token_as_string(),
}
})?),
+ full,
))),
_ => self.unsupported(self.peek_token_as_string()),
},
@@ -395,7 +400,39 @@ mod tests {
assert_matches!(
&stmts[0],
Statement::ShowDatabases(ShowDatabases {
- kind: ShowKind::All
+ kind: ShowKind::All,
+ full: false,
+ })
+ );
+ }
+
+ #[test]
+ pub fn test_show_full_databases() {
+ let sql = "SHOW FULL DATABASES";
+ let result =
+ ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default());
+ let stmts = result.unwrap();
+ assert_eq!(1, stmts.len());
+
+ assert_matches!(
+ &stmts[0],
+ Statement::ShowDatabases(ShowDatabases {
+ kind: ShowKind::All,
+ full: true,
+ })
+ );
+
+ let sql = "SHOW FULL DATABASES LIKE 'test%'";
+ let result =
+ ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default());
+ let stmts = result.unwrap();
+ assert_eq!(1, stmts.len());
+
+ assert_matches!(
+ &stmts[0],
+ Statement::ShowDatabases(ShowDatabases {
+ kind: ShowKind::Like(_),
+ full: true,
})
);
}
@@ -414,7 +451,8 @@ mod tests {
kind: ShowKind::Like(sqlparser::ast::Ident {
value: _,
quote_style: None,
- })
+ }),
+ ..
})
);
}
@@ -434,7 +472,8 @@ mod tests {
left: _,
right: _,
op: sqlparser::ast::BinaryOperator::Or,
- })
+ }),
+ ..
})
);
}
diff --git a/src/sql/src/statements/show.rs b/src/sql/src/statements/show.rs
index 90dad65eaded..ff345f255fd8 100644
--- a/src/sql/src/statements/show.rs
+++ b/src/sql/src/statements/show.rs
@@ -40,6 +40,7 @@ impl Display for ShowKind {
#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
pub struct ShowDatabases {
pub kind: ShowKind,
+ pub full: bool,
}
/// The SQL `SHOW COLUMNS` statement
@@ -85,15 +86,20 @@ impl Display for ShowIndex {
impl ShowDatabases {
/// Creates a statement for `SHOW DATABASES`
- pub fn new(kind: ShowKind) -> Self {
- ShowDatabases { kind }
+ pub fn new(kind: ShowKind, full: bool) -> Self {
+ ShowDatabases { kind, full }
}
}
impl Display for ShowDatabases {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let kind = &self.kind;
- write!(f, r#"SHOW DATABASES {kind}"#)
+
+ if self.full {
+ write!(f, r#"SHOW FULL DATABASES {kind}"#)
+ } else {
+ write!(f, r#"SHOW DATABASES {kind}"#)
+ }
}
}
diff --git a/tests/cases/standalone/common/create/create_database.result b/tests/cases/standalone/common/create/create_database.result
index 8434c768d61f..9612b3115ec5 100644
--- a/tests/cases/standalone/common/create/create_database.result
+++ b/tests/cases/standalone/common/create/create_database.result
@@ -10,10 +10,6 @@ create database 'illegal-database';
Affected Rows: 1
-create database mydb with (ttl = '1h');
-
-Affected Rows: 1
-
show databases;
+--------------------+
@@ -22,7 +18,6 @@ show databases;
| greptime_private |
| illegal-database |
| information_schema |
-| mydb |
| public |
+--------------------+
@@ -30,7 +25,3 @@ drop database 'illegal-database';
Affected Rows: 0
-drop database mydb;
-
-Affected Rows: 0
-
diff --git a/tests/cases/standalone/common/create/create_database.sql b/tests/cases/standalone/common/create/create_database.sql
index bfbfe6b572eb..6341ce6f85d9 100644
--- a/tests/cases/standalone/common/create/create_database.sql
+++ b/tests/cases/standalone/common/create/create_database.sql
@@ -4,10 +4,6 @@ create database illegal-database;
create database 'illegal-database';
-create database mydb with (ttl = '1h');
-
show databases;
drop database 'illegal-database';
-
-drop database mydb;
diff --git a/tests/cases/standalone/common/create/create_database_opts.result b/tests/cases/standalone/common/create/create_database_opts.result
new file mode 100644
index 000000000000..c824290d715a
--- /dev/null
+++ b/tests/cases/standalone/common/create/create_database_opts.result
@@ -0,0 +1,70 @@
+CREATE DATABASE mydb WITH (ttl = '1h');
+
+Affected Rows: 1
+
+SHOW DATABASES;
+
++--------------------+
+| Database |
++--------------------+
+| greptime_private |
+| information_schema |
+| mydb |
+| public |
++--------------------+
+
+SHOW FULL DATABASES;
+
++--------------------+----------+
+| Database | Options |
++--------------------+----------+
+| greptime_private | |
+| information_schema | |
+| mydb | ttl='1h' |
+| public | |
++--------------------+----------+
+
+USE mydb;
+
+Affected Rows: 0
+
+CREATE TABLE test(host STRING, cpu DOUBLE, ts TIMESTAMP TIME INDEX);
+
+Affected Rows: 0
+
+SHOW CREATE TABLE test;
+
++-------+-------------------------------------+
+| Table | Create Table |
++-------+-------------------------------------+
+| test | CREATE TABLE IF NOT EXISTS "test" ( |
+| | "host" STRING NULL, |
+| | "cpu" DOUBLE NULL, |
+| | "ts" TIMESTAMP(3) NOT NULL, |
+| | TIME INDEX ("ts") |
+| | ) |
+| | |
+| | ENGINE=mito |
+| | WITH( |
+| | ttl = '1h' |
+| | ) |
++-------+-------------------------------------+
+
+USE public;
+
+Affected Rows: 0
+
+DROP DATABASE mydb;
+
+Affected Rows: 0
+
+SHOW DATABASES;
+
++--------------------+
+| Database |
++--------------------+
+| greptime_private |
+| information_schema |
+| public |
++--------------------+
+
diff --git a/tests/cases/standalone/common/create/create_database_opts.sql b/tests/cases/standalone/common/create/create_database_opts.sql
new file mode 100644
index 000000000000..fb2e58690ead
--- /dev/null
+++ b/tests/cases/standalone/common/create/create_database_opts.sql
@@ -0,0 +1,17 @@
+CREATE DATABASE mydb WITH (ttl = '1h');
+
+SHOW DATABASES;
+
+SHOW FULL DATABASES;
+
+USE mydb;
+
+CREATE TABLE test(host STRING, cpu DOUBLE, ts TIMESTAMP TIME INDEX);
+
+SHOW CREATE TABLE test;
+
+USE public;
+
+DROP DATABASE mydb;
+
+SHOW DATABASES;
diff --git a/tests/cases/standalone/common/show/show_databases_tables.result b/tests/cases/standalone/common/show/show_databases_tables.result
index e73d5b749def..7b6dfb7a1662 100644
--- a/tests/cases/standalone/common/show/show_databases_tables.result
+++ b/tests/cases/standalone/common/show/show_databases_tables.result
@@ -1,4 +1,4 @@
-show databases;
+SHOW DATABASES;
+--------------------+
| Database |
@@ -8,11 +8,21 @@ show databases;
| public |
+--------------------+
-use information_schema;
+SHOW FULL DATABASES;
+
++--------------------+---------+
+| Database | Options |
++--------------------+---------+
+| greptime_private | |
+| information_schema | |
+| public | |
++--------------------+---------+
+
+USE information_schema;
Affected Rows: 0
-show tables;
+SHOW TABLES;
+---------------------------------------+
| Tables |
@@ -48,7 +58,7 @@ show tables;
| triggers |
+---------------------------------------+
-use public;
+USE public;
Affected Rows: 0
diff --git a/tests/cases/standalone/common/show/show_databases_tables.sql b/tests/cases/standalone/common/show/show_databases_tables.sql
index 150f341b84a6..f5e46a2849d4 100644
--- a/tests/cases/standalone/common/show/show_databases_tables.sql
+++ b/tests/cases/standalone/common/show/show_databases_tables.sql
@@ -1,7 +1,9 @@
-show databases;
+SHOW DATABASES;
-use information_schema;
+SHOW FULL DATABASES;
-show tables;
+USE information_schema;
-use public;
+SHOW TABLES;
+
+USE public;
diff --git a/tests/cases/standalone/common/system/information_schema.result b/tests/cases/standalone/common/system/information_schema.result
index cbe6e2d39f18..77077b9a3a0f 100644
--- a/tests/cases/standalone/common/system/information_schema.result
+++ b/tests/cases/standalone/common/system/information_schema.result
@@ -322,6 +322,7 @@ select * from information_schema.columns order by table_schema, table_name, colu
| greptime | information_schema | schemata | catalog_name | 1 | 2147483647 | 2147483647 | | | | utf8 | utf8_bin | | | select,insert | | String | string | FIELD | | No | string | | |
| greptime | information_schema | schemata | default_character_set_name | 3 | 2147483647 | 2147483647 | | | | utf8 | utf8_bin | | | select,insert | | String | string | FIELD | | No | string | | |
| greptime | information_schema | schemata | default_collation_name | 4 | 2147483647 | 2147483647 | | | | utf8 | utf8_bin | | | select,insert | | String | string | FIELD | | No | string | | |
+| greptime | information_schema | schemata | options | 6 | 2147483647 | 2147483647 | | | | utf8 | utf8_bin | | | select,insert | | String | string | FIELD | | Yes | string | | |
| greptime | information_schema | schemata | schema_name | 2 | 2147483647 | 2147483647 | | | | utf8 | utf8_bin | | | select,insert | | String | string | FIELD | | No | string | | |
| greptime | information_schema | schemata | sql_path | 5 | 2147483647 | 2147483647 | | | | utf8 | utf8_bin | | | select,insert | | String | string | FIELD | | Yes | string | | |
| greptime | information_schema | session_status | variable_name | 1 | 2147483647 | 2147483647 | | | | utf8 | utf8_bin | | | select,insert | | String | string | FIELD | | No | string | | |
@@ -528,16 +529,17 @@ desc table schemata;
| default_character_set_name | String | | NO | | FIELD |
| default_collation_name | String | | NO | | FIELD |
| sql_path | String | | YES | | FIELD |
+| options | String | | YES | | FIELD |
+----------------------------+--------+-----+------+---------+---------------+
select * from schemata where catalog_name = 'greptime' and schema_name != 'public' order by catalog_name, schema_name;
-+--------------+--------------------+----------------------------+------------------------+----------+
-| catalog_name | schema_name | default_character_set_name | default_collation_name | sql_path |
-+--------------+--------------------+----------------------------+------------------------+----------+
-| greptime | greptime_private | utf8 | utf8_bin | |
-| greptime | information_schema | utf8 | utf8_bin | |
-+--------------+--------------------+----------------------------+------------------------+----------+
++--------------+--------------------+----------------------------+------------------------+----------+---------+
+| catalog_name | schema_name | default_character_set_name | default_collation_name | sql_path | options |
++--------------+--------------------+----------------------------+------------------------+----------+---------+
+| greptime | greptime_private | utf8 | utf8_bin | | |
+| greptime | information_schema | utf8 | utf8_bin | | |
++--------------+--------------------+----------------------------+------------------------+----------+---------+
-- test engines
select * from engines;
|
feat
|
show database options (#4174)
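As a quick illustration of the `Options` column that `SHOW FULL DATABASES` surfaces in the diff above, here is a minimal std-only Rust sketch of the rendering pattern. `SchemaOptions` is a made-up stand-in for the real `SchemaNameValue`, and plain seconds replace the humantime formatting (which produces strings like "ttl='1h'") to keep the sketch dependency-free.

use std::fmt;
use std::time::Duration;

// Illustrative stand-in: renders the database options string shown in the
// Options column, leaving it empty when no options are set.
struct SchemaOptions {
    ttl: Option<Duration>,
}

impl fmt::Display for SchemaOptions {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        if let Some(ttl) = self.ttl {
            // The actual code formats the duration with humantime; raw seconds
            // are used here only to avoid an external dependency.
            write!(f, "ttl='{}s'", ttl.as_secs())?;
        }
        Ok(())
    }
}

fn main() {
    assert_eq!(SchemaOptions { ttl: None }.to_string(), "");
    assert_eq!(
        SchemaOptions { ttl: Some(Duration::from_secs(3600)) }.to_string(),
        "ttl='3600s'"
    );
}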
|
4af126eb1b2bdec9e53a3f4856270490108640f0
|
2023-09-12 18:27:15
|
Zhenchi
|
feat: consolidate Insert request related partitioning and distributed processing operations into Inserter (#2346)
| false
|
diff --git a/src/client/src/region_handler.rs b/src/client/src/region_handler.rs
index 9238389f6d0a..a3977d8fd6b3 100644
--- a/src/client/src/region_handler.rs
+++ b/src/client/src/region_handler.rs
@@ -14,22 +14,14 @@
use std::sync::Arc;
-use api::v1::region::{region_request, QueryRequest};
+use api::v1::region::QueryRequest;
use async_trait::async_trait;
-use common_meta::datanode_manager::AffectedRows;
use common_recordbatch::SendableRecordBatchStream;
-use session::context::QueryContextRef;
use crate::error::Result;
#[async_trait]
pub trait RegionRequestHandler: Send + Sync {
- async fn handle(
- &self,
- request: region_request::Body,
- ctx: QueryContextRef,
- ) -> Result<AffectedRows>;
-
// TODO(ruihang): add trace id and span id in the request.
async fn do_get(&self, request: QueryRequest) -> Result<SendableRecordBatchStream>;
}
diff --git a/src/common/meta/src/ddl_manager.rs b/src/common/meta/src/ddl_manager.rs
index 1814b38645d5..b4787611d71e 100644
--- a/src/common/meta/src/ddl_manager.rs
+++ b/src/common/meta/src/ddl_manager.rs
@@ -49,7 +49,7 @@ pub struct DdlManager {
datanode_manager: DatanodeManagerRef,
cache_invalidator: CacheInvalidatorRef,
table_metadata_manager: TableMetadataManagerRef,
- table_creator: TableMetadataAllocatorRef,
+ table_meta_allocator: TableMetadataAllocatorRef,
}
impl DdlManager {
@@ -58,14 +58,14 @@ impl DdlManager {
datanode_clients: DatanodeManagerRef,
cache_invalidator: CacheInvalidatorRef,
table_metadata_manager: TableMetadataManagerRef,
- table_creator: TableMetadataAllocatorRef,
+ table_meta_allocator: TableMetadataAllocatorRef,
) -> Self {
Self {
procedure_manager,
datanode_manager: datanode_clients,
cache_invalidator,
table_metadata_manager,
- table_creator,
+ table_meta_allocator,
}
}
@@ -333,7 +333,7 @@ async fn handle_create_table_task(
mut create_table_task: CreateTableTask,
) -> Result<SubmitDdlTaskResponse> {
let (table_id, region_routes) = ddl_manager
- .table_creator
+ .table_meta_allocator
.create(
&TableMetadataAllocatorContext { cluster_id },
&mut create_table_task.table_info,
diff --git a/src/frontend/src/delete.rs b/src/frontend/src/delete.rs
index 3c9c6850865d..ec88b22346f5 100644
--- a/src/frontend/src/delete.rs
+++ b/src/frontend/src/delete.rs
@@ -12,37 +12,49 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::collections::HashSet;
+use std::collections::{HashMap, HashSet};
+use std::sync::Arc;
use std::{iter, mem};
-use api::v1::region::region_request;
-use api::v1::{DeleteRequests, RowDeleteRequest, RowDeleteRequests};
-use catalog::CatalogManager;
-use client::region_handler::RegionRequestHandler;
+use api::v1::region::{DeleteRequests as RegionDeleteRequests, RegionRequestHeader};
+use api::v1::{DeleteRequests, RowDeleteRequests};
+use catalog::CatalogManagerRef;
+use common_meta::datanode_manager::{AffectedRows, DatanodeManagerRef};
+use common_meta::peer::Peer;
use common_query::Output;
+use futures_util::future;
+use metrics::counter;
+use partition::manager::PartitionRuleManagerRef;
use session::context::QueryContextRef;
use snafu::{ensure, OptionExt, ResultExt};
+use table::requests::DeleteRequest as TableDeleteRequest;
use table::TableRef;
use crate::error::{
- CatalogSnafu, InvalidDeleteRequestSnafu, MissingTimeIndexColumnSnafu, RequestDatanodeSnafu,
- Result, TableNotFoundSnafu,
+ CatalogSnafu, FindRegionLeaderSnafu, InvalidDeleteRequestSnafu, JoinTaskSnafu,
+ MissingTimeIndexColumnSnafu, RequestDeletesSnafu, Result, TableNotFoundSnafu,
};
-use crate::req_convert::delete::{ColumnToRow, RowToRegion};
+use crate::region_req_factory::RegionRequestFactory;
+use crate::req_convert::delete::{ColumnToRow, RowToRegion, TableToRegion};
-pub(crate) struct Deleter<'a> {
- catalog_manager: &'a dyn CatalogManager,
- region_request_handler: &'a dyn RegionRequestHandler,
+pub struct Deleter {
+ catalog_manager: CatalogManagerRef,
+ partition_manager: PartitionRuleManagerRef,
+ datanode_manager: DatanodeManagerRef,
}
-impl<'a> Deleter<'a> {
+pub type DeleterRef = Arc<Deleter>;
+
+impl Deleter {
pub fn new(
- catalog_manager: &'a dyn CatalogManager,
- region_request_handler: &'a dyn RegionRequestHandler,
+ catalog_manager: CatalogManagerRef,
+ partition_manager: PartitionRuleManagerRef,
+ datanode_manager: DatanodeManagerRef,
) -> Self {
Self {
catalog_manager,
- region_request_handler,
+ partition_manager,
+ datanode_manager,
}
}
@@ -67,31 +79,99 @@ impl<'a> Deleter<'a> {
.map(|r| !r.rows.is_empty())
.unwrap_or_default()
});
- validate_row_count_match(&requests)?;
+ validate_column_count_match(&requests)?;
let requests = self.trim_columns(requests, &ctx).await?;
- let region_request = RowToRegion::new(self.catalog_manager, &ctx)
- .convert(requests)
- .await?;
- let region_request = region_request::Body::Deletes(region_request);
+ let deletes = RowToRegion::new(
+ self.catalog_manager.as_ref(),
+ self.partition_manager.as_ref(),
+ &ctx,
+ )
+ .convert(requests)
+ .await?;
- let affected_rows = self
- .region_request_handler
- .handle(region_request, ctx)
- .await
- .context(RequestDatanodeSnafu)?;
+ let affected_rows = self.do_request(deletes, ctx.trace_id(), 0).await?;
Ok(Output::AffectedRows(affected_rows as _))
}
+
+ pub async fn handle_table_delete(
+ &self,
+ request: TableDeleteRequest,
+ ctx: QueryContextRef,
+ ) -> Result<AffectedRows> {
+ let catalog = request.catalog_name.as_str();
+ let schema = request.schema_name.as_str();
+ let table = request.table_name.as_str();
+ let table = self.get_table(catalog, schema, table).await?;
+ let table_info = table.table_info();
+
+ let deletes = TableToRegion::new(&table_info, &self.partition_manager)
+ .convert(request)
+ .await?;
+ self.do_request(deletes, ctx.trace_id(), 0).await
+ }
}
-impl<'a> Deleter<'a> {
+impl Deleter {
+ async fn do_request(
+ &self,
+ requests: RegionDeleteRequests,
+ trace_id: u64,
+ span_id: u64,
+ ) -> Result<AffectedRows> {
+ let header = RegionRequestHeader { trace_id, span_id };
+ let request_factory = RegionRequestFactory::new(header);
+
+ let tasks = self
+ .group_requests_by_peer(requests)
+ .await?
+ .into_iter()
+ .map(|(peer, deletes)| {
+ let request = request_factory.build_delete(deletes);
+ let datanode_manager = self.datanode_manager.clone();
+ common_runtime::spawn_write(async move {
+ datanode_manager
+ .datanode(&peer)
+ .await
+ .handle(request)
+ .await
+ .context(RequestDeletesSnafu)
+ })
+ });
+ let results = future::try_join_all(tasks).await.context(JoinTaskSnafu)?;
+
+ let affected_rows = results.into_iter().sum::<Result<u64>>()?;
+ counter!(crate::metrics::DIST_DELETE_ROW_COUNT, affected_rows);
+ Ok(affected_rows)
+ }
+
+ async fn group_requests_by_peer(
+ &self,
+ requests: RegionDeleteRequests,
+ ) -> Result<HashMap<Peer, RegionDeleteRequests>> {
+ let mut deletes: HashMap<Peer, RegionDeleteRequests> = HashMap::new();
+
+ for req in requests.requests {
+ let peer = self
+ .partition_manager
+ .find_region_leader(req.region_id.into())
+ .await
+ .context(FindRegionLeaderSnafu)?;
+ deletes.entry(peer).or_default().requests.push(req);
+ }
+
+ Ok(deletes)
+ }
+
async fn trim_columns(
&self,
mut requests: RowDeleteRequests,
ctx: &QueryContextRef,
) -> Result<RowDeleteRequests> {
for req in &mut requests.deletes {
- let table = self.get_table(req, ctx).await?;
+ let catalog = ctx.current_catalog();
+ let schema = ctx.current_schema();
+ let table = self.get_table(catalog, schema, &req.table_name).await?;
let key_column_names = self.key_column_names(&table)?;
let rows = req.rows.as_mut().unwrap();
@@ -142,25 +222,25 @@ impl<'a> Deleter<'a> {
Ok(key_column_names)
}
- async fn get_table(&self, req: &RowDeleteRequest, ctx: &QueryContextRef) -> Result<TableRef> {
+ async fn get_table(&self, catalog: &str, schema: &str, table: &str) -> Result<TableRef> {
self.catalog_manager
- .table(ctx.current_catalog(), ctx.current_schema(), &req.table_name)
+ .table(catalog, schema, table)
.await
.context(CatalogSnafu)?
.with_context(|| TableNotFoundSnafu {
- table_name: req.table_name.clone(),
+ table_name: common_catalog::format_full_table_name(catalog, schema, table),
})
}
}
-fn validate_row_count_match(requests: &RowDeleteRequests) -> Result<()> {
+fn validate_column_count_match(requests: &RowDeleteRequests) -> Result<()> {
for request in &requests.deletes {
let rows = request.rows.as_ref().unwrap();
let column_count = rows.schema.len();
ensure!(
rows.rows.iter().all(|r| r.values.len() == column_count),
InvalidDeleteRequestSnafu {
- reason: "row count mismatch"
+ reason: "column count mismatch"
}
)
}
diff --git a/src/frontend/src/error.rs b/src/frontend/src/error.rs
index 3fd905577759..5bbe13042ccb 100644
--- a/src/frontend/src/error.rs
+++ b/src/frontend/src/error.rs
@@ -258,6 +258,12 @@ pub enum Error {
location: Location,
},
+ #[snafu(display("Failed to find leader for region, source: {}", source))]
+ FindRegionLeader {
+ source: partition::error::Error,
+ location: Location,
+ },
+
#[snafu(display("Failed to create table info, source: {}", source))]
CreateTableInfo {
#[snafu(backtrace)]
@@ -683,6 +689,9 @@ pub enum Error {
column,
))]
ColumnNoneDefaultValue { column: String, location: Location },
+
+ #[snafu(display("Invalid region request, reason: {}", reason))]
+ InvalidRegionRequest { reason: String },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -761,7 +770,8 @@ impl ErrorExt for Error {
| Error::BuildDfLogicalPlan { .. }
| Error::BuildTableMeta { .. }
| Error::VectorToGrpcColumn { .. }
- | Error::MissingInsertBody { .. } => StatusCode::Internal,
+ | Error::MissingInsertBody { .. }
+ | Error::InvalidRegionRequest { .. } => StatusCode::Internal,
Error::IncompleteGrpcResult { .. }
| Error::ContextValueNotFound { .. }
@@ -808,7 +818,8 @@ impl ErrorExt for Error {
| Error::FindTablePartitionRule { source, .. }
| Error::FindTableRoute { source, .. }
| Error::SplitInsert { source, .. }
- | Error::SplitDelete { source, .. } => source.status_code(),
+ | Error::SplitDelete { source, .. }
+ | Error::FindRegionLeader { source, .. } => source.status_code(),
Error::UnrecognizedTableOption { .. } => StatusCode::InvalidArguments,
diff --git a/src/frontend/src/insert.rs b/src/frontend/src/insert.rs
index b5c82a5da041..a37809cc2cfb 100644
--- a/src/frontend/src/insert.rs
+++ b/src/frontend/src/insert.rs
@@ -12,48 +12,59 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::collections::HashMap;
+use std::sync::Arc;
+
use api::v1::alter_expr::Kind;
-use api::v1::region::region_request;
-use api::v1::{AlterExpr, ColumnSchema, InsertRequests, RowInsertRequest, RowInsertRequests};
-use catalog::CatalogManager;
-use client::region_handler::RegionRequestHandler;
+use api::v1::region::{InsertRequests as RegionInsertRequests, RegionRequestHeader};
+use api::v1::{
+ AlterExpr, ColumnSchema, CreateTableExpr, InsertRequests, RowInsertRequest, RowInsertRequests,
+};
+use catalog::CatalogManagerRef;
use common_catalog::consts::default_engine;
use common_grpc_expr::util::{extract_new_columns, ColumnExpr};
+use common_meta::datanode_manager::{AffectedRows, DatanodeManagerRef};
+use common_meta::peer::Peer;
use common_query::Output;
-use common_telemetry::info;
+use common_telemetry::{error, info};
use datatypes::schema::Schema;
+use futures_util::future;
+use metrics::counter;
+use partition::manager::PartitionRuleManagerRef;
use session::context::QueryContextRef;
use snafu::prelude::*;
+use sql::statements::insert::Insert;
use table::engine::TableReference;
+use table::requests::InsertRequest as TableInsertRequest;
use table::TableRef;
use crate::error::{
- CatalogSnafu, FindNewColumnsOnInsertionSnafu, InvalidInsertRequestSnafu, RequestDatanodeSnafu,
- Result,
+ CatalogSnafu, FindNewColumnsOnInsertionSnafu, FindRegionLeaderSnafu, InvalidInsertRequestSnafu,
+ JoinTaskSnafu, RequestInsertsSnafu, Result, TableNotFoundSnafu,
};
use crate::expr_factory::CreateExprFactory;
-use crate::req_convert::insert::{ColumnToRow, RowToRegion};
+use crate::region_req_factory::RegionRequestFactory;
+use crate::req_convert::insert::{ColumnToRow, RowToRegion, StatementToRegion, TableToRegion};
use crate::statement::StatementExecutor;
-pub(crate) struct Inserter<'a> {
- catalog_manager: &'a dyn CatalogManager,
- create_expr_factory: &'a CreateExprFactory,
- statement_executor: &'a StatementExecutor,
- region_request_handler: &'a dyn RegionRequestHandler,
+pub struct Inserter {
+ catalog_manager: CatalogManagerRef,
+ partition_manager: PartitionRuleManagerRef,
+ datanode_manager: DatanodeManagerRef,
}
-impl<'a> Inserter<'a> {
+pub type InserterRef = Arc<Inserter>;
+
+impl Inserter {
pub fn new(
- catalog_manager: &'a dyn CatalogManager,
- create_expr_factory: &'a CreateExprFactory,
- statement_executor: &'a StatementExecutor,
- region_request_handler: &'a dyn RegionRequestHandler,
+ catalog_manager: CatalogManagerRef,
+ partition_manager: PartitionRuleManagerRef,
+ datanode_manager: DatanodeManagerRef,
) -> Self {
Self {
catalog_manager,
- create_expr_factory,
- statement_executor,
- region_request_handler,
+ partition_manager,
+ datanode_manager,
}
}
@@ -61,15 +72,18 @@ impl<'a> Inserter<'a> {
&self,
requests: InsertRequests,
ctx: QueryContextRef,
+ statement_executor: &StatementExecutor,
) -> Result<Output> {
let row_inserts = ColumnToRow::convert(requests)?;
- self.handle_row_inserts(row_inserts, ctx).await
+ self.handle_row_inserts(row_inserts, ctx, statement_executor)
+ .await
}
pub async fn handle_row_inserts(
&self,
mut requests: RowInsertRequests,
ctx: QueryContextRef,
+ statement_executor: &StatementExecutor,
) -> Result<Output> {
// remove empty requests
requests.inserts.retain(|req| {
@@ -78,25 +92,110 @@ impl<'a> Inserter<'a> {
.map(|r| !r.rows.is_empty())
.unwrap_or_default()
});
- validate_row_count_match(&requests)?;
+ validate_column_count_match(&requests)?;
- self.create_or_alter_tables_on_demand(&requests, &ctx)
+ self.create_or_alter_tables_on_demand(&requests, &ctx, statement_executor)
.await?;
- let region_request = RowToRegion::new(self.catalog_manager, &ctx)
- .convert(requests)
+ let inserts = RowToRegion::new(
+ self.catalog_manager.as_ref(),
+ self.partition_manager.as_ref(),
+ &ctx,
+ )
+ .convert(requests)
+ .await?;
+
+ let affected_rows = self.do_request(inserts, ctx.trace_id(), 0).await?;
+ Ok(Output::AffectedRows(affected_rows as _))
+ }
+
+ pub async fn handle_table_insert(
+ &self,
+ request: TableInsertRequest,
+ ctx: QueryContextRef,
+ ) -> Result<usize> {
+ let catalog = request.catalog_name.as_str();
+ let schema = request.schema_name.as_str();
+ let table_name = request.table_name.as_str();
+ let table = self.get_table(catalog, schema, table_name).await?;
+ let table = table.with_context(|| TableNotFoundSnafu {
+ table_name: common_catalog::format_full_table_name(catalog, schema, table_name),
+ })?;
+ let table_info = table.table_info();
+
+ let inserts = TableToRegion::new(&table_info, &self.partition_manager)
+ .convert(request)
.await?;
- let region_request = region_request::Body::Inserts(region_request);
- let affected_rows = self
- .region_request_handler
- .handle(region_request, ctx)
- .await
- .context(RequestDatanodeSnafu)?;
+ let affected_rows = self.do_request(inserts, ctx.trace_id(), 0).await?;
+ Ok(affected_rows as _)
+ }
+
+ pub async fn handle_statement_insert(
+ &self,
+ insert: &Insert,
+ ctx: &QueryContextRef,
+ ) -> Result<Output> {
+ let inserts =
+ StatementToRegion::new(self.catalog_manager.as_ref(), &self.partition_manager, ctx)
+ .convert(insert)
+ .await?;
+
+ let affected_rows = self.do_request(inserts, ctx.trace_id(), 0).await?;
Ok(Output::AffectedRows(affected_rows as _))
}
}
-impl<'a> Inserter<'a> {
+impl Inserter {
+ async fn do_request(
+ &self,
+ requests: RegionInsertRequests,
+ trace_id: u64,
+ span_id: u64,
+ ) -> Result<AffectedRows> {
+ let header = RegionRequestHeader { trace_id, span_id };
+ let request_factory = RegionRequestFactory::new(header);
+
+ let tasks = self
+ .group_requests_by_peer(requests)
+ .await?
+ .into_iter()
+ .map(|(peer, inserts)| {
+ let request = request_factory.build_insert(inserts);
+ let datanode_manager = self.datanode_manager.clone();
+ common_runtime::spawn_write(async move {
+ datanode_manager
+ .datanode(&peer)
+ .await
+ .handle(request)
+ .await
+ .context(RequestInsertsSnafu)
+ })
+ });
+ let results = future::try_join_all(tasks).await.context(JoinTaskSnafu)?;
+
+ let affected_rows = results.into_iter().sum::<Result<u64>>()?;
+ counter!(crate::metrics::DIST_INGEST_ROW_COUNT, affected_rows);
+ Ok(affected_rows)
+ }
+
+ async fn group_requests_by_peer(
+ &self,
+ requests: RegionInsertRequests,
+ ) -> Result<HashMap<Peer, RegionInsertRequests>> {
+ let mut inserts: HashMap<Peer, RegionInsertRequests> = HashMap::new();
+
+ for req in requests.requests {
+ let peer = self
+ .partition_manager
+ .find_region_leader(req.region_id.into())
+ .await
+ .context(FindRegionLeaderSnafu)?;
+ inserts.entry(peer).or_default().requests.push(req);
+ }
+
+ Ok(inserts)
+ }
+
// check if tables already exist:
// - if table does not exist, create table by inferred CreateExpr
// - if table exist, check if schema matches. If any new column found, alter table by inferred `AlterExpr`
@@ -104,15 +203,20 @@ impl<'a> Inserter<'a> {
&self,
requests: &RowInsertRequests,
ctx: &QueryContextRef,
+ statement_executor: &StatementExecutor,
) -> Result<()> {
// TODO(jeremy): create and alter in batch?
for req in &requests.inserts {
- match self.get_table(req, ctx).await? {
+ let catalog = ctx.current_catalog();
+ let schema = ctx.current_schema();
+ let table = self.get_table(catalog, schema, &req.table_name).await?;
+ match table {
Some(table) => {
validate_request_with_table(req, &table)?;
- self.alter_table_on_demand(req, table, ctx).await?
+ self.alter_table_on_demand(req, table, ctx, statement_executor)
+ .await?
}
- None => self.create_table(req, ctx).await?,
+ None => self.create_table(req, ctx, statement_executor).await?,
}
}
@@ -121,11 +225,12 @@ impl<'a> Inserter<'a> {
async fn get_table(
&self,
- req: &RowInsertRequest,
- ctx: &QueryContextRef,
+ catalog: &str,
+ schema: &str,
+ table: &str,
) -> Result<Option<TableRef>> {
self.catalog_manager
- .table(ctx.current_catalog(), ctx.current_schema(), &req.table_name)
+ .table(catalog, schema, table)
.await
.context(CatalogSnafu)
}
@@ -135,9 +240,11 @@ impl<'a> Inserter<'a> {
req: &RowInsertRequest,
table: TableRef,
ctx: &QueryContextRef,
+ statement_executor: &StatementExecutor,
) -> Result<()> {
let catalog_name = ctx.current_catalog();
let schema_name = ctx.current_schema();
+ let table_name = table.table_info().name.clone();
let request_schema = req.rows.as_ref().unwrap().schema.as_slice();
let column_exprs = ColumnExpr::from_column_schemas(request_schema);
@@ -147,7 +254,6 @@ impl<'a> Inserter<'a> {
return Ok(());
};
- let table_name = table.table_info().name.clone();
info!(
"Adding new columns: {:?} to table: {}.{}.{}",
add_columns, catalog_name, schema_name, table_name
@@ -160,54 +266,75 @@ impl<'a> Inserter<'a> {
kind: Some(Kind::AddColumns(add_columns)),
};
- self.statement_executor
- .alter_table_inner(alter_table_expr)
- .await?;
+ let res = statement_executor.alter_table_inner(alter_table_expr).await;
- info!(
- "Successfully added new columns to table: {}.{}.{}",
- catalog_name, schema_name, table_name
- );
-
- Ok(())
+ match res {
+ Ok(_) => {
+ info!(
+ "Successfully added new columns to table: {}.{}.{}",
+ catalog_name, schema_name, table_name
+ );
+ Ok(())
+ }
+ Err(err) => {
+ error!(
+ "Failed to add new columns to table: {}.{}.{}: {}",
+ catalog_name, schema_name, table_name, err
+ );
+ Err(err)
+ }
+ }
}
- async fn create_table(&self, req: &RowInsertRequest, ctx: &QueryContextRef) -> Result<()> {
+ async fn create_table(
+ &self,
+ req: &RowInsertRequest,
+ ctx: &QueryContextRef,
+ statement_executor: &StatementExecutor,
+ ) -> Result<()> {
let table_ref =
TableReference::full(ctx.current_catalog(), ctx.current_schema(), &req.table_name);
+
let request_schema = req.rows.as_ref().unwrap().schema.as_slice();
+ let create_table_expr = &mut build_create_table_expr(&table_ref, request_schema)?;
info!(
"Table {}.{}.{} does not exist, try create table",
table_ref.catalog, table_ref.schema, table_ref.table,
);
- let mut create_table_expr = self
- .create_expr_factory
- .create_table_expr_by_column_schemas(&table_ref, request_schema, default_engine())?;
-
// TODO(weny): multiple regions table.
- self.statement_executor
- .create_table_inner(&mut create_table_expr, None)
- .await?;
-
- info!(
- "Successfully created table on insertion: {}.{}.{}",
- table_ref.catalog, table_ref.schema, table_ref.table,
- );
-
- Ok(())
+ let res = statement_executor
+ .create_table_inner(create_table_expr, None)
+ .await;
+
+ match res {
+ Ok(_) => {
+ info!(
+ "Successfully created table {}.{}.{}",
+ table_ref.catalog, table_ref.schema, table_ref.table,
+ );
+ Ok(())
+ }
+ Err(err) => {
+ error!(
+ "Failed to create table {}.{}.{}: {}",
+ table_ref.catalog, table_ref.schema, table_ref.table, err
+ );
+ Err(err)
+ }
+ }
}
}
-fn validate_row_count_match(requests: &RowInsertRequests) -> Result<()> {
+fn validate_column_count_match(requests: &RowInsertRequests) -> Result<()> {
for request in &requests.inserts {
let rows = request.rows.as_ref().unwrap();
let column_count = rows.schema.len();
ensure!(
rows.rows.iter().all(|r| r.values.len() == column_count),
InvalidInsertRequestSnafu {
- reason: "row count mismatch"
+ reason: "column count mismatch"
}
)
}
@@ -243,6 +370,13 @@ fn validate_required_columns(request_schema: &[ColumnSchema], table_schema: &Sch
Ok(())
}
+fn build_create_table_expr(
+ table: &TableReference,
+ request_schema: &[ColumnSchema],
+) -> Result<CreateTableExpr> {
+ CreateExprFactory.create_table_expr_by_column_schemas(table, request_schema, default_engine())
+}
+
#[cfg(test)]
mod tests {
use datatypes::prelude::{ConcreteDataType, Value as DtValue};
diff --git a/src/frontend/src/instance.rs b/src/frontend/src/instance.rs
index fdb7daa18773..1d1e02fa18e2 100644
--- a/src/frontend/src/instance.rs
+++ b/src/frontend/src/instance.rs
@@ -25,14 +25,12 @@ use std::sync::Arc;
use std::time::Duration;
use api::v1::meta::Role;
-use api::v1::{DeleteRequests, InsertRequests, RowDeleteRequests, RowInsertRequests};
use async_trait::async_trait;
use auth::{PermissionChecker, PermissionCheckerRef, PermissionReq};
use catalog::local::manager::SystemTableInitializer;
use catalog::remote::CachedMetaKvBackend;
use catalog::CatalogManagerRef;
use client::client_manager::DatanodeClients;
-use client::region_handler::RegionRequestHandlerRef;
use common_base::Plugins;
use common_config::KvStoreConfig;
use common_error::ext::BoxedError;
@@ -53,6 +51,7 @@ use common_telemetry::{error, timer};
use datanode::region_server::RegionServer;
use log_store::raft_engine::RaftEngineBackend;
use meta_client::client::{MetaClient, MetaClientBuilder};
+use partition::manager::PartitionRuleManager;
use query::parser::{PromQuery, QueryLanguageParser, QueryStatement};
use query::plan::LogicalPlan;
use query::query_engine::options::{validate_catalog_and_schema, QueryOptions};
@@ -82,19 +81,18 @@ pub use standalone::StandaloneDatanodeManager;
use table::engine::manager::MemoryTableEngineManager;
use self::distributed::DistRegionRequestHandler;
-use self::standalone::{StandaloneRegionRequestHandler, StandaloneTableMetadataCreator};
+use self::standalone::StandaloneTableMetadataCreator;
use crate::catalog::FrontendCatalogManager;
-use crate::delete::Deleter;
+use crate::delete::{Deleter, DeleterRef};
use crate::error::{
self, CatalogSnafu, Error, ExecLogicalPlanSnafu, ExecutePromqlSnafu, ExternalSnafu,
MissingMetasrvOptsSnafu, ParseSqlSnafu, PermissionSnafu, PlanStatementSnafu, Result,
SqlExecInterceptedSnafu,
};
-use crate::expr_factory::CreateExprFactory;
use crate::frontend::FrontendOptions;
use crate::heartbeat::handler::invalidate_table_cache::InvalidateTableCacheHandler;
use crate::heartbeat::HeartbeatTask;
-use crate::insert::Inserter;
+use crate::insert::{Inserter, InserterRef};
use crate::metrics;
use crate::script::ScriptExecutor;
use crate::server::{start_server, ServerHandlers, Services};
@@ -127,13 +125,13 @@ pub struct Instance {
script_executor: Arc<ScriptExecutor>,
statement_executor: Arc<StatementExecutor>,
query_engine: QueryEngineRef,
- region_request_handler: RegionRequestHandlerRef,
- create_expr_factory: CreateExprFactory,
/// plugins: this map holds extensions to customize query or auth
/// behaviours.
plugins: Arc<Plugins>,
servers: Arc<ServerHandlers>,
heartbeat_task: Option<HeartbeatTask>,
+ inserter: InserterRef,
+ deleter: DeleterRef,
}
impl Instance {
@@ -172,15 +170,27 @@ impl Instance {
)
.query_engine();
- let region_request_handler = DistRegionRequestHandler::arc(catalog_manager.clone());
+ let partition_manager = Arc::new(PartitionRuleManager::new(meta_backend.clone()));
+
+ let inserter = Arc::new(Inserter::new(
+ catalog_manager.clone(),
+ partition_manager.clone(),
+ datanode_clients.clone(),
+ ));
+ let deleter = Arc::new(Deleter::new(
+ catalog_manager.clone(),
+ partition_manager,
+ datanode_clients,
+ ));
let statement_executor = Arc::new(StatementExecutor::new(
catalog_manager.clone(),
query_engine.clone(),
- region_request_handler.clone(),
meta_client.clone(),
meta_backend.clone(),
catalog_manager.clone(),
+ inserter.clone(),
+ deleter.clone(),
));
plugins.insert::<StatementExecutorRef>(statement_executor.clone());
@@ -194,25 +204,23 @@ impl Instance {
]);
let heartbeat_task = Some(HeartbeatTask::new(
- meta_client,
+ meta_client.clone(),
opts.heartbeat.clone(),
Arc::new(handlers_executor),
));
common_telemetry::init_node_id(opts.node_id.clone());
- let create_expr_factory = CreateExprFactory;
-
Ok(Instance {
catalog_manager,
script_executor,
- create_expr_factory,
statement_executor,
query_engine,
- region_request_handler,
plugins: plugins.clone(),
servers: Arc::new(HashMap::new()),
heartbeat_task,
+ inserter,
+ deleter,
})
}
@@ -293,40 +301,51 @@ impl Instance {
let script_executor =
Arc::new(ScriptExecutor::new(catalog_manager.clone(), query_engine.clone()).await?);
- let region_request_handler = StandaloneRegionRequestHandler::arc(region_server.clone());
-
let table_metadata_manager = Arc::new(TableMetadataManager::new(kv_backend.clone()));
+ let datanode_manager = Arc::new(StandaloneDatanodeManager(region_server));
let cache_invalidator = Arc::new(DummyCacheInvalidator);
let ddl_executor = Arc::new(DdlManager::new(
procedure_manager,
- Arc::new(StandaloneDatanodeManager(region_server)),
+ datanode_manager.clone(),
cache_invalidator.clone(),
table_metadata_manager.clone(),
Arc::new(StandaloneTableMetadataCreator::new(kv_backend.clone())),
));
+ let partition_manager = Arc::new(PartitionRuleManager::new(kv_backend.clone()));
+
+ let inserter = Arc::new(Inserter::new(
+ catalog_manager.clone(),
+ partition_manager.clone(),
+ datanode_manager.clone(),
+ ));
+ let deleter = Arc::new(Deleter::new(
+ catalog_manager.clone(),
+ partition_manager,
+ datanode_manager,
+ ));
+
let statement_executor = Arc::new(StatementExecutor::new(
catalog_manager.clone(),
query_engine.clone(),
- region_request_handler.clone(),
ddl_executor,
kv_backend.clone(),
cache_invalidator,
+ inserter.clone(),
+ deleter.clone(),
));
- let create_expr_factory = CreateExprFactory;
-
Ok(Instance {
catalog_manager: catalog_manager.clone(),
script_executor,
- create_expr_factory,
statement_executor,
query_engine,
- region_request_handler,
plugins: Default::default(),
servers: Arc::new(HashMap::new()),
heartbeat_task: None,
+ inserter,
+ deleter,
})
}
@@ -341,62 +360,6 @@ impl Instance {
&self.catalog_manager
}
- // Handle batch inserts with row-format
- pub async fn handle_row_inserts(
- &self,
- requests: RowInsertRequests,
- ctx: QueryContextRef,
- ) -> Result<Output> {
- let inserter = Inserter::new(
- self.catalog_manager.as_ref(),
- &self.create_expr_factory,
- &self.statement_executor,
- self.region_request_handler.as_ref(),
- );
- inserter.handle_row_inserts(requests, ctx).await
- }
-
- /// Handle batch inserts
- pub async fn handle_inserts(
- &self,
- requests: InsertRequests,
- ctx: QueryContextRef,
- ) -> Result<Output> {
- let inserter = Inserter::new(
- self.catalog_manager.as_ref(),
- &self.create_expr_factory,
- &self.statement_executor,
- self.region_request_handler.as_ref(),
- );
- inserter.handle_column_inserts(requests, ctx).await
- }
-
- /// Handle batch deletes with row-format
- pub async fn handle_row_deletes(
- &self,
- requests: RowDeleteRequests,
- ctx: QueryContextRef,
- ) -> Result<Output> {
- let deleter = Deleter::new(
- self.catalog_manager.as_ref(),
- self.region_request_handler.as_ref(),
- );
- deleter.handle_row_deletes(requests, ctx).await
- }
-
- /// Handle batch deletes
- pub async fn handle_deletes(
- &self,
- requests: DeleteRequests,
- ctx: QueryContextRef,
- ) -> Result<Output> {
- let deleter = Deleter::new(
- self.catalog_manager.as_ref(),
- self.region_request_handler.as_ref(),
- );
- deleter.handle_column_deletes(requests, ctx).await
- }
-
pub fn set_plugins(&mut self, map: Arc<Plugins>) {
self.plugins = map;
}
diff --git a/src/frontend/src/instance/distributed.rs b/src/frontend/src/instance/distributed.rs
index bff5d87520aa..4a99b9d2ef19 100644
--- a/src/frontend/src/instance/distributed.rs
+++ b/src/frontend/src/instance/distributed.rs
@@ -12,28 +12,19 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-pub mod deleter;
-pub(crate) mod inserter;
-
use std::sync::Arc;
-use api::v1::region::{region_request, QueryRequest};
+use api::v1::region::QueryRequest;
use async_trait::async_trait;
use client::error::{HandleRequestSnafu, Result as ClientResult};
use client::region_handler::RegionRequestHandler;
use common_error::ext::BoxedError;
-use common_meta::datanode_manager::AffectedRows;
use common_recordbatch::SendableRecordBatchStream;
-use session::context::QueryContextRef;
use snafu::{OptionExt, ResultExt};
use store_api::storage::RegionId;
use crate::catalog::FrontendCatalogManager;
-use crate::error::{
- FindDatanodeSnafu, FindTableRouteSnafu, NotSupportedSnafu, RequestQuerySnafu, Result,
-};
-use crate::instance::distributed::deleter::DistDeleter;
-use crate::instance::distributed::inserter::DistInserter;
+use crate::error::{FindDatanodeSnafu, FindTableRouteSnafu, RequestQuerySnafu, Result};
pub(crate) struct DistRegionRequestHandler {
catalog_manager: Arc<FrontendCatalogManager>,
@@ -47,17 +38,6 @@ impl DistRegionRequestHandler {
#[async_trait]
impl RegionRequestHandler for DistRegionRequestHandler {
- async fn handle(
- &self,
- request: region_request::Body,
- ctx: QueryContextRef,
- ) -> ClientResult<AffectedRows> {
- self.handle_inner(request, ctx)
- .await
- .map_err(BoxedError::new)
- .context(HandleRequestSnafu)
- }
-
async fn do_get(&self, request: QueryRequest) -> ClientResult<SendableRecordBatchStream> {
self.do_get_inner(request)
.await
@@ -67,52 +47,6 @@ impl RegionRequestHandler for DistRegionRequestHandler {
}
impl DistRegionRequestHandler {
- async fn handle_inner(
- &self,
- request: region_request::Body,
- ctx: QueryContextRef,
- ) -> Result<AffectedRows> {
- match request {
- region_request::Body::Inserts(inserts) => {
- let inserter =
- DistInserter::new(&self.catalog_manager).with_trace_id(ctx.trace_id());
- inserter.insert(inserts).await
- }
- region_request::Body::Deletes(deletes) => {
- let deleter = DistDeleter::new(&self.catalog_manager).with_trace_id(ctx.trace_id());
- deleter.delete(deletes).await
- }
- region_request::Body::Create(_) => NotSupportedSnafu {
- feat: "region create",
- }
- .fail(),
- region_request::Body::Drop(_) => NotSupportedSnafu {
- feat: "region drop",
- }
- .fail(),
- region_request::Body::Open(_) => NotSupportedSnafu {
- feat: "region open",
- }
- .fail(),
- region_request::Body::Close(_) => NotSupportedSnafu {
- feat: "region close",
- }
- .fail(),
- region_request::Body::Alter(_) => NotSupportedSnafu {
- feat: "region alter",
- }
- .fail(),
- region_request::Body::Flush(_) => NotSupportedSnafu {
- feat: "region flush",
- }
- .fail(),
- region_request::Body::Compact(_) => NotSupportedSnafu {
- feat: "region compact",
- }
- .fail(),
- }
- }
-
async fn do_get_inner(&self, request: QueryRequest) -> Result<SendableRecordBatchStream> {
let region_id = RegionId::from_u64(request.region_id);
diff --git a/src/frontend/src/instance/distributed/deleter.rs b/src/frontend/src/instance/distributed/deleter.rs
deleted file mode 100644
index befcda45740a..000000000000
--- a/src/frontend/src/instance/distributed/deleter.rs
+++ /dev/null
@@ -1,284 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use std::collections::HashMap;
-
-use api::v1::region::{region_request, DeleteRequests, RegionRequest, RegionRequestHeader};
-use common_meta::datanode_manager::AffectedRows;
-use common_meta::peer::Peer;
-use futures::future;
-use metrics::counter;
-use snafu::{OptionExt, ResultExt};
-use store_api::storage::RegionId;
-
-use crate::catalog::FrontendCatalogManager;
-use crate::error::{
- FindDatanodeSnafu, FindTableRouteSnafu, JoinTaskSnafu, RequestDeletesSnafu, Result,
- SplitDeleteSnafu,
-};
-
-/// A distributed deleter. It ingests gRPC [DeleteRequests].
-///
-/// Table data partitioning and Datanode requests batching are handled inside.
-pub struct DistDeleter<'a> {
- catalog_manager: &'a FrontendCatalogManager,
- trace_id: Option<u64>,
- span_id: Option<u64>,
-}
-
-impl<'a> DistDeleter<'a> {
- pub(crate) fn new(catalog_manager: &'a FrontendCatalogManager) -> Self {
- Self {
- catalog_manager,
- trace_id: None,
- span_id: None,
- }
- }
-
- pub fn with_trace_id(mut self, trace_id: u64) -> Self {
- self.trace_id = Some(trace_id);
- self
- }
-
- #[allow(dead_code)]
- pub fn with_span_id(mut self, span_id: u64) -> Self {
- self.span_id = Some(span_id);
- self
- }
-
- pub(crate) async fn delete(&self, requests: DeleteRequests) -> Result<AffectedRows> {
- let requests = self.split(requests).await?;
- let trace_id = self.trace_id.unwrap_or_default();
- let span_id = self.span_id.unwrap_or_default();
- let results = future::try_join_all(requests.into_iter().map(|(peer, deletes)| {
- let datanode_clients = self.catalog_manager.datanode_manager();
- common_runtime::spawn_write(async move {
- let request = RegionRequest {
- header: Some(RegionRequestHeader { trace_id, span_id }),
- body: Some(region_request::Body::Deletes(deletes)),
- };
- datanode_clients
- .datanode(&peer)
- .await
- .handle(request)
- .await
- .context(RequestDeletesSnafu)
- })
- }))
- .await
- .context(JoinTaskSnafu)?;
-
- let affected_rows = results.into_iter().sum::<Result<u64>>()?;
- counter!(crate::metrics::DIST_DELETE_ROW_COUNT, affected_rows);
- Ok(affected_rows)
- }
-
- /// Splits gRPC [DeleteRequests] into multiple gRPC [DeleteRequests]s, each of which
- /// is grouped by the peer of Datanode, so we can batch them together when invoking gRPC write
- /// method in Datanode.
- async fn split(&self, requests: DeleteRequests) -> Result<HashMap<Peer, DeleteRequests>> {
- let partition_manager = self.catalog_manager.partition_manager();
- let mut deletes: HashMap<Peer, DeleteRequests> = HashMap::new();
-
- for req in requests.requests {
- let table_id = RegionId::from_u64(req.region_id).table_id();
-
- let req_splits = partition_manager
- .split_delete_request(table_id, req)
- .await
- .context(SplitDeleteSnafu)?;
- let table_route = partition_manager
- .find_table_route(table_id)
- .await
- .context(FindTableRouteSnafu { table_id })?;
-
- for (region_number, delete) in req_splits {
- let peer =
- table_route
- .find_region_leader(region_number)
- .context(FindDatanodeSnafu {
- region: region_number,
- })?;
- deletes
- .entry(peer.clone())
- .or_default()
- .requests
- .push(delete);
- }
- }
-
- Ok(deletes)
- }
-}
-
-#[cfg(test)]
-mod tests {
- use std::sync::Arc;
-
- use api::helper::vectors_to_rows;
- use api::v1::region::DeleteRequest;
- use api::v1::value::ValueData;
- use api::v1::{ColumnDataType, ColumnSchema, Row, Rows, SemanticType, Value};
- use client::client_manager::DatanodeClients;
- use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
- use common_meta::helper::{CatalogValue, SchemaValue};
- use common_meta::key::catalog_name::CatalogNameKey;
- use common_meta::key::schema_name::SchemaNameKey;
- use common_meta::kv_backend::memory::MemoryKvBackend;
- use common_meta::kv_backend::{KvBackend, KvBackendRef};
- use common_meta::rpc::store::PutRequest;
- use datatypes::prelude::VectorRef;
- use datatypes::vectors::Int32Vector;
-
- use super::*;
- use crate::heartbeat::handler::tests::MockKvCacheInvalidator;
- use crate::table::test::create_partition_rule_manager;
-
- async fn prepare_mocked_backend() -> KvBackendRef {
- let backend = Arc::new(MemoryKvBackend::default());
-
- let default_catalog = CatalogNameKey {
- catalog: DEFAULT_CATALOG_NAME,
- }
- .to_string();
- let req = PutRequest::new()
- .with_key(default_catalog.as_bytes())
- .with_value(CatalogValue.as_bytes().unwrap());
- backend.put(req).await.unwrap();
-
- let default_schema = SchemaNameKey {
- catalog: DEFAULT_CATALOG_NAME,
- schema: DEFAULT_SCHEMA_NAME,
- }
- .to_string();
- let req = PutRequest::new()
- .with_key(default_schema.as_bytes())
- .with_value(SchemaValue.as_bytes().unwrap());
- backend.put(req).await.unwrap();
-
- backend
- }
-
- #[tokio::test]
- async fn test_split_deletes() {
- let backend = prepare_mocked_backend().await;
- create_partition_rule_manager(backend.clone()).await;
-
- let catalog_manager = Arc::new(FrontendCatalogManager::new(
- backend,
- Arc::new(MockKvCacheInvalidator::default()),
- Arc::new(DatanodeClients::default()),
- ));
-
- let new_delete_request = |vector: VectorRef| -> DeleteRequest {
- let row_count = vector.len();
- DeleteRequest {
- region_id: RegionId::new(1, 0).into(),
- rows: Some(Rows {
- schema: vec![ColumnSchema {
- column_name: "a".to_string(),
- datatype: ColumnDataType::Int32 as i32,
- semantic_type: SemanticType::Tag as i32,
- }],
- rows: vectors_to_rows([vector].iter(), row_count),
- }),
- }
- };
- let requests = DeleteRequests {
- requests: vec![
- new_delete_request(Arc::new(Int32Vector::from(vec![
- Some(1),
- Some(11),
- Some(50),
- ]))),
- new_delete_request(Arc::new(Int32Vector::from(vec![
- Some(2),
- Some(12),
- Some(102),
- ]))),
- ],
- };
-
- let deleter = DistDeleter::new(&catalog_manager);
- let mut deletes = deleter.split(requests).await.unwrap();
-
- assert_eq!(deletes.len(), 3);
-
- let new_split_delete_request =
- |rows: Vec<Option<i32>>, region_id: RegionId| -> DeleteRequest {
- DeleteRequest {
- region_id: region_id.into(),
- rows: Some(Rows {
- schema: vec![ColumnSchema {
- column_name: "a".to_string(),
- datatype: ColumnDataType::Int32 as i32,
- semantic_type: SemanticType::Tag as i32,
- }],
- rows: rows
- .into_iter()
- .map(|v| Row {
- values: vec![Value {
- value_data: v.map(ValueData::I32Value),
- }],
- })
- .collect(),
- }),
- }
- };
-
- // region to datanode placement:
- // 1 -> 1
- // 2 -> 2
- // 3 -> 3
- //
- // region value ranges:
- // 1 -> [50, max)
- // 2 -> [10, 50)
- // 3 -> (min, 10)
-
- let datanode_deletes = deletes.remove(&Peer::new(1, "")).unwrap().requests;
- assert_eq!(datanode_deletes.len(), 2);
-
- assert_eq!(
- datanode_deletes[0],
- new_split_delete_request(vec![Some(50)], RegionId::new(1, 1))
- );
- assert_eq!(
- datanode_deletes[1],
- new_split_delete_request(vec![Some(102)], RegionId::new(1, 1))
- );
-
- let datanode_deletes = deletes.remove(&Peer::new(2, "")).unwrap().requests;
- assert_eq!(datanode_deletes.len(), 2);
- assert_eq!(
- datanode_deletes[0],
- new_split_delete_request(vec![Some(11)], RegionId::new(1, 2))
- );
- assert_eq!(
- datanode_deletes[1],
- new_split_delete_request(vec![Some(12)], RegionId::new(1, 2))
- );
-
- let datanode_deletes = deletes.remove(&Peer::new(3, "")).unwrap().requests;
- assert_eq!(datanode_deletes.len(), 2);
- assert_eq!(
- datanode_deletes[0],
- new_split_delete_request(vec![Some(1)], RegionId::new(1, 3))
- );
- assert_eq!(
- datanode_deletes[1],
- new_split_delete_request(vec![Some(2)], RegionId::new(1, 3))
- );
- }
-}
diff --git a/src/frontend/src/instance/distributed/inserter.rs b/src/frontend/src/instance/distributed/inserter.rs
deleted file mode 100644
index 2be5b3fa9440..000000000000
--- a/src/frontend/src/instance/distributed/inserter.rs
+++ /dev/null
@@ -1,275 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use std::collections::HashMap;
-
-use api::v1::region::{region_request, InsertRequests, RegionRequest, RegionRequestHeader};
-use common_meta::datanode_manager::AffectedRows;
-use common_meta::peer::Peer;
-use futures_util::future;
-use metrics::counter;
-use snafu::{OptionExt, ResultExt};
-use store_api::storage::RegionId;
-
-use crate::catalog::FrontendCatalogManager;
-use crate::error::{
- FindDatanodeSnafu, FindTableRouteSnafu, JoinTaskSnafu, RequestInsertsSnafu, Result,
- SplitInsertSnafu,
-};
-
-/// A distributed inserter. It ingests gRPC [InsertRequests].
-///
-/// Table data partitioning and Datanode requests batching are handled inside.
-pub struct DistInserter<'a> {
- catalog_manager: &'a FrontendCatalogManager,
- trace_id: Option<u64>,
- span_id: Option<u64>,
-}
-
-impl<'a> DistInserter<'a> {
- pub fn new(catalog_manager: &'a FrontendCatalogManager) -> Self {
- Self {
- catalog_manager,
- trace_id: None,
- span_id: None,
- }
- }
-
- pub fn with_trace_id(mut self, trace_id: u64) -> Self {
- self.trace_id = Some(trace_id);
- self
- }
-
- #[allow(dead_code)]
- pub fn with_span_id(mut self, span_id: u64) -> Self {
- self.span_id = Some(span_id);
- self
- }
-
- pub(crate) async fn insert(&self, requests: InsertRequests) -> Result<AffectedRows> {
- let requests = self.split(requests).await?;
- let trace_id = self.trace_id.unwrap_or_default();
- let span_id = self.span_id.unwrap_or_default();
- let results = future::try_join_all(requests.into_iter().map(|(peer, inserts)| {
- let datanode_clients = self.catalog_manager.datanode_manager();
- common_runtime::spawn_write(async move {
- let request = RegionRequest {
- header: Some(RegionRequestHeader { trace_id, span_id }),
- body: Some(region_request::Body::Inserts(inserts)),
- };
- datanode_clients
- .datanode(&peer)
- .await
- .handle(request)
- .await
- .context(RequestInsertsSnafu)
- })
- }))
- .await
- .context(JoinTaskSnafu)?;
-
- let affected_rows = results.into_iter().sum::<Result<u64>>()?;
- counter!(crate::metrics::DIST_INGEST_ROW_COUNT, affected_rows);
- Ok(affected_rows)
- }
-
- /// Splits gRPC [InsertRequests] into multiple gRPC [InsertRequests]s, each of which
- /// is grouped by the peer of Datanode, so we can batch them together when invoking gRPC write
- /// method in Datanode.
- async fn split(&self, requests: InsertRequests) -> Result<HashMap<Peer, InsertRequests>> {
- let partition_manager = self.catalog_manager.partition_manager();
- let mut inserts: HashMap<Peer, InsertRequests> = HashMap::new();
-
- for req in requests.requests {
- let table_id = RegionId::from_u64(req.region_id).table_id();
-
- let req_splits = partition_manager
- .split_insert_request(table_id, req)
- .await
- .context(SplitInsertSnafu)?;
- let table_route = partition_manager
- .find_table_route(table_id)
- .await
- .context(FindTableRouteSnafu { table_id })?;
- let region_map = table_route.region_map();
-
- for (region_number, insert) in req_splits {
-                let peer = *region_map.get(®ion_number).context(FindDatanodeSnafu {
- region: region_number,
- })?;
- inserts
- .entry(peer.clone())
- .or_default()
- .requests
- .push(insert);
- }
- }
-
- Ok(inserts)
- }
-}
-
-#[cfg(test)]
-mod tests {
- use std::sync::Arc;
-
- use api::helper::vectors_to_rows;
- use api::v1::region::InsertRequest;
- use api::v1::value::ValueData;
- use api::v1::{ColumnDataType, ColumnSchema, Row, Rows, SemanticType, Value};
- use client::client_manager::DatanodeClients;
- use common_meta::key::catalog_name::{CatalogManager, CatalogNameKey};
- use common_meta::key::schema_name::{SchemaManager, SchemaNameKey};
- use common_meta::kv_backend::memory::MemoryKvBackend;
- use common_meta::kv_backend::KvBackendRef;
- use datatypes::prelude::VectorRef;
- use datatypes::vectors::Int32Vector;
-
- use super::*;
- use crate::heartbeat::handler::tests::MockKvCacheInvalidator;
- use crate::table::test::create_partition_rule_manager;
-
- async fn prepare_mocked_backend() -> KvBackendRef {
- let backend = Arc::new(MemoryKvBackend::default());
-
- let catalog_manager = CatalogManager::new(backend.clone());
- let schema_manager = SchemaManager::new(backend.clone());
-
- catalog_manager
- .create(CatalogNameKey::default())
- .await
- .unwrap();
- schema_manager
- .create(SchemaNameKey::default(), None)
- .await
- .unwrap();
-
- backend
- }
-
- #[tokio::test]
- async fn test_split_inserts() {
- let backend = prepare_mocked_backend().await;
- create_partition_rule_manager(backend.clone()).await;
-
- let catalog_manager = Arc::new(FrontendCatalogManager::new(
- backend,
- Arc::new(MockKvCacheInvalidator::default()),
- Arc::new(DatanodeClients::default()),
- ));
-
- let inserter = DistInserter::new(&catalog_manager);
-
- let new_insert_request = |vector: VectorRef| -> InsertRequest {
- let row_count = vector.len();
- InsertRequest {
- region_id: RegionId::new(1, 0).into(),
- rows: Some(Rows {
- schema: vec![ColumnSchema {
- column_name: "a".to_string(),
- datatype: ColumnDataType::Int32 as i32,
- semantic_type: SemanticType::Field as i32,
- }],
- rows: vectors_to_rows([vector].iter(), row_count),
- }),
- }
- };
-
- let requests = InsertRequests {
- requests: vec![
- new_insert_request(Arc::new(Int32Vector::from(vec![
- Some(1),
- None,
- Some(11),
- Some(101),
- ]))),
- new_insert_request(Arc::new(Int32Vector::from(vec![
- Some(2),
- Some(12),
- None,
- Some(102),
- ]))),
- ],
- };
-
- let mut inserts = inserter.split(requests).await.unwrap();
-
- assert_eq!(inserts.len(), 3);
-
- let new_split_insert_request =
- |rows: Vec<Option<i32>>, region_id: RegionId| -> InsertRequest {
- InsertRequest {
- region_id: region_id.into(),
- rows: Some(Rows {
- schema: vec![ColumnSchema {
- column_name: "a".to_string(),
- datatype: ColumnDataType::Int32 as i32,
- semantic_type: SemanticType::Field as i32,
- }],
- rows: rows
- .into_iter()
- .map(|v| Row {
- values: vec![Value {
- value_data: v.map(ValueData::I32Value),
- }],
- })
- .collect(),
- }),
- }
- };
-
- // region to datanode placement:
- // 1 -> 1
- // 2 -> 2
- // 3 -> 3
- //
- // region value ranges:
- // 1 -> [50, max)
- // 2 -> [10, 50)
- // 3 -> (min, 10)
-
- let datanode_inserts = inserts.remove(&Peer::new(1, "")).unwrap().requests;
- assert_eq!(datanode_inserts.len(), 2);
- assert_eq!(
- datanode_inserts[0],
- new_split_insert_request(vec![Some(101)], RegionId::new(1, 1))
- );
- assert_eq!(
- datanode_inserts[1],
- new_split_insert_request(vec![Some(102)], RegionId::new(1, 1))
- );
-
- let datanode_inserts = inserts.remove(&Peer::new(2, "")).unwrap().requests;
- assert_eq!(datanode_inserts.len(), 2);
- assert_eq!(
- datanode_inserts[0],
- new_split_insert_request(vec![Some(11)], RegionId::new(1, 2))
- );
- assert_eq!(
- datanode_inserts[1],
- new_split_insert_request(vec![Some(12)], RegionId::new(1, 2))
- );
-
- let datanode_inserts = inserts.remove(&Peer::new(3, "")).unwrap().requests;
- assert_eq!(datanode_inserts.len(), 2);
- assert_eq!(
- datanode_inserts[0],
- new_split_insert_request(vec![Some(1), None], RegionId::new(1, 3))
- );
- assert_eq!(
- datanode_inserts[1],
- new_split_insert_request(vec![Some(2), None], RegionId::new(1, 3))
- );
- }
-}
diff --git a/src/frontend/src/instance/grpc.rs b/src/frontend/src/instance/grpc.rs
index 5dd76f7eab6c..d24598fbd1c2 100644
--- a/src/frontend/src/instance/grpc.rs
+++ b/src/frontend/src/instance/grpc.rs
@@ -15,6 +15,7 @@
use api::v1::ddl_request::Expr as DdlExpr;
use api::v1::greptime_request::Request;
use api::v1::query_request::Query;
+use api::v1::{DeleteRequests, InsertRequests, RowDeleteRequests, RowInsertRequests};
use async_trait::async_trait;
use auth::{PermissionChecker, PermissionCheckerRef, PermissionReq};
use common_meta::table_name::TableName;
@@ -133,3 +134,41 @@ impl GrpcQueryHandler for Instance {
Ok(output)
}
}
+
+impl Instance {
+ pub async fn handle_inserts(
+ &self,
+ requests: InsertRequests,
+ ctx: QueryContextRef,
+ ) -> Result<Output> {
+ self.inserter
+ .handle_column_inserts(requests, ctx, self.statement_executor.as_ref())
+ .await
+ }
+
+ pub async fn handle_row_inserts(
+ &self,
+ requests: RowInsertRequests,
+ ctx: QueryContextRef,
+ ) -> Result<Output> {
+ self.inserter
+ .handle_row_inserts(requests, ctx, self.statement_executor.as_ref())
+ .await
+ }
+
+ pub async fn handle_deletes(
+ &self,
+ requests: DeleteRequests,
+ ctx: QueryContextRef,
+ ) -> Result<Output> {
+ self.deleter.handle_column_deletes(requests, ctx).await
+ }
+
+ pub async fn handle_row_deletes(
+ &self,
+ requests: RowDeleteRequests,
+ ctx: QueryContextRef,
+ ) -> Result<Output> {
+ self.deleter.handle_row_deletes(requests, ctx).await
+ }
+}
diff --git a/src/frontend/src/instance/standalone.rs b/src/frontend/src/instance/standalone.rs
index fb6b4a205349..9e3881ecf504 100644
--- a/src/frontend/src/instance/standalone.rs
+++ b/src/frontend/src/instance/standalone.rs
@@ -15,7 +15,7 @@
use std::sync::Arc;
use api::v1::meta::Partition;
-use api::v1::region::{region_request, QueryRequest, RegionRequest};
+use api::v1::region::{QueryRequest, RegionRequest, RegionResponse};
use async_trait::async_trait;
use client::error::{HandleRequestSnafu, Result as ClientResult};
use client::region::check_response_header;
@@ -31,12 +31,11 @@ use common_meta::sequence::{Sequence, SequenceRef};
use common_recordbatch::SendableRecordBatchStream;
use datanode::region_server::RegionServer;
use servers::grpc::region_server::RegionServerHandler;
-use session::context::QueryContextRef;
use snafu::{OptionExt, ResultExt};
use store_api::storage::{RegionId, TableId};
use table::metadata::RawTableInfo;
-use crate::error::InvokeRegionServerSnafu;
+use crate::error::{InvalidRegionRequestSnafu, InvokeRegionServerSnafu, Result};
const TABLE_ID_SEQ: &str = "table_id";
@@ -45,31 +44,24 @@ pub(crate) struct StandaloneRegionRequestHandler {
}
impl StandaloneRegionRequestHandler {
- #[allow(dead_code)]
pub fn arc(region_server: RegionServer) -> Arc<Self> {
Arc::new(Self { region_server })
}
-}
-#[async_trait]
-impl RegionRequestHandler for StandaloneRegionRequestHandler {
- async fn handle(
- &self,
- request: region_request::Body,
- _ctx: QueryContextRef,
- ) -> ClientResult<AffectedRows> {
- let response = self
- .region_server
- .handle(request)
+ async fn handle_inner(&self, request: RegionRequest) -> Result<RegionResponse> {
+ let body = request.body.with_context(|| InvalidRegionRequestSnafu {
+ reason: "body not found",
+ })?;
+
+ self.region_server
+ .handle(body)
.await
.context(InvokeRegionServerSnafu)
- .map_err(BoxedError::new)
- .context(HandleRequestSnafu)?;
-
- check_response_header(response.header)?;
- Ok(response.affected_rows)
}
+}
+#[async_trait]
+impl RegionRequestHandler for StandaloneRegionRequestHandler {
async fn do_get(&self, request: QueryRequest) -> ClientResult<SendableRecordBatchStream> {
self.region_server
.handle_read(request)
@@ -79,26 +71,22 @@ impl RegionRequestHandler for StandaloneRegionRequestHandler {
}
}
-pub(crate) struct StandaloneDatanode(pub(crate) RegionServer);
-
#[async_trait]
-impl Datanode for StandaloneDatanode {
+impl Datanode for StandaloneRegionRequestHandler {
async fn handle(&self, request: RegionRequest) -> MetaResult<AffectedRows> {
- let body = request.body.context(meta_error::UnexpectedSnafu {
- err_msg: "body not found",
- })?;
- let resp = self
- .0
- .handle(body)
+ let response = self
+ .handle_inner(request)
.await
.map_err(BoxedError::new)
.context(meta_error::ExternalSnafu)?;
-
- Ok(resp.affected_rows)
+ check_response_header(response.header)
+ .map_err(BoxedError::new)
+ .context(meta_error::ExternalSnafu)?;
+ Ok(response.affected_rows)
}
async fn handle_query(&self, request: QueryRequest) -> MetaResult<SendableRecordBatchStream> {
- self.0
+ self.region_server
.handle_read(request)
.await
.map_err(BoxedError::new)
@@ -111,7 +99,7 @@ pub struct StandaloneDatanodeManager(pub RegionServer);
#[async_trait]
impl DatanodeManager for StandaloneDatanodeManager {
async fn datanode(&self, _datanode: &Peer) -> DatanodeRef {
- Arc::new(StandaloneDatanode(self.0.clone()))
+ StandaloneRegionRequestHandler::arc(self.0.clone())
}
}
diff --git a/src/frontend/src/lib.rs b/src/frontend/src/lib.rs
index 52d355549ad0..bfe341b615e8 100644
--- a/src/frontend/src/lib.rs
+++ b/src/frontend/src/lib.rs
@@ -24,6 +24,7 @@ pub mod heartbeat;
pub(crate) mod insert;
pub mod instance;
pub(crate) mod metrics;
+pub(crate) mod region_req_factory;
pub(crate) mod req_convert;
mod script;
mod server;
diff --git a/src/frontend/src/region_req_factory.rs b/src/frontend/src/region_req_factory.rs
new file mode 100644
index 000000000000..d033216bb17e
--- /dev/null
+++ b/src/frontend/src/region_req_factory.rs
@@ -0,0 +1,43 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use api::v1::region::region_request::Body;
+use api::v1::region::{
+ DeleteRequests as RegionDeleteRequests, InsertRequests as RegionInsertRequests, RegionRequest,
+ RegionRequestHeader,
+};
+
+pub struct RegionRequestFactory {
+ header: RegionRequestHeader,
+}
+
+impl RegionRequestFactory {
+ pub fn new(header: RegionRequestHeader) -> Self {
+ Self { header }
+ }
+
+ pub fn build_insert(&self, requests: RegionInsertRequests) -> RegionRequest {
+ RegionRequest {
+ header: Some(self.header.clone()),
+ body: Some(Body::Inserts(requests)),
+ }
+ }
+
+ pub fn build_delete(&self, requests: RegionDeleteRequests) -> RegionRequest {
+ RegionRequest {
+ header: Some(self.header.clone()),
+ body: Some(Body::Deletes(requests)),
+ }
+ }
+}
diff --git a/src/frontend/src/req_convert/common.rs b/src/frontend/src/req_convert/common.rs
index 73e5c4eadcd2..00a07fdae473 100644
--- a/src/frontend/src/req_convert/common.rs
+++ b/src/frontend/src/req_convert/common.rs
@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+pub(crate) mod partitioner;
+
use std::collections::HashMap;
use api::helper::ColumnDataTypeWrapper;
diff --git a/src/frontend/src/req_convert/common/partitioner.rs b/src/frontend/src/req_convert/common/partitioner.rs
new file mode 100644
index 000000000000..b9f2117c7bab
--- /dev/null
+++ b/src/frontend/src/req_convert/common/partitioner.rs
@@ -0,0 +1,69 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use api::v1::region::{DeleteRequest, InsertRequest};
+use api::v1::Rows;
+use partition::manager::PartitionRuleManager;
+use snafu::ResultExt;
+use store_api::storage::{RegionId, TableId};
+
+use crate::error::{Result, SplitDeleteSnafu, SplitInsertSnafu};
+
+pub struct Partitioner<'a> {
+ partition_manager: &'a PartitionRuleManager,
+}
+
+impl<'a> Partitioner<'a> {
+ pub fn new(partition_manager: &'a PartitionRuleManager) -> Self {
+ Self { partition_manager }
+ }
+
+ pub async fn partition_insert_requests(
+ &self,
+ table_id: TableId,
+ rows: Rows,
+ ) -> Result<Vec<InsertRequest>> {
+ let requests = self
+ .partition_manager
+ .split_rows(table_id, rows)
+ .await
+ .context(SplitInsertSnafu)?
+ .into_iter()
+ .map(|(region_number, rows)| InsertRequest {
+ region_id: RegionId::new(table_id, region_number).into(),
+ rows: Some(rows),
+ })
+ .collect();
+ Ok(requests)
+ }
+
+ pub async fn partition_delete_requests(
+ &self,
+ table_id: TableId,
+ rows: Rows,
+ ) -> Result<Vec<DeleteRequest>> {
+ let requests = self
+ .partition_manager
+ .split_rows(table_id, rows)
+ .await
+ .context(SplitDeleteSnafu)?
+ .into_iter()
+ .map(|(region_number, rows)| DeleteRequest {
+ region_id: RegionId::new(table_id, region_number).into(),
+ rows: Some(rows),
+ })
+ .collect();
+ Ok(requests)
+ }
+}
diff --git a/src/frontend/src/req_convert/delete/column_to_row.rs b/src/frontend/src/req_convert/delete/column_to_row.rs
index 7e1cc3fda4b9..610d9be48e2f 100644
--- a/src/frontend/src/req_convert/delete/column_to_row.rs
+++ b/src/frontend/src/req_convert/delete/column_to_row.rs
@@ -35,6 +35,6 @@ fn request_column_to_row(request: DeleteRequest) -> Result<RowDeleteRequest> {
Ok(RowDeleteRequest {
table_name: request.table_name,
rows: Some(rows),
- region_number: request.region_number,
+ region_number: 0, // FIXME(zhongzc): deprecated field
})
}
diff --git a/src/frontend/src/req_convert/delete/row_to_region.rs b/src/frontend/src/req_convert/delete/row_to_region.rs
index 5d09beffdf79..e69442115206 100644
--- a/src/frontend/src/req_convert/delete/row_to_region.rs
+++ b/src/frontend/src/req_convert/delete/row_to_region.rs
@@ -12,27 +12,32 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use api::v1::region::{
- DeleteRequest as RegionDeleteRequest, DeleteRequests as RegionDeleteRequests,
-};
+use api::v1::region::DeleteRequests as RegionDeleteRequests;
use api::v1::RowDeleteRequests;
use catalog::CatalogManager;
+use partition::manager::PartitionRuleManager;
use session::context::QueryContext;
use snafu::{OptionExt, ResultExt};
-use store_api::storage::RegionId;
use table::TableRef;
use crate::error::{CatalogSnafu, Result, TableNotFoundSnafu};
+use crate::req_convert::common::partitioner::Partitioner;
pub struct RowToRegion<'a> {
catalog_manager: &'a dyn CatalogManager,
+ partition_manager: &'a PartitionRuleManager,
ctx: &'a QueryContext,
}
impl<'a> RowToRegion<'a> {
- pub fn new(catalog_manager: &'a dyn CatalogManager, ctx: &'a QueryContext) -> Self {
+ pub fn new(
+ catalog_manager: &'a dyn CatalogManager,
+ partition_manager: &'a PartitionRuleManager,
+ ctx: &'a QueryContext,
+ ) -> Self {
Self {
catalog_manager,
+ partition_manager,
ctx,
}
}
@@ -41,13 +46,13 @@ impl<'a> RowToRegion<'a> {
let mut region_request = Vec::with_capacity(requests.deletes.len());
for request in requests.deletes {
let table = self.get_table(&request.table_name).await?;
+ let table_id = table.table_info().table_id();
- let region_id = RegionId::new(table.table_info().table_id(), request.region_number);
- let insert_request = RegionDeleteRequest {
- region_id: region_id.into(),
- rows: request.rows,
- };
- region_request.push(insert_request);
+ let requests = Partitioner::new(self.partition_manager)
+ .partition_delete_requests(table_id, request.rows.unwrap_or_default())
+ .await?;
+
+ region_request.extend(requests);
}
Ok(RegionDeleteRequests {
@@ -63,7 +68,11 @@ impl<'a> RowToRegion<'a> {
.await
.context(CatalogSnafu)?
.with_context(|| TableNotFoundSnafu {
- table_name: format!("{}.{}.{}", catalog_name, schema_name, table_name),
+ table_name: common_catalog::format_full_table_name(
+ catalog_name,
+ schema_name,
+ table_name,
+ ),
})
}
}
diff --git a/src/frontend/src/req_convert/delete/table_to_region.rs b/src/frontend/src/req_convert/delete/table_to_region.rs
index ab51de97e083..fd5e983458f6 100644
--- a/src/frontend/src/req_convert/delete/table_to_region.rs
+++ b/src/frontend/src/req_convert/delete/table_to_region.rs
@@ -12,37 +12,39 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use api::v1::region::{
- DeleteRequest as RegionDeleteRequest, DeleteRequests as RegionDeleteRequests,
-};
+use api::v1::region::DeleteRequests as RegionDeleteRequests;
use api::v1::Rows;
-use store_api::storage::RegionId;
+use partition::manager::PartitionRuleManager;
use table::metadata::TableInfo;
use table::requests::DeleteRequest as TableDeleteRequest;
use crate::error::Result;
+use crate::req_convert::common::partitioner::Partitioner;
use crate::req_convert::common::{column_schema, row_count};
pub struct TableToRegion<'a> {
table_info: &'a TableInfo,
+ partition_manager: &'a PartitionRuleManager,
}
impl<'a> TableToRegion<'a> {
- pub fn new(table_info: &'a TableInfo) -> Self {
- Self { table_info }
+ pub fn new(table_info: &'a TableInfo, partition_manager: &'a PartitionRuleManager) -> Self {
+ Self {
+ table_info,
+ partition_manager,
+ }
}
- pub fn convert(&self, request: TableDeleteRequest) -> Result<RegionDeleteRequests> {
- let region_id = RegionId::new(self.table_info.table_id(), 0).into();
+ pub async fn convert(&self, request: TableDeleteRequest) -> Result<RegionDeleteRequests> {
let row_count = row_count(&request.key_column_values)?;
let schema = column_schema(self.table_info, &request.key_column_values)?;
let rows = api::helper::vectors_to_rows(request.key_column_values.values(), row_count);
- Ok(RegionDeleteRequests {
- requests: vec![RegionDeleteRequest {
- region_id,
- rows: Some(Rows { schema, rows }),
- }],
- })
+ let rows = Rows { schema, rows };
+
+ let requests = Partitioner::new(self.partition_manager)
+ .partition_delete_requests(self.table_info.table_id(), rows)
+ .await?;
+ Ok(RegionDeleteRequests { requests })
}
}
@@ -51,114 +53,119 @@ mod tests {
use std::collections::HashMap;
use std::sync::Arc;
+ use api::v1::region::DeleteRequest as RegionDeleteRequest;
use api::v1::value::ValueData;
- use api::v1::{ColumnDataType, SemanticType};
+ use api::v1::{ColumnDataType, ColumnSchema, Row, SemanticType, Value};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
- use datatypes::prelude::ConcreteDataType;
- use datatypes::scalars::ScalarVectorBuilder;
- use datatypes::schema::{ColumnSchema as DtColumnSchema, Schema};
- use datatypes::vectors::{Int16VectorBuilder, MutableVector, StringVectorBuilder};
- use table::metadata::{TableInfoBuilder, TableMetaBuilder};
+ use common_meta::key::catalog_name::{CatalogManager, CatalogNameKey};
+ use common_meta::key::schema_name::{SchemaManager, SchemaNameKey};
+ use common_meta::kv_backend::memory::MemoryKvBackend;
+ use common_meta::kv_backend::KvBackendRef;
+ use datatypes::vectors::{Int32Vector, VectorRef};
+ use store_api::storage::RegionId;
use super::*;
+ use crate::table::test::{create_partition_rule_manager, new_test_table_info};
- #[test]
- fn test_delete_request_table_to_region() {
- let schema = Schema::new(vec![
- DtColumnSchema::new("ts", ConcreteDataType::int64_datatype(), false)
- .with_time_index(true),
- DtColumnSchema::new("id", ConcreteDataType::int16_datatype(), false),
- DtColumnSchema::new("host", ConcreteDataType::string_datatype(), false),
- ]);
-
- let table_meta = TableMetaBuilder::default()
- .schema(Arc::new(schema))
- .primary_key_indices(vec![1, 2])
- .next_column_id(3)
- .build()
- .unwrap();
+ async fn prepare_mocked_backend() -> KvBackendRef {
+ let backend = Arc::new(MemoryKvBackend::default());
- let table_info = Arc::new(
- TableInfoBuilder::default()
- .name("demo")
- .meta(table_meta)
- .table_id(1)
- .build()
- .unwrap(),
- );
+ let catalog_manager = CatalogManager::new(backend.clone());
+ let schema_manager = SchemaManager::new(backend.clone());
- let delete_request = mock_delete_request();
- let mut request = TableToRegion::new(&table_info)
- .convert(delete_request)
+ catalog_manager
+ .create(CatalogNameKey::default())
+ .await
+ .unwrap();
+ schema_manager
+ .create(SchemaNameKey::default(), None)
+ .await
.unwrap();
- assert_eq!(request.requests.len(), 1);
- verify_region_insert_request(request.requests.pop().unwrap());
+ backend
}
- fn mock_delete_request() -> TableDeleteRequest {
- let mut builder = StringVectorBuilder::with_capacity(3);
- builder.push(Some("host1"));
- builder.push(None);
- builder.push(Some("host3"));
- let host = builder.to_vector();
+ #[tokio::test]
+ async fn test_delete_request_table_to_region() {
+ // region to datanode placement:
+ // 1 -> 1
+ // 2 -> 2
+ // 3 -> 3
+ //
+ // region value ranges:
+ // 1 -> [50, max)
+ // 2 -> [10, 50)
+ // 3 -> (min, 10)
+
+ let backend = prepare_mocked_backend().await;
+ let partition_manager = create_partition_rule_manager(backend.clone()).await;
+ let table_info = new_test_table_info(1, "table_1", vec![0u32, 1, 2].into_iter());
+
+ let converter = TableToRegion::new(&table_info, &partition_manager);
+
+ let table_request = build_table_request(Arc::new(Int32Vector::from(vec![
+ Some(1),
+ None,
+ Some(11),
+ Some(101),
+ ])));
+
+ let region_requests = converter.convert(table_request).await.unwrap();
+ let mut region_id_to_region_requests = region_requests
+ .requests
+ .into_iter()
+ .map(|r| (r.region_id, r))
+ .collect::<HashMap<_, _>>();
+
+ let region_id = RegionId::new(1, 1).as_u64();
+        let region_request = region_id_to_region_requests.remove(®ion_id).unwrap();
+ assert_eq!(
+ region_request,
+ build_region_request(vec![Some(101)], region_id)
+ );
- let mut builder = Int16VectorBuilder::with_capacity(3);
- builder.push(Some(1_i16));
- builder.push(Some(2_i16));
- builder.push(Some(3_i16));
- let id = builder.to_vector();
+ let region_id = RegionId::new(1, 2).as_u64();
+        let region_request = region_id_to_region_requests.remove(®ion_id).unwrap();
+ assert_eq!(
+ region_request,
+ build_region_request(vec![Some(11)], region_id)
+ );
- let key_column_values = HashMap::from([("host".to_string(), host), ("id".to_string(), id)]);
+ let region_id = RegionId::new(1, 3).as_u64();
+ let region_request = region_id_to_region_requests.remove(®ion_id).unwrap();
+        let region_request = region_id_to_region_requests.remove(®ion_id).unwrap();
+ region_request,
+ build_region_request(vec![Some(1), None], region_id)
+ );
+ }
+ fn build_table_request(vector: VectorRef) -> TableDeleteRequest {
TableDeleteRequest {
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
schema_name: DEFAULT_SCHEMA_NAME.to_string(),
- table_name: "demo".to_string(),
- key_column_values,
+ table_name: "table_1".to_string(),
+ key_column_values: HashMap::from([("a".to_string(), vector)]),
}
}
- fn verify_region_insert_request(request: RegionDeleteRequest) {
- assert_eq!(request.region_id, RegionId::new(1, 0).as_u64());
-
- let rows = request.rows.unwrap();
- for (i, column) in rows.schema.iter().enumerate() {
- let name = &column.column_name;
- if name == "id" {
- assert_eq!(ColumnDataType::Int16 as i32, column.datatype);
- assert_eq!(SemanticType::Tag as i32, column.semantic_type);
- let values = rows
- .rows
- .iter()
- .map(|row| row.values[i].value_data.clone())
- .collect::<Vec<_>>();
- assert_eq!(
- vec![
- Some(ValueData::I16Value(1)),
- Some(ValueData::I16Value(2)),
- Some(ValueData::I16Value(3))
- ],
- values
- );
- }
- if name == "host" {
- assert_eq!(ColumnDataType::String as i32, column.datatype);
- assert_eq!(SemanticType::Tag as i32, column.semantic_type);
- let values = rows
- .rows
- .iter()
- .map(|row| row.values[i].value_data.clone())
- .collect::<Vec<_>>();
- assert_eq!(
- vec![
- Some(ValueData::StringValue("host1".to_string())),
- None,
- Some(ValueData::StringValue("host3".to_string()))
- ],
- values
- );
- }
+ fn build_region_request(rows: Vec<Option<i32>>, region_id: u64) -> RegionDeleteRequest {
+ RegionDeleteRequest {
+ region_id,
+ rows: Some(Rows {
+ schema: vec![ColumnSchema {
+ column_name: "a".to_string(),
+ datatype: ColumnDataType::Int32 as i32,
+ semantic_type: SemanticType::Tag as i32,
+ }],
+ rows: rows
+ .into_iter()
+ .map(|v| Row {
+ values: vec![Value {
+ value_data: v.map(ValueData::I32Value),
+ }],
+ })
+ .collect(),
+ }),
}
}
}
diff --git a/src/frontend/src/req_convert/insert/column_to_row.rs b/src/frontend/src/req_convert/insert/column_to_row.rs
index f3d4c50b366b..adc129219666 100644
--- a/src/frontend/src/req_convert/insert/column_to_row.rs
+++ b/src/frontend/src/req_convert/insert/column_to_row.rs
@@ -35,6 +35,6 @@ fn request_column_to_row(request: InsertRequest) -> Result<RowInsertRequest> {
Ok(RowInsertRequest {
table_name: request.table_name,
rows: Some(rows),
- region_number: request.region_number,
+ region_number: 0, // FIXME(zhongzc): deprecated field
})
}
diff --git a/src/frontend/src/req_convert/insert/row_to_region.rs b/src/frontend/src/req_convert/insert/row_to_region.rs
index 5cd82a4b6396..388b13e9addd 100644
--- a/src/frontend/src/req_convert/insert/row_to_region.rs
+++ b/src/frontend/src/req_convert/insert/row_to_region.rs
@@ -12,27 +12,32 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use api::v1::region::{
- InsertRequest as RegionInsertRequest, InsertRequests as RegionInsertRequests,
-};
+use api::v1::region::InsertRequests as RegionInsertRequests;
use api::v1::RowInsertRequests;
use catalog::CatalogManager;
+use partition::manager::PartitionRuleManager;
use session::context::QueryContext;
use snafu::{OptionExt, ResultExt};
-use store_api::storage::RegionId;
use table::TableRef;
use crate::error::{CatalogSnafu, Result, TableNotFoundSnafu};
+use crate::req_convert::common::partitioner::Partitioner;
pub struct RowToRegion<'a> {
catalog_manager: &'a dyn CatalogManager,
+ partition_manager: &'a PartitionRuleManager,
ctx: &'a QueryContext,
}
impl<'a> RowToRegion<'a> {
- pub fn new(catalog_manager: &'a dyn CatalogManager, ctx: &'a QueryContext) -> Self {
+ pub fn new(
+ catalog_manager: &'a dyn CatalogManager,
+ partition_manager: &'a PartitionRuleManager,
+ ctx: &'a QueryContext,
+ ) -> Self {
Self {
catalog_manager,
+ partition_manager,
ctx,
}
}
@@ -41,13 +46,13 @@ impl<'a> RowToRegion<'a> {
let mut region_request = Vec::with_capacity(requests.inserts.len());
for request in requests.inserts {
let table = self.get_table(&request.table_name).await?;
+ let table_id = table.table_info().table_id();
- let region_id = RegionId::new(table.table_info().table_id(), request.region_number);
- let insert_request = RegionInsertRequest {
- region_id: region_id.into(),
- rows: request.rows,
- };
- region_request.push(insert_request);
+ let requests = Partitioner::new(self.partition_manager)
+ .partition_insert_requests(table_id, request.rows.unwrap_or_default())
+ .await?;
+
+ region_request.extend(requests);
}
Ok(RegionInsertRequests {
@@ -63,7 +68,11 @@ impl<'a> RowToRegion<'a> {
.await
.context(CatalogSnafu)?
.with_context(|| TableNotFoundSnafu {
- table_name: format!("{}.{}.{}", catalog_name, schema_name, table_name),
+ table_name: common_catalog::format_full_table_name(
+ catalog_name,
+ schema_name,
+ table_name,
+ ),
})
}
}
diff --git a/src/frontend/src/req_convert/insert/stmt_to_region.rs b/src/frontend/src/req_convert/insert/stmt_to_region.rs
index 167e23fbe7ff..389640659294 100644
--- a/src/frontend/src/req_convert/insert/stmt_to_region.rs
+++ b/src/frontend/src/req_convert/insert/stmt_to_region.rs
@@ -13,18 +13,16 @@
// limitations under the License.
use api::helper::value_to_grpc_value;
-use api::v1::region::{
- InsertRequest as RegionInsertRequest, InsertRequests as RegionInsertRequests,
-};
+use api::v1::region::InsertRequests as RegionInsertRequests;
use api::v1::{ColumnSchema as GrpcColumnSchema, Row, Rows, Value as GrpcValue};
use catalog::CatalogManager;
use datatypes::schema::{ColumnSchema, SchemaRef};
+use partition::manager::PartitionRuleManager;
use session::context::QueryContext;
use snafu::{ensure, OptionExt, ResultExt};
use sql::statements;
use sql::statements::insert::Insert;
use sqlparser::ast::{ObjectName, Value as SqlValue};
-use store_api::storage::RegionId;
use table::TableRef;
use super::{data_type, semantic_type};
@@ -32,18 +30,25 @@ use crate::error::{
CatalogSnafu, ColumnDefaultValueSnafu, ColumnNoneDefaultValueSnafu, ColumnNotFoundSnafu,
InvalidSqlSnafu, MissingInsertBodySnafu, ParseSqlSnafu, Result, TableNotFoundSnafu,
};
+use crate::req_convert::common::partitioner::Partitioner;
const DEFAULT_PLACEHOLDER_VALUE: &str = "default";
pub struct StatementToRegion<'a> {
catalog_manager: &'a dyn CatalogManager,
+ partition_manager: &'a PartitionRuleManager,
ctx: &'a QueryContext,
}
impl<'a> StatementToRegion<'a> {
- pub fn new(catalog_manager: &'a dyn CatalogManager, ctx: &'a QueryContext) -> Self {
+ pub fn new(
+ catalog_manager: &'a dyn CatalogManager,
+ partition_manager: &'a PartitionRuleManager,
+ ctx: &'a QueryContext,
+ ) -> Self {
Self {
catalog_manager,
+ partition_manager,
ctx,
}
}
@@ -63,7 +68,7 @@ impl<'a> StatementToRegion<'a> {
ensure!(
sql_rows.iter().all(|row| row.len() == column_count),
InvalidSqlSnafu {
- err_msg: "The column count of the row is not the same as columns."
+ err_msg: "column count mismatch"
}
);
@@ -98,12 +103,10 @@ impl<'a> StatementToRegion<'a> {
}
}
- Ok(RegionInsertRequests {
- requests: vec![RegionInsertRequest {
- region_id: RegionId::new(table_info.table_id(), 0).into(),
- rows: Some(Rows { schema, rows }),
- }],
- })
+ let requests = Partitioner::new(self.partition_manager)
+ .partition_insert_requests(table_info.table_id(), Rows { schema, rows })
+ .await?;
+ Ok(RegionInsertRequests { requests })
}
async fn get_table(&self, catalog: &str, schema: &str, table: &str) -> Result<TableRef> {
@@ -112,7 +115,7 @@ impl<'a> StatementToRegion<'a> {
.await
.context(CatalogSnafu)?
.with_context(|| TableNotFoundSnafu {
- table_name: format!("{}.{}.{}", catalog, schema, table),
+ table_name: common_catalog::format_full_table_name(catalog, schema, table),
})
}
diff --git a/src/frontend/src/req_convert/insert/table_to_region.rs b/src/frontend/src/req_convert/insert/table_to_region.rs
index 6ea09b3ad4e2..3160a58d0396 100644
--- a/src/frontend/src/req_convert/insert/table_to_region.rs
+++ b/src/frontend/src/req_convert/insert/table_to_region.rs
@@ -12,37 +12,39 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use api::v1::region::{
- InsertRequest as RegionInsertRequest, InsertRequests as RegionInsertRequests,
-};
+use api::v1::region::InsertRequests as RegionInsertRequests;
use api::v1::Rows;
-use store_api::storage::RegionId;
+use partition::manager::PartitionRuleManager;
use table::metadata::TableInfo;
use table::requests::InsertRequest as TableInsertRequest;
use crate::error::Result;
+use crate::req_convert::common::partitioner::Partitioner;
use crate::req_convert::common::{column_schema, row_count};
pub struct TableToRegion<'a> {
table_info: &'a TableInfo,
+ partition_manager: &'a PartitionRuleManager,
}
impl<'a> TableToRegion<'a> {
- pub fn new(table_info: &'a TableInfo) -> Self {
- Self { table_info }
+ pub fn new(table_info: &'a TableInfo, partition_manager: &'a PartitionRuleManager) -> Self {
+ Self {
+ table_info,
+ partition_manager,
+ }
}
- pub fn convert(&self, request: TableInsertRequest) -> Result<RegionInsertRequests> {
- let region_id = RegionId::new(self.table_info.table_id(), request.region_number).into();
+ pub async fn convert(&self, request: TableInsertRequest) -> Result<RegionInsertRequests> {
let row_count = row_count(&request.columns_values)?;
let schema = column_schema(self.table_info, &request.columns_values)?;
let rows = api::helper::vectors_to_rows(request.columns_values.values(), row_count);
- Ok(RegionInsertRequests {
- requests: vec![RegionInsertRequest {
- region_id,
- rows: Some(Rows { schema, rows }),
- }],
- })
+
+ let rows = Rows { schema, rows };
+ let requests = Partitioner::new(self.partition_manager)
+ .partition_insert_requests(self.table_info.table_id(), rows)
+ .await?;
+ Ok(RegionInsertRequests { requests })
}
}
@@ -51,115 +53,120 @@ mod tests {
use std::collections::HashMap;
use std::sync::Arc;
+ use api::v1::region::InsertRequest as RegionInsertRequest;
use api::v1::value::ValueData;
- use api::v1::{ColumnDataType, SemanticType};
+ use api::v1::{ColumnDataType, ColumnSchema, Row, SemanticType, Value};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
- use datatypes::prelude::ConcreteDataType;
- use datatypes::scalars::ScalarVectorBuilder;
- use datatypes::schema::{ColumnSchema as DtColumnSchema, Schema};
- use datatypes::vectors::{Int16VectorBuilder, MutableVector, StringVectorBuilder};
- use table::metadata::{TableInfoBuilder, TableMetaBuilder};
+ use common_meta::key::catalog_name::{CatalogManager, CatalogNameKey};
+ use common_meta::key::schema_name::{SchemaManager, SchemaNameKey};
+ use common_meta::kv_backend::memory::MemoryKvBackend;
+ use common_meta::kv_backend::KvBackendRef;
+ use datatypes::vectors::{Int32Vector, VectorRef};
+ use store_api::storage::RegionId;
use super::*;
+ use crate::table::test::{create_partition_rule_manager, new_test_table_info};
- #[test]
- fn test_insert_request_table_to_region() {
- let schema = Schema::new(vec![
- DtColumnSchema::new("ts", ConcreteDataType::int64_datatype(), false)
- .with_time_index(true),
- DtColumnSchema::new("id", ConcreteDataType::int16_datatype(), false),
- DtColumnSchema::new("host", ConcreteDataType::string_datatype(), false),
- ]);
-
- let table_meta = TableMetaBuilder::default()
- .schema(Arc::new(schema))
- .primary_key_indices(vec![2])
- .next_column_id(3)
- .build()
- .unwrap();
+ async fn prepare_mocked_backend() -> KvBackendRef {
+ let backend = Arc::new(MemoryKvBackend::default());
- let table_info = Arc::new(
- TableInfoBuilder::default()
- .name("demo")
- .meta(table_meta)
- .table_id(1)
- .build()
- .unwrap(),
- );
+ let catalog_manager = CatalogManager::new(backend.clone());
+ let schema_manager = SchemaManager::new(backend.clone());
- let insert_request = mock_insert_request();
- let mut request = TableToRegion::new(&table_info)
- .convert(insert_request)
+ catalog_manager
+ .create(CatalogNameKey::default())
+ .await
+ .unwrap();
+ schema_manager
+ .create(SchemaNameKey::default(), None)
+ .await
.unwrap();
- assert_eq!(request.requests.len(), 1);
- verify_region_insert_request(request.requests.pop().unwrap());
+ backend
}
- fn mock_insert_request() -> TableInsertRequest {
- let mut builder = StringVectorBuilder::with_capacity(3);
- builder.push(Some("host1"));
- builder.push(None);
- builder.push(Some("host3"));
- let host = builder.to_vector();
+ #[tokio::test]
+ async fn test_insert_request_table_to_region() {
+ // region to datanode placement:
+ // 1 -> 1
+ // 2 -> 2
+ // 3 -> 3
+ //
+ // region value ranges:
+ // 1 -> [50, max)
+ // 2 -> [10, 50)
+ // 3 -> (min, 10)
+
+ let backend = prepare_mocked_backend().await;
+ let partition_manager = create_partition_rule_manager(backend.clone()).await;
+ let table_info = new_test_table_info(1, "table_1", vec![0u32, 1, 2].into_iter());
+
+ let converter = TableToRegion::new(&table_info, &partition_manager);
+
+ let table_request = build_table_request(Arc::new(Int32Vector::from(vec![
+ Some(1),
+ None,
+ Some(11),
+ Some(101),
+ ])));
+
+ let region_requests = converter.convert(table_request).await.unwrap();
+ let mut region_id_to_region_requests = region_requests
+ .requests
+ .into_iter()
+ .map(|r| (r.region_id, r))
+ .collect::<HashMap<_, _>>();
+
+ let region_id = RegionId::new(1, 1).as_u64();
+        let region_request = region_id_to_region_requests.remove(®ion_id).unwrap();
+ assert_eq!(
+ region_request,
+ build_region_request(vec![Some(101)], region_id)
+ );
- let mut builder = Int16VectorBuilder::with_capacity(3);
- builder.push(Some(1_i16));
- builder.push(Some(2_i16));
- builder.push(Some(3_i16));
- let id = builder.to_vector();
+ let region_id = RegionId::new(1, 2).as_u64();
+        let region_request = region_id_to_region_requests.remove(®ion_id).unwrap();
+ assert_eq!(
+ region_request,
+ build_region_request(vec![Some(11)], region_id)
+ );
- let columns_values = HashMap::from([("host".to_string(), host), ("id".to_string(), id)]);
+ let region_id = RegionId::new(1, 3).as_u64();
+        let region_request = region_id_to_region_requests.remove(®ion_id).unwrap();
+ assert_eq!(
+ region_request,
+ build_region_request(vec![Some(1), None], region_id)
+ );
+ }
+ fn build_table_request(vector: VectorRef) -> TableInsertRequest {
TableInsertRequest {
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
schema_name: DEFAULT_SCHEMA_NAME.to_string(),
- table_name: "demo".to_string(),
- columns_values,
+ table_name: "table_1".to_string(),
+ columns_values: HashMap::from([("a".to_string(), vector)]),
region_number: 0,
}
}
- fn verify_region_insert_request(request: RegionInsertRequest) {
- assert_eq!(request.region_id, RegionId::new(1, 0).as_u64());
-
- let rows = request.rows.unwrap();
- for (i, column) in rows.schema.iter().enumerate() {
- let name = &column.column_name;
- if name == "id" {
- assert_eq!(ColumnDataType::Int16 as i32, column.datatype);
- assert_eq!(SemanticType::Field as i32, column.semantic_type);
- let values = rows
- .rows
- .iter()
- .map(|row| row.values[i].value_data.clone())
- .collect::<Vec<_>>();
- assert_eq!(
- vec![
- Some(ValueData::I16Value(1)),
- Some(ValueData::I16Value(2)),
- Some(ValueData::I16Value(3))
- ],
- values
- );
- }
- if name == "host" {
- assert_eq!(ColumnDataType::String as i32, column.datatype);
- assert_eq!(SemanticType::Tag as i32, column.semantic_type);
- let values = rows
- .rows
- .iter()
- .map(|row| row.values[i].value_data.clone())
- .collect::<Vec<_>>();
- assert_eq!(
- vec![
- Some(ValueData::StringValue("host1".to_string())),
- None,
- Some(ValueData::StringValue("host3".to_string()))
- ],
- values
- );
- }
+ fn build_region_request(rows: Vec<Option<i32>>, region_id: u64) -> RegionInsertRequest {
+ RegionInsertRequest {
+ region_id,
+ rows: Some(Rows {
+ schema: vec![ColumnSchema {
+ column_name: "a".to_string(),
+ datatype: ColumnDataType::Int32 as i32,
+ semantic_type: SemanticType::Tag as i32,
+ }],
+ rows: rows
+ .into_iter()
+ .map(|v| Row {
+ values: vec![Value {
+ value_data: v.map(ValueData::I32Value),
+ }],
+ })
+ .collect(),
+ }),
}
}
}
diff --git a/src/frontend/src/statement.rs b/src/frontend/src/statement.rs
index 7ff2228c7a25..ae71a6b31a1f 100644
--- a/src/frontend/src/statement.rs
+++ b/src/frontend/src/statement.rs
@@ -25,9 +25,7 @@ use std::collections::HashMap;
use std::str::FromStr;
use std::sync::Arc;
-use api::v1::region::region_request;
use catalog::CatalogManagerRef;
-use client::region_handler::RegionRequestHandlerRef;
use common_error::ext::BoxedError;
use common_meta::cache_invalidator::CacheInvalidatorRef;
use common_meta::ddl::DdlTaskExecutorRef;
@@ -47,16 +45,15 @@ use sql::statements::copy::{CopyDatabaseArgument, CopyTable, CopyTableArgument};
use sql::statements::statement::Statement;
use sqlparser::ast::ObjectName;
use table::engine::TableReference;
-use table::requests::{
- CopyDatabaseRequest, CopyDirection, CopyTableRequest, DeleteRequest, InsertRequest,
-};
+use table::requests::{CopyDatabaseRequest, CopyDirection, CopyTableRequest};
use table::TableRef;
+use crate::delete::DeleterRef;
use crate::error::{
self, CatalogSnafu, ExecLogicalPlanSnafu, ExternalSnafu, InvalidSqlSnafu, PlanStatementSnafu,
- RequestDatanodeSnafu, Result, TableNotFoundSnafu,
+ Result, TableNotFoundSnafu,
};
-use crate::req_convert::{delete, insert};
+use crate::insert::InserterRef;
use crate::statement::backup::{COPY_DATABASE_TIME_END_KEY, COPY_DATABASE_TIME_START_KEY};
use crate::table::table_idents_to_full_name;
@@ -64,30 +61,33 @@ use crate::table::table_idents_to_full_name;
pub struct StatementExecutor {
catalog_manager: CatalogManagerRef,
query_engine: QueryEngineRef,
- region_request_handler: RegionRequestHandlerRef,
ddl_executor: DdlTaskExecutorRef,
table_metadata_manager: TableMetadataManagerRef,
partition_manager: PartitionRuleManagerRef,
cache_invalidator: CacheInvalidatorRef,
+ inserter: InserterRef,
+ deleter: DeleterRef,
}
impl StatementExecutor {
pub(crate) fn new(
catalog_manager: CatalogManagerRef,
query_engine: QueryEngineRef,
- region_request_handler: RegionRequestHandlerRef,
ddl_task_executor: DdlTaskExecutorRef,
kv_backend: KvBackendRef,
cache_invalidator: CacheInvalidatorRef,
+ inserter: InserterRef,
+ deleter: DeleterRef,
) -> Self {
Self {
catalog_manager,
query_engine,
- region_request_handler,
ddl_executor: ddl_task_executor,
table_metadata_manager: Arc::new(TableMetadataManager::new(kv_backend.clone())),
partition_manager: Arc::new(PartitionRuleManager::new(kv_backend)),
cache_invalidator,
+ inserter,
+ deleter,
}
}
@@ -224,50 +224,6 @@ impl StatementExecutor {
table_name: table_ref.to_string(),
})
}
-
- async fn handle_table_insert_request(
- &self,
- request: InsertRequest,
- query_ctx: QueryContextRef,
- ) -> Result<usize> {
- let table_ref = TableReference::full(
- &request.catalog_name,
- &request.schema_name,
- &request.table_name,
- );
- let table = self.get_table(&table_ref).await?;
- let table_info = table.table_info();
-
- let request = insert::TableToRegion::new(&table_info).convert(request)?;
- let affected_rows = self
- .region_request_handler
- .handle(region_request::Body::Inserts(request), query_ctx)
- .await
- .context(RequestDatanodeSnafu)?;
- Ok(affected_rows as _)
- }
-
- async fn handle_table_delete_request(
- &self,
- request: DeleteRequest,
- query_ctx: QueryContextRef,
- ) -> Result<usize> {
- let table_ref = TableReference::full(
- &request.catalog_name,
- &request.schema_name,
- &request.table_name,
- );
- let table = self.get_table(&table_ref).await?;
- let table_info = table.table_info();
-
- let request = delete::TableToRegion::new(&table_info).convert(request)?;
- let affected_rows = self
- .region_request_handler
- .handle(region_request::Body::Deletes(request), query_ctx)
- .await
- .context(RequestDatanodeSnafu)?;
- Ok(affected_rows as _)
- }
}
fn to_copy_table_request(stmt: CopyTable, query_ctx: QueryContextRef) -> Result<CopyTableRequest> {
diff --git a/src/frontend/src/statement/copy_table_from.rs b/src/frontend/src/statement/copy_table_from.rs
index 034443f0ec1d..cb36d41823d3 100644
--- a/src/frontend/src/statement/copy_table_from.rs
+++ b/src/frontend/src/statement/copy_table_from.rs
@@ -327,13 +327,12 @@ impl StatementExecutor {
.zip(vectors)
.collect::<HashMap<_, _>>();
- pending.push(self.handle_table_insert_request(
+ pending.push(self.inserter.handle_table_insert(
InsertRequest {
catalog_name: req.catalog_name.to_string(),
schema_name: req.schema_name.to_string(),
table_name: req.table_name.to_string(),
columns_values,
- // TODO: support multi-regions
region_number: 0,
},
query_ctx.clone(),
diff --git a/src/frontend/src/statement/dml.rs b/src/frontend/src/statement/dml.rs
index d5730fda32e1..f127dda4d48b 100644
--- a/src/frontend/src/statement/dml.rs
+++ b/src/frontend/src/statement/dml.rs
@@ -14,7 +14,6 @@
use std::collections::HashMap;
-use api::v1::region::region_request;
use common_query::Output;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use datafusion_expr::{DmlStatement, LogicalPlan as DfLogicalPlan, WriteOp};
@@ -35,23 +34,16 @@ use table::TableRef;
use super::StatementExecutor;
use crate::error::{
BuildColumnVectorsSnafu, ExecLogicalPlanSnafu, MissingTimeIndexColumnSnafu,
- ReadRecordBatchSnafu, RequestDatanodeSnafu, Result, UnexpectedSnafu,
+ ReadRecordBatchSnafu, Result, UnexpectedSnafu,
};
-use crate::req_convert::insert::StatementToRegion;
impl StatementExecutor {
pub async fn insert(&self, insert: Box<Insert>, query_ctx: QueryContextRef) -> Result<Output> {
if insert.can_extract_values() {
// Fast path: plain insert ("insert with literal values") is executed directly
- let request = StatementToRegion::new(self.catalog_manager.as_ref(), &query_ctx)
- .convert(&insert)
- .await?;
- let affected_rows = self
- .region_request_handler
- .handle(region_request::Body::Inserts(request), query_ctx)
+ self.inserter
+ .handle_statement_insert(insert.as_ref(), &query_ctx)
.await
- .context(RequestDatanodeSnafu)?;
- Ok(Output::AffectedRows(affected_rows as _))
} else {
// Slow path: insert with subquery. Execute the subquery first, via query engine. Then
// insert the results by sending insert requests.
@@ -82,7 +74,8 @@ impl StatementExecutor {
let insert_request =
build_insert_request(record_batch, table.schema(), &table_info)?;
affected_rows += self
- .handle_table_insert_request(insert_request, query_ctx.clone())
+ .inserter
+ .handle_table_insert(insert_request, query_ctx.clone())
.await?;
}
@@ -114,13 +107,14 @@ impl StatementExecutor {
let table_info = table.table_info();
while let Some(batch) = stream.next().await {
let record_batch = batch.context(ReadRecordBatchSnafu)?;
- let delete_request = build_delete_request(record_batch, table.schema(), &table_info)?;
+ let request = build_delete_request(record_batch, table.schema(), &table_info)?;
affected_rows += self
- .handle_table_delete_request(delete_request, query_ctx.clone())
+ .deleter
+ .handle_table_delete(request, query_ctx.clone())
.await?;
}
- Ok(Output::AffectedRows(affected_rows))
+ Ok(Output::AffectedRows(affected_rows as _))
}
async fn execute_dml_subquery(
diff --git a/src/frontend/src/table.rs b/src/frontend/src/table.rs
index 8a19e3627d45..19e33ace2345 100644
--- a/src/frontend/src/table.rs
+++ b/src/frontend/src/table.rs
@@ -116,20 +116,20 @@ pub(crate) mod test {
use super::*;
- fn new_test_table_info(
+ pub fn new_test_table_info(
table_id: u32,
table_name: &str,
region_numbers: impl Iterator<Item = u32>,
) -> TableInfo {
let column_schemas = vec![
- ColumnSchema::new("col1", ConcreteDataType::int32_datatype(), true),
+ ColumnSchema::new("a", ConcreteDataType::int32_datatype(), true),
ColumnSchema::new(
"ts",
ConcreteDataType::timestamp_millisecond_datatype(),
false,
)
.with_time_index(true),
- ColumnSchema::new("col2", ConcreteDataType::int32_datatype(), true),
+ ColumnSchema::new("b", ConcreteDataType::int32_datatype(), true),
];
let schema = SchemaBuilder::try_from(column_schemas)
.unwrap()
diff --git a/src/meta-srv/src/lib.rs b/src/meta-srv/src/lib.rs
index a340fb205bf2..4e5b26e1f532 100644
--- a/src/meta-srv/src/lib.rs
+++ b/src/meta-srv/src/lib.rs
@@ -34,7 +34,7 @@ pub mod procedure;
pub mod pubsub;
pub mod selector;
pub mod service;
-pub mod table_creator;
+pub mod table_meta_alloc;
pub mod table_routes;
pub use crate::error::Result;
diff --git a/src/meta-srv/src/metasrv/builder.rs b/src/meta-srv/src/metasrv/builder.rs
index ae308a8c7607..1c630a30ab59 100644
--- a/src/meta-srv/src/metasrv/builder.rs
+++ b/src/meta-srv/src/metasrv/builder.rs
@@ -54,7 +54,7 @@ use crate::service::mailbox::MailboxRef;
use crate::service::store::cached_kv::{CheckLeader, LeaderCachedKvStore};
use crate::service::store::kv::{KvBackendAdapter, KvStoreRef, ResettableKvStoreRef};
use crate::service::store::memory::MemStore;
-use crate::table_creator::MetaSrvTableMetadataAllocator;
+use crate::table_meta_alloc::MetaSrvTableMetadataAllocator;
// TODO(fys): try use derive_builder macro
pub struct MetaSrvBuilder {
@@ -366,7 +366,7 @@ fn build_ddl_manager(
},
));
- let table_creator = Arc::new(MetaSrvTableMetadataAllocator::new(
+ let table_meta_allocator = Arc::new(MetaSrvTableMetadataAllocator::new(
selector_ctx.clone(),
selector.clone(),
table_id_sequence.clone(),
@@ -377,7 +377,7 @@ fn build_ddl_manager(
datanode_clients,
cache_invalidator,
table_metadata_manager.clone(),
- table_creator,
+ table_meta_allocator,
))
}
diff --git a/src/meta-srv/src/table_creator.rs b/src/meta-srv/src/table_meta_alloc.rs
similarity index 100%
rename from src/meta-srv/src/table_creator.rs
rename to src/meta-srv/src/table_meta_alloc.rs
diff --git a/src/partition/src/manager.rs b/src/partition/src/manager.rs
index 297447f7942b..cb02db0d22ee 100644
--- a/src/partition/src/manager.rs
+++ b/src/partition/src/manager.rs
@@ -15,7 +15,7 @@
use std::collections::{HashMap, HashSet};
use std::sync::Arc;
-use api::v1::region::{DeleteRequest, InsertRequest};
+use api::v1::Rows;
use common_meta::key::table_route::TableRouteManager;
use common_meta::kv_backend::KvBackendRef;
use common_meta::peer::Peer;
@@ -31,7 +31,7 @@ use crate::columns::RangeColumnsPartitionRule;
use crate::error::{FindLeaderSnafu, Result};
use crate::partition::{PartitionBound, PartitionDef, PartitionExpr};
use crate::range::RangePartitionRule;
-use crate::splitter::{DeleteRequestSplits, InsertRequestSplits, RowSplitter};
+use crate::splitter::RowSplitter;
use crate::{error, PartitionRuleRef};
#[async_trait::async_trait]
@@ -247,26 +247,24 @@ impl PartitionRuleManager {
Ok(regions)
}
- /// Split [InsertRequest] into [InsertRequestSplits] according to the partition rule
- /// of given table.
- pub async fn split_insert_request(
- &self,
- table: TableId,
- req: InsertRequest,
- ) -> Result<InsertRequestSplits> {
- let partition_rule = self.find_table_partition_rule(table).await?;
- RowSplitter::new(partition_rule).split_insert(req)
+ pub async fn find_region_leader(&self, region_id: RegionId) -> Result<Peer> {
+ let table_route = self.find_table_route(region_id.table_id()).await?;
+ let peer = table_route
+ .find_region_leader(region_id.region_number())
+ .with_context(|| FindLeaderSnafu {
+ region_id,
+ table_id: region_id.table_id(),
+ })?;
+ Ok(peer.clone())
}
- /// Split [DeleteRequest] into [DeleteRequestSplits] according to the partition rule
- /// of given table.
- pub async fn split_delete_request(
+ pub async fn split_rows(
&self,
- table: TableId,
- req: DeleteRequest,
- ) -> Result<DeleteRequestSplits> {
- let partition_rule = self.find_table_partition_rule(table).await?;
- RowSplitter::new(partition_rule).split_delete(req)
+ table_id: TableId,
+ rows: Rows,
+ ) -> Result<HashMap<RegionNumber, Rows>> {
+ let partition_rule = self.find_table_partition_rule(table_id).await?;
+ RowSplitter::new(partition_rule).split(rows)
}
}
diff --git a/src/partition/src/splitter.rs b/src/partition/src/splitter.rs
index 2bf2b0ce1b17..f7c64d8ba117 100644
--- a/src/partition/src/splitter.rs
+++ b/src/partition/src/splitter.rs
@@ -15,17 +15,13 @@
use std::collections::HashMap;
use api::helper;
-use api::v1::region::{DeleteRequest, InsertRequest};
use api::v1::{ColumnSchema, Row, Rows};
use datatypes::value::Value;
-use store_api::storage::{RegionId, RegionNumber};
+use store_api::storage::RegionNumber;
use crate::error::Result;
use crate::PartitionRuleRef;
-pub type InsertRequestSplits = HashMap<RegionNumber, InsertRequest>;
-pub type DeleteRequestSplits = HashMap<RegionNumber, DeleteRequest>;
-
pub struct RowSplitter {
partition_rule: PartitionRuleRef,
}
@@ -35,43 +31,8 @@ impl RowSplitter {
Self { partition_rule }
}
- pub fn split_insert(&self, req: InsertRequest) -> Result<InsertRequestSplits> {
- let table_id = RegionId::from_u64(req.region_id).table_id();
- Ok(self
- .split(req.rows)?
- .into_iter()
- .map(|(region_number, rows)| {
- let region_id = RegionId::new(table_id, region_number);
- let req = InsertRequest {
- rows: Some(rows),
- region_id: region_id.into(),
- };
- (region_number, req)
- })
- .collect())
- }
-
- pub fn split_delete(&self, req: DeleteRequest) -> Result<DeleteRequestSplits> {
- let table_id = RegionId::from_u64(req.region_id).table_id();
- Ok(self
- .split(req.rows)?
- .into_iter()
- .map(|(region_number, rows)| {
- let region_id = RegionId::new(table_id, region_number);
- let req = DeleteRequest {
- rows: Some(rows),
- region_id: region_id.into(),
- };
- (region_number, req)
- })
- .collect())
- }
-
- fn split(&self, rows: Option<Rows>) -> Result<HashMap<RegionNumber, Rows>> {
+ pub fn split(&self, rows: Rows) -> Result<HashMap<RegionNumber, Rows>> {
// No data
- let Some(rows) = rows else {
- return Ok(HashMap::new());
- };
if rows.rows.is_empty() {
return Ok(HashMap::new());
}
@@ -177,7 +138,7 @@ mod tests {
use crate::partition::PartitionExpr;
use crate::PartitionRule;
- fn mock_insert_request() -> InsertRequest {
+ fn mock_rows() -> Rows {
let schema = vec![
ColumnSchema {
column_name: "id".to_string(),
@@ -218,10 +179,7 @@ mod tests {
],
},
];
- InsertRequest {
- rows: Some(Rows { schema, rows }),
- region_id: 0,
- }
+ Rows { schema, rows }
}
#[derive(Debug, Serialize, Deserialize)]
@@ -301,53 +259,42 @@ mod tests {
#[test]
fn test_writer_splitter() {
- let insert_request = mock_insert_request();
+ let rows = mock_rows();
let rule = Arc::new(MockPartitionRule) as PartitionRuleRef;
let splitter = RowSplitter::new(rule);
- let splits = splitter.split_insert(insert_request).unwrap();
+ let mut splits = splitter.split(rows).unwrap();
assert_eq!(splits.len(), 2);
- let req0 = &splits[&0];
- let req1 = &splits[&1];
- assert_eq!(req0.region_id, 0);
- assert_eq!(req1.region_id, 1);
-
- let rows0 = req0.rows.as_ref().unwrap();
- let rows1 = req1.rows.as_ref().unwrap();
- assert_eq!(rows0.rows.len(), 1);
- assert_eq!(rows1.rows.len(), 2);
+ let rows0 = splits.remove(&0).unwrap().rows;
+ let rows1 = splits.remove(&1).unwrap().rows;
+ assert_eq!(rows0.len(), 1);
+ assert_eq!(rows1.len(), 2);
}
#[test]
fn test_missed_col_writer_splitter() {
- let insert_request = mock_insert_request();
+ let rows = mock_rows();
let rule = Arc::new(MockMissedColPartitionRule) as PartitionRuleRef;
- let splitter = RowSplitter::new(rule);
- let splits = splitter.split_insert(insert_request).unwrap();
+ let splitter = RowSplitter::new(rule);
+ let mut splits = splitter.split(rows).unwrap();
assert_eq!(splits.len(), 1);
- let req = &splits[&1];
- assert_eq!(req.region_id, 1);
-
- let rows = req.rows.as_ref().unwrap();
- assert_eq!(rows.rows.len(), 3);
+ let rows = splits.remove(&1).unwrap().rows;
+ assert_eq!(rows.len(), 3);
}
#[test]
fn test_empty_partition_rule_writer_splitter() {
- let insert_request = mock_insert_request();
+ let rows = mock_rows();
let rule = Arc::new(EmptyPartitionRule) as PartitionRuleRef;
let splitter = RowSplitter::new(rule);
- let splits = splitter.split_insert(insert_request).unwrap();
+ let mut splits = splitter.split(rows).unwrap();
assert_eq!(splits.len(), 1);
- let req = &splits[&0];
- assert_eq!(req.region_id, 0);
-
- let rows = req.rows.as_ref().unwrap();
- assert_eq!(rows.rows.len(), 3);
+ let rows = splits.remove(&0).unwrap().rows;
+ assert_eq!(rows.len(), 3);
}
}
|
feat
|
consolidate Insert request related partitioning and distributed processing operations into Inserter (#2346)
|
567510fa3e6acf3a60257d19bd76ca9f69ebe397
|
2022-08-08 15:57:02
|
evenyag
|
ci: Add pr title checker (#155)
| false
|
diff --git a/.github/pr-title-checker-config.json b/.github/pr-title-checker-config.json
new file mode 100644
index 000000000000..d372e023742b
--- /dev/null
+++ b/.github/pr-title-checker-config.json
@@ -0,0 +1,10 @@
+{
+ "LABEL": {
+ "name": "Invalid PR Title",
+ "color": "B60205"
+ },
+ "CHECKS": {
+ "regexp": "^(feat|fix|test|refactor|chore|style|doc|perf|build|ci|revert)(\\(.*\\))?:.*",
+ "ignoreLabels" : ["ignore-title"]
+ }
+}
diff --git a/.github/workflows/pr-title-checker.yml b/.github/workflows/pr-title-checker.yml
new file mode 100644
index 000000000000..68828ed8949a
--- /dev/null
+++ b/.github/workflows/pr-title-checker.yml
@@ -0,0 +1,19 @@
+name: "PR Title Checker"
+on:
+ pull_request_target:
+ types:
+ - opened
+ - edited
+ - synchronize
+ - labeled
+ - unlabeled
+
+jobs:
+ check:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: thehanimo/[email protected]
+ with:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ pass_on_octokit_error: false
+ configuration_path: ".github/pr-title-checker-config.json"
|
ci
|
Add pr title checker (#155)
|
95787825f1f6c09839271a5e6cc6d11b062bbaad
|
2024-09-19 15:14:44
|
Yohan Wal
|
build(deps): use original jsonb repo (#4742)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 786c1c3a8bdf..88c7f7f080b3 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -5481,7 +5481,7 @@ dependencies = [
[[package]]
name = "jsonb"
version = "0.4.1"
-source = "git+https://github.com/CookiePieWw/jsonb.git?rev=d0166c130fce903bf6c58643417a3173a6172d31#d0166c130fce903bf6c58643417a3173a6172d31"
+source = "git+https://github.com/datafuselabs/jsonb.git?rev=46ad50fc71cf75afbf98eec455f7892a6387c1fc#46ad50fc71cf75afbf98eec455f7892a6387c1fc"
dependencies = [
"byteorder",
"fast-float",
diff --git a/Cargo.toml b/Cargo.toml
index c1eea12a53bc..25cd5b5dd2d5 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -124,7 +124,7 @@ greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", r
humantime = "2.1"
humantime-serde = "1.1"
itertools = "0.10"
-jsonb = { git = "https://github.com/CookiePieWw/jsonb.git", rev = "d0166c130fce903bf6c58643417a3173a6172d31", default-features = false }
+jsonb = { git = "https://github.com/datafuselabs/jsonb.git", rev = "46ad50fc71cf75afbf98eec455f7892a6387c1fc", default-features = false }
lazy_static = "1.4"
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "80eb97c24c88af4dd9a86f8bbaf50e741d4eb8cd" }
mockall = "0.11.4"
|
build
|
use original jsonb repo (#4742)
|
1641fd572a59bfbef36e2323a35ec8dbd1c67022
|
2023-12-25 17:07:50
|
LFC
|
refactor: hide `RegionRoute` behind `TableRouteValue` (#2989)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 89e77773b2e3..e343909ce95c 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -136,9 +136,9 @@ dependencies = [
[[package]]
name = "anstream"
-version = "0.6.4"
+version = "0.6.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2ab91ebe16eb252986481c5b62f6098f3b698a45e34b5b98200cf20dd2484a44"
+checksum = "d664a92ecae85fd0a7392615844904654d1d5f5514837f471ddef4a057aba1b6"
dependencies = [
"anstyle",
"anstyle-parse",
@@ -156,37 +156,37 @@ checksum = "7079075b41f533b8c61d2a4d073c4676e1f8b249ff94a393b0595db304e0dd87"
[[package]]
name = "anstyle-parse"
-version = "0.2.2"
+version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "317b9a89c1868f5ea6ff1d9539a69f45dffc21ce321ac1fd1160dfa48c8e2140"
+checksum = "c75ac65da39e5fe5ab759307499ddad880d724eed2f6ce5b5e8a26f4f387928c"
dependencies = [
"utf8parse",
]
[[package]]
name = "anstyle-query"
-version = "1.0.0"
+version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b"
+checksum = "e28923312444cdd728e4738b3f9c9cac739500909bb3d3c94b43551b16517648"
dependencies = [
- "windows-sys 0.48.0",
+ "windows-sys 0.52.0",
]
[[package]]
name = "anstyle-wincon"
-version = "3.0.1"
+version = "3.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f0699d10d2f4d628a98ee7b57b289abbc98ff3bad977cb3152709d4bf2330628"
+checksum = "1cd54b81ec8d6180e24654d0b371ad22fc3dd083b6ff8ba325b72e00c87660a7"
dependencies = [
"anstyle",
- "windows-sys 0.48.0",
+ "windows-sys 0.52.0",
]
[[package]]
name = "anyhow"
-version = "1.0.75"
+version = "1.0.76"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6"
+checksum = "59d2a3357dde987206219e78ecfbbb6e8dad06cbb65292758d3270e6254f7355"
[[package]]
name = "anymap"
@@ -206,7 +206,7 @@ dependencies = [
"datatypes",
"greptime-proto",
"paste",
- "prost 0.12.2",
+ "prost 0.12.3",
"snafu",
"tonic 0.10.2",
"tonic-build 0.9.2",
@@ -229,9 +229,9 @@ checksum = "b3f9eb837c6a783fbf002e3e5cc7925a3aa6893d6d42f9169517528983777590"
[[package]]
name = "aquamarine"
-version = "0.3.2"
+version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "df752953c49ce90719c7bf1fc587bc8227aed04732ea0c0f85e5397d7fdbd1a1"
+checksum = "d1da02abba9f9063d786eab1509833ebb2fac0f966862ca59439c76b9c566760"
dependencies = [
"include_dir",
"itertools 0.10.5",
@@ -316,7 +316,7 @@ dependencies = [
"chrono",
"chrono-tz 0.8.4",
"half 2.3.1",
- "hashbrown 0.14.2",
+ "hashbrown 0.14.3",
"num",
]
@@ -395,7 +395,7 @@ dependencies = [
"bytes",
"futures",
"paste",
- "prost 0.12.2",
+ "prost 0.12.3",
"tokio",
"tonic 0.10.2",
]
@@ -461,7 +461,7 @@ dependencies = [
"arrow-data",
"arrow-schema",
"half 2.3.1",
- "hashbrown 0.14.2",
+ "hashbrown 0.14.3",
]
[[package]]
@@ -597,7 +597,7 @@ checksum = "5fd55a5ba1179988837d24ab4c7cc8ed6efdeff578ede0416b4225a5fca35bd0"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.39",
+ "syn 2.0.42",
]
[[package]]
@@ -619,18 +619,18 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.39",
+ "syn 2.0.42",
]
[[package]]
name = "async-trait"
-version = "0.1.74"
+version = "0.1.75"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9"
+checksum = "fdf6721fb0140e4f897002dd086c06f6c27775df19cfe1fccb21181a48fd2c98"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.39",
+ "syn 2.0.42",
]
[[package]]
@@ -770,7 +770,7 @@ dependencies = [
"heck",
"proc-macro2",
"quote",
- "syn 2.0.39",
+ "syn 2.0.42",
]
[[package]]
@@ -851,7 +851,7 @@ version = "0.4.4"
dependencies = [
"arrow",
"chrono",
- "clap 4.4.8",
+ "clap 4.4.11",
"client",
"futures-util",
"indicatif",
@@ -899,7 +899,7 @@ dependencies = [
"regex",
"rustc-hash",
"shlex",
- "syn 2.0.39",
+ "syn 2.0.42",
]
[[package]]
@@ -974,9 +974,9 @@ dependencies = [
[[package]]
name = "borsh"
-version = "1.2.0"
+version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bf617fabf5cdbdc92f774bfe5062d870f228b80056d41180797abf48bed4056e"
+checksum = "26d4d6dafc1a3bb54687538972158f07b2c948bc57d5890df22c0739098b3028"
dependencies = [
"borsh-derive",
"cfg_aliases",
@@ -984,15 +984,15 @@ dependencies = [
[[package]]
name = "borsh-derive"
-version = "1.2.0"
+version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f404657a7ea7b5249e36808dff544bc88a28f26e0ac40009f674b7a009d14be3"
+checksum = "bf4918709cc4dd777ad2b6303ed03cb37f3ca0ccede8c1b0d28ac6db8f4710e0"
dependencies = [
"once_cell",
- "proc-macro-crate 2.0.0",
+ "proc-macro-crate 2.0.1",
"proc-macro2",
"quote",
- "syn 2.0.39",
+ "syn 2.0.42",
"syn_derive",
]
@@ -1401,9 +1401,9 @@ dependencies = [
[[package]]
name = "clap"
-version = "4.4.8"
+version = "4.4.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2275f18819641850fa26c89acc84d465c1bf91ce57bc2748b28c420473352f64"
+checksum = "bfaff671f6b22ca62406885ece523383b9b64022e341e53e009a62ebc47a45f2"
dependencies = [
"clap_builder",
"clap_derive",
@@ -1411,9 +1411,9 @@ dependencies = [
[[package]]
name = "clap_builder"
-version = "4.4.8"
+version = "4.4.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "07cdf1b148b25c1e1f7a42225e30a0d99a615cd4637eae7365548dd4529b95bc"
+checksum = "a216b506622bb1d316cd51328dce24e07bdff4a6128a47c7e7fad11878d5adbb"
dependencies = [
"anstream",
"anstyle",
@@ -1430,7 +1430,7 @@ dependencies = [
"heck",
"proc-macro2",
"quote",
- "syn 2.0.39",
+ "syn 2.0.42",
]
[[package]]
@@ -1478,7 +1478,7 @@ dependencies = [
"moka",
"parking_lot 0.12.1",
"prometheus",
- "prost 0.12.2",
+ "prost 0.12.3",
"rand",
"session",
"snafu",
@@ -1520,7 +1520,7 @@ dependencies = [
"auth",
"catalog",
"chrono",
- "clap 4.4.8",
+ "clap 4.4.11",
"client",
"common-base",
"common-catalog",
@@ -1550,7 +1550,7 @@ dependencies = [
"partition",
"plugins",
"prometheus",
- "prost 0.12.2",
+ "prost 0.12.3",
"query",
"rand",
"regex",
@@ -1750,7 +1750,7 @@ dependencies = [
"flatbuffers",
"futures",
"lazy_static",
- "prost 0.12.2",
+ "prost 0.12.3",
"rand",
"snafu",
"tokio",
@@ -1789,7 +1789,7 @@ dependencies = [
"snafu",
"static_assertions",
"syn 1.0.109",
- "syn 2.0.39",
+ "syn 2.0.42",
]
[[package]]
@@ -1834,7 +1834,7 @@ dependencies = [
"hyper",
"lazy_static",
"prometheus",
- "prost 0.12.2",
+ "prost 0.12.3",
"rand",
"regex",
"rskafka",
@@ -1999,18 +1999,18 @@ dependencies = [
[[package]]
name = "concurrent-queue"
-version = "2.3.0"
+version = "2.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f057a694a54f12365049b0958a1685bb52d567f5593b355fbf685838e873d400"
+checksum = "d16048cd947b08fa32c24458a22f5dc5e835264f689f4f5653210c69fd107363"
dependencies = [
"crossbeam-utils",
]
[[package]]
name = "config"
-version = "0.13.3"
+version = "0.13.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d379af7f68bfc21714c6c7dea883544201741d2ce8274bb12fa54f89507f52a7"
+checksum = "23738e11972c7643e4ec947840fc463b6a571afcd3e735bdfce7d03c7a784aca"
dependencies = [
"async-trait",
"json5",
@@ -2082,9 +2082,9 @@ checksum = "e4c78c047431fee22c1a7bb92e00ad095a02a983affe4d8a72e2a2c62c1b94f3"
[[package]]
name = "const-oid"
-version = "0.9.5"
+version = "0.9.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "28c122c3980598d243d63d9a704629a2d748d101f278052ff068be5a4423ab6f"
+checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8"
[[package]]
name = "const-random"
@@ -2114,9 +2114,9 @@ checksum = "f7144d30dcf0fafbce74250a3963025d8d52177934239851c917d29f1df280c2"
[[package]]
name = "core-foundation"
-version = "0.9.3"
+version = "0.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146"
+checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f"
dependencies = [
"core-foundation-sys",
"libc",
@@ -2124,9 +2124,9 @@ dependencies = [
[[package]]
name = "core-foundation-sys"
-version = "0.8.4"
+version = "0.8.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa"
+checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f"
[[package]]
name = "cpp_demangle"
@@ -2233,9 +2233,9 @@ dependencies = [
[[package]]
name = "crossbeam-channel"
-version = "0.5.8"
+version = "0.5.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200"
+checksum = "14c3242926edf34aec4ac3a77108ad4854bffaa2e4ddc1824124ce59231302d5"
dependencies = [
"cfg-if 1.0.0",
"crossbeam-utils",
@@ -2243,9 +2243,9 @@ dependencies = [
[[package]]
name = "crossbeam-deque"
-version = "0.8.3"
+version = "0.8.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef"
+checksum = "fca89a0e215bab21874660c67903c5f143333cab1da83d041c7ded6053774751"
dependencies = [
"cfg-if 1.0.0",
"crossbeam-epoch",
@@ -2254,22 +2254,21 @@ dependencies = [
[[package]]
name = "crossbeam-epoch"
-version = "0.9.15"
+version = "0.9.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7"
+checksum = "2d2fe95351b870527a5d09bf563ed3c97c0cffb87cf1c78a591bf48bb218d9aa"
dependencies = [
"autocfg",
"cfg-if 1.0.0",
"crossbeam-utils",
"memoffset 0.9.0",
- "scopeguard",
]
[[package]]
name = "crossbeam-queue"
-version = "0.3.8"
+version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d1cfb3ea8a53f37c40dea2c7bedcbd88bdfae54f5e2175d6ecaff1c988353add"
+checksum = "b9bcf5bdbfdd6030fb4a1c497b5d5fc5921aa2f60d359a17e249c0e6df3de153"
dependencies = [
"cfg-if 1.0.0",
"crossbeam-utils",
@@ -2277,9 +2276,9 @@ dependencies = [
[[package]]
name = "crossbeam-utils"
-version = "0.8.16"
+version = "0.8.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294"
+checksum = "c06d96137f14f244c37f989d9fff8f95e6c18b918e71f36638f8c49112e4c78f"
dependencies = [
"cfg-if 1.0.0",
]
@@ -2376,7 +2375,7 @@ dependencies = [
"proc-macro2",
"quote",
"strsim 0.10.0",
- "syn 2.0.39",
+ "syn 2.0.42",
]
[[package]]
@@ -2398,7 +2397,7 @@ checksum = "836a9bbc7ad63342d6d6e7b815ccab164bc77a2d95d84bc3117a8c0d5c98e2d5"
dependencies = [
"darling_core 0.20.3",
"quote",
- "syn 2.0.39",
+ "syn 2.0.42",
]
[[package]]
@@ -2408,7 +2407,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856"
dependencies = [
"cfg-if 1.0.0",
- "hashbrown 0.14.2",
+ "hashbrown 0.14.3",
"lock_api",
"once_cell",
"parking_lot_core 0.9.9",
@@ -2440,7 +2439,7 @@ dependencies = [
"futures",
"glob",
"half 2.3.1",
- "hashbrown 0.14.2",
+ "hashbrown 0.14.3",
"indexmap 2.1.0",
"itertools 0.11.0",
"log",
@@ -2490,7 +2489,7 @@ dependencies = [
"datafusion-common",
"datafusion-expr",
"futures",
- "hashbrown 0.14.2",
+ "hashbrown 0.14.3",
"log",
"object_store",
"parking_lot 0.12.1",
@@ -2524,7 +2523,7 @@ dependencies = [
"datafusion-common",
"datafusion-expr",
"datafusion-physical-expr",
- "hashbrown 0.14.2",
+ "hashbrown 0.14.3",
"itertools 0.11.0",
"log",
"regex-syntax 0.8.2",
@@ -2547,7 +2546,7 @@ dependencies = [
"datafusion-common",
"datafusion-expr",
"half 2.3.1",
- "hashbrown 0.14.2",
+ "hashbrown 0.14.3",
"hex",
"indexmap 2.1.0",
"itertools 0.11.0",
@@ -2581,7 +2580,7 @@ dependencies = [
"datafusion-physical-expr",
"futures",
"half 2.3.1",
- "hashbrown 0.14.2",
+ "hashbrown 0.14.3",
"indexmap 2.1.0",
"itertools 0.11.0",
"log",
@@ -2616,8 +2615,8 @@ dependencies = [
"datafusion",
"itertools 0.11.0",
"object_store",
- "prost 0.12.2",
- "prost-types 0.12.2",
+ "prost 0.12.3",
+ "prost-types 0.12.3",
"substrait 0.17.1",
"tokio",
]
@@ -2673,7 +2672,7 @@ dependencies = [
"object-store",
"pin-project",
"prometheus",
- "prost 0.12.2",
+ "prost 0.12.3",
"query",
"reqwest",
"secrecy",
@@ -2747,16 +2746,16 @@ version = "0.7.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c"
dependencies = [
- "const-oid 0.9.5",
+ "const-oid 0.9.6",
"pem-rfc7468 0.7.0",
"zeroize",
]
[[package]]
name = "deranged"
-version = "0.3.9"
+version = "0.3.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0f32d04922c60427da6f9fef14d042d9edddef64cb9d4ce0d64d0685fbeb1fd3"
+checksum = "8eb30d70a07a3b04884d2677f06bec33509dc67ca60d92949e5535352d3191dc"
dependencies = [
"powerfmt",
"serde",
@@ -2781,7 +2780,7 @@ checksum = "d150dea618e920167e5973d70ae6ece4385b7164e0d799fe7c122dd0a5d912ad"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.39",
+ "syn 2.0.42",
]
[[package]]
@@ -2865,7 +2864,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292"
dependencies = [
"block-buffer",
- "const-oid 0.9.5",
+ "const-oid 0.9.6",
"crypto-common",
"subtle",
]
@@ -3021,7 +3020,7 @@ checksum = "eecf8589574ce9b895052fa12d69af7a233f99e6107f5cb8dd1044f2a17bfdcb"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.39",
+ "syn 2.0.42",
]
[[package]]
@@ -3033,7 +3032,7 @@ dependencies = [
"once_cell",
"proc-macro2",
"quote",
- "syn 2.0.39",
+ "syn 2.0.42",
]
[[package]]
@@ -3044,21 +3043,21 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5"
[[package]]
name = "erased-serde"
-version = "0.3.31"
+version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6c138974f9d5e7fe373eb04df7cae98833802ae4b11c24ac7039a21d5af4b26c"
+checksum = "4adbf0983fe06bd3a5c19c8477a637c2389feb0994eca7a59e3b961054aa7c0a"
dependencies = [
"serde",
]
[[package]]
name = "errno"
-version = "0.3.7"
+version = "0.3.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f258a7194e7f7c2a7837a8913aeab7fd8c383457034fa20ce4dd3dcb813e8eb8"
+checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245"
dependencies = [
"libc",
- "windows-sys 0.48.0",
+ "windows-sys 0.52.0",
]
[[package]]
@@ -3087,7 +3086,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f5231ad671c74ee5dc02753a0a9c855fe6e90de2a07acb2582f8a702470e04d1"
dependencies = [
"http",
- "prost 0.12.2",
+ "prost 0.12.3",
"tokio",
"tokio-stream",
"tonic 0.10.2",
@@ -3153,7 +3152,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ef033ed5e9bad94e55838ca0ca906db0e043f517adda0c8b79c7a8c66c93c1b5"
dependencies = [
"cfg-if 1.0.0",
- "rustix 0.38.25",
+ "rustix 0.38.28",
"windows-sys 0.48.0",
]
@@ -3188,14 +3187,14 @@ dependencies = [
[[package]]
name = "filetime"
-version = "0.2.22"
+version = "0.2.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d4029edd3e734da6fe05b6cd7bd2960760a616bd2ddd0d59a0124746d6272af0"
+checksum = "1ee447700ac8aa0b2f2bd7bc4462ad686ba06baa6727ac149a2d6277f0d240fd"
dependencies = [
"cfg-if 1.0.0",
"libc",
- "redox_syscall 0.3.5",
- "windows-sys 0.48.0",
+ "redox_syscall 0.4.1",
+ "windows-sys 0.52.0",
]
[[package]]
@@ -3340,7 +3339,7 @@ dependencies = [
"operator",
"partition",
"prometheus",
- "prost 0.12.2",
+ "prost 0.12.3",
"query",
"raft-engine",
"regex",
@@ -3388,7 +3387,7 @@ checksum = "b0fa992f1656e1707946bbba340ad244f0814009ef8c0118eb7b658395f19a2e"
dependencies = [
"frunk_proc_macro_helpers",
"quote",
- "syn 2.0.39",
+ "syn 2.0.42",
]
[[package]]
@@ -3400,7 +3399,7 @@ dependencies = [
"frunk_core",
"proc-macro2",
"quote",
- "syn 2.0.39",
+ "syn 2.0.42",
]
[[package]]
@@ -3412,7 +3411,7 @@ dependencies = [
"frunk_core",
"frunk_proc_macro_helpers",
"quote",
- "syn 2.0.39",
+ "syn 2.0.42",
]
[[package]]
@@ -3504,7 +3503,7 @@ checksum = "53b153fd91e4b0147f4aced87be237c98248656bb01050b96bf3ee89220a8ddb"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.39",
+ "syn 2.0.42",
]
[[package]]
@@ -3599,9 +3598,9 @@ dependencies = [
[[package]]
name = "gimli"
-version = "0.28.0"
+version = "0.28.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6fb8d784f27acf97159b40fc4db5ecd8aa23b9ad5ef69cdd136d3bc80665f0c0"
+checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253"
[[package]]
name = "git2"
@@ -3627,7 +3626,7 @@ name = "greptime-proto"
version = "0.1.0"
source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=a31ea166fc015ea7ff111ac94e26c3a5d64364d2#a31ea166fc015ea7ff111ac94e26c3a5d64364d2"
dependencies = [
- "prost 0.12.2",
+ "prost 0.12.3",
"serde",
"serde_json",
"strum 0.25.0",
@@ -3692,9 +3691,9 @@ dependencies = [
[[package]]
name = "hashbrown"
-version = "0.14.2"
+version = "0.14.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f93e7192158dbcda357bdec5fb5788eebf8bbac027f3f33e719d29135ae84156"
+checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604"
dependencies = [
"ahash 0.8.6",
"allocator-api2",
@@ -3706,7 +3705,7 @@ version = "0.8.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7"
dependencies = [
- "hashbrown 0.14.2",
+ "hashbrown 0.14.3",
]
[[package]]
@@ -3784,9 +3783,9 @@ checksum = "dfa686283ad6dd069f105e5ab091b04c62850d3e4cf5d67debad1933f55023df"
[[package]]
name = "hkdf"
-version = "0.12.3"
+version = "0.12.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "791a029f6b9fc27657f6f188ec6e5e43f6911f6f878e0dc5501396e09809d437"
+checksum = "7b5f8eb2ad728638ea2c7d47a21db23b7b58a72ed6a38256b8a1849f15fbbdf7"
dependencies = [
"hmac",
]
@@ -3802,11 +3801,11 @@ dependencies = [
[[package]]
name = "home"
-version = "0.5.5"
+version = "0.5.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5444c27eef6923071f7ebcc33e3444508466a76f7a2b93da00ed6e19f30c1ddb"
+checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5"
dependencies = [
- "windows-sys 0.48.0",
+ "windows-sys 0.52.0",
]
[[package]]
@@ -3833,9 +3832,9 @@ dependencies = [
[[package]]
name = "http-body"
-version = "0.4.5"
+version = "0.4.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1"
+checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2"
dependencies = [
"bytes",
"http",
@@ -3878,9 +3877,9 @@ dependencies = [
[[package]]
name = "hyper"
-version = "0.14.27"
+version = "0.14.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468"
+checksum = "bf96e135eb83a2a8ddf766e426a841d8ddd7449d5f00d34ea02b41d2f19eef80"
dependencies = [
"bytes",
"futures-channel",
@@ -3893,7 +3892,7 @@ dependencies = [
"httpdate",
"itoa",
"pin-project-lite",
- "socket2 0.4.10",
+ "socket2 0.5.5",
"tokio",
"tower-service",
"tracing",
@@ -3909,7 +3908,7 @@ dependencies = [
"futures-util",
"http",
"hyper",
- "rustls 0.21.9",
+ "rustls 0.21.10",
"tokio",
"tokio-rustls 0.24.1",
]
@@ -4007,7 +4006,7 @@ dependencies = [
"greptime-proto",
"mockall",
"pin-project",
- "prost 0.12.2",
+ "prost 0.12.3",
"rand",
"regex",
"regex-automata 0.1.10",
@@ -4034,7 +4033,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d530e1a18b1cb4c484e6e34556a0d948706958449fca0cab753d649f2bce3d1f"
dependencies = [
"equivalent",
- "hashbrown 0.14.2",
+ "hashbrown 0.14.3",
"serde",
]
@@ -4059,9 +4058,9 @@ checksum = "bfa799dd5ed20a7e349f3b4639aa80d74549c81716d9ec4f994c9b5815598306"
[[package]]
name = "inferno"
-version = "0.11.18"
+version = "0.11.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "abfb2e51b23c338595ae0b6bdaaa7a4a8b860b8d788a4331cb07b50fe5dea71b"
+checksum = "321f0f839cd44a4686e9504b0a62b4d69a50b62072144c71c68f5873c167b8d9"
dependencies = [
"ahash 0.8.6",
"indexmap 2.1.0",
@@ -4109,9 +4108,9 @@ checksum = "924df4f0e24e2e7f9cdd90babb0b96f93b20f3ecfa949ea9e6613756b8c8e1bf"
[[package]]
name = "inventory"
-version = "0.3.13"
+version = "0.3.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0508c56cfe9bfd5dfeb0c22ab9a6abfda2f27bdca422132e494266351ed8d83c"
+checksum = "c8573b2b1fb643a372c73b23f4da5f888677feef3305146d68a539250a9bccc7"
[[package]]
name = "io-lifetimes"
@@ -4159,7 +4158,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b"
dependencies = [
"hermit-abi 0.3.3",
- "rustix 0.38.25",
+ "rustix 0.38.28",
"windows-sys 0.48.0",
]
@@ -4181,11 +4180,20 @@ dependencies = [
"either",
]
+[[package]]
+name = "itertools"
+version = "0.12.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "25db6b064527c5d482d0423354fcd07a89a2dfe07b67892e62411946db7f07b0"
+dependencies = [
+ "either",
+]
+
[[package]]
name = "itoa"
-version = "1.0.9"
+version = "1.0.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38"
+checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c"
[[package]]
name = "jobserver"
@@ -4198,9 +4206,9 @@ dependencies = [
[[package]]
name = "js-sys"
-version = "0.3.65"
+version = "0.3.66"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "54c0c35952f67de54bb584e9fd912b3023117cbafc0a77d8f3dee1fb5f572fe8"
+checksum = "cee9c64da59eae3b50095c18d3e74f8b73c0b86d2792824ff01bbce68ba229ca"
dependencies = [
"wasm-bindgen",
]
@@ -4218,13 +4226,14 @@ dependencies = [
[[package]]
name = "jsonwebtoken"
-version = "8.3.0"
+version = "9.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378"
+checksum = "5c7ea04a7c5c055c175f189b6dc6ba036fd62306b58c66c9f6389036c503a3f4"
dependencies = [
"base64 0.21.5",
- "pem 1.1.1",
- "ring 0.16.20",
+ "js-sys",
+ "pem",
+ "ring 0.17.7",
"serde",
"serde_json",
"simple_asn1",
@@ -4360,9 +4369,9 @@ dependencies = [
[[package]]
name = "libc"
-version = "0.2.150"
+version = "0.2.151"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "89d92a4743f9a61002fae18374ed11e7973f530cb3a3255fb354818118b2203c"
+checksum = "302d7ab3130588088d277783b1e2d2e10c9e9e4a16dd9050e6ec93fb3e7048f4"
[[package]]
name = "libgit2-sys"
@@ -4440,9 +4449,9 @@ checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4"
[[package]]
name = "linux-raw-sys"
-version = "0.4.11"
+version = "0.4.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "969488b55f8ac402214f3f5fd243ebb7206cf82de60d3172994707a4bcc2b829"
+checksum = "c4cd1a83af159aa67994778be9070f0ae1bd732942279cabb14f86f986a21456"
[[package]]
name = "lock_api"
@@ -4551,7 +4560,7 @@ version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2994eeba8ed550fd9b47a0b38f0242bc3344e496483c6180b69139cc2fa5d1d7"
dependencies = [
- "hashbrown 0.14.2",
+ "hashbrown 0.14.3",
]
[[package]]
@@ -4606,9 +4615,9 @@ dependencies = [
[[package]]
name = "mach2"
-version = "0.4.1"
+version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6d0d1830bcd151a6fc4aea1369af235b36c1528fe976b8ff678683c9995eade8"
+checksum = "19b955cdeb2a02b9117f121ce63aa52d08ade45de53e48fe6a38b39c10f6f709"
dependencies = [
"libc",
]
@@ -4700,9 +4709,9 @@ dependencies = [
[[package]]
name = "memmap2"
-version = "0.8.0"
+version = "0.9.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "43a5a03cefb0d953ec0be133036f14e109412fa594edc2f77227249db66cc3ed"
+checksum = "45fd3a57831bf88bc63f8cebc0cf956116276e97fef3966103e96416209f7c92"
dependencies = [
"libc",
]
@@ -4801,7 +4810,7 @@ dependencies = [
"once_cell",
"parking_lot 0.12.1",
"prometheus",
- "prost 0.12.2",
+ "prost 0.12.3",
"rand",
"regex",
"serde",
@@ -4903,9 +4912,9 @@ dependencies = [
[[package]]
name = "mio"
-version = "0.8.9"
+version = "0.8.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3dce281c5e46beae905d4de1870d8b1509a9142b62eedf18b443b011ca8343d0"
+checksum = "8f3d0b296e374a4e6f3c7b0a1f5a51d748a0d34c85e7dc48fc3fa9a87657fe09"
dependencies = [
"libc",
"log",
@@ -4957,7 +4966,7 @@ dependencies = [
"parquet",
"paste",
"prometheus",
- "prost 0.12.2",
+ "prost 0.12.3",
"regex",
"serde",
"serde_json",
@@ -5052,7 +5061,7 @@ dependencies = [
"proc-macro-error",
"proc-macro2",
"quote",
- "syn 2.0.39",
+ "syn 2.0.42",
"termcolor",
"thiserror",
]
@@ -5075,11 +5084,11 @@ dependencies = [
"mio",
"mysql_common",
"once_cell",
- "pem 3.0.2",
+ "pem",
"percent-encoding",
"pin-project",
"rand",
- "rustls 0.21.9",
+ "rustls 0.21.10",
"rustls-pemfile 1.0.4",
"serde",
"serde_json",
@@ -5303,7 +5312,7 @@ checksum = "cfb77679af88f8b125209d354a202862602672222e7f2313fdd6dc349bad4712"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.39",
+ "syn 2.0.42",
]
[[package]]
@@ -5451,9 +5460,9 @@ dependencies = [
[[package]]
name = "once_cell"
-version = "1.18.0"
+version = "1.19.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d"
+checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92"
[[package]]
name = "oorandom"
@@ -5584,7 +5593,7 @@ source = "git+https://github.com/waynexia/opentelemetry-rust.git?rev=33841b38dda
dependencies = [
"opentelemetry 0.21.0 (git+https://github.com/waynexia/opentelemetry-rust.git?rev=33841b38dda79b15f2024952be5f32533325ca02)",
"opentelemetry_sdk 0.20.0",
- "prost 0.12.2",
+ "prost 0.12.3",
"tonic 0.10.2",
]
@@ -5622,7 +5631,7 @@ dependencies = [
"glob",
"once_cell",
"opentelemetry 0.21.0 (git+https://github.com/waynexia/opentelemetry-rust.git?rev=33841b38dda79b15f2024952be5f32533325ca02)",
- "ordered-float 4.1.1",
+ "ordered-float 4.2.0",
"percent-encoding",
"rand",
"thiserror",
@@ -5642,7 +5651,7 @@ dependencies = [
"glob",
"once_cell",
"opentelemetry 0.21.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "ordered-float 4.1.1",
+ "ordered-float 4.2.0",
"percent-encoding",
"rand",
"thiserror",
@@ -5752,9 +5761,9 @@ dependencies = [
[[package]]
name = "ordered-float"
-version = "4.1.1"
+version = "4.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "536900a8093134cf9ccf00a27deb3532421099e958d9dd431135d0c7543ca1e8"
+checksum = "a76df7075c7d4d01fdcb46c912dd17fba5b60c78ea480b475f2b6ab6f666584e"
dependencies = [
"num-traits",
]
@@ -5776,7 +5785,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a4d6a8c22fc714f0c2373e6091bf6f5e9b37b1bc0b1184874b7e0a4e303d318f"
dependencies = [
"dlv-list 0.5.2",
- "hashbrown 0.14.2",
+ "hashbrown 0.14.3",
]
[[package]]
@@ -5891,7 +5900,7 @@ dependencies = [
"chrono",
"flate2",
"futures",
- "hashbrown 0.14.2",
+ "hashbrown 0.14.3",
"lz4",
"num",
"num-bigint",
@@ -5961,18 +5970,9 @@ checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099"
[[package]]
name = "pem"
-version = "1.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a8835c273a76a90455d7344889b0964598e3316e2a79ede8e36f16bdcf2228b8"
-dependencies = [
- "base64 0.13.1",
-]
-
-[[package]]
-name = "pem"
-version = "3.0.2"
+version = "3.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3163d2912b7c3b52d651a055f2c7eec9ba5cd22d26ef75b8dd3a59980b185923"
+checksum = "1b8fcc794035347fb64beda2d3b462595dd2753e3f268d89c5aae77e8cf2c310"
dependencies = [
"base64 0.21.5",
"serde",
@@ -6033,7 +6033,7 @@ dependencies = [
"pest_meta",
"proc-macro2",
"quote",
- "syn 2.0.39",
+ "syn 2.0.42",
]
[[package]]
@@ -6075,7 +6075,7 @@ dependencies = [
"md5",
"postgres-types",
"rand",
- "ring 0.17.5",
+ "ring 0.17.7",
"stringprep",
"thiserror",
"time",
@@ -6150,7 +6150,7 @@ checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.39",
+ "syn 2.0.42",
]
[[package]]
@@ -6184,7 +6184,7 @@ checksum = "c8ffb9f10fa047879315e6625af03c164b16962a5368d724ed16323b68ace47f"
dependencies = [
"der 0.7.8",
"pkcs8 0.10.2",
- "spki 0.7.2",
+ "spki 0.7.3",
]
[[package]]
@@ -6205,14 +6205,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7"
dependencies = [
"der 0.7.8",
- "spki 0.7.2",
+ "spki 0.7.3",
]
[[package]]
name = "pkg-config"
-version = "0.3.27"
+version = "0.3.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964"
+checksum = "69d3587f8a9e599cc7ec2c00e331f71c4e69a5f9a4b8a6efd5b07466b9736f9a"
[[package]]
name = "plotters"
@@ -6267,9 +6267,9 @@ dependencies = [
[[package]]
name = "portable-atomic"
-version = "1.5.1"
+version = "1.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3bccab0e7fd7cc19f820a1c8c91720af652d0c88dc9664dd72aef2614f04af3b"
+checksum = "7170ef9988bc169ba16dd36a7fa041e5c4cbeb6a35b76d4c03daded371eae7c0"
[[package]]
name = "postgres-protocol"
@@ -6323,9 +6323,9 @@ dependencies = [
"nix 0.26.4",
"once_cell",
"parking_lot 0.12.1",
- "prost 0.12.2",
- "prost-build 0.12.2",
- "prost-derive 0.12.2",
+ "prost 0.12.3",
+ "prost-build 0.12.3",
+ "prost-derive 0.12.3",
"protobuf",
"sha2",
"smallvec",
@@ -6403,7 +6403,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ae005bd773ab59b4725093fd7df83fd7892f7d8eafb48dbd7de6e024e4215f9d"
dependencies = [
"proc-macro2",
- "syn 2.0.39",
+ "syn 2.0.42",
]
[[package]]
@@ -6418,11 +6418,12 @@ dependencies = [
[[package]]
name = "proc-macro-crate"
-version = "2.0.0"
+version = "2.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7e8366a6159044a37876a2b9817124296703c586a5c92e2c53751fa06d8d43e8"
+checksum = "97dc5fea232fc28d2f597b37c4876b348a40e33f3b02cc975c8d006d78d94b1a"
dependencies = [
- "toml_edit 0.20.7",
+ "toml_datetime",
+ "toml_edit 0.20.2",
]
[[package]]
@@ -6451,9 +6452,9 @@ dependencies = [
[[package]]
name = "proc-macro2"
-version = "1.0.69"
+version = "1.0.71"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "134c189feb4956b20f6f547d2cf727d4c0fe06722b20a0eec87ed445a97f92da"
+checksum = "75cb1540fadbd5b8fbccc4dddad2734eba435053f725621c070711a14bb5f4b8"
dependencies = [
"unicode-ident",
]
@@ -6520,7 +6521,7 @@ dependencies = [
"lazy_static",
"prometheus",
"promql-parser",
- "prost 0.12.2",
+ "prost 0.12.3",
"query",
"session",
"snafu",
@@ -6553,12 +6554,12 @@ dependencies = [
[[package]]
name = "prost"
-version = "0.12.2"
+version = "0.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5a5a410fc7882af66deb8d01d01737353cf3ad6204c408177ba494291a626312"
+checksum = "146c289cda302b98a28d40c8b3b90498d6e526dd24ac2ecea73e4e491685b94a"
dependencies = [
"bytes",
- "prost-derive 0.12.2",
+ "prost-derive 0.12.3",
]
[[package]]
@@ -6585,9 +6586,9 @@ dependencies = [
[[package]]
name = "prost-build"
-version = "0.12.2"
+version = "0.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1fa3d084c8704911bfefb2771be2f9b6c5c0da7343a71e0021ee3c665cada738"
+checksum = "c55e02e35260070b6f716a2423c2ff1c3bb1642ddca6f99e1f26d06268a0e2d2"
dependencies = [
"bytes",
"heck",
@@ -6597,10 +6598,10 @@ dependencies = [
"once_cell",
"petgraph",
"prettyplease 0.2.15",
- "prost 0.12.2",
- "prost-types 0.12.2",
+ "prost 0.12.3",
+ "prost-types 0.12.3",
"regex",
- "syn 2.0.39",
+ "syn 2.0.42",
"tempfile",
"which",
]
@@ -6620,15 +6621,15 @@ dependencies = [
[[package]]
name = "prost-derive"
-version = "0.12.2"
+version = "0.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "065717a5dfaca4a83d2fe57db3487b311365200000551d7a364e715dbf4346bc"
+checksum = "efb6c9a1dd1def8e2124d17e83a20af56f1570d6c2d2bd9e266ccb768df3840e"
dependencies = [
"anyhow",
"itertools 0.11.0",
"proc-macro2",
"quote",
- "syn 2.0.39",
+ "syn 2.0.42",
]
[[package]]
@@ -6642,11 +6643,11 @@ dependencies = [
[[package]]
name = "prost-types"
-version = "0.12.2"
+version = "0.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8339f32236f590281e2f6368276441394fcd1b2133b549cc895d0ae80f2f9a52"
+checksum = "193898f59edcf43c26227dcd4c8427f00d99d61e95dcde58dabd49fa291d470e"
dependencies = [
- "prost 0.12.2",
+ "prost 0.12.3",
]
[[package]]
@@ -6950,7 +6951,7 @@ dependencies = [
"crossbeam",
"fail",
"fs2",
- "hashbrown 0.14.2",
+ "hashbrown 0.14.3",
"hex",
"if_chain",
"lazy_static",
@@ -7059,15 +7060,6 @@ dependencies = [
"bitflags 1.3.2",
]
-[[package]]
-name = "redox_syscall"
-version = "0.3.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29"
-dependencies = [
- "bitflags 1.3.2",
-]
-
[[package]]
name = "redox_syscall"
version = "0.4.1"
@@ -7160,15 +7152,16 @@ dependencies = [
[[package]]
name = "reqsign"
-version = "0.14.3"
+version = "0.14.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1ad14258ddd8ef6e564d57a94613e138cc9c21ef8a1fec547206d853213c7959"
+checksum = "dce87f66ba6c6acef277a729f989a0eca946cb9ce6a15bcc036bda0f72d4b9fd"
dependencies = [
"anyhow",
"async-trait",
"base64 0.21.5",
"chrono",
"form_urlencoded",
+ "getrandom",
"hex",
"hmac",
"home",
@@ -7180,7 +7173,7 @@ dependencies = [
"quick-xml 0.31.0",
"rand",
"reqwest",
- "rsa 0.9.4",
+ "rsa 0.9.6",
"rust-ini 0.20.0",
"serde",
"serde_json",
@@ -7191,9 +7184,9 @@ dependencies = [
[[package]]
name = "reqwest"
-version = "0.11.22"
+version = "0.11.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "046cd98826c46c2ac8ddecae268eb5c2e58628688a5fc7a2643704a73faba95b"
+checksum = "37b1ae8d9ac08420c66222fb9096fc5de435c3c48542bc5336c51892cffafb41"
dependencies = [
"base64 0.21.5",
"bytes",
@@ -7213,7 +7206,7 @@ dependencies = [
"once_cell",
"percent-encoding",
"pin-project-lite",
- "rustls 0.21.9",
+ "rustls 0.21.10",
"rustls-native-certs",
"rustls-pemfile 1.0.4",
"serde",
@@ -7300,7 +7293,7 @@ checksum = "853977598f084a492323fe2f7896b4100a86284ee8473612de60021ea341310f"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.39",
+ "syn 2.0.42",
]
[[package]]
@@ -7320,9 +7313,9 @@ dependencies = [
[[package]]
name = "ring"
-version = "0.17.5"
+version = "0.17.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fb0205304757e5d899b9c2e448b867ffd03ae7f988002e47cd24954391394d0b"
+checksum = "688c63d65483050968b2a8937f7995f443e27041a0f7700aa59b0822aedebb74"
dependencies = [
"cc",
"getrandom",
@@ -7334,12 +7327,13 @@ dependencies = [
[[package]]
name = "rkyv"
-version = "0.7.42"
+version = "0.7.43"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0200c8230b013893c0b2d6213d6ec64ed2b9be2e0e016682b7224ff82cff5c58"
+checksum = "527a97cdfef66f65998b5f3b637c26f5a5ec09cc52a3f9932313ac645f4190f5"
dependencies = [
"bitvec",
"bytecheck",
+ "bytes",
"hashbrown 0.12.3",
"ptr_meta",
"rend",
@@ -7351,9 +7345,9 @@ dependencies = [
[[package]]
name = "rkyv_derive"
-version = "0.7.42"
+version = "0.7.43"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b2e06b915b5c230a17d7a736d1e2e63ee753c256a8614ef3f5147b13a4f5541d"
+checksum = "b5c462a1328c8e67e4d6dbad1eb0355dd43e8ab432c6e227a43657f16ade5033"
dependencies = [
"proc-macro2",
"quote",
@@ -7393,11 +7387,11 @@ dependencies = [
[[package]]
name = "rsa"
-version = "0.9.4"
+version = "0.9.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6a3211b01eea83d80687da9eef70e39d65144a3894866a5153a2723e425a157f"
+checksum = "5d0e5124fcb30e76a7e79bfee683a2746db83784b86289f6251b54b7950a0dfc"
dependencies = [
- "const-oid 0.9.5",
+ "const-oid 0.9.6",
"digest",
"num-bigint-dig",
"num-integer",
@@ -7406,7 +7400,7 @@ dependencies = [
"pkcs8 0.10.2",
"rand_core",
"signature",
- "spki 0.7.2",
+ "spki 0.7.3",
"subtle",
"zeroize",
]
@@ -7493,7 +7487,7 @@ dependencies = [
"proc-macro2",
"quote",
"rust-embed-utils",
- "syn 2.0.39",
+ "syn 2.0.42",
"walkdir",
]
@@ -7580,15 +7574,15 @@ dependencies = [
[[package]]
name = "rustix"
-version = "0.38.25"
+version = "0.38.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dc99bc2d4f1fed22595588a013687477aedf3cdcfb26558c559edb67b4d9b22e"
+checksum = "72e572a5e8ca657d7366229cdde4bd14c4eb5499a9573d4d366fe1b599daa316"
dependencies = [
"bitflags 2.4.1",
"errno",
"libc",
- "linux-raw-sys 0.4.11",
- "windows-sys 0.48.0",
+ "linux-raw-sys 0.4.12",
+ "windows-sys 0.52.0",
]
[[package]]
@@ -7605,12 +7599,12 @@ dependencies = [
[[package]]
name = "rustls"
-version = "0.21.9"
+version = "0.21.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "629648aced5775d558af50b2b4c7b02983a04b312126d45eeead26e7caa498b9"
+checksum = "f9d5a6813c0759e4609cd494e8e725babae6a2ca7b62a5536a13daaec6fcb7ba"
dependencies = [
"log",
- "ring 0.17.5",
+ "ring 0.17.7",
"rustls-webpki 0.101.7",
"sct",
]
@@ -7622,7 +7616,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fe6b63262c9fcac8659abfaa96cac103d28166d3ff3eaf8f412e19f3ae9e5a48"
dependencies = [
"log",
- "ring 0.17.5",
+ "ring 0.17.7",
"rustls-pki-types",
"rustls-webpki 0.102.0",
"subtle",
@@ -7672,7 +7666,7 @@ version = "0.101.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765"
dependencies = [
- "ring 0.17.5",
+ "ring 0.17.7",
"untrusted 0.9.0",
]
@@ -7682,7 +7676,7 @@ version = "0.102.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "de2635c8bc2b88d367767c5de8ea1d8db9af3f6219eba28442242d9ab81d1b89"
dependencies = [
- "ring 0.17.5",
+ "ring 0.17.7",
"rustls-pki-types",
"untrusted 0.9.0",
]
@@ -8024,9 +8018,9 @@ dependencies = [
[[package]]
name = "ryu"
-version = "1.0.15"
+version = "1.0.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741"
+checksum = "f98d2aa92eebf49b69786be48e4477826b256916e84a57ff2a4f21923b48eb4c"
[[package]]
name = "safe-lock"
@@ -8209,7 +8203,7 @@ version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414"
dependencies = [
- "ring 0.17.5",
+ "ring 0.17.7",
"untrusted 0.9.0",
]
@@ -8284,7 +8278,7 @@ checksum = "43576ca501357b9b071ac53cdc7da8ef0cbd9493d8df094cd821777ea6e894d3"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.39",
+ "syn 2.0.42",
]
[[package]]
@@ -8327,14 +8321,14 @@ checksum = "3081f5ffbb02284dda55132aa26daecedd7372a42417bbbab6f14ab7d6bb9145"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.39",
+ "syn 2.0.42",
]
[[package]]
name = "serde_spanned"
-version = "0.6.4"
+version = "0.6.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "12022b835073e5b11e90a14f86838ceb1c8fb0325b72416845c487ac0fa95e80"
+checksum = "eb3622f419d1296904700073ea6cc23ad690adbd66f13ea683df73298736f0c1"
dependencies = [
"serde",
]
@@ -8348,7 +8342,7 @@ dependencies = [
"proc-macro2",
"quote",
"serde",
- "syn 2.0.39",
+ "syn 2.0.42",
]
[[package]]
@@ -8389,14 +8383,14 @@ dependencies = [
"darling 0.20.3",
"proc-macro2",
"quote",
- "syn 2.0.39",
+ "syn 2.0.42",
]
[[package]]
name = "serde_yaml"
-version = "0.9.27"
+version = "0.9.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3cc7a1570e38322cfe4154732e5110f887ea57e22b76f4bfd32b5bdd3368666c"
+checksum = "a15e0ef66bf939a7c890a0bf6d5a733c70202225f9888a89ed5c62298b019129"
dependencies = [
"indexmap 2.1.0",
"itoa",
@@ -8466,7 +8460,7 @@ dependencies = [
"pprof",
"prometheus",
"promql-parser",
- "prost 0.12.2",
+ "prost 0.12.3",
"query",
"rand",
"regex",
@@ -8694,9 +8688,9 @@ dependencies = [
[[package]]
name = "snap"
-version = "1.1.0"
+version = "1.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5e9f0ab6ef7eb7353d9119c170a436d1bf248eea575ac42d19d12f4e34130831"
+checksum = "1b6b67fb9a61334225b5b790716f609cd58395f895b3fe8b328786812a40bc3b"
[[package]]
name = "socket2"
@@ -8754,9 +8748,9 @@ dependencies = [
[[package]]
name = "spki"
-version = "0.7.2"
+version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9d1e996ef02c474957d681f1b05213dfb0abab947b446a62d37770b23500184a"
+checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d"
dependencies = [
"base64ct",
"der 0.7.8",
@@ -8790,11 +8784,11 @@ dependencies = [
[[package]]
name = "sqlformat"
-version = "0.2.2"
+version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6b7b278788e7be4d0d29c0f39497a0eef3fba6bbc8e70d8bf7fde46edeaa9e85"
+checksum = "ce81b7bd7c4493975347ef60d8c7e8b742d4694f4c49f93e0a12ea263938176c"
dependencies = [
- "itertools 0.11.0",
+ "itertools 0.12.0",
"nom",
"unicode_categories",
]
@@ -8819,7 +8813,7 @@ name = "sqlness-runner"
version = "0.4.4"
dependencies = [
"async-trait",
- "clap 4.4.8",
+ "clap 4.4.11",
"client",
"common-base",
"common-error",
@@ -9147,7 +9141,7 @@ dependencies = [
"proc-macro2",
"quote",
"rustversion",
- "syn 2.0.39",
+ "syn 2.0.42",
]
[[package]]
@@ -9179,7 +9173,7 @@ dependencies = [
"datatypes",
"futures",
"promql",
- "prost 0.12.2",
+ "prost 0.12.3",
"session",
"snafu",
"substrait 0.17.1",
@@ -9196,15 +9190,15 @@ dependencies = [
"git2",
"heck",
"prettyplease 0.2.15",
- "prost 0.12.2",
- "prost-build 0.12.2",
- "prost-types 0.12.2",
+ "prost 0.12.3",
+ "prost-build 0.12.3",
+ "prost-types 0.12.3",
"schemars",
"semver",
"serde",
"serde_json",
"serde_yaml",
- "syn 2.0.39",
+ "syn 2.0.42",
"typify",
"walkdir",
]
@@ -9217,21 +9211,21 @@ checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc"
[[package]]
name = "symbolic-common"
-version = "12.7.0"
+version = "12.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "39eac77836da383d35edbd9ff4585b4fc1109929ff641232f2e9a1aefdfc9e91"
+checksum = "1cccfffbc6bb3bb2d3a26cd2077f4d055f6808d266f9d4d158797a4c60510dfe"
dependencies = [
"debugid",
- "memmap2 0.8.0",
+ "memmap2 0.9.3",
"stable_deref_trait",
"uuid",
]
[[package]]
name = "symbolic-demangle"
-version = "12.7.0"
+version = "12.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4ee1608a1d13061fb0e307a316de29f6c6e737b05459fe6bbf5dd8d7837c4fb7"
+checksum = "76a99812da4020a67e76c4eb41f08c87364c14170495ff780f30dd519c221a68"
dependencies = [
"cpp_demangle",
"rustc-demangle",
@@ -9251,9 +9245,9 @@ dependencies = [
[[package]]
name = "syn"
-version = "2.0.39"
+version = "2.0.42"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "23e78b90f2fcf45d3e842032ce32e3f2d1545ba6636271dcbf24fa306d87be7a"
+checksum = "5b7d0a2c048d661a1a59fcd7355baa232f7ed34e0ee4df2eef3c1c1c0d3852d8"
dependencies = [
"proc-macro2",
"quote",
@@ -9278,7 +9272,7 @@ dependencies = [
"proc-macro-error",
"proc-macro2",
"quote",
- "syn 2.0.39",
+ "syn 2.0.42",
]
[[package]]
@@ -9381,7 +9375,7 @@ dependencies = [
"cfg-if 1.0.0",
"fastrand 2.0.1",
"redox_syscall 0.4.1",
- "rustix 0.38.25",
+ "rustix 0.38.28",
"windows-sys 0.48.0",
]
@@ -9463,7 +9457,7 @@ dependencies = [
"operator",
"partition",
"paste",
- "prost 0.12.2",
+ "prost 0.12.3",
"query",
"rand",
"rstest",
@@ -9512,22 +9506,22 @@ checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d"
[[package]]
name = "thiserror"
-version = "1.0.50"
+version = "1.0.51"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f9a7210f5c9a7156bb50aa36aed4c95afb51df0df00713949448cf9e97d382d2"
+checksum = "f11c217e1416d6f036b870f14e0413d480dbf28edbee1f877abaf0206af43bb7"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
-version = "1.0.50"
+version = "1.0.51"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8"
+checksum = "01742297787513b79cf8e29d1056ede1313e2420b7b3b15d0a768b4921f549df"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.39",
+ "syn 2.0.42",
]
[[package]]
@@ -9594,9 +9588,9 @@ dependencies = [
[[package]]
name = "time"
-version = "0.3.30"
+version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c4a34ab300f2dee6e562c10a046fc05e358b29f9bf92277f30c3c8d82275f6f5"
+checksum = "f657ba42c3f86e7680e53c8cd3af8abbe56b5491790b46e22e19c0d57463583e"
dependencies = [
"deranged",
"itoa",
@@ -9614,18 +9608,18 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3"
[[package]]
name = "time-macros"
-version = "0.2.15"
+version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4ad70d68dba9e1f8aceda7aa6711965dfec1cac869f311a51bd08b3a2ccbce20"
+checksum = "26197e33420244aeb70c3e8c78376ca46571bc4e701e4791c2cd9f57dcb3a43f"
dependencies = [
"time-core",
]
[[package]]
name = "timsort"
-version = "0.1.2"
+version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3cb4fa83bb73adf1c7219f4fe4bf3c0ac5635e4e51e070fad5df745a41bedfb8"
+checksum = "639ce8ef6d2ba56be0383a94dd13b92138d58de44c62618303bb798fa92bdc00"
[[package]]
name = "tiny-keccak"
@@ -9663,9 +9657,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
[[package]]
name = "tokio"
-version = "1.34.0"
+version = "1.35.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d0c014766411e834f7af5b8f4cf46257aab4036ca95e9d2c144a10f59ad6f5b9"
+checksum = "c89b4efa943be685f629b149f53829423f8f5531ea21249408e8e2f8671ec104"
dependencies = [
"backtrace",
"bytes",
@@ -9699,7 +9693,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.39",
+ "syn 2.0.42",
]
[[package]]
@@ -9759,7 +9753,7 @@ version = "0.24.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081"
dependencies = [
- "rustls 0.21.9",
+ "rustls 0.21.10",
"tokio",
]
@@ -9836,9 +9830,9 @@ dependencies = [
[[package]]
name = "toml_datetime"
-version = "0.6.5"
+version = "0.6.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1"
+checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b"
dependencies = [
"serde",
]
@@ -9858,9 +9852,9 @@ dependencies = [
[[package]]
name = "toml_edit"
-version = "0.20.7"
+version = "0.20.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "70f427fce4d84c72b5b732388bf4a9f4531b53f74e2887e3ecb2481f68f66d81"
+checksum = "396e4d48bbb2b7554c944bde63101b5ae446cff6ec4a24227428f15eb72ef338"
dependencies = [
"indexmap 2.1.0",
"toml_datetime",
@@ -9913,8 +9907,8 @@ dependencies = [
"hyper-timeout",
"percent-encoding",
"pin-project",
- "prost 0.12.2",
- "rustls 0.21.9",
+ "prost 0.12.3",
+ "rustls 0.21.10",
"rustls-pemfile 1.0.4",
"tokio",
"tokio-rustls 0.24.1",
@@ -9946,9 +9940,9 @@ checksum = "9d021fc044c18582b9a2408cd0dd05b1596e3ecdb5c4df822bb0183545683889"
dependencies = [
"prettyplease 0.2.15",
"proc-macro2",
- "prost-build 0.12.2",
+ "prost-build 0.12.3",
"quote",
- "syn 2.0.39",
+ "syn 2.0.42",
]
[[package]]
@@ -9957,8 +9951,8 @@ version = "0.10.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3fa37c513df1339d197f4ba21d28c918b9ef1ac1768265f11ecb6b7f1cba1b76"
dependencies = [
- "prost 0.12.2",
- "prost-types 0.12.2",
+ "prost 0.12.3",
+ "prost-types 0.12.3",
"tokio",
"tokio-stream",
"tonic 0.10.2",
@@ -10059,7 +10053,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.39",
+ "syn 2.0.42",
]
[[package]]
@@ -10144,15 +10138,15 @@ dependencies = [
[[package]]
name = "triomphe"
-version = "0.1.9"
+version = "0.1.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0eee8098afad3fb0c54a9007aab6804558410503ad676d4633f9c2559a00ac0f"
+checksum = "859eb650cfee7434994602c3a68b25d77ad9e68c8a6cd491616ef86661382eb3"
[[package]]
name = "try-lock"
-version = "0.2.4"
+version = "0.2.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed"
+checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b"
[[package]]
name = "try_from"
@@ -10182,9 +10176,9 @@ checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825"
[[package]]
name = "typetag"
-version = "0.2.13"
+version = "0.2.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "80960fd143d4c96275c0e60b08f14b81fbb468e79bc0ef8fbda69fb0afafae43"
+checksum = "196976efd4a62737b3a2b662cda76efb448d099b1049613d7a5d72743c611ce0"
dependencies = [
"erased-serde",
"inventory",
@@ -10195,13 +10189,13 @@ dependencies = [
[[package]]
name = "typetag-impl"
-version = "0.2.13"
+version = "0.2.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bfc13d450dc4a695200da3074dacf43d449b968baee95e341920e47f61a3b40f"
+checksum = "2eea6765137e2414c44c7b1e07c73965a118a72c46148e1e168b3fc9d3ccf3aa"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.39",
+ "syn 2.0.42",
]
[[package]]
@@ -10227,7 +10221,7 @@ dependencies = [
"regress",
"schemars",
"serde_json",
- "syn 2.0.39",
+ "syn 2.0.42",
"thiserror",
"unicode-ident",
]
@@ -10244,7 +10238,7 @@ dependencies = [
"serde",
"serde_json",
"serde_tokenstream",
- "syn 2.0.39",
+ "syn 2.0.42",
"typify-impl",
]
@@ -10405,9 +10399,9 @@ dependencies = [
[[package]]
name = "unicode-bidi"
-version = "0.3.13"
+version = "0.3.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460"
+checksum = "6f2528f27a9eb2b21e69c95319b30bd0efd85d09c379741b0f78ea1d86be2416"
[[package]]
name = "unicode-casing"
@@ -10470,9 +10464,9 @@ checksum = "e1766d682d402817b5ac4490b3c3002d91dfa0d22812f341609f97b08757359c"
[[package]]
name = "unsafe-libyaml"
-version = "0.2.9"
+version = "0.2.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f28467d3e1d3c6586d8f25fa243f544f5800fec42d97032474e17222c2b75cfa"
+checksum = "ab4c90930b95a82d00dc9e9ac071b4991924390d46cbd0dfe566148667605e4b"
[[package]]
name = "untrusted"
@@ -10530,7 +10524,7 @@ checksum = "f49e7f3f3db8040a100710a11932239fd30697115e2ba4107080d8252939845e"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.39",
+ "syn 2.0.42",
]
[[package]]
@@ -10616,9 +10610,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
[[package]]
name = "wasm-bindgen"
-version = "0.2.88"
+version = "0.2.89"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7daec296f25a1bae309c0cd5c29c4b260e510e6d813c286b19eaadf409d40fce"
+checksum = "0ed0d4f68a3015cc185aff4db9506a015f4b96f95303897bfa23f846db54064e"
dependencies = [
"cfg-if 1.0.0",
"wasm-bindgen-macro",
@@ -10626,24 +10620,24 @@ dependencies = [
[[package]]
name = "wasm-bindgen-backend"
-version = "0.2.88"
+version = "0.2.89"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e397f4664c0e4e428e8313a469aaa58310d302159845980fd23b0f22a847f217"
+checksum = "1b56f625e64f3a1084ded111c4d5f477df9f8c92df113852fa5a374dbda78826"
dependencies = [
"bumpalo",
"log",
"once_cell",
"proc-macro2",
"quote",
- "syn 2.0.39",
+ "syn 2.0.42",
"wasm-bindgen-shared",
]
[[package]]
name = "wasm-bindgen-futures"
-version = "0.4.38"
+version = "0.4.39"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9afec9963e3d0994cac82455b2b3502b81a7f40f9a0d32181f7528d9f4b43e02"
+checksum = "ac36a15a220124ac510204aec1c3e5db8a22ab06fd6706d881dc6149f8ed9a12"
dependencies = [
"cfg-if 1.0.0",
"js-sys",
@@ -10653,9 +10647,9 @@ dependencies = [
[[package]]
name = "wasm-bindgen-macro"
-version = "0.2.88"
+version = "0.2.89"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5961017b3b08ad5f3fe39f1e79877f8ee7c23c5e5fd5eb80de95abc41f1f16b2"
+checksum = "0162dbf37223cd2afce98f3d0785506dcb8d266223983e4b5b525859e6e182b2"
dependencies = [
"quote",
"wasm-bindgen-macro-support",
@@ -10663,22 +10657,22 @@ dependencies = [
[[package]]
name = "wasm-bindgen-macro-support"
-version = "0.2.88"
+version = "0.2.89"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c5353b8dab669f5e10f5bd76df26a9360c748f054f862ff5f3f8aae0c7fb3907"
+checksum = "f0eb82fcb7930ae6219a7ecfd55b217f5f0893484b7a13022ebb2b2bf20b5283"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.39",
+ "syn 2.0.42",
"wasm-bindgen-backend",
"wasm-bindgen-shared",
]
[[package]]
name = "wasm-bindgen-shared"
-version = "0.2.88"
+version = "0.2.89"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0d046c5d029ba91a1ed14da14dca44b68bf2f124cfbaf741c54151fdb3e0750b"
+checksum = "7ab9b36309365056cd639da3134bf87fa8f3d86008abf99e612384a6eecd459f"
[[package]]
name = "wasm-streams"
@@ -10695,9 +10689,9 @@ dependencies = [
[[package]]
name = "web-sys"
-version = "0.3.65"
+version = "0.3.66"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5db499c5f66323272151db0e666cd34f78617522fb0c1604d31a27c50c206a85"
+checksum = "50c24a44ec86bb68fbecd1b3efed7e85ea5621b39b35ef2766b66cd984f8010f"
dependencies = [
"js-sys",
"wasm-bindgen",
@@ -10719,7 +10713,7 @@ version = "0.22.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ed63aea5ce73d0ff405984102c42de94fc55a6b75765d621c65262469b3c9b53"
dependencies = [
- "ring 0.17.5",
+ "ring 0.17.7",
"untrusted 0.9.0",
]
@@ -10747,7 +10741,7 @@ dependencies = [
"either",
"home",
"once_cell",
- "rustix 0.38.25",
+ "rustix 0.38.28",
]
[[package]]
@@ -10847,6 +10841,15 @@ dependencies = [
"windows-targets 0.48.5",
]
+[[package]]
+name = "windows-sys"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d"
+dependencies = [
+ "windows-targets 0.52.0",
+]
+
[[package]]
name = "windows-targets"
version = "0.42.2"
@@ -10877,6 +10880,21 @@ dependencies = [
"windows_x86_64_msvc 0.48.5",
]
+[[package]]
+name = "windows-targets"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd"
+dependencies = [
+ "windows_aarch64_gnullvm 0.52.0",
+ "windows_aarch64_msvc 0.52.0",
+ "windows_i686_gnu 0.52.0",
+ "windows_i686_msvc 0.52.0",
+ "windows_x86_64_gnu 0.52.0",
+ "windows_x86_64_gnullvm 0.52.0",
+ "windows_x86_64_msvc 0.52.0",
+]
+
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.42.2"
@@ -10889,6 +10907,12 @@ version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8"
+[[package]]
+name = "windows_aarch64_gnullvm"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea"
+
[[package]]
name = "windows_aarch64_msvc"
version = "0.39.0"
@@ -10907,6 +10931,12 @@ version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc"
+[[package]]
+name = "windows_aarch64_msvc"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef"
+
[[package]]
name = "windows_i686_gnu"
version = "0.39.0"
@@ -10925,6 +10955,12 @@ version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e"
+[[package]]
+name = "windows_i686_gnu"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313"
+
[[package]]
name = "windows_i686_msvc"
version = "0.39.0"
@@ -10943,6 +10979,12 @@ version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406"
+[[package]]
+name = "windows_i686_msvc"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a"
+
[[package]]
name = "windows_x86_64_gnu"
version = "0.39.0"
@@ -10961,6 +11003,12 @@ version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e"
+[[package]]
+name = "windows_x86_64_gnu"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd"
+
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.42.2"
@@ -10973,6 +11021,12 @@ version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc"
+[[package]]
+name = "windows_x86_64_gnullvm"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e"
+
[[package]]
name = "windows_x86_64_msvc"
version = "0.39.0"
@@ -10991,11 +11045,17 @@ version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538"
+[[package]]
+name = "windows_x86_64_msvc"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04"
+
[[package]]
name = "winnow"
-version = "0.5.19"
+version = "0.5.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "829846f3e3db426d4cee4510841b71a8e58aa2a76b1132579487ae430ccd9c7b"
+checksum = "9b5c3db89721d50d0e2a673f5043fc4722f76dcc352d7b1ab8b8288bed4ed2c5"
dependencies = [
"memchr",
]
@@ -11039,10 +11099,10 @@ dependencies = [
"chrono",
"der 0.7.8",
"hex",
- "pem 3.0.2",
- "ring 0.17.5",
+ "pem",
+ "ring 0.17.7",
"signature",
- "spki 0.7.2",
+ "spki 0.7.3",
"thiserror",
"zeroize",
]
@@ -11073,22 +11133,22 @@ dependencies = [
[[package]]
name = "zerocopy"
-version = "0.7.26"
+version = "0.7.32"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e97e415490559a91254a2979b4829267a57d2fcd741a98eee8b722fb57289aa0"
+checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be"
dependencies = [
"zerocopy-derive",
]
[[package]]
name = "zerocopy-derive"
-version = "0.7.26"
+version = "0.7.32"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dd7e48ccf166952882ca8bd778a43502c64f33bf94c12ebe2a7f08e5a0f6689f"
+checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.39",
+ "syn 2.0.42",
]
[[package]]
@@ -11108,7 +11168,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.39",
+ "syn 2.0.42",
]
[[package]]
diff --git a/src/cmd/src/cli/bench/metadata.rs b/src/cmd/src/cli/bench/metadata.rs
index 7b77fed49dbd..6eedc18eac18 100644
--- a/src/cmd/src/cli/bench/metadata.rs
+++ b/src/cmd/src/cli/bench/metadata.rs
@@ -14,6 +14,7 @@
use std::time::Instant;
+use common_meta::key::table_route::TableRouteValue;
use common_meta::key::TableMetadataManagerRef;
use common_meta::table_name::TableName;
@@ -53,7 +54,11 @@ impl TableMetadataBencher {
let start = Instant::now();
self.table_metadata_manager
- .create_table_metadata(table_info, region_routes, region_wal_options)
+ .create_table_metadata(
+ table_info,
+ TableRouteValue::physical(region_routes),
+ region_wal_options,
+ )
.await
.unwrap();
diff --git a/src/cmd/src/cli/upgrade.rs b/src/cmd/src/cli/upgrade.rs
index e5615f4d8219..6936b13fd7b4 100644
--- a/src/cmd/src/cli/upgrade.rs
+++ b/src/cmd/src/cli/upgrade.rs
@@ -27,7 +27,7 @@ use common_meta::key::table_info::{TableInfoKey, TableInfoValue};
use common_meta::key::table_name::{TableNameKey, TableNameValue};
use common_meta::key::table_region::{TableRegionKey, TableRegionValue};
use common_meta::key::table_route::{TableRouteKey, TableRouteValue as NextTableRouteValue};
-use common_meta::key::{RegionDistribution, TableMetaKey};
+use common_meta::key::{RegionDistribution, TableMetaKey, TableMetaValue};
use common_meta::kv_backend::etcd::EtcdStore;
use common_meta::kv_backend::KvBackendRef;
use common_meta::range_stream::PaginationStream;
@@ -153,7 +153,7 @@ impl MigrateTableMetadata {
)
.unwrap();
- let new_table_value = NextTableRouteValue::new(table_route.region_routes);
+ let new_table_value = NextTableRouteValue::physical(table_route.region_routes);
let table_id = table_route.table.id as u32;
let new_key = TableRouteKey::new(table_id);
diff --git a/src/common/meta/src/ddl.rs b/src/common/meta/src/ddl.rs
index 793df3f9c4d6..bb5220724ab6 100644
--- a/src/common/meta/src/ddl.rs
+++ b/src/common/meta/src/ddl.rs
@@ -21,10 +21,10 @@ use store_api::storage::{RegionNumber, TableId};
use crate::cache_invalidator::CacheInvalidatorRef;
use crate::datanode_manager::DatanodeManagerRef;
use crate::error::Result;
+use crate::key::table_route::TableRouteValue;
use crate::key::TableMetadataManagerRef;
use crate::region_keeper::MemoryRegionKeeperRef;
use crate::rpc::ddl::{CreateTableTask, SubmitDdlTaskRequest, SubmitDdlTaskResponse};
-use crate::rpc::router::RegionRoute;
pub mod alter_table;
pub mod create_table;
@@ -58,7 +58,7 @@ pub struct TableMetadata {
/// Table id.
pub table_id: TableId,
/// Route information for each region of the table.
- pub region_routes: Vec<RegionRoute>,
+ pub table_route: TableRouteValue,
/// The encoded wal options for regions of the table.
// If a region does not have an associated wal options, no key for the region would be found in the map.
pub region_wal_options: HashMap<RegionNumber, String>,
diff --git a/src/common/meta/src/ddl/alter_table.rs b/src/common/meta/src/ddl/alter_table.rs
index a48e46913173..092d4dd24263 100644
--- a/src/common/meta/src/ddl/alter_table.rs
+++ b/src/common/meta/src/ddl/alter_table.rs
@@ -182,7 +182,6 @@ impl AlterTableProcedure {
pub async fn submit_alter_region_requests(&mut self) -> Result<Status> {
let table_id = self.data.table_id();
- let table_ref = self.data.table_ref();
let table_route = self
.context
@@ -190,9 +189,7 @@ impl AlterTableProcedure {
.table_route_manager()
.get(table_id)
.await?
- .with_context(|| TableRouteNotFoundSnafu {
- table_name: table_ref.to_string(),
- })?
+ .context(TableRouteNotFoundSnafu { table_id })?
.into_inner();
let region_routes = table_route.region_routes();
diff --git a/src/common/meta/src/ddl/create_table.rs b/src/common/meta/src/ddl/create_table.rs
index 35050643d3c2..c73844fc8337 100644
--- a/src/common/meta/src/ddl/create_table.rs
+++ b/src/common/meta/src/ddl/create_table.rs
@@ -18,9 +18,8 @@ use api::v1::region::region_request::Body as PbRegionRequest;
use api::v1::region::{
CreateRequest as PbCreateRegionRequest, RegionColumnDef, RegionRequest, RegionRequestHeader,
};
-use api::v1::{ColumnDef, CreateTableExpr, SemanticType};
+use api::v1::{ColumnDef, SemanticType};
use async_trait::async_trait;
-use common_catalog::consts::METRIC_ENGINE;
use common_config::WAL_OPTIONS_KEY;
use common_error::ext::BoxedError;
use common_procedure::error::{
@@ -40,8 +39,9 @@ use table::metadata::{RawTableInfo, TableId};
use crate::ddl::utils::{handle_operate_region_error, handle_retry_error, region_storage_path};
use crate::ddl::DdlContext;
-use crate::error::{self, Result, TableInfoNotFoundSnafu};
+use crate::error::{self, Result, TableRouteNotFoundSnafu};
use crate::key::table_name::TableNameKey;
+use crate::key::table_route::TableRouteValue;
use crate::metrics;
use crate::region_keeper::OperatingRegionGuard;
use crate::rpc::ddl::CreateTableTask;
@@ -60,13 +60,13 @@ impl CreateTableProcedure {
pub fn new(
cluster_id: u64,
task: CreateTableTask,
- region_routes: Vec<RegionRoute>,
+ table_route: TableRouteValue,
region_wal_options: HashMap<RegionNumber, String>,
context: DdlContext,
) -> Self {
Self {
context,
- creator: TableCreator::new(cluster_id, task, region_routes, region_wal_options),
+ creator: TableCreator::new(cluster_id, task, table_route, region_wal_options),
}
}
@@ -78,10 +78,12 @@ impl CreateTableProcedure {
opening_regions: vec![],
};
- creator
- .register_opening_regions(&context)
- .map_err(BoxedError::new)
- .context(ExternalSnafu)?;
+ if let TableRouteValue::Physical(x) = &creator.data.table_route {
+ creator.opening_regions = creator
+ .register_opening_regions(&context, &x.region_routes)
+ .map_err(BoxedError::new)
+ .context(ExternalSnafu)?;
+ }
Ok(CreateTableProcedure { context, creator })
}
@@ -94,10 +96,6 @@ impl CreateTableProcedure {
self.table_info().ident.table_id
}
- pub fn region_routes(&self) -> &Vec<RegionRoute> {
- &self.creator.data.region_routes
- }
-
pub fn region_wal_options(&self) -> &HashMap<RegionNumber, String> {
&self.creator.data.region_wal_options
}
@@ -132,7 +130,10 @@ impl CreateTableProcedure {
Ok(Status::executing(true))
}
- pub fn new_region_request_builder(&self) -> Result<CreateRequestBuilder> {
+ pub fn new_region_request_builder(
+ &self,
+ physical_table_id: Option<TableId>,
+ ) -> Result<CreateRequestBuilder> {
let create_table_expr = &self.creator.data.task.create_table;
let column_defs = create_table_expr
@@ -191,16 +192,54 @@ impl CreateTableProcedure {
options: create_table_expr.table_options.clone(),
};
- let builder = CreateRequestBuilder::new_template(self.context.clone(), template);
- Ok(builder)
+ Ok(CreateRequestBuilder {
+ template,
+ physical_table_id,
+ })
}
pub async fn on_datanode_create_regions(&mut self) -> Result<Status> {
+ match &self.creator.data.table_route {
+ TableRouteValue::Physical(x) => {
+ let region_routes = x.region_routes.clone();
+ let request_builder = self.new_region_request_builder(None)?;
+ self.create_regions(®ion_routes, request_builder).await
+ }
+ TableRouteValue::Logical(x) => {
+ let physical_table_id = x.physical_table_id();
+
+ let physical_table_route = self
+ .context
+ .table_metadata_manager
+ .table_route_manager()
+ .get(physical_table_id)
+ .await?
+ .context(TableRouteNotFoundSnafu {
+ table_id: physical_table_id,
+ })?;
+ let region_routes = physical_table_route.region_routes();
+
+ let request_builder = self.new_region_request_builder(Some(physical_table_id))?;
+
+ self.create_regions(region_routes, request_builder).await
+ }
+ }
+ }
+
+ async fn create_regions(
+ &mut self,
+ region_routes: &[RegionRoute],
+ request_builder: CreateRequestBuilder,
+ ) -> Result<Status> {
// Registers opening regions
- self.creator.register_opening_regions(&self.context)?;
+ let guards = self
+ .creator
+ .register_opening_regions(&self.context, region_routes)?;
+ if !guards.is_empty() {
+ self.creator.opening_regions = guards;
+ }
let create_table_data = &self.creator.data;
- let region_routes = &create_table_data.region_routes;
let region_wal_options = &create_table_data.region_wal_options;
let create_table_expr = &create_table_data.task.create_table;
@@ -208,8 +247,6 @@ impl CreateTableProcedure {
let schema = &create_table_expr.schema_name;
let storage_path = region_storage_path(catalog, schema);
- let mut request_builder = self.new_region_request_builder()?;
-
let leaders = find_leaders(region_routes);
let mut create_region_tasks = Vec::with_capacity(leaders.len());
@@ -221,12 +258,7 @@ impl CreateTableProcedure {
for region_number in regions {
let region_id = RegionId::new(self.table_id(), region_number);
let create_region_request = request_builder
- .build_one(
- &self.creator.data.task.create_table,
- region_id,
- storage_path.clone(),
- region_wal_options,
- )
+ .build_one(region_id, storage_path.clone(), region_wal_options)
.await?;
requests.push(PbRegionRequest::Create(create_region_request));
@@ -270,10 +302,13 @@ impl CreateTableProcedure {
let manager = &self.context.table_metadata_manager;
let raw_table_info = self.table_info().clone();
- let region_routes = self.region_routes().clone();
let region_wal_options = self.region_wal_options().clone();
manager
- .create_table_metadata(raw_table_info, region_routes, region_wal_options)
+ .create_table_metadata(
+ raw_table_info,
+ self.creator.data.table_route.clone(),
+ region_wal_options,
+ )
.await?;
info!("Created table metadata for table {table_id}");
@@ -329,7 +364,7 @@ impl TableCreator {
pub fn new(
cluster_id: u64,
task: CreateTableTask,
- region_routes: Vec<RegionRoute>,
+ table_route: TableRouteValue,
region_wal_options: HashMap<RegionNumber, String>,
) -> Self {
Self {
@@ -337,21 +372,23 @@ impl TableCreator {
state: CreateTableState::Prepare,
cluster_id,
task,
- region_routes,
+ table_route,
region_wal_options,
},
opening_regions: vec![],
}
}
- /// Register opening regions if doesn't exist.
- pub fn register_opening_regions(&mut self, context: &DdlContext) -> Result<()> {
- let region_routes = &self.data.region_routes;
-
+    /// Registers and returns the guards of the opening regions if they don't exist.
+ fn register_opening_regions(
+ &self,
+ context: &DdlContext,
+ region_routes: &[RegionRoute],
+ ) -> Result<Vec<OperatingRegionGuard>> {
let opening_regions = operating_leader_regions(region_routes);
if self.opening_regions.len() == opening_regions.len() {
- return Ok(());
+ return Ok(vec![]);
}
let mut opening_region_guards = Vec::with_capacity(opening_regions.len());
@@ -366,9 +403,7 @@ impl TableCreator {
})?;
opening_region_guards.push(guard);
}
-
- self.opening_regions = opening_region_guards;
- Ok(())
+ Ok(opening_region_guards)
}
}
@@ -386,7 +421,7 @@ pub enum CreateTableState {
pub struct CreateTableData {
pub state: CreateTableState,
pub task: CreateTableTask,
- pub region_routes: Vec<RegionRoute>,
+ table_route: TableRouteValue,
pub region_wal_options: HashMap<RegionNumber, String>,
pub cluster_id: u64,
}
@@ -399,28 +434,18 @@ impl CreateTableData {
/// Builder for [PbCreateRegionRequest].
pub struct CreateRequestBuilder {
- context: DdlContext,
template: PbCreateRegionRequest,
/// Optional. Only for metric engine.
physical_table_id: Option<TableId>,
}
impl CreateRequestBuilder {
- fn new_template(context: DdlContext, template: PbCreateRegionRequest) -> Self {
- Self {
- context,
- template,
- physical_table_id: None,
- }
- }
-
pub fn template(&self) -> &PbCreateRegionRequest {
&self.template
}
async fn build_one(
- &mut self,
- create_expr: &CreateTableExpr,
+ &self,
region_id: RegionId,
storage_path: String,
region_wal_options: &HashMap<RegionNumber, String>,
@@ -438,49 +463,18 @@ impl CreateRequestBuilder {
.insert(WAL_OPTIONS_KEY.to_string(), wal_options.clone())
});
- if self.template.engine == METRIC_ENGINE {
- self.metric_engine_hook(create_expr, region_id, &mut request)
- .await?;
- }
-
- Ok(request)
- }
+ if let Some(physical_table_id) = self.physical_table_id {
+            // A logical table has the same region numbers as its physical table, and they map one-to-one.
+            // For example, region 0 of a logical table must reside with region 0 of the physical table. So here we
+            // can simply concatenate the physical table id and the logical region number to get the physical region id.
+ let physical_region_id = RegionId::new(physical_table_id, region_id.region_number());
- async fn metric_engine_hook(
- &mut self,
- create_expr: &CreateTableExpr,
- region_id: RegionId,
- request: &mut PbCreateRegionRequest,
- ) -> Result<()> {
- if let Some(physical_table_name) = request.options.get(LOGICAL_TABLE_METADATA_KEY) {
- let table_id = if let Some(table_id) = self.physical_table_id {
- table_id
- } else {
- let table_name_manager = self.context.table_metadata_manager.table_name_manager();
- let table_name_key = TableNameKey::new(
- &create_expr.catalog_name,
- &create_expr.schema_name,
- physical_table_name,
- );
- let table_id = table_name_manager
- .get(table_name_key)
- .await?
- .context(TableInfoNotFoundSnafu {
- table_name: physical_table_name,
- })?
- .table_id();
- self.physical_table_id = Some(table_id);
- table_id
- };
- // Concat physical table's table id and corresponding region number to get
- // the physical region id.
- let physical_region_id = RegionId::new(table_id, region_id.region_number());
request.options.insert(
LOGICAL_TABLE_METADATA_KEY.to_string(),
physical_region_id.as_u64().to_string(),
);
}
- Ok(())
+ Ok(request)
}
}
diff --git a/src/common/meta/src/ddl_manager.rs b/src/common/meta/src/ddl_manager.rs
index 471de7ac852f..6b1e4bf94f38 100644
--- a/src/common/meta/src/ddl_manager.rs
+++ b/src/common/meta/src/ddl_manager.rs
@@ -177,7 +177,7 @@ impl DdlManager {
&self,
cluster_id: u64,
create_table_task: CreateTableTask,
- region_routes: Vec<RegionRoute>,
+ table_route: TableRouteValue,
region_wal_options: HashMap<RegionNumber, String>,
) -> Result<ProcedureId> {
let context = self.create_context();
@@ -185,7 +185,7 @@ impl DdlManager {
let procedure = CreateTableProcedure::new(
cluster_id,
create_table_task,
- region_routes,
+ table_route,
region_wal_options,
context,
);
@@ -275,9 +275,8 @@ async fn handle_truncate_table_task(
table_name: table_ref.to_string(),
})?;
- let table_route_value = table_route_value.with_context(|| error::TableRouteNotFoundSnafu {
- table_name: table_ref.to_string(),
- })?;
+ let table_route_value =
+ table_route_value.context(error::TableRouteNotFoundSnafu { table_id })?;
let table_route = table_route_value.into_inner().region_routes().clone();
@@ -356,9 +355,8 @@ async fn handle_drop_table_task(
table_name: table_ref.to_string(),
})?;
- let table_route_value = table_route_value.with_context(|| error::TableRouteNotFoundSnafu {
- table_name: table_ref.to_string(),
- })?;
+ let table_route_value =
+ table_route_value.context(error::TableRouteNotFoundSnafu { table_id })?;
let id = ddl_manager
.submit_drop_table_task(
@@ -392,7 +390,7 @@ async fn handle_create_table_task(
let TableMetadata {
table_id,
- region_routes,
+ table_route,
region_wal_options,
} = table_meta;
@@ -402,7 +400,7 @@ async fn handle_create_table_task(
.submit_create_table_task(
cluster_id,
create_table_task,
- region_routes,
+ table_route,
region_wal_options,
)
.await?;
diff --git a/src/common/meta/src/error.rs b/src/common/meta/src/error.rs
index 519d8ec7a1af..c120c8ba939d 100644
--- a/src/common/meta/src/error.rs
+++ b/src/common/meta/src/error.rs
@@ -135,9 +135,9 @@ pub enum Error {
source: table::error::Error,
},
- #[snafu(display("Table route not found: {}", table_name))]
+ #[snafu(display("Failed to find table route for table id {}", table_id))]
TableRouteNotFound {
- table_name: String,
+ table_id: TableId,
location: Location,
},
diff --git a/src/common/meta/src/key.rs b/src/common/meta/src/key.rs
index d0e24c309b2e..bb2b87a973f5 100644
--- a/src/common/meta/src/key.rs
+++ b/src/common/meta/src/key.rs
@@ -147,6 +147,14 @@ pub trait TableMetaKey {
fn as_raw_key(&self) -> Vec<u8>;
}
+pub trait TableMetaValue {
+ fn try_from_raw_value(raw_value: &[u8]) -> Result<Self>
+ where
+ Self: Sized;
+
+ fn try_as_raw_value(&self) -> Result<Vec<u8>>;
+}
+
pub type TableMetadataManagerRef = Arc<TableMetadataManager>;
pub struct TableMetadataManager {
@@ -221,7 +229,9 @@ impl<T: DeserializeOwned + Serialize> Serialize for DeserializedValueWithBytes<T
}
}
-impl<'de, T: DeserializeOwned + Serialize> Deserialize<'de> for DeserializedValueWithBytes<T> {
+impl<'de, T: DeserializeOwned + Serialize + TableMetaValue> Deserialize<'de>
+ for DeserializedValueWithBytes<T>
+{
/// - Deserialize behaviors:
///
/// The `inner` field will be deserialized from the `bytes` field.
@@ -248,11 +258,11 @@ impl<T: Serialize + DeserializeOwned + Clone> Clone for DeserializedValueWithByt
}
}
-impl<T: Serialize + DeserializeOwned> DeserializedValueWithBytes<T> {
+impl<T: Serialize + DeserializeOwned + TableMetaValue> DeserializedValueWithBytes<T> {
/// Returns a struct containing a deserialized value and an original `bytes`.
/// It accepts original bytes of inner.
pub fn from_inner_bytes(bytes: Bytes) -> Result<Self> {
- let inner = serde_json::from_slice(&bytes).context(error::SerdeJsonSnafu)?;
+ let inner = T::try_from_raw_value(&bytes)?;
Ok(Self { bytes, inner })
}
@@ -373,13 +383,10 @@ impl TableMetadataManager {
pub async fn create_table_metadata(
&self,
mut table_info: RawTableInfo,
- region_routes: Vec<RegionRoute>,
+ table_route_value: TableRouteValue,
region_wal_options: HashMap<RegionNumber, String>,
) -> Result<()> {
- let region_numbers = region_routes
- .iter()
- .map(|region| region.region.id.region_number())
- .collect::<Vec<_>>();
+ let region_numbers = table_route_value.region_numbers();
table_info.meta.region_numbers = region_numbers;
let table_id = table_info.ident.table_id;
let engine = table_info.meta.engine.clone();
@@ -403,30 +410,28 @@ impl TableMetadataManager {
.table_info_manager()
.build_create_txn(table_id, &table_info_value)?;
- // Creates datanode table key value pairs.
- let distribution = region_distribution(®ion_routes)?;
- let create_datanode_table_txn = self.datanode_table_manager().build_create_txn(
- table_id,
- &engine,
- ®ion_storage_path,
- region_options,
- region_wal_options,
- distribution,
- )?;
-
- // Creates table route.
- let table_route_value = TableRouteValue::new(region_routes);
let (create_table_route_txn, on_create_table_route_failure) = self
.table_route_manager()
.build_create_txn(table_id, &table_route_value)?;
- let txn = Txn::merge_all(vec![
+ let mut txn = Txn::merge_all(vec![
create_table_name_txn,
create_table_info_txn,
- create_datanode_table_txn,
create_table_route_txn,
]);
+ if let TableRouteValue::Physical(x) = &table_route_value {
+ let create_datanode_table_txn = self.datanode_table_manager().build_create_txn(
+ table_id,
+ &engine,
+ ®ion_storage_path,
+ region_options,
+ region_wal_options,
+ region_distribution(&x.region_routes)?,
+ )?;
+ txn = txn.merge(create_datanode_table_txn);
+ }
+
let r = self.kv_backend.txn(txn).await?;
// Checks whether metadata was already created.
@@ -711,12 +716,12 @@ impl_table_meta_key!(TableNameKey<'_>, TableInfoKey, DatanodeTableKey);
macro_rules! impl_table_meta_value {
($($val_ty: ty), *) => {
$(
- impl $val_ty {
- pub fn try_from_raw_value(raw_value: &[u8]) -> Result<Self> {
+ impl $crate::key::TableMetaValue for $val_ty {
+ fn try_from_raw_value(raw_value: &[u8]) -> Result<Self> {
serde_json::from_slice(raw_value).context(SerdeJsonSnafu)
}
- pub fn try_as_raw_value(&self) -> Result<Vec<u8>> {
+ fn try_as_raw_value(&self) -> Result<Vec<u8>> {
serde_json::to_vec(self).context(SerdeJsonSnafu)
}
}
@@ -744,8 +749,7 @@ macro_rules! impl_optional_meta_value {
impl_table_meta_value! {
TableNameValue,
TableInfoValue,
- DatanodeTableValue,
- TableRouteValue
+ DatanodeTableValue
}
impl_optional_meta_value! {
@@ -765,6 +769,7 @@ mod tests {
use super::datanode_table::DatanodeTableKey;
use super::test_utils;
use crate::ddl::utils::region_storage_path;
+ use crate::error::Result;
use crate::key::datanode_table::RegionInfo;
use crate::key::table_info::TableInfoValue;
use crate::key::table_name::TableNameKey;
@@ -780,14 +785,14 @@ mod tests {
let region_routes = vec![region_route.clone()];
let expected_region_routes =
- TableRouteValue::new(vec![region_route.clone(), region_route.clone()]);
+ TableRouteValue::physical(vec![region_route.clone(), region_route.clone()]);
let expected = serde_json::to_vec(&expected_region_routes).unwrap();
// Serialize behaviors:
// The inner field will be ignored.
let value = DeserializedValueWithBytes {
// ignored
- inner: TableRouteValue::new(region_routes.clone()),
+ inner: TableRouteValue::physical(region_routes.clone()),
bytes: Bytes::from(expected.clone()),
};
@@ -831,6 +836,20 @@ mod tests {
test_utils::new_test_table_info(10, region_numbers)
}
+ async fn create_physical_table_metadata(
+ table_metadata_manager: &TableMetadataManager,
+ table_info: RawTableInfo,
+ region_routes: Vec<RegionRoute>,
+ ) -> Result<()> {
+ table_metadata_manager
+ .create_table_metadata(
+ table_info,
+ TableRouteValue::physical(region_routes),
+ HashMap::default(),
+ )
+ .await
+ }
+
#[tokio::test]
async fn test_create_table_metadata() {
let mem_kv = Arc::new(MemoryKvBackend::default());
@@ -840,34 +859,33 @@ mod tests {
let table_info: RawTableInfo =
new_test_table_info(region_routes.iter().map(|r| r.region.id.region_number())).into();
// creates metadata.
- table_metadata_manager
- .create_table_metadata(
- table_info.clone(),
- region_routes.clone(),
- HashMap::default(),
- )
- .await
- .unwrap();
+ create_physical_table_metadata(
+ &table_metadata_manager,
+ table_info.clone(),
+ region_routes.clone(),
+ )
+ .await
+ .unwrap();
+
// if metadata was already created, it should be ok.
- table_metadata_manager
- .create_table_metadata(
- table_info.clone(),
- region_routes.clone(),
- HashMap::default(),
- )
- .await
- .unwrap();
+ assert!(create_physical_table_metadata(
+ &table_metadata_manager,
+ table_info.clone(),
+ region_routes.clone(),
+ )
+ .await
+ .is_ok());
+
let mut modified_region_routes = region_routes.clone();
modified_region_routes.push(region_route.clone());
         // if remote metadata already exists, it should return an error.
- assert!(table_metadata_manager
- .create_table_metadata(
- table_info.clone(),
- modified_region_routes,
- HashMap::default()
- )
- .await
- .is_err());
+ assert!(create_physical_table_metadata(
+ &table_metadata_manager,
+ table_info.clone(),
+ modified_region_routes
+ )
+ .await
+ .is_err());
let (remote_table_info, remote_table_route) = table_metadata_manager
.get_full_table_info(10)
@@ -894,18 +912,18 @@ mod tests {
new_test_table_info(region_routes.iter().map(|r| r.region.id.region_number())).into();
let table_id = table_info.ident.table_id;
let datanode_id = 2;
- let table_route_value =
- DeserializedValueWithBytes::from_inner(TableRouteValue::new(region_routes.clone()));
+ let table_route_value = DeserializedValueWithBytes::from_inner(TableRouteValue::physical(
+ region_routes.clone(),
+ ));
// creates metadata.
- table_metadata_manager
- .create_table_metadata(
- table_info.clone(),
- region_routes.clone(),
- HashMap::default(),
- )
- .await
- .unwrap();
+ create_physical_table_metadata(
+ &table_metadata_manager,
+ table_info.clone(),
+ region_routes.clone(),
+ )
+ .await
+ .unwrap();
let table_info_value =
DeserializedValueWithBytes::from_inner(TableInfoValue::new(table_info.clone()));
@@ -973,14 +991,14 @@ mod tests {
new_test_table_info(region_routes.iter().map(|r| r.region.id.region_number())).into();
let table_id = table_info.ident.table_id;
// creates metadata.
- table_metadata_manager
- .create_table_metadata(
- table_info.clone(),
- region_routes.clone(),
- HashMap::default(),
- )
- .await
- .unwrap();
+ create_physical_table_metadata(
+ &table_metadata_manager,
+ table_info.clone(),
+ region_routes.clone(),
+ )
+ .await
+ .unwrap();
+
let new_table_name = "another_name".to_string();
let table_info_value =
DeserializedValueWithBytes::from_inner(TableInfoValue::new(table_info.clone()));
@@ -1045,14 +1063,14 @@ mod tests {
new_test_table_info(region_routes.iter().map(|r| r.region.id.region_number())).into();
let table_id = table_info.ident.table_id;
// creates metadata.
- table_metadata_manager
- .create_table_metadata(
- table_info.clone(),
- region_routes.clone(),
- HashMap::default(),
- )
- .await
- .unwrap();
+ create_physical_table_metadata(
+ &table_metadata_manager,
+ table_info.clone(),
+ region_routes.clone(),
+ )
+ .await
+ .unwrap();
+
let mut new_table_info = table_info.clone();
new_table_info.name = "hi".to_string();
let current_table_info_value =
@@ -1123,17 +1141,18 @@ mod tests {
let table_info: RawTableInfo =
new_test_table_info(region_routes.iter().map(|r| r.region.id.region_number())).into();
let table_id = table_info.ident.table_id;
- let current_table_route_value =
- DeserializedValueWithBytes::from_inner(TableRouteValue::new(region_routes.clone()));
+ let current_table_route_value = DeserializedValueWithBytes::from_inner(
+ TableRouteValue::physical(region_routes.clone()),
+ );
+
// creates metadata.
- table_metadata_manager
- .create_table_metadata(
- table_info.clone(),
- region_routes.clone(),
- HashMap::default(),
- )
- .await
- .unwrap();
+ create_physical_table_metadata(
+ &table_metadata_manager,
+ table_info.clone(),
+ region_routes.clone(),
+ )
+ .await
+ .unwrap();
table_metadata_manager
.update_leader_region_status(table_id, ¤t_table_route_value, |region_route| {
@@ -1193,17 +1212,19 @@ mod tests {
let engine = table_info.meta.engine.as_str();
let region_storage_path =
region_storage_path(&table_info.catalog_name, &table_info.schema_name);
- let current_table_route_value =
- DeserializedValueWithBytes::from_inner(TableRouteValue::new(region_routes.clone()));
+ let current_table_route_value = DeserializedValueWithBytes::from_inner(
+ TableRouteValue::physical(region_routes.clone()),
+ );
+
// creates metadata.
- table_metadata_manager
- .create_table_metadata(
- table_info.clone(),
- region_routes.clone(),
- HashMap::default(),
- )
- .await
- .unwrap();
+ create_physical_table_metadata(
+ &table_metadata_manager,
+ table_info.clone(),
+ region_routes.clone(),
+ )
+ .await
+ .unwrap();
+
assert_datanode_table(&table_metadata_manager, table_id, ®ion_routes).await;
let new_region_routes = vec![
new_region_route(1, 1),
diff --git a/src/common/meta/src/key/datanode_table.rs b/src/common/meta/src/key/datanode_table.rs
index 3ddb00a19ac2..b2e25e014bc8 100644
--- a/src/common/meta/src/key/datanode_table.rs
+++ b/src/common/meta/src/key/datanode_table.rs
@@ -24,7 +24,8 @@ use table::metadata::TableId;
use crate::error::{InvalidTableMetadataSnafu, Result};
use crate::key::{
- RegionDistribution, TableMetaKey, DATANODE_TABLE_KEY_PATTERN, DATANODE_TABLE_KEY_PREFIX,
+ RegionDistribution, TableMetaKey, TableMetaValue, DATANODE_TABLE_KEY_PATTERN,
+ DATANODE_TABLE_KEY_PREFIX,
};
use crate::kv_backend::txn::{Txn, TxnOp};
use crate::kv_backend::KvBackendRef;
diff --git a/src/common/meta/src/key/table_info.rs b/src/common/meta/src/key/table_info.rs
index 21f8656451b7..5415a0f1f941 100644
--- a/src/common/meta/src/key/table_info.rs
+++ b/src/common/meta/src/key/table_info.rs
@@ -18,7 +18,7 @@ use serde::{Deserialize, Serialize};
use table::engine::TableReference;
use table::metadata::{RawTableInfo, TableId};
-use super::{DeserializedValueWithBytes, TABLE_INFO_KEY_PREFIX};
+use super::{DeserializedValueWithBytes, TableMetaValue, TABLE_INFO_KEY_PREFIX};
use crate::error::Result;
use crate::key::{to_removed_key, TableMetaKey};
use crate::kv_backend::txn::{Compare, CompareOp, Txn, TxnOp, TxnOpResponse};
diff --git a/src/common/meta/src/key/table_name.rs b/src/common/meta/src/key/table_name.rs
index cf3690e3ff8d..12d44dace180 100644
--- a/src/common/meta/src/key/table_name.rs
+++ b/src/common/meta/src/key/table_name.rs
@@ -18,7 +18,7 @@ use serde::{Deserialize, Serialize};
use snafu::OptionExt;
use table::metadata::TableId;
-use super::{TABLE_NAME_KEY_PATTERN, TABLE_NAME_KEY_PREFIX};
+use super::{TableMetaValue, TABLE_NAME_KEY_PATTERN, TABLE_NAME_KEY_PREFIX};
use crate::error::{Error, InvalidTableMetadataSnafu, Result};
use crate::key::{to_removed_key, TableMetaKey};
use crate::kv_backend::memory::MemoryKvBackend;
diff --git a/src/common/meta/src/key/table_region.rs b/src/common/meta/src/key/table_region.rs
index 7dabc8f114ef..e51e1a547194 100644
--- a/src/common/meta/src/key/table_region.rs
+++ b/src/common/meta/src/key/table_region.rs
@@ -71,8 +71,8 @@ impl_table_meta_value! {TableRegionValue}
#[cfg(test)]
mod tests {
-
use super::*;
+ use crate::key::TableMetaValue;
#[test]
fn test_serde() {
diff --git a/src/common/meta/src/key/table_route.rs b/src/common/meta/src/key/table_route.rs
index 852c17937c34..f799f321e544 100644
--- a/src/common/meta/src/key/table_route.rs
+++ b/src/common/meta/src/key/table_route.rs
@@ -16,11 +16,12 @@ use std::collections::HashMap;
use std::fmt::Display;
use serde::{Deserialize, Serialize};
-use store_api::storage::RegionId;
+use snafu::ResultExt;
+use store_api::storage::{RegionId, RegionNumber};
use table::metadata::TableId;
-use super::DeserializedValueWithBytes;
-use crate::error::Result;
+use super::{DeserializedValueWithBytes, TableMetaValue};
+use crate::error::{Result, SerdeJsonSnafu};
use crate::key::{to_removed_key, RegionDistribution, TableMetaKey, TABLE_ROUTE_PREFIX};
use crate::kv_backend::txn::{Compare, CompareOp, Txn, TxnOp, TxnOpResponse};
use crate::kv_backend::KvBackendRef;
@@ -38,6 +39,7 @@ impl TableRouteKey {
}
#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)]
+#[serde(tag = "type", rename_all = "snake_case")]
pub enum TableRouteValue {
Physical(PhysicalTableRouteValue),
Logical(LogicalTableRouteValue),
@@ -55,11 +57,8 @@ pub struct LogicalTableRouteValue {
}
impl TableRouteValue {
- pub fn new(region_routes: Vec<RegionRoute>) -> Self {
- Self::Physical(PhysicalTableRouteValue {
- region_routes,
- version: 0,
- })
+ pub fn physical(region_routes: Vec<RegionRoute>) -> Self {
+ Self::Physical(PhysicalTableRouteValue::new(region_routes))
}
/// Returns a new version [TableRouteValue] with `region_routes`.
@@ -102,6 +101,59 @@ impl TableRouteValue {
_ => unreachable!("Mistakenly been treated as a Physical TableRoute: {self:?}"),
}
}
+
+ pub fn region_numbers(&self) -> Vec<RegionNumber> {
+ match self {
+ TableRouteValue::Physical(x) => x
+ .region_routes
+ .iter()
+ .map(|region_route| region_route.region.id.region_number())
+ .collect::<Vec<_>>(),
+ TableRouteValue::Logical(x) => x
+ .region_ids()
+ .iter()
+ .map(|region_id| region_id.region_number())
+ .collect::<Vec<_>>(),
+ }
+ }
+}
+
+impl TableMetaValue for TableRouteValue {
+ fn try_from_raw_value(raw_value: &[u8]) -> Result<Self> {
+ let r = serde_json::from_slice::<TableRouteValue>(raw_value);
+ match r {
+ // Compatible with old TableRouteValue.
+ Err(e) if e.is_data() => Ok(Self::Physical(
+ serde_json::from_slice::<PhysicalTableRouteValue>(raw_value)
+ .context(SerdeJsonSnafu)?,
+ )),
+ Ok(x) => Ok(x),
+ Err(e) => Err(e).context(SerdeJsonSnafu),
+ }
+ }
+
+ fn try_as_raw_value(&self) -> Result<Vec<u8>> {
+ serde_json::to_vec(self).context(SerdeJsonSnafu)
+ }
+}
+
+impl PhysicalTableRouteValue {
+ pub fn new(region_routes: Vec<RegionRoute>) -> Self {
+ Self {
+ region_routes,
+ version: 0,
+ }
+ }
+}
+
+impl LogicalTableRouteValue {
+ pub fn physical_table_id(&self) -> TableId {
+ todo!()
+ }
+
+ pub fn region_ids(&self) -> Vec<RegionId> {
+ todo!()
+ }
}
impl TableMetaKey for TableRouteKey {
@@ -301,3 +353,20 @@ impl TableRouteManager {
.transpose()
}
}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_table_route_compatibility() {
+ let old_raw_v = r#"{"region_routes":[{"region":{"id":1,"name":"r1","partition":null,"attrs":{}},"leader_peer":{"id":2,"addr":"a2"},"follower_peers":[]},{"region":{"id":1,"name":"r1","partition":null,"attrs":{}},"leader_peer":{"id":2,"addr":"a2"},"follower_peers":[]}],"version":0}"#;
+ let v = TableRouteValue::try_from_raw_value(old_raw_v.as_bytes()).unwrap();
+
+ let new_raw_v = format!("{:?}", v);
+ assert_eq!(
+ new_raw_v,
+ r#"Physical(PhysicalTableRouteValue { region_routes: [RegionRoute { region: Region { id: 1(0, 1), name: "r1", partition: None, attrs: {} }, leader_peer: Some(Peer { id: 2, addr: "a2" }), follower_peers: [], leader_status: None }, RegionRoute { region: Region { id: 1(0, 1), name: "r1", partition: None, attrs: {} }, leader_peer: Some(Peer { id: 2, addr: "a2" }), follower_peers: [], leader_status: None }], version: 0 })"#
+ );
+ }
+}
diff --git a/src/frontend/src/instance/standalone.rs b/src/frontend/src/instance/standalone.rs
index d46ee3d45886..21496e28edc5 100644
--- a/src/frontend/src/instance/standalone.rs
+++ b/src/frontend/src/instance/standalone.rs
@@ -18,10 +18,14 @@ use std::sync::Arc;
use api::v1::region::{QueryRequest, RegionRequest, RegionResponse};
use async_trait::async_trait;
use client::region::check_response_header;
+use common_catalog::consts::METRIC_ENGINE;
use common_error::ext::BoxedError;
use common_meta::datanode_manager::{AffectedRows, Datanode, DatanodeManager, DatanodeRef};
use common_meta::ddl::{TableMetadata, TableMetadataAllocator, TableMetadataAllocatorContext};
use common_meta::error::{self as meta_error, Result as MetaResult, UnsupportedSnafu};
+use common_meta::key::table_route::{
+ LogicalTableRouteValue, PhysicalTableRouteValue, TableRouteValue,
+};
use common_meta::peer::Peer;
use common_meta::rpc::ddl::CreateTableTask;
use common_meta::rpc::router::{Region, RegionRoute};
@@ -34,7 +38,7 @@ use common_telemetry::{debug, info, tracing};
use datanode::region_server::RegionServer;
use servers::grpc::region_server::RegionServerHandler;
use snafu::{ensure, OptionExt, ResultExt};
-use store_api::storage::{RegionId, TableId};
+use store_api::storage::{RegionId, RegionNumber, TableId};
use crate::error::{InvalidRegionRequestSnafu, InvokeRegionServerSnafu, Result};
@@ -151,17 +155,29 @@ impl StandaloneTableMetadataAllocator {
};
Ok(table_id)
}
-}
-#[async_trait]
-impl TableMetadataAllocator for StandaloneTableMetadataAllocator {
- async fn create(
+ fn create_wal_options(
&self,
- _ctx: &TableMetadataAllocatorContext,
- task: &CreateTableTask,
- ) -> MetaResult<TableMetadata> {
- let table_id = self.allocate_table_id(task).await?;
+ table_route: &TableRouteValue,
+ ) -> MetaResult<HashMap<RegionNumber, String>> {
+ match table_route {
+ TableRouteValue::Physical(x) => {
+ let region_numbers = x
+ .region_routes
+ .iter()
+ .map(|route| route.region.id.region_number())
+ .collect();
+ allocate_region_wal_options(region_numbers, &self.wal_options_allocator)
+ }
+ TableRouteValue::Logical(_) => Ok(HashMap::new()),
+ }
+ }
+}
+fn create_table_route(table_id: TableId, task: &CreateTableTask) -> TableRouteValue {
+ if task.create_table.engine == METRIC_ENGINE {
+ TableRouteValue::Logical(LogicalTableRouteValue {})
+ } else {
let region_routes = task
.partitions
.iter()
@@ -182,13 +198,22 @@ impl TableMetadataAllocator for StandaloneTableMetadataAllocator {
}
})
.collect::<Vec<_>>();
+ TableRouteValue::Physical(PhysicalTableRouteValue::new(region_routes))
+ }
+}
- let region_numbers = region_routes
- .iter()
- .map(|route| route.region.id.region_number())
- .collect();
- let region_wal_options =
- allocate_region_wal_options(region_numbers, &self.wal_options_allocator)?;
+#[async_trait]
+impl TableMetadataAllocator for StandaloneTableMetadataAllocator {
+ async fn create(
+ &self,
+ _ctx: &TableMetadataAllocatorContext,
+ task: &CreateTableTask,
+ ) -> MetaResult<TableMetadata> {
+ let table_id = self.allocate_table_id(task).await?;
+
+ let table_route = create_table_route(table_id, task);
+
+ let region_wal_options = self.create_wal_options(&table_route)?;
debug!(
"Allocated region wal options {:?} for table {}",
@@ -197,8 +222,8 @@ impl TableMetadataAllocator for StandaloneTableMetadataAllocator {
Ok(TableMetadata {
table_id,
- region_routes,
- region_wal_options: HashMap::default(),
+ table_route,
+ region_wal_options,
})
}
}
diff --git a/src/meta-srv/src/handler/region_lease_handler.rs b/src/meta-srv/src/handler/region_lease_handler.rs
index d3433179fea0..eb792cf9ecd2 100644
--- a/src/meta-srv/src/handler/region_lease_handler.rs
+++ b/src/meta-srv/src/handler/region_lease_handler.rs
@@ -104,6 +104,7 @@ mod test {
use std::sync::Arc;
use common_meta::distributed_time_constants;
+ use common_meta::key::table_route::TableRouteValue;
use common_meta::key::test_utils::new_test_table_info;
use common_meta::key::TableMetadataManager;
use common_meta::kv_backend::memory::MemoryKvBackend;
@@ -161,7 +162,11 @@ mod test {
let table_metadata_manager = keeper.table_metadata_manager();
table_metadata_manager
- .create_table_metadata(table_info, region_routes, HashMap::default())
+ .create_table_metadata(
+ table_info,
+ TableRouteValue::physical(region_routes),
+ HashMap::default(),
+ )
.await
.unwrap();
@@ -303,7 +308,11 @@ mod test {
let table_metadata_manager = keeper.table_metadata_manager();
table_metadata_manager
- .create_table_metadata(table_info, region_routes, HashMap::default())
+ .create_table_metadata(
+ table_info,
+ TableRouteValue::physical(region_routes),
+ HashMap::default(),
+ )
.await
.unwrap();
diff --git a/src/meta-srv/src/procedure/region_migration/migration_start.rs b/src/meta-srv/src/procedure/region_migration/migration_start.rs
index cd9b5bad5a5d..fa84a1a6dd5e 100644
--- a/src/meta-srv/src/procedure/region_migration/migration_start.rs
+++ b/src/meta-srv/src/procedure/region_migration/migration_start.rs
@@ -137,7 +137,6 @@ impl RegionMigrationStart {
#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
- use std::collections::HashMap;
use common_meta::key::test_utils::new_test_table_info;
use common_meta::peer::Peer;
@@ -187,10 +186,8 @@ mod tests {
..Default::default()
};
- env.table_metadata_manager()
- .create_table_metadata(table_info, vec![region_route], HashMap::default())
- .await
- .unwrap();
+ env.create_physical_table_metadata(table_info, vec![region_route])
+ .await;
let err = state
.retrieve_region_route(&mut ctx, RegionId::new(1024, 3))
@@ -221,10 +218,8 @@ mod tests {
..Default::default()
}];
- env.table_metadata_manager()
- .create_table_metadata(table_info, region_routes, HashMap::default())
- .await
- .unwrap();
+ env.create_physical_table_metadata(table_info, region_routes)
+ .await;
let (next, _) = state.next(&mut ctx).await.unwrap();
@@ -254,10 +249,8 @@ mod tests {
..Default::default()
}];
- env.table_metadata_manager()
- .create_table_metadata(table_info, region_routes, HashMap::default())
- .await
- .unwrap();
+ env.create_physical_table_metadata(table_info, region_routes)
+ .await;
let (next, _) = state.next(&mut ctx).await.unwrap();
@@ -281,10 +274,8 @@ mod tests {
..Default::default()
}];
- env.table_metadata_manager()
- .create_table_metadata(table_info, region_routes, HashMap::default())
- .await
- .unwrap();
+ env.create_physical_table_metadata(table_info, region_routes)
+ .await;
let (next, _) = state.next(&mut ctx).await.unwrap();
diff --git a/src/meta-srv/src/procedure/region_migration/open_candidate_region.rs b/src/meta-srv/src/procedure/region_migration/open_candidate_region.rs
index dc6ebb2f4df9..74b904ce0105 100644
--- a/src/meta-srv/src/procedure/region_migration/open_candidate_region.rs
+++ b/src/meta-srv/src/procedure/region_migration/open_candidate_region.rs
@@ -187,6 +187,7 @@ mod tests {
use std::assert_matches::assert_matches;
use common_catalog::consts::MITO2_ENGINE;
+ use common_meta::key::table_route::TableRouteValue;
use common_meta::key::test_utils::new_test_table_info;
use common_meta::peer::Peer;
use common_meta::rpc::router::{Region, RegionRoute};
@@ -409,7 +410,11 @@ mod tests {
}];
env.table_metadata_manager()
- .create_table_metadata(table_info, region_routes, HashMap::default())
+ .create_table_metadata(
+ table_info,
+ TableRouteValue::physical(region_routes),
+ HashMap::default(),
+ )
.await
.unwrap();
diff --git a/src/meta-srv/src/procedure/region_migration/test_util.rs b/src/meta-srv/src/procedure/region_migration/test_util.rs
index 6496c18ee516..b34b1e655f4c 100644
--- a/src/meta-srv/src/procedure/region_migration/test_util.rs
+++ b/src/meta-srv/src/procedure/region_migration/test_util.rs
@@ -22,6 +22,7 @@ use api::v1::meta::{HeartbeatResponse, MailboxMessage, RequestHeader};
use common_meta::instruction::{
DowngradeRegionReply, InstructionReply, SimpleReply, UpgradeRegionReply,
};
+use common_meta::key::table_route::TableRouteValue;
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
use common_meta::kv_backend::memory::MemoryKvBackend;
use common_meta::peer::Peer;
@@ -144,6 +145,22 @@ impl TestingEnv {
provider: Arc::new(MockContextProvider::default()),
}
}
+
+ // Creates a table metadata with the physical table route.
+ pub async fn create_physical_table_metadata(
+ &self,
+ table_info: RawTableInfo,
+ region_routes: Vec<RegionRoute>,
+ ) {
+ self.table_metadata_manager
+ .create_table_metadata(
+ table_info,
+ TableRouteValue::physical(region_routes),
+ HashMap::default(),
+ )
+ .await
+ .unwrap();
+ }
}
/// Generates a [InstructionReply::OpenRegion] reply.
@@ -369,7 +386,11 @@ impl ProcedureMigrationTestSuite {
) {
self.env
.table_metadata_manager()
- .create_table_metadata(table_info, region_routes, HashMap::default())
+ .create_table_metadata(
+ table_info,
+ TableRouteValue::physical(region_routes),
+ HashMap::default(),
+ )
.await
.unwrap();
}
diff --git a/src/meta-srv/src/procedure/region_migration/update_metadata/downgrade_leader_region.rs b/src/meta-srv/src/procedure/region_migration/update_metadata/downgrade_leader_region.rs
index 05dbb1935f19..5a76d34819e7 100644
--- a/src/meta-srv/src/procedure/region_migration/update_metadata/downgrade_leader_region.rs
+++ b/src/meta-srv/src/procedure/region_migration/update_metadata/downgrade_leader_region.rs
@@ -74,7 +74,6 @@ impl UpdateMetadata {
#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
- use std::collections::HashMap;
use common_meta::key::test_utils::new_test_table_info;
use common_meta::peer::Peer;
@@ -136,12 +135,10 @@ mod tests {
},
];
- let table_metadata_manager = env.table_metadata_manager();
- table_metadata_manager
- .create_table_metadata(table_info, region_routes, HashMap::default())
- .await
- .unwrap();
+ env.create_physical_table_metadata(table_info, region_routes)
+ .await;
+ let table_metadata_manager = env.table_metadata_manager();
let original_table_route = table_metadata_manager
.table_route_manager()
.get(table_id)
@@ -190,11 +187,10 @@ mod tests {
..Default::default()
}];
+ env.create_physical_table_metadata(table_info, region_routes)
+ .await;
+
let table_metadata_manager = env.table_metadata_manager();
- table_metadata_manager
- .create_table_metadata(table_info, region_routes, HashMap::default())
- .await
- .unwrap();
let (next, _) = state.next(&mut ctx).await.unwrap();
@@ -233,11 +229,10 @@ mod tests {
..Default::default()
}];
+ env.create_physical_table_metadata(table_info, region_routes)
+ .await;
+
let table_metadata_manager = env.table_metadata_manager();
- table_metadata_manager
- .create_table_metadata(table_info, region_routes, HashMap::default())
- .await
- .unwrap();
let (next, _) = state.next(&mut ctx).await.unwrap();
diff --git a/src/meta-srv/src/procedure/region_migration/update_metadata/rollback_downgraded_region.rs b/src/meta-srv/src/procedure/region_migration/update_metadata/rollback_downgraded_region.rs
index e7fa73dedf8d..7281737752a4 100644
--- a/src/meta-srv/src/procedure/region_migration/update_metadata/rollback_downgraded_region.rs
+++ b/src/meta-srv/src/procedure/region_migration/update_metadata/rollback_downgraded_region.rs
@@ -59,7 +59,6 @@ impl UpdateMetadata {
#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
- use std::collections::HashMap;
use common_meta::key::test_utils::new_test_table_info;
use common_meta::peer::Peer;
@@ -128,12 +127,10 @@ mod tests {
region_routes
};
- let table_metadata_manager = env.table_metadata_manager();
- table_metadata_manager
- .create_table_metadata(table_info, region_routes, HashMap::default())
- .await
- .unwrap();
+ env.create_physical_table_metadata(table_info, region_routes)
+ .await;
+ let table_metadata_manager = env.table_metadata_manager();
let old_table_route = table_metadata_manager
.table_route_manager()
.get(table_id)
@@ -213,11 +210,10 @@ mod tests {
region_routes
};
+ env.create_physical_table_metadata(table_info, region_routes)
+ .await;
+
let table_metadata_manager = env.table_metadata_manager();
- table_metadata_manager
- .create_table_metadata(table_info, region_routes, HashMap::default())
- .await
- .unwrap();
let (next, _) = state.next(&mut ctx).await.unwrap();
diff --git a/src/meta-srv/src/procedure/region_migration/update_metadata/upgrade_candidate_region.rs b/src/meta-srv/src/procedure/region_migration/update_metadata/upgrade_candidate_region.rs
index bb86280ba000..597d9afe9a7b 100644
--- a/src/meta-srv/src/procedure/region_migration/update_metadata/upgrade_candidate_region.rs
+++ b/src/meta-srv/src/procedure/region_migration/update_metadata/upgrade_candidate_region.rs
@@ -176,7 +176,6 @@ impl UpdateMetadata {
#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
- use std::collections::HashMap;
use common_meta::key::test_utils::new_test_table_info;
use common_meta::peer::Peer;
@@ -225,11 +224,8 @@ mod tests {
..Default::default()
}];
- let table_metadata_manager = env.table_metadata_manager();
- table_metadata_manager
- .create_table_metadata(table_info, region_routes, HashMap::default())
- .await
- .unwrap();
+ env.create_physical_table_metadata(table_info, region_routes)
+ .await;
let err = state
.build_upgrade_candidate_region_metadata(&mut ctx)
@@ -254,11 +250,8 @@ mod tests {
..Default::default()
}];
- let table_metadata_manager = env.table_metadata_manager();
- table_metadata_manager
- .create_table_metadata(table_info, region_routes, HashMap::default())
- .await
- .unwrap();
+ env.create_physical_table_metadata(table_info, region_routes)
+ .await;
let err = state
.build_upgrade_candidate_region_metadata(&mut ctx)
@@ -285,11 +278,8 @@ mod tests {
leader_status: Some(RegionStatus::Downgraded),
}];
- let table_metadata_manager = env.table_metadata_manager();
- table_metadata_manager
- .create_table_metadata(table_info, region_routes, HashMap::default())
- .await
- .unwrap();
+ env.create_physical_table_metadata(table_info, region_routes)
+ .await;
let new_region_routes = state
.build_upgrade_candidate_region_metadata(&mut ctx)
@@ -326,12 +316,10 @@ mod tests {
},
];
- let table_metadata_manager = env.table_metadata_manager();
- table_metadata_manager
- .create_table_metadata(table_info, region_routes, HashMap::default())
- .await
- .unwrap();
+ env.create_physical_table_metadata(table_info, region_routes)
+ .await;
+ let table_metadata_manager = env.table_metadata_manager();
let original_table_route = table_metadata_manager
.table_route_manager()
.get(table_id)
@@ -385,11 +373,8 @@ mod tests {
leader_status: None,
}];
- let table_metadata_manager = env.table_metadata_manager();
- table_metadata_manager
- .create_table_metadata(table_info, region_routes, HashMap::default())
- .await
- .unwrap();
+ env.create_physical_table_metadata(table_info, region_routes)
+ .await;
let updated = state.check_metadata_updated(&mut ctx).await.unwrap();
assert!(!updated);
@@ -411,11 +396,8 @@ mod tests {
leader_status: None,
}];
- let table_metadata_manager = env.table_metadata_manager();
- table_metadata_manager
- .create_table_metadata(table_info, region_routes, HashMap::default())
- .await
- .unwrap();
+ env.create_physical_table_metadata(table_info, region_routes)
+ .await;
let updated = state.check_metadata_updated(&mut ctx).await.unwrap();
assert!(updated);
@@ -437,11 +419,8 @@ mod tests {
leader_status: Some(RegionStatus::Downgraded),
}];
- let table_metadata_manager = env.table_metadata_manager();
- table_metadata_manager
- .create_table_metadata(table_info, region_routes, HashMap::default())
- .await
- .unwrap();
+ env.create_physical_table_metadata(table_info, region_routes)
+ .await;
let err = state.check_metadata_updated(&mut ctx).await.unwrap_err();
assert_matches!(err, Error::Unexpected { .. });
@@ -470,11 +449,10 @@ mod tests {
.unwrap();
ctx.volatile_ctx.opening_region_guard = Some(guard);
+ env.create_physical_table_metadata(table_info, region_routes)
+ .await;
+
let table_metadata_manager = env.table_metadata_manager();
- table_metadata_manager
- .create_table_metadata(table_info, region_routes, HashMap::default())
- .await
- .unwrap();
let (next, _) = state.next(&mut ctx).await.unwrap();
diff --git a/src/meta-srv/src/procedure/tests.rs b/src/meta-srv/src/procedure/tests.rs
index e7b8a681138c..9ffad3aa6cf9 100644
--- a/src/meta-srv/src/procedure/tests.rs
+++ b/src/meta-srv/src/procedure/tests.rs
@@ -100,12 +100,12 @@ fn test_region_request_builder() {
let procedure = CreateTableProcedure::new(
1,
create_table_task(),
- test_data::new_region_routes(),
+ TableRouteValue::physical(test_data::new_region_routes()),
HashMap::default(),
test_data::new_ddl_context(Arc::new(DatanodeClients::default())),
);
- let template = procedure.new_region_request_builder().unwrap();
+ let template = procedure.new_region_request_builder(None).unwrap();
let expected = PbCreateRegionRequest {
region_id: 0,
@@ -191,7 +191,7 @@ async fn test_on_datanode_create_regions() {
let mut procedure = CreateTableProcedure::new(
1,
create_table_task(),
- region_routes,
+ TableRouteValue::physical(region_routes),
HashMap::default(),
test_data::new_ddl_context(datanode_manager),
);
@@ -247,7 +247,7 @@ async fn test_on_datanode_drop_regions() {
let procedure = DropTableProcedure::new(
1,
drop_table_task,
- DeserializedValueWithBytes::from_inner(TableRouteValue::new(region_routes)),
+ DeserializedValueWithBytes::from_inner(TableRouteValue::physical(region_routes)),
DeserializedValueWithBytes::from_inner(TableInfoValue::new(test_data::new_table_info())),
test_data::new_ddl_context(datanode_manager),
);
@@ -373,7 +373,7 @@ async fn test_submit_alter_region_requests() {
.table_metadata_manager
.create_table_metadata(
table_info.clone(),
- region_routes.clone(),
+ TableRouteValue::physical(region_routes),
HashMap::default(),
)
.await
diff --git a/src/meta-srv/src/region/lease_keeper.rs b/src/meta-srv/src/region/lease_keeper.rs
index b555d2e780dd..cbd2451896b1 100644
--- a/src/meta-srv/src/region/lease_keeper.rs
+++ b/src/meta-srv/src/region/lease_keeper.rs
@@ -188,6 +188,7 @@ mod tests {
use std::collections::{HashMap, HashSet};
use std::sync::Arc;
+ use common_meta::key::table_route::TableRouteValue;
use common_meta::key::test_utils::new_test_table_info;
use common_meta::key::TableMetadataManager;
use common_meta::kv_backend::memory::MemoryKvBackend;
@@ -291,7 +292,11 @@ mod tests {
let keeper = new_test_keeper();
let table_metadata_manager = keeper.table_metadata_manager();
table_metadata_manager
- .create_table_metadata(table_info, vec![region_route.clone()], HashMap::default())
+ .create_table_metadata(
+ table_info,
+ TableRouteValue::physical(vec![region_route]),
+ HashMap::default(),
+ )
.await
.unwrap();
@@ -378,7 +383,11 @@ mod tests {
let keeper = new_test_keeper();
let table_metadata_manager = keeper.table_metadata_manager();
table_metadata_manager
- .create_table_metadata(table_info, vec![region_route.clone()], HashMap::default())
+ .create_table_metadata(
+ table_info,
+ TableRouteValue::physical(vec![region_route]),
+ HashMap::default(),
+ )
.await
.unwrap();
diff --git a/src/meta-srv/src/table_meta_alloc.rs b/src/meta-srv/src/table_meta_alloc.rs
index 12fac723b430..21e5778209f7 100644
--- a/src/meta-srv/src/table_meta_alloc.rs
+++ b/src/meta-srv/src/table_meta_alloc.rs
@@ -12,17 +12,23 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use common_catalog::format_full_table_name;
+use std::collections::HashMap;
+
+use common_catalog::consts::METRIC_ENGINE;
use common_error::ext::BoxedError;
use common_meta::ddl::{TableMetadata, TableMetadataAllocator, TableMetadataAllocatorContext};
-use common_meta::error::{self as meta_error, Result as MetaResult};
+use common_meta::error::{ExternalSnafu, Result as MetaResult};
+use common_meta::key::table_route::{
+ LogicalTableRouteValue, PhysicalTableRouteValue, TableRouteValue,
+};
use common_meta::rpc::ddl::CreateTableTask;
use common_meta::rpc::router::{Region, RegionRoute};
use common_meta::sequence::SequenceRef;
use common_meta::wal::{allocate_region_wal_options, WalOptionsAllocatorRef};
-use common_telemetry::{debug, warn};
+use common_meta::ClusterId;
+use common_telemetry::debug;
use snafu::{ensure, ResultExt};
-use store_api::storage::{RegionId, TableId, MAX_REGION_SEQ};
+use store_api::storage::{RegionId, RegionNumber, TableId, MAX_REGION_SEQ};
use crate::error::{self, Result, TooManyPartitionsSnafu};
use crate::metasrv::{SelectorContext, SelectorRef};
@@ -49,6 +55,83 @@ impl MetaSrvTableMetadataAllocator {
wal_options_allocator,
}
}
+
+ async fn create_table_route(
+ &self,
+ cluster_id: ClusterId,
+ table_id: TableId,
+ task: &CreateTableTask,
+ ) -> Result<TableRouteValue> {
+ let table_route = if task.create_table.engine == METRIC_ENGINE {
+ TableRouteValue::Logical(LogicalTableRouteValue {})
+ } else {
+ let regions = task.partitions.len();
+
+ ensure!(regions <= MAX_REGION_SEQ as usize, TooManyPartitionsSnafu);
+
+ let mut peers = self
+ .selector
+ .select(
+ cluster_id,
+ &self.ctx,
+ SelectorOptions {
+ min_required_items: regions,
+ allow_duplication: true,
+ },
+ )
+ .await?;
+
+ ensure!(
+ peers.len() >= regions,
+ error::NoEnoughAvailableDatanodeSnafu {
+ required: regions,
+ available: peers.len(),
+ }
+ );
+
+ peers.truncate(regions);
+
+ let region_routes = task
+ .partitions
+ .iter()
+ .enumerate()
+ .map(|(i, partition)| {
+ let region = Region {
+ id: RegionId::new(table_id, i as RegionNumber),
+ partition: Some(partition.clone().into()),
+ ..Default::default()
+ };
+
+ let peer = peers[i % peers.len()].clone();
+
+ RegionRoute {
+ region,
+ leader_peer: Some(peer.into()),
+ ..Default::default()
+ }
+ })
+ .collect::<Vec<_>>();
+ TableRouteValue::Physical(PhysicalTableRouteValue::new(region_routes))
+ };
+ Ok(table_route)
+ }
+
+ fn create_wal_options(
+ &self,
+ table_route: &TableRouteValue,
+ ) -> MetaResult<HashMap<RegionNumber, String>> {
+ match table_route {
+ TableRouteValue::Physical(x) => {
+ let region_numbers = x
+ .region_routes
+ .iter()
+ .map(|route| route.region.id.region_number())
+ .collect();
+ allocate_region_wal_options(region_numbers, &self.wal_options_allocator)
+ }
+ TableRouteValue::Logical(_) => Ok(HashMap::new()),
+ }
+ }
}
#[async_trait::async_trait]
@@ -58,23 +141,15 @@ impl TableMetadataAllocator for MetaSrvTableMetadataAllocator {
ctx: &TableMetadataAllocatorContext,
task: &CreateTableTask,
) -> MetaResult<TableMetadata> {
- let (table_id, region_routes) = handle_create_region_routes(
- ctx.cluster_id,
- task,
- &self.ctx,
- &self.selector,
- &self.table_id_sequence,
- )
- .await
- .map_err(BoxedError::new)
- .context(meta_error::ExternalSnafu)?;
-
- let region_numbers = region_routes
- .iter()
- .map(|route| route.region.id.region_number())
- .collect();
- let region_wal_options =
- allocate_region_wal_options(region_numbers, &self.wal_options_allocator)?;
+ let table_id = self.table_id_sequence.next().await? as TableId;
+
+ let table_route = self
+ .create_table_route(ctx.cluster_id, table_id, task)
+ .await
+ .map_err(BoxedError::new)
+ .context(ExternalSnafu)?;
+
+ let region_wal_options = self.create_wal_options(&table_route)?;
debug!(
"Allocated region wal options {:?} for table {}",
@@ -83,84 +158,8 @@ impl TableMetadataAllocator for MetaSrvTableMetadataAllocator {
Ok(TableMetadata {
table_id,
- region_routes,
+ table_route,
region_wal_options,
})
}
}
-
-/// pre-allocates create table's table id and region routes.
-async fn handle_create_region_routes(
- cluster_id: u64,
- task: &CreateTableTask,
- ctx: &SelectorContext,
- selector: &SelectorRef,
- table_id_sequence: &SequenceRef,
-) -> Result<(TableId, Vec<RegionRoute>)> {
- let table_info = &task.table_info;
- let partitions = &task.partitions;
-
- let mut peers = selector
- .select(
- cluster_id,
- ctx,
- SelectorOptions {
- min_required_items: partitions.len(),
- allow_duplication: true,
- },
- )
- .await?;
-
- if peers.len() < partitions.len() {
- warn!(
- "Create table failed due to no enough available datanodes, table: {}, partition number: {}, datanode number: {}",
- format_full_table_name(
- &table_info.catalog_name,
- &table_info.schema_name,
- &table_info.name
- ),
- partitions.len(),
- peers.len()
- );
- return error::NoEnoughAvailableDatanodeSnafu {
- required: partitions.len(),
- available: peers.len(),
- }
- .fail();
- }
-
- // We don't need to keep all peers, just truncate it to the number of partitions.
- // If the peers are not enough, some peers will be used for multiple partitions.
- peers.truncate(partitions.len());
-
- let table_id = table_id_sequence
- .next()
- .await
- .context(error::NextSequenceSnafu)? as u32;
-
- ensure!(
- partitions.len() <= MAX_REGION_SEQ as usize,
- TooManyPartitionsSnafu
- );
-
- let region_routes = partitions
- .iter()
- .enumerate()
- .map(|(i, partition)| {
- let region = Region {
- id: RegionId::new(table_id, i as u32),
- partition: Some(partition.clone().into()),
- ..Default::default()
- };
- let peer = peers[i % peers.len()].clone();
- RegionRoute {
- region,
- leader_peer: Some(peer.into()),
- follower_peers: vec![], // follower_peers is not supported at the moment
- leader_status: None,
- }
- })
- .collect::<Vec<_>>();
-
- Ok((table_id, region_routes))
-}
diff --git a/src/meta-srv/src/test_util.rs b/src/meta-srv/src/test_util.rs
index 801b63ab3222..3013ac9ad745 100644
--- a/src/meta-srv/src/test_util.rs
+++ b/src/meta-srv/src/test_util.rs
@@ -17,6 +17,7 @@ use std::sync::Arc;
use chrono::DateTime;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MITO_ENGINE};
+use common_meta::key::table_route::TableRouteValue;
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
use common_meta::kv_backend::memory::MemoryKvBackend;
use common_meta::peer::Peer;
@@ -145,7 +146,11 @@ pub(crate) async fn prepare_table_region_and_info_value(
region_route_factory(4, 3),
];
table_metadata_manager
- .create_table_metadata(table_info, region_routes, HashMap::default())
+ .create_table_metadata(
+ table_info,
+ TableRouteValue::physical(region_routes),
+ HashMap::default(),
+ )
.await
.unwrap();
}
diff --git a/src/operator/src/tests/partition_manager.rs b/src/operator/src/tests/partition_manager.rs
index c0d2a9f74f6b..dd2a044b51c3 100644
--- a/src/operator/src/tests/partition_manager.rs
+++ b/src/operator/src/tests/partition_manager.rs
@@ -17,6 +17,7 @@ use std::sync::atomic::{AtomicU32, Ordering};
use std::sync::Arc;
use catalog::kvbackend::MetaKvBackend;
+use common_meta::key::table_route::TableRouteValue;
use common_meta::key::TableMetadataManager;
use common_meta::kv_backend::memory::MemoryKvBackend;
use common_meta::kv_backend::KvBackendRef;
@@ -114,7 +115,7 @@ pub(crate) async fn create_partition_rule_manager(
table_metadata_manager
.create_table_metadata(
new_test_table_info(1, "table_1", regions.clone().into_iter()).into(),
- vec![
+ TableRouteValue::physical(vec![
RegionRoute {
region: Region {
id: 3.into(),
@@ -169,7 +170,7 @@ pub(crate) async fn create_partition_rule_manager(
follower_peers: vec![],
leader_status: None,
},
- ],
+ ]),
region_wal_options.clone(),
)
.await
@@ -178,7 +179,7 @@ pub(crate) async fn create_partition_rule_manager(
table_metadata_manager
.create_table_metadata(
new_test_table_info(2, "table_2", regions.clone().into_iter()).into(),
- vec![
+ TableRouteValue::physical(vec![
RegionRoute {
region: Region {
id: 1.into(),
@@ -239,7 +240,7 @@ pub(crate) async fn create_partition_rule_manager(
follower_peers: vec![],
leader_status: None,
},
- ],
+ ]),
region_wal_options,
)
.await
|
refactor
|
hide `RegionRoute` behind `TableRouteValue` (#2989)
|
5533bd92937dcce068575ddb770a817120cdc095
|
2024-06-11 13:14:53
|
Yingwen
|
chore(common-macro): remove features covered by full (#4131)
| false
|
diff --git a/src/common/macro/Cargo.toml b/src/common/macro/Cargo.toml
index d474d4b5e9e7..ad56147eb4fd 100644
--- a/src/common/macro/Cargo.toml
+++ b/src/common/macro/Cargo.toml
@@ -14,11 +14,6 @@ workspace = true
proc-macro2 = "1.0.66"
quote = "1.0"
syn = { version = "2.0", features = [
- "derive",
- "parsing",
- "printing",
- "clone-impls",
- "proc-macro",
"extra-traits",
"full",
] }
|
chore
|
remove features covered by full (#4131)
|
3a4c9f2b459edd014be5e7b40ebc88b90638d2a9
|
2023-11-24 14:49:33
|
Wei
|
feat: supports decimal type in RPC (#2788)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index da0b43f05d94..278ed2f85772 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -199,6 +199,7 @@ name = "api"
version = "0.4.3"
dependencies = [
"common-base",
+ "common-decimal",
"common-error",
"common-macro",
"common-time",
@@ -3536,7 +3537,7 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"
[[package]]
name = "greptime-proto"
version = "0.1.0"
-source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=25429306d0379ad29211a062a81da2554a0208ab#25429306d0379ad29211a062a81da2554a0208ab"
+source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=a11efce55d8ce20257e08842e4f4c1c8fce2b3a8#a11efce55d8ce20257e08842e4f4c1c8fce2b3a8"
dependencies = [
"prost 0.12.2",
"serde",
diff --git a/Cargo.toml b/Cargo.toml
index ba46247cf922..4c3c78eef2d1 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -85,7 +85,7 @@ derive_builder = "0.12"
etcd-client = "0.12"
futures = "0.3"
futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "25429306d0379ad29211a062a81da2554a0208ab" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "a11efce55d8ce20257e08842e4f4c1c8fce2b3a8" }
humantime-serde = "1.1"
itertools = "0.10"
lazy_static = "1.4"
diff --git a/benchmarks/src/bin/nyc-taxi.rs b/benchmarks/src/bin/nyc-taxi.rs
index b30989625f73..678b5321c00a 100644
--- a/benchmarks/src/bin/nyc-taxi.rs
+++ b/benchmarks/src/bin/nyc-taxi.rs
@@ -152,6 +152,7 @@ fn convert_record_batch(record_batch: RecordBatch) -> (Vec<Column>, u32) {
.unwrap_or_default(),
datatype: datatype.into(),
semantic_type: semantic_type as i32,
+ ..Default::default()
};
columns.push(column);
}
@@ -266,6 +267,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
default_constraint: vec![],
semantic_type: SemanticType::Tag as i32,
comment: String::new(),
+ ..Default::default()
},
ColumnDef {
name: "tpep_pickup_datetime".to_string(),
@@ -274,6 +276,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
default_constraint: vec![],
semantic_type: SemanticType::Timestamp as i32,
comment: String::new(),
+ ..Default::default()
},
ColumnDef {
name: "tpep_dropoff_datetime".to_string(),
@@ -282,6 +285,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
+ ..Default::default()
},
ColumnDef {
name: "passenger_count".to_string(),
@@ -290,6 +294,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
+ ..Default::default()
},
ColumnDef {
name: "trip_distance".to_string(),
@@ -298,6 +303,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
+ ..Default::default()
},
ColumnDef {
name: "RatecodeID".to_string(),
@@ -306,6 +312,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
+ ..Default::default()
},
ColumnDef {
name: "store_and_fwd_flag".to_string(),
@@ -314,6 +321,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
+ ..Default::default()
},
ColumnDef {
name: "PULocationID".to_string(),
@@ -322,6 +330,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
+ ..Default::default()
},
ColumnDef {
name: "DOLocationID".to_string(),
@@ -330,6 +339,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
+ ..Default::default()
},
ColumnDef {
name: "payment_type".to_string(),
@@ -338,6 +348,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
+ ..Default::default()
},
ColumnDef {
name: "fare_amount".to_string(),
@@ -346,6 +357,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
+ ..Default::default()
},
ColumnDef {
name: "extra".to_string(),
@@ -354,6 +366,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
+ ..Default::default()
},
ColumnDef {
name: "mta_tax".to_string(),
@@ -362,6 +375,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
+ ..Default::default()
},
ColumnDef {
name: "tip_amount".to_string(),
@@ -370,6 +384,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
+ ..Default::default()
},
ColumnDef {
name: "tolls_amount".to_string(),
@@ -378,6 +393,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
+ ..Default::default()
},
ColumnDef {
name: "improvement_surcharge".to_string(),
@@ -386,6 +402,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
+ ..Default::default()
},
ColumnDef {
name: "total_amount".to_string(),
@@ -394,6 +411,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
+ ..Default::default()
},
ColumnDef {
name: "congestion_surcharge".to_string(),
@@ -402,6 +420,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
+ ..Default::default()
},
ColumnDef {
name: "airport_fee".to_string(),
@@ -410,6 +429,7 @@ fn create_table_expr(table_name: &str) -> CreateTableExpr {
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
+ ..Default::default()
},
],
time_index: "tpep_pickup_datetime".to_string(),
diff --git a/src/api/Cargo.toml b/src/api/Cargo.toml
index 7c1ff3e04ba6..9beea1ff51b9 100644
--- a/src/api/Cargo.toml
+++ b/src/api/Cargo.toml
@@ -6,11 +6,13 @@ license.workspace = true
[dependencies]
common-base.workspace = true
+common-decimal.workspace = true
common-error.workspace = true
common-macro.workspace = true
common-time.workspace = true
datatypes.workspace = true
greptime-proto.workspace = true
+paste = "1.0"
prost.workspace = true
snafu.workspace = true
tonic.workspace = true
diff --git a/src/api/src/helper.rs b/src/api/src/helper.rs
index 9328540bdf0a..40b8d1533125 100644
--- a/src/api/src/helper.rs
+++ b/src/api/src/helper.rs
@@ -15,6 +15,8 @@
use std::sync::Arc;
use common_base::BitVec;
+use common_decimal::decimal128::{DECIMAL128_DEFAULT_SCALE, DECIMAL128_MAX_PRECISION};
+use common_decimal::Decimal128;
use common_time::interval::IntervalUnit;
use common_time::time::Time;
use common_time::timestamp::TimeUnit;
@@ -26,47 +28,71 @@ use datatypes::types::{
};
use datatypes::value::{OrderedF32, OrderedF64, Value};
use datatypes::vectors::{
- BinaryVector, BooleanVector, DateTimeVector, DateVector, DurationMicrosecondVector,
- DurationMillisecondVector, DurationNanosecondVector, DurationSecondVector, Float32Vector,
- Float64Vector, Int32Vector, Int64Vector, IntervalDayTimeVector, IntervalMonthDayNanoVector,
- IntervalYearMonthVector, PrimitiveVector, StringVector, TimeMicrosecondVector,
- TimeMillisecondVector, TimeNanosecondVector, TimeSecondVector, TimestampMicrosecondVector,
- TimestampMillisecondVector, TimestampNanosecondVector, TimestampSecondVector, UInt32Vector,
- UInt64Vector, VectorRef,
+ BinaryVector, BooleanVector, DateTimeVector, DateVector, Decimal128Vector,
+ DurationMicrosecondVector, DurationMillisecondVector, DurationNanosecondVector,
+ DurationSecondVector, Float32Vector, Float64Vector, Int32Vector, Int64Vector,
+ IntervalDayTimeVector, IntervalMonthDayNanoVector, IntervalYearMonthVector, PrimitiveVector,
+ StringVector, TimeMicrosecondVector, TimeMillisecondVector, TimeNanosecondVector,
+ TimeSecondVector, TimestampMicrosecondVector, TimestampMillisecondVector,
+ TimestampNanosecondVector, TimestampSecondVector, UInt32Vector, UInt64Vector, VectorRef,
};
+use greptime_proto::v1;
+use greptime_proto::v1::column_data_type_extension::TypeExt;
use greptime_proto::v1::ddl_request::Expr;
use greptime_proto::v1::greptime_request::Request;
use greptime_proto::v1::query_request::Query;
use greptime_proto::v1::value::ValueData;
-use greptime_proto::v1::{self, DdlRequest, IntervalMonthDayNano, QueryRequest, Row, SemanticType};
+use greptime_proto::v1::{
+ ColumnDataTypeExtension, DdlRequest, DecimalTypeExtension, QueryRequest, Row, SemanticType,
+};
+use paste::paste;
use snafu::prelude::*;
use crate::error::{self, Result};
use crate::v1::column::Values;
use crate::v1::{Column, ColumnDataType, Value as GrpcValue};
-#[derive(Debug, PartialEq, Eq)]
-pub struct ColumnDataTypeWrapper(ColumnDataType);
+/// ColumnDataTypeWrapper is a wrapper of ColumnDataType and ColumnDataTypeExtension.
+/// It could be used to convert with ConcreteDataType.
+#[derive(Debug, PartialEq)]
+pub struct ColumnDataTypeWrapper {
+ datatype: ColumnDataType,
+ datatype_ext: Option<ColumnDataTypeExtension>,
+}
impl ColumnDataTypeWrapper {
- pub fn try_new(datatype: i32) -> Result<Self> {
+ /// Try to create a ColumnDataTypeWrapper from i32(ColumnDataType) and ColumnDataTypeExtension.
+ pub fn try_new(datatype: i32, datatype_ext: Option<ColumnDataTypeExtension>) -> Result<Self> {
let datatype = ColumnDataType::try_from(datatype)
.context(error::UnknownColumnDataTypeSnafu { datatype })?;
- Ok(Self(datatype))
+ Ok(Self {
+ datatype,
+ datatype_ext,
+ })
}
- pub fn new(datatype: ColumnDataType) -> Self {
- Self(datatype)
+ /// Create a ColumnDataTypeWrapper from ColumnDataType and ColumnDataTypeExtension.
+ pub fn new(datatype: ColumnDataType, datatype_ext: Option<ColumnDataTypeExtension>) -> Self {
+ Self {
+ datatype,
+ datatype_ext,
+ }
}
+ /// Get the ColumnDataType.
pub fn datatype(&self) -> ColumnDataType {
- self.0
+ self.datatype
+ }
+
+ /// Get a tuple of ColumnDataType and ColumnDataTypeExtension.
+ pub fn to_parts(&self) -> (ColumnDataType, Option<ColumnDataTypeExtension>) {
+ (self.datatype, self.datatype_ext.clone())
}
}
impl From<ColumnDataTypeWrapper> for ConcreteDataType {
- fn from(datatype: ColumnDataTypeWrapper) -> Self {
- match datatype.0 {
+ fn from(datatype_wrapper: ColumnDataTypeWrapper) -> Self {
+ match datatype_wrapper.datatype {
ColumnDataType::Boolean => ConcreteDataType::boolean_datatype(),
ColumnDataType::Int8 => ConcreteDataType::int8_datatype(),
ColumnDataType::Int16 => ConcreteDataType::int16_datatype(),
@@ -109,6 +135,100 @@ impl From<ColumnDataTypeWrapper> for ConcreteDataType {
ConcreteDataType::duration_microsecond_datatype()
}
ColumnDataType::DurationNanosecond => ConcreteDataType::duration_nanosecond_datatype(),
+ ColumnDataType::Decimal128 => {
+ if let Some(TypeExt::DecimalType(d)) = datatype_wrapper
+ .datatype_ext
+ .as_ref()
+ .and_then(|datatype_ext| datatype_ext.type_ext.as_ref())
+ {
+ ConcreteDataType::decimal128_datatype(d.precision as u8, d.scale as i8)
+ } else {
+ ConcreteDataType::decimal128_default_datatype()
+ }
+ }
+ }
+ }
+}
+
+/// This macro is used to generate datatype functions
+/// with lower style for ColumnDataTypeWrapper.
+///
+///
+/// For example: we can use `ColumnDataTypeWrapper::int8_datatype()`,
+/// to get a ColumnDataTypeWrapper with datatype `ColumnDataType::Int8`.
+macro_rules! impl_column_type_functions {
+ ($($Type: ident), +) => {
+ paste! {
+ impl ColumnDataTypeWrapper {
+ $(
+ pub fn [<$Type:lower _datatype>]() -> ColumnDataTypeWrapper {
+ ColumnDataTypeWrapper {
+ datatype: ColumnDataType::$Type,
+ datatype_ext: None,
+ }
+ }
+ )+
+ }
+ }
+ }
+}
+
+/// This macro is used to generate datatype functions
+/// with snake style for ColumnDataTypeWrapper.
+///
+///
+/// For example: we can use `ColumnDataTypeWrapper::duration_second_datatype()`,
+/// to get a ColumnDataTypeWrapper with datatype `ColumnDataType::DurationSecond`.
+macro_rules! impl_column_type_functions_with_snake {
+ ($($TypeName: ident), +) => {
+ paste!{
+ impl ColumnDataTypeWrapper {
+ $(
+ pub fn [<$TypeName:snake _datatype>]() -> ColumnDataTypeWrapper {
+ ColumnDataTypeWrapper {
+ datatype: ColumnDataType::$TypeName,
+ datatype_ext: None,
+ }
+ }
+ )+
+ }
+ }
+ };
+}
+
+impl_column_type_functions!(
+ Boolean, Uint8, Uint16, Uint32, Uint64, Int8, Int16, Int32, Int64, Float32, Float64, Binary,
+ Date, Datetime, String
+);
+
+impl_column_type_functions_with_snake!(
+ TimestampSecond,
+ TimestampMillisecond,
+ TimestampMicrosecond,
+ TimestampNanosecond,
+ TimeSecond,
+ TimeMillisecond,
+ TimeMicrosecond,
+ TimeNanosecond,
+ IntervalYearMonth,
+ IntervalDayTime,
+ IntervalMonthDayNano,
+ DurationSecond,
+ DurationMillisecond,
+ DurationMicrosecond,
+ DurationNanosecond
+);
+
+impl ColumnDataTypeWrapper {
+ pub fn decimal128_datatype(precision: i32, scale: i32) -> Self {
+ ColumnDataTypeWrapper {
+ datatype: ColumnDataType::Decimal128,
+ datatype_ext: Some(ColumnDataTypeExtension {
+ type_ext: Some(TypeExt::DecimalType(DecimalTypeExtension {
+ precision,
+ scale,
+ })),
+ }),
}
}
}
@@ -117,7 +237,7 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
type Error = error::Error;
fn try_from(datatype: ConcreteDataType) -> Result<Self> {
- let datatype = ColumnDataTypeWrapper(match datatype {
+ let column_datatype = match datatype {
ConcreteDataType::Boolean(_) => ColumnDataType::Boolean,
ConcreteDataType::Int8(_) => ColumnDataType::Int8,
ConcreteDataType::Int16(_) => ColumnDataType::Int16,
@@ -156,14 +276,30 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
DurationType::Microsecond(_) => ColumnDataType::DurationMicrosecond,
DurationType::Nanosecond(_) => ColumnDataType::DurationNanosecond,
},
+ ConcreteDataType::Decimal128(_) => ColumnDataType::Decimal128,
ConcreteDataType::Null(_)
| ConcreteDataType::List(_)
- | ConcreteDataType::Dictionary(_)
- | ConcreteDataType::Decimal128(_) => {
+ | ConcreteDataType::Dictionary(_) => {
return error::IntoColumnDataTypeSnafu { from: datatype }.fail()
}
- });
- Ok(datatype)
+ };
+ let datatype_extension = match column_datatype {
+ ColumnDataType::Decimal128 => {
+ datatype
+ .as_decimal128()
+ .map(|decimal_type| ColumnDataTypeExtension {
+ type_ext: Some(TypeExt::DecimalType(DecimalTypeExtension {
+ precision: decimal_type.precision() as i32,
+ scale: decimal_type.scale() as i32,
+ })),
+ })
+ }
+ _ => None,
+ };
+ Ok(Self {
+ datatype: column_datatype,
+ datatype_ext: datatype_extension,
+ })
}
}
@@ -289,6 +425,10 @@ pub fn values_with_capacity(datatype: ColumnDataType, capacity: usize) -> Values
duration_nanosecond_values: Vec::with_capacity(capacity),
..Default::default()
},
+ ColumnDataType::Decimal128 => Values {
+ decimal128_values: Vec::with_capacity(capacity),
+ ..Default::default()
+ },
}
}
@@ -342,7 +482,8 @@ pub fn push_vals(column: &mut Column, origin_count: usize, vector: VectorRef) {
TimeUnit::Microsecond => values.duration_microsecond_values.push(val.value()),
TimeUnit::Nanosecond => values.duration_nanosecond_values.push(val.value()),
},
- Value::List(_) | Value::Decimal128(_) => unreachable!(),
+ Value::Decimal128(val) => values.decimal128_values.push(convert_to_pb_decimal128(val)),
+ Value::List(_) => unreachable!(),
});
column.null_mask = null_mask.into_vec();
}
@@ -382,17 +523,29 @@ fn ddl_request_type(request: &DdlRequest) -> &'static str {
}
/// Converts an i128 value to google protobuf type [IntervalMonthDayNano].
-pub fn convert_i128_to_interval(v: i128) -> IntervalMonthDayNano {
+pub fn convert_i128_to_interval(v: i128) -> v1::IntervalMonthDayNano {
let interval = Interval::from_i128(v);
let (months, days, nanoseconds) = interval.to_month_day_nano();
- IntervalMonthDayNano {
+ v1::IntervalMonthDayNano {
months,
days,
nanoseconds,
}
}
-pub fn pb_value_to_value_ref(value: &v1::Value) -> ValueRef {
+/// Convert common decimal128 to grpc decimal128 without precision and scale.
+pub fn convert_to_pb_decimal128(v: Decimal128) -> v1::Decimal128 {
+ let value = v.val();
+ v1::Decimal128 {
+ hi: (value >> 64) as i64,
+ lo: value as i64,
+ }
+}
+
+pub fn pb_value_to_value_ref<'a>(
+ value: &'a v1::Value,
+ datatype_ext: &'a Option<ColumnDataTypeExtension>,
+) -> ValueRef<'a> {
let Some(value) = &value.value_data else {
return ValueRef::Null;
};
@@ -437,6 +590,28 @@ pub fn pb_value_to_value_ref(value: &v1::Value) -> ValueRef {
ValueData::DurationMillisecondValue(v) => ValueRef::Duration(Duration::new_millisecond(*v)),
ValueData::DurationMicrosecondValue(v) => ValueRef::Duration(Duration::new_microsecond(*v)),
ValueData::DurationNanosecondValue(v) => ValueRef::Duration(Duration::new_nanosecond(*v)),
+ ValueData::Decimal128Value(v) => {
+ // get precision and scale from datatype_extension
+ if let Some(TypeExt::DecimalType(d)) = datatype_ext
+ .as_ref()
+ .and_then(|column_ext| column_ext.type_ext.as_ref())
+ {
+ ValueRef::Decimal128(Decimal128::from_value_precision_scale(
+ v.hi,
+ v.lo,
+ d.precision as u8,
+ d.scale as i8,
+ ))
+ } else {
+ // If the precision and scale are not set, use the default value.
+ ValueRef::Decimal128(Decimal128::from_value_precision_scale(
+ v.hi,
+ v.lo,
+ DECIMAL128_MAX_PRECISION,
+ DECIMAL128_DEFAULT_SCALE,
+ ))
+ }
+ }
}
}
@@ -523,10 +698,12 @@ pub fn pb_values_to_vector_ref(data_type: &ConcreteDataType, values: Values) ->
values.duration_nanosecond_values,
)),
},
- ConcreteDataType::Null(_)
- | ConcreteDataType::List(_)
- | ConcreteDataType::Dictionary(_)
- | ConcreteDataType::Decimal128(_) => {
+ ConcreteDataType::Decimal128(d) => Arc::new(Decimal128Vector::from_values(
+ values.decimal128_values.iter().map(|x| {
+ Decimal128::from_value_precision_scale(x.hi, x.lo, d.precision(), d.scale()).into()
+ }),
+ )),
+ ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => {
unreachable!()
}
}
@@ -696,10 +873,19 @@ pub fn pb_values_to_values(data_type: &ConcreteDataType, values: Values) -> Vec<
.into_iter()
.map(|v| Value::Duration(Duration::new_nanosecond(v)))
.collect(),
- ConcreteDataType::Null(_)
- | ConcreteDataType::List(_)
- | ConcreteDataType::Dictionary(_)
- | ConcreteDataType::Decimal128(_) => {
+ ConcreteDataType::Decimal128(d) => values
+ .decimal128_values
+ .into_iter()
+ .map(|v| {
+ Value::Decimal128(Decimal128::from_value_precision_scale(
+ v.hi,
+ v.lo,
+ d.precision(),
+ d.scale(),
+ ))
+ })
+ .collect(),
+ ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => {
unreachable!()
}
}
@@ -711,12 +897,14 @@ pub fn is_semantic_type_eq(type_value: i32, semantic_type: SemanticType) -> bool
}
/// Returns true if the pb type value is valid.
-pub fn is_column_type_value_eq(type_value: i32, expect_type: &ConcreteDataType) -> bool {
- let Ok(column_type) = ColumnDataType::try_from(type_value) else {
- return false;
- };
-
- is_column_type_eq(column_type, expect_type)
+pub fn is_column_type_value_eq(
+ type_value: i32,
+ type_extension: Option<ColumnDataTypeExtension>,
+ expect_type: &ConcreteDataType,
+) -> bool {
+ ColumnDataTypeWrapper::try_new(type_value, type_extension)
+ .map(|wrapper| ConcreteDataType::from(wrapper) == *expect_type)
+ .unwrap_or(false)
}
/// Convert value into proto's value.
@@ -823,13 +1011,19 @@ pub fn to_proto_value(value: Value) -> Option<v1::Value> {
value_data: Some(ValueData::DurationNanosecondValue(v.value())),
},
},
- Value::List(_) | Value::Decimal128(_) => return None,
+ Value::Decimal128(v) => {
+ let (hi, lo) = v.split_value();
+ v1::Value {
+ value_data: Some(ValueData::Decimal128Value(v1::Decimal128 { hi, lo })),
+ }
+ }
+ Value::List(_) => return None,
};
Some(proto_value)
}
-/// Returns the [ColumnDataType] of the value.
+/// Returns the [ColumnDataTypeWrapper] of the value.
///
/// If value is null, returns `None`.
pub fn proto_value_type(value: &v1::Value) -> Option<ColumnDataType> {
@@ -864,66 +1058,11 @@ pub fn proto_value_type(value: &v1::Value) -> Option<ColumnDataType> {
ValueData::DurationMillisecondValue(_) => ColumnDataType::DurationMillisecond,
ValueData::DurationMicrosecondValue(_) => ColumnDataType::DurationMicrosecond,
ValueData::DurationNanosecondValue(_) => ColumnDataType::DurationNanosecond,
+ ValueData::Decimal128Value(_) => ColumnDataType::Decimal128,
};
Some(value_type)
}
-/// Convert [ConcreteDataType] to [ColumnDataType].
-pub fn to_column_data_type(data_type: &ConcreteDataType) -> Option<ColumnDataType> {
- let column_data_type = match data_type {
- ConcreteDataType::Boolean(_) => ColumnDataType::Boolean,
- ConcreteDataType::Int8(_) => ColumnDataType::Int8,
- ConcreteDataType::Int16(_) => ColumnDataType::Int16,
- ConcreteDataType::Int32(_) => ColumnDataType::Int32,
- ConcreteDataType::Int64(_) => ColumnDataType::Int64,
- ConcreteDataType::UInt8(_) => ColumnDataType::Uint8,
- ConcreteDataType::UInt16(_) => ColumnDataType::Uint16,
- ConcreteDataType::UInt32(_) => ColumnDataType::Uint32,
- ConcreteDataType::UInt64(_) => ColumnDataType::Uint64,
- ConcreteDataType::Float32(_) => ColumnDataType::Float32,
- ConcreteDataType::Float64(_) => ColumnDataType::Float64,
- ConcreteDataType::Binary(_) => ColumnDataType::Binary,
- ConcreteDataType::String(_) => ColumnDataType::String,
- ConcreteDataType::Date(_) => ColumnDataType::Date,
- ConcreteDataType::DateTime(_) => ColumnDataType::Datetime,
- ConcreteDataType::Timestamp(TimestampType::Second(_)) => ColumnDataType::TimestampSecond,
- ConcreteDataType::Timestamp(TimestampType::Millisecond(_)) => {
- ColumnDataType::TimestampMillisecond
- }
- ConcreteDataType::Timestamp(TimestampType::Microsecond(_)) => {
- ColumnDataType::TimestampMicrosecond
- }
- ConcreteDataType::Timestamp(TimestampType::Nanosecond(_)) => {
- ColumnDataType::TimestampNanosecond
- }
- ConcreteDataType::Time(TimeType::Second(_)) => ColumnDataType::TimeSecond,
- ConcreteDataType::Time(TimeType::Millisecond(_)) => ColumnDataType::TimeMillisecond,
- ConcreteDataType::Time(TimeType::Microsecond(_)) => ColumnDataType::TimeMicrosecond,
- ConcreteDataType::Time(TimeType::Nanosecond(_)) => ColumnDataType::TimeNanosecond,
- ConcreteDataType::Duration(DurationType::Second(_)) => ColumnDataType::DurationSecond,
- ConcreteDataType::Duration(DurationType::Millisecond(_)) => {
- ColumnDataType::DurationMillisecond
- }
- ConcreteDataType::Duration(DurationType::Microsecond(_)) => {
- ColumnDataType::DurationMicrosecond
- }
- ConcreteDataType::Duration(DurationType::Nanosecond(_)) => {
- ColumnDataType::DurationNanosecond
- }
- ConcreteDataType::Interval(IntervalType::YearMonth(_)) => ColumnDataType::IntervalYearMonth,
- ConcreteDataType::Interval(IntervalType::MonthDayNano(_)) => {
- ColumnDataType::IntervalMonthDayNano
- }
- ConcreteDataType::Interval(IntervalType::DayTime(_)) => ColumnDataType::IntervalDayTime,
- ConcreteDataType::Null(_)
- | ConcreteDataType::List(_)
- | ConcreteDataType::Dictionary(_)
- | ConcreteDataType::Decimal128(_) => return None,
- };
-
- Some(column_data_type)
-}
-
pub fn vectors_to_rows<'a>(
columns: impl Iterator<Item = &'a VectorRef>,
row_count: usize,
@@ -982,20 +1121,15 @@ pub fn value_to_grpc_value(value: Value) -> GrpcValue {
TimeUnit::Microsecond => ValueData::DurationMicrosecondValue(v.value()),
TimeUnit::Nanosecond => ValueData::DurationNanosecondValue(v.value()),
}),
- Value::List(_) | Value::Decimal128(_) => unreachable!(),
+ Value::Decimal128(v) => {
+ let (hi, lo) = v.split_value();
+ Some(ValueData::Decimal128Value(v1::Decimal128 { hi, lo }))
+ }
+ Value::List(_) => unreachable!(),
},
}
}
-/// Returns true if the column type is equal to expected type.
-fn is_column_type_eq(column_type: ColumnDataType, expect_type: &ConcreteDataType) -> bool {
- if let Some(expect) = to_column_data_type(expect_type) {
- column_type == expect
- } else {
- false
- }
-}
-
#[cfg(test)]
mod tests {
use std::sync::Arc;
@@ -1089,189 +1223,204 @@ mod tests {
let values = values_with_capacity(ColumnDataType::DurationMillisecond, 2);
let values = values.duration_millisecond_values;
assert_eq!(2, values.capacity());
+
+ let values = values_with_capacity(ColumnDataType::Decimal128, 2);
+ let values = values.decimal128_values;
+ assert_eq!(2, values.capacity());
}
#[test]
fn test_concrete_datatype_from_column_datatype() {
assert_eq!(
ConcreteDataType::boolean_datatype(),
- ColumnDataTypeWrapper(ColumnDataType::Boolean).into()
+ ColumnDataTypeWrapper::boolean_datatype().into()
);
assert_eq!(
ConcreteDataType::int8_datatype(),
- ColumnDataTypeWrapper(ColumnDataType::Int8).into()
+ ColumnDataTypeWrapper::int8_datatype().into()
);
assert_eq!(
ConcreteDataType::int16_datatype(),
- ColumnDataTypeWrapper(ColumnDataType::Int16).into()
+ ColumnDataTypeWrapper::int16_datatype().into()
);
assert_eq!(
ConcreteDataType::int32_datatype(),
- ColumnDataTypeWrapper(ColumnDataType::Int32).into()
+ ColumnDataTypeWrapper::int32_datatype().into()
);
assert_eq!(
ConcreteDataType::int64_datatype(),
- ColumnDataTypeWrapper(ColumnDataType::Int64).into()
+ ColumnDataTypeWrapper::int64_datatype().into()
);
assert_eq!(
ConcreteDataType::uint8_datatype(),
- ColumnDataTypeWrapper(ColumnDataType::Uint8).into()
+ ColumnDataTypeWrapper::uint8_datatype().into()
);
assert_eq!(
ConcreteDataType::uint16_datatype(),
- ColumnDataTypeWrapper(ColumnDataType::Uint16).into()
+ ColumnDataTypeWrapper::uint16_datatype().into()
);
assert_eq!(
ConcreteDataType::uint32_datatype(),
- ColumnDataTypeWrapper(ColumnDataType::Uint32).into()
+ ColumnDataTypeWrapper::uint32_datatype().into()
);
assert_eq!(
ConcreteDataType::uint64_datatype(),
- ColumnDataTypeWrapper(ColumnDataType::Uint64).into()
+ ColumnDataTypeWrapper::uint64_datatype().into()
);
assert_eq!(
ConcreteDataType::float32_datatype(),
- ColumnDataTypeWrapper(ColumnDataType::Float32).into()
+ ColumnDataTypeWrapper::float32_datatype().into()
);
assert_eq!(
ConcreteDataType::float64_datatype(),
- ColumnDataTypeWrapper(ColumnDataType::Float64).into()
+ ColumnDataTypeWrapper::float64_datatype().into()
);
assert_eq!(
ConcreteDataType::binary_datatype(),
- ColumnDataTypeWrapper(ColumnDataType::Binary).into()
+ ColumnDataTypeWrapper::binary_datatype().into()
);
assert_eq!(
ConcreteDataType::string_datatype(),
- ColumnDataTypeWrapper(ColumnDataType::String).into()
+ ColumnDataTypeWrapper::string_datatype().into()
);
assert_eq!(
ConcreteDataType::date_datatype(),
- ColumnDataTypeWrapper(ColumnDataType::Date).into()
+ ColumnDataTypeWrapper::date_datatype().into()
);
assert_eq!(
ConcreteDataType::datetime_datatype(),
- ColumnDataTypeWrapper(ColumnDataType::Datetime).into()
+ ColumnDataTypeWrapper::datetime_datatype().into()
);
assert_eq!(
ConcreteDataType::timestamp_millisecond_datatype(),
- ColumnDataTypeWrapper(ColumnDataType::TimestampMillisecond).into()
+ ColumnDataTypeWrapper::timestamp_millisecond_datatype().into()
);
assert_eq!(
ConcreteDataType::time_datatype(TimeUnit::Millisecond),
- ColumnDataTypeWrapper(ColumnDataType::TimeMillisecond).into()
+ ColumnDataTypeWrapper::time_millisecond_datatype().into()
);
assert_eq!(
ConcreteDataType::interval_datatype(IntervalUnit::DayTime),
- ColumnDataTypeWrapper(ColumnDataType::IntervalDayTime).into()
+ ColumnDataTypeWrapper::interval_day_time_datatype().into()
);
assert_eq!(
ConcreteDataType::interval_datatype(IntervalUnit::YearMonth),
- ColumnDataTypeWrapper(ColumnDataType::IntervalYearMonth).into()
+ ColumnDataTypeWrapper::interval_year_month_datatype().into()
);
assert_eq!(
ConcreteDataType::interval_datatype(IntervalUnit::MonthDayNano),
- ColumnDataTypeWrapper(ColumnDataType::IntervalMonthDayNano).into()
+ ColumnDataTypeWrapper::interval_month_day_nano_datatype().into()
);
assert_eq!(
ConcreteDataType::duration_millisecond_datatype(),
- ColumnDataTypeWrapper(ColumnDataType::DurationMillisecond).into()
+ ColumnDataTypeWrapper::duration_millisecond_datatype().into()
+ );
+ assert_eq!(
+ ConcreteDataType::decimal128_datatype(10, 2),
+ ColumnDataTypeWrapper::decimal128_datatype(10, 2).into()
)
}
#[test]
fn test_column_datatype_from_concrete_datatype() {
assert_eq!(
- ColumnDataTypeWrapper(ColumnDataType::Boolean),
+ ColumnDataTypeWrapper::boolean_datatype(),
ConcreteDataType::boolean_datatype().try_into().unwrap()
);
assert_eq!(
- ColumnDataTypeWrapper(ColumnDataType::Int8),
+ ColumnDataTypeWrapper::int8_datatype(),
ConcreteDataType::int8_datatype().try_into().unwrap()
);
assert_eq!(
- ColumnDataTypeWrapper(ColumnDataType::Int16),
+ ColumnDataTypeWrapper::int16_datatype(),
ConcreteDataType::int16_datatype().try_into().unwrap()
);
assert_eq!(
- ColumnDataTypeWrapper(ColumnDataType::Int32),
+ ColumnDataTypeWrapper::int32_datatype(),
ConcreteDataType::int32_datatype().try_into().unwrap()
);
assert_eq!(
- ColumnDataTypeWrapper(ColumnDataType::Int64),
+ ColumnDataTypeWrapper::int64_datatype(),
ConcreteDataType::int64_datatype().try_into().unwrap()
);
assert_eq!(
- ColumnDataTypeWrapper(ColumnDataType::Uint8),
+ ColumnDataTypeWrapper::uint8_datatype(),
ConcreteDataType::uint8_datatype().try_into().unwrap()
);
assert_eq!(
- ColumnDataTypeWrapper(ColumnDataType::Uint16),
+ ColumnDataTypeWrapper::uint16_datatype(),
ConcreteDataType::uint16_datatype().try_into().unwrap()
);
assert_eq!(
- ColumnDataTypeWrapper(ColumnDataType::Uint32),
+ ColumnDataTypeWrapper::uint32_datatype(),
ConcreteDataType::uint32_datatype().try_into().unwrap()
);
assert_eq!(
- ColumnDataTypeWrapper(ColumnDataType::Uint64),
+ ColumnDataTypeWrapper::uint64_datatype(),
ConcreteDataType::uint64_datatype().try_into().unwrap()
);
assert_eq!(
- ColumnDataTypeWrapper(ColumnDataType::Float32),
+ ColumnDataTypeWrapper::float32_datatype(),
ConcreteDataType::float32_datatype().try_into().unwrap()
);
assert_eq!(
- ColumnDataTypeWrapper(ColumnDataType::Float64),
+ ColumnDataTypeWrapper::float64_datatype(),
ConcreteDataType::float64_datatype().try_into().unwrap()
);
assert_eq!(
- ColumnDataTypeWrapper(ColumnDataType::Binary),
+ ColumnDataTypeWrapper::binary_datatype(),
ConcreteDataType::binary_datatype().try_into().unwrap()
);
assert_eq!(
- ColumnDataTypeWrapper(ColumnDataType::String),
+ ColumnDataTypeWrapper::string_datatype(),
ConcreteDataType::string_datatype().try_into().unwrap()
);
assert_eq!(
- ColumnDataTypeWrapper(ColumnDataType::Date),
+ ColumnDataTypeWrapper::date_datatype(),
ConcreteDataType::date_datatype().try_into().unwrap()
);
assert_eq!(
- ColumnDataTypeWrapper(ColumnDataType::Datetime),
+ ColumnDataTypeWrapper::datetime_datatype(),
ConcreteDataType::datetime_datatype().try_into().unwrap()
);
assert_eq!(
- ColumnDataTypeWrapper(ColumnDataType::TimestampMillisecond),
+ ColumnDataTypeWrapper::timestamp_millisecond_datatype(),
ConcreteDataType::timestamp_millisecond_datatype()
.try_into()
.unwrap()
);
assert_eq!(
- ColumnDataTypeWrapper(ColumnDataType::IntervalYearMonth),
+ ColumnDataTypeWrapper::interval_year_month_datatype(),
ConcreteDataType::interval_datatype(IntervalUnit::YearMonth)
.try_into()
.unwrap()
);
assert_eq!(
- ColumnDataTypeWrapper(ColumnDataType::IntervalDayTime),
+ ColumnDataTypeWrapper::interval_day_time_datatype(),
ConcreteDataType::interval_datatype(IntervalUnit::DayTime)
.try_into()
.unwrap()
);
assert_eq!(
- ColumnDataTypeWrapper(ColumnDataType::IntervalMonthDayNano),
+ ColumnDataTypeWrapper::interval_month_day_nano_datatype(),
ConcreteDataType::interval_datatype(IntervalUnit::MonthDayNano)
.try_into()
.unwrap()
);
assert_eq!(
- ColumnDataTypeWrapper(ColumnDataType::DurationMillisecond),
+ ColumnDataTypeWrapper::duration_millisecond_datatype(),
ConcreteDataType::duration_millisecond_datatype()
.try_into()
.unwrap()
);
+ assert_eq!(
+ ColumnDataTypeWrapper::decimal128_datatype(10, 2),
+ ConcreteDataType::decimal128_datatype(10, 2)
+ .try_into()
+ .unwrap()
+ );
+
let result: Result<ColumnDataTypeWrapper> = ConcreteDataType::null_datatype().try_into();
assert!(result.is_err());
assert_eq!(
@@ -1298,6 +1447,7 @@ mod tests {
}),
null_mask: vec![],
datatype: 0,
+ ..Default::default()
};
let vector = Arc::new(TimestampNanosecondVector::from_vec(vec![1, 2, 3]));
@@ -1339,6 +1489,7 @@ mod tests {
}),
null_mask: vec![],
datatype: 0,
+ ..Default::default()
};
let vector = Arc::new(TimeNanosecondVector::from_vec(vec![1, 2, 3]));
@@ -1380,6 +1531,7 @@ mod tests {
}),
null_mask: vec![],
datatype: 0,
+ ..Default::default()
};
let vector = Arc::new(IntervalYearMonthVector::from_vec(vec![1, 2, 3]));
@@ -1424,6 +1576,7 @@ mod tests {
}),
null_mask: vec![],
datatype: 0,
+ ..Default::default()
};
let vector = Arc::new(DurationNanosecondVector::from_vec(vec![1, 2, 3]));
@@ -1468,6 +1621,7 @@ mod tests {
}),
null_mask: vec![2],
datatype: ColumnDataType::Boolean as i32,
+ ..Default::default()
};
let row_count = 4;
@@ -1625,17 +1779,17 @@ mod tests {
&ConcreteDataType::Interval(IntervalType::MonthDayNano(IntervalMonthDayNanoType)),
Values {
interval_month_day_nano_values: vec![
- IntervalMonthDayNano {
+ v1::IntervalMonthDayNano {
months: 1,
days: 2,
nanoseconds: 3,
},
- IntervalMonthDayNano {
+ v1::IntervalMonthDayNano {
months: 5,
days: 6,
nanoseconds: 7,
},
- IntervalMonthDayNano {
+ v1::IntervalMonthDayNano {
months: 9,
days: 10,
nanoseconds: 11,
@@ -1867,4 +2021,33 @@ mod tests {
assert_eq!(values[6], ValueData::DateValue(30));
assert_eq!(values[7], ValueData::StringValue("c".to_string()));
}
+
+ #[test]
+ fn test_is_column_type_value_eq() {
+ // test column type eq
+ let column1 = Column {
+ column_name: "test".to_string(),
+ semantic_type: 0,
+ values: Some(Values {
+ bool_values: vec![false, true, true],
+ ..Default::default()
+ }),
+ null_mask: vec![2],
+ datatype: ColumnDataType::Boolean as i32,
+ datatype_extension: None,
+ };
+ assert!(is_column_type_value_eq(
+ column1.datatype,
+ column1.datatype_extension,
+ &ConcreteDataType::boolean_datatype(),
+ ));
+ }
+
+ #[test]
+ fn test_convert_to_pb_decimal128() {
+ let decimal = Decimal128::new(123, 3, 1);
+ let pb_decimal = convert_to_pb_decimal128(decimal);
+ assert_eq!(pb_decimal.lo, 123);
+ assert_eq!(pb_decimal.hi, 0);
+ }
}
diff --git a/src/api/src/v1/column_def.rs b/src/api/src/v1/column_def.rs
index 7a812d005fae..4a077d3b5451 100644
--- a/src/api/src/v1/column_def.rs
+++ b/src/api/src/v1/column_def.rs
@@ -22,7 +22,10 @@ use crate::helper::ColumnDataTypeWrapper;
use crate::v1::ColumnDef;
pub fn try_as_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
- let data_type = ColumnDataTypeWrapper::try_new(column_def.data_type)?;
+ let data_type = ColumnDataTypeWrapper::try_new(
+ column_def.data_type,
+ column_def.datatype_extension.clone(),
+ )?;
let constraint = if column_def.default_constraint.is_empty() {
None
diff --git a/src/client/examples/logical.rs b/src/client/examples/logical.rs
index ca7d3ae96ba3..ec4c11cdd9f9 100644
--- a/src/client/examples/logical.rs
+++ b/src/client/examples/logical.rs
@@ -46,6 +46,7 @@ async fn run() {
default_constraint: vec![],
semantic_type: SemanticType::Timestamp as i32,
comment: String::new(),
+ ..Default::default()
},
ColumnDef {
name: "key".to_string(),
@@ -54,6 +55,7 @@ async fn run() {
default_constraint: vec![],
semantic_type: SemanticType::Tag as i32,
comment: String::new(),
+ ..Default::default()
},
ColumnDef {
name: "value".to_string(),
@@ -62,6 +64,7 @@ async fn run() {
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
+ ..Default::default()
},
],
time_index: "timestamp".to_string(),
diff --git a/src/common/decimal/src/decimal128.rs b/src/common/decimal/src/decimal128.rs
index c68039fb57a7..ce23fcf98a97 100644
--- a/src/common/decimal/src/decimal128.rs
+++ b/src/common/decimal/src/decimal128.rs
@@ -96,10 +96,25 @@ impl Decimal128 {
self.scale
}
- /// Convert to ScalarValue
+    /// Convert to ScalarValue (value, precision, scale)
pub fn to_scalar_value(&self) -> (Option<i128>, u8, i8) {
(Some(self.value), self.precision, self.scale)
}
+
+    /// Splits `self.value` (i128) into (high 64 bits, low 64 bits); the
+    /// precision and scale information is discarded.
+    ///
+    /// Returns: (high 64 bits, low 64 bits)
+ pub fn split_value(&self) -> (i64, i64) {
+ ((self.value >> 64) as i64, self.value as i64)
+ }
+
+    /// Converts precision, scale and an i128 value represented by two
+    /// i64 halves (high 64 bits, low 64 bits) back into a `Decimal128`.
+ pub fn from_value_precision_scale(hi: i64, lo: i64, precision: u8, scale: i8) -> Self {
+ let value = (hi as i128) << 64 | lo as i128;
+ Self::new(value, precision, scale)
+ }
}
/// The default value of Decimal128 is 0, and its precision is 1 and scale is 0.
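Aside on the hi/lo encoding introduced above: `split_value` and `from_value_precision_scale` move the 128-bit decimal value across gRPC as two signed 64-bit halves, with precision and scale carried separately. A minimal standalone sketch of that round trip (illustrative only, not the `common_decimal` code; it reattaches the low half as unsigned so a negative low word cannot leak into the high half):

```rust
// Illustrative sketch: pack an i128 into (hi, lo) i64 halves and back.
// Mirrors the idea of `split_value` / `from_value_precision_scale` above,
// but is not the upstream implementation.
fn split(v: i128) -> (i64, i64) {
    ((v >> 64) as i64, v as i64)
}

fn join(hi: i64, lo: i64) -> i128 {
    // Treat the low half as unsigned so its sign bit does not clobber
    // the high 64 bits when the two halves are recombined.
    ((hi as i128) << 64) | (lo as u64 as i128)
}

fn main() {
    for v in [0i128, 123, -123, 10_i128.pow(30), i128::MAX, i128::MIN] {
        let (hi, lo) = split(v);
        assert_eq!(join(hi, lo), v);
    }
    println!("round trip ok");
}
```

The pb `Decimal128` message used elsewhere in this change stores exactly these two halves as `hi` and `lo` (see `convert_to_pb_decimal128` and its test earlier in the diff).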
diff --git a/src/common/grpc-expr/src/alter.rs b/src/common/grpc-expr/src/alter.rs
index 462eefde1a27..532f9cf15c48 100644
--- a/src/common/grpc-expr/src/alter.rs
+++ b/src/common/grpc-expr/src/alter.rs
@@ -158,6 +158,7 @@ mod tests {
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
+ ..Default::default()
}),
location: None,
}],
@@ -199,6 +200,7 @@ mod tests {
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
+ ..Default::default()
}),
location: Some(Location {
location_type: LocationType::First.into(),
@@ -213,6 +215,7 @@ mod tests {
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
+ ..Default::default()
}),
location: Some(Location {
location_type: LocationType::After.into(),
diff --git a/src/common/grpc-expr/src/delete.rs b/src/common/grpc-expr/src/delete.rs
index d272b6aa0bc0..ff737fcdfc60 100644
--- a/src/common/grpc-expr/src/delete.rs
+++ b/src/common/grpc-expr/src/delete.rs
@@ -36,14 +36,16 @@ pub fn to_table_delete_request(
values,
null_mask,
datatype,
+ datatype_extension,
..
} in request.key_columns
{
let Some(values) = values else { continue };
- let datatype: ConcreteDataType = ColumnDataTypeWrapper::try_new(datatype)
- .context(ColumnDataTypeSnafu)?
- .into();
+ let datatype: ConcreteDataType =
+ ColumnDataTypeWrapper::try_new(datatype, datatype_extension)
+ .context(ColumnDataTypeSnafu)?
+ .into();
let vector = add_values_to_builder(datatype, values, row_count, null_mask)?;
ensure!(
diff --git a/src/common/grpc-expr/src/insert.rs b/src/common/grpc-expr/src/insert.rs
index 5b173b6fdc67..746189ee2b53 100644
--- a/src/common/grpc-expr/src/insert.rs
+++ b/src/common/grpc-expr/src/insert.rs
@@ -119,7 +119,7 @@ mod tests {
nullable: bool,
) -> error::Result<ColumnSchema> {
let datatype_wrapper =
- ColumnDataTypeWrapper::try_new(datatype).context(ColumnDataTypeSnafu)?;
+ ColumnDataTypeWrapper::try_new(datatype, None).context(ColumnDataTypeSnafu)?;
Ok(ColumnSchema::new(
column_name,
@@ -170,7 +170,8 @@ mod tests {
.iter()
.find(|c| c.name == "host")
.unwrap()
- .data_type
+ .data_type,
+ None
)
.unwrap()
)
@@ -184,7 +185,8 @@ mod tests {
.iter()
.find(|c| c.name == "cpu")
.unwrap()
- .data_type
+ .data_type,
+ None
)
.unwrap()
)
@@ -198,7 +200,8 @@ mod tests {
.iter()
.find(|c| c.name == "memory")
.unwrap()
- .data_type
+ .data_type,
+ None
)
.unwrap()
)
@@ -212,7 +215,8 @@ mod tests {
.iter()
.find(|c| c.name == "time")
.unwrap()
- .data_type
+ .data_type,
+ None
)
.unwrap()
)
@@ -226,7 +230,8 @@ mod tests {
.iter()
.find(|c| c.name == "interval")
.unwrap()
- .data_type
+ .data_type,
+ None
)
.unwrap()
)
@@ -240,7 +245,8 @@ mod tests {
.iter()
.find(|c| c.name == "duration")
.unwrap()
- .data_type
+ .data_type,
+ None
)
.unwrap()
)
@@ -254,7 +260,8 @@ mod tests {
.iter()
.find(|c| c.name == "ts")
.unwrap()
- .data_type
+ .data_type,
+ None
)
.unwrap()
)
@@ -284,8 +291,11 @@ mod tests {
assert_eq!(
ConcreteDataType::string_datatype(),
ConcreteDataType::from(
- ColumnDataTypeWrapper::try_new(host_column.column_def.as_ref().unwrap().data_type)
- .unwrap()
+ ColumnDataTypeWrapper::try_new(
+ host_column.column_def.as_ref().unwrap().data_type,
+ None
+ )
+ .unwrap()
)
);
@@ -294,7 +304,8 @@ mod tests {
ConcreteDataType::float64_datatype(),
ConcreteDataType::from(
ColumnDataTypeWrapper::try_new(
- memory_column.column_def.as_ref().unwrap().data_type
+ memory_column.column_def.as_ref().unwrap().data_type,
+ None
)
.unwrap()
)
@@ -304,8 +315,11 @@ mod tests {
assert_eq!(
ConcreteDataType::time_datatype(TimeUnit::Millisecond),
ConcreteDataType::from(
- ColumnDataTypeWrapper::try_new(time_column.column_def.as_ref().unwrap().data_type)
- .unwrap()
+ ColumnDataTypeWrapper::try_new(
+ time_column.column_def.as_ref().unwrap().data_type,
+ None
+ )
+ .unwrap()
)
);
@@ -314,7 +328,8 @@ mod tests {
ConcreteDataType::interval_datatype(IntervalUnit::MonthDayNano),
ConcreteDataType::from(
ColumnDataTypeWrapper::try_new(
- interval_column.column_def.as_ref().unwrap().data_type
+ interval_column.column_def.as_ref().unwrap().data_type,
+ None
)
.unwrap()
)
@@ -326,7 +341,8 @@ mod tests {
ConcreteDataType::duration_millisecond_datatype(),
ConcreteDataType::from(
ColumnDataTypeWrapper::try_new(
- duration_column.column_def.as_ref().unwrap().data_type
+ duration_column.column_def.as_ref().unwrap().data_type,
+ None
)
.unwrap()
)
@@ -360,6 +376,7 @@ mod tests {
values: Some(host_vals),
null_mask: vec![0],
datatype: ColumnDataType::String as i32,
+ ..Default::default()
};
let cpu_vals = Values {
@@ -372,6 +389,7 @@ mod tests {
values: Some(cpu_vals),
null_mask: vec![2],
datatype: ColumnDataType::Float64 as i32,
+ ..Default::default()
};
let mem_vals = Values {
@@ -384,6 +402,7 @@ mod tests {
values: Some(mem_vals),
null_mask: vec![1],
datatype: ColumnDataType::Float64 as i32,
+ ..Default::default()
};
let time_vals = Values {
@@ -396,6 +415,7 @@ mod tests {
values: Some(time_vals),
null_mask: vec![0],
datatype: ColumnDataType::TimeMillisecond as i32,
+ ..Default::default()
};
let interval1 = IntervalMonthDayNano {
@@ -418,6 +438,7 @@ mod tests {
values: Some(interval_vals),
null_mask: vec![0],
datatype: ColumnDataType::IntervalMonthDayNano as i32,
+ ..Default::default()
};
let duration_vals = Values {
@@ -430,6 +451,7 @@ mod tests {
values: Some(duration_vals),
null_mask: vec![0],
datatype: ColumnDataType::DurationMillisecond as i32,
+ ..Default::default()
};
let ts_vals = Values {
@@ -442,6 +464,7 @@ mod tests {
values: Some(ts_vals),
null_mask: vec![0],
datatype: ColumnDataType::TimestampMillisecond as i32,
+ ..Default::default()
};
(
diff --git a/src/common/grpc-expr/src/util.rs b/src/common/grpc-expr/src/util.rs
index 337a0a16e6e2..f7068bc875d8 100644
--- a/src/common/grpc-expr/src/util.rs
+++ b/src/common/grpc-expr/src/util.rs
@@ -121,6 +121,7 @@ pub fn build_create_table_expr(
default_constraint: vec![],
semantic_type,
comment: String::new(),
+ ..Default::default()
};
column_defs.push(column_def);
}
@@ -161,6 +162,7 @@ pub fn extract_new_columns(
default_constraint: vec![],
semantic_type: expr.semantic_type,
comment: String::new(),
+ ..Default::default()
});
AddColumn {
column_def,
diff --git a/src/common/grpc/src/select.rs b/src/common/grpc/src/select.rs
index 4c6e4a6af99c..6b53a5900c73 100644
--- a/src/common/grpc/src/select.rs
+++ b/src/common/grpc/src/select.rs
@@ -12,18 +12,19 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use api::helper::convert_i128_to_interval;
+use api::helper::{convert_i128_to_interval, convert_to_pb_decimal128};
use api::v1::column::Values;
use common_base::BitVec;
use datatypes::types::{DurationType, IntervalType, TimeType, TimestampType, WrapperType};
use datatypes::vectors::{
- BinaryVector, BooleanVector, DateTimeVector, DateVector, DurationMicrosecondVector,
- DurationMillisecondVector, DurationNanosecondVector, DurationSecondVector, Float32Vector,
- Float64Vector, Int16Vector, Int32Vector, Int64Vector, Int8Vector, IntervalDayTimeVector,
- IntervalMonthDayNanoVector, IntervalYearMonthVector, StringVector, TimeMicrosecondVector,
- TimeMillisecondVector, TimeNanosecondVector, TimeSecondVector, TimestampMicrosecondVector,
- TimestampMillisecondVector, TimestampNanosecondVector, TimestampSecondVector, UInt16Vector,
- UInt32Vector, UInt64Vector, UInt8Vector, VectorRef,
+ BinaryVector, BooleanVector, DateTimeVector, DateVector, Decimal128Vector,
+ DurationMicrosecondVector, DurationMillisecondVector, DurationNanosecondVector,
+ DurationSecondVector, Float32Vector, Float64Vector, Int16Vector, Int32Vector, Int64Vector,
+ Int8Vector, IntervalDayTimeVector, IntervalMonthDayNanoVector, IntervalYearMonthVector,
+ StringVector, TimeMicrosecondVector, TimeMillisecondVector, TimeNanosecondVector,
+ TimeSecondVector, TimestampMicrosecondVector, TimestampMillisecondVector,
+ TimestampNanosecondVector, TimestampSecondVector, UInt16Vector, UInt32Vector, UInt64Vector,
+ UInt8Vector, VectorRef,
};
use snafu::OptionExt;
@@ -71,8 +72,7 @@ macro_rules! convert_arrow_array_to_grpc_vals {
return Ok(vals);
},
)+
- // TODO(QuenKar): support gRPC for Decimal128
- ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) | ConcreteDataType::Decimal128(_) => unreachable!("Should not send {:?} in gRPC", $data_type),
+ ConcreteDataType::Null(_) | ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => unreachable!("Should not send {:?} in gRPC", $data_type),
}
}};
}
@@ -238,6 +238,12 @@ pub fn values(arrays: &[VectorRef]) -> Result<Values> {
DurationNanosecondVector,
duration_nanosecond_values,
|x| { x.into_native() }
+ ),
+ (
+ ConcreteDataType::Decimal128(_),
+ Decimal128Vector,
+ decimal128_values,
+ |x| { convert_to_pb_decimal128(x) }
)
)
}
@@ -315,6 +321,17 @@ mod tests {
assert_eq!(vec![1, 2, 3], values.duration_second_values);
}
+ #[test]
+ fn test_convert_arrow_array_decimal128() {
+ let array = Decimal128Vector::from(vec![Some(1), Some(2), None, Some(3)]);
+
+ let vals = values(&[Arc::new(array)]).unwrap();
+ (0..3).for_each(|i| {
+ assert_eq!(vals.decimal128_values[i].hi, 0);
+ assert_eq!(vals.decimal128_values[i].lo, i as i64 + 1);
+ });
+ }
+
#[test]
fn test_convert_arrow_arrays_string() {
let array = StringVector::from(vec![
diff --git a/src/common/grpc/src/writer.rs b/src/common/grpc/src/writer.rs
index 7efa3dfd5cad..3a8e9238287e 100644
--- a/src/common/grpc/src/writer.rs
+++ b/src/common/grpc/src/writer.rs
@@ -16,7 +16,7 @@ use std::collections::HashMap;
use std::fmt::Display;
use api::helper::values_with_capacity;
-use api::v1::{Column, ColumnDataType, SemanticType};
+use api::v1::{Column, ColumnDataType, ColumnDataTypeExtension, SemanticType};
use common_base::BitVec;
use common_time::timestamp::TimeUnit;
use snafu::ensure;
@@ -50,6 +50,7 @@ impl LinesWriter {
column_name,
ColumnDataType::TimestampMillisecond,
SemanticType::Timestamp,
+ None,
);
ensure!(
column.datatype == ColumnDataType::TimestampMillisecond as i32,
@@ -69,7 +70,8 @@ impl LinesWriter {
}
pub fn write_tag(&mut self, column_name: &str, value: &str) -> Result<()> {
- let (idx, column) = self.mut_column(column_name, ColumnDataType::String, SemanticType::Tag);
+ let (idx, column) =
+ self.mut_column(column_name, ColumnDataType::String, SemanticType::Tag, None);
ensure!(
column.datatype == ColumnDataType::String as i32,
TypeMismatchSnafu {
@@ -86,8 +88,12 @@ impl LinesWriter {
}
pub fn write_u64(&mut self, column_name: &str, value: u64) -> Result<()> {
- let (idx, column) =
- self.mut_column(column_name, ColumnDataType::Uint64, SemanticType::Field);
+ let (idx, column) = self.mut_column(
+ column_name,
+ ColumnDataType::Uint64,
+ SemanticType::Field,
+ None,
+ );
ensure!(
column.datatype == ColumnDataType::Uint64 as i32,
TypeMismatchSnafu {
@@ -104,8 +110,12 @@ impl LinesWriter {
}
pub fn write_i64(&mut self, column_name: &str, value: i64) -> Result<()> {
- let (idx, column) =
- self.mut_column(column_name, ColumnDataType::Int64, SemanticType::Field);
+ let (idx, column) = self.mut_column(
+ column_name,
+ ColumnDataType::Int64,
+ SemanticType::Field,
+ None,
+ );
ensure!(
column.datatype == ColumnDataType::Int64 as i32,
TypeMismatchSnafu {
@@ -122,8 +132,12 @@ impl LinesWriter {
}
pub fn write_f64(&mut self, column_name: &str, value: f64) -> Result<()> {
- let (idx, column) =
- self.mut_column(column_name, ColumnDataType::Float64, SemanticType::Field);
+ let (idx, column) = self.mut_column(
+ column_name,
+ ColumnDataType::Float64,
+ SemanticType::Field,
+ None,
+ );
ensure!(
column.datatype == ColumnDataType::Float64 as i32,
TypeMismatchSnafu {
@@ -140,8 +154,12 @@ impl LinesWriter {
}
pub fn write_string(&mut self, column_name: &str, value: &str) -> Result<()> {
- let (idx, column) =
- self.mut_column(column_name, ColumnDataType::String, SemanticType::Field);
+ let (idx, column) = self.mut_column(
+ column_name,
+ ColumnDataType::String,
+ SemanticType::Field,
+ None,
+ );
ensure!(
column.datatype == ColumnDataType::String as i32,
TypeMismatchSnafu {
@@ -158,8 +176,12 @@ impl LinesWriter {
}
pub fn write_bool(&mut self, column_name: &str, value: bool) -> Result<()> {
- let (idx, column) =
- self.mut_column(column_name, ColumnDataType::Boolean, SemanticType::Field);
+ let (idx, column) = self.mut_column(
+ column_name,
+ ColumnDataType::Boolean,
+ SemanticType::Field,
+ None,
+ );
ensure!(
column.datatype == ColumnDataType::Boolean as i32,
TypeMismatchSnafu {
@@ -201,6 +223,7 @@ impl LinesWriter {
column_name: &str,
datatype: ColumnDataType,
semantic_type: SemanticType,
+ datatype_extension: Option<ColumnDataTypeExtension>,
) -> (usize, &mut Column) {
let column_names = &mut self.column_name_index;
let column_idx = match column_names.get(column_name) {
@@ -218,6 +241,7 @@ impl LinesWriter {
values: Some(values_with_capacity(datatype, to_insert)),
datatype: datatype as i32,
null_mask: Vec::default(),
+ datatype_extension,
});
let _ = column_names.insert(column_name.to_string(), new_idx);
new_idx
diff --git a/src/common/meta/src/ddl/create_table.rs b/src/common/meta/src/ddl/create_table.rs
index 54632e9b3f05..85b4bb8d23da 100644
--- a/src/common/meta/src/ddl/create_table.rs
+++ b/src/common/meta/src/ddl/create_table.rs
@@ -132,6 +132,7 @@ impl CreateTableProcedure {
default_constraint: c.default_constraint.clone(),
semantic_type: semantic_type as i32,
comment: String::new(),
+ datatype_extension: c.datatype_extension.clone(),
}),
column_id: i as u32,
}
diff --git a/src/datatypes/src/data_type.rs b/src/datatypes/src/data_type.rs
index 877bf47e3bf5..61470eec6bd7 100644
--- a/src/datatypes/src/data_type.rs
+++ b/src/datatypes/src/data_type.rs
@@ -142,6 +142,7 @@ impl ConcreteDataType {
| ConcreteDataType::Time(_)
| ConcreteDataType::Interval(_)
| ConcreteDataType::Duration(_)
+ | ConcreteDataType::Decimal128(_)
)
}
@@ -676,6 +677,7 @@ mod tests {
assert!(ConcreteDataType::duration_millisecond_datatype().is_stringifiable());
assert!(ConcreteDataType::duration_microsecond_datatype().is_stringifiable());
assert!(ConcreteDataType::duration_nanosecond_datatype().is_stringifiable());
+ assert!(ConcreteDataType::decimal128_datatype(10, 2).is_stringifiable());
}
#[test]
diff --git a/src/datatypes/src/types/decimal_type.rs b/src/datatypes/src/types/decimal_type.rs
index edda8fe9f7eb..48ede0c44136 100644
--- a/src/datatypes/src/types/decimal_type.rs
+++ b/src/datatypes/src/types/decimal_type.rs
@@ -13,6 +13,7 @@
// limitations under the License.
use arrow_schema::DataType as ArrowDataType;
+use common_decimal::decimal128::DECIMAL128_MAX_PRECISION;
use common_decimal::Decimal128;
use serde::{Deserialize, Serialize};
@@ -32,7 +33,17 @@ pub struct Decimal128Type {
impl Decimal128Type {
pub fn new(precision: u8, scale: i8) -> Self {
- Self { precision, scale }
+        // assert that precision and scale are valid
+ assert!(
+ precision > 0 && precision <= DECIMAL128_MAX_PRECISION,
+ "precision should be in [1, {}]",
+ DECIMAL128_MAX_PRECISION
+ );
+ assert!(
+ scale >= 0 && scale <= precision as i8,
+ "scale should be in [0, precision]"
+ );
+ Decimal128Type { precision, scale }
}
pub fn precision(&self) -> u8 {
@@ -46,7 +57,8 @@ impl Decimal128Type {
impl DataType for Decimal128Type {
fn name(&self) -> &str {
- "decimal128"
+ // TODO(QuenKar): support precision and scale information in name
+ "decimal"
}
fn logical_type_id(&self) -> LogicalTypeId {
@@ -62,7 +74,12 @@ impl DataType for Decimal128Type {
}
fn create_mutable_vector(&self, capacity: usize) -> Box<dyn MutableVector> {
- Box::new(Decimal128VectorBuilder::with_capacity(capacity))
+ Box::new(
+ Decimal128VectorBuilder::with_capacity(capacity)
+ .with_precision_and_scale(self.precision, self.scale)
+ // safe to unwrap because we have validated the precision and scale in new()
+ .unwrap(),
+ )
}
fn try_cast(&self, val: Value) -> Option<Value> {
diff --git a/src/datatypes/src/vectors/decimal.rs b/src/datatypes/src/vectors/decimal.rs
index 1303303d7b66..ebcdb43d6e34 100644
--- a/src/datatypes/src/vectors/decimal.rs
+++ b/src/datatypes/src/vectors/decimal.rs
@@ -392,7 +392,26 @@ pub mod tests {
let decimal_array = Decimal128Array::from(vec![Some(123), Some(456)]);
let decimal_vector = Decimal128Vector::from(decimal_array);
let expect = Decimal128Vector::from_values(vec![123, 456]);
+ assert_eq!(decimal_vector, expect);
+
+ let decimal_array = Decimal128Array::from(vec![Some(123), Some(456)])
+ .with_precision_and_scale(10, 2)
+ .unwrap();
+ let decimal_vector = Decimal128Vector::from(decimal_array);
+ let expect = Decimal128Vector::from_values(vec![123, 456])
+ .with_precision_and_scale(10, 2)
+ .unwrap();
+ assert_eq!(decimal_vector, expect);
+ let decimal_array: ArrayRef = Arc::new(
+ Decimal128Array::from(vec![Some(123), Some(456)])
+ .with_precision_and_scale(3, 2)
+ .unwrap(),
+ );
+ let decimal_vector = Decimal128Vector::try_from_arrow_array(decimal_array).unwrap();
+ let expect = Decimal128Vector::from_values(vec![123, 456])
+ .with_precision_and_scale(3, 2)
+ .unwrap();
assert_eq!(decimal_vector, expect);
}
diff --git a/src/meta-srv/src/procedure/tests.rs b/src/meta-srv/src/procedure/tests.rs
index d9ca7a7d31bc..49d09a2ad3fc 100644
--- a/src/meta-srv/src/procedure/tests.rs
+++ b/src/meta-srv/src/procedure/tests.rs
@@ -54,6 +54,7 @@ fn create_table_task() -> CreateTableTask {
default_constraint: vec![],
semantic_type: SemanticType::Timestamp as i32,
comment: String::new(),
+ ..Default::default()
},
PbColumnDef {
name: "my_tag1".to_string(),
@@ -62,6 +63,7 @@ fn create_table_task() -> CreateTableTask {
default_constraint: vec![],
semantic_type: SemanticType::Tag as i32,
comment: String::new(),
+ ..Default::default()
},
PbColumnDef {
name: "my_tag2".to_string(),
@@ -70,6 +72,7 @@ fn create_table_task() -> CreateTableTask {
default_constraint: vec![],
semantic_type: SemanticType::Tag as i32,
comment: String::new(),
+ ..Default::default()
},
PbColumnDef {
name: "my_field_column".to_string(),
@@ -78,6 +81,7 @@ fn create_table_task() -> CreateTableTask {
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
+ ..Default::default()
},
],
time_index: "ts".to_string(),
@@ -114,6 +118,7 @@ fn test_create_region_request_template() {
default_constraint: vec![],
semantic_type: SemanticType::Timestamp as i32,
comment: String::new(),
+ ..Default::default()
}),
column_id: 0,
},
@@ -125,6 +130,7 @@ fn test_create_region_request_template() {
default_constraint: vec![],
semantic_type: SemanticType::Tag as i32,
comment: String::new(),
+ ..Default::default()
}),
column_id: 1,
},
@@ -136,6 +142,7 @@ fn test_create_region_request_template() {
default_constraint: vec![],
semantic_type: SemanticType::Tag as i32,
comment: String::new(),
+ ..Default::default()
}),
column_id: 2,
},
@@ -147,6 +154,7 @@ fn test_create_region_request_template() {
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
+ ..Default::default()
}),
column_id: 3,
},
@@ -287,6 +295,7 @@ fn test_create_alter_region_request() {
default_constraint: b"hello".to_vec(),
semantic_type: SemanticType::Tag as i32,
comment: String::new(),
+ ..Default::default()
}),
location: Some(AddColumnLocation {
location_type: LocationType::After as i32,
@@ -321,7 +330,8 @@ fn test_create_alter_region_request() {
is_nullable: true,
default_constraint: b"hello".to_vec(),
semantic_type: SemanticType::Tag as i32,
- comment: String::new()
+ comment: String::new(),
+ ..Default::default()
}),
column_id: 3,
}),
diff --git a/src/metric-engine/src/engine/put.rs b/src/metric-engine/src/engine/put.rs
index c5520cd84f01..8baa37a7cfbc 100644
--- a/src/metric-engine/src/engine/put.rs
+++ b/src/metric-engine/src/engine/put.rs
@@ -15,9 +15,8 @@
use std::hash::{BuildHasher, Hash, Hasher};
use ahash::RandomState;
-use api::helper::to_column_data_type;
use api::v1::value::ValueData;
-use api::v1::{ColumnSchema, Row, Rows, SemanticType};
+use api::v1::{ColumnDataType, ColumnSchema, Row, Rows, SemanticType};
use common_query::Output;
use common_telemetry::{error, info};
use datatypes::data_type::ConcreteDataType;
@@ -162,18 +161,16 @@ impl MetricEngineInner {
// add table_name column
rows.schema.push(ColumnSchema {
column_name: DATA_SCHEMA_TABLE_ID_COLUMN_NAME.to_string(),
- datatype: to_column_data_type(&ConcreteDataType::uint32_datatype())
- .unwrap()
- .into(),
+ datatype: ColumnDataType::Uint32 as i32,
semantic_type: SemanticType::Tag as _,
+ datatype_extension: None,
});
// add tsid column
rows.schema.push(ColumnSchema {
column_name: DATA_SCHEMA_TSID_COLUMN_NAME.to_string(),
- datatype: to_column_data_type(&ConcreteDataType::uint64_datatype())
- .unwrap()
- .into(),
+ datatype: ColumnDataType::Uint64 as i32,
semantic_type: SemanticType::Tag as _,
+ datatype_extension: None,
});
// fill internal columns
diff --git a/src/metric-engine/src/metadata_region.rs b/src/metric-engine/src/metadata_region.rs
index 28098b4e5f5d..e65a4526e690 100644
--- a/src/metric-engine/src/metadata_region.rs
+++ b/src/metric-engine/src/metadata_region.rs
@@ -349,16 +349,19 @@ impl MetadataRegion {
column_name: METADATA_SCHEMA_TIMESTAMP_COLUMN_NAME.to_string(),
datatype: ColumnDataType::TimestampMillisecond as _,
semantic_type: SemanticType::Timestamp as _,
+ ..Default::default()
},
ColumnSchema {
column_name: METADATA_SCHEMA_KEY_COLUMN_NAME.to_string(),
datatype: ColumnDataType::String as _,
semantic_type: SemanticType::Tag as _,
+ ..Default::default()
},
ColumnSchema {
column_name: METADATA_SCHEMA_VALUE_COLUMN_NAME.to_string(),
datatype: ColumnDataType::String as _,
semantic_type: SemanticType::Field as _,
+ ..Default::default()
},
];
let rows = Rows {
diff --git a/src/metric-engine/src/test_util.rs b/src/metric-engine/src/test_util.rs
index f71fff63719d..664c35f8367d 100644
--- a/src/metric-engine/src/test_util.rs
+++ b/src/metric-engine/src/test_util.rs
@@ -14,9 +14,8 @@
//! Utilities for testing.
-use api::helper::to_column_data_type;
use api::v1::value::ValueData;
-use api::v1::{ColumnSchema as PbColumnSchema, Row, SemanticType, Value};
+use api::v1::{ColumnDataType, ColumnSchema as PbColumnSchema, Row, SemanticType, Value};
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::ColumnSchema;
use mito2::config::MitoConfig;
@@ -233,26 +232,23 @@ pub fn row_schema_with_tags(tags: &[&str]) -> Vec<PbColumnSchema> {
let mut schema = vec![
PbColumnSchema {
column_name: "greptime_timestamp".to_string(),
- datatype: to_column_data_type(&ConcreteDataType::timestamp_millisecond_datatype())
- .unwrap()
- .into(),
+ datatype: ColumnDataType::TimestampMillisecond as i32,
semantic_type: SemanticType::Timestamp as _,
+ datatype_extension: None,
},
PbColumnSchema {
column_name: "greptime_value".to_string(),
- datatype: to_column_data_type(&ConcreteDataType::float64_datatype())
- .unwrap()
- .into(),
+ datatype: ColumnDataType::Float64 as i32,
semantic_type: SemanticType::Field as _,
+ datatype_extension: None,
},
];
for tag in tags {
schema.push(PbColumnSchema {
column_name: tag.to_string(),
- datatype: to_column_data_type(&ConcreteDataType::string_datatype())
- .unwrap()
- .into(),
+ datatype: ColumnDataType::String as i32,
semantic_type: SemanticType::Tag as _,
+ datatype_extension: None,
});
}
schema
diff --git a/src/mito2/src/engine/alter_test.rs b/src/mito2/src/engine/alter_test.rs
index c3d4045df334..a7c3d19caeb5 100644
--- a/src/mito2/src/engine/alter_test.rs
+++ b/src/mito2/src/engine/alter_test.rs
@@ -220,6 +220,7 @@ async fn test_put_after_alter() {
column_name: "tag_1".to_string(),
datatype: ColumnDataType::String as i32,
semantic_type: SemanticType::Tag as i32,
+ ..Default::default()
});
// Put with new schema.
let rows = Rows {
diff --git a/src/mito2/src/error.rs b/src/mito2/src/error.rs
index e9aba0bf6f8f..fbf66ac284f1 100644
--- a/src/mito2/src/error.rs
+++ b/src/mito2/src/error.rs
@@ -155,6 +155,16 @@ pub enum Error {
location: Location,
},
+ #[snafu(display(
+ "Failed to convert ConcreteDataType to ColumnDataType, reason: {}",
+ reason
+ ))]
+ ConvertColumnDataType {
+ reason: String,
+ source: api::error::Error,
+ location: Location,
+ },
+
/// An error type to indicate that schema is changed and we need
/// to fill default values again.
#[snafu(display("Need to fill default value for region {}", region_id))]
@@ -438,6 +448,7 @@ impl ErrorExt for Error {
| InvalidMeta { .. }
| InvalidRequest { .. }
| FillDefault { .. }
+ | ConvertColumnDataType { .. }
| InvalidMetadata { .. } => StatusCode::InvalidArguments,
RegionMetadataNotFound { .. }
| Join { .. }
diff --git a/src/mito2/src/memtable/key_values.rs b/src/mito2/src/memtable/key_values.rs
index 37906a0c5c3f..10854d23a8ae 100644
--- a/src/mito2/src/memtable/key_values.rs
+++ b/src/mito2/src/memtable/key_values.rs
@@ -14,7 +14,7 @@
use std::collections::HashMap;
-use api::v1::{Mutation, OpType, Row, Rows};
+use api::v1::{ColumnSchema, Mutation, OpType, Row, Rows};
use datatypes::value::ValueRef;
use store_api::metadata::RegionMetadata;
use store_api::storage::SequenceNumber;
@@ -45,9 +45,11 @@ impl KeyValues {
/// Returns a key value iterator.
pub fn iter(&self) -> impl Iterator<Item = KeyValue> {
let rows = self.mutation.rows.as_ref().unwrap();
+ let schema = &rows.schema;
rows.rows.iter().enumerate().map(|(idx, row)| {
KeyValue {
row,
+ schema,
helper: &self.helper,
sequence: self.mutation.sequence + idx as u64, // Calculate sequence for each row.
// Safety: This is a valid mutation.
@@ -72,6 +74,7 @@ impl KeyValues {
#[derive(Debug)]
pub struct KeyValue<'a> {
row: &'a Row,
+ schema: &'a Vec<ColumnSchema>,
helper: &'a ReadRowHelper,
sequence: SequenceNumber,
op_type: OpType,
@@ -82,21 +85,34 @@ impl<'a> KeyValue<'a> {
pub fn primary_keys(&self) -> impl Iterator<Item = ValueRef> {
self.helper.indices[..self.helper.num_primary_key_column]
.iter()
- .map(|idx| api::helper::pb_value_to_value_ref(&self.row.values[*idx]))
+ .map(|idx| {
+ api::helper::pb_value_to_value_ref(
+ &self.row.values[*idx],
+ &self.schema[*idx].datatype_extension,
+ )
+ })
}
/// Get field columns.
pub fn fields(&self) -> impl Iterator<Item = ValueRef> {
self.helper.indices[self.helper.num_primary_key_column + 1..]
.iter()
- .map(|idx| api::helper::pb_value_to_value_ref(&self.row.values[*idx]))
+ .map(|idx| {
+ api::helper::pb_value_to_value_ref(
+ &self.row.values[*idx],
+ &self.schema[*idx].datatype_extension,
+ )
+ })
}
/// Get timestamp.
pub fn timestamp(&self) -> ValueRef {
// Timestamp is primitive, we clone it.
let index = self.helper.indices[self.helper.num_primary_key_column];
- api::helper::pb_value_to_value_ref(&self.row.values[index])
+ api::helper::pb_value_to_value_ref(
+ &self.row.values[index],
+ &self.schema[index].datatype_extension,
+ )
}
/// Get number of primary key columns.
@@ -233,6 +249,7 @@ mod tests {
column_name: column_name.to_string(),
datatype,
semantic_type,
+ ..Default::default()
}
})
.collect();
diff --git a/src/mito2/src/memtable/time_series.rs b/src/mito2/src/memtable/time_series.rs
index 0289d932cd70..4dd590212c33 100644
--- a/src/mito2/src/memtable/time_series.rs
+++ b/src/mito2/src/memtable/time_series.rs
@@ -953,6 +953,7 @@ mod tests {
.unwrap()
.datatype() as i32,
semantic_type: c.semantic_type as i32,
+ ..Default::default()
})
.collect();
diff --git a/src/mito2/src/request.rs b/src/mito2/src/request.rs
index 260e008db248..5ebd5fae110c 100644
--- a/src/mito2/src/request.rs
+++ b/src/mito2/src/request.rs
@@ -19,8 +19,8 @@ use std::sync::Arc;
use std::time::{Duration, Instant};
use api::helper::{
- is_column_type_value_eq, is_semantic_type_eq, proto_value_type, to_column_data_type,
- to_proto_value,
+ is_column_type_value_eq, is_semantic_type_eq, proto_value_type, to_proto_value,
+ ColumnDataTypeWrapper,
};
use api::v1::{ColumnDataType, ColumnSchema, OpType, Rows, SemanticType, Value};
use common_query::Output;
@@ -40,8 +40,8 @@ use store_api::storage::{RegionId, SequenceNumber};
use tokio::sync::oneshot::{self, Receiver, Sender};
use crate::error::{
- CompactRegionSnafu, CreateDefaultSnafu, Error, FillDefaultSnafu, FlushRegionSnafu,
- InvalidRequestSnafu, Result,
+ CompactRegionSnafu, ConvertColumnDataTypeSnafu, CreateDefaultSnafu, Error, FillDefaultSnafu,
+ FlushRegionSnafu, InvalidRequestSnafu, Result,
};
use crate::memtable::MemtableId;
use crate::metrics::COMPACTION_ELAPSED_TOTAL;
@@ -152,7 +152,11 @@ impl WriteRequest {
if let Some(input_col) = rows_columns.remove(&column.column_schema.name) {
// Check data type.
ensure!(
- is_column_type_value_eq(input_col.datatype, &column.column_schema.data_type),
+ is_column_type_value_eq(
+ input_col.datatype,
+ input_col.datatype_extension.clone(),
+ &column.column_schema.data_type
+ ),
InvalidRequestSnafu {
region_id,
reason: format!(
@@ -248,19 +252,20 @@ impl WriteRequest {
}
// Insert column schema.
- let datatype = to_column_data_type(&column.column_schema.data_type).with_context(|| {
- InvalidRequestSnafu {
- region_id: self.region_id,
- reason: format!(
- "no protobuf type for column {} ({:?})",
- column.column_schema.name, column.column_schema.data_type
- ),
- }
- })?;
+ let (datatype, datatype_ext) =
+ ColumnDataTypeWrapper::try_from(column.column_schema.data_type.clone())
+ .with_context(|_| ConvertColumnDataTypeSnafu {
+ reason: format!(
+ "no protobuf type for column {} ({:?})",
+ column.column_schema.name, column.column_schema.data_type
+ ),
+ })?
+ .to_parts();
self.rows.schema.push(ColumnSchema {
column_name: column.column_schema.name.clone(),
datatype: datatype as i32,
semantic_type: column.semantic_type as i32,
+ datatype_extension: datatype_ext,
});
Ok(())
@@ -715,6 +720,7 @@ mod tests {
column_name: name.to_string(),
datatype: data_type as i32,
semantic_type: semantic_type as i32,
+ ..Default::default()
}
}
diff --git a/src/mito2/src/test_util.rs b/src/mito2/src/test_util.rs
index e602643ff5c5..59738d0e2ad4 100644
--- a/src/mito2/src/test_util.rs
+++ b/src/mito2/src/test_util.rs
@@ -530,12 +530,15 @@ impl WriteBufferManager for MockWriteBufferManager {
}
pub(crate) fn column_metadata_to_column_schema(metadata: &ColumnMetadata) -> api::v1::ColumnSchema {
+ let (datatype, datatype_extension) =
+ ColumnDataTypeWrapper::try_from(metadata.column_schema.data_type.clone())
+ .unwrap()
+ .to_parts();
api::v1::ColumnSchema {
column_name: metadata.column_schema.name.clone(),
- datatype: ColumnDataTypeWrapper::try_from(metadata.column_schema.data_type.clone())
- .unwrap()
- .datatype() as i32,
+ datatype: datatype as i32,
semantic_type: metadata.semantic_type as i32,
+ datatype_extension,
}
}
diff --git a/src/mito2/src/wal.rs b/src/mito2/src/wal.rs
index 85216c04e7ef..2171ceb7b1bd 100644
--- a/src/mito2/src/wal.rs
+++ b/src/mito2/src/wal.rs
@@ -214,11 +214,13 @@ mod tests {
column_name: "tag".to_string(),
datatype: ColumnDataType::String as i32,
semantic_type: SemanticType::Tag as i32,
+ ..Default::default()
},
ColumnSchema {
column_name: "ts".to_string(),
datatype: ColumnDataType::TimestampMillisecond as i32,
semantic_type: SemanticType::Timestamp as i32,
+ ..Default::default()
},
];
diff --git a/src/operator/src/expr_factory.rs b/src/operator/src/expr_factory.rs
index fb855f62028e..aec2b51566d2 100644
--- a/src/operator/src/expr_factory.rs
+++ b/src/operator/src/expr_factory.rs
@@ -17,8 +17,8 @@ use std::collections::HashMap;
use api::helper::ColumnDataTypeWrapper;
use api::v1::alter_expr::Kind;
use api::v1::{
- AddColumn, AddColumns, AlterExpr, Column, ColumnDataType, CreateTableExpr, DropColumn,
- DropColumns, RenameTable, SemanticType,
+ AddColumn, AddColumns, AlterExpr, Column, ColumnDataType, ColumnDataTypeExtension,
+ CreateTableExpr, DropColumn, DropColumns, RenameTable, SemanticType,
};
use common_error::ext::BoxedError;
use common_grpc_expr::util::ColumnExpr;
@@ -312,14 +312,14 @@ pub fn column_schemas_to_defs(
column_schemas: Vec<ColumnSchema>,
primary_keys: &[String],
) -> Result<Vec<api::v1::ColumnDef>> {
- let column_datatypes = column_schemas
+ let column_datatypes: Vec<(ColumnDataType, Option<ColumnDataTypeExtension>)> = column_schemas
.iter()
.map(|c| {
ColumnDataTypeWrapper::try_from(c.data_type.clone())
- .map(|w| w.datatype())
+ .map(|w| w.to_parts())
.context(ColumnDataTypeSnafu)
})
- .collect::<Result<Vec<ColumnDataType>>>()?;
+ .collect::<Result<Vec<_>>>()?;
column_schemas
.iter()
@@ -340,7 +340,7 @@ pub fn column_schemas_to_defs(
Ok(api::v1::ColumnDef {
name: schema.name.clone(),
- data_type: datatype as i32,
+ data_type: datatype.0 as i32,
is_nullable: schema.is_nullable(),
default_constraint: match schema.default_constraint() {
None => vec![],
@@ -354,6 +354,7 @@ pub fn column_schemas_to_defs(
},
semantic_type,
comment,
+ datatype_extension: datatype.1,
})
})
.collect()
diff --git a/src/operator/src/req_convert/common.rs b/src/operator/src/req_convert/common.rs
index 21bca1c33acd..694906b82833 100644
--- a/src/operator/src/req_convert/common.rs
+++ b/src/operator/src/req_convert/common.rs
@@ -20,7 +20,6 @@ use api::helper::ColumnDataTypeWrapper;
use api::v1::value::ValueData;
use api::v1::{Column, ColumnDataType, ColumnSchema, Row, Rows, SemanticType, Value};
use common_base::BitVec;
-use datatypes::prelude::ConcreteDataType;
use datatypes::vectors::VectorRef;
use snafu::prelude::*;
use snafu::ResultExt;
@@ -46,6 +45,7 @@ pub fn columns_to_rows(columns: Vec<Column>, row_count: u32) -> Result<Rows> {
column_name: column.column_name.clone(),
datatype: column.datatype,
semantic_type: column.semantic_type,
+ datatype_extension: column.datatype_extension.clone(),
};
schema.push(column_schema);
@@ -57,7 +57,7 @@ pub fn columns_to_rows(columns: Vec<Column>, row_count: u32) -> Result<Rows> {
fn push_column_to_rows(column: Column, rows: &mut [Row]) -> Result<()> {
let null_mask = BitVec::from_vec(column.null_mask);
- let column_type = ColumnDataTypeWrapper::try_new(column.datatype)
+ let column_type = ColumnDataTypeWrapper::try_new(column.datatype, column.datatype_extension)
.context(ColumnDataTypeSnafu)?
.datatype();
let column_values = column.values.unwrap_or_default();
@@ -177,6 +177,7 @@ fn push_column_to_rows(column: Column, rows: &mut [Row]) -> Result<()> {
DurationNanosecondValue,
duration_nanosecond_values
),
+ (Decimal128, Decimal128Value, decimal128_values),
);
Ok(())
@@ -206,10 +207,16 @@ pub fn column_schema(
columns
.iter()
.map(|(column_name, vector)| {
+ let (datatype, datatype_extension) =
+ ColumnDataTypeWrapper::try_from(vector.data_type().clone())
+ .context(ColumnDataTypeSnafu)?
+ .to_parts();
+
Ok(ColumnSchema {
column_name: column_name.clone(),
- datatype: data_type(vector.data_type())?.into(),
+ datatype: datatype as i32,
semantic_type: semantic_type(table_info, column_name)?.into(),
+ datatype_extension,
})
})
.collect::<Result<Vec<_>>>()
@@ -245,11 +252,6 @@ fn semantic_type(table_info: &TableInfo, column: &str) -> Result<SemanticType> {
Ok(semantic_type)
}
-fn data_type(data_type: ConcreteDataType) -> Result<ColumnDataType> {
- let datatype: ColumnDataTypeWrapper = data_type.try_into().context(ColumnDataTypeSnafu)?;
- Ok(datatype.datatype())
-}
-
#[cfg(test)]
mod tests {
use api::v1::column::Values;
@@ -270,6 +272,7 @@ mod tests {
i32_values: vec![42],
..Default::default()
}),
+ ..Default::default()
},
Column {
column_name: String::from("col2"),
@@ -284,6 +287,7 @@ mod tests {
],
..Default::default()
}),
+ ..Default::default()
},
];
let row_count = 3;
@@ -335,6 +339,7 @@ mod tests {
i8_values: vec![42],
..Default::default()
}),
+ ..Default::default()
}];
let row_count = 3;
assert!(columns_to_rows(columns, row_count).is_err());
@@ -349,6 +354,7 @@ mod tests {
i32_values: vec![42],
..Default::default()
}),
+ ..Default::default()
}];
let row_count = 3;
assert!(columns_to_rows(columns, row_count).is_err());
@@ -363,6 +369,7 @@ mod tests {
i32_values: vec![42],
..Default::default()
}),
+ ..Default::default()
}];
let row_count = 3;
assert!(columns_to_rows(columns, row_count).is_err());
diff --git a/src/operator/src/req_convert/delete/table_to_region.rs b/src/operator/src/req_convert/delete/table_to_region.rs
index 0b578c8697c3..ba93ad0f4afb 100644
--- a/src/operator/src/req_convert/delete/table_to_region.rs
+++ b/src/operator/src/req_convert/delete/table_to_region.rs
@@ -156,6 +156,7 @@ mod tests {
column_name: "a".to_string(),
datatype: ColumnDataType::Int32 as i32,
semantic_type: SemanticType::Tag as i32,
+ ..Default::default()
}],
rows: rows
.into_iter()
diff --git a/src/operator/src/req_convert/insert.rs b/src/operator/src/req_convert/insert.rs
index 96f88a739b7b..51984c4de034 100644
--- a/src/operator/src/req_convert/insert.rs
+++ b/src/operator/src/req_convert/insert.rs
@@ -17,17 +17,15 @@ mod row_to_region;
mod stmt_to_region;
mod table_to_region;
-use api::helper::ColumnDataTypeWrapper;
-use api::v1::{ColumnDataType, SemanticType};
+use api::v1::SemanticType;
pub use column_to_row::ColumnToRow;
-use datatypes::prelude::ConcreteDataType;
pub use row_to_region::RowToRegion;
use snafu::{OptionExt, ResultExt};
pub use stmt_to_region::StatementToRegion;
use table::metadata::TableInfo;
pub use table_to_region::TableToRegion;
-use crate::error::{ColumnDataTypeSnafu, ColumnNotFoundSnafu, MissingTimeIndexColumnSnafu, Result};
+use crate::error::{ColumnNotFoundSnafu, MissingTimeIndexColumnSnafu, Result};
fn semantic_type(table_info: &TableInfo, column: &str) -> Result<SemanticType> {
let table_meta = &table_info.meta;
@@ -58,8 +56,3 @@ fn semantic_type(table_info: &TableInfo, column: &str) -> Result<SemanticType> {
Ok(semantic_type)
}
-
-fn data_type(data_type: ConcreteDataType) -> Result<ColumnDataType> {
- let datatype: ColumnDataTypeWrapper = data_type.try_into().context(ColumnDataTypeSnafu)?;
- Ok(datatype.datatype())
-}
diff --git a/src/operator/src/req_convert/insert/stmt_to_region.rs b/src/operator/src/req_convert/insert/stmt_to_region.rs
index 1230bb1427b3..1297adf6a5de 100644
--- a/src/operator/src/req_convert/insert/stmt_to_region.rs
+++ b/src/operator/src/req_convert/insert/stmt_to_region.rs
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use api::helper::value_to_grpc_value;
+use api::helper::{value_to_grpc_value, ColumnDataTypeWrapper};
use api::v1::region::InsertRequests as RegionInsertRequests;
use api::v1::{ColumnSchema as GrpcColumnSchema, Row, Rows, Value as GrpcValue};
use catalog::CatalogManager;
@@ -25,10 +25,11 @@ use sql::statements::insert::Insert;
use sqlparser::ast::{ObjectName, Value as SqlValue};
use table::TableRef;
-use super::{data_type, semantic_type};
+use super::semantic_type;
use crate::error::{
- CatalogSnafu, ColumnDefaultValueSnafu, ColumnNoneDefaultValueSnafu, ColumnNotFoundSnafu,
- InvalidSqlSnafu, MissingInsertBodySnafu, ParseSqlSnafu, Result, TableNotFoundSnafu,
+ CatalogSnafu, ColumnDataTypeSnafu, ColumnDefaultValueSnafu, ColumnNoneDefaultValueSnafu,
+ ColumnNotFoundSnafu, InvalidSqlSnafu, MissingInsertBodySnafu, ParseSqlSnafu, Result,
+ TableNotFoundSnafu,
};
use crate::req_convert::common::partitioner::Partitioner;
@@ -94,13 +95,17 @@ impl<'a> StatementToRegion<'a> {
msg: format!("Column {} not found in table {}", column_name, &table_name),
})?;
- let datatype = data_type(column_schema.data_type.clone())?;
+ let (datatype, datatype_extension) =
+ ColumnDataTypeWrapper::try_from(column_schema.data_type.clone())
+ .context(ColumnDataTypeSnafu)?
+ .to_parts();
let semantic_type = semantic_type(&table_info, column_name)?;
let grpc_column_schema = GrpcColumnSchema {
column_name: column_name.clone(),
datatype: datatype.into(),
semantic_type: semantic_type.into(),
+ datatype_extension,
};
schema.push(grpc_column_schema);
diff --git a/src/operator/src/req_convert/insert/table_to_region.rs b/src/operator/src/req_convert/insert/table_to_region.rs
index 5ece06b79f96..729355cf0159 100644
--- a/src/operator/src/req_convert/insert/table_to_region.rs
+++ b/src/operator/src/req_convert/insert/table_to_region.rs
@@ -156,6 +156,7 @@ mod tests {
column_name: "a".to_string(),
datatype: ColumnDataType::Int32 as i32,
semantic_type: SemanticType::Tag as i32,
+ ..Default::default()
}],
rows: rows
.into_iter()
diff --git a/src/operator/src/statement/ddl.rs b/src/operator/src/statement/ddl.rs
index c31ee9afae94..7051a9c626a8 100644
--- a/src/operator/src/statement/ddl.rs
+++ b/src/operator/src/statement/ddl.rs
@@ -543,7 +543,8 @@ fn find_partition_entries(
for column in column_defs {
let column_name = &column.name;
let data_type = ConcreteDataType::from(
- ColumnDataTypeWrapper::try_new(column.data_type).context(ColumnDataTypeSnafu)?,
+ ColumnDataTypeWrapper::try_new(column.data_type, column.datatype_extension.clone())
+ .context(ColumnDataTypeSnafu)?,
);
column_name_and_type.push((column_name, data_type));
}
diff --git a/src/partition/src/splitter.rs b/src/partition/src/splitter.rs
index f7c64d8ba117..ed0eaa2cc571 100644
--- a/src/partition/src/splitter.rs
+++ b/src/partition/src/splitter.rs
@@ -117,7 +117,11 @@ impl<'a> SplitReadRowHelper<'a> {
.iter()
.map(|idx| {
idx.as_ref().map_or(Value::Null, |idx| {
- helper::pb_value_to_value_ref(&row.values[*idx]).into()
+ helper::pb_value_to_value_ref(
+ &row.values[*idx],
+ &self.schema[*idx].datatype_extension,
+ )
+ .into()
})
})
.collect()
@@ -144,16 +148,19 @@ mod tests {
column_name: "id".to_string(),
datatype: ColumnDataType::String as i32,
semantic_type: SemanticType::Tag as i32,
+ ..Default::default()
},
ColumnSchema {
column_name: "name".to_string(),
datatype: ColumnDataType::String as i32,
semantic_type: SemanticType::Tag as i32,
+ ..Default::default()
},
ColumnSchema {
column_name: "age".to_string(),
datatype: ColumnDataType::Uint32 as i32,
semantic_type: SemanticType::Field as i32,
+ ..Default::default()
},
];
let rows = vec![
diff --git a/src/script/src/table.rs b/src/script/src/table.rs
index 6e52f7139d81..6ecbaa4e33ff 100644
--- a/src/script/src/table.rs
+++ b/src/script/src/table.rs
@@ -302,31 +302,37 @@ fn build_insert_column_schemas() -> Vec<PbColumnSchema> {
column_name: "schema".to_string(),
datatype: ColumnDataType::String.into(),
semantic_type: SemanticType::Tag.into(),
+ ..Default::default()
},
PbColumnSchema {
column_name: "name".to_string(),
datatype: ColumnDataType::String.into(),
semantic_type: SemanticType::Tag.into(),
+ ..Default::default()
},
PbColumnSchema {
column_name: "engine".to_string(),
datatype: ColumnDataType::String.into(),
semantic_type: SemanticType::Tag.into(),
+ ..Default::default()
},
PbColumnSchema {
column_name: "script".to_string(),
datatype: ColumnDataType::String.into(),
semantic_type: SemanticType::Field.into(),
+ ..Default::default()
},
PbColumnSchema {
column_name: "greptime_timestamp".to_string(),
datatype: ColumnDataType::TimestampMillisecond.into(),
semantic_type: SemanticType::Timestamp.into(),
+ ..Default::default()
},
PbColumnSchema {
column_name: "gmt_modified".to_string(),
datatype: ColumnDataType::TimestampMillisecond.into(),
semantic_type: SemanticType::Field.into(),
+ ..Default::default()
},
]
}
@@ -358,7 +364,9 @@ pub fn build_scripts_schema() -> RawSchema {
let cs = ColumnSchema::new(
c.column_name,
// Safety: the type always exists
- ColumnDataTypeWrapper::try_new(c.datatype).unwrap().into(),
+ ColumnDataTypeWrapper::try_new(c.datatype, c.datatype_extension)
+ .unwrap()
+ .into(),
false,
);
if c.semantic_type == SemanticType::Timestamp as i32 {
diff --git a/src/servers/src/prom_store.rs b/src/servers/src/prom_store.rs
index 955bda833bcc..a0a4725fa861 100644
--- a/src/servers/src/prom_store.rs
+++ b/src/servers/src/prom_store.rs
@@ -577,6 +577,7 @@ mod tests {
column_name: k.to_string(),
datatype: t as i32,
semantic_type: s as i32,
+ ..Default::default()
})
.collect()
}
diff --git a/src/servers/src/row_writer.rs b/src/servers/src/row_writer.rs
index 9dfe92738706..b4daad3ecdac 100644
--- a/src/servers/src/row_writer.rs
+++ b/src/servers/src/row_writer.rs
@@ -215,6 +215,7 @@ fn write_by_semantic_type(
column_name: name.to_string(),
datatype: datatype as i32,
semantic_type: semantic_type as i32,
+ ..Default::default()
});
one_row.push(value.into());
} else {
@@ -269,6 +270,7 @@ pub fn write_ts_precision(
column_name: name,
datatype: ColumnDataType::TimestampMillisecond as i32,
semantic_type: SemanticType::Timestamp as i32,
+ ..Default::default()
});
one_row.push(ValueData::TimestampMillisecondValue(ts).into())
} else {
diff --git a/src/sql/src/statements.rs b/src/sql/src/statements.rs
index eae6551c30e1..e9594aac8e7a 100644
--- a/src/sql/src/statements.rs
+++ b/src/sql/src/statements.rs
@@ -107,6 +107,16 @@ fn parse_string_to_value(
.fail()
}
}
+ ConcreteDataType::Decimal128(_) => {
+ if let Ok(val) = common_decimal::Decimal128::from_str(&s) {
+ Ok(Value::Decimal128(val))
+ } else {
+ ParseSqlValueSnafu {
+ msg: format!("Fail to parse number {s} to Decimal128 value"),
+ }
+ .fail()
+ }
+ }
_ => {
unreachable!()
}
@@ -146,7 +156,19 @@ macro_rules! parse_number_to_value {
let n = parse_sql_number::<i64>($n)?;
Ok(Value::Timestamp(Timestamp::new(n, t.unit())))
},
- // TODO(QuenKar): parse decimal128 string with precision and scale
+            // TODO(QuenKar): this may need optimization; if `from_str` turns
+            // out to be slow, we can parse the decimal string (with precision
+            // and scale) manually instead.
+ ConcreteDataType::Decimal128(_) => {
+ if let Ok(val) = common_decimal::Decimal128::from_str($n) {
+ Ok(Value::Decimal128(val))
+ } else {
+ ParseSqlValueSnafu {
+ msg: format!("Fail to parse number {}, invalid column type: {:?}",
+ $n, $data_type)
+ }.fail()
+ }
+ }
_ => ParseSqlValueSnafu {
msg: format!("Fail to parse number {}, invalid column type: {:?}",
@@ -356,18 +378,20 @@ pub fn sql_column_def_to_grpc_column_def(col: &ColumnDef) -> Result<api::v1::Col
.map(ColumnDefaultConstraint::try_into) // serialize default constraint to bytes
.transpose()
.context(SerializeColumnDefaultConstraintSnafu)?;
-
- let data_type = ColumnDataTypeWrapper::try_from(data_type)
+ // convert ConcreteDataType to grpc ColumnDataTypeWrapper
+ let (datatype, datatype_ext) = ColumnDataTypeWrapper::try_from(data_type.clone())
.context(ConvertToGrpcDataTypeSnafu)?
- .datatype() as i32;
+ .to_parts();
+
Ok(api::v1::ColumnDef {
name,
- data_type,
+ data_type: datatype as i32,
is_nullable,
default_constraint: default_constraint.unwrap_or_default(),
// TODO(#1308): support adding new primary key columns
semantic_type: SemanticType::Field as _,
comment: String::new(),
+ datatype_extension: datatype_ext,
})
}
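The two `Decimal128` branches added above both lean on `common_decimal::Decimal128::from_str` to turn a SQL literal into a value with precision and scale. As a rough illustration of what such a parser has to infer, here is a hand-rolled sketch under simplified assumptions (no exponent or overflow handling, and not the actual `from_str` implementation):

```rust
// Hypothetical sketch: derive (value, precision, scale) from a plain
// decimal literal, e.g. "12.3" -> (123, 3, 1), the same triple that
// Decimal128::new(value, precision, scale) expects.
fn parse_decimal(s: &str) -> Option<(i128, u8, i8)> {
    let neg = s.starts_with('-');
    let digits = s.trim_start_matches(|c| c == '+' || c == '-');
    let (int_part, frac_part) = digits.split_once('.').unwrap_or((digits, ""));
    let mut value: i128 = 0;
    for c in int_part.chars().chain(frac_part.chars()) {
        value = value.checked_mul(10)?.checked_add(c.to_digit(10)? as i128)?;
    }
    if neg {
        value = -value;
    }
    let precision = (int_part.len() + frac_part.len()).max(1) as u8;
    let scale = frac_part.len() as i8;
    Some((value, precision, scale))
}

fn main() {
    assert_eq!(parse_decimal("12.3"), Some((123, 3, 1)));
    assert_eq!(parse_decimal("-0.05"), Some((-5, 3, 2)));
}
```

Whether `from_str` infers precision and scale exactly this way is not shown in the diff; the sketch only illustrates the target representation.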
diff --git a/src/store-api/src/metadata.rs b/src/store-api/src/metadata.rs
index 6e1c236fac4a..f4d2f4b84783 100644
--- a/src/store-api/src/metadata.rs
+++ b/src/store-api/src/metadata.rs
@@ -81,7 +81,11 @@ impl ColumnMetadata {
.context(ConvertDatatypesSnafu)?,
)
};
- let data_type = ColumnDataTypeWrapper::new(column_def.data_type()).into();
+ let data_type = ColumnDataTypeWrapper::new(
+ column_def.data_type(),
+ column_def.datatype_extension.clone(),
+ )
+ .into();
let column_schema = ColumnSchema::new(column_def.name, data_type, column_def.is_nullable)
.with_default_constraint(default_constrain)
.context(ConvertDatatypesSnafu)?;
diff --git a/src/store-api/src/region_request.rs b/src/store-api/src/region_request.rs
index e7633a2964a0..928f2abd73ee 100644
--- a/src/store-api/src/region_request.rs
+++ b/src/store-api/src/region_request.rs
@@ -534,6 +534,7 @@ mod tests {
default_constraint: vec![],
semantic_type: SemanticType::Field as i32,
comment: String::new(),
+ ..Default::default()
}),
column_id: 1,
}),
diff --git a/src/store-api/src/storage/requests.rs b/src/store-api/src/storage/requests.rs
index 3d1c5da8cdb8..07a1741f5a21 100644
--- a/src/store-api/src/storage/requests.rs
+++ b/src/store-api/src/storage/requests.rs
@@ -150,7 +150,8 @@ impl TryFrom<PbAddColumn> for AddColumn {
})?;
let data_type = column_def.data_type;
- let data_type = ColumnDataTypeWrapper::try_new(data_type)
+ let data_type_ext = column_def.datatype_extension.clone();
+ let data_type = ColumnDataTypeWrapper::try_new(data_type, data_type_ext)
.map_err(|_| {
InvalidRawRegionRequestSnafu {
err: format!("unknown raw column datatype: {data_type}"),
@@ -313,6 +314,7 @@ mod tests {
default_constraint: vec![],
semantic_type: SemanticType::Tag as _,
comment: String::new(),
+ ..Default::default()
}),
column_id: 1,
}),
@@ -329,6 +331,7 @@ mod tests {
.unwrap(),
semantic_type: SemanticType::Field as _,
comment: String::new(),
+ ..Default::default()
}),
column_id: 2,
}),
diff --git a/tests-integration/src/grpc.rs b/tests-integration/src/grpc.rs
index 123e864dd553..ded795861476 100644
--- a/tests-integration/src/grpc.rs
+++ b/tests-integration/src/grpc.rs
@@ -340,6 +340,7 @@ CREATE TABLE {table_name} (
null_mask: vec![32, 0],
semantic_type: SemanticType::Tag as i32,
datatype: ColumnDataType::Int32 as i32,
+ ..Default::default()
},
Column {
column_name: "b".to_string(),
@@ -573,6 +574,7 @@ CREATE TABLE {table_name} (
null_mask: vec![2],
semantic_type: SemanticType::Field as i32,
datatype: ColumnDataType::Int32 as i32,
+ ..Default::default()
},
Column {
column_name: "ts".to_string(),
@@ -611,6 +613,7 @@ CREATE TABLE {table_name} (
null_mask: vec![2],
semantic_type: SemanticType::Field as i32,
datatype: ColumnDataType::String as i32,
+ ..Default::default()
},
Column {
column_name: "ts".to_string(),
@@ -743,6 +746,7 @@ CREATE TABLE {table_name} (
null_mask: vec![4],
semantic_type: SemanticType::Field as i32,
datatype: ColumnDataType::Float64 as i32,
+ ..Default::default()
},
Column {
column_name: "ts".to_string(),
diff --git a/tests-integration/tests/grpc.rs b/tests-integration/tests/grpc.rs
index 3d766a5a37f0..a0d19908f503 100644
--- a/tests-integration/tests/grpc.rs
+++ b/tests-integration/tests/grpc.rs
@@ -267,6 +267,7 @@ fn expect_data() -> (Column, Column, Column, Column) {
null_mask: vec![2],
semantic_type: SemanticType::Field as i32,
datatype: ColumnDataType::Float64 as i32,
+ ..Default::default()
};
let expected_mem_col = Column {
column_name: "memory".to_string(),
@@ -277,6 +278,7 @@ fn expect_data() -> (Column, Column, Column, Column) {
null_mask: vec![4],
semantic_type: SemanticType::Field as i32,
datatype: ColumnDataType::Float64 as i32,
+ ..Default::default()
};
let expected_ts_col = Column {
column_name: "ts".to_string(),
| feat | supports decimal type in RPC (#2788) |
| 480b05c5904c254ccb54a52c9e837613be18675c | 2025-02-08 14:31:54 | Ning Sun | feat: pipeline dispatcher part 2: execution (#5409) | false |
diff --git a/Cargo.lock b/Cargo.lock
index 1ef7915a5dea..45d242094d5c 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -268,9 +268,9 @@ checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50"
[[package]]
name = "arrow"
-version = "53.3.0"
+version = "53.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c91839b07e474b3995035fd8ac33ee54f9c9ccbbb1ea33d9909c71bffdf1259d"
+checksum = "eaf3437355979f1e93ba84ba108c38be5767713051f3c8ffbf07c094e2e61f9f"
dependencies = [
"arrow-arith",
"arrow-array",
@@ -289,9 +289,9 @@ dependencies = [
[[package]]
name = "arrow-arith"
-version = "53.3.0"
+version = "53.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "855c57c4efd26722b044dcd3e348252560e3e0333087fb9f6479dc0bf744054f"
+checksum = "31dce77d2985522288edae7206bffd5fc4996491841dda01a13a58415867e681"
dependencies = [
"arrow-array",
"arrow-buffer",
@@ -304,16 +304,16 @@ dependencies = [
[[package]]
name = "arrow-array"
-version = "53.3.0"
+version = "53.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bd03279cea46569acf9295f6224fbc370c5df184b4d2ecfe97ccb131d5615a7f"
+checksum = "2d45fe6d3faed0435b7313e59a02583b14c6c6339fa7729e94c32a20af319a79"
dependencies = [
"ahash 0.8.11",
"arrow-buffer",
"arrow-data",
"arrow-schema",
"chrono",
- "chrono-tz 0.10.0",
+ "chrono-tz 0.10.1",
"half",
"hashbrown 0.15.2",
"num",
@@ -321,9 +321,9 @@ dependencies = [
[[package]]
name = "arrow-buffer"
-version = "53.3.0"
+version = "53.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9e4a9b9b1d6d7117f6138e13bc4dd5daa7f94e671b70e8c9c4dc37b4f5ecfc16"
+checksum = "2b02656a35cc103f28084bc80a0159668e0a680d919cef127bd7e0aaccb06ec1"
dependencies = [
"bytes",
"half",
@@ -332,9 +332,9 @@ dependencies = [
[[package]]
name = "arrow-cast"
-version = "53.3.0"
+version = "53.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bc70e39916e60c5b7af7a8e2719e3ae589326039e1e863675a008bee5ffe90fd"
+checksum = "c73c6233c5b5d635a56f6010e6eb1ab9e30e94707db21cea03da317f67d84cf3"
dependencies = [
"arrow-array",
"arrow-buffer",
@@ -353,9 +353,9 @@ dependencies = [
[[package]]
name = "arrow-csv"
-version = "53.3.0"
+version = "53.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "789b2af43c1049b03a8d088ff6b2257cdcea1756cd76b174b1f2600356771b97"
+checksum = "ec222848d70fea5a32af9c3602b08f5d740d5e2d33fbd76bf6fd88759b5b13a7"
dependencies = [
"arrow-array",
"arrow-buffer",
@@ -372,9 +372,9 @@ dependencies = [
[[package]]
name = "arrow-data"
-version = "53.3.0"
+version = "53.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e4e75edf21ffd53744a9b8e3ed11101f610e7ceb1a29860432824f1834a1f623"
+checksum = "b7f2861ffa86f107b8ab577d86cff7c7a490243eabe961ba1e1af4f27542bb79"
dependencies = [
"arrow-buffer",
"arrow-schema",
@@ -384,9 +384,9 @@ dependencies = [
[[package]]
name = "arrow-flight"
-version = "53.3.0"
+version = "53.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4c09b331887a526f203f2123444792aee924632bd08b9940435070901075832e"
+checksum = "3ab7635558f3f803b492eae56c03cde97ea5f85a1c768f94181cb7db69cd81be"
dependencies = [
"arrow-array",
"arrow-buffer",
@@ -405,9 +405,9 @@ dependencies = [
[[package]]
name = "arrow-ipc"
-version = "53.3.0"
+version = "53.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d186a909dece9160bf8312f5124d797884f608ef5435a36d9d608e0b2a9bcbf8"
+checksum = "0270dc511f11bb5fa98a25020ad51a99ca5b08d8a8dfbd17503bb9dba0388f0b"
dependencies = [
"arrow-array",
"arrow-buffer",
@@ -421,9 +421,9 @@ dependencies = [
[[package]]
name = "arrow-json"
-version = "53.3.0"
+version = "53.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b66ff2fedc1222942d0bd2fd391cb14a85baa3857be95c9373179bd616753b85"
+checksum = "0eff38eeb8a971ad3a4caf62c5d57f0cff8a48b64a55e3207c4fd696a9234aad"
dependencies = [
"arrow-array",
"arrow-buffer",
@@ -441,9 +441,9 @@ dependencies = [
[[package]]
name = "arrow-ord"
-version = "53.3.0"
+version = "53.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ece7b5bc1180e6d82d1a60e1688c199829e8842e38497563c3ab6ea813e527fd"
+checksum = "c6f202a879d287099139ff0d121e7f55ae5e0efe634b8cf2106ebc27a8715dee"
dependencies = [
"arrow-array",
"arrow-buffer",
@@ -456,9 +456,9 @@ dependencies = [
[[package]]
name = "arrow-row"
-version = "53.3.0"
+version = "53.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "745c114c8f0e8ce211c83389270de6fbe96a9088a7b32c2a041258a443fe83ff"
+checksum = "a8f936954991c360ba762dff23f5dda16300774fafd722353d9683abd97630ae"
dependencies = [
"ahash 0.8.11",
"arrow-array",
@@ -470,18 +470,18 @@ dependencies = [
[[package]]
name = "arrow-schema"
-version = "53.3.0"
+version = "53.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b95513080e728e4cec37f1ff5af4f12c9688d47795d17cda80b6ec2cf74d4678"
+checksum = "9579b9d8bce47aa41389fe344f2c6758279983b7c0ebb4013e283e3e91bb450e"
dependencies = [
"serde",
]
[[package]]
name = "arrow-select"
-version = "53.3.0"
+version = "53.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8e415279094ea70323c032c6e739c48ad8d80e78a09bef7117b8718ad5bf3722"
+checksum = "7471ba126d0b0aaa24b50a36bc6c25e4e74869a1fd1a5553357027a0b1c8d1f1"
dependencies = [
"ahash 0.8.11",
"arrow-array",
@@ -493,9 +493,9 @@ dependencies = [
[[package]]
name = "arrow-string"
-version = "53.3.0"
+version = "53.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "11d956cae7002eb8d83a27dbd34daaea1cf5b75852f0b84deb4d93a276e92bbf"
+checksum = "72993b01cb62507b06f1fb49648d7286c8989ecfabdb7b77a750fcb54410731b"
dependencies = [
"arrow-array",
"arrow-buffer",
@@ -616,7 +616,7 @@ checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -638,7 +638,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -655,7 +655,7 @@ checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -735,7 +735,7 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -936,7 +936,7 @@ checksum = "57d123550fa8d071b7255cb0cc04dc302baa6c8c4a79f55701552684d8399bce"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -1059,7 +1059,7 @@ dependencies = [
"regex",
"rustc-hash 1.1.0",
"shlex",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -1171,7 +1171,7 @@ dependencies = [
"proc-macro-crate 3.2.0",
"proc-macro2",
"quote",
- "syn 2.0.90",
+ "syn 2.0.96",
"syn_derive",
]
@@ -1532,9 +1532,9 @@ dependencies = [
[[package]]
name = "chrono-tz"
-version = "0.10.0"
+version = "0.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cd6dd8046d00723a59a2f8c5f295c515b9bb9a331ee4f8f3d4dd49e428acd3b6"
+checksum = "9c6ac4f2c0bf0f44e9161aec9675e1050aa4a530663c4a9e37e108fa948bca9f"
dependencies = [
"chrono",
"chrono-tz-build 0.4.0",
@@ -1685,7 +1685,7 @@ dependencies = [
"heck 0.5.0",
"proc-macro2",
"quote",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -2168,7 +2168,7 @@ dependencies = [
"quote",
"snafu 0.8.5",
"static_assertions",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -2880,7 +2880,7 @@ dependencies = [
"proc-macro2",
"quote",
"strsim 0.11.1",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -2902,7 +2902,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806"
dependencies = [
"darling_core 0.20.10",
"quote",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -3235,7 +3235,7 @@ dependencies = [
"datafusion-doc",
"proc-macro2",
"quote",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -3558,7 +3558,7 @@ checksum = "2cdc8d50f426189eef89dac62fabfa0abb27d5cc008f25bf4156a0203325becc"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -3569,7 +3569,7 @@ checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -3632,7 +3632,7 @@ dependencies = [
"darling 0.20.10",
"proc-macro2",
"quote",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -3662,7 +3662,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4abae7035bf79b9877b779505d8cf3749285b80c43941eda66604841889451dc"
dependencies = [
"derive_builder_core 0.20.1",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -3682,7 +3682,7 @@ checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.90",
+ "syn 2.0.96",
"unicode-xid",
]
@@ -3694,7 +3694,7 @@ checksum = "65f152f4b8559c4da5d574bafc7af85454d706b4c5fe8b530d508cacbb6807ea"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -3768,6 +3768,17 @@ dependencies = [
"winapi",
]
+[[package]]
+name = "displaydoc"
+version = "0.2.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.96",
+]
+
[[package]]
name = "dlv-list"
version = "0.3.0"
@@ -3882,7 +3893,7 @@ dependencies = [
"heck 0.5.0",
"proc-macro2",
"quote",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -3894,7 +3905,7 @@ dependencies = [
"once_cell",
"proc-macro2",
"quote",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -3914,7 +3925,7 @@ checksum = "3bf679796c0322556351f287a51b49e48f7c4986e727b5dd78c972d30e2e16cc"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -4132,9 +4143,9 @@ checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80"
[[package]]
name = "flatbuffers"
-version = "24.3.25"
+version = "24.12.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8add37afff2d4ffa83bc748a70b4b1370984f6980768554182424ef71447c35f"
+checksum = "4f1baf0dbf96932ec9a3038d57900329c015b0bfb7b63d904f3bc27e2b02a096"
dependencies = [
"bitflags 1.3.2",
"rustc_version",
@@ -4388,7 +4399,7 @@ checksum = "e99b8b3c28ae0e84b604c75f721c21dc77afb3706076af5e8216d15fd1deaae3"
dependencies = [
"frunk_proc_macro_helpers",
"quote",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -4400,7 +4411,7 @@ dependencies = [
"frunk_core",
"proc-macro2",
"quote",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -4412,7 +4423,7 @@ dependencies = [
"frunk_core",
"frunk_proc_macro_helpers",
"quote",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -4536,7 +4547,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -4658,9 +4669,9 @@ dependencies = [
[[package]]
name = "get-size2"
-version = "0.1.3"
+version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3aa3d1f2527cf956b5637a531e21eb1ef9c825c70cd6f8765fd00b7457eef699"
+checksum = "159c430715e540d2198fa981d39cd45563ccc60900de187f5b152b33b1cb408e"
[[package]]
name = "getopts"
@@ -5137,7 +5148,7 @@ dependencies = [
"proc-macro-crate 1.3.1",
"proc-macro2",
"quote",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -5152,7 +5163,7 @@ dependencies = [
"rust-sitter",
"rust-sitter-tool",
"slotmap",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -5171,7 +5182,7 @@ dependencies = [
"serde",
"serde_json",
"slotmap",
- "syn 2.0.90",
+ "syn 2.0.96",
"webbrowser",
]
@@ -5185,7 +5196,7 @@ dependencies = [
"proc-macro-crate 1.3.1",
"proc-macro2",
"quote",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -5383,6 +5394,124 @@ dependencies = [
"cc",
]
+[[package]]
+name = "icu_collections"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526"
+dependencies = [
+ "displaydoc",
+ "yoke",
+ "zerofrom",
+ "zerovec",
+]
+
+[[package]]
+name = "icu_locid"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637"
+dependencies = [
+ "displaydoc",
+ "litemap",
+ "tinystr",
+ "writeable",
+ "zerovec",
+]
+
+[[package]]
+name = "icu_locid_transform"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e"
+dependencies = [
+ "displaydoc",
+ "icu_locid",
+ "icu_locid_transform_data",
+ "icu_provider",
+ "tinystr",
+ "zerovec",
+]
+
+[[package]]
+name = "icu_locid_transform_data"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e"
+
+[[package]]
+name = "icu_normalizer"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f"
+dependencies = [
+ "displaydoc",
+ "icu_collections",
+ "icu_normalizer_data",
+ "icu_properties",
+ "icu_provider",
+ "smallvec",
+ "utf16_iter",
+ "utf8_iter",
+ "write16",
+ "zerovec",
+]
+
+[[package]]
+name = "icu_normalizer_data"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516"
+
+[[package]]
+name = "icu_properties"
+version = "1.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5"
+dependencies = [
+ "displaydoc",
+ "icu_collections",
+ "icu_locid_transform",
+ "icu_properties_data",
+ "icu_provider",
+ "tinystr",
+ "zerovec",
+]
+
+[[package]]
+name = "icu_properties_data"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569"
+
+[[package]]
+name = "icu_provider"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9"
+dependencies = [
+ "displaydoc",
+ "icu_locid",
+ "icu_provider_macros",
+ "stable_deref_trait",
+ "tinystr",
+ "writeable",
+ "yoke",
+ "zerofrom",
+ "zerovec",
+]
+
+[[package]]
+name = "icu_provider_macros"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.96",
+]
+
[[package]]
name = "ident_case"
version = "1.0.1"
@@ -5391,12 +5520,23 @@ checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39"
[[package]]
name = "idna"
-version = "0.5.0"
+version = "1.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6"
+checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e"
dependencies = [
- "unicode-bidi",
- "unicode-normalization",
+ "idna_adapter",
+ "smallvec",
+ "utf8_iter",
+]
+
+[[package]]
+name = "idna_adapter"
+version = "1.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71"
+dependencies = [
+ "icu_normalizer",
+ "icu_properties",
]
[[package]]
@@ -5939,7 +6079,7 @@ dependencies = [
"proc-macro2",
"quote",
"serde_json",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -6000,7 +6140,7 @@ dependencies = [
"proc-macro2",
"quote",
"regex",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -6020,9 +6160,9 @@ checksum = "0c2cdeb66e45e9f36bfad5bbdb4d2384e70936afbee843c6f6543f0c551ebb25"
[[package]]
name = "lexical-core"
-version = "1.0.2"
+version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0431c65b318a590c1de6b8fd6e72798c92291d27762d94c9e6c37ed7a73d8458"
+checksum = "b765c31809609075565a70b4b71402281283aeda7ecaf4818ac14a7b2ade8958"
dependencies = [
"lexical-parse-float",
"lexical-parse-integer",
@@ -6033,9 +6173,9 @@ dependencies = [
[[package]]
name = "lexical-parse-float"
-version = "1.0.2"
+version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "eb17a4bdb9b418051aa59d41d65b1c9be5affab314a872e5ad7f06231fb3b4e0"
+checksum = "de6f9cb01fb0b08060209a057c048fcbab8717b4c1ecd2eac66ebfe39a65b0f2"
dependencies = [
"lexical-parse-integer",
"lexical-util",
@@ -6044,9 +6184,9 @@ dependencies = [
[[package]]
name = "lexical-parse-integer"
-version = "1.0.2"
+version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5df98f4a4ab53bf8b175b363a34c7af608fe31f93cc1fb1bf07130622ca4ef61"
+checksum = "72207aae22fc0a121ba7b6d479e42cbfea549af1479c3f3a4f12c70dd66df12e"
dependencies = [
"lexical-util",
"static_assertions",
@@ -6054,18 +6194,18 @@ dependencies = [
[[package]]
name = "lexical-util"
-version = "1.0.3"
+version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "85314db53332e5c192b6bca611fb10c114a80d1b831ddac0af1e9be1b9232ca0"
+checksum = "5a82e24bf537fd24c177ffbbdc6ebcc8d54732c35b50a3f28cc3f4e4c949a0b3"
dependencies = [
"static_assertions",
]
[[package]]
name = "lexical-write-float"
-version = "1.0.2"
+version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6e7c3ad4e37db81c1cbe7cf34610340adc09c322871972f74877a712abc6c809"
+checksum = "c5afc668a27f460fb45a81a757b6bf2f43c2d7e30cb5a2dcd3abf294c78d62bd"
dependencies = [
"lexical-util",
"lexical-write-integer",
@@ -6074,9 +6214,9 @@ dependencies = [
[[package]]
name = "lexical-write-integer"
-version = "1.0.2"
+version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "eb89e9f6958b83258afa3deed90b5de9ef68eef090ad5086c791cd2345610162"
+checksum = "629ddff1a914a836fb245616a7888b62903aae58fa771e1d83943035efa0f978"
dependencies = [
"lexical-util",
"static_assertions",
@@ -6084,9 +6224,9 @@ dependencies = [
[[package]]
name = "libc"
-version = "0.2.169"
+version = "0.2.159"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a"
+checksum = "561d97a539a36e26a9a5fad1ea11a3039a67714694aaa379433e580854bc3dc5"
[[package]]
name = "libfuzzer-sys"
@@ -6182,6 +6322,12 @@ version = "0.4.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89"
+[[package]]
+name = "litemap"
+version = "0.7.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4ee93343901ab17bd981295f2cf0026d4ad018c7c31ba84549a4ddbb47a45104"
+
[[package]]
name = "local-ip-address"
version = "0.6.3"
@@ -6920,7 +7066,7 @@ dependencies = [
"proc-macro-error",
"proc-macro2",
"quote",
- "syn 2.0.90",
+ "syn 2.0.96",
"termcolor",
"thiserror 1.0.64",
]
@@ -6938,7 +7084,7 @@ dependencies = [
"proc-macro-error",
"proc-macro2",
"quote",
- "syn 2.0.90",
+ "syn 2.0.96",
"termcolor",
"thiserror 1.0.64",
]
@@ -7112,7 +7258,7 @@ checksum = "254a5372af8fc138e36684761d3c0cdb758a4410e938babcff1c860ce14ddbfc"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -7132,9 +7278,9 @@ checksum = "27b02d87554356db9e9a873add8782d4ea6e3e58ea071a9adb9a2e8ddb884a8b"
[[package]]
name = "neli"
-version = "0.6.5"
+version = "0.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "93062a0dce6da2517ea35f301dfc88184ce18d3601ec786a727a87bf535deca9"
+checksum = "1100229e06604150b3becd61a4965d5c70f3be1759544ea7274166f4be41ef43"
dependencies = [
"byteorder",
"libc",
@@ -7144,9 +7290,9 @@ dependencies = [
[[package]]
name = "neli-proc-macros"
-version = "0.1.4"
+version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0c8034b7fbb6f9455b2a96c19e6edf8dc9fc34c70449938d8ee3b4df363f61fe"
+checksum = "c168194d373b1e134786274020dae7fc5513d565ea2ebb9bc9ff17ffb69106d4"
dependencies = [
"either",
"proc-macro2",
@@ -7321,7 +7467,7 @@ checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -7441,9 +7587,9 @@ dependencies = [
[[package]]
name = "object_store"
-version = "0.11.1"
+version = "0.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6eb4c22c6154a1e759d7099f9ffad7cc5ef8245f9efbab4a41b92623079c82f3"
+checksum = "3cfccb68961a56facde1163f9319e0d15743352344e7808a11795fb99698dcaf"
dependencies = [
"async-trait",
"bytes",
@@ -7738,7 +7884,7 @@ dependencies = [
"bytemuck",
"bytes",
"chrono",
- "chrono-tz 0.10.0",
+ "chrono-tz 0.10.1",
"fallible-streaming-iterator",
"flate2",
"futures",
@@ -7912,9 +8058,9 @@ dependencies = [
[[package]]
name = "parquet"
-version = "53.3.0"
+version = "53.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2b449890367085eb65d7d3321540abc3d7babbd179ce31df0016e90719114191"
+checksum = "8957c0c95a6a1804f3e51a18f69df29be53856a8c5768cc9b6d00fcafcd2917c"
dependencies = [
"ahash 0.8.11",
"arrow-array",
@@ -8105,7 +8251,7 @@ dependencies = [
"pest_meta",
"proc-macro2",
"quote",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -8147,7 +8293,7 @@ dependencies = [
"rand",
"ring",
"rust_decimal",
- "thiserror 2.0.11",
+ "thiserror 2.0.6",
"tokio",
"tokio-rustls 0.26.0",
"tokio-util",
@@ -8208,7 +8354,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -8513,12 +8659,12 @@ dependencies = [
[[package]]
name = "prettyplease"
-version = "0.2.25"
+version = "0.2.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033"
+checksum = "6924ced06e1f7dfe3fa48d57b9f74f55d8915f5036121bef647ef4b204895fac"
dependencies = [
"proc-macro2",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -8711,7 +8857,7 @@ dependencies = [
"prost 0.12.6",
"prost-types 0.12.6",
"regex",
- "syn 2.0.90",
+ "syn 2.0.96",
"tempfile",
]
@@ -8732,7 +8878,7 @@ dependencies = [
"prost 0.13.3",
"prost-types 0.13.3",
"regex",
- "syn 2.0.90",
+ "syn 2.0.96",
"tempfile",
]
@@ -8759,7 +8905,7 @@ dependencies = [
"itertools 0.12.1",
"proc-macro2",
"quote",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -8772,7 +8918,7 @@ dependencies = [
"itertools 0.11.0",
"proc-macro2",
"quote",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -9229,7 +9375,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "76009fbe0614077fc1a2ce255e3a1881a2e3a3527097d5dc6d8212c585e7e38b"
dependencies = [
"quote",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -9278,7 +9424,7 @@ checksum = "bcc303e793d3734489387d205e9b186fac9c6cfacedd98cbb2e8a5943595f3e6"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -9343,11 +9489,11 @@ dependencies = [
[[package]]
name = "regress"
-version = "0.10.1"
+version = "0.10.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1541daf4e4ed43a0922b7969bdc2170178bcacc5dabf7e39bc508a9fa3953a7a"
+checksum = "78ef7fa9ed0256d64a688a3747d0fef7a88851c18a5e1d57f115f38ec2e09366"
dependencies = [
- "hashbrown 0.14.5",
+ "hashbrown 0.15.2",
"memchr",
]
@@ -9399,9 +9545,9 @@ dependencies = [
[[package]]
name = "reqwest"
-version = "0.12.8"
+version = "0.12.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f713147fbe92361e52392c73b8c9e48c04c6625bce969ef54dc901e58e042a7b"
+checksum = "a77c62af46e79de0a562e1a9849205ffcb7fc1238876e9bd743357570e04046f"
dependencies = [
"base64 0.22.1",
"bytes",
@@ -9626,7 +9772,7 @@ dependencies = [
"regex",
"relative-path",
"rustc_version",
- "syn 2.0.90",
+ "syn 2.0.96",
"unicode-ident",
]
@@ -9638,7 +9784,7 @@ checksum = "b3a8fb4672e840a587a66fc577a5491375df51ddb88f2a2c2a792598c326fe14"
dependencies = [
"quote",
"rand",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -9661,7 +9807,7 @@ dependencies = [
"proc-macro2",
"quote",
"rust-embed-utils",
- "syn 2.0.90",
+ "syn 2.0.96",
"walkdir",
]
@@ -10068,7 +10214,7 @@ dependencies = [
"proc-macro2",
"quote",
"serde_derive_internals",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -10119,7 +10265,7 @@ dependencies = [
"heck 0.4.1",
"proc-macro2",
"quote",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -10172,9 +10318,9 @@ checksum = "a3f0bf26fd526d2a95683cd0f87bf103b8539e2ca1ef48ce002d67aad59aa0b4"
[[package]]
name = "serde"
-version = "1.0.215"
+version = "1.0.217"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6513c1ad0b11a9376da888e3e0baa0077f1aed55c17f50e7b2397136129fb88f"
+checksum = "02fc4265df13d6fa1d00ecff087228cc0a2b5f3c0e87e258d8b94a156e984c70"
dependencies = [
"serde_derive",
]
@@ -10191,13 +10337,13 @@ dependencies = [
[[package]]
name = "serde_derive"
-version = "1.0.215"
+version = "1.0.217"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0"
+checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -10208,14 +10354,14 @@ checksum = "18d26a20a969b9e3fdf2fc2d9f21eda6c40e2de84c9408bb5d3b05d499aae711"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
name = "serde_json"
-version = "1.0.133"
+version = "1.0.137"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c7fceb2473b9166b2294ef05efcb65a3db80803f0b03ef86a5fc88a2b85ee377"
+checksum = "930cfb6e6abf99298aaad7d29abbef7a9999a9a8806a40088f55f0dcec03146b"
dependencies = [
"indexmap 2.6.0",
"itoa",
@@ -10242,7 +10388,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -10263,7 +10409,7 @@ dependencies = [
"proc-macro2",
"quote",
"serde",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -10305,7 +10451,7 @@ dependencies = [
"darling 0.20.10",
"proc-macro2",
"quote",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -10683,7 +10829,7 @@ dependencies = [
"heck 0.5.0",
"proc-macro2",
"quote",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -10870,7 +11016,7 @@ checksum = "01b2e185515564f15375f593fb966b5718bc624ba77fe49fa4616ad619690554"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -10880,7 +11026,7 @@ source = "git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=71dd86058d2af
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -10925,7 +11071,7 @@ dependencies = [
"serde_json",
"sha2",
"smallvec",
- "thiserror 2.0.11",
+ "thiserror 2.0.6",
"tokio",
"tokio-stream",
"tracing",
@@ -10943,7 +11089,7 @@ dependencies = [
"quote",
"sqlx-core",
"sqlx-macros-core",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -10966,7 +11112,7 @@ dependencies = [
"sqlx-mysql",
"sqlx-postgres",
"sqlx-sqlite",
- "syn 2.0.90",
+ "syn 2.0.96",
"tempfile",
"tokio",
"url",
@@ -11010,7 +11156,7 @@ dependencies = [
"smallvec",
"sqlx-core",
"stringprep",
- "thiserror 2.0.11",
+ "thiserror 2.0.6",
"tracing",
"whoami",
]
@@ -11048,7 +11194,7 @@ dependencies = [
"smallvec",
"sqlx-core",
"stringprep",
- "thiserror 2.0.11",
+ "thiserror 2.0.6",
"tracing",
"whoami",
]
@@ -11219,7 +11365,7 @@ dependencies = [
"proc-macro2",
"quote",
"rustversion",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -11232,7 +11378,7 @@ dependencies = [
"proc-macro2",
"quote",
"rustversion",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -11282,7 +11428,7 @@ dependencies = [
"serde",
"serde_json",
"serde_yaml",
- "syn 2.0.90",
+ "syn 2.0.96",
"typify 0.1.0",
"walkdir",
]
@@ -11301,13 +11447,13 @@ dependencies = [
"prost 0.13.3",
"prost-build 0.13.3",
"prost-types 0.13.3",
- "regress 0.10.1",
+ "regress 0.10.3",
"schemars",
"semver",
"serde",
"serde_json",
"serde_yaml",
- "syn 2.0.90",
+ "syn 2.0.96",
"typify 0.2.0",
"walkdir",
]
@@ -11354,9 +11500,9 @@ dependencies = [
[[package]]
name = "syn"
-version = "2.0.90"
+version = "2.0.96"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "919d3b74a5dd0ccd15aeb8f93e7006bd9e14c295087c9896a110f490752bcf31"
+checksum = "d5d0adab1ae378d7f53bdebc67a39f1f151407ef230f0ce2883572f5d8985c80"
dependencies = [
"proc-macro2",
"quote",
@@ -11382,7 +11528,7 @@ dependencies = [
"proc-macro-error",
"proc-macro2",
"quote",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -11400,6 +11546,17 @@ dependencies = [
"futures-core",
]
+[[package]]
+name = "synstructure"
+version = "0.13.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.96",
+]
+
[[package]]
name = "sysinfo"
version = "0.30.13"
@@ -11818,11 +11975,11 @@ dependencies = [
[[package]]
name = "thiserror"
-version = "2.0.11"
+version = "2.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d452f284b73e6d76dd36758a0c8684b1d5be31f92b89d07fd5822175732206fc"
+checksum = "8fec2a1820ebd077e2b90c4df007bebf344cd394098a13c563957d0afc83ea47"
dependencies = [
- "thiserror-impl 2.0.11",
+ "thiserror-impl 2.0.6",
]
[[package]]
@@ -11833,18 +11990,18 @@ checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
name = "thiserror-impl"
-version = "2.0.11"
+version = "2.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "26afc1baea8a989337eeb52b6e72a039780ce45c3edfcc9c5b9d112feeb173c2"
+checksum = "d65750cab40f4ff1929fb1ba509e9914eb756131cef4210da8d5d700d26f6312"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -11963,6 +12120,16 @@ dependencies = [
"log",
]
+[[package]]
+name = "tinystr"
+version = "0.7.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f"
+dependencies = [
+ "displaydoc",
+ "zerovec",
+]
+
[[package]]
name = "tinytemplate"
version = "1.2.1"
@@ -11990,9 +12157,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
[[package]]
name = "tokio"
-version = "1.40.0"
+version = "1.42.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e2b070231665d27ad9ec9b8df639893f46727666c6767db40317fbe920a5d998"
+checksum = "5cec9b21b0450273377fc97bd4c33a8acffc8c996c987a7c5b319a0083707551"
dependencies = [
"backtrace",
"bytes",
@@ -12025,7 +12192,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -12279,7 +12446,7 @@ dependencies = [
"proc-macro2",
"prost-build 0.12.6",
"quote",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -12293,7 +12460,7 @@ dependencies = [
"prost-build 0.13.3",
"prost-types 0.13.3",
"quote",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -12443,7 +12610,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -12694,7 +12861,7 @@ checksum = "70b20a22c42c8f1cd23ce5e34f165d4d37038f5b663ad20fb6adbdf029172483"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -12732,7 +12899,7 @@ dependencies = [
"semver",
"serde",
"serde_json",
- "syn 2.0.90",
+ "syn 2.0.96",
"thiserror 1.0.64",
"unicode-ident",
]
@@ -12747,12 +12914,12 @@ dependencies = [
"log",
"proc-macro2",
"quote",
- "regress 0.10.1",
+ "regress 0.10.3",
"schemars",
"semver",
"serde",
"serde_json",
- "syn 2.0.90",
+ "syn 2.0.96",
"thiserror 1.0.64",
"unicode-ident",
]
@@ -12770,7 +12937,7 @@ dependencies = [
"serde",
"serde_json",
"serde_tokenstream",
- "syn 2.0.90",
+ "syn 2.0.96",
"typify-impl 0.1.0",
]
@@ -12787,7 +12954,7 @@ dependencies = [
"serde",
"serde_json",
"serde_tokenstream",
- "syn 2.0.90",
+ "syn 2.0.96",
"typify-impl 0.2.0",
]
@@ -12894,9 +13061,9 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1"
[[package]]
name = "url"
-version = "2.5.2"
+version = "2.5.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c"
+checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60"
dependencies = [
"form_urlencoded",
"idna",
@@ -12909,6 +13076,12 @@ version = "2.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da"
+[[package]]
+name = "utf16_iter"
+version = "1.0.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246"
+
[[package]]
name = "utf8-ranges"
version = "1.0.5"
@@ -12921,6 +13094,12 @@ version = "0.1.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "86bd8d4e895da8537e5315b8254664e6b769c4ff3db18321b297a1e7004392e3"
+[[package]]
+name = "utf8_iter"
+version = "1.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be"
+
[[package]]
name = "utf8parse"
version = "0.2.2"
@@ -13045,7 +13224,7 @@ dependencies = [
"once_cell",
"proc-macro2",
"quote",
- "syn 2.0.90",
+ "syn 2.0.96",
"wasm-bindgen-shared",
]
@@ -13079,7 +13258,7 @@ checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.90",
+ "syn 2.0.96",
"wasm-bindgen-backend",
"wasm-bindgen-shared",
]
@@ -13289,7 +13468,7 @@ checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -13300,7 +13479,7 @@ checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.90",
+ "syn 2.0.96",
]
[[package]]
@@ -13577,6 +13756,18 @@ dependencies = [
"thiserror 1.0.64",
]
+[[package]]
+name = "write16"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936"
+
+[[package]]
+name = "writeable"
+version = "0.5.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51"
+
[[package]]
name = "wyz"
version = "0.5.1"
@@ -13607,9 +13798,9 @@ dependencies = [
[[package]]
name = "xattr"
-version = "1.4.0"
+version = "1.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e105d177a3871454f754b33bb0ee637ecaaac997446375fd3e5d43a2ed00c909"
+checksum = "8da84f1a25939b27f6820d92aed108f83ff920fdf11a7b19366c27c4cda81d4f"
dependencies = [
"libc",
"linux-raw-sys",
@@ -13640,6 +13831,30 @@ version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049"
+[[package]]
+name = "yoke"
+version = "0.7.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "120e6aef9aa629e3d4f52dc8cc43a015c7724194c97dfaf45180d2daf2b77f40"
+dependencies = [
+ "serde",
+ "stable_deref_trait",
+ "yoke-derive",
+ "zerofrom",
+]
+
+[[package]]
+name = "yoke-derive"
+version = "0.7.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.96",
+ "synstructure",
+]
+
[[package]]
name = "zerocopy"
version = "0.7.35"
@@ -13658,7 +13873,28 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.90",
+ "syn 2.0.96",
+]
+
+[[package]]
+name = "zerofrom"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cff3ee08c995dee1859d998dea82f7374f2826091dd9cd47def953cae446cd2e"
+dependencies = [
+ "zerofrom-derive",
+]
+
+[[package]]
+name = "zerofrom-derive"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.96",
+ "synstructure",
]
[[package]]
@@ -13678,7 +13914,29 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.90",
+ "syn 2.0.96",
+]
+
+[[package]]
+name = "zerovec"
+version = "0.10.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079"
+dependencies = [
+ "yoke",
+ "zerofrom",
+ "zerovec-derive",
+]
+
+[[package]]
+name = "zerovec-derive"
+version = "0.10.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.96",
]
[[package]]
diff --git a/src/frontend/src/instance/otlp.rs b/src/frontend/src/instance/otlp.rs
index 989c6c4348fc..fff075cac6a1 100644
--- a/src/frontend/src/instance/otlp.rs
+++ b/src/frontend/src/instance/otlp.rs
@@ -20,11 +20,11 @@ use common_telemetry::tracing;
use opentelemetry_proto::tonic::collector::logs::v1::ExportLogsServiceRequest;
use opentelemetry_proto::tonic::collector::metrics::v1::ExportMetricsServiceRequest;
use opentelemetry_proto::tonic::collector::trace::v1::ExportTraceServiceRequest;
-use pipeline::PipelineWay;
+use pipeline::{GreptimePipelineParams, PipelineWay};
use servers::error::{self, AuthSnafu, InFlightWriteBytesExceededSnafu, Result as ServerResult};
use servers::interceptor::{OpenTelemetryProtocolInterceptor, OpenTelemetryProtocolInterceptorRef};
use servers::otlp;
-use servers::query_handler::OpenTelemetryProtocolHandler;
+use servers::query_handler::{OpenTelemetryProtocolHandler, PipelineHandlerRef};
use session::context::QueryContextRef;
use snafu::ResultExt;
@@ -112,8 +112,10 @@ impl OpenTelemetryProtocolHandler for Instance {
#[tracing::instrument(skip_all)]
async fn logs(
&self,
+ pipeline_handler: PipelineHandlerRef,
request: ExportLogsServiceRequest,
pipeline: PipelineWay,
+ pipeline_params: GreptimePipelineParams,
table_name: String,
ctx: QueryContextRef,
) -> ServerResult<Output> {
@@ -128,7 +130,15 @@ impl OpenTelemetryProtocolHandler for Instance {
.get::<OpenTelemetryProtocolInterceptorRef<servers::error::Error>>();
interceptor_ref.pre_execute(ctx.clone())?;
- let (requests, rows) = otlp::logs::to_grpc_insert_requests(request, pipeline, table_name)?;
+ let (requests, rows) = otlp::logs::to_grpc_insert_requests(
+ request,
+ pipeline,
+ pipeline_params,
+ table_name,
+ &ctx,
+ pipeline_handler,
+ )
+ .await?;
let _guard = if let Some(limiter) = &self.limiter {
let result = limiter.limit_row_inserts(&requests);
diff --git a/src/pipeline/benches/processor.rs b/src/pipeline/benches/processor.rs
index 8cf221af5b10..ba7240b9d527 100644
--- a/src/pipeline/benches/processor.rs
+++ b/src/pipeline/benches/processor.rs
@@ -13,21 +13,22 @@
// limitations under the License.
use criterion::{black_box, criterion_group, criterion_main, Criterion};
-use pipeline::{parse, Content, GreptimeTransformer, Pipeline, Result};
+use pipeline::{json_to_intermediate_state, parse, Content, GreptimeTransformer, Pipeline, Result};
use serde_json::{Deserializer, Value};
fn processor_mut(
pipeline: &Pipeline<GreptimeTransformer>,
input_values: Vec<Value>,
) -> Result<Vec<greptime_proto::v1::Row>> {
- let mut payload = pipeline.init_intermediate_state();
let mut result = Vec::with_capacity(input_values.len());
for v in input_values {
- pipeline.prepare(v, &mut payload)?;
- let r = pipeline.exec_mut(&mut payload)?;
+ let mut payload = json_to_intermediate_state(v).unwrap();
+ let r = pipeline
+ .exec_mut(&mut payload)?
+ .into_transformed()
+            .expect("expected transformed result");
result.push(r);
- pipeline.reset_intermediate_state(&mut payload);
}
Ok(result)
diff --git a/src/pipeline/src/dispatcher.rs b/src/pipeline/src/dispatcher.rs
index f16fd7e57fb2..a1c208e85094 100644
--- a/src/pipeline/src/dispatcher.rs
+++ b/src/pipeline/src/dispatcher.rs
@@ -12,18 +12,20 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::collections::BTreeMap;
+
+use common_telemetry::debug;
use snafu::OptionExt;
use yaml_rust::Yaml;
-use crate::etl::error::{Error, Result};
-use crate::etl_error::{
- FieldRequiredForDispatcherSnafu, TablePartRequiredForDispatcherRuleSnafu,
+use crate::etl::error::{
+ Error, FieldRequiredForDispatcherSnafu, Result, TableSuffixRequiredForDispatcherRuleSnafu,
ValueRequiredForDispatcherRuleSnafu,
};
use crate::Value;
const FIELD: &str = "field";
-const TABLE_PARTIAL: &str = "table_part";
+const TABLE_SUFFIX: &str = "table_suffix";
const PIPELINE: &str = "pipeline";
const VALUE: &str = "value";
const RULES: &str = "rules";
@@ -39,10 +41,10 @@ const RULES: &str = "rules";
/// rules:
/// - value: http
/// pipeline: http_pipeline
-/// table_part: http_log
+/// table_suffix: http_log
/// - value: db
/// pipeline: db_pipeline
-/// table_part: db_log
+/// table_suffix: db_log
/// ```
///
/// If none of the rules match the value, this pipeline will continue to process
@@ -58,12 +60,12 @@ pub(crate) struct Dispatcher {
/// - `value`: for pattern matching
/// - `pipeline`: the pipeline to call, if it's unspecified, we use default
/// `greptime_identity`
-/// - `table_part`: the table name segment that we use to construct full table
+/// - `table_suffix`: the table name segment that we use to construct full table
/// name
#[derive(Debug, PartialEq)]
pub(crate) struct Rule {
pub value: Value,
- pub table_part: String,
+ pub table_suffix: String,
pub pipeline: Option<String>,
}
@@ -80,10 +82,11 @@ impl TryFrom<&Yaml> for Dispatcher {
rules
.iter()
.map(|rule| {
- let table_part = rule[TABLE_PARTIAL]
+ let table_part = rule[TABLE_SUFFIX]
.as_str()
.map(|s| s.to_string())
- .context(TablePartRequiredForDispatcherRuleSnafu)?;
+ .context(TableSuffixRequiredForDispatcherRuleSnafu)?;
+
let pipeline = rule[PIPELINE].as_str().map(|s| s.to_string());
if rule[VALUE].is_badvalue() {
@@ -93,7 +96,7 @@ impl TryFrom<&Yaml> for Dispatcher {
Ok(Rule {
value,
- table_part,
+ table_suffix: table_part,
pipeline,
})
})
@@ -105,3 +108,21 @@ impl TryFrom<&Yaml> for Dispatcher {
Ok(Dispatcher { field, rules })
}
}
+
+impl Dispatcher {
+    /// Execute the dispatcher and return the matched rule, if any
+ pub(crate) fn exec(&self, data: &BTreeMap<String, Value>) -> Option<&Rule> {
+ if let Some(value) = data.get(&self.field) {
+ for rule in &self.rules {
+ if rule.value == *value {
+ return Some(rule);
+ }
+ }
+
+ None
+ } else {
+ debug!("field {} not found in keys {:?}", &self.field, data.keys());
+ None
+ }
+ }
+}
diff --git a/src/pipeline/src/etl.rs b/src/pipeline/src/etl.rs
index 08ce929fd61d..deee21d8bbd3 100644
--- a/src/pipeline/src/etl.rs
+++ b/src/pipeline/src/etl.rs
@@ -20,18 +20,22 @@ pub mod processor;
pub mod transform;
pub mod value;
-use ahash::HashSet;
-use common_telemetry::debug;
-use error::{IntermediateKeyIndexSnafu, PrepareValueMustBeObjectSnafu, YamlLoadSnafu};
+use std::collections::BTreeMap;
+use std::sync::Arc;
+
+use error::{
+ IntermediateKeyIndexSnafu, PrepareValueMustBeObjectSnafu, YamlLoadSnafu, YamlParseSnafu,
+};
use itertools::Itertools;
-use processor::{Processor, ProcessorBuilder, Processors};
-use snafu::{OptionExt, ResultExt};
-use transform::{TransformBuilders, Transformer, Transforms};
+use processor::{IntermediateStatus, Processor, Processors};
+use snafu::{ensure, OptionExt, ResultExt};
+use transform::{Transformer, Transforms};
use value::Value;
use yaml_rust::YamlLoader;
-use crate::dispatcher::Dispatcher;
+use crate::dispatcher::{Dispatcher, Rule};
use crate::etl::error::Result;
+use crate::{GreptimeTransformer, PipelineVersion};
const DESCRIPTION: &str = "description";
const PROCESSORS: &str = "processors";
@@ -52,103 +56,23 @@ where
Content::Yaml(str) => {
let docs = YamlLoader::load_from_str(str).context(YamlLoadSnafu)?;
+ ensure!(docs.len() == 1, YamlParseSnafu);
+
let doc = &docs[0];
let description = doc[DESCRIPTION].as_str().map(|s| s.to_string());
- let processor_builder_list = if let Some(v) = doc[PROCESSORS].as_vec() {
+ let processors = if let Some(v) = doc[PROCESSORS].as_vec() {
v.try_into()?
} else {
- processor::ProcessorBuilderList::default()
- };
-
- let transform_builders =
- if let Some(v) = doc[TRANSFORMS].as_vec().or(doc[TRANSFORM].as_vec()) {
- v.try_into()?
- } else {
- TransformBuilders::default()
- };
-
- let processors_required_keys = &processor_builder_list.input_keys;
- let processors_output_keys = &processor_builder_list.output_keys;
- let processors_required_original_keys = &processor_builder_list.original_input_keys;
-
- debug!(
- "processors_required_original_keys: {:?}",
- processors_required_original_keys
- );
- debug!("processors_required_keys: {:?}", processors_required_keys);
- debug!("processors_output_keys: {:?}", processors_output_keys);
-
- let transforms_required_keys = &transform_builders.required_keys;
- let mut tr_keys = Vec::with_capacity(50);
- for key in transforms_required_keys.iter() {
- if !processors_output_keys.contains(key)
- && !processors_required_original_keys.contains(key)
- {
- tr_keys.push(key.clone());
- }
- }
-
- let mut required_keys = processors_required_original_keys.clone();
-
- required_keys.append(&mut tr_keys);
- required_keys.sort();
-
- debug!("required_keys: {:?}", required_keys);
-
- // intermediate keys are the keys that all processor and transformer required
- let ordered_intermediate_keys: Vec<String> = [
- processors_required_keys,
- transforms_required_keys,
- processors_output_keys,
- ]
- .iter()
- .flat_map(|l| l.iter())
- .collect::<HashSet<&String>>()
- .into_iter()
- .sorted()
- .cloned()
- .collect_vec();
-
- let mut final_intermediate_keys = Vec::with_capacity(ordered_intermediate_keys.len());
- let mut intermediate_keys_exclude_original =
- Vec::with_capacity(ordered_intermediate_keys.len());
-
- for key_name in ordered_intermediate_keys.iter() {
- if required_keys.contains(key_name) {
- final_intermediate_keys.push(key_name.clone());
- } else {
- intermediate_keys_exclude_original.push(key_name.clone());
- }
- }
-
- final_intermediate_keys.extend(intermediate_keys_exclude_original);
-
- let output_keys = transform_builders.output_keys.clone();
-
- let processors_kind_list = processor_builder_list
- .processor_builders
- .into_iter()
- .map(|builder| builder.build(&final_intermediate_keys))
- .collect::<Result<Vec<_>>>()?;
- let processors = Processors {
- processors: processors_kind_list,
- required_keys: processors_required_keys.clone(),
- output_keys: processors_output_keys.clone(),
- required_original_keys: processors_required_original_keys.clone(),
+ Processors::default()
};
- let transfor_list = transform_builders
- .builders
- .into_iter()
- .map(|builder| builder.build(&final_intermediate_keys, &output_keys))
- .collect::<Result<Vec<_>>>()?;
-
- let transformers = Transforms {
- transforms: transfor_list,
- required_keys: transforms_required_keys.clone(),
- output_keys: output_keys.clone(),
+ let transformers = if let Some(v) = doc[TRANSFORMS].as_vec().or(doc[TRANSFORM].as_vec())
+ {
+ v.try_into()?
+ } else {
+ Transforms::default()
};
let transformer = T::new(transformers)?;
@@ -164,9 +88,6 @@ where
processors,
transformer,
dispatcher,
- required_keys,
- output_keys,
- intermediate_keys: final_intermediate_keys,
})
}
Content::Json(_) => unimplemented!(),
@@ -182,97 +103,98 @@ where
processors: processor::Processors,
dispatcher: Option<Dispatcher>,
transformer: T,
- /// required keys for the preprocessing from map data from user
- /// include all processor required and transformer required keys
- required_keys: Vec<String>,
- /// all output keys from the transformer
- output_keys: Vec<String>,
- /// intermediate keys from the processors
- intermediate_keys: Vec<String>,
- // pub on_failure: processor::Processors,
}
-impl<T> Pipeline<T>
-where
- T: Transformer,
-{
- pub fn exec_mut(&self, val: &mut Vec<Value>) -> Result<T::VecOutput> {
- for processor in self.processors.iter() {
- processor.exec_mut(val)?;
+/// Where the pipeline execution is dispatched to, with context information
+#[derive(Debug, Hash, PartialEq, Eq, Clone, PartialOrd, Ord)]
+pub struct DispatchedTo {
+ pub table_suffix: String,
+ pub pipeline: Option<String>,
+}
+
+impl From<&Rule> for DispatchedTo {
+ fn from(value: &Rule) -> Self {
+ DispatchedTo {
+ table_suffix: value.table_suffix.clone(),
+ pipeline: value.pipeline.clone(),
}
+ }
+}
- self.transformer.transform_mut(val)
+impl DispatchedTo {
+ /// Generate destination table name from input
+ pub fn dispatched_to_table_name(&self, original: &str) -> String {
+ format!("{}_{}", &original, self.table_suffix)
}
+}
- pub fn prepare_pipeline_value(&self, val: Value, result: &mut [Value]) -> Result<()> {
- match val {
- Value::Map(map) => {
- let mut search_from = 0;
- // because of the key in the json map is ordered
- for (payload_key, payload_value) in map.values.into_iter() {
- if search_from >= self.required_keys.len() {
- break;
- }
+/// The result of pipeline execution
+#[derive(Debug)]
+pub enum PipelineExecOutput<O> {
+ Transformed(O),
+ DispatchedTo(DispatchedTo),
+}
- // because of map key is ordered, required_keys is ordered too
- if let Some(pos) = self.required_keys[search_from..]
- .iter()
- .position(|k| k == &payload_key)
- {
- result[search_from + pos] = payload_value;
- // next search from is always after the current key
- search_from += pos;
- }
- }
- }
- Value::String(_) => {
- result[0] = val;
- }
- _ => {
- return PrepareValueMustBeObjectSnafu.fail();
- }
+impl<O> PipelineExecOutput<O> {
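+    /// Return the transformed output if the pipeline produced rows, or `None` if execution was dispatched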
+ pub fn into_transformed(self) -> Option<O> {
+ if let Self::Transformed(o) = self {
+ Some(o)
+ } else {
+ None
}
- Ok(())
}
- pub fn prepare(&self, val: serde_json::Value, result: &mut [Value]) -> Result<()> {
- match val {
- serde_json::Value::Object(map) => {
- let mut search_from = 0;
- // because of the key in the json map is ordered
- for (payload_key, payload_value) in map.into_iter() {
- if search_from >= self.required_keys.len() {
- break;
- }
+ pub fn into_dispatched(self) -> Option<DispatchedTo> {
+ if let Self::DispatchedTo(d) = self {
+ Some(d)
+ } else {
+ None
+ }
+ }
+}
- // because of map key is ordered, required_keys is ordered too
- if let Some(pos) = self.required_keys[search_from..]
- .iter()
- .position(|k| k == &payload_key)
- {
- result[search_from + pos] = payload_value.try_into()?;
- // next search from is always after the current key
- search_from += pos;
- }
- }
- }
- serde_json::Value::String(_) => {
- result[0] = val.try_into()?;
- }
- _ => {
- return PrepareValueMustBeObjectSnafu.fail();
+pub fn json_to_intermediate_state(val: serde_json::Value) -> Result<IntermediateStatus> {
+ match val {
+ serde_json::Value::Object(map) => {
+ let mut intermediate_state = BTreeMap::new();
+ for (k, v) in map {
+ intermediate_state.insert(k, Value::try_from(v)?);
}
+ Ok(intermediate_state)
}
- Ok(())
+ _ => PrepareValueMustBeObjectSnafu.fail(),
}
+}
- pub fn init_intermediate_state(&self) -> Vec<Value> {
- vec![Value::Null; self.intermediate_keys.len()]
- }
+pub fn json_array_to_intermediate_state(
+ val: Vec<serde_json::Value>,
+) -> Result<Vec<IntermediateStatus>> {
+ val.into_iter().map(json_to_intermediate_state).collect()
+}
- pub fn reset_intermediate_state(&self, result: &mut [Value]) {
- for i in result {
- *i = Value::Null;
+impl<T> Pipeline<T>
+where
+ T: Transformer,
+{
+ pub fn exec_mut(
+ &self,
+ val: &mut BTreeMap<String, Value>,
+ ) -> Result<PipelineExecOutput<T::VecOutput>> {
+ for processor in self.processors.iter() {
+ processor.exec_mut(val)?;
+ }
+
+ let matched_rule = self
+ .dispatcher
+ .as_ref()
+ .and_then(|dispatcher| dispatcher.exec(val));
+
+ match matched_rule {
+ None => self
+ .transformer
+ .transform_mut(val)
+ .map(PipelineExecOutput::Transformed),
+ Some(rule) => Ok(PipelineExecOutput::DispatchedTo(rule.into())),
}
}
@@ -284,21 +206,6 @@ where
&self.transformer
}
- /// Required fields in user-supplied data
- pub fn required_keys(&self) -> &Vec<String> {
- &self.required_keys
- }
-
- /// All output keys from the pipeline
- pub fn output_keys(&self) -> &Vec<String> {
- &self.output_keys
- }
-
- /// intermediate keys from the processors
- pub fn intermediate_keys(&self) -> &Vec<String> {
- &self.intermediate_keys
- }
-
pub fn schemas(&self) -> &Vec<greptime_proto::v1::ColumnSchema> {
self.transformer.schemas()
}
@@ -337,9 +244,29 @@ impl SelectInfo {
}
}
+pub const GREPTIME_INTERNAL_IDENTITY_PIPELINE_NAME: &str = "greptime_identity";
+
+/// Enum holding information about a pipeline: either the pipeline itself,
+/// or the information needed to retrieve a pipeline from `PipelineHandler`
+pub enum PipelineDefinition {
+ Resolved(Arc<Pipeline<GreptimeTransformer>>),
+ ByNameAndValue((String, PipelineVersion)),
+ GreptimeIdentityPipeline,
+}
+
+impl PipelineDefinition {
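+    /// Build a definition from a pipeline name, mapping the reserved identity name to the built-in identity pipeline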
+ pub fn from_name(name: &str, version: PipelineVersion) -> Self {
+ if name == GREPTIME_INTERNAL_IDENTITY_PIPELINE_NAME {
+ Self::GreptimeIdentityPipeline
+ } else {
+ Self::ByNameAndValue((name.to_owned(), version))
+ }
+ }
+}
+
pub enum PipelineWay {
- OtlpLog(Box<SelectInfo>),
- Custom(std::sync::Arc<Pipeline<crate::GreptimeTransformer>>),
+ OtlpLogDirect(Box<SelectInfo>),
+ Pipeline(PipelineDefinition),
}
#[cfg(test)]
@@ -354,33 +281,31 @@ mod tests {
#[test]
fn test_pipeline_prepare() {
let input_value_str = r#"
- {
- "my_field": "1,2",
- "foo": "bar"
- }
- "#;
+ {
+ "my_field": "1,2",
+ "foo": "bar"
+ }
+ "#;
let input_value: serde_json::Value = serde_json::from_str(input_value_str).unwrap();
let pipeline_yaml = r#"description: 'Pipeline for Apache Tomcat'
processors:
- - csv:
- field: my_field
- target_fields: field1, field2
+ - csv:
+ field: my_field
+ target_fields: field1, field2
transform:
- - field: field1
- type: uint32
- - field: field2
- type: uint32
-"#;
+ - field: field1
+ type: uint32
+ - field: field2
+ type: uint32
+ "#;
let pipeline: Pipeline<GreptimeTransformer> = parse(&Content::Yaml(pipeline_yaml)).unwrap();
- let mut payload = pipeline.init_intermediate_state();
- pipeline.prepare(input_value, &mut payload).unwrap();
- assert_eq!(&["my_field"].to_vec(), pipeline.required_keys());
- assert_eq!(
- payload,
- vec![Value::String("1,2".to_string()), Value::Null, Value::Null]
- );
- let result = pipeline.exec_mut(&mut payload).unwrap();
+ let mut payload = json_to_intermediate_state(input_value).unwrap();
+ let result = pipeline
+ .exec_mut(&mut payload)
+ .unwrap()
+ .into_transformed()
+ .unwrap();
assert_eq!(result.values[0].value_data, Some(ValueData::U32Value(1)));
assert_eq!(result.values[1].value_data, Some(ValueData::U32Value(2)));
@@ -396,40 +321,42 @@ transform:
fn test_dissect_pipeline() {
let message = r#"129.37.245.88 - meln1ks [01/Aug/2024:14:22:47 +0800] "PATCH /observability/metrics/production HTTP/1.0" 501 33085"#.to_string();
let pipeline_str = r#"processors:
- - dissect:
- fields:
- - message
- patterns:
- - "%{ip} %{?ignored} %{username} [%{ts}] \"%{method} %{path} %{proto}\" %{status} %{bytes}"
- - timestamp:
- fields:
- - ts
- formats:
- - "%d/%b/%Y:%H:%M:%S %z"
+ - dissect:
+ fields:
+ - message
+ patterns:
+ - "%{ip} %{?ignored} %{username} [%{ts}] \"%{method} %{path} %{proto}\" %{status} %{bytes}"
+ - timestamp:
+ fields:
+ - ts
+ formats:
+ - "%d/%b/%Y:%H:%M:%S %z"
transform:
- - fields:
- - ip
- - username
- - method
- - path
- - proto
- type: string
- - fields:
- - status
- type: uint16
- - fields:
- - bytes
- type: uint32
- - field: ts
- type: timestamp, ns
- index: time"#;
+ - fields:
+ - ip
+ - username
+ - method
+ - path
+ - proto
+ type: string
+ - fields:
+ - status
+ type: uint16
+ - fields:
+ - bytes
+ type: uint32
+ - field: ts
+ type: timestamp, ns
+ index: time"#;
let pipeline: Pipeline<GreptimeTransformer> = parse(&Content::Yaml(pipeline_str)).unwrap();
- let mut payload = pipeline.init_intermediate_state();
- pipeline
- .prepare(serde_json::Value::String(message), &mut payload)
+ let mut payload = BTreeMap::new();
+ payload.insert("message".to_string(), Value::String(message));
+ let result = pipeline
+ .exec_mut(&mut payload)
+ .unwrap()
+ .into_transformed()
.unwrap();
- let result = pipeline.exec_mut(&mut payload).unwrap();
let sechema = pipeline.schemas();
assert_eq!(sechema.len(), result.values.len());
@@ -480,35 +407,33 @@ transform:
#[test]
fn test_csv_pipeline() {
let input_value_str = r#"
- {
- "my_field": "1,2",
- "foo": "bar"
- }
- "#;
+ {
+ "my_field": "1,2",
+ "foo": "bar"
+ }
+ "#;
let input_value: serde_json::Value = serde_json::from_str(input_value_str).unwrap();
let pipeline_yaml = r#"
-description: Pipeline for Apache Tomcat
-processors:
- - csv:
- field: my_field
- target_fields: field1, field2
-transform:
- - field: field1
- type: uint32
- - field: field2
- type: uint32
-"#;
+ description: Pipeline for Apache Tomcat
+ processors:
+ - csv:
+ field: my_field
+ target_fields: field1, field2
+ transform:
+ - field: field1
+ type: uint32
+ - field: field2
+ type: uint32
+ "#;
let pipeline: Pipeline<GreptimeTransformer> = parse(&Content::Yaml(pipeline_yaml)).unwrap();
- let mut payload = pipeline.init_intermediate_state();
- pipeline.prepare(input_value, &mut payload).unwrap();
- assert_eq!(&["my_field"].to_vec(), pipeline.required_keys());
- assert_eq!(
- payload,
- vec![Value::String("1,2".to_string()), Value::Null, Value::Null]
- );
- let result = pipeline.exec_mut(&mut payload).unwrap();
+ let mut payload = json_to_intermediate_state(input_value).unwrap();
+ let result = pipeline
+ .exec_mut(&mut payload)
+ .unwrap()
+ .into_transformed()
+ .unwrap();
assert_eq!(result.values[0].value_data, Some(ValueData::U32Value(1)));
assert_eq!(result.values[1].value_data, Some(ValueData::U32Value(2)));
match &result.values[2].value_data {
@@ -522,33 +447,36 @@ transform:
#[test]
fn test_date_pipeline() {
let input_value_str = r#"
- {
- "my_field": "1,2",
- "foo": "bar",
- "test_time": "2014-5-17T04:34:56+00:00"
- }
- "#;
+ {
+ "my_field": "1,2",
+ "foo": "bar",
+ "test_time": "2014-5-17T04:34:56+00:00"
+ }
+ "#;
let input_value: serde_json::Value = serde_json::from_str(input_value_str).unwrap();
- let pipeline_yaml = r#"
----
+ let pipeline_yaml = r#"---
description: Pipeline for Apache Tomcat
processors:
- - timestamp:
- field: test_time
+ - timestamp:
+ field: test_time
transform:
- - field: test_time
- type: timestamp, ns
- index: time
-"#;
+ - field: test_time
+ type: timestamp, ns
+ index: time
+ "#;
let pipeline: Pipeline<GreptimeTransformer> = parse(&Content::Yaml(pipeline_yaml)).unwrap();
let schema = pipeline.schemas().clone();
- let mut result = pipeline.init_intermediate_state();
- pipeline.prepare(input_value, &mut result).unwrap();
- let row = pipeline.exec_mut(&mut result).unwrap();
+ let mut result = json_to_intermediate_state(input_value).unwrap();
+
+ let row = pipeline
+ .exec_mut(&mut result)
+ .unwrap()
+ .into_transformed()
+ .unwrap();
let output = Rows {
schema,
rows: vec![row],
@@ -584,9 +512,9 @@ dispatcher:
field: typename
rules:
- value: http
- table_part: http_events
+ table_suffix: http_events
- value: database
- table_part: db_events
+ table_suffix: db_events
pipeline: database_pipeline
transform:
@@ -604,7 +532,7 @@ transform:
dispatcher.rules[0],
crate::dispatcher::Rule {
value: Value::String("http".to_string()),
- table_part: "http_events".to_string(),
+ table_suffix: "http_events".to_string(),
pipeline: None
}
);
@@ -613,7 +541,7 @@ transform:
dispatcher.rules[1],
crate::dispatcher::Rule {
value: Value::String("database".to_string()),
- table_part: "db_events".to_string(),
+ table_suffix: "db_events".to_string(),
pipeline: Some("database_pipeline".to_string()),
}
);
@@ -628,9 +556,9 @@ dispatcher:
_field: typename
rules:
- value: http
- table_part: http_events
+ table_suffix: http_events
- value: database
- table_part: db_events
+ table_suffix: db_events
pipeline: database_pipeline
transform:
@@ -648,9 +576,9 @@ dispatcher:
field: typename
rules:
- value: http
- _table_part: http_events
+ _table_suffix: http_events
- value: database
- _table_part: db_events
+ _table_suffix: db_events
pipeline: database_pipeline
transform:
@@ -668,9 +596,9 @@ dispatcher:
field: typename
rules:
- _value: http
- table_part: http_events
+ table_suffix: http_events
- _value: database
- table_part: db_events
+ table_suffix: db_events
pipeline: database_pipeline
transform:
diff --git a/src/pipeline/src/etl/error.rs b/src/pipeline/src/etl/error.rs
index 999345fb1e2e..8365ad6ffb62 100644
--- a/src/pipeline/src/etl/error.rs
+++ b/src/pipeline/src/etl/error.rs
@@ -543,6 +543,11 @@ pub enum Error {
#[snafu(implicit)]
location: Location,
},
+ #[snafu(display("Yaml parse error."))]
+ YamlParse {
+ #[snafu(implicit)]
+ location: Location,
+ },
#[snafu(display("Prepare value must be an object"))]
PrepareValueMustBeObject {
#[snafu(implicit)]
@@ -590,9 +595,9 @@ pub enum Error {
},
#[snafu(display("Field is required for dispatcher"))]
FieldRequiredForDispatcher,
- #[snafu(display("table_part is required for dispatcher rule"))]
- TablePartRequiredForDispatcherRule,
- #[snafu(display("value is required for dispatcher rule"))]
+ #[snafu(display("Table_suffix is required for dispatcher rule"))]
+ TableSuffixRequiredForDispatcherRule,
+ #[snafu(display("Value is required for dispatcher rule"))]
ValueRequiredForDispatcherRule,
#[snafu(display(
"Reached max nested levels when flattening JSON object: {max_nested_levels}"
diff --git a/src/pipeline/src/etl/field.rs b/src/pipeline/src/etl/field.rs
index 10fa681f236c..dd4835ec9279 100644
--- a/src/pipeline/src/etl/field.rs
+++ b/src/pipeline/src/etl/field.rs
@@ -19,133 +19,12 @@ use snafu::OptionExt;
use super::error::{EmptyInputFieldSnafu, MissingInputFieldSnafu};
use crate::etl::error::{Error, Result};
-use crate::etl::find_key_index;
-
-/// Information about the input field including the name and index in intermediate keys.
-#[derive(Debug, Default, Clone)]
-pub struct InputFieldInfo {
- pub(crate) name: String,
- pub(crate) index: usize,
-}
-
-impl InputFieldInfo {
- /// Create a new input field info with the given field name and index.
- pub(crate) fn new(field: impl Into<String>, index: usize) -> Self {
- InputFieldInfo {
- name: field.into(),
- index,
- }
- }
-}
-
-/// Information about a field that has one input and one output.
-#[derive(Debug, Default, Clone)]
-pub struct OneInputOneOutputField {
- input: InputFieldInfo,
- output: Option<(String, usize)>,
-}
-
-impl OneInputOneOutputField {
- /// Create a new field with the given input and output.
- pub(crate) fn new(input: InputFieldInfo, output: (String, usize)) -> Self {
- OneInputOneOutputField {
- input,
- output: Some(output),
- }
- }
-
- /// Build a new field with the given processor kind, intermediate keys, input field, and target field.
- pub(crate) fn build(
- processor_kind: &str,
- intermediate_keys: &[String],
- input_field: &str,
- target_field: &str,
- ) -> Result<Self> {
- let input_index = find_key_index(intermediate_keys, input_field, processor_kind)?;
-
- let input_field_info = InputFieldInfo::new(input_field, input_index);
- let output_index = find_key_index(intermediate_keys, target_field, processor_kind)?;
- Ok(OneInputOneOutputField::new(
- input_field_info,
- (target_field.to_string(), output_index),
- ))
- }
-
- /// Get the input field information.
- pub(crate) fn input(&self) -> &InputFieldInfo {
- &self.input
- }
-
- /// Get the index of the input field.
- pub(crate) fn input_index(&self) -> usize {
- self.input.index
- }
-
- /// Get the name of the input field.
- pub(crate) fn input_name(&self) -> &str {
- &self.input.name
- }
-
- /// Get the index of the output field.
- pub(crate) fn output_index(&self) -> usize {
- *self.output().1
- }
-
- /// Get the name of the output field.
- pub(crate) fn output_name(&self) -> &str {
- self.output().0
- }
-
- /// Get the output field information.
- pub(crate) fn output(&self) -> (&String, &usize) {
- if let Some((name, index)) = &self.output {
- (name, index)
- } else {
- (&self.input.name, &self.input.index)
- }
- }
-}
-
-/// Information about a field that has one input and multiple outputs.
-#[derive(Debug, Default, Clone)]
-pub struct OneInputMultiOutputField {
- input: InputFieldInfo,
- /// Typically, processors that output multiple keys need to be distinguished by splicing the keys together.
- prefix: Option<String>,
-}
-
-impl OneInputMultiOutputField {
- /// Create a new field with the given input and prefix.
- pub(crate) fn new(input: InputFieldInfo, prefix: Option<String>) -> Self {
- OneInputMultiOutputField { input, prefix }
- }
-
- /// Get the input field information.
- pub(crate) fn input(&self) -> &InputFieldInfo {
- &self.input
- }
-
- /// Get the index of the input field.
- pub(crate) fn input_index(&self) -> usize {
- self.input.index
- }
-
- /// Get the name of the input field.
- pub(crate) fn input_name(&self) -> &str {
- &self.input.name
- }
-
- /// Get the prefix for the output fields.
- pub(crate) fn target_prefix(&self) -> &str {
- self.prefix.as_deref().unwrap_or(&self.input.name)
- }
-}
/// Raw processor-defined inputs and outputs
#[derive(Debug, Default, Clone)]
pub struct Field {
- pub(crate) input_field: String,
- pub(crate) target_field: Option<String>,
+ input_field: String,
+ target_field: Option<String>,
}
impl FromStr for Field {
@@ -194,6 +73,10 @@ impl Field {
pub(crate) fn target_or_input_field(&self) -> &str {
self.target_field.as_deref().unwrap_or(&self.input_field)
}
+
+ pub(crate) fn set_target_field(&mut self, target_field: Option<String>) {
+ self.target_field = target_field;
+ }
}
/// A collection of fields.
diff --git a/src/pipeline/src/etl/processor.rs b/src/pipeline/src/etl/processor.rs
index bf37f1f8ce7f..005feca3794e 100644
--- a/src/pipeline/src/etl/processor.rs
+++ b/src/pipeline/src/etl/processor.rs
@@ -27,32 +27,33 @@ pub mod regex;
pub mod timestamp;
pub mod urlencoding;
-use ahash::{HashSet, HashSetExt};
-use cmcd::{CmcdProcessor, CmcdProcessorBuilder};
-use csv::{CsvProcessor, CsvProcessorBuilder};
-use date::{DateProcessor, DateProcessorBuilder};
-use decolorize::{DecolorizeProcessor, DecolorizeProcessorBuilder};
-use digest::{DigestProcessor, DigestProcessorBuilder};
-use dissect::{DissectProcessor, DissectProcessorBuilder};
+use std::collections::BTreeMap;
+
+use cmcd::CmcdProcessor;
+use csv::CsvProcessor;
+use date::DateProcessor;
+use decolorize::DecolorizeProcessor;
+use digest::DigestProcessor;
+use dissect::DissectProcessor;
use enum_dispatch::enum_dispatch;
-use epoch::{EpochProcessor, EpochProcessorBuilder};
-use gsub::{GsubProcessor, GsubProcessorBuilder};
-use itertools::Itertools;
-use join::{JoinProcessor, JoinProcessorBuilder};
-use json_path::{JsonPathProcessor, JsonPathProcessorBuilder};
-use letter::{LetterProcessor, LetterProcessorBuilder};
-use regex::{RegexProcessor, RegexProcessorBuilder};
+use epoch::EpochProcessor;
+use gsub::GsubProcessor;
+use join::JoinProcessor;
+use json_path::JsonPathProcessor;
+use letter::LetterProcessor;
+use regex::RegexProcessor;
use snafu::{OptionExt, ResultExt};
-use timestamp::{TimestampProcessor, TimestampProcessorBuilder};
-use urlencoding::{UrlEncodingProcessor, UrlEncodingProcessorBuilder};
+use timestamp::TimestampProcessor;
+use urlencoding::UrlEncodingProcessor;
use super::error::{
FailedParseFieldFromStringSnafu, FieldMustBeTypeSnafu, ProcessorKeyMustBeStringSnafu,
- ProcessorMustBeMapSnafu, ProcessorMustHaveStringKeySnafu, UnsupportedProcessorSnafu,
+ ProcessorMustBeMapSnafu, ProcessorMustHaveStringKeySnafu,
};
use super::field::{Field, Fields};
use crate::etl::error::{Error, Result};
use crate::etl::value::Value;
+use crate::etl_error::UnsupportedProcessorSnafu;
const FIELD_NAME: &str = "field";
const FIELDS_NAME: &str = "fields";
@@ -65,6 +66,8 @@ const TARGET_FIELDS_NAME: &str = "target_fields";
const JSON_PATH_NAME: &str = "json_path";
const JSON_PATH_RESULT_INDEX_NAME: &str = "result_index";
+pub type IntermediateStatus = BTreeMap<String, Value>;
+
/// Processor trait defines the interface for all processors.
///
/// A processor is a transformation that can be applied to a field in a document
@@ -80,7 +83,7 @@ pub trait Processor: std::fmt::Debug + Send + Sync + 'static {
fn ignore_missing(&self) -> bool;
    /// Execute the processor on a vector which has been preprocessed by the pipeline
- fn exec_mut(&self, val: &mut Vec<Value>) -> Result<()>;
+ fn exec_mut(&self, val: &mut IntermediateStatus) -> Result<()>;
}
#[derive(Debug)]
@@ -102,57 +105,12 @@ pub enum ProcessorKind {
Digest(DigestProcessor),
}
-/// ProcessorBuilder trait defines the interface for all processor builders
-/// A processor builder is used to create a processor
-#[enum_dispatch(ProcessorBuilders)]
-pub trait ProcessorBuilder: std::fmt::Debug + Send + Sync + 'static {
- /// Get the processor's output keys
- fn output_keys(&self) -> HashSet<&str>;
- /// Get the processor's input keys
- fn input_keys(&self) -> HashSet<&str>;
- /// Build the processor
- fn build(self, intermediate_keys: &[String]) -> Result<ProcessorKind>;
-}
-
-#[derive(Debug)]
-#[enum_dispatch]
-pub enum ProcessorBuilders {
- Cmcd(CmcdProcessorBuilder),
- Csv(CsvProcessorBuilder),
- Dissect(DissectProcessorBuilder),
- Gsub(GsubProcessorBuilder),
- Join(JoinProcessorBuilder),
- Letter(LetterProcessorBuilder),
- Regex(RegexProcessorBuilder),
- Timestamp(TimestampProcessorBuilder),
- UrlEncoding(UrlEncodingProcessorBuilder),
- Epoch(EpochProcessorBuilder),
- Date(DateProcessorBuilder),
- JsonPath(JsonPathProcessorBuilder),
- Decolorize(DecolorizeProcessorBuilder),
- Digest(DigestProcessorBuilder),
-}
-
-#[derive(Debug, Default)]
-pub struct ProcessorBuilderList {
- pub(crate) processor_builders: Vec<ProcessorBuilders>,
- pub(crate) input_keys: Vec<String>,
- pub(crate) output_keys: Vec<String>,
- pub(crate) original_input_keys: Vec<String>,
-}
-
#[derive(Debug, Default)]
pub struct Processors {
    /// An ordered list of processors
/// The order of processors is important
/// The output of the first processor will be the input of the second processor
pub processors: Vec<ProcessorKind>,
- /// all required keys in all processors
- pub required_keys: Vec<String>,
- /// all required keys in user-supplied data, not pipeline output fields
- pub required_original_keys: Vec<String>,
- /// all output keys in all processors
- pub output_keys: Vec<String>,
}
impl std::ops::Deref for Processors {
@@ -169,80 +127,22 @@ impl std::ops::DerefMut for Processors {
}
}
-impl Processors {
- /// A collection of all the processor's required input fields
- pub fn required_keys(&self) -> &Vec<String> {
- &self.required_keys
- }
-
- /// A collection of all the processor's output fields
- pub fn output_keys(&self) -> &Vec<String> {
- &self.output_keys
- }
-
- /// Required fields in user-supplied data, not pipeline output fields.
- pub fn required_original_keys(&self) -> &Vec<String> {
- &self.required_original_keys
- }
-}
-
-impl TryFrom<&Vec<yaml_rust::Yaml>> for ProcessorBuilderList {
+impl TryFrom<&Vec<yaml_rust::Yaml>> for Processors {
type Error = Error;
fn try_from(vec: &Vec<yaml_rust::Yaml>) -> Result<Self> {
let mut processors_builders = vec![];
- let mut all_output_keys = HashSet::with_capacity(50);
- let mut all_required_keys = HashSet::with_capacity(50);
- let mut all_required_original_keys = HashSet::with_capacity(50);
for doc in vec {
let processor = parse_processor(doc)?;
processors_builders.push(processor);
}
-
- for processor in processors_builders.iter() {
- {
- // get all required keys
- let processor_required_keys = processor.input_keys();
-
- for key in &processor_required_keys {
- if !all_output_keys.contains(key) {
- all_required_original_keys.insert(*key);
- }
- }
-
- all_required_keys.extend(processor_required_keys);
-
- let processor_output_keys = processor.output_keys().into_iter();
- all_output_keys.extend(processor_output_keys);
- }
- }
-
- let all_required_keys = all_required_keys
- .into_iter()
- .map(|x| x.to_string())
- .sorted()
- .collect();
- let all_output_keys = all_output_keys
- .into_iter()
- .map(|x| x.to_string())
- .sorted()
- .collect();
- let all_required_original_keys = all_required_original_keys
- .into_iter()
- .map(|x| x.to_string())
- .sorted()
- .collect();
-
- Ok(ProcessorBuilderList {
- processor_builders: processors_builders,
- input_keys: all_required_keys,
- output_keys: all_output_keys,
- original_input_keys: all_required_original_keys,
+ Ok(Processors {
+ processors: processors_builders,
})
}
}
-fn parse_processor(doc: &yaml_rust::Yaml) -> Result<ProcessorBuilders> {
+fn parse_processor(doc: &yaml_rust::Yaml) -> Result<ProcessorKind> {
let map = doc.as_hash().context(ProcessorMustBeMapSnafu)?;
let key = map.keys().next().context(ProcessorMustHaveStringKeySnafu)?;
@@ -256,34 +156,28 @@ fn parse_processor(doc: &yaml_rust::Yaml) -> Result<ProcessorBuilders> {
let str_key = key.as_str().context(ProcessorKeyMustBeStringSnafu)?;
let processor = match str_key {
- cmcd::PROCESSOR_CMCD => ProcessorBuilders::Cmcd(CmcdProcessorBuilder::try_from(value)?),
- csv::PROCESSOR_CSV => ProcessorBuilders::Csv(CsvProcessorBuilder::try_from(value)?),
- dissect::PROCESSOR_DISSECT => {
- ProcessorBuilders::Dissect(DissectProcessorBuilder::try_from(value)?)
- }
- epoch::PROCESSOR_EPOCH => ProcessorBuilders::Epoch(EpochProcessorBuilder::try_from(value)?),
- date::PROCESSOR_DATE => ProcessorBuilders::Date(DateProcessorBuilder::try_from(value)?),
- gsub::PROCESSOR_GSUB => ProcessorBuilders::Gsub(GsubProcessorBuilder::try_from(value)?),
- join::PROCESSOR_JOIN => ProcessorBuilders::Join(JoinProcessorBuilder::try_from(value)?),
- letter::PROCESSOR_LETTER => {
- ProcessorBuilders::Letter(LetterProcessorBuilder::try_from(value)?)
- }
- regex::PROCESSOR_REGEX => ProcessorBuilders::Regex(RegexProcessorBuilder::try_from(value)?),
+ cmcd::PROCESSOR_CMCD => ProcessorKind::Cmcd(CmcdProcessor::try_from(value)?),
+ csv::PROCESSOR_CSV => ProcessorKind::Csv(CsvProcessor::try_from(value)?),
+ dissect::PROCESSOR_DISSECT => ProcessorKind::Dissect(DissectProcessor::try_from(value)?),
+ epoch::PROCESSOR_EPOCH => ProcessorKind::Epoch(EpochProcessor::try_from(value)?),
+ date::PROCESSOR_DATE => ProcessorKind::Date(DateProcessor::try_from(value)?),
+ gsub::PROCESSOR_GSUB => ProcessorKind::Gsub(GsubProcessor::try_from(value)?),
+ join::PROCESSOR_JOIN => ProcessorKind::Join(JoinProcessor::try_from(value)?),
+ letter::PROCESSOR_LETTER => ProcessorKind::Letter(LetterProcessor::try_from(value)?),
+ regex::PROCESSOR_REGEX => ProcessorKind::Regex(RegexProcessor::try_from(value)?),
timestamp::PROCESSOR_TIMESTAMP => {
- ProcessorBuilders::Timestamp(TimestampProcessorBuilder::try_from(value)?)
+ ProcessorKind::Timestamp(TimestampProcessor::try_from(value)?)
}
urlencoding::PROCESSOR_URL_ENCODING => {
- ProcessorBuilders::UrlEncoding(UrlEncodingProcessorBuilder::try_from(value)?)
+ ProcessorKind::UrlEncoding(UrlEncodingProcessor::try_from(value)?)
}
json_path::PROCESSOR_JSON_PATH => {
- ProcessorBuilders::JsonPath(json_path::JsonPathProcessorBuilder::try_from(value)?)
+ ProcessorKind::JsonPath(json_path::JsonPathProcessor::try_from(value)?)
}
decolorize::PROCESSOR_DECOLORIZE => {
- ProcessorBuilders::Decolorize(DecolorizeProcessorBuilder::try_from(value)?)
- }
- digest::PROCESSOR_DIGEST => {
- ProcessorBuilders::Digest(DigestProcessorBuilder::try_from(value)?)
+ ProcessorKind::Decolorize(DecolorizeProcessor::try_from(value)?)
}
+ digest::PROCESSOR_DIGEST => ProcessorKind::Digest(DigestProcessor::try_from(value)?),
_ => return UnsupportedProcessorSnafu { processor: str_key }.fail(),
};
diff --git a/src/pipeline/src/etl/processor/cmcd.rs b/src/pipeline/src/etl/processor/cmcd.rs
index 086fe8f3d610..a5da69d0be42 100644
--- a/src/pipeline/src/etl/processor/cmcd.rs
+++ b/src/pipeline/src/etl/processor/cmcd.rs
@@ -18,20 +18,19 @@
use std::collections::BTreeMap;
-use ahash::HashSet;
use snafu::{OptionExt, ResultExt};
use urlencoding::decode;
+use super::IntermediateStatus;
use crate::etl::error::{
CmcdMissingKeySnafu, CmcdMissingValueSnafu, Error, FailedToParseFloatKeySnafu,
FailedToParseIntKeySnafu, KeyMustBeStringSnafu, ProcessorExpectStringSnafu,
ProcessorMissingFieldSnafu, Result,
};
-use crate::etl::field::{Field, Fields, InputFieldInfo, OneInputMultiOutputField};
-use crate::etl::find_key_index;
+use crate::etl::field::Fields;
use crate::etl::processor::{
- yaml_bool, yaml_new_field, yaml_new_fields, Processor, ProcessorBuilder, ProcessorKind,
- FIELDS_NAME, FIELD_NAME, IGNORE_MISSING_NAME,
+ yaml_bool, yaml_new_field, yaml_new_fields, Processor, FIELDS_NAME, FIELD_NAME,
+ IGNORE_MISSING_NAME,
};
use crate::etl::value::Value;
@@ -77,139 +76,6 @@ const CMCD_KEYS: [&str; 18] = [
CMCD_KEY_V,
];
-/// CmcdProcessorBuilder is a builder for CmcdProcessor
-/// parse from raw yaml
-#[derive(Debug, Default)]
-pub struct CmcdProcessorBuilder {
- fields: Fields,
- output_keys: HashSet<String>,
- ignore_missing: bool,
-}
-
-impl CmcdProcessorBuilder {
- /// build_cmcd_outputs build cmcd output info
- /// generate index and function for each output
- pub(super) fn build_cmcd_outputs(
- field: &Field,
- intermediate_keys: &[String],
- ) -> Result<(BTreeMap<String, usize>, Vec<CmcdOutputInfo>)> {
- let mut output_index = BTreeMap::new();
- let mut cmcd_field_outputs = Vec::with_capacity(CMCD_KEYS.len());
- for cmcd in CMCD_KEYS {
- let final_key = generate_key(field.target_or_input_field(), cmcd);
- let index = find_key_index(intermediate_keys, &final_key, "cmcd")?;
- output_index.insert(final_key.clone(), index);
- match cmcd {
- CMCD_KEY_BS | CMCD_KEY_SU => {
- let output_info = CmcdOutputInfo::new(final_key, cmcd, index, bs_su);
- cmcd_field_outputs.push(output_info);
- }
- CMCD_KEY_BR | CMCD_KEY_BL | CMCD_KEY_D | CMCD_KEY_DL | CMCD_KEY_MTP
- | CMCD_KEY_RTP | CMCD_KEY_TB => {
- let output_info = CmcdOutputInfo::new(final_key, cmcd, index, br_tb);
- cmcd_field_outputs.push(output_info);
- }
- CMCD_KEY_CID | CMCD_KEY_NRR | CMCD_KEY_OT | CMCD_KEY_SF | CMCD_KEY_SID
- | CMCD_KEY_ST | CMCD_KEY_V => {
- let output_info = CmcdOutputInfo::new(final_key, cmcd, index, cid_v);
- cmcd_field_outputs.push(output_info);
- }
- CMCD_KEY_NOR => {
- let output_info = CmcdOutputInfo::new(final_key, cmcd, index, nor);
- cmcd_field_outputs.push(output_info);
- }
- CMCD_KEY_PR => {
- let output_info = CmcdOutputInfo::new(final_key, cmcd, index, pr);
- cmcd_field_outputs.push(output_info);
- }
- _ => {}
- }
- }
- Ok((output_index, cmcd_field_outputs))
- }
-
- /// build CmcdProcessor from CmcdProcessorBuilder
- pub fn build(self, intermediate_keys: &[String]) -> Result<CmcdProcessor> {
- let mut real_fields = vec![];
- let mut cmcd_outputs = Vec::with_capacity(CMCD_KEYS.len());
- for field in self.fields.into_iter() {
- let input_index = find_key_index(intermediate_keys, field.input_field(), "cmcd")?;
-
- let input_field_info = InputFieldInfo::new(field.input_field(), input_index);
-
- let (_, cmcd_field_outputs) = Self::build_cmcd_outputs(&field, intermediate_keys)?;
-
- cmcd_outputs.push(cmcd_field_outputs);
-
- let real_field = OneInputMultiOutputField::new(input_field_info, field.target_field);
- real_fields.push(real_field);
- }
- Ok(CmcdProcessor {
- fields: real_fields,
- cmcd_outputs,
- ignore_missing: self.ignore_missing,
- })
- }
-}
-
-impl ProcessorBuilder for CmcdProcessorBuilder {
- fn output_keys(&self) -> HashSet<&str> {
- self.output_keys.iter().map(|s| s.as_str()).collect()
- }
-
- fn input_keys(&self) -> HashSet<&str> {
- self.fields.iter().map(|f| f.input_field()).collect()
- }
-
- fn build(self, intermediate_keys: &[String]) -> Result<ProcessorKind> {
- self.build(intermediate_keys).map(ProcessorKind::Cmcd)
- }
-}
-
-fn generate_key(prefix: &str, key: &str) -> String {
- format!("{}_{}", prefix, key)
-}
-
-/// CmcdOutputInfo is a struct to store output info
-#[derive(Debug)]
-pub(super) struct CmcdOutputInfo {
- /// {input_field}_{cmcd_key}
- final_key: String,
- /// cmcd key
- key: &'static str,
- /// index in intermediate_keys
- index: usize,
- /// function to resolve value
- f: fn(&str, &str, Option<&str>) -> Result<Value>,
-}
-
-impl CmcdOutputInfo {
- fn new(
- final_key: String,
- key: &'static str,
- index: usize,
- f: fn(&str, &str, Option<&str>) -> Result<Value>,
- ) -> Self {
- Self {
- final_key,
- key,
- index,
- f,
- }
- }
-}
-
-impl Default for CmcdOutputInfo {
- fn default() -> Self {
- Self {
- final_key: String::default(),
- key: "",
- index: 0,
- f: |_, _, _| Ok(Value::Null),
- }
- }
-}
-
/// function to resolve CMCD_KEY_BS | CMCD_KEY_SU
fn bs_su(_: &str, _: &str, _: Option<&str>) -> Result<Value> {
Ok(Value::Boolean(true))
@@ -286,9 +152,7 @@ fn pr(s: &str, k: &str, v: Option<&str>) -> Result<Value> {
/// 12. Transport Layer Security SHOULD be used to protect all transmission of CMCD data.
#[derive(Debug, Default)]
pub struct CmcdProcessor {
- fields: Vec<OneInputMultiOutputField>,
- cmcd_outputs: Vec<Vec<CmcdOutputInfo>>,
-
+ fields: Fields,
ignore_missing: bool,
}
@@ -297,27 +161,52 @@ impl CmcdProcessor {
format!("{}_{}", prefix, key)
}
- fn parse(&self, field_index: usize, s: &str) -> Result<Vec<(usize, Value)>> {
- let parts = s.split(',');
- let mut result = Vec::new();
+ fn parse(&self, name: &str, value: &str) -> Result<BTreeMap<String, Value>> {
+ let mut working_set = BTreeMap::new();
+
+ let parts = value.split(',');
+
for part in parts {
let mut kv = part.split('=');
- let k = kv.next().context(CmcdMissingKeySnafu { part, s })?;
+ let k = kv.next().context(CmcdMissingKeySnafu { part, s: value })?;
let v = kv.next();
- for cmcd_key in self.cmcd_outputs[field_index].iter() {
- if cmcd_key.key == k {
- let val = (cmcd_key.f)(s, k, v)?;
- result.push((cmcd_key.index, val));
+ for cmcd_key in CMCD_KEYS {
+ if cmcd_key == k {
+ match cmcd_key {
+ CMCD_KEY_BS | CMCD_KEY_SU => {
+ working_set
+ .insert(Self::generate_key(name, cmcd_key), bs_su(value, k, v)?);
+ }
+ CMCD_KEY_BR | CMCD_KEY_BL | CMCD_KEY_D | CMCD_KEY_DL | CMCD_KEY_MTP
+ | CMCD_KEY_RTP | CMCD_KEY_TB => {
+ working_set
+ .insert(Self::generate_key(name, cmcd_key), br_tb(value, k, v)?);
+ }
+ CMCD_KEY_CID | CMCD_KEY_NRR | CMCD_KEY_OT | CMCD_KEY_SF | CMCD_KEY_SID
+ | CMCD_KEY_ST | CMCD_KEY_V => {
+ working_set
+ .insert(Self::generate_key(name, cmcd_key), cid_v(value, k, v)?);
+ }
+ CMCD_KEY_NOR => {
+ working_set
+ .insert(Self::generate_key(name, cmcd_key), nor(value, k, v)?);
+ }
+ CMCD_KEY_PR => {
+ working_set
+ .insert(Self::generate_key(name, cmcd_key), pr(value, k, v)?);
+ }
+
+ _ => {}
+ }
}
}
}
-
- Ok(result)
+ Ok(working_set)
}
}
-impl TryFrom<&yaml_rust::yaml::Hash> for CmcdProcessorBuilder {
+impl TryFrom<&yaml_rust::yaml::Hash> for CmcdProcessor {
type Error = Error;
fn try_from(value: &yaml_rust::yaml::Hash) -> Result<Self> {
@@ -344,22 +233,12 @@ impl TryFrom<&yaml_rust::yaml::Hash> for CmcdProcessorBuilder {
}
}
- let output_keys = fields
- .iter()
- .flat_map(|f| {
- CMCD_KEYS
- .iter()
- .map(|cmcd_key| generate_key(f.target_or_input_field(), cmcd_key))
- })
- .collect();
-
- let builder = CmcdProcessorBuilder {
+ let proc = CmcdProcessor {
fields,
- output_keys,
ignore_missing,
};
- Ok(builder)
+ Ok(proc)
}
}
@@ -372,21 +251,20 @@ impl Processor for CmcdProcessor {
self.ignore_missing
}
- fn exec_mut(&self, val: &mut Vec<Value>) -> Result<()> {
- for (field_index, field) in self.fields.iter().enumerate() {
- let field_value_index = field.input_index();
- match val.get(field_value_index) {
- Some(Value::String(v)) => {
- let result_list = self.parse(field_index, v)?;
- for (output_index, v) in result_list {
- val[output_index] = v;
- }
+ fn exec_mut(&self, val: &mut IntermediateStatus) -> Result<()> {
+ for field in self.fields.iter() {
+ let name = field.input_field();
+
+ match val.get(name) {
+ Some(Value::String(s)) => {
+ let results = self.parse(field.target_or_input_field(), s)?;
+ val.extend(results);
}
Some(Value::Null) | None => {
if !self.ignore_missing {
return ProcessorMissingFieldSnafu {
processor: self.kind().to_string(),
- field: field.input_name().to_string(),
+ field: name.to_string(),
}
.fail();
}
@@ -400,6 +278,7 @@ impl Processor for CmcdProcessor {
}
}
}
+
Ok(())
}
}
@@ -410,9 +289,9 @@ mod tests {
use urlencoding::decode;
- use super::{CmcdProcessorBuilder, CMCD_KEYS};
+ use super::CmcdProcessor;
use crate::etl::field::{Field, Fields};
- use crate::etl::value::{Map, Value};
+ use crate::etl::value::Value;
#[test]
fn test_cmcd() {
@@ -546,37 +425,20 @@ mod tests {
let field = Field::new("prefix", None);
- let output_keys = CMCD_KEYS
- .iter()
- .map(|k| format!("prefix_{}", k))
- .collect::<Vec<String>>();
-
- let mut intermediate_keys = vec!["prefix".to_string()];
- intermediate_keys.append(&mut (output_keys.clone()));
-
- let builder = CmcdProcessorBuilder {
+ let processor = CmcdProcessor {
fields: Fields::new(vec![field]),
- output_keys: output_keys.iter().map(|s| s.to_string()).collect(),
ignore_missing: false,
};
- let processor = builder.build(&intermediate_keys).unwrap();
-
for (s, vec) in ss.into_iter() {
let decoded = decode(s).unwrap().to_string();
- let values = vec
+ let expected = vec
.into_iter()
.map(|(k, v)| (k.to_string(), v))
.collect::<BTreeMap<String, Value>>();
- let expected = Map { values };
- let actual = processor.parse(0, &decoded).unwrap();
- let actual = actual
- .into_iter()
- .map(|(index, value)| (intermediate_keys[index].clone(), value))
- .collect::<BTreeMap<String, Value>>();
- let actual = Map { values: actual };
+ let actual = processor.parse("prefix", &decoded).unwrap();
assert_eq!(actual, expected);
}
}
diff --git a/src/pipeline/src/etl/processor/csv.rs b/src/pipeline/src/etl/processor/csv.rs
index c9cb5f847db1..a0fac70de15c 100644
--- a/src/pipeline/src/etl/processor/csv.rs
+++ b/src/pipeline/src/etl/processor/csv.rs
@@ -14,7 +14,8 @@
// Reference: https://www.elastic.co/guide/en/elasticsearch/reference/current/csv-processor.html
-use ahash::HashSet;
+use std::collections::BTreeMap;
+
use csv::{ReaderBuilder, Trim};
use itertools::EitherOrBoth::{Both, Left, Right};
use itertools::Itertools;
@@ -24,11 +25,10 @@ use crate::etl::error::{
CsvNoRecordSnafu, CsvQuoteNameSnafu, CsvReadSnafu, CsvSeparatorNameSnafu, Error,
KeyMustBeStringSnafu, ProcessorExpectStringSnafu, ProcessorMissingFieldSnafu, Result,
};
-use crate::etl::field::{Fields, InputFieldInfo, OneInputMultiOutputField};
-use crate::etl::find_key_index;
+use crate::etl::field::Fields;
use crate::etl::processor::{
- yaml_bool, yaml_new_field, yaml_new_fields, yaml_string, Processor, ProcessorBuilder,
- ProcessorKind, FIELDS_NAME, FIELD_NAME, IGNORE_MISSING_NAME,
+ yaml_bool, yaml_new_field, yaml_new_fields, yaml_string, Processor, FIELDS_NAME, FIELD_NAME,
+ IGNORE_MISSING_NAME,
};
use crate::etl::value::Value;
@@ -40,76 +40,17 @@ const TRIM_NAME: &str = "trim";
const EMPTY_VALUE_NAME: &str = "empty_value";
const TARGET_FIELDS: &str = "target_fields";
-#[derive(Debug, Default)]
-pub struct CsvProcessorBuilder {
- reader: ReaderBuilder,
-
- fields: Fields,
- ignore_missing: bool,
-
- // Value used to fill empty fields, empty fields will be skipped if this is not provided.
- empty_value: Option<String>,
- target_fields: Vec<String>,
- // description
- // if
- // ignore_failure
- // on_failure
- // tag
-}
-
-impl CsvProcessorBuilder {
- fn build(self, intermediate_keys: &[String]) -> Result<CsvProcessor> {
- let mut real_fields = vec![];
-
- for field in self.fields {
- let input_index = find_key_index(intermediate_keys, field.input_field(), "csv")?;
-
- let input_field_info = InputFieldInfo::new(field.input_field(), input_index);
- let real_field = OneInputMultiOutputField::new(input_field_info, None);
- real_fields.push(real_field);
- }
-
- let output_index_info = self
- .target_fields
- .iter()
- .map(|f| find_key_index(intermediate_keys, f, "csv"))
- .collect::<Result<Vec<_>>>()?;
- Ok(CsvProcessor {
- reader: self.reader,
- fields: real_fields,
- ignore_missing: self.ignore_missing,
- empty_value: self.empty_value,
- output_index_info,
- })
- }
-}
-
-impl ProcessorBuilder for CsvProcessorBuilder {
- fn output_keys(&self) -> HashSet<&str> {
- self.target_fields.iter().map(|s| s.as_str()).collect()
- }
-
- fn input_keys(&self) -> HashSet<&str> {
- self.fields.iter().map(|f| f.input_field()).collect()
- }
-
- fn build(self, intermediate_keys: &[String]) -> Result<ProcessorKind> {
- self.build(intermediate_keys).map(ProcessorKind::Csv)
- }
-}
-
/// only supports string values
-#[derive(Debug)]
+#[derive(Debug, Default)]
pub struct CsvProcessor {
reader: ReaderBuilder,
-
- fields: Vec<OneInputMultiOutputField>,
+ fields: Fields,
ignore_missing: bool,
// Value used to fill empty fields, empty fields will be skipped if this is not provided.
empty_value: Option<String>,
- output_index_info: Vec<usize>,
+ target_fields: Vec<String>,
// description
// if
// ignore_failure
@@ -119,18 +60,20 @@ pub struct CsvProcessor {
impl CsvProcessor {
// process the csv format string to a map with target_fields as keys
- fn process(&self, val: &str) -> Result<Vec<(usize, Value)>> {
+ fn process(&self, val: &str) -> Result<BTreeMap<String, Value>> {
let mut reader = self.reader.from_reader(val.as_bytes());
if let Some(result) = reader.records().next() {
let record: csv::StringRecord = result.context(CsvReadSnafu)?;
- let values: Vec<(usize, Value)> = self
- .output_index_info
+ let values = self
+ .target_fields
.iter()
.zip_longest(record.iter())
.filter_map(|zipped| match zipped {
- Both(target_field, val) => Some((*target_field, Value::String(val.into()))),
+ Both(target_field, val) => {
+ Some((target_field.clone(), Value::String(val.into())))
+ }
                    // if there are more target fields than extracted fields, fill the rest with the empty value
Left(target_field) => {
let value = self
@@ -138,7 +81,7 @@ impl CsvProcessor {
.as_ref()
.map(|s| Value::String(s.clone()))
.unwrap_or(Value::Null);
- Some((*target_field, value))
+ Some((target_field.clone(), value))
}
                    // if there are more extracted fields than target fields, ignore the rest
Right(_) => None,
@@ -152,7 +95,7 @@ impl CsvProcessor {
}
}
-impl TryFrom<&yaml_rust::yaml::Hash> for CsvProcessorBuilder {
+impl TryFrom<&yaml_rust::yaml::Hash> for CsvProcessor {
type Error = Error;
fn try_from(hash: &yaml_rust::yaml::Hash) -> Result<Self> {
@@ -224,8 +167,8 @@ impl TryFrom<&yaml_rust::yaml::Hash> for CsvProcessorBuilder {
_ => {}
}
}
- let builder = {
- CsvProcessorBuilder {
+ let proc = {
+ CsvProcessor {
reader,
fields,
ignore_missing,
@@ -234,7 +177,7 @@ impl TryFrom<&yaml_rust::yaml::Hash> for CsvProcessorBuilder {
}
};
- Ok(builder)
+ Ok(proc)
}
}
@@ -247,21 +190,20 @@ impl Processor for CsvProcessor {
self.ignore_missing
}
- fn exec_mut(&self, val: &mut Vec<Value>) -> Result<()> {
+ fn exec_mut(&self, val: &mut BTreeMap<String, Value>) -> Result<()> {
for field in self.fields.iter() {
- let index = field.input_index();
- match val.get(index) {
+ let name = field.input_field();
+
+ match val.get(name) {
Some(Value::String(v)) => {
- let resule_list = self.process(v)?;
- for (k, v) in resule_list {
- val[k] = v;
- }
+ let results = self.process(v)?;
+ val.extend(results);
}
Some(Value::Null) | None => {
if !self.ignore_missing {
return ProcessorMissingFieldSnafu {
processor: self.kind().to_string(),
- field: field.input_name().to_string(),
+ field: name.to_string(),
}
.fail();
}
@@ -282,37 +224,28 @@ impl Processor for CsvProcessor {
#[cfg(test)]
mod tests {
- use ahash::HashMap;
-
- use super::Value;
- use crate::etl::processor::csv::CsvProcessorBuilder;
+ use super::*;
+ use crate::etl::field::Field;
#[test]
fn test_equal_length() {
let mut reader = csv::ReaderBuilder::new();
reader.has_headers(false);
- let builder = CsvProcessorBuilder {
+ let processor = CsvProcessor {
reader,
+ fields: Fields::new(vec![Field::new("data", None)]),
target_fields: vec!["a".into(), "b".into()],
..Default::default()
};
- let intermediate_keys = vec!["data".into(), "a".into(), "b".into()];
-
- let processor = builder.build(&intermediate_keys).unwrap();
- let result = processor
- .process("1,2")
- .unwrap()
- .into_iter()
- .map(|(k, v)| (intermediate_keys[k].clone(), v))
- .collect::<HashMap<_, _>>();
+ let result = processor.process("1,2").unwrap();
let values = [
("a".into(), Value::String("1".into())),
("b".into(), Value::String("2".into())),
]
.into_iter()
- .collect::<HashMap<_, _>>();
+ .collect();
assert_eq!(result, values);
}
@@ -324,21 +257,14 @@ mod tests {
{
let mut reader = csv::ReaderBuilder::new();
reader.has_headers(false);
- let builder = CsvProcessorBuilder {
+ let processor = CsvProcessor {
reader,
+ fields: Fields::new(vec![Field::new("data", None)]),
target_fields: vec!["a".into(), "b".into(), "c".into()],
..Default::default()
};
- let intermediate_keys = vec!["data".into(), "a".into(), "b".into(), "c".into()];
-
- let processor = builder.build(&intermediate_keys).unwrap();
- let result = processor
- .process("1,2")
- .unwrap()
- .into_iter()
- .map(|(k, v)| (intermediate_keys[k].clone(), v))
- .collect::<HashMap<_, _>>();
+ let result = processor.process("1,2").unwrap();
let values = [
("a".into(), Value::String("1".into())),
@@ -346,7 +272,7 @@ mod tests {
("c".into(), Value::Null),
]
.into_iter()
- .collect::<HashMap<_, _>>();
+ .collect();
assert_eq!(result, values);
}
@@ -355,22 +281,15 @@ mod tests {
{
let mut reader = csv::ReaderBuilder::new();
reader.has_headers(false);
- let builder = CsvProcessorBuilder {
+ let processor = CsvProcessor {
reader,
+ fields: Fields::new(vec![Field::new("data", None)]),
target_fields: vec!["a".into(), "b".into(), "c".into()],
empty_value: Some("default".into()),
..Default::default()
};
- let intermediate_keys = vec!["data".into(), "a".into(), "b".into(), "c".into()];
-
- let processor = builder.build(&intermediate_keys).unwrap();
- let result = processor
- .process("1,2")
- .unwrap()
- .into_iter()
- .map(|(k, v)| (intermediate_keys[k].clone(), v))
- .collect::<HashMap<_, _>>();
+ let result = processor.process("1,2").unwrap();
let values = [
("a".into(), Value::String("1".into())),
@@ -389,22 +308,14 @@ mod tests {
fn test_target_fields_has_less_length() {
let mut reader = csv::ReaderBuilder::new();
reader.has_headers(false);
- let builder = CsvProcessorBuilder {
+ let processor = CsvProcessor {
reader,
target_fields: vec!["a".into(), "b".into()],
empty_value: Some("default".into()),
..Default::default()
};
- let intermediate_keys = vec!["data".into(), "a".into(), "b".into()];
-
- let processor = builder.build(&intermediate_keys).unwrap();
- let result = processor
- .process("1,2")
- .unwrap()
- .into_iter()
- .map(|(k, v)| (intermediate_keys[k].clone(), v))
- .collect::<HashMap<_, _>>();
+ let result = processor.process("1,2").unwrap();
let values = [
("a".into(), Value::String("1".into())),
diff --git a/src/pipeline/src/etl/processor/date.rs b/src/pipeline/src/etl/processor/date.rs
index fa202a0edff2..e080b795402c 100644
--- a/src/pipeline/src/etl/processor/date.rs
+++ b/src/pipeline/src/etl/processor/date.rs
@@ -14,21 +14,21 @@
use std::sync::Arc;
-use ahash::HashSet;
use chrono::{DateTime, NaiveDateTime};
use chrono_tz::Tz;
use lazy_static::lazy_static;
use snafu::{OptionExt, ResultExt};
+use super::IntermediateStatus;
use crate::etl::error::{
DateFailedToGetLocalTimezoneSnafu, DateFailedToGetTimestampSnafu, DateParseSnafu,
DateParseTimezoneSnafu, Error, KeyMustBeStringSnafu, ProcessorExpectStringSnafu,
ProcessorFailedToParseStringSnafu, ProcessorMissingFieldSnafu, Result,
};
-use crate::etl::field::{Fields, OneInputOneOutputField};
+use crate::etl::field::Fields;
use crate::etl::processor::{
- yaml_bool, yaml_new_field, yaml_new_fields, yaml_string, yaml_strings, Processor,
- ProcessorBuilder, ProcessorKind, FIELDS_NAME, FIELD_NAME, IGNORE_MISSING_NAME,
+ yaml_bool, yaml_new_field, yaml_new_fields, yaml_string, yaml_strings, Processor, FIELDS_NAME,
+ FIELD_NAME, IGNORE_MISSING_NAME,
};
use crate::etl::value::{Timestamp, Value};
@@ -88,55 +88,7 @@ impl std::ops::Deref for Formats {
}
}
-#[derive(Debug, Default)]
-pub struct DateProcessorBuilder {
- fields: Fields,
- formats: Formats,
- timezone: Option<Arc<String>>,
- locale: Option<Arc<String>>,
- ignore_missing: bool,
-}
-
-impl ProcessorBuilder for DateProcessorBuilder {
- fn output_keys(&self) -> HashSet<&str> {
- self.fields
- .iter()
- .map(|f| f.target_or_input_field())
- .collect()
- }
-
- fn input_keys(&self) -> HashSet<&str> {
- self.fields.iter().map(|f| f.input_field()).collect()
- }
-
- fn build(self, intermediate_keys: &[String]) -> Result<ProcessorKind> {
- self.build(intermediate_keys).map(ProcessorKind::Date)
- }
-}
-
-impl DateProcessorBuilder {
- pub fn build(self, intermediate_keys: &[String]) -> Result<DateProcessor> {
- let mut real_fields = vec![];
- for field in self.fields.into_iter() {
- let input = OneInputOneOutputField::build(
- "date",
- intermediate_keys,
- field.input_field(),
- field.target_or_input_field(),
- )?;
- real_fields.push(input);
- }
- Ok(DateProcessor {
- fields: real_fields,
- formats: self.formats,
- timezone: self.timezone,
- locale: self.locale,
- ignore_missing: self.ignore_missing,
- })
- }
-}
-
-impl TryFrom<&yaml_rust::yaml::Hash> for DateProcessorBuilder {
+impl TryFrom<&yaml_rust::yaml::Hash> for DateProcessor {
type Error = Error;
fn try_from(hash: &yaml_rust::yaml::Hash) -> Result<Self> {
@@ -181,7 +133,7 @@ impl TryFrom<&yaml_rust::yaml::Hash> for DateProcessorBuilder {
}
}
- let builder = DateProcessorBuilder {
+ let builder = DateProcessor {
fields,
formats,
timezone,
@@ -197,7 +149,7 @@ impl TryFrom<&yaml_rust::yaml::Hash> for DateProcessorBuilder {
/// Reserved for compatibility only
#[derive(Debug, Default)]
pub struct DateProcessor {
- fields: Vec<OneInputOneOutputField>,
+ fields: Fields,
formats: Formats,
timezone: Option<Arc<String>>,
locale: Option<Arc<String>>, // to support locale
@@ -242,20 +194,20 @@ impl Processor for DateProcessor {
self.ignore_missing
}
- fn exec_mut(&self, val: &mut Vec<Value>) -> Result<()> {
+ fn exec_mut(&self, val: &mut IntermediateStatus) -> Result<()> {
for field in self.fields.iter() {
- let index = field.input_index();
+ let index = field.input_field();
match val.get(index) {
Some(Value::String(s)) => {
let timestamp = self.parse(s)?;
- let output_index = field.output_index();
- val[output_index] = Value::Timestamp(timestamp);
+ let output_key = field.target_or_input_field();
+ val.insert(output_key.to_string(), Value::Timestamp(timestamp));
}
Some(Value::Null) | None => {
if !self.ignore_missing {
return ProcessorMissingFieldSnafu {
processor: self.kind().to_string(),
- field: field.input_name().to_string(),
+ field: field.input_field().to_string(),
}
.fail();
}
diff --git a/src/pipeline/src/etl/processor/decolorize.rs b/src/pipeline/src/etl/processor/decolorize.rs
index e72bc28a1e66..2547b99d6824 100644
--- a/src/pipeline/src/etl/processor/decolorize.rs
+++ b/src/pipeline/src/etl/processor/decolorize.rs
@@ -18,18 +18,17 @@
//! from Grafana Loki and [`strip_ansi_escape_codes`](https://vector.dev/docs/reference/vrl/functions/#strip_ansi_escape_codes)
//! from Vector VRL.
-use ahash::HashSet;
use once_cell::sync::Lazy;
use regex::Regex;
use snafu::OptionExt;
+use super::IntermediateStatus;
use crate::etl::error::{
Error, KeyMustBeStringSnafu, ProcessorExpectStringSnafu, ProcessorMissingFieldSnafu, Result,
};
-use crate::etl::field::{Fields, OneInputOneOutputField};
+use crate::etl::field::Fields;
use crate::etl::processor::{
- yaml_bool, yaml_new_field, yaml_new_fields, ProcessorBuilder, ProcessorKind, FIELDS_NAME,
- FIELD_NAME, IGNORE_MISSING_NAME,
+ yaml_bool, yaml_new_field, yaml_new_fields, FIELDS_NAME, FIELD_NAME, IGNORE_MISSING_NAME,
};
use crate::etl::value::Value;
@@ -37,52 +36,10 @@ pub(crate) const PROCESSOR_DECOLORIZE: &str = "decolorize";
static RE: Lazy<Regex> = Lazy::new(|| Regex::new(r"\x1b\[[0-9;]*m").unwrap());
-#[derive(Debug, Default)]
-pub struct DecolorizeProcessorBuilder {
- fields: Fields,
- ignore_missing: bool,
-}
-
-impl ProcessorBuilder for DecolorizeProcessorBuilder {
- fn output_keys(&self) -> HashSet<&str> {
- self.fields
- .iter()
- .map(|f| f.target_or_input_field())
- .collect()
- }
-
- fn input_keys(&self) -> HashSet<&str> {
- self.fields.iter().map(|f| f.input_field()).collect()
- }
-
- fn build(self, intermediate_keys: &[String]) -> Result<ProcessorKind> {
- self.build(intermediate_keys).map(ProcessorKind::Decolorize)
- }
-}
-
-impl DecolorizeProcessorBuilder {
- fn build(self, intermediate_keys: &[String]) -> Result<DecolorizeProcessor> {
- let mut real_fields = vec![];
- for field in self.fields.into_iter() {
- let input = OneInputOneOutputField::build(
- "decolorize",
- intermediate_keys,
- field.input_field(),
- field.target_or_input_field(),
- )?;
- real_fields.push(input);
- }
- Ok(DecolorizeProcessor {
- fields: real_fields,
- ignore_missing: self.ignore_missing,
- })
- }
-}
-
/// Remove ANSI color control codes from the input text.
#[derive(Debug, Default)]
pub struct DecolorizeProcessor {
- fields: Vec<OneInputOneOutputField>,
+ fields: Fields,
ignore_missing: bool,
}
@@ -103,7 +60,7 @@ impl DecolorizeProcessor {
}
}
-impl TryFrom<&yaml_rust::yaml::Hash> for DecolorizeProcessorBuilder {
+impl TryFrom<&yaml_rust::yaml::Hash> for DecolorizeProcessor {
type Error = Error;
fn try_from(value: &yaml_rust::yaml::Hash) -> Result<Self> {
@@ -129,7 +86,7 @@ impl TryFrom<&yaml_rust::yaml::Hash> for DecolorizeProcessorBuilder {
}
}
- Ok(DecolorizeProcessorBuilder {
+ Ok(DecolorizeProcessor {
fields,
ignore_missing,
})
@@ -145,23 +102,23 @@ impl crate::etl::processor::Processor for DecolorizeProcessor {
self.ignore_missing
}
- fn exec_mut(&self, val: &mut Vec<Value>) -> Result<()> {
+ fn exec_mut(&self, val: &mut IntermediateStatus) -> Result<()> {
for field in self.fields.iter() {
- let index = field.input_index();
+ let index = field.input_field();
match val.get(index) {
Some(Value::Null) | None => {
if !self.ignore_missing {
return ProcessorMissingFieldSnafu {
processor: self.kind(),
- field: field.input_name(),
+ field: field.input_field(),
}
.fail();
}
}
Some(v) => {
let result = self.process(v)?;
- let output_index = field.output_index();
- val[output_index] = result;
+ let output_index = field.target_or_input_field();
+ val.insert(output_index.to_string(), result);
}
}
}
@@ -176,7 +133,7 @@ mod tests {
#[test]
fn test_decolorize_processor() {
let processor = DecolorizeProcessor {
- fields: vec![],
+ fields: Fields::default(),
ignore_missing: false,
};
diff --git a/src/pipeline/src/etl/processor/digest.rs b/src/pipeline/src/etl/processor/digest.rs
index 29054365ad03..64bb2a2f6d8a 100644
--- a/src/pipeline/src/etl/processor/digest.rs
+++ b/src/pipeline/src/etl/processor/digest.rs
@@ -21,17 +21,16 @@
use std::borrow::Cow;
-use ahash::HashSet;
use regex::Regex;
use snafu::OptionExt;
+use super::IntermediateStatus;
use crate::etl::error::{
Error, KeyMustBeStringSnafu, ProcessorExpectStringSnafu, ProcessorMissingFieldSnafu, Result,
};
-use crate::etl::field::{Fields, OneInputOneOutputField};
+use crate::etl::field::Fields;
use crate::etl::processor::{
- yaml_bool, yaml_new_field, yaml_new_fields, ProcessorBuilder, ProcessorKind, FIELDS_NAME,
- FIELD_NAME, IGNORE_MISSING_NAME,
+ yaml_bool, yaml_new_field, yaml_new_fields, FIELDS_NAME, FIELD_NAME, IGNORE_MISSING_NAME,
};
use crate::etl::value::Value;
use crate::etl_error::DigestPatternInvalidSnafu;
@@ -88,54 +87,10 @@ impl PresetPattern {
}
}
-#[derive(Debug, Default)]
-pub struct DigestProcessorBuilder {
- fields: Fields,
- patterns: Vec<Regex>,
- ignore_missing: bool,
-}
-
-impl ProcessorBuilder for DigestProcessorBuilder {
- fn output_keys(&self) -> HashSet<&str> {
- self.fields
- .iter()
- .map(|f| f.target_or_input_field())
- .collect()
- }
-
- fn input_keys(&self) -> HashSet<&str> {
- self.fields.iter().map(|f| f.input_field()).collect()
- }
-
- fn build(self, intermediate_keys: &[String]) -> Result<ProcessorKind> {
- self.build(intermediate_keys).map(ProcessorKind::Digest)
- }
-}
-
-impl DigestProcessorBuilder {
- fn build(self, intermediate_keys: &[String]) -> Result<DigestProcessor> {
- let mut real_fields = Vec::with_capacity(self.fields.len());
- for field in self.fields.into_iter() {
- let input = OneInputOneOutputField::build(
- "digest",
- intermediate_keys,
- field.input_field(),
- field.target_or_input_field(),
- )?;
- real_fields.push(input);
- }
- Ok(DigestProcessor {
- fields: real_fields,
- ignore_missing: self.ignore_missing,
- patterns: self.patterns,
- })
- }
-}
-
/// Computes a digest (hash) of the input string.
#[derive(Debug, Default)]
pub struct DigestProcessor {
- fields: Vec<OneInputOneOutputField>,
+ fields: Fields,
ignore_missing: bool,
patterns: Vec<Regex>,
}
@@ -169,7 +124,7 @@ impl DigestProcessor {
}
}
-impl TryFrom<&yaml_rust::yaml::Hash> for DigestProcessorBuilder {
+impl TryFrom<&yaml_rust::yaml::Hash> for DigestProcessor {
type Error = Error;
fn try_from(value: &yaml_rust::yaml::Hash) -> Result<Self> {
@@ -226,10 +181,10 @@ impl TryFrom<&yaml_rust::yaml::Hash> for DigestProcessorBuilder {
}
for field in fields.iter_mut() {
- field.target_field = Some(format!("{}_digest", field.input_field()));
+ field.set_target_field(Some(format!("{}_digest", field.input_field())));
}
- Ok(DigestProcessorBuilder {
+ Ok(DigestProcessor {
fields,
patterns,
ignore_missing,
@@ -246,23 +201,23 @@ impl crate::etl::processor::Processor for DigestProcessor {
self.ignore_missing
}
- fn exec_mut(&self, val: &mut Vec<Value>) -> Result<()> {
+ fn exec_mut(&self, val: &mut IntermediateStatus) -> Result<()> {
for field in self.fields.iter() {
- let index = field.input_index();
+ let index = field.input_field();
match val.get(index) {
Some(Value::Null) | None => {
if !self.ignore_missing {
return ProcessorMissingFieldSnafu {
processor: self.kind(),
- field: field.input_name(),
+ field: field.input_field(),
}
.fail();
}
}
Some(v) => {
let result = self.process(v)?;
- let output_index = field.output_index();
- val[output_index] = result;
+ let output_index = field.target_or_input_field();
+ val.insert(output_index.to_string(), result);
}
}
}
@@ -278,7 +233,7 @@ mod tests {
#[test]
fn test_digest_processor_ip() {
let processor = DigestProcessor {
- fields: vec![],
+ fields: Fields::default(),
ignore_missing: false,
patterns: vec![PresetPattern::Ip.regex()],
};
@@ -306,7 +261,7 @@ mod tests {
#[test]
fn test_digest_processor_uuid() {
let processor = DigestProcessor {
- fields: vec![],
+ fields: Fields::default(),
ignore_missing: false,
patterns: vec![PresetPattern::Uuid.regex()],
};
@@ -339,7 +294,7 @@ mod tests {
#[test]
fn test_digest_processor_brackets() {
let processor = DigestProcessor {
- fields: vec![],
+ fields: Fields::default(),
ignore_missing: false,
patterns: vec![PresetPattern::Bracketed.regex()],
};
@@ -389,7 +344,7 @@ mod tests {
#[test]
fn test_digest_processor_quotes() {
let processor = DigestProcessor {
- fields: vec![],
+ fields: Fields::default(),
ignore_missing: false,
patterns: vec![PresetPattern::Quoted.regex()],
};
@@ -409,7 +364,7 @@ mod tests {
#[test]
fn test_digest_processor_custom_regex() {
let processor = DigestProcessor {
- fields: vec![],
+ fields: Fields::default(),
ignore_missing: false,
patterns: vec![Regex::new(r"\d+").unwrap()],
};
diff --git a/src/pipeline/src/etl/processor/dissect.rs b/src/pipeline/src/etl/processor/dissect.rs
index a9ccf5e8735e..9ac28f7bf09e 100644
--- a/src/pipeline/src/etl/processor/dissect.rs
+++ b/src/pipeline/src/etl/processor/dissect.rs
@@ -18,6 +18,7 @@ use ahash::{HashMap, HashMapExt, HashSet, HashSetExt};
use itertools::Itertools;
use snafu::OptionExt;
+use super::IntermediateStatus;
use crate::etl::error::{
DissectAppendOrderAlreadySetSnafu, DissectConsecutiveNamesSnafu, DissectEmptyPatternSnafu,
DissectEndModifierAlreadySetSnafu, DissectInvalidPatternSnafu, DissectModifierAlreadySetSnafu,
@@ -25,12 +26,10 @@ use crate::etl::error::{
DissectOrderOnlyAppendSnafu, DissectSplitExceedsInputSnafu, DissectSplitNotMatchInputSnafu,
Error, KeyMustBeStringSnafu, ProcessorExpectStringSnafu, ProcessorMissingFieldSnafu, Result,
};
-use crate::etl::field::{Fields, InputFieldInfo, OneInputMultiOutputField};
-use crate::etl::find_key_index;
+use crate::etl::field::Fields;
use crate::etl::processor::{
yaml_bool, yaml_new_field, yaml_new_fields, yaml_parse_string, yaml_parse_strings, yaml_string,
- Processor, ProcessorBuilder, ProcessorKind, FIELDS_NAME, FIELD_NAME, IGNORE_MISSING_NAME,
- PATTERNS_NAME, PATTERN_NAME,
+ Processor, FIELDS_NAME, FIELD_NAME, IGNORE_MISSING_NAME, PATTERNS_NAME, PATTERN_NAME,
};
use crate::etl::value::Value;
@@ -69,14 +68,7 @@ impl std::fmt::Display for EndModifier {
}
}
-#[derive(Debug, PartialEq, Default)]
-struct NameInfo {
- name: String,
- start_modifier: Option<StartModifier>,
- end_modifier: Option<EndModifier>,
-}
-
-impl NameInfo {
+impl Name {
fn is_name_empty(&self) -> bool {
self.name.is_empty()
}
@@ -140,26 +132,9 @@ impl NameInfo {
}
}
-impl std::fmt::Display for NameInfo {
- fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
- write!(f, "{}", self.name)
- }
-}
-
-impl From<&str> for NameInfo {
- fn from(value: &str) -> Self {
- NameInfo {
- name: value.to_string(),
- start_modifier: None,
- end_modifier: None,
- }
- }
-}
-
#[derive(Debug, PartialEq, Default)]
struct Name {
name: String,
- index: usize,
start_modifier: Option<StartModifier>,
end_modifier: Option<EndModifier>,
}
@@ -170,57 +145,12 @@ impl std::fmt::Display for Name {
}
}
-impl From<NameInfo> for Name {
- fn from(value: NameInfo) -> Self {
+impl From<&str> for Name {
+ fn from(value: &str) -> Self {
Name {
- name: value.name,
- index: 0,
- start_modifier: value.start_modifier,
- end_modifier: value.end_modifier,
- }
- }
-}
-
-impl Name {
- fn is_name_empty(&self) -> bool {
- self.name.is_empty()
- }
-
- fn is_empty(&self) -> bool {
- self.name.is_empty() && self.start_modifier.is_none() && self.end_modifier.is_none()
- }
-
- fn is_end_modifier_set(&self) -> bool {
- self.end_modifier.is_some()
- }
-}
-
-#[derive(Debug, PartialEq)]
-enum PartInfo {
- Split(String),
- Name(NameInfo),
-}
-
-impl PartInfo {
- fn is_empty(&self) -> bool {
- match self {
- PartInfo::Split(v) => v.is_empty(),
- PartInfo::Name(v) => v.is_empty(),
- }
- }
-
- fn empty_split() -> Self {
- PartInfo::Split(String::new())
- }
-
- fn empty_name() -> Self {
- PartInfo::Name(NameInfo::default())
- }
-
- fn push(&mut self, ch: char) {
- match self {
- PartInfo::Split(v) => v.push(ch),
- PartInfo::Name(v) => v.name.push(ch),
+ name: value.to_string(),
+ start_modifier: None,
+ end_modifier: None,
}
}
}
@@ -246,13 +176,11 @@ impl Part {
fn empty_name() -> Self {
Part::Name(Name::default())
}
-}
-impl From<PartInfo> for Part {
- fn from(value: PartInfo) -> Self {
- match value {
- PartInfo::Split(v) => Part::Split(v),
- PartInfo::Name(v) => Part::Name(v.into()),
+ fn push(&mut self, ch: char) {
+ match self {
+ Part::Split(v) => v.push(ch),
+ Part::Name(v) => v.name.push(ch),
}
}
}
@@ -271,42 +199,12 @@ impl Deref for Pattern {
}
}
-impl From<PatternInfo> for Pattern {
- fn from(value: PatternInfo) -> Self {
- let parts = value.parts.into_iter().map(|x| x.into()).collect();
- Pattern {
- origin: value.origin,
- parts,
- }
- }
-}
-
-#[derive(Debug, Default)]
-struct PatternInfo {
- origin: String,
- parts: Vec<PartInfo>,
-}
-
-impl std::ops::Deref for PatternInfo {
- type Target = Vec<PartInfo>;
-
- fn deref(&self) -> &Self::Target {
- &self.parts
- }
-}
-
-impl std::ops::DerefMut for PatternInfo {
- fn deref_mut(&mut self) -> &mut Self::Target {
- &mut self.parts
- }
-}
-
-impl std::str::FromStr for PatternInfo {
+impl std::str::FromStr for Pattern {
type Err = Error;
fn from_str(s: &str) -> Result<Self> {
let mut parts = vec![];
- let mut cursor = PartInfo::empty_split();
+ let mut cursor = Part::empty_split();
let origin = s.to_string();
let chars: Vec<char> = origin.chars().collect();
@@ -316,27 +214,27 @@ impl std::str::FromStr for PatternInfo {
let ch = chars[pos];
match (ch, &mut cursor) {
// if cursor is Split part, and found %{, then ready to start a Name part
- ('%', PartInfo::Split(_)) if matches!(chars.get(pos + 1), Some('{')) => {
+ ('%', Part::Split(_)) if matches!(chars.get(pos + 1), Some('{')) => {
if !cursor.is_empty() {
parts.push(cursor);
}
- cursor = PartInfo::empty_name();
+ cursor = Part::empty_name();
pos += 1; // skip '{'
}
// if cursor is Split part, and not found % or {, then continue the Split part
- (_, PartInfo::Split(_)) => {
+ (_, Part::Split(_)) => {
cursor.push(ch);
}
// if cursor is Name part, and found }, then end the Name part, start the next Split part
- ('}', PartInfo::Name(_)) => {
+ ('}', Part::Name(_)) => {
parts.push(cursor);
- cursor = PartInfo::empty_split();
+ cursor = Part::empty_split();
}
- ('+', PartInfo::Name(name)) if !name.is_start_modifier_set() => {
+ ('+', Part::Name(name)) if !name.is_start_modifier_set() => {
name.try_start_modifier(StartModifier::Append(None))?;
}
- ('/', PartInfo::Name(name)) if name.is_append_modifier_set() => {
+ ('/', Part::Name(name)) if name.is_append_modifier_set() => {
let mut order = 0;
let mut j = pos + 1;
while j < chars.len() {
@@ -360,16 +258,16 @@ impl std::str::FromStr for PatternInfo {
name.try_append_order(order)?;
pos = j - 1; // this will change the position to the last digit of the order
}
- ('?', PartInfo::Name(name)) if !name.is_start_modifier_set() => {
+ ('?', Part::Name(name)) if !name.is_start_modifier_set() => {
name.try_start_modifier(StartModifier::NamedSkip)?;
}
- ('*', PartInfo::Name(name)) if !name.is_start_modifier_set() => {
+ ('*', Part::Name(name)) if !name.is_start_modifier_set() => {
name.try_start_modifier(StartModifier::MapKey)?;
}
- ('&', PartInfo::Name(name)) if !name.is_start_modifier_set() => {
+ ('&', Part::Name(name)) if !name.is_start_modifier_set() => {
name.try_start_modifier(StartModifier::MapVal)?;
}
- ('-', PartInfo::Name(name)) if !name.is_end_modifier_set() => {
+ ('-', Part::Name(name)) if !name.is_end_modifier_set() => {
if let Some('>') = chars.get(pos + 1) {
} else {
return DissectInvalidPatternSnafu {
@@ -391,7 +289,7 @@ impl std::str::FromStr for PatternInfo {
name.try_end_modifier()?;
pos += 1; // only skip '>', the next loop will skip '}'
}
- (_, PartInfo::Name(name)) if !is_valid_char(ch) => {
+ (_, Part::Name(name)) if !is_valid_char(ch) => {
let tail: String = if name.is_name_empty() {
format!("Invalid '{ch}'")
} else {
@@ -399,7 +297,7 @@ impl std::str::FromStr for PatternInfo {
};
return DissectInvalidPatternSnafu { s, detail: tail }.fail();
}
- (_, PartInfo::Name(_)) => {
+ (_, Part::Name(_)) => {
cursor.push(ch);
}
}
@@ -408,8 +306,8 @@ impl std::str::FromStr for PatternInfo {
}
match cursor {
- PartInfo::Split(ref split) if !split.is_empty() => parts.push(cursor),
- PartInfo::Name(name) if !name.is_empty() => {
+ Part::Split(ref split) if !split.is_empty() => parts.push(cursor),
+ Part::Name(name) if !name.is_empty() => {
return DissectInvalidPatternSnafu {
s,
detail: format!("'{name}' is not closed"),
@@ -425,7 +323,7 @@ impl std::str::FromStr for PatternInfo {
}
}
-impl PatternInfo {
+impl Pattern {
fn check(&self) -> Result<()> {
if self.len() == 0 {
return DissectEmptyPatternSnafu.fail();
@@ -438,21 +336,21 @@ impl PatternInfo {
let this_part = &self[i];
let next_part = self.get(i + 1);
match (this_part, next_part) {
- (PartInfo::Split(split), _) if split.is_empty() => {
+ (Part::Split(split), _) if split.is_empty() => {
return DissectInvalidPatternSnafu {
s: &self.origin,
detail: "Empty split is not allowed",
}
.fail();
}
- (PartInfo::Name(name1), Some(PartInfo::Name(name2))) => {
+ (Part::Name(name1), Some(Part::Name(name2))) => {
return DissectInvalidPatternSnafu {
s: &self.origin,
detail: format!("consecutive names are not allowed: '{name1}' '{name2}'",),
}
.fail();
}
- (PartInfo::Name(name), _) if name.is_name_empty() => {
+ (Part::Name(name), _) if name.is_name_empty() => {
if let Some(ref m) = name.start_modifier {
return DissectInvalidPatternSnafu {
s: &self.origin,
@@ -461,7 +359,7 @@ impl PatternInfo {
.fail();
}
}
- (PartInfo::Name(name), _) => match name.start_modifier {
+ (Part::Name(name), _) => match name.start_modifier {
Some(StartModifier::MapKey) => {
if map_keys.contains(&name.name) {
return DissectInvalidPatternSnafu {
@@ -509,128 +407,9 @@ impl PatternInfo {
}
}
-impl std::fmt::Display for PatternInfo {
- fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
- write!(f, "{}", self.origin)
- }
-}
-
-#[derive(Debug, Default)]
-pub struct DissectProcessorBuilder {
- fields: Fields,
- patterns: Vec<PatternInfo>,
- ignore_missing: bool,
- append_separator: Option<String>,
- output_keys: HashSet<String>,
-}
-
-impl DissectProcessorBuilder {
- fn build_output_keys(patterns: &[PatternInfo]) -> HashSet<String> {
- patterns
- .iter()
- .flat_map(|pattern| pattern.iter())
- .filter_map(|p| match p {
- PartInfo::Name(name) => {
- if !name.is_empty()
- && (name.start_modifier.is_none()
- || name
- .start_modifier
- .as_ref()
- .is_some_and(|x| matches!(x, StartModifier::Append(_))))
- {
- Some(name.to_string())
- } else {
- None
- }
- }
- _ => None,
- })
- .collect()
- }
-
- fn part_info_to_part(part_info: PartInfo, intermediate_keys: &[String]) -> Result<Part> {
- match part_info {
- PartInfo::Split(s) => Ok(Part::Split(s)),
- PartInfo::Name(n) => match n.start_modifier {
- None | Some(StartModifier::Append(_)) => {
- let index = find_key_index(intermediate_keys, &n.name, "dissect")?;
- Ok(Part::Name(Name {
- name: n.name,
- index,
- start_modifier: n.start_modifier,
- end_modifier: n.end_modifier,
- }))
- }
- _ => Ok(Part::Name(Name {
- name: n.name,
- index: usize::MAX,
- start_modifier: n.start_modifier,
- end_modifier: n.end_modifier,
- })),
- },
- }
- }
-
- fn pattern_info_to_pattern(
- pattern_info: PatternInfo,
- intermediate_keys: &[String],
- ) -> Result<Pattern> {
- let original = pattern_info.origin;
- let pattern = pattern_info
- .parts
- .into_iter()
- .map(|part_info| Self::part_info_to_part(part_info, intermediate_keys))
- .collect::<Result<Vec<_>>>()?;
- Ok(Pattern {
- origin: original,
- parts: pattern,
- })
- }
-
- fn build_patterns_from_pattern_infos(
- patterns: Vec<PatternInfo>,
- intermediate_keys: &[String],
- ) -> Result<Vec<Pattern>> {
- patterns
- .into_iter()
- .map(|pattern_info| Self::pattern_info_to_pattern(pattern_info, intermediate_keys))
- .collect()
- }
-}
-
-impl ProcessorBuilder for DissectProcessorBuilder {
- fn output_keys(&self) -> HashSet<&str> {
- self.output_keys.iter().map(|s| s.as_str()).collect()
- }
-
- fn input_keys(&self) -> HashSet<&str> {
- self.fields.iter().map(|f| f.input_field()).collect()
- }
-
- fn build(self, intermediate_keys: &[String]) -> Result<ProcessorKind> {
- let mut real_fields = vec![];
- for field in self.fields.into_iter() {
- let input_index = find_key_index(intermediate_keys, field.input_field(), "dissect")?;
-
- let input_field_info = InputFieldInfo::new(field.input_field(), input_index);
-
- let real_field = OneInputMultiOutputField::new(input_field_info, field.target_field);
- real_fields.push(real_field);
- }
- let patterns = Self::build_patterns_from_pattern_infos(self.patterns, intermediate_keys)?;
- let processor = DissectProcessor {
- fields: real_fields,
- patterns,
- ignore_missing: self.ignore_missing,
- append_separator: self.append_separator,
- };
- Ok(ProcessorKind::Dissect(processor))
- }
-}
-
#[derive(Debug, Default)]
pub struct DissectProcessor {
- fields: Vec<OneInputMultiOutputField>,
+ fields: Fields,
patterns: Vec<Pattern>,
ignore_missing: bool,
@@ -639,33 +418,37 @@ pub struct DissectProcessor {
}
impl DissectProcessor {
- fn process_pattern(&self, chs: &[char], pattern: &Pattern) -> Result<Vec<(usize, Value)>> {
+ fn process_name_value<'a>(
+ name: &'a Name,
+ value: String,
+ appends: &mut HashMap<&'a String, Vec<(String, u32)>>,
+ map: &mut Vec<(&'a String, Value)>,
+ ) {
+ match name.start_modifier {
+ Some(StartModifier::NamedSkip) => {
+ // do nothing, ignore this match
+ }
+ Some(StartModifier::Append(order)) => {
+ appends
+ .entry(&name.name)
+ .or_default()
+ .push((value, order.unwrap_or_default()));
+ }
+ Some(_) => {
+ // do nothing here: MapKey and MapVal are ignored,
+ // since the transform stage already knows the key name
+ }
+ None => {
+ map.push((&name.name, Value::String(value)));
+ }
+ }
+ }
+
+ fn process_pattern(&self, chs: &[char], pattern: &Pattern) -> Result<Vec<(String, Value)>> {
let mut map = Vec::new();
let mut pos = 0;
- let mut appends: HashMap<usize, Vec<(String, u32)>> = HashMap::new();
-
- let mut process_name_value = |name: &Name, value: String| {
- let name_index = name.index;
- match name.start_modifier {
- Some(StartModifier::NamedSkip) => {
- // do nothing, ignore this match
- }
- Some(StartModifier::Append(order)) => {
- appends
- .entry(name_index)
- .or_default()
- .push((value, order.unwrap_or_default()));
- }
- Some(_) => {
- // do nothing, ignore MapKey and MapVal
- // because transform can know the key name
- }
- None => {
- map.push((name_index, Value::String(value)));
- }
- }
- };
+ let mut appends: HashMap<&String, Vec<(String, u32)>> = HashMap::new();
for i in 0..pattern.len() {
let this_part = &pattern[i];
@@ -701,7 +484,7 @@ impl DissectProcessor {
// if Name part is the last part, then the rest of the input is the value
(Part::Name(name), None) => {
let value = chs[pos..].iter().collect::<String>();
- process_name_value(name, value);
+ Self::process_name_value(name, value, &mut appends, &mut map);
}
// if Name part, and next part is Split, then find the matched value of the name
@@ -717,7 +500,7 @@ impl DissectProcessor {
if !name.is_name_empty() {
let value = chs[pos..end].iter().collect::<String>();
- process_name_value(name, value);
+ Self::process_name_value(name, value, &mut appends, &mut map);
}
if name.is_end_modifier_set() {
@@ -745,10 +528,10 @@ impl DissectProcessor {
}
}
- Ok(map)
+ Ok(map.into_iter().map(|(k, v)| (k.to_string(), v)).collect())
}
- fn process(&self, val: &str) -> Result<Vec<(usize, Value)>> {
+ fn process(&self, val: &str) -> Result<Vec<(String, Value)>> {
let chs = val.chars().collect::<Vec<char>>();
for pattern in &self.patterns {
@@ -760,7 +543,7 @@ impl DissectProcessor {
}
}
-impl TryFrom<&yaml_rust::yaml::Hash> for DissectProcessorBuilder {
+impl TryFrom<&yaml_rust::yaml::Hash> for DissectProcessor {
type Error = Error;
fn try_from(value: &yaml_rust::yaml::Hash) -> Result<Self> {
@@ -782,7 +565,7 @@ impl TryFrom<&yaml_rust::yaml::Hash> for DissectProcessorBuilder {
fields = yaml_new_fields(v, FIELDS_NAME)?;
}
PATTERN_NAME => {
- let pattern: PatternInfo = yaml_parse_string(v, PATTERN_NAME)?;
+ let pattern: Pattern = yaml_parse_string(v, PATTERN_NAME)?;
patterns = vec![pattern];
}
PATTERNS_NAME => {
@@ -797,13 +580,12 @@ impl TryFrom<&yaml_rust::yaml::Hash> for DissectProcessorBuilder {
_ => {}
}
}
- let output_keys = Self::build_output_keys(&patterns);
- let builder = DissectProcessorBuilder {
+ // let output_keys = Self::build_output_keys(&patterns);
+ let builder = DissectProcessor {
fields,
patterns,
ignore_missing,
append_separator,
- output_keys,
};
Ok(builder)
@@ -819,21 +601,21 @@ impl Processor for DissectProcessor {
self.ignore_missing
}
- fn exec_mut(&self, val: &mut Vec<Value>) -> Result<()> {
+ fn exec_mut(&self, val: &mut IntermediateStatus) -> Result<()> {
for field in self.fields.iter() {
- let index = field.input_index();
+ let index = field.input_field();
match val.get(index) {
Some(Value::String(val_str)) => {
let r = self.process(val_str)?;
for (k, v) in r {
- val[k] = v;
+ val.insert(k, v);
}
}
Some(Value::Null) | None => {
if !self.ignore_missing {
return ProcessorMissingFieldSnafu {
processor: self.kind(),
- field: field.input_name(),
+ field: field.input_field(),
}
.fail();
}
@@ -859,26 +641,19 @@ fn is_valid_char(ch: char) -> bool {
mod tests {
use ahash::HashMap;
- use super::{DissectProcessor, EndModifier, NameInfo, PartInfo, PatternInfo, StartModifier};
- use crate::etl::processor::dissect::DissectProcessorBuilder;
+ use super::{DissectProcessor, EndModifier, Name, Part, StartModifier};
+ use crate::etl::processor::dissect::Pattern;
use crate::etl::value::Value;
fn assert(pattern_str: &str, input: &str, expected: HashMap<String, Value>) {
let chs = input.chars().collect::<Vec<char>>();
- let pattern_infos: Vec<PatternInfo> = vec![pattern_str.parse().unwrap()];
- let output_keys: Vec<String> = DissectProcessorBuilder::build_output_keys(&pattern_infos)
- .into_iter()
- .collect();
- let pattern =
- DissectProcessorBuilder::build_patterns_from_pattern_infos(pattern_infos, &output_keys)
- .unwrap();
+ let patterns: Vec<Pattern> = vec![pattern_str.parse().unwrap()];
let processor = DissectProcessor::default();
let result: HashMap<String, Value> = processor
- .process_pattern(&chs, &pattern[0])
+ .process_pattern(&chs, &patterns[0])
.unwrap()
.into_iter()
- .map(|(k, v)| (output_keys[k].to_string(), v))
.collect();
assert_eq!(result, expected, "pattern: {}", pattern_str);
@@ -889,28 +664,28 @@ mod tests {
let cases = [(
"%{clientip} %{ident} %{auth} [%{timestamp}] \"%{verb} %{request} HTTP/%{httpversion}\" %{status} %{size}",
vec![
- PartInfo::Name("clientip".into()),
- PartInfo::Split(" ".into()),
- PartInfo::Name("ident".into()),
- PartInfo::Split(" ".into()),
- PartInfo::Name("auth".into()),
- PartInfo::Split(" [".into()),
- PartInfo::Name("timestamp".into()),
- PartInfo::Split("] \"".into()),
- PartInfo::Name("verb".into()),
- PartInfo::Split(" ".into()),
- PartInfo::Name("request".into()),
- PartInfo::Split(" HTTP/".into()),
- PartInfo::Name("httpversion".into()),
- PartInfo::Split("\" ".into()),
- PartInfo::Name("status".into()),
- PartInfo::Split(" ".into()),
- PartInfo::Name("size".into()),
+ Part::Name("clientip".into()),
+ Part::Split(" ".into()),
+ Part::Name("ident".into()),
+ Part::Split(" ".into()),
+ Part::Name("auth".into()),
+ Part::Split(" [".into()),
+ Part::Name("timestamp".into()),
+ Part::Split("] \"".into()),
+ Part::Name("verb".into()),
+ Part::Split(" ".into()),
+ Part::Name("request".into()),
+ Part::Split(" HTTP/".into()),
+ Part::Name("httpversion".into()),
+ Part::Split("\" ".into()),
+ Part::Name("status".into()),
+ Part::Split(" ".into()),
+ Part::Name("size".into()),
],
)];
for (pattern, expected) in cases.into_iter() {
- let p: PatternInfo = pattern.parse().unwrap();
+ let p: Pattern = pattern.parse().unwrap();
assert_eq!(p.parts, expected);
}
}
@@ -921,13 +696,13 @@ mod tests {
(
"%{} %{}",
vec![
- PartInfo::Name(NameInfo {
+ Part::Name(Name {
name: "".into(),
start_modifier: None,
end_modifier: None,
}),
- PartInfo::Split(" ".into()),
- PartInfo::Name(NameInfo {
+ Part::Split(" ".into()),
+ Part::Name(Name {
name: "".into(),
start_modifier: None,
end_modifier: None,
@@ -937,61 +712,61 @@ mod tests {
(
"%{ts->} %{level}",
vec![
- PartInfo::Name(NameInfo {
+ Part::Name(Name {
name: "ts".into(),
start_modifier: None,
end_modifier: Some(EndModifier),
}),
- PartInfo::Split(" ".into()),
- PartInfo::Name("level".into()),
+ Part::Split(" ".into()),
+ Part::Name("level".into()),
],
),
(
"[%{ts}]%{->}[%{level}]",
vec![
- PartInfo::Split("[".into()),
- PartInfo::Name(NameInfo {
+ Part::Split("[".into()),
+ Part::Name(Name {
name: "ts".into(),
start_modifier: None,
end_modifier: None,
}),
- PartInfo::Split("]".into()),
- PartInfo::Name(NameInfo {
+ Part::Split("]".into()),
+ Part::Name(Name {
name: "".into(),
start_modifier: None,
end_modifier: Some(EndModifier),
}),
- PartInfo::Split("[".into()),
- PartInfo::Name(NameInfo {
+ Part::Split("[".into()),
+ Part::Name(Name {
name: "level".into(),
start_modifier: None,
end_modifier: None,
}),
- PartInfo::Split("]".into()),
+ Part::Split("]".into()),
],
),
(
"%{+name} %{+name} %{+name} %{+name}",
vec![
- PartInfo::Name(NameInfo {
+ Part::Name(Name {
name: "name".into(),
start_modifier: Some(StartModifier::Append(None)),
end_modifier: None,
}),
- PartInfo::Split(" ".into()),
- PartInfo::Name(NameInfo {
+ Part::Split(" ".into()),
+ Part::Name(Name {
name: "name".into(),
start_modifier: Some(StartModifier::Append(None)),
end_modifier: None,
}),
- PartInfo::Split(" ".into()),
- PartInfo::Name(NameInfo {
+ Part::Split(" ".into()),
+ Part::Name(Name {
name: "name".into(),
start_modifier: Some(StartModifier::Append(None)),
end_modifier: None,
}),
- PartInfo::Split(" ".into()),
- PartInfo::Name(NameInfo {
+ Part::Split(" ".into()),
+ Part::Name(Name {
name: "name".into(),
start_modifier: Some(StartModifier::Append(None)),
end_modifier: None,
@@ -1001,25 +776,25 @@ mod tests {
(
"%{+name/2} %{+name/4} %{+name/3} %{+name/1}",
vec![
- PartInfo::Name(NameInfo {
+ Part::Name(Name {
name: "name".into(),
start_modifier: Some(StartModifier::Append(Some(2))),
end_modifier: None,
}),
- PartInfo::Split(" ".into()),
- PartInfo::Name(NameInfo {
+ Part::Split(" ".into()),
+ Part::Name(Name {
name: "name".into(),
start_modifier: Some(StartModifier::Append(Some(4))),
end_modifier: None,
}),
- PartInfo::Split(" ".into()),
- PartInfo::Name(NameInfo {
+ Part::Split(" ".into()),
+ Part::Name(Name {
name: "name".into(),
start_modifier: Some(StartModifier::Append(Some(3))),
end_modifier: None,
}),
- PartInfo::Split(" ".into()),
- PartInfo::Name(NameInfo {
+ Part::Split(" ".into()),
+ Part::Name(Name {
name: "name".into(),
start_modifier: Some(StartModifier::Append(Some(1))),
end_modifier: None,
@@ -1029,67 +804,67 @@ mod tests {
(
"%{clientip} %{?ident} %{?auth} [%{timestamp}]",
vec![
- PartInfo::Name(NameInfo {
+ Part::Name(Name {
name: "clientip".into(),
start_modifier: None,
end_modifier: None,
}),
- PartInfo::Split(" ".into()),
- PartInfo::Name(NameInfo {
+ Part::Split(" ".into()),
+ Part::Name(Name {
name: "ident".into(),
start_modifier: Some(StartModifier::NamedSkip),
end_modifier: None,
}),
- PartInfo::Split(" ".into()),
- PartInfo::Name(NameInfo {
+ Part::Split(" ".into()),
+ Part::Name(Name {
name: "auth".into(),
start_modifier: Some(StartModifier::NamedSkip),
end_modifier: None,
}),
- PartInfo::Split(" [".into()),
- PartInfo::Name(NameInfo {
+ Part::Split(" [".into()),
+ Part::Name(Name {
name: "timestamp".into(),
start_modifier: None,
end_modifier: None,
}),
- PartInfo::Split("]".into()),
+ Part::Split("]".into()),
],
),
(
"[%{ts}] [%{level}] %{*p1}:%{&p1} %{*p2}:%{&p2}",
vec![
- PartInfo::Split("[".into()),
- PartInfo::Name(NameInfo {
+ Part::Split("[".into()),
+ Part::Name(Name {
name: "ts".into(),
start_modifier: None,
end_modifier: None,
}),
- PartInfo::Split("] [".into()),
- PartInfo::Name(NameInfo {
+ Part::Split("] [".into()),
+ Part::Name(Name {
name: "level".into(),
start_modifier: None,
end_modifier: None,
}),
- PartInfo::Split("] ".into()),
- PartInfo::Name(NameInfo {
+ Part::Split("] ".into()),
+ Part::Name(Name {
name: "p1".into(),
start_modifier: Some(StartModifier::MapKey),
end_modifier: None,
}),
- PartInfo::Split(":".into()),
- PartInfo::Name(NameInfo {
+ Part::Split(":".into()),
+ Part::Name(Name {
name: "p1".into(),
start_modifier: Some(StartModifier::MapVal),
end_modifier: None,
}),
- PartInfo::Split(" ".into()),
- PartInfo::Name(NameInfo {
+ Part::Split(" ".into()),
+ Part::Name(Name {
name: "p2".into(),
start_modifier: Some(StartModifier::MapKey),
end_modifier: None,
}),
- PartInfo::Split(":".into()),
- PartInfo::Name(NameInfo {
+ Part::Split(":".into()),
+ Part::Name(Name {
name: "p2".into(),
start_modifier: Some(StartModifier::MapVal),
end_modifier: None,
@@ -1099,13 +874,13 @@ mod tests {
(
"%{&p1}:%{*p1}",
vec![
- PartInfo::Name(NameInfo {
+ Part::Name(Name {
name: "p1".into(),
start_modifier: Some(StartModifier::MapVal),
end_modifier: None,
}),
- PartInfo::Split(":".into()),
- PartInfo::Name(NameInfo {
+ Part::Split(":".into()),
+ Part::Name(Name {
name: "p1".into(),
start_modifier: Some(StartModifier::MapKey),
end_modifier: None,
@@ -1115,7 +890,7 @@ mod tests {
];
for (pattern, expected) in cases.into_iter() {
- let p: PatternInfo = pattern.parse().unwrap();
+ let p: Pattern = pattern.parse().unwrap();
assert_eq!(p.parts, expected);
}
}
@@ -1195,7 +970,7 @@ mod tests {
];
for (pattern, expected) in cases.into_iter() {
- let err = pattern.parse::<PatternInfo>().unwrap_err();
+ let err = pattern.parse::<Pattern>().unwrap_err();
assert_eq!(err.to_string(), expected);
}
}
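
Across these processor diffs, `exec_mut` changes from taking `&mut Vec<Value>` (values addressed by precomputed intermediate-key indices) to `&mut IntermediateStatus`, with reads and writes done by field name. The following is a minimal sketch of that keyed execution model, assuming `IntermediateStatus` is (or wraps) a `BTreeMap<String, Value>`, as the urlencoding hunk further below suggests; the `Value` enum and `UppercaseProcessor` here are hypothetical stand-ins rather than pipeline types:

```rust
use std::collections::BTreeMap;

/// Toy stand-in for the pipeline's `Value` type.
#[derive(Debug, Clone, PartialEq)]
enum Value {
    String(String),
    Null,
}

/// Sketch of a processor that looks fields up and writes them back by key.
struct UppercaseProcessor {
    input_field: String,
    target_field: Option<String>,
    ignore_missing: bool,
}

impl UppercaseProcessor {
    fn exec_mut(&self, val: &mut BTreeMap<String, Value>) -> Result<(), String> {
        match val.get(&self.input_field) {
            Some(Value::String(s)) => {
                let result = Value::String(s.to_uppercase());
                // Fall back to the input key when no target field is configured,
                // mirroring the `target_or_input_field()` calls in the diff.
                let key = self
                    .target_field
                    .clone()
                    .unwrap_or_else(|| self.input_field.clone());
                val.insert(key, result);
                Ok(())
            }
            Some(Value::Null) | None if self.ignore_missing => Ok(()),
            _ => Err(format!("processor missing field `{}`", self.input_field)),
        }
    }
}

fn main() {
    let processor = UppercaseProcessor {
        input_field: "message".to_string(),
        target_field: Some("message_upper".to_string()),
        ignore_missing: false,
    };
    let mut status = BTreeMap::new();
    status.insert("message".to_string(), Value::String("hello".to_string()));
    processor.exec_mut(&mut status).unwrap();
    assert_eq!(
        status.get("message_upper"),
        Some(&Value::String("HELLO".to_string()))
    );
}
```

With keys resolved at execution time, the intermediate-key tables and the `*ProcessorBuilder` types that computed them become unnecessary, which is why the builder structs are deleted file by file in the hunks below.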
diff --git a/src/pipeline/src/etl/processor/epoch.rs b/src/pipeline/src/etl/processor/epoch.rs
index f2c03fd120de..29ad6bd3d97d 100644
--- a/src/pipeline/src/etl/processor/epoch.rs
+++ b/src/pipeline/src/etl/processor/epoch.rs
@@ -12,17 +12,17 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use ahash::HashSet;
use snafu::{OptionExt, ResultExt};
+use super::IntermediateStatus;
use crate::etl::error::{
EpochInvalidResolutionSnafu, Error, FailedToParseIntSnafu, KeyMustBeStringSnafu,
ProcessorMissingFieldSnafu, ProcessorUnsupportedValueSnafu, Result,
};
-use crate::etl::field::{Fields, OneInputOneOutputField};
+use crate::etl::field::Fields;
use crate::etl::processor::{
- yaml_bool, yaml_new_field, yaml_new_fields, yaml_string, Processor, ProcessorBuilder,
- ProcessorKind, FIELDS_NAME, FIELD_NAME, IGNORE_MISSING_NAME,
+ yaml_bool, yaml_new_field, yaml_new_fields, yaml_string, Processor, FIELDS_NAME, FIELD_NAME,
+ IGNORE_MISSING_NAME,
};
use crate::etl::value::time::{
MICROSECOND_RESOLUTION, MICRO_RESOLUTION, MILLISECOND_RESOLUTION, MILLI_RESOLUTION,
@@ -57,56 +57,12 @@ impl TryFrom<&str> for Resolution {
}
}
-#[derive(Debug, Default)]
-pub struct EpochProcessorBuilder {
- fields: Fields,
- resolution: Resolution,
- ignore_missing: bool,
-}
-
-impl ProcessorBuilder for EpochProcessorBuilder {
- fn output_keys(&self) -> HashSet<&str> {
- self.fields
- .iter()
- .map(|f| f.target_or_input_field())
- .collect()
- }
-
- fn input_keys(&self) -> HashSet<&str> {
- self.fields.iter().map(|f| f.input_field()).collect()
- }
-
- fn build(self, intermediate_keys: &[String]) -> Result<ProcessorKind> {
- self.build(intermediate_keys).map(ProcessorKind::Epoch)
- }
-}
-
-impl EpochProcessorBuilder {
- pub fn build(self, intermediate_keys: &[String]) -> Result<EpochProcessor> {
- let mut real_fields = vec![];
- for field in self.fields.into_iter() {
- let input = OneInputOneOutputField::build(
- "epoch",
- intermediate_keys,
- field.input_field(),
- field.target_or_input_field(),
- )?;
- real_fields.push(input);
- }
- Ok(EpochProcessor {
- fields: real_fields,
- resolution: self.resolution,
- ignore_missing: self.ignore_missing,
- })
- }
-}
-
/// support string, integer, float, time, epoch
/// deprecated it should be removed in the future
/// Reserved for compatibility only
#[derive(Debug, Default)]
pub struct EpochProcessor {
- fields: Vec<OneInputOneOutputField>,
+ fields: Fields,
resolution: Resolution,
ignore_missing: bool,
// description
@@ -157,7 +113,7 @@ impl EpochProcessor {
}
}
-impl TryFrom<&yaml_rust::yaml::Hash> for EpochProcessorBuilder {
+impl TryFrom<&yaml_rust::yaml::Hash> for EpochProcessor {
type Error = Error;
fn try_from(hash: &yaml_rust::yaml::Hash) -> Result<Self> {
@@ -188,7 +144,7 @@ impl TryFrom<&yaml_rust::yaml::Hash> for EpochProcessorBuilder {
_ => {}
}
}
- let builder = EpochProcessorBuilder {
+ let builder = EpochProcessor {
fields,
resolution,
ignore_missing,
@@ -207,23 +163,23 @@ impl Processor for EpochProcessor {
self.ignore_missing
}
- fn exec_mut(&self, val: &mut Vec<Value>) -> Result<()> {
+ fn exec_mut(&self, val: &mut IntermediateStatus) -> Result<()> {
for field in self.fields.iter() {
- let index = field.input_index();
+ let index = field.input_field();
match val.get(index) {
Some(Value::Null) | None => {
if !self.ignore_missing {
return ProcessorMissingFieldSnafu {
processor: self.kind(),
- field: field.input_name(),
+ field: field.input_field(),
}
.fail();
}
}
Some(v) => {
let timestamp = self.parse(v)?;
- let output_index = field.output_index();
- val[output_index] = Value::Timestamp(timestamp);
+ let output_index = field.target_or_input_field();
+ val.insert(output_index.to_string(), Value::Timestamp(timestamp));
}
}
}
diff --git a/src/pipeline/src/etl/processor/gsub.rs b/src/pipeline/src/etl/processor/gsub.rs
index 54c8306ec4de..7f0f601f44f3 100644
--- a/src/pipeline/src/etl/processor/gsub.rs
+++ b/src/pipeline/src/etl/processor/gsub.rs
@@ -12,18 +12,18 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use ahash::HashSet;
use regex::Regex;
use snafu::{OptionExt, ResultExt};
+use super::IntermediateStatus;
use crate::etl::error::{
Error, GsubPatternRequiredSnafu, GsubReplacementRequiredSnafu, KeyMustBeStringSnafu,
ProcessorExpectStringSnafu, ProcessorMissingFieldSnafu, RegexSnafu, Result,
};
-use crate::etl::field::{Fields, OneInputOneOutputField};
+use crate::etl::field::Fields;
use crate::etl::processor::{
- yaml_bool, yaml_new_field, yaml_new_fields, yaml_string, ProcessorBuilder, ProcessorKind,
- FIELDS_NAME, FIELD_NAME, IGNORE_MISSING_NAME, PATTERN_NAME,
+ yaml_bool, yaml_new_field, yaml_new_fields, yaml_string, FIELDS_NAME, FIELD_NAME,
+ IGNORE_MISSING_NAME, PATTERN_NAME,
};
use crate::etl::value::Value;
@@ -31,94 +31,18 @@ pub(crate) const PROCESSOR_GSUB: &str = "gsub";
const REPLACEMENT_NAME: &str = "replacement";
-#[derive(Debug, Default)]
-pub struct GsubProcessorBuilder {
- fields: Fields,
- pattern: Option<Regex>,
- replacement: Option<String>,
- ignore_missing: bool,
-}
-
-impl ProcessorBuilder for GsubProcessorBuilder {
- fn output_keys(&self) -> HashSet<&str> {
- self.fields
- .iter()
- .map(|f| f.target_or_input_field())
- .collect()
- }
-
- fn input_keys(&self) -> HashSet<&str> {
- self.fields.iter().map(|f| f.input_field()).collect()
- }
-
- fn build(self, intermediate_keys: &[String]) -> Result<ProcessorKind> {
- self.build(intermediate_keys).map(ProcessorKind::Gsub)
- }
-}
-
-impl GsubProcessorBuilder {
- fn check(self) -> Result<Self> {
- if self.pattern.is_none() {
- return GsubPatternRequiredSnafu.fail();
- }
-
- if self.replacement.is_none() {
- return GsubReplacementRequiredSnafu.fail();
- }
-
- Ok(self)
- }
-
- fn build(self, intermediate_keys: &[String]) -> Result<GsubProcessor> {
- let mut real_fields = vec![];
- for field in self.fields.into_iter() {
- let input = OneInputOneOutputField::build(
- "gsub",
- intermediate_keys,
- field.input_field(),
- field.target_or_input_field(),
- )?;
- real_fields.push(input);
- }
- Ok(GsubProcessor {
- fields: real_fields,
- pattern: self.pattern,
- replacement: self.replacement,
- ignore_missing: self.ignore_missing,
- })
- }
-}
-
/// A processor to replace all matches of a pattern in string by a replacement, only support string value, and array string value
-#[derive(Debug, Default)]
+#[derive(Debug)]
pub struct GsubProcessor {
- fields: Vec<OneInputOneOutputField>,
- pattern: Option<Regex>,
- replacement: Option<String>,
+ fields: Fields,
+ pattern: Regex,
+ replacement: String,
ignore_missing: bool,
}
impl GsubProcessor {
- fn check(self) -> Result<Self> {
- if self.pattern.is_none() {
- return GsubPatternRequiredSnafu.fail();
- }
-
- if self.replacement.is_none() {
- return GsubReplacementRequiredSnafu.fail();
- }
-
- Ok(self)
- }
-
fn process_string(&self, val: &str) -> Result<Value> {
- let replacement = self.replacement.as_ref().unwrap();
- let new_val = self
- .pattern
- .as_ref()
- .unwrap()
- .replace_all(val, replacement)
- .to_string();
+ let new_val = self.pattern.replace_all(val, &self.replacement).to_string();
let val = Value::String(new_val);
Ok(val)
@@ -136,7 +60,7 @@ impl GsubProcessor {
}
}
-impl TryFrom<&yaml_rust::yaml::Hash> for GsubProcessorBuilder {
+impl TryFrom<&yaml_rust::yaml::Hash> for GsubProcessor {
type Error = Error;
fn try_from(value: &yaml_rust::yaml::Hash) -> Result<Self> {
@@ -176,14 +100,12 @@ impl TryFrom<&yaml_rust::yaml::Hash> for GsubProcessorBuilder {
}
}
- let builder = GsubProcessorBuilder {
+ Ok(GsubProcessor {
fields,
- pattern,
- replacement,
+ pattern: pattern.context(GsubPatternRequiredSnafu)?,
+ replacement: replacement.context(GsubReplacementRequiredSnafu)?,
ignore_missing,
- };
-
- builder.check()
+ })
}
}
@@ -196,23 +118,23 @@ impl crate::etl::processor::Processor for GsubProcessor {
self.ignore_missing
}
- fn exec_mut(&self, val: &mut Vec<Value>) -> Result<()> {
+ fn exec_mut(&self, val: &mut IntermediateStatus) -> Result<()> {
for field in self.fields.iter() {
- let index = field.input_index();
+ let index = field.input_field();
match val.get(index) {
Some(Value::Null) | None => {
if !self.ignore_missing {
return ProcessorMissingFieldSnafu {
processor: self.kind(),
- field: field.input_name(),
+ field: field.input_field(),
}
.fail();
}
}
Some(v) => {
let result = self.process(v)?;
- let output_index = field.output_index();
- val[output_index] = result;
+ let output_index = field.target_or_input_field();
+ val.insert(output_index.to_string(), result);
}
}
}
@@ -222,15 +144,17 @@ impl crate::etl::processor::Processor for GsubProcessor {
#[cfg(test)]
mod tests {
+ use super::*;
use crate::etl::processor::gsub::GsubProcessor;
use crate::etl::value::Value;
#[test]
fn test_string_value() {
let processor = GsubProcessor {
- pattern: Some(regex::Regex::new(r"\d+").unwrap()),
- replacement: Some("xxx".to_string()),
- ..Default::default()
+ fields: Fields::default(),
+ pattern: regex::Regex::new(r"\d+").unwrap(),
+ replacement: "xxx".to_string(),
+ ignore_missing: false,
};
let val = Value::String("123".to_string());
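
A recurring pattern in this refactor is replacing `Option<T>` fields plus a separate `check()` step with snafu's `OptionExt::context` at construction time, so `TryFrom<&yaml_rust::yaml::Hash>` yields a fully validated processor directly. The sketch below shows that pattern in isolation; the `GsubConfig` type and error variant names are illustrative (the real crate's selectors come from its `etl::error` module), and the example assumes the `snafu` crate:

```rust
use snafu::{OptionExt, Snafu};

/// Hypothetical error type standing in for the pipeline's error variants.
#[derive(Debug, Snafu)]
enum Error {
    #[snafu(display("gsub processor: `pattern` is required"))]
    GsubPatternRequired,
    #[snafu(display("gsub processor: `replacement` is required"))]
    GsubReplacementRequired,
}

/// Raw options as parsed from YAML, before validation.
struct GsubConfig {
    pattern: Option<String>,
    replacement: Option<String>,
}

/// Fully validated processor: required options are no longer `Option`.
struct Gsub {
    pattern: String,
    replacement: String,
}

impl TryFrom<GsubConfig> for Gsub {
    type Error = Error;

    fn try_from(cfg: GsubConfig) -> Result<Self, Error> {
        // `OptionExt::context` turns a `None` into the selected error,
        // replacing the old builder-plus-`check()` two-step.
        Ok(Gsub {
            pattern: cfg.pattern.context(GsubPatternRequiredSnafu)?,
            replacement: cfg.replacement.context(GsubReplacementRequiredSnafu)?,
        })
    }
}

fn main() {
    let ok = Gsub::try_from(GsubConfig {
        pattern: Some(r"\d+".into()),
        replacement: Some("xxx".into()),
    });
    if let Ok(g) = ok {
        println!("pattern={} replacement={}", g.pattern, g.replacement);
    }

    let err = Gsub::try_from(GsubConfig {
        pattern: None,
        replacement: Some("xxx".into()),
    });
    assert!(err.is_err());
}
```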
diff --git a/src/pipeline/src/etl/processor/join.rs b/src/pipeline/src/etl/processor/join.rs
index ddbc086ab8da..72fafdbf7dd1 100644
--- a/src/pipeline/src/etl/processor/join.rs
+++ b/src/pipeline/src/etl/processor/join.rs
@@ -12,105 +12,43 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use ahash::HashSet;
use snafu::OptionExt;
+use super::IntermediateStatus;
use crate::etl::error::{
Error, JoinSeparatorRequiredSnafu, KeyMustBeStringSnafu, ProcessorExpectStringSnafu,
ProcessorMissingFieldSnafu, Result,
};
-use crate::etl::field::{Fields, OneInputOneOutputField};
+use crate::etl::field::Fields;
use crate::etl::processor::{
- yaml_bool, yaml_new_field, yaml_new_fields, yaml_string, Processor, ProcessorBuilder,
- ProcessorKind, FIELDS_NAME, FIELD_NAME, IGNORE_MISSING_NAME, SEPARATOR_NAME,
+ yaml_bool, yaml_new_field, yaml_new_fields, yaml_string, Processor, FIELDS_NAME, FIELD_NAME,
+ IGNORE_MISSING_NAME, SEPARATOR_NAME,
};
use crate::etl::value::{Array, Value};
pub(crate) const PROCESSOR_JOIN: &str = "join";
-#[derive(Debug, Default)]
-pub struct JoinProcessorBuilder {
- fields: Fields,
- separator: Option<String>,
- ignore_missing: bool,
-}
-
-impl ProcessorBuilder for JoinProcessorBuilder {
- fn output_keys(&self) -> HashSet<&str> {
- self.fields
- .iter()
- .map(|f| f.target_or_input_field())
- .collect()
- }
-
- fn input_keys(&self) -> HashSet<&str> {
- self.fields.iter().map(|f| f.input_field()).collect()
- }
-
- fn build(self, intermediate_keys: &[String]) -> Result<ProcessorKind> {
- self.build(intermediate_keys).map(ProcessorKind::Join)
- }
-}
-
-impl JoinProcessorBuilder {
- fn check(self) -> Result<Self> {
- if self.separator.is_none() {
- return JoinSeparatorRequiredSnafu.fail();
- }
-
- Ok(self)
- }
-
- pub fn build(self, intermediate_keys: &[String]) -> Result<JoinProcessor> {
- let mut real_fields = vec![];
- for field in self.fields.into_iter() {
- let input = OneInputOneOutputField::build(
- "join",
- intermediate_keys,
- field.input_field(),
- field.target_or_input_field(),
- )?;
- real_fields.push(input);
- }
-
- Ok(JoinProcessor {
- fields: real_fields,
- separator: self.separator,
- ignore_missing: self.ignore_missing,
- })
- }
-}
-
/// A processor to join each element of an array into a single string using a separator string between each element
#[derive(Debug, Default)]
pub struct JoinProcessor {
- fields: Vec<OneInputOneOutputField>,
- separator: Option<String>,
+ fields: Fields,
+ separator: String,
ignore_missing: bool,
}
impl JoinProcessor {
fn process(&self, arr: &Array) -> Result<Value> {
- let sep = self.separator.as_ref().unwrap();
let val = arr
.iter()
.map(|v| v.to_str_value())
.collect::<Vec<String>>()
- .join(sep);
+ .join(&self.separator);
Ok(Value::String(val))
}
-
- fn check(self) -> Result<Self> {
- if self.separator.is_none() {
- return JoinSeparatorRequiredSnafu.fail();
- }
-
- Ok(self)
- }
}
-impl TryFrom<&yaml_rust::yaml::Hash> for JoinProcessorBuilder {
+impl TryFrom<&yaml_rust::yaml::Hash> for JoinProcessor {
type Error = Error;
fn try_from(value: &yaml_rust::yaml::Hash) -> Result<Self> {
@@ -140,12 +78,11 @@ impl TryFrom<&yaml_rust::yaml::Hash> for JoinProcessorBuilder {
}
}
- let builder = JoinProcessorBuilder {
+ Ok(JoinProcessor {
fields,
- separator,
+ separator: separator.context(JoinSeparatorRequiredSnafu)?,
ignore_missing,
- };
- builder.check()
+ })
}
}
@@ -158,20 +95,20 @@ impl Processor for JoinProcessor {
self.ignore_missing
}
- fn exec_mut(&self, val: &mut Vec<Value>) -> Result<()> {
+ fn exec_mut(&self, val: &mut IntermediateStatus) -> Result<()> {
for field in self.fields.iter() {
- let index = field.input_index();
+ let index = field.input_field();
match val.get(index) {
Some(Value::Array(arr)) => {
let result = self.process(arr)?;
- let output_index = field.output_index();
- val[output_index] = result;
+ let output_index = field.target_or_input_field();
+ val.insert(output_index.to_string(), result);
}
Some(Value::Null) | None => {
if !self.ignore_missing {
return ProcessorMissingFieldSnafu {
processor: self.kind(),
- field: field.input_name(),
+ field: field.input_field(),
}
.fail();
}
@@ -199,7 +136,7 @@ mod tests {
#[test]
fn test_join_processor() {
let processor = JoinProcessor {
- separator: Some("-".to_string()),
+ separator: "-".to_string(),
..Default::default()
};
diff --git a/src/pipeline/src/etl/processor/json_path.rs b/src/pipeline/src/etl/processor/json_path.rs
index c09d338c637f..92916263e4e9 100644
--- a/src/pipeline/src/etl/processor/json_path.rs
+++ b/src/pipeline/src/etl/processor/json_path.rs
@@ -12,17 +12,15 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use ahash::HashSet;
use jsonpath_rust::JsonPath;
use snafu::{OptionExt, ResultExt};
use super::{
- yaml_bool, yaml_new_field, yaml_new_fields, yaml_string, Processor, ProcessorBuilder,
+ yaml_bool, yaml_new_field, yaml_new_fields, yaml_string, IntermediateStatus, Processor,
FIELDS_NAME, FIELD_NAME, IGNORE_MISSING_NAME, JSON_PATH_NAME, JSON_PATH_RESULT_INDEX_NAME,
};
use crate::etl::error::{Error, Result};
-use crate::etl::field::{Fields, OneInputOneOutputField};
-use crate::etl::processor::ProcessorKind;
+use crate::etl::field::Fields;
use crate::etl_error::{
JsonPathParseResultIndexSnafu, JsonPathParseSnafu, KeyMustBeStringSnafu,
ProcessorMissingFieldSnafu,
@@ -31,54 +29,7 @@ use crate::Value;
pub(crate) const PROCESSOR_JSON_PATH: &str = "json_path";
-#[derive(Debug)]
-pub struct JsonPathProcessorBuilder {
- fields: Fields,
- json_path: JsonPath<Value>,
- ignore_missing: bool,
- result_idex: Option<usize>,
-}
-
-impl JsonPathProcessorBuilder {
- fn build(self, intermediate_keys: &[String]) -> Result<JsonPathProcessor> {
- let mut real_fields = vec![];
- for field in self.fields.into_iter() {
- let input = OneInputOneOutputField::build(
- JSON_PATH_NAME,
- intermediate_keys,
- field.input_field(),
- field.target_or_input_field(),
- )?;
- real_fields.push(input);
- }
-
- Ok(JsonPathProcessor {
- fields: real_fields,
- json_path: self.json_path,
- ignore_missing: self.ignore_missing,
- result_idex: self.result_idex,
- })
- }
-}
-
-impl ProcessorBuilder for JsonPathProcessorBuilder {
- fn output_keys(&self) -> HashSet<&str> {
- self.fields
- .iter()
- .map(|f| f.target_or_input_field())
- .collect()
- }
-
- fn input_keys(&self) -> HashSet<&str> {
- self.fields.iter().map(|f| f.input_field()).collect()
- }
-
- fn build(self, intermediate_keys: &[String]) -> Result<ProcessorKind> {
- self.build(intermediate_keys).map(ProcessorKind::JsonPath)
- }
-}
-
-impl TryFrom<&yaml_rust::yaml::Hash> for JsonPathProcessorBuilder {
+impl TryFrom<&yaml_rust::yaml::Hash> for JsonPathProcessor {
type Error = Error;
fn try_from(value: &yaml_rust::yaml::Hash) -> std::result::Result<Self, Self::Error> {
@@ -116,40 +67,36 @@ impl TryFrom<&yaml_rust::yaml::Hash> for JsonPathProcessorBuilder {
_ => {}
}
}
- if let Some(json_path) = json_path {
- let processor = JsonPathProcessorBuilder {
- fields,
- json_path,
- ignore_missing,
- result_idex,
- };
-
- Ok(processor)
- } else {
- ProcessorMissingFieldSnafu {
+
+ let processor = JsonPathProcessor {
+ fields,
+ json_path: json_path.context(ProcessorMissingFieldSnafu {
processor: PROCESSOR_JSON_PATH,
field: JSON_PATH_NAME,
- }
- .fail()
- }
+ })?,
+ ignore_missing,
+ result_index: result_idex,
+ };
+
+ Ok(processor)
}
}
#[derive(Debug)]
pub struct JsonPathProcessor {
- fields: Vec<OneInputOneOutputField>,
+ fields: Fields,
json_path: JsonPath<Value>,
ignore_missing: bool,
- result_idex: Option<usize>,
+ result_index: Option<usize>,
}
impl Default for JsonPathProcessor {
fn default() -> Self {
JsonPathProcessor {
- fields: vec![],
+ fields: Fields::default(),
json_path: JsonPath::try_from("$").unwrap(),
ignore_missing: false,
- result_idex: None,
+ result_index: None,
}
}
}
@@ -159,7 +106,7 @@ impl JsonPathProcessor {
let processed = self.json_path.find(val);
match processed {
Value::Array(arr) => {
- if let Some(index) = self.result_idex {
+ if let Some(index) = self.result_index {
Ok(arr.get(index).cloned().unwrap_or(Value::Null))
} else {
Ok(Value::Array(arr))
@@ -179,21 +126,20 @@ impl Processor for JsonPathProcessor {
self.ignore_missing
}
- fn exec_mut(&self, val: &mut Vec<Value>) -> Result<()> {
+ fn exec_mut(&self, val: &mut IntermediateStatus) -> Result<()> {
for field in self.fields.iter() {
- let index = field.input_index();
+ let index = field.input_field();
match val.get(index) {
Some(v) => {
let processed = self.process_field(v)?;
-
- let output_index = field.output_index();
- val[output_index] = processed;
+ let output_index = field.target_or_input_field();
+ val.insert(output_index.to_string(), processed);
}
None => {
if !self.ignore_missing {
return ProcessorMissingFieldSnafu {
processor: self.kind(),
- field: field.input_name(),
+ field: field.input_field(),
}
.fail();
}
@@ -216,7 +162,7 @@ mod test {
let json_path = JsonPath::try_from("$.hello").unwrap();
let processor = JsonPathProcessor {
json_path,
- result_idex: Some(0),
+ result_index: Some(0),
..Default::default()
};
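
The json_path hunk also renames `result_idex` to `result_index`; its semantics stay the same: a JSONPath query yields a list of matches, and `result_index` optionally picks a single element from it, falling back to `Null` when the index is out of bounds. A small sketch of that selection logic, using a stand-in `Value` enum rather than the pipeline's own type:

```rust
/// Toy stand-in for the pipeline's `Value` type.
#[derive(Debug, Clone, PartialEq)]
enum Value {
    String(String),
    Array(Vec<Value>),
    Null,
}

/// When `result_index` is set, return that element (or Null if absent);
/// otherwise return the whole match array.
fn pick_result(matches: Vec<Value>, result_index: Option<usize>) -> Value {
    match result_index {
        Some(index) => matches.get(index).cloned().unwrap_or(Value::Null),
        None => Value::Array(matches),
    }
}

fn main() {
    let matches = vec![Value::String("world".to_string())];
    assert_eq!(
        pick_result(matches.clone(), Some(0)),
        Value::String("world".to_string())
    );
    assert_eq!(pick_result(matches.clone(), Some(5)), Value::Null);
    assert_eq!(
        pick_result(matches, None),
        Value::Array(vec![Value::String("world".to_string())])
    );
}
```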
diff --git a/src/pipeline/src/etl/processor/letter.rs b/src/pipeline/src/etl/processor/letter.rs
index 8eb939918104..960521853e48 100644
--- a/src/pipeline/src/etl/processor/letter.rs
+++ b/src/pipeline/src/etl/processor/letter.rs
@@ -12,17 +12,17 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use ahash::HashSet;
use snafu::OptionExt;
+use super::IntermediateStatus;
use crate::etl::error::{
Error, KeyMustBeStringSnafu, LetterInvalidMethodSnafu, ProcessorExpectStringSnafu,
ProcessorMissingFieldSnafu, Result,
};
-use crate::etl::field::{Fields, OneInputOneOutputField};
+use crate::etl::field::Fields;
use crate::etl::processor::{
- yaml_bool, yaml_new_field, yaml_new_fields, yaml_string, Processor, ProcessorBuilder,
- ProcessorKind, FIELDS_NAME, FIELD_NAME, IGNORE_MISSING_NAME, METHOD_NAME,
+ yaml_bool, yaml_new_field, yaml_new_fields, yaml_string, Processor, FIELDS_NAME, FIELD_NAME,
+ IGNORE_MISSING_NAME, METHOD_NAME,
};
use crate::etl::value::Value;
@@ -59,55 +59,10 @@ impl std::str::FromStr for Method {
}
}
-#[derive(Debug, Default)]
-pub struct LetterProcessorBuilder {
- fields: Fields,
- method: Method,
- ignore_missing: bool,
-}
-
-impl ProcessorBuilder for LetterProcessorBuilder {
- fn output_keys(&self) -> HashSet<&str> {
- self.fields
- .iter()
- .map(|f| f.target_or_input_field())
- .collect()
- }
-
- fn input_keys(&self) -> HashSet<&str> {
- self.fields.iter().map(|f| f.input_field()).collect()
- }
-
- fn build(self, intermediate_keys: &[String]) -> Result<ProcessorKind> {
- self.build(intermediate_keys).map(ProcessorKind::Letter)
- }
-}
-
-impl LetterProcessorBuilder {
- pub fn build(self, intermediate_keys: &[String]) -> Result<LetterProcessor> {
- let mut real_fields = vec![];
- for field in self.fields.into_iter() {
- let input = OneInputOneOutputField::build(
- "letter",
- intermediate_keys,
- field.input_field(),
- field.target_or_input_field(),
- )?;
- real_fields.push(input);
- }
-
- Ok(LetterProcessor {
- fields: real_fields,
- method: self.method,
- ignore_missing: self.ignore_missing,
- })
- }
-}
-
/// only support string value
#[derive(Debug, Default)]
pub struct LetterProcessor {
- fields: Vec<OneInputOneOutputField>,
+ fields: Fields,
method: Method,
ignore_missing: bool,
}
@@ -125,7 +80,7 @@ impl LetterProcessor {
}
}
-impl TryFrom<&yaml_rust::yaml::Hash> for LetterProcessorBuilder {
+impl TryFrom<&yaml_rust::yaml::Hash> for LetterProcessor {
type Error = Error;
fn try_from(value: &yaml_rust::yaml::Hash) -> Result<Self> {
@@ -154,7 +109,7 @@ impl TryFrom<&yaml_rust::yaml::Hash> for LetterProcessorBuilder {
}
}
- Ok(LetterProcessorBuilder {
+ Ok(LetterProcessor {
fields,
method,
ignore_missing,
@@ -171,20 +126,20 @@ impl Processor for LetterProcessor {
self.ignore_missing
}
- fn exec_mut(&self, val: &mut Vec<Value>) -> Result<()> {
+ fn exec_mut(&self, val: &mut IntermediateStatus) -> Result<()> {
for field in self.fields.iter() {
- let index = field.input_index();
+ let index = field.input_field();
match val.get(index) {
Some(Value::String(s)) => {
let result = self.process_field(s)?;
- let (_, output_index) = field.output();
- val[*output_index] = result;
+ let output_key = field.target_or_input_field();
+ val.insert(output_key.to_string(), result);
}
Some(Value::Null) | None => {
if !self.ignore_missing {
return ProcessorMissingFieldSnafu {
processor: self.kind(),
- field: field.input_name(),
+ field: field.input_field(),
}
.fail();
}
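
The regex processor diff that follows drops the precomputed output-index tables (`OutPutInfo`, `RegexProcessorOutputInfo`) and instead builds output keys on the fly as `<target-or-input-field>_<capture-group>`. Below is a rough sketch of that keying, assuming `generate_key` simply joins the prefix and group name with an underscore (consistent with the `breadcrumbs_parent`-style keys in the tests); the sample pattern, group names, and input string are illustrative, and the sketch calls the `regex` crate directly:

```rust
use std::collections::BTreeMap;

use regex::Regex;

/// Assumed behaviour of `generate_key`: join the field prefix and the
/// capture-group name with an underscore.
fn generate_key(prefix: &str, group: &str) -> String {
    format!("{prefix}_{group}")
}

/// Collect every named capture group into a prefix-keyed map.
fn capture_named_groups(prefix: &str, pattern: &Regex, input: &str) -> BTreeMap<String, String> {
    let mut out = BTreeMap::new();
    if let Some(caps) = pattern.captures(input) {
        // `capture_names()` yields `Option<&str>`; only named groups are kept.
        for name in pattern.capture_names().flatten() {
            if let Some(m) = caps.name(name) {
                out.insert(generate_key(prefix, name), m.as_str().to_string());
            }
        }
    }
    out
}

fn main() {
    let re = Regex::new(r"\[c=c,n=(?P<parent>[^,]+),o=(?P<asn>\d+)\]").unwrap();
    let parsed = capture_named_groups("breadcrumbs", &re, "[c=c,n=US_CA_SANJOSE,o=55155]");
    assert_eq!(
        parsed.get("breadcrumbs_parent").map(String::as_str),
        Some("US_CA_SANJOSE")
    );
    assert_eq!(parsed.get("breadcrumbs_asn").map(String::as_str), Some("55155"));
    println!("{parsed:?}");
}
```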
diff --git a/src/pipeline/src/etl/processor/regex.rs b/src/pipeline/src/etl/processor/regex.rs
index de25195f99ab..27f30f65d9ae 100644
--- a/src/pipeline/src/etl/processor/regex.rs
+++ b/src/pipeline/src/etl/processor/regex.rs
@@ -18,21 +18,22 @@ const PATTERNS_NAME: &str = "patterns";
pub(crate) const PROCESSOR_REGEX: &str = "regex";
-use ahash::{HashSet, HashSetExt};
+use std::collections::BTreeMap;
+
use lazy_static::lazy_static;
use regex::Regex;
use snafu::{OptionExt, ResultExt};
+use super::IntermediateStatus;
use crate::etl::error::{
Error, KeyMustBeStringSnafu, ProcessorExpectStringSnafu, ProcessorMissingFieldSnafu,
RegexNamedGroupNotFoundSnafu, RegexNoValidFieldSnafu, RegexNoValidPatternSnafu, RegexSnafu,
Result,
};
-use crate::etl::field::{Fields, InputFieldInfo, OneInputMultiOutputField};
-use crate::etl::find_key_index;
+use crate::etl::field::Fields;
use crate::etl::processor::{
- yaml_bool, yaml_new_field, yaml_new_fields, yaml_string, yaml_strings, Processor,
- ProcessorBuilder, ProcessorKind, FIELDS_NAME, FIELD_NAME, IGNORE_MISSING_NAME, PATTERN_NAME,
+ yaml_bool, yaml_new_field, yaml_new_fields, yaml_string, yaml_strings, Processor, FIELDS_NAME,
+ FIELD_NAME, IGNORE_MISSING_NAME, PATTERN_NAME,
};
use crate::etl::value::Value;
@@ -83,113 +84,7 @@ impl std::str::FromStr for GroupRegex {
}
}
-#[derive(Debug, Default)]
-pub struct RegexProcessorBuilder {
- fields: Fields,
- patterns: Vec<GroupRegex>,
- ignore_missing: bool,
- output_keys: HashSet<String>,
-}
-
-impl ProcessorBuilder for RegexProcessorBuilder {
- fn output_keys(&self) -> HashSet<&str> {
- self.output_keys.iter().map(|k| k.as_str()).collect()
- }
-
- fn input_keys(&self) -> HashSet<&str> {
- self.fields.iter().map(|f| f.input_field()).collect()
- }
-
- fn build(self, intermediate_keys: &[String]) -> Result<ProcessorKind> {
- self.build(intermediate_keys).map(ProcessorKind::Regex)
- }
-}
-
-impl RegexProcessorBuilder {
- fn check(self) -> Result<Self> {
- if self.fields.is_empty() {
- return RegexNoValidFieldSnafu {
- processor: PROCESSOR_REGEX,
- }
- .fail();
- }
-
- if self.patterns.is_empty() {
- return RegexNoValidPatternSnafu {
- processor: PROCESSOR_REGEX,
- }
- .fail();
- }
-
- Ok(self)
- }
-
- fn build_group_output_info(
- group_regex: &GroupRegex,
- om_field: &OneInputMultiOutputField,
- intermediate_keys: &[String],
- ) -> Result<Vec<OutPutInfo>> {
- group_regex
- .groups
- .iter()
- .map(|g| {
- let key = generate_key(om_field.target_prefix(), g);
- let index = find_key_index(intermediate_keys, &key, "regex");
- index.map(|index| OutPutInfo {
- final_key: key,
- group_name: g.to_string(),
- index,
- })
- })
- .collect::<Result<Vec<_>>>()
- }
-
- fn build_group_output_infos(
- patterns: &[GroupRegex],
- om_field: &OneInputMultiOutputField,
- intermediate_keys: &[String],
- ) -> Result<Vec<Vec<OutPutInfo>>> {
- patterns
- .iter()
- .map(|group_regex| {
- Self::build_group_output_info(group_regex, om_field, intermediate_keys)
- })
- .collect::<Result<Vec<_>>>()
- }
-
- fn build_output_info(
- real_fields: &[OneInputMultiOutputField],
- patterns: &[GroupRegex],
- intermediate_keys: &[String],
- ) -> Result<RegexProcessorOutputInfo> {
- let inner = real_fields
- .iter()
- .map(|om_field| Self::build_group_output_infos(patterns, om_field, intermediate_keys))
- .collect::<Result<Vec<_>>>();
- inner.map(|inner| RegexProcessorOutputInfo { inner })
- }
-
- fn build(self, intermediate_keys: &[String]) -> Result<RegexProcessor> {
- let mut real_fields = vec![];
- for field in self.fields.into_iter() {
- let input_index = find_key_index(intermediate_keys, field.input_field(), "regex")?;
- let input_field_info = InputFieldInfo::new(field.input_field(), input_index);
-
- let input = OneInputMultiOutputField::new(input_field_info, field.target_field);
- real_fields.push(input);
- }
- let output_info = Self::build_output_info(&real_fields, &self.patterns, intermediate_keys)?;
- Ok(RegexProcessor {
- // fields: Fields::one(Field::new("test".to_string())),
- fields: real_fields,
- patterns: self.patterns,
- output_info,
- ignore_missing: self.ignore_missing,
- })
- }
-}
-
-impl TryFrom<&yaml_rust::yaml::Hash> for RegexProcessorBuilder {
+impl TryFrom<&yaml_rust::yaml::Hash> for RegexProcessor {
type Error = Error;
fn try_from(value: &yaml_rust::yaml::Hash) -> Result<Self> {
@@ -226,61 +121,44 @@ impl TryFrom<&yaml_rust::yaml::Hash> for RegexProcessorBuilder {
}
}
- let pattern_output_keys = patterns
- .iter()
- .flat_map(|pattern| pattern.groups.iter())
- .collect::<Vec<_>>();
- let mut output_keys = HashSet::new();
- for field in fields.iter() {
- for x in pattern_output_keys.iter() {
- output_keys.insert(generate_key(field.target_or_input_field(), x));
- }
- }
-
- let processor_builder = RegexProcessorBuilder {
+ let processor_builder = RegexProcessor {
fields,
patterns,
ignore_missing,
- output_keys,
};
processor_builder.check()
}
}
-#[derive(Debug, Default)]
-struct OutPutInfo {
- final_key: String,
- group_name: String,
- index: usize,
-}
-
-#[derive(Debug, Default)]
-struct RegexProcessorOutputInfo {
- pub inner: Vec<Vec<Vec<OutPutInfo>>>,
-}
-
-impl RegexProcessorOutputInfo {
- fn get_output_index(
- &self,
- field_index: usize,
- pattern_index: usize,
- group_index: usize,
- ) -> usize {
- self.inner[field_index][pattern_index][group_index].index
- }
-}
/// only support string value
/// if no value found from a pattern, the target_field will be ignored
#[derive(Debug, Default)]
pub struct RegexProcessor {
- fields: Vec<OneInputMultiOutputField>,
- output_info: RegexProcessorOutputInfo,
+ fields: Fields,
patterns: Vec<GroupRegex>,
ignore_missing: bool,
}
impl RegexProcessor {
+ fn check(self) -> Result<Self> {
+ if self.fields.is_empty() {
+ return RegexNoValidFieldSnafu {
+ processor: PROCESSOR_REGEX,
+ }
+ .fail();
+ }
+
+ if self.patterns.is_empty() {
+ return RegexNoValidPatternSnafu {
+ processor: PROCESSOR_REGEX,
+ }
+ .fail();
+ }
+
+ Ok(self)
+ }
+
fn try_with_patterns(&mut self, patterns: Vec<String>) -> Result<()> {
let mut rs = vec![];
for pattern in patterns {
@@ -291,21 +169,15 @@ impl RegexProcessor {
Ok(())
}
- fn process(
- &self,
- val: &str,
- gr: &GroupRegex,
- index: (usize, usize),
- ) -> Result<Vec<(usize, Value)>> {
- let mut result = Vec::new();
- if let Some(captures) = gr.regex.captures(val) {
- for (group_index, group) in gr.groups.iter().enumerate() {
- if let Some(capture) = captures.name(group) {
- let value = capture.as_str().to_string();
- let index = self
- .output_info
- .get_output_index(index.0, index.1, group_index);
- result.push((index, Value::String(value)));
+ fn process(&self, prefix: &str, val: &str) -> Result<BTreeMap<String, Value>> {
+ let mut result = BTreeMap::new();
+ for gr in self.patterns.iter() {
+ if let Some(captures) = gr.regex.captures(val) {
+ for group in gr.groups.iter() {
+ if let Some(capture) = captures.name(group) {
+ let value = capture.as_str().to_string();
+ result.insert(generate_key(prefix, group), Value::String(value));
+ }
}
}
}
@@ -322,39 +194,20 @@ impl Processor for RegexProcessor {
self.ignore_missing
}
- fn exec_mut(&self, val: &mut Vec<Value>) -> Result<()> {
- for (field_index, field) in self.fields.iter().enumerate() {
- let index = field.input_index();
- let mut result_list = None;
+ fn exec_mut(&self, val: &mut IntermediateStatus) -> Result<()> {
+ for field in self.fields.iter() {
+ let index = field.input_field();
+ let prefix = field.target_or_input_field();
match val.get(index) {
Some(Value::String(s)) => {
- // we get rust borrow checker error here
- // for (gr_index, gr) in self.patterns.iter().enumerate() {
- // let result_list = self.process(s.as_str(), gr, (field_index, gr_index))?;
- // for (output_index, result) in result_list {
- //cannot borrow `*val` as mutable because it is also borrowed as immutable mutable borrow occurs here
- // val[output_index] = result;
- // }
- // }
- for (gr_index, gr) in self.patterns.iter().enumerate() {
- let result = self.process(s.as_str(), gr, (field_index, gr_index))?;
- if !result.is_empty() {
- match result_list.as_mut() {
- None => {
- result_list = Some(result);
- }
- Some(result_list) => {
- result_list.extend(result);
- }
- }
- }
- }
+ let result = self.process(prefix, s)?;
+ val.extend(result);
}
Some(Value::Null) | None => {
if !self.ignore_missing {
return ProcessorMissingFieldSnafu {
processor: self.kind(),
- field: field.input_name(),
+ field: field.input_field(),
}
.fail();
}
@@ -367,15 +220,6 @@ impl Processor for RegexProcessor {
.fail();
}
}
- // safety here
- match result_list {
- None => {}
- Some(result_list) => {
- for (output_index, result) in result_list {
- val[output_index] = result;
- }
- }
- }
}
Ok(())
@@ -388,7 +232,7 @@ mod tests {
use ahash::{HashMap, HashMapExt};
use itertools::Itertools;
- use crate::etl::processor::regex::RegexProcessorBuilder;
+ use crate::etl::processor::regex::RegexProcessor;
use crate::etl::value::{Map, Value};
#[test]
@@ -402,18 +246,11 @@ ignore_missing: false"#;
.pop()
.unwrap();
let processor_yaml_hash = processor_yaml.as_hash().unwrap();
- let builder = RegexProcessorBuilder::try_from(processor_yaml_hash).unwrap();
- let intermediate_keys = ["a".to_string(), "a_ar".to_string()];
- let processor = builder.build(&intermediate_keys).unwrap();
+ let processor = RegexProcessor::try_from(processor_yaml_hash).unwrap();
// single field (with prefix), multiple patterns
- let result = processor
- .process("123", &processor.patterns[0], (0, 0))
- .unwrap()
- .into_iter()
- .map(|(k, v)| (intermediate_keys[k].clone(), v))
- .collect();
+ let result = processor.process("a", "123").unwrap();
let map = Map { values: result };
@@ -435,7 +272,7 @@ ignore_missing: false"#;
let cw = "[c=w,n=US_CA_SANJOSE,o=55155]";
let breadcrumbs_str = [cc, cg, co, cp, cw].iter().join(",");
- let values = [
+ let temporary_map: BTreeMap<String, Value> = [
("breadcrumbs_parent", Value::String(cc.to_string())),
("breadcrumbs_edge", Value::String(cg.to_string())),
("breadcrumbs_origin", Value::String(co.to_string())),
@@ -445,7 +282,6 @@ ignore_missing: false"#;
.into_iter()
.map(|(k, v)| (k.to_string(), v))
.collect();
- let temporary_map = Map { values };
{
// single field (with prefix), multiple patterns
@@ -464,31 +300,11 @@ ignore_missing: false"#;
.pop()
.unwrap();
let processor_yaml_hash = processor_yaml.as_hash().unwrap();
- let builder = RegexProcessorBuilder::try_from(processor_yaml_hash).unwrap();
- let intermediate_keys = [
- "breadcrumbs",
- "breadcrumbs_parent",
- "breadcrumbs_edge",
- "breadcrumbs_origin",
- "breadcrumbs_peer",
- "breadcrumbs_wrapper",
- ]
- .iter()
- .map(|k| k.to_string())
- .collect_vec();
- let processor = builder.build(&intermediate_keys).unwrap();
- let mut result = BTreeMap::new();
- for (index, pattern) in processor.patterns.iter().enumerate() {
- let r = processor
- .process(&breadcrumbs_str, pattern, (0, index))
- .unwrap()
- .into_iter()
- .map(|(k, v)| (intermediate_keys[k].clone(), v))
- .collect::<BTreeMap<_, _>>();
- result.extend(r);
- }
- let map = Map { values: result };
- assert_eq!(temporary_map, map);
+ let processor = RegexProcessor::try_from(processor_yaml_hash).unwrap();
+
+ let result = processor.process("breadcrumbs", &breadcrumbs_str).unwrap();
+
+ assert_eq!(temporary_map, result);
}
{
@@ -515,70 +331,19 @@ ignore_missing: false"#;
.pop()
.unwrap();
let processor_yaml_hash = processor_yaml.as_hash().unwrap();
- let builder = RegexProcessorBuilder::try_from(processor_yaml_hash).unwrap();
-
- let intermediate_keys = [
- "breadcrumbs_parent",
- "breadcrumbs_edge",
- "breadcrumbs_origin",
- "breadcrumbs_peer",
- "breadcrumbs_wrapper",
- "edge_ip",
- "edge_request_id",
- "edge_request_end_time",
- "edge_turn_around_time",
- "edge_dns_lookup_time",
- "edge_geo",
- "edge_asn",
- "origin_ip",
- "origin_request_id",
- "origin_request_end_time",
- "origin_turn_around_time",
- "origin_dns_lookup_time",
- "origin_geo",
- "origin_asn",
- "peer_ip",
- "peer_request_id",
- "peer_request_end_time",
- "peer_turn_around_time",
- "peer_dns_lookup_time",
- "peer_geo",
- "peer_asn",
- "parent_ip",
- "parent_request_id",
- "parent_request_end_time",
- "parent_turn_around_time",
- "parent_dns_lookup_time",
- "parent_geo",
- "parent_asn",
- "wrapper_ip",
- "wrapper_request_id",
- "wrapper_request_end_time",
- "wrapper_turn_around_time",
- "wrapper_dns_lookup_time",
- "wrapper_geo",
- "wrapper_asn",
- ]
- .iter()
- .map(|k| k.to_string())
- .collect_vec();
- let processor = builder.build(&intermediate_keys).unwrap();
+ let processor = RegexProcessor::try_from(processor_yaml_hash).unwrap();
let mut result = HashMap::new();
- for (field_index, field) in processor.fields.iter().enumerate() {
- for (pattern_index, pattern) in processor.patterns.iter().enumerate() {
- let s = temporary_map
- .get(field.input_name())
- .unwrap()
- .to_str_value();
- let r = processor
- .process(&s, pattern, (field_index, pattern_index))
- .unwrap()
- .into_iter()
- .map(|(k, v)| (intermediate_keys[k].clone(), v))
- .collect::<HashMap<_, _>>();
- result.extend(r);
- }
+ for field in processor.fields.iter() {
+ let s = temporary_map
+ .get(field.input_field())
+ .unwrap()
+ .to_str_value();
+ let prefix = field.target_or_input_field();
+
+ let r = processor.process(prefix, &s).unwrap();
+
+ result.extend(r);
}
let new_values = vec![
diff --git a/src/pipeline/src/etl/processor/timestamp.rs b/src/pipeline/src/etl/processor/timestamp.rs
index 18b6711c1d80..bf90e78f2165 100644
--- a/src/pipeline/src/etl/processor/timestamp.rs
+++ b/src/pipeline/src/etl/processor/timestamp.rs
@@ -14,22 +14,22 @@
use std::sync::Arc;
-use ahash::HashSet;
use chrono::{DateTime, NaiveDateTime};
use chrono_tz::Tz;
use lazy_static::lazy_static;
use snafu::{OptionExt, ResultExt};
+use super::IntermediateStatus;
use crate::etl::error::{
DateFailedToGetLocalTimezoneSnafu, DateFailedToGetTimestampSnafu, DateInvalidFormatSnafu,
DateParseSnafu, DateParseTimezoneSnafu, EpochInvalidResolutionSnafu, Error,
KeyMustBeStringSnafu, ProcessorFailedToParseStringSnafu, ProcessorMissingFieldSnafu,
ProcessorUnsupportedValueSnafu, Result,
};
-use crate::etl::field::{Fields, OneInputOneOutputField};
+use crate::etl::field::Fields;
use crate::etl::processor::{
- yaml_bool, yaml_new_field, yaml_new_fields, yaml_string, yaml_strings, Processor,
- ProcessorBuilder, ProcessorKind, FIELDS_NAME, FIELD_NAME, IGNORE_MISSING_NAME,
+ yaml_bool, yaml_new_field, yaml_new_fields, yaml_string, yaml_strings, Processor, FIELDS_NAME,
+ FIELD_NAME, IGNORE_MISSING_NAME,
};
use crate::etl::value::time::{
MICROSECOND_RESOLUTION, MICRO_RESOLUTION, MILLISECOND_RESOLUTION, MILLI_RESOLUTION,
@@ -114,56 +114,10 @@ impl std::ops::Deref for Formats {
}
}
-#[derive(Debug)]
-pub struct TimestampProcessorBuilder {
- fields: Fields,
- formats: Formats,
- resolution: Resolution,
- ignore_missing: bool,
-}
-
-impl ProcessorBuilder for TimestampProcessorBuilder {
- fn output_keys(&self) -> HashSet<&str> {
- self.fields
- .iter()
- .map(|f| f.target_or_input_field())
- .collect()
- }
-
- fn input_keys(&self) -> HashSet<&str> {
- self.fields.iter().map(|f| f.input_field()).collect()
- }
-
- fn build(self, intermediate_keys: &[String]) -> Result<ProcessorKind> {
- self.build(intermediate_keys).map(ProcessorKind::Timestamp)
- }
-}
-
-impl TimestampProcessorBuilder {
- pub fn build(self, intermediate_keys: &[String]) -> Result<TimestampProcessor> {
- let mut real_fields = vec![];
- for field in self.fields.into_iter() {
- let input = OneInputOneOutputField::build(
- "timestamp",
- intermediate_keys,
- field.input_field(),
- field.target_or_input_field(),
- )?;
- real_fields.push(input);
- }
- Ok(TimestampProcessor {
- fields: real_fields,
- formats: self.formats,
- resolution: self.resolution,
- ignore_missing: self.ignore_missing,
- })
- }
-}
-
/// support string, integer, float, time, epoch
#[derive(Debug, Default)]
pub struct TimestampProcessor {
- fields: Vec<OneInputOneOutputField>,
+ fields: Fields,
formats: Formats,
resolution: Resolution,
ignore_missing: bool,
@@ -289,7 +243,7 @@ fn parse_formats(yaml: &yaml_rust::yaml::Yaml) -> Result<Vec<(Arc<String>, Tz)>>
}
}
-impl TryFrom<&yaml_rust::yaml::Hash> for TimestampProcessorBuilder {
+impl TryFrom<&yaml_rust::yaml::Hash> for TimestampProcessor {
type Error = Error;
fn try_from(hash: &yaml_rust::yaml::Hash) -> Result<Self> {
@@ -324,7 +278,7 @@ impl TryFrom<&yaml_rust::yaml::Hash> for TimestampProcessorBuilder {
}
}
- let processor_builder = TimestampProcessorBuilder {
+ let processor_builder = TimestampProcessor {
fields,
formats,
resolution,
@@ -344,23 +298,23 @@ impl Processor for TimestampProcessor {
self.ignore_missing
}
- fn exec_mut(&self, val: &mut Vec<Value>) -> Result<()> {
+ fn exec_mut(&self, val: &mut IntermediateStatus) -> Result<()> {
for field in self.fields.iter() {
- let index = field.input().index;
+ let index = field.input_field();
match val.get(index) {
Some(Value::Null) | None => {
if !self.ignore_missing {
return ProcessorMissingFieldSnafu {
processor: self.kind(),
- field: field.input_name(),
+ field: field.input_field(),
}
.fail();
}
}
Some(v) => {
let result = self.parse(v)?;
- let (_, index) = field.output();
- val[*index] = Value::Timestamp(result);
+ let output_key = field.target_or_input_field();
+ val.insert(output_key.to_string(), Value::Timestamp(result));
}
}
}
@@ -372,18 +326,9 @@ impl Processor for TimestampProcessor {
mod tests {
use yaml_rust::YamlLoader;
- use super::{TimestampProcessor, TimestampProcessorBuilder};
+ use super::TimestampProcessor;
use crate::etl::value::{Timestamp, Value};
- fn builder_to_native_processor(builder: TimestampProcessorBuilder) -> TimestampProcessor {
- TimestampProcessor {
- fields: vec![],
- formats: builder.formats,
- resolution: builder.resolution,
- ignore_missing: builder.ignore_missing,
- }
- }
-
#[test]
fn test_parse_epoch() {
let processor_yaml_str = r#"fields:
@@ -397,9 +342,7 @@ formats:
"#;
let yaml = &YamlLoader::load_from_str(processor_yaml_str).unwrap()[0];
let timestamp_yaml = yaml.as_hash().unwrap();
- let processor = builder_to_native_processor(
- TimestampProcessorBuilder::try_from(timestamp_yaml).unwrap(),
- );
+ let processor = TimestampProcessor::try_from(timestamp_yaml).unwrap();
let values = [
(
@@ -451,9 +394,7 @@ formats:
"#;
let yaml = &YamlLoader::load_from_str(processor_yaml_str).unwrap()[0];
let timestamp_yaml = yaml.as_hash().unwrap();
- let processor = builder_to_native_processor(
- TimestampProcessorBuilder::try_from(timestamp_yaml).unwrap(),
- );
+ let processor = TimestampProcessor::try_from(timestamp_yaml).unwrap();
let values: Vec<&str> = vec![
"2014-5-17T12:34:56",
diff --git a/src/pipeline/src/etl/processor/urlencoding.rs b/src/pipeline/src/etl/processor/urlencoding.rs
index ca42aae23677..c14c7d87b11f 100644
--- a/src/pipeline/src/etl/processor/urlencoding.rs
+++ b/src/pipeline/src/etl/processor/urlencoding.rs
@@ -12,7 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use ahash::HashSet;
+use std::collections::BTreeMap;
+
use snafu::{OptionExt, ResultExt};
use urlencoding::{decode, encode};
@@ -20,10 +21,10 @@ use crate::etl::error::{
Error, KeyMustBeStringSnafu, ProcessorExpectStringSnafu, ProcessorMissingFieldSnafu, Result,
UrlEncodingDecodeSnafu, UrlEncodingInvalidMethodSnafu,
};
-use crate::etl::field::{Fields, OneInputOneOutputField};
+use crate::etl::field::Fields;
use crate::etl::processor::{
- yaml_bool, yaml_new_field, yaml_new_fields, yaml_string, ProcessorBuilder, ProcessorKind,
- FIELDS_NAME, FIELD_NAME, IGNORE_MISSING_NAME, METHOD_NAME,
+ yaml_bool, yaml_new_field, yaml_new_fields, yaml_string, FIELDS_NAME, FIELD_NAME,
+ IGNORE_MISSING_NAME, METHOD_NAME,
};
use crate::etl::value::Value;
@@ -57,55 +58,10 @@ impl std::str::FromStr for Method {
}
}
-#[derive(Debug, Default)]
-pub struct UrlEncodingProcessorBuilder {
- fields: Fields,
- method: Method,
- ignore_missing: bool,
-}
-
-impl ProcessorBuilder for UrlEncodingProcessorBuilder {
- fn output_keys(&self) -> HashSet<&str> {
- self.fields
- .iter()
- .map(|f| f.target_or_input_field())
- .collect()
- }
-
- fn input_keys(&self) -> HashSet<&str> {
- self.fields.iter().map(|f| f.input_field()).collect()
- }
-
- fn build(self, intermediate_keys: &[String]) -> Result<ProcessorKind> {
- self.build(intermediate_keys)
- .map(ProcessorKind::UrlEncoding)
- }
-}
-
-impl UrlEncodingProcessorBuilder {
- fn build(self, intermediate_keys: &[String]) -> Result<UrlEncodingProcessor> {
- let mut real_fields = vec![];
- for field in self.fields.into_iter() {
- let input = OneInputOneOutputField::build(
- "urlencoding",
- intermediate_keys,
- field.input_field(),
- field.target_or_input_field(),
- )?;
- real_fields.push(input);
- }
- Ok(UrlEncodingProcessor {
- fields: real_fields,
- method: self.method,
- ignore_missing: self.ignore_missing,
- })
- }
-}
-
/// only support string value
#[derive(Debug, Default)]
pub struct UrlEncodingProcessor {
- fields: Vec<OneInputOneOutputField>,
+ fields: Fields,
method: Method,
ignore_missing: bool,
}
@@ -120,7 +76,7 @@ impl UrlEncodingProcessor {
}
}
-impl TryFrom<&yaml_rust::yaml::Hash> for UrlEncodingProcessorBuilder {
+impl TryFrom<&yaml_rust::yaml::Hash> for UrlEncodingProcessor {
type Error = Error;
fn try_from(value: &yaml_rust::yaml::Hash) -> Result<Self> {
@@ -152,7 +108,7 @@ impl TryFrom<&yaml_rust::yaml::Hash> for UrlEncodingProcessorBuilder {
_ => {}
}
}
- let processor = UrlEncodingProcessorBuilder {
+ let processor = UrlEncodingProcessor {
fields,
method,
ignore_missing,
@@ -171,20 +127,20 @@ impl crate::etl::processor::Processor for UrlEncodingProcessor {
self.ignore_missing
}
- fn exec_mut(&self, val: &mut Vec<Value>) -> Result<()> {
+ fn exec_mut(&self, val: &mut BTreeMap<String, Value>) -> Result<()> {
for field in self.fields.iter() {
- let index = field.input_index();
+ let index = field.input_field();
match val.get(index) {
Some(Value::String(s)) => {
let result = self.process_field(s)?;
- let output_index = field.output_index();
- val[output_index] = result;
+ let output_index = field.target_or_input_field();
+ val.insert(output_index.to_string(), result);
}
Some(Value::Null) | None => {
if !self.ignore_missing {
return ProcessorMissingFieldSnafu {
processor: self.kind(),
- field: field.input_name(),
+ field: field.input_field(),
}
.fail();
}
@@ -205,6 +161,7 @@ impl crate::etl::processor::Processor for UrlEncodingProcessor {
#[cfg(test)]
mod tests {
+ use crate::etl::field::Fields;
use crate::etl::processor::urlencoding::UrlEncodingProcessor;
use crate::etl::value::Value;
@@ -220,7 +177,7 @@ mod tests {
}
{
let processor = UrlEncodingProcessor {
- fields: vec![],
+ fields: Fields::default(),
method: super::Method::Encode,
ignore_missing: false,
};
diff --git a/src/pipeline/src/etl/transform.rs b/src/pipeline/src/etl/transform.rs
index be7fe35e5076..e3039d6c7ac4 100644
--- a/src/pipeline/src/etl/transform.rs
+++ b/src/pipeline/src/etl/transform.rs
@@ -15,11 +15,9 @@
pub mod index;
pub mod transformer;
-use snafu::OptionExt;
+use std::collections::BTreeMap;
use crate::etl::error::{Error, Result};
-use crate::etl::find_key_index;
-use crate::etl::processor::yaml_string;
use crate::etl::transform::index::Index;
use crate::etl::value::Value;
@@ -30,14 +28,15 @@ const TRANSFORM_INDEX: &str = "index";
const TRANSFORM_DEFAULT: &str = "default";
const TRANSFORM_ON_FAILURE: &str = "on_failure";
+use snafu::OptionExt;
pub use transformer::greptime::GreptimeTransformer;
use super::error::{
KeyMustBeStringSnafu, TransformElementMustBeMapSnafu, TransformOnFailureInvalidValueSnafu,
TransformTypeMustBeSetSnafu,
};
-use super::field::{Fields, InputFieldInfo, OneInputOneOutputField};
-use super::processor::{yaml_new_field, yaml_new_fields};
+use super::field::Fields;
+use super::processor::{yaml_new_field, yaml_new_fields, yaml_string};
pub trait Transformer: std::fmt::Debug + Sized + Send + Sync + 'static {
type Output;
@@ -47,7 +46,7 @@ pub trait Transformer: std::fmt::Debug + Sized + Send + Sync + 'static {
fn schemas(&self) -> &Vec<greptime_proto::v1::ColumnSchema>;
fn transforms(&self) -> &Transforms;
fn transforms_mut(&mut self) -> &mut Transforms;
- fn transform_mut(&self, val: &mut Vec<Value>) -> Result<Self::VecOutput>;
+ fn transform_mut(&self, val: &mut BTreeMap<String, Value>) -> Result<Self::VecOutput>;
}
/// On Failure behavior when transform fails
@@ -73,37 +72,12 @@ impl std::str::FromStr for OnFailure {
}
}
-#[derive(Debug, Default, Clone)]
-pub struct TransformBuilders {
- pub(crate) builders: Vec<TransformBuilder>,
- pub(crate) output_keys: Vec<String>,
- pub(crate) required_keys: Vec<String>,
-}
-
#[derive(Debug, Default, Clone)]
pub struct Transforms {
pub(crate) transforms: Vec<Transform>,
- pub(crate) output_keys: Vec<String>,
- pub(crate) required_keys: Vec<String>,
}
impl Transforms {
- pub fn output_keys(&self) -> &Vec<String> {
- &self.output_keys
- }
-
- pub fn output_keys_mut(&mut self) -> &mut Vec<String> {
- &mut self.output_keys
- }
-
- pub fn required_keys_mut(&mut self) -> &mut Vec<String> {
- &mut self.required_keys
- }
-
- pub fn required_keys(&self) -> &Vec<String> {
- &self.required_keys
- }
-
pub fn transforms(&self) -> &Vec<Transform> {
&self.transforms
}
@@ -123,7 +97,7 @@ impl std::ops::DerefMut for Transforms {
}
}
-impl TryFrom<&Vec<yaml_rust::Yaml>> for TransformBuilders {
+impl TryFrom<&Vec<yaml_rust::Yaml>> for Transforms {
type Error = Error;
fn try_from(docs: &Vec<yaml_rust::Yaml>) -> Result<Self> {
@@ -131,7 +105,7 @@ impl TryFrom<&Vec<yaml_rust::Yaml>> for TransformBuilders {
let mut all_output_keys: Vec<String> = Vec::with_capacity(100);
let mut all_required_keys = Vec::with_capacity(100);
for doc in docs {
- let transform_builder: TransformBuilder = doc
+ let transform_builder: Transform = doc
.as_hash()
.context(TransformElementMustBeMapSnafu)?
.try_into()?;
@@ -154,51 +128,14 @@ impl TryFrom<&Vec<yaml_rust::Yaml>> for TransformBuilders {
all_required_keys.sort();
- Ok(TransformBuilders {
- builders: transforms,
- output_keys: all_output_keys,
- required_keys: all_required_keys,
- })
- }
-}
-
-#[derive(Debug, Clone)]
-pub struct TransformBuilder {
- fields: Fields,
- type_: Value,
- default: Option<Value>,
- index: Option<Index>,
- on_failure: Option<OnFailure>,
-}
-
-impl TransformBuilder {
- pub fn build(self, intermediate_keys: &[String], output_keys: &[String]) -> Result<Transform> {
- let mut real_fields = vec![];
- for field in self.fields {
- let input_index = find_key_index(intermediate_keys, field.input_field(), "transform")?;
- let input_field_info = InputFieldInfo::new(field.input_field(), input_index);
- let output_index =
- find_key_index(output_keys, field.target_or_input_field(), "transform")?;
- let input = OneInputOneOutputField::new(
- input_field_info,
- (field.target_or_input_field().to_string(), output_index),
- );
- real_fields.push(input);
- }
- Ok(Transform {
- real_fields,
- type_: self.type_,
- default: self.default,
- index: self.index,
- on_failure: self.on_failure,
- })
+ Ok(Transforms { transforms })
}
}
/// only field is required
#[derive(Debug, Clone)]
pub struct Transform {
- pub real_fields: Vec<OneInputOneOutputField>,
+ pub fields: Fields,
pub type_: Value,
@@ -212,7 +149,7 @@ pub struct Transform {
impl Default for Transform {
fn default() -> Self {
Transform {
- real_fields: Vec::new(),
+ fields: Fields::default(),
type_: Value::Null,
default: None,
index: None,
@@ -231,7 +168,7 @@ impl Transform {
}
}
-impl TryFrom<&yaml_rust::yaml::Hash> for TransformBuilder {
+impl TryFrom<&yaml_rust::yaml::Hash> for Transform {
type Error = Error;
fn try_from(hash: &yaml_rust::yaml::Hash) -> Result<Self> {
@@ -294,7 +231,7 @@ impl TryFrom<&yaml_rust::yaml::Hash> for TransformBuilder {
}
}
}
- let builder = TransformBuilder {
+ let builder = Transform {
fields,
type_,
default: final_default,
diff --git a/src/pipeline/src/etl/transform/transformer/greptime.rs b/src/pipeline/src/etl/transform/transformer/greptime.rs
index 7bbca8ad771e..eb8d0f882726 100644
--- a/src/pipeline/src/etl/transform/transformer/greptime.rs
+++ b/src/pipeline/src/etl/transform/transformer/greptime.rs
@@ -14,7 +14,7 @@
pub mod coerce;
-use std::collections::HashSet;
+use std::collections::{BTreeMap, HashSet};
use std::sync::Arc;
use ahash::{HashMap, HashMapExt};
@@ -25,7 +25,7 @@ use api::v1::{ColumnDataType, ColumnDataTypeExtension, JsonTypeExtension, Semant
use coerce::{coerce_columns, coerce_value};
use greptime_proto::v1::{ColumnSchema, Row, Rows, Value as GreptimeValue};
use itertools::Itertools;
-use serde_json::{Map, Number, Value as JsonValue};
+use serde_json::Number;
use crate::etl::error::{
IdentifyPipelineColumnTypeMismatchSnafu, ReachedMaxNestedLevelsSnafu, Result,
@@ -33,14 +33,12 @@ use crate::etl::error::{
TransformMultipleTimestampIndexSnafu, TransformTimestampIndexCountSnafu,
UnsupportedNumberTypeSnafu,
};
-use crate::etl::field::{InputFieldInfo, OneInputOneOutputField};
+use crate::etl::field::{Field, Fields};
+use crate::etl::processor::IntermediateStatus;
use crate::etl::transform::index::Index;
use crate::etl::transform::{Transform, Transformer, Transforms};
use crate::etl::value::{Timestamp, Value};
-/// The header key that contains the pipeline params.
-pub const GREPTIME_PIPELINE_PARAMS_HEADER: &str = "x-greptime-pipeline-params";
-
const DEFAULT_GREPTIME_TIMESTAMP_COLUMN: &str = "greptime_timestamp";
const DEFAULT_MAX_NESTED_LEVELS_FOR_JSON_FLATTENING: usize = 10;
@@ -91,30 +89,15 @@ impl GreptimeTransformer {
let default = Some(type_.clone());
let transform = Transform {
- real_fields: vec![OneInputOneOutputField::new(
- InputFieldInfo {
- name: DEFAULT_GREPTIME_TIMESTAMP_COLUMN.to_string(),
- index: usize::MAX,
- },
- (
- DEFAULT_GREPTIME_TIMESTAMP_COLUMN.to_string(),
- transforms
- .transforms
- .iter()
- .map(|x| x.real_fields.len())
- .sum(),
- ),
- )],
+ fields: Fields::one(Field::new(
+ DEFAULT_GREPTIME_TIMESTAMP_COLUMN.to_string(),
+ None,
+ )),
type_,
default,
index: Some(Index::Time),
on_failure: Some(crate::etl::transform::OnFailure::Default),
};
- let required_keys = transforms.required_keys_mut();
- required_keys.push(DEFAULT_GREPTIME_TIMESTAMP_COLUMN.to_string());
-
- let output_keys = transforms.output_keys_mut();
- output_keys.push(DEFAULT_GREPTIME_TIMESTAMP_COLUMN.to_string());
transforms.push(transform);
}
@@ -142,9 +125,9 @@ impl Transformer for GreptimeTransformer {
for transform in transforms.iter() {
let target_fields_set = transform
- .real_fields
+ .fields
.iter()
- .map(|f| f.output_name())
+ .map(|f| f.target_or_input_field())
.collect::<HashSet<_>>();
let intersections: Vec<_> = column_names_set.intersection(&target_fields_set).collect();
@@ -157,16 +140,17 @@ impl Transformer for GreptimeTransformer {
if let Some(idx) = transform.index {
if idx == Index::Time {
- match transform.real_fields.len() {
+ match transform.fields.len() {
//Safety unwrap is fine here because we have checked the length of real_fields
- 1 => timestamp_columns
- .push(transform.real_fields.first().unwrap().input_name()),
+ 1 => {
+ timestamp_columns.push(transform.fields.first().unwrap().input_field())
+ }
_ => {
return TransformMultipleTimestampIndexSnafu {
columns: transform
- .real_fields
+ .fields
.iter()
- .map(|x| x.input_name())
+ .map(|x| x.input_field())
.join(", "),
}
.fail();
@@ -195,12 +179,12 @@ impl Transformer for GreptimeTransformer {
}
}
- fn transform_mut(&self, val: &mut Vec<Value>) -> Result<Self::VecOutput> {
+ fn transform_mut(&self, val: &mut IntermediateStatus) -> Result<Self::VecOutput> {
let mut values = vec![GreptimeValue { value_data: None }; self.schema.len()];
+ let mut output_index = 0;
for transform in self.transforms.iter() {
- for field in transform.real_fields.iter() {
- let index = field.input_index();
- let output_index = field.output_index();
+ for field in transform.fields.iter() {
+ let index = field.input_field();
match val.get(index) {
Some(v) => {
let value_data = coerce_value(v, transform)?;
@@ -216,6 +200,7 @@ impl Transformer for GreptimeTransformer {
values[output_index] = GreptimeValue { value_data };
}
}
+ output_index += 1;
}
}
Ok(Row { values })
@@ -335,30 +320,49 @@ fn resolve_number_schema(
)
}
-fn json_value_to_row(
- schema_info: &mut SchemaInfo,
- map: Map<String, serde_json::Value>,
-) -> Result<Row> {
+fn values_to_row(schema_info: &mut SchemaInfo, values: BTreeMap<String, Value>) -> Result<Row> {
let mut row: Vec<GreptimeValue> = Vec::with_capacity(schema_info.schema.len());
for _ in 0..schema_info.schema.len() {
row.push(GreptimeValue { value_data: None });
}
- for (column_name, value) in map {
+
+ for (column_name, value) in values.into_iter() {
if column_name == DEFAULT_GREPTIME_TIMESTAMP_COLUMN {
continue;
}
+
let index = schema_info.index.get(&column_name).copied();
+
match value {
- serde_json::Value::Null => {
- // do nothing
+ Value::Null => {}
+
+ Value::Int8(_) | Value::Int16(_) | Value::Int32(_) | Value::Int64(_) => {
+ // safe unwrap after type matched
+ let v = value.as_i64().unwrap();
+ resolve_schema(
+ index,
+ ValueData::I64Value(v),
+ ColumnSchema {
+ column_name,
+ datatype: ColumnDataType::Int64 as i32,
+ semantic_type: SemanticType::Field as i32,
+ datatype_extension: None,
+ options: None,
+ },
+ &mut row,
+ schema_info,
+ )?;
}
- serde_json::Value::String(s) => {
+
+ Value::Uint8(_) | Value::Uint16(_) | Value::Uint32(_) | Value::Uint64(_) => {
+ // safe unwrap after type matched
+ let v = value.as_u64().unwrap();
resolve_schema(
index,
- ValueData::StringValue(s),
+ ValueData::U64Value(v),
ColumnSchema {
column_name,
- datatype: ColumnDataType::String as i32,
+ datatype: ColumnDataType::Uint64 as i32,
semantic_type: SemanticType::Field as i32,
datatype_extension: None,
options: None,
@@ -367,10 +371,29 @@ fn json_value_to_row(
schema_info,
)?;
}
- serde_json::Value::Bool(b) => {
+
+ Value::Float32(_) | Value::Float64(_) => {
+ // safe unwrap after type matched
+ let v = value.as_f64().unwrap();
resolve_schema(
index,
- ValueData::BoolValue(b),
+ ValueData::F64Value(v),
+ ColumnSchema {
+ column_name,
+ datatype: ColumnDataType::Float64 as i32,
+ semantic_type: SemanticType::Field as i32,
+ datatype_extension: None,
+ options: None,
+ },
+ &mut row,
+ schema_info,
+ )?;
+ }
+
+ Value::Boolean(v) => {
+ resolve_schema(
+ index,
+ ValueData::BoolValue(v),
ColumnSchema {
column_name,
datatype: ColumnDataType::Boolean as i32,
@@ -382,13 +405,88 @@ fn json_value_to_row(
schema_info,
)?;
}
- serde_json::Value::Number(n) => {
- resolve_number_schema(n, column_name, index, &mut row, schema_info)?;
+ Value::String(v) => {
+ resolve_schema(
+ index,
+ ValueData::StringValue(v),
+ ColumnSchema {
+ column_name,
+ datatype: ColumnDataType::String as i32,
+ semantic_type: SemanticType::Field as i32,
+ datatype_extension: None,
+ options: None,
+ },
+ &mut row,
+ schema_info,
+ )?;
+ }
+
+ Value::Timestamp(Timestamp::Nanosecond(ns)) => {
+ resolve_schema(
+ index,
+ ValueData::TimestampNanosecondValue(ns),
+ ColumnSchema {
+ column_name,
+ datatype: ColumnDataType::TimestampNanosecond as i32,
+ semantic_type: SemanticType::Field as i32,
+ datatype_extension: None,
+ options: None,
+ },
+ &mut row,
+ schema_info,
+ )?;
+ }
+ Value::Timestamp(Timestamp::Microsecond(us)) => {
+ resolve_schema(
+ index,
+ ValueData::TimestampMicrosecondValue(us),
+ ColumnSchema {
+ column_name,
+ datatype: ColumnDataType::TimestampMicrosecond as i32,
+ semantic_type: SemanticType::Field as i32,
+ datatype_extension: None,
+ options: None,
+ },
+ &mut row,
+ schema_info,
+ )?;
+ }
+ Value::Timestamp(Timestamp::Millisecond(ms)) => {
+ resolve_schema(
+ index,
+ ValueData::TimestampMillisecondValue(ms),
+ ColumnSchema {
+ column_name,
+ datatype: ColumnDataType::TimestampMillisecond as i32,
+ semantic_type: SemanticType::Field as i32,
+ datatype_extension: None,
+ options: None,
+ },
+ &mut row,
+ schema_info,
+ )?;
}
- serde_json::Value::Array(_) | serde_json::Value::Object(_) => {
+ Value::Timestamp(Timestamp::Second(s)) => {
resolve_schema(
index,
- ValueData::BinaryValue(jsonb::Value::from(value).to_vec()),
+ ValueData::TimestampSecondValue(s),
+ ColumnSchema {
+ column_name,
+ datatype: ColumnDataType::TimestampSecond as i32,
+ semantic_type: SemanticType::Field as i32,
+ datatype_extension: None,
+ options: None,
+ },
+ &mut row,
+ schema_info,
+ )?;
+ }
+
+ Value::Array(_) | Value::Map(_) => {
+ let data: jsonb::Value = value.into();
+ resolve_schema(
+ index,
+ ValueData::BinaryValue(data.to_vec()),
ColumnSchema {
column_name,
datatype: ColumnDataType::Binary as i32,
@@ -408,23 +506,18 @@ fn json_value_to_row(
}
fn identity_pipeline_inner<'a>(
- array: Vec<serde_json::Value>,
+ array: Vec<BTreeMap<String, Value>>,
tag_column_names: Option<impl Iterator<Item = &'a String>>,
- params: &GreptimePipelineParams,
+ _params: &GreptimePipelineParams,
) -> Result<Rows> {
let mut rows = Vec::with_capacity(array.len());
let mut schema_info = SchemaInfo::default();
- for value in array {
- if let serde_json::Value::Object(map) = value {
- let object = if params.flatten_json_object() {
- flatten_json_object(map, DEFAULT_MAX_NESTED_LEVELS_FOR_JSON_FLATTENING)?
- } else {
- map
- };
- let row = json_value_to_row(&mut schema_info, object)?;
- rows.push(row);
- }
+
+ for values in array {
+ let row = values_to_row(&mut schema_info, values)?;
+ rows.push(row);
}
+
let greptime_timestamp_schema = ColumnSchema {
column_name: DEFAULT_GREPTIME_TIMESTAMP_COLUMN.to_string(),
datatype: ColumnDataType::TimestampNanosecond as i32,
@@ -469,17 +562,26 @@ fn identity_pipeline_inner<'a>(
/// 4. The pipeline will return an error if the same column datatype is mismatched
/// 5. The pipeline will analyze the schema of each json record and merge them to get the final schema.
pub fn identity_pipeline(
- array: Vec<serde_json::Value>,
+ array: Vec<BTreeMap<String, Value>>,
table: Option<Arc<table::Table>>,
params: &GreptimePipelineParams,
) -> Result<Rows> {
+ let input = if params.flatten_json_object() {
+ array
+ .into_iter()
+ .map(|item| flatten_object(item, DEFAULT_MAX_NESTED_LEVELS_FOR_JSON_FLATTENING))
+ .collect::<Result<Vec<BTreeMap<String, Value>>>>()?
+ } else {
+ array
+ };
+
match table {
Some(table) => {
let table_info = table.table_info();
let tag_column_names = table_info.meta.row_key_column_names();
- identity_pipeline_inner(array, Some(tag_column_names), params)
+ identity_pipeline_inner(input, Some(tag_column_names), params)
}
- None => identity_pipeline_inner(array, None::<std::iter::Empty<&String>>, params),
+ None => identity_pipeline_inner(input, None::<std::iter::Empty<&String>>, params),
}
}
@@ -487,24 +589,24 @@ pub fn identity_pipeline(
///
/// The `max_nested_levels` parameter is used to limit the nested levels of the JSON object.
/// The error will be returned if the nested levels is greater than the `max_nested_levels`.
-pub fn flatten_json_object(
- object: Map<String, JsonValue>,
+pub fn flatten_object(
+ object: BTreeMap<String, Value>,
max_nested_levels: usize,
-) -> Result<Map<String, JsonValue>> {
- let mut flattened = Map::new();
+) -> Result<BTreeMap<String, Value>> {
+ let mut flattened = BTreeMap::new();
if !object.is_empty() {
// it will use recursion to flatten the object.
- do_flatten_json_object(&mut flattened, None, object, 1, max_nested_levels)?;
+ do_flatten_object(&mut flattened, None, object, 1, max_nested_levels)?;
}
Ok(flattened)
}
-fn do_flatten_json_object(
- dest: &mut Map<String, JsonValue>,
+fn do_flatten_object(
+ dest: &mut BTreeMap<String, Value>,
base: Option<&str>,
- object: Map<String, JsonValue>,
+ object: BTreeMap<String, Value>,
current_level: usize,
max_nested_levels: usize,
) -> Result<()> {
@@ -517,11 +619,11 @@ fn do_flatten_json_object(
let new_key = base.map_or_else(|| key.clone(), |base_key| format!("{base_key}.{key}"));
match value {
- JsonValue::Object(object) => {
- do_flatten_json_object(
+ Value::Map(object) => {
+ do_flatten_object(
dest,
Some(&new_key),
- object,
+ object.values,
current_level + 1,
max_nested_levels,
)?;
@@ -540,9 +642,8 @@ fn do_flatten_json_object(
mod tests {
use api::v1::SemanticType;
- use crate::etl::transform::transformer::greptime::{
- flatten_json_object, identity_pipeline_inner, GreptimePipelineParams,
- };
+ use super::*;
+ use crate::etl::{json_array_to_intermediate_state, json_to_intermediate_state};
use crate::identity_pipeline;
#[test]
@@ -568,6 +669,7 @@ mod tests {
"gaga": "gaga"
}),
];
+ let array = json_array_to_intermediate_state(array).unwrap();
let rows = identity_pipeline(array, None, &GreptimePipelineParams::default());
assert!(rows.is_err());
assert_eq!(
@@ -596,7 +698,11 @@ mod tests {
"gaga": "gaga"
}),
];
- let rows = identity_pipeline(array, None, &GreptimePipelineParams::default());
+ let rows = identity_pipeline(
+ json_array_to_intermediate_state(array).unwrap(),
+ None,
+ &GreptimePipelineParams::default(),
+ );
assert!(rows.is_err());
assert_eq!(
rows.err().unwrap().to_string(),
@@ -624,7 +730,11 @@ mod tests {
"gaga": "gaga"
}),
];
- let rows = identity_pipeline(array, None, &GreptimePipelineParams::default());
+ let rows = identity_pipeline(
+ json_array_to_intermediate_state(array).unwrap(),
+ None,
+ &GreptimePipelineParams::default(),
+ );
assert!(rows.is_ok());
let rows = rows.unwrap();
assert_eq!(rows.schema.len(), 8);
@@ -655,7 +765,7 @@ mod tests {
];
let tag_column_names = ["name".to_string(), "address".to_string()];
let rows = identity_pipeline_inner(
- array,
+ json_array_to_intermediate_state(array).unwrap(),
Some(tag_column_names.iter()),
&GreptimePipelineParams::default(),
);
@@ -754,14 +864,11 @@ mod tests {
];
for (input, max_depth, expected) in test_cases {
- let flattened_object =
- flatten_json_object(input.as_object().unwrap().clone(), max_depth);
- match flattened_object {
- Ok(flattened_object) => {
- assert_eq!(&flattened_object, expected.unwrap().as_object().unwrap())
- }
- Err(_) => assert_eq!(None, expected),
- }
+ let input = json_to_intermediate_state(input).unwrap();
+ let expected = expected.map(|e| json_to_intermediate_state(e).unwrap());
+
+ let flattened_object = flatten_object(input, max_depth).ok();
+ assert_eq!(flattened_object, expected);
}
}
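A brief illustrative sketch (not part of this commit) of what the new flatten_object in this file's diff does to a nested intermediate map, using the pipeline crate's re-exported Value and Map types; whether flatten_object itself is reachable from outside its module is an assumption, so the call is left commented out.

    use std::collections::BTreeMap;
    use pipeline::{Map, Value};

    fn build_nested() -> BTreeMap<String, Value> {
        let mut inner = BTreeMap::new();
        inner.insert("b".to_string(), Value::Int64(1));

        let mut object = BTreeMap::new();
        object.insert("a".to_string(), Value::Map(Map::from(inner)));
        object.insert("c".to_string(), Value::Int64(2));
        object
    }

    // let flat = flatten_object(build_nested(), 10)?;
    // Nested keys are joined with '.', so `flat` would hold {"a.b": 1, "c": 2};
    // exceeding max_nested_levels returns an error instead.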
diff --git a/src/pipeline/src/etl/transform/transformer/greptime/coerce.rs b/src/pipeline/src/etl/transform/transformer/greptime/coerce.rs
index 5f448b386cbd..da345b3bdeb3 100644
--- a/src/pipeline/src/etl/transform/transformer/greptime/coerce.rs
+++ b/src/pipeline/src/etl/transform/transformer/greptime/coerce.rs
@@ -71,12 +71,11 @@ impl TryFrom<Value> for ValueData {
}
}
-// TODO(yuanbohan): add fulltext support in datatype_extension
pub(crate) fn coerce_columns(transform: &Transform) -> Result<Vec<ColumnSchema>> {
let mut columns = Vec::new();
- for field in transform.real_fields.iter() {
- let column_name = field.output_name().to_string();
+ for field in transform.fields.iter() {
+ let column_name = field.target_or_input_field().to_string();
let (datatype, datatype_extension) = coerce_type(transform)?;
@@ -477,12 +476,14 @@ fn coerce_json_value(v: &Value, transform: &Transform) -> Result<Option<ValueDat
#[cfg(test)]
mod tests {
+
use super::*;
+ use crate::etl::field::Fields;
#[test]
fn test_coerce_string_without_on_failure() {
let transform = Transform {
- real_fields: vec![],
+ fields: Fields::default(),
type_: Value::Int32(0),
default: None,
index: None,
@@ -507,7 +508,7 @@ mod tests {
#[test]
fn test_coerce_string_with_on_failure_ignore() {
let transform = Transform {
- real_fields: vec![],
+ fields: Fields::default(),
type_: Value::Int32(0),
default: None,
index: None,
@@ -522,7 +523,7 @@ mod tests {
#[test]
fn test_coerce_string_with_on_failure_default() {
let mut transform = Transform {
- real_fields: vec![],
+ fields: Fields::default(),
type_: Value::Int32(0),
default: None,
index: None,
diff --git a/src/pipeline/src/etl/value.rs b/src/pipeline/src/etl/value.rs
index 3657bb914cf7..b007e665134c 100644
--- a/src/pipeline/src/etl/value.rs
+++ b/src/pipeline/src/etl/value.rs
@@ -249,6 +249,29 @@ impl Value {
}
}
+ pub fn as_i64(&self) -> Option<i64> {
+ match self {
+ Value::Uint32(v) => Some(*v as i64),
+ Value::Uint16(v) => Some(*v as i64),
+ Value::Uint8(v) => Some(*v as i64),
+ Value::Int64(v) => Some(*v),
+ Value::Int32(v) => Some(*v as i64),
+ Value::Int16(v) => Some(*v as i64),
+ Value::Int8(v) => Some(*v as i64),
+ _ => None,
+ }
+ }
+
+ pub fn as_u64(&self) -> Option<u64> {
+ match self {
+ Value::Uint64(v) => Some(*v),
+ Value::Uint32(v) => Some(*v as u64),
+ Value::Uint16(v) => Some(*v as u64),
+ Value::Uint8(v) => Some(*v as u64),
+ _ => None,
+ }
+ }
+
pub fn as_f64(&self) -> Option<f64> {
match self {
Value::Float32(v) => Some(*v as f64),
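A hedged sketch (not part of this commit) of the behavior implied by the new as_i64/as_u64 helpers above; Uint64 is matched only by as_u64, presumably so a u64 is never silently narrowed into an i64.

    use pipeline::Value;

    fn conversion_examples() {
        assert_eq!(Value::Uint32(7).as_i64(), Some(7));
        assert_eq!(Value::Uint64(7).as_i64(), None);
        assert_eq!(Value::Uint64(7).as_u64(), Some(7));
    }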
diff --git a/src/pipeline/src/etl/value/map.rs b/src/pipeline/src/etl/value/map.rs
index 004a617b0f9c..9e730ef532d8 100644
--- a/src/pipeline/src/etl/value/map.rs
+++ b/src/pipeline/src/etl/value/map.rs
@@ -49,6 +49,12 @@ impl From<HashMap<String, Value>> for Map {
}
}
+impl From<BTreeMap<String, Value>> for Map {
+ fn from(values: BTreeMap<String, Value>) -> Self {
+ Self { values }
+ }
+}
+
impl std::ops::Deref for Map {
type Target = BTreeMap<String, Value>;
diff --git a/src/pipeline/src/lib.rs b/src/pipeline/src/lib.rs
index edb6ce1f5874..a6c82f9353cf 100644
--- a/src/pipeline/src/lib.rs
+++ b/src/pipeline/src/lib.rs
@@ -19,13 +19,15 @@ mod metrics;
pub use etl::error::Result;
pub use etl::processor::Processor;
-pub use etl::transform::transformer::greptime::{
- GreptimePipelineParams, SchemaInfo, GREPTIME_PIPELINE_PARAMS_HEADER,
-};
+pub use etl::transform::transformer::greptime::{GreptimePipelineParams, SchemaInfo};
pub use etl::transform::transformer::identity_pipeline;
pub use etl::transform::{GreptimeTransformer, Transformer};
pub use etl::value::{Array, Map, Value};
-pub use etl::{error as etl_error, parse, Content, Pipeline, PipelineWay, SelectInfo};
+pub use etl::{
+ error as etl_error, json_array_to_intermediate_state, json_to_intermediate_state, parse,
+ Content, DispatchedTo, Pipeline, PipelineDefinition, PipelineExecOutput, PipelineWay,
+ SelectInfo, GREPTIME_INTERNAL_IDENTITY_PIPELINE_NAME,
+};
pub use manager::{
error, pipeline_operator, table, util, PipelineInfo, PipelineRef, PipelineTableRef,
PipelineVersion,
diff --git a/src/pipeline/tests/common.rs b/src/pipeline/tests/common.rs
index d825c91e4cb3..89bebbf85bb9 100644
--- a/src/pipeline/tests/common.rs
+++ b/src/pipeline/tests/common.rs
@@ -13,7 +13,7 @@
// limitations under the License.
use greptime_proto::v1::{ColumnDataType, ColumnSchema, Rows, SemanticType};
-use pipeline::{parse, Content, GreptimeTransformer, Pipeline};
+use pipeline::{json_to_intermediate_state, parse, Content, GreptimeTransformer, Pipeline};
/// test util function to parse and execute pipeline
pub fn parse_and_exec(input_str: &str, pipeline_yaml: &str) -> Rows {
@@ -22,7 +22,6 @@ pub fn parse_and_exec(input_str: &str, pipeline_yaml: &str) -> Rows {
let yaml_content = Content::Yaml(pipeline_yaml);
let pipeline: Pipeline<GreptimeTransformer> =
parse(&yaml_content).expect("failed to parse pipeline");
- let mut result = pipeline.init_intermediate_state();
let schema = pipeline.schemas().clone();
@@ -31,19 +30,22 @@ pub fn parse_and_exec(input_str: &str, pipeline_yaml: &str) -> Rows {
match input_value {
serde_json::Value::Array(array) => {
for value in array {
- pipeline.prepare(value, &mut result).unwrap();
+ let mut intermediate_status = json_to_intermediate_state(value).unwrap();
let row = pipeline
- .exec_mut(&mut result)
- .expect("failed to exec pipeline");
+ .exec_mut(&mut intermediate_status)
+ .expect("failed to exec pipeline")
+ .into_transformed()
+ .expect("expect transformed result ");
rows.push(row);
- pipeline.reset_intermediate_state(&mut result);
}
}
serde_json::Value::Object(_) => {
- pipeline.prepare(input_value, &mut result).unwrap();
+ let mut intermediate_status = json_to_intermediate_state(input_value).unwrap();
let row = pipeline
- .exec_mut(&mut result)
- .expect("failed to exec pipeline");
+ .exec_mut(&mut intermediate_status)
+ .expect("failed to exec pipeline")
+ .into_transformed()
+ .expect("expect transformed result ");
rows.push(row);
}
_ => {
diff --git a/src/pipeline/tests/dissect.rs b/src/pipeline/tests/dissect.rs
index 56386d0e860a..a93112d68945 100644
--- a/src/pipeline/tests/dissect.rs
+++ b/src/pipeline/tests/dissect.rs
@@ -16,6 +16,7 @@ mod common;
use greptime_proto::v1::value::ValueData::StringValue;
use greptime_proto::v1::{ColumnDataType, SemanticType};
+use pipeline::json_to_intermediate_state;
fn make_string_column_schema(name: String) -> greptime_proto::v1::ColumnSchema {
common::make_column_schema(name, ColumnDataType::String, SemanticType::Field)
@@ -273,9 +274,8 @@ transform:
let yaml_content = pipeline::Content::Yaml(pipeline_yaml);
let pipeline: pipeline::Pipeline<pipeline::GreptimeTransformer> =
pipeline::parse(&yaml_content).expect("failed to parse pipeline");
- let mut result = pipeline.init_intermediate_state();
+ let mut result = json_to_intermediate_state(input_value).unwrap();
- pipeline.prepare(input_value, &mut result).unwrap();
let row = pipeline.exec_mut(&mut result);
assert!(row.is_err());
diff --git a/src/pipeline/tests/pipeline.rs b/src/pipeline/tests/pipeline.rs
index cb84e9ad0c8e..7a170660a99d 100644
--- a/src/pipeline/tests/pipeline.rs
+++ b/src/pipeline/tests/pipeline.rs
@@ -20,7 +20,7 @@ use greptime_proto::v1::value::ValueData::{
U32Value, U64Value, U8Value,
};
use greptime_proto::v1::Value as GreptimeValue;
-use pipeline::{parse, Content, GreptimeTransformer, Pipeline};
+use pipeline::{json_to_intermediate_state, parse, Content, GreptimeTransformer, Pipeline};
#[test]
fn test_complex_data() {
@@ -420,14 +420,13 @@ transform:
let yaml_content = Content::Yaml(pipeline_yaml);
let pipeline: Pipeline<GreptimeTransformer> =
parse(&yaml_content).expect("failed to parse pipeline");
- let mut stats = pipeline.init_intermediate_state();
- pipeline
- .prepare(input_value, &mut stats)
- .expect("failed to prepare pipeline");
+ let mut stats = json_to_intermediate_state(input_value).unwrap();
let row = pipeline
.exec_mut(&mut stats)
- .expect("failed to exec pipeline");
+ .expect("failed to exec pipeline")
+ .into_transformed()
+ .expect("expect transformed result ");
let output = Rows {
schema: pipeline.schemas().clone(),
@@ -490,9 +489,12 @@ transform:
let yaml_content = Content::Yaml(pipeline_yaml);
let pipeline: Pipeline<GreptimeTransformer> = parse(&yaml_content).unwrap();
- let mut status = pipeline.init_intermediate_state();
- pipeline.prepare(input_value, &mut status).unwrap();
- let row = pipeline.exec_mut(&mut status).unwrap();
+ let mut status = json_to_intermediate_state(input_value).unwrap();
+ let row = pipeline
+ .exec_mut(&mut status)
+ .unwrap()
+ .into_transformed()
+ .expect("expect transformed result ");
let r = row
.values
.into_iter()
@@ -595,10 +597,12 @@ transform:
let yaml_content = Content::Yaml(pipeline_yaml);
let pipeline: Pipeline<GreptimeTransformer> = parse(&yaml_content).unwrap();
- let mut status = pipeline.init_intermediate_state();
-
- pipeline.prepare(input_value, &mut status).unwrap();
- let row = pipeline.exec_mut(&mut status).unwrap();
+ let mut status = json_to_intermediate_state(input_value).unwrap();
+ let row = pipeline
+ .exec_mut(&mut status)
+ .unwrap()
+ .into_transformed()
+ .expect("expect transformed result ");
let r = row
.values
@@ -638,10 +642,10 @@ processors:
- dissect:
fields:
- line
- patterns:
+ patterns:
- "%{+ts} %{+ts} %{content}"
- date:
- fields:
+ fields:
- ts
formats:
- "%Y-%m-%d %H:%M:%S%.3f"
@@ -658,9 +662,12 @@ transform:
let yaml_content = Content::Yaml(pipeline_yaml);
let pipeline: Pipeline<GreptimeTransformer> = parse(&yaml_content).unwrap();
- let mut status = pipeline.init_intermediate_state();
- pipeline.prepare(input_value, &mut status).unwrap();
- let row = pipeline.exec_mut(&mut status).unwrap();
+ let mut status = json_to_intermediate_state(input_value).unwrap();
+ let row = pipeline
+ .exec_mut(&mut status)
+ .unwrap()
+ .into_transformed()
+ .expect("expect transformed result ");
let r = row
.values
.into_iter()
@@ -694,9 +701,13 @@ transform:
let yaml_content = Content::Yaml(pipeline_yaml);
let pipeline: Pipeline<GreptimeTransformer> = parse(&yaml_content).unwrap();
- let mut status = pipeline.init_intermediate_state();
- pipeline.prepare(input_value, &mut status).unwrap();
- let row = pipeline.exec_mut(&mut status).unwrap();
+ let mut status = json_to_intermediate_state(input_value).unwrap();
+
+ let row = pipeline
+ .exec_mut(&mut status)
+ .unwrap()
+ .into_transformed()
+ .expect("expect transformed result ");
let r = row
.values
@@ -749,9 +760,12 @@ transform:
let yaml_content = Content::Yaml(pipeline_yaml);
let pipeline: Pipeline<GreptimeTransformer> = parse(&yaml_content).unwrap();
- let mut status = pipeline.init_intermediate_state();
- pipeline.prepare(input_value, &mut status).unwrap();
- let row = pipeline.exec_mut(&mut status).unwrap();
+ let mut status = json_to_intermediate_state(input_value).unwrap();
+ let row = pipeline
+ .exec_mut(&mut status)
+ .unwrap()
+ .into_transformed()
+ .expect("expect transformed result ");
let mut r = row
.values
@@ -770,3 +784,79 @@ transform:
assert_eq!(expected, r);
}
+
+#[test]
+fn test_dispatch() {
+ let input_value_str1 = r#"
+{
+ "line": "2024-05-25 20:16:37.217 [http] hello world"
+}
+"#;
+ let input_value1 = serde_json::from_str::<serde_json::Value>(input_value_str1).unwrap();
+ let input_value_str2 = r#"
+{
+ "line": "2024-05-25 20:16:37.217 [database] hello world"
+}
+"#;
+ let input_value2 = serde_json::from_str::<serde_json::Value>(input_value_str2).unwrap();
+
+ let pipeline_yaml = r#"
+processors:
+ - dissect:
+ fields:
+ - line
+ patterns:
+ - "%{+ts} %{+ts} [%{logger}] %{content}"
+ - date:
+ fields:
+ - ts
+ formats:
+ - "%Y-%m-%d %H:%M:%S%.3f"
+
+dispatcher:
+ field: logger
+ rules:
+ - value: http
+ table_suffix: http
+ pipeline: access_log_pipeline
+
+transform:
+ - fields:
+ - content
+ type: string
+ - field: ts
+ type: time
+ index: timestamp
+"#;
+
+ let yaml_content = Content::Yaml(pipeline_yaml);
+ let pipeline: Pipeline<GreptimeTransformer> = parse(&yaml_content).unwrap();
+
+ let mut status = json_to_intermediate_state(input_value1).unwrap();
+ let dispatched_to = pipeline
+ .exec_mut(&mut status)
+ .unwrap()
+ .into_dispatched()
+ .expect("expect dispatched result ");
+ assert_eq!(dispatched_to.table_suffix, "http");
+ assert_eq!(dispatched_to.pipeline.unwrap(), "access_log_pipeline");
+
+ let mut status = json_to_intermediate_state(input_value2).unwrap();
+ let row = pipeline
+ .exec_mut(&mut status)
+ .unwrap()
+ .into_transformed()
+ .expect("expect transformed result ");
+ let r = row
+ .values
+ .into_iter()
+ .map(|v| v.value_data.unwrap())
+ .collect::<Vec<_>>();
+
+ let expected = vec![
+ StringValue("hello world".into()),
+ TimestampNanosecondValue(1716668197217000000),
+ ];
+
+ assert_eq!(expected, r);
+}
diff --git a/src/servers/src/elasticsearch.rs b/src/servers/src/elasticsearch.rs
index e9f1204e25b2..baa25ba7760a 100644
--- a/src/servers/src/elasticsearch.rs
+++ b/src/servers/src/elasticsearch.rs
@@ -24,6 +24,7 @@ use common_error::ext::ErrorExt;
use common_telemetry::{debug, error};
use headers::ContentType;
use once_cell::sync::Lazy;
+use pipeline::GREPTIME_INTERNAL_IDENTITY_PIPELINE_NAME;
use serde_json::{json, Deserializer, Value};
use session::context::{Channel, QueryContext};
use snafu::{ensure, ResultExt};
@@ -32,10 +33,7 @@ use crate::error::{
status_code_to_http_status, InvalidElasticsearchInputSnafu, ParseJsonSnafu,
Result as ServersResult,
};
-use crate::http::event::{
- ingest_logs_inner, LogIngestRequest, LogIngesterQueryParams, LogState,
- GREPTIME_INTERNAL_IDENTITY_PIPELINE_NAME,
-};
+use crate::http::event::{ingest_logs_inner, LogIngestRequest, LogIngesterQueryParams, LogState};
use crate::metrics::{
METRIC_ELASTICSEARCH_LOGS_DOCS_COUNT, METRIC_ELASTICSEARCH_LOGS_INGESTION_ELAPSED,
};
diff --git a/src/servers/src/error.rs b/src/servers/src/error.rs
index 096c3fd75f8e..adfe3ab841e2 100644
--- a/src/servers/src/error.rs
+++ b/src/servers/src/error.rs
@@ -158,6 +158,14 @@ pub enum Error {
location: Location,
},
+ #[snafu(display("Pipeline transform error"))]
+ PipelineTransform {
+ #[snafu(source)]
+ source: pipeline::etl_error::Error,
+ #[snafu(implicit)]
+ location: Location,
+ },
+
#[snafu(display("Not supported: {}", feat))]
NotSupported { feat: String },
@@ -557,12 +565,6 @@ pub enum Error {
location: Location,
},
- #[snafu(display("OpenTelemetry log error"))]
- OpenTelemetryLog {
- source: pipeline::etl_error::Error,
- #[snafu(implicit)]
- location: Location,
- },
#[snafu(display("Unsupported json data type for tag: {} {}", key, ty))]
UnsupportedJsonDataTypeForTag {
key: String,
@@ -634,6 +636,7 @@ impl ErrorExt for Error {
| CheckDatabaseValidity { source, .. } => source.status_code(),
Pipeline { source, .. } => source.status_code(),
+ PipelineTransform { source, .. } => source.status_code(),
NotSupported { .. }
| InvalidParameter { .. }
@@ -661,7 +664,6 @@ impl ErrorExt for Error {
| InvalidLokiPayload { .. }
| UnsupportedContentType { .. }
| TimestampOverflow { .. }
- | OpenTelemetryLog { .. }
| UnsupportedJsonDataTypeForTag { .. }
| InvalidTableName { .. }
| PrepareStatementNotFound { .. }
diff --git a/src/servers/src/http/event.rs b/src/servers/src/http/event.rs
index 00b177b2c096..d6d8e89a56ea 100644
--- a/src/servers/src/http/event.rs
+++ b/src/servers/src/http/event.rs
@@ -16,7 +16,7 @@ use std::str::FromStr;
use std::sync::Arc;
use std::time::Instant;
-use api::v1::{RowInsertRequest, RowInsertRequests, Rows};
+use api::v1::RowInsertRequests;
use async_trait::async_trait;
use axum::extract::{FromRequest, Multipart, Path, Query, Request, State};
use axum::http::header::CONTENT_TYPE;
@@ -32,18 +32,17 @@ use headers::ContentType;
use lazy_static::lazy_static;
use pipeline::error::PipelineTransformSnafu;
use pipeline::util::to_pipeline_version;
-use pipeline::{
- GreptimePipelineParams, GreptimeTransformer, PipelineVersion, GREPTIME_PIPELINE_PARAMS_HEADER,
-};
+use pipeline::{GreptimePipelineParams, GreptimeTransformer, PipelineDefinition, PipelineVersion};
use serde::{Deserialize, Serialize};
use serde_json::{json, Deserializer, Map, Value};
use session::context::{Channel, QueryContext, QueryContextRef};
use snafu::{ensure, OptionExt, ResultExt};
use crate::error::{
- status_code_to_http_status, CatalogSnafu, Error, InvalidParameterSnafu, ParseJsonSnafu,
- PipelineSnafu, Result, UnsupportedContentTypeSnafu,
+ status_code_to_http_status, Error, InvalidParameterSnafu, ParseJsonSnafu, PipelineSnafu,
+ Result, UnsupportedContentTypeSnafu,
};
+use crate::http::header::constants::GREPTIME_PIPELINE_PARAMS_HEADER;
use crate::http::header::CONTENT_TYPE_PROTOBUF_STR;
use crate::http::result::greptime_manage_resp::GreptimedbManageResponse;
use crate::http::result::greptime_result_v1::GreptimedbV1Response;
@@ -51,11 +50,11 @@ use crate::http::HttpResponse;
use crate::interceptor::{LogIngestInterceptor, LogIngestInterceptorRef};
use crate::metrics::{
METRIC_FAILURE_VALUE, METRIC_HTTP_LOGS_INGESTION_COUNTER, METRIC_HTTP_LOGS_INGESTION_ELAPSED,
- METRIC_HTTP_LOGS_TRANSFORM_ELAPSED, METRIC_SUCCESS_VALUE,
+ METRIC_SUCCESS_VALUE,
};
+use crate::pipeline::run_pipeline;
use crate::query_handler::PipelineHandlerRef;
-pub const GREPTIME_INTERNAL_IDENTITY_PIPELINE_NAME: &str = "greptime_identity";
const GREPTIME_INTERNAL_PIPELINE_NAME_PREFIX: &str = "greptime_";
lazy_static! {
@@ -269,86 +268,107 @@ fn transform_ndjson_array_factory(
}
/// Dryrun pipeline with given data
-fn dryrun_pipeline_inner(
+async fn dryrun_pipeline_inner(
value: Vec<Value>,
- pipeline: &pipeline::Pipeline<GreptimeTransformer>,
+ pipeline: Arc<pipeline::Pipeline<GreptimeTransformer>>,
+ pipeline_handler: PipelineHandlerRef,
+ query_ctx: &QueryContextRef,
) -> Result<Response> {
- let mut intermediate_state = pipeline.init_intermediate_state();
+ let params = GreptimePipelineParams::default();
- let mut results = Vec::with_capacity(value.len());
- for v in value {
- pipeline
- .prepare(v, &mut intermediate_state)
- .context(PipelineTransformSnafu)
- .context(PipelineSnafu)?;
- let r = pipeline
- .exec_mut(&mut intermediate_state)
+ let results = run_pipeline(
+ &pipeline_handler,
+ PipelineDefinition::Resolved(pipeline),
+ ¶ms,
+ pipeline::json_array_to_intermediate_state(value)
.context(PipelineTransformSnafu)
- .context(PipelineSnafu)?;
- results.push(r);
- pipeline.reset_intermediate_state(&mut intermediate_state);
- }
+ .context(PipelineSnafu)?,
+ "dry_run".to_owned(),
+ query_ctx,
+ true,
+ )
+ .await?;
let colume_type_key = "colume_type";
let data_type_key = "data_type";
let name_key = "name";
- let schema = pipeline
- .schemas()
- .iter()
- .map(|cs| {
- let mut map = Map::new();
- map.insert(name_key.to_string(), Value::String(cs.column_name.clone()));
- map.insert(
- data_type_key.to_string(),
- Value::String(cs.datatype().as_str_name().to_string()),
- );
- map.insert(
- colume_type_key.to_string(),
- Value::String(cs.semantic_type().as_str_name().to_string()),
- );
- map.insert(
- "fulltext".to_string(),
- Value::Bool(
- cs.options
- .clone()
- .is_some_and(|x| x.options.contains_key("fulltext")),
- ),
- );
- Value::Object(map)
- })
- .collect::<Vec<_>>();
- let rows = results
+ let results = results
.into_iter()
- .map(|row| {
- let row = row
- .values
- .into_iter()
- .enumerate()
- .map(|(idx, v)| {
- v.value_data
- .map(|d| {
- let mut map = Map::new();
- map.insert("value".to_string(), column_data_to_json(d));
- map.insert("key".to_string(), schema[idx][name_key].clone());
- map.insert(
- "semantic_type".to_string(),
- schema[idx][colume_type_key].clone(),
- );
- map.insert("data_type".to_string(), schema[idx][data_type_key].clone());
- Value::Object(map)
- })
- .unwrap_or(Value::Null)
- })
- .collect();
- Value::Array(row)
+ .filter_map(|row| {
+ if let Some(rows) = row.rows {
+ let table_name = row.table_name;
+ let schema = rows.schema;
+
+ let schema = schema
+ .iter()
+ .map(|cs| {
+ let mut map = Map::new();
+ map.insert(name_key.to_string(), Value::String(cs.column_name.clone()));
+ map.insert(
+ data_type_key.to_string(),
+ Value::String(cs.datatype().as_str_name().to_string()),
+ );
+ map.insert(
+ colume_type_key.to_string(),
+ Value::String(cs.semantic_type().as_str_name().to_string()),
+ );
+ map.insert(
+ "fulltext".to_string(),
+ Value::Bool(
+ cs.options
+ .clone()
+ .is_some_and(|x| x.options.contains_key("fulltext")),
+ ),
+ );
+ Value::Object(map)
+ })
+ .collect::<Vec<_>>();
+
+ let rows = rows
+ .rows
+ .into_iter()
+ .map(|row| {
+ row.values
+ .into_iter()
+ .enumerate()
+ .map(|(idx, v)| {
+ v.value_data
+ .map(|d| {
+ let mut map = Map::new();
+ map.insert("value".to_string(), column_data_to_json(d));
+ map.insert(
+ "key".to_string(),
+ schema[idx][name_key].clone(),
+ );
+ map.insert(
+ "semantic_type".to_string(),
+ schema[idx][colume_type_key].clone(),
+ );
+ map.insert(
+ "data_type".to_string(),
+ schema[idx][data_type_key].clone(),
+ );
+ Value::Object(map)
+ })
+ .unwrap_or(Value::Null)
+ })
+ .collect()
+ })
+ .collect();
+
+ let mut result = Map::new();
+ result.insert("schema".to_string(), Value::Array(schema));
+ result.insert("rows".to_string(), Value::Array(rows));
+ result.insert("table_name".to_string(), Value::String(table_name));
+ let result = Value::Object(result);
+ Some(result)
+ } else {
+ None
+ }
})
- .collect::<Vec<_>>();
- let mut result = Map::new();
- result.insert("schema".to_string(), Value::Array(schema));
- result.insert("rows".to_string(), Value::Array(rows));
- let result = Value::Object(result);
- Ok(Json(result).into_response())
+ .collect();
+ Ok(Json(Value::Array(results)).into_response())
}
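An illustrative sketch (not part of this commit) of the dry-run response shape produced by dryrun_pipeline_inner above: an array with one object per output table instead of a single {schema, rows} object. The key names are taken from this hunk; the concrete values are made up, and the enum string forms ("STRING", "FIELD") are assumptions about the as_str_name output.

    use serde_json::json;

    fn example_dryrun_response() -> serde_json::Value {
        json!([
            {
                "table_name": "dry_run",
                "schema": [
                    { "name": "message", "data_type": "STRING", "colume_type": "FIELD", "fulltext": false }
                ],
                "rows": [
                    [
                        { "key": "message", "value": "hello", "data_type": "STRING", "semantic_type": "FIELD" }
                    ]
                ]
            }
        ])
    }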
/// Dryrun pipeline with given data
@@ -414,6 +434,9 @@ pub async fn pipeline_dryrun(
) -> Result<Response> {
let handler = log_state.log_handler;
+ query_ctx.set_channel(Channel::Http);
+ let query_ctx = Arc::new(query_ctx);
+
match check_pipeline_dryrun_params_valid(&payload) {
Some(params) => {
let data = params.data;
@@ -426,20 +449,29 @@ pub async fn pipeline_dryrun(
to_pipeline_version(params.pipeline_version).context(PipelineSnafu)?;
let pipeline_name = check_pipeline_name_exists(params.pipeline_name)?;
let pipeline = handler
- .get_pipeline(&pipeline_name, version, Arc::new(query_ctx))
+ .get_pipeline(&pipeline_name, version, query_ctx.clone())
.await?;
- dryrun_pipeline_inner(data, &pipeline)
+ dryrun_pipeline_inner(data, pipeline, handler, &query_ctx).await
}
Some(pipeline) => {
let pipeline = handler.build_pipeline(&pipeline);
match pipeline {
- Ok(pipeline) => match dryrun_pipeline_inner(data, &pipeline) {
- Ok(response) => Ok(response),
- Err(e) => Ok(add_step_info_for_pipeline_dryrun_error(
- "Failed to exec pipeline",
- e,
- )),
- },
+ Ok(pipeline) => {
+ match dryrun_pipeline_inner(
+ data,
+ Arc::new(pipeline),
+ handler,
+ &query_ctx,
+ )
+ .await
+ {
+ Ok(response) => Ok(response),
+ Err(e) => Ok(add_step_info_for_pipeline_dryrun_error(
+ "Failed to exec pipeline",
+ e,
+ )),
+ }
+ }
Err(e) => Ok(add_step_info_for_pipeline_dryrun_error(
"Failed to build pipeline",
e,
@@ -463,14 +495,11 @@ pub async fn pipeline_dryrun(
check_data_valid(value.len())?;
- query_ctx.set_channel(Channel::Http);
- let query_ctx = Arc::new(query_ctx);
-
let pipeline = handler
.get_pipeline(&pipeline_name, version, query_ctx.clone())
.await?;
- dryrun_pipeline_inner(value, &pipeline)
+ dryrun_pipeline_inner(value, pipeline, handler, &query_ctx).await
}
}
}
@@ -544,7 +573,7 @@ fn extract_pipeline_value_by_content_type(
ct if ct == *TEXT_CONTENT_TYPE || ct == *TEXT_UTF8_CONTENT_TYPE => payload
.lines()
.filter(|line| !line.is_empty())
- .map(|line| Value::String(line.to_string()))
+ .map(|line| json!({"message": line}))
.collect(),
_ => UnsupportedContentTypeSnafu { content_type }.fail()?,
})
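A minimal sketch (not part of this commit) of the effect of the change above on text/plain payloads: each non-empty line is now wrapped into a {"message": ...} object before it reaches the pipeline, instead of being passed through as a bare string.

    use serde_json::{json, Value};

    fn wrap_text_lines(payload: &str) -> Vec<Value> {
        payload
            .lines()
            .filter(|line| !line.is_empty())
            .map(|line| json!({ "message": line }))
            .collect()
    }

    // wrap_text_lines("hello\n\nworld") yields
    // [{"message": "hello"}, {"message": "world"}].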
@@ -570,59 +599,20 @@ pub(crate) async fn ingest_logs_inner(
);
for request in log_ingest_requests {
- let transformed_data: Rows = if pipeline_name == GREPTIME_INTERNAL_IDENTITY_PIPELINE_NAME {
- let table = state
- .get_table(&request.table, &query_ctx)
- .await
- .context(CatalogSnafu)?;
- pipeline::identity_pipeline(request.values, table, &pipeline_params)
+ let requests = run_pipeline(
+ &state,
+ PipelineDefinition::from_name(&pipeline_name, version),
+ &pipeline_params,
+ pipeline::json_array_to_intermediate_state(request.values)
.context(PipelineTransformSnafu)
- .context(PipelineSnafu)?
- } else {
- let pipeline = state
- .get_pipeline(&pipeline_name, version, query_ctx.clone())
- .await?;
-
- let transform_timer = std::time::Instant::now();
- let mut intermediate_state = pipeline.init_intermediate_state();
- let mut results = Vec::with_capacity(request.values.len());
- for v in request.values {
- pipeline
- .prepare(v, &mut intermediate_state)
- .inspect_err(|_| {
- METRIC_HTTP_LOGS_TRANSFORM_ELAPSED
- .with_label_values(&[db.as_str(), METRIC_FAILURE_VALUE])
- .observe(transform_timer.elapsed().as_secs_f64());
- })
- .context(PipelineTransformSnafu)
- .context(PipelineSnafu)?;
- let r = pipeline
- .exec_mut(&mut intermediate_state)
- .inspect_err(|_| {
- METRIC_HTTP_LOGS_TRANSFORM_ELAPSED
- .with_label_values(&[db.as_str(), METRIC_FAILURE_VALUE])
- .observe(transform_timer.elapsed().as_secs_f64());
- })
- .context(PipelineTransformSnafu)
- .context(PipelineSnafu)?;
- results.push(r);
- pipeline.reset_intermediate_state(&mut intermediate_state);
- }
-
- METRIC_HTTP_LOGS_TRANSFORM_ELAPSED
- .with_label_values(&[db.as_str(), METRIC_SUCCESS_VALUE])
- .observe(transform_timer.elapsed().as_secs_f64());
-
- Rows {
- rows: results,
- schema: pipeline.schemas().clone(),
- }
- };
+ .context(PipelineSnafu)?,
+ request.table,
+ &query_ctx,
+ true,
+ )
+ .await?;
- insert_requests.push(RowInsertRequest {
- rows: Some(transformed_data),
- table_name: request.table.clone(),
- });
+ insert_requests.extend(requests);
}
let output = state
diff --git a/src/servers/src/http/extractor.rs b/src/servers/src/http/extractor.rs
index f3ae606636c5..ae578f21d302 100644
--- a/src/servers/src/http/extractor.rs
+++ b/src/servers/src/http/extractor.rs
@@ -18,12 +18,12 @@ use axum::extract::FromRequestParts;
use axum::http::request::Parts;
use axum::http::StatusCode;
use http::HeaderMap;
-use pipeline::SelectInfo;
+use pipeline::{GreptimePipelineParams, SelectInfo};
use crate::http::header::constants::{
GREPTIME_LOG_EXTRACT_KEYS_HEADER_NAME, GREPTIME_LOG_PIPELINE_NAME_HEADER_NAME,
GREPTIME_LOG_PIPELINE_VERSION_HEADER_NAME, GREPTIME_LOG_TABLE_NAME_HEADER_NAME,
- GREPTIME_TRACE_TABLE_NAME_HEADER_NAME,
+ GREPTIME_PIPELINE_PARAMS_HEADER, GREPTIME_TRACE_TABLE_NAME_HEADER_NAME,
};
/// Axum extractor for optional target log table name from HTTP header
@@ -91,6 +91,7 @@ where
pub struct PipelineInfo {
pub pipeline_name: Option<String>,
pub pipeline_version: Option<String>,
+ pub pipeline_params: GreptimePipelineParams,
}
impl<S> FromRequestParts<S> for PipelineInfo
@@ -105,20 +106,14 @@ where
string_value_from_header(headers, GREPTIME_LOG_PIPELINE_NAME_HEADER_NAME)?;
let pipeline_version =
string_value_from_header(headers, GREPTIME_LOG_PIPELINE_VERSION_HEADER_NAME)?;
- match (pipeline_name, pipeline_version) {
- (Some(name), Some(version)) => Ok(PipelineInfo {
- pipeline_name: Some(name),
- pipeline_version: Some(version),
- }),
- (None, _) => Ok(PipelineInfo {
- pipeline_name: None,
- pipeline_version: None,
- }),
- (Some(name), None) => Ok(PipelineInfo {
- pipeline_name: Some(name),
- pipeline_version: None,
- }),
- }
+ let pipeline_parameters =
+ string_value_from_header(headers, GREPTIME_PIPELINE_PARAMS_HEADER)?;
+
+ Ok(PipelineInfo {
+ pipeline_name,
+ pipeline_version,
+ pipeline_params: GreptimePipelineParams::from_params(pipeline_parameters.as_deref()),
+ })
}
}
diff --git a/src/servers/src/http/header.rs b/src/servers/src/http/header.rs
index 51a07ca01f0c..e14ce6172958 100644
--- a/src/servers/src/http/header.rs
+++ b/src/servers/src/http/header.rs
@@ -50,6 +50,8 @@ pub mod constants {
pub const GREPTIME_LOG_TABLE_NAME_HEADER_NAME: &str = "x-greptime-log-table-name";
pub const GREPTIME_LOG_EXTRACT_KEYS_HEADER_NAME: &str = "x-greptime-log-extract-keys";
pub const GREPTIME_TRACE_TABLE_NAME_HEADER_NAME: &str = "x-greptime-trace-table-name";
+ /// The header key that contains the pipeline params.
+ pub const GREPTIME_PIPELINE_PARAMS_HEADER: &str = "x-greptime-pipeline-params";
}
pub static GREPTIME_DB_HEADER_FORMAT: HeaderName =
diff --git a/src/servers/src/http/otlp.rs b/src/servers/src/http/otlp.rs
index 5cc8f777c44a..acb571a57d39 100644
--- a/src/servers/src/http/otlp.rs
+++ b/src/servers/src/http/otlp.rs
@@ -30,7 +30,7 @@ use opentelemetry_proto::tonic::collector::trace::v1::{
ExportTraceServiceRequest, ExportTraceServiceResponse,
};
use pipeline::util::to_pipeline_version;
-use pipeline::PipelineWay;
+use pipeline::{PipelineDefinition, PipelineWay};
use prost::Message;
use session::context::{Channel, QueryContext};
use snafu::prelude::*;
@@ -40,7 +40,7 @@ use crate::error::{self, PipelineSnafu, Result};
use crate::http::extractor::{LogTableName, PipelineInfo, SelectInfoWrapper, TraceTableName};
use crate::metrics::METRIC_HTTP_OPENTELEMETRY_LOGS_ELAPSED;
use crate::otlp::trace::TRACE_TABLE_NAME;
-use crate::query_handler::OpenTelemetryProtocolHandlerRef;
+use crate::query_handler::{OpenTelemetryProtocolHandlerRef, PipelineHandler};
#[axum_macros::debug_handler]
#[tracing::instrument(skip_all, fields(protocol = "otlp", request_type = "metrics"))]
@@ -118,25 +118,29 @@ pub async fn logs(
.start_timer();
let request = ExportLogsServiceRequest::decode(bytes).context(error::DecodeOtlpRequestSnafu)?;
- let pipeline_way = if let Some(pipeline_name) = &pipeline_info.pipeline_name {
- let pipeline_version =
- to_pipeline_version(pipeline_info.pipeline_version).context(PipelineSnafu)?;
- let pipeline = match handler
- .get_pipeline(pipeline_name, pipeline_version, query_ctx.clone())
- .await
- {
- Ok(p) => p,
- Err(e) => {
- return Err(e);
- }
- };
- PipelineWay::Custom(pipeline)
+ let pipeline = if let Some(pipeline_name) = pipeline_info.pipeline_name {
+ PipelineWay::Pipeline(PipelineDefinition::from_name(
+ &pipeline_name,
+ to_pipeline_version(pipeline_info.pipeline_version).context(PipelineSnafu)?,
+ ))
} else {
- PipelineWay::OtlpLog(Box::new(select_info))
+ PipelineWay::OtlpLogDirect(Box::new(select_info))
};
+ let pipeline_params = pipeline_info.pipeline_params;
+
+ // here we use nightly feature `trait_upcasting` to convert handler to
+ // pipeline_handler
+ let pipeline_handler: Arc<dyn PipelineHandler + Send + Sync> = handler.clone();
handler
- .logs(request, pipeline_way, tablename, query_ctx)
+ .logs(
+ pipeline_handler,
+ request,
+ pipeline,
+ pipeline_params,
+ tablename,
+ query_ctx,
+ )
.await
.map(|o| OtlpResponse {
resp_body: ExportLogsServiceResponse {
diff --git a/src/servers/src/lib.rs b/src/servers/src/lib.rs
index e95bdac7525d..61bf041f526f 100644
--- a/src/servers/src/lib.rs
+++ b/src/servers/src/lib.rs
@@ -17,6 +17,7 @@
#![feature(exclusive_wrapper)]
#![feature(let_chains)]
#![feature(if_let_guard)]
+#![feature(trait_upcasting)]
use datafusion_expr::LogicalPlan;
use datatypes::schema::Schema;
@@ -37,6 +38,7 @@ pub mod metrics_handler;
pub mod mysql;
pub mod opentsdb;
pub mod otlp;
+mod pipeline;
pub mod postgres;
mod prom_row_builder;
pub mod prom_store;
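A hedged sketch (not part of this commit) of what the trait_upcasting feature enabled above is used for in otlp.rs: coercing an Arc of a sub-trait object into an Arc of its super-trait object. The trait and type names here are placeholders, and it compiles only on a nightly toolchain with the feature.

    #![feature(trait_upcasting)]
    use std::sync::Arc;

    trait Base {}
    trait Derived: Base {}

    struct Handler;
    impl Base for Handler {}
    impl Derived for Handler {}

    fn upcast(handler: Arc<dyn Derived + Send + Sync>) -> Arc<dyn Base + Send + Sync> {
        handler // the upcasting coercion happens here
    }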
diff --git a/src/servers/src/otlp/logs.rs b/src/servers/src/otlp/logs.rs
index 71c104666b5f..bad05e88abc9 100644
--- a/src/servers/src/otlp/logs.rs
+++ b/src/servers/src/otlp/logs.rs
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::collections::{BTreeMap, HashMap as StdHashMap};
+use std::collections::HashMap as StdHashMap;
use api::v1::column_data_type_extension::TypeExt;
use api::v1::value::ValueData;
@@ -24,14 +24,18 @@ use jsonb::{Number as JsonbNumber, Value as JsonbValue};
use opentelemetry_proto::tonic::collector::logs::v1::ExportLogsServiceRequest;
use opentelemetry_proto::tonic::common::v1::{any_value, AnyValue, InstrumentationScope, KeyValue};
use opentelemetry_proto::tonic::logs::v1::{LogRecord, ResourceLogs, ScopeLogs};
-use pipeline::{Array, Map, PipelineWay, SchemaInfo, SelectInfo, Value as PipelineValue};
+use pipeline::{GreptimePipelineParams, PipelineWay, SchemaInfo, SelectInfo};
+use serde_json::{Map, Value};
+use session::context::QueryContextRef;
use snafu::{ensure, ResultExt};
use super::trace::attributes::OtlpAnyValue;
use super::utils::{bytes_to_hex_string, key_value_to_jsonb};
use crate::error::{
- IncompatibleSchemaSnafu, OpenTelemetryLogSnafu, Result, UnsupportedJsonDataTypeForTagSnafu,
+ IncompatibleSchemaSnafu, PipelineTransformSnafu, Result, UnsupportedJsonDataTypeForTagSnafu,
};
+use crate::pipeline::run_pipeline;
+use crate::query_handler::PipelineHandlerRef;
pub const LOG_TABLE_NAME: &str = "opentelemetry_logs";
@@ -42,13 +46,16 @@ pub const LOG_TABLE_NAME: &str = "opentelemetry_logs";
/// for data structure of OTLP metrics.
///
/// Returns `InsertRequests` and total number of rows to ingest
-pub fn to_grpc_insert_requests(
+pub async fn to_grpc_insert_requests(
request: ExportLogsServiceRequest,
pipeline: PipelineWay,
+ pipeline_params: GreptimePipelineParams,
table_name: String,
+ query_ctx: &QueryContextRef,
+ pipeline_handler: PipelineHandlerRef,
) -> Result<(RowInsertRequests, usize)> {
match pipeline {
- PipelineWay::OtlpLog(select_info) => {
+ PipelineWay::OtlpLogDirect(select_info) => {
let rows = parse_export_logs_service_request_to_rows(request, select_info)?;
let len = rows.rows.len();
let insert_request = RowInsertRequest {
@@ -62,53 +69,48 @@ pub fn to_grpc_insert_requests(
len,
))
}
- PipelineWay::Custom(p) => {
- let request = parse_export_logs_service_request(request);
- let mut result = Vec::new();
- let mut intermediate_state = p.init_intermediate_state();
- for v in request {
- p.prepare_pipeline_value(v, &mut intermediate_state)
- .context(OpenTelemetryLogSnafu)?;
- let r = p
- .exec_mut(&mut intermediate_state)
- .context(OpenTelemetryLogSnafu)?;
- result.push(r);
- }
- let len = result.len();
- let rows = Rows {
- schema: p.schemas().clone(),
- rows: result,
- };
- let insert_request = RowInsertRequest {
- rows: Some(rows),
+ PipelineWay::Pipeline(pipeline_def) => {
+ let data = parse_export_logs_service_request(request);
+ let array =
+ pipeline::json_array_to_intermediate_state(data).context(PipelineTransformSnafu)?;
+
+ let inserts = run_pipeline(
+ &pipeline_handler,
+ pipeline_def,
+ &pipeline_params,
+ array,
table_name,
- };
- let insert_requests = RowInsertRequests {
- inserts: vec![insert_request],
- };
+ query_ctx,
+ true,
+ )
+ .await?;
+ let len = inserts
+ .iter()
+ .map(|insert| {
+ insert
+ .rows
+ .as_ref()
+ .map(|rows| rows.rows.len())
+ .unwrap_or(0)
+ })
+ .sum();
+
+ let insert_requests = RowInsertRequests { inserts };
Ok((insert_requests, len))
}
}
}
-fn scope_to_pipeline_value(
- scope: Option<InstrumentationScope>,
-) -> (PipelineValue, PipelineValue, PipelineValue) {
+fn scope_to_pipeline_value(scope: Option<InstrumentationScope>) -> (Value, Value, Value) {
scope
.map(|x| {
(
- PipelineValue::Map(Map {
- values: key_value_to_map(x.attributes),
- }),
- PipelineValue::String(x.version),
- PipelineValue::String(x.name),
+ Value::Object(key_value_to_map(x.attributes)),
+ Value::String(x.version),
+ Value::String(x.name),
)
})
- .unwrap_or((
- PipelineValue::Null,
- PipelineValue::Null,
- PipelineValue::Null,
- ))
+ .unwrap_or((Value::Null, Value::Null, Value::Null))
}
fn scope_to_jsonb(
@@ -127,51 +129,43 @@ fn scope_to_jsonb(
fn log_to_pipeline_value(
log: LogRecord,
- resource_schema_url: PipelineValue,
- resource_attr: PipelineValue,
- scope_schema_url: PipelineValue,
- scope_name: PipelineValue,
- scope_version: PipelineValue,
- scope_attrs: PipelineValue,
-) -> PipelineValue {
- let log_attrs = PipelineValue::Map(Map {
- values: key_value_to_map(log.attributes),
- });
- let mut map = BTreeMap::new();
- map.insert(
- "Timestamp".to_string(),
- PipelineValue::Uint64(log.time_unix_nano),
- );
+ resource_schema_url: Value,
+ resource_attr: Value,
+ scope_schema_url: Value,
+ scope_name: Value,
+ scope_version: Value,
+ scope_attrs: Value,
+) -> Value {
+ let log_attrs = Value::Object(key_value_to_map(log.attributes));
+ let mut map = Map::new();
+ map.insert("Timestamp".to_string(), Value::from(log.time_unix_nano));
map.insert(
"ObservedTimestamp".to_string(),
- PipelineValue::Uint64(log.observed_time_unix_nano),
+ Value::from(log.observed_time_unix_nano),
);
    // needs to be converted to a string
map.insert(
"TraceId".to_string(),
- PipelineValue::String(bytes_to_hex_string(&log.trace_id)),
+ Value::String(bytes_to_hex_string(&log.trace_id)),
);
map.insert(
"SpanId".to_string(),
- PipelineValue::String(bytes_to_hex_string(&log.span_id)),
- );
- map.insert("TraceFlags".to_string(), PipelineValue::Uint32(log.flags));
- map.insert(
- "SeverityText".to_string(),
- PipelineValue::String(log.severity_text),
+ Value::String(bytes_to_hex_string(&log.span_id)),
);
+ map.insert("TraceFlags".to_string(), Value::from(log.flags));
+ map.insert("SeverityText".to_string(), Value::String(log.severity_text));
map.insert(
"SeverityNumber".to_string(),
- PipelineValue::Int32(log.severity_number),
+ Value::from(log.severity_number),
);
    // needs to be converted to a string
map.insert(
"Body".to_string(),
log.body
.as_ref()
- .map(|x| PipelineValue::String(log_body_to_string(x)))
- .unwrap_or(PipelineValue::Null),
+ .map(|x| Value::String(log_body_to_string(x)))
+ .unwrap_or(Value::Null),
);
map.insert("ResourceSchemaUrl".to_string(), resource_schema_url);
@@ -181,7 +175,7 @@ fn log_to_pipeline_value(
map.insert("ScopeVersion".to_string(), scope_version);
map.insert("ScopeAttributes".to_string(), scope_attrs);
map.insert("LogAttributes".to_string(), log_attrs);
- PipelineValue::Map(Map { values: map })
+ Value::Object(map)
}
fn build_otlp_logs_identity_schema() -> Vec<ColumnSchema> {
@@ -636,22 +630,18 @@ fn merge_values(
/// transform otlp logs request to pipeline value
/// https://opentelemetry.io/docs/concepts/signals/logs/
-fn parse_export_logs_service_request(request: ExportLogsServiceRequest) -> Vec<PipelineValue> {
+fn parse_export_logs_service_request(request: ExportLogsServiceRequest) -> Vec<Value> {
let mut result = Vec::new();
for r in request.resource_logs {
let resource_attr = r
.resource
- .map(|x| {
- PipelineValue::Map(Map {
- values: key_value_to_map(x.attributes),
- })
- })
- .unwrap_or(PipelineValue::Null);
- let resource_schema_url = PipelineValue::String(r.schema_url);
+ .map(|x| Value::Object(key_value_to_map(x.attributes)))
+ .unwrap_or(Value::Null);
+ let resource_schema_url = Value::String(r.schema_url);
for scope_logs in r.scope_logs {
let (scope_attrs, scope_version, scope_name) =
scope_to_pipeline_value(scope_logs.scope);
- let scope_schema_url = PipelineValue::String(scope_logs.schema_url);
+ let scope_schema_url = Value::String(scope_logs.schema_url);
for log in scope_logs.log_records {
let value = log_to_pipeline_value(
log,
@@ -670,41 +660,41 @@ fn parse_export_logs_service_request(request: ExportLogsServiceRequest) -> Vec<P
}
// convert AnyValue to pipeline value
-fn any_value_to_pipeline_value(value: any_value::Value) -> PipelineValue {
+fn any_value_to_pipeline_value(value: any_value::Value) -> Value {
match value {
- any_value::Value::StringValue(s) => PipelineValue::String(s),
- any_value::Value::IntValue(i) => PipelineValue::Int64(i),
- any_value::Value::DoubleValue(d) => PipelineValue::Float64(d),
- any_value::Value::BoolValue(b) => PipelineValue::Boolean(b),
+ any_value::Value::StringValue(s) => Value::String(s),
+ any_value::Value::IntValue(i) => Value::from(i),
+ any_value::Value::DoubleValue(d) => Value::from(d),
+ any_value::Value::BoolValue(b) => Value::Bool(b),
any_value::Value::ArrayValue(a) => {
let values = a
.values
.into_iter()
.map(|v| match v.value {
Some(value) => any_value_to_pipeline_value(value),
- None => PipelineValue::Null,
+ None => Value::Null,
})
.collect();
- PipelineValue::Array(Array { values })
+ Value::Array(values)
}
any_value::Value::KvlistValue(kv) => {
let value = key_value_to_map(kv.values);
- PipelineValue::Map(Map { values: value })
+ Value::Object(value)
}
- any_value::Value::BytesValue(b) => PipelineValue::String(bytes_to_hex_string(&b)),
+ any_value::Value::BytesValue(b) => Value::String(bytes_to_hex_string(&b)),
}
}
// convert otlp keyValue vec to map
-fn key_value_to_map(key_values: Vec<KeyValue>) -> BTreeMap<String, PipelineValue> {
- let mut map = BTreeMap::new();
+fn key_value_to_map(key_values: Vec<KeyValue>) -> Map<String, Value> {
+ let mut map = Map::new();
for kv in key_values {
let value = match kv.value {
Some(value) => match value.value {
Some(value) => any_value_to_pipeline_value(value),
- None => PipelineValue::Null,
+ None => Value::Null,
},
- None => PipelineValue::Null,
+ None => Value::Null,
};
map.insert(kv.key.clone(), value);
}
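
The rewrite above drops the typed `PipelineValue` variants in favor of `serde_json`'s `From` conversions. Below is a quick standalone check (plain serde_json, not code from this commit) of how those conversions behave; the one non-obvious case is that serde_json has no encoding for non-finite doubles, so they become `Null`.

```rust
use serde_json::Value;

fn main() {
    // Unsigned nanosecond timestamps such as `time_unix_nano` stay u64-backed numbers.
    assert!(Value::from(1_700_000_000_000_000_000u64).is_u64());
    // Signed integers (e.g. `severity_number`, widened from i32) become i64-backed numbers.
    assert!(Value::from(-5i64).is_i64());
    // Finite doubles become JSON numbers...
    assert!(Value::from(0.5f64).is_f64());
    // ...but serde_json cannot represent NaN or infinity, so they turn into Null.
    assert_eq!(Value::from(f64::NAN), Value::Null);
}
```
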
diff --git a/src/servers/src/pipeline.rs b/src/servers/src/pipeline.rs
new file mode 100644
index 000000000000..27c4d2757aa5
--- /dev/null
+++ b/src/servers/src/pipeline.rs
@@ -0,0 +1,159 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::collections::BTreeMap;
+use std::sync::Arc;
+
+use api::v1::{RowInsertRequest, Rows};
+use pipeline::{
+ DispatchedTo, GreptimePipelineParams, GreptimeTransformer, Pipeline, PipelineDefinition,
+ PipelineExecOutput, GREPTIME_INTERNAL_IDENTITY_PIPELINE_NAME,
+};
+use session::context::QueryContextRef;
+use snafu::ResultExt;
+
+use crate::error::{CatalogSnafu, PipelineTransformSnafu, Result};
+use crate::metrics::{
+ METRIC_FAILURE_VALUE, METRIC_HTTP_LOGS_TRANSFORM_ELAPSED, METRIC_SUCCESS_VALUE,
+};
+use crate::query_handler::PipelineHandlerRef;
+
+/// Never call this with `GreptimeIdentityPipeline`: it only resolves real, stored pipelines.
+pub async fn get_pipeline(
+ pipeline_def: PipelineDefinition,
+ handler: &PipelineHandlerRef,
+ query_ctx: &QueryContextRef,
+) -> Result<Arc<Pipeline<GreptimeTransformer>>> {
+ match pipeline_def {
+ PipelineDefinition::Resolved(pipeline) => Ok(pipeline),
+ PipelineDefinition::ByNameAndValue((name, version)) => {
+ handler
+ .get_pipeline(&name, version, query_ctx.clone())
+ .await
+ }
+ _ => {
+ unreachable!("Never call get_pipeline on identity.")
+ }
+ }
+}
+
+pub(crate) async fn run_pipeline(
+ state: &PipelineHandlerRef,
+ pipeline_definition: PipelineDefinition,
+ pipeline_parameters: &GreptimePipelineParams,
+ array: Vec<BTreeMap<String, pipeline::Value>>,
+ table_name: String,
+ query_ctx: &QueryContextRef,
+ is_top_level: bool,
+) -> Result<Vec<RowInsertRequest>> {
+ let db = query_ctx.get_db_string();
+
+ if matches!(
+ pipeline_definition,
+ PipelineDefinition::GreptimeIdentityPipeline
+ ) {
+ let table = state
+ .get_table(&table_name, query_ctx)
+ .await
+ .context(CatalogSnafu)?;
+ pipeline::identity_pipeline(array, table, pipeline_parameters)
+ .map(|rows| {
+ vec![RowInsertRequest {
+ rows: Some(rows),
+ table_name,
+ }]
+ })
+ .context(PipelineTransformSnafu)
+ } else {
+ let pipeline = get_pipeline(pipeline_definition, state, query_ctx).await?;
+
+ let transform_timer = std::time::Instant::now();
+
+ let mut transformed = Vec::with_capacity(array.len());
+ let mut dispatched: BTreeMap<DispatchedTo, Vec<BTreeMap<String, pipeline::Value>>> =
+ BTreeMap::new();
+
+ for mut values in array {
+ let r = pipeline
+ .exec_mut(&mut values)
+ .inspect_err(|_| {
+ METRIC_HTTP_LOGS_TRANSFORM_ELAPSED
+ .with_label_values(&[db.as_str(), METRIC_FAILURE_VALUE])
+ .observe(transform_timer.elapsed().as_secs_f64());
+ })
+ .context(PipelineTransformSnafu)?;
+
+ match r {
+ PipelineExecOutput::Transformed(row) => {
+ transformed.push(row);
+ }
+ PipelineExecOutput::DispatchedTo(dispatched_to) => {
+ if let Some(coll) = dispatched.get_mut(&dispatched_to) {
+ coll.push(values);
+ } else {
+ dispatched.insert(dispatched_to, vec![values]);
+ }
+ }
+ }
+ }
+
+ let mut results = Vec::new();
+        // If the current pipeline generates transformed results, build them as a
+        // `RowInsertRequest` and append it to the results. If the pipeline doesn't
+        // dispatch anything, this is the only output of the pipeline.
+ if !transformed.is_empty() {
+ results.push(RowInsertRequest {
+ rows: Some(Rows {
+ rows: transformed,
+                    schema: pipeline.schemas().clone(),
+ }),
+ table_name: table_name.clone(),
+ })
+ }
+
+        // If the current pipeline contains a dispatcher with several rules, we may
+        // have already accumulated rows under several dispatched destinations.
+        for (dispatched_to, coll) in dispatched {
+            // Generate the new table name from the rule's table suffix and the
+            // current custom table name.
+            let table_name = dispatched_to.dispatched_to_table_name(&table_name);
+            let next_pipeline_name = dispatched_to
+                .pipeline
+                .as_deref()
+                .unwrap_or(GREPTIME_INTERNAL_IDENTITY_PIPELINE_NAME);
+
+            // Run the pipeline recursively for each dispatched destination.
+            let requests = Box::pin(run_pipeline(
+                state,
+                PipelineDefinition::from_name(next_pipeline_name, None),
+                pipeline_parameters,
+                coll,
+                table_name,
+                query_ctx,
+                false,
+            ))
+            .await?;
+
+            results.extend(requests);
+        }
+
+        if is_top_level {
+            METRIC_HTTP_LOGS_TRANSFORM_ELAPSED
+                .with_label_values(&[db.as_str(), METRIC_SUCCESS_VALUE])
+                .observe(transform_timer.elapsed().as_secs_f64());
+        }
+
+        Ok(results)
+ }
+}
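
The routing logic in `run_pipeline` above splits each batch into rows kept for the current table and rows re-routed to other destinations, then recurses per destination. A minimal, self-contained sketch of that grouping step follows; `DispatchKey`, `ExecOutcome`, and `group_rows` are illustrative stand-ins rather than the crate's `DispatchedTo` and `PipelineExecOutput` types.

```rust
use std::collections::BTreeMap;

/// Illustrative stand-in for a dispatcher destination (not the crate's `DispatchedTo`).
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
struct DispatchKey {
    table_suffix: String,
    pipeline: Option<String>,
}

/// One pipeline output: either a transformed row or a row re-routed to another destination.
enum ExecOutcome {
    Transformed(String),
    DispatchedTo(DispatchKey, String),
}

/// Split outputs the way `run_pipeline` does: transformed rows stay with the current
/// table, dispatched rows are grouped per destination for a recursive run.
fn group_rows(outputs: Vec<ExecOutcome>) -> (Vec<String>, BTreeMap<DispatchKey, Vec<String>>) {
    let mut transformed = Vec::new();
    let mut dispatched: BTreeMap<DispatchKey, Vec<String>> = BTreeMap::new();
    for out in outputs {
        match out {
            ExecOutcome::Transformed(row) => transformed.push(row),
            ExecOutcome::DispatchedTo(key, row) => dispatched.entry(key).or_default().push(row),
        }
    }
    (transformed, dispatched)
}

fn main() {
    let outputs = vec![
        ExecOutcome::Transformed("row kept by the root pipeline".into()),
        ExecOutcome::DispatchedTo(
            DispatchKey { table_suffix: "http".into(), pipeline: Some("http".into()) },
            "row routed to the http pipeline".into(),
        ),
    ];
    let (kept, routed) = group_rows(outputs);
    assert_eq!(kept.len(), 1);
    assert_eq!(routed.len(), 1);
    println!("kept {} row(s), routed {:?}", kept.len(), routed);
}
```
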
diff --git a/src/servers/src/query_handler.rs b/src/servers/src/query_handler.rs
index d450815a4a0c..dd41305626b9 100644
--- a/src/servers/src/query_handler.rs
+++ b/src/servers/src/query_handler.rs
@@ -38,7 +38,10 @@ use log_query::LogQuery;
use opentelemetry_proto::tonic::collector::logs::v1::ExportLogsServiceRequest;
use opentelemetry_proto::tonic::collector::metrics::v1::ExportMetricsServiceRequest;
use opentelemetry_proto::tonic::collector::trace::v1::ExportTraceServiceRequest;
-use pipeline::{GreptimeTransformer, Pipeline, PipelineInfo, PipelineVersion, PipelineWay};
+use pipeline::{
+ GreptimePipelineParams, GreptimeTransformer, Pipeline, PipelineInfo, PipelineVersion,
+ PipelineWay,
+};
use serde_json::Value;
use session::context::{QueryContext, QueryContextRef};
@@ -110,8 +113,10 @@ pub trait OpenTelemetryProtocolHandler: PipelineHandler {
async fn logs(
&self,
+ pipeline_handler: PipelineHandlerRef,
request: ExportLogsServiceRequest,
pipeline: PipelineWay,
+ pipeline_params: GreptimePipelineParams,
table_name: String,
ctx: QueryContextRef,
) -> Result<Output>;
diff --git a/tests-integration/tests/http.rs b/tests-integration/tests/http.rs
index 14fd45089e3e..75ea27b0cd0a 100644
--- a/tests-integration/tests/http.rs
+++ b/tests-integration/tests/http.rs
@@ -93,6 +93,7 @@ macro_rules! http_tests {
test_plain_text_ingestion,
test_identify_pipeline,
test_identify_pipeline_with_flatten,
+ test_pipeline_dispatcher,
test_otlp_metrics,
test_otlp_traces,
@@ -1383,6 +1384,197 @@ pub async fn test_identify_pipeline(store_type: StorageType) {
guard.remove_all().await;
}
+pub async fn test_pipeline_dispatcher(storage_type: StorageType) {
+ common_telemetry::init_default_ut_logging();
+ let (app, mut guard) =
+ setup_test_http_app_with_frontend(storage_type, "test_pipeline_dispatcher").await;
+
+ // handshake
+ let client = TestClient::new(app).await;
+
+ let root_pipeline = r#"
+processors:
+ - date:
+ field: time
+ formats:
+ - "%Y-%m-%d %H:%M:%S%.3f"
+ ignore_missing: true
+
+dispatcher:
+ field: type
+ rules:
+ - value: http
+ table_suffix: http
+ pipeline: http
+ - value: db
+ table_suffix: db
+ - value: not_found
+ table_suffix: not_found
+ pipeline: not_found
+
+transform:
+ - fields:
+ - id1, id1_root
+ - id2, id2_root
+ type: int32
+ - fields:
+ - type
+ - log
+ - logger
+ type: string
+ - field: time
+ type: time
+ index: timestamp
+"#;
+
+ let http_pipeline = r#"
+processors:
+
+transform:
+ - fields:
+ - id1, id1_http
+ - id2, id2_http
+ type: int32
+ - fields:
+ - log
+ - logger
+ type: string
+ - field: time
+ type: time
+ index: timestamp
+"#;
+
+ // 1. create pipeline
+ let res = client
+ .post("/v1/events/pipelines/root")
+ .header("Content-Type", "application/x-yaml")
+ .body(root_pipeline)
+ .send()
+ .await;
+
+ assert_eq!(res.status(), StatusCode::OK);
+
+ let res = client
+ .post("/v1/events/pipelines/http")
+ .header("Content-Type", "application/x-yaml")
+ .body(http_pipeline)
+ .send()
+ .await;
+
+ assert_eq!(res.status(), StatusCode::OK);
+
+ // 2. write data
+ let data_body = r#"
+[
+ {
+ "id1": "2436",
+ "id2": "2528",
+ "logger": "INTERACT.MANAGER",
+ "type": "http",
+ "time": "2024-05-25 20:16:37.217",
+ "log": "ClusterAdapter:enter sendTextDataToCluster\\n"
+ }
+]
+"#;
+ let res = client
+ .post("/v1/events/logs?db=public&table=logs1&pipeline_name=root")
+ .header("Content-Type", "application/json")
+ .body(data_body)
+ .send()
+ .await;
+ assert_eq!(res.status(), StatusCode::OK);
+
+ let data_body = r#"
+[
+ {
+ "id1": "2436",
+ "id2": "2528",
+ "logger": "INTERACT.MANAGER",
+ "type": "db",
+ "time": "2024-05-25 20:16:37.217",
+ "log": "ClusterAdapter:enter sendTextDataToCluster\\n"
+ }
+]
+"#;
+ let res = client
+ .post("/v1/events/logs?db=public&table=logs1&pipeline_name=root")
+ .header("Content-Type", "application/json")
+ .body(data_body)
+ .send()
+ .await;
+ assert_eq!(res.status(), StatusCode::OK);
+
+ let data_body = r#"
+[
+ {
+ "id1": "2436",
+ "id2": "2528",
+ "logger": "INTERACT.MANAGER",
+ "type": "api",
+ "time": "2024-05-25 20:16:37.217",
+ "log": "ClusterAdapter:enter sendTextDataToCluster\\n"
+ }
+]
+"#;
+ let res = client
+ .post("/v1/events/logs?db=public&table=logs1&pipeline_name=root")
+ .header("Content-Type", "application/json")
+ .body(data_body)
+ .send()
+ .await;
+ assert_eq!(res.status(), StatusCode::OK);
+
+ let data_body = r#"
+[
+ {
+ "id1": "2436",
+ "id2": "2528",
+ "logger": "INTERACT.MANAGER",
+ "type": "not_found",
+ "time": "2024-05-25 20:16:37.217",
+ "log": "ClusterAdapter:enter sendTextDataToCluster\\n"
+ }
+]
+"#;
+ let res = client
+ .post("/v1/events/logs?db=public&table=logs1&pipeline_name=root")
+ .header("Content-Type", "application/json")
+ .body(data_body)
+ .send()
+ .await;
+ assert_eq!(res.status(), StatusCode::BAD_REQUEST);
+
+ // 3. verify data
+ let expected = "[[2436]]";
+ validate_data(
+ "test_dispatcher_pipeline default table",
+ &client,
+ "select id1_root from logs1",
+ expected,
+ )
+ .await;
+
+ let expected = "[[2436]]";
+ validate_data(
+ "test_dispatcher_pipeline http table",
+ &client,
+ "select id1_http from logs1_http",
+ expected,
+ )
+ .await;
+
+ let expected = "[[\"2436\"]]";
+ validate_data(
+ "test_dispatcher_pipeline db table",
+ &client,
+ "select id1 from logs1_db",
+ expected,
+ )
+ .await;
+
+ guard.remove_all().await;
+}
+
pub async fn test_identify_pipeline_with_flatten(store_type: StorageType) {
common_telemetry::init_default_ut_logging();
let (app, mut guard) =
@@ -1587,8 +1779,8 @@ transform:
.await;
assert_eq!(res.status(), StatusCode::OK);
let body: Value = res.json().await;
- let schema = &body["schema"];
- let rows = &body["rows"];
+ let schema = &body[0]["schema"];
+ let rows = &body[0]["rows"];
assert_eq!(schema, &dryrun_schema);
assert_eq!(rows, &dryrun_rows);
}
@@ -1617,8 +1809,8 @@ transform:
.await;
assert_eq!(res.status(), StatusCode::OK);
let body: Value = res.json().await;
- let schema = &body["schema"];
- let rows = &body["rows"];
+ let schema = &body[0]["schema"];
+ let rows = &body[0]["rows"];
assert_eq!(schema, &dryrun_schema);
assert_eq!(rows, &dryrun_rows);
}
@@ -1645,8 +1837,8 @@ transform:
.await;
assert_eq!(res.status(), StatusCode::OK);
let body: Value = res.json().await;
- let schema = &body["schema"];
- let rows = &body["rows"];
+ let schema = &body[0]["schema"];
+ let rows = &body[0]["rows"];
assert_eq!(schema, &dryrun_schema);
assert_eq!(rows, &dryrun_rows);
}
@@ -1687,7 +1879,7 @@ pub async fn test_plain_text_ingestion(store_type: StorageType) {
processors:
- dissect:
fields:
- - line
+ - message
patterns:
- "%{+ts} %{+ts} %{content}"
- date:
@@ -2316,7 +2508,7 @@ async fn validate_data(test_name: &str, client: &TestClient, sql: &str, expected
.get(format!("/v1/sql?sql={sql}").as_str())
.send()
.await;
- assert_eq!(res.status(), StatusCode::OK);
+ assert_eq!(res.status(), StatusCode::OK, "validate {test_name} fail");
let resp = res.text().await;
let v = get_rows_from_output(&resp);
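
Judging from the expectations in `test_pipeline_dispatcher` above (rows of type `http` land in `logs1_http`, rows of type `db` in `logs1_db`), a matched rule's `table_suffix` appears to be appended to the request's table name with an underscore. A rough sketch of that naming, inferred from the test rather than taken from the dispatcher implementation:

```rust
// Hypothetical helper, inferred from the test expectations; not the crate's code.
fn dispatched_table_name(base: &str, table_suffix: Option<&str>) -> String {
    match table_suffix {
        Some(suffix) => format!("{base}_{suffix}"),
        None => base.to_string(),
    }
}

fn main() {
    assert_eq!(dispatched_table_name("logs1", Some("http")), "logs1_http");
    assert_eq!(dispatched_table_name("logs1", Some("db")), "logs1_db");
    assert_eq!(dispatched_table_name("logs1", None), "logs1");
}
```

Per the same test, rows whose `type` matches no rule (the `api` record) stay in the base `logs1` table and are transformed by the root pipeline, while a rule pointing at a missing pipeline (`not_found`) makes the request fail with 400.
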
|
feat
|
pipeline dispatcher part 2: execution (#5409)
|
6eb746d994f6f5a7f9d259989df3b94b6bbdf87a
|
2025-01-13 10:45:20
|
Weny Xu
|
fix: skip building indexer when `indexed_column_ids` are empty (#5348)
| false
|
diff --git a/src/mito2/src/sst/index.rs b/src/mito2/src/sst/index.rs
index dc0f0978f84c..4d45e21a6247 100644
--- a/src/mito2/src/sst/index.rs
+++ b/src/mito2/src/sst/index.rs
@@ -222,9 +222,12 @@ impl<'a> IndexerBuilder<'a> {
return None;
}
- if self.metadata.primary_key.is_empty() {
+ let indexed_column_ids = self.metadata.inverted_indexed_column_ids(
+ self.index_options.inverted_index.ignore_column_ids.iter(),
+ );
+ if indexed_column_ids.is_empty() {
debug!(
- "No tag columns, skip creating index, region_id: {}, file_id: {}",
+ "No columns to be indexed, skip creating inverted index, region_id: {}, file_id: {}",
self.metadata.region_id, self.file_id,
);
return None;
@@ -259,9 +262,7 @@ impl<'a> IndexerBuilder<'a> {
self.intermediate_manager.clone(),
self.inverted_index_config.mem_threshold_on_create(),
segment_row_count,
- self.metadata.inverted_indexed_column_ids(
- self.index_options.inverted_index.ignore_column_ids.iter(),
- ),
+ indexed_column_ids,
);
Some(indexer)
|
fix
|
skip building indexer when `indexed_column_ids` are empty (#5348)
|