| hash (string, 40 chars) | date (2022-04-19 15:26:27 to 2025-03-21 10:49:23) | author (86 classes) | commit_message (string, 12–115 chars) | is_merge (bool, 1 class) | git_diff (string, 214–553k chars, nullable) | type (15 classes) | masked_commit_message (string, 8–110 chars) |
|---|---|---|---|---|---|---|---|
bbbba29afce1fdc0aa5d682bf70cdedb0f10760e
|
2025-01-06 12:39:48
|
shuiyisong
|
feat: support Loki JSON write (#5288)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index c52b7cfec358..374ac9d0560f 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4141,6 +4141,12 @@ version = "1.0.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
+[[package]]
+name = "foldhash"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f81ec6369c545a7d40e4589b5597581fa1c441fe1cce96dd1de43159910a36a2"
+
[[package]]
name = "form_urlencoded"
version = "1.2.1"
@@ -4692,6 +4698,11 @@ name = "hashbrown"
version = "0.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e087f84d4f86bf4b218b927129862374b72199ae7d8657835f1e89000eea4fb"
+dependencies = [
+ "allocator-api2",
+ "equivalent",
+ "foldhash",
+]
[[package]]
name = "hashlink"
@@ -10985,7 +10996,7 @@ dependencies = [
"derive_builder 0.12.0",
"futures",
"futures-util",
- "hashbrown 0.14.5",
+ "hashbrown 0.15.0",
"headers 0.3.9",
"hostname",
"http 0.2.12",
diff --git a/src/servers/Cargo.toml b/src/servers/Cargo.toml
index 33831ba639bb..a4508291993e 100644
--- a/src/servers/Cargo.toml
+++ b/src/servers/Cargo.toml
@@ -53,7 +53,7 @@ datatypes.workspace = true
derive_builder.workspace = true
futures = "0.3"
futures-util.workspace = true
-hashbrown = "0.14"
+hashbrown = "0.15"
headers = "0.3"
hostname = "0.3"
http.workspace = true
diff --git a/src/servers/src/error.rs b/src/servers/src/error.rs
index 88a0ad21b623..31aa5342be57 100644
--- a/src/servers/src/error.rs
+++ b/src/servers/src/error.rs
@@ -514,6 +514,13 @@ pub enum Error {
location: Location,
},
+ #[snafu(display("Invalid Loki JSON request: {}", msg))]
+ InvalidLokiPayload {
+ msg: String,
+ #[snafu(implicit)]
+ location: Location,
+ },
+
#[snafu(display("Unsupported content type: {:?}", content_type))]
UnsupportedContentType {
content_type: ContentType,
@@ -660,6 +667,7 @@ impl ErrorExt for Error {
| MysqlValueConversion { .. }
| ParseJson { .. }
| ParseJson5 { .. }
+ | InvalidLokiPayload { .. }
| UnsupportedContentType { .. }
| TimestampOverflow { .. }
| OpenTelemetryLog { .. }
diff --git a/src/servers/src/http.rs b/src/servers/src/http.rs
index dd618f24a3f7..816bccc61e26 100644
--- a/src/servers/src/http.rs
+++ b/src/servers/src/http.rs
@@ -81,6 +81,7 @@ pub mod handler;
pub mod header;
pub mod influxdb;
pub mod logs;
+pub mod loki;
pub mod mem_prof;
pub mod opentsdb;
pub mod otlp;
@@ -742,7 +743,12 @@ impl HttpServer {
fn route_loki<S>(log_state: LogState) -> Router<S> {
Router::new()
- .route("/api/v1/push", routing::post(event::loki_ingest))
+ .route("/api/v1/push", routing::post(loki::loki_ingest))
+ .layer(
+ ServiceBuilder::new()
+ .layer(HandleErrorLayer::new(handle_error))
+ .layer(RequestDecompressionLayer::new()),
+ )
.with_state(log_state)
}
diff --git a/src/servers/src/http/event.rs b/src/servers/src/http/event.rs
index 14e8ad7dd5e4..951c796ac39e 100644
--- a/src/servers/src/http/event.rs
+++ b/src/servers/src/http/event.rs
@@ -12,16 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::collections::{BTreeMap, HashMap};
use std::result::Result as StdResult;
+use std::str::FromStr;
use std::sync::Arc;
use std::time::Instant;
-use api::v1::value::ValueData;
-use api::v1::{
- ColumnDataType, ColumnSchema, Row, RowInsertRequest, RowInsertRequests, Rows, SemanticType,
- Value as GreptimeValue,
-};
+use api::v1::{RowInsertRequest, RowInsertRequests, Rows};
use axum::body::HttpBody;
use axum::extract::{FromRequest, Multipart, Path, Query, State};
use axum::headers::ContentType;
@@ -29,28 +25,23 @@ use axum::http::header::CONTENT_TYPE;
use axum::http::{Request, StatusCode};
use axum::response::{IntoResponse, Response};
use axum::{async_trait, BoxError, Extension, Json, TypedHeader};
-use bytes::Bytes;
use common_error::ext::ErrorExt;
-use common_query::prelude::GREPTIME_TIMESTAMP;
use common_query::{Output, OutputData};
use common_telemetry::{error, warn};
use datatypes::value::column_data_to_json;
use lazy_static::lazy_static;
-use loki_api::prost_types::Timestamp;
use pipeline::error::PipelineTransformSnafu;
use pipeline::util::to_pipeline_version;
use pipeline::{GreptimeTransformer, PipelineVersion};
-use prost::Message;
use serde::{Deserialize, Serialize};
use serde_json::{json, Deserializer, Map, Value};
use session::context::{Channel, QueryContext, QueryContextRef};
use snafu::{ensure, OptionExt, ResultExt};
use crate::error::{
- status_code_to_http_status, CatalogSnafu, DecodeOtlpRequestSnafu, Error, InvalidParameterSnafu,
- ParseJson5Snafu, ParseJsonSnafu, PipelineSnafu, Result, UnsupportedContentTypeSnafu,
+ status_code_to_http_status, CatalogSnafu, Error, InvalidParameterSnafu, ParseJsonSnafu,
+ PipelineSnafu, Result, UnsupportedContentTypeSnafu,
};
-use crate::http::extractor::LogTableName;
use crate::http::header::CONTENT_TYPE_PROTOBUF_STR;
use crate::http::result::greptime_manage_resp::GreptimedbManageResponse;
use crate::http::result::greptime_result_v1::GreptimedbV1Response;
@@ -58,35 +49,19 @@ use crate::http::HttpResponse;
use crate::interceptor::{LogIngestInterceptor, LogIngestInterceptorRef};
use crate::metrics::{
METRIC_FAILURE_VALUE, METRIC_HTTP_LOGS_INGESTION_COUNTER, METRIC_HTTP_LOGS_INGESTION_ELAPSED,
- METRIC_HTTP_LOGS_TRANSFORM_ELAPSED, METRIC_LOKI_LOGS_INGESTION_COUNTER,
- METRIC_LOKI_LOGS_INGESTION_ELAPSED, METRIC_SUCCESS_VALUE,
+ METRIC_HTTP_LOGS_TRANSFORM_ELAPSED, METRIC_SUCCESS_VALUE,
};
-use crate::prom_store;
use crate::query_handler::PipelineHandlerRef;
const GREPTIME_INTERNAL_PIPELINE_NAME_PREFIX: &str = "greptime_";
const GREPTIME_INTERNAL_IDENTITY_PIPELINE_NAME: &str = "greptime_identity";
-const LOKI_TABLE_NAME: &str = "loki_logs";
-const LOKI_LINE_COLUMN: &str = "line";
-
lazy_static! {
- static ref LOKI_INIT_SCHEMAS: Vec<ColumnSchema> = vec![
- ColumnSchema {
- column_name: GREPTIME_TIMESTAMP.to_string(),
- datatype: ColumnDataType::TimestampNanosecond.into(),
- semantic_type: SemanticType::Timestamp.into(),
- datatype_extension: None,
- options: None,
- },
- ColumnSchema {
- column_name: LOKI_LINE_COLUMN.to_string(),
- datatype: ColumnDataType::String.into(),
- semantic_type: SemanticType::Field.into(),
- datatype_extension: None,
- options: None,
- },
- ];
+ pub static ref JSON_CONTENT_TYPE: ContentType = ContentType::json();
+ pub static ref TEXT_CONTENT_TYPE: ContentType = ContentType::text();
+ pub static ref TEXT_UTF8_CONTENT_TYPE: ContentType = ContentType::text_utf8();
+ pub static ref PB_CONTENT_TYPE: ContentType =
+ ContentType::from_str(CONTENT_TYPE_PROTOBUF_STR).unwrap();
}
#[derive(Debug, Default, Serialize, Deserialize)]
@@ -484,146 +459,6 @@ pub async fn pipeline_dryrun(
}
}
-#[axum_macros::debug_handler]
-pub async fn loki_ingest(
- State(log_state): State<LogState>,
- Extension(mut ctx): Extension<QueryContext>,
- TypedHeader(content_type): TypedHeader<ContentType>,
- LogTableName(table_name): LogTableName,
- bytes: Bytes,
-) -> Result<HttpResponse> {
- ctx.set_channel(Channel::Loki);
- let ctx = Arc::new(ctx);
- let db = ctx.get_db_string();
- let db_str = db.as_str();
- let table_name = table_name.unwrap_or_else(|| LOKI_TABLE_NAME.to_string());
- let exec_timer = Instant::now();
-
- // decompress req
- ensure!(
- content_type.to_string() == CONTENT_TYPE_PROTOBUF_STR,
- UnsupportedContentTypeSnafu { content_type }
- );
- let decompressed = prom_store::snappy_decompress(&bytes).unwrap();
- let req = loki_api::logproto::PushRequest::decode(&decompressed[..])
- .context(DecodeOtlpRequestSnafu)?;
-
- // init schemas
- let mut schemas = LOKI_INIT_SCHEMAS.clone();
-
- let mut global_label_key_index: HashMap<String, i32> = HashMap::new();
- global_label_key_index.insert(GREPTIME_TIMESTAMP.to_string(), 0);
- global_label_key_index.insert(LOKI_LINE_COLUMN.to_string(), 1);
-
- let mut rows = vec![];
-
- for stream in req.streams {
- // parse labels for each row
- // encoding: https://github.com/grafana/alloy/blob/be34410b9e841cc0c37c153f9550d9086a304bca/internal/component/common/loki/client/batch.go#L114-L145
- // use very dirty hack to parse labels
- let labels = stream.labels.replace("=", ":");
- // use btreemap to keep order
- let labels: BTreeMap<String, String> = json5::from_str(&labels).context(ParseJson5Snafu)?;
-
- // process entries
- for entry in stream.entries {
- let ts = if let Some(ts) = entry.timestamp {
- ts
- } else {
- continue;
- };
- let line = entry.line;
-
- // create and init row
- let mut row = Vec::with_capacity(schemas.len());
- for _ in 0..schemas.len() {
- row.push(GreptimeValue { value_data: None });
- }
- // insert ts and line
- row[0] = GreptimeValue {
- value_data: Some(ValueData::TimestampNanosecondValue(prost_ts_to_nano(&ts))),
- };
- row[1] = GreptimeValue {
- value_data: Some(ValueData::StringValue(line)),
- };
- // insert labels
- for (k, v) in labels.iter() {
- if let Some(index) = global_label_key_index.get(k) {
- // exist in schema
- // insert value using index
- row[*index as usize] = GreptimeValue {
- value_data: Some(ValueData::StringValue(v.clone())),
- };
- } else {
- // not exist
- // add schema and append to values
- schemas.push(ColumnSchema {
- column_name: k.clone(),
- datatype: ColumnDataType::String.into(),
- semantic_type: SemanticType::Tag.into(),
- datatype_extension: None,
- options: None,
- });
- global_label_key_index.insert(k.clone(), (schemas.len() - 1) as i32);
-
- row.push(GreptimeValue {
- value_data: Some(ValueData::StringValue(v.clone())),
- });
- }
- }
-
- rows.push(row);
- }
- }
-
- // fill Null for missing values
- for row in rows.iter_mut() {
- if row.len() < schemas.len() {
- for _ in row.len()..schemas.len() {
- row.push(GreptimeValue { value_data: None });
- }
- }
- }
-
- let rows = Rows {
- rows: rows.into_iter().map(|values| Row { values }).collect(),
- schema: schemas,
- };
-
- let ins_req = RowInsertRequest {
- table_name,
- rows: Some(rows),
- };
- let ins_reqs = RowInsertRequests {
- inserts: vec![ins_req],
- };
-
- let handler = log_state.log_handler;
- let output = handler.insert(ins_reqs, ctx).await;
-
- if let Ok(Output {
- data: OutputData::AffectedRows(rows),
- meta: _,
- }) = &output
- {
- METRIC_LOKI_LOGS_INGESTION_COUNTER
- .with_label_values(&[db_str])
- .inc_by(*rows as u64);
- METRIC_LOKI_LOGS_INGESTION_ELAPSED
- .with_label_values(&[db_str, METRIC_SUCCESS_VALUE])
- .observe(exec_timer.elapsed().as_secs_f64());
- } else {
- METRIC_LOKI_LOGS_INGESTION_ELAPSED
- .with_label_values(&[db_str, METRIC_FAILURE_VALUE])
- .observe(exec_timer.elapsed().as_secs_f64());
- }
-
- let response = GreptimedbV1Response::from_output(vec![output])
- .await
- .with_execution_time(exec_timer.elapsed().as_millis() as u64);
- Ok(response)
-}
-
#[axum_macros::debug_handler]
pub async fn log_ingester(
State(log_state): State<LogState>,
@@ -682,11 +517,11 @@ fn extract_pipeline_value_by_content_type(
ignore_errors: bool,
) -> Result<Vec<Value>> {
Ok(match content_type {
- ct if ct == ContentType::json() => transform_ndjson_array_factory(
+ ct if ct == *JSON_CONTENT_TYPE => transform_ndjson_array_factory(
Deserializer::from_str(&payload).into_iter(),
ignore_errors,
)?,
- ct if ct == ContentType::text() || ct == ContentType::text_utf8() => payload
+ ct if ct == *TEXT_CONTENT_TYPE || ct == *TEXT_UTF8_CONTENT_TYPE => payload
.lines()
.filter(|line| !line.is_empty())
.map(|line| Value::String(line.to_string()))
@@ -808,11 +643,6 @@ pub struct LogState {
pub ingest_interceptor: Option<LogIngestInterceptorRef<Error>>,
}
-#[inline]
-fn prost_ts_to_nano(ts: &Timestamp) -> i64 {
- ts.seconds * 1_000_000_000 + ts.nanos as i64
-}
-
#[cfg(test)]
mod tests {
use super::*;
@@ -847,16 +677,4 @@ mod tests {
.to_string();
assert_eq!(a, "[{\"a\":1},{\"b\":2}]");
}
-
- #[test]
- fn test_ts_to_nano() {
- // ts = 1731748568804293888
- // seconds = 1731748568
- // nano = 804293888
- let ts = Timestamp {
- seconds: 1731748568,
- nanos: 804293888,
- };
- assert_eq!(prost_ts_to_nano(&ts), 1731748568804293888);
- }
}
diff --git a/src/servers/src/http/loki.rs b/src/servers/src/http/loki.rs
new file mode 100644
index 000000000000..b1014110613b
--- /dev/null
+++ b/src/servers/src/http/loki.rs
@@ -0,0 +1,377 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::collections::BTreeMap;
+use std::sync::Arc;
+use std::time::Instant;
+
+use api::v1::value::ValueData;
+use api::v1::{
+ ColumnDataType, ColumnSchema, Row, RowInsertRequest, RowInsertRequests, Rows, SemanticType,
+ Value as GreptimeValue,
+};
+use axum::extract::State;
+use axum::headers::ContentType;
+use axum::{Extension, TypedHeader};
+use bytes::Bytes;
+use common_query::prelude::GREPTIME_TIMESTAMP;
+use common_query::{Output, OutputData};
+use common_telemetry::warn;
+use hashbrown::HashMap;
+use lazy_static::lazy_static;
+use loki_api::prost_types::Timestamp;
+use prost::Message;
+use session::context::{Channel, QueryContext};
+use snafu::{OptionExt, ResultExt};
+
+use crate::error::{
+ DecodeOtlpRequestSnafu, InvalidLokiPayloadSnafu, ParseJson5Snafu, ParseJsonSnafu, Result,
+ UnsupportedContentTypeSnafu,
+};
+use crate::http::event::{LogState, JSON_CONTENT_TYPE, PB_CONTENT_TYPE};
+use crate::http::extractor::LogTableName;
+use crate::http::result::greptime_result_v1::GreptimedbV1Response;
+use crate::http::HttpResponse;
+use crate::metrics::{
+ METRIC_FAILURE_VALUE, METRIC_LOKI_LOGS_INGESTION_COUNTER, METRIC_LOKI_LOGS_INGESTION_ELAPSED,
+ METRIC_SUCCESS_VALUE,
+};
+use crate::{prom_store, unwrap_or_warn_continue};
+
+const LOKI_TABLE_NAME: &str = "loki_logs";
+const LOKI_LINE_COLUMN: &str = "line";
+
+const STREAMS_KEY: &str = "streams";
+const LABEL_KEY: &str = "stream";
+const LINES_KEY: &str = "values";
+
+lazy_static! {
+ static ref LOKI_INIT_SCHEMAS: Vec<ColumnSchema> = vec![
+ ColumnSchema {
+ column_name: GREPTIME_TIMESTAMP.to_string(),
+ datatype: ColumnDataType::TimestampNanosecond.into(),
+ semantic_type: SemanticType::Timestamp.into(),
+ datatype_extension: None,
+ options: None,
+ },
+ ColumnSchema {
+ column_name: LOKI_LINE_COLUMN.to_string(),
+ datatype: ColumnDataType::String.into(),
+ semantic_type: SemanticType::Field.into(),
+ datatype_extension: None,
+ options: None,
+ },
+ ];
+}
+
+#[axum_macros::debug_handler]
+pub async fn loki_ingest(
+ State(log_state): State<LogState>,
+ Extension(mut ctx): Extension<QueryContext>,
+ TypedHeader(content_type): TypedHeader<ContentType>,
+ LogTableName(table_name): LogTableName,
+ bytes: Bytes,
+) -> Result<HttpResponse> {
+ ctx.set_channel(Channel::Loki);
+ let ctx = Arc::new(ctx);
+ let table_name = table_name.unwrap_or_else(|| LOKI_TABLE_NAME.to_string());
+ let db = ctx.get_db_string();
+ let db_str = db.as_str();
+ let exec_timer = Instant::now();
+
+ // init schemas
+ let mut schemas = LOKI_INIT_SCHEMAS.clone();
+
+ let mut rows = match content_type {
+ x if x == *JSON_CONTENT_TYPE => handle_json_req(bytes, &mut schemas).await,
+ x if x == *PB_CONTENT_TYPE => handle_pb_req(bytes, &mut schemas).await,
+ _ => UnsupportedContentTypeSnafu { content_type }.fail(),
+ }?;
+
+ // fill Null for missing values
+ for row in rows.iter_mut() {
+ if row.len() < schemas.len() {
+ for _ in row.len()..schemas.len() {
+ row.push(GreptimeValue { value_data: None });
+ }
+ }
+ }
+
+ let rows = Rows {
+ rows: rows.into_iter().map(|values| Row { values }).collect(),
+ schema: schemas,
+ };
+ let ins_req = RowInsertRequest {
+ table_name,
+ rows: Some(rows),
+ };
+ let ins_reqs = RowInsertRequests {
+ inserts: vec![ins_req],
+ };
+
+ let handler = log_state.log_handler;
+ let output = handler.insert(ins_reqs, ctx).await;
+
+ if let Ok(Output {
+ data: OutputData::AffectedRows(rows),
+ meta: _,
+ }) = &output
+ {
+ METRIC_LOKI_LOGS_INGESTION_COUNTER
+ .with_label_values(&[db_str])
+ .inc_by(*rows as u64);
+ METRIC_LOKI_LOGS_INGESTION_ELAPSED
+ .with_label_values(&[db_str, METRIC_SUCCESS_VALUE])
+ .observe(exec_timer.elapsed().as_secs_f64());
+ } else {
+ METRIC_LOKI_LOGS_INGESTION_ELAPSED
+ .with_label_values(&[db_str, METRIC_FAILURE_VALUE])
+ .observe(exec_timer.elapsed().as_secs_f64());
+ }
+
+ let response = GreptimedbV1Response::from_output(vec![output])
+ .await
+ .with_execution_time(exec_timer.elapsed().as_millis() as u64);
+ Ok(response)
+}
+
+async fn handle_json_req(
+ bytes: Bytes,
+ schemas: &mut Vec<ColumnSchema>,
+) -> Result<Vec<Vec<GreptimeValue>>> {
+ let mut column_indexer: HashMap<String, u16> = HashMap::new();
+ column_indexer.insert(GREPTIME_TIMESTAMP.to_string(), 0);
+ column_indexer.insert(LOKI_LINE_COLUMN.to_string(), 1);
+
+ let payload: serde_json::Value =
+ serde_json::from_slice(bytes.as_ref()).context(ParseJsonSnafu)?;
+
+ let streams = payload
+ .get(STREAMS_KEY)
+ .context(InvalidLokiPayloadSnafu {
+ msg: "missing streams",
+ })?
+ .as_array()
+ .context(InvalidLokiPayloadSnafu {
+ msg: "streams is not an array",
+ })?;
+
+ let mut rows = Vec::with_capacity(1000);
+
+ for (stream_index, stream) in streams.iter().enumerate() {
+ // parse lines first
+ // do not use `?` in case there are multiple streams
+ let lines = unwrap_or_warn_continue!(
+ stream.get(LINES_KEY),
+ "missing values on stream {}",
+ stream_index
+ );
+ let lines = unwrap_or_warn_continue!(
+ lines.as_array(),
+ "values is not an array on stream {}",
+ stream_index
+ );
+
+ // get labels
+ let labels = stream
+ .get(LABEL_KEY)
+ .and_then(|label| label.as_object())
+ .map(|l| {
+ l.iter()
+ .filter_map(|(k, v)| v.as_str().map(|v| (k.clone(), v.to_string())))
+ .collect::<BTreeMap<String, String>>()
+ })
+ .unwrap_or_default();
+
+ // process each line
+ for (line_index, line) in lines.iter().enumerate() {
+ let line = unwrap_or_warn_continue!(
+ line.as_array(),
+ "missing line on stream {} index {}",
+ stream_index,
+ line_index
+ );
+ if line.len() < 2 {
+ warn!(
+ "line on stream {} index {} is too short",
+ stream_index, line_index
+ );
+ continue;
+ }
+ // get ts
+ let ts = unwrap_or_warn_continue!(
+ line.first()
+ .and_then(|ts| ts.as_str())
+ .and_then(|ts| ts.parse::<i64>().ok()),
+ "missing or invalid timestamp on stream {} index {}",
+ stream_index,
+ line_index
+ );
+ // get line
+ let line_text = unwrap_or_warn_continue!(
+ line.get(1)
+ .and_then(|line| line.as_str())
+ .map(|line| line.to_string()),
+ "missing or invalid line on stream {} index {}",
+ stream_index,
+ line_index
+ );
+ // TODO(shuiyisong): we'll ignore structured metadata for now
+
+ let mut row = init_row(schemas.len(), ts, line_text);
+ process_labels(&mut column_indexer, schemas, &mut row, labels.iter());
+
+ rows.push(row);
+ }
+ }
+
+ Ok(rows)
+}
+
+async fn handle_pb_req(
+ bytes: Bytes,
+ schemas: &mut Vec<ColumnSchema>,
+) -> Result<Vec<Vec<GreptimeValue>>> {
+ let decompressed = prom_store::snappy_decompress(&bytes).unwrap();
+ let req = loki_api::logproto::PushRequest::decode(&decompressed[..])
+ .context(DecodeOtlpRequestSnafu)?;
+
+ let mut column_indexer: HashMap<String, u16> = HashMap::new();
+ column_indexer.insert(GREPTIME_TIMESTAMP.to_string(), 0);
+ column_indexer.insert(LOKI_LINE_COLUMN.to_string(), 1);
+
+ let cnt = req.streams.iter().map(|s| s.entries.len()).sum::<usize>();
+ let mut rows = Vec::with_capacity(cnt);
+
+ for stream in req.streams {
+ // parse labels for each row
+ // encoding: https://github.com/grafana/alloy/blob/be34410b9e841cc0c37c153f9550d9086a304bca/internal/component/common/loki/client/batch.go#L114-L145
+ // use very dirty hack to parse labels
+ // TODO(shuiyisong): remove json5 and parse the string directly
+ let labels = stream.labels.replace("=", ":");
+ // use btreemap to keep order
+ let labels: BTreeMap<String, String> = json5::from_str(&labels).context(ParseJson5Snafu)?;
+
+ // process entries
+ for entry in stream.entries {
+ let ts = if let Some(ts) = entry.timestamp {
+ ts
+ } else {
+ continue;
+ };
+ let line = entry.line;
+
+ let mut row = init_row(schemas.len(), prost_ts_to_nano(&ts), line);
+ process_labels(&mut column_indexer, schemas, &mut row, labels.iter());
+
+ rows.push(row);
+ }
+ }
+
+ Ok(rows)
+}
+
+#[inline]
+fn prost_ts_to_nano(ts: &Timestamp) -> i64 {
+ ts.seconds * 1_000_000_000 + ts.nanos as i64
+}
+
+fn init_row(schema_len: usize, ts: i64, line: String) -> Vec<GreptimeValue> {
+ // create and init row
+ let mut row = Vec::with_capacity(schema_len);
+ // set ts and line
+ row.push(GreptimeValue {
+ value_data: Some(ValueData::TimestampNanosecondValue(ts)),
+ });
+ row.push(GreptimeValue {
+ value_data: Some(ValueData::StringValue(line)),
+ });
+ for _ in 0..(schema_len - 2) {
+ row.push(GreptimeValue { value_data: None });
+ }
+ row
+}
+
+fn process_labels<'a>(
+ column_indexer: &mut HashMap<String, u16>,
+ schemas: &mut Vec<ColumnSchema>,
+ row: &mut Vec<GreptimeValue>,
+ labels: impl Iterator<Item = (&'a String, &'a String)>,
+) {
+ // insert labels
+ for (k, v) in labels {
+ if let Some(index) = column_indexer.get(k) {
+ // exist in schema
+ // insert value using index
+ row[*index as usize] = GreptimeValue {
+ value_data: Some(ValueData::StringValue(v.clone())),
+ };
+ } else {
+ // not exist
+ // add schema and append to values
+ schemas.push(ColumnSchema {
+ column_name: k.clone(),
+ datatype: ColumnDataType::String.into(),
+ semantic_type: SemanticType::Tag.into(),
+ datatype_extension: None,
+ options: None,
+ });
+ column_indexer.insert(k.clone(), (schemas.len() - 1) as u16);
+
+ row.push(GreptimeValue {
+ value_data: Some(ValueData::StringValue(v.clone())),
+ });
+ }
+ }
+}
+
+#[macro_export]
+macro_rules! unwrap_or_warn_continue {
+ ($expr:expr, $msg:expr) => {
+ if let Some(value) = $expr {
+ value
+ } else {
+ warn!($msg);
+ continue;
+ }
+ };
+
+ ($expr:expr, $fmt:expr, $($arg:tt)*) => {
+ if let Some(value) = $expr {
+ value
+ } else {
+ warn!($fmt, $($arg)*);
+ continue;
+ }
+ };
+}
+
+#[cfg(test)]
+mod tests {
+ use loki_api::prost_types::Timestamp;
+
+ use crate::http::loki::prost_ts_to_nano;
+
+ #[test]
+ fn test_ts_to_nano() {
+ // ts = 1731748568804293888
+ // seconds = 1731748568
+ // nano = 804293888
+ let ts = Timestamp {
+ seconds: 1731748568,
+ nanos: 804293888,
+ };
+ assert_eq!(prost_ts_to_nano(&ts), 1731748568804293888);
+ }
+}
diff --git a/tests-integration/tests/http.rs b/tests-integration/tests/http.rs
index 7793c26d19fd..4245527cf707 100644
--- a/tests-integration/tests/http.rs
+++ b/tests-integration/tests/http.rs
@@ -96,7 +96,8 @@ macro_rules! http_tests {
test_otlp_metrics,
test_otlp_traces,
test_otlp_logs,
- test_loki_logs,
+ test_loki_pb_logs,
+ test_loki_json_logs,
);
)*
};
@@ -1670,7 +1671,17 @@ pub async fn test_otlp_metrics(store_type: StorageType) {
let client = TestClient::new(app);
// write metrics data
- let res = send_req(&client, vec![], "/v1/otlp/v1/metrics", body.clone(), false).await;
+ let res = send_req(
+ &client,
+ vec![(
+ HeaderName::from_static("content-type"),
+ HeaderValue::from_static("application/x-protobuf"),
+ )],
+ "/v1/otlp/v1/metrics",
+ body.clone(),
+ false,
+ )
+ .await;
assert_eq!(StatusCode::OK, res.status());
// select metrics data
@@ -1682,7 +1693,17 @@ pub async fn test_otlp_metrics(store_type: StorageType) {
assert_eq!(res.status(), StatusCode::OK);
// write metrics data with gzip
- let res = send_req(&client, vec![], "/v1/otlp/v1/metrics", body.clone(), true).await;
+ let res = send_req(
+ &client,
+ vec![(
+ HeaderName::from_static("content-type"),
+ HeaderValue::from_static("application/x-protobuf"),
+ )],
+ "/v1/otlp/v1/metrics",
+ body.clone(),
+ true,
+ )
+ .await;
assert_eq!(StatusCode::OK, res.status());
// select metrics data again
@@ -1713,7 +1734,17 @@ pub async fn test_otlp_traces(store_type: StorageType) {
let client = TestClient::new(app);
// write traces data
- let res = send_req(&client, vec![], "/v1/otlp/v1/traces", body.clone(), false).await;
+ let res = send_req(
+ &client,
+ vec![(
+ HeaderName::from_static("content-type"),
+ HeaderValue::from_static("application/x-protobuf"),
+ )],
+ "/v1/otlp/v1/traces",
+ body.clone(),
+ false,
+ )
+ .await;
assert_eq!(StatusCode::OK, res.status());
// select traces data
@@ -1734,7 +1765,17 @@ pub async fn test_otlp_traces(store_type: StorageType) {
assert_eq!(res.status(), StatusCode::OK);
// write traces data with gzip
- let res = send_req(&client, vec![], "/v1/otlp/v1/traces", body.clone(), true).await;
+ let res = send_req(
+ &client,
+ vec![(
+ HeaderName::from_static("content-type"),
+ HeaderValue::from_static("application/x-protobuf"),
+ )],
+ "/v1/otlp/v1/traces",
+ body.clone(),
+ true,
+ )
+ .await;
assert_eq!(StatusCode::OK, res.status());
// select traces data again
@@ -1765,10 +1806,16 @@ pub async fn test_otlp_logs(store_type: StorageType) {
// write log data
let res = send_req(
&client,
- vec![(
- HeaderName::from_static("x-greptime-log-table-name"),
- HeaderValue::from_static("logs1"),
- )],
+ vec![
+ (
+ HeaderName::from_static("content-type"),
+ HeaderValue::from_static("application/x-protobuf"),
+ ),
+ (
+ HeaderName::from_static("x-greptime-log-table-name"),
+ HeaderValue::from_static("logs1"),
+ ),
+ ],
"/v1/otlp/v1/logs?db=public",
body.clone(),
false,
@@ -1784,6 +1831,10 @@ pub async fn test_otlp_logs(store_type: StorageType) {
let res = send_req(
&client,
vec![
+ (
+ HeaderName::from_static("content-type"),
+ HeaderValue::from_static("application/x-protobuf"),
+ ),
(
HeaderName::from_static("x-greptime-log-table-name"),
HeaderValue::from_static("logs"),
@@ -1813,9 +1864,9 @@ pub async fn test_otlp_logs(store_type: StorageType) {
guard.remove_all().await;
}
-pub async fn test_loki_logs(store_type: StorageType) {
+pub async fn test_loki_pb_logs(store_type: StorageType) {
common_telemetry::init_default_ut_logging();
- let (app, mut guard) = setup_test_http_app_with_frontend(store_type, "test_loke_logs").await;
+ let (app, mut guard) = setup_test_http_app_with_frontend(store_type, "test_loki_pb_logs").await;
let client = TestClient::new(app);
@@ -1862,7 +1913,7 @@ pub async fn test_loki_logs(store_type: StorageType) {
// test schema
let expected = "[[\"loki_table_name\",\"CREATE TABLE IF NOT EXISTS \\\"loki_table_name\\\" (\\n \\\"greptime_timestamp\\\" TIMESTAMP(9) NOT NULL,\\n \\\"line\\\" STRING NULL,\\n \\\"service\\\" STRING NULL,\\n \\\"source\\\" STRING NULL,\\n \\\"wadaxi\\\" STRING NULL,\\n TIME INDEX (\\\"greptime_timestamp\\\"),\\n PRIMARY KEY (\\\"service\\\", \\\"source\\\", \\\"wadaxi\\\")\\n)\\n\\nENGINE=mito\\nWITH(\\n append_mode = 'true'\\n)\"]]";
validate_data(
- "loki_schema",
+ "loki_pb_schema",
&client,
"show create table loki_table_name;",
expected,
@@ -1872,7 +1923,75 @@ pub async fn test_loki_logs(store_type: StorageType) {
// test content
let expected = r#"[[1730976830000000000,"this is a log message","test","integration","do anything"],[1730976830000000000,"this is a log message","test","integration","do anything"]]"#;
validate_data(
- "loki_content",
+ "loki_pb_content",
+ &client,
+ "select * from loki_table_name;",
+ expected,
+ )
+ .await;
+
+ guard.remove_all().await;
+}
+
+pub async fn test_loki_json_logs(store_type: StorageType) {
+ common_telemetry::init_default_ut_logging();
+ let (app, mut guard) =
+ setup_test_http_app_with_frontend(store_type, "test_loki_json_logs").await;
+
+ let client = TestClient::new(app);
+
+ let body = r#"
+{
+ "streams": [
+ {
+ "stream": {
+ "source": "test"
+ },
+ "values": [
+ [ "1735901380059465984", "this is line one" ],
+ [ "1735901398478897920", "this is line two" ]
+ ]
+ }
+ ]
+}
+ "#;
+
+ let body = body.as_bytes().to_vec();
+
+ // write plain to loki
+ let res = send_req(
+ &client,
+ vec![
+ (
+ HeaderName::from_static("content-type"),
+ HeaderValue::from_static("application/json"),
+ ),
+ (
+ HeaderName::from_static(GREPTIME_LOG_TABLE_NAME_HEADER_NAME),
+ HeaderValue::from_static("loki_table_name"),
+ ),
+ ],
+ "/v1/loki/api/v1/push",
+ body,
+ false,
+ )
+ .await;
+ assert_eq!(StatusCode::OK, res.status());
+
+ // test schema
+ let expected = "[[\"loki_table_name\",\"CREATE TABLE IF NOT EXISTS \\\"loki_table_name\\\" (\\n \\\"greptime_timestamp\\\" TIMESTAMP(9) NOT NULL,\\n \\\"line\\\" STRING NULL,\\n \\\"source\\\" STRING NULL,\\n TIME INDEX (\\\"greptime_timestamp\\\"),\\n PRIMARY KEY (\\\"source\\\")\\n)\\n\\nENGINE=mito\\nWITH(\\n append_mode = 'true'\\n)\"]]";
+ validate_data(
+ "loki_json_schema",
+ &client,
+ "show create table loki_table_name;",
+ expected,
+ )
+ .await;
+
+ // test content
+ let expected = "[[1735901380059465984,\"this is line one\",\"test\"],[1735901398478897920,\"this is line two\",\"test\"]]";
+ validate_data(
+ "loki_json_content",
&client,
"select * from loki_table_name;",
expected,
@@ -1901,9 +2020,7 @@ async fn send_req(
body: Vec<u8>,
with_gzip: bool,
) -> TestResponse {
- let mut req = client
- .post(path)
- .header("content-type", "application/x-protobuf");
+ let mut req = client.post(path);
for (k, v) in headers {
req = req.header(k, v);
|
feat
|
support Loki JSON write (#5288)
|
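The diff above adds a JSON code path to the existing Loki push route. Below is a minimal client sketch of how that path could be exercised, not part of the commit itself: the endpoint path, content type, and payload shape come from the integration test in the diff, while the host, port, table name, use of the `reqwest` crate, and the `x-greptime-log-table-name` header (reusing the header literal seen in the OTLP log test) are assumptions.

```rust
// Hypothetical client sketch: push a Loki-style JSON payload to a locally running
// GreptimeDB frontend. Requires reqwest with the "blocking" feature enabled.
use reqwest::blocking::Client;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Standard Loki JSON push body: one stream with a label set and
    // two (timestamp-in-nanoseconds-as-string, line) pairs.
    let payload = r#"{
        "streams": [
            {
                "stream": { "source": "demo" },
                "values": [
                    [ "1735901380059465984", "this is line one" ],
                    [ "1735901398478897920", "this is line two" ]
                ]
            }
        ]
    }"#;

    let resp = Client::new()
        // Endpoint path taken from the integration test; host/port are assumptions.
        .post("http://127.0.0.1:4000/v1/loki/api/v1/push")
        .header("content-type", "application/json")
        // Assumed to be the same table-name header used by the OTLP log tests above.
        .header("x-greptime-log-table-name", "loki_demo")
        .body(payload)
        .send()?;

    println!("status: {}", resp.status());
    Ok(())
}
```

The timestamp format (nanoseconds encoded as a string) and the `streams`/`stream`/`values` keys mirror what `handle_json_req` parses in the new `loki.rs` module.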
1d80a0f2d630c9f67df6e57e852a645b5a77008e
|
2023-12-28 11:29:27
|
Yingwen
|
chore: Update CI badge in README.md (#3028)
| false
|
diff --git a/README.md b/README.md
index 4dc8ac7c3d90..415608ad3c83 100644
--- a/README.md
+++ b/README.md
@@ -14,7 +14,7 @@
<p align="center">
<a href="https://codecov.io/gh/GrepTimeTeam/greptimedb"><img src="https://codecov.io/gh/GrepTimeTeam/greptimedb/branch/main/graph/badge.svg?token=FITFDI3J3C"></img></a>
- <a href="https://github.com/GreptimeTeam/greptimedb/actions/workflows/main.yml"><img src="https://github.com/GreptimeTeam/greptimedb/actions/workflows/main.yml/badge.svg" alt="CI"></img></a>
+ <a href="https://github.com/GreptimeTeam/greptimedb/actions/workflows/develop.yml"><img src="https://github.com/GreptimeTeam/greptimedb/actions/workflows/develop.yml/badge.svg" alt="CI"></img></a>
<a href="https://github.com/greptimeTeam/greptimedb/blob/main/LICENSE"><img src="https://img.shields.io/github/license/greptimeTeam/greptimedb"></a>
</p>
|
chore
|
Update CI badge in README.md (#3028)
|
56aed6e6ff8c69173e1dc69cb58a9955e6f4622f
|
2024-05-21 17:26:03
|
Weny Xu
|
chore(ci): export kind logs (#3996)
| false
|
diff --git a/.github/actions/sqlness-test/action.yml b/.github/actions/sqlness-test/action.yml
index 28d58902a6ba..9764f03a846e 100644
--- a/.github/actions/sqlness-test/action.yml
+++ b/.github/actions/sqlness-test/action.yml
@@ -57,3 +57,14 @@ runs:
AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-secret-access-key }}
run: |
aws s3 rm s3://${{ inputs.aws-ci-test-bucket }}/${{ inputs.data-root }} --recursive
+ - name: Export kind logs
+ if: failure()
+ shell: bash
+ run: kind export logs -n greptimedb-operator-e2e /tmp/kind
+ - name: Upload logs
+ if: failure()
+ uses: actions/upload-artifact@v4
+ with:
+ name: kind-logs
+ path: /tmp/kind
+ retention-days: 3
|
chore
|
export kind logs (#3996)
|
701aba9cdbea0ad459c26d3f5bae409af85c1cc2
|
2024-04-29 19:32:52
|
Weny Xu
|
refactor: rename flow task to flow (#3833)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index eee95568ccd1..f0ea63351da4 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3883,7 +3883,7 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"
[[package]]
name = "greptime-proto"
version = "0.1.0"
-source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=aba235025ac5643c12bfdcefd656af11ad58ea8e#aba235025ac5643c12bfdcefd656af11ad58ea8e"
+source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=b5412f72257c18410fdccbb893fa5d245b846141#b5412f72257c18410fdccbb893fa5d245b846141"
dependencies = [
"prost 0.12.4",
"serde",
diff --git a/Cargo.toml b/Cargo.toml
index 061e0e8b186f..0f6f7b6fad25 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -116,7 +116,7 @@ etcd-client = { git = "https://github.com/MichaelScofield/etcd-client.git", rev
fst = "0.4.7"
futures = "0.3"
futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "aba235025ac5643c12bfdcefd656af11ad58ea8e" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "b5412f72257c18410fdccbb893fa5d245b846141" }
humantime = "2.1"
humantime-serde = "1.1"
itertools = "0.10"
diff --git a/src/api/src/helper.rs b/src/api/src/helper.rs
index 60af91742d19..bf7786bcaf66 100644
--- a/src/api/src/helper.rs
+++ b/src/api/src/helper.rs
@@ -478,8 +478,8 @@ fn ddl_request_type(request: &DdlRequest) -> &'static str {
Some(Expr::Alter(_)) => "ddl.alter",
Some(Expr::DropTable(_)) => "ddl.drop_table",
Some(Expr::TruncateTable(_)) => "ddl.truncate_table",
- Some(Expr::CreateFlowTask(_)) => "ddl.create_flow",
- Some(Expr::DropFlowTask(_)) => "ddl.drop_flow_task",
+ Some(Expr::CreateFlow(_)) => "ddl.create_flow",
+ Some(Expr::DropFlow(_)) => "ddl.drop_flow",
None => "ddl.empty",
}
}
diff --git a/src/cmd/src/standalone.rs b/src/cmd/src/standalone.rs
index 2304d49a70b9..212adf562698 100644
--- a/src/cmd/src/standalone.rs
+++ b/src/cmd/src/standalone.rs
@@ -18,14 +18,14 @@ use std::{fs, path};
use async_trait::async_trait;
use catalog::kvbackend::KvBackendCatalogManager;
use clap::Parser;
-use common_catalog::consts::{MIN_USER_FLOW_TASK_ID, MIN_USER_TABLE_ID};
+use common_catalog::consts::{MIN_USER_FLOW_ID, MIN_USER_TABLE_ID};
use common_config::{metadata_store_dir, KvBackendConfig};
use common_meta::cache_invalidator::{CacheInvalidatorRef, MultiCacheInvalidator};
+use common_meta::ddl::flow_meta::{FlowMetadataAllocator, FlowMetadataAllocatorRef};
use common_meta::ddl::table_meta::{TableMetadataAllocator, TableMetadataAllocatorRef};
-use common_meta::ddl::task_meta::{FlowTaskMetadataAllocator, FlowTaskMetadataAllocatorRef};
use common_meta::ddl::{DdlContext, ProcedureExecutorRef};
use common_meta::ddl_manager::DdlManager;
-use common_meta::key::flow_task::{FlowTaskMetadataManager, FlowTaskMetadataManagerRef};
+use common_meta::key::flow::{FlowMetadataManager, FlowMetadataManagerRef};
use common_meta::key::{TableMetadataManager, TableMetadataManagerRef};
use common_meta::kv_backend::KvBackendRef;
use common_meta::node_manager::NodeManagerRef;
@@ -47,7 +47,7 @@ use frontend::server::Services;
use frontend::service_config::{
GrpcOptions, InfluxdbOptions, MysqlOptions, OpentsdbOptions, PostgresOptions, PromStoreOptions,
};
-use meta_srv::metasrv::{FLOW_TASK_ID_SEQ, TABLE_ID_SEQ};
+use meta_srv::metasrv::{FLOW_ID_SEQ, TABLE_ID_SEQ};
use mito2::config::MitoConfig;
use serde::{Deserialize, Serialize};
use servers::export_metrics::ExportMetricsOption;
@@ -419,9 +419,9 @@ impl StartCommand {
.step(10)
.build(),
);
- let flow_task_id_sequence = Arc::new(
- SequenceBuilder::new(FLOW_TASK_ID_SEQ, kv_backend.clone())
- .initial(MIN_USER_FLOW_TASK_ID as u64)
+ let flow_id_sequence = Arc::new(
+ SequenceBuilder::new(FLOW_ID_SEQ, kv_backend.clone())
+ .initial(MIN_USER_FLOW_ID as u64)
.step(10)
.build(),
);
@@ -431,14 +431,14 @@ impl StartCommand {
));
let table_metadata_manager =
Self::create_table_metadata_manager(kv_backend.clone()).await?;
- let flow_task_metadata_manager = Arc::new(FlowTaskMetadataManager::new(kv_backend.clone()));
+ let flow_metadata_manager = Arc::new(FlowMetadataManager::new(kv_backend.clone()));
let table_meta_allocator = Arc::new(TableMetadataAllocator::new(
table_id_sequence,
wal_options_allocator.clone(),
));
- let flow_task_meta_allocator = Arc::new(
- FlowTaskMetadataAllocator::with_noop_peer_allocator(flow_task_id_sequence),
- );
+ let flow_meta_allocator = Arc::new(FlowMetadataAllocator::with_noop_peer_allocator(
+ flow_id_sequence,
+ ));
let ddl_task_executor = Self::create_ddl_task_executor(
procedure_manager.clone(),
@@ -446,8 +446,8 @@ impl StartCommand {
multi_cache_invalidator,
table_metadata_manager,
table_meta_allocator,
- flow_task_metadata_manager,
- flow_task_meta_allocator,
+ flow_metadata_manager,
+ flow_meta_allocator,
)
.await?;
@@ -480,8 +480,8 @@ impl StartCommand {
cache_invalidator: CacheInvalidatorRef,
table_metadata_manager: TableMetadataManagerRef,
table_metadata_allocator: TableMetadataAllocatorRef,
- flow_task_metadata_manager: FlowTaskMetadataManagerRef,
- flow_task_metadata_allocator: FlowTaskMetadataAllocatorRef,
+ flow_metadata_manager: FlowMetadataManagerRef,
+ flow_metadata_allocator: FlowMetadataAllocatorRef,
) -> Result<ProcedureExecutorRef> {
let procedure_executor: ProcedureExecutorRef = Arc::new(
DdlManager::try_new(
@@ -491,8 +491,8 @@ impl StartCommand {
memory_region_keeper: Arc::new(MemoryRegionKeeper::default()),
table_metadata_manager,
table_metadata_allocator,
- flow_task_metadata_manager,
- flow_task_metadata_allocator,
+ flow_metadata_manager,
+ flow_metadata_allocator,
},
procedure_manager,
true,
diff --git a/src/common/catalog/src/consts.rs b/src/common/catalog/src/consts.rs
index 175435d89842..30024b03fa41 100644
--- a/src/common/catalog/src/consts.rs
+++ b/src/common/catalog/src/consts.rs
@@ -19,9 +19,9 @@ pub const DEFAULT_CATALOG_NAME: &str = "greptime";
pub const DEFAULT_SCHEMA_NAME: &str = "public";
pub const DEFAULT_PRIVATE_SCHEMA_NAME: &str = "greptime_private";
-/// Reserves [0,MIN_USER_FLOW_TASK_ID) for internal usage.
+/// Reserves [0,MIN_USER_FLOW_ID) for internal usage.
/// User defined table id starts from this value.
-pub const MIN_USER_FLOW_TASK_ID: u32 = 1024;
+pub const MIN_USER_FLOW_ID: u32 = 1024;
/// Reserves [0,MIN_USER_TABLE_ID) for internal usage.
/// User defined table id starts from this value.
pub const MIN_USER_TABLE_ID: u32 = 1024;
diff --git a/src/common/meta/src/ddl.rs b/src/common/meta/src/ddl.rs
index 5bdcb1f68e2b..bc4563b2f567 100644
--- a/src/common/meta/src/ddl.rs
+++ b/src/common/meta/src/ddl.rs
@@ -19,10 +19,10 @@ use common_telemetry::tracing_context::W3cTrace;
use store_api::storage::{RegionNumber, TableId};
use crate::cache_invalidator::CacheInvalidatorRef;
+use crate::ddl::flow_meta::FlowMetadataAllocatorRef;
use crate::ddl::table_meta::TableMetadataAllocatorRef;
-use crate::ddl::task_meta::FlowTaskMetadataAllocatorRef;
use crate::error::Result;
-use crate::key::flow_task::FlowTaskMetadataManagerRef;
+use crate::key::flow::FlowMetadataManagerRef;
use crate::key::table_route::PhysicalTableRouteValue;
use crate::key::TableMetadataManagerRef;
use crate::node_manager::NodeManagerRef;
@@ -39,9 +39,9 @@ pub mod create_table;
mod create_table_template;
pub mod drop_database;
pub mod drop_table;
+pub mod flow_meta;
mod physical_table_metadata;
pub mod table_meta;
-pub mod task_meta;
#[cfg(any(test, feature = "testing"))]
pub mod test_util;
#[cfg(test)]
@@ -110,8 +110,8 @@ pub struct DdlContext {
pub table_metadata_manager: TableMetadataManagerRef,
/// Allocator for table metadata.
pub table_metadata_allocator: TableMetadataAllocatorRef,
- /// Flow task metadata manager.
- pub flow_task_metadata_manager: FlowTaskMetadataManagerRef,
- /// Allocator for flow task metadata.
- pub flow_task_metadata_allocator: FlowTaskMetadataAllocatorRef,
+ /// Flow metadata manager.
+ pub flow_metadata_manager: FlowMetadataManagerRef,
+ /// Allocator for flow metadata.
+ pub flow_metadata_allocator: FlowMetadataAllocatorRef,
}
diff --git a/src/common/meta/src/ddl/create_flow.rs b/src/common/meta/src/ddl/create_flow.rs
index 018c7dc84276..195bb3a5679b 100644
--- a/src/common/meta/src/ddl/create_flow.rs
+++ b/src/common/meta/src/ddl/create_flow.rs
@@ -36,17 +36,17 @@ use super::utils::add_peer_context_if_needed;
use crate::ddl::utils::handle_retry_error;
use crate::ddl::DdlContext;
use crate::error::Result;
-use crate::key::flow_task::flow_task_info::FlowTaskInfoValue;
-use crate::key::FlowTaskId;
-use crate::lock_key::{CatalogLock, FlowTaskNameLock, TableNameLock};
+use crate::key::flow::flow_info::FlowInfoValue;
+use crate::key::FlowId;
+use crate::lock_key::{CatalogLock, FlowNameLock, TableNameLock};
use crate::peer::Peer;
use crate::rpc::ddl::CreateFlowTask;
use crate::{metrics, ClusterId};
-/// The procedure of flow task creation.
+/// The procedure of flow creation.
pub struct CreateFlowProcedure {
pub context: DdlContext,
- pub data: CreateFlowTaskData,
+ pub data: CreateFlowData,
}
impl CreateFlowProcedure {
@@ -56,13 +56,13 @@ impl CreateFlowProcedure {
pub fn new(cluster_id: ClusterId, task: CreateFlowTask, context: DdlContext) -> Self {
Self {
context,
- data: CreateFlowTaskData {
+ data: CreateFlowData {
cluster_id,
task,
- flow_task_id: None,
+ flow_id: None,
peers: vec![],
source_table_ids: vec![],
- state: CreateFlowTaskState::CreateMetadata,
+ state: CreateFlowState::CreateMetadata,
},
}
}
@@ -76,21 +76,21 @@ impl CreateFlowProcedure {
async fn on_prepare(&mut self) -> Result<Status> {
self.check_creation().await?;
self.collect_source_tables().await?;
- self.allocate_flow_task_id().await?;
- self.data.state = CreateFlowTaskState::CreateFlows;
+ self.allocate_flow_id().await?;
+ self.data.state = CreateFlowState::CreateFlows;
Ok(Status::executing(true))
}
async fn on_flownode_create_flows(&mut self) -> Result<Status> {
// Safety: must be allocated.
- let mut create_flow_task = Vec::with_capacity(self.data.peers.len());
+ let mut create_flow = Vec::with_capacity(self.data.peers.len());
for peer in &self.data.peers {
let requester = self.context.node_manager.flownode(peer).await;
let request = FlowRequest {
body: Some(PbFlowRequest::Create((&self.data).into())),
};
- create_flow_task.push(async move {
+ create_flow.push(async move {
requester
.handle(request)
.await
@@ -98,29 +98,29 @@ impl CreateFlowProcedure {
});
}
- join_all(create_flow_task)
+ join_all(create_flow)
.await
.into_iter()
.collect::<Result<Vec<_>>>()?;
- self.data.state = CreateFlowTaskState::CreateMetadata;
+ self.data.state = CreateFlowState::CreateMetadata;
Ok(Status::executing(true))
}
- /// Creates flow task metadata.
+ /// Creates flow metadata.
///
/// Abort(not-retry):
/// - Failed to create table metadata.
async fn on_create_metadata(&mut self) -> Result<Status> {
- // Safety: The flow task id must be allocated.
- let flow_task_id = self.data.flow_task_id.unwrap();
+ // Safety: The flow id must be allocated.
+ let flow_id = self.data.flow_id.unwrap();
// TODO(weny): Support `or_replace`.
self.context
- .flow_task_metadata_manager
- .create_flow_task_metadata(flow_task_id, (&self.data).into())
+ .flow_metadata_manager
+ .create_flow_metadata(flow_id, (&self.data).into())
.await?;
- info!("Created flow task metadata for flow {flow_task_id}");
- Ok(Status::done_with_output(flow_task_id))
+ info!("Created flow metadata for flow {flow_id}");
+ Ok(Status::done_with_output(flow_id))
}
}
@@ -133,14 +133,14 @@ impl Procedure for CreateFlowProcedure {
async fn execute(&mut self, _ctx: &ProcedureContext) -> ProcedureResult<Status> {
let state = &self.data.state;
- let _timer = metrics::METRIC_META_PROCEDURE_CREATE_FLOW_TASK
+ let _timer = metrics::METRIC_META_PROCEDURE_CREATE_FLOW
.with_label_values(&[state.as_ref()])
.start_timer();
match state {
- CreateFlowTaskState::Prepare => self.on_prepare().await,
- CreateFlowTaskState::CreateFlows => self.on_flownode_create_flows().await,
- CreateFlowTaskState::CreateMetadata => self.on_create_metadata().await,
+ CreateFlowState::Prepare => self.on_prepare().await,
+ CreateFlowState::CreateFlows => self.on_flownode_create_flows().await,
+ CreateFlowState::CreateMetadata => self.on_create_metadata().await,
}
.map_err(handle_retry_error)
}
@@ -151,7 +151,7 @@ impl Procedure for CreateFlowProcedure {
fn lock_key(&self) -> LockKey {
let catalog_name = &self.data.task.catalog_name;
- let task_name = &self.data.task.task_name;
+ let flow_name = &self.data.task.flow_name;
let sink_table_name = &self.data.task.sink_table_name;
LockKey::new(vec![
@@ -162,14 +162,14 @@ impl Procedure for CreateFlowProcedure {
&sink_table_name.catalog_name,
)
.into(),
- FlowTaskNameLock::new(catalog_name, task_name).into(),
+ FlowNameLock::new(catalog_name, flow_name).into(),
])
}
}
-/// The state of [CreateFlowTaskProcedure].
+/// The state of [CreateFlowProcedure].
#[derive(Debug, Clone, Serialize, Deserialize, AsRefStr, PartialEq)]
-pub enum CreateFlowTaskState {
+pub enum CreateFlowState {
/// Prepares to create the flow.
Prepare,
/// Creates flows on the flownode.
@@ -180,22 +180,22 @@ pub enum CreateFlowTaskState {
/// The serializable data.
#[derive(Debug, Serialize, Deserialize)]
-pub struct CreateFlowTaskData {
+pub struct CreateFlowData {
pub(crate) cluster_id: ClusterId,
- pub(crate) state: CreateFlowTaskState,
+ pub(crate) state: CreateFlowState,
pub(crate) task: CreateFlowTask,
- pub(crate) flow_task_id: Option<FlowTaskId>,
+ pub(crate) flow_id: Option<FlowId>,
pub(crate) peers: Vec<Peer>,
pub(crate) source_table_ids: Vec<TableId>,
}
-impl From<&CreateFlowTaskData> for CreateRequest {
- fn from(value: &CreateFlowTaskData) -> Self {
- let flow_task_id = value.flow_task_id.unwrap();
+impl From<&CreateFlowData> for CreateRequest {
+ fn from(value: &CreateFlowData) -> Self {
+ let flow_id = value.flow_id.unwrap();
let source_table_ids = &value.source_table_ids;
CreateRequest {
- task_id: Some(api::v1::flow::TaskId { id: flow_task_id }),
+ flow_id: Some(api::v1::flow::TaskId { id: flow_id }),
source_table_ids: source_table_ids
.iter()
.map(|table_id| api::v1::TableId { id: *table_id })
@@ -206,21 +206,21 @@ impl From<&CreateFlowTaskData> for CreateRequest {
expire_when: value.task.expire_when.clone(),
comment: value.task.comment.clone(),
sql: value.task.sql.clone(),
- task_options: value.task.options.clone(),
+ flow_options: value.task.flow_options.clone(),
}
}
}
-impl From<&CreateFlowTaskData> for FlowTaskInfoValue {
- fn from(value: &CreateFlowTaskData) -> Self {
+impl From<&CreateFlowData> for FlowInfoValue {
+ fn from(value: &CreateFlowData) -> Self {
let CreateFlowTask {
catalog_name,
- task_name,
+ flow_name,
sink_table_name,
expire_when,
comment,
sql,
- options,
+ flow_options: options,
..
} = value.task.clone();
@@ -231,12 +231,12 @@ impl From<&CreateFlowTaskData> for FlowTaskInfoValue {
.map(|(idx, peer)| (idx as u32, peer.id))
.collect::<BTreeMap<_, _>>();
- FlowTaskInfoValue {
+ FlowInfoValue {
source_table_ids: value.source_table_ids.clone(),
sink_table_name,
flownode_ids,
catalog_name,
- task_name,
+ flow_name,
raw_sql: sql,
expire_when,
comment,
diff --git a/src/common/meta/src/ddl/create_flow/check.rs b/src/common/meta/src/ddl/create_flow/check.rs
index 6aa1ecb3ed00..27d8107991e0 100644
--- a/src/common/meta/src/ddl/create_flow/check.rs
+++ b/src/common/meta/src/ddl/create_flow/check.rs
@@ -24,20 +24,20 @@ impl CreateFlowProcedure {
/// - The sink table doesn't exist.
pub(crate) async fn check_creation(&self) -> Result<()> {
let catalog_name = &self.data.task.catalog_name;
- let task_name = &self.data.task.task_name;
+ let flow_name = &self.data.task.flow_name;
let sink_table_name = &self.data.task.sink_table_name;
// Ensures the task name doesn't exist.
let exists = self
.context
- .flow_task_metadata_manager
- .flow_task_name_manager()
- .exists(catalog_name, task_name)
+ .flow_metadata_manager
+ .flow_name_manager()
+ .exists(catalog_name, flow_name)
.await?;
ensure!(
!exists,
- error::TaskAlreadyExistsSnafu {
- task_name: format!("{}.{}", catalog_name, task_name),
+ error::FlowAlreadyExistsSnafu {
+ flow_name: format!("{}.{}", catalog_name, flow_name),
}
);
diff --git a/src/common/meta/src/ddl/create_flow/metadata.rs b/src/common/meta/src/ddl/create_flow/metadata.rs
index ce35ae91ca98..1681479d9173 100644
--- a/src/common/meta/src/ddl/create_flow/metadata.rs
+++ b/src/common/meta/src/ddl/create_flow/metadata.rs
@@ -19,16 +19,16 @@ use crate::error::{self, Result};
use crate::key::table_name::TableNameKey;
impl CreateFlowProcedure {
- /// Allocates the [FlowTaskId].
- pub(crate) async fn allocate_flow_task_id(&mut self) -> Result<()> {
- // TODO(weny, ruihang): We don't support the partitions. It's always be 1, now.
+ /// Allocates the [FlowId].
+ pub(crate) async fn allocate_flow_id(&mut self) -> Result<()> {
+ //TODO(weny, ruihang): We doesn't support the partitions. It's always be 1, now.
let partitions = 1;
- let (flow_task_id, peers) = self
+ let (flow_id, peers) = self
.context
- .flow_task_metadata_allocator
+ .flow_metadata_allocator
.create(partitions)
.await?;
- self.data.flow_task_id = Some(flow_task_id);
+ self.data.flow_id = Some(flow_id);
self.data.peers = peers;
Ok(())
diff --git a/src/common/meta/src/ddl/task_meta.rs b/src/common/meta/src/ddl/flow_meta.rs
similarity index 64%
rename from src/common/meta/src/ddl/task_meta.rs
rename to src/common/meta/src/ddl/flow_meta.rs
index 3e8a4fb36cfc..d7aca9b84eaf 100644
--- a/src/common/meta/src/ddl/task_meta.rs
+++ b/src/common/meta/src/ddl/flow_meta.rs
@@ -17,43 +17,43 @@ use std::sync::Arc;
use tonic::async_trait;
use crate::error::Result;
-use crate::key::FlowTaskId;
+use crate::key::FlowId;
use crate::peer::Peer;
use crate::sequence::SequenceRef;
-/// The reference of [FlowTaskMetadataAllocator].
-pub type FlowTaskMetadataAllocatorRef = Arc<FlowTaskMetadataAllocator>;
+/// The reference of [FlowMetadataAllocator].
+pub type FlowMetadataAllocatorRef = Arc<FlowMetadataAllocator>;
-/// [FlowTaskMetadataAllocator] provides the ability of:
-/// - [FlowTaskId] Allocation.
+/// [FlowMetadataAllocator] provides the ability of:
+/// - [FlowId] Allocation.
/// - [FlownodeId] Selection.
#[derive(Clone)]
-pub struct FlowTaskMetadataAllocator {
- flow_task_id_sequence: SequenceRef,
+pub struct FlowMetadataAllocator {
+ flow_id_sequence: SequenceRef,
partition_peer_allocator: PartitionPeerAllocatorRef,
}
-impl FlowTaskMetadataAllocator {
- /// Returns the [FlowTaskMetadataAllocator] with [NoopPartitionPeerAllocator].
- pub fn with_noop_peer_allocator(flow_task_id_sequence: SequenceRef) -> Self {
+impl FlowMetadataAllocator {
+ /// Returns the [FlowMetadataAllocator] with [NoopPartitionPeerAllocator].
+ pub fn with_noop_peer_allocator(flow_id_sequence: SequenceRef) -> Self {
Self {
- flow_task_id_sequence,
+ flow_id_sequence,
partition_peer_allocator: Arc::new(NoopPartitionPeerAllocator),
}
}
- /// Allocates a the [FlowTaskId].
- pub(crate) async fn allocate_flow_task_id(&self) -> Result<FlowTaskId> {
- let flow_task_id = self.flow_task_id_sequence.next().await? as FlowTaskId;
- Ok(flow_task_id)
+ /// Allocates a the [FlowId].
+ pub(crate) async fn allocate_flow_id(&self) -> Result<FlowId> {
+ let flow_id = self.flow_id_sequence.next().await? as FlowId;
+ Ok(flow_id)
}
- /// Allocates the [FlowTaskId] and [Peer]s.
- pub async fn create(&self, partitions: usize) -> Result<(FlowTaskId, Vec<Peer>)> {
- let flow_task_id = self.allocate_flow_task_id().await?;
+ /// Allocates the [FlowId] and [Peer]s.
+ pub async fn create(&self, partitions: usize) -> Result<(FlowId, Vec<Peer>)> {
+ let flow_id = self.allocate_flow_id().await?;
let peers = self.partition_peer_allocator.alloc(partitions).await?;
- Ok((flow_task_id, peers))
+ Ok((flow_id, peers))
}
}
diff --git a/src/common/meta/src/ddl_manager.rs b/src/common/meta/src/ddl_manager.rs
index 2f3d3a4eb45c..8af7211210c9 100644
--- a/src/common/meta/src/ddl_manager.rs
+++ b/src/common/meta/src/ddl_manager.rs
@@ -616,7 +616,7 @@ async fn handle_create_flow_task(
})?);
info!(
"Flow {}.{}({flow_id}) is created via procedure_id {id:?}",
- create_flow_task.catalog_name, create_flow_task.task_name,
+ create_flow_task.catalog_name, create_flow_task.flow_name,
);
Ok(SubmitDdlTaskResponse {
@@ -756,11 +756,11 @@ mod tests {
use crate::ddl::alter_table::AlterTableProcedure;
use crate::ddl::create_table::CreateTableProcedure;
use crate::ddl::drop_table::DropTableProcedure;
+ use crate::ddl::flow_meta::FlowMetadataAllocator;
use crate::ddl::table_meta::TableMetadataAllocator;
- use crate::ddl::task_meta::FlowTaskMetadataAllocator;
use crate::ddl::truncate_table::TruncateTableProcedure;
use crate::ddl::DdlContext;
- use crate::key::flow_task::FlowTaskMetadataManager;
+ use crate::key::flow::FlowMetadataManager;
use crate::key::TableMetadataManager;
use crate::kv_backend::memory::MemoryKvBackend;
use crate::node_manager::{DatanodeRef, FlownodeRef, NodeManager};
@@ -792,11 +792,10 @@ mod tests {
Arc::new(SequenceBuilder::new("test", kv_backend.clone()).build()),
Arc::new(WalOptionsAllocator::default()),
));
- let flow_task_metadata_manager = Arc::new(FlowTaskMetadataManager::new(kv_backend.clone()));
- let flow_task_metadata_allocator =
- Arc::new(FlowTaskMetadataAllocator::with_noop_peer_allocator(
- Arc::new(SequenceBuilder::new("flow-test", kv_backend.clone()).build()),
- ));
+ let flow_metadata_manager = Arc::new(FlowMetadataManager::new(kv_backend.clone()));
+ let flow_metadata_allocator = Arc::new(FlowMetadataAllocator::with_noop_peer_allocator(
+ Arc::new(SequenceBuilder::new("flow-test", kv_backend.clone()).build()),
+ ));
let state_store = Arc::new(KvStateStore::new(kv_backend.clone()));
let procedure_manager = Arc::new(LocalManager::new(Default::default(), state_store));
@@ -807,8 +806,8 @@ mod tests {
cache_invalidator: Arc::new(DummyCacheInvalidator),
table_metadata_manager,
table_metadata_allocator,
- flow_task_metadata_manager,
- flow_task_metadata_allocator,
+ flow_metadata_manager,
+ flow_metadata_allocator,
memory_region_keeper: Arc::new(MemoryRegionKeeper::default()),
},
procedure_manager.clone(),
diff --git a/src/common/meta/src/error.rs b/src/common/meta/src/error.rs
index 40d5070c3bb3..cd7583967188 100644
--- a/src/common/meta/src/error.rs
+++ b/src/common/meta/src/error.rs
@@ -241,9 +241,9 @@ pub enum Error {
location: Location,
},
- #[snafu(display("Task already exists: {}", task_name))]
- TaskAlreadyExists {
- task_name: String,
+ #[snafu(display("Flow already exists: {}", flow_name))]
+ FlowAlreadyExists {
+ flow_name: String,
location: Location,
},
@@ -511,7 +511,7 @@ impl ErrorExt for Error {
| InvalidEngineType { .. }
| AlterLogicalTablesInvalidArguments { .. }
| CreateLogicalTablesInvalidArguments { .. }
- | TaskAlreadyExists { .. }
+ | FlowAlreadyExists { .. }
| MismatchPrefix { .. }
| DelimiterNotFound { .. } => StatusCode::InvalidArguments,
diff --git a/src/common/meta/src/key.rs b/src/common/meta/src/key.rs
index 618b092c3f3b..30455181dcc3 100644
--- a/src/common/meta/src/key.rs
+++ b/src/common/meta/src/key.rs
@@ -36,16 +36,16 @@
//! - The value is a [TableNameValue] struct; it contains the table id.
//! - Used in the table name to table id lookup.
//!
-//! 6. Flow task info key: `__flow_task/{catalog}/info/{flow_task_id}`
-//! - Stores metadata of the flow task.
+//! 6. Flow info key: `__flow/{catalog}/info/{flow_id}`
+//! - Stores metadata of the flow.
//!
-//! 7. Flow task name key: `__flow_task/{catalog}/name/{task_name}`
-//! - Mapping {catalog}/{task_name} to {flow_task_id}
+//! 7. Flow name key: `__flow/{catalog}/name/{flow_name}`
+//! - Mapping {catalog}/{flow_name} to {flow_id}
//!
-//! 8. Flownode task key: `__flow_task/{catalog}/flownode/{flownode_id}/{flow_task_id}/{partition_id}`
-//! - Mapping {flownode_id} to {flow_task_id}
+//! 8. Flownode flow key: `__flow/{catalog}/flownode/{flownode_id}/{flow_id}/{partition_id}`
+//! - Mapping {flownode_id} to {flow_id}
//!
-//! 9. Table task key: `__table_task/{catalog}/source_table/{table_id}/{flownode_id}/{flow_task_id}/{partition_id}`
+//! 9. Table flow key: `__table_flow/{catalog}/source_table/{table_id}/{flownode_id}/{flow_id}/{partition_id}`
//! - Mapping source table's {table_id} to {flownode_id}
//! - Used in `Flownode` booting.
//!
@@ -54,37 +54,35 @@
//!
//! To simplify the managers used in struct fields and function parameters, we define "unify"
//! table metadata manager: [TableMetadataManager]
-//! and flow task metadata manager: [FlowTaskMetadataManager](crate::key::flow_task::FlowTaskMetadataManager).
+//! and flow metadata manager: [FlowMetadataManager](crate::key::flow::FlowMetadataManager).
//! It contains all the managers defined above. It's recommended to just use this manager only.
//!
-//! The whole picture of flow task keys will be like this:
+//! The whole picture of flow keys will be like this:
//!
-//! __flow_task/
+//! __flow/
//! {catalog}/
//! info/
//! {tsak_id}
//!
//! name/
-//! {task_name}
+//! {flow_name}
//!
//! flownode/
-//! flownode_id/
-//! {flownode_id}/
-//! {task_id}/
-//! {partition_id}
+//! {flownode_id}/
+//! {flow_id}/
+//! {partition_id}
//!
//! source_table/
-//! flow_task/
-//! {table_id}/
-//! {flownode_id}/
-//! {task_id}/
-//! {partition_id}
+//! {table_id}/
+//! {flownode_id}/
+//! {flow_id}/
+//! {partition_id}
pub mod catalog_name;
pub mod datanode_table;
/// TODO(weny):removes id.
#[allow(unused)]
-pub mod flow_task;
+pub mod flow;
pub mod schema_name;
pub mod scope;
pub mod table_info;
@@ -123,8 +121,8 @@ use table_name::{TableNameKey, TableNameManager, TableNameValue};
use self::catalog_name::{CatalogManager, CatalogNameKey, CatalogNameValue};
use self::datanode_table::RegionInfo;
-use self::flow_task::flow_task_info::FlowTaskInfoValue;
-use self::flow_task::flow_task_name::FlowTaskNameValue;
+use self::flow::flow_info::FlowInfoValue;
+use self::flow::flow_name::FlowNameValue;
use self::schema_name::{SchemaManager, SchemaNameKey, SchemaNameValue};
use self::table_route::{TableRouteManager, TableRouteValue};
use self::tombstone::TombstoneManager;
@@ -159,10 +157,10 @@ pub const CACHE_KEY_PREFIXES: [&str; 4] = [
pub type RegionDistribution = BTreeMap<DatanodeId, Vec<RegionNumber>>;
-/// The id of flow task.
-pub type FlowTaskId = u32;
-/// The partition of flow task.
-pub type FlowTaskPartitionId = u32;
+/// The id of flow.
+pub type FlowId = u32;
+/// The partition of flow.
+pub type FlowPartitionId = u32;
lazy_static! {
static ref DATANODE_TABLE_KEY_PATTERN: Regex =
@@ -1054,8 +1052,8 @@ impl_table_meta_value! {
TableNameValue,
TableInfoValue,
DatanodeTableValue,
- FlowTaskInfoValue,
- FlowTaskNameValue
+ FlowInfoValue,
+ FlowNameValue
}
impl_optional_meta_value! {
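
For reference, the four flow key layouts documented in this module serialize to the byte strings asserted by the tests later in this patch. A standalone sketch (plain `format!` calls rather than the crate's key wrappers) of what each layout produces for a catalog named `my_catalog`:

    // Reproduces the documented key layouts as plain strings; the real keys are
    // built by the FlowScoped/CatalogScoped wrappers introduced below.
    fn main() {
        let catalog = "my_catalog";
        let (flow_id, flownode_id, partition_id, table_id) = (2u32, 1u64, 0u32, 1024u32);

        // 6. Flow info key
        assert_eq!(
            format!("__flow/{catalog}/info/{flow_id}"),
            "__flow/my_catalog/info/2"
        );
        // 7. Flow name key
        assert_eq!(
            format!("__flow/{catalog}/name/my_task"),
            "__flow/my_catalog/name/my_task"
        );
        // 8. Flownode flow key
        assert_eq!(
            format!("__flow/{catalog}/flownode/{flownode_id}/{flow_id}/{partition_id}"),
            "__flow/my_catalog/flownode/1/2/0"
        );
        // 9. Table flow key
        assert_eq!(
            format!(
                "__flow/{catalog}/source_table/{table_id}/{flownode_id}/{flow_id}/{partition_id}"
            ),
            "__flow/my_catalog/source_table/1024/1/2/0"
        );
    }
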
diff --git a/src/common/meta/src/key/flow_task.rs b/src/common/meta/src/key/flow.rs
similarity index 52%
rename from src/common/meta/src/key/flow_task.rs
rename to src/common/meta/src/key/flow.rs
index 5e1c2e427ab6..cbda6aa88276 100644
--- a/src/common/meta/src/key/flow_task.rs
+++ b/src/common/meta/src/key/flow.rs
@@ -12,10 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-pub(crate) mod flow_task_info;
-pub(crate) mod flow_task_name;
-pub(crate) mod flownode_task;
-pub(crate) mod table_task;
+pub(crate) mod flow_info;
+pub(crate) mod flow_name;
+pub(crate) mod flownode_flow;
+pub(crate) mod table_flow;
use std::ops::Deref;
use std::sync::Arc;
@@ -23,26 +23,26 @@ use std::sync::Arc;
use common_telemetry::info;
use snafu::{ensure, OptionExt};
-use self::flow_task_info::FlowTaskInfoValue;
+use self::flow_info::FlowInfoValue;
use crate::ensure_values;
use crate::error::{self, Result};
-use crate::key::flow_task::flow_task_info::FlowTaskInfoManager;
-use crate::key::flow_task::flow_task_name::FlowTaskNameManager;
-use crate::key::flow_task::flownode_task::FlownodeTaskManager;
-use crate::key::flow_task::table_task::TableTaskManager;
+use crate::key::flow::flow_info::FlowInfoManager;
+use crate::key::flow::flow_name::FlowNameManager;
+use crate::key::flow::flownode_flow::FlownodeFlowManager;
+use crate::key::flow::table_flow::TableFlowManager;
use crate::key::scope::MetaKey;
use crate::key::txn_helper::TxnOpGetResponseSet;
-use crate::key::FlowTaskId;
+use crate::key::FlowId;
use crate::kv_backend::txn::Txn;
use crate::kv_backend::KvBackendRef;
-/// The key of `__flow_task/` scope.
+/// The key of `__flow/` scope.
#[derive(Debug, PartialEq)]
-pub struct FlowTaskScoped<T> {
+pub struct FlowScoped<T> {
inner: T,
}
-impl<T> Deref for FlowTaskScoped<T> {
+impl<T> Deref for FlowScoped<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
@@ -50,18 +50,18 @@ impl<T> Deref for FlowTaskScoped<T> {
}
}
-impl<T> FlowTaskScoped<T> {
- const PREFIX: &'static str = "__flow_task/";
+impl<T> FlowScoped<T> {
+ const PREFIX: &'static str = "__flow/";
- /// Returns a new [FlowTaskScoped] key.
- pub fn new(inner: T) -> FlowTaskScoped<T> {
+ /// Returns a new [FlowScoped] key.
+ pub fn new(inner: T) -> FlowScoped<T> {
Self { inner }
}
}
-impl<T: MetaKey<T>> MetaKey<FlowTaskScoped<T>> for FlowTaskScoped<T> {
+impl<T: MetaKey<T>> MetaKey<FlowScoped<T>> for FlowScoped<T> {
fn to_bytes(&self) -> Vec<u8> {
- let prefix = FlowTaskScoped::<T>::PREFIX.as_bytes();
+ let prefix = FlowScoped::<T>::PREFIX.as_bytes();
let inner = self.inner.to_bytes();
let mut bytes = Vec::with_capacity(prefix.len() + inner.len());
bytes.extend(prefix);
@@ -69,8 +69,8 @@ impl<T: MetaKey<T>> MetaKey<FlowTaskScoped<T>> for FlowTaskScoped<T> {
bytes
}
- fn from_bytes(bytes: &[u8]) -> Result<FlowTaskScoped<T>> {
- let prefix = FlowTaskScoped::<T>::PREFIX.as_bytes();
+ fn from_bytes(bytes: &[u8]) -> Result<FlowScoped<T>> {
+ let prefix = FlowScoped::<T>::PREFIX.as_bytes();
ensure!(
bytes.starts_with(prefix),
error::MismatchPrefixSnafu {
@@ -79,140 +79,134 @@ impl<T: MetaKey<T>> MetaKey<FlowTaskScoped<T>> for FlowTaskScoped<T> {
}
);
let inner = T::from_bytes(&bytes[prefix.len()..])?;
- Ok(FlowTaskScoped { inner })
+ Ok(FlowScoped { inner })
}
}
-pub type FlowTaskMetadataManagerRef = Arc<FlowTaskMetadataManager>;
+pub type FlowMetadataManagerRef = Arc<FlowMetadataManager>;
/// The manager of metadata, provides ability to:
-/// - Create metadata of the task.
-/// - Retrieve metadata of the task.
-/// - Delete metadata of the task.
-pub struct FlowTaskMetadataManager {
- flow_task_info_manager: FlowTaskInfoManager,
- flownode_task_manager: FlownodeTaskManager,
- table_task_manager: TableTaskManager,
- flow_task_name_manager: FlowTaskNameManager,
+/// - Create metadata of the flow.
+/// - Retrieve metadata of the flow.
+/// - Delete metadata of the flow.
+pub struct FlowMetadataManager {
+ flow_info_manager: FlowInfoManager,
+ flownode_flow_manager: FlownodeFlowManager,
+ table_flow_manager: TableFlowManager,
+ flow_name_manager: FlowNameManager,
kv_backend: KvBackendRef,
}
-impl FlowTaskMetadataManager {
- /// Returns a new [FlowTaskMetadataManager].
+impl FlowMetadataManager {
+ /// Returns a new [FlowMetadataManager].
pub fn new(kv_backend: KvBackendRef) -> Self {
Self {
- flow_task_info_manager: FlowTaskInfoManager::new(kv_backend.clone()),
- flow_task_name_manager: FlowTaskNameManager::new(kv_backend.clone()),
- flownode_task_manager: FlownodeTaskManager::new(kv_backend.clone()),
- table_task_manager: TableTaskManager::new(kv_backend.clone()),
+ flow_info_manager: FlowInfoManager::new(kv_backend.clone()),
+ flow_name_manager: FlowNameManager::new(kv_backend.clone()),
+ flownode_flow_manager: FlownodeFlowManager::new(kv_backend.clone()),
+ table_flow_manager: TableFlowManager::new(kv_backend.clone()),
kv_backend,
}
}
- /// Returns the [FlowTaskNameManager].
- pub fn flow_task_name_manager(&self) -> &FlowTaskNameManager {
- &self.flow_task_name_manager
+ /// Returns the [FlowNameManager].
+ pub fn flow_name_manager(&self) -> &FlowNameManager {
+ &self.flow_name_manager
}
- /// Returns the [FlowTaskInfoManager].
- pub fn flow_task_info_manager(&self) -> &FlowTaskInfoManager {
- &self.flow_task_info_manager
+ /// Returns the [FlowInfoManager].
+ pub fn flow_info_manager(&self) -> &FlowInfoManager {
+ &self.flow_info_manager
}
- /// Returns the [FlownodeTaskManager].
- pub fn flownode_task_manager(&self) -> &FlownodeTaskManager {
- &self.flownode_task_manager
+ /// Returns the [FlownodeFlowManager].
+ pub fn flownode_flow_manager(&self) -> &FlownodeFlowManager {
+ &self.flownode_flow_manager
}
- /// Returns the [TableTaskManager].
- pub fn table_task_manager(&self) -> &TableTaskManager {
- &self.table_task_manager
+ /// Returns the [TableFlowManager].
+ pub fn table_flow_manager(&self) -> &TableFlowManager {
+ &self.table_flow_manager
}
- /// Creates metadata for task and returns an error if different metadata exists.
- pub async fn create_flow_task_metadata(
+ /// Creates metadata for flow and returns an error if different metadata exists.
+ pub async fn create_flow_metadata(
&self,
- flow_task_id: FlowTaskId,
- flow_task_value: FlowTaskInfoValue,
+ flow_id: FlowId,
+ flow_value: FlowInfoValue,
) -> Result<()> {
- let (create_flow_task_name_txn, on_create_flow_task_name_failure) =
- self.flow_task_name_manager.build_create_txn(
- &flow_task_value.catalog_name,
- &flow_task_value.task_name,
- flow_task_id,
- )?;
-
- let (create_flow_task_txn, on_create_flow_task_failure) =
- self.flow_task_info_manager.build_create_txn(
- &flow_task_value.catalog_name,
- flow_task_id,
- &flow_task_value,
- )?;
-
- let create_flownode_task_txn = self.flownode_task_manager.build_create_txn(
- &flow_task_value.catalog_name,
- flow_task_id,
- flow_task_value.flownode_ids().clone(),
+ let (create_flow_flow_name_txn, on_create_flow_flow_name_failure) = self
+ .flow_name_manager
+ .build_create_txn(&flow_value.catalog_name, &flow_value.flow_name, flow_id)?;
+
+ let (create_flow_txn, on_create_flow_failure) = self.flow_info_manager.build_create_txn(
+ &flow_value.catalog_name,
+ flow_id,
+ &flow_value,
+ )?;
+
+ let create_flownode_flow_txn = self.flownode_flow_manager.build_create_txn(
+ &flow_value.catalog_name,
+ flow_id,
+ flow_value.flownode_ids().clone(),
);
- let create_table_task_txn = self.table_task_manager.build_create_txn(
- &flow_task_value.catalog_name,
- flow_task_id,
- flow_task_value.flownode_ids().clone(),
- flow_task_value.source_table_ids(),
+ let create_table_flow_txn = self.table_flow_manager.build_create_txn(
+ &flow_value.catalog_name,
+ flow_id,
+ flow_value.flownode_ids().clone(),
+ flow_value.source_table_ids(),
);
let txn = Txn::merge_all(vec![
- create_flow_task_name_txn,
- create_flow_task_txn,
- create_flownode_task_txn,
- create_table_task_txn,
+ create_flow_flow_name_txn,
+ create_flow_txn,
+ create_flownode_flow_txn,
+ create_table_flow_txn,
]);
info!(
- "Creating flow task {}.{}({}), with {} txn operations",
- flow_task_value.catalog_name,
- flow_task_value.task_name,
- flow_task_id,
+ "Creating flow {}.{}({}), with {} txn operations",
+ flow_value.catalog_name,
+ flow_value.flow_name,
+ flow_id,
txn.max_operations()
);
let mut resp = self.kv_backend.txn(txn).await?;
if !resp.succeeded {
let mut set = TxnOpGetResponseSet::from(&mut resp.responses);
- let remote_flow_task_name = on_create_flow_task_name_failure(&mut set)?
- .with_context(||error::UnexpectedSnafu {
- err_msg: format!(
- "Reads the empty flow task name during the creating flow task, flow_task_id: {flow_task_id}"
+ let remote_flow_flow_name =
+ on_create_flow_flow_name_failure(&mut set)?.with_context(|| {
+ error::UnexpectedSnafu {
+ err_msg: format!(
+ "Reads the empty flow name during the creating flow, flow_id: {flow_id}"
),
+ }
})?;
- if remote_flow_task_name.flow_task_id() != flow_task_id {
+ if remote_flow_flow_name.flow_id() != flow_id {
info!(
- "Trying to create flow task {}.{}({}), but flow task({}) already exists",
- flow_task_value.catalog_name,
- flow_task_value.task_name,
- flow_task_id,
- remote_flow_task_name.flow_task_id()
+ "Trying to create flow {}.{}({}), but flow({}) already exists",
+ flow_value.catalog_name,
+ flow_value.flow_name,
+ flow_id,
+ remote_flow_flow_name.flow_id()
);
- return error::TaskAlreadyExistsSnafu {
- task_name: format!(
- "{}.{}",
- flow_task_value.catalog_name, flow_task_value.task_name
- ),
+ return error::FlowAlreadyExistsSnafu {
+ flow_name: format!("{}.{}", flow_value.catalog_name, flow_value.flow_name),
}
.fail();
}
- let remote_flow_task = on_create_flow_task_failure(&mut set)?.with_context(|| {
- error::UnexpectedSnafu {
+ let remote_flow =
+ on_create_flow_failure(&mut set)?.with_context(|| error::UnexpectedSnafu {
err_msg: format!(
- "Reads the empty flow task during the creating flow task, flow_task_id: {flow_task_id}"
- ),
- }
- })?;
- let op_name = "creating flow task";
- ensure_values!(*remote_flow_task, flow_task_value, op_name);
+ "Reads the empty flow during the creating flow, flow_id: {flow_id}"
+ ),
+ })?;
+ let op_name = "creating flow";
+ ensure_values!(*remote_flow, flow_value, op_name);
}
Ok(())
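
When the merged put-if-absent transaction above does not succeed, the failure closures read back what is already stored under the same keys and the function resolves one of three outcomes. A condensed sketch of that decision, using placeholder types rather than the FlowNameValue/FlowInfoValue read from TxnOpGetResponseSet:

    // Condensed outcome of create_flow_metadata when the txn reports a conflict.
    // `remote_name_flow_id` and `remote_flow_value` stand in for the values read
    // back from the kv backend under the flow name / flow info keys.
    fn resolve_create_conflict(
        flow_id: u32,
        flow_value: &str,
        remote_name_flow_id: u32,
        remote_flow_value: &str,
    ) -> Result<(), String> {
        if remote_name_flow_id != flow_id {
            // A different flow already owns this {catalog}/{flow_name} pair.
            return Err(format!("Flow already exists: flow id {remote_name_flow_id}"));
        }
        if remote_flow_value != flow_value {
            // Same flow id but different metadata; the real code rejects this via ensure_values!.
            return Err("Reads the different value".to_string());
        }
        // Identical metadata was already written; creating again succeeds as a no-op.
        Ok(())
    }
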
@@ -227,7 +221,7 @@ mod tests {
use futures::TryStreamExt;
use super::*;
- use crate::key::flow_task::table_task::TableTaskKey;
+ use crate::key::flow::table_flow::TableFlowKey;
use crate::key::scope::CatalogScoped;
use crate::kv_backend::memory::MemoryKvBackend;
use crate::table_name::TableName;
@@ -251,19 +245,19 @@ mod tests {
#[test]
fn test_flow_scoped_to_bytes() {
- let key = FlowTaskScoped::new(CatalogScoped::new(
+ let key = FlowScoped::new(CatalogScoped::new(
"my_catalog".to_string(),
MockKey {
inner: b"hi".to_vec(),
},
));
- assert_eq!(b"__flow_task/my_catalog/hi".to_vec(), key.to_bytes());
+ assert_eq!(b"__flow/my_catalog/hi".to_vec(), key.to_bytes());
}
#[test]
fn test_flow_scoped_from_bytes() {
- let bytes = b"__flow_task/my_catalog/hi";
- let key = FlowTaskScoped::<CatalogScoped<MockKey>>::from_bytes(bytes).unwrap();
+ let bytes = b"__flow/my_catalog/hi";
+ let key = FlowScoped::<CatalogScoped<MockKey>>::from_bytes(bytes).unwrap();
assert_eq!(key.catalog(), "my_catalog");
assert_eq!(key.inner.inner, b"hi".to_vec());
}
@@ -271,24 +265,24 @@ mod tests {
#[test]
fn test_flow_scoped_from_bytes_mismatch() {
let bytes = b"__table/my_catalog/hi";
- let err = FlowTaskScoped::<CatalogScoped<MockKey>>::from_bytes(bytes).unwrap_err();
+ let err = FlowScoped::<CatalogScoped<MockKey>>::from_bytes(bytes).unwrap_err();
assert_matches!(err, error::Error::MismatchPrefix { .. });
}
#[tokio::test]
async fn test_create_flow_metadata() {
let mem_kv = Arc::new(MemoryKvBackend::default());
- let flow_metadata_manager = FlowTaskMetadataManager::new(mem_kv.clone());
- let task_id = 10;
+ let flow_metadata_manager = FlowMetadataManager::new(mem_kv.clone());
+ let flow_id = 10;
let catalog_name = "greptime";
let sink_table_name = TableName {
catalog_name: catalog_name.to_string(),
schema_name: "my_schema".to_string(),
table_name: "sink_table".to_string(),
};
- let flow_task_value = FlowTaskInfoValue {
+ let flow_value = FlowInfoValue {
catalog_name: catalog_name.to_string(),
- task_name: "task".to_string(),
+ flow_name: "flow".to_string(),
source_table_ids: vec![1024, 1025, 1026],
sink_table_name,
flownode_ids: [(0, 1u64)].into(),
@@ -298,42 +292,42 @@ mod tests {
options: Default::default(),
};
flow_metadata_manager
- .create_flow_task_metadata(task_id, flow_task_value.clone())
+ .create_flow_metadata(flow_id, flow_value.clone())
.await
.unwrap();
// Creates again.
flow_metadata_manager
- .create_flow_task_metadata(task_id, flow_task_value.clone())
+ .create_flow_metadata(flow_id, flow_value.clone())
.await
.unwrap();
let got = flow_metadata_manager
- .flow_task_info_manager()
- .get(catalog_name, task_id)
+ .flow_info_manager()
+ .get(catalog_name, flow_id)
.await
.unwrap()
.unwrap();
- assert_eq!(got, flow_task_value);
- let tasks = flow_metadata_manager
- .flownode_task_manager()
- .tasks(catalog_name, 1)
+ assert_eq!(got, flow_value);
+ let flows = flow_metadata_manager
+ .flownode_flow_manager()
+ .flows(catalog_name, 1)
.try_collect::<Vec<_>>()
.await
.unwrap();
- assert_eq!(tasks, vec![(task_id, 0)]);
+ assert_eq!(flows, vec![(flow_id, 0)]);
for table_id in [1024, 1025, 1026] {
let nodes = flow_metadata_manager
- .table_task_manager()
+ .table_flow_manager()
.nodes(catalog_name, table_id)
.try_collect::<Vec<_>>()
.await
.unwrap();
assert_eq!(
nodes,
- vec![TableTaskKey::new(
+ vec![TableFlowKey::new(
catalog_name.to_string(),
table_id,
1,
- task_id,
+ flow_id,
0
)]
);
@@ -341,19 +335,19 @@ mod tests {
}
#[tokio::test]
- async fn test_create_table_metadata_task_exists_err() {
+ async fn test_create_table_metadata_flow_exists_err() {
let mem_kv = Arc::new(MemoryKvBackend::default());
- let flow_metadata_manager = FlowTaskMetadataManager::new(mem_kv);
- let task_id = 10;
+ let flow_metadata_manager = FlowMetadataManager::new(mem_kv);
+ let flow_id = 10;
let catalog_name = "greptime";
let sink_table_name = TableName {
catalog_name: catalog_name.to_string(),
schema_name: "my_schema".to_string(),
table_name: "sink_table".to_string(),
};
- let flow_task_value = FlowTaskInfoValue {
+ let flow_value = FlowInfoValue {
catalog_name: "greptime".to_string(),
- task_name: "task".to_string(),
+ flow_name: "flow".to_string(),
source_table_ids: vec![1024, 1025, 1026],
sink_table_name: sink_table_name.clone(),
flownode_ids: [(0, 1u64)].into(),
@@ -363,13 +357,13 @@ mod tests {
options: Default::default(),
};
flow_metadata_manager
- .create_flow_task_metadata(task_id, flow_task_value.clone())
+ .create_flow_metadata(flow_id, flow_value.clone())
.await
.unwrap();
// Creates again.
- let flow_task_value = FlowTaskInfoValue {
+ let flow_value = FlowInfoValue {
catalog_name: catalog_name.to_string(),
- task_name: "task".to_string(),
+ flow_name: "flow".to_string(),
source_table_ids: vec![1024, 1025, 1026],
sink_table_name,
flownode_ids: [(0, 1u64)].into(),
@@ -379,26 +373,26 @@ mod tests {
options: Default::default(),
};
let err = flow_metadata_manager
- .create_flow_task_metadata(task_id + 1, flow_task_value)
+ .create_flow_metadata(flow_id + 1, flow_value)
.await
.unwrap_err();
- assert_matches!(err, error::Error::TaskAlreadyExists { .. });
+ assert_matches!(err, error::Error::FlowAlreadyExists { .. });
}
#[tokio::test]
async fn test_create_table_metadata_unexpected_err() {
let mem_kv = Arc::new(MemoryKvBackend::default());
- let flow_metadata_manager = FlowTaskMetadataManager::new(mem_kv);
- let task_id = 10;
+ let flow_metadata_manager = FlowMetadataManager::new(mem_kv);
+ let flow_id = 10;
let catalog_name = "greptime";
let sink_table_name = TableName {
catalog_name: catalog_name.to_string(),
schema_name: "my_schema".to_string(),
table_name: "sink_table".to_string(),
};
- let flow_task_value = FlowTaskInfoValue {
+ let flow_value = FlowInfoValue {
catalog_name: "greptime".to_string(),
- task_name: "task".to_string(),
+ flow_name: "flow".to_string(),
source_table_ids: vec![1024, 1025, 1026],
sink_table_name: sink_table_name.clone(),
flownode_ids: [(0, 1u64)].into(),
@@ -408,7 +402,7 @@ mod tests {
options: Default::default(),
};
flow_metadata_manager
- .create_flow_task_metadata(task_id, flow_task_value.clone())
+ .create_flow_metadata(flow_id, flow_value.clone())
.await
.unwrap();
// Creates again.
@@ -417,9 +411,9 @@ mod tests {
schema_name: "my_schema".to_string(),
table_name: "another_sink_table".to_string(),
};
- let flow_task_value = FlowTaskInfoValue {
+ let flow_value = FlowInfoValue {
catalog_name: "greptime".to_string(),
- task_name: "task".to_string(),
+ flow_name: "flow".to_string(),
source_table_ids: vec![1024, 1025, 1026],
sink_table_name: another_sink_table_name,
flownode_ids: [(0, 1u64)].into(),
@@ -429,7 +423,7 @@ mod tests {
options: Default::default(),
};
let err = flow_metadata_manager
- .create_flow_task_metadata(task_id, flow_task_value)
+ .create_flow_metadata(flow_id, flow_value)
.await
.unwrap_err();
assert!(err.to_string().contains("Reads the different value"));
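
A usage sketch of the renamed manager, adapted from the test module above; it only compiles inside common-meta because the `flow_info` module and the `FlowInfoValue` fields are `pub(crate)`, and the field values here are placeholders:

    use std::sync::Arc;

    use crate::key::flow::flow_info::FlowInfoValue;
    use crate::key::flow::FlowMetadataManager;
    use crate::kv_backend::memory::MemoryKvBackend;
    use crate::table_name::TableName;

    async fn create_example_flow() -> crate::error::Result<()> {
        let kv_backend = Arc::new(MemoryKvBackend::default());
        let manager = FlowMetadataManager::new(kv_backend);

        let flow_value = FlowInfoValue {
            catalog_name: "greptime".to_string(),
            flow_name: "flow".to_string(),
            source_table_ids: vec![1024, 1025, 1026],
            sink_table_name: TableName {
                catalog_name: "greptime".to_string(),
                schema_name: "my_schema".to_string(),
                table_name: "sink_table".to_string(),
            },
            flownode_ids: [(0, 1u64)].into(),
            raw_sql: "<raw sql>".to_string(),
            expire_when: "<expire expr>".to_string(),
            comment: String::new(),
            options: Default::default(),
        };

        // One merged txn writes the info, name, flownode and source-table keys.
        manager.create_flow_metadata(10, flow_value).await
    }
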
diff --git a/src/common/meta/src/key/flow/flow_info.rs b/src/common/meta/src/key/flow/flow_info.rs
new file mode 100644
index 000000000000..f9b9ae4b259d
--- /dev/null
+++ b/src/common/meta/src/key/flow/flow_info.rs
@@ -0,0 +1,212 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::collections::{BTreeMap, HashMap};
+
+use lazy_static::lazy_static;
+use regex::Regex;
+use serde::{Deserialize, Serialize};
+use snafu::OptionExt;
+use table::metadata::TableId;
+
+use crate::error::{self, Result};
+use crate::key::flow::FlowScoped;
+use crate::key::scope::{CatalogScoped, MetaKey};
+use crate::key::txn_helper::TxnOpGetResponseSet;
+use crate::key::{txn_helper, DeserializedValueWithBytes, FlowId, FlowPartitionId, TableMetaValue};
+use crate::kv_backend::txn::Txn;
+use crate::kv_backend::KvBackendRef;
+use crate::table_name::TableName;
+use crate::FlownodeId;
+
+const FLOW_INFO_KEY_PREFIX: &str = "info";
+
+lazy_static! {
+ static ref FLOW_INFO_KEY_PATTERN: Regex =
+ Regex::new(&format!("^{FLOW_INFO_KEY_PREFIX}/([0-9]+)$")).unwrap();
+}
+
+/// The key stores the metadata of the flow.
+///
+/// The layout: `__flow/{catalog}/info/{flow_id}`.
+pub struct FlowInfoKey(FlowScoped<CatalogScoped<FlowInfoKeyInner>>);
+
+impl MetaKey<FlowInfoKey> for FlowInfoKey {
+ fn to_bytes(&self) -> Vec<u8> {
+ self.0.to_bytes()
+ }
+
+ fn from_bytes(bytes: &[u8]) -> Result<FlowInfoKey> {
+ Ok(FlowInfoKey(
+ FlowScoped::<CatalogScoped<FlowInfoKeyInner>>::from_bytes(bytes)?,
+ ))
+ }
+}
+
+impl FlowInfoKey {
+ /// Returns the [FlowInfoKey].
+ pub fn new(catalog: String, flow_id: FlowId) -> FlowInfoKey {
+ let inner = FlowInfoKeyInner::new(flow_id);
+ FlowInfoKey(FlowScoped::new(CatalogScoped::new(catalog, inner)))
+ }
+
+ /// Returns the catalog.
+ pub fn catalog(&self) -> &str {
+ self.0.catalog()
+ }
+
+ /// Returns the [FlowId].
+ pub fn flow_id(&self) -> FlowId {
+ self.0.flow_id
+ }
+}
+
+/// The key of flow metadata.
+#[derive(Debug, Clone, Copy, PartialEq)]
+struct FlowInfoKeyInner {
+ flow_id: FlowId,
+}
+
+impl FlowInfoKeyInner {
+ /// Returns a [FlowInfoKey] with the specified `flow_id`.
+ pub fn new(flow_id: FlowId) -> FlowInfoKeyInner {
+ FlowInfoKeyInner { flow_id }
+ }
+}
+
+impl MetaKey<FlowInfoKeyInner> for FlowInfoKeyInner {
+ fn to_bytes(&self) -> Vec<u8> {
+ format!("{FLOW_INFO_KEY_PREFIX}/{}", self.flow_id).into_bytes()
+ }
+
+ fn from_bytes(bytes: &[u8]) -> Result<FlowInfoKeyInner> {
+ let key = std::str::from_utf8(bytes).map_err(|e| {
+ error::InvalidTableMetadataSnafu {
+ err_msg: format!(
+ "FlowInfoKeyInner '{}' is not a valid UTF8 string: {e}",
+ String::from_utf8_lossy(bytes)
+ ),
+ }
+ .build()
+ })?;
+ let captures =
+ FLOW_INFO_KEY_PATTERN
+ .captures(key)
+ .context(error::InvalidTableMetadataSnafu {
+ err_msg: format!("Invalid FlowInfoKeyInner '{key}'"),
+ })?;
+ // Safety: pass the regex check above
+ let flow_id = captures[1].parse::<FlowId>().unwrap();
+ Ok(FlowInfoKeyInner { flow_id })
+ }
+}
+
+// The metadata of the flow.
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
+pub struct FlowInfoValue {
+ /// The source tables used by the flow.
+ pub(crate) source_table_ids: Vec<TableId>,
+ /// The sink table used by the flow.
+ pub(crate) sink_table_name: TableName,
+ /// Which flow nodes this flow is running on.
+ pub(crate) flownode_ids: BTreeMap<FlowPartitionId, FlownodeId>,
+ /// The catalog name.
+ pub(crate) catalog_name: String,
+ /// The flow name.
+ pub(crate) flow_name: String,
+ /// The raw sql.
+ pub(crate) raw_sql: String,
+ /// The expr of expire.
+ pub(crate) expire_when: String,
+ /// The comment.
+ pub(crate) comment: String,
+ /// The options.
+ pub(crate) options: HashMap<String, String>,
+}
+
+impl FlowInfoValue {
+ /// Returns the `flownode_id`.
+ pub fn flownode_ids(&self) -> &BTreeMap<FlowPartitionId, FlownodeId> {
+ &self.flownode_ids
+ }
+
+ /// Returns the `source_table`.
+ pub fn source_table_ids(&self) -> &[TableId] {
+ &self.source_table_ids
+ }
+}
+
+/// The manager of [FlowInfoKey].
+pub struct FlowInfoManager {
+ kv_backend: KvBackendRef,
+}
+
+impl FlowInfoManager {
+ /// Returns a new [FlowInfoManager].
+ pub fn new(kv_backend: KvBackendRef) -> Self {
+ Self { kv_backend }
+ }
+
+ /// Returns the [FlowInfoValue] of specified `flow_id`.
+ pub async fn get(&self, catalog: &str, flow_id: FlowId) -> Result<Option<FlowInfoValue>> {
+ let key = FlowInfoKey::new(catalog.to_string(), flow_id).to_bytes();
+ self.kv_backend
+ .get(&key)
+ .await?
+ .map(|x| FlowInfoValue::try_from_raw_value(&x.value))
+ .transpose()
+ }
+
+ /// Builds a create flow transaction.
+ /// It is expected that the `__flow/{catalog}/info/{flow_id}` key is not occupied.
+ /// Otherwise, the transaction retrieves the existing value.
+ pub(crate) fn build_create_txn(
+ &self,
+ catalog: &str,
+ flow_id: FlowId,
+ flow_value: &FlowInfoValue,
+ ) -> Result<(
+ Txn,
+ impl FnOnce(
+ &mut TxnOpGetResponseSet,
+ ) -> Result<Option<DeserializedValueWithBytes<FlowInfoValue>>>,
+ )> {
+ let key = FlowInfoKey::new(catalog.to_string(), flow_id).to_bytes();
+ let txn = txn_helper::build_put_if_absent_txn(key.clone(), flow_value.try_as_raw_value()?);
+
+ Ok((
+ txn,
+ TxnOpGetResponseSet::decode_with(TxnOpGetResponseSet::filter(key)),
+ ))
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_key_serialization() {
+ let flow_info = FlowInfoKey::new("my_catalog".to_string(), 2);
+ assert_eq!(b"__flow/my_catalog/info/2".to_vec(), flow_info.to_bytes());
+ }
+
+ #[test]
+ fn test_key_deserialization() {
+ let bytes = b"__flow/my_catalog/info/2".to_vec();
+ let key = FlowInfoKey::from_bytes(&bytes).unwrap();
+ assert_eq!(key.catalog(), "my_catalog");
+ assert_eq!(key.flow_id(), 2);
+ }
+}
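
`build_create_txn` above wraps `txn_helper::build_put_if_absent_txn`, so creating the same flow id twice does not overwrite anything; the caller instead gets the stored value back to compare against. A standalone analogy over a plain map (a hypothetical helper, not the kv-backend API):

    use std::collections::hash_map::Entry;
    use std::collections::HashMap;

    // Hypothetical stand-in for the put-if-absent transaction: write when the key
    // is vacant, otherwise fail and hand back the value that is already stored so
    // the caller can compare it (as create_flow_metadata does).
    fn put_if_absent(
        kv: &mut HashMap<Vec<u8>, Vec<u8>>,
        key: Vec<u8>,
        value: Vec<u8>,
    ) -> (bool, Option<Vec<u8>>) {
        match kv.entry(key) {
            Entry::Vacant(e) => {
                e.insert(value);
                (true, None)
            }
            Entry::Occupied(e) => (false, Some(e.get().clone())),
        }
    }

    fn main() {
        let mut kv = HashMap::new();
        let key = b"__flow/my_catalog/info/2".to_vec();
        assert_eq!(put_if_absent(&mut kv, key.clone(), b"v1".to_vec()), (true, None));
        // A second create on the same key fails and returns the first value.
        assert_eq!(
            put_if_absent(&mut kv, key, b"v2".to_vec()),
            (false, Some(b"v1".to_vec()))
        );
    }
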
diff --git a/src/common/meta/src/key/flow/flow_name.rs b/src/common/meta/src/key/flow/flow_name.rs
new file mode 100644
index 000000000000..dbb6d81c35b1
--- /dev/null
+++ b/src/common/meta/src/key/flow/flow_name.rs
@@ -0,0 +1,203 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use lazy_static::lazy_static;
+use regex::Regex;
+use serde::{Deserialize, Serialize};
+use snafu::OptionExt;
+
+use crate::error::{self, Result};
+use crate::key::flow::FlowScoped;
+use crate::key::scope::{CatalogScoped, MetaKey};
+use crate::key::txn_helper::TxnOpGetResponseSet;
+use crate::key::{txn_helper, DeserializedValueWithBytes, FlowId, TableMetaValue, NAME_PATTERN};
+use crate::kv_backend::txn::Txn;
+use crate::kv_backend::KvBackendRef;
+
+const FLOW_NAME_KEY_PREFIX: &str = "name";
+
+lazy_static! {
+ static ref FLOW_NAME_KEY_PATTERN: Regex =
+ Regex::new(&format!("^{FLOW_NAME_KEY_PREFIX}/({NAME_PATTERN})$")).unwrap();
+}
+
+/// The key of mapping {flow_name} to [FlowId].
+///
+/// The layout: `__flow/{catalog}/name/{flow_name}`.
+pub struct FlowNameKey(FlowScoped<CatalogScoped<FlowNameKeyInner>>);
+
+impl FlowNameKey {
+ /// Returns the [FlowNameKey]
+ pub fn new(catalog: String, flow_name: String) -> FlowNameKey {
+ let inner = FlowNameKeyInner::new(flow_name);
+ FlowNameKey(FlowScoped::new(CatalogScoped::new(catalog, inner)))
+ }
+
+ /// Returns the catalog.
+ pub fn catalog(&self) -> &str {
+ self.0.catalog()
+ }
+
+ /// Returns the `flow_name`.
+ pub fn flow_name(&self) -> &str {
+ &self.0.flow_name
+ }
+}
+
+impl MetaKey<FlowNameKey> for FlowNameKey {
+ fn to_bytes(&self) -> Vec<u8> {
+ self.0.to_bytes()
+ }
+
+ fn from_bytes(bytes: &[u8]) -> Result<FlowNameKey> {
+ Ok(FlowNameKey(
+ FlowScoped::<CatalogScoped<FlowNameKeyInner>>::from_bytes(bytes)?,
+ ))
+ }
+}
+
+/// The key of mapping name to [FlowId]
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct FlowNameKeyInner {
+ pub flow_name: String,
+}
+
+impl MetaKey<FlowNameKeyInner> for FlowNameKeyInner {
+ fn to_bytes(&self) -> Vec<u8> {
+ format!("{FLOW_NAME_KEY_PREFIX}/{}", self.flow_name).into_bytes()
+ }
+
+ fn from_bytes(bytes: &[u8]) -> Result<FlowNameKeyInner> {
+ let key = std::str::from_utf8(bytes).map_err(|e| {
+ error::InvalidTableMetadataSnafu {
+ err_msg: format!(
+ "FlowNameKeyInner '{}' is not a valid UTF8 string: {e}",
+ String::from_utf8_lossy(bytes)
+ ),
+ }
+ .build()
+ })?;
+ let captures =
+ FLOW_NAME_KEY_PATTERN
+ .captures(key)
+ .context(error::InvalidTableMetadataSnafu {
+ err_msg: format!("Invalid FlowNameKeyInner '{key}'"),
+ })?;
+ // Safety: pass the regex check above
+ let flow_name = captures[1].to_string();
+ Ok(FlowNameKeyInner { flow_name })
+ }
+}
+
+impl FlowNameKeyInner {
+ /// Returns a [FlowNameKeyInner].
+ pub fn new(flow_name: String) -> Self {
+ Self { flow_name }
+ }
+}
+
+/// The value of [FlowNameKey].
+#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
+pub struct FlowNameValue {
+ flow_id: FlowId,
+}
+
+impl FlowNameValue {
+ /// Returns a [FlowNameValue] with specified [FlowId].
+ pub fn new(flow_id: FlowId) -> Self {
+ Self { flow_id }
+ }
+
+ /// Returns the [FlowId]
+ pub fn flow_id(&self) -> FlowId {
+ self.flow_id
+ }
+}
+
+/// The manager of [FlowNameKey].
+pub struct FlowNameManager {
+ kv_backend: KvBackendRef,
+}
+
+impl FlowNameManager {
+ /// Returns a new [FlowNameManager].
+ pub fn new(kv_backend: KvBackendRef) -> Self {
+ Self { kv_backend }
+ }
+
+ /// Returns the [FlowNameValue] of specified `catalog.flow`.
+ pub async fn get(&self, catalog: &str, flow: &str) -> Result<Option<FlowNameValue>> {
+ let key = FlowNameKey::new(catalog.to_string(), flow.to_string());
+ let raw_key = key.to_bytes();
+ self.kv_backend
+ .get(&raw_key)
+ .await?
+ .map(|x| FlowNameValue::try_from_raw_value(&x.value))
+ .transpose()
+ }
+
+ /// Returns true if the `flow` exists.
+ pub async fn exists(&self, catalog: &str, flow: &str) -> Result<bool> {
+ let key = FlowNameKey::new(catalog.to_string(), flow.to_string());
+ let raw_key = key.to_bytes();
+ self.kv_backend.exists(&raw_key).await
+ }
+
+ /// Builds a create flow name transaction.
+ /// It's expected that the `__flow/{catalog}/name/{flow_name}` key is not occupied.
+ /// Otherwise, the transaction retrieves the existing value.
+ pub fn build_create_txn(
+ &self,
+ catalog: &str,
+ name: &str,
+ flow_id: FlowId,
+ ) -> Result<(
+ Txn,
+ impl FnOnce(
+ &mut TxnOpGetResponseSet,
+ ) -> Result<Option<DeserializedValueWithBytes<FlowNameValue>>>,
+ )> {
+ let key = FlowNameKey::new(catalog.to_string(), name.to_string());
+ let raw_key = key.to_bytes();
+ let flow_flow_name_value = FlowNameValue::new(flow_id);
+ let txn = txn_helper::build_put_if_absent_txn(
+ raw_key.clone(),
+ flow_flow_name_value.try_as_raw_value()?,
+ );
+
+ Ok((
+ txn,
+ TxnOpGetResponseSet::decode_with(TxnOpGetResponseSet::filter(raw_key)),
+ ))
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_key_serialization() {
+ let key = FlowNameKey::new("my_catalog".to_string(), "my_task".to_string());
+ assert_eq!(b"__flow/my_catalog/name/my_task".to_vec(), key.to_bytes(),);
+ }
+
+ #[test]
+ fn test_key_deserialization() {
+ let bytes = b"__flow/my_catalog/name/my_task".to_vec();
+ let key = FlowNameKey::from_bytes(&bytes).unwrap();
+ assert_eq!(key.catalog(), "my_catalog");
+ assert_eq!(key.flow_name(), "my_task");
+ }
+}
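
A small in-crate sketch of the name lookup this file adds; it assumes the `FlowNameManager` API shown above and the in-memory kv backend used by the tests:

    use std::sync::Arc;

    use crate::key::flow::flow_name::FlowNameManager;
    use crate::kv_backend::memory::MemoryKvBackend;

    // Resolves `{catalog}/{flow_name}` to a FlowId, if the flow was created.
    async fn lookup_flow_id(catalog: &str, flow_name: &str) -> crate::error::Result<Option<u32>> {
        let kv_backend = Arc::new(MemoryKvBackend::default());
        let manager = FlowNameManager::new(kv_backend);

        if !manager.exists(catalog, flow_name).await? {
            return Ok(None);
        }
        Ok(manager.get(catalog, flow_name).await?.map(|v| v.flow_id()))
    }
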
diff --git a/src/common/meta/src/key/flow/flownode_flow.rs b/src/common/meta/src/key/flow/flownode_flow.rs
new file mode 100644
index 000000000000..360b96b0f56f
--- /dev/null
+++ b/src/common/meta/src/key/flow/flownode_flow.rs
@@ -0,0 +1,251 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::sync::Arc;
+
+use futures::stream::BoxStream;
+use futures::TryStreamExt;
+use lazy_static::lazy_static;
+use regex::Regex;
+use snafu::OptionExt;
+
+use crate::error::{self, Result};
+use crate::key::flow::FlowScoped;
+use crate::key::scope::{BytesAdapter, CatalogScoped, MetaKey};
+use crate::key::{FlowId, FlowPartitionId};
+use crate::kv_backend::txn::{Txn, TxnOp};
+use crate::kv_backend::KvBackendRef;
+use crate::range_stream::{PaginationStream, DEFAULT_PAGE_SIZE};
+use crate::rpc::store::RangeRequest;
+use crate::rpc::KeyValue;
+use crate::FlownodeId;
+
+lazy_static! {
+ static ref FLOWNODE_FLOW_KEY_PATTERN: Regex = Regex::new(&format!(
+ "^{FLOWNODE_FLOW_KEY_PREFIX}/([0-9]+)/([0-9]+)/([0-9]+)$"
+ ))
+ .unwrap();
+}
+
+const FLOWNODE_FLOW_KEY_PREFIX: &str = "flownode";
+
+/// The key of mapping [FlownodeId] to [FlowId].
+///
+/// The layout `__flow/{catalog}/flownode/{flownode_id}/{flow_id}/{partition_id}`
+pub struct FlownodeFlowKey(FlowScoped<CatalogScoped<FlownodeFlowKeyInner>>);
+
+impl MetaKey<FlownodeFlowKey> for FlownodeFlowKey {
+ fn to_bytes(&self) -> Vec<u8> {
+ self.0.to_bytes()
+ }
+
+ fn from_bytes(bytes: &[u8]) -> Result<FlownodeFlowKey> {
+ Ok(FlownodeFlowKey(FlowScoped::<
+ CatalogScoped<FlownodeFlowKeyInner>,
+ >::from_bytes(bytes)?))
+ }
+}
+
+impl FlownodeFlowKey {
+ /// Returns a new [FlownodeFlowKey].
+ pub fn new(
+ catalog: String,
+ flownode_id: FlownodeId,
+ flow_id: FlowId,
+ partition_id: FlowPartitionId,
+ ) -> FlownodeFlowKey {
+ let inner = FlownodeFlowKeyInner::new(flownode_id, flow_id, partition_id);
+ FlownodeFlowKey(FlowScoped::new(CatalogScoped::new(catalog, inner)))
+ }
+
+ /// The prefix used to retrieve all [FlownodeFlowKey]s with the specified `flownode_id`.
+ pub fn range_start_key(catalog: String, flownode_id: FlownodeId) -> Vec<u8> {
+ let catalog_scoped_key = CatalogScoped::new(
+ catalog,
+ BytesAdapter::from(FlownodeFlowKeyInner::range_start_key(flownode_id).into_bytes()),
+ );
+
+ FlowScoped::new(catalog_scoped_key).to_bytes()
+ }
+
+ /// Returns the catalog.
+ pub fn catalog(&self) -> &str {
+ self.0.catalog()
+ }
+
+ /// Returns the [FlowId].
+ pub fn flow_id(&self) -> FlowId {
+ self.0.flow_id
+ }
+
+ /// Returns the [FlownodeId].
+ pub fn flownode_id(&self) -> FlownodeId {
+ self.0.flownode_id
+ }
+
+ /// Returns the [FlowPartitionId].
+ pub fn partition_id(&self) -> FlowPartitionId {
+ self.0.partition_id
+ }
+}
+
+/// The key of mapping [FlownodeId] to [FlowId].
+pub struct FlownodeFlowKeyInner {
+ flownode_id: FlownodeId,
+ flow_id: FlowId,
+ partition_id: FlowPartitionId,
+}
+
+impl FlownodeFlowKeyInner {
+ /// Returns a [FlownodeFlowKey] with the specified `flownode_id`, `flow_id` and `partition_id`.
+ pub fn new(flownode_id: FlownodeId, flow_id: FlowId, partition_id: FlowPartitionId) -> Self {
+ Self {
+ flownode_id,
+ flow_id,
+ partition_id,
+ }
+ }
+
+ fn prefix(flownode_id: FlownodeId) -> String {
+ format!("{}/{flownode_id}", FLOWNODE_FLOW_KEY_PREFIX)
+ }
+
+ /// The prefix used to retrieve all [FlownodeFlowKey]s with the specified `flownode_id`.
+ fn range_start_key(flownode_id: FlownodeId) -> String {
+ format!("{}/", Self::prefix(flownode_id))
+ }
+}
+
+impl MetaKey<FlownodeFlowKeyInner> for FlownodeFlowKeyInner {
+ fn to_bytes(&self) -> Vec<u8> {
+ format!(
+ "{FLOWNODE_FLOW_KEY_PREFIX}/{}/{}/{}",
+ self.flownode_id, self.flow_id, self.partition_id,
+ )
+ .into_bytes()
+ }
+
+ fn from_bytes(bytes: &[u8]) -> Result<FlownodeFlowKeyInner> {
+ let key = std::str::from_utf8(bytes).map_err(|e| {
+ error::InvalidTableMetadataSnafu {
+ err_msg: format!(
+ "FlownodeFlowKeyInner '{}' is not a valid UTF8 string: {e}",
+ String::from_utf8_lossy(bytes)
+ ),
+ }
+ .build()
+ })?;
+ let captures =
+ FLOWNODE_FLOW_KEY_PATTERN
+ .captures(key)
+ .context(error::InvalidTableMetadataSnafu {
+ err_msg: format!("Invalid FlownodeFlowKeyInner '{key}'"),
+ })?;
+ // Safety: pass the regex check above
+ let flownode_id = captures[1].parse::<FlownodeId>().unwrap();
+ let flow_id = captures[2].parse::<FlowId>().unwrap();
+ let partition_id = captures[3].parse::<FlowPartitionId>().unwrap();
+
+ Ok(FlownodeFlowKeyInner {
+ flownode_id,
+ flow_id,
+ partition_id,
+ })
+ }
+}
+
+/// The manager of [FlownodeFlowKey].
+pub struct FlownodeFlowManager {
+ kv_backend: KvBackendRef,
+}
+
+/// Decodes `KeyValue` to [FlownodeFlowKey].
+pub fn flownode_flow_key_decoder(kv: KeyValue) -> Result<FlownodeFlowKey> {
+ FlownodeFlowKey::from_bytes(&kv.key)
+}
+
+impl FlownodeFlowManager {
+ /// Returns a new [FlownodeFlowManager].
+ pub fn new(kv_backend: KvBackendRef) -> Self {
+ Self { kv_backend }
+ }
+
+ /// Retrieves all [FlowId] and [FlowPartitionId]s of the specified `flownode_id`.
+ pub fn flows(
+ &self,
+ catalog: &str,
+ flownode_id: FlownodeId,
+ ) -> BoxStream<'static, Result<(FlowId, FlowPartitionId)>> {
+ let start_key = FlownodeFlowKey::range_start_key(catalog.to_string(), flownode_id);
+ let req = RangeRequest::new().with_prefix(start_key);
+
+ let stream = PaginationStream::new(
+ self.kv_backend.clone(),
+ req,
+ DEFAULT_PAGE_SIZE,
+ Arc::new(flownode_flow_key_decoder),
+ );
+
+ Box::pin(stream.map_ok(|key| (key.flow_id(), key.partition_id())))
+ }
+
+ /// Builds a create flownode flow transaction.
+ ///
+ /// Puts `__flow/{catalog}/flownode/{flownode_id}/{flow_id}/{partition_id}` keys.
+ pub(crate) fn build_create_txn<I: IntoIterator<Item = (FlowPartitionId, FlownodeId)>>(
+ &self,
+ catalog: &str,
+ flow_id: FlowId,
+ flownode_ids: I,
+ ) -> Txn {
+ let txns = flownode_ids
+ .into_iter()
+ .map(|(partition_id, flownode_id)| {
+ let key =
+ FlownodeFlowKey::new(catalog.to_string(), flownode_id, flow_id, partition_id)
+ .to_bytes();
+ TxnOp::Put(key, vec![])
+ })
+ .collect::<Vec<_>>();
+
+ Txn::new().and_then(txns)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::key::flow::flownode_flow::FlownodeFlowKey;
+ use crate::key::scope::MetaKey;
+
+ #[test]
+ fn test_key_serialization() {
+ let flownode_flow = FlownodeFlowKey::new("my_catalog".to_string(), 1, 2, 0);
+ assert_eq!(
+ b"__flow/my_catalog/flownode/1/2/0".to_vec(),
+ flownode_flow.to_bytes()
+ );
+ let prefix = FlownodeFlowKey::range_start_key("my_catalog".to_string(), 1);
+ assert_eq!(b"__flow/my_catalog/flownode/1/".to_vec(), prefix);
+ }
+
+ #[test]
+ fn test_key_deserialization() {
+ let bytes = b"__flow/my_catalog/flownode/1/2/0".to_vec();
+ let key = FlownodeFlowKey::from_bytes(&bytes).unwrap();
+ assert_eq!(key.catalog(), "my_catalog");
+ assert_eq!(key.flownode_id(), 1);
+ assert_eq!(key.flow_id(), 2);
+ assert_eq!(key.partition_id(), 0);
+ }
+}
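
The `flows` stream above is backed by a paginated prefix scan over `__flow/{catalog}/flownode/{flownode_id}/`; consuming it looks like the manager test in flow.rs. An in-crate sketch that assumes an already constructed manager:

    use futures::TryStreamExt;

    use crate::key::flow::flownode_flow::FlownodeFlowManager;

    // Collects every (flow_id, partition_id) scheduled on the given flownode by
    // scanning the `__flow/{catalog}/flownode/{flownode_id}/` key prefix.
    async fn flows_on_node(
        manager: &FlownodeFlowManager,
        catalog: &str,
        flownode_id: u64,
    ) -> crate::error::Result<Vec<(u32, u32)>> {
        manager
            .flows(catalog, flownode_id)
            .try_collect::<Vec<_>>()
            .await
    }
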
diff --git a/src/common/meta/src/key/flow_task/table_task.rs b/src/common/meta/src/key/flow/table_flow.rs
similarity index 56%
rename from src/common/meta/src/key/flow_task/table_task.rs
rename to src/common/meta/src/key/flow/table_flow.rs
index dd0d34adcfba..d3cabd86f276 100644
--- a/src/common/meta/src/key/flow_task/table_task.rs
+++ b/src/common/meta/src/key/flow/table_flow.rs
@@ -21,9 +21,9 @@ use snafu::OptionExt;
use table::metadata::TableId;
use crate::error::{self, Result};
-use crate::key::flow_task::FlowTaskScoped;
+use crate::key::flow::FlowScoped;
use crate::key::scope::{BytesAdapter, CatalogScoped, MetaKey};
-use crate::key::{FlowTaskId, FlowTaskPartitionId};
+use crate::key::{FlowId, FlowPartitionId};
use crate::kv_backend::txn::{Txn, TxnOp};
use crate::kv_backend::KvBackendRef;
use crate::range_stream::{PaginationStream, DEFAULT_PAGE_SIZE};
@@ -31,63 +31,63 @@ use crate::rpc::store::RangeRequest;
use crate::rpc::KeyValue;
use crate::FlownodeId;
-const TABLE_TASK_KEY_PREFIX: &str = "source_table";
+const TABLE_FLOW_KEY_PREFIX: &str = "source_table";
lazy_static! {
- static ref TABLE_TASK_KEY_PATTERN: Regex = Regex::new(&format!(
- "^{TABLE_TASK_KEY_PREFIX}/([0-9]+)/([0-9]+)/([0-9]+)/([0-9]+)$"
+ static ref TABLE_FLOW_KEY_PATTERN: Regex = Regex::new(&format!(
+ "^{TABLE_FLOW_KEY_PREFIX}/([0-9]+)/([0-9]+)/([0-9]+)/([0-9]+)$"
))
.unwrap();
}
-/// The key of mapping [TableId] to [FlownodeId] and [FlowTaskId].
+/// The key of mapping [TableId] to [FlownodeId] and [FlowId].
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
-struct TableTaskKeyInner {
+struct TableFlowKeyInner {
table_id: TableId,
flownode_id: FlownodeId,
- flow_task_id: FlowTaskId,
- partition_id: FlowTaskPartitionId,
+ flow_id: FlowId,
+ partition_id: FlowPartitionId,
}
-/// The key of mapping [TableId] to [FlownodeId] and [FlowTaskId].
+/// The key of mapping [TableId] to [FlownodeId] and [FlowId].
///
-/// The layout: `__flow_task/{catalog}/table/{table_id}/{flownode_id}/{flow_task_id}/{partition_id}`.
+/// The layout: `__flow/{catalog}/source_table/{table_id}/{flownode_id}/{flow_id}/{partition_id}`.
#[derive(Debug, PartialEq)]
-pub struct TableTaskKey(FlowTaskScoped<CatalogScoped<TableTaskKeyInner>>);
+pub struct TableFlowKey(FlowScoped<CatalogScoped<TableFlowKeyInner>>);
-impl MetaKey<TableTaskKey> for TableTaskKey {
+impl MetaKey<TableFlowKey> for TableFlowKey {
fn to_bytes(&self) -> Vec<u8> {
self.0.to_bytes()
}
- fn from_bytes(bytes: &[u8]) -> Result<TableTaskKey> {
- Ok(TableTaskKey(FlowTaskScoped::<
- CatalogScoped<TableTaskKeyInner>,
- >::from_bytes(bytes)?))
+ fn from_bytes(bytes: &[u8]) -> Result<TableFlowKey> {
+ Ok(TableFlowKey(
+ FlowScoped::<CatalogScoped<TableFlowKeyInner>>::from_bytes(bytes)?,
+ ))
}
}
-impl TableTaskKey {
- /// Returns a new [TableTaskKey].
+impl TableFlowKey {
+ /// Returns a new [TableFlowKey].
pub fn new(
catalog: String,
table_id: TableId,
flownode_id: FlownodeId,
- flow_task_id: FlowTaskId,
- partition_id: FlowTaskPartitionId,
- ) -> TableTaskKey {
- let inner = TableTaskKeyInner::new(table_id, flownode_id, flow_task_id, partition_id);
- TableTaskKey(FlowTaskScoped::new(CatalogScoped::new(catalog, inner)))
+ flow_id: FlowId,
+ partition_id: FlowPartitionId,
+ ) -> TableFlowKey {
+ let inner = TableFlowKeyInner::new(table_id, flownode_id, flow_id, partition_id);
+ TableFlowKey(FlowScoped::new(CatalogScoped::new(catalog, inner)))
}
- /// The prefix used to retrieve all [TableTaskKey]s with the specified `table_id`.
+ /// The prefix used to retrieve all [TableFlowKey]s with the specified `table_id`.
pub fn range_start_key(catalog: String, table_id: TableId) -> Vec<u8> {
let catalog_scoped_key = CatalogScoped::new(
catalog,
- BytesAdapter::from(TableTaskKeyInner::range_start_key(table_id).into_bytes()),
+ BytesAdapter::from(TableFlowKeyInner::range_start_key(table_id).into_bytes()),
);
- FlowTaskScoped::new(catalog_scoped_key).to_bytes()
+ FlowScoped::new(catalog_scoped_key).to_bytes()
}
/// Returns the catalog.
@@ -100,9 +100,9 @@ impl TableTaskKey {
self.0.table_id
}
- /// Returns the [FlowTaskId].
- pub fn flow_task_id(&self) -> FlowTaskId {
- self.0.flow_task_id
+ /// Returns the [FlowId].
+ pub fn flow_id(&self) -> FlowId {
+ self.0.flow_id
}
/// Returns the [FlownodeId].
@@ -111,117 +111,117 @@ impl TableTaskKey {
}
/// Returns the [PartitionId].
- pub fn partition_id(&self) -> FlowTaskPartitionId {
+ pub fn partition_id(&self) -> FlowPartitionId {
self.0.partition_id
}
}
-impl TableTaskKeyInner {
- /// Returns a new [TableTaskKey].
+impl TableFlowKeyInner {
+ /// Returns a new [TableFlowKey].
fn new(
table_id: TableId,
flownode_id: FlownodeId,
- flow_task_id: FlowTaskId,
- partition_id: FlowTaskPartitionId,
- ) -> TableTaskKeyInner {
+ flow_id: FlowId,
+ partition_id: FlowPartitionId,
+ ) -> TableFlowKeyInner {
Self {
table_id,
flownode_id,
- flow_task_id,
+ flow_id,
partition_id,
}
}
fn prefix(table_id: TableId) -> String {
- format!("{}/{table_id}", TABLE_TASK_KEY_PREFIX)
+ format!("{}/{table_id}", TABLE_FLOW_KEY_PREFIX)
}
- /// The prefix used to retrieve all [TableTaskKey]s with the specified `table_id`.
+ /// The prefix used to retrieve all [TableFlowKey]s with the specified `table_id`.
fn range_start_key(table_id: TableId) -> String {
format!("{}/", Self::prefix(table_id))
}
}
-impl MetaKey<TableTaskKeyInner> for TableTaskKeyInner {
+impl MetaKey<TableFlowKeyInner> for TableFlowKeyInner {
fn to_bytes(&self) -> Vec<u8> {
format!(
- "{TABLE_TASK_KEY_PREFIX}/{}/{}/{}/{}",
- self.table_id, self.flownode_id, self.flow_task_id, self.partition_id
+ "{TABLE_FLOW_KEY_PREFIX}/{}/{}/{}/{}",
+ self.table_id, self.flownode_id, self.flow_id, self.partition_id
)
.into_bytes()
}
- fn from_bytes(bytes: &[u8]) -> Result<TableTaskKeyInner> {
+ fn from_bytes(bytes: &[u8]) -> Result<TableFlowKeyInner> {
let key = std::str::from_utf8(bytes).map_err(|e| {
error::InvalidTableMetadataSnafu {
err_msg: format!(
- "TableTaskKeyInner '{}' is not a valid UTF8 string: {e}",
+ "TableFlowKeyInner '{}' is not a valid UTF8 string: {e}",
String::from_utf8_lossy(bytes)
),
}
.build()
})?;
let captures =
- TABLE_TASK_KEY_PATTERN
+ TABLE_FLOW_KEY_PATTERN
.captures(key)
.context(error::InvalidTableMetadataSnafu {
- err_msg: format!("Invalid TableTaskKeyInner '{key}'"),
+ err_msg: format!("Invalid TableFlowKeyInner '{key}'"),
})?;
// Safety: pass the regex check above
let table_id = captures[1].parse::<TableId>().unwrap();
let flownode_id = captures[2].parse::<FlownodeId>().unwrap();
- let flow_task_id = captures[3].parse::<FlowTaskId>().unwrap();
- let partition_id = captures[4].parse::<FlowTaskPartitionId>().unwrap();
- Ok(TableTaskKeyInner::new(
+ let flow_id = captures[3].parse::<FlowId>().unwrap();
+ let partition_id = captures[4].parse::<FlowPartitionId>().unwrap();
+ Ok(TableFlowKeyInner::new(
table_id,
flownode_id,
- flow_task_id,
+ flow_id,
partition_id,
))
}
}
-/// Decodes `KeyValue` to [TableTaskKey].
-pub fn table_task_decoder(kv: KeyValue) -> Result<TableTaskKey> {
- TableTaskKey::from_bytes(&kv.key)
+/// Decodes `KeyValue` to [TableFlowKey].
+pub fn table_flow_decoder(kv: KeyValue) -> Result<TableFlowKey> {
+ TableFlowKey::from_bytes(&kv.key)
}
-/// The manager of [TableTaskKey].
-pub struct TableTaskManager {
+/// The manager of [TableFlowKey].
+pub struct TableFlowManager {
kv_backend: KvBackendRef,
}
-impl TableTaskManager {
- /// Returns a new [TableTaskManager].
+impl TableFlowManager {
+ /// Returns a new [TableFlowManager].
pub fn new(kv_backend: KvBackendRef) -> Self {
Self { kv_backend }
}
- /// Retrieves all [TableTaskKey]s of the specified `table_id`.
+ /// Retrieves all [TableFlowKey]s of the specified `table_id`.
pub fn nodes(
&self,
catalog: &str,
table_id: TableId,
- ) -> BoxStream<'static, Result<TableTaskKey>> {
- let start_key = TableTaskKey::range_start_key(catalog.to_string(), table_id);
+ ) -> BoxStream<'static, Result<TableFlowKey>> {
+ let start_key = TableFlowKey::range_start_key(catalog.to_string(), table_id);
let req = RangeRequest::new().with_prefix(start_key);
let stream = PaginationStream::new(
self.kv_backend.clone(),
req,
DEFAULT_PAGE_SIZE,
- Arc::new(table_task_decoder),
+ Arc::new(table_flow_decoder),
);
Box::pin(stream)
}
- /// Builds a create table task transaction.
+ /// Builds a create table flow transaction.
///
- /// Puts `__table_task/{table_id}/{node_id}/{partition_id}` keys.
- pub fn build_create_txn<I: IntoIterator<Item = (FlowTaskPartitionId, FlownodeId)>>(
+ /// Puts `__flow/{catalog}/source_table/{table_id}/{flownode_id}/{flow_id}/{partition_id}` keys.
+ pub fn build_create_txn<I: IntoIterator<Item = (FlowPartitionId, FlownodeId)>>(
&self,
catalog: &str,
- flow_task_id: FlowTaskId,
+ flow_id: FlowId,
flownode_ids: I,
source_table_ids: &[TableId],
) -> Txn {
@@ -230,11 +230,11 @@ impl TableTaskManager {
.flat_map(|(partition_id, flownode_id)| {
source_table_ids.iter().map(move |table_id| {
TxnOp::Put(
- TableTaskKey::new(
+ TableFlowKey::new(
catalog.to_string(),
*table_id,
flownode_id,
- flow_task_id,
+ flow_id,
partition_id,
)
.to_bytes(),
@@ -254,26 +254,23 @@ mod tests {
#[test]
fn test_key_serialization() {
- let table_task_key = TableTaskKey::new("my_catalog".to_string(), 1024, 1, 2, 0);
- assert_eq!(
- b"__flow_task/my_catalog/source_table/1024/1/2/0".to_vec(),
- table_task_key.to_bytes(),
- );
- let prefix = TableTaskKey::range_start_key("my_catalog".to_string(), 1024);
+ let table_flow_key = TableFlowKey::new("my_catalog".to_string(), 1024, 1, 2, 0);
assert_eq!(
- b"__flow_task/my_catalog/source_table/1024/".to_vec(),
- prefix
+ b"__flow/my_catalog/source_table/1024/1/2/0".to_vec(),
+ table_flow_key.to_bytes(),
);
+ let prefix = TableFlowKey::range_start_key("my_catalog".to_string(), 1024);
+ assert_eq!(b"__flow/my_catalog/source_table/1024/".to_vec(), prefix);
}
#[test]
fn test_key_deserialization() {
- let bytes = b"__flow_task/my_catalog/source_table/1024/1/2/0".to_vec();
- let key = TableTaskKey::from_bytes(&bytes).unwrap();
+ let bytes = b"__flow/my_catalog/source_table/1024/1/2/0".to_vec();
+ let key = TableFlowKey::from_bytes(&bytes).unwrap();
assert_eq!(key.catalog(), "my_catalog");
assert_eq!(key.source_table_id(), 1024);
assert_eq!(key.flownode_id(), 1);
- assert_eq!(key.flow_task_id(), 2);
+ assert_eq!(key.flow_id(), 2);
assert_eq!(key.partition_id(), 0);
}
}
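
`build_create_txn` above emits one `TxnOp::Put` per (partition, source table) pair. A standalone sketch of the key fan-out for the values used by the flow.rs test (flow id 10, one partition on flownode 1, three source tables):

    fn main() {
        let catalog = "greptime";
        let (flow_id, partition_id, flownode_id) = (10u32, 0u32, 1u64);
        let source_table_ids = [1024u32, 1025, 1026];

        // One `__flow/{catalog}/source_table/...` key per (partition, table) pair.
        let keys: Vec<String> = [(partition_id, flownode_id)]
            .iter()
            .flat_map(|(partition_id, flownode_id)| {
                source_table_ids.iter().map(move |table_id| {
                    format!(
                        "__flow/{catalog}/source_table/{table_id}/{flownode_id}/{flow_id}/{partition_id}"
                    )
                })
            })
            .collect();

        assert_eq!(keys.len(), 3);
        assert_eq!(keys[0], "__flow/greptime/source_table/1024/1/10/0");
    }
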
diff --git a/src/common/meta/src/key/flow_task/flow_task_info.rs b/src/common/meta/src/key/flow_task/flow_task_info.rs
deleted file mode 100644
index 371ab96a1e41..000000000000
--- a/src/common/meta/src/key/flow_task/flow_task_info.rs
+++ /dev/null
@@ -1,222 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use std::collections::{BTreeMap, HashMap};
-
-use lazy_static::lazy_static;
-use regex::Regex;
-use serde::{Deserialize, Serialize};
-use snafu::OptionExt;
-use table::metadata::TableId;
-
-use crate::error::{self, Result};
-use crate::key::flow_task::FlowTaskScoped;
-use crate::key::scope::{CatalogScoped, MetaKey};
-use crate::key::txn_helper::TxnOpGetResponseSet;
-use crate::key::{
- txn_helper, DeserializedValueWithBytes, FlowTaskId, FlowTaskPartitionId, TableMetaValue,
-};
-use crate::kv_backend::txn::Txn;
-use crate::kv_backend::KvBackendRef;
-use crate::table_name::TableName;
-use crate::FlownodeId;
-
-const FLOW_TASK_INFO_KEY_PREFIX: &str = "info";
-
-lazy_static! {
- static ref FLOW_TASK_INFO_KEY_PATTERN: Regex =
- Regex::new(&format!("^{FLOW_TASK_INFO_KEY_PREFIX}/([0-9]+)$")).unwrap();
-}
-
-/// The key stores the metadata of the task.
-///
-/// The layout: `__flow_task/{catalog}/info/{flow_task_id}`.
-pub struct FlowTaskInfoKey(FlowTaskScoped<CatalogScoped<FlowTaskInfoKeyInner>>);
-
-impl MetaKey<FlowTaskInfoKey> for FlowTaskInfoKey {
- fn to_bytes(&self) -> Vec<u8> {
- self.0.to_bytes()
- }
-
- fn from_bytes(bytes: &[u8]) -> Result<FlowTaskInfoKey> {
- Ok(FlowTaskInfoKey(FlowTaskScoped::<
- CatalogScoped<FlowTaskInfoKeyInner>,
- >::from_bytes(bytes)?))
- }
-}
-
-impl FlowTaskInfoKey {
- /// Returns the [FlowTaskInfoKey].
- pub fn new(catalog: String, flow_task_id: FlowTaskId) -> FlowTaskInfoKey {
- let inner = FlowTaskInfoKeyInner::new(flow_task_id);
- FlowTaskInfoKey(FlowTaskScoped::new(CatalogScoped::new(catalog, inner)))
- }
-
- /// Returns the catalog.
- pub fn catalog(&self) -> &str {
- self.0.catalog()
- }
-
- /// Returns the [FlowTaskId].
- pub fn flow_task_id(&self) -> FlowTaskId {
- self.0.flow_task_id
- }
-}
-
-/// The key of flow task metadata.
-#[derive(Debug, Clone, Copy, PartialEq)]
-struct FlowTaskInfoKeyInner {
- flow_task_id: FlowTaskId,
-}
-
-impl FlowTaskInfoKeyInner {
- /// Returns a [FlowTaskInfoKey] with the specified `flow_task_id`.
- pub fn new(flow_task_id: FlowTaskId) -> FlowTaskInfoKeyInner {
- FlowTaskInfoKeyInner { flow_task_id }
- }
-}
-
-impl MetaKey<FlowTaskInfoKeyInner> for FlowTaskInfoKeyInner {
- fn to_bytes(&self) -> Vec<u8> {
- format!("{FLOW_TASK_INFO_KEY_PREFIX}/{}", self.flow_task_id).into_bytes()
- }
-
- fn from_bytes(bytes: &[u8]) -> Result<FlowTaskInfoKeyInner> {
- let key = std::str::from_utf8(bytes).map_err(|e| {
- error::InvalidTableMetadataSnafu {
- err_msg: format!(
- "FlowTaskInfoKeyInner '{}' is not a valid UTF8 string: {e}",
- String::from_utf8_lossy(bytes)
- ),
- }
- .build()
- })?;
- let captures =
- FLOW_TASK_INFO_KEY_PATTERN
- .captures(key)
- .context(error::InvalidTableMetadataSnafu {
- err_msg: format!("Invalid FlowTaskInfoKeyInner '{key}'"),
- })?;
- // Safety: pass the regex check above
- let flow_task_id = captures[1].parse::<FlowTaskId>().unwrap();
- Ok(FlowTaskInfoKeyInner { flow_task_id })
- }
-}
-
-// The metadata of the flow task.
-#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
-pub struct FlowTaskInfoValue {
- /// The source tables used by the task.
- pub(crate) source_table_ids: Vec<TableId>,
- /// The sink table used by the task.
- pub(crate) sink_table_name: TableName,
- /// Which flow nodes this task is running on.
- pub(crate) flownode_ids: BTreeMap<FlowTaskPartitionId, FlownodeId>,
- /// The catalog name.
- pub(crate) catalog_name: String,
- /// The task name.
- pub(crate) task_name: String,
- /// The raw sql.
- pub(crate) raw_sql: String,
- /// The expr of expire.
- pub(crate) expire_when: String,
- /// The comment.
- pub(crate) comment: String,
- /// The options.
- pub(crate) options: HashMap<String, String>,
-}
-
-impl FlowTaskInfoValue {
- /// Returns the `flownode_id`.
- pub fn flownode_ids(&self) -> &BTreeMap<FlowTaskPartitionId, FlownodeId> {
- &self.flownode_ids
- }
-
- /// Returns the `source_table`.
- pub fn source_table_ids(&self) -> &[TableId] {
- &self.source_table_ids
- }
-}
-
-/// The manager of [FlowTaskInfoKey].
-pub struct FlowTaskInfoManager {
- kv_backend: KvBackendRef,
-}
-
-impl FlowTaskInfoManager {
- /// Returns a new [FlowTaskInfoManager].
- pub fn new(kv_backend: KvBackendRef) -> Self {
- Self { kv_backend }
- }
-
- /// Returns the [FlowTaskInfoValue] of specified `flow_task_id`.
- pub async fn get(
- &self,
- catalog: &str,
- flow_task_id: FlowTaskId,
- ) -> Result<Option<FlowTaskInfoValue>> {
- let key = FlowTaskInfoKey::new(catalog.to_string(), flow_task_id).to_bytes();
- self.kv_backend
- .get(&key)
- .await?
- .map(|x| FlowTaskInfoValue::try_from_raw_value(&x.value))
- .transpose()
- }
-
- /// Builds a create flow task transaction.
- /// It is expected that the `__flow_task/{catalog}/info/{flow_task_id}` wasn't occupied.
- /// Otherwise, the transaction will retrieve existing value.
- pub(crate) fn build_create_txn(
- &self,
- catalog: &str,
- flow_task_id: FlowTaskId,
- flow_task_value: &FlowTaskInfoValue,
- ) -> Result<(
- Txn,
- impl FnOnce(
- &mut TxnOpGetResponseSet,
- ) -> Result<Option<DeserializedValueWithBytes<FlowTaskInfoValue>>>,
- )> {
- let key = FlowTaskInfoKey::new(catalog.to_string(), flow_task_id).to_bytes();
- let txn =
- txn_helper::build_put_if_absent_txn(key.clone(), flow_task_value.try_as_raw_value()?);
-
- Ok((
- txn,
- TxnOpGetResponseSet::decode_with(TxnOpGetResponseSet::filter(key)),
- ))
- }
-}
-
-#[cfg(test)]
-mod tests {
- use super::*;
-
- #[test]
- fn test_key_serialization() {
- let flow_task = FlowTaskInfoKey::new("my_catalog".to_string(), 2);
- assert_eq!(
- b"__flow_task/my_catalog/info/2".to_vec(),
- flow_task.to_bytes()
- );
- }
-
- #[test]
- fn test_key_deserialization() {
- let bytes = b"__flow_task/my_catalog/info/2".to_vec();
- let key = FlowTaskInfoKey::from_bytes(&bytes).unwrap();
- assert_eq!(key.catalog(), "my_catalog");
- assert_eq!(key.flow_task_id(), 2);
- }
-}
diff --git a/src/common/meta/src/key/flow_task/flow_task_name.rs b/src/common/meta/src/key/flow_task/flow_task_name.rs
deleted file mode 100644
index eaf6da5ae848..000000000000
--- a/src/common/meta/src/key/flow_task/flow_task_name.rs
+++ /dev/null
@@ -1,208 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use lazy_static::lazy_static;
-use regex::Regex;
-use serde::{Deserialize, Serialize};
-use snafu::OptionExt;
-
-use crate::error::{self, Result};
-use crate::key::flow_task::FlowTaskScoped;
-use crate::key::scope::{CatalogScoped, MetaKey};
-use crate::key::txn_helper::TxnOpGetResponseSet;
-use crate::key::{
- txn_helper, DeserializedValueWithBytes, FlowTaskId, TableMetaValue, NAME_PATTERN,
-};
-use crate::kv_backend::txn::Txn;
-use crate::kv_backend::KvBackendRef;
-
-const FLOW_TASK_NAME_KEY_PREFIX: &str = "name";
-
-lazy_static! {
- static ref FLOW_TASK_NAME_KEY_PATTERN: Regex =
- Regex::new(&format!("^{FLOW_TASK_NAME_KEY_PREFIX}/({NAME_PATTERN})$")).unwrap();
-}
-
-/// The key of mapping {task_name} to [FlowTaskId].
-///
-/// The layout: `__flow_task/{catalog}/name/{task_name}`.
-pub struct FlowTaskNameKey(FlowTaskScoped<CatalogScoped<FlowTaskNameKeyInner>>);
-
-impl FlowTaskNameKey {
- /// Returns the [FlowTaskNameKey]
- pub fn new(catalog: String, task_name: String) -> FlowTaskNameKey {
- let inner = FlowTaskNameKeyInner::new(task_name);
- FlowTaskNameKey(FlowTaskScoped::new(CatalogScoped::new(catalog, inner)))
- }
-
- /// Returns the catalog.
- pub fn catalog(&self) -> &str {
- self.0.catalog()
- }
-
- /// Return the `task_name`
- pub fn task_name(&self) -> &str {
- &self.0.task_name
- }
-}
-
-impl MetaKey<FlowTaskNameKey> for FlowTaskNameKey {
- fn to_bytes(&self) -> Vec<u8> {
- self.0.to_bytes()
- }
-
- fn from_bytes(bytes: &[u8]) -> Result<FlowTaskNameKey> {
- Ok(FlowTaskNameKey(FlowTaskScoped::<
- CatalogScoped<FlowTaskNameKeyInner>,
- >::from_bytes(bytes)?))
- }
-}
-
-/// The key of mapping name to [FlowTaskId]
-#[derive(Debug, Clone, PartialEq, Eq)]
-pub struct FlowTaskNameKeyInner {
- pub task_name: String,
-}
-
-impl MetaKey<FlowTaskNameKeyInner> for FlowTaskNameKeyInner {
- fn to_bytes(&self) -> Vec<u8> {
- format!("{FLOW_TASK_NAME_KEY_PREFIX}/{}", self.task_name).into_bytes()
- }
-
- fn from_bytes(bytes: &[u8]) -> Result<FlowTaskNameKeyInner> {
- let key = std::str::from_utf8(bytes).map_err(|e| {
- error::InvalidTableMetadataSnafu {
- err_msg: format!(
- "FlowTaskNameKeyInner '{}' is not a valid UTF8 string: {e}",
- String::from_utf8_lossy(bytes)
- ),
- }
- .build()
- })?;
- let captures =
- FLOW_TASK_NAME_KEY_PATTERN
- .captures(key)
- .context(error::InvalidTableMetadataSnafu {
- err_msg: format!("Invalid FlowTaskNameKeyInner '{key}'"),
- })?;
- // Safety: pass the regex check above
- let task = captures[1].to_string();
- Ok(FlowTaskNameKeyInner { task_name: task })
- }
-}
-
-impl FlowTaskNameKeyInner {
- /// Returns a [FlowTaskNameKeyInner].
- pub fn new(task: String) -> Self {
- Self { task_name: task }
- }
-}
-
-/// The value of [FlowTaskNameKey].
-#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
-pub struct FlowTaskNameValue {
- flow_task_id: FlowTaskId,
-}
-
-impl FlowTaskNameValue {
- /// Returns a [FlowTaskNameValue] with specified [FlowTaskId].
- pub fn new(flow_task_id: FlowTaskId) -> Self {
- Self { flow_task_id }
- }
-
- /// Returns the [FlowTaskId]
- pub fn flow_task_id(&self) -> FlowTaskId {
- self.flow_task_id
- }
-}
-
-/// The manager of [FlowTaskNameKey].
-pub struct FlowTaskNameManager {
- kv_backend: KvBackendRef,
-}
-
-impl FlowTaskNameManager {
- /// Returns a new [FlowTaskNameManager].
- pub fn new(kv_backend: KvBackendRef) -> Self {
- Self { kv_backend }
- }
-
- /// Returns the [FlowTaskNameValue] of specified `catalog.task`.
- pub async fn get(&self, catalog: &str, task: &str) -> Result<Option<FlowTaskNameValue>> {
- let key = FlowTaskNameKey::new(catalog.to_string(), task.to_string());
- let raw_key = key.to_bytes();
- self.kv_backend
- .get(&raw_key)
- .await?
- .map(|x| FlowTaskNameValue::try_from_raw_value(&x.value))
- .transpose()
- }
-
- /// Returns true if the `task` exists.
- pub async fn exists(&self, catalog: &str, task: &str) -> Result<bool> {
- let key = FlowTaskNameKey::new(catalog.to_string(), task.to_string());
- let raw_key = key.to_bytes();
- self.kv_backend.exists(&raw_key).await
- }
-
- /// Builds a create flow task name transaction.
- /// It's expected that the `__flow_task/{catalog}/name/{task_name}` wasn't occupied.
- /// Otherwise, the transaction will retrieve existing value.
- pub fn build_create_txn(
- &self,
- catalog: &str,
- name: &str,
- flow_task_id: FlowTaskId,
- ) -> Result<(
- Txn,
- impl FnOnce(
- &mut TxnOpGetResponseSet,
- ) -> Result<Option<DeserializedValueWithBytes<FlowTaskNameValue>>>,
- )> {
- let key = FlowTaskNameKey::new(catalog.to_string(), name.to_string());
- let raw_key = key.to_bytes();
- let flow_task_name_value = FlowTaskNameValue::new(flow_task_id);
- let txn = txn_helper::build_put_if_absent_txn(
- raw_key.clone(),
- flow_task_name_value.try_as_raw_value()?,
- );
-
- Ok((
- txn,
- TxnOpGetResponseSet::decode_with(TxnOpGetResponseSet::filter(raw_key)),
- ))
- }
-}
-
-#[cfg(test)]
-mod tests {
- use super::*;
-
- #[test]
- fn test_key_serialization() {
- let table_task_key = FlowTaskNameKey::new("my_catalog".to_string(), "my_task".to_string());
- assert_eq!(
- b"__flow_task/my_catalog/name/my_task".to_vec(),
- table_task_key.to_bytes(),
- );
- }
-
- #[test]
- fn test_key_deserialization() {
- let bytes = b"__flow_task/my_catalog/name/my_task".to_vec();
- let key = FlowTaskNameKey::from_bytes(&bytes).unwrap();
- assert_eq!(key.catalog(), "my_catalog");
- assert_eq!(key.task_name(), "my_task");
- }
-}
diff --git a/src/common/meta/src/key/flow_task/flownode_task.rs b/src/common/meta/src/key/flow_task/flownode_task.rs
deleted file mode 100644
index bacff5326e08..000000000000
--- a/src/common/meta/src/key/flow_task/flownode_task.rs
+++ /dev/null
@@ -1,259 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use std::sync::Arc;
-
-use futures::stream::BoxStream;
-use futures::TryStreamExt;
-use lazy_static::lazy_static;
-use regex::Regex;
-use snafu::OptionExt;
-
-use crate::error::{self, Result};
-use crate::key::flow_task::FlowTaskScoped;
-use crate::key::scope::{BytesAdapter, CatalogScoped, MetaKey};
-use crate::key::{FlowTaskId, FlowTaskPartitionId};
-use crate::kv_backend::txn::{Txn, TxnOp};
-use crate::kv_backend::KvBackendRef;
-use crate::range_stream::{PaginationStream, DEFAULT_PAGE_SIZE};
-use crate::rpc::store::RangeRequest;
-use crate::rpc::KeyValue;
-use crate::FlownodeId;
-
-lazy_static! {
- static ref FLOWNODE_TASK_KEY_PATTERN: Regex = Regex::new(&format!(
- "^{FLOWNODE_TASK_KEY_PREFIX}/([0-9]+)/([0-9]+)/([0-9]+)$"
- ))
- .unwrap();
-}
-
-const FLOWNODE_TASK_KEY_PREFIX: &str = "flownode";
-
-/// The key of mapping [FlownodeId] to [FlowTaskId].
-///
-/// The layout `__flow_task/{catalog}/flownode/{flownode_id}/{flow_task_id}/{partition_id}`
-pub struct FlownodeTaskKey(FlowTaskScoped<CatalogScoped<FlownodeTaskKeyInner>>);
-
-impl MetaKey<FlownodeTaskKey> for FlownodeTaskKey {
- fn to_bytes(&self) -> Vec<u8> {
- self.0.to_bytes()
- }
-
- fn from_bytes(bytes: &[u8]) -> Result<FlownodeTaskKey> {
- Ok(FlownodeTaskKey(FlowTaskScoped::<
- CatalogScoped<FlownodeTaskKeyInner>,
- >::from_bytes(bytes)?))
- }
-}
-
-impl FlownodeTaskKey {
- /// Returns a new [FlownodeTaskKey].
- pub fn new(
- catalog: String,
- flownode_id: FlownodeId,
- flow_task_id: FlowTaskId,
- partition_id: FlowTaskPartitionId,
- ) -> FlownodeTaskKey {
- let inner = FlownodeTaskKeyInner::new(flownode_id, flow_task_id, partition_id);
- FlownodeTaskKey(FlowTaskScoped::new(CatalogScoped::new(catalog, inner)))
- }
-
- /// The prefix used to retrieve all [FlownodeTaskKey]s with the specified `flownode_id`.
- pub fn range_start_key(catalog: String, flownode_id: FlownodeId) -> Vec<u8> {
- let catalog_scoped_key = CatalogScoped::new(
- catalog,
- BytesAdapter::from(FlownodeTaskKeyInner::range_start_key(flownode_id).into_bytes()),
- );
-
- FlowTaskScoped::new(catalog_scoped_key).to_bytes()
- }
-
- /// Returns the catalog.
- pub fn catalog(&self) -> &str {
- self.0.catalog()
- }
-
- /// Returns the [FlowTaskId].
- pub fn flow_task_id(&self) -> FlowTaskId {
- self.0.flow_task_id
- }
-
- /// Returns the [FlownodeId].
- pub fn flownode_id(&self) -> FlownodeId {
- self.0.flownode_id
- }
-
- /// Returns the [PartitionId].
- pub fn partition_id(&self) -> FlowTaskPartitionId {
- self.0.partition_id
- }
-}
-
-/// The key of mapping [FlownodeId] to [FlowTaskId].
-pub struct FlownodeTaskKeyInner {
- flownode_id: FlownodeId,
- flow_task_id: FlowTaskId,
- partition_id: FlowTaskPartitionId,
-}
-
-impl FlownodeTaskKeyInner {
- /// Returns a [FlownodeTaskKey] with the specified `flownode_id`, `flow_task_id` and `partition_id`.
- pub fn new(
- flownode_id: FlownodeId,
- flow_task_id: FlowTaskId,
- partition_id: FlowTaskPartitionId,
- ) -> Self {
- Self {
- flownode_id,
- flow_task_id,
- partition_id,
- }
- }
-
- fn prefix(flownode_id: FlownodeId) -> String {
- format!("{}/{flownode_id}", FLOWNODE_TASK_KEY_PREFIX)
- }
-
- /// The prefix used to retrieve all [FlownodeTaskKey]s with the specified `flownode_id`.
- fn range_start_key(flownode_id: FlownodeId) -> String {
- format!("{}/", Self::prefix(flownode_id))
- }
-}
-
-impl MetaKey<FlownodeTaskKeyInner> for FlownodeTaskKeyInner {
- fn to_bytes(&self) -> Vec<u8> {
- format!(
- "{FLOWNODE_TASK_KEY_PREFIX}/{}/{}/{}",
- self.flownode_id, self.flow_task_id, self.partition_id,
- )
- .into_bytes()
- }
-
- fn from_bytes(bytes: &[u8]) -> Result<FlownodeTaskKeyInner> {
- let key = std::str::from_utf8(bytes).map_err(|e| {
- error::InvalidTableMetadataSnafu {
- err_msg: format!(
- "FlownodeTaskKeyInner '{}' is not a valid UTF8 string: {e}",
- String::from_utf8_lossy(bytes)
- ),
- }
- .build()
- })?;
- let captures =
- FLOWNODE_TASK_KEY_PATTERN
- .captures(key)
- .context(error::InvalidTableMetadataSnafu {
- err_msg: format!("Invalid FlownodeTaskKeyInner '{key}'"),
- })?;
- // Safety: pass the regex check above
- let flownode_id = captures[1].parse::<FlownodeId>().unwrap();
- let flow_task_id = captures[2].parse::<FlowTaskId>().unwrap();
- let partition_id = captures[3].parse::<FlowTaskPartitionId>().unwrap();
-
- Ok(FlownodeTaskKeyInner {
- flownode_id,
- flow_task_id,
- partition_id,
- })
- }
-}
-
-/// The manager of [FlownodeTaskKey].
-pub struct FlownodeTaskManager {
- kv_backend: KvBackendRef,
-}
-
-/// Decodes `KeyValue` to [FlownodeTaskKey].
-pub fn flownode_task_key_decoder(kv: KeyValue) -> Result<FlownodeTaskKey> {
- FlownodeTaskKey::from_bytes(&kv.key)
-}
-
-impl FlownodeTaskManager {
- /// Returns a new [FlownodeTaskManager].
- pub fn new(kv_backend: KvBackendRef) -> Self {
- Self { kv_backend }
- }
-
- /// Retrieves all [FlowTaskId] and [PartitionId]s of the specified `flownode_id`.
- pub fn tasks(
- &self,
- catalog: &str,
- flownode_id: FlownodeId,
- ) -> BoxStream<'static, Result<(FlowTaskId, FlowTaskPartitionId)>> {
- let start_key = FlownodeTaskKey::range_start_key(catalog.to_string(), flownode_id);
- let req = RangeRequest::new().with_prefix(start_key);
-
- let stream = PaginationStream::new(
- self.kv_backend.clone(),
- req,
- DEFAULT_PAGE_SIZE,
- Arc::new(flownode_task_key_decoder),
- );
-
- Box::pin(stream.map_ok(|key| (key.flow_task_id(), key.partition_id())))
- }
-
- /// Builds a create flownode task transaction.
- ///
- /// Puts `__flownode_task/{flownode_id}/{flow_task_id}/{partition_id}` keys.
- pub(crate) fn build_create_txn<I: IntoIterator<Item = (FlowTaskPartitionId, FlownodeId)>>(
- &self,
- catalog: &str,
- flow_task_id: FlowTaskId,
- flownode_ids: I,
- ) -> Txn {
- let txns = flownode_ids
- .into_iter()
- .map(|(partition_id, flownode_id)| {
- let key = FlownodeTaskKey::new(
- catalog.to_string(),
- flownode_id,
- flow_task_id,
- partition_id,
- )
- .to_bytes();
- TxnOp::Put(key, vec![])
- })
- .collect::<Vec<_>>();
-
- Txn::new().and_then(txns)
- }
-}
-
-#[cfg(test)]
-mod tests {
- use crate::key::flow_task::flownode_task::FlownodeTaskKey;
- use crate::key::scope::MetaKey;
-
- #[test]
- fn test_key_serialization() {
- let flownode_task = FlownodeTaskKey::new("my_catalog".to_string(), 1, 2, 0);
- assert_eq!(
- b"__flow_task/my_catalog/flownode/1/2/0".to_vec(),
- flownode_task.to_bytes()
- );
- let prefix = FlownodeTaskKey::range_start_key("my_catalog".to_string(), 1);
- assert_eq!(b"__flow_task/my_catalog/flownode/1/".to_vec(), prefix);
- }
-
- #[test]
- fn test_key_deserialization() {
- let bytes = b"__flow_task/my_catalog/flownode/1/2/0".to_vec();
- let key = FlownodeTaskKey::from_bytes(&bytes).unwrap();
- assert_eq!(key.catalog(), "my_catalog");
- assert_eq!(key.flownode_id(), 1);
- assert_eq!(key.flow_task_id(), 2);
- assert_eq!(key.partition_id(), 0);
- }
-}
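The deleted modules above all build their storage keys by joining fixed segments with "/" under the __flow_task/{catalog}/... scope: info/{flow_task_id}, name/{task_name}, and flownode/{flownode_id}/{flow_task_id}/{partition_id}. Below is a minimal stand-alone sketch of that layout for the flownode key; the function names and the concrete integer types are illustrative stand-ins, not the crate's API.

// Hypothetical helpers mirroring the key layout documented in the modules above.
fn encode_flownode_task_key(
    catalog: &str,
    flownode_id: u64,
    flow_task_id: u32,
    partition_id: u32,
) -> Vec<u8> {
    format!("__flow_task/{catalog}/flownode/{flownode_id}/{flow_task_id}/{partition_id}")
        .into_bytes()
}

fn decode_flownode_task_key(bytes: &[u8]) -> Option<(String, u64, u32, u32)> {
    let key = std::str::from_utf8(bytes).ok()?;
    // Expected segments: __flow_task / {catalog} / flownode / {flownode_id} / {flow_task_id} / {partition_id}
    let mut parts = key.split('/');
    if parts.next()? != "__flow_task" {
        return None;
    }
    let catalog = parts.next()?.to_string();
    if parts.next()? != "flownode" {
        return None;
    }
    let flownode_id = parts.next()?.parse().ok()?;
    let flow_task_id = parts.next()?.parse().ok()?;
    let partition_id = parts.next()?.parse().ok()?;
    Some((catalog, flownode_id, flow_task_id, partition_id))
}

fn main() {
    let key = encode_flownode_task_key("my_catalog", 1, 2, 0);
    assert_eq!(key, b"__flow_task/my_catalog/flownode/1/2/0".to_vec());
    let decoded = decode_flownode_task_key(&key).unwrap();
    assert_eq!(decoded, ("my_catalog".to_string(), 1, 2, 0));
}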
diff --git a/src/common/meta/src/lock_key.rs b/src/common/meta/src/lock_key.rs
index 456d1ccffad7..7fbc07655ea7 100644
--- a/src/common/meta/src/lock_key.rs
+++ b/src/common/meta/src/lock_key.rs
@@ -22,7 +22,7 @@ const CATALOG_LOCK_PREFIX: &str = "__catalog_lock";
const SCHEMA_LOCK_PREFIX: &str = "__schema_lock";
const TABLE_LOCK_PREFIX: &str = "__table_lock";
const TABLE_NAME_LOCK_PREFIX: &str = "__table_name_lock";
-const FLOW_TASK_NAME_LOCK_PREFIX: &str = "__flow_task_name_lock";
+const FLOW_NAME_LOCK_PREFIX: &str = "__flow_name_lock";
const REGION_LOCK_PREFIX: &str = "__region_lock";
/// [CatalogLock] acquires the lock on the tenant level.
@@ -111,28 +111,28 @@ impl From<TableNameLock> for StringKey {
}
}
-/// [FlowTaskNameLock] prevents any procedures trying to create a flow task named it.
-pub enum FlowTaskNameLock {
+/// [FlowNameLock] prevents any procedures trying to create a flow named it.
+pub enum FlowNameLock {
Write(String),
}
-impl FlowTaskNameLock {
+impl FlowNameLock {
pub fn new(catalog: &str, table: &str) -> Self {
Self::Write(format!("{catalog}.{table}"))
}
}
-impl Display for FlowTaskNameLock {
+impl Display for FlowNameLock {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- let FlowTaskNameLock::Write(name) = self;
- write!(f, "{}/{}", FLOW_TASK_NAME_LOCK_PREFIX, name)
+ let FlowNameLock::Write(name) = self;
+ write!(f, "{}/{}", FLOW_NAME_LOCK_PREFIX, name)
}
}
-impl From<FlowTaskNameLock> for StringKey {
- fn from(value: FlowTaskNameLock) -> Self {
+impl From<FlowNameLock> for StringKey {
+ fn from(value: FlowNameLock) -> Self {
match value {
- FlowTaskNameLock::Write(_) => StringKey::Exclusive(value.to_string()),
+ FlowNameLock::Write(_) => StringKey::Exclusive(value.to_string()),
}
}
}
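The rename above keeps the lock-key format itself unchanged: FlowNameLock::new(catalog, flow_name) stores "{catalog}.{flow_name}", Display prefixes it with __flow_name_lock, and the conversion into StringKey makes the lock exclusive. A tiny stand-alone sketch of the resulting string; the helper below is hypothetical, not the crate's API.

const FLOW_NAME_LOCK_PREFIX: &str = "__flow_name_lock";

// Hypothetical helper reproducing the Display output of FlowNameLock.
fn flow_name_lock_key(catalog: &str, flow_name: &str) -> String {
    format!("{FLOW_NAME_LOCK_PREFIX}/{catalog}.{flow_name}")
}

fn main() {
    assert_eq!(
        flow_name_lock_key("greptime", "my_flow"),
        "__flow_name_lock/greptime.my_flow"
    );
}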
diff --git a/src/common/meta/src/metrics.rs b/src/common/meta/src/metrics.rs
index 0a47b1de1463..34bb95dc0cb7 100644
--- a/src/common/meta/src/metrics.rs
+++ b/src/common/meta/src/metrics.rs
@@ -39,9 +39,9 @@ lazy_static! {
&["step"]
)
.unwrap();
- pub static ref METRIC_META_PROCEDURE_CREATE_FLOW_TASK: HistogramVec = register_histogram_vec!(
- "greptime_meta_procedure_create_flow_task",
- "meta procedure create flow task",
+ pub static ref METRIC_META_PROCEDURE_CREATE_FLOW: HistogramVec = register_histogram_vec!(
+ "greptime_meta_procedure_create_flow",
+ "meta procedure create flow",
&["step"]
)
.unwrap();
diff --git a/src/common/meta/src/rpc/ddl.rs b/src/common/meta/src/rpc/ddl.rs
index 911cab18df4c..9b75bd6c3963 100644
--- a/src/common/meta/src/rpc/ddl.rs
+++ b/src/common/meta/src/rpc/ddl.rs
@@ -26,8 +26,8 @@ use api::v1::meta::{
TruncateTableTask as PbTruncateTableTask,
};
use api::v1::{
- AlterExpr, CreateDatabaseExpr, CreateFlowTaskExpr, CreateTableExpr, DropDatabaseExpr,
- DropFlowTaskExpr, DropTableExpr, TruncateTableExpr,
+ AlterExpr, CreateDatabaseExpr, CreateFlowExpr, CreateTableExpr, DropDatabaseExpr, DropFlowExpr,
+ DropTableExpr, TruncateTableExpr,
};
use base64::engine::general_purpose;
use base64::Engine as _;
@@ -729,11 +729,11 @@ impl TryFrom<DropDatabaseTask> for PbDropDatabaseTask {
}
}
-/// Create flow task
+/// Create flow
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CreateFlowTask {
pub catalog_name: String,
- pub task_name: String,
+ pub flow_name: String,
pub source_table_names: Vec<TableName>,
pub sink_table_name: TableName,
pub or_replace: bool,
@@ -741,16 +741,16 @@ pub struct CreateFlowTask {
pub expire_when: String,
pub comment: String,
pub sql: String,
- pub options: HashMap<String, String>,
+ pub flow_options: HashMap<String, String>,
}
impl TryFrom<PbCreateFlowTask> for CreateFlowTask {
type Error = error::Error;
fn try_from(pb: PbCreateFlowTask) -> Result<Self> {
- let CreateFlowTaskExpr {
+ let CreateFlowExpr {
catalog_name,
- task_name,
+ flow_name,
source_table_names,
sink_table_name,
or_replace,
@@ -758,14 +758,14 @@ impl TryFrom<PbCreateFlowTask> for CreateFlowTask {
expire_when,
comment,
sql,
- task_options,
- } = pb.create_flow_task.context(error::InvalidProtoMsgSnafu {
- err_msg: "expected create_flow_task",
+ flow_options,
+ } = pb.create_flow.context(error::InvalidProtoMsgSnafu {
+ err_msg: "expected create_flow",
})?;
Ok(CreateFlowTask {
catalog_name,
- task_name,
+ flow_name,
source_table_names: source_table_names.into_iter().map(Into::into).collect(),
sink_table_name: sink_table_name
.context(error::InvalidProtoMsgSnafu {
@@ -777,7 +777,7 @@ impl TryFrom<PbCreateFlowTask> for CreateFlowTask {
expire_when,
comment,
sql,
- options: task_options,
+ flow_options,
})
}
}
@@ -786,7 +786,7 @@ impl From<CreateFlowTask> for PbCreateFlowTask {
fn from(
CreateFlowTask {
catalog_name,
- task_name,
+ flow_name,
source_table_names,
sink_table_name,
or_replace,
@@ -794,13 +794,13 @@ impl From<CreateFlowTask> for PbCreateFlowTask {
expire_when,
comment,
sql,
- options,
+ flow_options,
}: CreateFlowTask,
) -> Self {
PbCreateFlowTask {
- create_flow_task: Some(CreateFlowTaskExpr {
+ create_flow: Some(CreateFlowExpr {
catalog_name,
- task_name,
+ flow_name,
source_table_names: source_table_names.into_iter().map(Into::into).collect(),
sink_table_name: Some(sink_table_name.into()),
or_replace,
@@ -808,31 +808,31 @@ impl From<CreateFlowTask> for PbCreateFlowTask {
expire_when,
comment,
sql,
- task_options: options,
+ flow_options,
}),
}
}
}
-/// Drop flow task
+/// Drop flow
pub struct DropFlowTask {
pub catalog_name: String,
- pub task_name: String,
+ pub flow_name: String,
}
impl TryFrom<PbDropFlowTask> for DropFlowTask {
type Error = error::Error;
fn try_from(pb: PbDropFlowTask) -> Result<Self> {
- let DropFlowTaskExpr {
+ let DropFlowExpr {
catalog_name,
- task_name,
- } = pb.drop_flow_task.context(error::InvalidProtoMsgSnafu {
+ flow_name,
+ } = pb.drop_flow.context(error::InvalidProtoMsgSnafu {
err_msg: "expected sink_table_name",
})?;
Ok(DropFlowTask {
catalog_name,
- task_name,
+ flow_name,
})
}
}
@@ -841,13 +841,13 @@ impl From<DropFlowTask> for PbDropFlowTask {
fn from(
DropFlowTask {
catalog_name,
- task_name,
+ flow_name,
}: DropFlowTask,
) -> Self {
PbDropFlowTask {
- drop_flow_task: Some(DropFlowTaskExpr {
+ drop_flow: Some(DropFlowExpr {
catalog_name,
- task_name,
+ flow_name,
}),
}
}
diff --git a/src/common/meta/src/test_util.rs b/src/common/meta/src/test_util.rs
index fe5bf0c439be..7c9ed13f5e88 100644
--- a/src/common/meta/src/test_util.rs
+++ b/src/common/meta/src/test_util.rs
@@ -20,11 +20,11 @@ pub use common_base::AffectedRows;
use common_recordbatch::SendableRecordBatchStream;
use crate::cache_invalidator::DummyCacheInvalidator;
+use crate::ddl::flow_meta::FlowMetadataAllocator;
use crate::ddl::table_meta::TableMetadataAllocator;
-use crate::ddl::task_meta::FlowTaskMetadataAllocator;
use crate::ddl::DdlContext;
use crate::error::Result;
-use crate::key::flow_task::FlowTaskMetadataManager;
+use crate::key::flow::FlowMetadataManager;
use crate::key::TableMetadataManager;
use crate::kv_backend::memory::MemoryKvBackend;
use crate::kv_backend::KvBackendRef;
@@ -109,21 +109,20 @@ pub fn new_ddl_context_with_kv_backend(
),
Arc::new(WalOptionsAllocator::default()),
));
- let flow_task_metadata_manager = Arc::new(FlowTaskMetadataManager::new(kv_backend.clone()));
- let flow_task_metadata_allocator = Arc::new(
- FlowTaskMetadataAllocator::with_noop_peer_allocator(Arc::new(
+ let flow_metadata_manager = Arc::new(FlowMetadataManager::new(kv_backend.clone()));
+ let flow_metadata_allocator =
+ Arc::new(FlowMetadataAllocator::with_noop_peer_allocator(Arc::new(
SequenceBuilder::new("flow-test", kv_backend)
.initial(1024)
.build(),
- )),
- );
+ )));
DdlContext {
node_manager,
cache_invalidator: Arc::new(DummyCacheInvalidator),
memory_region_keeper: Arc::new(MemoryRegionKeeper::new()),
table_metadata_allocator,
table_metadata_manager,
- flow_task_metadata_allocator,
- flow_task_metadata_manager,
+ flow_metadata_allocator,
+ flow_metadata_manager,
}
}
diff --git a/src/frontend/src/instance/grpc.rs b/src/frontend/src/instance/grpc.rs
index 551a7da85d31..2009bc56381c 100644
--- a/src/frontend/src/instance/grpc.rs
+++ b/src/frontend/src/instance/grpc.rs
@@ -137,10 +137,10 @@ impl GrpcQueryHandler for Instance {
TableName::new(&expr.catalog_name, &expr.schema_name, &expr.table_name);
self.statement_executor.truncate_table(table_name).await?
}
- DdlExpr::CreateFlowTask(_) => {
+ DdlExpr::CreateFlow(_) => {
unimplemented!()
}
- DdlExpr::DropFlowTask(_) => {
+ DdlExpr::DropFlow(_) => {
unimplemented!()
}
}
@@ -181,12 +181,12 @@ fn fill_catalog_and_schema_from_context(ddl_expr: &mut DdlExpr, ctx: &QueryConte
Expr::TruncateTable(expr) => {
check_and_fill!(expr);
}
- Expr::CreateFlowTask(expr) => {
+ Expr::CreateFlow(expr) => {
if expr.catalog_name.is_empty() {
expr.catalog_name = catalog.to_string();
}
}
- Expr::DropFlowTask(expr) => {
+ Expr::DropFlow(expr) => {
if expr.catalog_name.is_empty() {
expr.catalog_name = catalog.to_string();
}
diff --git a/src/meta-srv/src/metasrv.rs b/src/meta-srv/src/metasrv.rs
index 308d08a20ae0..bb0e0255f1db 100644
--- a/src/meta-srv/src/metasrv.rs
+++ b/src/meta-srv/src/metasrv.rs
@@ -59,7 +59,7 @@ use crate::service::store::cached_kv::LeaderCachedKvBackend;
use crate::state::{become_follower, become_leader, StateRef};
pub const TABLE_ID_SEQ: &str = "table_id";
-pub const FLOW_TASK_ID_SEQ: &str = "flow_id";
+pub const FLOW_ID_SEQ: &str = "flow_id";
pub const METASRV_HOME: &str = "/tmp/metasrv";
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
diff --git a/src/meta-srv/src/metasrv/builder.rs b/src/meta-srv/src/metasrv/builder.rs
index ab17088745c5..a2cd95e67b2c 100644
--- a/src/meta-srv/src/metasrv/builder.rs
+++ b/src/meta-srv/src/metasrv/builder.rs
@@ -18,14 +18,14 @@ use std::time::Duration;
use client::client_manager::DatanodeClients;
use common_base::Plugins;
-use common_catalog::consts::{MIN_USER_FLOW_TASK_ID, MIN_USER_TABLE_ID};
+use common_catalog::consts::{MIN_USER_FLOW_ID, MIN_USER_TABLE_ID};
use common_grpc::channel_manager::ChannelConfig;
+use common_meta::ddl::flow_meta::FlowMetadataAllocator;
use common_meta::ddl::table_meta::{TableMetadataAllocator, TableMetadataAllocatorRef};
-use common_meta::ddl::task_meta::FlowTaskMetadataAllocator;
use common_meta::ddl::DdlContext;
use common_meta::ddl_manager::DdlManager;
use common_meta::distributed_time_constants;
-use common_meta::key::flow_task::FlowTaskMetadataManager;
+use common_meta::key::flow::FlowMetadataManager;
use common_meta::key::TableMetadataManager;
use common_meta::kv_backend::memory::MemoryKvBackend;
use common_meta::kv_backend::{KvBackendRef, ResettableKvBackendRef};
@@ -38,7 +38,7 @@ use common_procedure::local::{LocalManager, ManagerConfig};
use common_procedure::ProcedureManagerRef;
use snafu::ResultExt;
-use super::FLOW_TASK_ID_SEQ;
+use super::FLOW_ID_SEQ;
use crate::cache_invalidator::MetasrvCacheInvalidator;
use crate::cluster::{MetaPeerClientBuilder, MetaPeerClientRef};
use crate::error::{self, Result};
@@ -205,7 +205,7 @@ impl MetasrvBuilder {
let table_metadata_manager = Arc::new(TableMetadataManager::new(
leader_cached_kv_backend.clone() as _,
));
- let flow_task_metadata_manager = Arc::new(FlowTaskMetadataManager::new(
+ let flow_metadata_manager = Arc::new(FlowMetadataManager::new(
leader_cached_kv_backend.clone() as _,
));
let lock = lock.unwrap_or_else(|| Arc::new(MemLock::default()));
@@ -239,14 +239,13 @@ impl MetasrvBuilder {
))
});
// TODO(weny): use the real allocator.
- let flow_task_metadata_allocator = Arc::new(
- FlowTaskMetadataAllocator::with_noop_peer_allocator(Arc::new(
- SequenceBuilder::new(FLOW_TASK_ID_SEQ, kv_backend.clone())
- .initial(MIN_USER_FLOW_TASK_ID as u64)
+ let flow_metadata_allocator =
+ Arc::new(FlowMetadataAllocator::with_noop_peer_allocator(Arc::new(
+ SequenceBuilder::new(FLOW_ID_SEQ, kv_backend.clone())
+ .initial(MIN_USER_FLOW_ID as u64)
.step(10)
.build(),
- )),
- );
+ )));
let memory_region_keeper = Arc::new(MemoryRegionKeeper::default());
let node_manager = node_manager.unwrap_or_else(|| {
let datanode_client_channel_config = ChannelConfig::new()
@@ -273,8 +272,8 @@ impl MetasrvBuilder {
memory_region_keeper: memory_region_keeper.clone(),
table_metadata_manager: table_metadata_manager.clone(),
table_metadata_allocator: table_metadata_allocator.clone(),
- flow_task_metadata_manager: flow_task_metadata_manager.clone(),
- flow_task_metadata_allocator: flow_task_metadata_allocator.clone(),
+ flow_metadata_manager: flow_metadata_manager.clone(),
+ flow_metadata_allocator: flow_metadata_allocator.clone(),
},
procedure_manager.clone(),
true,
diff --git a/src/meta-srv/src/procedure/utils.rs b/src/meta-srv/src/procedure/utils.rs
index 55a9119db3ed..042c31a78741 100644
--- a/src/meta-srv/src/procedure/utils.rs
+++ b/src/meta-srv/src/procedure/utils.rs
@@ -105,10 +105,10 @@ pub mod test_data {
use chrono::DateTime;
use common_catalog::consts::MITO2_ENGINE;
+ use common_meta::ddl::flow_meta::FlowMetadataAllocator;
use common_meta::ddl::table_meta::TableMetadataAllocator;
- use common_meta::ddl::task_meta::FlowTaskMetadataAllocator;
use common_meta::ddl::DdlContext;
- use common_meta::key::flow_task::FlowTaskMetadataManager;
+ use common_meta::key::flow::FlowMetadataManager;
use common_meta::key::TableMetadataManager;
use common_meta::kv_backend::memory::MemoryKvBackend;
use common_meta::node_manager::NodeManagerRef;
@@ -201,11 +201,10 @@ pub mod test_data {
Arc::new(SequenceBuilder::new("test", kv_backend.clone()).build()),
Arc::new(WalOptionsAllocator::default()),
));
- let flow_task_metadata_manager = Arc::new(FlowTaskMetadataManager::new(kv_backend.clone()));
- let flow_task_metadata_allocator =
- Arc::new(FlowTaskMetadataAllocator::with_noop_peer_allocator(
- Arc::new(SequenceBuilder::new("test", kv_backend).build()),
- ));
+ let flow_metadata_manager = Arc::new(FlowMetadataManager::new(kv_backend.clone()));
+ let flow_metadata_allocator = Arc::new(FlowMetadataAllocator::with_noop_peer_allocator(
+ Arc::new(SequenceBuilder::new("test", kv_backend).build()),
+ ));
DdlContext {
node_manager,
cache_invalidator: Arc::new(MetasrvCacheInvalidator::new(
@@ -216,8 +215,8 @@ pub mod test_data {
)),
table_metadata_manager,
table_metadata_allocator,
- flow_task_metadata_manager,
- flow_task_metadata_allocator,
+ flow_metadata_manager,
+ flow_metadata_allocator,
memory_region_keeper: Arc::new(MemoryRegionKeeper::new()),
}
}
diff --git a/src/operator/src/expr_factory.rs b/src/operator/src/expr_factory.rs
index 4e7aef084fb2..f0305ed981a5 100644
--- a/src/operator/src/expr_factory.rs
+++ b/src/operator/src/expr_factory.rs
@@ -541,7 +541,7 @@ pub fn to_create_flow_task_expr(
Ok(CreateFlowTask {
catalog_name: query_ctx.current_catalog().to_string(),
- task_name: create_flow.flow_name.to_string(),
+ flow_name: create_flow.flow_name.to_string(),
source_table_names,
sink_table_name,
or_replace: create_flow.or_replace,
@@ -552,7 +552,7 @@ pub fn to_create_flow_task_expr(
.unwrap_or_default(),
comment: create_flow.comment.unwrap_or_default(),
sql: create_flow.query.to_string(),
- options: HashMap::new(),
+ flow_options: HashMap::new(),
})
}
diff --git a/src/sql/src/parsers/create_parser.rs b/src/sql/src/parsers/create_parser.rs
index 1d333c0069ef..4876931eea56 100644
--- a/src/sql/src/parsers/create_parser.rs
+++ b/src/sql/src/parsers/create_parser.rs
@@ -175,7 +175,7 @@ impl<'a> ParserContext<'a> {
fn parse_create_flow(&mut self, or_replace: bool) -> Result<Statement> {
let if_not_exists = self.parse_if_not_exist()?;
- let task_name = self.intern_parse_table_name()?;
+ let flow_name = self.intern_parse_table_name()?;
self.parser
.expect_token(&Token::make_keyword(SINK))
@@ -219,7 +219,7 @@ impl<'a> ParserContext<'a> {
let query = Box::new(self.parser.parse_query().context(error::SyntaxSnafu)?);
Ok(Statement::CreateFlow(CreateFlow {
- flow_name: task_name,
+ flow_name,
sink_table_name: output_table_name,
or_replace,
if_not_exists,
diff --git a/tests-integration/src/standalone.rs b/tests-integration/src/standalone.rs
index 0931e87d05d4..2748291957bc 100644
--- a/tests-integration/src/standalone.rs
+++ b/tests-integration/src/standalone.rs
@@ -17,14 +17,14 @@ use std::sync::Arc;
use catalog::kvbackend::KvBackendCatalogManager;
use cmd::options::MixOptions;
use common_base::Plugins;
-use common_catalog::consts::{MIN_USER_FLOW_TASK_ID, MIN_USER_TABLE_ID};
+use common_catalog::consts::{MIN_USER_FLOW_ID, MIN_USER_TABLE_ID};
use common_config::KvBackendConfig;
use common_meta::cache_invalidator::MultiCacheInvalidator;
+use common_meta::ddl::flow_meta::FlowMetadataAllocator;
use common_meta::ddl::table_meta::TableMetadataAllocator;
-use common_meta::ddl::task_meta::FlowTaskMetadataAllocator;
use common_meta::ddl::DdlContext;
use common_meta::ddl_manager::DdlManager;
-use common_meta::key::flow_task::FlowTaskMetadataManager;
+use common_meta::key::flow::FlowMetadataManager;
use common_meta::key::TableMetadataManager;
use common_meta::kv_backend::KvBackendRef;
use common_meta::region_keeper::MemoryRegionKeeper;
@@ -38,7 +38,7 @@ use datanode::datanode::DatanodeBuilder;
use frontend::frontend::FrontendOptions;
use frontend::instance::builder::FrontendBuilder;
use frontend::instance::{FrontendInstance, Instance, StandaloneDatanodeManager};
-use meta_srv::metasrv::{FLOW_TASK_ID_SEQ, TABLE_ID_SEQ};
+use meta_srv::metasrv::{FLOW_ID_SEQ, TABLE_ID_SEQ};
use servers::Mode;
use crate::test_util::{self, create_tmp_dir_and_datanode_opts, StorageType, TestGuard};
@@ -129,7 +129,7 @@ impl GreptimeDbStandaloneBuilder {
let table_metadata_manager = Arc::new(TableMetadataManager::new(kv_backend.clone()));
table_metadata_manager.init().await.unwrap();
- let flow_task_metadata_manager = Arc::new(FlowTaskMetadataManager::new(kv_backend.clone()));
+ let flow_metadata_manager = Arc::new(FlowMetadataManager::new(kv_backend.clone()));
let multi_cache_invalidator = Arc::new(MultiCacheInvalidator::default());
let catalog_manager =
KvBackendCatalogManager::new(kv_backend.clone(), multi_cache_invalidator.clone()).await;
@@ -142,9 +142,9 @@ impl GreptimeDbStandaloneBuilder {
.step(10)
.build(),
);
- let flow_task_id_sequence = Arc::new(
- SequenceBuilder::new(FLOW_TASK_ID_SEQ, kv_backend.clone())
- .initial(MIN_USER_FLOW_TASK_ID as u64)
+ let flow_id_sequence = Arc::new(
+ SequenceBuilder::new(FLOW_ID_SEQ, kv_backend.clone())
+ .initial(MIN_USER_FLOW_ID as u64)
.step(10)
.build(),
);
@@ -156,9 +156,9 @@ impl GreptimeDbStandaloneBuilder {
table_id_sequence,
wal_options_allocator.clone(),
));
- let flow_task_metadata_allocator = Arc::new(
- FlowTaskMetadataAllocator::with_noop_peer_allocator(flow_task_id_sequence),
- );
+ let flow_metadata_allocator = Arc::new(FlowMetadataAllocator::with_noop_peer_allocator(
+ flow_id_sequence,
+ ));
let ddl_task_executor = Arc::new(
DdlManager::try_new(
@@ -168,8 +168,8 @@ impl GreptimeDbStandaloneBuilder {
memory_region_keeper: Arc::new(MemoryRegionKeeper::default()),
table_metadata_manager,
table_metadata_allocator,
- flow_task_metadata_manager,
- flow_task_metadata_allocator,
+ flow_metadata_manager,
+ flow_metadata_allocator,
},
procedure_manager.clone(),
register_procedure_loaders,
| refactor | rename flow task to flow (#3833) |
| 1e6d2fb1fa0c7b3c81149806705b924b1e3e1c09 | 2025-02-14 14:37:48 | discord9 | feat: add snapshot seqs field to query context (#5477) | false |
diff --git a/Cargo.lock b/Cargo.lock
index d70ff012e4bb..bb2d74f28b32 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4692,7 +4692,7 @@ dependencies = [
[[package]]
name = "greptime-proto"
version = "0.1.0"
-source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=683e9d10ae7f3dfb8aaabd89082fc600c17e3795#683e9d10ae7f3dfb8aaabd89082fc600c17e3795"
+source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=e2fd89fce1fe9ea0c36c85bcf447ce4bb4a84af3#e2fd89fce1fe9ea0c36c85bcf447ce4bb4a84af3"
dependencies = [
"prost 0.13.3",
"serde",
diff --git a/Cargo.toml b/Cargo.toml
index e6bf9f3e5af0..b4543e5e4d66 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -129,7 +129,7 @@ etcd-client = "0.14"
fst = "0.4.7"
futures = "0.3"
futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "683e9d10ae7f3dfb8aaabd89082fc600c17e3795" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "e2fd89fce1fe9ea0c36c85bcf447ce4bb4a84af3" }
hex = "0.4"
http = "1"
humantime = "2.1"
diff --git a/src/common/meta/src/rpc/ddl.rs b/src/common/meta/src/rpc/ddl.rs
index bec12796e791..c880e771d8d8 100644
--- a/src/common/meta/src/rpc/ddl.rs
+++ b/src/common/meta/src/rpc/ddl.rs
@@ -1239,6 +1239,7 @@ impl From<QueryContext> for PbQueryContext {
timezone,
extensions,
channel: channel as u32,
+ snapshot_seqs: None,
}
}
}
diff --git a/src/datanode/src/region_server.rs b/src/datanode/src/region_server.rs
index 4c708002dd08..562aca93ae47 100644
--- a/src/datanode/src/region_server.rs
+++ b/src/datanode/src/region_server.rs
@@ -159,7 +159,12 @@ impl RegionServer {
self.inner.handle_request(region_id, request).await
}
- async fn table_provider(&self, region_id: RegionId) -> Result<Arc<dyn TableProvider>> {
+ /// Returns a table provider for the region. Will set snapshot sequence if available in the context.
+ async fn table_provider(
+ &self,
+ region_id: RegionId,
+ ctx: Option<&session::context::QueryContext>,
+ ) -> Result<Arc<dyn TableProvider>> {
let status = self
.inner
.region_map
@@ -173,7 +178,7 @@ impl RegionServer {
self.inner
.table_provider_factory
- .create(region_id, status.into_engine())
+ .create(region_id, status.into_engine(), ctx)
.await
.context(ExecuteLogicalPlanSnafu)
}
@@ -188,9 +193,6 @@ impl RegionServer {
} else {
None
};
- let region_id = RegionId::from_u64(request.region_id);
- let provider = self.table_provider(region_id).await?;
- let catalog_list = Arc::new(DummyCatalogList::with_table_provider(provider));
let query_ctx: QueryContextRef = request
.header
@@ -198,6 +200,10 @@ impl RegionServer {
.map(|h| Arc::new(h.into()))
.unwrap_or_else(|| Arc::new(QueryContextBuilder::default().build()));
+ let region_id = RegionId::from_u64(request.region_id);
+ let provider = self.table_provider(region_id, Some(&query_ctx)).await?;
+ let catalog_list = Arc::new(DummyCatalogList::with_table_provider(provider));
+
let decoder = self
.inner
.query_engine
@@ -226,7 +232,10 @@ impl RegionServer {
} else {
None
};
- let provider = self.table_provider(request.region_id).await?;
+
+ let ctx: Option<session::context::QueryContext> = request.header.as_ref().map(|h| h.into());
+
+ let provider = self.table_provider(request.region_id, ctx.as_ref()).await?;
struct RegionDataSourceInjector {
source: Arc<dyn TableSource>,
diff --git a/src/datanode/src/tests.rs b/src/datanode/src/tests.rs
index 8b48dd4258cc..a7f95e29fdbd 100644
--- a/src/datanode/src/tests.rs
+++ b/src/datanode/src/tests.rs
@@ -37,7 +37,7 @@ use store_api::region_engine::{
SettableRegionRoleState,
};
use store_api::region_request::{AffectedRows, RegionRequest};
-use store_api::storage::{RegionId, ScanRequest};
+use store_api::storage::{RegionId, ScanRequest, SequenceNumber};
use table::TableRef;
use tokio::sync::mpsc::{Receiver, Sender};
@@ -218,6 +218,10 @@ impl RegionEngine for MockRegionEngine {
unimplemented!()
}
+ async fn get_last_seq_num(&self, _: RegionId) -> Result<Option<SequenceNumber>, BoxedError> {
+ unimplemented!()
+ }
+
async fn stop(&self) -> Result<(), BoxedError> {
Ok(())
}
diff --git a/src/file-engine/src/engine.rs b/src/file-engine/src/engine.rs
index a29a3add23d6..9bf4432379a6 100644
--- a/src/file-engine/src/engine.rs
+++ b/src/file-engine/src/engine.rs
@@ -33,7 +33,7 @@ use store_api::region_request::{
AffectedRows, RegionCloseRequest, RegionCreateRequest, RegionDropRequest, RegionOpenRequest,
RegionRequest,
};
-use store_api::storage::{RegionId, ScanRequest};
+use store_api::storage::{RegionId, ScanRequest, SequenceNumber};
use tokio::sync::Mutex;
use crate::config::EngineConfig;
@@ -114,6 +114,10 @@ impl RegionEngine for FileRegionEngine {
None
}
+ async fn get_last_seq_num(&self, _: RegionId) -> Result<Option<SequenceNumber>, BoxedError> {
+ Ok(None)
+ }
+
fn set_region_role(&self, region_id: RegionId, role: RegionRole) -> Result<(), BoxedError> {
self.inner
.set_region_role(region_id, role)
diff --git a/src/metric-engine/src/engine.rs b/src/metric-engine/src/engine.rs
index 95261580bdff..25d32b4cd66b 100644
--- a/src/metric-engine/src/engine.rs
+++ b/src/metric-engine/src/engine.rs
@@ -43,7 +43,7 @@ use store_api::region_engine::{
SettableRegionRoleState,
};
use store_api::region_request::{BatchRegionDdlRequest, RegionRequest};
-use store_api::storage::{RegionId, ScanRequest};
+use store_api::storage::{RegionId, ScanRequest, SequenceNumber};
use self::state::MetricEngineState;
use crate::config::EngineConfig;
@@ -235,6 +235,16 @@ impl RegionEngine for MetricEngine {
self.handle_query(region_id, request).await
}
+ async fn get_last_seq_num(
+ &self,
+ region_id: RegionId,
+ ) -> Result<Option<SequenceNumber>, BoxedError> {
+ self.inner
+ .get_last_seq_num(region_id)
+ .await
+ .map_err(BoxedError::new)
+ }
+
/// Retrieves region's metadata.
async fn get_metadata(&self, region_id: RegionId) -> Result<RegionMetadataRef, BoxedError> {
self.inner
diff --git a/src/metric-engine/src/engine/read.rs b/src/metric-engine/src/engine/read.rs
index a9a2246a04c1..3df0d08ad7ff 100644
--- a/src/metric-engine/src/engine/read.rs
+++ b/src/metric-engine/src/engine/read.rs
@@ -21,7 +21,7 @@ use snafu::{OptionExt, ResultExt};
use store_api::metadata::{RegionMetadataBuilder, RegionMetadataRef};
use store_api::metric_engine_consts::DATA_SCHEMA_TABLE_ID_COLUMN_NAME;
use store_api::region_engine::{RegionEngine, RegionScannerRef};
-use store_api::storage::{RegionId, ScanRequest};
+use store_api::storage::{RegionId, ScanRequest, SequenceNumber};
use crate::engine::MetricEngineInner;
use crate::error::{
@@ -85,6 +85,19 @@ impl MetricEngineInner {
.context(MitoReadOperationSnafu)
}
+ pub async fn get_last_seq_num(&self, region_id: RegionId) -> Result<Option<SequenceNumber>> {
+ let region_id = if self.is_physical_region(region_id) {
+ region_id
+ } else {
+ let physical_region_id = self.get_physical_region_id(region_id).await?;
+ utils::to_data_region_id(physical_region_id)
+ };
+ self.mito
+ .get_last_seq_num(region_id)
+ .await
+ .context(MitoReadOperationSnafu)
+ }
+
pub async fn load_region_metadata(&self, region_id: RegionId) -> Result<RegionMetadataRef> {
let is_reading_physical_region =
self.state.read().unwrap().exist_physical_region(region_id);
diff --git a/src/mito2/src/engine.rs b/src/mito2/src/engine.rs
index fb1a05d36cf5..f031e2d1df33 100644
--- a/src/mito2/src/engine.rs
+++ b/src/mito2/src/engine.rs
@@ -82,7 +82,7 @@ use store_api::region_engine::{
SetRegionRoleStateResponse, SettableRegionRoleState,
};
use store_api::region_request::{AffectedRows, RegionOpenRequest, RegionRequest};
-use store_api::storage::{RegionId, ScanRequest};
+use store_api::storage::{RegionId, ScanRequest, SequenceNumber};
use tokio::sync::{oneshot, Semaphore};
use crate::cache::CacheStrategy;
@@ -424,6 +424,17 @@ impl EngineInner {
receiver.await.context(RecvSnafu)?
}
+ fn get_last_seq_num(&self, region_id: RegionId) -> Result<Option<SequenceNumber>> {
+ // Reading a region doesn't need to go through the region worker thread.
+ let region = self
+ .workers
+ .get_region(region_id)
+ .context(RegionNotFoundSnafu { region_id })?;
+ let version_ctrl = ®ion.version_control;
+ let seq = Some(version_ctrl.committed_sequence());
+ Ok(seq)
+ }
+
/// Handles the scan `request` and returns a [ScanRegion].
fn scan_region(&self, region_id: RegionId, request: ScanRequest) -> Result<ScanRegion> {
let query_start = Instant::now();
@@ -547,6 +558,15 @@ impl RegionEngine for MitoEngine {
.map_err(BoxedError::new)
}
+ async fn get_last_seq_num(
+ &self,
+ region_id: RegionId,
+ ) -> Result<Option<SequenceNumber>, BoxedError> {
+ self.inner
+ .get_last_seq_num(region_id)
+ .map_err(BoxedError::new)
+ }
+
/// Retrieve region's metadata.
async fn get_metadata(
&self,
diff --git a/src/mito2/src/region/version.rs b/src/mito2/src/region/version.rs
index cc809f61a782..c27f385ef7a2 100644
--- a/src/mito2/src/region/version.rs
+++ b/src/mito2/src/region/version.rs
@@ -78,6 +78,11 @@ impl VersionControl {
data.last_entry_id = entry_id;
}
+ /// Sequence number of last committed data.
+ pub(crate) fn committed_sequence(&self) -> SequenceNumber {
+ self.data.read().unwrap().committed_sequence
+ }
+
/// Freezes the mutable memtable if it is not empty.
pub(crate) fn freeze_mutable(&self) -> Result<()> {
let version = self.current().version;
diff --git a/src/query/src/dummy_catalog.rs b/src/query/src/dummy_catalog.rs
index ae5775768382..c7d485b7fbe0 100644
--- a/src/query/src/dummy_catalog.rs
+++ b/src/query/src/dummy_catalog.rs
@@ -234,6 +234,10 @@ impl DummyTableProvider {
self.scan_request.lock().unwrap().series_row_selector = Some(selector);
}
+ pub fn with_sequence(&self, sequence: u64) {
+ self.scan_request.lock().unwrap().sequence = Some(sequence);
+ }
+
/// Gets the scan request of the provider.
#[cfg(test)]
pub fn scan_request(&self) -> ScanRequest {
@@ -249,6 +253,7 @@ impl TableProviderFactory for DummyTableProviderFactory {
&self,
region_id: RegionId,
engine: RegionEngineRef,
+ ctx: Option<&session::context::QueryContext>,
) -> Result<Arc<dyn TableProvider>> {
let metadata =
engine
@@ -258,11 +263,20 @@ impl TableProviderFactory for DummyTableProviderFactory {
engine: engine.name(),
region_id,
})?;
+
+ let scan_request = ctx
+ .and_then(|c| c.get_snapshot(region_id.as_u64()))
+ .map(|seq| ScanRequest {
+ sequence: Some(seq),
+ ..Default::default()
+ })
+ .unwrap_or_default();
+
Ok(Arc::new(DummyTableProvider {
region_id,
engine,
metadata,
- scan_request: Default::default(),
+ scan_request: Arc::new(Mutex::new(scan_request)),
}))
}
}
@@ -273,6 +287,7 @@ pub trait TableProviderFactory: Send + Sync {
&self,
region_id: RegionId,
engine: RegionEngineRef,
+ ctx: Option<&session::context::QueryContext>,
) -> Result<Arc<dyn TableProvider>>;
}
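The factory change above is where a pinned snapshot reaches the scanner: when the query context carries a sequence for the region, that sequence is copied into ScanRequest::sequence, otherwise the default request (no pinned sequence) is used. A simplified stand-alone sketch of that decision, using stand-in types rather than the real QueryContext/ScanRequest:

use std::collections::HashMap;

struct SketchContext {
    snapshot_seqs: HashMap<u64, u64>, // region id -> pinned sequence
}

impl SketchContext {
    fn get_snapshot(&self, region_id: u64) -> Option<u64> {
        self.snapshot_seqs.get(&region_id).copied()
    }
}

#[derive(Default, Debug, PartialEq)]
struct SketchScanRequest {
    sequence: Option<u64>,
}

// Mirrors the ctx.and_then(..).map(..).unwrap_or_default() pattern above.
fn scan_request_for(ctx: Option<&SketchContext>, region_id: u64) -> SketchScanRequest {
    ctx.and_then(|c| c.get_snapshot(region_id))
        .map(|seq| SketchScanRequest { sequence: Some(seq) })
        .unwrap_or_default()
}

fn main() {
    let ctx = SketchContext {
        snapshot_seqs: HashMap::from([(42, 1000)]),
    };
    assert_eq!(scan_request_for(Some(&ctx), 42).sequence, Some(1000));
    assert_eq!(scan_request_for(None, 42).sequence, None);
}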
diff --git a/src/query/src/optimizer/test_util.rs b/src/query/src/optimizer/test_util.rs
index c57fb9bf4d9b..25de1c8a16f4 100644
--- a/src/query/src/optimizer/test_util.rs
+++ b/src/query/src/optimizer/test_util.rs
@@ -32,7 +32,7 @@ use store_api::region_engine::{
SettableRegionRoleState,
};
use store_api::region_request::RegionRequest;
-use store_api::storage::{ConcreteDataType, RegionId, ScanRequest};
+use store_api::storage::{ConcreteDataType, RegionId, ScanRequest, SequenceNumber};
use crate::dummy_catalog::DummyTableProvider;
@@ -86,6 +86,13 @@ impl RegionEngine for MetaRegionEngine {
None
}
+ async fn get_last_seq_num(
+ &self,
+ _region_id: RegionId,
+ ) -> Result<Option<SequenceNumber>, BoxedError> {
+ Ok(None)
+ }
+
async fn stop(&self) -> Result<(), BoxedError> {
Ok(())
}
diff --git a/src/session/src/context.rs b/src/session/src/context.rs
index 9893e250617b..5715447dfccc 100644
--- a/src/session/src/context.rs
+++ b/src/session/src/context.rs
@@ -43,6 +43,10 @@ const CURSOR_COUNT_WARNING_LIMIT: usize = 10;
#[builder(build_fn(skip))]
pub struct QueryContext {
current_catalog: String,
+ /// Mapping of RegionId to SequenceNumber for snapshot reads: the read should only
+ /// contain data that was committed at or before the given sequence number.
+ /// This field is only filled when the extensions contain the pair "snapshot_read" = "true".
+ snapshot_seqs: Arc<RwLock<HashMap<u64, u64>>>,
// we use Arc<RwLock>> for modifiable fields
#[builder(default)]
mutable_session_data: Arc<RwLock<MutableInner>>,
@@ -116,7 +120,10 @@ impl From<&RegionRequestHeader> for QueryContext {
.current_schema(ctx.current_schema.clone())
.timezone(parse_timezone(Some(&ctx.timezone)))
.extensions(ctx.extensions.clone())
- .channel(ctx.channel.into());
+ .channel(ctx.channel.into())
+ .snapshot_seqs(Arc::new(RwLock::new(
+ ctx.snapshot_seqs.clone().unwrap_or_default().snapshot_seqs,
+ )));
}
builder.build()
}
@@ -130,6 +137,9 @@ impl From<api::v1::QueryContext> for QueryContext {
.timezone(parse_timezone(Some(&ctx.timezone)))
.extensions(ctx.extensions)
.channel(ctx.channel.into())
+ .snapshot_seqs(Arc::new(RwLock::new(
+ ctx.snapshot_seqs.clone().unwrap_or_default().snapshot_seqs,
+ )))
.build()
}
}
@@ -141,6 +151,7 @@ impl From<QueryContext> for api::v1::QueryContext {
mutable_session_data: mutable_inner,
extensions,
channel,
+ snapshot_seqs,
..
}: QueryContext,
) -> Self {
@@ -151,6 +162,9 @@ impl From<QueryContext> for api::v1::QueryContext {
timezone: mutable_inner.timezone.to_string(),
extensions,
channel: channel as u32,
+ snapshot_seqs: Some(api::v1::SnapshotSequences {
+ snapshot_seqs: snapshot_seqs.read().unwrap().clone(),
+ }),
}
}
}
@@ -324,6 +338,14 @@ impl QueryContext {
let rb = guard.cursors.get(name);
rb.cloned()
}
+
+ pub fn snapshots(&self) -> HashMap<u64, u64> {
+ self.snapshot_seqs.read().unwrap().clone()
+ }
+
+ pub fn get_snapshot(&self, region_id: u64) -> Option<u64> {
+ self.snapshot_seqs.read().unwrap().get(®ion_id).cloned()
+ }
}
impl QueryContextBuilder {
@@ -333,6 +355,7 @@ impl QueryContextBuilder {
current_catalog: self
.current_catalog
.unwrap_or_else(|| DEFAULT_CATALOG_NAME.to_string()),
+ snapshot_seqs: self.snapshot_seqs.unwrap_or_default(),
mutable_session_data: self.mutable_session_data.unwrap_or_default(),
mutable_query_context_data: self.mutable_query_context_data.unwrap_or_default(),
sql_dialect: self
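The snapshot map added to the context above sits behind Arc<RwLock<HashMap<u64, u64>>>, the same pattern the context uses for its other modifiable fields, so copies of the context share one region-to-sequence pinning and get_snapshot/snapshots only take a read lock. A stand-alone sketch of those semantics with a simplified stand-in for the real QueryContext:

use std::collections::HashMap;
use std::sync::{Arc, RwLock};

#[derive(Clone, Default)]
struct SnapshotSeqs(Arc<RwLock<HashMap<u64, u64>>>);

impl SnapshotSeqs {
    // Mirrors QueryContext::snapshots(): a clone of the whole map.
    fn snapshots(&self) -> HashMap<u64, u64> {
        self.0.read().unwrap().clone()
    }

    // Mirrors QueryContext::get_snapshot(): a single lookup by region id.
    fn get_snapshot(&self, region_id: u64) -> Option<u64> {
        self.0.read().unwrap().get(&region_id).cloned()
    }
}

fn main() {
    let seqs = SnapshotSeqs::default();
    seqs.0.write().unwrap().insert(7, 99);
    let shared = seqs.clone(); // shares the same underlying map
    assert_eq!(shared.get_snapshot(7), Some(99));
    assert_eq!(shared.snapshots(), HashMap::from([(7, 99)]));
}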
diff --git a/src/store-api/src/mito_engine_options.rs b/src/store-api/src/mito_engine_options.rs
index b97e55aae6d4..e73060469fe4 100644
--- a/src/store-api/src/mito_engine_options.rs
+++ b/src/store-api/src/mito_engine_options.rs
@@ -23,6 +23,8 @@ pub const APPEND_MODE_KEY: &str = "append_mode";
pub const MERGE_MODE_KEY: &str = "merge_mode";
/// Option key for TTL(time-to-live)
pub const TTL_KEY: &str = "ttl";
+/// Option key for snapshot read.
+pub const SNAPSHOT_READ: &str = "snapshot_read";
/// Option key for compaction type.
pub const COMPACTION_TYPE: &str = "compaction.type";
/// TWCS compaction strategy.
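The SNAPSHOT_READ option key added above corresponds to the extension pair described in the query-context doc comment: the snapshot map is only filled when the extensions carry "snapshot_read" set to "true". A minimal sketch of what that key/value pair looks like on an extensions map; the map here is only an illustration of the shape, not the real plumbing.

use std::collections::HashMap;

const SNAPSHOT_READ: &str = "snapshot_read";

fn main() {
    let mut extensions: HashMap<String, String> = HashMap::new();
    extensions.insert(SNAPSHOT_READ.to_string(), "true".to_string());
    // Only when this pair is present is the snapshot_seqs field expected to be filled.
    assert_eq!(
        extensions.get(SNAPSHOT_READ).map(String::as_str),
        Some("true")
    );
}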
diff --git a/src/store-api/src/region_engine.rs b/src/store-api/src/region_engine.rs
index 46411d064b20..e864f585be18 100644
--- a/src/store-api/src/region_engine.rs
+++ b/src/store-api/src/region_engine.rs
@@ -34,8 +34,10 @@ use tokio::sync::Semaphore;
use crate::logstore::entry;
use crate::metadata::RegionMetadataRef;
-use crate::region_request::{BatchRegionDdlRequest, RegionOpenRequest, RegionRequest};
-use crate::storage::{RegionId, ScanRequest};
+use crate::region_request::{
+ BatchRegionDdlRequest, RegionOpenRequest, RegionRequest, RegionSequencesRequest,
+};
+use crate::storage::{RegionId, ScanRequest, SequenceNumber};
/// The settable region role state.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
@@ -436,6 +438,26 @@ pub trait RegionEngine: Send + Sync {
request: RegionRequest,
) -> Result<RegionResponse, BoxedError>;
+ /// Returns the last sequence number of the region.
+ async fn get_last_seq_num(
+ &self,
+ region_id: RegionId,
+ ) -> Result<Option<SequenceNumber>, BoxedError>;
+
+ async fn get_region_sequences(
+ &self,
+ seqs: RegionSequencesRequest,
+ ) -> Result<HashMap<u64, u64>, BoxedError> {
+ let mut results = HashMap::with_capacity(seqs.region_ids.len());
+
+ for region_id in seqs.region_ids {
+ let seq = self.get_last_seq_num(region_id).await?.unwrap_or_default();
+ results.insert(region_id.as_u64(), seq);
+ }
+
+ Ok(results)
+ }
+
/// Handles query and return a scanner that can be used to scan the region concurrently.
async fn handle_query(
&self,
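The default get_region_sequences above fans out over the requested region ids, asks the engine for each region's last committed sequence, and records 0 for any region that reports no sequence. A simplified stand-alone sketch of that behavior, synchronous and without error handling; the trait is a stand-in, not the real RegionEngine.

use std::collections::HashMap;

trait SketchEngine {
    fn get_last_seq_num(&self, region_id: u64) -> Option<u64>;

    // Mirrors the default method added above, minus async and error propagation.
    fn get_region_sequences(&self, region_ids: &[u64]) -> HashMap<u64, u64> {
        let mut results = HashMap::with_capacity(region_ids.len());
        for &region_id in region_ids {
            results.insert(region_id, self.get_last_seq_num(region_id).unwrap_or_default());
        }
        results
    }
}

struct FixedEngine(HashMap<u64, u64>);

impl SketchEngine for FixedEngine {
    fn get_last_seq_num(&self, region_id: u64) -> Option<u64> {
        self.0.get(&region_id).copied()
    }
}

fn main() {
    let engine = FixedEngine(HashMap::from([(1, 500)]));
    let seqs = engine.get_region_sequences(&[1, 2]);
    assert_eq!(seqs[&1], 500);
    assert_eq!(seqs[&2], 0); // a region with no reported sequence defaults to 0
}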
diff --git a/src/store-api/src/region_request.rs b/src/store-api/src/region_request.rs
index af82cd1deb86..bdc78f6f7012 100644
--- a/src/store-api/src/region_request.rs
+++ b/src/store-api/src/region_request.rs
@@ -1084,6 +1084,12 @@ pub struct RegionCatchupRequest {
pub location_id: Option<u64>,
}
+/// Get sequences of regions by region ids.
+#[derive(Debug, Clone)]
+pub struct RegionSequencesRequest {
+ pub region_ids: Vec<RegionId>,
+}
+
impl fmt::Display for RegionRequest {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
| feat | add snapshot seqs field to query context (#5477) |
| 2034b40f33723495b12eb1edb48062473394db4a | 2022-12-06 11:45:00 | discord9 | chore: update RustPython dependence(With a tweaked fork) (#655) | false |
diff --git a/Cargo.lock b/Cargo.lock
index a6f9216b6d22..abe4e0918ca3 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -343,6 +343,15 @@ version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d92bec98840b8f03a5ff5413de5293bfcd8bf96467cf5452609f939ec6f5de16"
+[[package]]
+name = "ascii-canvas"
+version = "3.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8824ecca2e851cec16968d54a01dd372ef8f95b244fb84b84e70128be347c3c6"
+dependencies = [
+ "term 0.7.0",
+]
+
[[package]]
name = "async-channel"
version = "1.7.1"
@@ -443,6 +452,15 @@ dependencies = [
"syn",
]
+[[package]]
+name = "atomic"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b88d82667eca772c4aa12f0f1348b3ae643424c8876448f3f7bd5787032e234c"
+dependencies = [
+ "autocfg",
+]
+
[[package]]
name = "atomic_float"
version = "0.1.0"
@@ -716,6 +734,21 @@ dependencies = [
"shlex",
]
+[[package]]
+name = "bit-set"
+version = "0.5.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0700ddab506f33b20a03b13996eccd309a48e5ff77d0d95926aa0210fb4e95f1"
+dependencies = [
+ "bit-vec",
+]
+
+[[package]]
+name = "bit-vec"
+version = "0.6.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb"
+
[[package]]
name = "bitflags"
version = "1.3.2"
@@ -2061,6 +2094,12 @@ dependencies = [
"syn",
]
+[[package]]
+name = "diff"
+version = "0.1.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8"
+
[[package]]
name = "digest"
version = "0.10.5"
@@ -2130,6 +2169,18 @@ version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0688c2a7f92e427f44895cd63841bff7b29f8d7a1648b9e7e07a4a365b2e1257"
+[[package]]
+name = "dns-lookup"
+version = "1.0.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "53ecafc952c4528d9b51a458d1a8904b81783feff9fde08ab6ed2545ff396872"
+dependencies = [
+ "cfg-if",
+ "libc",
+ "socket2",
+ "winapi",
+]
+
[[package]]
name = "doc-comment"
version = "0.3.3"
@@ -2154,6 +2205,15 @@ version = "1.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797"
+[[package]]
+name = "ena"
+version = "0.14.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d7402b94a93c24e742487327a7cd839dc9d36fec9de9fb25b09f2dae459f36c3"
+dependencies = [
+ "log",
+]
+
[[package]]
name = "encode_unicode"
version = "0.3.6"
@@ -3040,11 +3100,46 @@ dependencies = [
"simple_asn1",
]
+[[package]]
+name = "keccak"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3afef3b6eff9ce9d8ff9b3601125eec7f0c8cbac7abd14f355d053fa56c98768"
+dependencies = [
+ "cpufeatures",
+]
+
+[[package]]
+name = "lalrpop"
+version = "0.19.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b30455341b0e18f276fa64540aff54deafb54c589de6aca68659c63dd2d5d823"
+dependencies = [
+ "ascii-canvas",
+ "atty",
+ "bit-set",
+ "diff",
+ "ena",
+ "itertools",
+ "lalrpop-util",
+ "petgraph",
+ "pico-args",
+ "regex",
+ "regex-syntax",
+ "string_cache",
+ "term 0.7.0",
+ "tiny-keccak",
+ "unicode-xid",
+]
+
[[package]]
name = "lalrpop-util"
version = "0.19.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bcf796c978e9b4d983414f4caedc9273aa33ee214c5b887bd55fde84c85d2dc4"
+dependencies = [
+ "regex",
+]
[[package]]
name = "lazy_static"
@@ -3252,6 +3347,16 @@ dependencies = [
"twox-hash",
]
+[[package]]
+name = "mac_address"
+version = "1.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b238e3235c8382b7653c6408ed1b08dd379bdb9fdf990fb0bbae3db2cc0ae963"
+dependencies = [
+ "nix 0.23.1",
+ "winapi",
+]
+
[[package]]
name = "mach"
version = "0.3.2"
@@ -3318,6 +3423,15 @@ version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d"
+[[package]]
+name = "memmap2"
+version = "0.5.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4b182332558b18d807c4ce1ca8ca983b34c3ee32765e47b3f0f69b90355cc1dc"
+dependencies = [
+ "libc",
+]
+
[[package]]
name = "memoffset"
version = "0.6.5"
@@ -3536,6 +3650,15 @@ dependencies = [
"uuid",
]
+[[package]]
+name = "mt19937"
+version = "2.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "12ca7f22ed370d5991a9caec16a83187e865bc8a532f889670337d5a5689e3a1"
+dependencies = [
+ "rand_core 0.6.4",
+]
+
[[package]]
name = "multimap"
version = "0.8.3"
@@ -3664,6 +3787,12 @@ dependencies = [
"syn",
]
+[[package]]
+name = "new_debug_unreachable"
+version = "1.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e4a24736216ec316047a1fc4252e27dabb04218aa4a3f37c6e7ddbf1f9782b54"
+
[[package]]
name = "nibble_vec"
version = "0.1.0"
@@ -3686,6 +3815,18 @@ dependencies = [
"memoffset",
]
+[[package]]
+name = "nix"
+version = "0.24.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "195cdbc1741b8134346d515b3a56a1c94b0912758009cfd53f99ea0f57b065fc"
+dependencies = [
+ "bitflags",
+ "cfg-if",
+ "libc",
+ "memoffset",
+]
+
[[package]]
name = "nom"
version = "7.1.1"
@@ -4017,6 +4158,16 @@ version = "6.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9ff7415e9ae3fff1225851df9e0d9e4e5479f947619774677a63572e55e80eff"
+[[package]]
+name = "page_size"
+version = "0.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "eebde548fbbf1ea81a99b128872779c437752fb99f217c45245e1a61dcd9edcd"
+dependencies = [
+ "libc",
+ "winapi",
+]
+
[[package]]
name = "parking"
version = "2.0.0"
@@ -4299,6 +4450,12 @@ dependencies = [
"uncased",
]
+[[package]]
+name = "pico-args"
+version = "0.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "db8bcd96cb740d03149cbad5518db9fd87126a10ab519c011893b1754134c468"
+
[[package]]
name = "pin-project"
version = "1.0.12"
@@ -4440,6 +4597,12 @@ version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872"
+[[package]]
+name = "precomputed-hash"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c"
+
[[package]]
name = "prettydiff"
version = "0.6.1"
@@ -4471,7 +4634,7 @@ dependencies = [
"csv",
"encode_unicode",
"lazy_static",
- "term",
+ "term 0.5.2",
"unicode-width",
]
@@ -4642,6 +4805,12 @@ dependencies = [
"unicase",
]
+[[package]]
+name = "puruspe"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3b7e158a385023d209d6d5f2585c4b468f6dcb3dd5aca9b75c4f1678c05bb375"
+
[[package]]
name = "quanta"
version = "0.10.1"
@@ -5145,32 +5314,33 @@ dependencies = [
[[package]]
name = "rustpython-ast"
version = "0.1.0"
-source = "git+https://github.com/RustPython/RustPython?rev=02a1d1d#02a1d1d7db57afbb78049599c2585cc7cd59e6d3"
+source = "git+https://github.com/discord9/RustPython?rev=183e8dab#183e8dabe0027e31630368e36c6be83b5f9cb3f8"
dependencies = [
"num-bigint",
"rustpython-common",
+ "rustpython-compiler-core",
]
[[package]]
-name = "rustpython-bytecode"
+name = "rustpython-codegen"
version = "0.1.2"
-source = "git+https://github.com/RustPython/RustPython?rev=02a1d1d#02a1d1d7db57afbb78049599c2585cc7cd59e6d3"
+source = "git+https://github.com/discord9/RustPython?rev=183e8dab#183e8dabe0027e31630368e36c6be83b5f9cb3f8"
dependencies = [
- "bincode 1.3.3",
- "bitflags",
- "bstr",
+ "ahash 0.7.6",
+ "indexmap",
"itertools",
- "lz4_flex",
- "num-bigint",
+ "log",
"num-complex",
- "serde",
- "static_assertions",
+ "num-traits",
+ "rustpython-ast",
+ "rustpython-compiler-core",
+ "thiserror",
]
[[package]]
name = "rustpython-common"
version = "0.0.0"
-source = "git+https://github.com/RustPython/RustPython?rev=02a1d1d#02a1d1d7db57afbb78049599c2585cc7cd59e6d3"
+source = "git+https://github.com/discord9/RustPython?rev=183e8dab#183e8dabe0027e31630368e36c6be83b5f9cb3f8"
dependencies = [
"ascii",
"cfg-if",
@@ -5193,9 +5363,9 @@ dependencies = [
[[package]]
name = "rustpython-compiler"
version = "0.1.2"
-source = "git+https://github.com/RustPython/RustPython?rev=02a1d1d#02a1d1d7db57afbb78049599c2585cc7cd59e6d3"
+source = "git+https://github.com/discord9/RustPython?rev=183e8dab#183e8dabe0027e31630368e36c6be83b5f9cb3f8"
dependencies = [
- "rustpython-bytecode",
+ "rustpython-codegen",
"rustpython-compiler-core",
"rustpython-parser",
"thiserror",
@@ -5204,22 +5374,24 @@ dependencies = [
[[package]]
name = "rustpython-compiler-core"
version = "0.1.2"
-source = "git+https://github.com/RustPython/RustPython?rev=02a1d1d#02a1d1d7db57afbb78049599c2585cc7cd59e6d3"
+source = "git+https://github.com/discord9/RustPython?rev=183e8dab#183e8dabe0027e31630368e36c6be83b5f9cb3f8"
dependencies = [
- "ahash 0.7.6",
- "indexmap",
+ "bincode 1.3.3",
+ "bitflags",
+ "bstr",
"itertools",
- "log",
+ "lz4_flex",
+ "num-bigint",
"num-complex",
- "num-traits",
- "rustpython-ast",
- "rustpython-bytecode",
+ "serde",
+ "static_assertions",
+ "thiserror",
]
[[package]]
name = "rustpython-derive"
version = "0.1.2"
-source = "git+https://github.com/RustPython/RustPython?rev=02a1d1d#02a1d1d7db57afbb78049599c2585cc7cd59e6d3"
+source = "git+https://github.com/discord9/RustPython?rev=183e8dab#183e8dabe0027e31630368e36c6be83b5f9cb3f8"
dependencies = [
"indexmap",
"itertools",
@@ -5227,8 +5399,9 @@ dependencies = [
"once_cell",
"proc-macro2",
"quote",
- "rustpython-bytecode",
+ "rustpython-codegen",
"rustpython-compiler",
+ "rustpython-compiler-core",
"rustpython-doc",
"syn",
"syn-ext",
@@ -5238,7 +5411,7 @@ dependencies = [
[[package]]
name = "rustpython-doc"
version = "0.1.0"
-source = "git+https://github.com/RustPython/__doc__?branch=main#66be54cd61cc5eb29bb4870314160c337a296a32"
+source = "git+https://github.com/RustPython/__doc__?branch=main#d927debd491e4c45b88e953e6e50e4718e0f2965"
dependencies = [
"once_cell",
]
@@ -5246,9 +5419,12 @@ dependencies = [
[[package]]
name = "rustpython-parser"
version = "0.1.2"
-source = "git+https://github.com/RustPython/RustPython?rev=02a1d1d#02a1d1d7db57afbb78049599c2585cc7cd59e6d3"
+source = "git+https://github.com/discord9/RustPython?rev=183e8dab#183e8dabe0027e31630368e36c6be83b5f9cb3f8"
dependencies = [
"ahash 0.7.6",
+ "anyhow",
+ "itertools",
+ "lalrpop",
"lalrpop-util",
"log",
"num-bigint",
@@ -5256,6 +5432,8 @@ dependencies = [
"phf 0.10.1",
"phf_codegen 0.10.0",
"rustpython-ast",
+ "rustpython-compiler-core",
+ "thiserror",
"tiny-keccak",
"unic-emoji-char",
"unic-ucd-ident",
@@ -5265,16 +5443,81 @@ dependencies = [
[[package]]
name = "rustpython-pylib"
version = "0.1.0"
-source = "git+https://github.com/RustPython/RustPython?rev=02a1d1d#02a1d1d7db57afbb78049599c2585cc7cd59e6d3"
+source = "git+https://github.com/discord9/RustPython?rev=183e8dab#183e8dabe0027e31630368e36c6be83b5f9cb3f8"
dependencies = [
- "rustpython-bytecode",
+ "glob",
+ "rustpython-compiler-core",
"rustpython-derive",
]
+[[package]]
+name = "rustpython-stdlib"
+version = "0.1.2"
+source = "git+https://github.com/discord9/RustPython?rev=183e8dab#183e8dabe0027e31630368e36c6be83b5f9cb3f8"
+dependencies = [
+ "adler32",
+ "ahash 0.7.6",
+ "ascii",
+ "base64",
+ "blake2",
+ "cfg-if",
+ "crc32fast",
+ "crossbeam-utils",
+ "csv-core",
+ "digest",
+ "dns-lookup",
+ "flate2",
+ "gethostname",
+ "hex",
+ "itertools",
+ "lexical-parse-float",
+ "libc",
+ "mac_address",
+ "md-5",
+ "memchr",
+ "memmap2",
+ "mt19937",
+ "nix 0.24.2",
+ "num-bigint",
+ "num-complex",
+ "num-integer",
+ "num-traits",
+ "num_enum",
+ "once_cell",
+ "page_size",
+ "parking_lot",
+ "paste",
+ "puruspe",
+ "rand 0.8.5",
+ "rand_core 0.6.4",
+ "rustpython-common",
+ "rustpython-derive",
+ "rustpython-vm",
+ "schannel",
+ "sha-1",
+ "sha2",
+ "sha3",
+ "socket2",
+ "system-configuration",
+ "termios",
+ "unic-char-property",
+ "unic-normal",
+ "unic-ucd-age",
+ "unic-ucd-bidi",
+ "unic-ucd-category",
+ "unic-ucd-ident",
+ "unicode-casing",
+ "unicode_names2",
+ "uuid",
+ "widestring",
+ "winapi",
+ "xml-rs",
+]
+
[[package]]
name = "rustpython-vm"
version = "0.1.2"
-source = "git+https://github.com/RustPython/RustPython?rev=02a1d1d#02a1d1d7db57afbb78049599c2585cc7cd59e6d3"
+source = "git+https://github.com/discord9/RustPython?rev=183e8dab#183e8dabe0027e31630368e36c6be83b5f9cb3f8"
dependencies = [
"adler32",
"ahash 0.7.6",
@@ -5289,6 +5532,7 @@ dependencies = [
"exitcode",
"flate2",
"getrandom 0.2.7",
+ "glob",
"half 1.8.2",
"hex",
"hexf-parse",
@@ -5299,7 +5543,7 @@ dependencies = [
"log",
"memchr",
"memoffset",
- "nix",
+ "nix 0.24.2",
"num-bigint",
"num-complex",
"num-integer",
@@ -5315,13 +5559,12 @@ dependencies = [
"result-like",
"rustc_version",
"rustpython-ast",
- "rustpython-bytecode",
+ "rustpython-codegen",
"rustpython-common",
"rustpython-compiler",
"rustpython-compiler-core",
"rustpython-derive",
"rustpython-parser",
- "rustpython-pylib",
"rustyline",
"schannel",
"serde",
@@ -5342,6 +5585,7 @@ dependencies = [
"which",
"widestring",
"winapi",
+ "windows",
"winreg",
]
@@ -5353,9 +5597,9 @@ checksum = "97477e48b4cf8603ad5f7aaf897467cf42ab4218a38ef76fb14c2d6773a6d6a8"
[[package]]
name = "rustyline"
-version = "9.1.2"
+version = "10.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "db7826789c0e25614b03e5a54a0717a86f9ff6e6e5247f92b369472869320039"
+checksum = "1d1cd5ae51d3f7bf65d7969d579d502168ef578f289452bd8ccc91de28fda20e"
dependencies = [
"bitflags",
"cfg-if",
@@ -5365,10 +5609,9 @@ dependencies = [
"libc",
"log",
"memchr",
- "nix",
+ "nix 0.24.2",
"radix_trie",
"scopeguard",
- "smallvec",
"unicode-segmentation",
"unicode-width",
"utf8parse",
@@ -5526,10 +5769,12 @@ dependencies = [
"query",
"ron",
"rustpython-ast",
- "rustpython-bytecode",
+ "rustpython-codegen",
"rustpython-compiler",
"rustpython-compiler-core",
"rustpython-parser",
+ "rustpython-pylib",
+ "rustpython-stdlib",
"rustpython-vm",
"serde",
"session",
@@ -5759,6 +6004,16 @@ dependencies = [
"digest",
]
+[[package]]
+name = "sha3"
+version = "0.10.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bdf0c33fae925bdc080598b84bc15c55e7b9a4a43b3c704da051f977469691c9"
+dependencies = [
+ "digest",
+ "keccak",
+]
+
[[package]]
name = "sharded-slab"
version = "0.1.4"
@@ -5995,12 +6250,13 @@ dependencies = [
[[package]]
name = "sre-engine"
-version = "0.1.2"
+version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e5872399287c284fed4bc773cb7f6041623ac88213774f5e11e89e2131681fc1"
+checksum = "a490c5c46c35dba9a6f5e7ee8e4d67e775eb2d2da0f115750b8d10e1c1ac2d28"
dependencies = [
"bitflags",
"num_enum",
+ "optional",
]
[[package]]
@@ -6129,6 +6385,19 @@ version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a3ff2f71c82567c565ba4b3009a9350a96a7269eaa4001ebedae926230bc2254"
+[[package]]
+name = "string_cache"
+version = "0.8.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "213494b7a2b503146286049378ce02b482200519accc31872ee8be91fa820a08"
+dependencies = [
+ "new_debug_unreachable",
+ "once_cell",
+ "parking_lot",
+ "phf_shared 0.10.0",
+ "precomputed-hash",
+]
+
[[package]]
name = "stringprep"
version = "0.1.2"
@@ -6287,6 +6556,27 @@ version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "20518fe4a4c9acf048008599e464deb21beeae3d3578418951a189c235a7a9a8"
+[[package]]
+name = "system-configuration"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d75182f12f490e953596550b65ee31bda7c8e043d9386174b353bda50838c3fd"
+dependencies = [
+ "bitflags",
+ "core-foundation",
+ "system-configuration-sys",
+]
+
+[[package]]
+name = "system-configuration-sys"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9"
+dependencies = [
+ "core-foundation-sys",
+ "libc",
+]
+
[[package]]
name = "table"
version = "0.1.0"
@@ -6360,6 +6650,17 @@ dependencies = [
"winapi",
]
+[[package]]
+name = "term"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c59df8ac95d96ff9bede18eb7300b0fda5e5d8d90960e76f8e14ae765eedbf1f"
+dependencies = [
+ "dirs-next",
+ "rustversion",
+ "winapi",
+]
+
[[package]]
name = "termcolor"
version = "1.1.3"
@@ -6379,6 +6680,15 @@ dependencies = [
"winapi",
]
+[[package]]
+name = "termios"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "411c5bf740737c7918b8b1fe232dca4dc9f8e754b8ad5e20966814001ed0ac6b"
+dependencies = [
+ "libc",
+]
+
[[package]]
name = "tests-integration"
version = "0.1.0"
@@ -7035,6 +7345,26 @@ dependencies = [
"unic-ucd-version",
]
+[[package]]
+name = "unic-normal"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f09d64d33589a94628bc2aeb037f35c2e25f3f049c7348b5aa5580b48e6bba62"
+dependencies = [
+ "unic-ucd-normal",
+]
+
+[[package]]
+name = "unic-ucd-age"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6c8cfdfe71af46b871dc6af2c24fcd360e2f3392ee4c5111877f2947f311671c"
+dependencies = [
+ "unic-char-property",
+ "unic-char-range",
+ "unic-ucd-version",
+]
+
[[package]]
name = "unic-ucd-bidi"
version = "0.9.0"
@@ -7058,6 +7388,15 @@ dependencies = [
"unic-ucd-version",
]
+[[package]]
+name = "unic-ucd-hangul"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "eb1dc690e19010e1523edb9713224cba5ef55b54894fe33424439ec9a40c0054"
+dependencies = [
+ "unic-ucd-version",
+]
+
[[package]]
name = "unic-ucd-ident"
version = "0.9.0"
@@ -7069,6 +7408,18 @@ dependencies = [
"unic-ucd-version",
]
+[[package]]
+name = "unic-ucd-normal"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "86aed873b8202d22b13859dda5fe7c001d271412c31d411fd9b827e030569410"
+dependencies = [
+ "unic-char-property",
+ "unic-char-range",
+ "unic-ucd-hangul",
+ "unic-ucd-version",
+]
+
[[package]]
name = "unic-ucd-version"
version = "0.9.0"
@@ -7184,8 +7535,22 @@ version = "1.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dd6469f4314d5f1ffec476e05f17cc9a78bc7a27a6a857842170bdf8d6f98d2f"
dependencies = [
+ "atomic",
"getrandom 0.2.7",
+ "rand 0.8.5",
"serde",
+ "uuid-macro-internal",
+]
+
+[[package]]
+name = "uuid-macro-internal"
+version = "1.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "73bc89f2894593e665241e0052c3791999e6787b7c4831daa0a5c2e637e276d8"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
]
[[package]]
@@ -7421,17 +7786,30 @@ version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
+[[package]]
+name = "windows"
+version = "0.39.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f1c4bd0a50ac6020f65184721f758dba47bb9fbc2133df715ec74a237b26794a"
+dependencies = [
+ "windows_aarch64_msvc 0.39.0",
+ "windows_i686_gnu 0.39.0",
+ "windows_i686_msvc 0.39.0",
+ "windows_x86_64_gnu 0.39.0",
+ "windows_x86_64_msvc 0.39.0",
+]
+
[[package]]
name = "windows-sys"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2"
dependencies = [
- "windows_aarch64_msvc",
- "windows_i686_gnu",
- "windows_i686_msvc",
- "windows_x86_64_gnu",
- "windows_x86_64_msvc",
+ "windows_aarch64_msvc 0.36.1",
+ "windows_i686_gnu 0.36.1",
+ "windows_i686_msvc 0.36.1",
+ "windows_x86_64_gnu 0.36.1",
+ "windows_x86_64_msvc 0.36.1",
]
[[package]]
@@ -7440,30 +7818,60 @@ version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47"
+[[package]]
+name = "windows_aarch64_msvc"
+version = "0.39.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ec7711666096bd4096ffa835238905bb33fb87267910e154b18b44eaabb340f2"
+
[[package]]
name = "windows_i686_gnu"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6"
+[[package]]
+name = "windows_i686_gnu"
+version = "0.39.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "763fc57100a5f7042e3057e7e8d9bdd7860d330070251a73d003563a3bb49e1b"
+
[[package]]
name = "windows_i686_msvc"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024"
+[[package]]
+name = "windows_i686_msvc"
+version = "0.39.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7bc7cbfe58828921e10a9f446fcaaf649204dcfe6c1ddd712c5eebae6bda1106"
+
[[package]]
name = "windows_x86_64_gnu"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1"
+[[package]]
+name = "windows_x86_64_gnu"
+version = "0.39.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6868c165637d653ae1e8dc4d82c25d4f97dd6605eaa8d784b5c6e0ab2a252b65"
+
[[package]]
name = "windows_x86_64_msvc"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680"
+[[package]]
+name = "windows_x86_64_msvc"
+version = "0.39.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5e4d40883ae9cae962787ca76ba76390ffa29214667a111db9e0a1ad8377e809"
+
[[package]]
name = "winreg"
version = "0.10.1"
@@ -7482,6 +7890,12 @@ dependencies = [
"tap",
]
+[[package]]
+name = "xml-rs"
+version = "0.8.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d2d7d3948613f75c98fd9328cfdcc45acc4d360655289d0a7d4ec931392200a3"
+
[[package]]
name = "zstd"
version = "0.10.2+zstd.1.5.2"
diff --git a/src/script/Cargo.toml b/src/script/Cargo.toml
index 7729afbb2d66..c248ddb98319 100644
--- a/src/script/Cargo.toml
+++ b/src/script/Cargo.toml
@@ -14,8 +14,10 @@ python = [
"dep:rustpython-parser",
"dep:rustpython-compiler",
"dep:rustpython-compiler-core",
- "dep:rustpython-bytecode",
+ "dep:rustpython-codegen",
"dep:rustpython-ast",
+ "dep:rustpython-pylib",
+ "dep:rustpython-stdlib",
"dep:paste",
]
@@ -39,13 +41,18 @@ futures = "0.3"
futures-util = "0.3"
paste = { version = "1.0", optional = true }
query = { path = "../query" }
-rustpython-ast = { git = "https://github.com/RustPython/RustPython", optional = true, rev = "02a1d1d" }
-rustpython-bytecode = { git = "https://github.com/RustPython/RustPython", optional = true, rev = "02a1d1d" }
-rustpython-compiler = { git = "https://github.com/RustPython/RustPython", optional = true, rev = "02a1d1d" }
-rustpython-compiler-core = { git = "https://github.com/RustPython/RustPython", optional = true, rev = "02a1d1d" }
-rustpython-parser = { git = "https://github.com/RustPython/RustPython", optional = true, rev = "02a1d1d" }
-rustpython-vm = { git = "https://github.com/RustPython/RustPython", optional = true, rev = "02a1d1d", features = [
+# TODO(discord9): This is a forked and tweaked version of RustPython, please update it to newest original RustPython After Update toolchain to 1.65
+rustpython-ast = { git = "https://github.com/discord9/RustPython", optional = true, rev = "183e8dab" }
+rustpython-compiler = { git = "https://github.com/discord9/RustPython", optional = true, rev = "183e8dab" }
+rustpython-compiler-core = { git = "https://github.com/discord9/RustPython", optional = true, rev = "183e8dab" }
+rustpython-codegen = { git = "https://github.com/discord9/RustPython", optional = true, rev = "183e8dab" }
+rustpython-parser = { git = "https://github.com/discord9/RustPython", optional = true, rev = "183e8dab" }
+rustpython-vm = { git = "https://github.com/discord9/RustPython", optional = true, rev = "183e8dab", features = [
"default",
+ "codegen",
+] }
+rustpython-stdlib = { git = "https://github.com/discord9/RustPython", optional = true, rev = "183e8dab" }
+rustpython-pylib = { git = "https://github.com/discord9/RustPython", optional = true, rev = "183e8dab", features = [
"freeze-stdlib",
] }
session = { path = "../session" }
diff --git a/src/script/src/python/builtins/test.rs b/src/script/src/python/builtins/test.rs
index 43ad8d4f5de0..8fdeb9ad9439 100644
--- a/src/script/src/python/builtins/test.rs
+++ b/src/script/src/python/builtins/test.rs
@@ -340,7 +340,6 @@ fn run_builtin_fn_testcases() {
let testcases: Vec<TestCase> = from_ron_string(&buf).expect("Fail to convert to testcases");
let cached_vm = rustpython_vm::Interpreter::with_init(Default::default(), |vm| {
vm.add_native_module("greptime", Box::new(greptime_builtin::make_module));
- // this can be in `.enter()` closure, but for clearity, put it in the `with_init()`
PyVector::make_class(&vm.ctx);
});
for (idx, case) in testcases.into_iter().enumerate() {
@@ -358,7 +357,7 @@ fn run_builtin_fn_testcases() {
let code_obj = vm
.compile(
&case.script,
- rustpython_vm::compile::Mode::BlockExpr,
+ rustpython_compiler_core::Mode::BlockExpr,
"<embedded>".to_owned(),
)
.map_err(|err| vm.new_syntax_error(&err))
@@ -466,7 +465,7 @@ fn test_vm() {
r#"
from udf_builtins import *
sin(values)"#,
- rustpython_vm::compile::Mode::BlockExpr,
+ rustpython_compiler_core::Mode::BlockExpr,
"<embedded>".to_owned(),
)
.map_err(|err| vm.new_syntax_error(&err))
diff --git a/src/script/src/python/coprocessor.rs b/src/script/src/python/coprocessor.rs
index bb32494dbfd6..3bc5c39f2a5c 100644
--- a/src/script/src/python/coprocessor.rs
+++ b/src/script/src/python/coprocessor.rs
@@ -16,7 +16,7 @@ pub mod compile;
pub mod parse;
use std::cell::RefCell;
-use std::collections::HashMap;
+use std::collections::{HashMap, HashSet};
use std::result::Result as StdResult;
use std::sync::Arc;
@@ -29,7 +29,7 @@ use datatypes::arrow::compute::cast::CastOptions;
use datatypes::arrow::datatypes::{DataType, Field, Schema as ArrowSchema};
use datatypes::schema::Schema;
use datatypes::vectors::{BooleanVector, Helper, StringVector, Vector, VectorRef};
-use rustpython_bytecode::CodeObject;
+use rustpython_compiler_core::CodeObject;
use rustpython_vm as vm;
use rustpython_vm::class::PyClassImpl;
use rustpython_vm::AsObject;
@@ -430,7 +430,24 @@ pub(crate) fn init_interpreter() -> Arc<Interpreter> {
INTERPRETER.with(|i| {
i.borrow_mut()
.get_or_insert_with(|| {
+ // we limit stdlib imports for safety reason, i.e `fcntl` is not allowed here
+ let native_module_allow_list = HashSet::from([
+ "array", "cmath", "gc", "hashlib", "_json", "_random", "math",
+ ]);
let interpreter = Arc::new(vm::Interpreter::with_init(Default::default(), |vm| {
+ // not using full stdlib to prevent security issue, instead filter out a few simple util module
+ vm.add_native_modules(
+ rustpython_stdlib::get_module_inits()
+ .into_iter()
+ .filter(|(k, _)| native_module_allow_list.contains(k.as_ref())),
+ );
+
+ // We are freezing the stdlib to include the standard library inside the binary.
+ // so according to this issue:
+ // https://github.com/RustPython/RustPython/issues/4292
+ // add this line for stdlib, so rustpython can found stdlib's python part in bytecode format
+ vm.add_frozen(rustpython_pylib::frozen_stdlib());
+ // add our own custom datatype and module
PyVector::make_class(&vm.ctx);
vm.add_native_module("greptime", Box::new(greptime_builtin::make_module));
}));
diff --git a/src/script/src/python/coprocessor/compile.rs b/src/script/src/python/coprocessor/compile.rs
index f1321d1a0e4b..8b8a10d228ee 100644
--- a/src/script/src/python/coprocessor/compile.rs
+++ b/src/script/src/python/coprocessor/compile.rs
@@ -13,9 +13,9 @@
// limitations under the License.
//! compile script to code object
-
-use rustpython_bytecode::CodeObject;
-use rustpython_compiler_core::compile as python_compile;
+use rustpython_codegen::compile::compile_top;
+use rustpython_compiler::{CompileOpts, Mode};
+use rustpython_compiler_core::CodeObject;
use rustpython_parser::ast::{Located, Location};
use rustpython_parser::{ast, parser};
use snafu::ResultExt;
@@ -73,7 +73,8 @@ fn gen_call(name: &str, deco_args: &DecoratorArgs, loc: &Location) -> ast::Stmt<
/// strip type annotation
pub fn compile_script(name: &str, deco_args: &DecoratorArgs, script: &str) -> Result<CodeObject> {
// note that it's important to use `parser::Mode::Interactive` so the ast can be compile to return a result instead of return None in eval mode
- let mut top = parser::parse(script, parser::Mode::Interactive).context(PyParseSnafu)?;
+ let mut top =
+ parser::parse(script, parser::Mode::Interactive, "<embedded>").context(PyParseSnafu)?;
// erase decorator
if let ast::Mod::Interactive { body } = &mut top {
let stmts = body;
@@ -122,11 +123,11 @@ pub fn compile_script(name: &str, deco_args: &DecoratorArgs, script: &str) -> Re
);
}
// use `compile::Mode::BlockExpr` so it return the result of statement
- python_compile::compile_top(
+ compile_top(
&top,
"<embedded>".to_owned(),
- python_compile::Mode::BlockExpr,
- python_compile::CompileOpts { optimize: 0 },
+ Mode::BlockExpr,
+ CompileOpts { optimize: 0 },
)
.context(PyCompileSnafu)
}
diff --git a/src/script/src/python/coprocessor/parse.rs b/src/script/src/python/coprocessor/parse.rs
index 50bb0e326449..324b5f7fc837 100644
--- a/src/script/src/python/coprocessor/parse.rs
+++ b/src/script/src/python/coprocessor/parse.rs
@@ -99,7 +99,7 @@ fn try_into_datatype(ty: &str, loc: &Location) -> Result<Option<DataType>> {
"_" => Ok(None),
// note the different between "_" and _
_ => fail_parse_error!(
- format!("Unknown datatype: {ty} at {}", loc),
+ format!("Unknown datatype: {ty} at {:?}", loc),
Some(loc.to_owned())
),
}
@@ -427,7 +427,7 @@ fn get_return_annotations(rets: &ast::Expr<()>) -> Result<Vec<Option<AnnotationI
/// parse script and return `Coprocessor` struct with info extract from ast
pub fn parse_and_compile_copr(script: &str) -> Result<Coprocessor> {
- let python_ast = parser::parse_program(script).context(PyParseSnafu)?;
+ let python_ast = parser::parse_program(script, "<embedded>").context(PyParseSnafu)?;
let mut coprocessor = None;
diff --git a/src/script/src/python/error.rs b/src/script/src/python/error.rs
index 57499befaf1b..9a77984149cf 100644
--- a/src/script/src/python/error.rs
+++ b/src/script/src/python/error.rs
@@ -18,7 +18,7 @@ use datafusion::error::DataFusionError;
use datatypes::arrow::error::ArrowError;
use datatypes::error::Error as DataTypeError;
use query::error::Error as QueryError;
-use rustpython_compiler_core::error::CompileError as CoreCompileError;
+use rustpython_codegen::error::CodegenError;
use rustpython_parser::ast::Location;
use rustpython_parser::error::ParseError;
pub use snafu::ensure;
@@ -54,7 +54,7 @@ pub enum Error {
#[snafu(display("Failed to compile script, source: {}", source))]
PyCompile {
backtrace: Backtrace,
- source: CoreCompileError,
+ source: CodegenError,
},
/// rustpython problem, using python virtual machines' backtrace instead
@@ -76,7 +76,7 @@ pub enum Error {
/// errors in coprocessors' parse check for types and etc.
#[snafu(display("Coprocessor error: {} {}.", reason,
if let Some(loc) = loc{
- format!("at {loc}")
+ format!("at {loc:?}")
}else{
"".into()
}))]
diff --git a/src/script/src/python/test.rs b/src/script/src/python/test.rs
index 043dcba898ad..5790ce281c5b 100644
--- a/src/script/src/python/test.rs
+++ b/src/script/src/python/test.rs
@@ -192,7 +192,7 @@ fn test_type_anno() {
def a(cpu, mem: vector[f64])->(vector[f64|None], vector[f64], vector[_], vector[ _ | None]):
return cpu + mem, cpu - mem, cpu * mem, cpu / mem
"#;
- let pyast = parser::parse(python_source, parser::Mode::Interactive).unwrap();
+ let pyast = parser::parse(python_source, parser::Mode::Interactive, "<embedded>").unwrap();
let copr = parse_and_compile_copr(python_source);
dbg!(copr);
}
diff --git a/src/script/src/python/testcases.ron b/src/script/src/python/testcases.ron
index 91d736070ec9..8e2415429ad4 100644
--- a/src/script/src/python/testcases.ron
+++ b/src/script/src/python/testcases.ron
@@ -462,5 +462,72 @@ def a(cpu: vector[f32], mem: vector[f64])->(vector[f64], vector[f64]):
predicate: ParseIsErr(
reason: "Expect a function definition, but found a"
)
- )
+ ),
+ (
+ // constant column(int)
+ name: "test_import_stdlib",
+ code: r#"
+@copr(args=["cpu", "mem"], returns=["perf", "what"])
+def a(cpu: vector[f32], mem: vector[f64])->(vector[f64|None],
+ vector[f32]):
+ # test if using allow list for stdlib damage unrelated module
+ from collections import deque
+ import math
+ math.ceil(0.2)
+ import string
+ return cpu + mem, 1
+"#,
+ predicate: ExecIsOk(
+ fields: [
+ (
+ datatype: Some(Float64),
+ is_nullable: true
+ ),
+ (
+ datatype: Some(Float32),
+ is_nullable: false
+ ),
+ ],
+ columns: [
+ (
+ ty: Float64,
+ len: 4
+ ),
+ (
+ ty: Float32,
+ len: 4
+ )
+ ]
+ )
+ ),
+ (
+ // constant column(int)
+ name: "test_neg_import_stdlib",
+ code: r#"
+@copr(args=["cpu", "mem"], returns=["perf", "what"])
+def a(cpu: vector[f32], mem: vector[f64])->(vector[f64|None],
+ vector[f32]):
+ # test if module not in allow list can't be imported
+ import fcntl
+ return cpu + mem, 1
+"#,
+ predicate: ExecIsErr(
+ reason: "No module named 'fcntl'"
+ )
+ ),
+ (
+ // constant column(int)
+ name: "test_neg_import_depend_stdlib",
+ code: r#"
+@copr(args=["cpu", "mem"], returns=["perf", "what"])
+def a(cpu: vector[f32], mem: vector[f64])->(vector[f64|None],
+ vector[f32]):
+ # test if module not in allow list can't be imported
+ import mailbox
+ return cpu + mem, 1
+"#,
+ predicate: ExecIsErr(
+ reason: "ModuleNotFoundError: No module named"
+ )
+ ),
]
diff --git a/src/script/src/python/vector.rs b/src/script/src/python/vector.rs
index 4a432df60266..951ad2f9537e 100644
--- a/src/script/src/python/vector.rs
+++ b/src/script/src/python/vector.rs
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::mem::ManuallyDrop;
use std::ops::Deref;
use std::sync::Arc;
@@ -35,7 +36,7 @@ use rustpython_vm::protocol::{PyMappingMethods, PySequenceMethods};
use rustpython_vm::sliceable::{SaturatedSlice, SequenceIndex, SequenceIndexOp};
use rustpython_vm::types::{AsMapping, AsSequence, Comparable, PyComparisonOp};
use rustpython_vm::{
- pyclass, pyimpl, AsObject, PyObject, PyObjectRef, PyPayload, PyRef, PyResult, VirtualMachine,
+ pyclass, AsObject, PyObject, PyObjectRef, PyPayload, PyRef, PyResult, VirtualMachine,
};
use crate::python::utils::{is_instance, PyVectorRef};
@@ -179,7 +180,7 @@ impl AsRef<PyVector> for PyVector {
}
/// PyVector type wraps a greptime vector, impl multiply/div/add/sub opeerators etc.
-#[pyimpl(with(AsMapping, AsSequence, Comparable))]
+#[pyclass(with(AsMapping, AsSequence, Comparable))]
impl PyVector {
pub(crate) fn new(
iterable: OptionalArg<PyObjectRef>,
@@ -1012,9 +1013,14 @@ impl Comparable for PyVector {
let ret = ret.into_pyobject(vm);
Ok(Either::A(ret))
} else {
+ // Safety: we are manually drop this ref, so no problem here
+ let r = unsafe {
+ let ptr = std::ptr::NonNull::from(zelf);
+ ManuallyDrop::new(PyObjectRef::from_raw(ptr.as_ptr()))
+ };
Err(vm.new_type_error(format!(
- "unexpected payload {} for {}",
- zelf,
+ "unexpected payload {:?} for {}",
+ r,
op.method_name(&vm.ctx).as_str()
)))
}
@@ -1134,6 +1140,7 @@ pub mod tests {
.as_object()
.set_item("a", vm.new_pyobj(a), vm)
.expect("failed");
+
scope
.locals
.as_object()
@@ -1151,7 +1158,7 @@ pub mod tests {
let code_obj = vm
.compile(
script,
- rustpython_vm::compile::Mode::BlockExpr,
+ rustpython_compiler_core::Mode::BlockExpr,
"<embedded>".to_owned(),
)
.map_err(|err| vm.new_syntax_error(&err))?;
| chore | update RustPython dependence(With a tweaked fork) (#655) |
| 302d7ec41b24d7d95cbeaf2382e17f02402231a6 | 2023-01-20 06:06:41 | Ning Sun | ci: use ubuntu 2004 to build weekly (#895) | false |
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 05b06fc4d83b..8a7d94ee10b3 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -28,10 +28,10 @@ jobs:
# The file format is greptime-<os>-<arch>
include:
- arch: x86_64-unknown-linux-gnu
- os: ubuntu-latest-16-cores
+ os: ubuntu-2004-16-cores
file: greptime-linux-amd64
- arch: aarch64-unknown-linux-gnu
- os: ubuntu-latest-16-cores
+ os: ubuntu-2004-16-cores
file: greptime-linux-arm64
- arch: aarch64-apple-darwin
os: macos-latest
diff --git a/Cargo.toml b/Cargo.toml
index fc458a195d5f..814d6b79f48e 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -74,6 +74,6 @@ debug = true
[profile.weekly]
inherits = "release"
strip = true
-lto = true
+lto = "thin"
debug = false
incremental = false
| ci | use ubuntu 2004 to build weekly (#895) |
| e56dd20426f7a5843eb10f3dd86b248b1e84b530 | 2025-01-15 12:12:24 | yihong | fix: panic and interval when do not have keyword `interval` (#5339) | false |
diff --git a/src/sql/src/error.rs b/src/sql/src/error.rs
index 09b8eb5cad7a..25cb6578de18 100644
--- a/src/sql/src/error.rs
+++ b/src/sql/src/error.rs
@@ -132,6 +132,12 @@ pub enum Error {
#[snafu(implicit)]
location: Location,
},
+ #[snafu(display("ConcreteDataType not supported yet: {:?}", t))]
+ ConcreteTypeNotSupported {
+ t: ConcreteDataType,
+ #[snafu(implicit)]
+ location: Location,
+ },
#[snafu(display("Failed to parse value: {}", msg))]
ParseSqlValue {
@@ -355,6 +361,7 @@ impl ErrorExt for Error {
| InvalidSql { .. }
| ParseSqlValue { .. }
| SqlTypeNotSupported { .. }
+ | ConcreteTypeNotSupported { .. }
| UnexpectedToken { .. }
| InvalidDefault { .. } => StatusCode::InvalidSyntax,
diff --git a/src/sql/src/statements.rs b/src/sql/src/statements.rs
index 90db401cbaa6..fb24685e590d 100644
--- a/src/sql/src/statements.rs
+++ b/src/sql/src/statements.rs
@@ -136,9 +136,10 @@ fn parse_string_to_value(
let v = parse_string_to_vector_type_value(&s, Some(d.dim)).context(DatatypeSnafu)?;
Ok(Value::Binary(v.into()))
}
- _ => {
- unreachable!()
+ _ => ParseSqlValueSnafu {
+ msg: format!("Failed to parse {s} to {data_type} value"),
}
+ .fail(),
}
}
@@ -430,7 +431,13 @@ fn parse_column_default_constraint(
}
.fail();
}
- _ => unreachable!(),
+ _ => {
+ return UnsupportedDefaultValueSnafu {
+ column_name,
+ expr: Expr::Value(SqlValue::Null),
+ }
+ .fail();
+ }
};
Ok(Some(default_constraint))
@@ -680,9 +687,10 @@ pub fn concrete_data_type_to_sql_data_type(data_type: &ConcreteDataType) -> Resu
ConcreteDataType::Duration(_)
| ConcreteDataType::Null(_)
| ConcreteDataType::List(_)
- | ConcreteDataType::Dictionary(_) => {
- unreachable!()
+ | ConcreteDataType::Dictionary(_) => error::ConcreteTypeNotSupportedSnafu {
+ t: data_type.clone(),
}
+ .fail(),
}
}
diff --git a/tests/cases/standalone/common/insert/insert_wrong_type.result b/tests/cases/standalone/common/insert/insert_wrong_type.result
new file mode 100644
index 000000000000..08768c3fb62f
--- /dev/null
+++ b/tests/cases/standalone/common/insert/insert_wrong_type.result
@@ -0,0 +1,19 @@
+-- test for issue #3235
+CREATE TABLE b(i interval, ts timestamp time index);
+
+Affected Rows: 0
+
+-- should fail
+INSERT INTO b VALUES ('1 year', 1000);
+
+Error: 2000(InvalidSyntax), Failed to parse value: Failed to parse 1 year to IntervalMonthDayNano value
+
+-- success
+INSERT INTO b VALUES (interval '1 year', 1000);
+
+Affected Rows: 1
+
+DROP TABLE b;
+
+Affected Rows: 0
+
diff --git a/tests/cases/standalone/common/insert/insert_wrong_type.sql b/tests/cases/standalone/common/insert/insert_wrong_type.sql
new file mode 100644
index 000000000000..df3ab6bc8375
--- /dev/null
+++ b/tests/cases/standalone/common/insert/insert_wrong_type.sql
@@ -0,0 +1,8 @@
+-- test for issue #3235
+CREATE TABLE b(i interval, ts timestamp time index);
+-- should fail
+INSERT INTO b VALUES ('1 year', 1000);
+-- success
+INSERT INTO b VALUES (interval '1 year', 1000);
+
+DROP TABLE b;
| fix | panic and interval when do not have keyword `interval` (#5339) |
| 2ef0d06cdbd7ce434741b04ca466c0365cd03aa9 | 2023-07-19 16:57:49 | JeremyHi | feat: status_code in response header (#1982) | false |
diff --git a/src/client/src/database.rs b/src/client/src/database.rs
index e2bfed224e23..d04eda47f94a 100644
--- a/src/client/src/database.rs
+++ b/src/client/src/database.rs
@@ -17,9 +17,9 @@ use api::v1::ddl_request::Expr as DdlExpr;
use api::v1::greptime_request::Request;
use api::v1::query_request::Query;
use api::v1::{
- greptime_response, AffectedRows, AlterExpr, AuthHeader, CompactTableExpr, CreateTableExpr,
- DdlRequest, DeleteRequest, DropTableExpr, FlushTableExpr, GreptimeRequest, InsertRequests,
- PromRangeQuery, QueryRequest, RequestHeader,
+ AlterExpr, AuthHeader, CompactTableExpr, CreateTableExpr, DdlRequest, DeleteRequest,
+ DropTableExpr, FlushTableExpr, GreptimeRequest, InsertRequests, PromRangeQuery, QueryRequest,
+ RequestHeader,
};
use arrow_flight::{FlightData, Ticket};
use common_error::ext::{BoxedError, ErrorExt};
@@ -28,12 +28,10 @@ use common_query::Output;
use common_telemetry::{logging, timer};
use futures_util::{TryFutureExt, TryStreamExt};
use prost::Message;
-use snafu::{ensure, OptionExt, ResultExt};
+use snafu::{ensure, ResultExt};
-use crate::error::{
- ConvertFlightDataSnafu, IllegalDatabaseResponseSnafu, IllegalFlightMessagesSnafu,
-};
-use crate::{error, metrics, Client, Result, StreamInserter};
+use crate::error::{ConvertFlightDataSnafu, IllegalFlightMessagesSnafu, ServerSnafu};
+use crate::{error, from_grpc_response, metrics, Client, Result, StreamInserter};
#[derive(Clone, Debug, Default)]
pub struct Database {
@@ -142,16 +140,8 @@ impl Database {
async fn handle(&self, request: Request) -> Result<u32> {
let mut client = self.client.make_database_client()?.inner;
let request = self.to_rpc_request(request);
- let response = client
- .handle(request)
- .await?
- .into_inner()
- .response
- .context(IllegalDatabaseResponseSnafu {
- err_msg: "GreptimeResponse is empty",
- })?;
- let greptime_response::Response::AffectedRows(AffectedRows { value }) = response;
- Ok(value)
+ let response = client.handle(request).await?.into_inner();
+ from_grpc_response(response)
}
#[inline]
@@ -264,7 +254,7 @@ impl Database {
let e: error::Error = e.into();
let code = e.status_code();
let msg = e.to_string();
- error::ServerSnafu { code, msg }
+ ServerSnafu { code, msg }
.fail::<()>()
.map_err(BoxedError::new)
.context(error::FlightGetSnafu {
diff --git a/src/client/src/lib.rs b/src/client/src/lib.rs
index 0c1c2cd600f5..45ae26440b8a 100644
--- a/src/client/src/lib.rs
+++ b/src/client/src/lib.rs
@@ -21,9 +21,42 @@ mod metrics;
mod stream_insert;
pub use api;
+use api::v1::greptime_response::Response;
+use api::v1::{AffectedRows, GreptimeResponse};
pub use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
+use common_error::status_code::StatusCode;
+use snafu::OptionExt;
pub use self::client::Client;
pub use self::database::Database;
pub use self::error::{Error, Result};
pub use self::stream_insert::StreamInserter;
+use crate::error::{IllegalDatabaseResponseSnafu, ServerSnafu};
+
+pub fn from_grpc_response(response: GreptimeResponse) -> Result<u32> {
+ let header = response.header.context(IllegalDatabaseResponseSnafu {
+ err_msg: "missing header",
+ })?;
+ let status = header.status.context(IllegalDatabaseResponseSnafu {
+ err_msg: "missing status",
+ })?;
+
+ if StatusCode::is_success(status.status_code) {
+ let res = response.response.context(IllegalDatabaseResponseSnafu {
+ err_msg: "missing response",
+ })?;
+ match res {
+ Response::AffectedRows(AffectedRows { value }) => Ok(value),
+ }
+ } else {
+ let status_code =
+ StatusCode::from_u32(status.status_code).context(IllegalDatabaseResponseSnafu {
+ err_msg: format!("invalid status: {:?}", status),
+ })?;
+ ServerSnafu {
+ code: status_code,
+ msg: status.err_msg,
+ }
+ .fail()
+ }
+}
diff --git a/src/client/src/stream_insert.rs b/src/client/src/stream_insert.rs
index 943f9a0ad7d5..0701490101cf 100644
--- a/src/client/src/stream_insert.rs
+++ b/src/client/src/stream_insert.rs
@@ -15,17 +15,16 @@
use api::v1::greptime_database_client::GreptimeDatabaseClient;
use api::v1::greptime_request::Request;
use api::v1::{
- greptime_response, AffectedRows, AuthHeader, GreptimeRequest, GreptimeResponse, InsertRequest,
- InsertRequests, RequestHeader,
+ AuthHeader, GreptimeRequest, GreptimeResponse, InsertRequest, InsertRequests, RequestHeader,
};
-use snafu::OptionExt;
use tokio::sync::mpsc;
use tokio::task::JoinHandle;
use tokio_stream::wrappers::ReceiverStream;
use tonic::transport::Channel;
use tonic::{Response, Status};
-use crate::error::{self, IllegalDatabaseResponseSnafu, Result};
+use crate::error::{self, Result};
+use crate::from_grpc_response;
/// A structure that provides some methods for streaming data insert.
///
@@ -89,17 +88,8 @@ impl StreamInserter {
drop(self.sender);
let response = self.join.await.unwrap()?;
-
- let response = response
- .into_inner()
- .response
- .context(IllegalDatabaseResponseSnafu {
- err_msg: "GreptimeResponse is empty",
- })?;
-
- let greptime_response::Response::AffectedRows(AffectedRows { value }) = response;
-
- Ok(value)
+ let response = response.into_inner();
+ from_grpc_response(response)
}
fn to_rpc_request(&self, request: Request) -> GreptimeRequest {
diff --git a/src/common/error/src/status_code.rs b/src/common/error/src/status_code.rs
index c43f908048ed..81f8daa15751 100644
--- a/src/common/error/src/status_code.rs
+++ b/src/common/error/src/status_code.rs
@@ -154,6 +154,44 @@ impl StatusCode {
| StatusCode::AccessDenied => false,
}
}
+
+ pub fn from_u32(value: u32) -> Option<Self> {
+ match value {
+ v if v == StatusCode::Success as u32 => Some(StatusCode::Success),
+ v if v == StatusCode::Unknown as u32 => Some(StatusCode::Unknown),
+ v if v == StatusCode::Unsupported as u32 => Some(StatusCode::Unsupported),
+ v if v == StatusCode::Unexpected as u32 => Some(StatusCode::Unexpected),
+ v if v == StatusCode::Internal as u32 => Some(StatusCode::Internal),
+ v if v == StatusCode::InvalidArguments as u32 => Some(StatusCode::InvalidArguments),
+ v if v == StatusCode::Cancelled as u32 => Some(StatusCode::Cancelled),
+ v if v == StatusCode::InvalidSyntax as u32 => Some(StatusCode::InvalidSyntax),
+ v if v == StatusCode::PlanQuery as u32 => Some(StatusCode::PlanQuery),
+ v if v == StatusCode::EngineExecuteQuery as u32 => Some(StatusCode::EngineExecuteQuery),
+ v if v == StatusCode::TableAlreadyExists as u32 => Some(StatusCode::TableAlreadyExists),
+ v if v == StatusCode::TableNotFound as u32 => Some(StatusCode::TableNotFound),
+ v if v == StatusCode::TableColumnNotFound as u32 => {
+ Some(StatusCode::TableColumnNotFound)
+ }
+ v if v == StatusCode::TableColumnExists as u32 => Some(StatusCode::TableColumnExists),
+ v if v == StatusCode::DatabaseNotFound as u32 => Some(StatusCode::DatabaseNotFound),
+ v if v == StatusCode::StorageUnavailable as u32 => Some(StatusCode::StorageUnavailable),
+ v if v == StatusCode::RuntimeResourcesExhausted as u32 => {
+ Some(StatusCode::RuntimeResourcesExhausted)
+ }
+ v if v == StatusCode::RateLimited as u32 => Some(StatusCode::RateLimited),
+ v if v == StatusCode::UserNotFound as u32 => Some(StatusCode::UserNotFound),
+ v if v == StatusCode::UnsupportedPasswordType as u32 => {
+ Some(StatusCode::UnsupportedPasswordType)
+ }
+ v if v == StatusCode::UserPasswordMismatch as u32 => {
+ Some(StatusCode::UserPasswordMismatch)
+ }
+ v if v == StatusCode::AuthHeaderNotFound as u32 => Some(StatusCode::AuthHeaderNotFound),
+ v if v == StatusCode::InvalidAuthHeader as u32 => Some(StatusCode::InvalidAuthHeader),
+ v if v == StatusCode::AccessDenied as u32 => Some(StatusCode::AccessDenied),
+ _ => None,
+ }
+ }
}
impl fmt::Display for StatusCode {
diff --git a/src/servers/src/grpc/database.rs b/src/servers/src/grpc/database.rs
index edf214bab0c5..adcc6273a599 100644
--- a/src/servers/src/grpc/database.rs
+++ b/src/servers/src/grpc/database.rs
@@ -16,8 +16,10 @@ use std::sync::Arc;
use api::v1::greptime_database_server::GreptimeDatabase;
use api::v1::greptime_response::Response as RawResponse;
-use api::v1::{AffectedRows, GreptimeRequest, GreptimeResponse};
+use api::v1::{AffectedRows, GreptimeRequest, GreptimeResponse, ResponseHeader};
use async_trait::async_trait;
+use common_error::ext::ErrorExt;
+use common_error::status_code::StatusCode;
use common_query::Output;
use futures::StreamExt;
use tonic::{Request, Response, Status, Streaming};
@@ -44,13 +46,27 @@ impl GreptimeDatabase for DatabaseService {
let request = request.into_inner();
let output = self.handler.handle_request(request).await?;
let response = match output {
- Output::AffectedRows(rows) => GreptimeResponse {
- header: None,
+ Ok(Output::AffectedRows(rows)) => GreptimeResponse {
+ header: Some(ResponseHeader {
+ status: Some(api::v1::Status {
+ status_code: StatusCode::Success as _,
+ ..Default::default()
+ }),
+ }),
response: Some(RawResponse::AffectedRows(AffectedRows { value: rows as _ })),
},
- Output::Stream(_) | Output::RecordBatches(_) => {
+ Ok(Output::Stream(_)) | Ok(Output::RecordBatches(_)) => {
return Err(Status::unimplemented("GreptimeDatabase::Handle for query"));
}
+ Err(e) => GreptimeResponse {
+ header: Some(ResponseHeader {
+ status: Some(api::v1::Status {
+ status_code: e.status_code() as _,
+ err_msg: e.to_string(),
+ }),
+ }),
+ response: None,
+ },
};
Ok(Response::new(response))
}
@@ -66,17 +82,26 @@ impl GreptimeDatabase for DatabaseService {
let request = request?;
let output = self.handler.handle_request(request).await?;
match output {
- Output::AffectedRows(rows) => affected_rows += rows,
- Output::Stream(_) | Output::RecordBatches(_) => {
+ Ok(Output::AffectedRows(rows)) => affected_rows += rows,
+ Ok(Output::Stream(_)) | Ok(Output::RecordBatches(_)) => {
return Err(Status::unimplemented(
"GreptimeDatabase::HandleRequests for query",
));
}
+ Err(e) => {
+ // We directly convert it to a tonic error and fail immediately in stream.
+ return Err(e.into());
+ }
}
}
let response = GreptimeResponse {
- header: None,
+ header: Some(ResponseHeader {
+ status: Some(api::v1::Status {
+ status_code: StatusCode::Success as _,
+ ..Default::default()
+ }),
+ }),
response: Some(RawResponse::AffectedRows(AffectedRows {
value: affected_rows as u32,
})),
diff --git a/src/servers/src/grpc/flight.rs b/src/servers/src/grpc/flight.rs
index 0b793d98554d..826de009997a 100644
--- a/src/servers/src/grpc/flight.rs
+++ b/src/servers/src/grpc/flight.rs
@@ -89,7 +89,7 @@ impl FlightService for FlightHandler {
let request =
GreptimeRequest::decode(ticket.as_ref()).context(error::InvalidFlightTicketSnafu)?;
- let output = self.handler.handle_request(request).await?;
+ let output = self.handler.handle_request(request).await??;
let stream = to_flight_data_stream(output);
Ok(Response::new(stream))
diff --git a/src/servers/src/grpc/handler.rs b/src/servers/src/grpc/handler.rs
index ea8234fe8d73..1d4e80c1571c 100644
--- a/src/servers/src/grpc/handler.rs
+++ b/src/servers/src/grpc/handler.rs
@@ -26,11 +26,12 @@ use common_telemetry::logging;
use metrics::{histogram, increment_counter};
use session::context::{QueryContext, QueryContextRef};
use snafu::{OptionExt, ResultExt};
-use tonic::Status;
use crate::auth::{Identity, Password, UserProviderRef};
use crate::error::Error::UnsupportedAuthScheme;
-use crate::error::{AuthSnafu, InvalidQuerySnafu, JoinTaskSnafu, NotFoundAuthHeaderSnafu};
+use crate::error::{
+ AuthSnafu, InvalidQuerySnafu, JoinTaskSnafu, NotFoundAuthHeaderSnafu, Result as InternalResult,
+};
use crate::grpc::TonicResult;
use crate::metrics::{
METRIC_AUTH_FAILURE, METRIC_CODE_LABEL, METRIC_SERVER_GRPC_DB_REQUEST_TIMER,
@@ -57,7 +58,10 @@ impl GreptimeRequestHandler {
}
}
- pub(crate) async fn handle_request(&self, request: GreptimeRequest) -> TonicResult<Output> {
+ pub(crate) async fn handle_request(
+ &self,
+ request: GreptimeRequest,
+ ) -> TonicResult<InternalResult<Output>> {
let query = request.request.context(InvalidQuerySnafu {
reason: "Expecting non-empty GreptimeRequest.",
})?;
@@ -65,7 +69,7 @@ impl GreptimeRequestHandler {
let header = request.header.as_ref();
let query_ctx = create_query_context(header);
- self.auth(header, &query_ctx).await?;
+ let _ = self.auth(header, &query_ctx).await?;
let handler = self.handler.clone();
let request_type = request_type(&query);
@@ -94,7 +98,7 @@ impl GreptimeRequestHandler {
let output = handle.await.context(JoinTaskSnafu).map_err(|e| {
timer.record(e.status_code());
e
- })??;
+ })?;
Ok(output)
}
@@ -102,8 +106,8 @@ impl GreptimeRequestHandler {
&self,
header: Option<&RequestHeader>,
query_ctx: &QueryContextRef,
- ) -> TonicResult<()> {
- let Some(user_provider) = self.user_provider.as_ref() else { return Ok(()) };
+ ) -> TonicResult<InternalResult<()>> {
+ let Some(user_provider) = self.user_provider.as_ref() else { return Ok(Ok(())) };
let auth_scheme = header
.and_then(|header| {
@@ -114,7 +118,7 @@ impl GreptimeRequestHandler {
})
.context(NotFoundAuthHeaderSnafu)?;
- let _ = match auth_scheme {
+ let res = match auth_scheme {
AuthScheme::Basic(Basic { username, password }) => user_provider
.auth(
Identity::UserId(&username, None),
@@ -128,14 +132,15 @@ impl GreptimeRequestHandler {
name: "Token AuthScheme".to_string(),
}),
}
+ .map(|_| ())
.map_err(|e| {
increment_counter!(
METRIC_AUTH_FAILURE,
&[(METRIC_CODE_LABEL, format!("{}", e.status_code()))]
);
- Status::unauthenticated(e.to_string())
- })?;
- Ok(())
+ e
+ });
+ Ok(res)
}
}
diff --git a/src/servers/src/grpc/prom_query_gateway.rs b/src/servers/src/grpc/prom_query_gateway.rs
index eced6644ada5..6a93acab0acf 100644
--- a/src/servers/src/grpc/prom_query_gateway.rs
+++ b/src/servers/src/grpc/prom_query_gateway.rs
@@ -22,6 +22,7 @@ use api::v1::promql_request::Promql;
use api::v1::{PromqlRequest, PromqlResponse, ResponseHeader};
use async_trait::async_trait;
use common_error::ext::ErrorExt;
+use common_error::status_code::StatusCode;
use common_telemetry::timer;
use common_time::util::current_time_rfc3339;
use promql_parser::parser::ValueType;
@@ -80,7 +81,12 @@ impl PrometheusGateway for PrometheusGatewayService {
let json_bytes = serde_json::to_string(&json_response).unwrap().into_bytes();
let response = Response::new(PromqlResponse {
- header: Some(ResponseHeader { status: None }),
+ header: Some(ResponseHeader {
+ status: Some(api::v1::Status {
+ status_code: StatusCode::Success as _,
+ ..Default::default()
+ }),
+ }),
body: json_bytes,
});
Ok(response)
diff --git a/tests/cases/distributed/alter/drop_col_not_null_next.result b/tests/cases/distributed/alter/drop_col_not_null_next.result
new file mode 100644
index 000000000000..7e58c5e63517
--- /dev/null
+++ b/tests/cases/distributed/alter/drop_col_not_null_next.result
@@ -0,0 +1,43 @@
+CREATE TABLE test(i BIGINT TIME INDEX, j INTEGER, k INTEGER NOT NULL);
+
+Affected Rows: 0
+
+INSERT INTO test VALUES (1, 1, 11), (2, 2, 12);
+
+Affected Rows: 2
+
+SELECT * FROM test;
+
++---+---+----+
+| i | j | k |
++---+---+----+
+| 1 | 1 | 11 |
+| 2 | 2 | 12 |
++---+---+----+
+
+ALTER TABLE test DROP COLUMN j;
+
+Affected Rows: 0
+
+INSERT INTO test VALUES (3, NULL);
+
+Error: 1004(InvalidArguments), Failed to insert value to table: greptime.public.test, source: Failed to operate table, source: Column k is not null but input has null
+
+INSERT INTO test VALUES (3, 13);
+
+Affected Rows: 1
+
+SELECT * FROM test;
+
++---+----+
+| i | k |
++---+----+
+| 1 | 11 |
+| 2 | 12 |
+| 3 | 13 |
++---+----+
+
+DROP TABLE test;
+
+Affected Rows: 1
+
diff --git a/tests/cases/standalone/common/alter/drop_col_not_null_next.sql b/tests/cases/distributed/alter/drop_col_not_null_next.sql
similarity index 100%
rename from tests/cases/standalone/common/alter/drop_col_not_null_next.sql
rename to tests/cases/distributed/alter/drop_col_not_null_next.sql
diff --git a/tests/cases/standalone/common/alter/drop_col_not_null_next.result b/tests/cases/standalone/alter/drop_col_not_null_next.result
similarity index 100%
rename from tests/cases/standalone/common/alter/drop_col_not_null_next.result
rename to tests/cases/standalone/alter/drop_col_not_null_next.result
diff --git a/tests/cases/standalone/alter/drop_col_not_null_next.sql b/tests/cases/standalone/alter/drop_col_not_null_next.sql
new file mode 100644
index 000000000000..fcd438ea9a9c
--- /dev/null
+++ b/tests/cases/standalone/alter/drop_col_not_null_next.sql
@@ -0,0 +1,15 @@
+CREATE TABLE test(i BIGINT TIME INDEX, j INTEGER, k INTEGER NOT NULL);
+
+INSERT INTO test VALUES (1, 1, 11), (2, 2, 12);
+
+SELECT * FROM test;
+
+ALTER TABLE test DROP COLUMN j;
+
+INSERT INTO test VALUES (3, NULL);
+
+INSERT INTO test VALUES (3, 13);
+
+SELECT * FROM test;
+
+DROP TABLE test;
| feat | status_code in response header (#1982) |
| b8f7f603cffe530acdc6ef071f1bd9d5ef5178da | 2023-03-21 08:52:26 | Weny Xu | test: add copy clause sqlness tests (#1198) | false |
diff --git a/src/datanode/src/tests/instance_test.rs b/src/datanode/src/tests/instance_test.rs
index 1fca1d977542..63f87e283dc4 100644
--- a/src/datanode/src/tests/instance_test.rs
+++ b/src/datanode/src/tests/instance_test.rs
@@ -769,36 +769,6 @@ async fn test_delete() {
check_output_stream(output, expect).await;
}
-#[tokio::test(flavor = "multi_thread")]
-async fn test_execute_copy_to() {
- let instance = setup_test_instance("test_execute_copy_to").await;
-
- // setups
- execute_sql(
- &instance,
- "create table demo(host string, cpu double, memory double, ts timestamp time index);",
- )
- .await;
-
- let output = execute_sql(
- &instance,
- r#"insert into demo(host, cpu, memory, ts) values
- ('host1', 66.6, 1024, 1655276557000),
- ('host2', 88.8, 333.3, 1655276558000)
- "#,
- )
- .await;
- assert!(matches!(output, Output::AffectedRows(2)));
-
- // exports
- let data_dir = instance.data_tmp_dir().path();
-
- let copy_to_stmt = format!("Copy demo TO '{}/export/demo.parquet'", data_dir.display());
-
- let output = execute_sql(&instance, &copy_to_stmt).await;
- assert!(matches!(output, Output::AffectedRows(2)));
-}
-
#[tokio::test(flavor = "multi_thread")]
async fn test_execute_copy_to_s3() {
logging::init_default_ut_logging();
@@ -838,91 +808,6 @@ async fn test_execute_copy_to_s3() {
}
}
-#[tokio::test(flavor = "multi_thread")]
-async fn test_execute_copy_from() {
- let instance = setup_test_instance("test_execute_copy_from").await;
-
- // setups
- execute_sql(
- &instance,
- "create table demo(host string, cpu double, memory double, ts timestamp time index);",
- )
- .await;
-
- let output = execute_sql(
- &instance,
- r#"insert into demo(host, cpu, memory, ts) values
- ('host1', 66.6, 1024, 1655276557000),
- ('host2', 88.8, 333.3, 1655276558000)
- "#,
- )
- .await;
- assert!(matches!(output, Output::AffectedRows(2)));
-
- // export
- let data_dir = instance.data_tmp_dir().path();
-
- let copy_to_stmt = format!("Copy demo TO '{}/export/demo.parquet'", data_dir.display());
-
- let output = execute_sql(&instance, &copy_to_stmt).await;
- assert!(matches!(output, Output::AffectedRows(2)));
-
- struct Test<'a> {
- sql: &'a str,
- table_name: &'a str,
- }
- let tests = [
- Test {
- sql: &format!(
- "Copy with_filename FROM '{}/export/demo.parquet_1_2'",
- data_dir.display()
- ),
- table_name: "with_filename",
- },
- Test {
- sql: &format!("Copy with_path FROM '{}/export/'", data_dir.display()),
- table_name: "with_path",
- },
- Test {
- sql: &format!(
- "Copy with_pattern FROM '{}/export/' WITH (PATTERN = 'demo.*')",
- data_dir.display()
- ),
- table_name: "with_pattern",
- },
- ];
-
- for test in tests {
- // import
- execute_sql(
- &instance,
- &format!(
- "create table {}(host string, cpu double, memory double, ts timestamp time index);",
- test.table_name
- ),
- )
- .await;
-
- let output = execute_sql(&instance, test.sql).await;
- assert!(matches!(output, Output::AffectedRows(2)));
-
- let output = execute_sql(
- &instance,
- &format!("select * from {} order by ts", test.table_name),
- )
- .await;
- let expected = "\
-+-------+------+--------+---------------------+
-| host | cpu | memory | ts |
-+-------+------+--------+---------------------+
-| host1 | 66.6 | 1024.0 | 2022-06-15T07:02:37 |
-| host2 | 88.8 | 333.3 | 2022-06-15T07:02:38 |
-+-------+------+--------+---------------------+"
- .to_string();
- check_output_stream(output, expected).await;
- }
-}
-
#[tokio::test(flavor = "multi_thread")]
async fn test_execute_copy_from_s3() {
logging::init_default_ut_logging();
diff --git a/src/datanode/src/tests/test_util.rs b/src/datanode/src/tests/test_util.rs
index 284dd9581b08..2e9d7576e5a5 100644
--- a/src/datanode/src/tests/test_util.rs
+++ b/src/datanode/src/tests/test_util.rs
@@ -116,10 +116,6 @@ impl MockInstance {
pub(crate) fn inner(&self) -> &Instance {
&self.instance
}
-
- pub(crate) fn data_tmp_dir(&self) -> &TempDir {
- &self._guard._data_tmp_dir
- }
}
struct TestGuard {
diff --git a/tests/cases/standalone/copy/copy_from_fs.result b/tests/cases/standalone/copy/copy_from_fs.result
new file mode 100644
index 000000000000..0aae28730854
--- /dev/null
+++ b/tests/cases/standalone/copy/copy_from_fs.result
@@ -0,0 +1,79 @@
+CREATE TABLE demo(host string, cpu double, memory double, ts TIMESTAMP time index);
+
+Affected Rows: 0
+
+insert into demo(host, cpu, memory, ts) values ('host1', 66.6, 1024, 1655276557000), ('host2', 88.8, 333.3, 1655276558000);
+
+Affected Rows: 2
+
+Copy demo TO '/tmp/demo/export/demo.parquet';
+
+Affected Rows: 2
+
+CREATE TABLE with_filename(host string, cpu double, memory double, ts timestamp time index);
+
+Affected Rows: 0
+
+Copy with_filename FROM '/tmp/demo/export/demo.parquet_1_2';
+
+Affected Rows: 2
+
+select * from with_filename order by ts;
+
++-------+------+--------+---------------------+
+| host | cpu | memory | ts |
++-------+------+--------+---------------------+
+| host1 | 66.6 | 1024.0 | 2022-06-15T07:02:37 |
+| host2 | 88.8 | 333.3 | 2022-06-15T07:02:38 |
++-------+------+--------+---------------------+
+
+CREATE TABLE with_path(host string, cpu double, memory double, ts timestamp time index);
+
+Affected Rows: 0
+
+Copy with_path FROM '/tmp/demo/export/';
+
+Affected Rows: 2
+
+select * from with_path order by ts;
+
++-------+------+--------+---------------------+
+| host | cpu | memory | ts |
++-------+------+--------+---------------------+
+| host1 | 66.6 | 1024.0 | 2022-06-15T07:02:37 |
+| host2 | 88.8 | 333.3 | 2022-06-15T07:02:38 |
++-------+------+--------+---------------------+
+
+CREATE TABLE with_pattern(host string, cpu double, memory double, ts timestamp time index);
+
+Affected Rows: 0
+
+Copy with_pattern FROM '/tmp/demo/export/' WITH (PATTERN = 'demo.*');
+
+Affected Rows: 2
+
+select * from with_pattern order by ts;
+
++-------+------+--------+---------------------+
+| host | cpu | memory | ts |
++-------+------+--------+---------------------+
+| host1 | 66.6 | 1024.0 | 2022-06-15T07:02:37 |
+| host2 | 88.8 | 333.3 | 2022-06-15T07:02:38 |
++-------+------+--------+---------------------+
+
+drop table demo;
+
+Affected Rows: 1
+
+drop table with_filename;
+
+Affected Rows: 1
+
+drop table with_path;
+
+Affected Rows: 1
+
+drop table with_pattern;
+
+Affected Rows: 1
+
diff --git a/tests/cases/standalone/copy/copy_from_fs.sql b/tests/cases/standalone/copy/copy_from_fs.sql
new file mode 100644
index 000000000000..4c6fc2910dad
--- /dev/null
+++ b/tests/cases/standalone/copy/copy_from_fs.sql
@@ -0,0 +1,31 @@
+CREATE TABLE demo(host string, cpu double, memory double, ts TIMESTAMP time index);
+
+insert into demo(host, cpu, memory, ts) values ('host1', 66.6, 1024, 1655276557000), ('host2', 88.8, 333.3, 1655276558000);
+
+Copy demo TO '/tmp/demo/export/demo.parquet';
+
+CREATE TABLE with_filename(host string, cpu double, memory double, ts timestamp time index);
+
+Copy with_filename FROM '/tmp/demo/export/demo.parquet_1_2';
+
+select * from with_filename order by ts;
+
+CREATE TABLE with_path(host string, cpu double, memory double, ts timestamp time index);
+
+Copy with_path FROM '/tmp/demo/export/';
+
+select * from with_path order by ts;
+
+CREATE TABLE with_pattern(host string, cpu double, memory double, ts timestamp time index);
+
+Copy with_pattern FROM '/tmp/demo/export/' WITH (PATTERN = 'demo.*');
+
+select * from with_pattern order by ts;
+
+drop table demo;
+
+drop table with_filename;
+
+drop table with_path;
+
+drop table with_pattern;
diff --git a/tests/cases/standalone/copy/copy_to_fs.result b/tests/cases/standalone/copy/copy_to_fs.result
new file mode 100644
index 000000000000..698f4f6cde37
--- /dev/null
+++ b/tests/cases/standalone/copy/copy_to_fs.result
@@ -0,0 +1,16 @@
+CREATE TABLE demo(host string, cpu double, memory double, ts TIMESTAMP time index);
+
+Affected Rows: 0
+
+insert into demo(host, cpu, memory, ts) values ('host1', 66.6, 1024, 1655276557000), ('host2', 88.8, 333.3, 1655276558000);
+
+Affected Rows: 2
+
+Copy demo TO '/tmp/export/demo.parquet';
+
+Affected Rows: 2
+
+drop table demo;
+
+Affected Rows: 1
+
diff --git a/tests/cases/standalone/copy/copy_to_fs.sql b/tests/cases/standalone/copy/copy_to_fs.sql
new file mode 100644
index 000000000000..0fb6c713cc3b
--- /dev/null
+++ b/tests/cases/standalone/copy/copy_to_fs.sql
@@ -0,0 +1,7 @@
+CREATE TABLE demo(host string, cpu double, memory double, ts TIMESTAMP time index);
+
+insert into demo(host, cpu, memory, ts) values ('host1', 66.6, 1024, 1655276557000), ('host2', 88.8, 333.3, 1655276558000);
+
+Copy demo TO '/tmp/export/demo.parquet';
+
+drop table demo;
|
test
|
add copy clause sqlness tests (#1198)
|
b9bac2b195bfd53d65775f7c233a6cdd132756d9
|
2023-08-11 13:07:27
|
Ruihang Xia
|
fix: let information_schema know itself (#2149)
| false
|
diff --git a/src/catalog/src/information_schema.rs b/src/catalog/src/information_schema.rs
index 27d7afba2cad..43ae228854c4 100644
--- a/src/catalog/src/information_schema.rs
+++ b/src/catalog/src/information_schema.rs
@@ -16,18 +16,23 @@ mod columns;
mod tables;
use std::any::Any;
+use std::collections::HashMap;
use std::sync::{Arc, Weak};
use async_trait::async_trait;
+use common_catalog::consts::{
+ INFORMATION_SCHEMA_COLUMNS_TABLE_ID, INFORMATION_SCHEMA_NAME,
+ INFORMATION_SCHEMA_TABLES_TABLE_ID,
+};
use common_error::ext::BoxedError;
use common_recordbatch::{RecordBatchStreamAdaptor, SendableRecordBatchStream};
use datatypes::schema::SchemaRef;
use futures_util::StreamExt;
use snafu::ResultExt;
-use store_api::storage::ScanRequest;
+use store_api::storage::{ScanRequest, TableId};
use table::data_source::DataSource;
use table::error::{SchemaConversionSnafu, TablesRecordBatchSnafu};
-use table::metadata::TableType;
+use table::metadata::{TableIdent, TableInfoBuilder, TableMetaBuilder, TableType};
use table::{Result as TableResult, Table, TableRef};
use self::columns::InformationSchemaColumns;
@@ -36,8 +41,8 @@ use crate::information_schema::tables::InformationSchemaTables;
use crate::table_factory::TableFactory;
use crate::CatalogManager;
-const TABLES: &str = "tables";
-const COLUMNS: &str = "columns";
+pub const TABLES: &str = "tables";
+pub const COLUMNS: &str = "columns";
pub struct InformationSchemaProvider {
catalog_name: String,
@@ -51,42 +56,95 @@ impl InformationSchemaProvider {
catalog_manager,
}
}
-}
-impl InformationSchemaProvider {
- pub fn table(&self, name: &str) -> Result<Option<TableRef>> {
- let stream_builder = match name.to_ascii_lowercase().as_ref() {
- TABLES => Arc::new(InformationSchemaTables::new(
- self.catalog_name.clone(),
- self.catalog_manager.clone(),
+ /// Build a map of [TableRef] in information schema.
+ /// Including `tables` and `columns`.
+ pub fn build(
+ catalog_name: String,
+ catalog_manager: Weak<dyn CatalogManager>,
+ ) -> HashMap<String, TableRef> {
+ let mut schema = HashMap::new();
+
+ schema.insert(
+ TABLES.to_string(),
+ Arc::new(InformationTable::new(
+ catalog_name.clone(),
+ INFORMATION_SCHEMA_TABLES_TABLE_ID,
+ TABLES.to_string(),
+ Arc::new(InformationSchemaTables::new(
+ catalog_name.clone(),
+ catalog_manager.clone(),
+ )),
)) as _,
- COLUMNS => Arc::new(InformationSchemaColumns::new(
- self.catalog_name.clone(),
- self.catalog_manager.clone(),
+ );
+ schema.insert(
+ COLUMNS.to_string(),
+ Arc::new(InformationTable::new(
+ catalog_name.clone(),
+ INFORMATION_SCHEMA_COLUMNS_TABLE_ID,
+ COLUMNS.to_string(),
+ Arc::new(InformationSchemaColumns::new(catalog_name, catalog_manager)),
)) as _,
+ );
+
+ schema
+ }
+
+ pub fn table(&self, name: &str) -> Result<Option<TableRef>> {
+ let (stream_builder, table_id) = match name.to_ascii_lowercase().as_ref() {
+ TABLES => (
+ Arc::new(InformationSchemaTables::new(
+ self.catalog_name.clone(),
+ self.catalog_manager.clone(),
+ )) as _,
+ INFORMATION_SCHEMA_TABLES_TABLE_ID,
+ ),
+ COLUMNS => (
+ Arc::new(InformationSchemaColumns::new(
+ self.catalog_name.clone(),
+ self.catalog_manager.clone(),
+ )) as _,
+ INFORMATION_SCHEMA_COLUMNS_TABLE_ID,
+ ),
_ => {
return Ok(None);
}
};
- Ok(Some(Arc::new(InformationTable::new(stream_builder))))
+ Ok(Some(Arc::new(InformationTable::new(
+ self.catalog_name.clone(),
+ table_id,
+ name.to_string(),
+ stream_builder,
+ ))))
}
pub fn table_factory(&self, name: &str) -> Result<Option<TableFactory>> {
- let stream_builder = match name.to_ascii_lowercase().as_ref() {
- TABLES => Arc::new(InformationSchemaTables::new(
- self.catalog_name.clone(),
- self.catalog_manager.clone(),
- )) as _,
- COLUMNS => Arc::new(InformationSchemaColumns::new(
- self.catalog_name.clone(),
- self.catalog_manager.clone(),
- )) as _,
+ let (stream_builder, table_id) = match name.to_ascii_lowercase().as_ref() {
+ TABLES => (
+ Arc::new(InformationSchemaTables::new(
+ self.catalog_name.clone(),
+ self.catalog_manager.clone(),
+ )) as _,
+ INFORMATION_SCHEMA_TABLES_TABLE_ID,
+ ),
+ COLUMNS => (
+ Arc::new(InformationSchemaColumns::new(
+ self.catalog_name.clone(),
+ self.catalog_manager.clone(),
+ )) as _,
+ INFORMATION_SCHEMA_COLUMNS_TABLE_ID,
+ ),
_ => {
return Ok(None);
}
};
- let data_source = Arc::new(InformationTable::new(stream_builder));
+ let data_source = Arc::new(InformationTable::new(
+ self.catalog_name.clone(),
+ table_id,
+ name.to_string(),
+ stream_builder,
+ ));
Ok(Some(Arc::new(move || data_source.clone())))
}
@@ -101,12 +159,25 @@ pub trait InformationStreamBuilder: Send + Sync {
}
pub struct InformationTable {
+ catalog_name: String,
+ table_id: TableId,
+ name: String,
stream_builder: Arc<dyn InformationStreamBuilder>,
}
impl InformationTable {
- pub fn new(stream_builder: Arc<dyn InformationStreamBuilder>) -> Self {
- Self { stream_builder }
+ pub fn new(
+ catalog_name: String,
+ table_id: TableId,
+ name: String,
+ stream_builder: Arc<dyn InformationStreamBuilder>,
+ ) -> Self {
+ Self {
+ catalog_name,
+ table_id,
+ name,
+ stream_builder,
+ }
}
}
@@ -121,7 +192,26 @@ impl Table for InformationTable {
}
fn table_info(&self) -> table::metadata::TableInfoRef {
- unreachable!("Should not call table_info() of InformationTable directly")
+ let table_meta = TableMetaBuilder::default()
+ .schema(self.stream_builder.schema())
+ .primary_key_indices(vec![])
+ .next_column_id(0)
+ .build()
+ .unwrap();
+ Arc::new(
+ TableInfoBuilder::default()
+ .ident(TableIdent {
+ table_id: self.table_id,
+ version: 0,
+ })
+ .name(self.name.clone())
+ .catalog_name(self.catalog_name.clone())
+ .schema_name(INFORMATION_SCHEMA_NAME.to_string())
+ .meta(table_meta)
+ .table_type(TableType::Temporary)
+ .build()
+ .unwrap(),
+ )
}
fn table_type(&self) -> TableType {
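The `table_info()` change above makes the virtual information_schema tables synthesize their metadata on demand instead of panicking. Below is a minimal, self-contained sketch of that idea; the types are simplified stand-ins, not the real TableMetaBuilder / TableInfoBuilder API.

use std::sync::Arc;

// Simplified stand-in for the real TableInfo metadata type.
#[derive(Debug)]
struct TableInfo {
    table_id: u32,
    name: String,
    catalog_name: String,
    schema_name: String,
    temporary: bool,
}

struct InformationTable {
    catalog_name: String,
    table_id: u32,
    name: String,
}

impl InformationTable {
    // Metadata for a virtual table is cheap to rebuild, so it is synthesized
    // on every call rather than cached or treated as unreachable.
    fn table_info(&self) -> Arc<TableInfo> {
        Arc::new(TableInfo {
            table_id: self.table_id,
            name: self.name.clone(),
            catalog_name: self.catalog_name.clone(),
            schema_name: "information_schema".to_string(),
            temporary: true,
        })
    }
}

fn main() {
    let tables = InformationTable {
        catalog_name: "greptime".to_string(),
        table_id: 3,
        name: "tables".to_string(),
    };
    println!("{:?}", tables.table_info());
}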
diff --git a/src/catalog/src/information_schema/columns.rs b/src/catalog/src/information_schema/columns.rs
index 30c0d1a79500..be66119539f0 100644
--- a/src/catalog/src/information_schema/columns.rs
+++ b/src/catalog/src/information_schema/columns.rs
@@ -16,7 +16,8 @@ use std::sync::{Arc, Weak};
use arrow_schema::SchemaRef as ArrowSchemaRef;
use common_catalog::consts::{
- SEMANTIC_TYPE_FIELD, SEMANTIC_TYPE_PRIMARY_KEY, SEMANTIC_TYPE_TIME_INDEX,
+ INFORMATION_SCHEMA_NAME, SEMANTIC_TYPE_FIELD, SEMANTIC_TYPE_PRIMARY_KEY,
+ SEMANTIC_TYPE_TIME_INDEX,
};
use common_error::ext::BoxedError;
use common_query::physical_plan::TaskContext;
@@ -31,7 +32,8 @@ use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::vectors::{StringVectorBuilder, VectorRef};
use snafu::{OptionExt, ResultExt};
-use super::InformationStreamBuilder;
+use super::tables::InformationSchemaTables;
+use super::{InformationStreamBuilder, COLUMNS, TABLES};
use crate::error::{
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
};
@@ -52,19 +54,22 @@ const SEMANTIC_TYPE: &str = "semantic_type";
impl InformationSchemaColumns {
pub(super) fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
- let schema = Arc::new(Schema::new(vec![
+ Self {
+ schema: Self::schema(),
+ catalog_name,
+ catalog_manager,
+ }
+ }
+
+ fn schema() -> SchemaRef {
+ Arc::new(Schema::new(vec![
ColumnSchema::new(TABLE_CATALOG, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(TABLE_SCHEMA, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(TABLE_NAME, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(COLUMN_NAME, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(DATA_TYPE, ConcreteDataType::string_datatype(), false),
ColumnSchema::new(SEMANTIC_TYPE, ConcreteDataType::string_datatype(), false),
- ]));
- Self {
- schema,
- catalog_name,
- catalog_manager,
- }
+ ]))
}
fn builder(&self) -> InformationSchemaColumnsBuilder {
@@ -153,14 +158,28 @@ impl InformationSchemaColumnsBuilder {
.table_names(&catalog_name, &schema_name)
.await?
{
- let Some(table) = catalog_manager
+ let (keys, schema) = if let Some(table) = catalog_manager
.table(&catalog_name, &schema_name, &table_name)
.await?
- else {
- continue;
+ {
+ let keys = &table.table_info().meta.primary_key_indices;
+ let schema = table.schema();
+ (keys.clone(), schema)
+ } else {
+ // TODO: this specific branch is only a workaround for FrontendCatalogManager.
+ if schema_name == INFORMATION_SCHEMA_NAME {
+ if table_name == COLUMNS {
+ (vec![], InformationSchemaColumns::schema())
+ } else if table_name == TABLES {
+ (vec![], InformationSchemaTables::schema())
+ } else {
+ continue;
+ }
+ } else {
+ continue;
+ }
};
- let keys = &table.table_info().meta.primary_key_indices;
- let schema = table.schema();
+
for (idx, column) in schema.column_schemas().iter().enumerate() {
let semantic_type = if column.is_time_index() {
SEMANTIC_TYPE_TIME_INDEX
diff --git a/src/catalog/src/information_schema/tables.rs b/src/catalog/src/information_schema/tables.rs
index 36b6f198ade7..081f2f03cdde 100644
--- a/src/catalog/src/information_schema/tables.rs
+++ b/src/catalog/src/information_schema/tables.rs
@@ -15,7 +15,10 @@
use std::sync::{Arc, Weak};
use arrow_schema::SchemaRef as ArrowSchemaRef;
-use common_catalog::consts::INFORMATION_SCHEMA_NAME;
+use common_catalog::consts::{
+ INFORMATION_SCHEMA_COLUMNS_TABLE_ID, INFORMATION_SCHEMA_NAME,
+ INFORMATION_SCHEMA_TABLES_TABLE_ID,
+};
use common_error::ext::BoxedError;
use common_query::physical_plan::TaskContext;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
@@ -29,6 +32,7 @@ use datatypes::vectors::{StringVectorBuilder, UInt32VectorBuilder};
use snafu::{OptionExt, ResultExt};
use table::metadata::TableType;
+use super::{COLUMNS, TABLES};
use crate::error::{
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
};
@@ -43,19 +47,22 @@ pub(super) struct InformationSchemaTables {
impl InformationSchemaTables {
pub(super) fn new(catalog_name: String, catalog_manager: Weak<dyn CatalogManager>) -> Self {
- let schema = Arc::new(Schema::new(vec![
+ Self {
+ schema: Self::schema(),
+ catalog_name,
+ catalog_manager,
+ }
+ }
+
+ pub(crate) fn schema() -> SchemaRef {
+ Arc::new(Schema::new(vec![
ColumnSchema::new("table_catalog", ConcreteDataType::string_datatype(), false),
ColumnSchema::new("table_schema", ConcreteDataType::string_datatype(), false),
ColumnSchema::new("table_name", ConcreteDataType::string_datatype(), false),
ColumnSchema::new("table_type", ConcreteDataType::string_datatype(), false),
ColumnSchema::new("table_id", ConcreteDataType::uint32_datatype(), true),
ColumnSchema::new("engine", ConcreteDataType::string_datatype(), true),
- ]));
- Self {
- schema,
- catalog_name,
- catalog_manager,
- }
+ ]))
}
fn builder(&self) -> InformationSchemaTablesBuilder {
@@ -137,9 +144,6 @@ impl InformationSchemaTablesBuilder {
.context(UpgradeWeakCatalogManagerRefSnafu)?;
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
- if schema_name == INFORMATION_SCHEMA_NAME {
- continue;
- }
if !catalog_manager
.schema_exist(&catalog_name, &schema_name)
.await?
@@ -151,21 +155,43 @@ impl InformationSchemaTablesBuilder {
.table_names(&catalog_name, &schema_name)
.await?
{
- let Some(table) = catalog_manager
+ if let Some(table) = catalog_manager
.table(&catalog_name, &schema_name, &table_name)
.await?
- else {
- continue;
+ {
+ let table_info = table.table_info();
+ self.add_table(
+ &catalog_name,
+ &schema_name,
+ &table_name,
+ table.table_type(),
+ Some(table_info.ident.table_id),
+ Some(&table_info.meta.engine),
+ );
+ } else {
+ // TODO: this specific branch is only a workaround for FrontendCatalogManager.
+ if schema_name == INFORMATION_SCHEMA_NAME {
+ if table_name == COLUMNS {
+ self.add_table(
+ &catalog_name,
+ &schema_name,
+ &table_name,
+ TableType::Temporary,
+ Some(INFORMATION_SCHEMA_COLUMNS_TABLE_ID),
+ None,
+ );
+ } else if table_name == TABLES {
+ self.add_table(
+ &catalog_name,
+ &schema_name,
+ &table_name,
+ TableType::Temporary,
+ Some(INFORMATION_SCHEMA_TABLES_TABLE_ID),
+ None,
+ );
+ }
+ }
};
- let table_info = table.table_info();
- self.add_table(
- &catalog_name,
- &schema_name,
- &table_name,
- table.table_type(),
- Some(table_info.ident.table_id),
- Some(&table_info.meta.engine),
- );
}
}
diff --git a/src/catalog/src/lib.rs b/src/catalog/src/lib.rs
index d8c46ac8f3dc..d8574b062d1f 100644
--- a/src/catalog/src/lib.rs
+++ b/src/catalog/src/lib.rs
@@ -49,7 +49,7 @@ pub trait CatalogManager: Send + Sync {
async fn start(&self) -> Result<()>;
/// Registers a catalog to the catalog manager, returns whether the catalog existed before.
- async fn register_catalog(&self, name: String) -> Result<bool>;
+ async fn register_catalog(self: Arc<Self>, name: String) -> Result<bool>;
/// Register a schema with catalog name and schema name. Returns whether the
/// schema was registered.
diff --git a/src/catalog/src/local/manager.rs b/src/catalog/src/local/manager.rs
index ac9e946f365b..f87601aa326e 100644
--- a/src/catalog/src/local/manager.rs
+++ b/src/catalog/src/local/manager.rs
@@ -43,7 +43,6 @@ use crate::error::{
SystemCatalogTypeMismatchSnafu, TableEngineNotFoundSnafu, TableExistsSnafu, TableNotExistSnafu,
TableNotFoundSnafu, UnimplementedSnafu,
};
-use crate::information_schema::InformationSchemaProvider;
use crate::local::memory::MemoryCatalogManager;
use crate::system::{
decode_system_catalog, Entry, SystemCatalogTable, TableEntry, ENTRY_TYPE_INDEX, KEY_INDEX,
@@ -51,9 +50,8 @@ use crate::system::{
};
use crate::tables::SystemCatalog;
use crate::{
- handle_system_table_request, CatalogManager, CatalogManagerRef, DeregisterSchemaRequest,
- DeregisterTableRequest, RegisterSchemaRequest, RegisterSystemTableRequest,
- RegisterTableRequest, RenameTableRequest,
+ handle_system_table_request, CatalogManager, DeregisterSchemaRequest, DeregisterTableRequest,
+ RegisterSchemaRequest, RegisterSystemTableRequest, RegisterTableRequest, RenameTableRequest,
};
/// A `CatalogManager` consists of a system catalog and a bunch of user catalogs.
@@ -118,11 +116,18 @@ impl LocalCatalogManager {
}
async fn init_system_catalog(&self) -> Result<()> {
+ // register default catalog and default schema
+ self.catalogs
+ .register_catalog_sync(DEFAULT_CATALOG_NAME.to_string())?;
+ self.catalogs.register_schema_sync(RegisterSchemaRequest {
+ catalog: DEFAULT_CATALOG_NAME.to_string(),
+ schema: DEFAULT_SCHEMA_NAME.to_string(),
+ })?;
+
// register SystemCatalogTable
- let _ = self
- .catalogs
+ self.catalogs
.register_catalog_sync(SYSTEM_CATALOG_NAME.to_string())?;
- let _ = self.catalogs.register_schema_sync(RegisterSchemaRequest {
+ self.catalogs.register_schema_sync(RegisterSchemaRequest {
catalog: SYSTEM_CATALOG_NAME.to_string(),
schema: INFORMATION_SCHEMA_NAME.to_string(),
})?;
@@ -133,16 +138,7 @@ impl LocalCatalogManager {
table_id: SYSTEM_CATALOG_TABLE_ID,
table: self.system.information_schema.system.clone(),
};
- let _ = self.catalogs.register_table(register_table_req).await?;
-
- // register default catalog and default schema
- let _ = self
- .catalogs
- .register_catalog_sync(DEFAULT_CATALOG_NAME.to_string())?;
- let _ = self.catalogs.register_schema_sync(RegisterSchemaRequest {
- catalog: DEFAULT_CATALOG_NAME.to_string(),
- schema: DEFAULT_SCHEMA_NAME.to_string(),
- })?;
+ self.catalogs.register_table(register_table_req).await?;
// Add numbers table for test
let numbers_table = Arc::new(NumbersTable::default());
@@ -154,8 +150,7 @@ impl LocalCatalogManager {
table: numbers_table,
};
- let _ = self
- .catalogs
+ self.catalogs
.register_table(register_number_table_req)
.await?;
@@ -230,9 +225,8 @@ impl LocalCatalogManager {
for entry in entries {
match entry {
Entry::Catalog(c) => {
- let _ = self
- .catalogs
- .register_catalog_if_absent(c.catalog_name.clone());
+ self.catalogs
+ .register_catalog_sync(c.catalog_name.clone())?;
info!("Register catalog: {}", c.catalog_name);
}
Entry::Schema(s) => {
@@ -548,13 +542,6 @@ impl CatalogManager for LocalCatalogManager {
schema_name: &str,
table_name: &str,
) -> Result<Option<TableRef>> {
- if schema_name == INFORMATION_SCHEMA_NAME {
- let manager: CatalogManagerRef = self.catalogs.clone() as _;
- let provider =
- InformationSchemaProvider::new(catalog_name.to_string(), Arc::downgrade(&manager));
- return provider.table(table_name);
- }
-
self.catalogs
.table(catalog_name, schema_name, table_name)
.await
@@ -584,8 +571,8 @@ impl CatalogManager for LocalCatalogManager {
self.catalogs.table_names(catalog_name, schema_name).await
}
- async fn register_catalog(&self, name: String) -> Result<bool> {
- self.catalogs.register_catalog(name).await
+ async fn register_catalog(self: Arc<Self>, name: String) -> Result<bool> {
+ self.catalogs.clone().register_catalog(name).await
}
fn as_any(&self) -> &dyn Any {
diff --git a/src/catalog/src/local/memory.rs b/src/catalog/src/local/memory.rs
index 9fb8c8f0f0e1..9dbe70333d57 100644
--- a/src/catalog/src/local/memory.rs
+++ b/src/catalog/src/local/memory.rs
@@ -16,9 +16,11 @@ use std::any::Any;
use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::sync::atomic::{AtomicU32, Ordering};
-use std::sync::{Arc, RwLock};
+use std::sync::{Arc, RwLock, Weak};
-use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MIN_USER_TABLE_ID};
+use common_catalog::consts::{
+ DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, MIN_USER_TABLE_ID,
+};
use metrics::{decrement_gauge, increment_gauge};
use snafu::OptionExt;
use table::metadata::TableId;
@@ -28,6 +30,7 @@ use table::TableRef;
use crate::error::{
CatalogNotFoundSnafu, Result, SchemaNotFoundSnafu, TableExistsSnafu, TableNotFoundSnafu,
};
+use crate::information_schema::InformationSchemaProvider;
use crate::{
CatalogManager, DeregisterSchemaRequest, DeregisterTableRequest, RegisterSchemaRequest,
RegisterSystemTableRequest, RegisterTableRequest, RenameTableRequest,
@@ -42,24 +45,6 @@ pub struct MemoryCatalogManager {
pub table_id: AtomicU32,
}
-impl Default for MemoryCatalogManager {
- fn default() -> Self {
- let manager = Self {
- table_id: AtomicU32::new(MIN_USER_TABLE_ID),
- catalogs: Default::default(),
- };
-
- let catalog = HashMap::from([(DEFAULT_SCHEMA_NAME.to_string(), HashMap::new())]);
- let _ = manager
- .catalogs
- .write()
- .unwrap()
- .insert(DEFAULT_CATALOG_NAME.to_string(), catalog);
-
- manager
- }
-}
-
#[async_trait::async_trait]
impl TableIdProvider for MemoryCatalogManager {
async fn next_table_id(&self) -> table::error::Result<TableId> {
@@ -250,7 +235,7 @@ impl CatalogManager for MemoryCatalogManager {
.collect())
}
- async fn register_catalog(&self, name: String) -> Result<bool> {
+ async fn register_catalog(self: Arc<Self>, name: String) -> Result<bool> {
self.register_catalog_sync(name)
}
@@ -260,6 +245,28 @@ impl CatalogManager for MemoryCatalogManager {
}
impl MemoryCatalogManager {
+ /// Create a manager with some default setups
+ /// (e.g. default catalog/schema and information schema)
+ pub fn with_default_setup() -> Arc<Self> {
+ let manager = Arc::new(Self {
+ table_id: AtomicU32::new(MIN_USER_TABLE_ID),
+ catalogs: Default::default(),
+ });
+
+ // Safety: default catalog/schema is registered in order so no CatalogNotFound error will occur
+ manager
+ .register_catalog_sync(DEFAULT_CATALOG_NAME.to_string())
+ .unwrap();
+ manager
+ .register_schema_sync(RegisterSchemaRequest {
+ catalog: DEFAULT_CATALOG_NAME.to_string(),
+ schema: DEFAULT_SCHEMA_NAME.to_string(),
+ })
+ .unwrap();
+
+ manager
+ }
+
/// Registers a catalog and returns whether the catalog already exists
pub fn register_catalog_if_absent(&self, name: String) -> bool {
let mut catalogs = self.catalogs.write().unwrap();
@@ -273,12 +280,13 @@ impl MemoryCatalogManager {
}
}
- pub fn register_catalog_sync(&self, name: String) -> Result<bool> {
+ pub fn register_catalog_sync(self: &Arc<Self>, name: String) -> Result<bool> {
let mut catalogs = self.catalogs.write().unwrap();
- match catalogs.entry(name) {
+ match catalogs.entry(name.clone()) {
Entry::Vacant(e) => {
- e.insert(HashMap::new());
+ let catalog = self.create_catalog_entry(name);
+ e.insert(catalog);
increment_gauge!(crate::metrics::METRIC_CATALOG_MANAGER_CATALOG_COUNT, 1.0);
Ok(true)
}
@@ -332,9 +340,19 @@ impl MemoryCatalogManager {
Ok(true)
}
+ fn create_catalog_entry(self: &Arc<Self>, catalog: String) -> SchemaEntries {
+ let information_schema = InformationSchemaProvider::build(
+ catalog,
+ Arc::downgrade(self) as Weak<dyn CatalogManager>,
+ );
+ let mut catalog = HashMap::new();
+ catalog.insert(INFORMATION_SCHEMA_NAME.to_string(), information_schema);
+ catalog
+ }
+
#[cfg(any(test, feature = "testing"))]
- pub fn new_with_table(table: TableRef) -> Self {
- let manager = Self::default();
+ pub fn new_with_table(table: TableRef) -> Arc<Self> {
+ let manager = Self::with_default_setup();
let request = RegisterTableRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
@@ -349,7 +367,7 @@ impl MemoryCatalogManager {
/// Create a memory catalog list contains a numbers table for test
pub fn new_memory_catalog_manager() -> Result<Arc<MemoryCatalogManager>> {
- Ok(Arc::new(MemoryCatalogManager::default()))
+ Ok(MemoryCatalogManager::with_default_setup())
}
#[cfg(test)]
@@ -392,7 +410,7 @@ mod tests {
#[tokio::test]
async fn test_mem_manager_rename_table() {
- let catalog = MemoryCatalogManager::default();
+ let catalog = MemoryCatalogManager::with_default_setup();
let table_name = "test_table";
assert!(!catalog
.table_exist(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name)
@@ -456,7 +474,7 @@ mod tests {
#[tokio::test]
async fn test_catalog_rename_table() {
- let catalog = MemoryCatalogManager::default();
+ let catalog = MemoryCatalogManager::with_default_setup();
let table_name = "num";
let table_id = 2333;
let table: TableRef = Arc::new(NumbersTable::new(table_id));
@@ -507,14 +525,14 @@ mod tests {
#[test]
pub fn test_register_if_absent() {
- let list = MemoryCatalogManager::default();
+ let list = MemoryCatalogManager::with_default_setup();
assert!(!list.register_catalog_if_absent("test_catalog".to_string(),));
assert!(list.register_catalog_if_absent("test_catalog".to_string()));
}
#[tokio::test]
pub async fn test_catalog_deregister_table() {
- let catalog = MemoryCatalogManager::default();
+ let catalog = MemoryCatalogManager::with_default_setup();
let table_name = "foo_table";
let register_table_req = RegisterTableRequest {
@@ -549,7 +567,7 @@ mod tests {
#[tokio::test]
async fn test_catalog_deregister_schema() {
- let catalog = MemoryCatalogManager::default();
+ let catalog = MemoryCatalogManager::with_default_setup();
// Registers a catalog, a schema, and a table.
let catalog_name = "foo_catalog".to_string();
@@ -567,6 +585,7 @@ mod tests {
table: Arc::new(NumbersTable::default()),
};
catalog
+ .clone()
.register_catalog(catalog_name.clone())
.await
.unwrap();
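The switch from `Default` to `with_default_setup` and the `Arc<Self>` receivers above exist so the manager can hand a `Weak` reference of itself to the information-schema provider without creating a reference cycle. A minimal sketch of that ownership pattern follows; the types are hypothetical stand-ins and only the Arc/Weak wiring mirrors the real code.

use std::collections::HashMap;
use std::sync::{Arc, RwLock, Weak};

struct MemoryCatalogManager {
    schemas: RwLock<HashMap<String, Vec<String>>>,
}

struct InformationSchemaProvider {
    // The provider holds a Weak handle back to its manager, so the manager
    // can own the provider's tables without an Arc cycle.
    catalog_manager: Weak<MemoryCatalogManager>,
}

impl MemoryCatalogManager {
    // An Arc<Self> receiver lets the manager downgrade a reference to itself
    // when a new catalog is registered, mirroring the trait change above.
    fn register_catalog(self: Arc<Self>, name: String) -> InformationSchemaProvider {
        let provider = InformationSchemaProvider {
            catalog_manager: Arc::downgrade(&self),
        };
        self.schemas
            .write()
            .unwrap()
            .insert(name, vec!["information_schema".to_string()]);
        provider
    }
}

fn main() {
    let manager = Arc::new(MemoryCatalogManager {
        schemas: RwLock::new(HashMap::new()),
    });
    let provider = manager.clone().register_catalog("greptime".to_string());
    // The Weak handle upgrades successfully while the manager is alive.
    assert!(provider.catalog_manager.upgrade().is_some());
}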
diff --git a/src/catalog/src/remote/manager.rs b/src/catalog/src/remote/manager.rs
index 973bc0625466..0bfae4ea3104 100644
--- a/src/catalog/src/remote/manager.rs
+++ b/src/catalog/src/remote/manager.rs
@@ -67,7 +67,7 @@ impl RemoteCatalogManager {
backend,
system_table_requests: Default::default(),
region_alive_keepers,
- memory_catalog_manager: Arc::new(MemoryCatalogManager::default()),
+ memory_catalog_manager: MemoryCatalogManager::with_default_setup(),
table_metadata_manager,
}
}
@@ -386,6 +386,7 @@ impl CatalogManager for RemoteCatalogManager {
if remote_catalog_exists
&& self
.memory_catalog_manager
+ .clone()
.register_catalog(catalog.to_string())
.await?
{
@@ -423,7 +424,7 @@ impl CatalogManager for RemoteCatalogManager {
.await
}
- async fn register_catalog(&self, name: String) -> Result<bool> {
+ async fn register_catalog(self: Arc<Self>, name: String) -> Result<bool> {
self.memory_catalog_manager.register_catalog_sync(name)
}
diff --git a/src/catalog/src/table_source.rs b/src/catalog/src/table_source.rs
index 358d015038ab..fbcac34795c6 100644
--- a/src/catalog/src/table_source.rs
+++ b/src/catalog/src/table_source.rs
@@ -130,7 +130,7 @@ mod tests {
let query_ctx = &QueryContext::with("greptime", "public");
let table_provider =
- DfTableSourceProvider::new(Arc::new(MemoryCatalogManager::default()), true, query_ctx);
+ DfTableSourceProvider::new(MemoryCatalogManager::with_default_setup(), true, query_ctx);
let table_ref = TableReference::Bare {
table: Cow::Borrowed("table_name"),
diff --git a/src/catalog/tests/remote_catalog_tests.rs b/src/catalog/tests/remote_catalog_tests.rs
index dc6005cd4954..11b352d51335 100644
--- a/src/catalog/tests/remote_catalog_tests.rs
+++ b/src/catalog/tests/remote_catalog_tests.rs
@@ -26,7 +26,9 @@ mod tests {
use catalog::remote::region_alive_keeper::RegionAliveKeepers;
use catalog::remote::{CachedMetaKvBackend, RemoteCatalogManager};
use catalog::{CatalogManager, RegisterSchemaRequest, RegisterTableRequest};
- use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MITO_ENGINE};
+ use common_catalog::consts::{
+ DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, MITO_ENGINE,
+ };
use common_meta::helper::{CatalogKey, CatalogValue, SchemaKey, SchemaValue};
use common_meta::ident::TableIdent;
use common_meta::key::TableMetadataManager;
@@ -179,12 +181,17 @@ mod tests {
catalog_manager.catalog_names().await.unwrap()
);
+ let mut schema_names = catalog_manager
+ .schema_names(DEFAULT_CATALOG_NAME)
+ .await
+ .unwrap();
+ schema_names.sort_unstable();
assert_eq!(
- vec![DEFAULT_SCHEMA_NAME.to_string()],
- catalog_manager
- .schema_names(DEFAULT_CATALOG_NAME)
- .await
- .unwrap()
+ vec![
+ INFORMATION_SCHEMA_NAME.to_string(),
+ DEFAULT_SCHEMA_NAME.to_string()
+ ],
+ schema_names
);
}
@@ -240,13 +247,18 @@ mod tests {
async fn test_register_table() {
let node_id = 42;
let components = prepare_components(node_id).await;
+ let mut schema_names = components
+ .catalog_manager
+ .schema_names(DEFAULT_CATALOG_NAME)
+ .await
+ .unwrap();
+ schema_names.sort_unstable();
assert_eq!(
- vec![DEFAULT_SCHEMA_NAME.to_string()],
- components
- .catalog_manager
- .schema_names(DEFAULT_CATALOG_NAME)
- .await
- .unwrap()
+ vec![
+ INFORMATION_SCHEMA_NAME.to_string(),
+ DEFAULT_SCHEMA_NAME.to_string(),
+ ],
+ schema_names
);
// register a new table with an nonexistent catalog
@@ -309,6 +321,7 @@ mod tests {
// register catalog to catalog manager
assert!(components
.catalog_manager
+ .clone()
.register_catalog(catalog_name.clone())
.await
.is_ok());
@@ -374,7 +387,7 @@ mod tests {
.unwrap());
assert_eq!(
- HashSet::from([schema_name.clone()]),
+ HashSet::from([schema_name.clone(), INFORMATION_SCHEMA_NAME.to_string()]),
components
.catalog_manager
.schema_names(&catalog_name)
diff --git a/src/common/catalog/src/consts.rs b/src/common/catalog/src/consts.rs
index 555359f7b666..de3602775708 100644
--- a/src/common/catalog/src/consts.rs
+++ b/src/common/catalog/src/consts.rs
@@ -29,6 +29,10 @@ pub const SYSTEM_CATALOG_TABLE_ID: u32 = 0;
pub const SCRIPTS_TABLE_ID: u32 = 1;
/// numbers table id
pub const NUMBERS_TABLE_ID: u32 = 2;
+/// id for information_schema.tables
+pub const INFORMATION_SCHEMA_TABLES_TABLE_ID: u32 = 3;
+/// id for information_schema.columns
+pub const INFORMATION_SCHEMA_COLUMNS_TABLE_ID: u32 = 4;
pub const MITO_ENGINE: &str = "mito";
pub const IMMUTABLE_FILE_ENGINE: &str = "file";
diff --git a/src/datanode/src/instance.rs b/src/datanode/src/instance.rs
index 9c654f9b3f32..ed229060b06b 100644
--- a/src/datanode/src/instance.rs
+++ b/src/datanode/src/instance.rs
@@ -208,7 +208,7 @@ impl Instance {
let (catalog_manager, table_id_provider, region_alive_keepers) = match opts.mode {
Mode::Standalone => {
if opts.enable_memory_catalog {
- let catalog = Arc::new(catalog::local::MemoryCatalogManager::default());
+ let catalog = catalog::local::MemoryCatalogManager::with_default_setup();
let table = NumbersTable::new(MIN_USER_TABLE_ID);
let _ = catalog
diff --git a/src/frontend/src/catalog.rs b/src/frontend/src/catalog.rs
index 1c7228974ec8..9937f3716c7b 100644
--- a/src/frontend/src/catalog.rs
+++ b/src/frontend/src/catalog.rs
@@ -21,7 +21,7 @@ use catalog::error::{
self as catalog_err, InternalSnafu, InvalidCatalogValueSnafu, InvalidSystemTableDefSnafu,
Result as CatalogResult, TableMetadataManagerSnafu, UnimplementedSnafu,
};
-use catalog::information_schema::InformationSchemaProvider;
+use catalog::information_schema::{InformationSchemaProvider, COLUMNS, TABLES};
use catalog::remote::KvCacheInvalidatorRef;
use catalog::{
CatalogManager, DeregisterSchemaRequest, DeregisterTableRequest, RegisterSchemaRequest,
@@ -43,7 +43,7 @@ use common_telemetry::{debug, warn};
use partition::manager::PartitionRuleManagerRef;
use snafu::prelude::*;
use table::metadata::TableId;
-use table::table::numbers::NumbersTable;
+use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME};
use table::TableRef;
use crate::expr_factory;
@@ -160,7 +160,7 @@ impl CatalogManager for FrontendCatalogManager {
Ok(())
}
- async fn register_catalog(&self, _name: String) -> CatalogResult<bool> {
+ async fn register_catalog(self: Arc<Self>, _name: String) -> CatalogResult<bool> {
unimplemented!("FrontendCatalogManager does not support registering catalog")
}
@@ -318,6 +318,7 @@ impl CatalogManager for FrontendCatalogManager {
.kvs;
let mut res = HashSet::new();
+ res.insert(INFORMATION_SCHEMA_NAME.to_string());
for KeyValue { key: k, value: _ } in kvs {
let key =
SchemaKey::parse(String::from_utf8_lossy(&k)).context(InvalidCatalogValueSnafu)?;
@@ -337,7 +338,11 @@ impl CatalogManager for FrontendCatalogManager {
.map(|(k, _)| k)
.collect::<Vec<String>>();
if catalog == DEFAULT_CATALOG_NAME && schema == DEFAULT_SCHEMA_NAME {
- tables.push("numbers".to_string());
+ tables.push(NUMBERS_TABLE_NAME.to_string());
+ }
+ if schema == INFORMATION_SCHEMA_NAME {
+ tables.push(TABLES.to_string());
+ tables.push(COLUMNS.to_string());
}
Ok(tables)
@@ -356,6 +361,10 @@ impl CatalogManager for FrontendCatalogManager {
}
async fn schema_exist(&self, catalog: &str, schema: &str) -> CatalogResult<bool> {
+ if schema == INFORMATION_SCHEMA_NAME {
+ return Ok(true);
+ }
+
let schema_key = SchemaKey {
catalog_name: catalog.to_string(),
schema_name: schema.to_string(),
@@ -370,6 +379,10 @@ impl CatalogManager for FrontendCatalogManager {
}
async fn table_exist(&self, catalog: &str, schema: &str, table: &str) -> CatalogResult<bool> {
+ if schema == INFORMATION_SCHEMA_NAME && (table == TABLES || table == COLUMNS) {
+ return Ok(true);
+ }
+
let key = TableNameKey::new(catalog, schema, table);
self.table_metadata_manager
.table_name_manager()
@@ -387,7 +400,7 @@ impl CatalogManager for FrontendCatalogManager {
) -> CatalogResult<Option<TableRef>> {
if catalog == DEFAULT_CATALOG_NAME
&& schema == DEFAULT_SCHEMA_NAME
- && table_name == "numbers"
+ && table_name == NUMBERS_TABLE_NAME
{
return Ok(Some(Arc::new(NumbersTable::default())));
}
@@ -395,9 +408,12 @@ impl CatalogManager for FrontendCatalogManager {
if schema == INFORMATION_SCHEMA_NAME {
// hack: use existing cyclic reference to get Arc<Self>.
// This can be removed by refactoring the struct into something like Arc<Inner>
+ common_telemetry::info!("going to use dist instance");
let manager = if let Some(instance) = self.dist_instance.as_ref() {
+ common_telemetry::info!("dist instance exist");
instance.catalog_manager() as _
} else {
+ common_telemetry::info!("dist instance doesn't exist");
return Ok(None);
};
diff --git a/src/frontend/src/instance/distributed.rs b/src/frontend/src/instance/distributed.rs
index 5c9819be66b1..198c00a0a6bd 100644
--- a/src/frontend/src/instance/distributed.rs
+++ b/src/frontend/src/instance/distributed.rs
@@ -79,7 +79,7 @@ const MAX_VALUE: &str = "MAXVALUE";
#[derive(Clone)]
pub struct DistInstance {
meta_client: Arc<MetaClient>,
- catalog_manager: Arc<FrontendCatalogManager>,
+ pub(crate) catalog_manager: Arc<FrontendCatalogManager>,
datanode_clients: Arc<DatanodeClients>,
}
diff --git a/src/promql/src/planner.rs b/src/promql/src/planner.rs
index 1b9c0ff37abf..bf761dca433f 100644
--- a/src/promql/src/planner.rs
+++ b/src/promql/src/planner.rs
@@ -1389,7 +1389,7 @@ mod test {
.build()
.unwrap();
let table = Arc::new(EmptyTable::from_table_info(&table_info));
- let catalog_list = Arc::new(MemoryCatalogManager::default());
+ let catalog_list = MemoryCatalogManager::with_default_setup();
assert!(catalog_list
.register_table(RegisterTableRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
diff --git a/src/query/src/dist_plan/planner.rs b/src/query/src/dist_plan/planner.rs
index 5c6c07373576..f24e2cfe6176 100644
--- a/src/query/src/dist_plan/planner.rs
+++ b/src/query/src/dist_plan/planner.rs
@@ -20,7 +20,7 @@ use async_trait::async_trait;
use catalog::CatalogManagerRef;
use client::client_manager::DatanodeClients;
use common_base::bytes::Bytes;
-use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME};
+use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_meta::peer::Peer;
use common_meta::table_name::TableName;
use datafusion::common::Result;
@@ -93,13 +93,6 @@ impl ExtensionPlanner for DistExtensionPlanner {
return Ok(Some(input_physical_plan));
};
- if table_name.schema_name == INFORMATION_SCHEMA_NAME {
- return planner
- .create_physical_plan(input_plan, session_state)
- .await
- .map(Some);
- }
-
let input_schema = input_physical_plan.schema().clone();
let input_plan = self.set_table_name(&table_name, input_plan.clone())?;
let substrait_plan: Bytes = DFLogicalSubstraitConvertor
diff --git a/src/query/src/range_select/plan_rewrite.rs b/src/query/src/range_select/plan_rewrite.rs
index 643280cf3cd3..40f3400946a4 100644
--- a/src/query/src/range_select/plan_rewrite.rs
+++ b/src/query/src/range_select/plan_rewrite.rs
@@ -379,7 +379,7 @@ mod test {
.build()
.unwrap();
let table = Arc::new(EmptyTable::from_table_info(&table_info));
- let catalog_list = Arc::new(MemoryCatalogManager::default());
+ let catalog_list = MemoryCatalogManager::with_default_setup();
assert!(catalog_list
.register_table(RegisterTableRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
diff --git a/src/query/src/sql.rs b/src/query/src/sql.rs
index d57f83fd92eb..d73150029688 100644
--- a/src/query/src/sql.rs
+++ b/src/query/src/sql.rs
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-mod show;
+mod show_create_table;
use std::collections::HashMap;
use std::sync::Arc;
@@ -185,7 +185,7 @@ pub async fn show_tables(
pub fn show_create_table(table: TableRef, partitions: Option<Partitions>) -> Result<Output> {
let table_info = table.table_info();
let table_name = &table_info.name;
- let mut stmt = show::create_table_stmt(&table_info)?;
+ let mut stmt = show_create_table::create_table_stmt(&table_info)?;
stmt.partitions = partitions;
let sql = format!("{}", stmt);
let columns = vec![
diff --git a/src/query/src/sql/show.rs b/src/query/src/sql/show_create_table.rs
similarity index 99%
rename from src/query/src/sql/show.rs
rename to src/query/src/sql/show_create_table.rs
index 7b4db61fcbf3..5f48d7e1c21c 100644
--- a/src/query/src/sql/show.rs
+++ b/src/query/src/sql/show_create_table.rs
@@ -11,6 +11,9 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+
+//! Implementation of `SHOW CREATE TABLE` statement.
+
use std::fmt::Display;
use datatypes::schema::{ColumnDefaultConstraint, ColumnSchema, SchemaRef, COMMENT_KEY};
diff --git a/src/query/src/tests.rs b/src/query/src/tests.rs
index 79d5fe755d09..cf44a5fe8549 100644
--- a/src/query/src/tests.rs
+++ b/src/query/src/tests.rs
@@ -52,7 +52,7 @@ async fn exec_selection(engine: QueryEngineRef, sql: &str) -> Vec<RecordBatch> {
pub fn new_query_engine_with_table(table: MemTable) -> QueryEngineRef {
let table = Arc::new(table);
- let catalog_manager = Arc::new(MemoryCatalogManager::new_with_table(table));
+ let catalog_manager = MemoryCatalogManager::new_with_table(table);
QueryEngineFactory::new(catalog_manager, false).query_engine()
}
diff --git a/src/script/benches/py_benchmark.rs b/src/script/benches/py_benchmark.rs
index a888db4f6fb2..9b2b065d4a02 100644
--- a/src/script/benches/py_benchmark.rs
+++ b/src/script/benches/py_benchmark.rs
@@ -49,9 +49,7 @@ where
}
pub(crate) fn sample_script_engine() -> PyEngine {
- let catalog_manager = Arc::new(MemoryCatalogManager::new_with_table(Arc::new(
- NumbersTable::default(),
- )));
+ let catalog_manager = MemoryCatalogManager::new_with_table(Arc::new(NumbersTable::default()));
let query_engine = QueryEngineFactory::new(catalog_manager, false).query_engine();
PyEngine::new(query_engine.clone())
diff --git a/src/script/src/python/engine.rs b/src/script/src/python/engine.rs
index 27431c0b34d7..b5133cfdb3a5 100644
--- a/src/script/src/python/engine.rs
+++ b/src/script/src/python/engine.rs
@@ -369,9 +369,8 @@ mod tests {
use super::*;
pub(crate) fn sample_script_engine() -> PyEngine {
- let catalog_manager = Arc::new(MemoryCatalogManager::new_with_table(Arc::new(
- NumbersTable::default(),
- )));
+ let catalog_manager =
+ MemoryCatalogManager::new_with_table(Arc::new(NumbersTable::default()));
let query_engine = QueryEngineFactory::new(catalog_manager, false).query_engine();
PyEngine::new(query_engine.clone())
diff --git a/src/servers/tests/mod.rs b/src/servers/tests/mod.rs
index ee8f72600a4c..f517798ae4f8 100644
--- a/src/servers/tests/mod.rs
+++ b/src/servers/tests/mod.rs
@@ -202,7 +202,7 @@ impl GrpcQueryHandler for DummyInstance {
fn create_testing_instance(table: MemTable) -> DummyInstance {
let table = Arc::new(table);
- let catalog_manager = Arc::new(MemoryCatalogManager::new_with_table(table));
+ let catalog_manager = MemoryCatalogManager::new_with_table(table);
let query_engine = QueryEngineFactory::new(catalog_manager, false).query_engine();
DummyInstance::new(query_engine)
}
diff --git a/src/table-procedure/src/test_util.rs b/src/table-procedure/src/test_util.rs
index 45ed0a2e1cc6..740bd9d62964 100644
--- a/src/table-procedure/src/test_util.rs
+++ b/src/table-procedure/src/test_util.rs
@@ -83,7 +83,7 @@ impl TestEnv {
let state_store = Arc::new(ObjectStateStore::new(object_store));
let procedure_manager = Arc::new(LocalManager::new(config, state_store));
- let catalog_manager = Arc::new(MemoryCatalogManager::default());
+ let catalog_manager = MemoryCatalogManager::with_default_setup();
TestEnv {
dir,
diff --git a/tests-integration/src/tests.rs b/tests-integration/src/tests.rs
index 6abf5104ddbd..436ac5f19517 100644
--- a/tests-integration/src/tests.rs
+++ b/tests-integration/src/tests.rs
@@ -86,6 +86,7 @@ pub(crate) async fn create_standalone_instance(test_name: &str) -> MockStandalon
assert!(dn_instance
.catalog_manager()
+ .clone()
.register_catalog("another_catalog".to_string())
.await
.is_ok());
diff --git a/tests-integration/src/tests/instance_test.rs b/tests-integration/src/tests/instance_test.rs
index e451cdb1c833..9345814ae4e3 100644
--- a/tests-integration/src/tests/instance_test.rs
+++ b/tests-integration/src/tests/instance_test.rs
@@ -392,11 +392,14 @@ async fn test_execute_show_databases_tables(instance: Arc<dyn MockInstance>) {
Output::RecordBatches(databases) => {
let databases = databases.take();
assert_eq!(1, databases[0].num_columns());
- assert_eq!(databases[0].column(0).len(), 1);
+ assert_eq!(databases[0].column(0).len(), 2);
assert_eq!(
*databases[0].column(0),
- Arc::new(StringVector::from(vec![Some("public")])) as VectorRef
+ Arc::new(StringVector::from(vec![
+ Some("information_schema"),
+ Some("public")
+ ])) as VectorRef
);
}
_ => unreachable!(),
@@ -1390,21 +1393,25 @@ async fn test_information_schema_dot_tables(instance: Arc<dyn MockInstance>) {
let expected = match is_distributed_mode {
true => {
"\
-+---------------+--------------+------------+-----------------+----------+-------------+
-| table_catalog | table_schema | table_name | table_type | table_id | engine |
-+---------------+--------------+------------+-----------------+----------+-------------+
-| greptime | public | numbers | LOCAL TEMPORARY | 2 | test_engine |
-| greptime | public | scripts | BASE TABLE | 1024 | mito |
-+---------------+--------------+------------+-----------------+----------+-------------+"
++---------------+--------------------+------------+-----------------+----------+-------------+
+| table_catalog | table_schema | table_name | table_type | table_id | engine |
++---------------+--------------------+------------+-----------------+----------+-------------+
+| greptime | information_schema | columns | LOCAL TEMPORARY | 4 | |
+| greptime | public | numbers | LOCAL TEMPORARY | 2 | test_engine |
+| greptime | public | scripts | BASE TABLE | 1024 | mito |
+| greptime | information_schema | tables | LOCAL TEMPORARY | 3 | |
++---------------+--------------------+------------+-----------------+----------+-------------+"
}
false => {
"\
-+---------------+--------------+------------+-----------------+----------+-------------+
-| table_catalog | table_schema | table_name | table_type | table_id | engine |
-+---------------+--------------+------------+-----------------+----------+-------------+
-| greptime | public | numbers | LOCAL TEMPORARY | 2 | test_engine |
-| greptime | public | scripts | BASE TABLE | 1 | mito |
-+---------------+--------------+------------+-----------------+----------+-------------+"
++---------------+--------------------+------------+-----------------+----------+-------------+
+| table_catalog | table_schema | table_name | table_type | table_id | engine |
++---------------+--------------------+------------+-----------------+----------+-------------+
+| greptime | information_schema | columns | LOCAL TEMPORARY | 4 | |
+| greptime | public | numbers | LOCAL TEMPORARY | 2 | test_engine |
+| greptime | public | scripts | BASE TABLE | 1 | mito |
+| greptime | information_schema | tables | LOCAL TEMPORARY | 3 | |
++---------------+--------------------+------------+-----------------+----------+-------------+"
}
};
@@ -1414,19 +1421,23 @@ async fn test_information_schema_dot_tables(instance: Arc<dyn MockInstance>) {
let expected = match is_distributed_mode {
true => {
"\
-+-----------------+----------------+---------------+------------+----------+--------+
-| table_catalog | table_schema | table_name | table_type | table_id | engine |
-+-----------------+----------------+---------------+------------+----------+--------+
-| another_catalog | another_schema | another_table | BASE TABLE | 1025 | mito |
-+-----------------+----------------+---------------+------------+----------+--------+"
++-----------------+--------------------+---------------+-----------------+----------+--------+
+| table_catalog | table_schema | table_name | table_type | table_id | engine |
++-----------------+--------------------+---------------+-----------------+----------+--------+
+| another_catalog | another_schema | another_table | BASE TABLE | 1025 | mito |
+| another_catalog | information_schema | columns | LOCAL TEMPORARY | 4 | |
+| another_catalog | information_schema | tables | LOCAL TEMPORARY | 3 | |
++-----------------+--------------------+---------------+-----------------+----------+--------+"
}
false => {
"\
-+-----------------+----------------+---------------+------------+----------+--------+
-| table_catalog | table_schema | table_name | table_type | table_id | engine |
-+-----------------+----------------+---------------+------------+----------+--------+
-| another_catalog | another_schema | another_table | BASE TABLE | 1024 | mito |
-+-----------------+----------------+---------------+------------+----------+--------+"
++-----------------+--------------------+---------------+-----------------+----------+--------+
+| table_catalog | table_schema | table_name | table_type | table_id | engine |
++-----------------+--------------------+---------------+-----------------+----------+--------+
+| another_catalog | another_schema | another_table | BASE TABLE | 1024 | mito |
+| another_catalog | information_schema | columns | LOCAL TEMPORARY | 4 | |
+| another_catalog | information_schema | tables | LOCAL TEMPORARY | 3 | |
++-----------------+--------------------+---------------+-----------------+----------+--------+"
}
};
check_output_stream(output, expected).await;
@@ -1447,28 +1458,52 @@ async fn test_information_schema_dot_columns(instance: Arc<dyn MockInstance>) {
let output = execute_sql(&instance, sql).await;
let expected = "\
-+---------------+--------------+------------+--------------+----------------------+---------------+
-| table_catalog | table_schema | table_name | column_name | data_type | semantic_type |
-+---------------+--------------+------------+--------------+----------------------+---------------+
-| greptime | public | numbers | number | UInt32 | PRIMARY KEY |
-| greptime | public | scripts | schema | String | PRIMARY KEY |
-| greptime | public | scripts | name | String | PRIMARY KEY |
-| greptime | public | scripts | script | String | FIELD |
-| greptime | public | scripts | engine | String | FIELD |
-| greptime | public | scripts | timestamp | TimestampMillisecond | TIME INDEX |
-| greptime | public | scripts | gmt_created | TimestampMillisecond | FIELD |
-| greptime | public | scripts | gmt_modified | TimestampMillisecond | FIELD |
-+---------------+--------------+------------+--------------+----------------------+---------------+";
++---------------+--------------------+------------+---------------+----------------------+---------------+
+| table_catalog | table_schema | table_name | column_name | data_type | semantic_type |
++---------------+--------------------+------------+---------------+----------------------+---------------+
+| greptime | information_schema | columns | table_catalog | String | FIELD |
+| greptime | information_schema | columns | table_schema | String | FIELD |
+| greptime | information_schema | columns | table_name | String | FIELD |
+| greptime | information_schema | columns | column_name | String | FIELD |
+| greptime | information_schema | columns | data_type | String | FIELD |
+| greptime | information_schema | columns | semantic_type | String | FIELD |
+| greptime | public | numbers | number | UInt32 | PRIMARY KEY |
+| greptime | public | scripts | schema | String | PRIMARY KEY |
+| greptime | public | scripts | name | String | PRIMARY KEY |
+| greptime | public | scripts | script | String | FIELD |
+| greptime | public | scripts | engine | String | FIELD |
+| greptime | public | scripts | timestamp | TimestampMillisecond | TIME INDEX |
+| greptime | public | scripts | gmt_created | TimestampMillisecond | FIELD |
+| greptime | public | scripts | gmt_modified | TimestampMillisecond | FIELD |
+| greptime | information_schema | tables | table_catalog | String | FIELD |
+| greptime | information_schema | tables | table_schema | String | FIELD |
+| greptime | information_schema | tables | table_name | String | FIELD |
+| greptime | information_schema | tables | table_type | String | FIELD |
+| greptime | information_schema | tables | table_id | UInt32 | FIELD |
+| greptime | information_schema | tables | engine | String | FIELD |
++---------------+--------------------+------------+---------------+----------------------+---------------+";
check_output_stream(output, expected).await;
let output = execute_sql_with(&instance, sql, query_ctx).await;
let expected = "\
-+-----------------+----------------+---------------+-------------+-----------+---------------+
-| table_catalog | table_schema | table_name | column_name | data_type | semantic_type |
-+-----------------+----------------+---------------+-------------+-----------+---------------+
-| another_catalog | another_schema | another_table | i | Int64 | TIME INDEX |
-+-----------------+----------------+---------------+-------------+-----------+---------------+";
++-----------------+--------------------+---------------+---------------+-----------+---------------+
+| table_catalog | table_schema | table_name | column_name | data_type | semantic_type |
++-----------------+--------------------+---------------+---------------+-----------+---------------+
+| another_catalog | another_schema | another_table | i | Int64 | TIME INDEX |
+| another_catalog | information_schema | columns | table_catalog | String | FIELD |
+| another_catalog | information_schema | columns | table_schema | String | FIELD |
+| another_catalog | information_schema | columns | table_name | String | FIELD |
+| another_catalog | information_schema | columns | column_name | String | FIELD |
+| another_catalog | information_schema | columns | data_type | String | FIELD |
+| another_catalog | information_schema | columns | semantic_type | String | FIELD |
+| another_catalog | information_schema | tables | table_catalog | String | FIELD |
+| another_catalog | information_schema | tables | table_schema | String | FIELD |
+| another_catalog | information_schema | tables | table_name | String | FIELD |
+| another_catalog | information_schema | tables | table_type | String | FIELD |
+| another_catalog | information_schema | tables | table_id | UInt32 | FIELD |
+| another_catalog | information_schema | tables | engine | String | FIELD |
++-----------------+--------------------+---------------+---------------+-----------+---------------+";
check_output_stream(output, expected).await;
}
diff --git a/tests/cases/standalone/common/show/show_databases_tables.result b/tests/cases/standalone/common/show/show_databases_tables.result
new file mode 100644
index 000000000000..ec9f50230642
--- /dev/null
+++ b/tests/cases/standalone/common/show/show_databases_tables.result
@@ -0,0 +1,24 @@
+show databases;
+
++-----------------------+
+| Schemas |
++-----------------------+
+| information_schema |
+| public |
+| test_public_schema |
+| upper_case_table_name |
++-----------------------+
+
+use information_schema;
+
+Affected Rows: 0
+
+show tables;
+
++---------+
+| Tables |
++---------+
+| columns |
+| tables |
++---------+
+
diff --git a/tests/cases/standalone/common/show/show_databases_tables.sql b/tests/cases/standalone/common/show/show_databases_tables.sql
new file mode 100644
index 000000000000..5e84f273903a
--- /dev/null
+++ b/tests/cases/standalone/common/show/show_databases_tables.sql
@@ -0,0 +1,5 @@
+show databases;
+
+use information_schema;
+
+show tables;
diff --git a/tests/cases/standalone/common/system/information_schema.result b/tests/cases/standalone/common/system/information_schema.result
index d6ad9c60e0ae..1da70f3c6469 100644
--- a/tests/cases/standalone/common/system/information_schema.result
+++ b/tests/cases/standalone/common/system/information_schema.result
@@ -1,3 +1,44 @@
+-- scripts table has different table ids in different modes
+select *
+from information_schema.tables
+where table_name != 'scripts'
+order by table_schema, table_name;
+
++---------------+--------------------+------------+-----------------+----------+-------------+
+| table_catalog | table_schema | table_name | table_type | table_id | engine |
++---------------+--------------------+------------+-----------------+----------+-------------+
+| greptime | information_schema | columns | LOCAL TEMPORARY | 4 | |
+| greptime | information_schema | tables | LOCAL TEMPORARY | 3 | |
+| greptime | public | numbers | LOCAL TEMPORARY | 2 | test_engine |
++---------------+--------------------+------------+-----------------+----------+-------------+
+
+select * from information_schema.columns order by table_schema, table_name;
+
++---------------+--------------------+------------+---------------+----------------------+---------------+
+| table_catalog | table_schema | table_name | column_name | data_type | semantic_type |
++---------------+--------------------+------------+---------------+----------------------+---------------+
+| greptime | information_schema | columns | table_catalog | String | FIELD |
+| greptime | information_schema | columns | table_schema | String | FIELD |
+| greptime | information_schema | columns | table_name | String | FIELD |
+| greptime | information_schema | columns | column_name | String | FIELD |
+| greptime | information_schema | columns | data_type | String | FIELD |
+| greptime | information_schema | columns | semantic_type | String | FIELD |
+| greptime | information_schema | tables | table_catalog | String | FIELD |
+| greptime | information_schema | tables | table_schema | String | FIELD |
+| greptime | information_schema | tables | table_name | String | FIELD |
+| greptime | information_schema | tables | table_type | String | FIELD |
+| greptime | information_schema | tables | table_id | UInt32 | FIELD |
+| greptime | information_schema | tables | engine | String | FIELD |
+| greptime | public | numbers | number | UInt32 | PRIMARY KEY |
+| greptime | public | scripts | schema | String | PRIMARY KEY |
+| greptime | public | scripts | name | String | PRIMARY KEY |
+| greptime | public | scripts | script | String | FIELD |
+| greptime | public | scripts | engine | String | FIELD |
+| greptime | public | scripts | timestamp | TimestampMillisecond | TIME INDEX |
+| greptime | public | scripts | gmt_created | TimestampMillisecond | FIELD |
+| greptime | public | scripts | gmt_modified | TimestampMillisecond | FIELD |
++---------------+--------------------+------------+---------------+----------------------+---------------+
+
create
database my_db;
@@ -29,6 +70,7 @@ select table_catalog, table_schema, table_name, table_type, engine
from information_schema.tables
where table_catalog = 'greptime'
and table_schema != 'public'
+ and table_schema != 'information_schema'
order by table_schema, table_name;
+---------------+--------------+------------+------------+--------+
@@ -41,6 +83,7 @@ select table_catalog, table_schema, table_name, column_name, data_type, semantic
from information_schema.columns
where table_catalog = 'greptime'
and table_schema != 'public'
+ and table_schema != 'information_schema'
order by table_schema, table_name;
+---------------+--------------+------------+-------------+-----------+---------------+
@@ -53,3 +96,7 @@ use public;
Affected Rows: 0
+drop schema my_db;
+
+Error: 1001(Unsupported), SQL statement is not supported: drop schema my_db;, keyword: schema
+
diff --git a/tests/cases/standalone/common/system/information_schema.sql b/tests/cases/standalone/common/system/information_schema.sql
index 4987bb053dbb..4313d9179775 100644
--- a/tests/cases/standalone/common/system/information_schema.sql
+++ b/tests/cases/standalone/common/system/information_schema.sql
@@ -1,3 +1,11 @@
+-- scripts table has different table ids in different modes
+select *
+from information_schema.tables
+where table_name != 'scripts'
+order by table_schema, table_name;
+
+select * from information_schema.columns order by table_schema, table_name;
+
create
database my_db;
@@ -17,12 +25,16 @@ select table_catalog, table_schema, table_name, table_type, engine
from information_schema.tables
where table_catalog = 'greptime'
and table_schema != 'public'
+ and table_schema != 'information_schema'
order by table_schema, table_name;
select table_catalog, table_schema, table_name, column_name, data_type, semantic_type
from information_schema.columns
where table_catalog = 'greptime'
and table_schema != 'public'
+ and table_schema != 'information_schema'
order by table_schema, table_name;
use public;
+
+drop schema my_db;
|
fix
|
let information_schema know itself (#2149)
|
2c1b1cecc85802a2a7f2df0cf85f7fff18e1885d
|
2024-01-09 12:12:46
|
Lei, HUANG
|
chore: add bound check for raft-engine logstore (#3073)
| false
|
diff --git a/config/datanode.example.toml b/config/datanode.example.toml
index 627fb9cccaa8..420c8d77dfa7 100644
--- a/config/datanode.example.toml
+++ b/config/datanode.example.toml
@@ -117,6 +117,8 @@ sst_write_buffer_size = "8MB"
scan_parallelism = 0
# Capacity of the channel to send data from parallel scan tasks to the main task (default 32).
parallel_scan_channel_size = 32
+# Whether to allow stale WAL entries read during replay.
+allow_stale_entries = false
# Log options, see `standalone.example.toml`
# [logging]
diff --git a/config/standalone.example.toml b/config/standalone.example.toml
index c9fd320b077d..5757f263737d 100644
--- a/config/standalone.example.toml
+++ b/config/standalone.example.toml
@@ -214,6 +214,8 @@ sst_write_buffer_size = "8MB"
scan_parallelism = 0
# Capacity of the channel to send data from parallel scan tasks to the main task (default 32).
parallel_scan_channel_size = 32
+# Whether to allow stale WAL entries read during replay.
+allow_stale_entries = false
# Log options
# [logging]
diff --git a/src/log-store/src/error.rs b/src/log-store/src/error.rs
index b3f8b5d08585..be880b188211 100644
--- a/src/log-store/src/error.rs
+++ b/src/log-store/src/error.rs
@@ -20,6 +20,7 @@ use common_macro::stack_trace_debug;
use common_runtime::error::Error as RuntimeError;
use serde_json::error::Error as JsonError;
use snafu::{Location, Snafu};
+use store_api::storage::RegionId;
use crate::kafka::NamespaceImpl as KafkaNamespace;
@@ -183,6 +184,18 @@ pub enum Error {
#[snafu(display("The record sequence is not legal, error: {}", error))]
IllegalSequence { location: Location, error: String },
+
+ #[snafu(display(
+ "Attempt to append discontinuous log entry, region: {}, last index: {}, attempt index: {}",
+ region_id,
+ last_index,
+ attempt_index
+ ))]
+ DiscontinuousLogIndex {
+ region_id: RegionId,
+ last_index: u64,
+ attempt_index: u64,
+ },
}
impl ErrorExt for Error {
diff --git a/src/log-store/src/raft_engine/log_store.rs b/src/log-store/src/raft_engine/log_store.rs
index 15c81d1e49d6..515ce6645f1b 100644
--- a/src/log-store/src/raft_engine/log_store.rs
+++ b/src/log-store/src/raft_engine/log_store.rs
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::fmt::{Debug, Formatter};
use std::sync::atomic::{AtomicI64, Ordering};
@@ -23,15 +24,15 @@ use common_runtime::{RepeatedTask, TaskFunction};
use common_telemetry::{error, info};
use raft_engine::{Config, Engine, LogBatch, MessageExt, ReadableSize, RecoveryMode};
use snafu::{ensure, ResultExt};
-use store_api::logstore::entry::{Entry, Id as EntryId};
+use store_api::logstore::entry::Id as EntryId;
use store_api::logstore::entry_stream::SendableEntryStream;
use store_api::logstore::namespace::{Id as NamespaceId, Namespace as NamespaceTrait};
use store_api::logstore::{AppendBatchResponse, AppendResponse, LogStore};
-use crate::error;
use crate::error::{
- AddEntryLogBatchSnafu, Error, FetchEntrySnafu, IllegalNamespaceSnafu, IllegalStateSnafu,
- OverrideCompactedEntrySnafu, RaftEngineSnafu, Result, StartGcTaskSnafu, StopGcTaskSnafu,
+ AddEntryLogBatchSnafu, DiscontinuousLogIndexSnafu, Error, FetchEntrySnafu,
+ IllegalNamespaceSnafu, IllegalStateSnafu, OverrideCompactedEntrySnafu, RaftEngineSnafu, Result,
+ StartGcTaskSnafu, StopGcTaskSnafu,
};
use crate::raft_engine::backend::SYSTEM_NAMESPACE;
use crate::raft_engine::protos::logstore::{EntryImpl, NamespaceImpl as Namespace};
@@ -121,22 +122,65 @@ impl RaftEngineLogStore {
)
}
- /// Checks if entry does not override the min index of namespace.
- fn check_entry(&self, e: &EntryImpl) -> Result<()> {
- if cfg!(debug_assertions) {
+ /// Converts entries to `LogBatch` and checks if entry ids are valid.
+ /// Returns the `LogBatch` converted along with the last entry id
+ /// to append in each namespace(region).
+ fn entries_to_batch(
+ &self,
+ entries: Vec<EntryImpl>,
+ ) -> Result<(LogBatch, HashMap<NamespaceId, EntryId>)> {
+ // Records the last entry id for each region's entries.
+ let mut entry_ids: HashMap<NamespaceId, EntryId> = HashMap::with_capacity(entries.len());
+ let mut batch = LogBatch::with_capacity(entries.len());
+
+ for e in entries {
let ns_id = e.namespace_id;
- if let Some(first_index) = self.engine.first_index(ns_id) {
- ensure!(
- e.id() >= first_index,
- OverrideCompactedEntrySnafu {
- namespace: ns_id,
- first_index,
- attempt_index: e.id(),
+ match entry_ids.entry(ns_id) {
+ Entry::Occupied(mut o) => {
+ let prev = *o.get();
+ ensure!(
+ e.id == prev + 1,
+ DiscontinuousLogIndexSnafu {
+ region_id: ns_id,
+ last_index: prev,
+ attempt_index: e.id
+ }
+ );
+ o.insert(e.id);
+ }
+ Entry::Vacant(v) => {
+ // this entry is the first in batch of given region.
+ if let Some(first_index) = self.engine.first_index(ns_id) {
+ // ensure the first in batch does not override compacted entry.
+ ensure!(
+ e.id > first_index,
+ OverrideCompactedEntrySnafu {
+ namespace: ns_id,
+ first_index,
+ attempt_index: e.id,
+ }
+ );
}
- );
+ // ensure the first in batch does not form a hole in raft-engine.
+ if let Some(last_index) = self.engine.last_index(ns_id) {
+ ensure!(
+ e.id == last_index + 1,
+ DiscontinuousLogIndexSnafu {
+ region_id: ns_id,
+ last_index,
+ attempt_index: e.id
+ }
+ );
+ }
+ v.insert(e.id);
+ }
}
+ batch
+ .add_entries::<MessageType>(ns_id, &[e])
+ .context(AddEntryLogBatchSnafu)?;
}
- Ok(())
+
+ Ok((batch, entry_ids))
}
}
@@ -171,8 +215,8 @@ impl LogStore for RaftEngineLogStore {
if let Some(first_index) = self.engine.first_index(namespace_id) {
ensure!(
- entry_id >= first_index,
- error::OverrideCompactedEntrySnafu {
+ entry_id > first_index,
+ OverrideCompactedEntrySnafu {
namespace: namespace_id,
first_index,
attempt_index: entry_id,
@@ -180,6 +224,17 @@ impl LogStore for RaftEngineLogStore {
);
}
+ if let Some(last_index) = self.engine.last_index(namespace_id) {
+ ensure!(
+ entry_id == last_index + 1,
+ DiscontinuousLogIndexSnafu {
+ region_id: namespace_id,
+ last_index,
+ attempt_index: entry_id
+ }
+ );
+ }
+
let _ = self
.engine
.write(&mut batch, self.config.sync_write)
@@ -197,23 +252,7 @@ impl LogStore for RaftEngineLogStore {
return Ok(AppendBatchResponse::default());
}
- // Records the last entry id for each region's entries.
- let mut last_entry_ids: HashMap<NamespaceId, EntryId> =
- HashMap::with_capacity(entries.len());
- let mut batch = LogBatch::with_capacity(entries.len());
-
- for e in entries {
- self.check_entry(&e)?;
- // For raft-engine log store, the namespace id is the region id.
- let ns_id = e.namespace_id;
- last_entry_ids
- .entry(ns_id)
- .and_modify(|x| *x = (*x).max(e.id))
- .or_insert(e.id);
- batch
- .add_entries::<MessageType>(ns_id, &[e])
- .context(AddEntryLogBatchSnafu)?;
- }
+ let (mut batch, last_entry_ids) = self.entries_to_batch(entries)?;
let mut sync = self.config.sync_write;
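
The hunk above replaces the old per-entry debug check with a batch-level validation in entries_to_batch. A minimal standalone sketch of the same rules follows (illustrative only; the plain u64 ids and the helper name check_batch are assumptions, not code from the commit): within a batch, each namespace's entry ids must increase by exactly one, and the first id for a namespace must neither override a compacted entry nor leave a hole after the last persisted index.

use std::collections::HashMap;

/// Per namespace: entry ids must be strictly consecutive within the batch,
/// and the first id must neither override a compacted entry (first_index)
/// nor leave a hole after the last persisted id (last_index).
fn check_batch(
    entries: &[(u64, u64)],          // (namespace_id, entry_id)
    first_index: &HashMap<u64, u64>, // per-namespace compacted floor
    last_index: &HashMap<u64, u64>,  // per-namespace last persisted id
) -> Result<HashMap<u64, u64>, String> {
    let mut last_in_batch: HashMap<u64, u64> = HashMap::new();
    for &(ns, id) in entries {
        match last_in_batch.get(&ns).copied() {
            // Later entries of the same namespace must be consecutive.
            Some(prev) if id != prev + 1 => {
                return Err(format!(
                    "discontinuous index in namespace {ns}: {prev} -> {id}"
                ));
            }
            Some(_) => {}
            // First entry of this namespace in the batch.
            None => {
                if let Some(&first) = first_index.get(&ns) {
                    if id <= first {
                        return Err(format!(
                            "namespace {ns}: id {id} overrides compacted {first}"
                        ));
                    }
                }
                if let Some(&last) = last_index.get(&ns) {
                    if id != last + 1 {
                        return Err(format!(
                            "namespace {ns}: hole between {last} and {id}"
                        ));
                    }
                }
            }
        }
        last_in_batch.insert(ns, id);
    }
    Ok(last_in_batch)
}

The returned map plays the role of last_entry_ids in the real implementation: the highest id appended per region in this batch.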
diff --git a/src/mito2/src/config.rs b/src/mito2/src/config.rs
index b56c16addf79..0723c702ae70 100644
--- a/src/mito2/src/config.rs
+++ b/src/mito2/src/config.rs
@@ -87,6 +87,8 @@ pub struct MitoConfig {
pub scan_parallelism: usize,
/// Capacity of the channel to send data from parallel scan tasks to the main task (default 32).
pub parallel_scan_channel_size: usize,
+ /// Whether to allow stale entries read during replay.
+ pub allow_stale_entries: bool,
}
impl Default for MitoConfig {
@@ -110,6 +112,7 @@ impl Default for MitoConfig {
sst_write_buffer_size: ReadableSize::mb(8),
scan_parallelism: divide_num_cpus(4),
parallel_scan_channel_size: DEFAULT_SCAN_CHANNEL_SIZE,
+ allow_stale_entries: false,
}
}
}
diff --git a/src/mito2/src/error.rs b/src/mito2/src/error.rs
index b63068072883..86ac9abfe79b 100644
--- a/src/mito2/src/error.rs
+++ b/src/mito2/src/error.rs
@@ -473,6 +473,18 @@ pub enum Error {
#[snafu(display("Invalid config, {reason}"))]
InvalidConfig { reason: String, location: Location },
+
+ #[snafu(display(
+ "Stale log entry found during replay, region: {}, flushed: {}, replayed: {}",
+ region_id,
+ flushed_entry_id,
+ unexpected_entry_id
+ ))]
+ StaleLogEntry {
+ region_id: RegionId,
+ flushed_entry_id: u64,
+ unexpected_entry_id: u64,
+ },
}
pub type Result<T, E = Error> = std::result::Result<T, E>;
@@ -563,6 +575,7 @@ impl ErrorExt for Error {
}
CleanDir { .. } => StatusCode::Unexpected,
InvalidConfig { .. } => StatusCode::InvalidArguments,
+ StaleLogEntry { .. } => StatusCode::Unexpected,
}
}
diff --git a/src/mito2/src/region/opener.rs b/src/mito2/src/region/opener.rs
index 9fd6e36dc898..80116ea9fdd8 100644
--- a/src/mito2/src/region/opener.rs
+++ b/src/mito2/src/region/opener.rs
@@ -32,7 +32,9 @@ use store_api::storage::{ColumnId, RegionId};
use crate::access_layer::AccessLayer;
use crate::cache::CacheManagerRef;
use crate::config::MitoConfig;
-use crate::error::{EmptyRegionDirSnafu, ObjectStoreNotFoundSnafu, RegionCorruptedSnafu, Result};
+use crate::error::{
+ EmptyRegionDirSnafu, ObjectStoreNotFoundSnafu, RegionCorruptedSnafu, Result, StaleLogEntrySnafu,
+};
use crate::manifest::manager::{RegionManifestManager, RegionManifestOptions};
use crate::manifest::storage::manifest_compress_type;
use crate::memtable::MemtableBuilderRef;
@@ -267,6 +269,7 @@ impl RegionOpener {
region_id,
flushed_entry_id,
&version_control,
+ config.allow_stale_entries,
)
.await?;
} else {
@@ -375,6 +378,7 @@ pub(crate) async fn replay_memtable<S: LogStore>(
region_id: RegionId,
flushed_entry_id: EntryId,
version_control: &VersionControlRef,
+ allow_stale_entries: bool,
) -> Result<EntryId> {
let mut rows_replayed = 0;
// Last entry id should start from flushed entry id since there might be no
@@ -383,10 +387,23 @@ pub(crate) async fn replay_memtable<S: LogStore>(
let mut region_write_ctx = RegionWriteCtx::new(region_id, version_control, wal_options.clone());
let replay_from_entry_id = flushed_entry_id + 1;
+ let mut stale_entry_found = false;
let mut wal_stream = wal.scan(region_id, replay_from_entry_id, wal_options)?;
while let Some(res) = wal_stream.next().await {
let (entry_id, entry) = res?;
- debug_assert!(entry_id > flushed_entry_id);
+ if entry_id <= flushed_entry_id {
+ stale_entry_found = true;
+ warn!("Stale WAL entries read during replay, region id: {}, flushed entry id: {}, entry id read: {}", region_id, flushed_entry_id, entry_id);
+ ensure!(
+ allow_stale_entries,
+ StaleLogEntrySnafu {
+ region_id,
+ flushed_entry_id,
+ unexpected_entry_id: entry_id,
+ }
+ );
+ }
+
last_entry_id = last_entry_id.max(entry_id);
for mutation in entry.mutations {
rows_replayed += mutation
@@ -402,6 +419,12 @@ pub(crate) async fn replay_memtable<S: LogStore>(
region_write_ctx.set_next_entry_id(last_entry_id + 1);
region_write_ctx.write_memtable();
+ if allow_stale_entries && stale_entry_found {
+ wal.obsolete(region_id, flushed_entry_id, wal_options)
+ .await?;
+ info!("Force obsolete WAL entries, region id: {}, flushed entry id: {}, last entry id read: {}", region_id, flushed_entry_id, last_entry_id);
+ }
+
info!(
"Replay WAL for region: {}, rows recovered: {}, last entry id: {}",
region_id, rows_replayed, last_entry_id
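
The replay guard added to replay_memtable above can be reduced to the following sketch (assumed, simplified signature; not the commit's code): ids at or below the flushed entry id are stale and rejected unless allow_stale_entries is set, in which case the real code later obsoletes the WAL up to flushed_entry_id.

fn filter_replay_entries(
    flushed_entry_id: u64,
    allow_stale_entries: bool,
    entry_ids: impl IntoIterator<Item = u64>,
) -> Result<u64, String> {
    let mut last_entry_id = flushed_entry_id;
    for entry_id in entry_ids {
        if entry_id <= flushed_entry_id && !allow_stale_entries {
            // Corresponds to the StaleLogEntry error in the hunk above.
            return Err(format!(
                "stale log entry: flushed {flushed_entry_id}, read {entry_id}"
            ));
        }
        last_entry_id = last_entry_id.max(entry_id);
    }
    Ok(last_entry_id)
}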
diff --git a/src/mito2/src/worker/handle_catchup.rs b/src/mito2/src/worker/handle_catchup.rs
index c25f5e074da1..9841c4eb43f6 100644
--- a/src/mito2/src/worker/handle_catchup.rs
+++ b/src/mito2/src/worker/handle_catchup.rs
@@ -78,6 +78,7 @@ impl<S: LogStore> RegionWorkerLoop<S> {
region_id,
flushed_entry_id,
                 &region.version_control,
+ self.config.allow_stale_entries,
)
.await?;
info!(
diff --git a/tests-integration/tests/http.rs b/tests-integration/tests/http.rs
index fda52c3c35ef..351fa7cd0b95 100644
--- a/tests-integration/tests/http.rs
+++ b/tests-integration/tests/http.rs
@@ -737,6 +737,7 @@ experimental_write_cache_path = ""
experimental_write_cache_size = "512MiB"
sst_write_buffer_size = "8MiB"
parallel_scan_channel_size = 32
+allow_stale_entries = false
[[datanode.region_engine]]
|
chore
|
add bound check for raft-engine logstore (#3073)
|
72a1732fb4ba254ba9de30cc10f365cc901df5fb
|
2024-08-13 11:59:28
|
Yingwen
|
docs: Adds more panels to grafana dashboards (#4540)
| false
|
diff --git a/grafana/greptimedb-cluster.json b/grafana/greptimedb-cluster.json
index 1e2473fda302..cf8630e20067 100644
--- a/grafana/greptimedb-cluster.json
+++ b/grafana/greptimedb-cluster.json
@@ -1,4862 +1,5244 @@
{
- "annotations": {
- "list": [
- {
- "builtIn": 1,
- "datasource": {
- "type": "grafana",
- "uid": "-- Grafana --"
- },
- "enable": true,
- "hide": true,
- "iconColor": "rgba(0, 211, 255, 1)",
- "name": "Annotations & Alerts",
- "target": {
- "limit": 100,
- "matchAny": false,
- "tags": [],
- "type": "dashboard"
- },
- "type": "dashboard"
- }
- ]
+ "__inputs": [
+ {
+ "name": "DS_PROMETHEUS-1",
+ "label": "prometheus-1",
+ "description": "",
+ "type": "datasource",
+ "pluginId": "prometheus",
+ "pluginName": "Prometheus"
+ }
+ ],
+ "__elements": {},
+ "__requires": [
+ {
+ "type": "grafana",
+ "id": "grafana",
+ "name": "Grafana",
+ "version": "10.1.5"
},
- "description": "",
- "editable": true,
- "fiscalYearStartMonth": 0,
- "graphTooltip": 1,
- "id": 1,
- "links": [],
- "liveNow": false,
- "panels": [
- {
- "collapsed": false,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 0
- },
- "id": 155,
- "panels": [],
- "title": "Frontend Entry Middleware",
- "type": "row"
- },
+ {
+ "type": "datasource",
+ "id": "prometheus",
+ "name": "Prometheus",
+ "version": "1.0.0"
+ },
+ {
+ "type": "panel",
+ "id": "timeseries",
+ "name": "Time series",
+ "version": ""
+ }
+ ],
+ "annotations": {
+ "list": [
{
+ "builtIn": 1,
"datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
+ "type": "grafana",
+ "uid": "-- Grafana --"
},
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "points",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "s"
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "target": {
+ "limit": 100,
+ "matchAny": false,
+ "tags": [],
+ "type": "dashboard"
+ },
+ "type": "dashboard"
+ }
+ ]
+ },
+ "description": "",
+ "editable": true,
+ "fiscalYearStartMonth": 0,
+ "graphTooltip": 1,
+ "id": null,
+ "links": [],
+ "liveNow": false,
+ "panels": [
+ {
+ "collapsed": false,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 0
+ },
+ "id": 155,
+ "panels": [],
+ "title": "Frontend Entry Middleware",
+ "type": "row"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
},
- "overrides": [
- {
- "matcher": {
- "id": "byRegexp",
- "options": ".*?-qps"
- },
- "properties": [
- {
- "id": "custom.drawStyle",
- "value": "line"
- },
- {
- "id": "custom.axisPlacement",
- "value": "right"
- },
- {
- "id": "unit",
- "value": "ops"
- },
- {
- "id": "custom.stacking",
- "value": {
- "group": "A",
- "mode": "none"
- }
- }
- ]
- },
- {
- "matcher": {
- "id": "byValue",
- "options": {
- "op": "gte",
- "reducer": "allIsZero",
- "value": 0
- }
- },
- "properties": [
- {
- "id": "custom.hideFrom",
- "value": {
- "legend": true,
- "tooltip": true,
- "viz": true
- }
- }
- ]
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "points",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
}
- ]
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 1
- },
- "id": 152,
- "options": {
- "legend": {
- "calcs": [
- "last"
- ],
- "displayMode": "table",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "multi",
- "sort": "none"
- }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "s"
},
- "targets": [
+ "overrides": [
{
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "disableTextWrap": false,
- "editorMode": "code",
- "exemplar": false,
- "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, path, code) (rate(greptime_servers_grpc_requests_elapsed_bucket{greptime_pod=~\"$greptime_pod\"}[$__rate_interval])))",
- "fullMetaSearch": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{greptime_pod}}-{{path}}-{{code}}-p99",
- "range": true,
- "refId": "A",
- "useBackend": false
+ "matcher": {
+ "id": "byRegexp",
+ "options": ".*?-qps"
+ },
+ "properties": [
+ {
+ "id": "custom.drawStyle",
+ "value": "line"
+ },
+ {
+ "id": "custom.axisPlacement",
+ "value": "right"
+ },
+ {
+ "id": "unit",
+ "value": "ops"
+ },
+ {
+ "id": "custom.stacking",
+ "value": {
+ "group": "A",
+ "mode": "none"
+ }
+ }
+ ]
},
{
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "disableTextWrap": false,
- "editorMode": "code",
- "exemplar": false,
- "expr": "sum by(greptime_pod, path, code) (rate(greptime_servers_grpc_requests_elapsed_count{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))",
- "fullMetaSearch": false,
- "hide": false,
- "includeNullMetadata": true,
- "instant": false,
- "interval": "$__rate_interval",
- "legendFormat": "{{greptime_pod}}-{{path}}-{{code}}-qps",
- "range": true,
- "refId": "B",
- "useBackend": false
- }
- ],
- "title": "gRPC middleware",
- "type": "timeseries"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "points",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
+ "matcher": {
+ "id": "byValue",
+ "options": {
+ "op": "gte",
+ "reducer": "allIsZero",
+ "value": 0
}
},
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "s"
- },
- "overrides": [
- {
- "matcher": {
- "id": "byRegexp",
- "options": ".*?-qps"
- },
- "properties": [
- {
- "id": "custom.drawStyle",
- "value": "line"
- },
- {
- "id": "custom.axisPlacement",
- "value": "right"
- },
- {
- "id": "unit",
- "value": "ops"
- },
- {
- "id": "custom.stacking",
- "value": {
- "group": "A",
- "mode": "none"
- }
- }
- ]
- },
- {
- "matcher": {
- "id": "byValue",
- "options": {
- "op": "gte",
- "reducer": "allIsZero",
- "value": 0
+ "properties": [
+ {
+ "id": "custom.hideFrom",
+ "value": {
+ "legend": true,
+ "tooltip": true,
+ "viz": true
}
- },
- "properties": [
- {
- "id": "custom.hideFrom",
- "value": {
- "legend": true,
- "tooltip": true,
- "viz": true
- }
- }
- ]
- }
- ]
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 12,
- "y": 1
- },
- "id": 154,
- "options": {
- "legend": {
- "calcs": [
- "last"
- ],
- "displayMode": "table",
- "placement": "bottom",
- "showLegend": true,
- "sortBy": "Name",
- "sortDesc": false
- },
- "tooltip": {
- "mode": "multi",
- "sort": "desc"
+ }
+ ]
}
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 1
+ },
+ "id": 152,
+ "options": {
+ "legend": {
+ "calcs": [
+ "last"
+ ],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
},
- "targets": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "disableTextWrap": false,
- "editorMode": "code",
- "exemplar": false,
- "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, path, method, code) (rate(greptime_servers_http_requests_elapsed_bucket{greptime_pod=~\"$greptime_pod\",path!~\"/health|/metrics\"}[$__rate_interval])))",
- "fullMetaSearch": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{greptime_pod}}-{{path}}-{{method}}-{{code}}-p99",
- "range": true,
- "refId": "A",
- "useBackend": false
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "disableTextWrap": false,
- "editorMode": "code",
- "exemplar": false,
- "expr": "sum by(greptime_pod, path, method, code) (rate(greptime_servers_http_requests_elapsed_count{greptime_pod=~\"$greptime_pod\",path!~\"/health|/metrics\"}[$__rate_interval]))",
- "fullMetaSearch": false,
- "hide": false,
- "includeNullMetadata": true,
- "instant": false,
- "interval": "$__rate_interval",
- "legendFormat": "{{greptime_pod}}-{{path}}-{{method}}-{{code}}-qps",
- "range": true,
- "refId": "B",
- "useBackend": false
- }
- ],
- "title": "HTTP middleware",
- "type": "timeseries"
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
},
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "disableTextWrap": false,
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "histogram_quantile(0.99, sum by(instance, le, path, code) (rate(greptime_servers_grpc_requests_elapsed_bucket{instance=~\"$instance\"}[$__rate_interval])))",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{instance}}-{{path}}-{{code}}-p99",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
},
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "points",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "s"
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
},
- "overrides": [
- {
- "matcher": {
- "id": "byRegexp",
- "options": ".*?-qps"
- },
- "properties": [
- {
- "id": "custom.drawStyle",
- "value": "line"
- },
- {
- "id": "custom.axisPlacement",
- "value": "right"
- },
- {
- "id": "unit",
- "value": "ops"
- },
- {
- "id": "custom.stacking",
- "value": {
- "group": "A",
- "mode": "none"
- }
- }
- ]
- },
- {
- "matcher": {
- "id": "byValue",
- "options": {
- "op": "gte",
- "reducer": "allIsZero",
- "value": 0
- }
- },
- "properties": [
- {
- "id": "custom.hideFrom",
- "value": {
- "legend": true,
- "tooltip": true,
- "viz": true
- }
- }
- ]
+ "disableTextWrap": false,
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "sum by(instance, path, code) (rate(greptime_servers_grpc_requests_elapsed_count{instance=~\"$instance\"}[$__rate_interval]))",
+ "fullMetaSearch": false,
+ "hide": true,
+ "includeNullMetadata": true,
+ "instant": false,
+ "interval": "$__rate_interval",
+ "legendFormat": "{{instance}}-{{path}}-{{code}}-qps",
+ "range": true,
+ "refId": "B",
+ "useBackend": false
+ }
+ ],
+ "title": "gRPC middleware",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "points",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
}
- ]
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 9
- },
- "id": 156,
- "options": {
- "legend": {
- "calcs": [
- "last"
- ],
- "displayMode": "table",
- "placement": "bottom",
- "showLegend": true,
- "sortBy": "Name",
- "sortDesc": true
- },
- "tooltip": {
- "mode": "multi",
- "sort": "desc"
- }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "s"
},
- "targets": [
+ "overrides": [
{
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "disableTextWrap": false,
- "editorMode": "code",
- "exemplar": false,
- "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, subprotocol, db) (rate(greptime_servers_mysql_query_elapsed_bucket{greptime_pod=~\"$greptime_pod\"}[$__rate_interval])))",
- "fullMetaSearch": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{greptime_pod}}-{{subprotocol}}-{{db}}-p99",
- "range": true,
- "refId": "A",
- "useBackend": false
+ "matcher": {
+ "id": "byRegexp",
+ "options": ".*?-qps"
+ },
+ "properties": [
+ {
+ "id": "custom.drawStyle",
+ "value": "line"
+ },
+ {
+ "id": "custom.axisPlacement",
+ "value": "right"
+ },
+ {
+ "id": "unit",
+ "value": "ops"
+ },
+ {
+ "id": "custom.stacking",
+ "value": {
+ "group": "A",
+ "mode": "none"
+ }
+ }
+ ]
},
{
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "disableTextWrap": false,
- "editorMode": "code",
- "exemplar": false,
- "expr": "sum by(greptime_pod, subprotocol, db) (rate(greptime_servers_mysql_query_elapsed_count{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))",
- "fullMetaSearch": false,
- "hide": false,
- "includeNullMetadata": true,
- "instant": false,
- "interval": "$__rate_interval",
- "legendFormat": "{{greptime_pod}}-{{subprotocol}}-{{db}}-qps",
- "range": true,
- "refId": "B",
- "useBackend": false
- }
- ],
- "title": "MySQL per DB",
- "type": "timeseries"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "points",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
+ "matcher": {
+ "id": "byValue",
+ "options": {
+ "op": "gte",
+ "reducer": "allIsZero",
+ "value": 0
}
},
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "s"
- },
- "overrides": [
- {
- "matcher": {
- "id": "byRegexp",
- "options": ".*?-qps"
- },
- "properties": [
- {
- "id": "custom.drawStyle",
- "value": "line"
- },
- {
- "id": "custom.axisPlacement",
- "value": "right"
- },
- {
- "id": "unit",
- "value": "ops"
- },
- {
- "id": "custom.stacking",
- "value": {
- "group": "A",
- "mode": "none"
- }
- }
- ]
- },
- {
- "matcher": {
- "id": "byValue",
- "options": {
- "op": "gte",
- "reducer": "allIsZero",
- "value": 0
- }
- },
- "properties": [
- {
- "id": "custom.hideFrom",
- "value": {
- "legend": true,
- "tooltip": true,
- "viz": true
- }
+ "properties": [
+ {
+ "id": "custom.hideFrom",
+ "value": {
+ "legend": true,
+ "tooltip": true,
+ "viz": true
}
- ]
- }
- ]
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 1
+ },
+ "id": 154,
+ "options": {
+ "legend": {
+ "calcs": [
+ "last"
+ ],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true,
+ "sortBy": "Name",
+ "sortDesc": false
},
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 12,
- "y": 9
+ "tooltip": {
+ "mode": "multi",
+ "sort": "desc"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "disableTextWrap": false,
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "histogram_quantile(0.99, sum by(instance, le, path, method, code) (rate(greptime_servers_http_requests_elapsed_bucket{instance=~\"$instance\",path!~\"/health|/metrics\"}[$__rate_interval])))",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{instance}}-{{path}}-{{method}}-{{code}}-p99",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
},
- "id": 157,
- "options": {
- "legend": {
- "calcs": [
- "last"
- ],
- "displayMode": "table",
- "placement": "bottom",
- "showLegend": true,
- "sortBy": "Name",
- "sortDesc": true
- },
- "tooltip": {
- "mode": "multi",
- "sort": "desc"
- }
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "disableTextWrap": false,
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "sum by(instance, path, method, code) (rate(greptime_servers_http_requests_elapsed_count{instance=~\"$instance\",path!~\"/health|/metrics\"}[$__rate_interval]))",
+ "fullMetaSearch": false,
+ "hide": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "interval": "$__rate_interval",
+ "legendFormat": "{{instance}}-{{path}}-{{method}}-{{code}}-qps",
+ "range": true,
+ "refId": "B",
+ "useBackend": false
+ }
+ ],
+ "title": "HTTP middleware",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "points",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "s"
},
- "targets": [
+ "overrides": [
{
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "disableTextWrap": false,
- "editorMode": "code",
- "exemplar": false,
- "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, subprotocol, db) (rate(greptime_servers_postgres_query_elapsed_bucket{greptime_pod=~\"$greptime_pod\"}[$__rate_interval])))",
- "fullMetaSearch": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{greptime_pod}}-{{subprotocol}}-{{db}}-p99",
- "range": true,
- "refId": "A",
- "useBackend": false
+ "matcher": {
+ "id": "byRegexp",
+ "options": ".*?-qps"
+ },
+ "properties": [
+ {
+ "id": "custom.drawStyle",
+ "value": "line"
+ },
+ {
+ "id": "custom.axisPlacement",
+ "value": "right"
+ },
+ {
+ "id": "unit",
+ "value": "ops"
+ },
+ {
+ "id": "custom.stacking",
+ "value": {
+ "group": "A",
+ "mode": "none"
+ }
+ }
+ ]
},
{
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "disableTextWrap": false,
- "editorMode": "code",
- "exemplar": false,
- "expr": "sum by(greptime_pod, subprotocol, db) (rate(greptime_servers_postgres_query_elapsed_count{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))",
- "fullMetaSearch": false,
- "hide": false,
- "includeNullMetadata": true,
- "instant": false,
- "interval": "$__rate_interval",
- "legendFormat": "{{greptime_pod}}-{{subprotocol}}-{{db}}-qps",
- "range": true,
- "refId": "B",
- "useBackend": false
+ "matcher": {
+ "id": "byValue",
+ "options": {
+ "op": "gte",
+ "reducer": "allIsZero",
+ "value": 0
+ }
+ },
+ "properties": [
+ {
+ "id": "custom.hideFrom",
+ "value": {
+ "legend": true,
+ "tooltip": true,
+ "viz": true
+ }
+ }
+ ]
}
- ],
- "title": "PostgreSQL per DB",
- "type": "timeseries"
+ ]
},
- {
- "collapsed": false,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 17
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 9
+ },
+ "id": 156,
+ "options": {
+ "legend": {
+ "calcs": [
+ "last"
+ ],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true,
+ "sortBy": "Name",
+ "sortDesc": true
},
- "id": 158,
- "panels": [],
- "title": "Frontend HTTP per DB",
- "type": "row"
+ "tooltip": {
+ "mode": "multi",
+ "sort": "desc"
+ }
},
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "disableTextWrap": false,
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "histogram_quantile(0.99, sum by(instance, le, subprotocol, db) (rate(greptime_servers_mysql_query_elapsed_bucket{instance=~\"$instance\"}[$__rate_interval])))",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{instance}}-{{subprotocol}}-{{db}}-p99",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
},
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "points",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "s"
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
},
- "overrides": [
- {
- "matcher": {
- "id": "byRegexp",
- "options": ".*?-qps"
- },
- "properties": [
- {
- "id": "custom.drawStyle",
- "value": "line"
- },
- {
- "id": "custom.axisPlacement",
- "value": "right"
- },
- {
- "id": "unit",
- "value": "ops"
- },
- {
- "id": "custom.stacking",
- "value": {
- "group": "A",
- "mode": "none"
- }
- }
- ]
- },
- {
- "matcher": {
- "id": "byValue",
- "options": {
- "op": "gte",
- "reducer": "allIsZero",
- "value": 0
- }
- },
- "properties": [
- {
- "id": "custom.hideFrom",
- "value": {
- "legend": true,
- "tooltip": true,
- "viz": true
- }
- }
- ]
+ "disableTextWrap": false,
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "sum by(instance, subprotocol, db) (rate(greptime_servers_mysql_query_elapsed_count{instance=~\"$instance\"}[$__rate_interval]))",
+ "fullMetaSearch": false,
+ "hide": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "interval": "$__rate_interval",
+ "legendFormat": "{{instance}}-{{subprotocol}}-{{db}}-qps",
+ "range": true,
+ "refId": "B",
+ "useBackend": false
+ }
+ ],
+ "title": "MySQL per DB",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "points",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
}
- ]
- },
- "gridPos": {
- "h": 8,
- "w": 8,
- "x": 0,
- "y": 18
- },
- "id": 159,
- "options": {
- "legend": {
- "calcs": [
- "last"
- ],
- "displayMode": "table",
- "placement": "bottom",
- "showLegend": true,
- "sortBy": "Name",
- "sortDesc": false
- },
- "tooltip": {
- "mode": "multi",
- "sort": "desc"
- }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "s"
},
- "targets": [
+ "overrides": [
{
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "disableTextWrap": false,
- "editorMode": "code",
- "exemplar": false,
- "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, db) (rate(greptime_servers_http_sql_elapsed_bucket{greptime_pod=~\"$greptime_pod\"}[$__rate_interval])))",
- "fullMetaSearch": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{greptime_pod}}-{{db}}-p99",
- "range": true,
- "refId": "A",
- "useBackend": false
+ "matcher": {
+ "id": "byRegexp",
+ "options": ".*?-qps"
+ },
+ "properties": [
+ {
+ "id": "custom.drawStyle",
+ "value": "line"
+ },
+ {
+ "id": "custom.axisPlacement",
+ "value": "right"
+ },
+ {
+ "id": "unit",
+ "value": "ops"
+ },
+ {
+ "id": "custom.stacking",
+ "value": {
+ "group": "A",
+ "mode": "none"
+ }
+ }
+ ]
},
{
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "disableTextWrap": false,
- "editorMode": "code",
- "exemplar": false,
- "expr": "sum by(greptime_pod, db) (rate(greptime_servers_http_sql_elapsed_count{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))",
- "fullMetaSearch": false,
- "hide": false,
- "includeNullMetadata": true,
- "instant": false,
- "interval": "$__rate_interval",
- "legendFormat": "{{greptime_pod}}-{{db}}-qps",
- "range": true,
- "refId": "B",
- "useBackend": false
- }
- ],
- "title": "HTTP sql per DB",
- "type": "timeseries"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "points",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
+ "matcher": {
+ "id": "byValue",
+ "options": {
+ "op": "gte",
+ "reducer": "allIsZero",
+ "value": 0
}
},
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
+ "properties": [
+ {
+ "id": "custom.hideFrom",
+ "value": {
+ "legend": true,
+ "tooltip": true,
+ "viz": true
}
- ]
- },
- "unit": "s"
- },
- "overrides": [
- {
- "matcher": {
- "id": "byRegexp",
- "options": ".*?-qps"
- },
- "properties": [
- {
- "id": "custom.drawStyle",
- "value": "line"
- },
- {
- "id": "custom.axisPlacement",
- "value": "right"
- },
- {
- "id": "unit",
- "value": "ops"
- },
- {
- "id": "custom.stacking",
- "value": {
- "group": "A",
- "mode": "none"
- }
- }
- ]
- },
- {
- "matcher": {
- "id": "byValue",
- "options": {
- "op": "gte",
- "reducer": "allIsZero",
- "value": 0
- }
- },
- "properties": [
- {
- "id": "custom.hideFrom",
- "value": {
- "legend": true,
- "tooltip": true,
- "viz": true
- }
- }
- ]
- }
- ]
- },
- "gridPos": {
- "h": 8,
- "w": 8,
- "x": 8,
- "y": 18
- },
- "id": 160,
- "options": {
- "legend": {
- "calcs": [
- "last"
- ],
- "displayMode": "table",
- "placement": "bottom",
- "showLegend": true,
- "sortBy": "Name",
- "sortDesc": false
- },
- "tooltip": {
- "mode": "multi",
- "sort": "desc"
+ }
+ ]
}
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 9
+ },
+ "id": 157,
+ "options": {
+ "legend": {
+ "calcs": [
+ "last"
+ ],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true,
+ "sortBy": "Name",
+ "sortDesc": true
},
- "targets": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "disableTextWrap": false,
- "editorMode": "builder",
- "exemplar": false,
- "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, db) (rate(greptime_servers_http_promql_elapsed_bucket{greptime_pod=~\"$greptime_pod\"}[$__rate_interval])))",
- "fullMetaSearch": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{greptime_pod}}-{{db}}-p99",
- "range": true,
- "refId": "A",
- "useBackend": false
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "disableTextWrap": false,
- "editorMode": "builder",
- "exemplar": false,
- "expr": "sum by(greptime_pod, db) (rate(greptime_servers_http_promql_elapsed_count{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))",
- "fullMetaSearch": false,
- "hide": false,
- "includeNullMetadata": true,
- "instant": false,
- "interval": "$__rate_interval",
- "legendFormat": "{{greptime_pod}}-{{db}}-qps",
- "range": true,
- "refId": "B",
- "useBackend": false
- }
- ],
- "title": "HTTP promql per DB",
- "type": "timeseries"
+ "tooltip": {
+ "mode": "multi",
+ "sort": "desc"
+ }
},
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "disableTextWrap": false,
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "histogram_quantile(0.99, sum by(instance, le, subprotocol, db) (rate(greptime_servers_postgres_query_elapsed_bucket{instance=~\"$instance\"}[$__rate_interval])))",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{instance}}-{{subprotocol}}-{{db}}-p99",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
},
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "points",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "s"
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
},
- "overrides": [
- {
- "matcher": {
- "id": "byRegexp",
- "options": ".*?-qps"
- },
- "properties": [
- {
- "id": "custom.drawStyle",
- "value": "line"
- },
- {
- "id": "custom.axisPlacement",
- "value": "right"
- },
- {
- "id": "unit",
- "value": "ops"
- },
- {
- "id": "custom.stacking",
- "value": {
- "group": "A",
- "mode": "none"
- }
- }
- ]
- },
- {
- "matcher": {
- "id": "byValue",
- "options": {
- "op": "gte",
- "reducer": "allIsZero",
- "value": 0
- }
- },
- "properties": [
- {
- "id": "custom.hideFrom",
- "value": {
- "legend": true,
- "tooltip": true,
- "viz": true
- }
- }
- ]
+ "disableTextWrap": false,
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "sum by(instance, subprotocol, db) (rate(greptime_servers_postgres_query_elapsed_count{instance=~\"$instance\"}[$__rate_interval]))",
+ "fullMetaSearch": false,
+ "hide": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "interval": "$__rate_interval",
+ "legendFormat": "{{instance}}-{{subprotocol}}-{{db}}-qps",
+ "range": true,
+ "refId": "B",
+ "useBackend": false
+ }
+ ],
+ "title": "PostgreSQL per DB",
+ "type": "timeseries"
+ },
+ {
+ "collapsed": false,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 17
+ },
+ "id": 158,
+ "panels": [],
+ "title": "Frontend HTTP per DB",
+ "type": "row"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "points",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
}
- ]
- },
- "gridPos": {
- "h": 8,
- "w": 8,
- "x": 16,
- "y": 18
- },
- "id": 161,
- "options": {
- "legend": {
- "calcs": [
- "last"
- ],
- "displayMode": "table",
- "placement": "bottom",
- "showLegend": true,
- "sortBy": "Name",
- "sortDesc": false
- },
- "tooltip": {
- "mode": "multi",
- "sort": "desc"
- }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "s"
},
- "targets": [
+ "overrides": [
{
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "disableTextWrap": false,
- "editorMode": "builder",
- "exemplar": false,
- "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, db) (rate(greptime_servers_http_influxdb_write_elapsed_bucket{greptime_pod=~\"$greptime_pod\"}[$__rate_interval])))",
- "fullMetaSearch": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{greptime_pod}}-{{db}}-p99",
- "range": true,
- "refId": "A",
- "useBackend": false
+ "matcher": {
+ "id": "byRegexp",
+ "options": ".*?-qps"
+ },
+ "properties": [
+ {
+ "id": "custom.drawStyle",
+ "value": "line"
+ },
+ {
+ "id": "custom.axisPlacement",
+ "value": "right"
+ },
+ {
+ "id": "unit",
+ "value": "ops"
+ },
+ {
+ "id": "custom.stacking",
+ "value": {
+ "group": "A",
+ "mode": "none"
+ }
+ }
+ ]
},
{
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "disableTextWrap": false,
- "editorMode": "builder",
- "exemplar": false,
- "expr": "sum by(greptime_pod, db) (rate(greptime_servers_http_influxdb_write_elapsed_count{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))",
- "fullMetaSearch": false,
- "hide": false,
- "includeNullMetadata": true,
- "instant": false,
- "interval": "$__rate_interval",
- "legendFormat": "{{greptime_pod}}-{{db}}-qps",
- "range": true,
- "refId": "B",
- "useBackend": false
- }
- ],
- "title": "HTTP influxdb per DB",
- "type": "timeseries"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "points",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
+ "matcher": {
+ "id": "byValue",
+ "options": {
+ "op": "gte",
+ "reducer": "allIsZero",
+ "value": 0
}
},
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
+ "properties": [
+ {
+ "id": "custom.hideFrom",
+ "value": {
+ "legend": true,
+ "tooltip": true,
+ "viz": true
}
- ]
- },
- "unit": "s"
- },
- "overrides": [
- {
- "matcher": {
- "id": "byRegexp",
- "options": ".*?-qps"
- },
- "properties": [
- {
- "id": "custom.drawStyle",
- "value": "line"
- },
- {
- "id": "custom.axisPlacement",
- "value": "right"
- },
- {
- "id": "unit",
- "value": "ops"
- },
- {
- "id": "custom.stacking",
- "value": {
- "group": "A",
- "mode": "none"
- }
- }
- ]
- },
- {
- "matcher": {
- "id": "byValue",
- "options": {
- "op": "gte",
- "reducer": "allIsZero",
- "value": 0
- }
- },
- "properties": [
- {
- "id": "custom.hideFrom",
- "value": {
- "legend": true,
- "tooltip": true,
- "viz": true
- }
- }
- ]
- }
- ]
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 8,
+ "x": 0,
+ "y": 18
+ },
+ "id": 159,
+ "options": {
+ "legend": {
+ "calcs": [
+ "last"
+ ],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true,
+ "sortBy": "Name",
+ "sortDesc": false
},
- "gridPos": {
- "h": 8,
- "w": 6,
- "x": 0,
- "y": 26
+ "tooltip": {
+ "mode": "multi",
+ "sort": "desc"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "disableTextWrap": false,
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "histogram_quantile(0.99, sum by(instance, le, db) (rate(greptime_servers_http_sql_elapsed_bucket{instance=~\"$instance\"}[$__rate_interval])))",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{instance}}-{{db}}-p99",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
},
- "id": 162,
- "options": {
- "legend": {
- "calcs": [
- "last"
- ],
- "displayMode": "table",
- "placement": "bottom",
- "showLegend": true,
- "sortBy": "Name",
- "sortDesc": false
- },
- "tooltip": {
- "mode": "multi",
- "sort": "desc"
- }
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "disableTextWrap": false,
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "sum by(instance, db) (rate(greptime_servers_http_sql_elapsed_count{instance=~\"$instance\"}[$__rate_interval]))",
+ "fullMetaSearch": false,
+ "hide": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "interval": "$__rate_interval",
+ "legendFormat": "{{instance}}-{{db}}-qps",
+ "range": true,
+ "refId": "B",
+ "useBackend": false
+ }
+ ],
+ "title": "HTTP sql per DB",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "points",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "s"
},
- "targets": [
+ "overrides": [
{
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "disableTextWrap": false,
- "editorMode": "builder",
- "exemplar": false,
- "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, db) (rate(greptime_servers_http_prometheus_write_elapsed_bucket{greptime_pod=~\"$greptime_pod\"}[$__rate_interval])))",
- "fullMetaSearch": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{greptime_pod}}-{{db}}-p99",
- "range": true,
- "refId": "A",
- "useBackend": false
+ "matcher": {
+ "id": "byRegexp",
+ "options": ".*?-qps"
+ },
+ "properties": [
+ {
+ "id": "custom.drawStyle",
+ "value": "line"
+ },
+ {
+ "id": "custom.axisPlacement",
+ "value": "right"
+ },
+ {
+ "id": "unit",
+ "value": "ops"
+ },
+ {
+ "id": "custom.stacking",
+ "value": {
+ "group": "A",
+ "mode": "none"
+ }
+ }
+ ]
},
{
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "disableTextWrap": false,
- "editorMode": "builder",
- "exemplar": false,
- "expr": "sum by(greptime_pod, db) (rate(greptime_servers_http_prometheus_write_elapsed_count{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))",
- "fullMetaSearch": false,
- "hide": false,
- "includeNullMetadata": true,
- "instant": false,
- "interval": "$__rate_interval",
- "legendFormat": "{{greptime_pod}}-{{db}}-qps",
- "range": true,
- "refId": "B",
- "useBackend": false
- }
- ],
- "title": "HTTP prom store write per DB",
- "type": "timeseries"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "points",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
+ "matcher": {
+ "id": "byValue",
+ "options": {
+ "op": "gte",
+ "reducer": "allIsZero",
+ "value": 0
}
},
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "s"
- },
- "overrides": [
- {
- "matcher": {
- "id": "byRegexp",
- "options": ".*?-qps"
- },
- "properties": [
- {
- "id": "custom.drawStyle",
- "value": "line"
- },
- {
- "id": "custom.axisPlacement",
- "value": "right"
- },
- {
- "id": "unit",
- "value": "ops"
- },
- {
- "id": "custom.stacking",
- "value": {
- "group": "A",
- "mode": "none"
- }
- }
- ]
- },
- {
- "matcher": {
- "id": "byValue",
- "options": {
- "op": "gte",
- "reducer": "allIsZero",
- "value": 0
- }
- },
- "properties": [
- {
- "id": "custom.hideFrom",
- "value": {
- "legend": true,
- "tooltip": true,
- "viz": true
- }
+ "properties": [
+ {
+ "id": "custom.hideFrom",
+ "value": {
+ "legend": true,
+ "tooltip": true,
+ "viz": true
}
- ]
- }
- ]
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 8,
+ "x": 8,
+ "y": 18
+ },
+ "id": 160,
+ "options": {
+ "legend": {
+ "calcs": [
+ "last"
+ ],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true,
+ "sortBy": "Name",
+ "sortDesc": false
},
- "gridPos": {
- "h": 8,
- "w": 6,
- "x": 6,
- "y": 26
+ "tooltip": {
+ "mode": "multi",
+ "sort": "desc"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "exemplar": false,
+ "expr": "histogram_quantile(0.99, sum by(instance, le, db) (rate(greptime_servers_http_promql_elapsed_bucket{instance=~\"$instance\"}[$__rate_interval])))",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{instance}}-{{db}}-p99",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
},
- "id": 183,
- "options": {
- "legend": {
- "calcs": [
- "last"
- ],
- "displayMode": "table",
- "placement": "bottom",
- "showLegend": true,
- "sortBy": "Name",
- "sortDesc": false
- },
- "tooltip": {
- "mode": "multi",
- "sort": "desc"
- }
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "exemplar": false,
+ "expr": "sum by(instance, db) (rate(greptime_servers_http_promql_elapsed_count{instance=~\"$instance\"}[$__rate_interval]))",
+ "fullMetaSearch": false,
+ "hide": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "interval": "$__rate_interval",
+ "legendFormat": "{{instance}}-{{db}}-qps",
+ "range": true,
+ "refId": "B",
+ "useBackend": false
+ }
+ ],
+ "title": "HTTP promql per DB",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "points",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "s"
},
- "targets": [
+ "overrides": [
{
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "disableTextWrap": false,
- "editorMode": "builder",
- "exemplar": false,
- "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, db) (rate(greptime_servers_http_prometheus_read_elapsed_bucket{greptime_pod=~\"$greptime_pod\"}[$__rate_interval])))",
- "fullMetaSearch": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{greptime_pod}}-{{db}}-p99",
- "range": true,
- "refId": "A",
- "useBackend": false
+ "matcher": {
+ "id": "byRegexp",
+ "options": ".*?-qps"
+ },
+ "properties": [
+ {
+ "id": "custom.drawStyle",
+ "value": "line"
+ },
+ {
+ "id": "custom.axisPlacement",
+ "value": "right"
+ },
+ {
+ "id": "unit",
+ "value": "ops"
+ },
+ {
+ "id": "custom.stacking",
+ "value": {
+ "group": "A",
+ "mode": "none"
+ }
+ }
+ ]
},
{
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "disableTextWrap": false,
- "editorMode": "builder",
- "exemplar": false,
- "expr": "sum by(greptime_pod, db) (rate(greptime_servers_http_prometheus_read_elapsed_count{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))",
- "fullMetaSearch": false,
- "hide": false,
- "includeNullMetadata": true,
- "instant": false,
- "interval": "$__rate_interval",
- "legendFormat": "{{greptime_pod}}-{{db}}-qps",
- "range": true,
- "refId": "B",
- "useBackend": false
- }
- ],
- "title": "HTTP prom store read per DB",
- "type": "timeseries"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "points",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
+ "matcher": {
+ "id": "byValue",
+ "options": {
+ "op": "gte",
+ "reducer": "allIsZero",
+ "value": 0
}
},
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "s"
- },
- "overrides": [
- {
- "matcher": {
- "id": "byRegexp",
- "options": ".*?-qps"
- },
- "properties": [
- {
- "id": "custom.drawStyle",
- "value": "line"
- },
- {
- "id": "custom.axisPlacement",
- "value": "right"
- },
- {
- "id": "unit",
- "value": "ops"
- },
- {
- "id": "custom.stacking",
- "value": {
- "group": "A",
- "mode": "none"
- }
- }
- ]
- },
- {
- "matcher": {
- "id": "byValue",
- "options": {
- "op": "gte",
- "reducer": "allIsZero",
- "value": 0
+ "properties": [
+ {
+ "id": "custom.hideFrom",
+ "value": {
+ "legend": true,
+ "tooltip": true,
+ "viz": true
}
- },
- "properties": [
- {
- "id": "custom.hideFrom",
- "value": {
- "legend": true,
- "tooltip": true,
- "viz": true
- }
- }
- ]
- }
- ]
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 8,
+ "x": 16,
+ "y": 18
+ },
+ "id": 161,
+ "options": {
+ "legend": {
+ "calcs": [
+ "last"
+ ],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true,
+ "sortBy": "Name",
+ "sortDesc": false
},
- "gridPos": {
- "h": 8,
- "w": 6,
- "x": 12,
- "y": 26
+ "tooltip": {
+ "mode": "multi",
+ "sort": "desc"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "exemplar": false,
+ "expr": "histogram_quantile(0.99, sum by(instance, le, db) (rate(greptime_servers_http_influxdb_write_elapsed_bucket{instance=~\"$instance\"}[$__rate_interval])))",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{instance}}-{{db}}-p99",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
},
- "id": 184,
- "options": {
- "legend": {
- "calcs": [
- "last"
- ],
- "displayMode": "table",
- "placement": "bottom",
- "showLegend": true,
- "sortBy": "Name",
- "sortDesc": false
- },
- "tooltip": {
- "mode": "multi",
- "sort": "desc"
- }
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "exemplar": false,
+ "expr": "sum by(instance, db) (rate(greptime_servers_http_influxdb_write_elapsed_count{instance=~\"$instance\"}[$__rate_interval]))",
+ "fullMetaSearch": false,
+ "hide": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "interval": "$__rate_interval",
+ "legendFormat": "{{instance}}-{{db}}-qps",
+ "range": true,
+ "refId": "B",
+ "useBackend": false
+ }
+ ],
+ "title": "HTTP influxdb per DB",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "points",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "s"
},
- "targets": [
+ "overrides": [
{
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "disableTextWrap": false,
- "editorMode": "builder",
- "exemplar": false,
- "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, db) (rate(greptime_servers_http_otlp_metrics_elapsed_bucket{greptime_pod=~\"$greptime_pod\"}[$__rate_interval])))",
- "fullMetaSearch": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{greptime_pod}}-{{db}}-p99",
- "range": true,
- "refId": "A",
- "useBackend": false
+ "matcher": {
+ "id": "byRegexp",
+ "options": ".*?-qps"
+ },
+ "properties": [
+ {
+ "id": "custom.drawStyle",
+ "value": "line"
+ },
+ {
+ "id": "custom.axisPlacement",
+ "value": "right"
+ },
+ {
+ "id": "unit",
+ "value": "ops"
+ },
+ {
+ "id": "custom.stacking",
+ "value": {
+ "group": "A",
+ "mode": "none"
+ }
+ }
+ ]
},
{
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "disableTextWrap": false,
- "editorMode": "builder",
- "exemplar": false,
- "expr": "sum by(greptime_pod, db) (rate(greptime_servers_http_otlp_metrics_elapsed_count{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))",
- "fullMetaSearch": false,
- "hide": false,
- "includeNullMetadata": true,
- "instant": false,
- "interval": "$__rate_interval",
- "legendFormat": "{{greptime_pod}}-{{db}}-qps",
- "range": true,
- "refId": "B",
- "useBackend": false
- }
- ],
- "title": "HTTP otlp metrics per DB",
- "type": "timeseries"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "points",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
+ "matcher": {
+ "id": "byValue",
+ "options": {
+ "op": "gte",
+ "reducer": "allIsZero",
+ "value": 0
}
},
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "s"
- },
- "overrides": [
- {
- "matcher": {
- "id": "byRegexp",
- "options": ".*?-qps"
- },
- "properties": [
- {
- "id": "custom.drawStyle",
- "value": "line"
- },
- {
- "id": "custom.axisPlacement",
- "value": "right"
- },
- {
- "id": "unit",
- "value": "ops"
- },
- {
- "id": "custom.stacking",
- "value": {
- "group": "A",
- "mode": "none"
- }
- }
- ]
- },
- {
- "matcher": {
- "id": "byValue",
- "options": {
- "op": "gte",
- "reducer": "allIsZero",
- "value": 0
- }
- },
- "properties": [
- {
- "id": "custom.hideFrom",
- "value": {
- "legend": true,
- "tooltip": true,
- "viz": true
- }
+ "properties": [
+ {
+ "id": "custom.hideFrom",
+ "value": {
+ "legend": true,
+ "tooltip": true,
+ "viz": true
}
- ]
- }
- ]
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 6,
+ "x": 0,
+ "y": 26
+ },
+ "id": 162,
+ "options": {
+ "legend": {
+ "calcs": [
+ "last"
+ ],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true,
+ "sortBy": "Name",
+ "sortDesc": false
},
- "gridPos": {
- "h": 8,
- "w": 6,
- "x": 18,
- "y": 26
+ "tooltip": {
+ "mode": "multi",
+ "sort": "desc"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "exemplar": false,
+ "expr": "histogram_quantile(0.99, sum by(instance, le, db) (rate(greptime_servers_http_prometheus_write_elapsed_bucket{instance=~\"$instance\"}[$__rate_interval])))",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{instance}}-{{db}}-p99",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
},
- "id": 185,
- "options": {
- "legend": {
- "calcs": [
- "last"
- ],
- "displayMode": "table",
- "placement": "bottom",
- "showLegend": true,
- "sortBy": "Name",
- "sortDesc": false
- },
- "tooltip": {
- "mode": "multi",
- "sort": "desc"
- }
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "exemplar": false,
+ "expr": "sum by(instance, db) (rate(greptime_servers_http_prometheus_write_elapsed_count{instance=~\"$instance\"}[$__rate_interval]))",
+ "fullMetaSearch": false,
+ "hide": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "interval": "$__rate_interval",
+ "legendFormat": "{{instance}}-{{db}}-qps",
+ "range": true,
+ "refId": "B",
+ "useBackend": false
+ }
+ ],
+ "title": "HTTP prom store write per DB",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "points",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "s"
},
- "targets": [
+ "overrides": [
{
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "disableTextWrap": false,
- "editorMode": "builder",
- "exemplar": false,
- "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, db) (rate(greptime_servers_http_otlp_traces_elapsed_bucket{greptime_pod=~\"$greptime_pod\"}[$__rate_interval])))",
- "fullMetaSearch": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{greptime_pod}}-{{db}}-p99",
- "range": true,
- "refId": "A",
- "useBackend": false
+ "matcher": {
+ "id": "byRegexp",
+ "options": ".*?-qps"
+ },
+ "properties": [
+ {
+ "id": "custom.drawStyle",
+ "value": "line"
+ },
+ {
+ "id": "custom.axisPlacement",
+ "value": "right"
+ },
+ {
+ "id": "unit",
+ "value": "ops"
+ },
+ {
+ "id": "custom.stacking",
+ "value": {
+ "group": "A",
+ "mode": "none"
+ }
+ }
+ ]
},
{
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "disableTextWrap": false,
- "editorMode": "builder",
- "exemplar": false,
- "expr": "sum by(greptime_pod, db) (rate(greptime_servers_http_otlp_traces_elapsed_count{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))",
- "fullMetaSearch": false,
- "hide": false,
- "includeNullMetadata": true,
- "instant": false,
- "interval": "$__rate_interval",
- "legendFormat": "{{greptime_pod}}-{{db}}-qps",
- "range": true,
- "refId": "B",
- "useBackend": false
+ "matcher": {
+ "id": "byValue",
+ "options": {
+ "op": "gte",
+ "reducer": "allIsZero",
+ "value": 0
+ }
+ },
+ "properties": [
+ {
+ "id": "custom.hideFrom",
+ "value": {
+ "legend": true,
+ "tooltip": true,
+ "viz": true
+ }
+ }
+ ]
}
- ],
- "title": "HTTP otlp traces per DB",
- "type": "timeseries"
+ ]
},
- {
- "collapsed": false,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 34
+ "gridPos": {
+ "h": 8,
+ "w": 6,
+ "x": 6,
+ "y": 26
+ },
+ "id": 183,
+ "options": {
+ "legend": {
+ "calcs": [
+ "last"
+ ],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true,
+ "sortBy": "Name",
+ "sortDesc": false
},
- "id": 163,
- "panels": [],
- "title": "Frontend gRPC per DB",
- "type": "row"
+ "tooltip": {
+ "mode": "multi",
+ "sort": "desc"
+ }
},
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "exemplar": false,
+ "expr": "histogram_quantile(0.99, sum by(instance, le, db) (rate(greptime_servers_http_prometheus_read_elapsed_bucket{instance=~\"$instance\"}[$__rate_interval])))",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{instance}}-{{db}}-p99",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
},
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "points",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "s"
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
},
- "overrides": [
- {
- "matcher": {
- "id": "byRegexp",
- "options": ".*?-qps"
- },
- "properties": [
- {
- "id": "custom.drawStyle",
- "value": "line"
- },
- {
- "id": "custom.axisPlacement",
- "value": "right"
- },
- {
- "id": "unit",
- "value": "ops"
- },
- {
- "id": "custom.stacking",
- "value": {
- "group": "A",
- "mode": "none"
- }
- }
- ]
- },
- {
- "matcher": {
- "id": "byValue",
- "options": {
- "op": "gte",
- "reducer": "allIsZero",
- "value": 0
- }
- },
- "properties": [
- {
- "id": "custom.hideFrom",
- "value": {
- "legend": true,
- "tooltip": true,
- "viz": true
- }
- }
- ]
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "exemplar": false,
+ "expr": "sum by(instance, db) (rate(greptime_servers_http_prometheus_read_elapsed_count{instance=~\"$instance\"}[$__rate_interval]))",
+ "fullMetaSearch": false,
+ "hide": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "interval": "$__rate_interval",
+ "legendFormat": "{{instance}}-{{db}}-qps",
+ "range": true,
+ "refId": "B",
+ "useBackend": false
+ }
+ ],
+ "title": "HTTP prom store read per DB",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "points",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
}
- ]
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 35
- },
- "id": 164,
- "options": {
- "legend": {
- "calcs": [
- "last"
- ],
- "displayMode": "table",
- "placement": "bottom",
- "showLegend": true,
- "sortBy": "Name",
- "sortDesc": false
- },
- "tooltip": {
- "mode": "multi",
- "sort": "desc"
- }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "s"
},
- "targets": [
+ "overrides": [
{
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "disableTextWrap": false,
- "editorMode": "builder",
- "exemplar": false,
- "expr": "label_replace(histogram_quantile(0.99, sum by(greptime_pod, le, db, type, code) (rate(greptime_servers_grpc_db_request_elapsed_bucket{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))), \"db\", \"$1\", \"db\", \"(.*)-public\")",
- "fullMetaSearch": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{greptime_pod}}-{{db}}-{{type}}-{{code}}-p99",
- "range": true,
- "refId": "A",
- "useBackend": false
+ "matcher": {
+ "id": "byRegexp",
+ "options": ".*?-qps"
+ },
+ "properties": [
+ {
+ "id": "custom.drawStyle",
+ "value": "line"
+ },
+ {
+ "id": "custom.axisPlacement",
+ "value": "right"
+ },
+ {
+ "id": "unit",
+ "value": "ops"
+ },
+ {
+ "id": "custom.stacking",
+ "value": {
+ "group": "A",
+ "mode": "none"
+ }
+ }
+ ]
},
{
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "disableTextWrap": false,
- "editorMode": "builder",
- "exemplar": false,
- "expr": "sum by(greptime_pod, db, type, code) (rate(greptime_servers_grpc_db_request_elapsed_count{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))",
- "fullMetaSearch": false,
- "hide": false,
- "includeNullMetadata": true,
- "instant": false,
- "interval": "$__rate_interval",
- "legendFormat": "{{greptime_pod}}-{{db}}-{{type}}-{{code}}-qps",
- "range": true,
- "refId": "B",
- "useBackend": false
- }
- ],
- "title": "gRPC per DB",
- "type": "timeseries"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "points",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
+ "matcher": {
+ "id": "byValue",
+ "options": {
+ "op": "gte",
+ "reducer": "allIsZero",
+ "value": 0
}
},
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
+ "properties": [
+ {
+ "id": "custom.hideFrom",
+ "value": {
+ "legend": true,
+ "tooltip": true,
+ "viz": true
}
- ]
- },
- "unit": "s"
- },
- "overrides": [
- {
- "matcher": {
- "id": "byRegexp",
- "options": ".*?-qps"
- },
- "properties": [
- {
- "id": "custom.drawStyle",
- "value": "line"
- },
- {
- "id": "custom.axisPlacement",
- "value": "right"
- },
- {
- "id": "unit",
- "value": "ops"
- },
- {
- "id": "custom.stacking",
- "value": {
- "group": "A",
- "mode": "none"
- }
- }
- ]
- },
- {
- "matcher": {
- "id": "byValue",
- "options": {
- "op": "gte",
- "reducer": "allIsZero",
- "value": 0
- }
- },
- "properties": [
- {
- "id": "custom.hideFrom",
- "value": {
- "legend": true,
- "tooltip": true,
- "viz": true
- }
- }
- ]
- }
- ]
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 6,
+ "x": 12,
+ "y": 26
+ },
+ "id": 184,
+ "options": {
+ "legend": {
+ "calcs": [
+ "last"
+ ],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true,
+ "sortBy": "Name",
+ "sortDesc": false
},
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 12,
- "y": 35
+ "tooltip": {
+ "mode": "multi",
+ "sort": "desc"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "exemplar": false,
+ "expr": "histogram_quantile(0.99, sum by(instance, le, db) (rate(greptime_servers_http_otlp_metrics_elapsed_bucket{instance=~\"$instance\"}[$__rate_interval])))",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{instance}}-{{db}}-p99",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
},
- "id": 165,
- "options": {
- "legend": {
- "calcs": [
- "last"
- ],
- "displayMode": "table",
- "placement": "bottom",
- "showLegend": true,
- "sortBy": "Name",
- "sortDesc": false
- },
- "tooltip": {
- "mode": "multi",
- "sort": "desc"
- }
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "exemplar": false,
+ "expr": "sum by(instance, db) (rate(greptime_servers_http_otlp_metrics_elapsed_count{instance=~\"$instance\"}[$__rate_interval]))",
+ "fullMetaSearch": false,
+ "hide": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "interval": "$__rate_interval",
+ "legendFormat": "{{instance}}-{{db}}-qps",
+ "range": true,
+ "refId": "B",
+ "useBackend": false
+ }
+ ],
+ "title": "HTTP otlp metrics per DB",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "points",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "s"
},
- "targets": [
+ "overrides": [
{
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "disableTextWrap": false,
- "editorMode": "builder",
- "exemplar": false,
- "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, db) (rate(greptime_servers_grpc_prom_request_elapsed_bucket{greptime_pod=~\"$greptime_pod\"}[$__rate_interval])))",
- "fullMetaSearch": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{greptime_pod}}-{{db}}-p99",
- "range": true,
- "refId": "A",
- "useBackend": false
+ "matcher": {
+ "id": "byRegexp",
+ "options": ".*?-qps"
+ },
+ "properties": [
+ {
+ "id": "custom.drawStyle",
+ "value": "line"
+ },
+ {
+ "id": "custom.axisPlacement",
+ "value": "right"
+ },
+ {
+ "id": "unit",
+ "value": "ops"
+ },
+ {
+ "id": "custom.stacking",
+ "value": {
+ "group": "A",
+ "mode": "none"
+ }
+ }
+ ]
},
{
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "disableTextWrap": false,
- "editorMode": "builder",
- "exemplar": false,
- "expr": "sum by(greptime_pod, db) (rate(greptime_servers_grpc_prom_request_elapsed_count{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))",
- "fullMetaSearch": false,
- "hide": false,
- "includeNullMetadata": true,
- "instant": false,
- "interval": "$__rate_interval",
- "legendFormat": "{{greptime_pod}}-{{db}}-qps",
- "range": true,
- "refId": "B",
- "useBackend": false
+ "matcher": {
+ "id": "byValue",
+ "options": {
+ "op": "gte",
+ "reducer": "allIsZero",
+ "value": 0
+ }
+ },
+ "properties": [
+ {
+ "id": "custom.hideFrom",
+ "value": {
+ "legend": true,
+ "tooltip": true,
+ "viz": true
+ }
+ }
+ ]
}
- ],
- "title": "gRPC prom per DB",
- "type": "timeseries"
+ ]
},
- {
- "collapsed": false,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 43
+ "gridPos": {
+ "h": 8,
+ "w": 6,
+ "x": 18,
+ "y": 26
+ },
+ "id": 185,
+ "options": {
+ "legend": {
+ "calcs": [
+ "last"
+ ],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true,
+ "sortBy": "Name",
+ "sortDesc": false
},
- "id": 166,
- "panels": [],
- "title": "Frontend-Datanode Call",
- "type": "row"
+ "tooltip": {
+ "mode": "multi",
+ "sort": "desc"
+ }
},
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "exemplar": false,
+ "expr": "histogram_quantile(0.99, sum by(instance, le, db) (rate(greptime_servers_http_otlp_traces_elapsed_bucket{instance=~\"$instance\"}[$__rate_interval])))",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{instance}}-{{db}}-p99",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
},
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "points",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "exemplar": false,
+ "expr": "sum by(instance, db) (rate(greptime_servers_http_otlp_traces_elapsed_count{instance=~\"$instance\"}[$__rate_interval]))",
+ "fullMetaSearch": false,
+ "hide": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "interval": "$__rate_interval",
+ "legendFormat": "{{instance}}-{{db}}-qps",
+ "range": true,
+ "refId": "B",
+ "useBackend": false
+ }
+ ],
+ "title": "HTTP otlp traces per DB",
+ "type": "timeseries"
+ },
+ {
+ "collapsed": false,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 34
+ },
+ "id": 163,
+ "panels": [],
+ "title": "Frontend gRPC per DB",
+ "type": "row"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "points",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
}
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "s"
+ ]
},
- "overrides": [
- {
- "matcher": {
- "id": "byRegexp",
- "options": ".*?-rps"
- },
- "properties": [
- {
- "id": "custom.drawStyle",
- "value": "line"
- },
- {
- "id": "custom.axisPlacement",
- "value": "right"
- },
- {
- "id": "unit",
- "value": "rowsps"
- },
- {
- "id": "custom.stacking",
- "value": {
- "group": "A",
- "mode": "none"
- }
- }
- ]
- },
- {
- "matcher": {
- "id": "byValue",
- "options": {
- "op": "gte",
- "reducer": "allIsZero",
- "value": 0
+ "unit": "s"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byRegexp",
+ "options": ".*?-qps"
+ },
+ "properties": [
+ {
+ "id": "custom.drawStyle",
+ "value": "line"
+ },
+ {
+ "id": "custom.axisPlacement",
+ "value": "right"
+ },
+ {
+ "id": "unit",
+ "value": "ops"
+ },
+ {
+ "id": "custom.stacking",
+ "value": {
+ "group": "A",
+ "mode": "none"
}
- },
- "properties": [
- {
- "id": "custom.hideFrom",
- "value": {
- "legend": true,
- "tooltip": true,
- "viz": true
- }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byValue",
+ "options": {
+ "op": "gte",
+ "reducer": "allIsZero",
+ "value": 0
+ }
+ },
+ "properties": [
+ {
+ "id": "custom.hideFrom",
+ "value": {
+ "legend": true,
+ "tooltip": true,
+ "viz": true
}
- ]
- }
- ]
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 44
- },
- "id": 186,
- "options": {
- "legend": {
- "calcs": [
- "last"
- ],
- "displayMode": "table",
- "placement": "bottom",
- "showLegend": true,
- "sortBy": "Name",
- "sortDesc": true
- },
- "tooltip": {
- "mode": "multi",
- "sort": "desc"
+ }
+ ]
}
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 35
+ },
+ "id": 164,
+ "options": {
+ "legend": {
+ "calcs": [
+ "last"
+ ],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true,
+ "sortBy": "Name",
+ "sortDesc": false
},
- "targets": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "disableTextWrap": false,
- "editorMode": "builder",
- "exemplar": false,
- "expr": "rate(greptime_table_operator_ingest_rows{greptime_pod=~\"$greptime_pod\"}[$__rate_interval])",
- "fullMetaSearch": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{greptime_pod}}-rps",
- "range": true,
- "refId": "A",
- "useBackend": false
- }
- ],
- "title": "ingested rows",
- "type": "timeseries"
+ "tooltip": {
+ "mode": "multi",
+ "sort": "desc"
+ }
},
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "exemplar": false,
+ "expr": "label_replace(histogram_quantile(0.99, sum by(instance, le, db, type, code) (rate(greptime_servers_grpc_db_request_elapsed_bucket{instance=~\"$instance\"}[$__rate_interval]))), \"db\", \"$1\", \"db\", \"(.*)-public\")",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{instance}}-{{db}}-{{type}}-{{code}}-p99",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
},
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "points",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "exemplar": false,
+ "expr": "sum by(instance, db, type, code) (rate(greptime_servers_grpc_db_request_elapsed_count{instance=~\"$instance\"}[$__rate_interval]))",
+ "fullMetaSearch": false,
+ "hide": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "interval": "$__rate_interval",
+ "legendFormat": "{{instance}}-{{db}}-{{type}}-{{code}}-qps",
+ "range": true,
+ "refId": "B",
+ "useBackend": false
+ }
+ ],
+ "title": "gRPC per DB",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "points",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
}
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "s"
+ ]
},
- "overrides": [
- {
- "matcher": {
- "id": "byRegexp",
- "options": ".*?-qps"
- },
- "properties": [
- {
- "id": "custom.drawStyle",
- "value": "line"
- },
- {
- "id": "custom.axisPlacement",
- "value": "right"
- },
- {
- "id": "unit",
- "value": "ops"
- },
- {
- "id": "custom.stacking",
- "value": {
- "group": "A",
- "mode": "none"
- }
- }
- ]
- },
- {
- "matcher": {
- "id": "byValue",
- "options": {
- "op": "gte",
- "reducer": "allIsZero",
- "value": 0
+ "unit": "s"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byRegexp",
+ "options": ".*?-qps"
+ },
+ "properties": [
+ {
+ "id": "custom.drawStyle",
+ "value": "line"
+ },
+ {
+ "id": "custom.axisPlacement",
+ "value": "right"
+ },
+ {
+ "id": "unit",
+ "value": "ops"
+ },
+ {
+ "id": "custom.stacking",
+ "value": {
+ "group": "A",
+ "mode": "none"
}
- },
- "properties": [
- {
- "id": "custom.hideFrom",
- "value": {
- "legend": true,
- "tooltip": true,
- "viz": true
- }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byValue",
+ "options": {
+ "op": "gte",
+ "reducer": "allIsZero",
+ "value": 0
+ }
+ },
+ "properties": [
+ {
+ "id": "custom.hideFrom",
+ "value": {
+ "legend": true,
+ "tooltip": true,
+ "viz": true
}
- ]
- }
- ]
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 35
+ },
+ "id": 165,
+ "options": {
+ "legend": {
+ "calcs": [
+ "last"
+ ],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true,
+ "sortBy": "Name",
+ "sortDesc": false
},
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 12,
- "y": 44
+ "tooltip": {
+ "mode": "multi",
+ "sort": "desc"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "exemplar": false,
+ "expr": "histogram_quantile(0.99, sum by(instance, le, db) (rate(greptime_servers_grpc_prom_request_elapsed_bucket{instance=~\"$instance\"}[$__rate_interval])))",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{instance}}-{{db}}-p99",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
},
- "id": 167,
- "options": {
- "legend": {
- "calcs": [
- "last"
- ],
- "displayMode": "table",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "multi",
- "sort": "none"
- }
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "exemplar": false,
+ "expr": "sum by(instance, db) (rate(greptime_servers_grpc_prom_request_elapsed_count{instance=~\"$instance\"}[$__rate_interval]))",
+ "fullMetaSearch": false,
+ "hide": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "interval": "$__rate_interval",
+ "legendFormat": "{{instance}}-{{db}}-qps",
+ "range": true,
+ "refId": "B",
+ "useBackend": false
+ }
+ ],
+ "title": "gRPC prom per DB",
+ "type": "timeseries"
+ },
+ {
+ "collapsed": false,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 43
+ },
+ "id": 166,
+ "panels": [],
+ "title": "Frontend-Datanode Call",
+ "type": "row"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "points",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "s"
},
- "targets": [
+ "overrides": [
{
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "disableTextWrap": false,
- "editorMode": "builder",
- "exemplar": false,
- "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, request_type) (rate(greptime_grpc_region_request_bucket{greptime_pod=~\"$greptime_pod\"}[$__rate_interval])))",
- "fullMetaSearch": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{greptime_pod}}-{{request_type}}-p99",
- "range": true,
- "refId": "A",
- "useBackend": false
+ "matcher": {
+ "id": "byRegexp",
+ "options": ".*?-rps"
+ },
+ "properties": [
+ {
+ "id": "custom.drawStyle",
+ "value": "line"
+ },
+ {
+ "id": "custom.axisPlacement",
+ "value": "right"
+ },
+ {
+ "id": "unit",
+ "value": "rowsps"
+ },
+ {
+ "id": "custom.stacking",
+ "value": {
+ "group": "A",
+ "mode": "none"
+ }
+ }
+ ]
},
{
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "disableTextWrap": false,
- "editorMode": "builder",
- "exemplar": false,
- "expr": "sum by(greptime_pod, request_type) (rate(greptime_grpc_region_request_count{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))",
- "fullMetaSearch": false,
- "hide": false,
- "includeNullMetadata": true,
- "instant": false,
- "interval": "$__rate_interval",
- "legendFormat": "{{greptime_pod}}-{{request_type}}-qps",
- "range": true,
- "refId": "B",
- "useBackend": false
+ "matcher": {
+ "id": "byValue",
+ "options": {
+ "op": "gte",
+ "reducer": "allIsZero",
+ "value": 0
+ }
+ },
+ "properties": [
+ {
+ "id": "custom.hideFrom",
+ "value": {
+ "legend": true,
+ "tooltip": true,
+ "viz": true
+ }
+ }
+ ]
}
- ],
- "title": "gRPC region call",
- "type": "timeseries"
+ ]
},
- {
- "collapsed": false,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 52
- },
- "id": 168,
- "panels": [],
- "title": "Datanode Mito",
- "type": "row"
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 44
},
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
+ "id": 186,
+ "options": {
+ "legend": {
+ "calcs": [
+ "last"
+ ],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true,
+ "sortBy": "Name",
+ "sortDesc": true
},
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
+ "tooltip": {
+ "mode": "multi",
+ "sort": "desc"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "exemplar": false,
+ "expr": "rate(greptime_table_operator_ingest_rows{instance=~\"$instance\"}[$__rate_interval])",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{instance}}-rps",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
+ }
+ ],
+ "title": "Ingested rows",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "points",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
}
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "s"
+ ]
},
- "overrides": [
- {
- "matcher": {
- "id": "byRegexp",
- "options": ".*?-qps"
- },
- "properties": [
- {
- "id": "custom.drawStyle",
- "value": "points"
- },
- {
- "id": "custom.axisPlacement",
- "value": "right"
- },
- {
- "id": "unit",
- "value": "ops"
- },
- {
- "id": "custom.stacking",
- "value": {
- "group": "A",
- "mode": "none"
- }
- }
- ]
- },
- {
- "matcher": {
- "id": "byValue",
- "options": {
- "op": "gte",
- "reducer": "allIsZero",
- "value": 0
+ "unit": "s"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byRegexp",
+ "options": ".*?-qps"
+ },
+ "properties": [
+ {
+ "id": "custom.drawStyle",
+ "value": "line"
+ },
+ {
+ "id": "custom.axisPlacement",
+ "value": "right"
+ },
+ {
+ "id": "unit",
+ "value": "ops"
+ },
+ {
+ "id": "custom.stacking",
+ "value": {
+ "group": "A",
+ "mode": "none"
}
- },
- "properties": [
- {
- "id": "custom.hideFrom",
- "value": {
- "legend": true,
- "tooltip": true,
- "viz": true
- }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byValue",
+ "options": {
+ "op": "gte",
+ "reducer": "allIsZero",
+ "value": 0
+ }
+ },
+ "properties": [
+ {
+ "id": "custom.hideFrom",
+ "value": {
+ "legend": true,
+ "tooltip": true,
+ "viz": true
}
- ]
- }
- ]
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 44
+ },
+ "id": 167,
+ "options": {
+ "legend": {
+ "calcs": [
+ "last"
+ ],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
},
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 53
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "exemplar": false,
+ "expr": "histogram_quantile(0.99, sum by(instance, le, request_type) (rate(greptime_grpc_region_request_bucket{instance=~\"$instance\"}[$__rate_interval])))",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{instance}}-{{request_type}}-p99",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
},
- "id": 188,
- "options": {
- "legend": {
- "calcs": [
- "last"
- ],
- "displayMode": "table",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "multi",
- "sort": "none"
- }
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "exemplar": false,
+ "expr": "sum by(instance, request_type) (rate(greptime_grpc_region_request_count{instance=~\"$instance\"}[$__rate_interval]))",
+ "fullMetaSearch": false,
+ "hide": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "interval": "$__rate_interval",
+ "legendFormat": "{{instance}}-{{request_type}}-qps",
+ "range": true,
+ "refId": "B",
+ "useBackend": false
+ }
+ ],
+ "title": "gRPC region call",
+ "type": "timeseries"
+ },
+ {
+ "collapsed": false,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 52
+ },
+ "id": 168,
+ "panels": [],
+ "title": "Datanode Mito",
+ "type": "row"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "s"
},
- "targets": [
+ "overrides": [
{
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "disableTextWrap": false,
- "editorMode": "builder",
- "exemplar": false,
- "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, type) (rate(greptime_mito_handle_request_elapsed_bucket{greptime_pod=~\"$greptime_pod\"}[$__rate_interval])))",
- "fullMetaSearch": false,
- "hide": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{greptime_pod}}-{{type}}-p99",
- "range": true,
- "refId": "A",
- "useBackend": false
+ "matcher": {
+ "id": "byRegexp",
+ "options": ".*?-qps"
+ },
+ "properties": [
+ {
+ "id": "custom.drawStyle",
+ "value": "points"
+ },
+ {
+ "id": "custom.axisPlacement",
+ "value": "right"
+ },
+ {
+ "id": "unit",
+ "value": "ops"
+ },
+ {
+ "id": "custom.stacking",
+ "value": {
+ "group": "A",
+ "mode": "none"
+ }
+ }
+ ]
},
{
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "disableTextWrap": false,
- "editorMode": "builder",
- "exemplar": false,
- "expr": "sum by(greptime_pod, type) (rate(greptime_mito_handle_request_elapsed_count{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))",
- "fullMetaSearch": false,
- "hide": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{greptime_pod}}-{{type}}-qps",
- "range": true,
- "refId": "B",
- "useBackend": false
+ "matcher": {
+ "id": "byValue",
+ "options": {
+ "op": "gte",
+ "reducer": "allIsZero",
+ "value": 0
+ }
+ },
+ "properties": [
+ {
+ "id": "custom.hideFrom",
+ "value": {
+ "legend": true,
+ "tooltip": true,
+ "viz": true
+ }
+ }
+ ]
}
- ],
- "title": "handle request",
- "type": "timeseries"
+ ]
},
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 53
+ },
+ "id": 188,
+ "options": {
+ "legend": {
+ "calcs": [
+ "last"
+ ],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
},
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "exemplar": false,
+ "expr": "histogram_quantile(0.99, sum by(instance, le, type) (rate(greptime_mito_handle_request_elapsed_bucket{instance=~\"$instance\"}[$__rate_interval])))",
+ "fullMetaSearch": false,
+ "hide": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{instance}}-{{type}}-p99",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "exemplar": false,
+ "expr": "sum by(instance, type) (rate(greptime_mito_handle_request_elapsed_count{instance=~\"$instance\"}[$__rate_interval]))",
+ "fullMetaSearch": false,
+ "hide": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{instance}}-{{type}}-qps",
+ "range": true,
+ "refId": "B",
+ "useBackend": false
+ }
+ ],
+ "title": "Handle request",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
}
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "decbytes"
+ ]
},
- "overrides": []
+ "unit": "decbytes"
},
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 12,
- "y": 53
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 53
+ },
+ "id": 187,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
},
- "id": 187,
- "options": {
- "legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "single",
- "sort": "none"
- }
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "expr": "greptime_mito_write_buffer_bytes{instance=~\"$instance\"}",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{instance}}",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
+ }
+ ],
+ "title": "Write buffer bytes",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "none"
},
- "targets": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "disableTextWrap": false,
- "editorMode": "builder",
- "expr": "greptime_mito_write_buffer_bytes{greptime_pod=~\"$greptime_pod\"}",
- "fullMetaSearch": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{greptime_pod}}",
- "range": true,
- "refId": "A",
- "useBackend": false
- }
- ],
- "title": "Write buffer bytes",
- "type": "timeseries"
+ "overrides": []
},
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 61
+ },
+ "id": 189,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
},
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "ops"
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
},
- "overrides": [
- {
- "matcher": {
- "id": "byRegexp",
- "options": ".*?-bytes"
- },
- "properties": [
- {
- "id": "custom.drawStyle",
- "value": "points"
- },
- {
- "id": "custom.axisPlacement",
- "value": "right"
- },
- {
- "id": "unit",
- "value": "bytes"
- },
- {
- "id": "custom.stacking",
- "value": {
- "group": "A",
- "mode": "none"
- }
- }
- ]
- },
- {
- "matcher": {
- "id": "byValue",
- "options": {
- "op": "gte",
- "reducer": "allIsZero",
- "value": 0
- }
- },
- "properties": [
- {
- "id": "custom.hideFrom",
- "value": {
- "legend": true,
- "tooltip": true,
- "viz": true
- }
- }
- ]
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "expr": "sum by(instance) (greptime_mito_write_stall_total{instance=~\"$instance\"})",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{instance}}",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
+ }
+ ],
+ "title": "Write buffer bytes",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
}
- ]
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 61
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "s"
},
- "id": 169,
- "options": {
- "legend": {
- "calcs": [
- "last"
- ],
- "displayMode": "table",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "multi",
- "sort": "none"
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byValue",
+ "options": {
+ "op": "gte",
+ "reducer": "allIsZero",
+ "value": 0
+ }
+ },
+ "properties": [
+ {
+ "id": "custom.hideFrom",
+ "value": {
+ "legend": true,
+ "tooltip": true,
+ "viz": true
+ }
+ }
+ ]
}
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 61
+ },
+ "id": 170,
+ "options": {
+ "legend": {
+ "calcs": [
+ "last"
+ ],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
},
- "targets": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "disableTextWrap": false,
- "editorMode": "builder",
- "exemplar": false,
- "expr": "sum by(greptime_pod, reason) (rate(greptime_mito_flush_requests_total{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))",
- "fullMetaSearch": false,
- "hide": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{greptime_pod}}-{{reason}}-success",
- "range": true,
- "refId": "A",
- "useBackend": false
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "exemplar": false,
+ "expr": "histogram_quantile(0.99, sum by(instance, le, stage) (rate(greptime_mito_write_stage_elapsed_bucket{instance=~\"$instance\"}[$__rate_interval])))",
+ "fullMetaSearch": false,
+ "hide": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{instance}}-{{stage}}-p99",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
+ }
+ ],
+ "title": "Write stage",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
},
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "ops"
+ },
+ "overrides": [
{
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "disableTextWrap": false,
- "editorMode": "builder",
- "exemplar": false,
- "expr": "sum by(greptime_pod, reason) (rate(greptime_mito_flush_errors_total{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))",
- "fullMetaSearch": false,
- "hide": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{greptime_pod}}-{{reason}}-error",
- "range": true,
- "refId": "B",
- "useBackend": false
+ "matcher": {
+ "id": "byRegexp",
+ "options": ".*?-bytes"
+ },
+ "properties": [
+ {
+ "id": "custom.drawStyle",
+ "value": "points"
+ },
+ {
+ "id": "custom.axisPlacement",
+ "value": "right"
+ },
+ {
+ "id": "unit",
+ "value": "bytes"
+ },
+ {
+ "id": "custom.stacking",
+ "value": {
+ "group": "A",
+ "mode": "none"
+ }
+ }
+ ]
},
{
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "disableTextWrap": false,
- "editorMode": "builder",
- "exemplar": false,
- "expr": "sum by(greptime_pod) (rate(greptime_mito_flush_bytes_total{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))",
- "fullMetaSearch": false,
- "hide": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{greptime_pod}}-bytes",
- "range": true,
- "refId": "C",
- "useBackend": false
+ "matcher": {
+ "id": "byValue",
+ "options": {
+ "op": "gte",
+ "reducer": "allIsZero",
+ "value": 0
+ }
+ },
+ "properties": [
+ {
+ "id": "custom.hideFrom",
+ "value": {
+ "legend": true,
+ "tooltip": true,
+ "viz": true
+ }
+ }
+ ]
}
- ],
- "title": "flush total",
- "type": "timeseries"
+ ]
},
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 69
+ },
+ "id": 169,
+ "options": {
+ "legend": {
+ "calcs": [
+ "last"
+ ],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
},
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "points",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "exemplar": false,
+ "expr": "sum by(instance, reason) (rate(greptime_mito_flush_requests_total{instance=~\"$instance\"}[$__rate_interval]))",
+ "fullMetaSearch": false,
+ "hide": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{instance}}-{{reason}}-success",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "exemplar": false,
+ "expr": "sum by(instance, reason) (rate(greptime_mito_flush_errors_total{instance=~\"$instance\"}[$__rate_interval]))",
+ "fullMetaSearch": false,
+ "hide": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{instance}}-{{reason}}-error",
+ "range": true,
+ "refId": "B",
+ "useBackend": false
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "exemplar": false,
+ "expr": "sum by(instance) (rate(greptime_mito_flush_bytes_total{instance=~\"$instance\"}[$__rate_interval]))",
+ "fullMetaSearch": false,
+ "hide": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{instance}}-bytes",
+ "range": true,
+ "refId": "C",
+ "useBackend": false
+ }
+ ],
+ "title": "Flush total",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "points",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
}
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "s"
+ ]
},
- "overrides": [
- {
- "matcher": {
- "id": "byRegexp",
- "options": ".*?-qps"
- },
- "properties": [
- {
- "id": "custom.drawStyle",
- "value": "line"
- },
- {
- "id": "custom.axisPlacement",
- "value": "right"
- },
- {
- "id": "unit",
- "value": "ops"
- },
- {
- "id": "custom.stacking",
- "value": {
- "group": "A",
- "mode": "none"
- }
- }
- ]
- },
- {
- "matcher": {
- "id": "byValue",
- "options": {
- "op": "gte",
- "reducer": "allIsZero",
- "value": 0
+ "unit": "s"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byRegexp",
+ "options": ".*?-qps"
+ },
+ "properties": [
+ {
+ "id": "custom.drawStyle",
+ "value": "line"
+ },
+ {
+ "id": "custom.axisPlacement",
+ "value": "right"
+ },
+ {
+ "id": "unit",
+ "value": "ops"
+ },
+ {
+ "id": "custom.stacking",
+ "value": {
+ "group": "A",
+ "mode": "none"
}
- },
- "properties": [
- {
- "id": "custom.hideFrom",
- "value": {
- "legend": true,
- "tooltip": true,
- "viz": true
- }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byValue",
+ "options": {
+ "op": "gte",
+ "reducer": "allIsZero",
+ "value": 0
+ }
+ },
+ "properties": [
+ {
+ "id": "custom.hideFrom",
+ "value": {
+ "legend": true,
+ "tooltip": true,
+ "viz": true
}
- ]
- }
- ]
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 69
+ },
+ "id": 191,
+ "options": {
+ "legend": {
+ "calcs": [
+ "last"
+ ],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
},
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 12,
- "y": 61
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "exemplar": false,
+ "expr": "histogram_quantile(0.99, sum by(instance, le, stage) (rate(greptime_mito_read_stage_elapsed_bucket{instance=~\"$instance\"}[$__rate_interval])))",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{instance}}-{{stage}}-p99",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
},
- "id": 170,
- "options": {
- "legend": {
- "calcs": [
- "last"
- ],
- "displayMode": "table",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "multi",
- "sort": "none"
- }
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "exemplar": false,
+ "expr": "sum by(instance) (rate(greptime_mito_read_stage_elapsed_count{instance=~\"$instance\", stage=\"total\"}[$__rate_interval]))",
+ "fullMetaSearch": false,
+ "hide": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "interval": "$__rate_interval",
+ "legendFormat": "{{instance}}-qps",
+ "range": true,
+ "refId": "B",
+ "useBackend": false
+ }
+ ],
+ "title": "Read stage",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "points",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "s"
},
- "targets": [
+ "overrides": [
{
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "disableTextWrap": false,
- "editorMode": "builder",
- "exemplar": false,
- "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, stage) (rate(greptime_mito_write_stage_elapsed_bucket{greptime_pod=~\"$greptime_pod\"}[$__rate_interval])))",
- "fullMetaSearch": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{greptime_pod}}-{{stage}}-p99",
- "range": true,
- "refId": "A",
- "useBackend": false
+ "matcher": {
+ "id": "byRegexp",
+ "options": ".*?-qps"
+ },
+ "properties": [
+ {
+ "id": "custom.drawStyle",
+ "value": "line"
+ },
+ {
+ "id": "custom.axisPlacement",
+ "value": "right"
+ },
+ {
+ "id": "unit",
+ "value": "ops"
+ },
+ {
+ "id": "custom.stacking",
+ "value": {
+ "group": "A",
+ "mode": "none"
+ }
+ }
+ ]
},
{
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "disableTextWrap": false,
- "editorMode": "builder",
- "exemplar": false,
- "expr": "sum by(greptime_pod, stage) (rate(greptime_mito_write_stage_elapsed_count{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))",
- "fullMetaSearch": false,
- "hide": false,
- "includeNullMetadata": true,
- "instant": false,
- "interval": "$__rate_interval",
- "legendFormat": "{{greptime_pod}}-{{stage}}-qps",
- "range": true,
- "refId": "B",
- "useBackend": false
+ "matcher": {
+ "id": "byValue",
+ "options": {
+ "op": "gte",
+ "reducer": "allIsZero",
+ "value": 0
+ }
+ },
+ "properties": [
+ {
+ "id": "custom.hideFrom",
+ "value": {
+ "legend": true,
+ "tooltip": true,
+ "viz": true
+ }
+ }
+ ]
}
- ],
- "title": "write stage",
- "type": "timeseries"
+ ]
},
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 77
+ },
+ "id": 172,
+ "options": {
+ "legend": {
+ "calcs": [
+ "last"
+ ],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
},
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "points",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "exemplar": false,
+ "expr": "histogram_quantile(0.99, sum by(instance, le) (rate(greptime_mito_compaction_total_elapsed_bucket{instance=~\"$instance\"}[$__rate_interval])))",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{instance}}-p99",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "exemplar": false,
+ "expr": "sum by(instance) (rate(greptime_mito_compaction_total_elapsed_count{instance=~\"$instance\"}[$__rate_interval]))",
+ "fullMetaSearch": false,
+ "hide": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "interval": "$__rate_interval",
+ "legendFormat": "{{instance}}-qps",
+ "range": true,
+ "refId": "B",
+ "useBackend": false
+ }
+ ],
+ "title": "Compaction total",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "s"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byValue",
+ "options": {
+ "op": "gte",
+ "reducer": "allIsZero",
+ "value": 0
}
},
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
+ "properties": [
+ {
+ "id": "custom.hideFrom",
+ "value": {
+ "legend": true,
+ "tooltip": true,
+ "viz": true
}
- ]
- },
- "unit": "s"
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 77
+ },
+ "id": 171,
+ "options": {
+ "legend": {
+ "calcs": [
+ "last"
+ ],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
},
- "overrides": [
- {
- "matcher": {
- "id": "byRegexp",
- "options": ".*?-qps"
- },
- "properties": [
- {
- "id": "custom.drawStyle",
- "value": "line"
- },
- {
- "id": "custom.axisPlacement",
- "value": "right"
- },
- {
- "id": "unit",
- "value": "ops"
- },
- {
- "id": "custom.stacking",
- "value": {
- "group": "A",
- "mode": "none"
- }
- }
- ]
- },
- {
- "matcher": {
- "id": "byValue",
- "options": {
- "op": "gte",
- "reducer": "allIsZero",
- "value": 0
- }
- },
- "properties": [
- {
- "id": "custom.hideFrom",
- "value": {
- "legend": true,
- "tooltip": true,
- "viz": true
- }
- }
- ]
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "exemplar": false,
+ "expr": "histogram_quantile(0.99, sum by(instance, le, stage) (rate(greptime_mito_compaction_stage_elapsed_bucket{instance=~\"$instance\"}[$__rate_interval])))",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{instance}}-{{stage}}-p99",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
+ }
+ ],
+ "title": "Compaction stage",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
}
- ]
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "decbytes"
},
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 69
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 85
+ },
+ "id": 192,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
},
- "id": 172,
- "options": {
- "legend": {
- "calcs": [
- "last"
- ],
- "displayMode": "table",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "multi",
- "sort": "none"
- }
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "disableTextWrap": false,
+ "editorMode": "code",
+ "expr": "greptime_mito_cache_bytes{instance=~\"$instance\"}",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{instance}}-{{type}}",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
+ }
+ ],
+ "title": "Cached bytes",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "percentunit"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 85
+ },
+ "id": 193,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "disableTextWrap": false,
+ "editorMode": "code",
+ "expr": "sum(increase(greptime_mito_cache_hit[$__rate_interval])) by (instance, type) / (sum(increase(greptime_mito_cache_miss[$__rate_interval])) by (instance, type) + sum(increase(greptime_mito_cache_hit[$__rate_interval])) by (instance, type))",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{instance}}-{{type}}",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
+ }
+ ],
+ "title": "Cache hit rate",
+ "type": "timeseries"
+ },
+ {
+ "collapsed": false,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 93
+ },
+ "id": 173,
+ "panels": [],
+ "title": "OpenDAL",
+ "type": "row"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "points",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "s"
},
- "targets": [
+ "overrides": [
{
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "disableTextWrap": false,
- "editorMode": "builder",
- "exemplar": false,
- "expr": "histogram_quantile(0.99, sum by(greptime_pod, le) (rate(greptime_mito_compaction_total_elapsed_bucket{greptime_pod=~\"$greptime_pod\"}[$__rate_interval])))",
- "fullMetaSearch": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{greptime_pod}}-p99",
- "range": true,
- "refId": "A",
- "useBackend": false
+ "matcher": {
+ "id": "byRegexp",
+ "options": ".*?-qps"
+ },
+ "properties": [
+ {
+ "id": "custom.drawStyle",
+ "value": "line"
+ },
+ {
+ "id": "custom.axisPlacement",
+ "value": "right"
+ },
+ {
+ "id": "unit",
+ "value": "ops"
+ },
+ {
+ "id": "custom.stacking",
+ "value": {
+ "group": "A",
+ "mode": "none"
+ }
+ }
+ ]
},
{
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "disableTextWrap": false,
- "editorMode": "builder",
- "exemplar": false,
- "expr": "sum by(greptime_pod) (rate(greptime_mito_compaction_total_elapsed_count{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))",
- "fullMetaSearch": false,
- "hide": false,
- "includeNullMetadata": true,
- "instant": false,
- "interval": "$__rate_interval",
- "legendFormat": "{{greptime_pod}}-qps",
- "range": true,
- "refId": "B",
- "useBackend": false
+ "matcher": {
+ "id": "byValue",
+ "options": {
+ "op": "gte",
+ "reducer": "allIsZero",
+ "value": 0
+ }
+ },
+ "properties": [
+ {
+ "id": "custom.hideFrom",
+ "value": {
+ "legend": true,
+ "tooltip": true,
+ "viz": true
+ }
+ }
+ ]
}
- ],
- "title": "compaction total",
- "type": "timeseries"
+ ]
},
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
+ "gridPos": {
+ "h": 8,
+ "w": 6,
+ "x": 0,
+ "y": 94
+ },
+ "id": 178,
+ "options": {
+ "legend": {
+ "calcs": [
+ "last"
+ ],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
},
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "points",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "exemplar": false,
+ "expr": "histogram_quantile(0.99, sum by(instance, le, scheme) (rate(opendal_requests_duration_seconds_bucket{instance=~\"$instance\",operation=\"read\"}[$__rate_interval])))",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{instance}}-{{scheme}}-p99",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "exemplar": false,
+ "expr": "sum by(instance, scheme) (rate(opendal_requests_duration_seconds_count{instance=~\"$instance\", operation=\"read\"}[$__rate_interval]))",
+ "fullMetaSearch": false,
+ "hide": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "interval": "$__rate_interval",
+ "legendFormat": "{{instance}}-{{scheme}}-qps",
+ "range": true,
+ "refId": "B",
+ "useBackend": false
+ }
+ ],
+ "title": "Read requests",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "points",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
}
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "s"
+ ]
},
- "overrides": [
- {
- "matcher": {
- "id": "byRegexp",
- "options": ".*?-qps"
- },
- "properties": [
- {
- "id": "custom.drawStyle",
- "value": "line"
- },
- {
- "id": "custom.axisPlacement",
- "value": "right"
- },
- {
- "id": "unit",
- "value": "ops"
- },
- {
- "id": "custom.stacking",
- "value": {
- "group": "A",
- "mode": "none"
- }
- }
- ]
- },
- {
- "matcher": {
- "id": "byValue",
- "options": {
- "op": "gte",
- "reducer": "allIsZero",
- "value": 0
+ "unit": "s"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byRegexp",
+ "options": ".*?-qps"
+ },
+ "properties": [
+ {
+ "id": "custom.drawStyle",
+ "value": "line"
+ },
+ {
+ "id": "custom.axisPlacement",
+ "value": "right"
+ },
+ {
+ "id": "unit",
+ "value": "ops"
+ },
+ {
+ "id": "custom.stacking",
+ "value": {
+ "group": "A",
+ "mode": "none"
}
- },
- "properties": [
- {
- "id": "custom.hideFrom",
- "value": {
- "legend": true,
- "tooltip": true,
- "viz": true
- }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byValue",
+ "options": {
+ "op": "gte",
+ "reducer": "allIsZero",
+ "value": 0
+ }
+ },
+ "properties": [
+ {
+ "id": "custom.hideFrom",
+ "value": {
+ "legend": true,
+ "tooltip": true,
+ "viz": true
}
- ]
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 6,
+ "x": 6,
+ "y": 94
+ },
+ "id": 179,
+ "options": {
+ "legend": {
+ "calcs": [
+ "last"
+ ],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "exemplar": false,
+ "expr": "histogram_quantile(0.99, sum by(instance, le, scheme) (rate(opendal_requests_duration_seconds_bucket{instance=~\"$instance\", operation=\"write\"}[$__rate_interval])))",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{instance}}-{{scheme}}-p99",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "exemplar": false,
+ "expr": "sum by(instance, scheme) (rate(opendal_requests_duration_seconds_count{instance=~\"$instance\", operation=\"write\"}[$__rate_interval]))",
+ "fullMetaSearch": false,
+ "hide": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "interval": "$__rate_interval",
+ "legendFormat": "{{instance}}-{{scheme}}-qps",
+ "range": true,
+ "refId": "B",
+ "useBackend": false
+ }
+ ],
+ "title": "Write requests",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "points",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
}
- ]
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 12,
- "y": 69
- },
- "id": 171,
- "options": {
- "legend": {
- "calcs": [
- "last"
- ],
- "displayMode": "table",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "multi",
- "sort": "none"
- }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "s"
},
- "targets": [
+ "overrides": [
{
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "disableTextWrap": false,
- "editorMode": "builder",
- "exemplar": false,
- "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_bucket{greptime_pod=~\"$greptime_pod\"}[$__rate_interval])))",
- "fullMetaSearch": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{greptime_pod}}-{{stage}}-p99",
- "range": true,
- "refId": "A",
- "useBackend": false
+ "matcher": {
+ "id": "byRegexp",
+ "options": ".*?-qps"
+ },
+ "properties": [
+ {
+ "id": "custom.drawStyle",
+ "value": "line"
+ },
+ {
+ "id": "custom.axisPlacement",
+ "value": "right"
+ },
+ {
+ "id": "unit",
+ "value": "ops"
+ },
+ {
+ "id": "custom.stacking",
+ "value": {
+ "group": "A",
+ "mode": "none"
+ }
+ }
+ ]
},
{
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "disableTextWrap": false,
- "editorMode": "builder",
- "exemplar": false,
- "expr": "sum by(greptime_pod, stage) (rate(greptime_mito_compaction_stage_elapsed_count{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))",
- "fullMetaSearch": false,
- "hide": false,
- "includeNullMetadata": true,
- "instant": false,
- "interval": "$__rate_interval",
- "legendFormat": "{{greptime_pod}}-{{stage}}-qps",
- "range": true,
- "refId": "B",
- "useBackend": false
+ "matcher": {
+ "id": "byValue",
+ "options": {
+ "op": "gte",
+ "reducer": "allIsZero",
+ "value": 0
+ }
+ },
+ "properties": [
+ {
+ "id": "custom.hideFrom",
+ "value": {
+ "legend": true,
+ "tooltip": true,
+ "viz": true
+ }
+ }
+ ]
}
- ],
- "title": "compaction stage",
- "type": "timeseries"
+ ]
},
- {
- "collapsed": false,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 77
+ "gridPos": {
+ "h": 8,
+ "w": 6,
+ "x": 12,
+ "y": 94
+ },
+ "id": 180,
+ "options": {
+ "legend": {
+ "calcs": [
+ "last"
+ ],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
},
- "id": 173,
- "panels": [],
- "title": "OpenDAL",
- "type": "row"
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
},
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "exemplar": false,
+ "expr": "histogram_quantile(0.99, sum by(instance, le, scheme) (rate(opendal_requests_duration_seconds_bucket{instance=~\"$instance\", operation=\"list\"}[$__rate_interval])))",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{instance}}-{{scheme}}-p99",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
},
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "points",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "s"
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
},
- "overrides": [
- {
- "matcher": {
- "id": "byRegexp",
- "options": ".*?-qps"
- },
- "properties": [
- {
- "id": "custom.drawStyle",
- "value": "line"
- },
- {
- "id": "custom.axisPlacement",
- "value": "right"
- },
- {
- "id": "unit",
- "value": "ops"
- },
- {
- "id": "custom.stacking",
- "value": {
- "group": "A",
- "mode": "none"
- }
- }
- ]
- },
- {
- "matcher": {
- "id": "byValue",
- "options": {
- "op": "gte",
- "reducer": "allIsZero",
- "value": 0
- }
- },
- "properties": [
- {
- "id": "custom.hideFrom",
- "value": {
- "legend": true,
- "tooltip": true,
- "viz": true
- }
- }
- ]
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "exemplar": false,
+ "expr": "sum by(instance, scheme) (rate(opendal_requests_duration_seconds_count{instance=~\"$instance\", operation=\"list\"}[$__rate_interval]))",
+ "fullMetaSearch": false,
+ "hide": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "interval": "$__rate_interval",
+ "legendFormat": "{{instance}}-{{scheme}}-qps",
+ "range": true,
+ "refId": "B",
+ "useBackend": false
+ }
+ ],
+ "title": "List requests",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "points",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
}
- ]
- },
- "gridPos": {
- "h": 8,
- "w": 6,
- "x": 0,
- "y": 78
- },
- "id": 178,
- "options": {
- "legend": {
- "calcs": [
- "last"
- ],
- "displayMode": "table",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "multi",
- "sort": "none"
- }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "s"
},
- "targets": [
+ "overrides": [
{
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "disableTextWrap": false,
- "editorMode": "builder",
- "exemplar": false,
- "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, scheme) (rate(opendal_requests_duration_seconds_bucket{greptime_pod=~\"$greptime_pod\",operation=\"read\"}[$__rate_interval])))",
- "fullMetaSearch": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{greptime_pod}}-{{scheme}}-p99",
- "range": true,
- "refId": "A",
- "useBackend": false
+ "matcher": {
+ "id": "byRegexp",
+ "options": ".*?-qps"
+ },
+ "properties": [
+ {
+ "id": "custom.drawStyle",
+ "value": "line"
+ },
+ {
+ "id": "custom.axisPlacement",
+ "value": "right"
+ },
+ {
+ "id": "unit",
+ "value": "ops"
+ },
+ {
+ "id": "custom.stacking",
+ "value": {
+ "group": "A",
+ "mode": "none"
+ }
+ }
+ ]
},
{
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "disableTextWrap": false,
- "editorMode": "builder",
- "exemplar": false,
- "expr": "sum by(greptime_pod, scheme) (rate(opendal_requests_duration_seconds_count{greptime_pod=~\"$greptime_pod\", operation=\"read\"}[$__rate_interval]))",
- "fullMetaSearch": false,
- "hide": false,
- "includeNullMetadata": true,
- "instant": false,
- "interval": "$__rate_interval",
- "legendFormat": "{{greptime_pod}}-{{scheme}}-qps",
- "range": true,
- "refId": "B",
- "useBackend": false
- }
- ],
- "title": "requests_duration_seconds_READ",
- "type": "timeseries"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "points",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
+ "matcher": {
+ "id": "byValue",
+ "options": {
+ "op": "gte",
+ "reducer": "allIsZero",
+ "value": 0
}
},
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "s"
- },
- "overrides": [
- {
- "matcher": {
- "id": "byRegexp",
- "options": ".*?-qps"
- },
- "properties": [
- {
- "id": "custom.drawStyle",
- "value": "line"
- },
- {
- "id": "custom.axisPlacement",
- "value": "right"
- },
- {
- "id": "unit",
- "value": "ops"
- },
- {
- "id": "custom.stacking",
- "value": {
- "group": "A",
- "mode": "none"
- }
+ "properties": [
+ {
+ "id": "custom.hideFrom",
+ "value": {
+ "legend": true,
+ "tooltip": true,
+ "viz": true
}
- ]
- },
- {
- "matcher": {
- "id": "byValue",
- "options": {
- "op": "gte",
- "reducer": "allIsZero",
- "value": 0
- }
- },
- "properties": [
- {
- "id": "custom.hideFrom",
- "value": {
- "legend": true,
- "tooltip": true,
- "viz": true
- }
- }
- ]
- }
- ]
- },
- "gridPos": {
- "h": 8,
- "w": 6,
- "x": 6,
- "y": 78
- },
- "id": 179,
- "options": {
- "legend": {
- "calcs": [
- "last"
- ],
- "displayMode": "table",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "multi",
- "sort": "none"
+ }
+ ]
}
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 6,
+ "x": 18,
+ "y": 94
+ },
+ "id": 182,
+ "options": {
+ "legend": {
+ "calcs": [
+ "last"
+ ],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
},
- "targets": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "disableTextWrap": false,
- "editorMode": "builder",
- "exemplar": false,
- "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, scheme) (rate(opendal_requests_duration_seconds_bucket{greptime_pod=~\"$greptime_pod\", operation=\"write\"}[$__rate_interval])))",
- "fullMetaSearch": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{greptime_pod}}-{{scheme}}-p99",
- "range": true,
- "refId": "A",
- "useBackend": false
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "disableTextWrap": false,
- "editorMode": "builder",
- "exemplar": false,
- "expr": "sum by(greptime_pod, scheme) (rate(opendal_requests_duration_seconds_count{greptime_pod=~\"$greptime_pod\", operation=\"write\"}[$__rate_interval]))",
- "fullMetaSearch": false,
- "hide": false,
- "includeNullMetadata": true,
- "instant": false,
- "interval": "$__rate_interval",
- "legendFormat": "{{greptime_pod}}-{{scheme}}-qps",
- "range": true,
- "refId": "B",
- "useBackend": false
- }
- ],
- "title": "requests_duration_seconds_WRITE",
- "type": "timeseries"
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
},
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "exemplar": false,
+ "expr": "histogram_quantile(0.99, sum by(instance, le, scheme) (rate(opendal_requests_duration_seconds_bucket{instance=~\"$instance\", operation=\"stat\"}[$__rate_interval])))",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{instance}}-{{scheme}}-p99",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
},
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "points",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "s"
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
},
- "overrides": [
- {
- "matcher": {
- "id": "byRegexp",
- "options": ".*?-qps"
- },
- "properties": [
- {
- "id": "custom.drawStyle",
- "value": "line"
- },
- {
- "id": "custom.axisPlacement",
- "value": "right"
- },
- {
- "id": "unit",
- "value": "ops"
- },
- {
- "id": "custom.stacking",
- "value": {
- "group": "A",
- "mode": "none"
- }
- }
- ]
- },
- {
- "matcher": {
- "id": "byValue",
- "options": {
- "op": "gte",
- "reducer": "allIsZero",
- "value": 0
- }
- },
- "properties": [
- {
- "id": "custom.hideFrom",
- "value": {
- "legend": true,
- "tooltip": true,
- "viz": true
- }
- }
- ]
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "exemplar": false,
+ "expr": "sum by(instance, scheme) (rate(opendal_requests_duration_seconds_count{instance=~\"$instance\", operation=\"stat\"}[$__rate_interval]))",
+ "fullMetaSearch": false,
+ "hide": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "interval": "$__rate_interval",
+ "legendFormat": "{{instance}}-{{scheme}}-qps",
+ "range": true,
+ "refId": "B",
+ "useBackend": false
+ }
+ ],
+ "title": "Stat requests",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "points",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
}
- ]
- },
- "gridPos": {
- "h": 8,
- "w": 6,
- "x": 12,
- "y": 78
- },
- "id": 180,
- "options": {
- "legend": {
- "calcs": [
- "last"
- ],
- "displayMode": "table",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "multi",
- "sort": "none"
- }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "s"
},
- "targets": [
+ "overrides": [
{
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "disableTextWrap": false,
- "editorMode": "builder",
- "exemplar": false,
- "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, scheme) (rate(opendal_requests_duration_seconds_bucket{greptime_pod=~\"$greptime_pod\", operation=\"list\"}[$__rate_interval])))",
- "fullMetaSearch": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{greptime_pod}}-{{scheme}}-p99",
- "range": true,
- "refId": "A",
- "useBackend": false
+ "matcher": {
+ "id": "byRegexp",
+ "options": ".*?-qps"
+ },
+ "properties": [
+ {
+ "id": "custom.drawStyle",
+ "value": "line"
+ },
+ {
+ "id": "custom.axisPlacement",
+ "value": "right"
+ },
+ {
+ "id": "unit",
+ "value": "ops"
+ },
+ {
+ "id": "custom.stacking",
+ "value": {
+ "group": "A",
+ "mode": "none"
+ }
+ }
+ ]
},
{
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "disableTextWrap": false,
- "editorMode": "builder",
- "exemplar": false,
- "expr": "sum by(greptime_pod, scheme) (rate(opendal_requests_duration_seconds_count{greptime_pod=~\"$greptime_pod\", operation=\"list\"}[$__rate_interval]))",
- "fullMetaSearch": false,
- "hide": false,
- "includeNullMetadata": true,
- "instant": false,
- "interval": "$__rate_interval",
- "legendFormat": "{{greptime_pod}}-{{scheme}}-qps",
- "range": true,
- "refId": "B",
- "useBackend": false
- }
- ],
- "title": "requests_duration_seconds_LIST",
- "type": "timeseries"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "points",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
+ "matcher": {
+ "id": "byValue",
+ "options": {
+ "op": "gte",
+ "reducer": "allIsZero",
+ "value": 0
}
},
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "s"
- },
- "overrides": [
- {
- "matcher": {
- "id": "byRegexp",
- "options": ".*?-qps"
- },
- "properties": [
- {
- "id": "custom.drawStyle",
- "value": "line"
- },
- {
- "id": "custom.axisPlacement",
- "value": "right"
- },
- {
- "id": "unit",
- "value": "ops"
- },
- {
- "id": "custom.stacking",
- "value": {
- "group": "A",
- "mode": "none"
- }
- }
- ]
- },
- {
- "matcher": {
- "id": "byValue",
- "options": {
- "op": "gte",
- "reducer": "allIsZero",
- "value": 0
- }
- },
- "properties": [
- {
- "id": "custom.hideFrom",
- "value": {
- "legend": true,
- "tooltip": true,
- "viz": true
- }
+ "properties": [
+ {
+ "id": "custom.hideFrom",
+ "value": {
+ "legend": true,
+ "tooltip": true,
+ "viz": true
}
- ]
- }
- ]
- },
- "gridPos": {
- "h": 8,
- "w": 6,
- "x": 18,
- "y": 78
- },
- "id": 182,
- "options": {
- "legend": {
- "calcs": [
- "last"
- ],
- "displayMode": "table",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "multi",
- "sort": "none"
+ }
+ ]
}
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 8,
+ "x": 0,
+ "y": 102
+ },
+ "id": 181,
+ "options": {
+ "legend": {
+ "calcs": [
+ "last"
+ ],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
},
- "targets": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "disableTextWrap": false,
- "editorMode": "builder",
- "exemplar": false,
- "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, scheme) (rate(opendal_requests_duration_seconds_bucket{greptime_pod=~\"$greptime_pod\", operation=\"stat\"}[$__rate_interval])))",
- "fullMetaSearch": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{greptime_pod}}-{{scheme}}-p99",
- "range": true,
- "refId": "A",
- "useBackend": false
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "disableTextWrap": false,
- "editorMode": "builder",
- "exemplar": false,
- "expr": "sum by(greptime_pod, scheme) (rate(opendal_requests_duration_seconds_count{greptime_pod=~\"$greptime_pod\", operation=\"stat\"}[$__rate_interval]))",
- "fullMetaSearch": false,
- "hide": false,
- "includeNullMetadata": true,
- "instant": false,
- "interval": "$__rate_interval",
- "legendFormat": "{{greptime_pod}}-{{scheme}}-qps",
- "range": true,
- "refId": "B",
- "useBackend": false
- }
- ],
- "title": "requests_duration_seconds_STAT",
- "type": "timeseries"
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
},
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "exemplar": false,
+ "expr": "histogram_quantile(0.99, sum by(instance, le, scheme, operation) (rate(opendal_requests_duration_seconds_bucket{instance=~\"$instance\", operation!~\"read|write|list\"}[$__rate_interval])))",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{instance}}-{{scheme}}-{{operation}}-p99",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
},
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "points",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "s"
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
},
- "overrides": [
- {
- "matcher": {
- "id": "byRegexp",
- "options": ".*?-qps"
- },
- "properties": [
- {
- "id": "custom.drawStyle",
- "value": "line"
- },
- {
- "id": "custom.axisPlacement",
- "value": "right"
- },
- {
- "id": "unit",
- "value": "ops"
- },
- {
- "id": "custom.stacking",
- "value": {
- "group": "A",
- "mode": "none"
- }
- }
- ]
- },
- {
- "matcher": {
- "id": "byValue",
- "options": {
- "op": "gte",
- "reducer": "allIsZero",
- "value": 0
- }
- },
- "properties": [
- {
- "id": "custom.hideFrom",
- "value": {
- "legend": true,
- "tooltip": true,
- "viz": true
- }
- }
- ]
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "exemplar": false,
+ "expr": "sum by(instance, scheme, operation) (rate(opendal_requests_duration_seconds_count{instance=~\"$instance\", operation!~\"read|write|list\"}[$__rate_interval]))",
+ "fullMetaSearch": false,
+ "hide": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "interval": "$__rate_interval",
+ "legendFormat": "{{instance}}-{{scheme}}-{{operation}}-qps",
+ "range": true,
+ "refId": "B",
+ "useBackend": false
+ }
+ ],
+ "title": "Requests duration",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "points",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
}
- ]
- },
- "gridPos": {
- "h": 8,
- "w": 8,
- "x": 0,
- "y": 86
- },
- "id": 181,
- "options": {
- "legend": {
- "calcs": [
- "last"
- ],
- "displayMode": "table",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "multi",
- "sort": "none"
- }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "s"
},
- "targets": [
+ "overrides": [
{
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "disableTextWrap": false,
- "editorMode": "builder",
- "exemplar": false,
- "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, scheme, operation) (rate(opendal_requests_duration_seconds_bucket{greptime_pod=~\"$greptime_pod\", operation!~\"read|write|list\"}[$__rate_interval])))",
- "fullMetaSearch": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{greptime_pod}}-{{scheme}}-{{operation}}-p99",
- "range": true,
- "refId": "A",
- "useBackend": false
+ "matcher": {
+ "id": "byRegexp",
+ "options": ".*?-bytes"
+ },
+ "properties": [
+ {
+ "id": "custom.drawStyle",
+ "value": "line"
+ },
+ {
+ "id": "custom.axisPlacement",
+ "value": "right"
+ },
+ {
+ "id": "unit",
+ "value": "bytes"
+ },
+ {
+ "id": "custom.stacking",
+ "value": {
+ "group": "A",
+ "mode": "none"
+ }
+ }
+ ]
},
{
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "disableTextWrap": false,
- "editorMode": "builder",
- "exemplar": false,
- "expr": "sum by(greptime_pod, scheme, operation) (rate(opendal_requests_duration_seconds_count{greptime_pod=~\"$greptime_pod\", operation!~\"read|write|list\"}[$__rate_interval]))",
- "fullMetaSearch": false,
- "hide": false,
- "includeNullMetadata": true,
- "instant": false,
- "interval": "$__rate_interval",
- "legendFormat": "{{greptime_pod}}-{{scheme}}-{{operation}}-qps",
- "range": true,
- "refId": "B",
- "useBackend": false
- }
- ],
- "title": "requests_duration_seconds",
- "type": "timeseries"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "points",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
+ "matcher": {
+ "id": "byValue",
+ "options": {
+ "op": "gte",
+ "reducer": "allIsZero",
+ "value": 0
}
},
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "s"
- },
- "overrides": [
- {
- "matcher": {
- "id": "byRegexp",
- "options": ".*?-bytes"
- },
- "properties": [
- {
- "id": "custom.drawStyle",
- "value": "line"
- },
- {
- "id": "custom.axisPlacement",
- "value": "right"
- },
- {
- "id": "unit",
- "value": "bytes"
- },
- {
- "id": "custom.stacking",
- "value": {
- "group": "A",
- "mode": "none"
- }
- }
- ]
- },
- {
- "matcher": {
- "id": "byValue",
- "options": {
- "op": "gte",
- "reducer": "allIsZero",
- "value": 0
- }
- },
- "properties": [
- {
- "id": "custom.hideFrom",
- "value": {
- "legend": true,
- "tooltip": true,
- "viz": true
- }
+ "properties": [
+ {
+ "id": "custom.hideFrom",
+ "value": {
+ "legend": true,
+ "tooltip": true,
+ "viz": true
}
- ]
- }
- ]
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 8,
+ "x": 8,
+ "y": 102
+ },
+ "id": 177,
+ "options": {
+ "legend": {
+ "calcs": [
+ "last"
+ ],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
},
- "gridPos": {
- "h": 8,
- "w": 8,
- "x": 8,
- "y": 86
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "exemplar": false,
+ "expr": "histogram_quantile(0.99, sum by(instance, le, scheme, operation) (rate(opendal_bytes_total_bucket{instance=~\"$instance\"}[$__rate_interval])))",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{instance}}-{{scheme}}-{{operation}}-p99",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
},
- "id": 177,
- "options": {
- "legend": {
- "calcs": [
- "last"
- ],
- "displayMode": "table",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "multi",
- "sort": "none"
- }
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "exemplar": false,
+ "expr": "sum by(instance, scheme, operation) (rate(opendal_bytes_total_count{instance=~\"$instance\"}[$__rate_interval]))",
+ "fullMetaSearch": false,
+ "hide": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "interval": "$__rate_interval",
+ "legendFormat": "{{instance}}-{{scheme}}-{{operation}}-bytes",
+ "range": true,
+ "refId": "B",
+ "useBackend": false
+ }
+ ],
+ "title": "Bytes total",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "points",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "s"
},
- "targets": [
+ "overrides": [
{
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "disableTextWrap": false,
- "editorMode": "builder",
- "exemplar": false,
- "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, scheme, operation) (rate(opendal_bytes_total_bucket{greptime_pod=~\"$greptime_pod\"}[$__rate_interval])))",
- "fullMetaSearch": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{greptime_pod}}-{{scheme}}-{{operation}}-p99",
- "range": true,
- "refId": "A",
- "useBackend": false
+ "matcher": {
+ "id": "byRegexp",
+ "options": ".*?-qps"
+ },
+ "properties": [
+ {
+ "id": "custom.drawStyle",
+ "value": "line"
+ },
+ {
+ "id": "custom.axisPlacement",
+ "value": "right"
+ },
+ {
+ "id": "unit",
+ "value": "ops"
+ },
+ {
+ "id": "custom.stacking",
+ "value": {
+ "group": "A",
+ "mode": "none"
+ }
+ }
+ ]
},
{
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "disableTextWrap": false,
- "editorMode": "builder",
- "exemplar": false,
- "expr": "sum by(greptime_pod, scheme, operation) (rate(opendal_bytes_total_count{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))",
- "fullMetaSearch": false,
- "hide": false,
- "includeNullMetadata": true,
- "instant": false,
- "interval": "$__rate_interval",
- "legendFormat": "{{greptime_pod}}-{{scheme}}-{{operation}}-bytes",
- "range": true,
- "refId": "B",
- "useBackend": false
- }
- ],
- "title": "bytes_total",
- "type": "timeseries"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisBorderShow": false,
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "points",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "insertNulls": false,
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
+ "matcher": {
+ "id": "byValue",
+ "options": {
+ "op": "gte",
+ "reducer": "allIsZero",
+ "value": 0
}
},
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
+ "properties": [
+ {
+ "id": "custom.hideFrom",
+ "value": {
+ "legend": true,
+ "tooltip": true,
+ "viz": true
}
- ]
- },
- "unit": "s"
- },
- "overrides": [
- {
- "matcher": {
- "id": "byRegexp",
- "options": ".*?-qps"
- },
- "properties": [
- {
- "id": "custom.drawStyle",
- "value": "line"
- },
- {
- "id": "custom.axisPlacement",
- "value": "right"
- },
- {
- "id": "unit",
- "value": "ops"
- },
- {
- "id": "custom.stacking",
- "value": {
- "group": "A",
- "mode": "none"
- }
- }
- ]
- },
- {
- "matcher": {
- "id": "byValue",
- "options": {
- "op": "gte",
- "reducer": "allIsZero",
- "value": 0
- }
- },
- "properties": [
- {
- "id": "custom.hideFrom",
- "value": {
- "legend": true,
- "tooltip": true,
- "viz": true
- }
- }
- ]
- }
- ]
- },
- "gridPos": {
- "h": 8,
- "w": 8,
- "x": 16,
- "y": 86
- },
- "id": 176,
- "options": {
- "legend": {
- "calcs": [
- "last"
- ],
- "displayMode": "table",
- "placement": "bottom",
- "showLegend": true
- },
- "tooltip": {
- "mode": "multi",
- "sort": "none"
- }
- },
- "targets": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${datasource}"
- },
- "disableTextWrap": false,
- "editorMode": "builder",
- "exemplar": false,
- "expr": "sum by(greptime_pod, scheme, operation) (rate(opendal_requests_total{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))",
- "fullMetaSearch": false,
- "hide": false,
- "includeNullMetadata": true,
- "instant": false,
- "legendFormat": "{{greptime_pod}}-{{scheme}}-{{operation}}-qps",
- "range": true,
- "refId": "B",
- "useBackend": false
+ }
+ ]
}
- ],
- "title": "requests_total",
- "type": "timeseries"
- }
- ],
- "refresh": "5s",
- "schemaVersion": 39,
- "tags": [],
- "templating": {
- "list": [
- {
- "current": {},
- "hide": 0,
- "includeAll": false,
- "multi": false,
- "name": "datasource",
- "options": [],
- "query": "prometheus",
- "queryValue": "",
- "refresh": 1,
- "regex": "",
- "skipUrlSync": false,
- "type": "datasource"
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 8,
+ "x": 16,
+ "y": 102
+ },
+ "id": 176,
+ "options": {
+ "legend": {
+ "calcs": [
+ "last"
+ ],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true
},
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "targets": [
{
- "current": {},
"datasource": {
"type": "prometheus",
- "uid": "${datasource}"
- },
- "definition": "label_values(greptime_pod)",
- "hide": 0,
- "includeAll": true,
- "multi": false,
- "name": "greptime_pod",
- "options": [],
- "query": {
- "qryType": 1,
- "query": "label_values(greptime_pod)",
- "refId": "PrometheusVariableQueryEditor-VariableQuery"
- },
- "refresh": 1,
- "regex": "",
- "skipUrlSync": false,
- "sort": 0,
- "type": "query"
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "exemplar": false,
+ "expr": "sum by(instance, scheme, operation) (rate(opendal_requests_total{instance=~\"$instance\"}[$__rate_interval]))",
+ "fullMetaSearch": false,
+ "hide": false,
+ "includeNullMetadata": true,
+ "instant": false,
+ "legendFormat": "{{instance}}-{{scheme}}-{{operation}}-qps",
+ "range": true,
+ "refId": "B",
+ "useBackend": false
}
- ]
- },
- "time": {
- "from": "now-1h",
- "to": "now"
- },
- "timepicker": {},
- "timezone": "",
- "title": "GreptimeDB-Cluster",
- "uid": "ea35efe5-918e-44fa-9743-e9aa1a340a3f",
- "version": 9,
- "weekStart": ""
- }
\ No newline at end of file
+ ],
+ "title": "Requests total",
+ "type": "timeseries"
+ }
+ ],
+ "refresh": "5s",
+ "schemaVersion": 38,
+ "style": "dark",
+ "tags": [],
+ "templating": {
+ "list": [
+ {
+ "current": {
+ "selected": false,
+ "text": "prometheus-1",
+ "value": "prometheus-1"
+ },
+ "hide": 0,
+ "includeAll": false,
+ "multi": false,
+ "name": "datasource",
+ "options": [],
+ "query": "prometheus",
+ "queryValue": "",
+ "refresh": 1,
+ "regex": "",
+ "skipUrlSync": false,
+ "type": "datasource"
+ },
+ {
+ "current": {},
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "definition": "label_values(instance)",
+ "hide": 0,
+ "includeAll": true,
+ "multi": false,
+ "name": "instance",
+ "options": [],
+ "query": {
+ "query": "label_values(instance)",
+ "refId": "PrometheusVariableQueryEditor-VariableQuery"
+ },
+ "refresh": 1,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 0,
+ "type": "query"
+ }
+ ]
+ },
+ "time": {
+ "from": "now-30m",
+ "to": "now"
+ },
+ "timepicker": {},
+ "timezone": "",
+ "title": "GreptimeDB-Cluster",
+ "uid": "ea35efe5-918e-44fa-9743-e9aa1a340a3f",
+ "version": 11,
+ "weekStart": ""
+}
diff --git a/grafana/greptimedb.json b/grafana/greptimedb.json
index 142832ca0cac..1096ca92edf7 100644
--- a/grafana/greptimedb.json
+++ b/grafana/greptimedb.json
@@ -145,9 +145,7 @@
"countRows": false,
"enablePagination": false,
"fields": [],
- "reducer": [
- "sum"
- ],
+ "reducer": ["sum"],
"show": false
},
"showHeader": true,
@@ -225,9 +223,7 @@
"justifyMode": "center",
"orientation": "auto",
"reduceOptions": {
- "calcs": [
- "lastNotNull"
- ],
+ "calcs": ["lastNotNull"],
"fields": "",
"values": false
},
@@ -302,9 +298,7 @@
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
- "calcs": [
- "lastNotNull"
- ],
+ "calcs": ["lastNotNull"],
"fields": "",
"values": false
},
@@ -432,7 +426,7 @@
}
]
},
- "unit": "percent"
+ "unit": "percentunit"
},
"overrides": []
},
@@ -449,9 +443,7 @@
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
- "calcs": [
- "lastNotNull"
- ],
+ "calcs": ["lastNotNull"],
"fields": "",
"values": false
},
@@ -467,9 +459,9 @@
"uid": "${DS_PROMETHEUS-1}"
},
"editorMode": "code",
- "expr": "irate(process_cpu_seconds_total[2s]) * 100",
+ "expr": "irate(process_cpu_seconds_total[1m])",
"instant": false,
- "legendFormat": "__auto",
+ "legendFormat": "{{instance}}",
"range": true,
"refId": "A"
}
@@ -516,9 +508,7 @@
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
- "calcs": [
- "lastNotNull"
- ],
+ "calcs": ["lastNotNull"],
"fields": "",
"values": false
},
@@ -536,7 +526,7 @@
"editorMode": "code",
"expr": "process_resident_memory_bytes",
"instant": false,
- "legendFormat": "__auto",
+ "legendFormat": "{{instance}}",
"range": true,
"refId": "A"
}
@@ -1064,7 +1054,7 @@
"x": 0,
"y": 18
},
- "id": 13,
+ "id": 38,
"interval": "1s",
"options": {
"legend": {
@@ -1086,15 +1076,99 @@
},
"disableTextWrap": false,
"editorMode": "code",
- "expr": "histogram_quantile(0.95, sum by(le, db) (rate(greptime_servers_grpc_requests_elapsed_bucket[$__rate_interval])))",
+ "expr": "sum by(path) (rate(greptime_servers_http_requests_total[$__rate_interval]))",
"fullMetaSearch": false,
"includeNullMetadata": false,
"instant": false,
- "legendFormat": "{{db}}-p95",
+ "legendFormat": "__auto",
"range": true,
"refId": "A",
"useBackend": false
+ }
+ ],
+ "title": "HTTP request rate",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 18
+ },
+ "id": 36,
+ "interval": "1s",
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
},
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
{
"datasource": {
"type": "prometheus",
@@ -1102,18 +1176,17 @@
},
"disableTextWrap": false,
"editorMode": "code",
- "expr": "histogram_quantile(0.99, sum by(le, db) (rate(greptime_servers_grpc_requests_elapsed_bucket[$__rate_interval])))",
+ "expr": "sum by(db) (rate(greptime_servers_http_logs_ingestion_counter[$__rate_interval]))",
"fullMetaSearch": false,
- "hide": false,
"includeNullMetadata": false,
"instant": false,
- "legendFormat": "{{db}}-p99",
+ "legendFormat": "{{db}}",
"range": true,
- "refId": "B",
+ "refId": "A",
"useBackend": false
}
],
- "title": "gRPC insert elapsed",
+ "title": "Logs ingest rate (number of lines)",
"type": "timeseries"
},
{
@@ -1178,10 +1251,10 @@
"gridPos": {
"h": 7,
"w": 12,
- "x": 12,
- "y": 18
+ "x": 0,
+ "y": 25
},
- "id": 36,
+ "id": 13,
"interval": "1s",
"options": {
"legend": {
@@ -1203,17 +1276,34 @@
},
"disableTextWrap": false,
"editorMode": "code",
- "expr": "sum by(db) (rate(greptime_servers_http_logs_ingestion_counter[$__rate_interval]))",
+ "expr": "histogram_quantile(0.95, sum by(le, db) (rate(greptime_servers_grpc_requests_elapsed_bucket[$__rate_interval])))",
"fullMetaSearch": false,
"includeNullMetadata": false,
"instant": false,
- "legendFormat": "{{db}}",
+ "legendFormat": "{{db}}-p95",
"range": true,
"refId": "A",
"useBackend": false
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "disableTextWrap": false,
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.99, sum by(le, db) (rate(greptime_servers_grpc_requests_elapsed_bucket[$__rate_interval])))",
+ "fullMetaSearch": false,
+ "hide": false,
+ "includeNullMetadata": false,
+ "instant": false,
+ "legendFormat": "{{db}}-p99",
+ "range": true,
+ "refId": "B",
+ "useBackend": false
}
],
- "title": "Logs ingest rate (number of lines)",
+ "title": "gRPC insert elapsed",
"type": "timeseries"
},
{
@@ -1222,7 +1312,7 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 25
+ "y": 32
},
"id": 25,
"panels": [],
@@ -1276,8 +1366,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -1292,7 +1381,7 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 26
+ "y": 33
},
"id": 1,
"interval": "1s",
@@ -1343,7 +1432,7 @@
"useBackend": false
}
],
- "title": "handle request elapsed",
+ "title": "Handle request elapsed",
"type": "timeseries"
},
{
@@ -1393,8 +1482,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -1409,7 +1497,7 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 26
+ "y": 33
},
"id": 7,
"interval": "1s",
@@ -1433,7 +1521,7 @@
},
"disableTextWrap": false,
"editorMode": "code",
- "expr": "rate(greptime_mito_write_rows_total[2s])",
+ "expr": "rate(greptime_mito_write_rows_total[$__rate_interval])",
"fullMetaSearch": false,
"includeNullMetadata": false,
"instant": false,
@@ -1443,7 +1531,7 @@
"useBackend": false
}
],
- "title": "write rows total",
+ "title": "Write rows total",
"type": "timeseries"
},
{
@@ -1493,8 +1581,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -1509,7 +1596,7 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 33
+ "y": 40
},
"id": 3,
"interval": "1s",
@@ -1593,8 +1680,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -1609,7 +1695,7 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 33
+ "y": 40
},
"id": 11,
"interval": "1s",
@@ -1660,7 +1746,7 @@
"useBackend": false
}
],
- "title": "write stage duration",
+ "title": "Write stage duration",
"type": "timeseries"
},
{
@@ -1710,8 +1796,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -1726,7 +1811,7 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 40
+ "y": 47
},
"id": 15,
"interval": "1s",
@@ -1773,7 +1858,7 @@
"refId": "B"
}
],
- "title": "flush / compaction duration",
+ "title": "Flush / compaction duration",
"type": "timeseries"
},
{
@@ -1823,8 +1908,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -1839,9 +1923,9 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 40
+ "y": 47
},
- "id": 9,
+ "id": 39,
"interval": "1s",
"options": {
"legend": {
@@ -1863,11 +1947,11 @@
},
"disableTextWrap": false,
"editorMode": "code",
- "expr": "idelta(greptime_mito_flush_requests_total[2s])",
+ "expr": "idelta(greptime_mito_compaction_stage_elapsed_count{stage=\"merge\"}[5m])",
"fullMetaSearch": false,
"includeNullMetadata": false,
"instant": false,
- "legendFormat": "flush-{{reason}}",
+ "legendFormat": "compaction-{{stage}}",
"range": true,
"refId": "A",
"useBackend": false
@@ -1878,30 +1962,17 @@
"uid": "${DS_PROMETHEUS-1}"
},
"editorMode": "code",
- "expr": "idelta(greptime_mito_compaction_requests_total[2s])",
+ "expr": "histogram_quantile(0.95, sum by(le, type) (rate(greptime_mito_flush_elapsed_bucket[$__rate_interval])))",
"hide": false,
"instant": false,
- "legendFormat": "compaction",
+ "legendFormat": "flush-{{type}}",
"range": true,
"refId": "B"
}
],
- "title": "flush / compaction count",
+ "title": "Flush / compaction count",
"type": "timeseries"
},
- {
- "collapsed": false,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 47
- },
- "id": 26,
- "panels": [],
- "title": "Metric Engine",
- "type": "row"
- },
{
"datasource": {
"type": "prometheus",
@@ -1949,15 +2020,15 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
"value": 80
}
]
- }
+ },
+ "unit": "bytes"
},
"overrides": []
},
@@ -1965,9 +2036,9 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 48
+ "y": 54
},
- "id": 22,
+ "id": 9,
"interval": "1s",
"options": {
"legend": {
@@ -1989,11 +2060,11 @@
},
"disableTextWrap": false,
"editorMode": "code",
- "expr": "histogram_quantile(0.95, sum by(le, operation) (rate(greptime_metric_engine_mito_op_elapsed_bucket[$__rate_interval])))",
+ "expr": "greptime_mito_write_buffer_bytes",
"fullMetaSearch": false,
"includeNullMetadata": false,
"instant": false,
- "legendFormat": "p95-{{operation}}",
+ "legendFormat": "{{instance}}",
"range": true,
"refId": "A",
"useBackend": false
@@ -2004,15 +2075,15 @@
"uid": "${DS_PROMETHEUS-1}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.99, sum by(le, operation) (rate(greptime_metric_engine_mito_op_elapsed_bucket[$__rate_interval])))",
+ "expr": "greptime_mito_memtable_dict_bytes",
"hide": false,
"instant": false,
- "legendFormat": "p99-{{operation}}",
+ "legendFormat": "{{instance}}",
"range": true,
"refId": "B"
}
],
- "title": "metric engine to mito R/W duration",
+ "title": "Write buffer size",
"type": "timeseries"
},
{
@@ -2062,8 +2133,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -2078,9 +2148,9 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 48
+ "y": 54
},
- "id": 33,
+ "id": 40,
"interval": "1s",
"options": {
"legend": {
@@ -2102,45 +2172,19 @@
},
"disableTextWrap": false,
"editorMode": "code",
- "expr": "histogram_quantile(0.95, sum by(le, operation) (rate(greptime_metric_engine_mito_ddl_bucket[$__rate_interval])))",
+ "expr": "greptime_mito_write_stall_total",
"fullMetaSearch": false,
"includeNullMetadata": false,
"instant": false,
- "legendFormat": "p95-{{operation}}",
+ "legendFormat": "{{instance}}-worker-{{worker}}",
"range": true,
"refId": "A",
"useBackend": false
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS-1}"
- },
- "editorMode": "code",
- "expr": "histogram_quantile(0.99, sum by(le, operation) (rate(greptime_metric_engine_mito_ddl_bucket[$__rate_interval])))",
- "hide": false,
- "instant": false,
- "legendFormat": "p99-{{label_name}}",
- "range": true,
- "refId": "B"
}
],
- "title": "metric engine to mito DDL duration",
+ "title": "Write stall count",
"type": "timeseries"
},
- {
- "collapsed": false,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 55
- },
- "id": 21,
- "panels": [],
- "title": "Storage Components",
- "type": "row"
- },
{
"datasource": {
"type": "prometheus",
@@ -2188,8 +2232,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -2205,9 +2248,9 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 56
+ "y": 61
},
- "id": 18,
+ "id": 41,
"interval": "1s",
"options": {
"legend": {
@@ -2229,18 +2272,17 @@
},
"disableTextWrap": false,
"editorMode": "code",
- "expr": "rate(opendal_bytes_total_sum[2s])",
+ "expr": "greptime_mito_cache_bytes",
"fullMetaSearch": false,
- "hide": false,
"includeNullMetadata": false,
"instant": false,
- "legendFormat": "{{scheme}}-{{operation}}",
+ "legendFormat": "{{instance}}-{{type}}",
"range": true,
- "refId": "B",
+ "refId": "A",
"useBackend": false
}
],
- "title": "OpenDAL traffic",
+ "title": "Cache size",
"type": "timeseries"
},
{
@@ -2290,15 +2332,15 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
"value": 80
}
]
- }
+ },
+ "unit": "percentunit"
},
"overrides": []
},
@@ -2306,9 +2348,10 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 56
+ "y": 61
},
- "id": 2,
+ "id": 42,
+ "interval": "1s",
"options": {
"legend": {
"calcs": [],
@@ -2328,20 +2371,33 @@
"uid": "${DS_PROMETHEUS-1}"
},
"disableTextWrap": false,
- "editorMode": "builder",
- "expr": "histogram_quantile(0.95, sum by(le, operation, schema) (rate(opendal_requests_duration_seconds_bucket[$__rate_interval])))",
+ "editorMode": "code",
+ "expr": "sum(increase(greptime_mito_cache_hit[$__rate_interval])) by (instance, type) / (sum(increase(greptime_mito_cache_miss[$__rate_interval])) by (instance, type) + sum(increase(greptime_mito_cache_hit[$__rate_interval])) by (instance, type))",
"fullMetaSearch": false,
"includeNullMetadata": false,
"instant": false,
- "legendFormat": "__auto",
+ "legendFormat": "{{instance}}-{{type}}",
"range": true,
"refId": "A",
"useBackend": false
}
],
- "title": "OpenDAL operation duration",
+ "title": "Cache hit",
"type": "timeseries"
},
+ {
+ "collapsed": false,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 68
+ },
+ "id": 26,
+ "panels": [],
+ "title": "Metric Engine",
+ "type": "row"
+ },
{
"datasource": {
"type": "prometheus",
@@ -2389,8 +2445,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -2405,9 +2460,9 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 63
+ "y": 69
},
- "id": 10,
+ "id": 22,
"interval": "1s",
"options": {
"legend": {
@@ -2427,13 +2482,16 @@
"type": "prometheus",
"uid": "${DS_PROMETHEUS-1}"
},
+ "disableTextWrap": false,
"editorMode": "code",
- "expr": "histogram_quantile(0.99, sum by(le,logstore,optype) (rate(greptime_logstore_op_elapsed_bucket[$__rate_interval])))",
- "hide": false,
+ "expr": "histogram_quantile(0.95, sum by(le, operation) (rate(greptime_metric_engine_mito_op_elapsed_bucket[$__rate_interval])))",
+ "fullMetaSearch": false,
+ "includeNullMetadata": false,
"instant": false,
- "legendFormat": "{{logstore}}-{{optype}}-p95",
+ "legendFormat": "p95-{{operation}}",
"range": true,
- "refId": "Log Store P95"
+ "refId": "A",
+ "useBackend": false
},
{
"datasource": {
@@ -2441,7 +2499,440 @@
"uid": "${DS_PROMETHEUS-1}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.99, sum by(le,logstore,optype) (rate(greptime_logstore_op_elapsed_bucket[$__rate_interval])))",
+ "expr": "histogram_quantile(0.99, sum by(le, operation) (rate(greptime_metric_engine_mito_op_elapsed_bucket[$__rate_interval])))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "p99-{{operation}}",
+ "range": true,
+ "refId": "B"
+ }
+ ],
+ "title": "Metric engine to mito R/W duration",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 69
+ },
+ "id": 33,
+ "interval": "1s",
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "disableTextWrap": false,
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.95, sum by(le, operation) (rate(greptime_metric_engine_mito_ddl_bucket[$__rate_interval])))",
+ "fullMetaSearch": false,
+ "includeNullMetadata": false,
+ "instant": false,
+ "legendFormat": "p95-{{operation}}",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.99, sum by(le, operation) (rate(greptime_metric_engine_mito_ddl_bucket[$__rate_interval])))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "p99-{{label_name}}",
+ "range": true,
+ "refId": "B"
+ }
+ ],
+ "title": "Metric engine to mito DDL duration",
+ "type": "timeseries"
+ },
+ {
+ "collapsed": false,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 76
+ },
+ "id": 21,
+ "panels": [],
+ "title": "Storage Components",
+ "type": "row"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 77
+ },
+ "id": 18,
+ "interval": "1s",
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "disableTextWrap": false,
+ "editorMode": "code",
+ "expr": "rate(opendal_bytes_total_sum[$__rate_interval])",
+ "fullMetaSearch": false,
+ "hide": false,
+ "includeNullMetadata": false,
+ "instant": false,
+ "legendFormat": "{{scheme}}-{{operation}}",
+ "range": true,
+ "refId": "B",
+ "useBackend": false
+ }
+ ],
+ "title": "OpenDAL traffic",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 77
+ },
+ "id": 2,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "expr": "histogram_quantile(0.95, sum by(le, operation, schema) (rate(opendal_requests_duration_seconds_bucket[$__rate_interval])))",
+ "fullMetaSearch": false,
+ "includeNullMetadata": false,
+ "instant": false,
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
+ }
+ ],
+ "title": "OpenDAL operation duration",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 84
+ },
+ "id": 10,
+ "interval": "1s",
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.99, sum by(le,logstore,optype) (rate(greptime_logstore_op_elapsed_bucket[$__rate_interval])))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "{{logstore}}-{{optype}}-p95",
+ "range": true,
+ "refId": "Log Store P95"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.99, sum by(le,logstore,optype) (rate(greptime_logstore_op_elapsed_bucket[$__rate_interval])))",
"hide": false,
"instant": false,
"legendFormat": "{{logstore}}-{{optype}}-p99",
@@ -2499,8 +2990,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -2516,7 +3006,7 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 63
+ "y": 84
},
"id": 19,
"interval": "1s",
@@ -2651,8 +3141,103 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 91
+ },
+ "id": 37,
+ "interval": "1s",
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.99, sum by(le, type, node) (rate(raft_engine_sync_log_duration_seconds_bucket[$__rate_interval])))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "{{node}}-{{type}}-p99",
+ "range": true,
+ "refId": "Log Store P95"
+ }
+ ],
+ "title": "WAL sync duration seconds",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS-1}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
},
{
"color": "red",
@@ -2668,7 +3253,7 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 70
+ "y": 91
},
"id": 12,
"interval": "1s",
@@ -2732,7 +3317,7 @@
"refId": "B"
}
],
- "title": "wal write size",
+ "title": "WAL write size",
"type": "timeseries"
}
],
@@ -2750,6 +3335,6 @@
"timezone": "",
"title": "GreptimeDB",
"uid": "e7097237-669b-4f8d-b751-13067afbfb68",
- "version": 14,
+ "version": 15,
"weekStart": ""
-}
\ No newline at end of file
+}
| docs | Adds more panels to grafana dashboards (#4540) |
86378ad93a9d1eb345214d0218a33a993bb6673c | 2023-07-21 12:25:23 | Sunray Ley | docs: fix incorrect document URL (#2012) | false |
diff --git a/README.md b/README.md
index 5e93a8038e48..301edeefe7f0 100644
--- a/README.md
+++ b/README.md
@@ -102,7 +102,7 @@ Please see [the online document site](https://docs.greptime.com/getting-started/
Read the [complete getting started guide](https://docs.greptime.com/getting-started/overview#connect) on our [official document site](https://docs.greptime.com/).
-To write and query data, GreptimeDB is compatible with multiple [protocols and clients](https://docs.greptime.com/user-guide/client/overview).
+To write and query data, GreptimeDB is compatible with multiple [protocols and clients](https://docs.greptime.com/user-guide/clients/overview).
## Resources
| docs | fix incorrect document URL (#2012) |
858dae7b23e08a67cd7a260ecb9c203e536a1c44 | 2025-02-14 13:19:26 | Zhenchi | feat: add stager nofitier to collect metrics (#5530) | false |
diff --git a/src/index/src/fulltext_index/tests.rs b/src/index/src/fulltext_index/tests.rs
index 90449f9dde32..f0c064957518 100644
--- a/src/index/src/fulltext_index/tests.rs
+++ b/src/index/src/fulltext_index/tests.rs
@@ -30,7 +30,7 @@ async fn new_bounded_stager(prefix: &str) -> (TempDir, Arc<BoundedStager>) {
let path = staging_dir.path().to_path_buf();
(
staging_dir,
- Arc::new(BoundedStager::new(path, 102400).await.unwrap()),
+ Arc::new(BoundedStager::new(path, 102400, None).await.unwrap()),
)
}
diff --git a/src/mito2/src/sst/index/puffin_manager.rs b/src/mito2/src/sst/index/puffin_manager.rs
index 498ed254f2f0..4b8b6a3dcb0f 100644
--- a/src/mito2/src/sst/index/puffin_manager.rs
+++ b/src/mito2/src/sst/index/puffin_manager.rs
@@ -63,7 +63,7 @@ impl PuffinManagerFactory {
write_buffer_size: Option<usize>,
) -> Result<Self> {
let staging_dir = aux_path.as_ref().join(STAGING_DIR);
- let stager = BoundedStager::new(staging_dir, staging_capacity)
+ let stager = BoundedStager::new(staging_dir, staging_capacity, None)
.await
.context(PuffinInitStagerSnafu)?;
Ok(Self {
diff --git a/src/puffin/src/puffin_manager/stager.rs b/src/puffin/src/puffin_manager/stager.rs
index 6e1581cddbb5..5dc2cb31fc63 100644
--- a/src/puffin/src/puffin_manager/stager.rs
+++ b/src/puffin/src/puffin_manager/stager.rs
@@ -15,6 +15,7 @@
mod bounded_stager;
use std::path::PathBuf;
+use std::time::Duration;
use async_trait::async_trait;
pub use bounded_stager::{BoundedStager, FsBlobGuard, FsDirGuard};
@@ -88,3 +89,40 @@ pub trait Stager: Send + Sync {
dir_size: u64,
) -> Result<()>;
}
+
+/// `StagerNotifier` provides a way to notify the caller of the staging events.
+pub trait StagerNotifier: Send + Sync {
+ /// Notifies the caller that a cache hit occurred.
+ /// `size` is the size of the content that was hit in the cache.
+ fn on_cache_hit(&self, size: u64);
+
+ /// Notifies the caller that a cache miss occurred.
+ /// `size` is the size of the content that was missed in the cache.
+ fn on_cache_miss(&self, size: u64);
+
+ /// Notifies the caller that a blob or directory was inserted into the cache.
+ /// `size` is the size of the content that was inserted into the cache.
+ ///
+ /// Note: not only cache misses will trigger this event, but recoveries and recycles as well.
+ fn on_cache_insert(&self, size: u64);
+
+ /// Notifies the caller that a directory was inserted into the cache.
+ /// `duration` is the time it took to load the directory.
+ fn on_load_dir(&self, duration: Duration);
+
+ /// Notifies the caller that a blob was inserted into the cache.
+ /// `duration` is the time it took to load the blob.
+ fn on_load_blob(&self, duration: Duration);
+
+ /// Notifies the caller that a blob or directory was evicted from the cache.
+ /// `size` is the size of the content that was evicted from the cache.
+ fn on_cache_evict(&self, size: u64);
+
+ /// Notifies the caller that a blob or directory was dropped to the recycle bin.
+ /// `size` is the size of the content that was dropped to the recycle bin.
+ fn on_recycle_insert(&self, size: u64);
+
+ /// Notifies the caller that the recycle bin was cleared.
+ /// `size` is the size of the content that was cleared from the recycle bin.
+ fn on_recycle_clear(&self, size: u64);
+}
diff --git a/src/puffin/src/puffin_manager/stager/bounded_stager.rs b/src/puffin/src/puffin_manager/stager/bounded_stager.rs
index fd5fc74876e7..46ea2548ad92 100644
--- a/src/puffin/src/puffin_manager/stager/bounded_stager.rs
+++ b/src/puffin/src/puffin_manager/stager/bounded_stager.rs
@@ -15,7 +15,7 @@
use std::collections::HashMap;
use std::path::PathBuf;
use std::sync::Arc;
-use std::time::Duration;
+use std::time::{Duration, Instant};
use async_trait::async_trait;
use async_walkdir::{Filtering, WalkDir};
@@ -34,6 +34,7 @@ use tokio::sync::mpsc::error::TrySendError;
use tokio::sync::mpsc::{Receiver, Sender};
use tokio_util::compat::TokioAsyncWriteCompatExt;
+use super::StagerNotifier;
use crate::error::{
CacheGetSnafu, CreateSnafu, MetadataSnafu, OpenSnafu, ReadSnafu, RemoveSnafu, RenameSnafu,
Result, WalkDirSnafu,
@@ -67,10 +68,17 @@ pub struct BoundedStager {
/// 3. sent the delete task to the delete queue on drop
/// 4. background routine removes the file or directory
delete_queue: Sender<DeleteTask>,
+
+ /// Notifier for the stager.
+ notifier: Option<Arc<dyn StagerNotifier>>,
}
impl BoundedStager {
- pub async fn new(base_dir: PathBuf, capacity: u64) -> Result<Self> {
+ pub async fn new(
+ base_dir: PathBuf,
+ capacity: u64,
+ notifier: Option<Arc<dyn StagerNotifier>>,
+ ) -> Result<Self> {
tokio::fs::create_dir_all(&base_dir)
.await
.context(CreateSnafu)?;
@@ -78,12 +86,17 @@ impl BoundedStager {
let recycle_bin = Cache::builder().time_to_live(RECYCLE_BIN_TTL).build();
let recycle_bin_cloned = recycle_bin.clone();
+ let notifier_cloned = notifier.clone();
let cache = Cache::builder()
.max_capacity(capacity)
.weigher(|_: &String, v: &CacheValue| v.weight())
.eviction_policy(EvictionPolicy::lru())
.async_eviction_listener(move |k, v, _| {
let recycle_bin = recycle_bin_cloned.clone();
+ if let Some(notifier) = notifier_cloned.as_ref() {
+ notifier.on_cache_evict(v.size());
+ notifier.on_recycle_insert(v.size());
+ }
async move {
recycle_bin.insert(k.as_str().to_string(), v).await;
}
@@ -92,13 +105,18 @@ impl BoundedStager {
.build();
let (delete_queue, rx) = tokio::sync::mpsc::channel(DELETE_QUEUE_SIZE);
- common_runtime::global_runtime().spawn(Self::delete_routine(rx, recycle_bin.clone()));
-
+ let notifier_cloned = notifier.clone();
+ common_runtime::global_runtime().spawn(Self::delete_routine(
+ rx,
+ recycle_bin.clone(),
+ notifier_cloned,
+ ));
let stager = Self {
cache,
base_dir,
delete_queue,
recycle_bin,
+ notifier,
};
stager.recover().await?;
@@ -120,29 +138,48 @@ impl Stager for BoundedStager {
) -> Result<Self::Blob> {
let cache_key = Self::encode_cache_key(puffin_file_name, key);
+ let mut miss = false;
let v = self
.cache
- .try_get_with(cache_key.clone(), async {
+ .try_get_with_by_ref(&cache_key, async {
if let Some(v) = self.recycle_bin.remove(&cache_key).await {
+ if let Some(notifier) = self.notifier.as_ref() {
+ let size = v.size();
+ notifier.on_cache_insert(size);
+ notifier.on_recycle_clear(size);
+ }
return Ok(v);
}
+ miss = true;
+ let timer = Instant::now();
let file_name = format!("{}.{}", cache_key, uuid::Uuid::new_v4());
let path = self.base_dir.join(&file_name);
let size = Self::write_blob(&path, init_fn).await?;
-
+ if let Some(notifier) = self.notifier.as_ref() {
+ notifier.on_cache_insert(size);
+ notifier.on_load_blob(timer.elapsed());
+ }
let guard = Arc::new(FsBlobGuard {
path,
delete_queue: self.delete_queue.clone(),
+ size,
});
- Ok(CacheValue::File { guard, size })
+ Ok(CacheValue::File(guard))
})
.await
.context(CacheGetSnafu)?;
+ if let Some(notifier) = self.notifier.as_ref() {
+ if miss {
+ notifier.on_cache_miss(v.size());
+ } else {
+ notifier.on_cache_hit(v.size());
+ }
+ }
match v {
- CacheValue::File { guard, .. } => Ok(guard),
+ CacheValue::File(guard) => Ok(guard),
_ => unreachable!(),
}
}
@@ -155,29 +192,48 @@ impl Stager for BoundedStager {
) -> Result<Self::Dir> {
let cache_key = Self::encode_cache_key(puffin_file_name, key);
+ let mut miss = false;
let v = self
.cache
- .try_get_with(cache_key.clone(), async {
+ .try_get_with_by_ref(&cache_key, async {
if let Some(v) = self.recycle_bin.remove(&cache_key).await {
+ if let Some(notifier) = self.notifier.as_ref() {
+ let size = v.size();
+ notifier.on_cache_insert(size);
+ notifier.on_recycle_clear(size);
+ }
return Ok(v);
}
+ miss = true;
+ let timer = Instant::now();
let dir_name = format!("{}.{}", cache_key, uuid::Uuid::new_v4());
let path = self.base_dir.join(&dir_name);
let size = Self::write_dir(&path, init_fn).await?;
-
+ if let Some(notifier) = self.notifier.as_ref() {
+ notifier.on_cache_insert(size);
+ notifier.on_load_dir(timer.elapsed());
+ }
let guard = Arc::new(FsDirGuard {
path,
+ size,
delete_queue: self.delete_queue.clone(),
});
- Ok(CacheValue::Dir { guard, size })
+ Ok(CacheValue::Dir(guard))
})
.await
.context(CacheGetSnafu)?;
+ if let Some(notifier) = self.notifier.as_ref() {
+ if miss {
+ notifier.on_cache_miss(v.size());
+ } else {
+ notifier.on_cache_hit(v.size());
+ }
+ }
match v {
- CacheValue::Dir { guard, .. } => Ok(guard),
+ CacheValue::Dir(guard) => Ok(guard),
_ => unreachable!(),
}
}
@@ -194,6 +250,11 @@ impl Stager for BoundedStager {
self.cache
.try_get_with(cache_key.clone(), async move {
if let Some(v) = self.recycle_bin.remove(&cache_key).await {
+ if let Some(notifier) = self.notifier.as_ref() {
+ let size = v.size();
+ notifier.on_cache_insert(size);
+ notifier.on_recycle_clear(size);
+ }
return Ok(v);
}
@@ -201,12 +262,15 @@ impl Stager for BoundedStager {
let path = self.base_dir.join(&dir_name);
fs::rename(&dir_path, &path).await.context(RenameSnafu)?;
-
+ if let Some(notifier) = self.notifier.as_ref() {
+ notifier.on_cache_insert(size);
+ }
let guard = Arc::new(FsDirGuard {
path,
+ size,
delete_queue: self.delete_queue.clone(),
});
- Ok(CacheValue::Dir { guard, size })
+ Ok(CacheValue::Dir(guard))
})
.await
.map(|_| ())
@@ -308,32 +372,35 @@ impl BoundedStager {
if meta.is_dir() {
let size = Self::get_dir_size(&path).await?;
- let v = CacheValue::Dir {
- guard: Arc::new(FsDirGuard {
- path,
- delete_queue: self.delete_queue.clone(),
- }),
+ let v = CacheValue::Dir(Arc::new(FsDirGuard {
+ path,
size,
- };
+ delete_queue: self.delete_queue.clone(),
+ }));
// A duplicate dir will be moved to the delete queue.
let _dup_dir = elems.insert(key, v);
} else {
- let v = CacheValue::File {
- guard: Arc::new(FsBlobGuard {
- path,
- delete_queue: self.delete_queue.clone(),
- }),
- size: meta.len(),
- };
+ let size = meta.len();
+ let v = CacheValue::File(Arc::new(FsBlobGuard {
+ path,
+ size,
+ delete_queue: self.delete_queue.clone(),
+ }));
// A duplicate file will be moved to the delete queue.
let _dup_file = elems.insert(key, v);
}
}
}
+ let mut size = 0;
for (key, value) in elems {
+ size += value.size();
self.cache.insert(key, value).await;
}
+ if let Some(notifier) = self.notifier.as_ref() {
+ notifier.on_cache_insert(size);
+ }
+
self.cache.run_pending_tasks().await;
Ok(())
@@ -360,11 +427,12 @@ impl BoundedStager {
async fn delete_routine(
mut receiver: Receiver<DeleteTask>,
recycle_bin: Cache<String, CacheValue>,
+ notifier: Option<Arc<dyn StagerNotifier>>,
) {
loop {
match tokio::time::timeout(RECYCLE_BIN_TTL, receiver.recv()).await {
Ok(Some(task)) => match task {
- DeleteTask::File(path) => {
+ DeleteTask::File(path, size) => {
if let Err(err) = fs::remove_file(&path).await {
if err.kind() == std::io::ErrorKind::NotFound {
continue;
@@ -372,8 +440,13 @@ impl BoundedStager {
warn!(err; "Failed to remove the file.");
}
+
+ if let Some(notifier) = notifier.as_ref() {
+ notifier.on_recycle_clear(size);
+ }
}
- DeleteTask::Dir(path) => {
+
+ DeleteTask::Dir(path, size) => {
let deleted_path = path.with_extension(DELETED_EXTENSION);
if let Err(err) = fs::rename(&path, &deleted_path).await {
if err.kind() == std::io::ErrorKind::NotFound {
@@ -390,6 +463,9 @@ impl BoundedStager {
if let Err(err) = fs::remove_dir_all(&deleted_path).await {
warn!(err; "Failed to remove the dangling directory.");
}
+ if let Some(notifier) = notifier.as_ref() {
+ notifier.on_recycle_clear(size);
+ }
}
DeleteTask::Terminate => {
break;
@@ -415,15 +491,15 @@ impl Drop for BoundedStager {
#[derive(Debug, Clone)]
enum CacheValue {
- File { guard: Arc<FsBlobGuard>, size: u64 },
- Dir { guard: Arc<FsDirGuard>, size: u64 },
+ File(Arc<FsBlobGuard>),
+ Dir(Arc<FsDirGuard>),
}
impl CacheValue {
fn size(&self) -> u64 {
match self {
- CacheValue::File { size, .. } => *size,
- CacheValue::Dir { size, .. } => *size,
+ CacheValue::File(guard) => guard.size,
+ CacheValue::Dir(guard) => guard.size,
}
}
@@ -433,8 +509,8 @@ impl CacheValue {
}
enum DeleteTask {
- File(PathBuf),
- Dir(PathBuf),
+ File(PathBuf, u64),
+ Dir(PathBuf, u64),
Terminate,
}
@@ -443,6 +519,7 @@ enum DeleteTask {
#[derive(Debug)]
pub struct FsBlobGuard {
path: PathBuf,
+ size: u64,
delete_queue: Sender<DeleteTask>,
}
@@ -459,7 +536,7 @@ impl Drop for FsBlobGuard {
fn drop(&mut self) {
if let Err(err) = self
.delete_queue
- .try_send(DeleteTask::File(self.path.clone()))
+ .try_send(DeleteTask::File(self.path.clone(), self.size))
{
if matches!(err, TrySendError::Closed(_)) {
return;
@@ -474,6 +551,7 @@ impl Drop for FsBlobGuard {
#[derive(Debug)]
pub struct FsDirGuard {
path: PathBuf,
+ size: u64,
delete_queue: Sender<DeleteTask>,
}
@@ -487,7 +565,7 @@ impl Drop for FsDirGuard {
fn drop(&mut self) {
if let Err(err) = self
.delete_queue
- .try_send(DeleteTask::Dir(self.path.clone()))
+ .try_send(DeleteTask::Dir(self.path.clone(), self.size))
{
if matches!(err, TrySendError::Closed(_)) {
return;
@@ -526,7 +604,7 @@ impl BoundedStager {
let cache_key = Self::encode_cache_key(puffin_file_name, key);
let value = self.cache.get(&cache_key).await.unwrap();
let path = match &value {
- CacheValue::File { guard, .. } => &guard.path,
+ CacheValue::File(guard) => &guard.path,
_ => panic!("Expected a file, but got a directory."),
};
fs::File::open(path).await.unwrap()
@@ -536,7 +614,7 @@ impl BoundedStager {
let cache_key = Self::encode_cache_key(puffin_file_name, key);
let value = self.cache.get(&cache_key).await.unwrap();
let path = match &value {
- CacheValue::Dir { guard, .. } => &guard.path,
+ CacheValue::Dir(guard) => &guard.path,
_ => panic!("Expected a directory, but got a file."),
};
path.clone()
@@ -550,6 +628,8 @@ impl BoundedStager {
#[cfg(test)]
mod tests {
+ use std::sync::atomic::AtomicU64;
+
use common_base::range_read::RangeReader;
use common_test_util::temp_dir::create_temp_dir;
use futures::AsyncWriteExt;
@@ -559,12 +639,124 @@ mod tests {
use crate::error::BlobNotFoundSnafu;
use crate::puffin_manager::stager::Stager;
+ struct MockNotifier {
+ cache_insert_size: AtomicU64,
+ cache_evict_size: AtomicU64,
+ cache_hit_count: AtomicU64,
+ cache_hit_size: AtomicU64,
+ cache_miss_count: AtomicU64,
+ cache_miss_size: AtomicU64,
+ recycle_insert_size: AtomicU64,
+ recycle_clear_size: AtomicU64,
+ }
+
+ #[derive(Debug, PartialEq, Eq)]
+ struct Stats {
+ cache_insert_size: u64,
+ cache_evict_size: u64,
+ cache_hit_count: u64,
+ cache_hit_size: u64,
+ cache_miss_count: u64,
+ cache_miss_size: u64,
+ recycle_insert_size: u64,
+ recycle_clear_size: u64,
+ }
+
+ impl MockNotifier {
+ fn build() -> Arc<MockNotifier> {
+ Arc::new(Self {
+ cache_insert_size: AtomicU64::new(0),
+ cache_evict_size: AtomicU64::new(0),
+ cache_hit_count: AtomicU64::new(0),
+ cache_hit_size: AtomicU64::new(0),
+ cache_miss_count: AtomicU64::new(0),
+ cache_miss_size: AtomicU64::new(0),
+ recycle_insert_size: AtomicU64::new(0),
+ recycle_clear_size: AtomicU64::new(0),
+ })
+ }
+
+ fn stats(&self) -> Stats {
+ Stats {
+ cache_insert_size: self
+ .cache_insert_size
+ .load(std::sync::atomic::Ordering::Relaxed),
+ cache_evict_size: self
+ .cache_evict_size
+ .load(std::sync::atomic::Ordering::Relaxed),
+ cache_hit_count: self
+ .cache_hit_count
+ .load(std::sync::atomic::Ordering::Relaxed),
+ cache_hit_size: self
+ .cache_hit_size
+ .load(std::sync::atomic::Ordering::Relaxed),
+ cache_miss_count: self
+ .cache_miss_count
+ .load(std::sync::atomic::Ordering::Relaxed),
+ cache_miss_size: self
+ .cache_miss_size
+ .load(std::sync::atomic::Ordering::Relaxed),
+ recycle_insert_size: self
+ .recycle_insert_size
+ .load(std::sync::atomic::Ordering::Relaxed),
+ recycle_clear_size: self
+ .recycle_clear_size
+ .load(std::sync::atomic::Ordering::Relaxed),
+ }
+ }
+ }
+
+ impl StagerNotifier for MockNotifier {
+ fn on_cache_insert(&self, size: u64) {
+ self.cache_insert_size
+ .fetch_add(size, std::sync::atomic::Ordering::Relaxed);
+ }
+
+ fn on_cache_evict(&self, size: u64) {
+ self.cache_evict_size
+ .fetch_add(size, std::sync::atomic::Ordering::Relaxed);
+ }
+
+ fn on_cache_hit(&self, size: u64) {
+ self.cache_hit_count
+ .fetch_add(1, std::sync::atomic::Ordering::Relaxed);
+ self.cache_hit_size
+ .fetch_add(size, std::sync::atomic::Ordering::Relaxed);
+ }
+
+ fn on_cache_miss(&self, size: u64) {
+ self.cache_miss_count
+ .fetch_add(1, std::sync::atomic::Ordering::Relaxed);
+ self.cache_miss_size
+ .fetch_add(size, std::sync::atomic::Ordering::Relaxed);
+ }
+
+ fn on_recycle_insert(&self, size: u64) {
+ self.recycle_insert_size
+ .fetch_add(size, std::sync::atomic::Ordering::Relaxed);
+ }
+
+ fn on_recycle_clear(&self, size: u64) {
+ self.recycle_clear_size
+ .fetch_add(size, std::sync::atomic::Ordering::Relaxed);
+ }
+
+ fn on_load_blob(&self, _duration: Duration) {}
+
+ fn on_load_dir(&self, _duration: Duration) {}
+ }
+
#[tokio::test]
async fn test_get_blob() {
let tempdir = create_temp_dir("test_get_blob_");
- let stager = BoundedStager::new(tempdir.path().to_path_buf(), u64::MAX)
- .await
- .unwrap();
+ let notifier = MockNotifier::build();
+ let stager = BoundedStager::new(
+ tempdir.path().to_path_buf(),
+ u64::MAX,
+ Some(notifier.clone()),
+ )
+ .await
+ .unwrap();
let puffin_file_name = "test_get_blob";
let key = "key";
@@ -593,14 +785,34 @@ mod tests {
let mut buf = Vec::new();
file.read_to_end(&mut buf).await.unwrap();
assert_eq!(buf, b"hello world");
+
+ let stats = notifier.stats();
+ assert_eq!(
+ stats,
+ Stats {
+ cache_insert_size: 11,
+ cache_evict_size: 0,
+ cache_hit_count: 0,
+ cache_hit_size: 0,
+ cache_miss_count: 1,
+ cache_miss_size: 11,
+ recycle_insert_size: 0,
+ recycle_clear_size: 0,
+ }
+ );
}
#[tokio::test]
async fn test_get_dir() {
let tempdir = create_temp_dir("test_get_dir_");
- let stager = BoundedStager::new(tempdir.path().to_path_buf(), u64::MAX)
- .await
- .unwrap();
+ let notifier = MockNotifier::build();
+ let stager = BoundedStager::new(
+ tempdir.path().to_path_buf(),
+ u64::MAX,
+ Some(notifier.clone()),
+ )
+ .await
+ .unwrap();
let files_in_dir = [
("file_a", "Hello, world!".as_bytes()),
@@ -618,11 +830,13 @@ mod tests {
key,
Box::new(|writer_provider| {
Box::pin(async move {
+ let mut size = 0;
for (rel_path, content) in &files_in_dir {
+ size += content.len();
let mut writer = writer_provider.writer(rel_path).await.unwrap();
writer.write_all(content).await.unwrap();
}
- Ok(0)
+ Ok(size as _)
})
}),
)
@@ -645,14 +859,34 @@ mod tests {
file.read_to_end(&mut buf).await.unwrap();
assert_eq!(buf, *content);
}
+
+ let stats = notifier.stats();
+ assert_eq!(
+ stats,
+ Stats {
+ cache_insert_size: 70,
+ cache_evict_size: 0,
+ cache_hit_count: 0,
+ cache_hit_size: 0,
+ cache_miss_count: 1,
+ cache_miss_size: 70,
+ recycle_insert_size: 0,
+ recycle_clear_size: 0
+ }
+ );
}
#[tokio::test]
async fn test_recover() {
let tempdir = create_temp_dir("test_recover_");
- let stager = BoundedStager::new(tempdir.path().to_path_buf(), u64::MAX)
- .await
- .unwrap();
+ let notifier = MockNotifier::build();
+ let stager = BoundedStager::new(
+ tempdir.path().to_path_buf(),
+ u64::MAX,
+ Some(notifier.clone()),
+ )
+ .await
+ .unwrap();
// initialize stager
let puffin_file_name = "test_recover";
@@ -687,11 +921,13 @@ mod tests {
dir_key,
Box::new(|writer_provider| {
Box::pin(async move {
+ let mut size = 0;
for (rel_path, content) in &files_in_dir {
+ size += content.len();
let mut writer = writer_provider.writer(rel_path).await.unwrap();
writer.write_all(content).await.unwrap();
}
- Ok(0)
+ Ok(size as _)
})
}),
)
@@ -701,7 +937,7 @@ mod tests {
// recover stager
drop(stager);
- let stager = BoundedStager::new(tempdir.path().to_path_buf(), u64::MAX)
+ let stager = BoundedStager::new(tempdir.path().to_path_buf(), u64::MAX, None)
.await
.unwrap();
@@ -736,14 +972,31 @@ mod tests {
file.read_to_end(&mut buf).await.unwrap();
assert_eq!(buf, *content);
}
+
+ let stats = notifier.stats();
+ assert_eq!(
+ stats,
+ Stats {
+ cache_insert_size: 81,
+ cache_evict_size: 0,
+ cache_hit_count: 0,
+ cache_hit_size: 0,
+ cache_miss_count: 2,
+ cache_miss_size: 81,
+ recycle_insert_size: 0,
+ recycle_clear_size: 0
+ }
+ );
}
#[tokio::test]
async fn test_eviction() {
let tempdir = create_temp_dir("test_eviction_");
+ let notifier = MockNotifier::build();
let stager = BoundedStager::new(
tempdir.path().to_path_buf(),
1, /* extremely small size */
+ Some(notifier.clone()),
)
.await
.unwrap();
@@ -773,6 +1026,21 @@ mod tests {
stager.cache.run_pending_tasks().await;
assert!(!stager.in_cache(puffin_file_name, blob_key));
+ let stats = notifier.stats();
+ assert_eq!(
+ stats,
+ Stats {
+ cache_insert_size: 11,
+ cache_evict_size: 11,
+ cache_hit_count: 0,
+ cache_hit_size: 0,
+ cache_miss_count: 1,
+ cache_miss_size: 11,
+ recycle_insert_size: 11,
+ recycle_clear_size: 0
+ }
+ );
+
let m = reader.metadata().await.unwrap();
let buf = reader.read(0..m.content_length).await.unwrap();
assert_eq!(&*buf, b"Hello world");
@@ -794,6 +1062,21 @@ mod tests {
stager.cache.run_pending_tasks().await;
assert!(!stager.in_cache(puffin_file_name, blob_key));
+ let stats = notifier.stats();
+ assert_eq!(
+ stats,
+ Stats {
+ cache_insert_size: 22,
+ cache_evict_size: 22,
+ cache_hit_count: 1,
+ cache_hit_size: 11,
+ cache_miss_count: 1,
+ cache_miss_size: 11,
+ recycle_insert_size: 22,
+ recycle_clear_size: 11
+ }
+ );
+
let m = reader.metadata().await.unwrap();
let buf = reader.read(0..m.content_length).await.unwrap();
assert_eq!(&*buf, b"Hello world");
@@ -839,6 +1122,21 @@ mod tests {
stager.cache.run_pending_tasks().await;
assert!(!stager.in_cache(puffin_file_name, dir_key));
+ let stats = notifier.stats();
+ assert_eq!(
+ stats,
+ Stats {
+ cache_insert_size: 92,
+ cache_evict_size: 92,
+ cache_hit_count: 1,
+ cache_hit_size: 11,
+ cache_miss_count: 2,
+ cache_miss_size: 81,
+ recycle_insert_size: 92,
+ recycle_clear_size: 11
+ }
+ );
+
// Second time to get the directory
let guard_1 = stager
.get_dir(
@@ -861,6 +1159,21 @@ mod tests {
stager.cache.run_pending_tasks().await;
assert!(!stager.in_cache(puffin_file_name, dir_key));
+ let stats = notifier.stats();
+ assert_eq!(
+ stats,
+ Stats {
+ cache_insert_size: 162,
+ cache_evict_size: 162,
+ cache_hit_count: 2,
+ cache_hit_size: 81,
+ cache_miss_count: 2,
+ cache_miss_size: 81,
+ recycle_insert_size: 162,
+ recycle_clear_size: 81
+ }
+ );
+
// Third time to get the directory and all guards are dropped
drop(guard_0);
drop(guard_1);
@@ -884,12 +1197,27 @@ mod tests {
file.read_to_end(&mut buf).await.unwrap();
assert_eq!(buf, *content);
}
+
+ let stats = notifier.stats();
+ assert_eq!(
+ stats,
+ Stats {
+ cache_insert_size: 232,
+ cache_evict_size: 232,
+ cache_hit_count: 3,
+ cache_hit_size: 151,
+ cache_miss_count: 2,
+ cache_miss_size: 81,
+ recycle_insert_size: 232,
+ recycle_clear_size: 151
+ }
+ );
}
#[tokio::test]
async fn test_get_blob_concurrency_on_fail() {
let tempdir = create_temp_dir("test_get_blob_concurrency_on_fail_");
- let stager = BoundedStager::new(tempdir.path().to_path_buf(), u64::MAX)
+ let stager = BoundedStager::new(tempdir.path().to_path_buf(), u64::MAX, None)
.await
.unwrap();
@@ -926,7 +1254,7 @@ mod tests {
#[tokio::test]
async fn test_get_dir_concurrency_on_fail() {
let tempdir = create_temp_dir("test_get_dir_concurrency_on_fail_");
- let stager = BoundedStager::new(tempdir.path().to_path_buf(), u64::MAX)
+ let stager = BoundedStager::new(tempdir.path().to_path_buf(), u64::MAX, None)
.await
.unwrap();
diff --git a/src/puffin/src/puffin_manager/tests.rs b/src/puffin/src/puffin_manager/tests.rs
index 23756aec646c..b4d3450fd584 100644
--- a/src/puffin/src/puffin_manager/tests.rs
+++ b/src/puffin/src/puffin_manager/tests.rs
@@ -32,7 +32,7 @@ async fn new_bounded_stager(prefix: &str, capacity: u64) -> (TempDir, Arc<Bounde
let path = staging_dir.path().to_path_buf();
(
staging_dir,
- Arc::new(BoundedStager::new(path, capacity).await.unwrap()),
+ Arc::new(BoundedStager::new(path, capacity, None).await.unwrap()),
)
}
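
As a side note to the patch above, here is a minimal sketch (not part of the commit) of a caller-defined `StagerNotifier` that derives a cache hit ratio from the same hooks the test's `MockNotifier` exercises. The `HitRatioNotifier` type, its fields, and the import path are illustrative assumptions.

// Hypothetical notifier; the import path is assumed from the test's
// `crate::puffin_manager::stager` module seen above.
use std::sync::atomic::{AtomicU64, Ordering};
use std::time::Duration;

use puffin::puffin_manager::stager::StagerNotifier;

#[derive(Default)]
struct HitRatioNotifier {
    hits: AtomicU64,
    misses: AtomicU64,
}

impl HitRatioNotifier {
    /// Fraction of lookups served from the staging cache.
    fn hit_ratio(&self) -> f64 {
        let hits = self.hits.load(Ordering::Relaxed) as f64;
        let misses = self.misses.load(Ordering::Relaxed) as f64;
        if hits + misses == 0.0 {
            0.0
        } else {
            hits / (hits + misses)
        }
    }
}

impl StagerNotifier for HitRatioNotifier {
    fn on_cache_insert(&self, _size: u64) {}
    fn on_cache_evict(&self, _size: u64) {}
    fn on_cache_hit(&self, _size: u64) {
        self.hits.fetch_add(1, Ordering::Relaxed);
    }
    fn on_cache_miss(&self, _size: u64) {
        self.misses.fetch_add(1, Ordering::Relaxed);
    }
    fn on_recycle_insert(&self, _size: u64) {}
    fn on_recycle_clear(&self, _size: u64) {}
    fn on_load_blob(&self, _duration: Duration) {}
    fn on_load_dir(&self, _duration: Duration) {}
}

As in the tests above, such a notifier would be handed to `BoundedStager::new(path, capacity, Some(Arc::new(HitRatioNotifier::default())))`; passing `None` opts out of metrics collection entirely.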
|
feat
|
add stager notifier to collect metrics (#5530)
|
58c37f588dcb75239364f007d60ffbeaebb3f93c
|
2023-01-13 11:57:31
|
Ruihang Xia
|
feat: plan some aggregate expr in PromQL planner (#870)
| false
|
diff --git a/src/promql/src/error.rs b/src/promql/src/error.rs
index 4a055729f0d2..d95d500f7ef9 100644
--- a/src/promql/src/error.rs
+++ b/src/promql/src/error.rs
@@ -15,7 +15,7 @@
use std::any::Any;
use common_error::prelude::*;
-use promql_parser::parser::Expr as PromExpr;
+use promql_parser::parser::{Expr as PromExpr, TokenType};
#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
@@ -23,6 +23,12 @@ pub enum Error {
#[snafu(display("Unsupported expr type: {}", name))]
UnsupportedExpr { name: String, backtrace: Backtrace },
+ #[snafu(display("Unexpected token: {}", token))]
+ UnexpectedToken {
+ token: TokenType,
+ backtrace: Backtrace,
+ },
+
#[snafu(display("Internal error during build DataFusion plan, error: {}", source))]
DataFusionPlanning {
source: datafusion::error::DataFusionError,
@@ -41,6 +47,12 @@ pub enum Error {
#[snafu(display("Cannot find value columns in table {}", table))]
ValueNotFound { table: String, backtrace: Backtrace },
+ #[snafu(display("Cannot find label in table {}, source: {}", table, source))]
+ LabelNotFound {
+ table: String,
+ source: datafusion::error::DataFusionError,
+ },
+
#[snafu(display("Cannot find the table {}", table))]
TableNotFound {
table: String,
@@ -90,7 +102,9 @@ impl ErrorExt for Error {
TimeIndexNotFound { .. }
| ValueNotFound { .. }
| UnsupportedExpr { .. }
+ | UnexpectedToken { .. }
| MultipleVector { .. }
+ | LabelNotFound { .. }
| ExpectExpr { .. } => StatusCode::InvalidArguments,
UnknownTable { .. }
| TableNotFound { .. }
diff --git a/src/promql/src/planner.rs b/src/promql/src/planner.rs
index 2799c6d2bacc..b580d6cd77cd 100644
--- a/src/promql/src/planner.rs
+++ b/src/promql/src/planner.rs
@@ -12,13 +12,16 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::collections::HashSet;
use std::str::FromStr;
use std::sync::Arc;
use std::time::{Duration, UNIX_EPOCH};
use datafusion::datasource::DefaultTableSource;
+use datafusion::logical_expr::expr::AggregateFunction;
use datafusion::logical_expr::{
- BinaryExpr, BuiltinScalarFunction, Extension, Filter, LogicalPlan, LogicalPlanBuilder, Operator,
+ AggregateFunction as AggregateFunctionEnum, BinaryExpr, BuiltinScalarFunction, Extension,
+ Filter, LogicalPlan, LogicalPlanBuilder, Operator,
};
use datafusion::optimizer::utils;
use datafusion::prelude::{Column, Expr as DfExpr};
@@ -26,16 +29,16 @@ use datafusion::scalar::ScalarValue;
use datafusion::sql::planner::ContextProvider;
use datafusion::sql::TableReference;
use promql_parser::label::{MatchOp, Matchers, METRIC_NAME};
-use promql_parser::parser::{EvalStmt, Expr as PromExpr, Function};
+use promql_parser::parser::{token, EvalStmt, Expr as PromExpr, Function, TokenType};
use snafu::{OptionExt, ResultExt};
use table::table::adapter::DfTableProviderAdapter;
use crate::error::{
- DataFusionPlanningSnafu, ExpectExprSnafu, MultipleVectorSnafu, Result, TableNameNotFoundSnafu,
- TableNotFoundSnafu, TimeIndexNotFoundSnafu, UnknownTableSnafu, UnsupportedExprSnafu,
- ValueNotFoundSnafu,
+ DataFusionPlanningSnafu, ExpectExprSnafu, LabelNotFoundSnafu, MultipleVectorSnafu, Result,
+ TableNameNotFoundSnafu, TableNotFoundSnafu, TimeIndexNotFoundSnafu, UnexpectedPlanExprSnafu,
+ UnexpectedTokenSnafu, UnknownTableSnafu, UnsupportedExprSnafu, ValueNotFoundSnafu,
};
-use crate::extension_plan::{InstantManipulate, Millisecond, SeriesNormalize};
+use crate::extension_plan::{InstantManipulate, Millisecond, RangeManipulate, SeriesNormalize};
#[derive(Default, Debug, Clone)]
struct PromPlannerContext {
@@ -79,10 +82,55 @@ impl<S: ContextProvider> PromPlanner<S> {
pub fn prom_expr_to_plan(&mut self, prom_expr: PromExpr) -> Result<LogicalPlan> {
let res = match &prom_expr {
- PromExpr::AggregateExpr { .. } => UnsupportedExprSnafu {
- name: "Prom Aggregate",
+ PromExpr::AggregateExpr {
+ op,
+ expr,
+ // TODO(ruihang): support param
+ param: _param,
+ grouping,
+ without,
+ } => {
+ let input = self.prom_expr_to_plan(*expr.clone())?;
+
+ // calculate columns to group by
+ let schema = input.schema();
+ let group_columns_indices = grouping
+ .iter()
+ .map(|label| {
+ schema
+ .index_of_column_by_name(None, label)
+ .with_context(|_| LabelNotFoundSnafu {
+ table: self.ctx.table_name.clone().unwrap(),
+ })
+ })
+ .collect::<Result<HashSet<_>>>()?;
+ let value_names = self.ctx.value_columns.iter().collect::<HashSet<_>>();
+ let group_exprs = schema
+ .fields()
+ .iter()
+ .enumerate()
+ .filter_map(|(i, field)| {
+ if *without != group_columns_indices.contains(&i)
+ && Some(field.name()) != self.ctx.time_index_column.as_ref()
+ && !value_names.contains(&field.name())
+ {
+ Some(DfExpr::Column(Column::from(field.name())))
+ } else {
+ None
+ }
+ })
+ .collect::<Vec<_>>();
+
+ // convert op and value columns to aggregate exprs
+ let aggr_exprs = self.create_aggregate_exprs(*op)?;
+
+ // create plan
+ LogicalPlanBuilder::from(input)
+ .aggregate(group_exprs, aggr_exprs)
+ .context(DataFusionPlanningSnafu)?
+ .build()
+ .context(DataFusionPlanningSnafu)?
}
- .fail()?,
PromExpr::UnaryExpr { .. } => UnsupportedExprSnafu {
name: "Prom Unary Expr",
}
@@ -136,10 +184,47 @@ impl<S: ContextProvider> PromPlanner<S> {
node: Arc::new(manipulate),
})
}
- PromExpr::MatrixSelector { .. } => UnsupportedExprSnafu {
- name: "Prom Matrix Selector",
+ PromExpr::MatrixSelector {
+ vector_selector,
+ range,
+ } => {
+ let normalize = match &**vector_selector {
+ PromExpr::VectorSelector {
+ name: _,
+ offset,
+ start_or_end: _,
+ label_matchers,
+ } => {
+ let matchers = self.preprocess_label_matchers(label_matchers)?;
+ self.setup_context()?;
+ self.selector_to_series_normalize_plan(*offset, matchers)?
+ }
+ _ => UnexpectedPlanExprSnafu {
+ desc: format!(
+ "MatrixSelector must contain a VectorSelector, but found {vector_selector:?}",
+ ),
+ }
+ .fail()?,
+ };
+ let manipulate = RangeManipulate::new(
+ self.ctx.start,
+ self.ctx.end,
+ self.ctx.interval,
+ // TODO(ruihang): convert via Timestamp datatypes to support different time units
+ range.as_millis() as _,
+ self.ctx
+ .time_index_column
+ .clone()
+ .expect("time index should be set in `setup_context`"),
+ self.ctx.value_columns.clone(),
+ normalize,
+ )
+ .context(DataFusionPlanningSnafu)?;
+
+ LogicalPlan::Extension(Extension {
+ node: Arc::new(manipulate),
+ })
}
- .fail()?,
PromExpr::Call { func, args } => {
let args = self.create_function_args(args)?;
let input =
@@ -387,6 +472,41 @@ impl<S: ContextProvider> PromPlanner<S> {
table: self.ctx.table_name.clone().unwrap(),
})
}
+
+ fn create_aggregate_exprs(&self, op: TokenType) -> Result<Vec<DfExpr>> {
+ let aggr = match op {
+ token::T_SUM => AggregateFunctionEnum::Sum,
+ token::T_AVG => AggregateFunctionEnum::Avg,
+ token::T_COUNT => AggregateFunctionEnum::Count,
+ token::T_MIN => AggregateFunctionEnum::Min,
+ token::T_MAX => AggregateFunctionEnum::Max,
+ token::T_GROUP => AggregateFunctionEnum::Grouping,
+ token::T_STDDEV => AggregateFunctionEnum::Stddev,
+ token::T_STDVAR => AggregateFunctionEnum::Variance,
+ token::T_TOPK | token::T_BOTTOMK | token::T_COUNT_VALUES | token::T_QUANTILE => {
+ UnsupportedExprSnafu {
+ name: op.to_string(),
+ }
+ .fail()?
+ }
+ _ => UnexpectedTokenSnafu { token: op }.fail()?,
+ };
+
+ let exprs = self
+ .ctx
+ .value_columns
+ .iter()
+ .map(|col| {
+ DfExpr::AggregateFunction(AggregateFunction {
+ fun: aggr.clone(),
+ args: vec![DfExpr::Column(Column::from_name(col))],
+ distinct: false,
+ filter: None,
+ })
+ })
+ .collect();
+ Ok(exprs)
+ }
}
#[derive(Default, Debug)]
@@ -474,19 +594,19 @@ mod test {
}
// {
- // input: `abs(some_metric{foo!="bar"})`,
- // expected: &Call{
- // Func: MustGetFunction("abs"),
- // Args: Expressions{
- // &VectorSelector{
- // Name: "some_metric",
- // LabelMatchers: []*labels.Matcher{
- // MustLabelMatcher(labels.MatchNotEqual, "foo", "bar"),
- // MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "some_metric"),
- // },
- // },
- // },
- // },
+ // input: `abs(some_metric{foo!="bar"})`,
+ // expected: &Call{
+ // Func: MustGetFunction("abs"),
+ // Args: Expressions{
+ // &VectorSelector{
+ // Name: "some_metric",
+ // LabelMatchers: []*labels.Matcher{
+ // MustLabelMatcher(labels.MatchNotEqual, "foo", "bar"),
+ // MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "some_metric"),
+ // },
+ // },
+ // },
+ // },
// },
async fn do_single_instant_function_call(fn_name: &'static str, plan_name: &str) {
let prom_expr = PromExpr::Call {
@@ -689,4 +809,161 @@ mod test {
async fn single_rad() {
do_single_instant_function_call("rad", "").await;
}
+
+ // {
+ // input: "avg by (foo)(some_metric)",
+ // expected: &AggregateExpr{
+ // Op: AVG,
+ // Expr: &VectorSelector{
+ // Name: "some_metric",
+ // LabelMatchers: []*labels.Matcher{
+ // MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "some_metric"),
+ // },
+ // PosRange: PositionRange{
+ // Start: 13,
+ // End: 24,
+ // },
+ // },
+ // Grouping: []string{"foo"},
+ // PosRange: PositionRange{
+ // Start: 0,
+ // End: 25,
+ // },
+ // },
+ // },
+ async fn do_aggregate_expr_plan(op: TokenType, name: &str) {
+ let prom_expr = PromExpr::AggregateExpr {
+ op,
+ expr: Box::new(PromExpr::VectorSelector {
+ name: Some("some_metric".to_owned()),
+ offset: None,
+ start_or_end: None,
+ label_matchers: Matchers {
+ matchers: vec![
+ Matcher {
+ op: MatchOp::NotEqual,
+ name: "tag_0".to_string(),
+ value: "bar".to_string(),
+ },
+ Matcher {
+ op: MatchOp::Equal,
+ name: METRIC_NAME.to_string(),
+ value: "some_metric".to_string(),
+ },
+ ],
+ },
+ }),
+ param: Box::new(PromExpr::empty_vector_selector()),
+ grouping: vec![String::from("tag_1")],
+ without: false,
+ };
+ let mut eval_stmt = EvalStmt {
+ expr: prom_expr,
+ start: UNIX_EPOCH,
+ end: UNIX_EPOCH
+ .checked_add(Duration::from_secs(100_000))
+ .unwrap(),
+ interval: Duration::from_secs(5),
+ lookback_delta: Duration::from_secs(1),
+ };
+
+ // test group by
+ let context_provider = build_test_context_provider("some_metric".to_string(), 2, 2).await;
+ let plan = PromPlanner::stmt_to_plan(eval_stmt.clone(), context_provider).unwrap();
+ let expected_no_without = String::from(
+ "Aggregate: groupBy=[[some_metric.tag_1]], aggr=[[TEMPLATE(some_metric.field_0), TEMPLATE(some_metric.field_1)]] [tag_1:Utf8, TEMPLATE(some_metric.field_0):Float64;N, TEMPLATE(some_metric.field_1):Float64;N]\
+ \n PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[timestamp] [tag_0:Utf8, tag_1:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N]\
+ \n PromSeriesNormalize: offset=[0], time index=[timestamp] [tag_0:Utf8, tag_1:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N]\
+ \n Filter: tag_0 != Utf8(\"bar\") AND timestamp >= TimestampMillisecond(0, None) AND timestamp <= TimestampMillisecond(100000000, None) [tag_0:Utf8, tag_1:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N]\
+ \n TableScan: some_metric, unsupported_filters=[tag_0 != Utf8(\"bar\"), timestamp >= TimestampMillisecond(0, None), timestamp <= TimestampMillisecond(100000000, None)] [tag_0:Utf8, tag_1:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N]")
+ .replace("TEMPLATE", name);
+ assert_eq!(
+ plan.display_indent_schema().to_string(),
+ expected_no_without
+ );
+
+ // test group without
+ if let PromExpr::AggregateExpr { without, .. } = &mut eval_stmt.expr {
+ *without = true;
+ }
+ let context_provider = build_test_context_provider("some_metric".to_string(), 2, 2).await;
+ let plan = PromPlanner::stmt_to_plan(eval_stmt, context_provider).unwrap();
+ let expected_without = String::from(
+ "Aggregate: groupBy=[[some_metric.tag_0]], aggr=[[TEMPLATE(some_metric.field_0), TEMPLATE(some_metric.field_1)]] [tag_0:Utf8, TEMPLATE(some_metric.field_0):Float64;N, TEMPLATE(some_metric.field_1):Float64;N]\
+ \n PromInstantManipulate: range=[0..100000000], lookback=[1000], interval=[5000], time index=[timestamp] [tag_0:Utf8, tag_1:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N]\
+ \n PromSeriesNormalize: offset=[0], time index=[timestamp] [tag_0:Utf8, tag_1:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N]\
+ \n Filter: tag_0 != Utf8(\"bar\") AND timestamp >= TimestampMillisecond(0, None) AND timestamp <= TimestampMillisecond(100000000, None) [tag_0:Utf8, tag_1:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N]\
+ \n TableScan: some_metric, unsupported_filters=[tag_0 != Utf8(\"bar\"), timestamp >= TimestampMillisecond(0, None), timestamp <= TimestampMillisecond(100000000, None)] [tag_0:Utf8, tag_1:Utf8, timestamp:Timestamp(Millisecond, None), field_0:Float64;N, field_1:Float64;N]")
+ .replace("TEMPLATE", name);
+ assert_eq!(plan.display_indent_schema().to_string(), expected_without);
+ }
+
+ #[tokio::test]
+ async fn aggregate_sum() {
+ do_aggregate_expr_plan(token::T_SUM, "SUM").await;
+ }
+
+ #[tokio::test]
+ async fn aggregate_avg() {
+ do_aggregate_expr_plan(token::T_AVG, "AVG").await;
+ }
+
+ #[tokio::test]
+ #[should_panic] // output type doesn't match
+ async fn aggregate_count() {
+ do_aggregate_expr_plan(token::T_COUNT, "COUNT").await;
+ }
+
+ #[tokio::test]
+ async fn aggregate_min() {
+ do_aggregate_expr_plan(token::T_MIN, "MIN").await;
+ }
+
+ #[tokio::test]
+ async fn aggregate_max() {
+ do_aggregate_expr_plan(token::T_MAX, "MAX").await;
+ }
+
+ #[tokio::test]
+ #[should_panic] // output type doesn't match
+ async fn aggregate_group() {
+ do_aggregate_expr_plan(token::T_GROUP, "GROUPING").await;
+ }
+
+ #[tokio::test]
+ async fn aggregate_stddev() {
+ do_aggregate_expr_plan(token::T_STDDEV, "STDDEV").await;
+ }
+
+ #[tokio::test]
+ #[should_panic] // schema doesn't match
+ async fn aggregate_stdvar() {
+ do_aggregate_expr_plan(token::T_STDVAR, "STDVAR").await;
+ }
+
+ #[tokio::test]
+ #[should_panic]
+ async fn aggregate_top_k() {
+ do_aggregate_expr_plan(token::T_TOPK, "").await;
+ }
+
+ #[tokio::test]
+ #[should_panic]
+ async fn aggregate_bottom_k() {
+ do_aggregate_expr_plan(token::T_BOTTOMK, "").await;
+ }
+
+ #[tokio::test]
+ #[should_panic]
+ async fn aggregate_count_values() {
+ do_aggregate_expr_plan(token::T_COUNT_VALUES, "").await;
+ }
+
+ #[tokio::test]
+ #[should_panic]
+ async fn aggregate_quantile() {
+ do_aggregate_expr_plan(token::T_QUANTILE, "").await;
+ }
+
+ // TODO(ruihang): add range fn tests once exprs are ready.
}
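
The `*without != group_columns_indices.contains(&i)` filter in the aggregate branch above encodes PromQL's `by`/`without` rule: `by` keeps exactly the listed labels as group-by columns, `without` keeps every other label, and the time index and value columns are never grouped on. A standalone sketch of the same selection (not from the patch; plain strings stand in for schema fields):

fn group_columns<'a>(
    all_columns: &'a [&'a str],
    listed: &[&str],
    without: bool,
    time_index: &str,
    value_columns: &[&str],
) -> Vec<&'a str> {
    all_columns
        .iter()
        .copied()
        .filter(|c| {
            let in_list = listed.contains(c);
            // `by` keeps listed labels; `without` keeps the complement.
            (if without { !in_list } else { in_list })
                && *c != time_index
                && !value_columns.contains(c)
        })
        .collect()
}

fn main() {
    let cols = ["tag_0", "tag_1", "timestamp", "field_0"];
    // `sum by (tag_1) (some_metric)` groups on tag_1 only.
    assert_eq!(
        group_columns(&cols, &["tag_1"], false, "timestamp", &["field_0"]),
        vec!["tag_1"]
    );
    // `sum without (tag_1) (some_metric)` groups on the remaining label tag_0.
    assert_eq!(
        group_columns(&cols, &["tag_1"], true, "timestamp", &["field_0"]),
        vec!["tag_0"]
    );
}

The resulting columns become the `group_exprs` fed to `LogicalPlanBuilder::aggregate`, while each value column is wrapped in the DataFusion aggregate chosen by `create_aggregate_exprs`.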
|
feat
|
plan some aggregate expr in PromQL planner (#870)
|
17b385a985e7b628ad8d86520671e3b99db1f25f
|
2023-10-08 13:58:45
|
Yingwen
|
fix: compiler errors under `pprof` and `mem-prof` features (#2537)
| false
|
diff --git a/src/servers/src/error.rs b/src/servers/src/error.rs
index 8647b60cddeb..3efc2d3c65fa 100644
--- a/src/servers/src/error.rs
+++ b/src/servers/src/error.rs
@@ -278,8 +278,7 @@ pub enum Error {
#[snafu(display("Failed to dump profile data"))]
DumpProfileData {
location: Location,
- #[snafu(source)]
- error: common_mem_prof::error::Error,
+ source: common_mem_prof::error::Error,
},
#[snafu(display("Invalid prepare statement: {}", err_msg))]
@@ -335,7 +334,9 @@ pub enum Error {
#[cfg(feature = "pprof")]
#[snafu(display("Failed to dump pprof data"))]
- DumpPprof { source: common_pprof::Error },
+ DumpPprof {
+ source: crate::http::pprof::nix::Error,
+ },
#[snafu(display(""))]
Metrics { source: BoxedError },
diff --git a/src/servers/src/http.rs b/src/servers/src/http.rs
index 1e9660e93872..d6fbd8e49da8 100644
--- a/src/servers/src/http.rs
+++ b/src/servers/src/http.rs
@@ -19,7 +19,7 @@ pub mod influxdb;
pub mod mem_prof;
pub mod opentsdb;
pub mod otlp;
-mod pprof;
+pub mod pprof;
pub mod prom_store;
pub mod prometheus;
pub mod script;
diff --git a/src/servers/src/http/pprof.rs b/src/servers/src/http/pprof.rs
index 3e8ed55ac084..23a9a2c2cb2e 100644
--- a/src/servers/src/http/pprof.rs
+++ b/src/servers/src/http/pprof.rs
@@ -13,7 +13,7 @@
// limitations under the License.
#[cfg(feature = "pprof")]
-mod nix;
+pub(crate) mod nix;
#[cfg(feature = "pprof")]
pub mod handler {
diff --git a/src/servers/src/http/pprof/nix.rs b/src/servers/src/http/pprof/nix.rs
index 5c87bd0362c5..4b9c0918886d 100644
--- a/src/servers/src/http/pprof/nix.rs
+++ b/src/servers/src/http/pprof/nix.rs
@@ -17,32 +17,38 @@ use std::time::Duration;
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
+use common_macro::stack_trace_debug;
use prost::Message;
use snafu::{Location, ResultExt, Snafu};
-#[derive(Debug, Snafu)]
+#[derive(Snafu)]
+#[stack_trace_debug]
pub enum Error {
#[snafu(display("Failed to create profiler guard"))]
CreateGuard {
- source: pprof::Error,
+ #[snafu(source)]
+ error: pprof::Error,
location: Location,
},
#[snafu(display("Failed to create report"))]
CreateReport {
- source: pprof::Error,
+ #[snafu(source)]
+ error: pprof::Error,
location: Location,
},
#[snafu(display("Failed to create flamegraph"))]
CreateFlamegraph {
- source: pprof::Error,
+ #[snafu(source)]
+ error: pprof::Error,
location: Location,
},
#[snafu(display("Failed to create pprof report"))]
ReportPprof {
- source: pprof::Error,
+ #[snafu(source)]
+ error: pprof::Error,
location: Location,
},
}
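
The substance of the fix above is which field snafu treats as the error source. A minimal sketch of the two styles being swapped, using plain snafu and std errors (GreptimeDB's `#[stack_trace_debug]` macro and `Location` fields are omitted here):

use snafu::Snafu;

#[derive(Debug, Snafu)]
enum SketchError {
    // External error kept under a custom field name: it must be marked
    // explicitly with #[snafu(source)].
    #[snafu(display("Failed to create profiler guard"))]
    CreateGuard {
        #[snafu(source)]
        error: std::io::Error,
    },

    // A field literally named `source` is picked up implicitly; no attribute
    // is needed, which is the form the mem-prof variant goes back to.
    #[snafu(display("Failed to dump profile data"))]
    DumpProfileData { source: std::io::Error },
}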
|
fix
|
compiler errors under `pprof` and `mem-prof` features (#2537)
|
bd0eed7af982d8478d9f626e9bba02968f4c70e3
|
2023-12-27 18:52:19
|
Ruihang Xia
|
chore: do not send message for xlarge PR (#3020)
| false
|
diff --git a/.github/workflows/size-label.yml b/.github/workflows/size-label.yml
index c0b0d1c052be..fd3d5097fafa 100644
--- a/.github/workflows/size-label.yml
+++ b/.github/workflows/size-label.yml
@@ -21,8 +21,5 @@ jobs:
l_max_size: '1000'
xl_label: 'Size: XL'
fail_if_xl: 'false'
- message_if_xl: >
- This PR exceeds the recommended size of 1000 lines.
- Please make sure you are NOT addressing multiple issues with one PR.
- Note this PR might be rejected due to its size.
+ message_if_xl: ""
files_to_ignore: 'Cargo.lock'
|
chore
|
do not send message for xlarge PR (#3020)
|
a751aa5ba0bdd68d9df3109edbb3f104473c5887
|
2023-07-12 08:05:23
|
Weny Xu
|
feat: switch to using drop table procedure (#1901)
| false
|
diff --git a/src/common/meta/src/rpc/ddl.rs b/src/common/meta/src/rpc/ddl.rs
index 96913cfe1574..dce9c2f24d6e 100644
--- a/src/common/meta/src/rpc/ddl.rs
+++ b/src/common/meta/src/rpc/ddl.rs
@@ -44,6 +44,20 @@ impl DdlTask {
) -> Self {
DdlTask::CreateTable(CreateTableTask::new(expr, partitions, table_info))
}
+
+ pub fn new_drop_table(
+ catalog: String,
+ schema: String,
+ table: String,
+ table_id: TableId,
+ ) -> Self {
+ DdlTask::DropTable(DropTableTask {
+ catalog,
+ schema,
+ table,
+ table_id,
+ })
+ }
}
impl TryFrom<Task> for DdlTask {
@@ -92,19 +106,17 @@ impl TryFrom<SubmitDdlTaskRequest> for PbSubmitDdlTaskRequest {
pub struct SubmitDdlTaskResponse {
pub key: Vec<u8>,
- pub table_id: TableId,
+ pub table_id: Option<TableId>,
}
impl TryFrom<PbSubmitDdlTaskResponse> for SubmitDdlTaskResponse {
type Error = error::Error;
fn try_from(resp: PbSubmitDdlTaskResponse) -> Result<Self> {
- let table_id = resp.table_id.context(error::InvalidProtoMsgSnafu {
- err_msg: "expected table_id",
- })?;
+ let table_id = resp.table_id.map(|t| t.id);
Ok(Self {
key: resp.key,
- table_id: table_id.id,
+ table_id,
})
}
}
diff --git a/src/frontend/src/error.rs b/src/frontend/src/error.rs
index df460fde1702..c942d6fcd4fa 100644
--- a/src/frontend/src/error.rs
+++ b/src/frontend/src/error.rs
@@ -25,6 +25,12 @@ use store_api::storage::RegionNumber;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
pub enum Error {
+ #[snafu(display("Unexpected, violated: {}", violated))]
+ Unexpected {
+ violated: String,
+ location: Location,
+ },
+
#[snafu(display("Execute the operation timeout, source: {}", source))]
Timeout {
location: Location,
@@ -652,7 +658,8 @@ impl ErrorExt for Error {
| Error::BuildParquetRecordBatchStream { .. }
| Error::ReadRecordBatch { .. }
| Error::BuildFileStream { .. }
- | Error::WriteStreamToFile { .. } => StatusCode::Unexpected,
+ | Error::WriteStreamToFile { .. }
+ | Error::Unexpected { .. } => StatusCode::Unexpected,
Error::Catalog { source, .. } => source.status_code(),
Error::CatalogEntrySerde { source, .. } => source.status_code(),
diff --git a/src/frontend/src/instance/distributed.rs b/src/frontend/src/instance/distributed.rs
index 0456254a9472..d91776710f9d 100644
--- a/src/frontend/src/instance/distributed.rs
+++ b/src/frontend/src/instance/distributed.rs
@@ -23,11 +23,11 @@ use api::v1::ddl_request::{Expr as DdlExpr, Expr};
use api::v1::greptime_request::Request;
use api::v1::{
column_def, AlterExpr, CompactTableExpr, CreateDatabaseExpr, CreateTableExpr, DeleteRequest,
- DropTableExpr, FlushTableExpr, InsertRequests, TableId,
+ FlushTableExpr, InsertRequests,
};
use async_trait::async_trait;
use catalog::helper::{SchemaKey, SchemaValue};
-use catalog::{CatalogManager, DeregisterTableRequest, RegisterTableRequest};
+use catalog::{CatalogManager, RegisterTableRequest};
use chrono::DateTime;
use client::client_manager::DatanodeClients;
use client::Database;
@@ -36,9 +36,7 @@ use common_catalog::format_full_table_name;
use common_error::prelude::BoxedError;
use common_meta::peer::Peer;
use common_meta::rpc::ddl::{DdlTask, SubmitDdlTaskRequest, SubmitDdlTaskResponse};
-use common_meta::rpc::router::{
- DeleteRequest as MetaDeleteRequest, Partition as MetaPartition, RouteRequest,
-};
+use common_meta::rpc::router::{Partition as MetaPartition, RouteRequest};
use common_meta::rpc::store::CompareAndPutRequest;
use common_meta::table_name::TableName;
use common_query::Output;
@@ -61,7 +59,7 @@ use sql::statements::statement::Statement;
use sql::statements::{self, sql_value_to_value};
use store_api::storage::RegionNumber;
use table::engine::TableReference;
-use table::metadata::{RawTableInfo, RawTableMeta, TableIdent, TableType};
+use table::metadata::{RawTableInfo, RawTableMeta, TableId, TableIdent, TableType};
use table::requests::TableOptions;
use table::table::AlterContext;
use table::TableRef;
@@ -119,12 +117,15 @@ impl DistInstance {
.create_table_procedure(create_table, partitions, table_info.clone())
.await?;
- let table_id = resp.table_id;
+ let table_id = resp.table_id.context(error::UnexpectedSnafu {
+ violated: "expected table_id",
+ })?;
info!("Successfully created distributed table '{table_name}' with table id {table_id}");
+
table_info.ident.table_id = table_id;
let table_info = Arc::new(table_info.try_into().context(error::CreateTableInfoSnafu)?);
- create_table.table_id = Some(TableId { id: table_id });
+ create_table.table_id = Some(api::v1::TableId { id: table_id });
let table = Arc::new(DistTable::new(
table_name.clone(),
@@ -165,7 +166,7 @@ impl DistInstance {
}
async fn drop_table(&self, table_name: TableName) -> Result<Output> {
- let _ = self
+ let table = self
.catalog_manager
.table(
&table_name.catalog_name,
@@ -178,42 +179,9 @@ impl DistInstance {
table_name: table_name.to_string(),
})?;
- let route_response = self
- .meta_client
- .delete_route(MetaDeleteRequest {
- table_name: table_name.clone(),
- })
- .await
- .context(RequestMetaSnafu)?;
+ let table_id = table.table_info().ident.table_id;
- let request = DeregisterTableRequest {
- catalog: table_name.catalog_name.clone(),
- schema: table_name.schema_name.clone(),
- table_name: table_name.table_name.clone(),
- };
- self.catalog_manager
- .deregister_table(request)
- .await
- .context(CatalogSnafu)?;
-
- let expr = DropTableExpr {
- catalog_name: table_name.catalog_name.clone(),
- schema_name: table_name.schema_name.clone(),
- table_name: table_name.table_name.clone(),
- ..Default::default()
- };
- for table_route in route_response.table_routes.iter() {
- for datanode in table_route.find_leaders() {
- debug!("Dropping table {table_name} on Datanode {datanode:?}");
-
- let client = self.datanode_clients.get_client(&datanode).await;
- let client = Database::new(&expr.catalog_name, &expr.schema_name, client);
- let _ = client
- .drop_table(expr.clone())
- .await
- .context(RequestDatanodeSnafu)?;
- }
- }
+ self.drop_table_procedure(&table_name, table_id).await?;
// Since the table information dropped on meta does not go through KvBackend, so we
// manually invalidate the cache here.
@@ -547,6 +515,30 @@ impl DistInstance {
.context(error::RequestMetaSnafu)
}
+ async fn drop_table_procedure(
+ &self,
+ table_name: &TableName,
+ table_id: TableId,
+ ) -> Result<SubmitDdlTaskResponse> {
+ let request = SubmitDdlTaskRequest {
+ task: DdlTask::new_drop_table(
+ table_name.catalog_name.to_string(),
+ table_name.schema_name.to_string(),
+ table_name.table_name.to_string(),
+ table_id,
+ ),
+ };
+
+ timeout(
+ // TODO(weny): makes timeout configurable.
+ Duration::from_secs(10),
+ self.meta_client.submit_ddl_task(request),
+ )
+ .await
+ .context(error::TimeoutSnafu)?
+ .context(error::RequestMetaSnafu)
+ }
+
async fn handle_dist_insert(
&self,
requests: InsertRequests,
diff --git a/src/meta-client/src/client/ddl.rs b/src/meta-client/src/client/ddl.rs
index 90191dd519c5..8e5fda721d4a 100644
--- a/src/meta-client/src/client/ddl.rs
+++ b/src/meta-client/src/client/ddl.rs
@@ -67,8 +67,7 @@ impl Client {
}
#[derive(Debug)]
-// TODO(weny): removes this in following PRs.
-#[allow(unused)]
+
struct Inner {
id: Id,
role: Role,
diff --git a/src/meta-srv/src/ddl.rs b/src/meta-srv/src/ddl.rs
index e165617ae3c1..e7e07b66ba29 100644
--- a/src/meta-srv/src/ddl.rs
+++ b/src/meta-srv/src/ddl.rs
@@ -83,6 +83,20 @@ impl DdlManager {
)
.context(error::RegisterProcedureLoaderSnafu {
type_name: CreateTableProcedure::TYPE_NAME,
+ })?;
+
+ let context = self.create_context();
+
+ self.procedure_manager
+ .register_loader(
+ DropTableProcedure::TYPE_NAME,
+ Box::new(move |json| {
+ let context = context.clone();
+ DropTableProcedure::from_json(json, context).map(|p| Box::new(p) as _)
+ }),
+ )
+ .context(error::RegisterProcedureLoaderSnafu {
+ type_name: DropTableProcedure::TYPE_NAME,
})
}
diff --git a/src/meta-srv/src/procedure/drop_table.rs b/src/meta-srv/src/procedure/drop_table.rs
index b54f49129302..31ea217fa74b 100644
--- a/src/meta-srv/src/procedure/drop_table.rs
+++ b/src/meta-srv/src/procedure/drop_table.rs
@@ -47,8 +47,6 @@ pub struct DropTableProcedure {
data: DropTableData,
}
-// TODO(weny): removes in following PRs.
-#[allow(unused)]
impl DropTableProcedure {
pub(crate) const TYPE_NAME: &'static str = "metasrv-procedure::DropTable";
diff --git a/src/meta-srv/src/procedure/utils.rs b/src/meta-srv/src/procedure/utils.rs
index 113c75e7180d..3823135169b3 100644
--- a/src/meta-srv/src/procedure/utils.rs
+++ b/src/meta-srv/src/procedure/utils.rs
@@ -43,7 +43,7 @@ pub fn build_table_metadata_key(
table_id,
catalog_name: table_ref.catalog,
schema_name: table_ref.schema,
- table_name: table_ref.schema,
+ table_name: table_ref.table,
};
let table_global_key = TableGlobalKey {
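
One consequence of this change worth calling out: `SubmitDdlTaskResponse::table_id` becomes optional because only create-table tasks have an id to return, so the create path must now check for it while the drop path can ignore it. A minimal sketch of that check (local stand-in types, not the real `common_meta` structs):

type TableId = u32;

struct SubmitDdlTaskResponse {
    key: Vec<u8>,
    table_id: Option<TableId>,
}

fn created_table_id(resp: &SubmitDdlTaskResponse) -> Result<TableId, String> {
    // Mirrors the frontend's `context(error::UnexpectedSnafu { .. })` guard above.
    resp.table_id
        .ok_or_else(|| "expected table_id in create table response".to_string())
}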
|
feat
|
switch to using drop table procedure (#1901)
|
9c42825f5d8ef6f61796c992a1e510d1ec650f58
|
2024-06-07 08:54:56
|
irenjj
|
feat: Implement SHOW CREATE FLOW (#4040)
| false
|
diff --git a/src/common/meta/src/key/flow.rs b/src/common/meta/src/key/flow.rs
index b2ce5d1cb24b..f66d17da33f7 100644
--- a/src/common/meta/src/key/flow.rs
+++ b/src/common/meta/src/key/flow.rs
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-pub(crate) mod flow_info;
+pub mod flow_info;
pub(crate) mod flow_name;
pub(crate) mod flownode_flow;
pub(crate) mod table_flow;
diff --git a/src/common/meta/src/key/flow/flow_info.rs b/src/common/meta/src/key/flow/flow_info.rs
index c1ce1a1c994f..86b4d2964181 100644
--- a/src/common/meta/src/key/flow/flow_info.rs
+++ b/src/common/meta/src/key/flow/flow_info.rs
@@ -141,6 +141,26 @@ impl FlowInfoValue {
pub fn source_table_ids(&self) -> &[TableId] {
&self.source_table_ids
}
+
+ pub fn flow_name(&self) -> &String {
+ &self.flow_name
+ }
+
+ pub fn sink_table_name(&self) -> &TableName {
+ &self.sink_table_name
+ }
+
+ pub fn raw_sql(&self) -> &String {
+ &self.raw_sql
+ }
+
+ pub fn expire_after(&self) -> Option<i64> {
+ self.expire_after
+ }
+
+ pub fn comment(&self) -> &String {
+ &self.comment
+ }
}
pub type FlowInfoManagerRef = Arc<FlowInfoManager>;
diff --git a/src/frontend/src/instance.rs b/src/frontend/src/instance.rs
index 4a1dcfbf295d..29c832afe595 100644
--- a/src/frontend/src/instance.rs
+++ b/src/frontend/src/instance.rs
@@ -514,6 +514,9 @@ pub fn check_permission(
Statement::ShowCreateTable(stmt) => {
validate_param(&stmt.table_name, query_ctx)?;
}
+ Statement::ShowCreateFlow(stmt) => {
+ validate_param(&stmt.flow_name, query_ctx)?;
+ }
Statement::CreateExternalTable(stmt) => {
validate_param(&stmt.name, query_ctx)?;
}
diff --git a/src/operator/src/statement.rs b/src/operator/src/statement.rs
index bbfe34d91e00..8522b5db9bb0 100644
--- a/src/operator/src/statement.rs
+++ b/src/operator/src/statement.rs
@@ -239,6 +239,44 @@ impl StatementExecutor {
self.show_create_table(table_name, table_ref, query_ctx)
.await
}
+ Statement::ShowCreateFlow(show) => {
+ let obj_name = &show.flow_name;
+ let (catalog_name, flow_name) = match &obj_name.0[..] {
+ [table] => (query_ctx.current_catalog().to_string(), table.value.clone()),
+ [catalog, table] => (catalog.value.clone(), table.value.clone()),
+ _ => {
+ return InvalidSqlSnafu {
+ err_msg: format!(
+ "expect flow name to be <catalog>.<flow_name> or <flow_name>, actual: {obj_name}",
+ ),
+ }
+ .fail()
+ }
+ };
+
+ let flow_name_val = self
+ .flow_metadata_manager
+ .flow_name_manager()
+ .get(&catalog_name, &flow_name)
+ .await
+ .context(error::TableMetadataManagerSnafu)?
+ .context(error::FlowNotFoundSnafu {
+ flow_name: &flow_name,
+ })?;
+
+ let flow_val = self
+ .flow_metadata_manager
+ .flow_info_manager()
+ .get(flow_name_val.flow_id())
+ .await
+ .context(error::TableMetadataManagerSnafu)?
+ .context(error::FlowNotFoundSnafu {
+ flow_name: &flow_name,
+ })?;
+
+ self.show_create_flow(obj_name.clone(), flow_val, query_ctx)
+ .await
+ }
Statement::SetVariables(set_var) => {
let var_name = set_var.variable.to_string().to_uppercase();
match var_name.as_str() {
diff --git a/src/operator/src/statement/show.rs b/src/operator/src/statement/show.rs
index 818734754b89..b1d0bd85a5c5 100644
--- a/src/operator/src/statement/show.rs
+++ b/src/operator/src/statement/show.rs
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use common_meta::key::flow::flow_info::FlowInfoValue;
use common_query::Output;
use common_telemetry::tracing;
use partition::manager::PartitionInfo;
@@ -23,6 +24,7 @@ use sql::statements::create::Partitions;
use sql::statements::show::{
ShowColumns, ShowDatabases, ShowIndex, ShowKind, ShowTables, ShowVariables,
};
+use sqlparser::ast::ObjectName;
use table::metadata::TableType;
use table::table_name::TableName;
use table::TableRef;
@@ -105,6 +107,17 @@ impl StatementExecutor {
.context(error::ExecuteStatementSnafu)
}
+ #[tracing::instrument(skip_all)]
+ pub async fn show_create_flow(
+ &self,
+ flow_name: ObjectName,
+ flow_val: FlowInfoValue,
+ query_ctx: QueryContextRef,
+ ) -> Result<Output> {
+ query::sql::show_create_flow(flow_name, flow_val, query_ctx)
+ .context(error::ExecuteStatementSnafu)
+ }
+
#[tracing::instrument(skip_all)]
pub fn show_variable(&self, stmt: ShowVariables, query_ctx: QueryContextRef) -> Result<Output> {
query::sql::show_variable(stmt, query_ctx).context(error::ExecuteStatementSnafu)
diff --git a/src/query/src/sql.rs b/src/query/src/sql.rs
index bae83acf4c66..038e26572df9 100644
--- a/src/query/src/sql.rs
+++ b/src/query/src/sql.rs
@@ -31,6 +31,7 @@ use common_datasource::file_format::{infer_schemas, FileFormat, Format};
use common_datasource::lister::{Lister, Source};
use common_datasource::object_store::build_backend;
use common_datasource::util::find_dir_and_filename;
+use common_meta::key::flow::flow_info::FlowInfoValue;
use common_query::prelude::GREPTIME_TIMESTAMP;
use common_query::Output;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
@@ -49,10 +50,13 @@ use regex::Regex;
use session::context::QueryContextRef;
pub use show_create_table::create_table_stmt;
use snafu::{ensure, OptionExt, ResultExt};
-use sql::statements::create::Partitions;
+use sql::ast::Ident;
+use sql::parser::ParserContext;
+use sql::statements::create::{CreateFlow, Partitions};
use sql::statements::show::{
ShowColumns, ShowDatabases, ShowIndex, ShowKind, ShowTables, ShowVariables,
};
+use sqlparser::ast::ObjectName;
use table::requests::{FILE_TABLE_LOCATION_KEY, FILE_TABLE_PATTERN_KEY};
use table::TableRef;
@@ -134,6 +138,13 @@ static SHOW_CREATE_TABLE_OUTPUT_SCHEMA: Lazy<Arc<Schema>> = Lazy::new(|| {
]))
});
+static SHOW_CREATE_FLOW_OUTPUT_SCHEMA: Lazy<Arc<Schema>> = Lazy::new(|| {
+ Arc::new(Schema::new(vec![
+ ColumnSchema::new("Flow", ConcreteDataType::string_datatype(), false),
+ ColumnSchema::new("Create Flow", ConcreteDataType::string_datatype(), false),
+ ]))
+});
+
fn null() -> Expr {
lit(ScalarValue::Null)
}
@@ -606,6 +617,46 @@ pub fn show_create_table(
Ok(Output::new_with_record_batches(records))
}
+pub fn show_create_flow(
+ flow_name: ObjectName,
+ flow_val: FlowInfoValue,
+ query_ctx: QueryContextRef,
+) -> Result<Output> {
+ let mut parser_ctx =
+ ParserContext::new(query_ctx.sql_dialect(), flow_val.raw_sql()).context(error::SqlSnafu)?;
+
+ let query = parser_ctx.parser_query().context(error::SqlSnafu)?;
+
+ let comment = if flow_val.comment().is_empty() {
+ None
+ } else {
+ Some(flow_val.comment().clone())
+ };
+
+ let stmt = CreateFlow {
+ flow_name,
+ sink_table_name: ObjectName(vec![Ident {
+ value: flow_val.sink_table_name().table_name.clone(),
+ quote_style: None,
+ }]),
+ or_replace: true,
+ if_not_exists: true,
+ expire_after: flow_val.expire_after(),
+ comment,
+ query,
+ };
+
+ let sql = format!("{}", stmt);
+ let columns = vec![
+ Arc::new(StringVector::from(vec![flow_val.flow_name().clone()])) as _,
+ Arc::new(StringVector::from(vec![sql])) as _,
+ ];
+ let records = RecordBatches::try_from_columns(SHOW_CREATE_FLOW_OUTPUT_SCHEMA.clone(), columns)
+ .context(error::CreateRecordBatchSnafu)?;
+
+ Ok(Output::new_with_record_batches(records))
+}
+
pub fn describe_table(table: TableRef) -> Result<Output> {
let table_info = table.table_info();
let columns_schemas = table_info.meta.schema.column_schemas();
diff --git a/src/sql/src/error.rs b/src/sql/src/error.rs
index c865e12a8617..ed88a8826a59 100644
--- a/src/sql/src/error.rs
+++ b/src/sql/src/error.rs
@@ -141,6 +141,9 @@ pub enum Error {
#[snafu(display("Invalid table name: {}", name))]
InvalidTableName { name: String },
+ #[snafu(display("Invalid flow name: {}", name))]
+ InvalidFlowName { name: String },
+
#[snafu(display("Invalid default constraint, column: {}", column))]
InvalidDefault {
column: String,
@@ -274,6 +277,7 @@ impl ErrorExt for Error {
| InvalidDatabaseOption { .. }
| ColumnTypeMismatch { .. }
| InvalidTableName { .. }
+ | InvalidFlowName { .. }
| InvalidSqlValue { .. }
| TimestampOverflow { .. }
| InvalidTableOption { .. }
diff --git a/src/sql/src/parser.rs b/src/sql/src/parser.rs
index 2b90c792dc11..65a12b9ea335 100644
--- a/src/sql/src/parser.rs
+++ b/src/sql/src/parser.rs
@@ -13,7 +13,7 @@
// limitations under the License.
use snafu::ResultExt;
-use sqlparser::ast::Ident;
+use sqlparser::ast::{Ident, Query};
use sqlparser::dialect::Dialect;
use sqlparser::keywords::Keyword;
use sqlparser::parser::{Parser, ParserError, ParserOptions};
@@ -38,6 +38,21 @@ pub struct ParserContext<'a> {
}
impl<'a> ParserContext<'a> {
+ /// Construct a new ParserContext.
+ pub fn new(dialect: &'a dyn Dialect, sql: &'a str) -> Result<ParserContext<'a>> {
+ let parser = Parser::new(dialect)
+ .with_options(ParserOptions::new().with_trailing_commas(true))
+ .try_with_sql(sql)
+ .context(SyntaxSnafu)?;
+
+ Ok(ParserContext { parser, sql })
+ }
+
+ /// Parses parser context to Query.
+ pub fn parser_query(&mut self) -> Result<Box<Query>> {
+ Ok(Box::new(self.parser.parse_query().context(SyntaxSnafu)?))
+ }
+
/// Parses SQL with given dialect
pub fn create_with_dialect(
sql: &'a str,
@@ -46,11 +61,7 @@ impl<'a> ParserContext<'a> {
) -> Result<Vec<Statement>> {
let mut stmts: Vec<Statement> = Vec::new();
- let parser = Parser::new(dialect)
- .with_options(ParserOptions::new().with_trailing_commas(true))
- .try_with_sql(sql)
- .context(SyntaxSnafu)?;
- let mut parser_ctx = ParserContext { sql, parser };
+ let mut parser_ctx = ParserContext::new(dialect, sql)?;
let mut expecting_statement_delimiter = false;
loop {
diff --git a/src/sql/src/parsers/show_parser.rs b/src/sql/src/parsers/show_parser.rs
index 2ca96b9bc4d2..5cf8b5fe3c3d 100644
--- a/src/sql/src/parsers/show_parser.rs
+++ b/src/sql/src/parsers/show_parser.rs
@@ -16,11 +16,13 @@ use snafu::{ensure, ResultExt};
use sqlparser::keywords::Keyword;
use sqlparser::tokenizer::Token;
-use crate::error::{self, InvalidDatabaseNameSnafu, InvalidTableNameSnafu, Result};
+use crate::error::{
+ self, InvalidDatabaseNameSnafu, InvalidFlowNameSnafu, InvalidTableNameSnafu, Result,
+};
use crate::parser::ParserContext;
use crate::statements::show::{
- ShowColumns, ShowCreateTable, ShowDatabases, ShowIndex, ShowKind, ShowStatus, ShowTables,
- ShowVariables,
+ ShowColumns, ShowCreateFlow, ShowCreateTable, ShowDatabases, ShowIndex, ShowKind, ShowStatus,
+ ShowTables, ShowVariables,
};
use crate::statements::statement::Statement;
@@ -62,6 +64,8 @@ impl<'a> ParserContext<'a> {
} else if self.consume_token("CREATE") {
if self.consume_token("TABLE") {
self.parse_show_create_table()
+ } else if self.consume_token("FLOW") {
+ self.parse_show_create_flow()
} else {
self.unsupported(self.peek_token_as_string())
}
@@ -109,6 +113,24 @@ impl<'a> ParserContext<'a> {
Ok(Statement::ShowCreateTable(ShowCreateTable { table_name }))
}
+ fn parse_show_create_flow(&mut self) -> Result<Statement> {
+ let raw_flow_name = self
+ .parse_object_name()
+ .with_context(|_| error::UnexpectedSnafu {
+ sql: self.sql,
+ expected: "a flow name",
+ actual: self.peek_token_as_string(),
+ })?;
+ let flow_name = Self::canonicalize_object_name(raw_flow_name);
+ ensure!(
+ !flow_name.0.is_empty(),
+ InvalidFlowNameSnafu {
+ name: flow_name.to_string(),
+ }
+ );
+ Ok(Statement::ShowCreateFlow(ShowCreateFlow { flow_name }))
+ }
+
fn parse_show_table_name(&mut self) -> Result<String> {
self.parser.next_token();
let table_name = self
diff --git a/src/sql/src/statements/create.rs b/src/sql/src/statements/create.rs
index 9bcf65c67578..6d43aebca713 100644
--- a/src/sql/src/statements/create.rs
+++ b/src/sql/src/statements/create.rs
@@ -269,17 +269,17 @@ impl Display for CreateFlow {
if self.or_replace {
write!(f, "OR REPLACE ")?;
}
- write!(f, "TASK ")?;
+ write!(f, "FLOW ")?;
if self.if_not_exists {
write!(f, "IF NOT EXISTS ")?;
}
- write!(f, "{} ", &self.flow_name)?;
- write!(f, "OUTPUT AS {} ", &self.sink_table_name)?;
+ writeln!(f, "{}", &self.flow_name)?;
+ writeln!(f, "SINK TO {}", &self.sink_table_name)?;
if let Some(expire_after) = &self.expire_after {
- write!(f, "EXPIRE AFTER {} ", expire_after)?;
+ writeln!(f, "EXPIRE AFTER {} ", expire_after)?;
}
if let Some(comment) = &self.comment {
- write!(f, "COMMENT '{}' ", comment)?;
+ writeln!(f, "COMMENT '{}'", comment)?;
}
write!(f, "AS {}", &self.query)
}
@@ -604,4 +604,37 @@ WITH(
}
}
}
+
+ #[test]
+ fn test_display_create_flow() {
+ let sql = r"CREATE FLOW filter_numbers
+ SINK TO out_num_cnt
+ AS SELECT number FROM numbers_input where number > 10;";
+ let result =
+ ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default())
+ .unwrap();
+ assert_eq!(1, result.len());
+
+ match &result[0] {
+ Statement::CreateFlow(c) => {
+ let new_sql = format!("\n{}", c);
+ assert_eq!(
+ r#"
+CREATE FLOW filter_numbers
+SINK TO out_num_cnt
+AS SELECT number FROM numbers_input WHERE number > 10"#,
+ &new_sql
+ );
+
+ let new_result = ParserContext::create_with_dialect(
+ &new_sql,
+ &GreptimeDbDialect {},
+ ParseOptions::default(),
+ )
+ .unwrap();
+ assert_eq!(result, new_result);
+ }
+ _ => unreachable!(),
+ }
+ }
}
diff --git a/src/sql/src/statements/show.rs b/src/sql/src/statements/show.rs
index 2e4a76c145e0..90dad65eaded 100644
--- a/src/sql/src/statements/show.rs
+++ b/src/sql/src/statements/show.rs
@@ -132,6 +132,19 @@ impl Display for ShowCreateTable {
}
}
+/// SQL structure for `SHOW CREATE FLOW`.
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
+pub struct ShowCreateFlow {
+ pub flow_name: ObjectName,
+}
+
+impl Display for ShowCreateFlow {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let flow_name = &self.flow_name;
+ write!(f, "SHOW CREATE FLOW {flow_name}")
+ }
+}
+
/// SQL structure for `SHOW VARIABLES xxx`.
#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
pub struct ShowVariables {
@@ -241,6 +254,35 @@ mod tests {
.is_err());
}
+ #[test]
+ pub fn test_show_create_flow() {
+ let sql = "SHOW CREATE FLOW test";
+ let stmts: Vec<Statement> =
+ ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default())
+ .unwrap();
+ assert_eq!(1, stmts.len());
+ assert_matches!(&stmts[0], Statement::ShowCreateFlow { .. });
+ match &stmts[0] {
+ Statement::ShowCreateFlow(show) => {
+ let flow_name = show.flow_name.to_string();
+ assert_eq!(flow_name, "test");
+ }
+ _ => {
+ unreachable!();
+ }
+ }
+ }
+ #[test]
+ pub fn test_show_create_missing_flow() {
+ let sql = "SHOW CREATE FLOW";
+ assert!(ParserContext::create_with_dialect(
+ sql,
+ &GreptimeDbDialect {},
+ ParseOptions::default()
+ )
+ .is_err());
+ }
+
#[test]
fn test_display_show_variables() {
let sql = r"show variables v1;";
diff --git a/src/sql/src/statements/statement.rs b/src/sql/src/statements/statement.rs
index a014ecb125e3..579f2a372ad2 100644
--- a/src/sql/src/statements/statement.rs
+++ b/src/sql/src/statements/statement.rs
@@ -31,8 +31,8 @@ use crate::statements::insert::Insert;
use crate::statements::query::Query;
use crate::statements::set_variables::SetVariables;
use crate::statements::show::{
- ShowColumns, ShowCreateTable, ShowDatabases, ShowIndex, ShowKind, ShowStatus, ShowTables,
- ShowVariables,
+ ShowColumns, ShowCreateFlow, ShowCreateTable, ShowDatabases, ShowIndex, ShowKind, ShowStatus,
+ ShowTables, ShowVariables,
};
use crate::statements::tql::Tql;
use crate::statements::truncate::TruncateTable;
@@ -81,6 +81,8 @@ pub enum Statement {
ShowIndex(ShowIndex),
// SHOW CREATE TABLE
ShowCreateTable(ShowCreateTable),
+ // SHOW CREATE FLOW
+ ShowCreateFlow(ShowCreateFlow),
// SHOW STATUS
ShowStatus(ShowStatus),
// DESCRIBE TABLE
@@ -118,6 +120,7 @@ impl Display for Statement {
Statement::ShowColumns(s) => s.fmt(f),
Statement::ShowIndex(s) => s.fmt(f),
Statement::ShowCreateTable(s) => s.fmt(f),
+ Statement::ShowCreateFlow(s) => s.fmt(f),
Statement::ShowStatus(s) => s.fmt(f),
Statement::DescribeTable(s) => s.fmt(f),
Statement::Explain(s) => s.fmt(f),
diff --git a/tests/cases/standalone/show_create_flow.result b/tests/cases/standalone/show_create_flow.result
new file mode 100644
index 000000000000..b09d026b6efe
--- /dev/null
+++ b/tests/cases/standalone/show_create_flow.result
@@ -0,0 +1,41 @@
+CREATE TABLE numbers_input (
+ number INT,
+ ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+ PRIMARY KEY(number),
+ TIME INDEX(ts)
+);
+
+Affected Rows: 0
+
+create table out_num_cnt (
+ number INT,
+ ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP TIME INDEX);
+
+Affected Rows: 0
+
+CREATE FLOW filter_numbers SINK TO out_num_cnt AS SELECT number FROM numbers_input where number > 10;
+
+Affected Rows: 0
+
+SHOW CREATE FLOW filter_numbers;
+
++----------------+-------------------------------------------------------+
+| Flow | Create Flow |
++----------------+-------------------------------------------------------+
+| filter_numbers | CREATE OR REPLACE FLOW IF NOT EXISTS filter_numbers |
+| | SINK TO out_num_cnt |
+| | AS SELECT number FROM numbers_input WHERE number > 10 |
++----------------+-------------------------------------------------------+
+
+drop flow filter_numbers;
+
+Affected Rows: 0
+
+drop table out_num_cnt;
+
+Affected Rows: 0
+
+drop table numbers_input;
+
+Affected Rows: 0
+
diff --git a/tests/cases/standalone/show_create_flow.sql b/tests/cases/standalone/show_create_flow.sql
new file mode 100644
index 000000000000..d30557f4c404
--- /dev/null
+++ b/tests/cases/standalone/show_create_flow.sql
@@ -0,0 +1,19 @@
+CREATE TABLE numbers_input (
+ number INT,
+ ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+ PRIMARY KEY(number),
+ TIME INDEX(ts)
+);
+create table out_num_cnt (
+ number INT,
+ ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP TIME INDEX);
+
+CREATE FLOW filter_numbers SINK TO out_num_cnt AS SELECT number FROM numbers_input where number > 10;
+
+SHOW CREATE FLOW filter_numbers;
+
+drop flow filter_numbers;
+
+drop table out_num_cnt;
+
+drop table numbers_input;
|
feat
|
Implement SHOW CREATE FLOW (#4040)
|
048368fd876077ea2c34d53ebf92e409c111e31e
|
2024-05-27 14:56:50
|
Weny Xu
|
feat: invoke `flush_table` and `compact_table` in fuzz tests (#4045)
| false
|
diff --git a/src/metric-engine/src/engine.rs b/src/metric-engine/src/engine.rs
index 38cbc6e4ac4e..e5b4bf2faca3 100644
--- a/src/metric-engine/src/engine.rs
+++ b/src/metric-engine/src/engine.rs
@@ -34,6 +34,7 @@ use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_recordbatch::SendableRecordBatchStream;
use mito2::engine::MitoEngine;
+use snafu::ResultExt;
use store_api::metadata::RegionMetadataRef;
use store_api::metric_engine_consts::METRIC_ENGINE_NAME;
use store_api::region_engine::{
@@ -44,7 +45,7 @@ use store_api::storage::{RegionId, ScanRequest};
use self::state::MetricEngineState;
use crate::data_region::DataRegion;
-use crate::error::{Result, UnsupportedRegionRequestSnafu};
+use crate::error::{self, Result, UnsupportedRegionRequestSnafu};
use crate::metadata_region::MetadataRegion;
use crate::utils;
@@ -144,10 +145,33 @@ impl RegionEngine for MetricEngine {
.alter_region(region_id, alter, &mut extension_return_value)
.await
}
- RegionRequest::Delete(_)
- | RegionRequest::Flush(_)
- | RegionRequest::Compact(_)
- | RegionRequest::Truncate(_) => UnsupportedRegionRequestSnafu { request }.fail(),
+ RegionRequest::Flush(_) => {
+ if self.inner.is_physical_region(region_id) {
+ self.inner
+ .mito
+ .handle_request(region_id, request)
+ .await
+ .context(error::MitoFlushOperationSnafu)
+ .map(|response| response.affected_rows)
+ } else {
+ UnsupportedRegionRequestSnafu { request }.fail()
+ }
+ }
+ RegionRequest::Compact(_) => {
+ if self.inner.is_physical_region(region_id) {
+ self.inner
+ .mito
+ .handle_request(region_id, request)
+ .await
+ .context(error::MitoCompactOperationSnafu)
+ .map(|response| response.affected_rows)
+ } else {
+ UnsupportedRegionRequestSnafu { request }.fail()
+ }
+ }
+ RegionRequest::Delete(_) | RegionRequest::Truncate(_) => {
+ UnsupportedRegionRequestSnafu { request }.fail()
+ }
RegionRequest::Catchup(ref req) => self.inner.catchup_region(region_id, *req).await,
};
diff --git a/src/metric-engine/src/error.rs b/src/metric-engine/src/error.rs
index 340f4f19bcfa..72e6d7032e0c 100644
--- a/src/metric-engine/src/error.rs
+++ b/src/metric-engine/src/error.rs
@@ -121,6 +121,13 @@ pub enum Error {
location: Location,
},
+ #[snafu(display("Mito flush operation fails"))]
+ MitoFlushOperation {
+ source: BoxedError,
+ #[snafu(implicit)]
+ location: Location,
+ },
+
#[snafu(display("Mito catchup operation fails"))]
MitoCatchupOperation {
source: BoxedError,
@@ -128,6 +135,13 @@ pub enum Error {
location: Location,
},
+ #[snafu(display("Mito compact operation fails"))]
+ MitoCompactOperation {
+ source: BoxedError,
+ #[snafu(implicit)]
+ location: Location,
+ },
+
#[snafu(display("Failed to collect record batch stream"))]
CollectRecordBatchStream {
source: common_recordbatch::error::Error,
@@ -275,7 +289,9 @@ impl ErrorExt for Error {
| CloseMitoRegion { source, .. }
| MitoReadOperation { source, .. }
| MitoWriteOperation { source, .. }
- | MitoCatchupOperation { source, .. } => source.status_code(),
+ | MitoCatchupOperation { source, .. }
+ | MitoFlushOperation { source, .. }
+ | MitoCompactOperation { source, .. } => source.status_code(),
CollectRecordBatchStream { source, .. } => source.status_code(),
diff --git a/tests-fuzz/src/ir.rs b/tests-fuzz/src/ir.rs
index a8907de76d86..01d2cd430981 100644
--- a/tests-fuzz/src/ir.rs
+++ b/tests-fuzz/src/ir.rs
@@ -36,6 +36,7 @@ use rand::Rng;
use serde::{Deserialize, Serialize};
use self::insert_expr::{RowValue, RowValues};
+use crate::context::TableContextRef;
use crate::generator::Random;
use crate::impl_random;
use crate::ir::create_expr::ColumnOption;
@@ -442,7 +443,7 @@ pub fn generate_columns<R: Rng + 'static>(
/// Replace Value::Default with the corresponding default value in the rows for comparison.
pub fn replace_default(
rows: &[RowValues],
- create_expr: &CreateTableExpr,
+ table_ctx_ref: &TableContextRef,
insert_expr: &InsertIntoExpr,
) -> Vec<RowValues> {
let index_map: HashMap<usize, usize> = insert_expr
@@ -450,7 +451,7 @@ pub fn replace_default(
.iter()
.enumerate()
.map(|(insert_idx, insert_column)| {
- let create_idx = create_expr
+ let create_idx = table_ctx_ref
.columns
.iter()
.position(|create_column| create_column.name == insert_column.name)
@@ -464,7 +465,7 @@ pub fn replace_default(
let mut new_row = Vec::new();
for (idx, value) in row.iter().enumerate() {
if let RowValue::Default = value {
- let column = &create_expr.columns[index_map[&idx]];
+ let column = &table_ctx_ref.columns[index_map[&idx]];
new_row.push(RowValue::Value(column.default_value().unwrap().clone()));
} else {
new_row.push(value.clone());
diff --git a/tests-fuzz/src/utils.rs b/tests-fuzz/src/utils.rs
index 9156067b253e..7dff25b5285d 100644
--- a/tests-fuzz/src/utils.rs
+++ b/tests-fuzz/src/utils.rs
@@ -20,9 +20,13 @@ pub mod process;
use std::env;
use common_telemetry::info;
+use snafu::ResultExt;
use sqlx::mysql::MySqlPoolOptions;
use sqlx::{MySql, Pool};
+use crate::error::{self, Result};
+use crate::ir::Ident;
+
/// Database connections
pub struct Connections {
pub mysql: Option<Pool<MySql>>,
@@ -83,3 +87,27 @@ pub fn load_unstable_test_env_variables() -> UnstableTestVariables {
root_dir,
}
}
+
+/// Flushes memtable to SST file.
+pub async fn flush_memtable(e: &Pool<MySql>, table_name: &Ident) -> Result<()> {
+ let sql = format!("SELECT flush_table(\"{}\")", table_name);
+ let result = sqlx::query(&sql)
+ .execute(e)
+ .await
+ .context(error::ExecuteQuerySnafu { sql })?;
+ info!("Flush table: {}\n\nResult: {result:?}\n\n", table_name);
+
+ Ok(())
+}
+
+/// Triggers a compaction for table
+pub async fn compact_table(e: &Pool<MySql>, table_name: &Ident) -> Result<()> {
+ let sql = format!("SELECT compact_table(\"{}\")", table_name);
+ let result = sqlx::query(&sql)
+ .execute(e)
+ .await
+ .context(error::ExecuteQuerySnafu { sql })?;
+ info!("Compact table: {}\n\nResult: {result:?}\n\n", table_name);
+
+ Ok(())
+}
diff --git a/tests-fuzz/targets/fuzz_insert.rs b/tests-fuzz/targets/fuzz_insert.rs
index 11d02ea63d5e..73baf5a39377 100644
--- a/tests-fuzz/targets/fuzz_insert.rs
+++ b/tests-fuzz/targets/fuzz_insert.rs
@@ -39,7 +39,7 @@ use tests_fuzz::ir::{
use tests_fuzz::translator::mysql::create_expr::CreateTableExprTranslator;
use tests_fuzz::translator::mysql::insert_expr::InsertIntoExprTranslator;
use tests_fuzz::translator::DslTranslator;
-use tests_fuzz::utils::{init_greptime_connections_via_env, Connections};
+use tests_fuzz::utils::{flush_memtable, init_greptime_connections_via_env, Connections};
use tests_fuzz::validator;
struct FuzzContext {
@@ -120,7 +120,7 @@ async fn execute_insert(ctx: FuzzContext, input: FuzzInput) -> Result<()> {
.context(error::ExecuteQuerySnafu { sql: &sql })?;
let table_ctx = Arc::new(TableContext::from(&create_expr));
- let insert_expr = generate_insert_expr(input, &mut rng, table_ctx)?;
+ let insert_expr = generate_insert_expr(input, &mut rng, table_ctx.clone())?;
let translator = InsertIntoExprTranslator;
let sql = translator.translate(&insert_expr)?;
let result = ctx
@@ -141,6 +141,10 @@ async fn execute_insert(ctx: FuzzContext, input: FuzzInput) -> Result<()> {
}
);
+ if rng.gen_bool(0.5) {
+ flush_memtable(&ctx.greptime, &create_expr.table_name).await?;
+ }
+
// Validate inserted rows
// The order of inserted rows are random, so we need to sort the inserted rows by primary keys and time index for comparison
let primary_keys_names = create_expr
@@ -178,7 +182,7 @@ async fn execute_insert(ctx: FuzzContext, input: FuzzInput) -> Result<()> {
column_list, create_expr.table_name, primary_keys_column_list
);
let fetched_rows = validator::row::fetch_values(&ctx.greptime, select_sql.as_str()).await?;
- let mut expected_rows = replace_default(&insert_expr.values_list, &create_expr, &insert_expr);
+ let mut expected_rows = replace_default(&insert_expr.values_list, &table_ctx, &insert_expr);
expected_rows.sort_by(|a, b| {
let a_keys: Vec<_> = primary_keys_idxs_in_insert_expr
.iter()
diff --git a/tests-fuzz/targets/fuzz_insert_logical_table.rs b/tests-fuzz/targets/fuzz_insert_logical_table.rs
index 0c66bafdc01d..fe7a25c6761d 100644
--- a/tests-fuzz/targets/fuzz_insert_logical_table.rs
+++ b/tests-fuzz/targets/fuzz_insert_logical_table.rs
@@ -14,6 +14,7 @@
#![no_main]
+use std::collections::HashMap;
use std::sync::Arc;
use common_telemetry::info;
@@ -40,9 +41,10 @@ use tests_fuzz::ir::{
use tests_fuzz::translator::mysql::create_expr::CreateTableExprTranslator;
use tests_fuzz::translator::mysql::insert_expr::InsertIntoExprTranslator;
use tests_fuzz::translator::DslTranslator;
-use tests_fuzz::utils::{init_greptime_connections_via_env, Connections};
+use tests_fuzz::utils::{
+ compact_table, flush_memtable, init_greptime_connections_via_env, Connections,
+};
use tests_fuzz::validator;
-
struct FuzzContext {
greptime: Pool<MySql>,
}
@@ -56,15 +58,15 @@ impl FuzzContext {
#[derive(Copy, Clone, Debug)]
struct FuzzInput {
seed: u64,
- rows: usize,
+ tables: usize,
}
impl Arbitrary<'_> for FuzzInput {
fn arbitrary(u: &mut Unstructured<'_>) -> arbitrary::Result<Self> {
let seed = u.int_in_range(u64::MIN..=u64::MAX)?;
let mut rng = ChaChaRng::seed_from_u64(seed);
- let rows = rng.gen_range(1..4096);
- Ok(FuzzInput { rows, seed })
+ let tables = rng.gen_range(1..256);
+ Ok(FuzzInput { tables, seed })
}
}
@@ -102,26 +104,26 @@ fn generate_create_logical_table_expr<R: Rng + 'static>(
}
fn generate_insert_expr<R: Rng + 'static>(
- input: FuzzInput,
+ rows: usize,
rng: &mut R,
table_ctx: TableContextRef,
) -> Result<InsertIntoExpr> {
let insert_generator = InsertExprGeneratorBuilder::default()
.omit_column_list(false)
.table_ctx(table_ctx)
- .rows(input.rows)
+ .rows(rows)
.value_generator(Box::new(generate_random_value_for_mysql))
.build()
.unwrap();
insert_generator.generate(rng)
}
-async fn execute_insert(ctx: FuzzContext, input: FuzzInput) -> Result<()> {
- info!("input: {input:?}");
- let mut rng = ChaChaRng::seed_from_u64(input.seed);
-
+async fn create_physical_table<R: Rng + 'static>(
+ ctx: &FuzzContext,
+ rng: &mut R,
+) -> Result<TableContextRef> {
// Create a physical table and a logical table on top of it
- let create_physical_table_expr = generate_create_physical_table_expr(&mut rng).unwrap();
+ let create_physical_table_expr = generate_create_physical_table_expr(rng).unwrap();
let translator = CreateTableExprTranslator;
let sql = translator.translate(&create_physical_table_expr)?;
let result = sqlx::query(&sql)
@@ -130,43 +132,17 @@ async fn execute_insert(ctx: FuzzContext, input: FuzzInput) -> Result<()> {
.context(error::ExecuteQuerySnafu { sql: &sql })?;
info!("Create physical table: {sql}, result: {result:?}");
- let physical_table_ctx = Arc::new(TableContext::from(&create_physical_table_expr));
-
- let create_logical_table_expr =
- generate_create_logical_table_expr(physical_table_ctx, &mut rng).unwrap();
- let sql = translator.translate(&create_logical_table_expr)?;
- let result = sqlx::query(&sql)
- .execute(&ctx.greptime)
- .await
- .context(error::ExecuteQuerySnafu { sql: &sql })?;
- info!("Create logical table: {sql}, result: {result:?}");
-
- let logical_table_ctx = Arc::new(TableContext::from(&create_logical_table_expr));
-
- let insert_expr = generate_insert_expr(input, &mut rng, logical_table_ctx)?;
- let translator = InsertIntoExprTranslator;
- let sql = translator.translate(&insert_expr)?;
- let result = ctx
- .greptime
- // unprepared query, see <https://github.com/GreptimeTeam/greptimedb/issues/3500>
- .execute(sql.as_str())
- .await
- .context(error::ExecuteQuerySnafu { sql: &sql })?;
-
- ensure!(
- result.rows_affected() == input.rows as u64,
- error::AssertSnafu {
- reason: format!(
- "expected rows affected: {}, actual: {}",
- input.rows,
- result.rows_affected(),
- )
- }
- );
+ Ok(Arc::new(TableContext::from(&create_physical_table_expr)))
+}
+async fn validate_values(
+ ctx: &FuzzContext,
+ logical_table_ctx: TableContextRef,
+ insert_expr: &InsertIntoExpr,
+) -> Result<()> {
// Validate inserted rows
// The order of inserted rows are random, so we need to sort the inserted rows by primary keys and time index for comparison
- let primary_keys_names = create_logical_table_expr
+ let primary_keys_names = logical_table_ctx
.columns
.iter()
.filter(|c| c.is_primary_key() || c.is_time_index())
@@ -198,14 +174,11 @@ async fn execute_insert(ctx: FuzzContext, input: FuzzInput) -> Result<()> {
let select_sql = format!(
"SELECT {} FROM {} ORDER BY {}",
- column_list, create_logical_table_expr.table_name, primary_keys_column_list
+ column_list, logical_table_ctx.name, primary_keys_column_list
);
let fetched_rows = validator::row::fetch_values(&ctx.greptime, select_sql.as_str()).await?;
- let mut expected_rows = replace_default(
- &insert_expr.values_list,
- &create_logical_table_expr,
- &insert_expr,
- );
+ let mut expected_rows =
+ replace_default(&insert_expr.values_list, &logical_table_ctx, insert_expr);
expected_rows.sort_by(|a, b| {
let a_keys: Vec<_> = primary_keys_idxs_in_insert_expr
.iter()
@@ -225,26 +198,95 @@ async fn execute_insert(ctx: FuzzContext, input: FuzzInput) -> Result<()> {
});
validator::row::assert_eq::<MySql>(&insert_expr.columns, &fetched_rows, &expected_rows)?;
- // Clean up logical table
- let sql = format!("DROP TABLE {}", create_logical_table_expr.table_name);
- let result = sqlx::query(&sql)
- .execute(&ctx.greptime)
+ Ok(())
+}
+
+async fn insert_values<R: Rng + 'static>(
+ ctx: &FuzzContext,
+ rng: &mut R,
+ logical_table_ctx: TableContextRef,
+) -> Result<InsertIntoExpr> {
+ let rows = rng.gen_range(1..2048);
+ let insert_expr = generate_insert_expr(rows, rng, logical_table_ctx.clone())?;
+ let translator = InsertIntoExprTranslator;
+ let sql = translator.translate(&insert_expr)?;
+ let result = ctx
+ .greptime
+ // unprepared query, see <https://github.com/GreptimeTeam/greptimedb/issues/3500>
+ .execute(sql.as_str())
.await
.context(error::ExecuteQuerySnafu { sql: &sql })?;
- info!(
- "Drop table: {}, result: {result:?}",
- create_logical_table_expr.table_name
+
+ ensure!(
+ result.rows_affected() == rows as u64,
+ error::AssertSnafu {
+ reason: format!(
+ "expected rows affected: {}, actual: {}",
+ rows,
+ result.rows_affected(),
+ )
+ }
);
+ Ok(insert_expr)
+}
+
+async fn execute_insert(ctx: FuzzContext, input: FuzzInput) -> Result<()> {
+ info!("input: {input:?}");
+ let mut rng = ChaChaRng::seed_from_u64(input.seed);
+ let physical_table_ctx = create_physical_table(&ctx, &mut rng).await?;
+
+ let mut tables = HashMap::with_capacity(input.tables);
+
+ // Create logical tables
+ for _ in 0..input.tables {
+ let translator = CreateTableExprTranslator;
+ let create_logical_table_expr =
+ generate_create_logical_table_expr(physical_table_ctx.clone(), &mut rng).unwrap();
+ if tables.contains_key(&create_logical_table_expr.table_name) {
+ // Ignores same name logical table.
+ continue;
+ }
+ let sql = translator.translate(&create_logical_table_expr)?;
+ let result = sqlx::query(&sql)
+ .execute(&ctx.greptime)
+ .await
+ .context(error::ExecuteQuerySnafu { sql: &sql })?;
+ info!("Create logical table: {sql}, result: {result:?}");
+ let logical_table_ctx = Arc::new(TableContext::from(&create_logical_table_expr));
+
+ let insert_expr = insert_values(&ctx, &mut rng, logical_table_ctx.clone()).await?;
+ validate_values(&ctx, logical_table_ctx.clone(), &insert_expr).await?;
+ tables.insert(logical_table_ctx.name.clone(), logical_table_ctx.clone());
+ if rng.gen_bool(0.1) {
+ flush_memtable(&ctx.greptime, &physical_table_ctx.name).await?;
+ validate_values(&ctx, logical_table_ctx.clone(), &insert_expr).await?;
+ }
+ if rng.gen_bool(0.1) {
+ compact_table(&ctx.greptime, &physical_table_ctx.name).await?;
+ validate_values(&ctx, logical_table_ctx.clone(), &insert_expr).await?;
+ }
+ }
+
+ // Clean up logical table
+ for (table_name, _) in tables {
+ let sql = format!("DROP TABLE {}", table_name);
+ let result = sqlx::query(&sql)
+ .execute(&ctx.greptime)
+ .await
+ .context(error::ExecuteQuerySnafu { sql: &sql })?;
+ info!("Drop table: {}, result: {result:?}", table_name);
+ }
+
// Clean up physical table
- let sql = format!("DROP TABLE {}", create_physical_table_expr.table_name);
+ let sql = format!("DROP TABLE {}", physical_table_ctx.name);
let result = sqlx::query(&sql)
.execute(&ctx.greptime)
.await
.context(error::ExecuteQuerySnafu { sql })?;
info!(
"Drop table: {}, result: {result:?}",
- create_physical_table_expr.table_name
+ physical_table_ctx.name
);
ctx.close().await;
|
feat
|
invoke `flush_table` and `compact_table` in fuzz tests (#4045)
|
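A standalone sketch of how the new helpers are driven from the fuzz targets above, assuming only a `sqlx` MySQL pool connected to a GreptimeDB instance; `maybe_flush_and_compact` and `table` are hypothetical names for illustration, and the 0.1 probabilities mirror the ones used in `fuzz_insert_logical_table`:

use rand::Rng;
use sqlx::{MySql, Pool};

// Occasionally flush and compact the table between fuzz iterations, so that
// later validation also exercises data that has been persisted to SST files.
async fn maybe_flush_and_compact(
    pool: &Pool<MySql>,
    table: &str,
    rng: &mut impl Rng,
) -> Result<(), sqlx::Error> {
    if rng.gen_bool(0.1) {
        // `flush_table` persists the table's memtable to SST files.
        let sql = format!("SELECT flush_table(\"{}\")", table);
        sqlx::query(&sql).execute(pool).await?;
    }
    if rng.gen_bool(0.1) {
        // `compact_table` triggers a compaction of the table's SST files.
        let sql = format!("SELECT compact_table(\"{}\")", table);
        sqlx::query(&sql).execute(pool).await?;
    }
    Ok(())
}

After either call, the target re-runs the same row validation, as the diff above does, so the comparison covers both in-memory and persisted data.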
4085fc78990b32265875777c29245d1a722f1a24
|
2022-11-27 07:48:39
|
Xuanwo
|
chore: Bump OpenDAL to v0.21.1 (#639)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index c4965d9e046b..064d71ec9873 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3661,9 +3661,9 @@ checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575"
[[package]]
name = "opendal"
-version = "0.20.1"
+version = "0.21.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "63b17b778cf11d10fbaaae4a5a0f82d5c6f527f96a9e4843f4e2dd6cd0dbe580"
+checksum = "8c9be1e30ca12b989107a5ee5bb75468a7f538059e43255ccd4743089b42aeeb"
dependencies = [
"anyhow",
"async-compat",
@@ -3687,7 +3687,6 @@ dependencies = [
"reqwest",
"serde",
"serde_json",
- "thiserror",
"time 0.3.14",
"tokio",
"tracing",
@@ -4727,9 +4726,9 @@ dependencies = [
[[package]]
name = "reqsign"
-version = "0.6.4"
+version = "0.6.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e22524be78041476bf8673f2720fa1000f34432b384d9ad5846b024569a4b150"
+checksum = "d34ea360414ee77ddab3a8360a0c241fc77ab5e27892dcde1d2cfcc29d4e0f55"
dependencies = [
"anyhow",
"backon",
diff --git a/src/catalog/Cargo.toml b/src/catalog/Cargo.toml
index dddf607078e0..5336ef98e41b 100644
--- a/src/catalog/Cargo.toml
+++ b/src/catalog/Cargo.toml
@@ -27,7 +27,7 @@ futures = "0.3"
futures-util = "0.3"
lazy_static = "1.4"
meta-client = { path = "../meta-client" }
-opendal = "0.20"
+opendal = "0.21"
regex = "1.6"
serde = "1.0"
serde_json = "1.0"
@@ -40,7 +40,7 @@ tokio = { version = "1.18", features = ["full"] }
chrono = "0.4"
log-store = { path = "../log-store" }
object-store = { path = "../object-store" }
-opendal = "0.20"
+opendal = "0.21"
storage = { path = "../storage" }
mito = { path = "../mito", features = ["test"] }
tempdir = "0.3"
diff --git a/src/catalog/src/system.rs b/src/catalog/src/system.rs
index 564acc7ba53b..0a2dac59cb7f 100644
--- a/src/catalog/src/system.rs
+++ b/src/catalog/src/system.rs
@@ -456,7 +456,7 @@ mod tests {
pub async fn prepare_table_engine() -> (TempDir, TableEngineRef) {
let dir = TempDir::new("system-table-test").unwrap();
let store_dir = dir.path().to_string_lossy();
- let accessor = opendal::services::fs::Builder::default()
+ let accessor = object_store::backend::fs::Builder::default()
.root(&store_dir)
.build()
.unwrap();
diff --git a/src/datanode/src/error.rs b/src/datanode/src/error.rs
index 181fa3f43f23..7a97abcad2c3 100644
--- a/src/datanode/src/error.rs
+++ b/src/datanode/src/error.rs
@@ -145,7 +145,7 @@ pub enum Error {
#[snafu(display("Failed to init backend, dir: {}, source: {}", dir, source))]
InitBackend {
dir: String,
- source: std::io::Error,
+ source: object_store::Error,
backtrace: Backtrace,
},
diff --git a/src/object-store/Cargo.toml b/src/object-store/Cargo.toml
index c85ae8c1729d..7bbaabdc5b53 100644
--- a/src/object-store/Cargo.toml
+++ b/src/object-store/Cargo.toml
@@ -6,7 +6,7 @@ license = "Apache-2.0"
[dependencies]
futures = { version = "0.3" }
-opendal = { version = "0.20", features = ["layers-tracing", "layers-metrics"]}
+opendal = { version = "0.21", features = ["layers-tracing", "layers-metrics"]}
tokio = { version = "1.0", features = ["full"] }
[dev-dependencies]
diff --git a/src/object-store/src/lib.rs b/src/object-store/src/lib.rs
index 2be43fa5c703..94155be6fe7c 100644
--- a/src/object-store/src/lib.rs
+++ b/src/object-store/src/lib.rs
@@ -12,10 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-pub use opendal::io_util::SeekableReader;
+pub use opendal::raw::SeekableReader;
pub use opendal::{
- layers, services, Accessor, Layer, Object, ObjectEntry, ObjectMetadata, ObjectMode,
- ObjectStreamer, Operator as ObjectStore,
+ layers, services, Error, ErrorKind, Layer, Object, ObjectLister, ObjectMetadata, ObjectMode,
+ Operator as ObjectStore,
};
pub mod backend;
pub mod util;
diff --git a/src/object-store/src/util.rs b/src/object-store/src/util.rs
index 01bb9e536045..298069ab3bf3 100644
--- a/src/object-store/src/util.rs
+++ b/src/object-store/src/util.rs
@@ -14,9 +14,9 @@
use futures::TryStreamExt;
-use crate::{ObjectEntry, ObjectStreamer};
+use crate::{Object, ObjectLister};
-pub async fn collect(stream: ObjectStreamer) -> Result<Vec<ObjectEntry>, std::io::Error> {
+pub async fn collect(stream: ObjectLister) -> Result<Vec<Object>, opendal::Error> {
stream.try_collect::<Vec<_>>().await
}
diff --git a/src/object-store/tests/object_store_test.rs b/src/object-store/tests/object_store_test.rs
index 27fa76262b06..58f91e5b88ba 100644
--- a/src/object-store/tests/object_store_test.rs
+++ b/src/object-store/tests/object_store_test.rs
@@ -17,7 +17,7 @@ use std::env;
use anyhow::Result;
use common_telemetry::logging;
use object_store::backend::{fs, s3};
-use object_store::{util, Object, ObjectMode, ObjectStore, ObjectStreamer};
+use object_store::{util, Object, ObjectLister, ObjectMode, ObjectStore};
use tempdir::TempDir;
async fn test_object_crud(store: &ObjectStore) -> Result<()> {
@@ -61,7 +61,7 @@ async fn test_object_list(store: &ObjectStore) -> Result<()> {
// List objects
let o: Object = store.object("/");
- let obs: ObjectStreamer = o.list().await?;
+ let obs: ObjectLister = o.list().await?;
let objects = util::collect(obs).await?;
assert_eq!(3, objects.len());
@@ -74,7 +74,7 @@ async fn test_object_list(store: &ObjectStore) -> Result<()> {
assert_eq!(1, objects.len());
// Only o2 is exists
- let o2 = &objects[0].clone().into_object();
+ let o2 = &objects[0].clone();
let bs = o2.read().await?;
assert_eq!("Hello, object2!", String::from_utf8(bs)?);
// Delete o2
diff --git a/src/storage/src/error.rs b/src/storage/src/error.rs
index d18054f6fb1e..f4465cb3155b 100644
--- a/src/storage/src/error.rs
+++ b/src/storage/src/error.rs
@@ -48,7 +48,7 @@ pub enum Error {
#[snafu(display("Failed to write columns, source: {}", source))]
FlushIo {
- source: std::io::Error,
+ source: object_store::Error,
backtrace: Backtrace,
},
@@ -62,28 +62,28 @@ pub enum Error {
ReadObject {
path: String,
backtrace: Backtrace,
- source: IoError,
+ source: object_store::Error,
},
#[snafu(display("Fail to write object into path: {}, source: {}", path, source))]
WriteObject {
path: String,
backtrace: Backtrace,
- source: IoError,
+ source: object_store::Error,
},
#[snafu(display("Fail to delete object from path: {}, source: {}", path, source))]
DeleteObject {
path: String,
backtrace: Backtrace,
- source: IoError,
+ source: object_store::Error,
},
#[snafu(display("Fail to list objects in path: {}, source: {}", path, source))]
ListObjects {
path: String,
backtrace: Backtrace,
- source: IoError,
+ source: object_store::Error,
},
#[snafu(display("Fail to create str from bytes, source: {}", source))]
@@ -457,7 +457,14 @@ mod tests {
))
}
- let error = throw_io_error().context(FlushIoSnafu).err().unwrap();
+ let error = throw_io_error()
+ .map_err(|err| {
+ object_store::Error::new(object_store::ErrorKind::Unexpected, "writer close failed")
+ .set_source(err)
+ })
+ .context(FlushIoSnafu)
+ .err()
+ .unwrap();
assert_eq!(StatusCode::StorageUnavailable, error.status_code());
assert!(error.backtrace_opt().is_some());
}
diff --git a/src/storage/src/manifest/storage.rs b/src/storage/src/manifest/storage.rs
index 27c924e56e19..744f97a6eb21 100644
--- a/src/storage/src/manifest/storage.rs
+++ b/src/storage/src/manifest/storage.rs
@@ -19,7 +19,7 @@ use async_trait::async_trait;
use common_telemetry::logging;
use futures::TryStreamExt;
use lazy_static::lazy_static;
-use object_store::{util, ObjectEntry, ObjectStore};
+use object_store::{util, Object, ObjectStore};
use regex::Regex;
use serde::{Deserialize, Serialize};
use snafu::{ensure, ResultExt};
@@ -63,7 +63,7 @@ pub fn is_delta_file(file_name: &str) -> bool {
}
pub struct ObjectStoreLogIterator {
- iter: Box<dyn Iterator<Item = (ManifestVersion, ObjectEntry)> + Send + Sync>,
+ iter: Box<dyn Iterator<Item = (ManifestVersion, Object)> + Send + Sync>,
}
#[async_trait]
@@ -72,8 +72,7 @@ impl LogIterator for ObjectStoreLogIterator {
async fn next_log(&mut self) -> Result<Option<(ManifestVersion, Vec<u8>)>> {
match self.iter.next() {
- Some((v, e)) => {
- let object = e.into_object();
+ Some((v, object)) => {
let bytes = object.read().await.context(ReadObjectSnafu {
path: object.path(),
})?;
@@ -156,7 +155,7 @@ impl ManifestLogStorage for ManifestObjectStore {
.await
.context(ListObjectsSnafu { path: &self.path })?;
- let mut entries: Vec<(ManifestVersion, ObjectEntry)> = streamer
+ let mut entries: Vec<(ManifestVersion, Object)> = streamer
.try_filter_map(|e| async move {
let file_name = e.name();
if is_delta_file(file_name) {
diff --git a/src/storage/src/sst/parquet.rs b/src/storage/src/sst/parquet.rs
index 1864cd6bcbf9..1244582b69a4 100644
--- a/src/storage/src/sst/parquet.rs
+++ b/src/storage/src/sst/parquet.rs
@@ -122,9 +122,19 @@ impl<'a> ParquetWriter<'a> {
sink.close().await.context(error::WriteParquetSnafu)?;
drop(sink);
- writer.close().await.context(error::WriteObjectSnafu {
- path: self.file_path,
- })
+ writer
+ .close()
+ .await
+ .map_err(|err| {
+ object_store::Error::new(
+ object_store::ErrorKind::Unexpected,
+ "writer close failed",
+ )
+ .set_source(err)
+ })
+ .context(error::WriteObjectSnafu {
+ path: self.file_path,
+ })
}
)
.map(|_| ())
|
chore
|
Bump OpenDAL to v0.21.1 (#639)
|
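A minimal sketch of the error-wrapping pattern this upgrade introduces, assuming only the `object-store` re-exports shown in the `lib.rs` hunk above (which forward opendal's `Error`/`ErrorKind`); call sites that still produce a `std::io::Error` wrap it before handing it to the snafu contexts that now expect `object_store::Error`:

use std::io;

// Wrap an io::Error from a writer into the error type expected by the
// storage error variants after the OpenDAL 0.21 upgrade.
fn wrap_writer_error(err: io::Error) -> object_store::Error {
    object_store::Error::new(object_store::ErrorKind::Unexpected, "writer close failed")
        .set_source(err)
}

This is the same pattern used in both the `parquet.rs` writer and the error-handling test above.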
08cc775d7c2742dc7b65b8b521a0593a4908ea52
|
2022-11-16 15:37:17
|
Ruihang Xia
|
chore: remove clean disk job (#543)
| false
|
diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml
index 951c49c40a67..af5d54e1b4cc 100644
--- a/.github/workflows/coverage.yml
+++ b/.github/workflows/coverage.yml
@@ -41,10 +41,6 @@ jobs:
components: llvm-tools-preview
- name: Rust Cache
uses: Swatinem/rust-cache@v2
- - name: Cleanup disk
- uses: curoky/[email protected]
- with:
- retain: 'rust'
- name: Install latest nextest release
uses: taiki-e/install-action@nextest
- name: Install cargo-llvm-cov
|
chore
|
remove clean disk job (#543)
|
056185eb24e44ad3972ba9cd785dda5f16737791
|
2022-06-20 11:39:31
|
evenyag
|
feat(storage): Implement snapshot scan for region (#46)
| false
|
diff --git a/src/common/recordbatch/src/recordbatch.rs b/src/common/recordbatch/src/recordbatch.rs
index 8fcf58fdef9f..8b6bb964259d 100644
--- a/src/common/recordbatch/src/recordbatch.rs
+++ b/src/common/recordbatch/src/recordbatch.rs
@@ -1,14 +1,12 @@
-use std::sync::Arc;
-
use datafusion_common::record_batch::RecordBatch as DfRecordBatch;
-use datatypes::schema::Schema;
+use datatypes::schema::SchemaRef;
use datatypes::vectors::Helper;
use serde::ser::{Error, SerializeStruct};
use serde::{Serialize, Serializer};
#[derive(Clone, Debug, PartialEq)]
pub struct RecordBatch {
- pub schema: Arc<Schema>,
+ pub schema: SchemaRef,
pub df_recordbatch: DfRecordBatch,
}
@@ -35,10 +33,13 @@ impl Serialize for RecordBatch {
#[cfg(test)]
mod tests {
+ use std::sync::Arc;
+
use arrow::array::UInt32Array;
use arrow::datatypes::{DataType, Field, Schema as ArrowSchema};
use datafusion_common::field_util::SchemaExt;
use datafusion_common::record_batch::RecordBatch as DfRecordBatch;
+ use datatypes::schema::Schema;
use super::*;
diff --git a/src/datatypes/src/vectors/builder.rs b/src/datatypes/src/vectors/builder.rs
index fc2c2bf87fb3..ce8318e6b1a2 100644
--- a/src/datatypes/src/vectors/builder.rs
+++ b/src/datatypes/src/vectors/builder.rs
@@ -5,8 +5,8 @@ use crate::scalars::ScalarVectorBuilder;
use crate::value::Value;
use crate::vectors::{
BinaryVectorBuilder, BooleanVectorBuilder, Float32VectorBuilder, Float64VectorBuilder,
- Int16VectorBuilder, Int32VectorBuilder, Int64VectorBuilder, Int8VectorBuilder, NullVector,
- StringVectorBuilder, UInt16VectorBuilder, UInt32VectorBuilder, UInt64VectorBuilder,
+ Int16VectorBuilder, Int32VectorBuilder, Int64VectorBuilder, Int8VectorBuilder, MutableVector,
+ NullVector, StringVectorBuilder, UInt16VectorBuilder, UInt32VectorBuilder, UInt64VectorBuilder,
UInt8VectorBuilder, VectorRef,
};
@@ -81,13 +81,32 @@ impl VectorBuilder {
}
}
+ pub fn data_type(&self) -> ConcreteDataType {
+ match self {
+ VectorBuilder::Null(_) => ConcreteDataType::null_datatype(),
+ VectorBuilder::Boolean(b) => b.data_type(),
+ VectorBuilder::UInt8(b) => b.data_type(),
+ VectorBuilder::UInt16(b) => b.data_type(),
+ VectorBuilder::UInt32(b) => b.data_type(),
+ VectorBuilder::UInt64(b) => b.data_type(),
+ VectorBuilder::Int8(b) => b.data_type(),
+ VectorBuilder::Int16(b) => b.data_type(),
+ VectorBuilder::Int32(b) => b.data_type(),
+ VectorBuilder::Int64(b) => b.data_type(),
+ VectorBuilder::Float32(b) => b.data_type(),
+ VectorBuilder::Float64(b) => b.data_type(),
+ VectorBuilder::String(b) => b.data_type(),
+ VectorBuilder::Binary(b) => b.data_type(),
+ }
+ }
+
pub fn push(&mut self, value: &Value) {
if value.is_null() {
self.push_null();
return;
}
- match (self, value) {
+ match (&mut *self, value) {
(VectorBuilder::Boolean(b), Value::Boolean(v)) => b.push(Some(*v)),
(VectorBuilder::UInt8(b), Value::UInt8(v)) => b.push(Some(*v)),
(VectorBuilder::UInt16(b), Value::UInt16(v)) => b.push(Some(*v)),
@@ -101,7 +120,11 @@ impl VectorBuilder {
(VectorBuilder::Float64(b), Value::Float64(v)) => b.push(Some(v.into_inner())),
(VectorBuilder::String(b), Value::String(v)) => b.push(Some(v.as_utf8())),
(VectorBuilder::Binary(b), Value::Binary(v)) => b.push(Some(v)),
- _ => panic!("Value {:?} does not match builder type", value),
+ _ => panic!(
+ "Value {:?} does not match builder type {:?}",
+ value,
+ self.data_type()
+ ),
}
}
@@ -152,7 +175,10 @@ mod tests {
macro_rules! impl_integer_builder_test {
($Type: ident, $datatype: ident) => {
- let mut builder = VectorBuilder::with_capacity(ConcreteDataType::$datatype(), 10);
+ let data_type = ConcreteDataType::$datatype();
+ let mut builder = VectorBuilder::with_capacity(data_type.clone(), 10);
+ assert_eq!(data_type, builder.data_type());
+
for i in 0..10 {
builder.push(&Value::$Type(i));
}
@@ -175,6 +201,7 @@ mod tests {
#[test]
fn test_null_vector_builder() {
let mut builder = VectorBuilder::new(ConcreteDataType::null_datatype());
+ assert_eq!(ConcreteDataType::null_datatype(), builder.data_type());
builder.push(&Value::Null);
let vector = builder.finish();
assert!(vector.is_null(0));
@@ -194,7 +221,10 @@ mod tests {
#[test]
fn test_float_vector_builder() {
- let mut builder = VectorBuilder::new(ConcreteDataType::float32_datatype());
+ let data_type = ConcreteDataType::float32_datatype();
+ let mut builder = VectorBuilder::new(data_type.clone());
+ assert_eq!(data_type, builder.data_type());
+
builder.push(&Value::Float32(OrderedFloat(1.0)));
let vector = builder.finish();
assert_eq!(Value::Float32(OrderedFloat(1.0)), vector.get(0));
@@ -207,8 +237,10 @@ mod tests {
#[test]
fn test_binary_vector_builder() {
+ let data_type = ConcreteDataType::binary_datatype();
let hello: &[u8] = b"hello";
- let mut builder = VectorBuilder::new(ConcreteDataType::binary_datatype());
+ let mut builder = VectorBuilder::new(data_type.clone());
+ assert_eq!(data_type, builder.data_type());
builder.push(&Value::Binary(hello.into()));
let vector = builder.finish();
assert_eq!(Value::Binary(hello.into()), vector.get(0));
@@ -216,8 +248,10 @@ mod tests {
#[test]
fn test_string_vector_builder() {
+ let data_type = ConcreteDataType::string_datatype();
let hello = "hello";
- let mut builder = VectorBuilder::new(ConcreteDataType::string_datatype());
+ let mut builder = VectorBuilder::new(data_type.clone());
+ assert_eq!(data_type, builder.data_type());
builder.push(&Value::String(hello.into()));
let vector = builder.finish();
assert_eq!(Value::String(hello.into()), vector.get(0));
diff --git a/src/storage/src/chunk.rs b/src/storage/src/chunk.rs
new file mode 100644
index 000000000000..74ad5c390caa
--- /dev/null
+++ b/src/storage/src/chunk.rs
@@ -0,0 +1,41 @@
+use async_trait::async_trait;
+use store_api::storage::{Chunk, ChunkReader, SchemaRef};
+
+use crate::error::{Error, Result};
+use crate::memtable::BatchIteratorPtr;
+
+pub struct ChunkReaderImpl {
+ schema: SchemaRef,
+ // Now we only read data from one memtable, so we just hold the memtable iterator here.
+ iter: BatchIteratorPtr,
+}
+
+#[async_trait]
+impl ChunkReader for ChunkReaderImpl {
+ type Error = Error;
+
+ fn schema(&self) -> &SchemaRef {
+ &self.schema
+ }
+
+ async fn next_chunk(&mut self) -> Result<Option<Chunk>> {
+ let mut batch = match self.iter.next()? {
+ Some(b) => b,
+ None => return Ok(None),
+ };
+
+ // TODO(yingwen): Check schema; for now we assume the schema is the same as the key columns
+ // combined with the value columns.
+ let mut columns = Vec::with_capacity(batch.keys.len() + batch.values.len());
+ columns.append(&mut batch.keys);
+ columns.append(&mut batch.values);
+
+ Ok(Some(Chunk::new(columns)))
+ }
+}
+
+impl ChunkReaderImpl {
+ pub fn new(schema: SchemaRef, iter: BatchIteratorPtr) -> ChunkReaderImpl {
+ ChunkReaderImpl { schema, iter }
+ }
+}
diff --git a/src/storage/src/lib.rs b/src/storage/src/lib.rs
index e0e88d67a75e..9fd1437c309c 100644
--- a/src/storage/src/lib.rs
+++ b/src/storage/src/lib.rs
@@ -1,11 +1,11 @@
//! Storage engine implementation.
+mod chunk;
mod engine;
mod error;
pub mod memtable;
pub mod metadata;
mod region;
-mod region_writer;
mod snapshot;
pub mod sync;
mod version;
diff --git a/src/storage/src/memtable.rs b/src/storage/src/memtable.rs
index d828398adf9f..6e77c221e212 100644
--- a/src/storage/src/memtable.rs
+++ b/src/storage/src/memtable.rs
@@ -9,7 +9,7 @@ use std::sync::Arc;
use datatypes::vectors::{UInt64Vector, UInt8Vector, VectorRef};
use snafu::Snafu;
-use store_api::storage::{SequenceNumber, ValueType};
+use store_api::storage::{consts, SequenceNumber, ValueType};
use crate::error::Result;
use crate::memtable::btree::BTreeMemtable;
@@ -41,11 +41,17 @@ pub type MemtableRef = Arc<dyn Memtable>;
pub struct IterContext {
/// The suggested batch size of the iterator.
pub batch_size: usize,
+ /// Max visible sequence (inclusive).
+ pub visible_sequence: SequenceNumber,
}
impl Default for IterContext {
fn default() -> Self {
- Self { batch_size: 256 }
+ Self {
+ batch_size: consts::READ_BATCH_SIZE,
+ // All data in memory is visible by default.
+ visible_sequence: SequenceNumber::MAX,
+ }
}
}
diff --git a/src/storage/src/memtable/btree.rs b/src/storage/src/memtable/btree.rs
index 4d3464a4d802..7a2bcb76bdbc 100644
--- a/src/storage/src/memtable/btree.rs
+++ b/src/storage/src/memtable/btree.rs
@@ -100,7 +100,7 @@ impl BTreeIterator {
} else {
map.range(..)
};
- let iter = MapIterWrapper::new(iter);
+ let iter = MapIterWrapper::new(iter, self.ctx.visible_sequence);
let mut keys = Vec::with_capacity(self.ctx.batch_size);
let mut sequences = UInt64VectorBuilder::with_capacity(self.ctx.batch_size);
@@ -116,13 +116,26 @@ impl BTreeIterator {
if keys.is_empty() {
return None;
}
- self.last_key = keys.last().map(|k| (*k).clone());
+ self.last_key = keys.last().map(|k| {
+ let mut last_key = (*k).clone();
+ last_key.reset_for_seek();
+ last_key
+ });
+
+ let key_data_types = self
+ .schema
+ .row_key_columns()
+ .map(|column_meta| column_meta.desc.data_type.clone());
+ let value_data_types = self
+ .schema
+ .value_columns()
+ .map(|column_meta| column_meta.desc.data_type.clone());
Some(Batch {
- keys: rows_to_vectors(keys.as_slice()),
+ keys: rows_to_vectors(key_data_types, keys.as_slice()),
sequences: sequences.finish(),
value_types: value_types.finish(),
- values: rows_to_vectors(values.as_slice()),
+ values: rows_to_vectors(value_data_types, values.as_slice()),
})
}
}
@@ -131,24 +144,37 @@ impl BTreeIterator {
struct MapIterWrapper<'a, InnerKey, RowValue> {
iter: btree_map::Range<'a, InnerKey, RowValue>,
prev_key: Option<InnerKey>,
+ visible_sequence: SequenceNumber,
}
impl<'a> MapIterWrapper<'a, InnerKey, RowValue> {
fn new(
iter: btree_map::Range<'a, InnerKey, RowValue>,
+ visible_sequence: SequenceNumber,
) -> MapIterWrapper<'a, InnerKey, RowValue> {
MapIterWrapper {
iter,
prev_key: None,
+ visible_sequence,
}
}
+
+ fn next_visible_entry(&mut self) -> Option<(&'a InnerKey, &'a RowValue)> {
+ for (k, v) in self.iter.by_ref() {
+ if k.is_visible(self.visible_sequence) {
+ return Some((k, v));
+ }
+ }
+
+ None
+ }
}
impl<'a> Iterator for MapIterWrapper<'a, InnerKey, RowValue> {
type Item = (&'a InnerKey, &'a RowValue);
fn next(&mut self) -> Option<(&'a InnerKey, &'a RowValue)> {
- let (mut current_key, mut current_value) = self.iter.next()?;
+ let (mut current_key, mut current_value) = self.next_visible_entry()?;
if self.prev_key.is_none() {
self.prev_key = Some(current_key.clone());
return Some((current_key, current_value));
@@ -156,7 +182,7 @@ impl<'a> Iterator for MapIterWrapper<'a, InnerKey, RowValue> {
let prev_key = self.prev_key.take().unwrap();
while prev_key.is_row_key_equal(current_key) {
- if let Some((next_key, next_value)) = self.iter.next() {
+ if let Some((next_key, next_value)) = self.next_visible_entry() {
(current_key, current_value) = (next_key, next_value);
} else {
return None;
@@ -256,9 +282,26 @@ impl PartialOrd for InnerKey {
}
impl InnerKey {
+ #[inline]
fn is_row_key_equal(&self, other: &InnerKey) -> bool {
self.row_key == other.row_key
}
+
+ #[inline]
+ fn is_visible(&self, sequence: SequenceNumber) -> bool {
+ self.sequence <= sequence
+ }
+
+ /// Reset the `InnerKey` so that we can use it to seek next key that
+ /// has different row key.
+ fn reset_for_seek(&mut self) {
+ // sequence, index_in_batch, value_type are ordered in desc order, so
+ // we can represent the last inner key with same row key by setting them
+ // to zero (Minimum value).
+ self.sequence = 0;
+ self.index_in_batch = 0;
+ self.value_type = ValueType::min_type();
+ }
}
#[derive(Clone, Debug)]
@@ -300,7 +343,10 @@ impl<'a> RowsProvider for &'a [&RowValue] {
}
}
-fn rows_to_vectors<T: RowsProvider>(provider: T) -> Vec<VectorRef> {
+fn rows_to_vectors<I: Iterator<Item = ConcreteDataType>, T: RowsProvider>(
+ data_types: I,
+ provider: T,
+) -> Vec<VectorRef> {
if provider.is_empty() {
return Vec::new();
}
@@ -308,8 +354,8 @@ fn rows_to_vectors<T: RowsProvider>(provider: T) -> Vec<VectorRef> {
let column_num = provider.column_num();
let row_num = provider.row_num();
let mut builders = Vec::with_capacity(column_num);
- for v in provider.row_by_index(0) {
- builders.push(VectorBuilder::with_capacity(v.data_type(), row_num));
+ for data_type in data_types {
+ builders.push(VectorBuilder::with_capacity(data_type, row_num));
}
let mut vectors = Vec::with_capacity(column_num);
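The `MapIterWrapper` and `InnerKey::is_visible` changes above give the memtable iterator MVCC-style reads: entries written with a sequence greater than the iterator's `visible_sequence` are skipped, and only the newest visible entry per row key is returned. A simplified, self-contained illustration of that rule over a plain `BTreeMap` (standard library only; the engine's real key also carries `index_in_batch` and a value type):

use std::collections::BTreeMap;

// Key ordered by row key first, then by descending sequence, so the first
// visible entry for a row key is the newest one the reader is allowed to see.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord)]
struct Key {
    row_key: u64,
    // Stored negated (u64::MAX - sequence) so larger sequences sort first.
    neg_sequence: u64,
}

impl Key {
    fn new(row_key: u64, sequence: u64) -> Self {
        Key { row_key, neg_sequence: u64::MAX - sequence }
    }

    fn sequence(&self) -> u64 {
        u64::MAX - self.neg_sequence
    }
}

// For each row key, return the newest value visible at `visible_sequence`,
// skipping invisible entries and older duplicates of the same row key.
fn visible_scan(map: &BTreeMap<Key, i64>, visible_sequence: u64) -> Vec<(u64, i64)> {
    let mut out: Vec<(u64, i64)> = Vec::new();
    for (key, value) in map {
        if key.sequence() > visible_sequence {
            continue; // not yet visible to this reader
        }
        if out.last().map(|(rk, _)| *rk) == Some(key.row_key) {
            continue; // a newer visible entry for this row key was already taken
        }
        out.push((key.row_key, *value));
    }
    out
}

fn main() {
    let mut map = BTreeMap::new();
    map.insert(Key::new(1000, 10), 1);
    map.insert(Key::new(1000, 11), 1231);
    map.insert(Key::new(2001, 10), 7);

    // A reader at sequence 10 must not observe the write made at sequence 11.
    assert_eq!(visible_scan(&map, 10), vec![(1000, 1), (2001, 7)]);
    // A reader at sequence 11 sees the newer value for row key 1000.
    assert_eq!(visible_scan(&map, 11), vec![(1000, 1231), (2001, 7)]);
}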
diff --git a/src/storage/src/memtable/tests.rs b/src/storage/src/memtable/tests.rs
index 13349a9449b2..81a38947dcd8 100644
--- a/src/storage/src/memtable/tests.rs
+++ b/src/storage/src/memtable/tests.rs
@@ -199,9 +199,12 @@ fn write_iter_memtable_case(ctx: &TestContext) {
&[None, Some(5), None], // values
);
- let batch_sizes = [1, 4, 8, 256];
+ let batch_sizes = [1, 4, 8, consts::READ_BATCH_SIZE];
for batch_size in batch_sizes {
- let iter_ctx = IterContext { batch_size };
+ let iter_ctx = IterContext {
+ batch_size,
+ ..Default::default()
+ };
let mut iter = ctx.memtable.iter(iter_ctx).unwrap();
assert_eq!(ctx.schema, *iter.schema());
assert_eq!(RowOrdering::Key, iter.ordering());
@@ -295,10 +298,168 @@ fn test_iter_batch_size() {
// Batch size [less than, equal to, greater than] total
let batch_sizes = [1, 6, 8];
for batch_size in batch_sizes {
- let iter_ctx = IterContext { batch_size };
+ let iter_ctx = IterContext {
+ batch_size,
+ ..Default::default()
+ };
let mut iter = ctx.memtable.iter(iter_ctx).unwrap();
check_iter_batch_size(&mut *iter, total, batch_size);
}
});
}
+
+#[test]
+fn test_duplicate_key_across_batch() {
+ let tester = MemtableTester::default();
+ tester.run_testcase(|ctx| {
+ write_kvs(
+ &*ctx.memtable,
+ 10, // sequence
+ ValueType::Put,
+ &[(1000, 1), (1000, 2), (2000, 1), (2001, 2)], // keys
+ &[Some(1), None, None, None], // values
+ );
+
+ write_kvs(
+ &*ctx.memtable,
+ 11, // sequence
+ ValueType::Put,
+ &[(1000, 1), (2001, 2)], // keys
+ &[Some(1231), Some(1232)], // values
+ );
+
+ let batch_sizes = [1, 2, 3, 4, 5];
+ for batch_size in batch_sizes {
+ let iter_ctx = IterContext {
+ batch_size,
+ ..Default::default()
+ };
+
+ let mut iter = ctx.memtable.iter(iter_ctx).unwrap();
+ check_iter_content(
+ &mut *iter,
+ &[(1000, 1), (1000, 2), (2000, 1), (2001, 2)], // keys
+ &[11, 10, 10, 11], // sequences
+ &[
+ ValueType::Put,
+ ValueType::Put,
+ ValueType::Put,
+ ValueType::Put,
+ ], // value types
+ &[Some(1231), None, None, Some(1232)], // values
+ );
+ }
+ });
+}
+
+#[test]
+fn test_duplicate_key_in_batch() {
+ let tester = MemtableTester::default();
+ tester.run_testcase(|ctx| {
+ write_kvs(
+ &*ctx.memtable,
+ 10, // sequence
+ ValueType::Put,
+ &[(1000, 1), (1000, 2), (1000, 1), (2001, 2)], // keys
+ &[None, None, Some(1234), None], // values
+ );
+
+ let batch_sizes = [1, 2, 3, 4, 5];
+ for batch_size in batch_sizes {
+ let iter_ctx = IterContext {
+ batch_size,
+ ..Default::default()
+ };
+
+ let mut iter = ctx.memtable.iter(iter_ctx).unwrap();
+ check_iter_content(
+ &mut *iter,
+ &[(1000, 1), (1000, 2), (2001, 2)], // keys
+ &[10, 10, 10], // sequences
+ &[ValueType::Put, ValueType::Put, ValueType::Put], // value types
+ &[Some(1234), None, None, None], // values
+ );
+ }
+ });
+}
+
+#[test]
+fn test_sequence_visibility() {
+ let tester = MemtableTester::default();
+ tester.run_testcase(|ctx| {
+ write_kvs(
+ &*ctx.memtable,
+ 10, // sequence
+ ValueType::Put,
+ &[(1000, 1), (1000, 2)], // keys
+ &[Some(1), Some(2)], // values
+ );
+
+ write_kvs(
+ &*ctx.memtable,
+ 11, // sequence
+ ValueType::Put,
+ &[(1000, 1), (1000, 2)], // keys
+ &[Some(11), Some(12)], // values
+ );
+
+ write_kvs(
+ &*ctx.memtable,
+ 12, // sequence
+ ValueType::Put,
+ &[(1000, 1), (1000, 2)], // keys
+ &[Some(21), Some(22)], // values
+ );
+
+ {
+ let iter_ctx = IterContext {
+ batch_size: 1,
+ visible_sequence: 9,
+ };
+
+ let mut iter = ctx.memtable.iter(iter_ctx).unwrap();
+ check_iter_content(
+ &mut *iter,
+ &[], // keys
+ &[], // sequences
+ &[], // value types
+ &[], // values
+ );
+ }
+
+ {
+ let iter_ctx = IterContext {
+ batch_size: 1,
+ visible_sequence: 10,
+ };
+
+ let mut iter = ctx.memtable.iter(iter_ctx).unwrap();
+ check_iter_content(
+ &mut *iter,
+ &[(1000, 1), (1000, 2)], // keys
+ &[10, 10], // sequences
+ &[ValueType::Put, ValueType::Put], // value types
+ &[Some(1), Some(2)], // values
+ );
+ }
+
+ {
+ let iter_ctx = IterContext {
+ batch_size: 1,
+ visible_sequence: 11,
+ };
+
+ let mut iter = ctx.memtable.iter(iter_ctx).unwrap();
+ check_iter_content(
+ &mut *iter,
+ &[(1000, 1), (1000, 2)], // keys
+ &[11, 11], // sequences
+ &[ValueType::Put, ValueType::Put], // value types
+ &[Some(11), Some(12)], // values
+ );
+ }
+ });
+}
+
+// TODO(yingwen): Test key overwrite in same batch.
diff --git a/src/storage/src/region.rs b/src/storage/src/region.rs
index b3bfdfff7bb2..84d86130b3f7 100644
--- a/src/storage/src/region.rs
+++ b/src/storage/src/region.rs
@@ -1,3 +1,7 @@
+#[cfg(test)]
+mod tests;
+mod writer;
+
use std::sync::Arc;
use async_trait::async_trait;
@@ -8,7 +12,7 @@ use tokio::sync::Mutex;
use crate::error::{self, Error, Result};
use crate::memtable::{DefaultMemtableBuilder, MemtableBuilder, MemtableSchema, MemtableSet};
use crate::metadata::{RegionMetaImpl, RegionMetadata};
-use crate::region_writer::RegionWriter;
+use crate::region::writer::RegionWriter;
use crate::snapshot::SnapshotImpl;
use crate::version::{VersionControl, VersionControlRef};
use crate::write_batch::WriteBatch;
@@ -39,7 +43,7 @@ impl Region for RegionImpl {
}
fn snapshot(&self, _ctx: &ReadContext) -> Result<SnapshotImpl> {
- unimplemented!()
+ Ok(self.inner.create_snapshot())
}
}
@@ -59,6 +63,12 @@ impl RegionImpl {
RegionImpl { inner }
}
+
+ #[cfg(test)]
+ #[inline]
+ fn committed_sequence(&self) -> store_api::storage::SequenceNumber {
+ self.inner.version.committed_sequence()
+ }
}
struct RegionInner {
@@ -87,36 +97,11 @@ impl RegionInner {
let mut writer = self.writer.lock().await;
writer.write(ctx, &self.version, request).await
}
-}
-#[cfg(test)]
-mod tests {
- use datatypes::type_id::LogicalTypeId;
- use store_api::storage::consts;
-
- use super::*;
- use crate::test_util::descriptor_util::RegionDescBuilder;
- use crate::test_util::schema_util;
-
- #[test]
- fn test_new_region() {
- let region_name = "region-0";
- let desc = RegionDescBuilder::new(region_name)
- .push_key_column(("k1", LogicalTypeId::Int32, false))
- .push_value_column(("v1", LogicalTypeId::Float32, true))
- .build();
- let metadata = desc.try_into().unwrap();
-
- let region = RegionImpl::new(region_name.to_string(), metadata);
-
- let expect_schema = schema_util::new_schema_ref(&[
- ("k1", LogicalTypeId::Int32, false),
- ("timestamp", LogicalTypeId::UInt64, false),
- (consts::VERSION_COLUMN_NAME, LogicalTypeId::UInt64, false),
- ("v1", LogicalTypeId::Float32, true),
- ]);
-
- assert_eq!(region_name, region.name());
- assert_eq!(expect_schema, *region.in_memory_metadata().schema());
+ fn create_snapshot(&self) -> SnapshotImpl {
+ let version = self.version.current();
+ let sequence = self.version.committed_sequence();
+
+ SnapshotImpl::new(version, sequence)
}
}
diff --git a/src/storage/src/region/tests.rs b/src/storage/src/region/tests.rs
new file mode 100644
index 000000000000..ce1f22c407a1
--- /dev/null
+++ b/src/storage/src/region/tests.rs
@@ -0,0 +1,31 @@
+//! Region tests.
+
+mod read_write;
+
+use datatypes::type_id::LogicalTypeId;
+use store_api::storage::consts;
+
+use super::*;
+use crate::test_util::{self, descriptor_util::RegionDescBuilder, schema_util};
+
+#[test]
+fn test_new_region() {
+ let region_name = "region-0";
+ let desc = RegionDescBuilder::new(region_name)
+ .push_key_column(("k1", LogicalTypeId::Int32, false))
+ .push_value_column(("v1", LogicalTypeId::Float32, true))
+ .build();
+ let metadata = desc.try_into().unwrap();
+
+ let region = RegionImpl::new(region_name.to_string(), metadata);
+
+ let expect_schema = schema_util::new_schema_ref(&[
+ ("k1", LogicalTypeId::Int32, false),
+ (test_util::TIMESTAMP_NAME, LogicalTypeId::Int64, false),
+ (consts::VERSION_COLUMN_NAME, LogicalTypeId::UInt64, false),
+ ("v1", LogicalTypeId::Float32, true),
+ ]);
+
+ assert_eq!(region_name, region.name());
+ assert_eq!(expect_schema, *region.in_memory_metadata().schema());
+}
diff --git a/src/storage/src/region/tests/read_write.rs b/src/storage/src/region/tests/read_write.rs
new file mode 100644
index 000000000000..2e3cc16ee782
--- /dev/null
+++ b/src/storage/src/region/tests/read_write.rs
@@ -0,0 +1,164 @@
+//! Region read/write tests.
+
+use std::sync::Arc;
+
+use datatypes::prelude::*;
+use datatypes::type_id::LogicalTypeId;
+use datatypes::vectors::Int64Vector;
+use store_api::storage::{
+ consts, Chunk, ChunkReader, PutOperation, ReadContext, Region, RegionMeta, ScanRequest,
+ SequenceNumber, Snapshot, WriteContext, WriteRequest, WriteResponse,
+};
+
+use crate::region::RegionImpl;
+use crate::test_util::{self, descriptor_util::RegionDescBuilder, write_batch_util};
+use crate::write_batch::{PutData, WriteBatch};
+
+/// Create a new region for read/write test
+fn new_region_for_rw(enable_version_column: bool) -> RegionImpl {
+ let region_name = "region-rw-0";
+ let desc = RegionDescBuilder::new(region_name)
+ .enable_version_column(enable_version_column)
+ .push_value_column(("v1", LogicalTypeId::Int64, true))
+ .build();
+ let metadata = desc.try_into().unwrap();
+
+ RegionImpl::new(region_name.to_string(), metadata)
+}
+
+fn new_write_batch_for_test(enable_version_column: bool) -> WriteBatch {
+ if enable_version_column {
+ write_batch_util::new_write_batch(&[
+ (test_util::TIMESTAMP_NAME, LogicalTypeId::Int64, false),
+ (consts::VERSION_COLUMN_NAME, LogicalTypeId::UInt64, false),
+ ("v1", LogicalTypeId::Int64, true),
+ ])
+ } else {
+ write_batch_util::new_write_batch(&[
+ (test_util::TIMESTAMP_NAME, LogicalTypeId::Int64, false),
+ ("v1", LogicalTypeId::Int64, true),
+ ])
+ }
+}
+
+fn new_put_data(data: &[(i64, Option<i64>)]) -> PutData {
+ let mut put_data = PutData::with_num_columns(2);
+
+ let timestamps = Int64Vector::from_values(data.iter().map(|kv| kv.0));
+ let values = Int64Vector::from_iter(data.iter().map(|kv| kv.1));
+
+ put_data
+ .add_key_column(test_util::TIMESTAMP_NAME, Arc::new(timestamps))
+ .unwrap();
+ put_data.add_value_column("v1", Arc::new(values)).unwrap();
+
+ put_data
+}
+
+fn append_chunk_to(chunk: &Chunk, dst: &mut Vec<(i64, Option<i64>)>) {
+ assert_eq!(2, chunk.columns.len());
+
+ let timestamps = chunk.columns[0]
+ .as_any()
+ .downcast_ref::<Int64Vector>()
+ .unwrap();
+ let values = chunk.columns[1]
+ .as_any()
+ .downcast_ref::<Int64Vector>()
+ .unwrap();
+ for (ts, value) in timestamps.iter_data().zip(values.iter_data()) {
+ dst.push((ts.unwrap(), value));
+ }
+}
+
+/// Test region without considering version column.
+struct Tester {
+ region: RegionImpl,
+ write_ctx: WriteContext,
+ read_ctx: ReadContext,
+}
+
+impl Default for Tester {
+ fn default() -> Tester {
+ Tester::new()
+ }
+}
+
+impl Tester {
+ fn new() -> Tester {
+ let region = new_region_for_rw(false);
+
+ Tester {
+ region,
+ write_ctx: WriteContext::default(),
+ read_ctx: ReadContext::default(),
+ }
+ }
+
+ /// Put without version specified.
+ ///
+ /// Format of data: (timestamp, v1), timestamp is key, v1 is value.
+ async fn put(&self, data: &[(i64, Option<i64>)]) -> WriteResponse {
+ // Build a batch without version.
+ let mut batch = new_write_batch_for_test(false);
+ let put_data = new_put_data(data);
+ batch.put(put_data).unwrap();
+
+ self.region.write(&self.write_ctx, batch).await.unwrap()
+ }
+
+ async fn full_scan(&self) -> Vec<(i64, Option<i64>)> {
+ let snapshot = self.region.snapshot(&self.read_ctx).unwrap();
+
+ let resp = snapshot
+ .scan(&self.read_ctx, ScanRequest::default())
+ .await
+ .unwrap();
+ let mut reader = resp.reader;
+
+ let metadata = self.region.in_memory_metadata();
+ assert_eq!(metadata.schema(), reader.schema());
+
+ let mut dst = Vec::new();
+ while let Some(chunk) = reader.next_chunk().await.unwrap() {
+ append_chunk_to(&chunk, &mut dst);
+ }
+
+ dst
+ }
+
+ fn committed_sequence(&self) -> SequenceNumber {
+ self.region.committed_sequence()
+ }
+}
+
+#[tokio::test]
+async fn test_simple_put_scan() {
+ let tester = Tester::default();
+
+ let data = vec![
+ (1000, Some(100)),
+ (1001, Some(101)),
+ (1002, None),
+ (1003, Some(103)),
+ (1004, Some(104)),
+ ];
+
+ tester.put(&data).await;
+
+ let output = tester.full_scan().await;
+ assert_eq!(data, output);
+}
+
+#[tokio::test]
+async fn test_sequence_increase() {
+ let tester = Tester::default();
+
+ let mut committed_sequence = tester.committed_sequence();
+ for i in 0..100 {
+ tester.put(&[(i, Some(1234))]).await;
+ committed_sequence += 1;
+
+ assert_eq!(committed_sequence, tester.committed_sequence());
+ }
+}
diff --git a/src/storage/src/region_writer.rs b/src/storage/src/region/writer.rs
similarity index 62%
rename from src/storage/src/region_writer.rs
rename to src/storage/src/region/writer.rs
index 3d4e7eb2c45b..899da728a1ed 100644
--- a/src/storage/src/region_writer.rs
+++ b/src/storage/src/region/writer.rs
@@ -1,4 +1,4 @@
-use store_api::storage::{SequenceNumber, WriteContext, WriteResponse};
+use store_api::storage::{WriteContext, WriteResponse};
use crate::error::Result;
use crate::memtable::{Inserter, MemtableBuilderRef};
@@ -7,15 +7,11 @@ use crate::write_batch::WriteBatch;
pub struct RegionWriter {
_memtable_builder: MemtableBuilderRef,
- last_sequence: SequenceNumber,
}
impl RegionWriter {
pub fn new(_memtable_builder: MemtableBuilderRef) -> RegionWriter {
- RegionWriter {
- _memtable_builder,
- last_sequence: 0,
- }
+ RegionWriter { _memtable_builder }
}
// TODO(yingwen): Support group commit so we can avoid taking mutable reference.
@@ -31,13 +27,20 @@ impl RegionWriter {
// TODO(yingwen): Write wal and get sequence.
let version = version_control.current();
- let memtables = &version.memtables;
+ let mem = version.mutable_memtable();
- let mem = memtables.mutable_memtable();
- self.last_sequence += 1;
- let mut inserter = Inserter::new(self.last_sequence);
+ let committed_sequence = version_control.committed_sequence();
+ // Sequence for current write batch.
+ let next_sequence = committed_sequence + 1;
+
+ // Insert batch into memtable.
+ let mut inserter = Inserter::new(next_sequence);
inserter.insert_memtable(&request, &**mem)?;
+ // Update committed_sequence to make current batch visible. The `&mut self` of RegionWriter
+ // guarantees the writer is exclusive.
+ version_control.set_committed_sequence(next_sequence);
+
Ok(WriteResponse {})
}
}
diff --git a/src/storage/src/snapshot.rs b/src/storage/src/snapshot.rs
index ff76c4947e57..7b89a19a8e0e 100644
--- a/src/storage/src/snapshot.rs
+++ b/src/storage/src/snapshot.rs
@@ -1,26 +1,68 @@
+use std::cmp;
+
use async_trait::async_trait;
use store_api::storage::{
- GetRequest, GetResponse, ReadContext, ScanRequest, ScanResponse, SchemaRef, Snapshot,
+ GetRequest, GetResponse, ReadContext, ScanRequest, ScanResponse, SchemaRef, SequenceNumber,
+ Snapshot,
};
+use crate::chunk::ChunkReaderImpl;
use crate::error::{Error, Result};
+use crate::memtable::IterContext;
+use crate::version::VersionRef;
/// [Snapshot] implementation.
-pub struct SnapshotImpl {}
+pub struct SnapshotImpl {
+ version: VersionRef,
+ /// Max sequence number (inclusive) visible to user.
+ visible_sequence: SequenceNumber,
+}
#[async_trait]
impl Snapshot for SnapshotImpl {
type Error = Error;
+ type Reader = ChunkReaderImpl;
fn schema(&self) -> &SchemaRef {
- unimplemented!()
+ self.version.schema()
}
- async fn scan(&self, _ctx: &ReadContext, _request: ScanRequest) -> Result<ScanResponse> {
- unimplemented!()
+ async fn scan(
+ &self,
+ ctx: &ReadContext,
+ request: ScanRequest,
+ ) -> Result<ScanResponse<ChunkReaderImpl>> {
+ let visible_sequence = self.sequence_to_read(request.sequence);
+
+ let mem = self.version.mutable_memtable();
+ let iter_ctx = IterContext {
+ batch_size: ctx.batch_size,
+ visible_sequence,
+ };
+ let iter = mem.iter(iter_ctx)?;
+
+ let reader = ChunkReaderImpl::new(self.version.schema().clone(), iter);
+
+ Ok(ScanResponse { reader })
}
async fn get(&self, _ctx: &ReadContext, _request: GetRequest) -> Result<GetResponse> {
unimplemented!()
}
}
+
+impl SnapshotImpl {
+ pub fn new(version: VersionRef, visible_sequence: SequenceNumber) -> SnapshotImpl {
+ SnapshotImpl {
+ version,
+ visible_sequence,
+ }
+ }
+
+ #[inline]
+ fn sequence_to_read(&self, request_sequence: Option<SequenceNumber>) -> SequenceNumber {
+ request_sequence
+ .map(|s| cmp::min(s, self.visible_sequence))
+ .unwrap_or(self.visible_sequence)
+ }
+}
diff --git a/src/storage/src/sync.rs b/src/storage/src/sync.rs
index 3455fe2ade5a..7408db7d9448 100644
--- a/src/storage/src/sync.rs
+++ b/src/storage/src/sync.rs
@@ -50,7 +50,7 @@ impl<T: Clone> CowCell<T> {
/// A RAII implementation of a write transaction of the [CowCell].
///
-/// When this txn is dropped (falls out of scope or commited), the lock will be
+/// When this txn is dropped (falls out of scope or committed), the lock will be
/// unlocked, but updates to the content won't be visible unless the txn is committed.
#[must_use = "if unused the CowCell will immediately unlock"]
pub struct TxnGuard<'a, T: Clone> {
diff --git a/src/storage/src/test_util.rs b/src/storage/src/test_util.rs
index dcd7bb18f2da..92828d8d5934 100644
--- a/src/storage/src/test_util.rs
+++ b/src/storage/src/test_util.rs
@@ -1,3 +1,5 @@
pub mod descriptor_util;
pub mod schema_util;
pub mod write_batch_util;
+
+pub const TIMESTAMP_NAME: &str = "timestamp";
diff --git a/src/storage/src/test_util/descriptor_util.rs b/src/storage/src/test_util/descriptor_util.rs
index 81da5cb52df2..b16b8aaf13b7 100644
--- a/src/storage/src/test_util/descriptor_util.rs
+++ b/src/storage/src/test_util/descriptor_util.rs
@@ -4,7 +4,7 @@ use store_api::storage::{
RegionDescriptor, RowKeyDescriptorBuilder,
};
-use crate::test_util::schema_util::ColumnDef;
+use crate::test_util::{self, schema_util::ColumnDef};
/// A RegionDescriptor builder for test.
pub struct RegionDescBuilder {
@@ -17,9 +17,13 @@ pub struct RegionDescBuilder {
impl RegionDescBuilder {
pub fn new<T: Into<String>>(name: T) -> Self {
let key_builder = RowKeyDescriptorBuilder::new(
- ColumnDescriptorBuilder::new(2, "timestamp", ConcreteDataType::uint64_datatype())
- .is_nullable(false)
- .build(),
+ ColumnDescriptorBuilder::new(
+ 2,
+ test_util::TIMESTAMP_NAME,
+ ConcreteDataType::int64_datatype(),
+ )
+ .is_nullable(false)
+ .build(),
);
Self {
diff --git a/src/storage/src/version.rs b/src/storage/src/version.rs
index 6ca8d6ce37e7..6267db5fd4e4 100644
--- a/src/storage/src/version.rs
+++ b/src/storage/src/version.rs
@@ -1,12 +1,26 @@
+//! Version control of storage.
+//!
+//! To read the latest data from `VersionControl`, we need to
+//! 1. Acquire `Version` from `VersionControl`.
+//! 2. Then acquire the last sequence.
+//!
+//! Reason: data may be flushed/compacted and some data with old sequences may be removed
+//! and become invisible between step 1 and 2, so we need to acquire the version first.
+
+use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;
-use crate::memtable::MemtableSet;
+use store_api::storage::{SchemaRef, SequenceNumber};
+
+use crate::memtable::{MemtableRef, MemtableSet};
use crate::metadata::{RegionMetadata, RegionMetadataRef};
use crate::sync::CowCell;
/// Controls version of in memory state for a region.
pub struct VersionControl {
version: CowCell<Version>,
+ /// Latest sequence that is committed and visible to user.
+ committed_sequence: AtomicU64,
}
impl VersionControl {
@@ -14,10 +28,12 @@ impl VersionControl {
pub fn new(metadata: RegionMetadata, memtables: MemtableSet) -> VersionControl {
VersionControl {
version: CowCell::new(Version::new(metadata, memtables)),
+ committed_sequence: AtomicU64::new(0),
}
}
/// Returns current version.
+ #[inline]
pub fn current(&self) -> VersionRef {
self.version.get()
}
@@ -27,6 +43,21 @@ impl VersionControl {
let version = self.current();
version.metadata.clone()
}
+
+ #[inline]
+ pub fn committed_sequence(&self) -> SequenceNumber {
+ self.committed_sequence.load(Ordering::Acquire)
+ }
+
+ /// Set committed sequence to `value`.
+ ///
+ /// External synchronization is required to ensure only one thread can update the
+ /// last sequence.
+ #[inline]
+ pub fn set_committed_sequence(&self, value: SequenceNumber) {
+ // Release ordering should be enough to guarantee sequence is updated at last.
+ self.committed_sequence.store(value, Ordering::Release);
+ }
}
pub type VersionControlRef = Arc<VersionControl>;
@@ -45,7 +76,7 @@ pub struct Version {
/// in Arc to allow sharing metadata and reuse metadata when creating a new
/// `Version`.
metadata: RegionMetadataRef,
- pub memtables: MemtableSet,
+ memtables: MemtableSet,
// TODO(yingwen): Also need to store last sequence to this version when switching
// version, so we can know the newest data can read from this version.
}
@@ -57,4 +88,43 @@ impl Version {
memtables,
}
}
+
+ #[inline]
+ pub fn schema(&self) -> &SchemaRef {
+ &self.metadata.schema
+ }
+
+ #[inline]
+ pub fn mutable_memtable(&self) -> &MemtableRef {
+ self.memtables.mutable_memtable()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::memtable::{DefaultMemtableBuilder, MemtableBuilder, MemtableSchema};
+ use crate::test_util::descriptor_util::RegionDescBuilder;
+
+ fn new_version_control() -> VersionControl {
+ let desc = RegionDescBuilder::new("version-test")
+ .enable_version_column(false)
+ .build();
+ let metadata: RegionMetadata = desc.try_into().unwrap();
+
+ let schema = MemtableSchema::new(metadata.columns_row_key.clone());
+ let memtable = DefaultMemtableBuilder {}.build(schema);
+ let memtable_set = MemtableSet::new(memtable);
+
+ VersionControl::new(metadata, memtable_set)
+ }
+
+ #[test]
+ fn test_version_control() {
+ let version_control = new_version_control();
+
+ assert_eq!(0, version_control.committed_sequence());
+ version_control.set_committed_sequence(12345);
+ assert_eq!(12345, version_control.committed_sequence());
+ }
}
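The module comment added to `version.rs` above prescribes the read protocol: take the current `Version` first, then read the committed sequence. A minimal standalone sketch of that protocol and its acquire/release pairing, using only the standard library (the real `VersionControl` holds region metadata and memtables rather than a placeholder struct):

use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::{Arc, RwLock};

// Placeholder for the versioned state; the real one holds metadata and memtables.
struct Version {
    name: String,
}

struct VersionControl {
    version: RwLock<Arc<Version>>,
    committed_sequence: AtomicU64,
}

impl VersionControl {
    fn current(&self) -> Arc<Version> {
        Arc::clone(&self.version.read().unwrap())
    }

    fn committed_sequence(&self) -> u64 {
        self.committed_sequence.load(Ordering::Acquire)
    }

    // Writer side: insert data first, then make it visible by bumping the sequence;
    // the release store pairs with the acquire load on the reader side.
    fn set_committed_sequence(&self, value: u64) {
        self.committed_sequence.store(value, Ordering::Release);
    }

    // Reader side: version first, then sequence. In the opposite order, the version
    // obtained later might already have had data at or below the previously read
    // sequence removed (e.g. by a flush), which is the hazard the module comment
    // above describes.
    fn snapshot(&self) -> (Arc<Version>, u64) {
        let version = self.current();
        let visible_sequence = self.committed_sequence();
        (version, visible_sequence)
    }
}

fn main() {
    let vc = VersionControl {
        version: RwLock::new(Arc::new(Version { name: "v0".to_string() })),
        committed_sequence: AtomicU64::new(0),
    };
    vc.set_committed_sequence(12345);
    let (version, sequence) = vc.snapshot();
    assert_eq!("v0", version.name);
    assert_eq!(12345, sequence);
}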
diff --git a/src/store-api/src/storage.rs b/src/store-api/src/storage.rs
index 29b4d479258f..3f39a8e56ab7 100644
--- a/src/store-api/src/storage.rs
+++ b/src/store-api/src/storage.rs
@@ -1,5 +1,6 @@
//! Storage APIs.
+mod chunk;
pub mod consts;
mod descriptors;
mod engine;
@@ -13,6 +14,7 @@ mod types;
pub use datatypes::data_type::ConcreteDataType;
pub use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
+pub use self::chunk::{Chunk, ChunkReader};
pub use self::descriptors::{
gen_region_name, ColumnDescriptor, ColumnDescriptorBuilder, ColumnFamilyDescriptor,
ColumnFamilyDescriptorBuilder, ColumnFamilyId, ColumnId, RegionDescriptor, RegionId,
diff --git a/src/store-api/src/storage/chunk.rs b/src/store-api/src/storage/chunk.rs
new file mode 100644
index 000000000000..f2eb031f6581
--- /dev/null
+++ b/src/store-api/src/storage/chunk.rs
@@ -0,0 +1,26 @@
+use async_trait::async_trait;
+use common_error::ext::ErrorExt;
+use datatypes::vectors::VectorRef;
+
+use crate::storage::SchemaRef;
+
+/// A bunch of rows in columnar format.
+pub struct Chunk {
+ pub columns: Vec<VectorRef>,
+ // TODO(yingwen): Sequences.
+}
+
+impl Chunk {
+ pub fn new(columns: Vec<VectorRef>) -> Chunk {
+ Chunk { columns }
+ }
+}
+
+#[async_trait]
+pub trait ChunkReader: Send {
+ type Error: ErrorExt + Send + Sync;
+
+ fn schema(&self) -> &SchemaRef;
+
+ async fn next_chunk(&mut self) -> Result<Option<Chunk>, Self::Error>;
+}
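
As a reading aid, below is a simplified, synchronous analogue of the `ChunkReader` contract added above; the real trait is async and yields `VectorRef` columns, while everything here (the `Column` alias, `VecChunkReader`) is an illustrative stand-in.

```rust
/// Illustrative column type standing in for `VectorRef`.
type Column = Vec<i64>;

/// A batch of rows in columnar format, mirroring the `Chunk` above.
struct Chunk {
    columns: Vec<Column>,
}

/// Synchronous stand-in for the async `ChunkReader` trait.
trait ChunkReader {
    /// Returns the next chunk, or `None` when the reader is exhausted.
    fn next_chunk(&mut self) -> Option<Chunk>;
}

/// Reader that drains a pre-built list of chunks.
struct VecChunkReader {
    chunks: std::vec::IntoIter<Chunk>,
}

impl ChunkReader for VecChunkReader {
    fn next_chunk(&mut self) -> Option<Chunk> {
        self.chunks.next()
    }
}

fn main() {
    let chunks = vec![Chunk {
        columns: vec![vec![1, 2, 3]],
    }];
    let mut reader = VecChunkReader {
        chunks: chunks.into_iter(),
    };
    while let Some(chunk) = reader.next_chunk() {
        println!("chunk with {} column(s)", chunk.columns.len());
    }
}
```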
diff --git a/src/store-api/src/storage/consts.rs b/src/store-api/src/storage/consts.rs
index 2effce471826..5863d3cb8f1d 100644
--- a/src/store-api/src/storage/consts.rs
+++ b/src/store-api/src/storage/consts.rs
@@ -2,7 +2,7 @@
use crate::storage::descriptors::{ColumnFamilyId, ColumnId};
-// Ids reserved for internal column families:
+// ---------- Ids reserved for internal column families ------------------------
/// Column family Id for row key columns.
///
@@ -12,16 +12,27 @@ pub const KEY_CF_ID: ColumnFamilyId = 0;
/// Id for default column family.
pub const DEFAULT_CF_ID: ColumnFamilyId = 1;
-// Ids reserved for internal columns:
+// -----------------------------------------------------------------------------
+
+// ---------- Ids reserved for internal columns --------------------------------
// TODO(yingwen): Reserve one bit for internal columns.
/// Column id for version column.
pub const VERSION_COLUMN_ID: ColumnId = 1;
-// Names reserved for internal columns:
+// -----------------------------------------------------------------------------
+
+// ---------- Names reserved for internal columns and engine -------------------
/// Name of version column.
pub const VERSION_COLUMN_NAME: &str = "__version";
-
// Names for default column family.
pub const DEFAULT_CF_NAME: &str = "default";
+
+// -----------------------------------------------------------------------------
+
+// ---------- Default options --------------------------------------------------
+
+pub const READ_BATCH_SIZE: usize = 256;
+
+// -----------------------------------------------------------------------------
diff --git a/src/store-api/src/storage/descriptors.rs b/src/store-api/src/storage/descriptors.rs
index 5295a457caca..c10e8b81b636 100644
--- a/src/store-api/src/storage/descriptors.rs
+++ b/src/store-api/src/storage/descriptors.rs
@@ -45,6 +45,7 @@ pub struct RowKeyDescriptor {
/// Enable version column in row key if this field is true.
///
/// The default value is true.
+ // FIXME(yingwen): Change default value to false (disable version column by default).
pub enable_version_column: bool,
}
diff --git a/src/store-api/src/storage/requests.rs b/src/store-api/src/storage/requests.rs
index bb413446646b..8d50f8aede6f 100644
--- a/src/store-api/src/storage/requests.rs
+++ b/src/store-api/src/storage/requests.rs
@@ -2,6 +2,8 @@ use common_error::ext::ErrorExt;
use datatypes::schema::SchemaRef;
use datatypes::vectors::VectorRef;
+use crate::storage::SequenceNumber;
+
/// Write request holds a collection of updates to apply to a region.
pub trait WriteRequest: Send {
type Error: ErrorExt + Send + Sync;
@@ -27,8 +29,14 @@ pub trait PutOperation: Send {
fn add_value_column(&mut self, name: &str, vector: VectorRef) -> Result<(), Self::Error>;
}
-#[derive(Debug)]
-pub struct ScanRequest {}
+#[derive(Debug, Default)]
+pub struct ScanRequest {
+ /// Max sequence number to read, None for latest sequence.
+ ///
+ /// Default is None. Only returns data whose sequence number is less than or
+ /// equal to the `sequence`.
+ pub sequence: Option<SequenceNumber>,
+}
#[derive(Debug)]
pub struct GetRequest {}
diff --git a/src/store-api/src/storage/responses.rs b/src/store-api/src/storage/responses.rs
index 823eb060d9fa..7cd094abfffc 100644
--- a/src/store-api/src/storage/responses.rs
+++ b/src/store-api/src/storage/responses.rs
@@ -2,7 +2,10 @@
pub struct WriteResponse {}
#[derive(Debug)]
-pub struct ScanResponse {}
+pub struct ScanResponse<R> {
+ /// Reader to read result chunks.
+ pub reader: R,
+}
#[derive(Debug)]
pub struct GetResponse {}
diff --git a/src/store-api/src/storage/snapshot.rs b/src/store-api/src/storage/snapshot.rs
index 9527ac86354e..7761c69c4d9e 100644
--- a/src/store-api/src/storage/snapshot.rs
+++ b/src/store-api/src/storage/snapshot.rs
@@ -2,6 +2,8 @@ use async_trait::async_trait;
use common_error::ext::ErrorExt;
use datatypes::schema::SchemaRef;
+use crate::storage::chunk::ChunkReader;
+use crate::storage::consts;
use crate::storage::requests::{GetRequest, ScanRequest};
use crate::storage::responses::{GetResponse, ScanResponse};
@@ -9,6 +11,7 @@ use crate::storage::responses::{GetResponse, ScanResponse};
#[async_trait]
pub trait Snapshot: Send + Sync {
type Error: ErrorExt + Send + Sync;
+ type Reader: ChunkReader;
fn schema(&self) -> &SchemaRef;
@@ -16,7 +19,7 @@ pub trait Snapshot: Send + Sync {
&self,
ctx: &ReadContext,
request: ScanRequest,
- ) -> Result<ScanResponse, Self::Error>;
+ ) -> Result<ScanResponse<Self::Reader>, Self::Error>;
async fn get(&self, ctx: &ReadContext, request: GetRequest)
-> Result<GetResponse, Self::Error>;
@@ -24,4 +27,15 @@ pub trait Snapshot: Send + Sync {
/// Context for read.
#[derive(Debug, Clone)]
-pub struct ReadContext {}
+pub struct ReadContext {
+ /// Suggested batch size of chunk.
+ pub batch_size: usize,
+}
+
+impl Default for ReadContext {
+ fn default() -> ReadContext {
+ ReadContext {
+ batch_size: consts::READ_BATCH_SIZE,
+ }
+ }
+}
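
A small usage sketch of the defaults introduced above. The structs below only mirror the `ScanRequest` and `ReadContext` shapes from this diff so the snippet stays self-contained; they are not the real `store-api` types.

```rust
type SequenceNumber = u64;

const READ_BATCH_SIZE: usize = 256;

/// Mirrors the `ScanRequest` in the diff: `None` means "read the latest data".
#[derive(Debug, Default)]
struct ScanRequest {
    sequence: Option<SequenceNumber>,
}

/// Mirrors the `ReadContext` in the diff with its suggested batch size.
#[derive(Debug, Clone)]
struct ReadContext {
    batch_size: usize,
}

impl Default for ReadContext {
    fn default() -> Self {
        Self {
            batch_size: READ_BATCH_SIZE,
        }
    }
}

fn main() {
    // Bound the scan: only data with sequence <= 42 is visible.
    let bounded = ScanRequest { sequence: Some(42) };
    // `None` means "read the latest committed data".
    let latest = ScanRequest::default();
    let ctx = ReadContext::default();
    println!("{bounded:?}, {latest:?}, batch_size = {}", ctx.batch_size);
}
```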
diff --git a/src/store-api/src/storage/types.rs b/src/store-api/src/storage/types.rs
index 361ba7caca66..91e6bec06159 100644
--- a/src/store-api/src/storage/types.rs
+++ b/src/store-api/src/storage/types.rs
@@ -15,4 +15,20 @@ impl ValueType {
pub fn as_u8(&self) -> u8 {
*self as u8
}
+
+ /// Minimum value type after casting to u8.
+ pub const fn min_type() -> ValueType {
+ ValueType::Put
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_value_type() {
+ assert_eq!(0, ValueType::Put.as_u8());
+ assert_eq!(0, ValueType::min_type().as_u8());
+ }
}
|
feat
|
Implement snapshot scan for region (#46)
|
9a68e4ca889498dc0f621f9a2bf1e1af90d3eb9a
|
2022-08-19 08:07:30
|
LFC
|
fix: correctly convert Value::Null to ScalarValue (#187)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 9675130378af..24c6a95375e8 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -759,6 +759,7 @@ name = "common-query"
version = "0.1.0"
dependencies = [
"arrow2",
+ "common-base",
"common-error",
"datafusion",
"datafusion-common",
diff --git a/src/common/query/Cargo.toml b/src/common/query/Cargo.toml
index 7623c1a332b5..3c369923e436 100644
--- a/src/common/query/Cargo.toml
+++ b/src/common/query/Cargo.toml
@@ -17,3 +17,4 @@ snafu = { version = "0.7", features = ["backtraces"] }
[dev-dependencies]
tokio = { version = "1.0", features = ["full"] }
+common-base = {path = "../base"}
diff --git a/src/common/query/src/error.rs b/src/common/query/src/error.rs
index fc418546effe..8c17fd4c828e 100644
--- a/src/common/query/src/error.rs
+++ b/src/common/query/src/error.rs
@@ -34,6 +34,12 @@ pub enum InnerError {
#[snafu(display("Failed to downcast vector: {}", err_msg))]
DowncastVector { err_msg: String },
+
+ #[snafu(display("Bad accumulator implementation: {}", err_msg))]
+ BadAccumulatorImpl {
+ err_msg: String,
+ backtrace: Backtrace,
+ },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -43,7 +49,8 @@ impl ErrorExt for InnerError {
match self {
InnerError::ExecuteFunction { .. }
| InnerError::CreateAccumulator { .. }
- | InnerError::DowncastVector { .. } => StatusCode::EngineExecuteQuery,
+ | InnerError::DowncastVector { .. }
+ | InnerError::BadAccumulatorImpl { .. } => StatusCode::EngineExecuteQuery,
InnerError::IntoVector { source, .. } => source.status_code(),
InnerError::FromScalarValue { source } => source.status_code(),
}
diff --git a/src/common/query/src/logical_plan/accumulator.rs b/src/common/query/src/logical_plan/accumulator.rs
index e7a0eeb307a8..b1d6f077fbee 100644
--- a/src/common/query/src/logical_plan/accumulator.rs
+++ b/src/common/query/src/logical_plan/accumulator.rs
@@ -7,11 +7,12 @@ use arrow::array::ArrayRef;
use datafusion_common::Result as DfResult;
use datafusion_expr::Accumulator as DfAccumulator;
use datatypes::prelude::*;
+use datatypes::value::ListValue;
use datatypes::vectors::Helper as VectorHelper;
use datatypes::vectors::VectorRef;
use snafu::ResultExt;
-use crate::error::{Error, FromScalarValueSnafu, IntoVectorSnafu, Result};
+use crate::error::{self, Error, FromScalarValueSnafu, IntoVectorSnafu, Result};
use crate::prelude::*;
pub type AggregateFunctionCreatorRef = Arc<dyn AggregateFunctionCreator>;
@@ -87,22 +88,49 @@ pub fn make_state_function(creator: Arc<dyn AggregateFunctionCreator>) -> StateT
Arc::new(move |_| Ok(Arc::new(creator.state_types()?)))
}
-/// A wrapper newtype for our Accumulator to DataFusion's Accumulator,
+/// A wrapper type that adapts our Accumulator to DataFusion's Accumulator,
 /// so that our Accumulator can be executed by the DataFusion query engine.
#[derive(Debug)]
-pub struct DfAccumulatorAdaptor(pub Box<dyn Accumulator>);
+pub struct DfAccumulatorAdaptor {
+ accumulator: Box<dyn Accumulator>,
+ creator: AggregateFunctionCreatorRef,
+}
+
+impl DfAccumulatorAdaptor {
+ pub fn new(accumulator: Box<dyn Accumulator>, creator: AggregateFunctionCreatorRef) -> Self {
+ Self {
+ accumulator,
+ creator,
+ }
+ }
+}
impl DfAccumulator for DfAccumulatorAdaptor {
fn state(&self) -> DfResult<Vec<ScalarValue>> {
- let state = self.0.state()?;
- Ok(state.into_iter().map(ScalarValue::from).collect())
+ let state_values = self.accumulator.state()?;
+ let state_types = self.creator.state_types()?;
+ if state_values.len() != state_types.len() {
+ return error::BadAccumulatorImplSnafu {
+ err_msg: format!("Accumulator {:?} returned a number of state values that does not match its number of state types.", self),
+ }
+ .fail()
+ .map_err(Error::from)?;
+ }
+ Ok(state_values
+ .into_iter()
+ .zip(state_types.iter())
+ .map(|(v, t)| try_into_scalar_value(v, t))
+ .collect::<Result<Vec<_>>>()
+ .map_err(Error::from)?)
}
fn update_batch(&mut self, values: &[ArrayRef]) -> DfResult<()> {
let vectors = VectorHelper::try_into_vectors(values)
.context(FromScalarValueSnafu)
.map_err(Error::from)?;
- self.0.update_batch(&vectors).map_err(|e| e.into())
+ self.accumulator
+ .update_batch(&vectors)
+ .map_err(|e| e.into())
}
fn merge_batch(&mut self, states: &[ArrayRef]) -> DfResult<()> {
@@ -116,10 +144,287 @@ impl DfAccumulator for DfAccumulatorAdaptor {
.map_err(Error::from)?,
);
}
- self.0.merge_batch(&vectors).map_err(|e| e.into())
+ self.accumulator.merge_batch(&vectors).map_err(|e| e.into())
}
fn evaluate(&self) -> DfResult<ScalarValue> {
- Ok(ScalarValue::from(self.0.evaluate()?))
+ let value = self.accumulator.evaluate()?;
+ let output_type = self.creator.output_type()?;
+ Ok(try_into_scalar_value(value, &output_type)?)
+ }
+}
+
+fn try_into_scalar_value(value: Value, datatype: &ConcreteDataType) -> Result<ScalarValue> {
+ if !matches!(value, Value::Null) && datatype != &value.data_type() {
+ return error::BadAccumulatorImplSnafu {
+ err_msg: format!(
+ "expect value to return datatype {:?}, actual: {:?}",
+ datatype,
+ value.data_type()
+ ),
+ }
+ .fail()?;
+ }
+
+ Ok(match value {
+ Value::Boolean(v) => ScalarValue::Boolean(Some(v)),
+ Value::UInt8(v) => ScalarValue::UInt8(Some(v)),
+ Value::UInt16(v) => ScalarValue::UInt16(Some(v)),
+ Value::UInt32(v) => ScalarValue::UInt32(Some(v)),
+ Value::UInt64(v) => ScalarValue::UInt64(Some(v)),
+ Value::Int8(v) => ScalarValue::Int8(Some(v)),
+ Value::Int16(v) => ScalarValue::Int16(Some(v)),
+ Value::Int32(v) => ScalarValue::Int32(Some(v)),
+ Value::Int64(v) => ScalarValue::Int64(Some(v)),
+ Value::Float32(v) => ScalarValue::Float32(Some(v.0)),
+ Value::Float64(v) => ScalarValue::Float64(Some(v.0)),
+ Value::String(v) => ScalarValue::LargeUtf8(Some(v.as_utf8().to_string())),
+ Value::Binary(v) => ScalarValue::LargeBinary(Some(v.to_vec())),
+ Value::Date(v) => ScalarValue::Date32(Some(v)),
+ Value::DateTime(v) => ScalarValue::Date64(Some(v)),
+ Value::Null => try_convert_null_value(datatype)?,
+ Value::List(list) => try_convert_list_value(list)?,
+ })
+}
+
+fn try_convert_null_value(datatype: &ConcreteDataType) -> Result<ScalarValue> {
+ Ok(match datatype {
+ ConcreteDataType::Boolean(_) => ScalarValue::Boolean(None),
+ ConcreteDataType::Int8(_) => ScalarValue::Int8(None),
+ ConcreteDataType::Int16(_) => ScalarValue::Int16(None),
+ ConcreteDataType::Int32(_) => ScalarValue::Int32(None),
+ ConcreteDataType::Int64(_) => ScalarValue::Int64(None),
+ ConcreteDataType::UInt8(_) => ScalarValue::UInt8(None),
+ ConcreteDataType::UInt16(_) => ScalarValue::UInt16(None),
+ ConcreteDataType::UInt32(_) => ScalarValue::UInt32(None),
+ ConcreteDataType::UInt64(_) => ScalarValue::UInt64(None),
+ ConcreteDataType::Float32(_) => ScalarValue::Float32(None),
+ ConcreteDataType::Float64(_) => ScalarValue::Float64(None),
+ ConcreteDataType::Binary(_) => ScalarValue::LargeBinary(None),
+ ConcreteDataType::String(_) => ScalarValue::LargeUtf8(None),
+ _ => {
+ return error::BadAccumulatorImplSnafu {
+ err_msg: format!(
+ "undefined transition from null value to datatype {:?}",
+ datatype
+ ),
+ }
+ .fail()?
+ }
+ })
+}
+
+fn try_convert_list_value(list: ListValue) -> Result<ScalarValue> {
+ let vs = if let Some(items) = list.items() {
+ Some(Box::new(
+ items
+ .iter()
+ .map(|v| try_into_scalar_value(v.clone(), list.datatype()))
+ .collect::<Result<Vec<_>>>()?,
+ ))
+ } else {
+ None
+ };
+ Ok(ScalarValue::List(
+ vs,
+ Box::new(list.datatype().as_arrow_type()),
+ ))
+}
+
+#[cfg(test)]
+mod tests {
+ use arrow::datatypes::DataType;
+ use common_base::bytes::{Bytes, StringBytes};
+ use datafusion_common::ScalarValue;
+ use datatypes::value::{ListValue, OrderedFloat};
+
+ use super::*;
+
+ #[test]
+ fn test_not_null_value_to_scalar_value() {
+ assert_eq!(
+ ScalarValue::Boolean(Some(true)),
+ try_into_scalar_value(Value::Boolean(true), &ConcreteDataType::boolean_datatype())
+ .unwrap()
+ );
+ assert_eq!(
+ ScalarValue::Boolean(Some(false)),
+ try_into_scalar_value(Value::Boolean(false), &ConcreteDataType::boolean_datatype())
+ .unwrap()
+ );
+ assert_eq!(
+ ScalarValue::UInt8(Some(u8::MIN + 1)),
+ try_into_scalar_value(
+ Value::UInt8(u8::MIN + 1),
+ &ConcreteDataType::uint8_datatype()
+ )
+ .unwrap()
+ );
+ assert_eq!(
+ ScalarValue::UInt16(Some(u16::MIN + 2)),
+ try_into_scalar_value(
+ Value::UInt16(u16::MIN + 2),
+ &ConcreteDataType::uint16_datatype()
+ )
+ .unwrap()
+ );
+ assert_eq!(
+ ScalarValue::UInt32(Some(u32::MIN + 3)),
+ try_into_scalar_value(
+ Value::UInt32(u32::MIN + 3),
+ &ConcreteDataType::uint32_datatype()
+ )
+ .unwrap()
+ );
+ assert_eq!(
+ ScalarValue::UInt64(Some(u64::MIN + 4)),
+ try_into_scalar_value(
+ Value::UInt64(u64::MIN + 4),
+ &ConcreteDataType::uint64_datatype()
+ )
+ .unwrap()
+ );
+ assert_eq!(
+ ScalarValue::Int8(Some(i8::MIN + 4)),
+ try_into_scalar_value(Value::Int8(i8::MIN + 4), &ConcreteDataType::int8_datatype())
+ .unwrap()
+ );
+ assert_eq!(
+ ScalarValue::Int16(Some(i16::MIN + 5)),
+ try_into_scalar_value(
+ Value::Int16(i16::MIN + 5),
+ &ConcreteDataType::int16_datatype()
+ )
+ .unwrap()
+ );
+ assert_eq!(
+ ScalarValue::Int32(Some(i32::MIN + 6)),
+ try_into_scalar_value(
+ Value::Int32(i32::MIN + 6),
+ &ConcreteDataType::int32_datatype()
+ )
+ .unwrap()
+ );
+ assert_eq!(
+ ScalarValue::Int64(Some(i64::MIN + 7)),
+ try_into_scalar_value(
+ Value::Int64(i64::MIN + 7),
+ &ConcreteDataType::int64_datatype()
+ )
+ .unwrap()
+ );
+ assert_eq!(
+ ScalarValue::Float32(Some(8.0f32)),
+ try_into_scalar_value(
+ Value::Float32(OrderedFloat(8.0f32)),
+ &ConcreteDataType::float32_datatype()
+ )
+ .unwrap()
+ );
+ assert_eq!(
+ ScalarValue::Float64(Some(9.0f64)),
+ try_into_scalar_value(
+ Value::Float64(OrderedFloat(9.0f64)),
+ &ConcreteDataType::float64_datatype()
+ )
+ .unwrap()
+ );
+ assert_eq!(
+ ScalarValue::LargeUtf8(Some("hello".to_string())),
+ try_into_scalar_value(
+ Value::String(StringBytes::from("hello")),
+ &ConcreteDataType::string_datatype()
+ )
+ .unwrap()
+ );
+ assert_eq!(
+ ScalarValue::LargeBinary(Some("world".as_bytes().to_vec())),
+ try_into_scalar_value(
+ Value::Binary(Bytes::from("world".as_bytes())),
+ &ConcreteDataType::binary_datatype()
+ )
+ .unwrap()
+ );
+ }
+
+ #[test]
+ fn test_null_value_to_scalar_value() {
+ assert_eq!(
+ ScalarValue::Boolean(None),
+ try_into_scalar_value(Value::Null, &ConcreteDataType::boolean_datatype()).unwrap()
+ );
+ assert_eq!(
+ ScalarValue::UInt8(None),
+ try_into_scalar_value(Value::Null, &ConcreteDataType::uint8_datatype()).unwrap()
+ );
+ assert_eq!(
+ ScalarValue::UInt16(None),
+ try_into_scalar_value(Value::Null, &ConcreteDataType::uint16_datatype()).unwrap()
+ );
+ assert_eq!(
+ ScalarValue::UInt32(None),
+ try_into_scalar_value(Value::Null, &ConcreteDataType::uint32_datatype()).unwrap()
+ );
+ assert_eq!(
+ ScalarValue::UInt64(None),
+ try_into_scalar_value(Value::Null, &ConcreteDataType::uint64_datatype()).unwrap()
+ );
+ assert_eq!(
+ ScalarValue::Int8(None),
+ try_into_scalar_value(Value::Null, &ConcreteDataType::int8_datatype()).unwrap()
+ );
+ assert_eq!(
+ ScalarValue::Int16(None),
+ try_into_scalar_value(Value::Null, &ConcreteDataType::int16_datatype()).unwrap()
+ );
+ assert_eq!(
+ ScalarValue::Int32(None),
+ try_into_scalar_value(Value::Null, &ConcreteDataType::int32_datatype()).unwrap()
+ );
+ assert_eq!(
+ ScalarValue::Int64(None),
+ try_into_scalar_value(Value::Null, &ConcreteDataType::int64_datatype()).unwrap()
+ );
+ assert_eq!(
+ ScalarValue::Float32(None),
+ try_into_scalar_value(Value::Null, &ConcreteDataType::float32_datatype()).unwrap()
+ );
+ assert_eq!(
+ ScalarValue::Float64(None),
+ try_into_scalar_value(Value::Null, &ConcreteDataType::float64_datatype()).unwrap()
+ );
+ assert_eq!(
+ ScalarValue::LargeUtf8(None),
+ try_into_scalar_value(Value::Null, &ConcreteDataType::string_datatype()).unwrap()
+ );
+ assert_eq!(
+ ScalarValue::LargeBinary(None),
+ try_into_scalar_value(Value::Null, &ConcreteDataType::binary_datatype()).unwrap()
+ );
+ }
+
+ #[test]
+ fn test_list_value_to_scalar_value() {
+ let items = Some(Box::new(vec![Value::Int32(-1), Value::Null]));
+ let list = Value::List(ListValue::new(items, ConcreteDataType::int32_datatype()));
+ let df_list = try_into_scalar_value(
+ list,
+ &ConcreteDataType::list_datatype(ConcreteDataType::int32_datatype()),
+ )
+ .unwrap();
+ assert!(matches!(df_list, ScalarValue::List(_, _)));
+ match df_list {
+ ScalarValue::List(vs, datatype) => {
+ assert_eq!(*datatype, DataType::Int32);
+
+ assert!(vs.is_some());
+ let vs = *vs.unwrap();
+ assert_eq!(
+ vs,
+ vec![ScalarValue::Int32(Some(-1)), ScalarValue::Int32(None)]
+ );
+ }
+ _ => unreachable!(),
+ }
}
}
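
The new conversion threads the expected datatype into `try_into_scalar_value` so that `Value::Null` becomes a typed `None` (via `try_convert_null_value`) rather than an arbitrary placeholder. The toy sketch below illustrates that design choice with hypothetical types that are unrelated to the real crates.

```rust
/// Toy value type; `Null` carries no type information of its own.
#[derive(Debug, PartialEq)]
enum Value {
    Null,
    Int32(i32),
}

/// Toy typed scalar, where even "no value" remembers its datatype.
#[derive(Debug, PartialEq)]
enum Scalar {
    Int32(Option<i32>),
    Utf8(Option<String>),
}

#[derive(Clone, Copy)]
enum DataType {
    Int32,
    Utf8,
}

/// Null can only be converted once the caller says which datatype it stands for.
fn to_scalar(value: Value, datatype: DataType) -> Result<Scalar, String> {
    Ok(match (value, datatype) {
        (Value::Int32(v), DataType::Int32) => Scalar::Int32(Some(v)),
        (Value::Null, DataType::Int32) => Scalar::Int32(None),
        (Value::Null, DataType::Utf8) => Scalar::Utf8(None),
        (v, _) => return Err(format!("value {v:?} does not match the expected datatype")),
    })
}

fn main() {
    assert_eq!(to_scalar(Value::Null, DataType::Int32), Ok(Scalar::Int32(None)));
    assert_eq!(to_scalar(Value::Null, DataType::Utf8), Ok(Scalar::Utf8(None)));
    assert!(to_scalar(Value::Int32(1), DataType::Utf8).is_err());
}
```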
diff --git a/src/common/query/src/logical_plan/mod.rs b/src/common/query/src/logical_plan/mod.rs
index ffc0f5f317c4..15f9f317f147 100644
--- a/src/common/query/src/logical_plan/mod.rs
+++ b/src/common/query/src/logical_plan/mod.rs
@@ -49,6 +49,7 @@ pub fn create_aggregate_function(
return_type,
accumulator,
state_type,
+ creator,
)
}
diff --git a/src/common/query/src/logical_plan/udaf.rs b/src/common/query/src/logical_plan/udaf.rs
index ff629a71a8c3..a4e8d867436a 100644
--- a/src/common/query/src/logical_plan/udaf.rs
+++ b/src/common/query/src/logical_plan/udaf.rs
@@ -15,6 +15,7 @@ use crate::function::{
to_df_return_type, AccumulatorFunctionImpl, ReturnTypeFunction, StateTypeFunction,
};
use crate::logical_plan::accumulator::DfAccumulatorAdaptor;
+use crate::logical_plan::AggregateFunctionCreatorRef;
use crate::signature::Signature;
/// Logical representation of a user-defined aggregate function (UDAF)
@@ -31,6 +32,8 @@ pub struct AggregateFunction {
pub accumulator: AccumulatorFunctionImpl,
/// the accumulator's state's description as a function of the return type
pub state_type: StateTypeFunction,
+ /// the creator that creates aggregate functions
+ creator: AggregateFunctionCreatorRef,
}
impl Debug for AggregateFunction {
@@ -57,6 +60,7 @@ impl AggregateFunction {
return_type: ReturnTypeFunction,
accumulator: AccumulatorFunctionImpl,
state_type: StateTypeFunction,
+ creator: AggregateFunctionCreatorRef,
) -> Self {
Self {
name,
@@ -64,6 +68,7 @@ impl AggregateFunction {
return_type,
accumulator,
state_type,
+ creator,
}
}
}
@@ -74,16 +79,20 @@ impl From<AggregateFunction> for DfAggregateUdf {
&udaf.name,
&udaf.signature.into(),
&to_df_return_type(udaf.return_type),
- &to_df_accumulator_func(udaf.accumulator),
+ &to_df_accumulator_func(udaf.accumulator, udaf.creator.clone()),
&to_df_state_type(udaf.state_type),
)
}
}
-fn to_df_accumulator_func(func: AccumulatorFunctionImpl) -> DfAccumulatorFunctionImplementation {
+fn to_df_accumulator_func(
+ accumulator: AccumulatorFunctionImpl,
+ creator: AggregateFunctionCreatorRef,
+) -> DfAccumulatorFunctionImplementation {
Arc::new(move || {
- let acc = func()?;
- Ok(Box::new(DfAccumulatorAdaptor(acc)))
+ let accumulator = accumulator()?;
+ let creator = creator.clone();
+ Ok(Box::new(DfAccumulatorAdaptor::new(accumulator, creator)))
})
}
diff --git a/src/datatypes/src/value.rs b/src/datatypes/src/value.rs
index d40e6cd72bed..80248980f9f6 100644
--- a/src/datatypes/src/value.rs
+++ b/src/datatypes/src/value.rs
@@ -1,7 +1,6 @@
use std::cmp::Ordering;
use common_base::bytes::{Bytes, StringBytes};
-use datafusion_common::ScalarValue;
pub use ordered_float::OrderedFloat;
use serde::{Deserialize, Serialize};
@@ -64,7 +63,8 @@ impl Value {
Value::Float64(_) => ConcreteDataType::float64_datatype(),
Value::String(_) => ConcreteDataType::string_datatype(),
Value::Binary(_) => ConcreteDataType::binary_datatype(),
- Value::Date(_) | Value::DateTime(_) | Value::List(_) => {
+ Value::List(list) => ConcreteDataType::list_datatype(list.datatype().clone()),
+ Value::Date(_) | Value::DateTime(_) => {
unimplemented!("Unsupported data type of value {:?}", self)
}
}
@@ -160,34 +160,6 @@ impl TryFrom<Value> for serde_json::Value {
}
}
-impl From<Value> for ScalarValue {
- fn from(value: Value) -> Self {
- match value {
- Value::Boolean(v) => ScalarValue::Boolean(Some(v)),
- Value::UInt8(v) => ScalarValue::UInt8(Some(v)),
- Value::UInt16(v) => ScalarValue::UInt16(Some(v)),
- Value::UInt32(v) => ScalarValue::UInt32(Some(v)),
- Value::UInt64(v) => ScalarValue::UInt64(Some(v)),
- Value::Int8(v) => ScalarValue::Int8(Some(v)),
- Value::Int16(v) => ScalarValue::Int16(Some(v)),
- Value::Int32(v) => ScalarValue::Int32(Some(v)),
- Value::Int64(v) => ScalarValue::Int64(Some(v)),
- Value::Float32(v) => ScalarValue::Float32(Some(v.0)),
- Value::Float64(v) => ScalarValue::Float64(Some(v.0)),
- Value::String(v) => ScalarValue::LargeUtf8(Some(v.as_utf8().to_string())),
- Value::Binary(v) => ScalarValue::LargeBinary(Some(v.to_vec())),
- Value::Date(v) => ScalarValue::Date32(Some(v)),
- Value::DateTime(v) => ScalarValue::Date64(Some(v)),
- Value::Null => ScalarValue::Boolean(None),
- Value::List(v) => ScalarValue::List(
- v.items
- .map(|vs| Box::new(vs.into_iter().map(ScalarValue::from).collect())),
- Box::new(v.datatype.as_arrow_type()),
- ),
- }
- }
-}
-
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ListValue {
/// List of nested Values (boxed to reduce size_of(Value))
@@ -204,6 +176,14 @@ impl ListValue {
pub fn new(items: Option<Box<Vec<Value>>>, datatype: ConcreteDataType) -> Self {
Self { items, datatype }
}
+
+ pub fn items(&self) -> &Option<Box<Vec<Value>>> {
+ &self.items
+ }
+
+ pub fn datatype(&self) -> &ConcreteDataType {
+ &self.datatype
+ }
}
impl PartialOrd for ListValue {
@@ -367,78 +347,6 @@ mod tests {
assert_eq!(Value::Binary(Bytes::from(world)), Value::from(world));
}
- #[test]
- fn test_value_into_scalar_value() {
- assert_eq!(
- ScalarValue::Boolean(Some(true)),
- Value::Boolean(true).into()
- );
- assert_eq!(
- ScalarValue::Boolean(Some(true)),
- Value::Boolean(true).into()
- );
-
- assert_eq!(
- ScalarValue::UInt8(Some(u8::MIN + 1)),
- Value::UInt8(u8::MIN + 1).into()
- );
- assert_eq!(
- ScalarValue::UInt16(Some(u16::MIN + 2)),
- Value::UInt16(u16::MIN + 2).into()
- );
- assert_eq!(
- ScalarValue::UInt32(Some(u32::MIN + 3)),
- Value::UInt32(u32::MIN + 3).into()
- );
- assert_eq!(
- ScalarValue::UInt64(Some(u64::MIN + 4)),
- Value::UInt64(u64::MIN + 4).into()
- );
-
- assert_eq!(
- ScalarValue::Int8(Some(i8::MIN + 4)),
- Value::Int8(i8::MIN + 4).into()
- );
- assert_eq!(
- ScalarValue::Int16(Some(i16::MIN + 5)),
- Value::Int16(i16::MIN + 5).into()
- );
- assert_eq!(
- ScalarValue::Int32(Some(i32::MIN + 6)),
- Value::Int32(i32::MIN + 6).into()
- );
- assert_eq!(
- ScalarValue::Int64(Some(i64::MIN + 7)),
- Value::Int64(i64::MIN + 7).into()
- );
-
- assert_eq!(
- ScalarValue::Float32(Some(8.0f32)),
- Value::Float32(OrderedFloat(8.0f32)).into()
- );
- assert_eq!(
- ScalarValue::Float64(Some(9.0f64)),
- Value::Float64(OrderedFloat(9.0f64)).into()
- );
-
- assert_eq!(
- ScalarValue::LargeUtf8(Some("hello".to_string())),
- Value::String(StringBytes::from("hello")).into()
- );
- assert_eq!(
- ScalarValue::LargeBinary(Some("world".as_bytes().to_vec())),
- Value::Binary(Bytes::from("world".as_bytes())).into()
- );
-
- assert_eq!(ScalarValue::Date32(Some(10i32)), Value::Date(10i32).into());
- assert_eq!(
- ScalarValue::Date64(Some(20i64)),
- Value::DateTime(20i64).into()
- );
-
- assert_eq!(ScalarValue::Boolean(None), Value::Null.into());
- }
-
fn to_json(value: Value) -> serde_json::Value {
value.try_into().unwrap()
}
|
fix
|
correctly convert Value::Null to ScalarValue (#187)
|
8d5d4000e67170d67f6189f3336c4901e985eb6b
|
2025-01-16 16:46:56
|
Ruihang Xia
|
feat: set default compaction parallelism (#5371)
| false
|
diff --git a/src/mito2/src/compaction.rs b/src/mito2/src/compaction.rs
index bf8df5fcec7a..6f9e5c0261ff 100644
--- a/src/mito2/src/compaction.rs
+++ b/src/mito2/src/compaction.rs
@@ -40,6 +40,7 @@ use snafu::{OptionExt, ResultExt};
use store_api::metadata::RegionMetadataRef;
use store_api::storage::{RegionId, TableId};
use table::predicate::Predicate;
+use task::MAX_PARALLEL_COMPACTION;
use tokio::sync::mpsc::{self, Sender};
use crate::access_layer::AccessLayerRef;
@@ -85,6 +86,7 @@ pub struct CompactionRequest {
pub(crate) manifest_ctx: ManifestContextRef,
pub(crate) listener: WorkerListener,
pub(crate) schema_metadata_manager: SchemaMetadataManagerRef,
+ pub(crate) max_parallelism: usize,
}
impl CompactionRequest {
@@ -145,6 +147,7 @@ impl CompactionScheduler {
waiter: OptionOutputTx,
manifest_ctx: &ManifestContextRef,
schema_metadata_manager: SchemaMetadataManagerRef,
+ max_parallelism: usize,
) -> Result<()> {
if let Some(status) = self.region_status.get_mut(®ion_id) {
// Region is compacting. Add the waiter to pending list.
@@ -163,6 +166,7 @@ impl CompactionScheduler {
manifest_ctx,
self.listener.clone(),
schema_metadata_manager,
+ max_parallelism,
);
self.region_status.insert(region_id, status);
let result = self
@@ -193,6 +197,7 @@ impl CompactionScheduler {
manifest_ctx,
self.listener.clone(),
schema_metadata_manager,
+ MAX_PARALLEL_COMPACTION,
);
// Try to schedule next compaction task for this region.
if let Err(e) = self
@@ -264,6 +269,7 @@ impl CompactionScheduler {
manifest_ctx,
listener,
schema_metadata_manager,
+ max_parallelism,
} = request;
let ttl = find_ttl(
@@ -294,6 +300,7 @@ impl CompactionScheduler {
manifest_ctx: manifest_ctx.clone(),
file_purger: None,
ttl: Some(ttl),
+ max_parallelism,
};
let picker_output = {
@@ -521,6 +528,7 @@ impl CompactionStatus {
manifest_ctx: &ManifestContextRef,
listener: WorkerListener,
schema_metadata_manager: SchemaMetadataManagerRef,
+ max_parallelism: usize,
) -> CompactionRequest {
let current_version = CompactionVersion::from(self.version_control.current().version);
let start_time = Instant::now();
@@ -535,6 +543,7 @@ impl CompactionStatus {
manifest_ctx: manifest_ctx.clone(),
listener,
schema_metadata_manager,
+ max_parallelism,
};
if let Some(pending) = self.pending_compaction.take() {
@@ -722,6 +731,7 @@ mod tests {
waiter,
&manifest_ctx,
schema_metadata_manager.clone(),
+ 1,
)
.await
.unwrap();
@@ -742,6 +752,7 @@ mod tests {
waiter,
&manifest_ctx,
schema_metadata_manager,
+ 1,
)
.await
.unwrap();
@@ -795,6 +806,7 @@ mod tests {
OptionOutputTx::none(),
&manifest_ctx,
schema_metadata_manager.clone(),
+ 1,
)
.await
.unwrap();
@@ -825,6 +837,7 @@ mod tests {
OptionOutputTx::none(),
&manifest_ctx,
schema_metadata_manager.clone(),
+ 1,
)
.await
.unwrap();
@@ -860,6 +873,7 @@ mod tests {
OptionOutputTx::none(),
&manifest_ctx,
schema_metadata_manager,
+ 1,
)
.await
.unwrap();
diff --git a/src/mito2/src/compaction/compactor.rs b/src/mito2/src/compaction/compactor.rs
index ceeb509bc17e..ae3aeea45b65 100644
--- a/src/mito2/src/compaction/compactor.rs
+++ b/src/mito2/src/compaction/compactor.rs
@@ -91,6 +91,12 @@ pub struct CompactionRegion {
pub(crate) current_version: CompactionVersion,
pub(crate) file_purger: Option<Arc<LocalFilePurger>>,
pub(crate) ttl: Option<TimeToLive>,
+
+ /// Controls the parallelism of this compaction task. Default is 1.
+ ///
+ /// The parallelism applies within this compaction task, not across different compaction tasks.
+ /// For example, different time windows of the same compaction task may be compacted in parallel.
+ pub max_parallelism: usize,
}
/// OpenCompactionRegionRequest represents the request to open a compaction region.
@@ -99,6 +105,7 @@ pub struct OpenCompactionRegionRequest {
pub region_id: RegionId,
pub region_dir: String,
pub region_options: RegionOptions,
+ pub max_parallelism: usize,
}
/// Open a compaction region from a compaction request.
@@ -205,6 +212,7 @@ pub async fn open_compaction_region(
current_version,
file_purger: Some(file_purger),
ttl: Some(ttl),
+ max_parallelism: req.max_parallelism,
})
}
@@ -266,6 +274,7 @@ impl Compactor for DefaultCompactor {
let mut futs = Vec::with_capacity(picker_output.outputs.len());
let mut compacted_inputs =
Vec::with_capacity(picker_output.outputs.iter().map(|o| o.inputs.len()).sum());
+ let internal_parallelism = compaction_region.max_parallelism.max(1);
for output in picker_output.outputs.drain(..) {
compacted_inputs.extend(output.inputs.iter().map(|f| f.meta_ref().clone()));
@@ -358,9 +367,8 @@ impl Compactor for DefaultCompactor {
}
let mut output_files = Vec::with_capacity(futs.len());
while !futs.is_empty() {
- let mut task_chunk =
- Vec::with_capacity(crate::compaction::task::MAX_PARALLEL_COMPACTION);
- for _ in 0..crate::compaction::task::MAX_PARALLEL_COMPACTION {
+ let mut task_chunk = Vec::with_capacity(internal_parallelism);
+ for _ in 0..internal_parallelism {
if let Some(task) = futs.pop() {
task_chunk.push(common_runtime::spawn_compact(task));
}
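
The loop above drains the pending compaction futures in chunks of at most `internal_parallelism` tasks and waits for each chunk before starting the next. Below is a simplified, thread-based sketch of the same pattern (the real code spawns async tasks on the compaction runtime); the function and its names are illustrative only.

```rust
use std::thread;

/// Run `tasks` with at most `parallelism` of them in flight at a time,
/// mirroring the chunked drain loop in the compactor above.
fn run_chunked<T, F>(mut tasks: Vec<F>, parallelism: usize) -> Vec<T>
where
    T: Send + 'static,
    F: FnOnce() -> T + Send + 'static,
{
    let parallelism = parallelism.max(1);
    let mut outputs = Vec::with_capacity(tasks.len());
    while !tasks.is_empty() {
        // Take up to `parallelism` tasks for this round.
        let mut handles = Vec::with_capacity(parallelism);
        for _ in 0..parallelism {
            if let Some(task) = tasks.pop() {
                handles.push(thread::spawn(task));
            }
        }
        // Wait for the whole chunk before starting the next one.
        for handle in handles {
            outputs.push(handle.join().expect("task panicked"));
        }
    }
    outputs
}

fn main() {
    let tasks: Vec<_> = (0..5).map(|i| move || i * 10).collect();
    let results = run_chunked(tasks, 2);
    println!("{results:?}");
}
```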
diff --git a/src/mito2/src/compaction/task.rs b/src/mito2/src/compaction/task.rs
index c76595097753..f083e09587fe 100644
--- a/src/mito2/src/compaction/task.rs
+++ b/src/mito2/src/compaction/task.rs
@@ -32,7 +32,7 @@ use crate::request::{
use crate::worker::WorkerListener;
/// Maximum number of compaction tasks in parallel.
-pub const MAX_PARALLEL_COMPACTION: usize = 8;
+pub const MAX_PARALLEL_COMPACTION: usize = 1;
pub(crate) struct CompactionTaskImpl {
pub compaction_region: CompactionRegion,
diff --git a/src/mito2/src/engine/open_test.rs b/src/mito2/src/engine/open_test.rs
index 55bae04633f0..32b963b5067b 100644
--- a/src/mito2/src/engine/open_test.rs
+++ b/src/mito2/src/engine/open_test.rs
@@ -464,6 +464,7 @@ async fn test_open_compaction_region() {
region_id,
region_dir: region_dir.clone(),
region_options: RegionOptions::default(),
+ max_parallelism: 1,
};
let compaction_region = open_compaction_region(
diff --git a/src/mito2/src/worker/handle_compaction.rs b/src/mito2/src/worker/handle_compaction.rs
index 292eb237357b..6fb9f640f7c0 100644
--- a/src/mito2/src/worker/handle_compaction.rs
+++ b/src/mito2/src/worker/handle_compaction.rs
@@ -45,6 +45,8 @@ impl<S> RegionWorkerLoop<S> {
sender,
®ion.manifest_ctx,
self.schema_metadata_manager.clone(),
+ // TODO(yingwen): expose this to frontend
+ 1,
)
.await
{
@@ -113,6 +115,7 @@ impl<S> RegionWorkerLoop<S> {
OptionOutputTx::none(),
®ion.manifest_ctx,
self.schema_metadata_manager.clone(),
+ 1,
)
.await
{
|
feat
|
set default compaction parallelism (#5371)
|
e1d2f9a596fec42b8512a39b0529f9af1141a6ad
|
2024-04-08 23:45:00
|
tison
|
chore: improve contributor click in git-cliff (#3672)
| false
|
diff --git a/cliff.toml b/cliff.toml
index 8e25a4147659..91584d7b7100 100644
--- a/cliff.toml
+++ b/cliff.toml
@@ -53,7 +53,7 @@ Release date: {{ timestamp | date(format="%B %d, %Y") }}
## New Contributors
{% endif -%}
{% for contributor in github.contributors | filter(attribute="is_first_time", value=true) %}
- * @{{ contributor.username }} made their first contribution
+ * [@{{ contributor.username }}](https://github.com/{{ contributor.username }}) made their first contribution
{%- if contributor.pr_number %} in \
[#{{ contributor.pr_number }}]({{ self::remote_url() }}/pull/{{ contributor.pr_number }}) \
{%- endif %}
@@ -65,7 +65,17 @@ Release date: {{ timestamp | date(format="%B %d, %Y") }}
We would like to thank the following contributors from the GreptimeDB community:
-{{ github.contributors | map(attribute="username") | join(sep=", ") }}
+{%- set contributors = github.contributors | sort(attribute="username") | map(attribute="username") -%}
+{%- set bots = ['dependabot[bot]'] %}
+
+{% for contributor in contributors %}
+{%- if bots is containing(contributor) -%}{% continue %}{%- endif -%}
+{%- if loop.first -%}
+ [@{{ contributor }}](https://github.com/{{ contributor }})
+{%- else -%}
+ , [@{{ contributor }}](https://github.com/{{ contributor }})
+{%- endif -%}
+{%- endfor %}
{%- endif %}
{% raw %}\n{% endraw %}
|
chore
|
improve contributor click in git-cliff (#3672)
|
fe954b78a27b279a8ead4287bc3fa91b3a1f6836
|
2023-09-12 18:27:15
|
LFC
|
refactor: system tables in new region server (#2344)
| false
|
diff --git a/src/catalog/src/error.rs b/src/catalog/src/error.rs
index 9fe1f5cbfb2d..d97ccd544a4f 100644
--- a/src/catalog/src/error.rs
+++ b/src/catalog/src/error.rs
@@ -22,8 +22,6 @@ use datatypes::prelude::ConcreteDataType;
use snafu::{Location, Snafu};
use tokio::task::JoinError;
-use crate::DeregisterTableRequest;
-
#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
pub enum Error {
@@ -179,20 +177,6 @@ pub enum Error {
source: table::error::Error,
},
- #[snafu(display(
- "Failed to deregister table, request: {:?}, source: {}",
- request,
- source
- ))]
- DeregisterTable {
- request: DeregisterTableRequest,
- location: Location,
- source: table::error::Error,
- },
-
- #[snafu(display("Illegal catalog manager state: {}", msg))]
- IllegalManagerState { location: Location, msg: String },
-
#[snafu(display("Failed to scan system catalog table, source: {}", source))]
SystemCatalogTableScan {
location: Location,
@@ -269,7 +253,6 @@ impl ErrorExt for Error {
Error::InvalidKey { .. }
| Error::SchemaNotFound { .. }
| Error::TableNotFound { .. }
- | Error::IllegalManagerState { .. }
| Error::CatalogNotFound { .. }
| Error::InvalidEntryType { .. }
| Error::ParallelOpenTable { .. } => StatusCode::Unexpected,
@@ -302,7 +285,6 @@ impl ErrorExt for Error {
| Error::InsertCatalogRecord { source, .. }
| Error::OpenTable { source, .. }
| Error::CreateTable { source, .. }
- | Error::DeregisterTable { source, .. }
| Error::TableSchemaMismatch { source, .. } => source.status_code(),
Error::MetaSrv { source, .. } => source.status_code(),
diff --git a/src/catalog/src/lib.rs b/src/catalog/src/lib.rs
index 81b4b742b89a..4f2af428af3b 100644
--- a/src/catalog/src/lib.rs
+++ b/src/catalog/src/lib.rs
@@ -44,36 +44,40 @@ pub mod tables;
pub trait CatalogManager: Send + Sync {
fn as_any(&self) -> &dyn Any;
- /// Starts a catalog manager.
- async fn start(&self) -> Result<()>;
-
- /// Registers a catalog to catalog manager, returns whether the catalog exist before.
- async fn register_catalog(self: Arc<Self>, name: String) -> Result<bool>;
+ /// Register a local catalog.
+ ///
+ /// # Returns
+ ///
+ /// Whether the catalog is registered.
+ fn register_local_catalog(&self, name: &str) -> Result<bool>;
- /// Register a schema with catalog name and schema name. Retuens whether the
- /// schema registered.
+ /// Register a local schema.
+ ///
+ /// # Returns
+ ///
+ /// Whether the schema is registered.
///
/// # Errors
///
/// This method will/should fail if catalog not exist
- async fn register_schema(&self, request: RegisterSchemaRequest) -> Result<bool>;
+ fn register_local_schema(&self, request: RegisterSchemaRequest) -> Result<bool>;
    /// Deregisters a schema within the given catalog from the catalog manager
- async fn deregister_schema(&self, request: DeregisterSchemaRequest) -> Result<bool>;
+ fn deregister_local_schema(&self, request: DeregisterSchemaRequest) -> Result<bool>;
- /// Registers a table within given catalog/schema to catalog manager,
- /// returns whether the table registered.
+ /// Registers a local table.
+ ///
+ /// # Returns
+ ///
+ /// Whether the table is registered.
///
/// # Errors
///
/// This method will/should fail if catalog or schema not exist
- async fn register_table(&self, request: RegisterTableRequest) -> Result<bool>;
+ fn register_local_table(&self, request: RegisterTableRequest) -> Result<bool>;
/// Deregisters a table within given catalog/schema to catalog manager
- async fn deregister_table(&self, request: DeregisterTableRequest) -> Result<()>;
-
- /// Rename a table to [RenameTableRequest::new_table_name], returns whether the table is renamed.
- async fn rename_table(&self, request: RenameTableRequest) -> Result<bool>;
+ fn deregister_local_table(&self, request: DeregisterTableRequest) -> Result<()>;
async fn catalog_names(&self) -> Result<Vec<String>>;
@@ -160,7 +164,7 @@ pub struct RegisterSchemaRequest {
pub schema: String,
}
-pub(crate) async fn handle_system_table_request<'a, M: CatalogManager>(
+pub(crate) async fn handle_system_table_request<'a, M: CatalogManager + ?Sized>(
manager: &'a M,
engine: TableEngineRef,
sys_table_requests: &'a mut Vec<RegisterSystemTableRequest>,
@@ -185,15 +189,13 @@ pub(crate) async fn handle_system_table_request<'a, M: CatalogManager>(
table_name,
),
})?;
- let _ = manager
- .register_table(RegisterTableRequest {
- catalog: catalog_name.clone(),
- schema: schema_name.clone(),
- table_name: table_name.clone(),
- table_id,
- table: table.clone(),
- })
- .await?;
+ manager.register_local_table(RegisterTableRequest {
+ catalog: catalog_name.clone(),
+ schema: schema_name.clone(),
+ table_name: table_name.clone(),
+ table_id,
+ table: table.clone(),
+ })?;
info!("Created and registered system table: {table_name}");
table
};
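
The refactor above makes local registration synchronous and hierarchical: a catalog must exist before its schemas, and a schema before its tables, which is exactly the order `handle_system_table_request` relies on. A minimal in-memory sketch of that ordering (not the actual `MemoryCatalogManager`) follows, with illustrative names.

```rust
use std::collections::hash_map::Entry;
use std::collections::HashMap;

/// catalog -> schema -> table names, standing in for the real manager state.
#[derive(Default)]
struct LocalCatalogs {
    catalogs: HashMap<String, HashMap<String, Vec<String>>>,
}

impl LocalCatalogs {
    /// Returns true only if the catalog did not exist before.
    fn register_local_catalog(&mut self, name: &str) -> bool {
        match self.catalogs.entry(name.to_string()) {
            Entry::Occupied(_) => false,
            Entry::Vacant(e) => {
                e.insert(HashMap::new());
                true
            }
        }
    }

    /// Fails if the parent catalog has not been registered yet.
    fn register_local_schema(&mut self, catalog: &str, schema: &str) -> Result<bool, String> {
        let schemas = self
            .catalogs
            .get_mut(catalog)
            .ok_or_else(|| format!("catalog {catalog} not found"))?;
        Ok(match schemas.entry(schema.to_string()) {
            Entry::Occupied(_) => false,
            Entry::Vacant(e) => {
                e.insert(Vec::new());
                true
            }
        })
    }

    /// Fails if the catalog or the schema is missing.
    fn register_local_table(
        &mut self,
        catalog: &str,
        schema: &str,
        table: &str,
    ) -> Result<bool, String> {
        let tables = self
            .catalogs
            .get_mut(catalog)
            .ok_or_else(|| format!("catalog {catalog} not found"))?
            .get_mut(schema)
            .ok_or_else(|| format!("schema {schema} not found"))?;
        tables.push(table.to_string());
        Ok(true)
    }
}

fn main() -> Result<(), String> {
    let mut catalogs = LocalCatalogs::default();
    assert!(catalogs.register_local_catalog("greptime"));
    catalogs.register_local_schema("greptime", "public")?;
    catalogs.register_local_table("greptime", "public", "numbers")?;
    Ok(())
}
```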
diff --git a/src/catalog/src/local.rs b/src/catalog/src/local.rs
index 8e8bcf40b556..39b1a7de38d6 100644
--- a/src/catalog/src/local.rs
+++ b/src/catalog/src/local.rs
@@ -15,5 +15,4 @@
pub mod manager;
pub mod memory;
-pub use manager::LocalCatalogManager;
pub use memory::{new_memory_catalog_manager, MemoryCatalogManager};
diff --git a/src/catalog/src/local/manager.rs b/src/catalog/src/local/manager.rs
index 5c5fb11623b0..cf5543f57e9d 100644
--- a/src/catalog/src/local/manager.rs
+++ b/src/catalog/src/local/manager.rs
@@ -12,77 +12,61 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::any::Any;
-use std::sync::atomic::{AtomicU32, Ordering};
use std::sync::Arc;
use common_catalog::consts::{
- DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, MIN_USER_TABLE_ID,
- MITO_ENGINE, NUMBERS_TABLE_ID, SYSTEM_CATALOG_NAME, SYSTEM_CATALOG_TABLE_ID,
- SYSTEM_CATALOG_TABLE_NAME,
+ DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, MITO_ENGINE,
+ NUMBERS_TABLE_ID, SYSTEM_CATALOG_NAME, SYSTEM_CATALOG_TABLE_ID, SYSTEM_CATALOG_TABLE_NAME,
};
-use common_catalog::format_full_table_name;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
-use common_telemetry::{error, info};
+use common_telemetry::info;
use datatypes::prelude::ScalarVector;
use datatypes::vectors::{BinaryVector, UInt8Vector};
use futures_util::lock::Mutex;
-use metrics::increment_gauge;
use snafu::{ensure, OptionExt, ResultExt};
use table::engine::manager::TableEngineManagerRef;
use table::engine::EngineContext;
-use table::metadata::TableId;
use table::requests::OpenTableRequest;
use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME};
-use table::table::TableIdProvider;
-use table::TableRef;
use crate::error::{
- self, CatalogNotFoundSnafu, IllegalManagerStateSnafu, OpenTableSnafu, ReadSystemCatalogSnafu,
- Result, SchemaExistsSnafu, SchemaNotFoundSnafu, SystemCatalogSnafu,
- SystemCatalogTypeMismatchSnafu, TableEngineNotFoundSnafu, TableExistsSnafu, TableNotExistSnafu,
- TableNotFoundSnafu, UnimplementedSnafu,
+ CatalogNotFoundSnafu, OpenTableSnafu, ReadSystemCatalogSnafu, Result, SchemaNotFoundSnafu,
+ SystemCatalogSnafu, SystemCatalogTypeMismatchSnafu, TableEngineNotFoundSnafu,
+ TableNotFoundSnafu,
};
-use crate::local::memory::MemoryCatalogManager;
use crate::system::{
decode_system_catalog, Entry, SystemCatalogTable, TableEntry, ENTRY_TYPE_INDEX, KEY_INDEX,
VALUE_INDEX,
};
use crate::tables::SystemCatalog;
use crate::{
- handle_system_table_request, CatalogManager, DeregisterSchemaRequest, DeregisterTableRequest,
- RegisterSchemaRequest, RegisterSystemTableRequest, RegisterTableRequest, RenameTableRequest,
+ handle_system_table_request, CatalogManagerRef, RegisterSchemaRequest,
+ RegisterSystemTableRequest, RegisterTableRequest,
};
-/// A `CatalogManager` consists of a system catalog and a bunch of user catalogs.
-pub struct LocalCatalogManager {
+pub struct SystemTableInitializer {
system: Arc<SystemCatalog>,
- catalogs: Arc<MemoryCatalogManager>,
+ catalog_manager: CatalogManagerRef,
engine_manager: TableEngineManagerRef,
- next_table_id: AtomicU32,
- init_lock: Mutex<bool>,
- register_lock: Mutex<()>,
system_table_requests: Mutex<Vec<RegisterSystemTableRequest>>,
}
-impl LocalCatalogManager {
- /// Create a new [CatalogManager] with given user catalogs and mito engine
- pub async fn try_new(engine_manager: TableEngineManagerRef) -> Result<Self> {
+impl SystemTableInitializer {
+ pub async fn try_new(
+ engine_manager: TableEngineManagerRef,
+ catalog_manager: CatalogManagerRef,
+ ) -> Result<Self> {
let engine = engine_manager
.engine(MITO_ENGINE)
.context(TableEngineNotFoundSnafu {
engine_name: MITO_ENGINE,
})?;
let table = SystemCatalogTable::new(engine.clone()).await?;
- let memory_catalog_manager = crate::local::memory::new_memory_catalog_manager()?;
let system_catalog = Arc::new(SystemCatalog::new(table));
Ok(Self {
system: system_catalog,
- catalogs: memory_catalog_manager,
+ catalog_manager,
engine_manager,
- next_table_id: AtomicU32::new(MIN_USER_TABLE_ID),
- init_lock: Mutex::new(false),
- register_lock: Mutex::new(()),
system_table_requests: Mutex::new(Vec::default()),
})
}
@@ -92,15 +76,7 @@ impl LocalCatalogManager {
self.init_system_catalog().await?;
let system_records = self.system.information_schema.system.records().await?;
let entries = self.collect_system_catalog_entries(system_records).await?;
- let max_table_id = self.handle_system_catalog_entries(entries).await?;
-
- info!(
- "All system catalog entries processed, max table id: {}",
- max_table_id
- );
- self.next_table_id
- .store((max_table_id + 1).max(MIN_USER_TABLE_ID), Ordering::Relaxed);
- *self.init_lock.lock().await = true;
+ self.handle_system_catalog_entries(entries).await?;
// Processing system table hooks
let mut sys_table_requests = self.system_table_requests.lock().await;
@@ -111,26 +87,24 @@ impl LocalCatalogManager {
engine_name: MITO_ENGINE,
})?;
- handle_system_table_request(self, engine, &mut sys_table_requests).await?;
+ handle_system_table_request(
+ self.catalog_manager.as_ref(),
+ engine,
+ &mut sys_table_requests,
+ )
+ .await?;
Ok(())
}
async fn init_system_catalog(&self) -> Result<()> {
- // register default catalog and default schema
- self.catalogs
- .register_catalog_sync(DEFAULT_CATALOG_NAME.to_string())?;
- self.catalogs.register_schema_sync(RegisterSchemaRequest {
- catalog: DEFAULT_CATALOG_NAME.to_string(),
- schema: DEFAULT_SCHEMA_NAME.to_string(),
- })?;
+ let catalog_manager = &self.catalog_manager;
+ catalog_manager.register_local_catalog(SYSTEM_CATALOG_NAME)?;
- // register SystemCatalogTable
- self.catalogs
- .register_catalog_sync(SYSTEM_CATALOG_NAME.to_string())?;
- self.catalogs.register_schema_sync(RegisterSchemaRequest {
+ catalog_manager.register_local_schema(RegisterSchemaRequest {
catalog: SYSTEM_CATALOG_NAME.to_string(),
schema: INFORMATION_SCHEMA_NAME.to_string(),
})?;
+
let register_table_req = RegisterTableRequest {
catalog: SYSTEM_CATALOG_NAME.to_string(),
schema: INFORMATION_SCHEMA_NAME.to_string(),
@@ -138,7 +112,7 @@ impl LocalCatalogManager {
table_id: SYSTEM_CATALOG_TABLE_ID,
table: self.system.information_schema.system.as_table_ref(),
};
- self.catalogs.register_table(register_table_req).await?;
+ catalog_manager.register_local_table(register_table_req)?;
// Add numbers table for test
let register_number_table_req = RegisterTableRequest {
@@ -149,9 +123,7 @@ impl LocalCatalogManager {
table: NumbersTable::table(NUMBERS_TABLE_ID),
};
- self.catalogs
- .register_table(register_number_table_req)
- .await?;
+ catalog_manager.register_local_table(register_number_table_req)?;
Ok(())
}
@@ -216,16 +188,14 @@ impl LocalCatalogManager {
Ok(res)
}
- /// Processes records from system catalog table and returns the max table id persisted
- /// in system catalog table.
- async fn handle_system_catalog_entries(&self, entries: Vec<Entry>) -> Result<TableId> {
+ /// Processes records from system catalog table.
+ async fn handle_system_catalog_entries(&self, entries: Vec<Entry>) -> Result<()> {
let entries = Self::sort_entries(entries);
- let mut max_table_id = 0;
for entry in entries {
match entry {
Entry::Catalog(c) => {
- self.catalogs
- .register_catalog_sync(c.catalog_name.clone())?;
+ self.catalog_manager
+ .register_local_catalog(&c.catalog_name)?;
info!("Register catalog: {}", c.catalog_name);
}
Entry::Schema(s) => {
@@ -233,11 +203,10 @@ impl LocalCatalogManager {
catalog: s.catalog_name.clone(),
schema: s.schema_name.clone(),
};
- let _ = self.catalogs.register_schema_sync(req)?;
+ self.catalog_manager.register_local_schema(req)?;
info!("Registered schema: {:?}", s);
}
Entry::Table(t) => {
- max_table_id = max_table_id.max(t.table_id);
if t.is_deleted {
continue;
}
@@ -246,7 +215,7 @@ impl LocalCatalogManager {
}
}
}
- Ok(max_table_id)
+ Ok(())
}
/// Sort catalog entries to ensure catalog entries comes first, then schema entries,
@@ -298,19 +267,8 @@ impl LocalCatalogManager {
table_id: t.table_id,
table: table_ref,
};
- let _ = self.catalogs.register_table(register_request).await?;
-
- Ok(())
- }
-
- async fn check_state(&self) -> Result<()> {
- let started = self.init_lock.lock().await;
- ensure!(
- *started,
- IllegalManagerStateSnafu {
- msg: "Catalog manager not started",
- }
- );
+ self.catalog_manager
+ .register_local_table(register_request)?;
Ok(())
}
@@ -319,11 +277,11 @@ impl LocalCatalogManager {
catalog_name: &str,
schema_name: &str,
) -> Result<()> {
- if !self.catalogs.catalog_exist(catalog_name).await? {
+ if !self.catalog_manager.catalog_exist(catalog_name).await? {
return CatalogNotFoundSnafu { catalog_name }.fail()?;
}
if !self
- .catalogs
+ .catalog_manager
.schema_exist(catalog_name, schema_name)
.await?
{
@@ -337,234 +295,6 @@ impl LocalCatalogManager {
}
}
-#[async_trait::async_trait]
-impl TableIdProvider for LocalCatalogManager {
- async fn next_table_id(&self) -> table::Result<TableId> {
- Ok(self.next_table_id.fetch_add(1, Ordering::Relaxed))
- }
-}
-
-#[async_trait::async_trait]
-impl CatalogManager for LocalCatalogManager {
- /// Start [LocalCatalogManager] to load all information from system catalog table.
- /// Make sure table engine is initialized before starting [MemoryCatalogManager].
- async fn start(&self) -> Result<()> {
- self.init().await
- }
-
- async fn register_table(&self, request: RegisterTableRequest) -> Result<bool> {
- self.check_state().await?;
-
- let catalog_name = request.catalog.clone();
- let schema_name = request.schema.clone();
-
- self.check_catalog_schema_exist(&catalog_name, &schema_name)
- .await?;
-
- {
- let _lock = self.register_lock.lock().await;
- if let Some(existing) = self
- .catalogs
- .table(&request.catalog, &request.schema, &request.table_name)
- .await?
- {
- if existing.table_info().ident.table_id != request.table_id {
- error!(
- "Unexpected table register request: {:?}, existing: {:?}",
- request,
- existing.table_info()
- );
- return TableExistsSnafu {
- table: format_full_table_name(
- &catalog_name,
- &schema_name,
- &request.table_name,
- ),
- }
- .fail();
- }
- // Try to register table with same table id, just ignore.
- Ok(false)
- } else {
- // table does not exist
- let engine = request.table.table_info().meta.engine.to_string();
- let table_name = request.table_name.clone();
- let table_id = request.table_id;
- let _ = self.catalogs.register_table(request).await?;
- let _ = self
- .system
- .register_table(
- catalog_name.clone(),
- schema_name.clone(),
- table_name,
- table_id,
- engine,
- )
- .await?;
- increment_gauge!(
- crate::metrics::METRIC_CATALOG_MANAGER_TABLE_COUNT,
- 1.0,
- &[crate::metrics::db_label(&catalog_name, &schema_name)],
- );
- Ok(true)
- }
- }
- }
-
- async fn rename_table(&self, request: RenameTableRequest) -> Result<bool> {
- self.check_state().await?;
-
- let catalog_name = &request.catalog;
- let schema_name = &request.schema;
-
- self.check_catalog_schema_exist(catalog_name, schema_name)
- .await?;
- ensure!(
- self.catalogs
- .table(catalog_name, schema_name, &request.new_table_name)
- .await?
- .is_none(),
- TableExistsSnafu {
- table: &request.new_table_name
- }
- );
-
- let _lock = self.register_lock.lock().await;
- let old_table = self
- .catalogs
- .table(catalog_name, schema_name, &request.table_name)
- .await?
- .context(TableNotExistSnafu {
- table: &request.table_name,
- })?;
-
- let engine = old_table.table_info().meta.engine.to_string();
- // rename table in system catalog
- let _ = self
- .system
- .register_table(
- catalog_name.clone(),
- schema_name.clone(),
- request.new_table_name.clone(),
- request.table_id,
- engine,
- )
- .await?;
-
- self.catalogs.rename_table(request).await
- }
-
- async fn deregister_table(&self, request: DeregisterTableRequest) -> Result<()> {
- self.check_state().await?;
-
- {
- let _ = self.register_lock.lock().await;
-
- let DeregisterTableRequest {
- catalog,
- schema,
- table_name,
- } = &request;
- let table_id = self
- .catalogs
- .table(catalog, schema, table_name)
- .await?
- .with_context(|| error::TableNotExistSnafu {
- table: format_full_table_name(catalog, schema, table_name),
- })?
- .table_info()
- .ident
- .table_id;
-
- self.system.deregister_table(&request, table_id).await?;
- self.catalogs.deregister_table(request).await
- }
- }
-
- async fn register_schema(&self, request: RegisterSchemaRequest) -> Result<bool> {
- self.check_state().await?;
-
- let catalog_name = &request.catalog;
- let schema_name = &request.schema;
-
- if !self.catalogs.catalog_exist(catalog_name).await? {
- return CatalogNotFoundSnafu { catalog_name }.fail()?;
- }
-
- {
- let _lock = self.register_lock.lock().await;
- ensure!(
- !self
- .catalogs
- .schema_exist(catalog_name, schema_name)
- .await?,
- SchemaExistsSnafu {
- schema: schema_name,
- }
- );
- let _ = self
- .system
- .register_schema(request.catalog.clone(), schema_name.clone())
- .await?;
- self.catalogs.register_schema_sync(request)
- }
- }
-
- async fn deregister_schema(&self, _request: DeregisterSchemaRequest) -> Result<bool> {
- UnimplementedSnafu {
- operation: "deregister schema",
- }
- .fail()
- }
-
- async fn schema_exist(&self, catalog: &str, schema: &str) -> Result<bool> {
- self.catalogs.schema_exist(catalog, schema).await
- }
-
- async fn table(
- &self,
- catalog_name: &str,
- schema_name: &str,
- table_name: &str,
- ) -> Result<Option<TableRef>> {
- self.catalogs
- .table(catalog_name, schema_name, table_name)
- .await
- }
-
- async fn catalog_exist(&self, catalog: &str) -> Result<bool> {
- if catalog.eq_ignore_ascii_case(SYSTEM_CATALOG_NAME) {
- Ok(true)
- } else {
- self.catalogs.catalog_exist(catalog).await
- }
- }
-
- async fn table_exist(&self, catalog: &str, schema: &str, table: &str) -> Result<bool> {
- self.catalogs.table_exist(catalog, schema, table).await
- }
-
- async fn catalog_names(&self) -> Result<Vec<String>> {
- self.catalogs.catalog_names().await
- }
-
- async fn schema_names(&self, catalog_name: &str) -> Result<Vec<String>> {
- self.catalogs.schema_names(catalog_name).await
- }
-
- async fn table_names(&self, catalog_name: &str, schema_name: &str) -> Result<Vec<String>> {
- self.catalogs.table_names(catalog_name, schema_name).await
- }
-
- async fn register_catalog(self: Arc<Self>, name: String) -> Result<bool> {
- self.catalogs.clone().register_catalog(name).await
- }
-
- fn as_any(&self) -> &dyn Any {
- self
- }
-}
-
#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
@@ -608,7 +338,7 @@ mod tests {
is_deleted: false,
}),
];
- let res = LocalCatalogManager::sort_entries(vec);
+ let res = SystemTableInitializer::sort_entries(vec);
assert_matches!(res[0], Entry::Catalog(..));
assert_matches!(res[1], Entry::Catalog(..));
assert_matches!(res[2], Entry::Schema(..));
diff --git a/src/catalog/src/local/memory.rs b/src/catalog/src/local/memory.rs
index c4449bffac6b..7360941483f9 100644
--- a/src/catalog/src/local/memory.rs
+++ b/src/catalog/src/local/memory.rs
@@ -15,96 +15,47 @@
use std::any::Any;
use std::collections::hash_map::Entry;
use std::collections::HashMap;
-use std::sync::atomic::{AtomicU32, Ordering};
use std::sync::{Arc, RwLock, Weak};
-use common_catalog::consts::{
- DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, MIN_USER_TABLE_ID,
-};
+use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME};
use metrics::{decrement_gauge, increment_gauge};
use snafu::OptionExt;
-use table::metadata::TableId;
-use table::table::TableIdProvider;
use table::TableRef;
-use crate::error::{
- CatalogNotFoundSnafu, Result, SchemaNotFoundSnafu, TableExistsSnafu, TableNotFoundSnafu,
-};
+use crate::error::{CatalogNotFoundSnafu, Result, SchemaNotFoundSnafu, TableExistsSnafu};
use crate::information_schema::InformationSchemaProvider;
use crate::{
CatalogManager, DeregisterSchemaRequest, DeregisterTableRequest, RegisterSchemaRequest,
- RegisterTableRequest, RenameTableRequest,
+ RegisterTableRequest,
};
type SchemaEntries = HashMap<String, HashMap<String, TableRef>>;
/// Simple in-memory list of catalogs
+#[derive(Clone)]
pub struct MemoryCatalogManager {
/// Collection of catalogs containing schemas and ultimately Tables
- pub catalogs: RwLock<HashMap<String, SchemaEntries>>,
- pub table_id: AtomicU32,
-}
-
-#[async_trait::async_trait]
-impl TableIdProvider for MemoryCatalogManager {
- async fn next_table_id(&self) -> table::error::Result<TableId> {
- Ok(self.table_id.fetch_add(1, Ordering::Relaxed))
- }
+ catalogs: Arc<RwLock<HashMap<String, SchemaEntries>>>,
}
#[async_trait::async_trait]
impl CatalogManager for MemoryCatalogManager {
- async fn start(&self) -> Result<()> {
- self.table_id.store(MIN_USER_TABLE_ID, Ordering::Relaxed);
- Ok(())
+ fn register_local_catalog(&self, name: &str) -> Result<bool> {
+ self.register_catalog(name)
}
-
- async fn register_table(&self, request: RegisterTableRequest) -> Result<bool> {
- self.register_table_sync(request)
+ fn register_local_table(&self, request: RegisterTableRequest) -> Result<bool> {
+ self.register_table(request)
}
- async fn rename_table(&self, request: RenameTableRequest) -> Result<bool> {
- let mut catalogs = self.catalogs.write().unwrap();
- let schema = catalogs
- .get_mut(&request.catalog)
- .with_context(|| CatalogNotFoundSnafu {
- catalog_name: &request.catalog,
- })?
- .get_mut(&request.schema)
- .with_context(|| SchemaNotFoundSnafu {
- catalog: &request.catalog,
- schema: &request.schema,
- })?;
-
- // check old and new table names
- if !schema.contains_key(&request.table_name) {
- return TableNotFoundSnafu {
- table_info: request.table_name.to_string(),
- }
- .fail()?;
- }
- if schema.contains_key(&request.new_table_name) {
- return TableExistsSnafu {
- table: &request.new_table_name,
- }
- .fail();
- }
-
- let table = schema.remove(&request.table_name).unwrap();
- let _ = schema.insert(request.new_table_name, table);
-
- Ok(true)
- }
-
- async fn deregister_table(&self, request: DeregisterTableRequest) -> Result<()> {
+ fn deregister_local_table(&self, request: DeregisterTableRequest) -> Result<()> {
self.deregister_table_sync(request)
}
- async fn register_schema(&self, request: RegisterSchemaRequest) -> Result<bool> {
+ fn register_local_schema(&self, request: RegisterSchemaRequest) -> Result<bool> {
self.register_schema_sync(request)
}
- async fn deregister_schema(&self, request: DeregisterSchemaRequest) -> Result<bool> {
+ fn deregister_local_schema(&self, request: DeregisterSchemaRequest) -> Result<bool> {
let mut catalogs = self.catalogs.write().unwrap();
let schemas = catalogs
.get_mut(&request.catalog)
@@ -203,28 +154,27 @@ impl CatalogManager for MemoryCatalogManager {
.collect())
}
- async fn register_catalog(self: Arc<Self>, name: String) -> Result<bool> {
- self.register_catalog_sync(name)
- }
-
fn as_any(&self) -> &dyn Any {
self
}
}
impl MemoryCatalogManager {
+ pub fn new() -> Arc<Self> {
+ Arc::new(Self {
+ catalogs: Default::default(),
+ })
+ }
+
/// Creates a manager with some default setups
/// (e.g. default catalog/schema and information schema)
pub fn with_default_setup() -> Arc<Self> {
let manager = Arc::new(Self {
- table_id: AtomicU32::new(MIN_USER_TABLE_ID),
catalogs: Default::default(),
});
// Safety: default catalog/schema is registered in order so no CatalogNotFound error will occur
- manager
- .register_catalog_sync(DEFAULT_CATALOG_NAME.to_string())
- .unwrap();
+ manager.register_catalog(DEFAULT_CATALOG_NAME).unwrap();
manager
.register_schema_sync(RegisterSchemaRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
@@ -252,12 +202,15 @@ impl MemoryCatalogManager {
}
/// Registers a catalog if it does not exist and returns false if the schema exists.
- pub fn register_catalog_sync(self: &Arc<Self>, name: String) -> Result<bool> {
+ pub fn register_catalog(&self, name: &str) -> Result<bool> {
+ let name = name.to_string();
+
let mut catalogs = self.catalogs.write().unwrap();
match catalogs.entry(name.clone()) {
Entry::Vacant(e) => {
- let catalog = self.create_catalog_entry(name);
+ let arc_self = Arc::new(self.clone());
+ let catalog = arc_self.create_catalog_entry(name);
e.insert(catalog);
increment_gauge!(crate::metrics::METRIC_CATALOG_MANAGER_CATALOG_COUNT, 1.0);
Ok(true)
@@ -311,7 +264,7 @@ impl MemoryCatalogManager {
}
/// Registers a schema and returns an error if the catalog or schema does not exist.
- pub fn register_table_sync(&self, request: RegisterTableRequest) -> Result<bool> {
+ pub fn register_table(&self, request: RegisterTableRequest) -> Result<bool> {
let mut catalogs = self.catalogs.write().unwrap();
let schema = catalogs
.get_mut(&request.catalog)
@@ -356,7 +309,7 @@ impl MemoryCatalogManager {
let schema = &table.table_info().schema_name;
if !manager.catalog_exist_sync(catalog).unwrap() {
- manager.register_catalog_sync(catalog.to_string()).unwrap();
+ manager.register_catalog(catalog).unwrap();
}
if !manager.schema_exist_sync(catalog, schema).unwrap() {
@@ -375,7 +328,7 @@ impl MemoryCatalogManager {
table_id: table.table_info().ident.table_id,
table,
};
- let _ = manager.register_table_sync(request).unwrap();
+ let _ = manager.register_table(request).unwrap();
manager
}
}
@@ -388,8 +341,6 @@ pub fn new_memory_catalog_manager() -> Result<Arc<MemoryCatalogManager>> {
#[cfg(test)]
mod tests {
use common_catalog::consts::*;
- use common_error::ext::ErrorExt;
- use common_error::status_code::StatusCode;
use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME};
use super::*;
@@ -406,7 +357,7 @@ mod tests {
table: NumbersTable::table(NUMBERS_TABLE_ID),
};
- let _ = catalog_list.register_table(register_request).await.unwrap();
+ catalog_list.register_local_table(register_request).unwrap();
let table = catalog_list
.table(
DEFAULT_CATALOG_NAME,
@@ -423,130 +374,11 @@ mod tests {
.is_none());
}
- #[tokio::test]
- async fn test_mem_manager_rename_table() {
- let catalog = MemoryCatalogManager::with_default_setup();
- let table_name = "test_table";
- assert!(!catalog
- .table_exist(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name)
- .await
- .unwrap());
- // register test table
- let table_id = 2333;
- let register_request = RegisterTableRequest {
- catalog: DEFAULT_CATALOG_NAME.to_string(),
- schema: DEFAULT_SCHEMA_NAME.to_string(),
- table_name: table_name.to_string(),
- table_id,
- table: NumbersTable::table(table_id),
- };
- assert!(catalog.register_table(register_request).await.unwrap());
- assert!(catalog
- .table_exist(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name)
- .await
- .unwrap());
-
- // rename test table
- let new_table_name = "test_table_renamed";
- let rename_request = RenameTableRequest {
- catalog: DEFAULT_CATALOG_NAME.to_string(),
- schema: DEFAULT_SCHEMA_NAME.to_string(),
- table_name: table_name.to_string(),
- new_table_name: new_table_name.to_string(),
- table_id,
- };
- let _ = catalog.rename_table(rename_request).await.unwrap();
-
- // test old table name not exist
- assert!(!catalog
- .table_exist(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name)
- .await
- .unwrap());
-
- // test new table name exists
- assert!(catalog
- .table_exist(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, new_table_name)
- .await
- .unwrap());
- let registered_table = catalog
- .table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, new_table_name)
- .await
- .unwrap()
- .unwrap();
- assert_eq!(registered_table.table_info().ident.table_id, table_id);
-
- let dup_register_request = RegisterTableRequest {
- catalog: DEFAULT_CATALOG_NAME.to_string(),
- schema: DEFAULT_SCHEMA_NAME.to_string(),
- table_name: new_table_name.to_string(),
- table_id: table_id + 1,
- table: NumbersTable::table(table_id + 1),
- };
- let result = catalog.register_table(dup_register_request).await;
- let err = result.err().unwrap();
- assert_eq!(StatusCode::TableAlreadyExists, err.status_code());
- }
-
- #[tokio::test]
- async fn test_catalog_rename_table() {
- let catalog = MemoryCatalogManager::with_default_setup();
- let table_name = "num";
- let table_id = 2333;
- let table = NumbersTable::table(table_id);
-
- // register table
- let register_table_req = RegisterTableRequest {
- catalog: DEFAULT_CATALOG_NAME.to_string(),
- schema: DEFAULT_SCHEMA_NAME.to_string(),
- table_name: table_name.to_string(),
- table_id,
- table,
- };
- assert!(catalog.register_table(register_table_req).await.unwrap());
- assert!(catalog
- .table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name)
- .await
- .unwrap()
- .is_some());
-
- // rename table
- let new_table_name = "numbers_new";
- let rename_table_req = RenameTableRequest {
- catalog: DEFAULT_CATALOG_NAME.to_string(),
- schema: DEFAULT_SCHEMA_NAME.to_string(),
- table_name: table_name.to_string(),
- new_table_name: new_table_name.to_string(),
- table_id,
- };
- assert!(catalog.rename_table(rename_table_req).await.unwrap());
- assert!(catalog
- .table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name)
- .await
- .unwrap()
- .is_none());
- assert!(catalog
- .table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, new_table_name)
- .await
- .unwrap()
- .is_some());
-
- let registered_table = catalog
- .table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, new_table_name)
- .await
- .unwrap()
- .unwrap();
- assert_eq!(registered_table.table_info().ident.table_id, table_id);
- }
-
#[test]
pub fn test_register_catalog_sync() {
let list = MemoryCatalogManager::with_default_setup();
- assert!(list
- .register_catalog_sync("test_catalog".to_string())
- .unwrap());
- assert!(!list
- .register_catalog_sync("test_catalog".to_string())
- .unwrap());
+ assert!(list.register_catalog("test_catalog").unwrap());
+ assert!(!list.register_catalog("test_catalog").unwrap());
}
#[tokio::test]
@@ -561,7 +393,7 @@ mod tests {
table_id: 2333,
table: NumbersTable::table(2333),
};
- let _ = catalog.register_table(register_table_req).await.unwrap();
+ catalog.register_local_table(register_table_req).unwrap();
assert!(catalog
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name)
.await
@@ -574,8 +406,7 @@ mod tests {
table_name: table_name.to_string(),
};
catalog
- .deregister_table(deregister_table_req)
- .await
+ .deregister_local_table(deregister_table_req)
.unwrap();
assert!(catalog
.table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name)
@@ -603,20 +434,16 @@ mod tests {
table_id: 0,
table: NumbersTable::table(0),
};
- catalog
- .clone()
- .register_catalog(catalog_name.clone())
- .await
- .unwrap();
- catalog.register_schema(schema).await.unwrap();
- catalog.register_table(table).await.unwrap();
+ catalog.register_local_catalog(&catalog_name).unwrap();
+ catalog.register_local_schema(schema).unwrap();
+ catalog.register_local_table(table).unwrap();
let request = DeregisterSchemaRequest {
catalog: catalog_name.clone(),
schema: schema_name.clone(),
};
- assert!(catalog.deregister_schema(request).await.unwrap());
+ assert!(catalog.deregister_local_schema(request).unwrap());
assert!(!catalog
.schema_exist(&catalog_name, &schema_name)
.await
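
For orientation, a minimal standalone sketch of the synchronous, lock-based registration pattern that `MemoryCatalogManager` moves to in the hunks above (`register_catalog` takes `&str` and reports whether the entry was newly inserted). The types below are simplified stand-ins using only `std`, not the GreptimeDB API:

```rust
use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::sync::{Arc, RwLock};

/// Simplified stand-in for `SchemaEntries`: schema name -> table names.
type Schemas = HashMap<String, Vec<String>>;

/// Minimal lock-based, synchronous catalog registry.
#[derive(Default, Clone)]
struct InMemoryCatalogs {
    catalogs: Arc<RwLock<HashMap<String, Schemas>>>,
}

impl InMemoryCatalogs {
    /// Returns `true` when the catalog is newly registered, `false` if it already existed.
    fn register_catalog(&self, name: &str) -> bool {
        let mut catalogs = self.catalogs.write().unwrap();
        match catalogs.entry(name.to_string()) {
            Entry::Vacant(e) => {
                e.insert(Schemas::new());
                true
            }
            Entry::Occupied(_) => false,
        }
    }
}

fn main() {
    let catalogs = InMemoryCatalogs::default();
    assert!(catalogs.register_catalog("test_catalog"));
    assert!(!catalogs.register_catalog("test_catalog")); // duplicate registration is a no-op
}
```
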
diff --git a/src/catalog/src/system.rs b/src/catalog/src/system.rs
index c4c9d654eaaa..624b1c697672 100644
--- a/src/catalog/src/system.rs
+++ b/src/catalog/src/system.rs
@@ -20,7 +20,7 @@ use common_catalog::consts::{
SYSTEM_CATALOG_NAME, SYSTEM_CATALOG_TABLE_ID, SYSTEM_CATALOG_TABLE_NAME,
};
use common_recordbatch::SendableRecordBatchStream;
-use common_telemetry::{debug, warn};
+use common_telemetry::debug;
use common_time::util;
use datatypes::prelude::{ConcreteDataType, ScalarVector, VectorRef};
use datatypes::schema::{ColumnSchema, RawSchema};
@@ -34,11 +34,9 @@ use table::requests::{CreateTableRequest, InsertRequest, OpenTableRequest, Table
use table::TableRef;
use crate::error::{
- self, CreateSystemCatalogSnafu, DeregisterTableSnafu, EmptyValueSnafu, Error,
- InsertCatalogRecordSnafu, InvalidEntryTypeSnafu, InvalidKeySnafu, OpenSystemCatalogSnafu,
- Result, ValueDeserializeSnafu,
+ self, CreateSystemCatalogSnafu, EmptyValueSnafu, Error, InsertCatalogRecordSnafu,
+ InvalidEntryTypeSnafu, InvalidKeySnafu, OpenSystemCatalogSnafu, Result, ValueDeserializeSnafu,
};
-use crate::DeregisterTableRequest;
pub const ENTRY_TYPE_INDEX: usize = 0;
pub const KEY_INDEX: usize = 1;
@@ -104,30 +102,6 @@ impl SystemCatalogTable {
.context(InsertCatalogRecordSnafu)
}
- pub(crate) async fn deregister_table(
- &self,
- request: &DeregisterTableRequest,
- table_id: TableId,
- ) -> Result<()> {
- let deletion_request = build_table_deletion_request(request, table_id);
- self.0
- .insert(deletion_request)
- .await
- .map(|x| {
- if x != 1 {
- let table = common_catalog::format_full_table_name(
- &request.catalog,
- &request.schema,
- &request.table_name
- );
- warn!("Failed to delete table record from information_schema, unexpected returned result: {x}, table: {table}");
- }
- })
- .with_context(|_| DeregisterTableSnafu {
- request: request.clone(),
- })
- }
-
pub async fn register_schema(&self, catalog: String, schema: String) -> Result<usize> {
let insert_request = build_schema_insert_request(catalog, schema);
self.0
@@ -232,24 +206,6 @@ pub fn build_table_insert_request(
)
}
-pub(crate) fn build_table_deletion_request(
- request: &DeregisterTableRequest,
- table_id: TableId,
-) -> InsertRequest {
- let entry_key = format_table_entry_key(&request.catalog, &request.schema, table_id);
- build_insert_request(
- EntryType::Table,
- entry_key.as_bytes(),
- serde_json::to_string(&TableEntryValue {
- table_name: "".to_string(),
- engine: "".to_string(),
- is_deleted: true,
- })
- .unwrap()
- .as_bytes(),
- )
-}
-
fn build_primary_key_columns(entry_type: EntryType, key: &[u8]) -> HashMap<String, VectorRef> {
HashMap::from([
(
@@ -614,21 +570,5 @@ mod tests {
is_deleted: false,
});
assert_eq!(entry, expected);
-
- catalog_table
- .deregister_table(
- &DeregisterTableRequest {
- catalog: DEFAULT_CATALOG_NAME.to_string(),
- schema: DEFAULT_SCHEMA_NAME.to_string(),
- table_name: "my_table".to_string(),
- },
- 1,
- )
- .await
- .unwrap();
-
- let records = catalog_table.records().await.unwrap();
- let batches = RecordBatches::try_collect(records).await.unwrap().take();
- assert_eq!(batches.len(), 1);
}
}
diff --git a/src/catalog/src/tables.rs b/src/catalog/src/tables.rs
index 7efa1fba062d..003dc3be3c5f 100644
--- a/src/catalog/src/tables.rs
+++ b/src/catalog/src/tables.rs
@@ -19,7 +19,6 @@ use std::sync::Arc;
use table::metadata::TableId;
use crate::system::SystemCatalogTable;
-use crate::DeregisterTableRequest;
pub struct InformationSchema {
pub system: Arc<SystemCatalogTable>,
@@ -53,17 +52,6 @@ impl SystemCatalog {
.await
}
- pub(crate) async fn deregister_table(
- &self,
- request: &DeregisterTableRequest,
- table_id: TableId,
- ) -> crate::error::Result<()> {
- self.information_schema
- .system
- .deregister_table(request, table_id)
- .await
- }
-
pub async fn register_schema(
&self,
catalog: String,
diff --git a/src/catalog/tests/local_catalog_tests.rs b/src/catalog/tests/local_catalog_tests.rs
deleted file mode 100644
index 483bb1e31830..000000000000
--- a/src/catalog/tests/local_catalog_tests.rs
+++ /dev/null
@@ -1,175 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#[cfg(test)]
-mod tests {
- use std::sync::Arc;
-
- use catalog::local::LocalCatalogManager;
- use catalog::{CatalogManager, RegisterTableRequest, RenameTableRequest};
- use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
- use common_telemetry::{error, info};
- use common_test_util::temp_dir::TempDir;
- use mito::config::EngineConfig;
- use table::engine::manager::MemoryTableEngineManager;
- use table::table::numbers::NumbersTable;
- use tokio::sync::Mutex;
-
- async fn create_local_catalog_manager(
- ) -> Result<(TempDir, LocalCatalogManager), catalog::error::Error> {
- let (dir, object_store) =
- mito::table::test_util::new_test_object_store("setup_mock_engine_and_table").await;
- let mock_engine = Arc::new(mito::table::test_util::MockMitoEngine::new(
- EngineConfig::default(),
- mito::table::test_util::MockEngine::default(),
- object_store,
- ));
- let engine_manager = Arc::new(MemoryTableEngineManager::new(mock_engine.clone()));
- let catalog_manager = LocalCatalogManager::try_new(engine_manager).await.unwrap();
- catalog_manager.start().await?;
- Ok((dir, catalog_manager))
- }
-
- #[tokio::test]
- async fn test_rename_table() {
- common_telemetry::init_default_ut_logging();
- let (_dir, catalog_manager) = create_local_catalog_manager().await.unwrap();
- // register table
- let table_name = "test_table";
- let table_id = 42;
- let request = RegisterTableRequest {
- catalog: DEFAULT_CATALOG_NAME.to_string(),
- schema: DEFAULT_SCHEMA_NAME.to_string(),
- table_name: table_name.to_string(),
- table_id,
- table: NumbersTable::table(table_id),
- };
- assert!(catalog_manager.register_table(request).await.unwrap());
-
- // rename table
- let new_table_name = "table_t";
- let rename_table_req = RenameTableRequest {
- catalog: DEFAULT_CATALOG_NAME.to_string(),
- schema: DEFAULT_SCHEMA_NAME.to_string(),
- table_name: table_name.to_string(),
- new_table_name: new_table_name.to_string(),
- table_id,
- };
- assert!(catalog_manager
- .rename_table(rename_table_req)
- .await
- .unwrap());
-
- let registered_table = catalog_manager
- .table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, new_table_name)
- .await
- .unwrap()
- .unwrap();
- assert_eq!(registered_table.table_info().ident.table_id, table_id);
- }
-
- #[tokio::test]
- async fn test_duplicate_register() {
- let (_dir, catalog_manager) = create_local_catalog_manager().await.unwrap();
- let request = RegisterTableRequest {
- catalog: DEFAULT_CATALOG_NAME.to_string(),
- schema: DEFAULT_SCHEMA_NAME.to_string(),
- table_name: "test_table".to_string(),
- table_id: 42,
- table: NumbersTable::table(42),
- };
- assert!(catalog_manager
- .register_table(request.clone())
- .await
- .unwrap());
-
- // register table with same table id will succeed with 0 as return val.
- assert!(!catalog_manager.register_table(request).await.unwrap());
-
- let err = catalog_manager
- .register_table(RegisterTableRequest {
- catalog: DEFAULT_CATALOG_NAME.to_string(),
- schema: DEFAULT_SCHEMA_NAME.to_string(),
- table_name: "test_table".to_string(),
- table_id: 43,
- table: NumbersTable::table(43),
- })
- .await
- .unwrap_err();
- assert!(
- err.to_string()
- .contains("Table `greptime.public.test_table` already exists"),
- "Actual error message: {err}",
- );
- }
-
- #[test]
- fn test_concurrent_register() {
- common_telemetry::init_default_ut_logging();
- let rt = Arc::new(tokio::runtime::Builder::new_multi_thread().build().unwrap());
- let (_dir, catalog_manager) =
- rt.block_on(async { create_local_catalog_manager().await.unwrap() });
- let catalog_manager = Arc::new(catalog_manager);
-
- let succeed = Arc::new(Mutex::new(None));
-
- let mut handles = Vec::with_capacity(8);
- for i in 0..8 {
- let catalog = catalog_manager.clone();
- let succeed = succeed.clone();
- let handle = rt.spawn(async move {
- let table_id = 42 + i;
- let table = NumbersTable::table(table_id);
- let table_info = table.table_info();
- let req = RegisterTableRequest {
- catalog: DEFAULT_CATALOG_NAME.to_string(),
- schema: DEFAULT_SCHEMA_NAME.to_string(),
- table_name: "test_table".to_string(),
- table_id,
- table,
- };
- match catalog.register_table(req).await {
- Ok(res) => {
- if res {
- let mut succeed = succeed.lock().await;
- info!("Successfully registered table: {}", table_id);
- *succeed = Some(table_info);
- }
- }
- Err(_) => {
- error!("Failed to register table {}", table_id);
- }
- }
- });
- handles.push(handle);
- }
-
- rt.block_on(async move {
- for handle in handles {
- handle.await.unwrap();
- }
- let guard = succeed.lock().await;
- let table_info = guard.as_ref().unwrap();
- let table_registered = catalog_manager
- .table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "test_table")
- .await
- .unwrap()
- .unwrap();
- assert_eq!(
- table_registered.table_info().ident.table_id,
- table_info.ident.table_id
- );
- });
- }
-}
diff --git a/src/cmd/src/frontend.rs b/src/cmd/src/frontend.rs
index 57780ff1a04e..db3d4d6422ca 100644
--- a/src/cmd/src/frontend.rs
+++ b/src/cmd/src/frontend.rs
@@ -25,7 +25,7 @@ use servers::tls::{TlsMode, TlsOption};
use servers::Mode;
use snafu::ResultExt;
-use crate::error::{self, IllegalAuthConfigSnafu, Result, StartCatalogManagerSnafu};
+use crate::error::{self, IllegalAuthConfigSnafu, Result};
use crate::options::{Options, TopLevelOptions};
pub struct Instance {
@@ -34,12 +34,6 @@ pub struct Instance {
impl Instance {
pub async fn start(&mut self) -> Result<()> {
- self.frontend
- .catalog_manager()
- .start()
- .await
- .context(StartCatalogManagerSnafu)?;
-
self.frontend
.start()
.await
diff --git a/src/datanode/src/error.rs b/src/datanode/src/error.rs
index 099edcbcef26..fc2d3c447f29 100644
--- a/src/datanode/src/error.rs
+++ b/src/datanode/src/error.rs
@@ -126,12 +126,6 @@ pub enum Error {
#[snafu(display("Incorrect internal state: {}", state))]
IncorrectInternalState { state: String, location: Location },
- #[snafu(display("Failed to create catalog list, source: {}", source))]
- NewCatalog {
- location: Location,
- source: catalog::error::Error,
- },
-
#[snafu(display("Catalog not found: {}", name))]
CatalogNotFound { name: String, location: Location },
@@ -583,7 +577,7 @@ impl ErrorExt for Error {
HandleHeartbeatResponse { source, .. } => source.status_code(),
DecodeLogicalPlan { source, .. } => source.status_code(),
- NewCatalog { source, .. } | RegisterSchema { source, .. } => source.status_code(),
+ RegisterSchema { source, .. } => source.status_code(),
CreateTable { source, .. } => source.status_code(),
DropTable { source, .. } => source.status_code(),
FlushTable { source, .. } => source.status_code(),
diff --git a/src/frontend/src/catalog.rs b/src/frontend/src/catalog.rs
index bc6c1ce249b1..89170efaaea1 100644
--- a/src/frontend/src/catalog.rs
+++ b/src/frontend/src/catalog.rs
@@ -21,10 +21,11 @@ use catalog::error::{
TableMetadataManagerSnafu,
};
use catalog::information_schema::{InformationSchemaProvider, COLUMNS, TABLES};
+use catalog::local::MemoryCatalogManager;
use catalog::remote::KvCacheInvalidatorRef;
use catalog::{
CatalogManager, DeregisterSchemaRequest, DeregisterTableRequest, RegisterSchemaRequest,
- RegisterTableRequest, RenameTableRequest,
+ RegisterTableRequest,
};
use common_catalog::consts::{
DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME, NUMBERS_TABLE_ID,
@@ -50,13 +51,44 @@ use table::TableRef;
use crate::table::DistTable;
+// There are two sources for finding a table: the `local_catalog_manager` and the
+// `table_metadata_manager`.
+//
+// The `local_catalog_manager` is for storing tables that are often transparent, not saving any
+// real data. For example, our system tables, the `numbers` table and the "information_schema"
+// table.
+//
+// The `table_metadata_manager`, on the other hand, is for storing tables that are created by users,
+// obviously.
+//
+// For now, separating the two makes the code simpler, at least in the retrieval site. Now we have
+// `numbers` and `information_schema` system tables. Both have their special implementations. If we
+// put them with other ordinary tables that are created by users, we need to check the table name
+// to decide which `TableRef` to return. Like this:
+//
+// ```rust
+// match table_name {
+// "numbers" => ... // return NumbersTable impl
+// "information_schema" => ... // return InformationSchemaTable impl
+// _ => .. // return DistTable impl
+// }
+// ```
+//
+// On the other hand, because we use `MemoryCatalogManager` for system tables, we can easily store
+// and retrieve the concrete implementation of the system tables by their names, no more "if-else"s.
+//
+// However, if the system table is designed to have more features in the future, we may revisit
+// the implementation here.
#[derive(Clone)]
pub struct FrontendCatalogManager {
- backend: KvBackendRef,
+ // TODO(LFC): Maybe use a real implementation for Standalone mode.
+ // Now we use `NoopKvCacheInvalidator` for Standalone mode. In Standalone mode, the KV backend
+ // is implemented by RaftEngine. Maybe we need a cache for it?
backend_cache_invalidator: KvCacheInvalidatorRef,
partition_manager: PartitionRuleManagerRef,
table_metadata_manager: TableMetadataManagerRef,
datanode_manager: DatanodeManagerRef,
+ local_catalog_manager: Arc<MemoryCatalogManager>,
}
#[async_trait::async_trait]
@@ -105,18 +137,14 @@ impl FrontendCatalogManager {
datanode_manager: DatanodeManagerRef,
) -> Self {
Self {
- backend: backend.clone(),
backend_cache_invalidator,
partition_manager: Arc::new(PartitionRuleManager::new(backend.clone())),
table_metadata_manager: Arc::new(TableMetadataManager::new(backend)),
datanode_manager,
+ local_catalog_manager: MemoryCatalogManager::new(),
}
}
- pub fn backend(&self) -> KvBackendRef {
- self.backend.clone()
- }
-
pub fn partition_manager(&self) -> PartitionRuleManagerRef {
self.partition_manager.clone()
}
@@ -136,45 +164,34 @@ impl FrontendCatalogManager {
}
}
-// FIXME(hl): Frontend only needs a CatalogList, should replace with trait upcasting
-// as soon as it's stable: https://github.com/rust-lang/rust/issues/65991
#[async_trait::async_trait]
impl CatalogManager for FrontendCatalogManager {
- async fn start(&self) -> catalog::error::Result<()> {
- Ok(())
- }
-
- async fn register_catalog(self: Arc<Self>, _name: String) -> CatalogResult<bool> {
- unimplemented!("FrontendCatalogManager does not support registering catalog")
+ fn register_local_catalog(&self, name: &str) -> CatalogResult<bool> {
+ self.local_catalog_manager.register_catalog(name)
}
- // TODO(LFC): Handle the table caching in (de)register_table.
- async fn register_table(&self, _request: RegisterTableRequest) -> CatalogResult<bool> {
- Ok(true)
+ fn register_local_table(&self, request: RegisterTableRequest) -> CatalogResult<bool> {
+ self.local_catalog_manager.register_table(request)
}
- async fn deregister_table(&self, _request: DeregisterTableRequest) -> CatalogResult<()> {
+ fn deregister_local_table(&self, _request: DeregisterTableRequest) -> CatalogResult<()> {
Ok(())
}
- async fn register_schema(
+ fn register_local_schema(
&self,
_request: RegisterSchemaRequest,
) -> catalog::error::Result<bool> {
unimplemented!("FrontendCatalogManager does not support registering schema")
}
- async fn deregister_schema(
+ fn deregister_local_schema(
&self,
_request: DeregisterSchemaRequest,
) -> catalog_err::Result<bool> {
unimplemented!("FrontendCatalogManager does not support deregistering schema")
}
- async fn rename_table(&self, _request: RenameTableRequest) -> catalog_err::Result<bool> {
- unimplemented!()
- }
-
async fn catalog_names(&self) -> CatalogResult<Vec<String>> {
let stream = self
.table_metadata_manager
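
To illustrate the two-source layout described in the `FrontendCatalogManager` comment block above, here is a hedged standalone sketch. The lookup order (in-memory system tables first, then table metadata) is an assumption made for illustration only, and `LocalTables`/`RemoteMetadata` are hypothetical stand-ins, not the real manager internals:

```rust
use std::collections::HashMap;

/// Hypothetical stand-ins for the two table sources described above.
struct LocalTables(HashMap<String, &'static str>); // system tables kept in memory
struct RemoteMetadata(HashMap<String, &'static str>); // user tables from the kv backend

/// Assumed lookup order: consult the in-memory system tables first, then fall back
/// to the table metadata. The real `FrontendCatalogManager::table` may differ.
fn lookup(local: &LocalTables, remote: &RemoteMetadata, name: &str) -> Option<&'static str> {
    local.0.get(name).copied().or_else(|| remote.0.get(name).copied())
}

fn main() {
    let local = LocalTables(HashMap::from([("numbers".to_string(), "NumbersTable")]));
    let remote = RemoteMetadata(HashMap::from([("metrics".to_string(), "DistTable")]));
    assert_eq!(lookup(&local, &remote, "numbers"), Some("NumbersTable"));
    assert_eq!(lookup(&local, &remote, "metrics"), Some("DistTable"));
    assert_eq!(lookup(&local, &remote, "missing"), None);
}
```
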
diff --git a/src/frontend/src/instance.rs b/src/frontend/src/instance.rs
index a3d9872d02c6..fdb7daa18773 100644
--- a/src/frontend/src/instance.rs
+++ b/src/frontend/src/instance.rs
@@ -28,6 +28,7 @@ use api::v1::meta::Role;
use api::v1::{DeleteRequests, InsertRequests, RowDeleteRequests, RowInsertRequests};
use async_trait::async_trait;
use auth::{PermissionChecker, PermissionCheckerRef, PermissionReq};
+use catalog::local::manager::SystemTableInitializer;
use catalog::remote::CachedMetaKvBackend;
use catalog::CatalogManagerRef;
use client::client_manager::DatanodeClients;
@@ -78,14 +79,16 @@ use sql::statements::copy::CopyTable;
use sql::statements::statement::Statement;
use sqlparser::ast::ObjectName;
pub use standalone::StandaloneDatanodeManager;
+use table::engine::manager::MemoryTableEngineManager;
use self::distributed::DistRegionRequestHandler;
use self::standalone::{StandaloneRegionRequestHandler, StandaloneTableMetadataCreator};
use crate::catalog::FrontendCatalogManager;
use crate::delete::Deleter;
use crate::error::{
- self, Error, ExecLogicalPlanSnafu, ExecutePromqlSnafu, ExternalSnafu, MissingMetasrvOptsSnafu,
- ParseSqlSnafu, PermissionSnafu, PlanStatementSnafu, Result, SqlExecInterceptedSnafu,
+ self, CatalogSnafu, Error, ExecLogicalPlanSnafu, ExecutePromqlSnafu, ExternalSnafu,
+ MissingMetasrvOptsSnafu, ParseSqlSnafu, PermissionSnafu, PlanStatementSnafu, Result,
+ SqlExecInterceptedSnafu,
};
use crate::expr_factory::CreateExprFactory;
use crate::frontend::FrontendOptions;
@@ -159,11 +162,11 @@ impl Instance {
datanode_clients.clone(),
));
- let dist_request_handler = DistRegionRequestHandler::arc(catalog_manager.clone());
+ let region_request_handler = DistRegionRequestHandler::arc(catalog_manager.clone());
let query_engine = QueryEngineFactory::new_with_plugins(
catalog_manager.clone(),
- Some(dist_request_handler),
+ Some(region_request_handler.clone()),
true,
plugins.clone(),
)
@@ -421,6 +424,14 @@ impl FrontendInstance for Instance {
heartbeat_task.start().await?;
}
+ let initializer = SystemTableInitializer::try_new(
+ Arc::new(MemoryTableEngineManager::new_empty()),
+ self.catalog_manager.clone(),
+ )
+ .await
+ .context(CatalogSnafu)?;
+ initializer.init().await.context(CatalogSnafu)?;
+
self.script_executor.start(self).await?;
futures::future::try_join_all(self.servers.values().map(start_server))
diff --git a/src/frontend/src/statement/ddl.rs b/src/frontend/src/statement/ddl.rs
index a250e5febba5..ac949e77f32a 100644
--- a/src/frontend/src/statement/ddl.rs
+++ b/src/frontend/src/statement/ddl.rs
@@ -17,7 +17,7 @@ use std::sync::Arc;
use api::helper::ColumnDataTypeWrapper;
use api::v1::{column_def, AlterExpr, CreateTableExpr, TruncateTableExpr};
-use catalog::{CatalogManagerRef, DeregisterTableRequest, RegisterTableRequest};
+use catalog::CatalogManagerRef;
use chrono::DateTime;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_catalog::format_full_table_name;
@@ -34,7 +34,7 @@ use datatypes::prelude::ConcreteDataType;
use datatypes::schema::RawSchema;
use partition::partition::{PartitionBound, PartitionDef};
use session::context::QueryContextRef;
-use snafu::{ensure, OptionExt, ResultExt};
+use snafu::{OptionExt, ResultExt};
use sql::ast::Value as SqlValue;
use sql::statements::alter::AlterTable;
use sql::statements::create::{CreateExternalTable, CreateTable, Partitions};
@@ -46,7 +46,7 @@ use table::TableRef;
use super::StatementExecutor;
use crate::error::{
self, AlterExprToRequestSnafu, CatalogSnafu, ColumnDataTypeSnafu, ColumnNotFoundSnafu,
- DeserializePartitionSnafu, ParseSqlSnafu, Result, SchemaNotFoundSnafu, TableAlreadyExistSnafu,
+ DeserializePartitionSnafu, ParseSqlSnafu, Result, SchemaNotFoundSnafu,
TableMetadataManagerSnafu, TableNotFoundSnafu, UnrecognizedTableOptionSnafu,
};
use crate::table::DistTable;
@@ -121,23 +121,6 @@ impl StatementExecutor {
let table = DistTable::table(table_info);
- let request = RegisterTableRequest {
- catalog: table_name.catalog_name.clone(),
- schema: table_name.schema_name.clone(),
- table_name: table_name.table_name.clone(),
- table_id,
- table: table.clone(),
- };
- ensure!(
- self.catalog_manager
- .register_table(request)
- .await
- .context(CatalogSnafu)?,
- TableAlreadyExistSnafu {
- table: table_name.to_string()
- }
- );
-
// Invalidates local cache ASAP.
self.cache_invalidator
.invalidate_table(
@@ -173,16 +156,6 @@ impl StatementExecutor {
let engine = table.table_info().meta.engine.to_string();
self.drop_table_procedure(&table_name, table_id).await?;
- let request = DeregisterTableRequest {
- catalog: table_name.catalog_name.clone(),
- schema: table_name.schema_name.clone(),
- table_name: table_name.table_name.clone(),
- };
- self.catalog_manager
- .deregister_table(request)
- .await
- .context(CatalogSnafu)?;
-
// Invalidates local cache ASAP.
self.cache_invalidator
.invalidate_table(
diff --git a/src/promql/src/planner.rs b/src/promql/src/planner.rs
index 946d0a28daae..bbaab7021ac8 100644
--- a/src/promql/src/planner.rs
+++ b/src/promql/src/planner.rs
@@ -1432,14 +1432,13 @@ mod test {
let table = EmptyTable::from_table_info(&table_info);
let catalog_list = MemoryCatalogManager::with_default_setup();
assert!(catalog_list
- .register_table(RegisterTableRequest {
+ .register_local_table(RegisterTableRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
table_name,
table_id: 1024,
table,
})
- .await
.is_ok());
DfTableSourceProvider::new(catalog_list, false, QueryContext::arc().as_ref())
}
diff --git a/src/query/src/datafusion.rs b/src/query/src/datafusion.rs
index 237f4e750d38..4790aa498700 100644
--- a/src/query/src/datafusion.rs
+++ b/src/query/src/datafusion.rs
@@ -515,7 +515,7 @@ mod tests {
table_id: NUMBERS_TABLE_ID,
table: NumbersTable::table(NUMBERS_TABLE_ID),
};
- let _ = catalog_manager.register_table(req).await.unwrap();
+ catalog_manager.register_local_table(req).unwrap();
QueryEngineFactory::new(catalog_manager, None, false).query_engine()
}
diff --git a/src/query/src/range_select/plan_rewrite.rs b/src/query/src/range_select/plan_rewrite.rs
index eff319f456d7..e079b31fc5bb 100644
--- a/src/query/src/range_select/plan_rewrite.rs
+++ b/src/query/src/range_select/plan_rewrite.rs
@@ -380,14 +380,13 @@ mod test {
let table = EmptyTable::from_table_info(&table_info);
let catalog_list = MemoryCatalogManager::with_default_setup();
assert!(catalog_list
- .register_table(RegisterTableRequest {
+ .register_local_table(RegisterTableRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
table_name,
table_id: 1024,
table,
})
- .await
.is_ok());
QueryEngineFactory::new(catalog_list, None, false).query_engine()
}
diff --git a/src/query/src/tests/query_engine_test.rs b/src/query/src/tests/query_engine_test.rs
index 83ec1e52bc67..c98c471e6e16 100644
--- a/src/query/src/tests/query_engine_test.rs
+++ b/src/query/src/tests/query_engine_test.rs
@@ -112,7 +112,7 @@ fn catalog_manager() -> Result<Arc<MemoryCatalogManager>> {
table_id: NUMBERS_TABLE_ID,
table: NumbersTable::table(NUMBERS_TABLE_ID),
};
- let _ = catalog_manager.register_table_sync(req).unwrap();
+ let _ = catalog_manager.register_table(req).unwrap();
Ok(catalog_manager)
}
diff --git a/src/query/src/tests/time_range_filter_test.rs b/src/query/src/tests/time_range_filter_test.rs
index 383ac2ea8035..fdf529e2072c 100644
--- a/src/query/src/tests/time_range_filter_test.rs
+++ b/src/query/src/tests/time_range_filter_test.rs
@@ -104,7 +104,7 @@ fn create_test_engine() -> TimeRangeTester {
table_id: table.table_info().ident.table_id,
table: table.clone(),
};
- let _ = catalog_manager.register_table_sync(req).unwrap();
+ let _ = catalog_manager.register_table(req).unwrap();
let engine = QueryEngineFactory::new(catalog_manager, None, false).query_engine();
TimeRangeTester { engine, filter }
diff --git a/src/script/src/manager.rs b/src/script/src/manager.rs
index 08a9a27840ce..87cdf25eea10 100644
--- a/src/script/src/manager.rs
+++ b/src/script/src/manager.rs
@@ -160,59 +160,22 @@ impl ScriptManager {
#[cfg(test)]
mod tests {
- use catalog::CatalogManager;
- use common_config::WalConfig;
- use common_test_util::temp_dir::create_temp_dir;
- use log_store::raft_engine::log_store::RaftEngineLogStore;
- use mito::config::EngineConfig as TableEngineConfig;
- use mito::engine::MitoEngine;
- use mito::table::test_util::new_test_object_store;
+ use catalog::local::MemoryCatalogManager;
use query::QueryEngineFactory;
- use storage::compaction::noop::NoopCompactionScheduler;
- use storage::config::EngineConfig as StorageEngineConfig;
- use storage::EngineImpl;
- use table::engine::manager::MemoryTableEngineManager;
use super::*;
- type DefaultEngine = MitoEngine<EngineImpl<RaftEngineLogStore>>;
-
#[ignore = "script engine is temporary disabled"]
#[tokio::test]
async fn test_insert_find_compile_script() {
- let wal_dir = create_temp_dir("test_insert_find_compile_script_wal");
- let wal_dir_str = wal_dir.path().to_string_lossy().to_string();
-
common_telemetry::init_default_ut_logging();
- let (_dir, object_store) = new_test_object_store("test_insert_find_compile_script").await;
- let log_store = RaftEngineLogStore::try_new(wal_dir_str, WalConfig::default())
- .await
- .unwrap();
- let compaction_scheduler = Arc::new(NoopCompactionScheduler::default());
- let mock_engine = Arc::new(DefaultEngine::new(
- TableEngineConfig::default(),
- EngineImpl::new(
- StorageEngineConfig::default(),
- Arc::new(log_store),
- object_store.clone(),
- compaction_scheduler,
- )
- .unwrap(),
- object_store,
- ));
- let engine_manager = Arc::new(MemoryTableEngineManager::new(mock_engine.clone()));
- let catalog_manager = Arc::new(
- catalog::local::LocalCatalogManager::try_new(engine_manager)
- .await
- .unwrap(),
- );
+ let catalog_manager = MemoryCatalogManager::new();
let factory = QueryEngineFactory::new(catalog_manager.clone(), None, false);
let query_engine = factory.query_engine();
let mgr = ScriptManager::new(catalog_manager.clone(), query_engine)
.await
.unwrap();
- catalog_manager.start().await.unwrap();
let schema = "schema";
let name = "test";
diff --git a/src/table-procedure/src/alter.rs b/src/table-procedure/src/alter.rs
index 9d30c54fc023..72f0ac2ab9b0 100644
--- a/src/table-procedure/src/alter.rs
+++ b/src/table-procedure/src/alter.rs
@@ -15,7 +15,7 @@
//! Procedure to alter a table.
use async_trait::async_trait;
-use catalog::{CatalogManagerRef, RenameTableRequest};
+use catalog::CatalogManagerRef;
use common_procedure::error::SubprocedureFailedSnafu;
use common_procedure::{
Context, Error, LockKey, Procedure, ProcedureId, ProcedureManager, ProcedureState,
@@ -50,7 +50,9 @@ impl Procedure for AlterTableProcedure {
match self.data.state {
AlterTableState::Prepare => self.on_prepare().await,
AlterTableState::EngineAlterTable => self.on_engine_alter_table(ctx).await,
- AlterTableState::RenameInCatalog => self.on_rename_in_catalog().await,
+ // No more need to "rename table in catalog", because the table metadata is now stored
+ // in kv backend, and updated by the unified DDL procedure soon. For ordinary tables,
+ // catalog manager will be a readonly proxy.
}
}
@@ -214,15 +216,7 @@ impl AlterTableProcedure {
self.data.request.table_name,
sub_id
);
- // The sub procedure is done, we can execute next step.
- if self.data.request.is_rename_table() {
- // We also need to rename the table in the catalog.
- self.data.state = AlterTableState::RenameInCatalog;
- Ok(Status::executing(true))
- } else {
- // If this isn't a rename operation, we are done.
- Ok(Status::Done)
- }
+ Ok(Status::Done)
}
ProcedureState::Failed { error } => {
// Return error if the subprocedure is failed.
@@ -232,28 +226,6 @@ impl AlterTableProcedure {
}
}
}
-
- async fn on_rename_in_catalog(&mut self) -> Result<Status> {
- // Safety: table id is available in this state.
- let table_id = self.data.table_id.unwrap();
- if let AlterKind::RenameTable { new_table_name } = &self.data.request.alter_kind {
- let rename_req = RenameTableRequest {
- catalog: self.data.request.catalog_name.clone(),
- schema: self.data.request.schema_name.clone(),
- table_name: self.data.request.table_name.clone(),
- new_table_name: new_table_name.clone(),
- table_id,
- };
-
- let _ = self
- .catalog_manager
- .rename_table(rename_req)
- .await
- .map_err(Error::from_error_ext)?;
- }
-
- Ok(Status::Done)
- }
}
/// Represents each step while altering a table in the datanode.
@@ -263,8 +235,6 @@ enum AlterTableState {
Prepare,
/// Alter table in the table engine.
EngineAlterTable,
- /// Rename the table in the catalog (optional).
- RenameInCatalog,
}
/// Serializable data of [AlterTableProcedure].
@@ -294,56 +264,3 @@ impl AlterTableData {
}
}
}
-
-#[cfg(test)]
-mod tests {
- use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
-
- use super::*;
- use crate::test_util::TestEnv;
-
- #[tokio::test]
- async fn test_alter_table_procedure_rename() {
- let env = TestEnv::new("rename");
- let table_name = "test_old";
- let table_id = env.create_table(table_name).await;
-
- let new_table_name = "test_new";
- let request = AlterTableRequest {
- catalog_name: DEFAULT_CATALOG_NAME.to_string(),
- schema_name: DEFAULT_SCHEMA_NAME.to_string(),
- table_name: table_name.to_string(),
- table_id,
- alter_kind: AlterKind::RenameTable {
- new_table_name: new_table_name.to_string(),
- },
- table_version: None,
- };
-
- let TestEnv {
- dir: _dir,
- table_engine,
- procedure_manager,
- catalog_manager,
- } = env;
- let procedure =
- AlterTableProcedure::new(request, catalog_manager.clone(), table_engine.clone());
- let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));
-
- let mut watcher = procedure_manager.submit(procedure_with_id).await.unwrap();
- watcher.changed().await.unwrap();
-
- let table = catalog_manager
- .table(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, new_table_name)
- .await
- .unwrap()
- .unwrap();
- let table_info = table.table_info();
- assert_eq!(new_table_name, table_info.name);
-
- assert!(!catalog_manager
- .table_exist(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, table_name)
- .await
- .unwrap());
- }
-}
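
A small sketch of the alter-procedure state machine after `RenameInCatalog` is removed; the `Prepare -> EngineAlterTable` transition is assumed from context (only the state removal and the unconditional `Status::Done` are visible in the hunk above):

```rust
/// Remaining states after the change (illustrative, not the real procedure type).
#[derive(Debug, PartialEq)]
enum AlterTableState {
    Prepare,
    EngineAlterTable,
}

fn next(state: &AlterTableState) -> Option<AlterTableState> {
    match state {
        AlterTableState::Prepare => Some(AlterTableState::EngineAlterTable),
        // Once the engine has altered the table we are done; no catalog rename step remains.
        AlterTableState::EngineAlterTable => None,
    }
}

fn main() {
    assert_eq!(next(&AlterTableState::Prepare), Some(AlterTableState::EngineAlterTable));
    assert_eq!(next(&AlterTableState::EngineAlterTable), None);
}
```
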
diff --git a/src/table-procedure/src/create.rs b/src/table-procedure/src/create.rs
index ecdbf37bc826..126f7c735a08 100644
--- a/src/table-procedure/src/create.rs
+++ b/src/table-procedure/src/create.rs
@@ -256,8 +256,7 @@ impl CreateTableProcedure {
};
let _ = self
.catalog_manager
- .register_table(register_req)
- .await
+ .register_local_table(register_req)
.map_err(Error::from_error_ext)?;
Ok(Status::Done)
diff --git a/src/table-procedure/src/drop.rs b/src/table-procedure/src/drop.rs
index 9db9f37723e9..0fa521728757 100644
--- a/src/table-procedure/src/drop.rs
+++ b/src/table-procedure/src/drop.rs
@@ -163,8 +163,7 @@ impl DropTableProcedure {
table_name: self.data.request.table_name.clone(),
};
self.catalog_manager
- .deregister_table(deregister_table_req)
- .await
+ .deregister_local_table(deregister_table_req)
.context(AccessCatalogSnafu)?;
}
| refactor | system tables in new region server (#2344) |
| 641592644d195c3df3e2e3ae3f6c3e3fc7a24fd6 | 2024-03-19 14:20:10 | Yingwen | feat: support per table memtable options (#3524) | false |
diff --git a/src/metric-engine/src/engine/create.rs b/src/metric-engine/src/engine/create.rs
index fbadbf5d2e18..8fad9f9dc8eb 100644
--- a/src/metric-engine/src/engine/create.rs
+++ b/src/metric-engine/src/engine/create.rs
@@ -36,7 +36,9 @@ use store_api::region_request::{AffectedRows, RegionCreateRequest, RegionRequest
use store_api::storage::consts::ReservedColumnId;
use store_api::storage::RegionId;
-use crate::engine::options::set_index_options_for_data_region;
+use crate::engine::options::{
+ set_index_options_for_data_region, set_memtable_options_for_data_region,
+};
use crate::engine::MetricEngineInner;
use crate::error::{
ConflictRegionOptionSnafu, CreateMitoRegionSnafu, InternalColumnOccupiedSnafu,
@@ -380,6 +382,9 @@ impl MetricEngineInner {
// set index options
set_index_options_for_data_region(&mut data_region_request.options);
+ // Set memtable options.
+ set_memtable_options_for_data_region(&mut data_region_request.options);
+
data_region_request
}
diff --git a/src/metric-engine/src/engine/options.rs b/src/metric-engine/src/engine/options.rs
index ee071e8d48e5..034caac6d1b4 100644
--- a/src/metric-engine/src/engine/options.rs
+++ b/src/metric-engine/src/engine/options.rs
@@ -42,3 +42,8 @@ pub fn set_index_options_for_data_region(options: &mut HashMap<String, String>)
SEG_ROW_COUNT_FOR_DATA_REGION.to_string(),
);
}
+
+/// Set memtable options for the data region.
+pub fn set_memtable_options_for_data_region(options: &mut HashMap<String, String>) {
+ options.insert("memtable.type".to_string(), "experimental".to_string());
+}
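
A minimal sketch mirroring the new hook above: the metric engine injects a `memtable.type` option so its internal data regions use the experimental (merge tree) memtable. A plain `HashMap` stands in for the request's option map:

```rust
use std::collections::HashMap;

/// Mirror of the new hook: force the experimental memtable for data regions.
fn set_memtable_options_for_data_region(options: &mut HashMap<String, String>) {
    options.insert("memtable.type".to_string(), "experimental".to_string());
}

fn main() {
    let mut options = HashMap::new();
    set_memtable_options_for_data_region(&mut options);
    // Downstream, mito2 parses this key into its memtable options (see the
    // `RegionOptions` changes later in this diff).
    assert_eq!(
        options.get("memtable.type").map(String::as_str),
        Some("experimental")
    );
}
```
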
diff --git a/src/mito2/Cargo.toml b/src/mito2/Cargo.toml
index cf1a8533f1e5..917d7d0b4156 100644
--- a/src/mito2/Cargo.toml
+++ b/src/mito2/Cargo.toml
@@ -58,7 +58,7 @@ prost.workspace = true
puffin.workspace = true
rand.workspace = true
regex = "1.5"
-serde = { version = "1.0", features = ["derive"] }
+serde.workspace = true
serde_json.workspace = true
serde_with.workspace = true
smallvec.workspace = true
diff --git a/src/mito2/src/engine/create_test.rs b/src/mito2/src/engine/create_test.rs
index eb1cb7169013..5e2d804123d5 100644
--- a/src/mito2/src/engine/create_test.rs
+++ b/src/mito2/src/engine/create_test.rs
@@ -14,12 +14,15 @@
use std::time::Duration;
+use api::v1::Rows;
+use common_recordbatch::RecordBatches;
use store_api::region_engine::RegionEngine;
use store_api::region_request::{RegionCloseRequest, RegionRequest};
-use store_api::storage::RegionId;
+use store_api::storage::{RegionId, ScanRequest};
use crate::config::MitoConfig;
-use crate::test_util::{CreateRequestBuilder, TestEnv};
+use crate::region::options::MemtableOptions;
+use crate::test_util::{build_rows, put_rows, rows_schema, CreateRequestBuilder, TestEnv};
#[tokio::test]
async fn test_engine_create_new_region() {
@@ -198,3 +201,45 @@ async fn test_engine_create_with_custom_store() {
.await
.unwrap());
}
+
+#[tokio::test]
+async fn test_engine_create_with_memtable_opts() {
+ let mut env = TestEnv::new();
+ let engine = env.create_engine(MitoConfig::default()).await;
+
+ let region_id = RegionId::new(1, 1);
+ let request = CreateRequestBuilder::new()
+ .insert_option("memtable.type", "experimental")
+ .insert_option("memtable.experimental.index_max_keys_per_shard", "2")
+ .build();
+ let column_schemas = rows_schema(&request);
+ engine
+ .handle_request(region_id, RegionRequest::Create(request))
+ .await
+ .unwrap();
+ let region = engine.get_region(region_id).unwrap();
+ let Some(MemtableOptions::Experimental(memtable_opts)) = &region.version().options.memtable

+ else {
+ unreachable!();
+ };
+ assert_eq!(2, memtable_opts.index_max_keys_per_shard);
+
+ let rows = Rows {
+ schema: column_schemas,
+ rows: build_rows(0, 3),
+ };
+ put_rows(&engine, region_id, rows).await;
+
+ let request = ScanRequest::default();
+ let stream = engine.handle_query(region_id, request).await.unwrap();
+ let batches = RecordBatches::try_collect(stream).await.unwrap();
+ let expected = "\
++-------+---------+---------------------+
+| tag_0 | field_0 | ts |
++-------+---------+---------------------+
+| 0 | 0.0 | 1970-01-01T00:00:00 |
+| 1 | 1.0 | 1970-01-01T00:00:01 |
+| 2 | 2.0 | 1970-01-01T00:00:02 |
++-------+---------+---------------------+";
+ assert_eq!(expected, batches.pretty_print().unwrap());
+}
diff --git a/src/mito2/src/error.rs b/src/mito2/src/error.rs
index 3885e3ae8506..39c1527e0834 100644
--- a/src/mito2/src/error.rs
+++ b/src/mito2/src/error.rs
@@ -572,6 +572,9 @@ pub enum Error {
#[snafu(source)]
error: parquet::errors::ParquetError,
},
+
+ #[snafu(display("Invalid region options, {}", reason))]
+ InvalidRegionOptions { reason: String, location: Location },
}
pub type Result<T, E = Error> = std::result::Result<T, E>;
@@ -621,7 +624,8 @@ impl ErrorExt for Error {
| FillDefault { .. }
| ConvertColumnDataType { .. }
| ColumnNotFound { .. }
- | InvalidMetadata { .. } => StatusCode::InvalidArguments,
+ | InvalidMetadata { .. }
+ | InvalidRegionOptions { .. } => StatusCode::InvalidArguments,
InvalidRegionRequestSchemaVersion { .. } => StatusCode::RequestOutdated,
diff --git a/src/mito2/src/memtable.rs b/src/mito2/src/memtable.rs
index 8c9cd0172a0c..9ec4231d7c53 100644
--- a/src/mito2/src/memtable.rs
+++ b/src/mito2/src/memtable.rs
@@ -28,9 +28,11 @@ use crate::error::Result;
use crate::flush::WriteBufferManagerRef;
use crate::memtable::key_values::KeyValue;
pub use crate::memtable::key_values::KeyValues;
-use crate::memtable::merge_tree::MergeTreeConfig;
+use crate::memtable::merge_tree::{MergeTreeConfig, MergeTreeMemtableBuilder};
+use crate::memtable::time_series::TimeSeriesMemtableBuilder;
use crate::metrics::WRITE_BUFFER_BYTES;
use crate::read::Batch;
+use crate::region::options::MemtableOptions;
pub mod key_values;
pub mod merge_tree;
@@ -53,7 +55,7 @@ pub enum MemtableConfig {
impl Default for MemtableConfig {
fn default() -> Self {
- Self::Experimental(MergeTreeConfig::default())
+ Self::TimeSeries
}
}
@@ -206,6 +208,46 @@ impl Drop for AllocTracker {
}
}
+/// Provider of memtable builders for regions.
+#[derive(Clone)]
+pub(crate) struct MemtableBuilderProvider {
+ write_buffer_manager: Option<WriteBufferManagerRef>,
+ default_memtable_builder: MemtableBuilderRef,
+}
+
+impl MemtableBuilderProvider {
+ pub(crate) fn new(
+ write_buffer_manager: Option<WriteBufferManagerRef>,
+ default_memtable_builder: MemtableBuilderRef,
+ ) -> Self {
+ Self {
+ write_buffer_manager,
+ default_memtable_builder,
+ }
+ }
+
+ pub(crate) fn builder_for_options(
+ &self,
+ options: Option<&MemtableOptions>,
+ ) -> MemtableBuilderRef {
+ match options {
+ Some(MemtableOptions::TimeSeries) => Arc::new(TimeSeriesMemtableBuilder::new(
+ self.write_buffer_manager.clone(),
+ )),
+ Some(MemtableOptions::Experimental(opts)) => Arc::new(MergeTreeMemtableBuilder::new(
+ MergeTreeConfig {
+ index_max_keys_per_shard: opts.index_max_keys_per_shard,
+ data_freeze_threshold: opts.data_freeze_threshold,
+ fork_dictionary_bytes: opts.fork_dictionary_bytes,
+ ..Default::default()
+ },
+ self.write_buffer_manager.clone(),
+ )),
+ None => self.default_memtable_builder.clone(),
+ }
+ }
+}
+
#[cfg(test)]
mod tests {
use common_base::readable_size::ReadableSize;
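
A simplified sketch of the per-region dispatch that `MemtableBuilderProvider::builder_for_options` performs above: a region-level memtable option overrides the engine-wide default, and `None` falls back to it. `MemtableKind` and `Provider` are illustrative stand-ins, not mito2 types:

```rust
/// Simplified stand-in for the per-region memtable choice.
#[derive(Clone, Debug, PartialEq)]
enum MemtableKind {
    TimeSeries,
    Experimental { index_max_keys_per_shard: usize },
}

/// Engine-wide default plus optional per-region override.
struct Provider {
    default_kind: MemtableKind,
}

impl Provider {
    fn kind_for_options(&self, options: Option<&MemtableKind>) -> MemtableKind {
        match options {
            Some(kind) => kind.clone(),
            None => self.default_kind.clone(),
        }
    }
}

fn main() {
    let provider = Provider {
        default_kind: MemtableKind::TimeSeries,
    };
    assert_eq!(provider.kind_for_options(None), MemtableKind::TimeSeries);
    let per_region = MemtableKind::Experimental {
        index_max_keys_per_shard: 2,
    };
    assert_eq!(provider.kind_for_options(Some(&per_region)), per_region);
}
```
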
diff --git a/src/mito2/src/memtable/merge_tree.rs b/src/mito2/src/memtable/merge_tree.rs
index a916f4f9b496..a449c1a4c63e 100644
--- a/src/mito2/src/memtable/merge_tree.rs
+++ b/src/mito2/src/memtable/merge_tree.rs
@@ -46,6 +46,8 @@ use crate::memtable::{
/// Use `1/DICTIONARY_SIZE_FACTOR` of OS memory as dictionary size.
const DICTIONARY_SIZE_FACTOR: u64 = 8;
+pub(crate) const DEFAULT_MAX_KEYS_PER_SHARD: usize = 8192;
+pub(crate) const DEFAULT_FREEZE_THRESHOLD: usize = 131072;
/// Id of a shard, only unique inside a partition.
type ShardId = u32;
@@ -59,6 +61,9 @@ struct PkId {
pk_index: PkIndex,
}
+// TODO(yingwen): `fork_dictionary_bytes` is per region option, if we have multiple merge
+// tree memtable then we will use a lot memory. We should find a better way to control the
+// dictionary size.
/// Config for the merge tree memtable.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
#[serde(default)]
@@ -68,6 +73,10 @@ pub struct MergeTreeConfig {
/// Number of rows to freeze a data part.
pub data_freeze_threshold: usize,
/// Whether to delete duplicates rows.
+ ///
+ /// Skips deserializing as it should be determined by whether the
+ /// table is append only.
+ #[serde(skip_deserializing)]
pub dedup: bool,
/// Total bytes of dictionary to keep in fork.
pub fork_dictionary_bytes: ReadableSize,
@@ -539,4 +548,17 @@ mod tests {
assert_eq!(timestamps, read);
}
}
+
+ #[test]
+ fn test_deserialize_config() {
+ let config = MergeTreeConfig {
+ dedup: false,
+ ..Default::default()
+ };
+ // Creates a json with dedup = false.
+ let json = serde_json::to_string(&config).unwrap();
+ let config: MergeTreeConfig = serde_json::from_str(&json).unwrap();
+ assert!(config.dedup);
+ assert_eq!(MergeTreeConfig::default(), config);
+ }
}
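
The `#[serde(skip_deserializing)]` change above can be reproduced in isolation. A small sketch (assuming the workspace's `serde`/`serde_json` with the derive feature) showing that a skipped field is restored from the container default regardless of the serialized payload, which is what the new `test_deserialize_config` asserts for `dedup`:

```rust
use serde::{Deserialize, Serialize};

/// Same pattern as `MergeTreeConfig`: container-level `default` plus
/// `skip_deserializing` on `dedup`, so whatever the payload says, `dedup`
/// is taken from `Default::default()` when deserializing.
#[derive(Debug, PartialEq, Serialize, Deserialize)]
#[serde(default)]
struct Config {
    #[serde(skip_deserializing)]
    dedup: bool,
    data_freeze_threshold: usize,
}

impl Default for Config {
    fn default() -> Self {
        Self {
            dedup: true,
            data_freeze_threshold: 131072,
        }
    }
}

fn main() {
    let json = serde_json::to_string(&Config {
        dedup: false,
        ..Default::default()
    })
    .unwrap();
    let parsed: Config = serde_json::from_str(&json).unwrap();
    assert!(parsed.dedup); // the serialized `dedup: false` is ignored
    assert_eq!(parsed, Config::default());
}
```
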
diff --git a/src/mito2/src/region.rs b/src/mito2/src/region.rs
index c32e45b87a8a..1fa3fb7d4ff5 100644
--- a/src/mito2/src/region.rs
+++ b/src/mito2/src/region.rs
@@ -32,7 +32,7 @@ use crate::access_layer::AccessLayerRef;
use crate::error::{RegionNotFoundSnafu, RegionReadonlySnafu, Result};
use crate::manifest::action::{RegionEdit, RegionMetaAction, RegionMetaActionList};
use crate::manifest::manager::RegionManifestManager;
-use crate::memtable::MemtableId;
+use crate::memtable::{MemtableBuilderRef, MemtableId};
use crate::region::version::{VersionControlRef, VersionRef};
use crate::request::OnFailure;
use crate::sst::file_purger::FilePurgerRef;
@@ -83,9 +83,10 @@ pub(crate) struct MitoRegion {
last_flush_millis: AtomicI64,
/// Whether the region is writable.
writable: AtomicBool,
-
/// Provider to get current time.
time_provider: TimeProviderRef,
+ /// Memtable builder for the region.
+ pub(crate) memtable_builder: MemtableBuilderRef,
}
pub(crate) type MitoRegionRef = Arc<MitoRegion>;
diff --git a/src/mito2/src/region/opener.rs b/src/mito2/src/region/opener.rs
index d0ac1a5530cd..f99ab4e5d04c 100644
--- a/src/mito2/src/region/opener.rs
+++ b/src/mito2/src/region/opener.rs
@@ -37,7 +37,7 @@ use crate::error::{
use crate::manifest::manager::{RegionManifestManager, RegionManifestOptions};
use crate::manifest::storage::manifest_compress_type;
use crate::memtable::time_partition::TimePartitions;
-use crate::memtable::MemtableBuilderRef;
+use crate::memtable::MemtableBuilderProvider;
use crate::region::options::RegionOptions;
use crate::region::version::{VersionBuilder, VersionControl, VersionControlRef};
use crate::region::MitoRegion;
@@ -53,7 +53,7 @@ use crate::wal::{EntryId, Wal};
pub(crate) struct RegionOpener {
region_id: RegionId,
metadata: Option<RegionMetadata>,
- memtable_builder: MemtableBuilderRef,
+ memtable_builder_provider: MemtableBuilderProvider,
object_store_manager: ObjectStoreManagerRef,
region_dir: String,
scheduler: SchedulerRef,
@@ -69,7 +69,7 @@ impl RegionOpener {
pub(crate) fn new(
region_id: RegionId,
region_dir: &str,
- memtable_builder: MemtableBuilderRef,
+ memtable_builder_provider: MemtableBuilderProvider,
object_store_manager: ObjectStoreManagerRef,
scheduler: SchedulerRef,
intermediate_manager: IntermediateManager,
@@ -77,7 +77,7 @@ impl RegionOpener {
RegionOpener {
region_id,
metadata: None,
- memtable_builder,
+ memtable_builder_provider,
object_store_manager,
region_dir: normalize_dir(region_dir),
scheduler,
@@ -171,11 +171,14 @@ impl RegionOpener {
let manifest_manager =
RegionManifestManager::new(metadata.clone(), region_manifest_options).await?;
+ let memtable_builder = self
+ .memtable_builder_provider
+ .builder_for_options(options.memtable.as_ref());
// Initial memtable id is 0.
let part_duration = options.compaction.time_window();
let mutable = Arc::new(TimePartitions::new(
metadata.clone(),
- self.memtable_builder,
+ memtable_builder.clone(),
0,
part_duration,
));
@@ -210,6 +213,7 @@ impl RegionOpener {
// Region is writable after it is created.
writable: AtomicBool::new(true),
time_provider,
+ memtable_builder,
})
}
@@ -277,11 +281,14 @@ impl RegionOpener {
access_layer.clone(),
self.cache_manager.clone(),
));
+ let memtable_builder = self
+ .memtable_builder_provider
+ .builder_for_options(region_options.memtable.as_ref());
// Initial memtable id is 0.
let part_duration = region_options.compaction.time_window();
let mutable = Arc::new(TimePartitions::new(
metadata.clone(),
- self.memtable_builder.clone(),
+ memtable_builder.clone(),
0,
part_duration,
));
@@ -329,6 +336,7 @@ impl RegionOpener {
// Region is always opened in read only mode.
writable: AtomicBool::new(false),
time_provider,
+ memtable_builder,
};
Ok(Some(region))
}
diff --git a/src/mito2/src/region/options.rs b/src/mito2/src/region/options.rs
index 1667b5757303..c45e4310482a 100644
--- a/src/mito2/src/region/options.rs
+++ b/src/mito2/src/region/options.rs
@@ -19,15 +19,17 @@
use std::collections::HashMap;
use std::time::Duration;
+use common_base::readable_size::ReadableSize;
use common_wal::options::{WalOptions, WAL_OPTIONS_KEY};
use serde::de::Error as _;
use serde::{Deserialize, Deserializer};
use serde_json::Value;
use serde_with::{serde_as, with_prefix, DisplayFromStr};
-use snafu::ResultExt;
+use snafu::{ensure, ResultExt};
use store_api::storage::ColumnId;
-use crate::error::{Error, JsonOptionsSnafu, Result};
+use crate::error::{Error, InvalidRegionOptionsSnafu, JsonOptionsSnafu, Result};
+use crate::memtable::merge_tree::{DEFAULT_FREEZE_THRESHOLD, DEFAULT_MAX_KEYS_PER_SHARD};
const DEFAULT_INDEX_SEGMENT_ROW_COUNT: usize = 1024;
@@ -48,6 +50,8 @@ pub struct RegionOptions {
pub wal_options: WalOptions,
/// Index options.
pub index_options: IndexOptions,
+ /// Memtable options.
+ pub memtable: Option<MemtableOptions>,
}
impl TryFrom<&HashMap<String, String>> for RegionOptions {
@@ -62,7 +66,11 @@ impl TryFrom<&HashMap<String, String>> for RegionOptions {
// See https://github.com/serde-rs/serde/issues/1626
let options: RegionOptionsWithoutEnum =
serde_json::from_str(&json).context(JsonOptionsSnafu)?;
- let compaction: CompactionOptions = serde_json::from_str(&json).unwrap_or_default();
+ let compaction = if validate_enum_options(options_map, "compaction.type")? {
+ serde_json::from_str(&json).context(JsonOptionsSnafu)?
+ } else {
+ CompactionOptions::default()
+ };
// Tries to decode the wal options from the map or sets to the default if there's none wal options in the map.
let wal_options = options_map.get(WAL_OPTIONS_KEY).map_or_else(
@@ -73,6 +81,11 @@ impl TryFrom<&HashMap<String, String>> for RegionOptions {
)?;
let index_options: IndexOptions = serde_json::from_str(&json).context(JsonOptionsSnafu)?;
+ let memtable = if validate_enum_options(options_map, "memtable.type")? {
+ Some(serde_json::from_str(&json).context(JsonOptionsSnafu)?)
+ } else {
+ None
+ };
Ok(RegionOptions {
ttl: options.ttl,
@@ -80,6 +93,7 @@ impl TryFrom<&HashMap<String, String>> for RegionOptions {
storage: options.storage,
wal_options,
index_options,
+ memtable,
})
}
}
@@ -87,7 +101,7 @@ impl TryFrom<&HashMap<String, String>> for RegionOptions {
/// Options for compactions
#[derive(Debug, Clone, PartialEq, Eq, Deserialize)]
#[serde(tag = "compaction.type")]
-#[serde(rename_all = "lowercase")]
+#[serde(rename_all = "snake_case")]
pub enum CompactionOptions {
/// Time window compaction strategy.
#[serde(with = "prefix_twcs")]
@@ -206,6 +220,42 @@ impl Default for InvertedIndexOptions {
}
}
+/// Options for region level memtable.
+#[derive(Debug, Clone, PartialEq, Eq, Deserialize)]
+#[serde(tag = "memtable.type", rename_all = "snake_case")]
+pub enum MemtableOptions {
+ TimeSeries,
+ #[serde(with = "prefix_experimental")]
+ Experimental(ExperimentalOptions),
+}
+
+with_prefix!(prefix_experimental "memtable.experimental.");
+
+/// Experimental memtable options.
+#[serde_as]
+#[derive(Debug, Clone, PartialEq, Eq, Deserialize)]
+#[serde(default)]
+pub struct ExperimentalOptions {
+ /// Max keys in an index shard.
+ #[serde_as(as = "DisplayFromStr")]
+ pub index_max_keys_per_shard: usize,
+ /// Number of rows to freeze a data part.
+ #[serde_as(as = "DisplayFromStr")]
+ pub data_freeze_threshold: usize,
+ /// Total bytes of dictionary to keep in fork.
+ pub fork_dictionary_bytes: ReadableSize,
+}
+
+impl Default for ExperimentalOptions {
+ fn default() -> Self {
+ Self {
+ index_max_keys_per_shard: DEFAULT_MAX_KEYS_PER_SHARD,
+ data_freeze_threshold: DEFAULT_FREEZE_THRESHOLD,
+ fork_dictionary_bytes: ReadableSize::mb(64),
+ }
+ }
+}
+
fn deserialize_ignore_column_ids<'de, D>(deserializer: D) -> Result<Vec<ColumnId>, D::Error>
where
D: Deserializer<'de>,
@@ -221,25 +271,56 @@ where
/// Converts the `options` map to a json object.
///
-/// Converts all key-values to lowercase and replaces "null" strings by `null` json values.
+/// Replaces "null" strings by `null` json values.
fn options_map_to_value(options: &HashMap<String, String>) -> Value {
let map = options
.iter()
.map(|(key, value)| {
- let (key, value) = (key.to_lowercase(), value.to_lowercase());
-
- if value == "null" {
- (key, Value::Null)
+ // Only convert the key to lowercase.
+ if value.eq_ignore_ascii_case("null") {
+ (key.to_string(), Value::Null)
} else {
- (key, Value::from(value))
+ (key.to_string(), Value::from(value.to_string()))
}
})
.collect();
Value::Object(map)
}
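// Illustrative sketch, not from the original commit: the conversion above keeps
// the original value casing (e.g. "S3" stays "S3") and only maps the literal
// string "null" to a JSON null.
fn _illustrate_options_map_to_value() {
    let options = HashMap::from([
        ("storage".to_string(), "S3".to_string()),
        ("ttl".to_string(), "null".to_string()),
    ]);
    let value = options_map_to_value(&options);
    assert_eq!(value["storage"], "S3");
    assert!(value["ttl"].is_null());
}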
+// `#[serde(default)]` doesn't support enum (https://github.com/serde-rs/serde/issues/1799) so we
+// check the type key first.
+/// Validates whether the `options_map` has valid options for specific `enum_tag_key`
+/// and returns `true` if the map contains enum options.
+fn validate_enum_options(
+ options_map: &HashMap<String, String>,
+ enum_tag_key: &str,
+) -> Result<bool> {
+ let enum_type = enum_tag_key.split('.').next().unwrap();
+ let mut has_other_options = false;
+ let mut has_tag = false;
+ for key in options_map.keys() {
+ if key == enum_tag_key {
+ has_tag = true;
+ } else if key.starts_with(enum_type) {
+ has_other_options = true;
+ }
+ }
+
+ // If tag is not provided, then other options for the enum should not exist.
+ ensure!(
+ has_tag || !has_other_options,
+ InvalidRegionOptionsSnafu {
+ reason: format!("missing key {} in options", enum_tag_key),
+ }
+ );
+
+ Ok(has_tag)
+}
+
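// Illustrative sketch, not from the original commit: how the tag-key check
// above is expected to behave for the "memtable.type" enum.
fn _illustrate_validate_enum_options() {
    // "memtable.*" options without the "memtable.type" tag are rejected.
    let map = HashMap::from([(
        "memtable.experimental.data_freeze_threshold".to_string(),
        "1024".to_string(),
    )]);
    assert!(validate_enum_options(&map, "memtable.type").is_err());

    // With the tag present, the caller proceeds to deserialize the enum.
    let map = HashMap::from([("memtable.type".to_string(), "time_series".to_string())]);
    assert!(validate_enum_options(&map, "memtable.type").unwrap());
}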
#[cfg(test)]
mod tests {
+ use common_error::ext::ErrorExt;
+ use common_error::status_code::StatusCode;
use common_wal::options::KafkaWalOptions;
use super::*;
@@ -274,7 +355,7 @@ mod tests {
let map = make_map(&[("storage", "S3")]);
let options = RegionOptions::try_from(&map).unwrap();
let expect = RegionOptions {
- storage: Some("s3".to_string()),
+ storage: Some("S3".to_string()),
..Default::default()
};
assert_eq!(expect, options);
@@ -282,16 +363,12 @@ mod tests {
#[test]
fn test_without_compaction_type() {
- // If `compaction.type` is not provided, we ignore all compaction
- // related options. Actually serde does not support deserialize
- // an enum without knowning its type.
let map = make_map(&[
("compaction.twcs.max_active_window_files", "8"),
("compaction.twcs.time_window", "2h"),
]);
- let options = RegionOptions::try_from(&map).unwrap();
- let expect = RegionOptions::default();
- assert_eq!(expect, options);
+ let err = RegionOptions::try_from(&map).unwrap_err();
+ assert_eq!(StatusCode::InvalidArguments, err.status_code());
}
#[test]
@@ -355,6 +432,32 @@ mod tests {
all_wal_options.iter().all(test_with_wal_options);
}
+ #[test]
+ fn test_with_memtable() {
+ let map = make_map(&[("memtable.type", "time_series")]);
+ let options = RegionOptions::try_from(&map).unwrap();
+ let expect = RegionOptions {
+ memtable: Some(MemtableOptions::TimeSeries),
+ ..Default::default()
+ };
+ assert_eq!(expect, options);
+
+ let map = make_map(&[("memtable.type", "experimental")]);
+ let options = RegionOptions::try_from(&map).unwrap();
+ let expect = RegionOptions {
+ memtable: Some(MemtableOptions::Experimental(ExperimentalOptions::default())),
+ ..Default::default()
+ };
+ assert_eq!(expect, options);
+ }
+
+ #[test]
+ fn test_unknown_memtable_type() {
+ let map = make_map(&[("memtable.type", "no_such_memtable")]);
+ let err = RegionOptions::try_from(&map).unwrap_err();
+ assert_eq!(StatusCode::InvalidArguments, err.status_code());
+ }
+
#[test]
fn test_with_all() {
let wal_options = WalOptions::Kafka(KafkaWalOptions {
@@ -373,6 +476,10 @@ mod tests {
WAL_OPTIONS_KEY,
&serde_json::to_string(&wal_options).unwrap(),
),
+ ("memtable.type", "experimental"),
+ ("memtable.experimental.index_max_keys_per_shard", "2048"),
+ ("memtable.experimental.data_freeze_threshold", "2048"),
+ ("memtable.experimental.fork_dictionary_bytes", "128M"),
]);
let options = RegionOptions::try_from(&map).unwrap();
let expect = RegionOptions {
@@ -382,7 +489,7 @@ mod tests {
max_inactive_window_files: 2,
time_window: Some(Duration::from_secs(3600 * 2)),
}),
- storage: Some("s3".to_string()),
+ storage: Some("S3".to_string()),
wal_options,
index_options: IndexOptions {
inverted_index: InvertedIndexOptions {
@@ -390,6 +497,11 @@ mod tests {
segment_row_count: 512,
},
},
+ memtable: Some(MemtableOptions::Experimental(ExperimentalOptions {
+ index_max_keys_per_shard: 2048,
+ data_freeze_threshold: 2048,
+ fork_dictionary_bytes: ReadableSize::mb(128),
+ })),
};
assert_eq!(expect, options);
}
diff --git a/src/mito2/src/worker.rs b/src/mito2/src/worker.rs
index a8fe38e87d35..6f4f5d7692f5 100644
--- a/src/mito2/src/worker.rs
+++ b/src/mito2/src/worker.rs
@@ -50,7 +50,7 @@ use crate::flush::{FlushScheduler, WriteBufferManagerImpl, WriteBufferManagerRef
use crate::manifest::action::RegionEdit;
use crate::memtable::merge_tree::MergeTreeMemtableBuilder;
use crate::memtable::time_series::TimeSeriesMemtableBuilder;
-use crate::memtable::{MemtableBuilderRef, MemtableConfig};
+use crate::memtable::{MemtableBuilderProvider, MemtableConfig};
use crate::region::{MitoRegionRef, RegionMap, RegionMapRef};
use crate::request::{
BackgroundNotify, DdlRequest, SenderDdlRequest, SenderWriteRequest, WorkerRequest,
@@ -338,7 +338,8 @@ impl<S: LogStore> WorkerStarter<S> {
let (sender, receiver) = mpsc::channel(self.config.worker_channel_size);
let running = Arc::new(AtomicBool::new(true));
- let memtable_builder = match &self.config.memtable {
+
+ let default_memtable_builder = match &self.config.memtable {
MemtableConfig::Experimental(merge_tree) => Arc::new(MergeTreeMemtableBuilder::new(
merge_tree.clone(),
Some(self.write_buffer_manager.clone()),
@@ -358,7 +359,10 @@ impl<S: LogStore> WorkerStarter<S> {
wal: Wal::new(self.log_store),
object_store_manager: self.object_store_manager.clone(),
running: running.clone(),
- memtable_builder,
+ memtable_builder_provider: MemtableBuilderProvider::new(
+ Some(self.write_buffer_manager.clone()),
+ default_memtable_builder,
+ ),
scheduler: self.scheduler.clone(),
write_buffer_manager: self.write_buffer_manager,
flush_scheduler: FlushScheduler::new(self.scheduler.clone()),
@@ -513,8 +517,8 @@ struct RegionWorkerLoop<S> {
object_store_manager: ObjectStoreManagerRef,
/// Whether the worker thread is still running.
running: Arc<AtomicBool>,
- /// Memtable builder for each region.
- memtable_builder: MemtableBuilderRef,
+ /// Memtable builder provider for each region.
+ memtable_builder_provider: MemtableBuilderProvider,
/// Background job scheduler.
scheduler: SchedulerRef,
/// Engine write buffer manager.
diff --git a/src/mito2/src/worker/handle_alter.rs b/src/mito2/src/worker/handle_alter.rs
index eefee19233c3..d0d6e51039e3 100644
--- a/src/mito2/src/worker/handle_alter.rs
+++ b/src/mito2/src/worker/handle_alter.rs
@@ -27,7 +27,6 @@ use crate::error::{
};
use crate::flush::FlushReason;
use crate::manifest::action::{RegionChange, RegionMetaAction, RegionMetaActionList};
-use crate::memtable::MemtableBuilderRef;
use crate::region::version::Version;
use crate::region::MitoRegionRef;
use crate::request::{DdlRequest, OptionOutputTx, SenderDdlRequest};
@@ -109,9 +108,7 @@ impl<S> RegionWorkerLoop<S> {
}
// Now we can alter the region directly.
- if let Err(e) =
- alter_region_schema(®ion, &version, request, &self.memtable_builder).await
- {
+ if let Err(e) = alter_region_schema(®ion, &version, request).await {
error!(e; "Failed to alter region schema, region_id: {}", region_id);
sender.send(Err(e));
return;
@@ -134,7 +131,6 @@ async fn alter_region_schema(
region: &MitoRegionRef,
version: &Version,
request: RegionAlterRequest,
- builder: &MemtableBuilderRef,
) -> Result<()> {
let new_meta = metadata_after_alteration(&version.metadata, request)?;
// Persist the metadata to region's manifest.
@@ -145,7 +141,9 @@ async fn alter_region_schema(
region.manifest_manager.update(action_list).await?;
// Apply the metadata to region's version.
- region.version_control.alter_schema(new_meta, builder);
+ region
+ .version_control
+ .alter_schema(new_meta, ®ion.memtable_builder);
Ok(())
}
diff --git a/src/mito2/src/worker/handle_catchup.rs b/src/mito2/src/worker/handle_catchup.rs
index 3622793273af..4a9730dd7a9b 100644
--- a/src/mito2/src/worker/handle_catchup.rs
+++ b/src/mito2/src/worker/handle_catchup.rs
@@ -51,7 +51,7 @@ impl<S: LogStore> RegionWorkerLoop<S> {
RegionOpener::new(
region_id,
region.region_dir(),
- self.memtable_builder.clone(),
+ self.memtable_builder_provider.clone(),
self.object_store_manager.clone(),
self.scheduler.clone(),
self.intermediate_manager.clone(),
diff --git a/src/mito2/src/worker/handle_create.rs b/src/mito2/src/worker/handle_create.rs
index 0a87ba2ed54c..6b5428963753 100644
--- a/src/mito2/src/worker/handle_create.rs
+++ b/src/mito2/src/worker/handle_create.rs
@@ -58,7 +58,7 @@ impl<S: LogStore> RegionWorkerLoop<S> {
let region = RegionOpener::new(
region_id,
&request.region_dir,
- self.memtable_builder.clone(),
+ self.memtable_builder_provider.clone(),
self.object_store_manager.clone(),
self.scheduler.clone(),
self.intermediate_manager.clone(),
diff --git a/src/mito2/src/worker/handle_drop.rs b/src/mito2/src/worker/handle_drop.rs
index fa0d1181a988..490fa432aa65 100644
--- a/src/mito2/src/worker/handle_drop.rs
+++ b/src/mito2/src/worker/handle_drop.rs
@@ -61,7 +61,9 @@ impl<S> RegionWorkerLoop<S> {
self.compaction_scheduler.on_region_dropped(region_id);
// mark region version as dropped
- region.version_control.mark_dropped(&self.memtable_builder);
+ region
+ .version_control
+ .mark_dropped(®ion.memtable_builder);
info!(
"Region {} is dropped logically, but some files are not deleted yet",
region_id
diff --git a/src/mito2/src/worker/handle_open.rs b/src/mito2/src/worker/handle_open.rs
index 9163b6f174c6..884012473e1d 100644
--- a/src/mito2/src/worker/handle_open.rs
+++ b/src/mito2/src/worker/handle_open.rs
@@ -65,7 +65,7 @@ impl<S: LogStore> RegionWorkerLoop<S> {
let region = RegionOpener::new(
region_id,
&request.region_dir,
- self.memtable_builder.clone(),
+ self.memtable_builder_provider.clone(),
self.object_store_manager.clone(),
self.scheduler.clone(),
self.intermediate_manager.clone(),
diff --git a/src/mito2/src/worker/handle_truncate.rs b/src/mito2/src/worker/handle_truncate.rs
index 811a6f2c9993..c853f5eb030a 100644
--- a/src/mito2/src/worker/handle_truncate.rs
+++ b/src/mito2/src/worker/handle_truncate.rs
@@ -55,7 +55,7 @@ impl<S: LogStore> RegionWorkerLoop<S> {
region.version_control.truncate(
truncated_entry_id,
truncated_sequence,
- &self.memtable_builder,
+ ®ion.memtable_builder,
);
// Make all data obsolete.
diff --git a/src/store-api/src/mito_engine_options.rs b/src/store-api/src/mito_engine_options.rs
index 7ef963a4c033..26b62551ecae 100644
--- a/src/store-api/src/mito_engine_options.rs
+++ b/src/store-api/src/mito_engine_options.rs
@@ -29,6 +29,10 @@ pub fn is_mito_engine_option_key(key: &str) -> bool {
"index.inverted_index.ignore_column_ids",
"index.inverted_index.segment_row_count",
WAL_OPTIONS_KEY,
+ "memtable.type",
+ "memtable.experimental.index_max_keys_per_shard",
+ "memtable.experimental.data_freeze_threshold",
+ "memtable.experimental.fork_dictionary_bytes",
]
.contains(&key)
}
@@ -56,6 +60,16 @@ mod tests {
"index.inverted_index.segment_row_count"
));
assert!(is_mito_engine_option_key("wal_options"));
+ assert!(is_mito_engine_option_key("memtable.type"));
+ assert!(is_mito_engine_option_key(
+ "memtable.experimental.index_max_keys_per_shard"
+ ));
+ assert!(is_mito_engine_option_key(
+ "memtable.experimental.data_freeze_threshold"
+ ));
+ assert!(is_mito_engine_option_key(
+ "memtable.experimental.fork_dictionary_bytes"
+ ));
assert!(!is_mito_engine_option_key("foo"));
}
}
diff --git a/tests-integration/tests/http.rs b/tests-integration/tests/http.rs
index a75e9e219612..abf207fd3577 100644
--- a/tests-integration/tests/http.rs
+++ b/tests-integration/tests/http.rs
@@ -787,11 +787,7 @@ mem_threshold_on_create = "64.0MiB"
intermediate_path = ""
[datanode.region_engine.mito.memtable]
-type = "experimental"
-index_max_keys_per_shard = 8192
-data_freeze_threshold = 131072
-dedup = true
-fork_dictionary_bytes = "1GiB"
+type = "time_series"
[[datanode.region_engine]]
diff --git a/tests/cases/standalone/common/create/create_with_options.result b/tests/cases/standalone/common/create/create_with_options.result
index e50746d127be..5dd872c7f7f3 100644
--- a/tests/cases/standalone/common/create/create_with_options.result
+++ b/tests/cases/standalone/common/create/create_with_options.result
@@ -74,6 +74,7 @@ with(
'index.inverted_index.ignore_column_ids'='1,2,3',
'index.inverted_index.segment_row_count'='512',
'wal_options'='{"wal.provider":"raft_engine"}',
+ 'memtable.type' = 'experimental',
);
Affected Rows: 0
@@ -82,3 +83,15 @@ drop table test_mito_options;
Affected Rows: 0
+create table if not exists invalid_compaction(
+ host string,
+ ts timestamp,
+ memory double,
+ TIME INDEX (ts),
+ PRIMARY KEY(host)
+)
+engine=mito
+with('compaction.type'='twcs', 'compaction.twcs.max_active_window_files'='8d');
+
+Error: 1004(InvalidArguments), Invalid options: invalid digit found in string
+
diff --git a/tests/cases/standalone/common/create/create_with_options.sql b/tests/cases/standalone/common/create/create_with_options.sql
index 094d905edac1..1f3c5334121e 100644
--- a/tests/cases/standalone/common/create/create_with_options.sql
+++ b/tests/cases/standalone/common/create/create_with_options.sql
@@ -64,6 +64,17 @@ with(
'index.inverted_index.ignore_column_ids'='1,2,3',
'index.inverted_index.segment_row_count'='512',
'wal_options'='{"wal.provider":"raft_engine"}',
+ 'memtable.type' = 'experimental',
);
drop table test_mito_options;
+
+create table if not exists invalid_compaction(
+ host string,
+ ts timestamp,
+ memory double,
+ TIME INDEX (ts),
+ PRIMARY KEY(host)
+)
+engine=mito
+with('compaction.type'='twcs', 'compaction.twcs.max_active_window_files'='8d');
diff --git a/tests/cases/standalone/common/show/show_create.result b/tests/cases/standalone/common/show/show_create.result
index b016174a386b..60bd139fcf9c 100644
--- a/tests/cases/standalone/common/show/show_create.result
+++ b/tests/cases/standalone/common/show/show_create.result
@@ -96,5 +96,5 @@ WITH(
storage = 'S3'
);
-Error: 1004(InvalidArguments), Object store not found: s3
+Error: 1004(InvalidArguments), Object store not found: S3
|
feat
|
support per table memtable options (#3524)
|
4c76d4d97ece13acb23b68ff793700c284deac3c
|
2023-11-21 17:31:28
|
Weny Xu
|
feat: add update metadata step for downgrading leader region (#2771)
| false
|
diff --git a/src/common/meta/src/key.rs b/src/common/meta/src/key.rs
index fa06558a9ba3..c66ead5b8002 100644
--- a/src/common/meta/src/key.rs
+++ b/src/common/meta/src/key.rs
@@ -631,7 +631,7 @@ impl TableMetadataManager {
pub async fn update_leader_region_status<F>(
&self,
table_id: TableId,
- current_table_route_value: DeserializedValueWithBytes<TableRouteValue>,
+ current_table_route_value: &DeserializedValueWithBytes<TableRouteValue>,
next_region_route_status: F,
) -> Result<()>
where
@@ -658,7 +658,7 @@ impl TableMetadataManager {
let (update_table_route_txn, on_update_table_route_failure) = self
.table_route_manager()
- .build_update_txn(table_id, ¤t_table_route_value, &new_table_route_value)?;
+ .build_update_txn(table_id, current_table_route_value, &new_table_route_value)?;
let r = self.kv_backend.txn(update_table_route_txn).await?;
@@ -1094,7 +1094,7 @@ mod tests {
.unwrap();
table_metadata_manager
- .update_leader_region_status(table_id, current_table_route_value, |region_route| {
+ .update_leader_region_status(table_id, ¤t_table_route_value, |region_route| {
if region_route.leader_status.is_some() {
None
} else {
diff --git a/src/meta-srv/src/procedure/region_failover/deactivate_region.rs b/src/meta-srv/src/procedure/region_failover/deactivate_region.rs
index 04b3ccde97e4..c120b30a26da 100644
--- a/src/meta-srv/src/procedure/region_failover/deactivate_region.rs
+++ b/src/meta-srv/src/procedure/region_failover/deactivate_region.rs
@@ -58,7 +58,7 @@ impl DeactivateRegion {
.context(error::TableRouteNotFoundSnafu { table_id })?;
ctx.table_metadata_manager
- .update_leader_region_status(table_id, table_route_value, |region| {
+ .update_leader_region_status(table_id, &table_route_value, |region| {
if region.region.id.region_number() == failed_region.region_number {
Some(Some(RegionStatus::Downgraded))
} else {
diff --git a/src/meta-srv/src/procedure/region_migration.rs b/src/meta-srv/src/procedure/region_migration.rs
index aa5ff5452884..cabd1f7805ab 100644
--- a/src/meta-srv/src/procedure/region_migration.rs
+++ b/src/meta-srv/src/procedure/region_migration.rs
@@ -18,11 +18,13 @@ pub(crate) mod migration_start;
pub(crate) mod open_candidate_region;
#[cfg(test)]
pub(crate) mod test_util;
+pub(crate) mod update_metadata;
use std::any::Any;
use std::fmt::Debug;
-use common_meta::key::TableMetadataManagerRef;
+use common_meta::key::table_route::TableRouteValue;
+use common_meta::key::{DeserializedValueWithBytes, TableMetadataManagerRef};
use common_meta::peer::Peer;
use common_meta::ClusterId;
use common_procedure::error::{
@@ -30,11 +32,11 @@ use common_procedure::error::{
};
use common_procedure::{Context as ProcedureContext, LockKey, Procedure, Status};
use serde::{Deserialize, Serialize};
-use snafu::ResultExt;
+use snafu::{location, Location, OptionExt, ResultExt};
use store_api::storage::RegionId;
use self::migration_start::RegionMigrationStart;
-use crate::error::{Error, Result};
+use crate::error::{self, Error, Result};
use crate::procedure::utils::region_lock_key;
use crate::region::lease_keeper::{OpeningRegionGuard, OpeningRegionKeeperRef};
use crate::service::mailbox::MailboxRef;
@@ -74,8 +76,10 @@ pub struct VolatileContext {
///
/// `opening_region_guard` should be consumed after
/// the corresponding [RegionRoute](common_meta::rpc::router::RegionRoute) of the opening region
- /// was written into [TableRouteValue](common_meta::key::table_route::TableRouteValue) .
+ /// was written into [TableRouteValue](common_meta::key::table_route::TableRouteValue).
opening_region_guard: Option<OpeningRegionGuard>,
+ /// `table_route_info` is stored via previous steps for future use.
+ table_route_info: Option<DeserializedValueWithBytes<TableRouteValue>>,
}
/// Used to generate new [Context].
@@ -122,6 +126,47 @@ impl Context {
pub fn server_addr(&self) -> &str {
&self.server_addr
}
+
+ /// Returns the `table_route_value` of [VolatileContext] if any.
+ /// Otherwise, returns the value retrieved from remote.
+ ///
+ /// Retry:
+ /// - Failed to retrieve the metadata of table.
+ pub async fn get_table_route_value(
+ &mut self,
+ ) -> Result<&DeserializedValueWithBytes<TableRouteValue>> {
+ let table_route_value = &mut self.volatile_ctx.table_route_info;
+
+ if table_route_value.is_none() {
+ let table_id = self.persistent_ctx.region_id.table_id();
+ let table_route = self
+ .table_metadata_manager
+ .table_route_manager()
+ .get(table_id)
+ .await
+ .context(error::TableMetadataManagerSnafu)
+ .map_err(|e| error::Error::RetryLater {
+ reason: e.to_string(),
+ location: location!(),
+ })?
+ .context(error::TableRouteNotFoundSnafu { table_id })?;
+
+ *table_route_value = Some(table_route);
+ }
+
+ Ok(table_route_value.as_ref().unwrap())
+ }
+
+ /// Removes the `table_route_value` of [VolatileContext], returns true if any.
+ pub fn remove_table_route_value(&mut self) -> bool {
+ let value = self.volatile_ctx.table_route_info.take();
+ value.is_some()
+ }
+
+ /// Returns the [RegionId].
+ pub fn region_id(&self) -> RegionId {
+ self.persistent_ctx.region_id
+ }
}
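// Editor's note, not from the original commit: `get_table_route_value` acts as
// a fetch-once cache over the volatile context; the first call loads the
// TableRouteValue from the table route manager (retryable on failure), and
// later calls reuse the cached copy until `remove_table_route_value` drops it.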
#[async_trait::async_trait]
diff --git a/src/meta-srv/src/procedure/region_migration/migration_start.rs b/src/meta-srv/src/procedure/region_migration/migration_start.rs
index 6f2e43c8ace4..ab61da316c8d 100644
--- a/src/meta-srv/src/procedure/region_migration/migration_start.rs
+++ b/src/meta-srv/src/procedure/region_migration/migration_start.rs
@@ -17,7 +17,7 @@ use std::any::Any;
use common_meta::peer::Peer;
use common_meta::rpc::router::RegionRoute;
use serde::{Deserialize, Serialize};
-use snafu::{location, Location, OptionExt, ResultExt};
+use snafu::OptionExt;
use store_api::storage::RegionId;
use super::downgrade_leader_region::DowngradeLeaderRegion;
@@ -41,9 +41,8 @@ impl State for RegionMigrationStart {
/// Otherwise go to the OpenCandidateRegion state.
async fn next(&mut self, ctx: &mut Context) -> Result<Box<dyn State>> {
let region_id = ctx.persistent_ctx.region_id;
- let to_peer = &ctx.persistent_ctx.to_peer;
-
let region_route = self.retrieve_region_route(ctx, region_id).await?;
+ let to_peer = &ctx.persistent_ctx.to_peer;
if self.check_leader_region_on_peer(®ion_route, to_peer)? {
Ok(Box::new(RegionMigrationEnd))
@@ -70,21 +69,11 @@ impl RegionMigrationStart {
/// - Failed to retrieve the metadata of table.
async fn retrieve_region_route(
&self,
- ctx: &Context,
+ ctx: &mut Context,
region_id: RegionId,
) -> Result<RegionRoute> {
let table_id = region_id.table_id();
- let table_route = ctx
- .table_metadata_manager
- .table_route_manager()
- .get(table_id)
- .await
- .context(error::TableMetadataManagerSnafu)
- .map_err(|e| error::Error::RetryLater {
- reason: e.to_string(),
- location: location!(),
- })?
- .context(error::TableRouteNotFoundSnafu { table_id })?;
+ let table_route = ctx.get_table_route_value().await?;
let region_route = table_route
.region_routes
@@ -165,10 +154,10 @@ mod tests {
let state = RegionMigrationStart;
let env = TestingEnv::new();
let persistent_context = new_persistent_context();
- let ctx = env.context_factory().new_context(persistent_context);
+ let mut ctx = env.context_factory().new_context(persistent_context);
let err = state
- .retrieve_region_route(&ctx, RegionId::new(1024, 1))
+ .retrieve_region_route(&mut ctx, RegionId::new(1024, 1))
.await
.unwrap_err();
@@ -184,7 +173,7 @@ mod tests {
let from_peer = persistent_context.from_peer.clone();
let env = TestingEnv::new();
- let ctx = env.context_factory().new_context(persistent_context);
+ let mut ctx = env.context_factory().new_context(persistent_context);
let table_info = new_test_table_info(1024, vec![1]).into();
let region_route = RegionRoute {
@@ -199,7 +188,7 @@ mod tests {
.unwrap();
let err = state
- .retrieve_region_route(&ctx, RegionId::new(1024, 3))
+ .retrieve_region_route(&mut ctx, RegionId::new(1024, 3))
.await
.unwrap_err();
diff --git a/src/meta-srv/src/procedure/region_migration/update_metadata.rs b/src/meta-srv/src/procedure/region_migration/update_metadata.rs
new file mode 100644
index 000000000000..f41b66f4c09e
--- /dev/null
+++ b/src/meta-srv/src/procedure/region_migration/update_metadata.rs
@@ -0,0 +1,239 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::any::Any;
+
+use common_meta::rpc::router::RegionStatus;
+use serde::{Deserialize, Serialize};
+use snafu::ResultExt;
+
+use crate::error::{self, Result};
+use crate::procedure::region_migration::downgrade_leader_region::DowngradeLeaderRegion;
+use crate::procedure::region_migration::{Context, State};
+
+#[derive(Debug, Serialize, Deserialize)]
+#[serde(tag = "UpdateMetadata")]
+pub enum UpdateMetadata {
+ Downgrade,
+}
+
+#[async_trait::async_trait]
+#[typetag::serde]
+impl State for UpdateMetadata {
+ async fn next(&mut self, ctx: &mut Context) -> Result<Box<dyn State>> {
+ match self {
+ UpdateMetadata::Downgrade => {
+ self.downgrade_leader_region(ctx).await?;
+
+ Ok(Box::new(DowngradeLeaderRegion))
+ }
+ }
+ }
+
+ fn as_any(&self) -> &dyn Any {
+ self
+ }
+}
+
+impl UpdateMetadata {
+ /// Downgrades the leader region.
+ ///
+ /// Abort(non-retry):
+ /// - TableRoute is not found.
+ ///
+ /// Retry:
+ /// - Failed to update [TableRouteValue](common_meta::key::table_region::TableRegionValue).
+ /// - Failed to retrieve the metadata of table.
+ ///
+ /// About the failure of updating the [TableRouteValue](common_meta::key::table_region::TableRegionValue):
+ ///
+ /// - There may be another [RegionMigrationProcedure](crate::procedure::region_migration::RegionMigrationProcedure)
+ /// that is executed concurrently for **other region**.
+ /// It will only update **other region** info. Therefore, it's safe to retry after failure.
+ ///
+ /// - There is no other DDL procedure executed concurrently for the current table.
+ async fn downgrade_leader_region(&self, ctx: &mut Context) -> Result<()> {
+ let table_metadata_manager = ctx.table_metadata_manager.clone();
+ let region_id = ctx.region_id();
+ let table_id = region_id.table_id();
+ let current_table_route_value = ctx.get_table_route_value().await?;
+
+ if let Err(err) = table_metadata_manager
+ .update_leader_region_status(table_id, current_table_route_value, |route| {
+ if route.region.id == region_id {
+ Some(Some(RegionStatus::Downgraded))
+ } else {
+ None
+ }
+ })
+ .await
+ .context(error::TableMetadataManagerSnafu)
+ {
+ debug_assert!(ctx.remove_table_route_value());
+ return error::RetryLaterSnafu {
+ reason: format!("Failed to update the table route during the downgrading leader region, error: {err}")
+ }.fail();
+ }
+
+ debug_assert!(ctx.remove_table_route_value());
+
+ Ok(())
+ }
+}
+
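// Editor's note, not from the original commit: when the compare-and-swap style
// update of the table route fails, the cached route is dropped via
// `remove_table_route_value` and a RetryLater error is returned, so the next
// attempt re-reads the latest TableRouteValue before retrying the downgrade;
// the cache is cleared on success as well.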
+#[cfg(test)]
+mod tests {
+ use std::assert_matches::assert_matches;
+
+ use common_meta::key::test_utils::new_test_table_info;
+ use common_meta::peer::Peer;
+ use common_meta::rpc::router::{Region, RegionRoute};
+ use store_api::storage::RegionId;
+
+ use super::*;
+ use crate::error::Error;
+ use crate::procedure::region_migration::test_util::TestingEnv;
+ use crate::procedure::region_migration::{ContextFactory, PersistentContext};
+
+ fn new_persistent_context() -> PersistentContext {
+ PersistentContext {
+ from_peer: Peer::empty(1),
+ to_peer: Peer::empty(2),
+ region_id: RegionId::new(1024, 1),
+ cluster_id: 0,
+ }
+ }
+
+ #[test]
+ fn test_state_serialization() {
+ let state = UpdateMetadata::Downgrade;
+ let expected = r#"{"UpdateMetadata":"Downgrade"}"#;
+ assert_eq!(expected, serde_json::to_string(&state).unwrap());
+ }
+
+ #[tokio::test]
+ async fn test_table_route_is_not_found_error() {
+ let state = UpdateMetadata::Downgrade;
+ let env = TestingEnv::new();
+ let persistent_context = new_persistent_context();
+ let mut ctx = env.context_factory().new_context(persistent_context);
+
+ let err = state.downgrade_leader_region(&mut ctx).await.unwrap_err();
+
+ assert_matches!(err, Error::TableRouteNotFound { .. });
+
+ assert!(!err.is_retryable());
+ }
+
+ #[tokio::test]
+ async fn test_failed_to_update_table_route_error() {
+ let state = UpdateMetadata::Downgrade;
+ let persistent_context = new_persistent_context();
+ let from_peer = persistent_context.from_peer.clone();
+
+ let env = TestingEnv::new();
+ let mut ctx = env.context_factory().new_context(persistent_context);
+ let table_id = ctx.region_id().table_id();
+
+ let table_info = new_test_table_info(1024, vec![1, 2]).into();
+ let region_routes = vec![
+ RegionRoute {
+ region: Region::new_test(RegionId::new(1024, 1)),
+ leader_peer: Some(from_peer.clone()),
+ ..Default::default()
+ },
+ RegionRoute {
+ region: Region::new_test(RegionId::new(1024, 2)),
+ leader_peer: Some(Peer::empty(4)),
+ ..Default::default()
+ },
+ ];
+
+ let table_metadata_manager = env.table_metadata_manager();
+ table_metadata_manager
+ .create_table_metadata(table_info, region_routes)
+ .await
+ .unwrap();
+
+ let original_table_route = table_metadata_manager
+ .table_route_manager()
+ .get(table_id)
+ .await
+ .unwrap()
+ .unwrap();
+
+ // modifies the table route.
+ table_metadata_manager
+ .update_leader_region_status(table_id, &original_table_route, |route| {
+ if route.region.id == RegionId::new(1024, 2) {
+ Some(Some(RegionStatus::Downgraded))
+ } else {
+ None
+ }
+ })
+ .await
+ .unwrap();
+
+ // sets the old table route.
+ ctx.volatile_ctx.table_route_info = Some(original_table_route);
+
+ let err = state.downgrade_leader_region(&mut ctx).await.unwrap_err();
+
+ assert_matches!(err, Error::RetryLater { .. });
+
+ assert!(err.is_retryable());
+ assert!(err.to_string().contains("Failed to update the table route"));
+ }
+
+ #[tokio::test]
+ async fn test_next_downgrade_leader_region_state() {
+ let mut state = Box::new(UpdateMetadata::Downgrade);
+ let persistent_context = new_persistent_context();
+ let from_peer = persistent_context.from_peer.clone();
+
+ let env = TestingEnv::new();
+ let mut ctx = env.context_factory().new_context(persistent_context);
+ let table_id = ctx.region_id().table_id();
+
+ let table_info = new_test_table_info(1024, vec![1, 2]).into();
+ let region_routes = vec![RegionRoute {
+ region: Region::new_test(RegionId::new(1024, 1)),
+ leader_peer: Some(from_peer.clone()),
+ ..Default::default()
+ }];
+
+ let table_metadata_manager = env.table_metadata_manager();
+ table_metadata_manager
+ .create_table_metadata(table_info, region_routes)
+ .await
+ .unwrap();
+
+ let next = state.next(&mut ctx).await.unwrap();
+
+ let _ = next
+ .as_any()
+ .downcast_ref::<DowngradeLeaderRegion>()
+ .unwrap();
+
+ let latest_table_route = table_metadata_manager
+ .table_route_manager()
+ .get(table_id)
+ .await
+ .unwrap()
+ .unwrap();
+
+ assert!(latest_table_route.region_routes[0].is_leader_downgraded());
+ assert!(ctx.volatile_ctx.table_route_info.is_none());
+ }
+}
|
feat
|
add update metadata step for downgrading leader region (#2771)
|
e6cc4df8c89eaf1f53ea626b69ca81dce0b21e16
|
2024-08-05 19:27:48
|
discord9
|
feat: flow recreate on reboot (#4509)
| false
|
diff --git a/src/cmd/src/flownode.rs b/src/cmd/src/flownode.rs
index 328693f326fc..60ec6c6614c6 100644
--- a/src/cmd/src/flownode.rs
+++ b/src/cmd/src/flownode.rs
@@ -24,6 +24,7 @@ use common_grpc::channel_manager::ChannelConfig;
use common_meta::cache::{CacheRegistryBuilder, LayeredCacheRegistryBuilder};
use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler;
use common_meta::heartbeat::handler::HandlerGroupExecutor;
+use common_meta::key::flow::FlowMetadataManager;
use common_meta::key::TableMetadataManager;
use common_telemetry::info;
use common_telemetry::logging::TracingOptions;
@@ -296,11 +297,13 @@ impl StartCommand {
Arc::new(executor),
);
+ let flow_metadata_manager = Arc::new(FlowMetadataManager::new(cached_meta_backend.clone()));
let flownode_builder = FlownodeBuilder::new(
opts,
Plugins::new(),
table_metadata_manager,
catalog_manager.clone(),
+ flow_metadata_manager,
)
.with_heartbeat_task(heartbeat_task);
diff --git a/src/cmd/src/standalone.rs b/src/cmd/src/standalone.rs
index ba25ab555f7c..efa360713ff9 100644
--- a/src/cmd/src/standalone.rs
+++ b/src/cmd/src/standalone.rs
@@ -476,11 +476,13 @@ impl StartCommand {
.await
.context(StartDatanodeSnafu)?;
+ let flow_metadata_manager = Arc::new(FlowMetadataManager::new(kv_backend.clone()));
let flow_builder = FlownodeBuilder::new(
Default::default(),
plugins.clone(),
table_metadata_manager.clone(),
catalog_manager.clone(),
+ flow_metadata_manager.clone(),
);
let flownode = Arc::new(
flow_builder
@@ -511,7 +513,6 @@ impl StartCommand {
opts.wal.into(),
kv_backend.clone(),
));
- let flow_metadata_manager = Arc::new(FlowMetadataManager::new(kv_backend.clone()));
let table_meta_allocator = Arc::new(TableMetadataAllocator::new(
table_id_sequence,
wal_options_allocator.clone(),
diff --git a/src/flow/src/error.rs b/src/flow/src/error.rs
index 3b8877ed86dd..8b4f3adc65d2 100644
--- a/src/flow/src/error.rs
+++ b/src/flow/src/error.rs
@@ -83,6 +83,14 @@ pub enum Error {
location: Location,
},
+ #[snafu(display("Failed to list flows in flownode={id:?}"))]
+ ListFlows {
+ id: Option<common_meta::FlownodeId>,
+ source: common_meta::error::Error,
+ #[snafu(implicit)]
+ location: Location,
+ },
+
#[snafu(display("Flow already exist, id={id}"))]
FlowAlreadyExist {
id: FlowId,
@@ -214,7 +222,8 @@ impl ErrorExt for Error {
}
Self::TableNotFound { .. }
| Self::TableNotFoundMeta { .. }
- | Self::FlowNotFound { .. } => StatusCode::TableNotFound,
+ | Self::FlowNotFound { .. }
+ | Self::ListFlows { .. } => StatusCode::TableNotFound,
Self::InvalidQueryProst { .. }
| &Self::InvalidQuery { .. }
| &Self::Plan { .. }
diff --git a/src/flow/src/server.rs b/src/flow/src/server.rs
index a8c850349fdd..d470eb0ad8e7 100644
--- a/src/flow/src/server.rs
+++ b/src/flow/src/server.rs
@@ -29,12 +29,13 @@ use common_meta::cache::{
};
use common_meta::ddl::{table_meta, ProcedureExecutorRef};
use common_meta::heartbeat::handler::HandlerGroupExecutor;
+use common_meta::key::flow::FlowMetadataManagerRef;
use common_meta::key::TableMetadataManagerRef;
use common_meta::kv_backend::KvBackendRef;
use common_meta::node_manager::{self, Flownode, NodeManagerRef};
use common_query::Output;
use common_telemetry::tracing::info;
-use futures::FutureExt;
+use futures::{FutureExt, StreamExt, TryStreamExt};
use greptime_proto::v1::flow::{flow_server, FlowRequest, FlowResponse, InsertRequests};
use itertools::Itertools;
use meta_client::client::MetaClient;
@@ -47,7 +48,7 @@ use serde::de::Unexpected;
use servers::error::{AlreadyStartedSnafu, StartGrpcSnafu, TcpBindSnafu, TcpIncomingSnafu};
use servers::heartbeat_options::HeartbeatOptions;
use servers::server::Server;
-use session::context::QueryContextRef;
+use session::context::{QueryContext, QueryContextBuilder, QueryContextRef};
use snafu::{ensure, OptionExt, ResultExt};
use tokio::net::TcpListener;
use tokio::sync::{broadcast, oneshot, Mutex};
@@ -57,7 +58,8 @@ use tonic::{Request, Response, Status};
use crate::adapter::FlowWorkerManagerRef;
use crate::error::{
- CacheRequiredSnafu, ParseAddrSnafu, ShutdownServerSnafu, StartServerSnafu, UnexpectedSnafu,
+ CacheRequiredSnafu, ExternalSnafu, FlowNotFoundSnafu, ListFlowsSnafu, ParseAddrSnafu,
+ ShutdownServerSnafu, StartServerSnafu, UnexpectedSnafu,
};
use crate::heartbeat::HeartbeatTask;
use crate::transform::register_function_to_query_engine;
@@ -240,6 +242,7 @@ pub struct FlownodeBuilder {
plugins: Plugins,
table_meta: TableMetadataManagerRef,
catalog_manager: CatalogManagerRef,
+ flow_metadata_manager: FlowMetadataManagerRef,
heartbeat_task: Option<HeartbeatTask>,
}
@@ -250,12 +253,14 @@ impl FlownodeBuilder {
plugins: Plugins,
table_meta: TableMetadataManagerRef,
catalog_manager: CatalogManagerRef,
+ flow_metadata_manager: FlowMetadataManagerRef,
) -> Self {
Self {
opts,
plugins,
table_meta,
catalog_manager,
+ flow_metadata_manager,
heartbeat_task: None,
}
}
@@ -283,6 +288,11 @@ impl FlownodeBuilder {
self.build_manager(query_engine_factory.query_engine())
.await?,
);
+
+ if let Err(err) = self.recover_flows(&manager).await {
+ common_telemetry::error!(err; "Failed to recover flows");
+ }
+
let server = FlownodeServer::new(FlowService::new(manager.clone()));
let heartbeat_task = self.heartbeat_task;
@@ -296,6 +306,85 @@ impl FlownodeBuilder {
Ok(instance)
}
+ /// Recover all flow tasks in this flownode in distributed mode (nodeid is Some(<num>)),
+ ///
+ /// or recover all existing flow tasks in standalone mode (nodeid is None).
+ ///
+ /// TODO(discord9): persist flow tasks with internal state
+ async fn recover_flows(&self, manager: &FlowWorkerManagerRef) -> Result<usize, Error> {
+ let nodeid = self.opts.node_id;
+ let to_be_recovered: Vec<_> = if let Some(nodeid) = nodeid {
+ let to_be_recover = self
+ .flow_metadata_manager
+ .flownode_flow_manager()
+ .flows(nodeid)
+ .try_collect::<Vec<_>>()
+ .await
+ .context(ListFlowsSnafu { id: Some(nodeid) })?;
+ to_be_recover.into_iter().map(|(id, _)| id).collect()
+ } else {
+ let all_catalogs = self
+ .catalog_manager
+ .catalog_names()
+ .await
+ .map_err(BoxedError::new)
+ .context(ExternalSnafu)?;
+ let mut all_flow_ids = vec![];
+ for catalog in all_catalogs {
+ let flows = self
+ .flow_metadata_manager
+ .flow_name_manager()
+ .flow_names(&catalog)
+ .await
+ .try_collect::<Vec<_>>()
+ .await
+ .map_err(BoxedError::new)
+ .context(ExternalSnafu)?;
+
+ all_flow_ids.extend(flows.into_iter().map(|(_, id)| id.flow_id()));
+ }
+ all_flow_ids
+ };
+ let cnt = to_be_recovered.len();
+
+ // TODO(discord9): recover in parallel
+ for flow_id in to_be_recovered {
+ let info = self
+ .flow_metadata_manager
+ .flow_info_manager()
+ .get(flow_id)
+ .await
+ .map_err(BoxedError::new)
+ .context(ExternalSnafu)?
+ .context(FlowNotFoundSnafu { id: flow_id })?;
+
+ let sink_table_name = [
+ info.sink_table_name().catalog_name.clone(),
+ info.sink_table_name().schema_name.clone(),
+ info.sink_table_name().table_name.clone(),
+ ];
+ manager
+ .create_flow(
+ flow_id as _,
+ sink_table_name,
+ info.source_table_ids(),
+ true,
+ info.expire_after(),
+ Some(info.comment().clone()),
+ info.raw_sql().clone(),
+ info.options().clone(),
+ Some(
+ QueryContextBuilder::default()
+ .current_catalog(info.catalog_name().clone())
+ .build(),
+ ),
+ )
+ .await?;
+ }
+
+ Ok(cnt)
+ }
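// Editor's note, not from the original commit: in distributed mode the flow ids
// to recover come from the flownode-scoped mapping (flownode_flow_manager),
// while standalone mode scans every catalog through flow_name_manager; both
// paths then re-create each flow from the metadata in flow_info_manager via
// `create_flow`.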
+
/// build [`FlowWorkerManager`], note this doesn't take ownership of `self`,
/// nor does it actually start running the worker.
async fn build_manager(
diff --git a/tests-integration/src/standalone.rs b/tests-integration/src/standalone.rs
index 9de4e594980d..9f7188568fa4 100644
--- a/tests-integration/src/standalone.rs
+++ b/tests-integration/src/standalone.rs
@@ -156,6 +156,7 @@ impl GreptimeDbStandaloneBuilder {
plugins.clone(),
table_metadata_manager.clone(),
catalog_manager.clone(),
+ flow_metadata_manager.clone(),
);
let flownode = Arc::new(flow_builder.build().await.unwrap());
diff --git a/tests/cases/standalone/common/flow/flow_basic.result b/tests/cases/standalone/common/flow/flow_basic.result
index e5983ea8d424..3c49535b0fd3 100644
--- a/tests/cases/standalone/common/flow/flow_basic.result
+++ b/tests/cases/standalone/common/flow/flow_basic.result
@@ -24,6 +24,7 @@ select flush_flow('test_numbers_basic')<=1;
| true |
+----------------------------------------------------+
+-- SQLNESS ARG restart=true
INSERT INTO numbers_input_basic
VALUES
(20, "2021-07-01 00:00:00.200"),
diff --git a/tests/cases/standalone/common/flow/flow_basic.sql b/tests/cases/standalone/common/flow/flow_basic.sql
index dab3d78f836b..1f282cca2e03 100644
--- a/tests/cases/standalone/common/flow/flow_basic.sql
+++ b/tests/cases/standalone/common/flow/flow_basic.sql
@@ -14,6 +14,7 @@ SELECT sum(number) FROM numbers_input_basic GROUP BY tumble(ts, '1 second', '202
-- because flush_flow result is at most 1
select flush_flow('test_numbers_basic')<=1;
+-- SQLNESS ARG restart=true
INSERT INTO numbers_input_basic
VALUES
(20, "2021-07-01 00:00:00.200"),
|
feat
|
flow recreate on reboot (#4509)
|
03a144fa56a19b695d237e2aefc60041f0232d9a
|
2025-01-13 09:38:36
|
yihong
|
chore: drop useless import raw_normalize_path in object-store lib (#5349)
| false
|
diff --git a/src/object-store/src/lib.rs b/src/object-store/src/lib.rs
index 851484a0cffa..b727b6735274 100644
--- a/src/object-store/src/lib.rs
+++ b/src/object-store/src/lib.rs
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-pub use opendal::raw::{normalize_path as raw_normalize_path, Access, HttpClient};
+pub use opendal::raw::{Access, HttpClient};
pub use opendal::{
services, Buffer, Builder as ObjectStoreBuilder, Entry, EntryMode, Error, ErrorKind,
FuturesAsyncReader, FuturesAsyncWriter, Lister, Metakey, Operator as ObjectStore, Reader,
|
chore
|
drop useless import raw_normalize_path in object-store lib (#5349)
|
dadee99d69b2453c49c5ca233a1bdb1e64960048
|
2024-04-28 14:34:01
|
Weny Xu
|
chore: add warn log for denied to renew region lease (#3827)
| false
|
diff --git a/src/meta-srv/src/region/lease_keeper.rs b/src/meta-srv/src/region/lease_keeper.rs
index 968deb7e12df..3e15360e8d7e 100644
--- a/src/meta-srv/src/region/lease_keeper.rs
+++ b/src/meta-srv/src/region/lease_keeper.rs
@@ -20,6 +20,7 @@ use common_meta::key::TableMetadataManagerRef;
use common_meta::region_keeper::MemoryRegionKeeperRef;
use common_meta::rpc::router::RegionRoute;
use common_meta::DatanodeId;
+use common_telemetry::warn;
use snafu::ResultExt;
use store_api::region_engine::RegionRole;
use store_api::storage::{RegionId, TableId};
@@ -80,6 +81,10 @@ fn renew_region_lease_via_region_route(
return Some((region_id, RegionRole::Follower));
}
+ warn!(
+ "Denied to renew region lease for datanode: {datanode_id}, region_id: {region_id}, region_routes: {:?}",
+ region_route
+ );
// The region doesn't belong to this datanode.
None
}
@@ -140,7 +145,10 @@ impl RegionLeaseKeeper {
return renew_region_lease_via_region_route(®ion_route, datanode_id, region_id);
}
}
-
+ warn!(
+ "Denied to renew region lease for datanode: {datanode_id}, region_id: {region_id}, table({}) is not found",
+ region_id.table_id()
+ );
None
}
|
chore
|
add warn log for denied to renew region lease (#3827)
|
2ef84f64f19a9069d98d0d83e7bf0e72ffcb31c9
|
2023-07-04 12:43:14
|
Yingwen
|
feat(servers): enlarge default body limit to 64M (#1873)
| false
|
diff --git a/src/servers/src/http.rs b/src/servers/src/http.rs
index 4e8da291928b..9e3865a55558 100644
--- a/src/servers/src/http.rs
+++ b/src/servers/src/http.rs
@@ -34,7 +34,7 @@ use aide::openapi::{Info, OpenApi, Server as OpenAPIServer};
use async_trait::async_trait;
use axum::body::BoxBody;
use axum::error_handling::HandleErrorLayer;
-use axum::extract::MatchedPath;
+use axum::extract::{DefaultBodyLimit, MatchedPath};
use axum::http::Request;
use axum::middleware::{self, Next};
use axum::response::{Html, IntoResponse, Json};
@@ -104,6 +104,8 @@ pub(crate) async fn query_context_from_db(
pub const HTTP_API_VERSION: &str = "v1";
pub const HTTP_API_PREFIX: &str = "/v1/";
+/// Default http body limit (64M).
+const DEFAULT_BODY_LIMIT: usize = 64 * 1024 * 1024;
// TODO(fys): This is a temporary workaround, it will be improved later
pub static PUBLIC_APIS: [&str; 2] = ["/v1/influxdb/ping", "/v1/influxdb/health"];
@@ -544,6 +546,7 @@ impl HttpServer {
.layer(HandleErrorLayer::new(handle_error))
.layer(TraceLayer::new_for_http())
.layer(TimeoutLayer::new(self.options.timeout))
+ .layer(DefaultBodyLimit::max(DEFAULT_BODY_LIMIT))
// custom layer
.layer(AsyncRequireAuthorizationLayer::new(
HttpAuth::<BoxBody>::new(self.user_provider.clone()),
|
feat
|
enlarge default body limit to 64M (#1873)
|
1eeb5b4330fda23ea4d858c9b90c2c0882b6e10a
|
2023-06-15 13:56:27
|
JeremyHi
|
feat: disable_region_failover option for metasrv (#1777)
| false
|
diff --git a/src/cmd/src/metasrv.rs b/src/cmd/src/metasrv.rs
index df268c9e3d69..9b1ac3782445 100644
--- a/src/cmd/src/metasrv.rs
+++ b/src/cmd/src/metasrv.rs
@@ -93,6 +93,8 @@ struct StartCommand {
#[clap(long)]
use_memory_store: bool,
#[clap(long)]
+ disable_region_failover: bool,
+ #[clap(long)]
http_addr: Option<String>,
#[clap(long)]
http_timeout: Option<u64>,
@@ -134,9 +136,9 @@ impl StartCommand {
.context(error::UnsupportedSelectorTypeSnafu { selector_type })?;
}
- if self.use_memory_store {
- opts.use_memory_store = true;
- }
+ opts.use_memory_store = self.use_memory_store;
+
+ opts.disable_region_failover = self.disable_region_failover;
if let Some(http_addr) = &self.http_addr {
opts.http_opts.addr = http_addr.clone();
diff --git a/src/meta-srv/src/handler/collect_stats_handler.rs b/src/meta-srv/src/handler/collect_stats_handler.rs
index d82874caf9d1..5b02ae6f4385 100644
--- a/src/meta-srv/src/handler/collect_stats_handler.rs
+++ b/src/meta-srv/src/handler/collect_stats_handler.rs
@@ -20,6 +20,7 @@ use crate::error::Result;
use crate::handler::{HeartbeatAccumulator, HeartbeatHandler};
use crate::metasrv::Context;
+#[derive(Default)]
pub struct CollectStatsHandler;
#[async_trait::async_trait]
diff --git a/src/meta-srv/src/metasrv.rs b/src/meta-srv/src/metasrv.rs
index 6cb07f36bf45..6dbda2729040 100644
--- a/src/meta-srv/src/metasrv.rs
+++ b/src/meta-srv/src/metasrv.rs
@@ -49,6 +49,7 @@ pub struct MetaSrvOptions {
pub datanode_lease_secs: i64,
pub selector: SelectorType,
pub use_memory_store: bool,
+ pub disable_region_failover: bool,
pub http_opts: HttpOptions,
pub logging: LoggingOptions,
}
@@ -62,6 +63,7 @@ impl Default for MetaSrvOptions {
datanode_lease_secs: 15,
selector: SelectorType::default(),
use_memory_store: false,
+ disable_region_failover: false,
http_opts: HttpOptions::default(),
logging: LoggingOptions::default(),
}
diff --git a/src/meta-srv/src/metasrv/builder.rs b/src/meta-srv/src/metasrv/builder.rs
index c1eafbc3d2b0..155791161edc 100644
--- a/src/meta-srv/src/metasrv/builder.rs
+++ b/src/meta-srv/src/metasrv/builder.rs
@@ -146,24 +146,29 @@ impl MetaSrvBuilder {
let handler_group = match handler_group {
Some(handler_group) => handler_group,
None => {
- let region_failover_manager = Arc::new(RegionFailoverManager::new(
- mailbox.clone(),
- procedure_manager.clone(),
- selector.clone(),
- SelectorContext {
- server_addr: options.server_addr.clone(),
- datanode_lease_secs: options.datanode_lease_secs,
- kv_store: kv_store.clone(),
- catalog: None,
- schema: None,
- table: None,
- },
- lock.clone(),
- ));
-
- let region_failure_handler =
- RegionFailureHandler::try_new(election.clone(), region_failover_manager)
- .await?;
+ let region_failover_handler = if options.disable_region_failover {
+ None
+ } else {
+ let region_failover_manager = Arc::new(RegionFailoverManager::new(
+ mailbox.clone(),
+ procedure_manager.clone(),
+ selector.clone(),
+ SelectorContext {
+ server_addr: options.server_addr.clone(),
+ datanode_lease_secs: options.datanode_lease_secs,
+ kv_store: kv_store.clone(),
+ catalog: None,
+ schema: None,
+ table: None,
+ },
+ lock.clone(),
+ ));
+
+ Some(
+ RegionFailureHandler::try_new(election.clone(), region_failover_manager)
+ .await?,
+ )
+ };
let group = HeartbeatHandlerGroup::new(pushers);
let keep_lease_handler = KeepLeaseHandler::new(kv_store.clone());
@@ -174,9 +179,11 @@ impl MetaSrvBuilder {
group.add_handler(keep_lease_handler).await;
group.add_handler(CheckLeaderHandler::default()).await;
group.add_handler(OnLeaderStartHandler::default()).await;
- group.add_handler(CollectStatsHandler).await;
- group.add_handler(MailboxHandler).await;
- group.add_handler(region_failure_handler).await;
+ group.add_handler(CollectStatsHandler::default()).await;
+ group.add_handler(MailboxHandler::default()).await;
+ if let Some(region_failover_handler) = region_failover_handler {
+ group.add_handler(region_failover_handler).await;
+ }
group.add_handler(PersistStatsHandler::default()).await;
group
}
|
feat
|
disable_region_failover option for metasrv (#1777)
|
d1f8ea788054e46203b46b0e038af48fee5b0fa7
|
2025-01-08 13:25:06
|
shuiyisong
|
perf: parse Loki labels in protobuf write path (#5305)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 5e5d2057534f..ad3076094f17 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -9170,6 +9170,12 @@ dependencies = [
"proc-macro2",
]
+[[package]]
+name = "quoted-string"
+version = "0.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5a206a30ce37189d1340e7da2ee0b4d65e342590af676541c23a4f3959ba272e"
+
[[package]]
name = "radium"
version = "0.7.0"
@@ -10907,6 +10913,7 @@ dependencies = [
"promql-parser",
"prost 0.12.6",
"query",
+ "quoted-string",
"rand",
"regex",
"reqwest",
diff --git a/src/servers/Cargo.toml b/src/servers/Cargo.toml
index a4508291993e..674cd3f7df05 100644
--- a/src/servers/Cargo.toml
+++ b/src/servers/Cargo.toml
@@ -63,7 +63,6 @@ humantime-serde.workspace = true
hyper = { version = "0.14", features = ["full"] }
influxdb_line_protocol = { git = "https://github.com/evenyag/influxdb_iox", branch = "feat/line-protocol" }
itertools.workspace = true
-json5 = "0.4"
jsonb.workspace = true
lazy_static.workspace = true
log-query.workspace = true
@@ -86,6 +85,7 @@ prometheus.workspace = true
promql-parser.workspace = true
prost.workspace = true
query.workspace = true
+quoted-string = "0.6"
rand.workspace = true
regex.workspace = true
reqwest.workspace = true
@@ -123,6 +123,7 @@ client = { workspace = true, features = ["testing"] }
common-base.workspace = true
common-test-util.workspace = true
criterion = "0.5"
+json5 = "0.4"
mysql_async = { version = "0.33", default-features = false, features = [
"default-rustls",
] }
@@ -149,3 +150,7 @@ harness = false
[[bench]]
name = "to_http_output"
harness = false
+
+[[bench]]
+name = "loki_labels"
+harness = false
diff --git a/src/servers/benches/loki_labels.rs b/src/servers/benches/loki_labels.rs
new file mode 100644
index 000000000000..e0d64976efb7
--- /dev/null
+++ b/src/servers/benches/loki_labels.rs
@@ -0,0 +1,41 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::collections::BTreeMap;
+
+use criterion::{black_box, criterion_group, criterion_main, Criterion};
+use servers::error::Result;
+use servers::http::loki::parse_loki_labels;
+
+// cargo bench loki_labels
+
+fn json5_parse(input: &str) -> Result<BTreeMap<String, String>> {
+ let input = input.replace("=", ":");
+ let result: BTreeMap<String, String> = json5::from_str(&input).unwrap();
+ Ok(result)
+}
+
+fn criterion_benchmark(c: &mut Criterion) {
+ let mut group = c.benchmark_group("loki_labels");
+ let input = r#"{job="foobar", cluster="foo-central1", namespace="bar", container_name="buzz"}"#;
+
+ group.bench_function("json5", |b| b.iter(|| json5_parse(black_box(input))));
+ group.bench_function("hand_parse", |b| {
+ b.iter(|| parse_loki_labels(black_box(input)))
+ });
+ group.finish(); // Important to call finish() on the group
+}
+
+criterion_group!(benches, criterion_benchmark);
+criterion_main!(benches);
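// Editor's note, not from the original commit: `json5_parse` above reproduces
// the old write path (rewrite `=` to `:`, then parse as JSON5) purely as the
// baseline for comparison with the new `parse_loki_labels`.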
diff --git a/src/servers/src/error.rs b/src/servers/src/error.rs
index 31aa5342be57..621211cf86ed 100644
--- a/src/servers/src/error.rs
+++ b/src/servers/src/error.rs
@@ -506,10 +506,9 @@ pub enum Error {
location: Location,
},
- #[snafu(display("Failed to parse payload as json5"))]
- ParseJson5 {
- #[snafu(source)]
- error: json5::Error,
+ #[snafu(display("Invalid Loki labels: {}", msg))]
+ InvalidLokiLabels {
+ msg: String,
#[snafu(implicit)]
location: Location,
},
@@ -666,7 +665,7 @@ impl ErrorExt for Error {
| MissingQueryContext { .. }
| MysqlValueConversion { .. }
| ParseJson { .. }
- | ParseJson5 { .. }
+ | InvalidLokiLabels { .. }
| InvalidLokiPayload { .. }
| UnsupportedContentType { .. }
| TimestampOverflow { .. }
diff --git a/src/servers/src/http/loki.rs b/src/servers/src/http/loki.rs
index b1014110613b..272d2867db57 100644
--- a/src/servers/src/http/loki.rs
+++ b/src/servers/src/http/loki.rs
@@ -27,17 +27,18 @@ use axum::{Extension, TypedHeader};
use bytes::Bytes;
use common_query::prelude::GREPTIME_TIMESTAMP;
use common_query::{Output, OutputData};
-use common_telemetry::warn;
+use common_telemetry::{error, warn};
use hashbrown::HashMap;
use lazy_static::lazy_static;
use loki_api::prost_types::Timestamp;
use prost::Message;
+use quoted_string::test_utils::TestSpec;
use session::context::{Channel, QueryContext};
-use snafu::{OptionExt, ResultExt};
+use snafu::{ensure, OptionExt, ResultExt};
use crate::error::{
- DecodeOtlpRequestSnafu, InvalidLokiPayloadSnafu, ParseJson5Snafu, ParseJsonSnafu, Result,
- UnsupportedContentTypeSnafu,
+ DecodeOtlpRequestSnafu, InvalidLokiLabelsSnafu, InvalidLokiPayloadSnafu, ParseJsonSnafu,
+ Result, UnsupportedContentTypeSnafu,
};
use crate::http::event::{LogState, JSON_CONTENT_TYPE, PB_CONTENT_TYPE};
use crate::http::extractor::LogTableName;
@@ -191,8 +192,7 @@ async fn handle_json_req(
l.iter()
.filter_map(|(k, v)| v.as_str().map(|v| (k.clone(), v.to_string())))
.collect::<BTreeMap<String, String>>()
- })
- .unwrap_or_default();
+ });
// process each line
for (line_index, line) in lines.iter().enumerate() {
@@ -230,7 +230,7 @@ async fn handle_json_req(
// TODO(shuiyisong): we'll ignore structured metadata for now
let mut row = init_row(schemas.len(), ts, line_text);
- process_labels(&mut column_indexer, schemas, &mut row, labels.iter());
+ process_labels(&mut column_indexer, schemas, &mut row, labels.as_ref());
rows.push(row);
}
@@ -255,13 +255,11 @@ async fn handle_pb_req(
let mut rows = Vec::with_capacity(cnt);
for stream in req.streams {
- // parse labels for each row
- // encoding: https://github.com/grafana/alloy/blob/be34410b9e841cc0c37c153f9550d9086a304bca/internal/component/common/loki/client/batch.go#L114-L145
- // use very dirty hack to parse labels
- // TODO(shuiyisong): remove json5 and parse the string directly
- let labels = stream.labels.replace("=", ":");
- // use btreemap to keep order
- let labels: BTreeMap<String, String> = json5::from_str(&labels).context(ParseJson5Snafu)?;
+ let labels = parse_loki_labels(&stream.labels)
+ .inspect_err(|e| {
+ error!(e; "failed to parse loki labels");
+ })
+ .ok();
// process entries
for entry in stream.entries {
@@ -273,7 +271,7 @@ async fn handle_pb_req(
let line = entry.line;
let mut row = init_row(schemas.len(), prost_ts_to_nano(&ts), line);
- process_labels(&mut column_indexer, schemas, &mut row, labels.iter());
+ process_labels(&mut column_indexer, schemas, &mut row, labels.as_ref());
rows.push(row);
}
@@ -282,6 +280,81 @@ async fn handle_pb_req(
Ok(rows)
}
+/// Hand-parses the Loki label string; if parsing fails, the caller simply skips the labels.
+/// Note: `pub` here for bench usage.
+/// ref:
+/// 1. encoding: https://github.com/grafana/alloy/blob/be34410b9e841cc0c37c153f9550d9086a304bca/internal/component/common/loki/client/batch.go#L114-L145
+/// 2. test data: https://github.com/grafana/loki/blob/a24ef7b206e0ca63ee74ca6ecb0a09b745cd2258/pkg/push/types_test.go
+pub fn parse_loki_labels(labels: &str) -> Result<BTreeMap<String, String>> {
+ let mut labels = labels.trim();
+ ensure!(
+ labels.len() >= 2,
+ InvalidLokiLabelsSnafu {
+ msg: "labels string too short"
+ }
+ );
+ ensure!(
+ labels.starts_with("{"),
+ InvalidLokiLabelsSnafu {
+ msg: "missing `{` at the beginning"
+ }
+ );
+ ensure!(
+ labels.ends_with("}"),
+ InvalidLokiLabelsSnafu {
+ msg: "missing `}` at the end"
+ }
+ );
+
+ let mut result = BTreeMap::new();
+ labels = &labels[1..labels.len() - 1];
+
+ while !labels.is_empty() {
+ // parse key
+ let first_index = labels.find("=").context(InvalidLokiLabelsSnafu {
+ msg: format!("missing `=` near: {}", labels),
+ })?;
+ let key = &labels[..first_index];
+ labels = &labels[first_index + 1..];
+
+ // parse value
+ let qs = quoted_string::parse::<TestSpec>(labels)
+ .map_err(|e| {
+ InvalidLokiLabelsSnafu {
+ msg: format!(
+ "failed to parse quoted string near: {}, reason: {}",
+ labels, e.1
+ ),
+ }
+ .build()
+ })?
+ .quoted_string;
+
+ labels = &labels[qs.len()..];
+
+ let value = quoted_string::to_content::<TestSpec>(qs).map_err(|e| {
+ InvalidLokiLabelsSnafu {
+ msg: format!("failed to unquote the string: {}, reason: {}", qs, e),
+ }
+ .build()
+ })?;
+
+ // insert key and value
+ result.insert(key.to_string(), value.to_string());
+
+ if labels.is_empty() {
+ break;
+ }
+ ensure!(
+ labels.starts_with(","),
+ InvalidLokiLabelsSnafu { msg: "missing `,`" }
+ );
+ labels = labels[1..].trim_start();
+ }
+
+ Ok(result)
+}
+
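// Illustrative sketch, not from the original commit: typical input and output
// of the hand-rolled parser above.
fn _illustrate_parse_loki_labels() {
    let labels = r#"{job="foobar", cluster="foo-central1"}"#;
    let parsed = parse_loki_labels(labels).unwrap();
    assert_eq!(parsed.get("job").map(String::as_str), Some("foobar"));
    assert_eq!(parsed.get("cluster").map(String::as_str), Some("foo-central1"));
}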
#[inline]
fn prost_ts_to_nano(ts: &Timestamp) -> i64 {
ts.seconds * 1_000_000_000 + ts.nanos as i64
@@ -303,12 +376,16 @@ fn init_row(schema_len: usize, ts: i64, line: String) -> Vec<GreptimeValue> {
row
}
-fn process_labels<'a>(
+fn process_labels(
column_indexer: &mut HashMap<String, u16>,
schemas: &mut Vec<ColumnSchema>,
row: &mut Vec<GreptimeValue>,
- labels: impl Iterator<Item = (&'a String, &'a String)>,
+ labels: Option<&BTreeMap<String, String>>,
) {
+ let Some(labels) = labels else {
+ return;
+ };
+
// insert labels
for (k, v) in labels {
if let Some(index) = column_indexer.get(k) {
@@ -359,9 +436,12 @@ macro_rules! unwrap_or_warn_continue {
#[cfg(test)]
mod tests {
+ use std::collections::BTreeMap;
+
use loki_api::prost_types::Timestamp;
- use crate::http::loki::prost_ts_to_nano;
+ use crate::error::Error::InvalidLokiLabels;
+ use crate::http::loki::{parse_loki_labels, prost_ts_to_nano};
#[test]
fn test_ts_to_nano() {
@@ -374,4 +454,50 @@ mod tests {
};
assert_eq!(prost_ts_to_nano(&ts), 1731748568804293888);
}
+
+ #[test]
+ fn test_parse_loki_labels() {
+ let mut expected = BTreeMap::new();
+ expected.insert("job".to_string(), "foobar".to_string());
+ expected.insert("cluster".to_string(), "foo-central1".to_string());
+ expected.insert("namespace".to_string(), "bar".to_string());
+ expected.insert("container_name".to_string(), "buzz".to_string());
+
+ // perfect case
+ let valid_labels =
+ r#"{job="foobar", cluster="foo-central1", namespace="bar", container_name="buzz"}"#;
+ let re = parse_loki_labels(valid_labels);
+ assert!(re.is_ok());
+ assert_eq!(re.unwrap(), expected);
+
+ // too short
+ let too_short = r#"}"#;
+ let re = parse_loki_labels(too_short);
+ assert!(matches!(re.err().unwrap(), InvalidLokiLabels { .. }));
+
+ // missing start
+ let missing_start = r#"job="foobar"}"#;
+ let re = parse_loki_labels(missing_start);
+ assert!(matches!(re.err().unwrap(), InvalidLokiLabels { .. }));
+
+ // missing end
+ let missing_end = r#"{job="foobar""#;
+ let re = parse_loki_labels(missing_end);
+ assert!(matches!(re.err().unwrap(), InvalidLokiLabels { .. }));
+
+ // missing equal
+ let missing_equal = r#"{job"foobar"}"#;
+ let re = parse_loki_labels(missing_equal);
+ assert!(matches!(re.err().unwrap(), InvalidLokiLabels { .. }));
+
+ // missing quote
+ let missing_quote = r#"{job=foobar}"#;
+ let re = parse_loki_labels(missing_quote);
+ assert!(matches!(re.err().unwrap(), InvalidLokiLabels { .. }));
+
+ // missing comma
+ let missing_comma = r#"{job="foobar" cluster="foo-central1"}"#;
+ let re = parse_loki_labels(missing_comma);
+ assert!(matches!(re.err().unwrap(), InvalidLokiLabels { .. }));
+ }
}
diff --git a/tests-integration/tests/http.rs b/tests-integration/tests/http.rs
index 0027700e802c..8b252e71e48c 100644
--- a/tests-integration/tests/http.rs
+++ b/tests-integration/tests/http.rs
@@ -1873,7 +1873,7 @@ pub async fn test_loki_pb_logs(store_type: StorageType) {
// init loki request
let req: PushRequest = PushRequest {
streams: vec![StreamAdapter {
- labels: r#"{service="test",source="integration","wadaxi"="do anything"}"#.to_string(),
+ labels: r#"{service="test",source="integration",wadaxi="do anything"}"#.to_string(),
entries: vec![
EntryAdapter {
timestamp: Some(Timestamp::from_str("2024-11-07T10:53:50").unwrap()),
@@ -1953,7 +1953,8 @@ pub async fn test_loki_json_logs(store_type: StorageType) {
"streams": [
{
"stream": {
- "source": "test"
+ "source": "test",
+ "sender": "integration"
},
"values": [
[ "1735901380059465984", "this is line one" ],
@@ -1987,7 +1988,7 @@ pub async fn test_loki_json_logs(store_type: StorageType) {
assert_eq!(StatusCode::OK, res.status());
// test schema
- let expected = "[[\"loki_table_name\",\"CREATE TABLE IF NOT EXISTS \\\"loki_table_name\\\" (\\n \\\"greptime_timestamp\\\" TIMESTAMP(9) NOT NULL,\\n \\\"line\\\" STRING NULL,\\n \\\"source\\\" STRING NULL,\\n TIME INDEX (\\\"greptime_timestamp\\\"),\\n PRIMARY KEY (\\\"source\\\")\\n)\\n\\nENGINE=mito\\nWITH(\\n append_mode = 'true'\\n)\"]]";
+ let expected = "[[\"loki_table_name\",\"CREATE TABLE IF NOT EXISTS \\\"loki_table_name\\\" (\\n \\\"greptime_timestamp\\\" TIMESTAMP(9) NOT NULL,\\n \\\"line\\\" STRING NULL,\\n \\\"sender\\\" STRING NULL,\\n \\\"source\\\" STRING NULL,\\n TIME INDEX (\\\"greptime_timestamp\\\"),\\n PRIMARY KEY (\\\"sender\\\", \\\"source\\\")\\n)\\n\\nENGINE=mito\\nWITH(\\n append_mode = 'true'\\n)\"]]";
validate_data(
"loki_json_schema",
&client,
@@ -1997,7 +1998,7 @@ pub async fn test_loki_json_logs(store_type: StorageType) {
.await;
// test content
- let expected = "[[1735901380059465984,\"this is line one\",\"test\"],[1735901398478897920,\"this is line two\",\"test\"]]";
+ let expected = "[[1735901380059465984,\"this is line one\",\"integration\",\"test\"],[1735901398478897920,\"this is line two\",\"integration\",\"test\"]]";
validate_data(
"loki_json_content",
&client,
|
perf
|
parse Loki labels in protobuf write path (#5305)
|
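A minimal usage sketch of the `parse_loki_labels` helper introduced in the diff above; the function signature comes from that diff, while the `demo` wrapper and the assertions are illustrative assumptions only.

use std::collections::BTreeMap;

// Assumes `parse_loki_labels` is in scope as defined above:
// pub fn parse_loki_labels(labels: &str) -> Result<BTreeMap<String, String>>
fn demo() {
    let labels = r#"{service="test", source="integration", wadaxi="do anything"}"#;
    let parsed: BTreeMap<String, String> =
        parse_loki_labels(labels).expect("well-formed Loki label string");

    // BTreeMap keeps keys sorted, so the derived column order stays deterministic.
    assert_eq!(parsed.get("service").map(String::as_str), Some("test"));
    assert_eq!(parsed.get("wadaxi").map(String::as_str), Some("do anything"));
}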
23bf55a2659ee805b107e1d45f9f11645d560c80
|
2023-06-21 08:29:58
|
Ruihang Xia
|
fix: __field__ matcher on single value column (#1805)
| false
|
diff --git a/src/promql/src/planner.rs b/src/promql/src/planner.rs
index c282d25f3c90..51091e89f935 100644
--- a/src/promql/src/planner.rs
+++ b/src/promql/src/planner.rs
@@ -541,7 +541,7 @@ impl PromPlanner {
result_set.insert(matcher.value.clone());
} else {
return Err(ColumnNotFoundSnafu {
- col: self.ctx.table_name.clone().unwrap(),
+ col: matcher.value.clone(),
}
.build());
}
@@ -550,8 +550,8 @@ impl PromPlanner {
if col_set.contains(&matcher.value) {
reverse_set.insert(matcher.value.clone());
} else {
- return Err(ValueNotFoundSnafu {
- table: self.ctx.table_name.clone().unwrap(),
+ return Err(ColumnNotFoundSnafu {
+ col: matcher.value.clone(),
}
.build());
}
diff --git a/src/table/src/metadata.rs b/src/table/src/metadata.rs
index 8e5b183f2a90..e857ddf498a2 100644
--- a/src/table/src/metadata.rs
+++ b/src/table/src/metadata.rs
@@ -162,15 +162,14 @@ impl TableMeta {
}
pub fn field_column_names(&self) -> impl Iterator<Item = &String> {
- let columns_schemas = &self.schema.column_schemas();
- self.value_indices.iter().filter_map(|idx| {
- let column = &columns_schemas[*idx];
- if column.is_time_index() {
- None
- } else {
- Some(&column.name)
- }
- })
+ // `value_indices` is wrong under distributed mode. Use the logic copied from DESC TABLE
+ let columns_schemas = self.schema.column_schemas();
+ let primary_key_indices = &self.primary_key_indices;
+ columns_schemas
+ .iter()
+ .enumerate()
+ .filter(|(i, cs)| !primary_key_indices.contains(i) && !cs.is_time_index())
+ .map(|(_, cs)| &cs.name)
}
/// Returns the new [TableMetaBuilder] after applying given `alter_kind`.
diff --git a/tests/cases/distributed/tql-explain-analyze/explain.result b/tests/cases/distributed/tql-explain-analyze/explain.result
index 7fb8a4384f96..1dc38f1f2f7a 100644
--- a/tests/cases/distributed/tql-explain-analyze/explain.result
+++ b/tests/cases/distributed/tql-explain-analyze/explain.result
@@ -31,3 +31,41 @@ DROP TABLE test;
Affected Rows: 1
+CREATE TABLE host_load1 (
+ ts TIMESTAMP(3) NOT NULL,
+ collector STRING NULL,
+ host STRING NULL,
+ val DOUBLE NULL,
+ TIME INDEX (ts),
+ PRIMARY KEY (collector, host)
+);
+
+Affected Rows: 0
+
+-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED
+-- SQLNESS REPLACE (peer-.*) REDACTED
+TQL EXPLAIN host_load1{__field__="val"};
+
++---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | PromInstantManipulate: range=[0..0], lookback=[300000], interval=[300000], time index=[ts] |
+| | PromSeriesNormalize: offset=[0], time index=[ts], filter NaN: [false] |
+| | PromSeriesDivide: tags=["collector", "host"] |
+| | Sort: host_load1.collector DESC NULLS LAST, host_load1.host DESC NULLS LAST, host_load1.ts DESC NULLS LAST |
+| | Projection: host_load1.val, host_load1.collector, host_load1.host, host_load1.ts |
+| | MergeScan [is_placeholder=false] |
+| | TableScan: host_load1 projection=[ts, collector, host, val], partial_filters=[ts >= TimestampMillisecond(-300000, None), ts <= TimestampMillisecond(300000, None)] |
+| physical_plan | PromInstantManipulateExec: range=[0..0], lookback=[300000], interval=[300000], time index=[ts] |
+| | PromSeriesNormalizeExec: offset=[0], time index=[ts], filter NaN: [false] |
+| | PromSeriesDivideExec: tags=["collector", "host"] |
+| | RepartitionExec: partitioning=REDACTED
+| | ProjectionExec: expr=[val@3 as val, collector@1 as collector, host@2 as host, ts@0 as ts] |
+| | MergeScanExec: peers=[REDACTED
+| | |
++---------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+
+DROP TABLE host_load1;
+
+Affected Rows: 1
+
diff --git a/tests/cases/distributed/tql-explain-analyze/explain.sql b/tests/cases/distributed/tql-explain-analyze/explain.sql
index 58f7bcf1a67c..d0f237d0aaf2 100644
--- a/tests/cases/distributed/tql-explain-analyze/explain.sql
+++ b/tests/cases/distributed/tql-explain-analyze/explain.sql
@@ -9,3 +9,18 @@ INSERT INTO test VALUES (1, 1, "a"), (1, 1, "b"), (2, 2, "a");
TQL EXPLAIN (0, 10, '5s') test;
DROP TABLE test;
+
+CREATE TABLE host_load1 (
+ ts TIMESTAMP(3) NOT NULL,
+ collector STRING NULL,
+ host STRING NULL,
+ val DOUBLE NULL,
+ TIME INDEX (ts),
+ PRIMARY KEY (collector, host)
+);
+
+-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED
+-- SQLNESS REPLACE (peer-.*) REDACTED
+TQL EXPLAIN host_load1{__field__="val"};
+
+DROP TABLE host_load1;
|
fix
|
__field__ matcher on single value column (#1805)
|
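A hedged sketch of the field-column selection rule adopted in the `field_column_names` change above (exclude primary-key columns and the time index, the same rule DESC TABLE applies); `ColumnSchema` here is a simplified stand-in, not the real `datatypes` type.

// Simplified stand-in; the real ColumnSchema carries data types, defaults, etc.
struct ColumnSchema {
    name: String,
    is_time_index: bool,
}

fn field_column_names<'a>(
    column_schemas: &'a [ColumnSchema],
    primary_key_indices: &'a [usize],
) -> impl Iterator<Item = &'a String> {
    // A field (value) column is anything that is neither part of the
    // primary key nor the time index.
    column_schemas
        .iter()
        .enumerate()
        .filter(|(i, cs)| !primary_key_indices.contains(i) && !cs.is_time_index)
        .map(|(_, cs)| &cs.name)
}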
4aa756c8960407e425da52f2891dc1563ade3175
|
2024-05-28 19:28:15
|
Weny Xu
|
feat: open region in background (#4052)
| false
|
diff --git a/src/datanode/src/region_server.rs b/src/datanode/src/region_server.rs
index b8f236c7ded2..469ed0a6ccf1 100644
--- a/src/datanode/src/region_server.rs
+++ b/src/datanode/src/region_server.rs
@@ -409,9 +409,7 @@ impl RegionServerInner {
let engine = match region_change {
RegionChange::Register(attribute) => match current_region_status {
Some(status) => match status.clone() {
- RegionEngineWithStatus::Registering(_) => {
- return Ok(CurrentEngine::EarlyReturn(0))
- }
+ RegionEngineWithStatus::Registering(engine) => engine,
RegionEngineWithStatus::Deregistering(_) => {
return error::RegionBusySnafu { region_id }.fail()
}
@@ -781,34 +779,32 @@ mod tests {
let mut mock_region_server = mock_region_server();
let (engine, _receiver) = MockRegionEngine::new(MITO_ENGINE_NAME);
let engine_name = engine.name();
-
mock_region_server.register_engine(engine.clone());
-
let region_id = RegionId::new(1, 1);
let builder = CreateRequestBuilder::new();
let create_req = builder.build();
-
// Tries to create/open a registering region.
mock_region_server.inner.region_map.insert(
region_id,
RegionEngineWithStatus::Registering(engine.clone()),
);
-
let response = mock_region_server
.handle_request(region_id, RegionRequest::Create(create_req))
.await
.unwrap();
assert_eq!(response.affected_rows, 0);
-
let status = mock_region_server
.inner
.region_map
.get(®ion_id)
.unwrap()
.clone();
+ assert!(matches!(status, RegionEngineWithStatus::Ready(_)));
- assert!(matches!(status, RegionEngineWithStatus::Registering(_)));
-
+ mock_region_server.inner.region_map.insert(
+ region_id,
+ RegionEngineWithStatus::Registering(engine.clone()),
+ );
let response = mock_region_server
.handle_request(
region_id,
@@ -822,14 +818,13 @@ mod tests {
.await
.unwrap();
assert_eq!(response.affected_rows, 0);
-
let status = mock_region_server
.inner
.region_map
.get(®ion_id)
.unwrap()
.clone();
- assert!(matches!(status, RegionEngineWithStatus::Registering(_)));
+ assert!(matches!(status, RegionEngineWithStatus::Ready(_)));
}
#[tokio::test]
@@ -1020,7 +1015,7 @@ mod tests {
region_change: RegionChange::Register(RegionAttribute::Mito),
assert: Box::new(|result| {
let current_engine = result.unwrap();
- assert_matches!(current_engine, CurrentEngine::EarlyReturn(_));
+ assert_matches!(current_engine, CurrentEngine::Engine(_));
}),
},
CurrentEngineTest {
diff --git a/src/mito2/src/engine.rs b/src/mito2/src/engine.rs
index e0223a5585ee..09bfe2535a12 100644
--- a/src/mito2/src/engine.rs
+++ b/src/mito2/src/engine.rs
@@ -104,6 +104,11 @@ impl MitoEngine {
self.inner.workers.is_region_exists(region_id)
}
+ /// Returns true if the specific region is opening.
+ pub fn is_region_opening(&self, region_id: RegionId) -> bool {
+ self.inner.workers.is_region_opening(region_id)
+ }
+
/// Returns the region disk/memory usage information.
pub async fn get_region_usage(&self, region_id: RegionId) -> Result<RegionUsage> {
let region = self
diff --git a/src/mito2/src/engine/open_test.rs b/src/mito2/src/engine/open_test.rs
index 3cf4a21e561a..c30c29648042 100644
--- a/src/mito2/src/engine/open_test.rs
+++ b/src/mito2/src/engine/open_test.rs
@@ -24,8 +24,10 @@ use store_api::region_request::{
RegionCloseRequest, RegionOpenRequest, RegionPutRequest, RegionRequest,
};
use store_api::storage::{RegionId, ScanRequest};
+use tokio::sync::oneshot;
use crate::config::MitoConfig;
+use crate::error;
use crate::test_util::{
build_rows, flush_region, put_rows, reopen_region, rows_schema, CreateRequestBuilder, TestEnv,
};
@@ -319,3 +321,87 @@ async fn test_open_region_skip_wal_replay() {
+-------+---------+---------------------+";
assert_eq!(expected, batches.pretty_print().unwrap());
}
+
+#[tokio::test]
+async fn test_open_region_wait_for_opening_region_ok() {
+ let mut env = TestEnv::with_prefix("wait-for-opening-region-ok");
+ let engine = env.create_engine(MitoConfig::default()).await;
+ let region_id = RegionId::new(1, 1);
+ let worker = engine.inner.workers.worker(region_id);
+ let (tx, rx) = oneshot::channel();
+ let opening_regions = worker.opening_regions().clone();
+ opening_regions.insert_sender(region_id, tx.into());
+ assert!(engine.is_region_opening(region_id));
+
+ let handle_open = tokio::spawn(async move {
+ engine
+ .handle_request(
+ region_id,
+ RegionRequest::Open(RegionOpenRequest {
+ engine: String::new(),
+ region_dir: "empty".to_string(),
+ options: HashMap::default(),
+ skip_wal_replay: false,
+ }),
+ )
+ .await
+ });
+
+ // Wait for conditions
+ while opening_regions.sender_len(region_id) != 2 {
+ tokio::time::sleep(Duration::from_millis(100)).await;
+ }
+
+ let senders = opening_regions.remove_sender(region_id);
+ for sender in senders {
+ sender.send(Ok(0));
+ }
+
+ assert_eq!(handle_open.await.unwrap().unwrap().affected_rows, 0);
+ assert_eq!(rx.await.unwrap().unwrap(), 0);
+}
+
+#[tokio::test]
+async fn test_open_region_wait_for_opening_region_err() {
+ let mut env = TestEnv::with_prefix("wait-for-opening-region-err");
+ let engine = env.create_engine(MitoConfig::default()).await;
+ let region_id = RegionId::new(1, 1);
+ let worker = engine.inner.workers.worker(region_id);
+ let (tx, rx) = oneshot::channel();
+ let opening_regions = worker.opening_regions().clone();
+ opening_regions.insert_sender(region_id, tx.into());
+ assert!(engine.is_region_opening(region_id));
+
+ let handle_open = tokio::spawn(async move {
+ engine
+ .handle_request(
+ region_id,
+ RegionRequest::Open(RegionOpenRequest {
+ engine: String::new(),
+ region_dir: "empty".to_string(),
+ options: HashMap::default(),
+ skip_wal_replay: false,
+ }),
+ )
+ .await
+ });
+
+ // Wait for conditions
+ while opening_regions.sender_len(region_id) != 2 {
+ tokio::time::sleep(Duration::from_millis(100)).await;
+ }
+
+ let senders = opening_regions.remove_sender(region_id);
+ for sender in senders {
+ sender.send(Err(error::RegionNotFoundSnafu { region_id }.build()));
+ }
+
+ assert_eq!(
+ handle_open.await.unwrap().unwrap_err().status_code(),
+ StatusCode::RegionNotFound
+ );
+ assert_eq!(
+ rx.await.unwrap().unwrap_err().status_code(),
+ StatusCode::RegionNotFound
+ );
+}
diff --git a/src/mito2/src/error.rs b/src/mito2/src/error.rs
index 281ea7130c6d..400284fdf124 100644
--- a/src/mito2/src/error.rs
+++ b/src/mito2/src/error.rs
@@ -722,6 +722,13 @@ pub enum Error {
#[snafu(implicit)]
location: Location,
},
+
+ #[snafu(display("Failed to open region"))]
+ OpenRegion {
+ #[snafu(implicit)]
+ location: Location,
+ source: Arc<Error>,
+ },
}
pub type Result<T, E = Error> = std::result::Result<T, E>;
@@ -783,6 +790,7 @@ impl ErrorExt for Error {
| Recv { .. }
| EncodeWal { .. }
| DecodeWal { .. } => StatusCode::Internal,
+ OpenRegion { source, .. } => source.status_code(),
WriteBuffer { source, .. } => source.status_code(),
WriteGroup { source, .. } => source.status_code(),
FieldTypeMismatch { source, .. } => source.status_code(),
diff --git a/src/mito2/src/region.rs b/src/mito2/src/region.rs
index 71006324d98d..c9930d2d04a7 100644
--- a/src/mito2/src/region.rs
+++ b/src/mito2/src/region.rs
@@ -18,6 +18,7 @@ pub(crate) mod opener;
pub mod options;
pub(crate) mod version;
+use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::sync::atomic::{AtomicI64, Ordering};
use std::sync::{Arc, RwLock};
@@ -35,7 +36,7 @@ use crate::manifest::action::{RegionMetaAction, RegionMetaActionList};
use crate::manifest::manager::RegionManifestManager;
use crate::memtable::MemtableBuilderRef;
use crate::region::version::{VersionControlRef, VersionRef};
-use crate::request::OnFailure;
+use crate::request::{OnFailure, OptionOutputTx};
use crate::sst::file_purger::FilePurgerRef;
use crate::time_provider::TimeProviderRef;
@@ -471,6 +472,60 @@ impl RegionMap {
pub(crate) type RegionMapRef = Arc<RegionMap>;
+/// Opening regions
+#[derive(Debug, Default)]
+pub(crate) struct OpeningRegions {
+ regions: RwLock<HashMap<RegionId, Vec<OptionOutputTx>>>,
+}
+
+impl OpeningRegions {
+ /// Registers `sender` behind an in-flight open and returns `None`; otherwise, hands the `sender` back to the caller.
+ pub(crate) fn wait_for_opening_region(
+ &self,
+ region_id: RegionId,
+ sender: OptionOutputTx,
+ ) -> Option<OptionOutputTx> {
+ let mut regions = self.regions.write().unwrap();
+ match regions.entry(region_id) {
+ Entry::Occupied(mut senders) => {
+ senders.get_mut().push(sender);
+ None
+ }
+ Entry::Vacant(_) => Some(sender),
+ }
+ }
+
+ /// Returns true if the region exists.
+ pub(crate) fn is_region_exists(&self, region_id: RegionId) -> bool {
+ let regions = self.regions.read().unwrap();
+ regions.contains_key(®ion_id)
+ }
+
+ /// Inserts a sender for a newly opening region into the map.
+ pub(crate) fn insert_sender(&self, region: RegionId, sender: OptionOutputTx) {
+ let mut regions = self.regions.write().unwrap();
+ regions.insert(region, vec![sender]);
+ }
+
+ /// Removes and returns the senders registered for the region by id.
+ pub(crate) fn remove_sender(&self, region_id: RegionId) -> Vec<OptionOutputTx> {
+ let mut regions = self.regions.write().unwrap();
+ regions.remove(®ion_id).unwrap_or_default()
+ }
+
+ #[cfg(test)]
+ pub(crate) fn sender_len(&self, region_id: RegionId) -> usize {
+ let regions = self.regions.read().unwrap();
+ if let Some(senders) = regions.get(®ion_id) {
+ senders.len()
+ } else {
+ 0
+ }
+ }
+}
+
+pub(crate) type OpeningRegionsRef = Arc<OpeningRegions>;
+
#[cfg(test)]
mod tests {
use crossbeam_utils::atomic::AtomicCell;
diff --git a/src/mito2/src/wal.rs b/src/mito2/src/wal.rs
index a36493f300fb..0b3b8282833c 100644
--- a/src/mito2/src/wal.rs
+++ b/src/mito2/src/wal.rs
@@ -49,7 +49,7 @@ pub type WalEntryStream<'a> = BoxStream<'a, Result<(EntryId, WalEntry)>>;
/// Write ahead log.
///
/// All regions in the engine share the same WAL instance.
-#[derive(Debug, Clone)]
+#[derive(Debug)]
pub struct Wal<S> {
/// The underlying log store.
store: Arc<S>,
@@ -62,6 +62,14 @@ impl<S> Wal<S> {
}
}
+impl<S> Clone for Wal<S> {
+ fn clone(&self) -> Self {
+ Self {
+ store: self.store.clone(),
+ }
+ }
+}
+
impl<S: LogStore> Wal<S> {
/// Returns a writer to write to the WAL.
pub fn writer(&self) -> WalWriter<S> {
diff --git a/src/mito2/src/worker.rs b/src/mito2/src/worker.rs
index ad243afdd73c..2122a052c839 100644
--- a/src/mito2/src/worker.rs
+++ b/src/mito2/src/worker.rs
@@ -51,7 +51,7 @@ use crate::error::{JoinSnafu, Result, WorkerStoppedSnafu};
use crate::flush::{FlushScheduler, WriteBufferManagerImpl, WriteBufferManagerRef};
use crate::memtable::MemtableBuilderProvider;
use crate::metrics::WRITE_STALL_TOTAL;
-use crate::region::{MitoRegionRef, RegionMap, RegionMapRef};
+use crate::region::{MitoRegionRef, OpeningRegions, OpeningRegionsRef, RegionMap, RegionMapRef};
use crate::request::{
BackgroundNotify, DdlRequest, SenderDdlRequest, SenderWriteRequest, WorkerRequest,
};
@@ -212,6 +212,11 @@ impl WorkerGroup {
self.worker(region_id).is_region_exists(region_id)
}
+ /// Returns true if the specific region is opening.
+ pub(crate) fn is_region_opening(&self, region_id: RegionId) -> bool {
+ self.worker(region_id).is_region_opening(region_id)
+ }
+
/// Returns region of specific `region_id`.
///
/// This method should not be public.
@@ -225,7 +230,7 @@ impl WorkerGroup {
}
/// Get worker for specific `region_id`.
- fn worker(&self, region_id: RegionId) -> &RegionWorker {
+ pub(crate) fn worker(&self, region_id: RegionId) -> &RegionWorker {
let index = region_id_to_index(region_id, self.workers.len());
&self.workers[index]
@@ -364,6 +369,7 @@ impl<S: LogStore> WorkerStarter<S> {
/// Starts a region worker and its background thread.
fn start(self) -> RegionWorker {
let regions = Arc::new(RegionMap::default());
+ let opening_regions = Arc::new(OpeningRegions::default());
let (sender, receiver) = mpsc::channel(self.config.worker_channel_size);
let running = Arc::new(AtomicBool::new(true));
@@ -373,6 +379,7 @@ impl<S: LogStore> WorkerStarter<S> {
config: self.config.clone(),
regions: regions.clone(),
dropping_regions: Arc::new(RegionMap::default()),
+ opening_regions: opening_regions.clone(),
sender: sender.clone(),
receiver,
wal: Wal::new(self.log_store),
@@ -409,6 +416,7 @@ impl<S: LogStore> WorkerStarter<S> {
RegionWorker {
id: self.id,
regions,
+ opening_regions,
sender,
handle: Mutex::new(Some(handle)),
running,
@@ -422,6 +430,8 @@ pub(crate) struct RegionWorker {
id: WorkerId,
/// Regions bound to the worker.
regions: RegionMapRef,
+ /// The opening regions.
+ opening_regions: OpeningRegionsRef,
/// Request sender.
sender: Sender<WorkerRequest>,
/// Handle to the worker thread.
@@ -481,10 +491,21 @@ impl RegionWorker {
self.regions.is_region_exists(region_id)
}
+ /// Returns true if the region is opening.
+ fn is_region_opening(&self, region_id: RegionId) -> bool {
+ self.opening_regions.is_region_exists(region_id)
+ }
+
/// Returns region of specific `region_id`.
fn get_region(&self, region_id: RegionId) -> Option<MitoRegionRef> {
self.regions.get_region(region_id)
}
+
+ #[cfg(test)]
+ /// Returns the [OpeningRegionsRef].
+ pub(crate) fn opening_regions(&self) -> &OpeningRegionsRef {
+ &self.opening_regions
+ }
}
impl Drop for RegionWorker {
@@ -531,6 +552,8 @@ struct RegionWorkerLoop<S> {
regions: RegionMapRef,
/// Regions that are not yet fully dropped.
dropping_regions: RegionMapRef,
+ /// Regions that are opening.
+ opening_regions: OpeningRegionsRef,
/// Request sender.
sender: Sender<WorkerRequest>,
/// Request receiver.
@@ -698,7 +721,11 @@ impl<S: LogStore> RegionWorkerLoop<S> {
let res = match ddl.request {
DdlRequest::Create(req) => self.handle_create_request(ddl.region_id, req).await,
DdlRequest::Drop(_) => self.handle_drop_request(ddl.region_id).await,
- DdlRequest::Open(req) => self.handle_open_request(ddl.region_id, req).await,
+ DdlRequest::Open(req) => {
+ self.handle_open_request(ddl.region_id, req, ddl.sender)
+ .await;
+ continue;
+ }
DdlRequest::Close(_) => self.handle_close_request(ddl.region_id).await,
DdlRequest::Alter(req) => {
self.handle_alter_request(ddl.region_id, req, ddl.sender)
diff --git a/src/mito2/src/worker/handle_open.rs b/src/mito2/src/worker/handle_open.rs
index bcd050220e61..6c63abf1535c 100644
--- a/src/mito2/src/worker/handle_open.rs
+++ b/src/mito2/src/worker/handle_open.rs
@@ -20,24 +20,24 @@ use common_telemetry::info;
use object_store::util::join_path;
use snafu::{OptionExt, ResultExt};
use store_api::logstore::LogStore;
-use store_api::region_request::{AffectedRows, RegionOpenRequest};
+use store_api::region_request::RegionOpenRequest;
use store_api::storage::RegionId;
-use crate::error::{ObjectStoreNotFoundSnafu, OpenDalSnafu, RegionNotFoundSnafu, Result};
+use crate::error::{
+ ObjectStoreNotFoundSnafu, OpenDalSnafu, OpenRegionSnafu, RegionNotFoundSnafu, Result,
+};
use crate::metrics::REGION_COUNT;
use crate::region::opener::RegionOpener;
+use crate::request::OptionOutputTx;
use crate::worker::handle_drop::remove_region_dir_once;
use crate::worker::{RegionWorkerLoop, DROPPING_MARKER_FILE};
impl<S: LogStore> RegionWorkerLoop<S> {
- pub(crate) async fn handle_open_request(
- &mut self,
+ async fn check_and_cleanup_region(
+ &self,
region_id: RegionId,
- request: RegionOpenRequest,
- ) -> Result<AffectedRows> {
- if self.regions.is_region_exists(region_id) {
- return Ok(0);
- }
+ request: &RegionOpenRequest,
+ ) -> Result<()> {
let object_store = if let Some(storage_name) = request.options.get("storage") {
self.object_store_manager
.find(storage_name)
@@ -59,10 +59,33 @@ impl<S: LogStore> RegionWorkerLoop<S> {
return RegionNotFoundSnafu { region_id }.fail();
}
+ Ok(())
+ }
+
+ pub(crate) async fn handle_open_request(
+ &mut self,
+ region_id: RegionId,
+ request: RegionOpenRequest,
+ sender: OptionOutputTx,
+ ) {
+ if self.regions.is_region_exists(region_id) {
+ sender.send(Ok(0));
+ return;
+ }
+ let Some(sender) = self
+ .opening_regions
+ .wait_for_opening_region(region_id, sender)
+ else {
+ return;
+ };
+ if let Err(err) = self.check_and_cleanup_region(region_id, &request).await {
+ sender.send(Err(err));
+ return;
+ }
info!("Try to open region {}", region_id);
// Open region from specific region dir.
- let region = RegionOpener::new(
+ let opener = match RegionOpener::new(
region_id,
&request.region_dir,
self.memtable_builder_provider.clone(),
@@ -71,18 +94,43 @@ impl<S: LogStore> RegionWorkerLoop<S> {
self.intermediate_manager.clone(),
)
.skip_wal_replay(request.skip_wal_replay)
- .parse_options(request.options)?
.cache(Some(self.cache_manager.clone()))
- .open(&self.config, &self.wal)
- .await?;
-
- info!("Region {} is opened", region_id);
+ .parse_options(request.options)
+ {
+ Ok(opener) => opener,
+ Err(err) => {
+ sender.send(Err(err));
+ return;
+ }
+ };
- REGION_COUNT.inc();
+ let regions = self.regions.clone();
+ let wal = self.wal.clone();
+ let config = self.config.clone();
+ let opening_regions = self.opening_regions.clone();
+ opening_regions.insert_sender(region_id, sender);
+ common_runtime::spawn_bg(async move {
+ match opener.open(&config, &wal).await {
+ Ok(region) => {
+ info!("Region {} is opened", region_id);
+ REGION_COUNT.inc();
- // Insert the MitoRegion into the RegionMap.
- self.regions.insert_region(Arc::new(region));
+ // Insert the Region into the RegionMap.
+ regions.insert_region(Arc::new(region));
- Ok(0)
+ let senders = opening_regions.remove_sender(region_id);
+ for sender in senders {
+ sender.send(Ok(0));
+ }
+ }
+ Err(err) => {
+ let senders = opening_regions.remove_sender(region_id);
+ let err = Arc::new(err);
+ for sender in senders {
+ sender.send(Err(err.clone()).context(OpenRegionSnafu));
+ }
+ }
+ }
+ });
}
}
|
feat
|
open region in background (#4052)
|
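A minimal sketch of the wait-or-open pattern behind the `OpeningRegions` map in the diff above: the first caller keeps its sender and performs the open, later callers queue behind it, and every waiter receives the same result. The types here are simplified stand-ins (a plain `u64` region id, string errors), not the real mito2 types.

use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::sync::RwLock;

use tokio::sync::oneshot;

type RegionId = u64; // stand-in for store_api::storage::RegionId
type OpenResult = Result<usize, String>;

#[derive(Default)]
struct OpeningRegions {
    regions: RwLock<HashMap<RegionId, Vec<oneshot::Sender<OpenResult>>>>,
}

impl OpeningRegions {
    /// Returns the sender back if the caller should perform the open itself;
    /// queues it behind the in-flight open (returning None) otherwise.
    fn wait_or_open(
        &self,
        region_id: RegionId,
        sender: oneshot::Sender<OpenResult>,
    ) -> Option<oneshot::Sender<OpenResult>> {
        match self.regions.write().unwrap().entry(region_id) {
            Entry::Occupied(mut waiters) => {
                waiters.get_mut().push(sender);
                None
            }
            Entry::Vacant(_) => Some(sender),
        }
    }

    /// Fans the final result out to every queued waiter once the open finishes.
    fn finish(&self, region_id: RegionId, result: OpenResult) {
        let waiters = self
            .regions
            .write()
            .unwrap()
            .remove(®ion_id)
            .unwrap_or_default();
        for waiter in waiters {
            let _ = waiter.send(result.clone());
        }
    }
}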
e2a1cb58401f7b98c1d4081bc06963e6741f8f74
|
2024-05-17 14:01:55
|
WU Jingdi
|
feat: support evaluate expr in range query param (#3823)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index ea8de8ee8bae..23bf8f1200ab 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2043,7 +2043,7 @@ dependencies = [
"datatypes",
"serde",
"snafu 0.8.2",
- "sqlparser 0.44.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=c919990bf62ad38d2b0c0a3bc90b26ad919d51b0)",
+ "sqlparser 0.44.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=e4e496b8d62416ad50ce70a1b460c7313610cf5d)",
"sqlparser_derive 0.1.1",
"statrs",
"tokio",
@@ -3947,7 +3947,7 @@ dependencies = [
"session",
"snafu 0.8.2",
"sql",
- "sqlparser 0.44.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=c919990bf62ad38d2b0c0a3bc90b26ad919d51b0)",
+ "sqlparser 0.44.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=e4e496b8d62416ad50ce70a1b460c7313610cf5d)",
"store-api",
"strfmt",
"table",
@@ -6670,7 +6670,7 @@ dependencies = [
"session",
"snafu 0.8.2",
"sql",
- "sqlparser 0.44.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=c919990bf62ad38d2b0c0a3bc90b26ad919d51b0)",
+ "sqlparser 0.44.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=e4e496b8d62416ad50ce70a1b460c7313610cf5d)",
"store-api",
"substrait 0.7.2",
"table",
@@ -6932,7 +6932,7 @@ dependencies = [
"serde_json",
"snafu 0.8.2",
"sql",
- "sqlparser 0.44.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=c919990bf62ad38d2b0c0a3bc90b26ad919d51b0)",
+ "sqlparser 0.44.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=e4e496b8d62416ad50ce70a1b460c7313610cf5d)",
"store-api",
"table",
]
@@ -9903,7 +9903,7 @@ dependencies = [
"lazy_static",
"regex",
"snafu 0.8.2",
- "sqlparser 0.44.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=c919990bf62ad38d2b0c0a3bc90b26ad919d51b0)",
+ "sqlparser 0.44.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=e4e496b8d62416ad50ce70a1b460c7313610cf5d)",
"sqlparser_derive 0.1.1",
"table",
]
@@ -9967,13 +9967,13 @@ dependencies = [
[[package]]
name = "sqlparser"
version = "0.44.0"
-source = "git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=c919990bf62ad38d2b0c0a3bc90b26ad919d51b0#c919990bf62ad38d2b0c0a3bc90b26ad919d51b0"
+source = "git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=e4e496b8d62416ad50ce70a1b460c7313610cf5d#e4e496b8d62416ad50ce70a1b460c7313610cf5d"
dependencies = [
"lazy_static",
"log",
"regex",
"sqlparser 0.44.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "sqlparser_derive 0.2.2 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=c919990bf62ad38d2b0c0a3bc90b26ad919d51b0)",
+ "sqlparser_derive 0.2.2 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=e4e496b8d62416ad50ce70a1b460c7313610cf5d)",
]
[[package]]
@@ -10001,7 +10001,7 @@ dependencies = [
[[package]]
name = "sqlparser_derive"
version = "0.2.2"
-source = "git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=c919990bf62ad38d2b0c0a3bc90b26ad919d51b0#c919990bf62ad38d2b0c0a3bc90b26ad919d51b0"
+source = "git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=e4e496b8d62416ad50ce70a1b460c7313610cf5d#e4e496b8d62416ad50ce70a1b460c7313610cf5d"
dependencies = [
"proc-macro2",
"quote",
@@ -10645,7 +10645,7 @@ dependencies = [
"serde_json",
"snafu 0.8.2",
"sql",
- "sqlparser 0.44.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=c919990bf62ad38d2b0c0a3bc90b26ad919d51b0)",
+ "sqlparser 0.44.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=e4e496b8d62416ad50ce70a1b460c7313610cf5d)",
"sqlx",
"tinytemplate",
"tokio",
diff --git a/Cargo.toml b/Cargo.toml
index 1ece9e77fabc..0cf22de319cd 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -159,7 +159,7 @@ smallvec = { version = "1", features = ["serde"] }
snafu = "0.8"
sysinfo = "0.30"
# on branch v0.44.x
-sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "c919990bf62ad38d2b0c0a3bc90b26ad919d51b0", features = [
+sqlparser = { git = "https://github.com/GreptimeTeam/sqlparser-rs.git", rev = "e4e496b8d62416ad50ce70a1b460c7313610cf5d", features = [
"visitor",
] }
strum = { version = "0.25", features = ["derive"] }
diff --git a/src/query/src/range_select/plan_rewrite.rs b/src/query/src/range_select/plan_rewrite.rs
index 4035f20a63e2..087fa2c9010a 100644
--- a/src/query/src/range_select/plan_rewrite.rs
+++ b/src/query/src/range_select/plan_rewrite.rs
@@ -19,6 +19,7 @@ use std::time::Duration;
use arrow_schema::DataType;
use async_recursion::async_recursion;
use catalog::table_source::DfTableSourceProvider;
+use chrono::Utc;
use common_time::interval::NANOS_PER_MILLI;
use common_time::timestamp::TimeUnit;
use common_time::{Interval, Timestamp, Timezone};
@@ -27,10 +28,13 @@ use datafusion::prelude::Column;
use datafusion::scalar::ScalarValue;
use datafusion_common::tree_node::{Transformed, TreeNode, TreeNodeRecursion, TreeNodeRewriter};
use datafusion_common::{DFSchema, DataFusionError, Result as DFResult};
+use datafusion_expr::execution_props::ExecutionProps;
+use datafusion_expr::simplify::SimplifyContext;
use datafusion_expr::{
Aggregate, Analyze, Explain, Expr, ExprSchemable, Extension, LogicalPlan, LogicalPlanBuilder,
Projection,
};
+use datafusion_optimizer::simplify_expressions::ExprSimplifier;
use datatypes::prelude::ConcreteDataType;
use promql_parser::util::parse_duration;
use session::context::QueryContextRef;
@@ -108,34 +112,84 @@ fn parse_expr_to_string(args: &[Expr], i: usize) -> DFResult<String> {
/// Parse a duration expr:
/// 1. duration string (e.g. `'1h'`)
/// 2. Interval expr (e.g. `INTERVAL '1 year 3 hours 20 minutes'`)
+/// 3. An interval expr can be evaluated at the logical plan stage (e.g. `INTERVAL '2' day - INTERVAL '1' day`)
fn parse_duration_expr(args: &[Expr], i: usize) -> DFResult<Duration> {
- let interval_to_duration = |interval: Interval| -> Duration {
- Duration::from_millis((interval.to_nanosecond() / NANOS_PER_MILLI as i128) as u64)
- };
match args.get(i) {
Some(Expr::Literal(ScalarValue::Utf8(Some(str)))) => {
parse_duration(str).map_err(DataFusionError::Plan)
}
- Some(Expr::Literal(ScalarValue::IntervalYearMonth(Some(i)))) => {
- Ok(interval_to_duration(Interval::from_i32(*i)))
+ Some(expr) => {
+ let ms = evaluate_expr_to_millisecond(args, i, true)?;
+ if ms <= 0 {
+ return Err(dispose_parse_error(Some(expr)));
+ }
+ Ok(Duration::from_millis(ms as u64))
+ }
+ None => Err(dispose_parse_error(None)),
+ }
+}
+
+/// Evaluate a time calculation expr, in cases like:
+/// 1. `INTERVAL '1' day + INTERVAL '1 year 2 hours 3 minutes'`
+/// 2. `now() - INTERVAL '1' day` (when `interval_only==false`)
+///
+/// Outputs a millisecond timestamp.
+///
+/// If `interval_only==true`, only exprs made up entirely of interval types are accepted (case 2 will return an error).
+fn evaluate_expr_to_millisecond(args: &[Expr], i: usize, interval_only: bool) -> DFResult<i64> {
+ let Some(expr) = args.get(i) else {
+ return Err(dispose_parse_error(None));
+ };
+ if interval_only && !interval_only_in_expr(expr) {
+ return Err(dispose_parse_error(Some(expr)));
+ }
+ let execution_props = ExecutionProps::new().with_query_execution_start_time(Utc::now());
+ let info = SimplifyContext::new(&execution_props).with_schema(Arc::new(DFSchema::empty()));
+ let interval_to_ms =
+ |interval: Interval| -> i64 { (interval.to_nanosecond() / NANOS_PER_MILLI as i128) as i64 };
+ let simplify_expr = ExprSimplifier::new(info).simplify(expr.clone())?;
+ match simplify_expr {
+ Expr::Literal(ScalarValue::TimestampNanosecond(ts_nanos, _))
+ | Expr::Literal(ScalarValue::DurationNanosecond(ts_nanos)) => {
+ ts_nanos.map(|v| v / 1_000_000)
+ }
+ Expr::Literal(ScalarValue::TimestampMicrosecond(ts_micros, _))
+ | Expr::Literal(ScalarValue::DurationMicrosecond(ts_micros)) => {
+ ts_micros.map(|v| v / 1_000)
}
- Some(Expr::Literal(ScalarValue::IntervalDayTime(Some(i)))) => {
- Ok(interval_to_duration(Interval::from_i64(*i)))
+ Expr::Literal(ScalarValue::TimestampMillisecond(ts_millis, _))
+ | Expr::Literal(ScalarValue::DurationMillisecond(ts_millis)) => ts_millis,
+ Expr::Literal(ScalarValue::TimestampSecond(ts_secs, _))
+ | Expr::Literal(ScalarValue::DurationSecond(ts_secs)) => ts_secs.map(|v| v * 1_000),
+ Expr::Literal(ScalarValue::IntervalYearMonth(interval)) => {
+ interval.map(|v| interval_to_ms(Interval::from_i32(v)))
}
- Some(Expr::Literal(ScalarValue::IntervalMonthDayNano(Some(i)))) => {
- Ok(interval_to_duration(Interval::from_i128(*i)))
+ Expr::Literal(ScalarValue::IntervalDayTime(interval)) => {
+ interval.map(|v| interval_to_ms(Interval::from_i64(v)))
}
- other => Err(dispose_parse_error(other)),
+ Expr::Literal(ScalarValue::IntervalMonthDayNano(interval)) => {
+ interval.map(|v| interval_to_ms(Interval::from_i128(v)))
+ }
+ _ => None,
}
+ .ok_or_else(|| {
+ DataFusionError::Plan(format!(
+ "{} is not an expr that can be evaluated and used in a range query",
+ expr.display_name().unwrap_or_default()
+ ))
+ })
}
/// Parse the `align to` clause and return a UTC timestamp with unit of millisecond,
/// which is used as the basis for dividing time slot during the align operation.
/// 1. NOW: align to current execute time
/// 2. Timestamp string: align to specific timestamp
-/// 3. leave empty (as Default Option): align to unix epoch 0 (timezone aware)
+/// 3. An expr can be evaluated at the logical plan stage (e.g. `now() - INTERVAL '1' day`)
+/// 4. leave empty (as Default Option): align to unix epoch 0 (timezone aware)
fn parse_align_to(args: &[Expr], i: usize, timezone: Option<&Timezone>) -> DFResult<i64> {
- let s = parse_str_expr(args, i)?;
+ let Ok(s) = parse_str_expr(args, i) else {
+ return evaluate_expr_to_millisecond(args, i, false);
+ };
let upper = s.to_uppercase();
match upper.as_str() {
"NOW" => return Ok(Timestamp::current_millis().value()),
@@ -469,6 +523,25 @@ fn have_range_in_exprs(exprs: &[Expr]) -> bool {
})
}
+fn interval_only_in_expr(expr: &Expr) -> bool {
+ let mut all_interval = true;
+ let _ = expr.apply(&mut |expr| {
+ if !matches!(
+ expr,
+ Expr::Literal(ScalarValue::IntervalDayTime(_))
+ | Expr::Literal(ScalarValue::IntervalMonthDayNano(_))
+ | Expr::Literal(ScalarValue::IntervalYearMonth(_))
+ | Expr::BinaryExpr(_)
+ ) {
+ all_interval = false;
+ Ok(TreeNodeRecursion::Stop)
+ } else {
+ Ok(TreeNodeRecursion::Continue)
+ }
+ });
+ all_interval
+}
+
#[cfg(test)]
mod test {
@@ -477,6 +550,7 @@ mod test {
use catalog::memory::MemoryCatalogManager;
use catalog::RegisterTableRequest;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
+ use datafusion_expr::{BinaryExpr, Operator};
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::{ColumnSchema, Schema};
use session::context::QueryContext;
@@ -754,8 +828,42 @@ mod test {
parse_duration_expr(&args, 0).unwrap(),
parse_duration("1y4w").unwrap()
);
- // test err
+ // test index err
assert!(parse_duration_expr(&args, 10).is_err());
+ // test evaluate expr
+ let args = vec![Expr::BinaryExpr(BinaryExpr {
+ left: Box::new(Expr::Literal(ScalarValue::IntervalYearMonth(Some(
+ Interval::from_year_month(10).to_i32(),
+ )))),
+ op: Operator::Plus,
+ right: Box::new(Expr::Literal(ScalarValue::IntervalYearMonth(Some(
+ Interval::from_year_month(10).to_i32(),
+ )))),
+ })];
+ assert_eq!(
+ parse_duration_expr(&args, 0).unwrap().as_millis(),
+ interval_to_ms(Interval::from_year_month(20))
+ );
+ let args = vec![Expr::BinaryExpr(BinaryExpr {
+ left: Box::new(Expr::Literal(ScalarValue::IntervalYearMonth(Some(
+ Interval::from_year_month(10).to_i32(),
+ )))),
+ op: Operator::Minus,
+ right: Box::new(Expr::Literal(ScalarValue::IntervalYearMonth(Some(
+ Interval::from_year_month(10).to_i32(),
+ )))),
+ })];
+ // test zero interval error
+ assert!(parse_duration_expr(&args, 0).is_err());
+ // test must all be interval
+ let args = vec![Expr::BinaryExpr(BinaryExpr {
+ left: Box::new(Expr::Literal(ScalarValue::IntervalYearMonth(Some(
+ Interval::from_year_month(10).to_i32(),
+ )))),
+ op: Operator::Minus,
+ right: Box::new(Expr::Literal(ScalarValue::Time64Microsecond(Some(0)))),
+ })];
+ assert!(parse_duration_expr(&args, 0).is_err());
}
#[test]
@@ -787,19 +895,56 @@ mod test {
let args = vec![Expr::Literal(ScalarValue::Utf8(Some(
"1970-01-01T00:00:00+08:00".into(),
)))];
- assert!(parse_align_to(&args, 0, None).unwrap() == -8 * 60 * 60 * 1000);
+ assert_eq!(parse_align_to(&args, 0, None).unwrap(), -8 * 60 * 60 * 1000);
// timezone
let args = vec![Expr::Literal(ScalarValue::Utf8(Some(
"1970-01-01T00:00:00".into(),
)))];
- assert!(
+ assert_eq!(
parse_align_to(
&args,
0,
Some(&Timezone::from_tz_string("Asia/Shanghai").unwrap())
)
- .unwrap()
- == -8 * 60 * 60 * 1000
+ .unwrap(),
+ -8 * 60 * 60 * 1000
);
+ // test evaluate expr
+ let args = vec![Expr::BinaryExpr(BinaryExpr {
+ left: Box::new(Expr::Literal(ScalarValue::IntervalYearMonth(Some(
+ Interval::from_year_month(10).to_i32(),
+ )))),
+ op: Operator::Plus,
+ right: Box::new(Expr::Literal(ScalarValue::IntervalYearMonth(Some(
+ Interval::from_year_month(10).to_i32(),
+ )))),
+ })];
+ assert_eq!(
+ parse_align_to(&args, 0, None).unwrap(),
+ // 20 month
+ 20 * 30 * 24 * 60 * 60 * 1000
+ );
+ }
+
+ #[test]
+ fn test_interval_only() {
+ let expr = Expr::BinaryExpr(BinaryExpr {
+ left: Box::new(Expr::Literal(ScalarValue::DurationMillisecond(Some(20)))),
+ op: Operator::Minus,
+ right: Box::new(Expr::Literal(ScalarValue::IntervalYearMonth(Some(
+ Interval::from_year_month(10).to_i32(),
+ )))),
+ });
+ assert!(!interval_only_in_expr(&expr));
+ let expr = Expr::BinaryExpr(BinaryExpr {
+ left: Box::new(Expr::Literal(ScalarValue::IntervalYearMonth(Some(
+ Interval::from_year_month(10).to_i32(),
+ )))),
+ op: Operator::Minus,
+ right: Box::new(Expr::Literal(ScalarValue::IntervalYearMonth(Some(
+ Interval::from_year_month(10).to_i32(),
+ )))),
+ });
+ assert!(interval_only_in_expr(&expr));
}
}
diff --git a/tests/cases/standalone/common/range/error.result b/tests/cases/standalone/common/range/error.result
index 01beca591f2a..ecdaac7d782e 100644
--- a/tests/cases/standalone/common/range/error.result
+++ b/tests/cases/standalone/common/range/error.result
@@ -98,11 +98,11 @@ Error: 3000(PlanQuery), DataFusion error: Error during planning: duration must b
SELECT min(val) RANGE '5s' FROM host ALIGN (INTERVAL '0' day);
-Error: 2000(InvalidSyntax), Range Query: Can't use 0 as align in Range Query
+Error: 3000(PlanQuery), DataFusion error: Error during planning: Illegal argument `IntervalMonthDayNano("0")` in range select query
SELECT min(val) RANGE (INTERVAL '0' day) FROM host ALIGN '5s';
-Error: 2000(InvalidSyntax), Range Query: Invalid Range expr `MIN(host.val) RANGE IntervalMonthDayNano("0")`, Can't use 0 as range in Range Query
+Error: 3000(PlanQuery), DataFusion error: Error during planning: Illegal argument `IntervalMonthDayNano("0")` in range select query
DROP TABLE host;
diff --git a/tests/cases/standalone/common/range/to.result b/tests/cases/standalone/common/range/to.result
index a2bfb3de4cd8..e7e0445316b3 100644
--- a/tests/cases/standalone/common/range/to.result
+++ b/tests/cases/standalone/common/range/to.result
@@ -82,6 +82,30 @@ SELECT ts, min(val) RANGE (INTERVAL '1' day) FROM host ALIGN (INTERVAL '1' day)
| 2024-01-24T23:00:00 | 3 |
+---------------------+------------------------------------------------------------------+
+SELECT ts, min(val) RANGE (INTERVAL '2' day - INTERVAL '1' day) FROM host ALIGN (INTERVAL '2' day - INTERVAL '1' day) TO (now() - (now() + INTERVAL '1' hour)) by (1) ORDER BY ts;
+
++---------------------+-----------------------------------------------------------------------------------------------------------------+
+| ts | MIN(host.val) RANGE IntervalMonthDayNano("36893488147419103232") - IntervalMonthDayNano("18446744073709551616") |
++---------------------+-----------------------------------------------------------------------------------------------------------------+
+| 2024-01-22T23:00:00 | 0 |
+| 2024-01-23T23:00:00 | 1 |
+| 2024-01-24T23:00:00 | 3 |
++---------------------+-----------------------------------------------------------------------------------------------------------------+
+
+-- non-positive duration
+SELECT ts, min(val) RANGE (INTERVAL '1' day - INTERVAL '2' day) FROM host ALIGN (INTERVAL '1' day) TO '1900-01-01T00:00:00+01:00' by (1) ORDER BY ts;
+
+Error: 3000(PlanQuery), DataFusion error: Error during planning: Illegal argument `IntervalMonthDayNano("18446744073709551616") - IntervalMonthDayNano("36893488147419103232")` in range select query
+
+SELECT ts, min(val) RANGE (INTERVAL '1' day - INTERVAL '1' day) FROM host ALIGN (INTERVAL '1' day) TO '1900-01-01T00:00:00+01:00' by (1) ORDER BY ts;
+
+Error: 3000(PlanQuery), DataFusion error: Error during planning: Illegal argument `IntervalMonthDayNano("18446744073709551616") - IntervalMonthDayNano("18446744073709551616")` in range select query
+
+-- duration not all interval
+SELECT ts, min(val) RANGE (now() - INTERVAL '1' day) FROM host ALIGN (INTERVAL '1' day) TO '1900-01-01T00:00:00+01:00' by (1) ORDER BY ts;
+
+Error: 3000(PlanQuery), DataFusion error: Error during planning: Illegal argument `now() - IntervalMonthDayNano("18446744073709551616")` in range select query
+
--- ALIGN TO with time zone ---
set time_zone='Asia/Shanghai';
diff --git a/tests/cases/standalone/common/range/to.sql b/tests/cases/standalone/common/range/to.sql
index 29610ca16558..70b6849c0d03 100644
--- a/tests/cases/standalone/common/range/to.sql
+++ b/tests/cases/standalone/common/range/to.sql
@@ -26,6 +26,18 @@ SELECT ts, host, min(val) RANGE '1d' FROM host ALIGN '1d' TO '2023-01-01T00:00:0
SELECT ts, min(val) RANGE (INTERVAL '1' day) FROM host ALIGN (INTERVAL '1' day) TO '1900-01-01T00:00:00+01:00' by (1) ORDER BY ts;
+SELECT ts, min(val) RANGE (INTERVAL '2' day - INTERVAL '1' day) FROM host ALIGN (INTERVAL '2' day - INTERVAL '1' day) TO (now() - (now() + INTERVAL '1' hour)) by (1) ORDER BY ts;
+
+-- non-positive duration
+
+SELECT ts, min(val) RANGE (INTERVAL '1' day - INTERVAL '2' day) FROM host ALIGN (INTERVAL '1' day) TO '1900-01-01T00:00:00+01:00' by (1) ORDER BY ts;
+
+SELECT ts, min(val) RANGE (INTERVAL '1' day - INTERVAL '1' day) FROM host ALIGN (INTERVAL '1' day) TO '1900-01-01T00:00:00+01:00' by (1) ORDER BY ts;
+
+-- duration not all interval
+
+SELECT ts, min(val) RANGE (now() - INTERVAL '1' day) FROM host ALIGN (INTERVAL '1' day) TO '1900-01-01T00:00:00+01:00' by (1) ORDER BY ts;
+
--- ALIGN TO with time zone ---
set time_zone='Asia/Shanghai';
|
feat
|
support evaluate expr in range query param (#3823)
|
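A hedged sketch of the constant-folding idea behind `evaluate_expr_to_millisecond` in the diff above: simplify the expr with DataFusion's `ExprSimplifier` and read a literal back. The API calls mirror the ones used in that diff; only the `IntervalDayTime` branch is covered, and the day/millisecond bit-unpacking follows Arrow's encoding as an assumption.

use std::sync::Arc;

use chrono::Utc;
use datafusion::scalar::ScalarValue;
use datafusion_common::{DFSchema, Result as DFResult};
use datafusion_expr::execution_props::ExecutionProps;
use datafusion_expr::simplify::SimplifyContext;
use datafusion_expr::Expr;
use datafusion_optimizer::simplify_expressions::ExprSimplifier;

/// Folds an expr such as `INTERVAL '2' day - INTERVAL '1' day` into a literal
/// and converts the IntervalDayTime case into milliseconds.
fn interval_expr_to_ms(expr: &Expr) -> DFResult<Option<i64>> {
    let props = ExecutionProps::new().with_query_execution_start_time(Utc::now());
    let ctx = SimplifyContext::new(&props).with_schema(Arc::new(DFSchema::empty()));
    let folded = ExprSimplifier::new(ctx).simplify(expr.clone())?;

    Ok(match folded {
        // Only one of the literal variants handled in the diff is shown here.
        Expr::Literal(ScalarValue::IntervalDayTime(Some(v))) => {
            // Arrow packs days into the high 32 bits and milliseconds into the low 32 bits.
            let days = v >> 32;
            let millis = v & 0xFFFF_FFFF;
            Some(days * 24 * 60 * 60 * 1_000 + millis)
        }
        _ => None,
    })
}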
78b07996b1afb75c6f4448710fa73343fd9c65fc
|
2023-06-26 14:42:48
|
JeremyHi
|
feat: txn for meta (#1828)
| false
|
diff --git a/src/meta-srv/src/error.rs b/src/meta-srv/src/error.rs
index 3ed8819d6e11..c62f2e65d746 100644
--- a/src/meta-srv/src/error.rs
+++ b/src/meta-srv/src/error.rs
@@ -360,6 +360,9 @@ pub enum Error {
source: common_meta::error::Error,
},
+ #[snafu(display("Etcd txn got an error: {err_msg}"))]
+ EtcdTxnOpResponse { err_msg: String, location: Location },
+
// this error is used for custom error mapping
// please do not delete it
#[snafu(display("Other error, source: {}", source))]
@@ -437,6 +440,7 @@ impl ErrorExt for Error {
| Error::InvalidTxnResult { .. }
| Error::InvalidUtf8Value { .. }
| Error::UnexpectedInstructionReply { .. }
+ | Error::EtcdTxnOpResponse { .. }
| Error::Unexpected { .. } => StatusCode::Unexpected,
Error::TableNotFound { .. } => StatusCode::TableNotFound,
Error::InvalidCatalogValue { source, .. } => source.status_code(),
diff --git a/src/meta-srv/src/metrics.rs b/src/meta-srv/src/metrics.rs
index cac65989917a..13cdd98c029b 100644
--- a/src/meta-srv/src/metrics.rs
+++ b/src/meta-srv/src/metrics.rs
@@ -15,6 +15,7 @@
pub(crate) const METRIC_META_CREATE_CATALOG: &str = "meta.create_catalog";
pub(crate) const METRIC_META_CREATE_SCHEMA: &str = "meta.create_schema";
pub(crate) const METRIC_META_KV_REQUEST: &str = "meta.kv_request";
+pub(crate) const METRIC_META_TXN_REQUEST: &str = "meta.txn_request";
pub(crate) const METRIC_META_ROUTE_REQUEST: &str = "meta.route_request";
pub(crate) const METRIC_META_HEARTBEAT_CONNECTION_NUM: &str = "meta.heartbeat_connection_num";
pub(crate) const METRIC_META_HANDLER_EXECUTE: &str = "meta.handler_execute";
diff --git a/src/meta-srv/src/sequence.rs b/src/meta-srv/src/sequence.rs
index a4e763944f92..747e488fb8c6 100644
--- a/src/meta-srv/src/sequence.rs
+++ b/src/meta-srv/src/sequence.rs
@@ -165,6 +165,7 @@ mod tests {
use super::*;
use crate::service::store::kv::KvStore;
use crate::service::store::memory::MemStore;
+ use crate::service::store::txn::TxnService;
#[tokio::test]
async fn test_sequence() {
@@ -199,6 +200,8 @@ mod tests {
async fn test_sequence_force_quit() {
struct Noop;
+ impl TxnService for Noop {}
+
#[async_trait::async_trait]
impl KvStore for Noop {
async fn range(
diff --git a/src/meta-srv/src/service/store.rs b/src/meta-srv/src/service/store.rs
index 926bd2f663d7..47c012a01481 100644
--- a/src/meta-srv/src/service/store.rs
+++ b/src/meta-srv/src/service/store.rs
@@ -13,9 +13,11 @@
// limitations under the License.
pub mod etcd;
+pub(crate) mod etcd_util;
pub mod ext;
pub mod kv;
pub mod memory;
+pub mod txn;
use api::v1::meta::{
store_server, BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse,
diff --git a/src/meta-srv/src/service/store/etcd.rs b/src/meta-srv/src/service/store/etcd.rs
index c97543137c52..820ec813e26c 100644
--- a/src/meta-srv/src/service/store/etcd.rs
+++ b/src/meta-srv/src/service/store/etcd.rs
@@ -29,8 +29,10 @@ use etcd_client::{
use crate::error;
use crate::error::Result;
-use crate::metrics::METRIC_META_KV_REQUEST;
+use crate::metrics::{METRIC_META_KV_REQUEST, METRIC_META_TXN_REQUEST};
+use crate::service::store::etcd_util::KvPair;
use crate::service::store::kv::{KvStore, KvStoreRef};
+use crate::service::store::txn::TxnService;
// Maximum number of operations permitted in a transaction.
// The etcd default configuration's `--max-txn-ops` is 128.
@@ -463,6 +465,28 @@ impl KvStore for EtcdStore {
}
}
+#[async_trait::async_trait]
+impl TxnService for EtcdStore {
+ async fn txn(
+ &self,
+ txn: crate::service::store::txn::Txn,
+ ) -> Result<crate::service::store::txn::TxnResponse> {
+ let _timer = timer!(
+ METRIC_META_TXN_REQUEST,
+ &[("target", "etcd".to_string()), ("op", "txn".to_string()),]
+ );
+
+ let etcd_txn: Txn = txn.into();
+ let txn_res = self
+ .client
+ .kv_client()
+ .txn(etcd_txn)
+ .await
+ .context(error::EtcdFailedSnafu)?;
+ txn_res.try_into()
+ }
+}
+
struct Get {
cluster_id: u64,
key: Vec<u8>,
@@ -704,30 +728,6 @@ impl TryFrom<MoveValueRequest> for MoveValue {
}
}
-struct KvPair<'a>(&'a etcd_client::KeyValue);
-
-impl<'a> KvPair<'a> {
- /// Creates a `KvPair` from etcd KeyValue
- #[inline]
- fn new(kv: &'a etcd_client::KeyValue) -> Self {
- Self(kv)
- }
-
- #[inline]
- fn from_etcd_kv(kv: &etcd_client::KeyValue) -> KeyValue {
- KeyValue::from(KvPair::new(kv))
- }
-}
-
-impl<'a> From<KvPair<'a>> for KeyValue {
- fn from(kv: KvPair<'a>) -> Self {
- Self {
- key: kv.0.key().to_vec(),
- value: kv.0.value().to_vec(),
- }
- }
-}
-
#[cfg(test)]
mod tests {
use super::*;
diff --git a/src/meta-srv/src/service/store/etcd_util.rs b/src/meta-srv/src/service/store/etcd_util.rs
new file mode 100644
index 000000000000..e97e8edba361
--- /dev/null
+++ b/src/meta-srv/src/service/store/etcd_util.rs
@@ -0,0 +1,39 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use api::v1::meta::KeyValue;
+
+pub struct KvPair<'a>(&'a etcd_client::KeyValue);
+
+impl<'a> KvPair<'a> {
+ /// Creates a `KvPair` from etcd KeyValue
+ #[inline]
+ pub fn new(kv: &'a etcd_client::KeyValue) -> Self {
+ Self(kv)
+ }
+
+ #[inline]
+ pub fn from_etcd_kv(kv: &etcd_client::KeyValue) -> KeyValue {
+ KeyValue::from(KvPair::new(kv))
+ }
+}
+
+impl<'a> From<KvPair<'a>> for KeyValue {
+ fn from(kv: KvPair<'a>) -> Self {
+ Self {
+ key: kv.0.key().to_vec(),
+ value: kv.0.value().to_vec(),
+ }
+ }
+}
diff --git a/src/meta-srv/src/service/store/kv.rs b/src/meta-srv/src/service/store/kv.rs
index efbb6a4f00d3..5c03e9362e97 100644
--- a/src/meta-srv/src/service/store/kv.rs
+++ b/src/meta-srv/src/service/store/kv.rs
@@ -22,12 +22,13 @@ use api::v1::meta::{
};
use crate::error::Result;
+use crate::service::store::txn::TxnService;
pub type KvStoreRef = Arc<dyn KvStore>;
pub type ResettableKvStoreRef = Arc<dyn ResettableKvStore>;
#[async_trait::async_trait]
-pub trait KvStore: Send + Sync {
+pub trait KvStore: TxnService {
async fn range(&self, req: RangeRequest) -> Result<RangeResponse>;
async fn put(&self, req: PutRequest) -> Result<PutResponse>;
diff --git a/src/meta-srv/src/service/store/memory.rs b/src/meta-srv/src/service/store/memory.rs
index a0d2ed0920b0..b4cc67ad8819 100644
--- a/src/meta-srv/src/service/store/memory.rs
+++ b/src/meta-srv/src/service/store/memory.rs
@@ -27,8 +27,9 @@ use parking_lot::RwLock;
use super::ext::KvStoreExt;
use crate::error::Result;
-use crate::metrics::METRIC_META_KV_REQUEST;
+use crate::metrics::{METRIC_META_KV_REQUEST, METRIC_META_TXN_REQUEST};
use crate::service::store::kv::{KvStore, ResettableKvStore};
+use crate::service::store::txn::{Txn, TxnOp, TxnOpResponse, TxnRequest, TxnResponse, TxnService};
pub struct MemStore {
inner: RwLock<BTreeMap<Vec<u8>, Vec<u8>>>,
@@ -119,6 +120,7 @@ impl KvStore for MemStore {
} = req;
let mut memory = self.inner.write();
+
let prev_value = memory.insert(key.clone(), value);
let prev_kv = if prev_kv {
prev_value.map(|value| KeyValue { key, value })
@@ -164,6 +166,7 @@ impl KvStore for MemStore {
} = req;
let mut memory = self.inner.write();
+
let prev_kvs = if prev_kv {
kvs.into_iter()
.map(|kv| (kv.key.clone(), memory.insert(kv.key, kv.value)))
@@ -198,6 +201,7 @@ impl KvStore for MemStore {
} = req;
let mut memory = self.inner.write();
+
let prev_kvs = if prev_kv {
keys.into_iter()
.filter_map(|key| memory.remove(&key).map(|value| KeyValue { key, value }))
@@ -330,6 +334,72 @@ impl KvStore for MemStore {
}
}
+#[async_trait::async_trait]
+impl TxnService for MemStore {
+ async fn txn(&self, txn: Txn) -> Result<TxnResponse> {
+ let _timer = timer!(
+ METRIC_META_TXN_REQUEST,
+ &[("target", "memory".to_string()), ("op", "txn".to_string()),]
+ );
+
+ let TxnRequest {
+ compare,
+ success,
+ failure,
+ } = txn.into();
+
+ let mut memory = self.inner.write();
+
+ let succeeded = compare
+ .iter()
+ .all(|x| x.compare_with_value(memory.get(&x.key)));
+
+ let do_txn = |txn_op| match txn_op {
+ TxnOp::Put(key, value) => {
+ let prev_value = memory.insert(key.clone(), value);
+ let prev_kv = prev_value.map(|value| KeyValue { key, value });
+ let put_res = PutResponse {
+ prev_kv,
+ ..Default::default()
+ };
+ TxnOpResponse::ResponsePut(put_res)
+ }
+ TxnOp::Get(key) => {
+ let value = memory.get(&key);
+ let kv = value.map(|value| KeyValue {
+ key,
+ value: value.clone(),
+ });
+ let get_res = RangeResponse {
+ kvs: kv.map(|kv| vec![kv]).unwrap_or(vec![]),
+ ..Default::default()
+ };
+ TxnOpResponse::ResponseGet(get_res)
+ }
+ TxnOp::Delete(key) => {
+ let prev_value = memory.remove(&key);
+ let prev_kv = prev_value.map(|value| KeyValue { key, value });
+ let delete_res = DeleteRangeResponse {
+ prev_kvs: prev_kv.map(|kv| vec![kv]).unwrap_or(vec![]),
+ ..Default::default()
+ };
+ TxnOpResponse::ResponseDelete(delete_res)
+ }
+ };
+
+ let responses: Vec<_> = if succeeded {
+ success.into_iter().map(do_txn).collect()
+ } else {
+ failure.into_iter().map(do_txn).collect()
+ };
+
+ Ok(TxnResponse {
+ succeeded,
+ responses,
+ })
+ }
+}
+
#[cfg(test)]
mod tests {
use std::sync::atomic::{AtomicU8, Ordering};
diff --git a/src/meta-srv/src/service/store/txn.rs b/src/meta-srv/src/service/store/txn.rs
new file mode 100644
index 000000000000..aaaf7b953acc
--- /dev/null
+++ b/src/meta-srv/src/service/store/txn.rs
@@ -0,0 +1,469 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use api::v1::meta::{DeleteRangeResponse, PutResponse, RangeResponse};
+
+use crate::error::Result;
+
+mod etcd;
+
+#[async_trait::async_trait]
+pub trait TxnService: Sync + Send {
+ async fn txn(&self, _txn: Txn) -> Result<TxnResponse> {
+ unimplemented!("txn is not implemented")
+ }
+}
+
+#[derive(Debug, Clone, PartialEq)]
+pub enum CompareOp {
+ Equal,
+ Greater,
+ Less,
+ NotEqual,
+}
+
+#[derive(Debug, Clone, PartialEq)]
+pub struct Compare {
+ pub key: Vec<u8>,
+ pub cmp: CompareOp,
+ /// None means the key does not exist.
+ pub target: Option<Vec<u8>>,
+}
+
+impl Compare {
+ pub fn new(key: Vec<u8>, cmp: CompareOp, target: Option<Vec<u8>>) -> Self {
+ Self { key, cmp, target }
+ }
+
+ pub fn with_value(key: Vec<u8>, cmp: CompareOp, target: Vec<u8>) -> Self {
+ Self::new(key, cmp, Some(target))
+ }
+
+ pub fn with_not_exist_value(key: Vec<u8>, cmp: CompareOp) -> Self {
+ Self::new(key, cmp, None)
+ }
+
+ pub fn compare_with_value(&self, value: Option<&Vec<u8>>) -> bool {
+ match (value, &self.target) {
+ (Some(value), Some(target)) => match self.cmp {
+ CompareOp::Equal => *value == *target,
+ CompareOp::Greater => *value > *target,
+ CompareOp::Less => *value < *target,
+ CompareOp::NotEqual => *value != *target,
+ },
+ (Some(_), None) => match self.cmp {
+ CompareOp::Equal => false,
+ CompareOp::Greater => true,
+ CompareOp::Less => false,
+ CompareOp::NotEqual => true,
+ },
+ (None, Some(_)) => match self.cmp {
+ CompareOp::Equal => false,
+ CompareOp::Greater => false,
+ CompareOp::Less => true,
+ CompareOp::NotEqual => true,
+ },
+ (None, None) => match self.cmp {
+ CompareOp::Equal => true,
+ CompareOp::Greater => false,
+ CompareOp::Less => false,
+ CompareOp::NotEqual => false,
+ },
+ }
+ }
+}
+
+#[derive(Debug, Clone, PartialEq)]
+pub enum TxnOp {
+ Put(Vec<u8>, Vec<u8>),
+ Get(Vec<u8>),
+ Delete(Vec<u8>),
+}
+
+#[derive(Debug, Clone, Default, PartialEq)]
+pub struct TxnRequest {
+ pub compare: Vec<Compare>,
+ pub success: Vec<TxnOp>,
+ pub failure: Vec<TxnOp>,
+}
+
+#[derive(Debug, Clone, PartialEq)]
+pub enum TxnOpResponse {
+ ResponsePut(PutResponse),
+ ResponseGet(RangeResponse),
+ ResponseDelete(DeleteRangeResponse),
+}
+
+pub struct TxnResponse {
+ pub succeeded: bool,
+ pub responses: Vec<TxnOpResponse>,
+}
+
+#[derive(Debug, Clone, Default, PartialEq)]
+pub struct Txn {
+ req: TxnRequest,
+ c_when: bool,
+ c_then: bool,
+ c_else: bool,
+}
+
+impl Txn {
+ pub fn new() -> Self {
+ Txn::default()
+ }
+
+    /// Takes a list of comparisons. If all of them succeed, the operations passed
+    /// into `and_then()` will be executed; otherwise the operations passed into
+    /// `or_else()` will be executed.
+ #[inline]
+ pub fn when(mut self, compares: impl Into<Vec<Compare>>) -> Self {
+ assert!(!self.c_when, "cannot call `when` twice");
+ assert!(!self.c_then, "cannot call `when` after `and_then`");
+ assert!(!self.c_else, "cannot call `when` after `or_else`");
+
+ self.c_when = true;
+ self.req.compare = compares.into();
+ self
+ }
+
+    /// Takes a list of operations. The operations will be executed if the
+    /// comparisons passed into `when()` succeed.
+ #[inline]
+ pub fn and_then(mut self, operations: impl Into<Vec<TxnOp>>) -> Self {
+ assert!(!self.c_then, "cannot call `and_then` twice");
+ assert!(!self.c_else, "cannot call `and_then` after `or_else`");
+
+ self.c_then = true;
+ self.req.success = operations.into();
+ self
+ }
+
+    /// Takes a list of operations. The operations will be executed if the
+    /// comparisons passed into `when()` fail.
+ #[inline]
+ pub fn or_else(mut self, operations: impl Into<Vec<TxnOp>>) -> Self {
+ assert!(!self.c_else, "cannot call `or_else` twice");
+
+ self.c_else = true;
+ self.req.failure = operations.into();
+ self
+ }
+}
+
+impl From<Txn> for TxnRequest {
+ fn from(txn: Txn) -> Self {
+ txn.req
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use api::v1::meta::{KeyValue, PutRequest};
+
+ use super::*;
+ use crate::service::store::ext::KvStoreExt;
+ use crate::service::store::kv::KvStoreRef;
+
+ #[test]
+ fn test_compare() {
+ // Equal
+ let compare = Compare::with_value(vec![1], CompareOp::Equal, vec![1]);
+ assert!(compare.compare_with_value(Some(&vec![1])));
+ assert!(!compare.compare_with_value(None));
+ let compare = Compare::with_not_exist_value(vec![1], CompareOp::Equal);
+ assert!(compare.compare_with_value(None));
+
+ // Greater
+ let compare = Compare::with_value(vec![1], CompareOp::Greater, vec![1]);
+ assert!(compare.compare_with_value(Some(&vec![2])));
+ assert!(!compare.compare_with_value(None));
+ let compare = Compare::with_not_exist_value(vec![1], CompareOp::Greater);
+ assert!(!compare.compare_with_value(None));
+ assert!(compare.compare_with_value(Some(&vec![1])));
+
+ // Less
+ let compare = Compare::with_value(vec![1], CompareOp::Less, vec![1]);
+ assert!(compare.compare_with_value(Some(&vec![0])));
+ assert!(compare.compare_with_value(None));
+ let compare = Compare::with_not_exist_value(vec![1], CompareOp::Less);
+ assert!(!compare.compare_with_value(None));
+ assert!(!compare.compare_with_value(Some(&vec![1])));
+
+ // NotEqual
+ let compare = Compare::with_value(vec![1], CompareOp::NotEqual, vec![1]);
+ assert!(!compare.compare_with_value(Some(&vec![1])));
+ assert!(compare.compare_with_value(Some(&vec![2])));
+ assert!(compare.compare_with_value(None));
+ let compare = Compare::with_not_exist_value(vec![1], CompareOp::NotEqual);
+ assert!(!compare.compare_with_value(None));
+ assert!(compare.compare_with_value(Some(&vec![1])));
+ }
+
+ #[test]
+ fn test_txn() {
+ let txn = Txn::new()
+ .when(vec![Compare::with_value(
+ vec![1],
+ CompareOp::Equal,
+ vec![1],
+ )])
+ .and_then(vec![TxnOp::Put(vec![1], vec![1])])
+ .or_else(vec![TxnOp::Put(vec![1], vec![2])]);
+
+ assert_eq!(
+ txn,
+ Txn {
+ req: TxnRequest {
+ compare: vec![Compare::with_value(vec![1], CompareOp::Equal, vec![1])],
+ success: vec![TxnOp::Put(vec![1], vec![1])],
+ failure: vec![TxnOp::Put(vec![1], vec![2])],
+ },
+ c_when: true,
+ c_then: true,
+ c_else: true,
+ }
+ );
+ }
+
+ #[tokio::test]
+ async fn test_txn_one_compare_op() {
+ let kv_store = create_kv_store().await;
+
+ let _ = kv_store
+ .put(PutRequest {
+ key: vec![11],
+ value: vec![3],
+ ..Default::default()
+ })
+ .await
+ .unwrap();
+
+ let txn = Txn::new()
+ .when(vec![Compare::with_value(
+ vec![11],
+ CompareOp::Greater,
+ vec![1],
+ )])
+ .and_then(vec![TxnOp::Put(vec![11], vec![1])])
+ .or_else(vec![TxnOp::Put(vec![11], vec![2])]);
+
+ let txn_response = kv_store.txn(txn).await.unwrap();
+
+ assert!(txn_response.succeeded);
+ assert_eq!(txn_response.responses.len(), 1);
+ }
+
+ #[tokio::test]
+ async fn test_txn_multi_compare_op() {
+ let kv_store = create_kv_store().await;
+
+ for i in 1..3 {
+ let _ = kv_store
+ .put(PutRequest {
+ key: vec![i],
+ value: vec![i],
+ ..Default::default()
+ })
+ .await
+ .unwrap();
+ }
+
+ let when: Vec<_> = (1..3u8)
+ .map(|i| Compare::with_value(vec![i], CompareOp::Equal, vec![i]))
+ .collect();
+
+ let txn = Txn::new()
+ .when(when)
+ .and_then(vec![
+ TxnOp::Put(vec![1], vec![10]),
+ TxnOp::Put(vec![2], vec![20]),
+ ])
+ .or_else(vec![TxnOp::Put(vec![1], vec![11])]);
+
+ let txn_response = kv_store.txn(txn).await.unwrap();
+
+ assert!(txn_response.succeeded);
+ assert_eq!(txn_response.responses.len(), 2);
+ }
+
+ #[tokio::test]
+ async fn test_txn_compare_equal() {
+ let kv_store = create_kv_store().await;
+ let key = vec![101u8];
+ kv_store.delete(key.clone(), false).await.unwrap();
+
+ let txn = Txn::new()
+ .when(vec![Compare::with_not_exist_value(
+ key.clone(),
+ CompareOp::Equal,
+ )])
+ .and_then(vec![TxnOp::Put(key.clone(), vec![1])])
+ .or_else(vec![TxnOp::Put(key.clone(), vec![2])]);
+ let txn_response = kv_store.txn(txn.clone()).await.unwrap();
+ assert!(txn_response.succeeded);
+
+ let txn_response = kv_store.txn(txn).await.unwrap();
+ assert!(!txn_response.succeeded);
+
+ let txn = Txn::new()
+ .when(vec![Compare::with_value(
+ key.clone(),
+ CompareOp::Equal,
+ vec![2],
+ )])
+ .and_then(vec![TxnOp::Put(key.clone(), vec![3])])
+ .or_else(vec![TxnOp::Put(key, vec![4])]);
+ let txn_response = kv_store.txn(txn).await.unwrap();
+ assert!(txn_response.succeeded);
+ }
+
+ #[tokio::test]
+ async fn test_txn_compare_greater() {
+ let kv_store = create_kv_store().await;
+ let key = vec![102u8];
+ kv_store.delete(key.clone(), false).await.unwrap();
+
+ let txn = Txn::new()
+ .when(vec![Compare::with_not_exist_value(
+ key.clone(),
+ CompareOp::Greater,
+ )])
+ .and_then(vec![TxnOp::Put(key.clone(), vec![1])])
+ .or_else(vec![TxnOp::Put(key.clone(), vec![2])]);
+ let txn_response = kv_store.txn(txn.clone()).await.unwrap();
+ assert!(!txn_response.succeeded);
+
+ let txn_response = kv_store.txn(txn).await.unwrap();
+ assert!(txn_response.succeeded);
+
+ let txn = Txn::new()
+ .when(vec![Compare::with_value(
+ key.clone(),
+ CompareOp::Greater,
+ vec![1],
+ )])
+ .and_then(vec![TxnOp::Put(key.clone(), vec![3])])
+ .or_else(vec![TxnOp::Get(key.clone())]);
+ let mut txn_response = kv_store.txn(txn).await.unwrap();
+ assert!(!txn_response.succeeded);
+ let res = txn_response.responses.pop().unwrap();
+ assert_eq!(
+ res,
+ TxnOpResponse::ResponseGet(RangeResponse {
+ header: None,
+ kvs: vec![KeyValue {
+ key,
+ value: vec![1],
+ }],
+ more: false,
+ })
+ );
+ }
+
+ #[tokio::test]
+ async fn test_txn_compare_less() {
+ let kv_store = create_kv_store().await;
+ let key = vec![103u8];
+ kv_store.delete(vec![3], false).await.unwrap();
+
+ let txn = Txn::new()
+ .when(vec![Compare::with_not_exist_value(
+ key.clone(),
+ CompareOp::Less,
+ )])
+ .and_then(vec![TxnOp::Put(key.clone(), vec![1])])
+ .or_else(vec![TxnOp::Put(key.clone(), vec![2])]);
+ let txn_response = kv_store.txn(txn.clone()).await.unwrap();
+ assert!(!txn_response.succeeded);
+
+ let txn_response = kv_store.txn(txn).await.unwrap();
+ assert!(!txn_response.succeeded);
+
+ let txn = Txn::new()
+ .when(vec![Compare::with_value(
+ key.clone(),
+ CompareOp::Less,
+ vec![2],
+ )])
+ .and_then(vec![TxnOp::Put(key.clone(), vec![3])])
+ .or_else(vec![TxnOp::Get(key.clone())]);
+ let mut txn_response = kv_store.txn(txn).await.unwrap();
+ assert!(!txn_response.succeeded);
+ let res = txn_response.responses.pop().unwrap();
+ assert_eq!(
+ res,
+ TxnOpResponse::ResponseGet(RangeResponse {
+ header: None,
+ kvs: vec![KeyValue {
+ key,
+ value: vec![2],
+ }],
+ more: false,
+ })
+ );
+ }
+
+ #[tokio::test]
+ async fn test_txn_compare_not_equal() {
+ let kv_store = create_kv_store().await;
+ let key = vec![104u8];
+ kv_store.delete(key.clone(), false).await.unwrap();
+
+ let txn = Txn::new()
+ .when(vec![Compare::with_not_exist_value(
+ key.clone(),
+ CompareOp::NotEqual,
+ )])
+ .and_then(vec![TxnOp::Put(key.clone(), vec![1])])
+ .or_else(vec![TxnOp::Put(key.clone(), vec![2])]);
+ let txn_response = kv_store.txn(txn.clone()).await.unwrap();
+ assert!(!txn_response.succeeded);
+
+ let txn_response = kv_store.txn(txn).await.unwrap();
+ assert!(txn_response.succeeded);
+
+ let txn = Txn::new()
+ .when(vec![Compare::with_value(
+ key.clone(),
+ CompareOp::Equal,
+ vec![2],
+ )])
+ .and_then(vec![TxnOp::Put(key.clone(), vec![3])])
+ .or_else(vec![TxnOp::Get(key.clone())]);
+ let mut txn_response = kv_store.txn(txn).await.unwrap();
+ assert!(!txn_response.succeeded);
+ let res = txn_response.responses.pop().unwrap();
+ assert_eq!(
+ res,
+ TxnOpResponse::ResponseGet(RangeResponse {
+ header: None,
+ kvs: vec![KeyValue {
+ key,
+ value: vec![1],
+ }],
+ more: false,
+ })
+ );
+ }
+
+ async fn create_kv_store() -> KvStoreRef {
+ std::sync::Arc::new(crate::service::store::memory::MemStore::new())
+ // TODO(jiachun): Add a feature to test against etcd in github CI
+ //
+ // The same test can be run against etcd by uncommenting the following line
+ // crate::service::store::etcd::EtcdStore::with_endpoints(["127.0.0.1:2379"])
+ // .await
+ // .unwrap()
+ }
+}
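For context on the `Txn` builder introduced above, here is an editorial sketch (not part of the diff). It assumes a store that implements `TxnService`, which is how the tests use `KvStoreRef`, and the `compare_and_put` helper name is hypothetical; it shows a compare-and-swap built from `when`/`and_then`/`or_else`.

use crate::error::Result;
use crate::service::store::kv::KvStoreRef;
use crate::service::store::txn::{Compare, CompareOp, Txn, TxnOp, TxnResponse, TxnService};

/// Put `new` under `key` only while the key still holds `expected`; otherwise
/// read the current value back through the `or_else` branch.
async fn compare_and_put(
    kv_store: &KvStoreRef,
    key: Vec<u8>,
    expected: Vec<u8>,
    new: Vec<u8>,
) -> Result<TxnResponse> {
    let txn = Txn::new()
        .when(vec![Compare::with_value(
            key.clone(),
            CompareOp::Equal,
            expected,
        )])
        .and_then(vec![TxnOp::Put(key.clone(), new)])
        .or_else(vec![TxnOp::Get(key)]);
    kv_store.txn(txn).await
}

When the comparison fails, `TxnResponse::succeeded` is false and `responses` carries the `ResponseGet` produced by the `or_else` branch.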
diff --git a/src/meta-srv/src/service/store/txn/etcd.rs b/src/meta-srv/src/service/store/txn/etcd.rs
new file mode 100644
index 000000000000..e2fc3c0315ef
--- /dev/null
+++ b/src/meta-srv/src/service/store/txn/etcd.rs
@@ -0,0 +1,142 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use api::v1::meta::{DeleteRangeResponse, PutResponse, RangeResponse};
+use etcd_client::{
+ Compare as EtcdCompare, CompareOp as EtcdCompareOp, Txn as EtcdTxn, TxnOp as EtcdTxnOp,
+ TxnOpResponse as EtcdTxnOpResponse, TxnResponse as EtcdTxnResponse,
+};
+
+use crate::error::{self, Result};
+use crate::service::store::etcd_util::KvPair;
+use crate::service::store::txn::{Compare, CompareOp, Txn, TxnOp, TxnOpResponse, TxnResponse};
+
+impl From<Txn> for EtcdTxn {
+ fn from(txn: Txn) -> Self {
+ let mut etcd_txn = EtcdTxn::new();
+ if txn.c_when {
+ let compares = txn
+ .req
+ .compare
+ .into_iter()
+ .map(EtcdCompare::from)
+ .collect::<Vec<_>>();
+ etcd_txn = etcd_txn.when(compares);
+ }
+ if txn.c_then {
+ let success = txn
+ .req
+ .success
+ .into_iter()
+ .map(EtcdTxnOp::from)
+ .collect::<Vec<_>>();
+ etcd_txn = etcd_txn.and_then(success);
+ }
+ if txn.c_else {
+ let failure = txn
+ .req
+ .failure
+ .into_iter()
+ .map(EtcdTxnOp::from)
+ .collect::<Vec<_>>();
+ etcd_txn = etcd_txn.or_else(failure);
+ }
+ etcd_txn
+ }
+}
+
+impl From<Compare> for EtcdCompare {
+ fn from(cmp: Compare) -> Self {
+ let etcd_cmp = match cmp.cmp {
+ CompareOp::Equal => EtcdCompareOp::Equal,
+ CompareOp::Greater => EtcdCompareOp::Greater,
+ CompareOp::Less => EtcdCompareOp::Less,
+ CompareOp::NotEqual => EtcdCompareOp::NotEqual,
+ };
+ match cmp.target {
+ Some(target) => EtcdCompare::value(cmp.key, etcd_cmp, target),
+            // a create revision of 0 means the key does not exist
+ None => EtcdCompare::create_revision(cmp.key, etcd_cmp, 0),
+ }
+ }
+}
+
+impl From<TxnOp> for EtcdTxnOp {
+ fn from(op: TxnOp) -> Self {
+ match op {
+ TxnOp::Put(key, value) => EtcdTxnOp::put(key, value, None),
+ TxnOp::Get(key) => EtcdTxnOp::get(key, None),
+ TxnOp::Delete(key) => EtcdTxnOp::delete(key, None),
+ }
+ }
+}
+
+impl TryFrom<EtcdTxnOpResponse> for TxnOpResponse {
+ type Error = error::Error;
+
+ fn try_from(op_resp: EtcdTxnOpResponse) -> Result<Self> {
+ match op_resp {
+ EtcdTxnOpResponse::Put(res) => {
+ let prev_kv = res.prev_key().map(KvPair::from_etcd_kv);
+ let put_res = PutResponse {
+ prev_kv,
+ ..Default::default()
+ };
+ Ok(TxnOpResponse::ResponsePut(put_res))
+ }
+ EtcdTxnOpResponse::Get(res) => {
+ let kvs = res.kvs().iter().map(KvPair::from_etcd_kv).collect();
+ let range_res = RangeResponse {
+ kvs,
+ ..Default::default()
+ };
+ Ok(TxnOpResponse::ResponseGet(range_res))
+ }
+ EtcdTxnOpResponse::Delete(res) => {
+ let prev_kvs = res
+ .prev_kvs()
+ .iter()
+ .map(KvPair::from_etcd_kv)
+ .collect::<Vec<_>>();
+ let delete_res = DeleteRangeResponse {
+ prev_kvs,
+ deleted: res.deleted(),
+ ..Default::default()
+ };
+ Ok(TxnOpResponse::ResponseDelete(delete_res))
+ }
+ EtcdTxnOpResponse::Txn(_) => error::EtcdTxnOpResponseSnafu {
+ err_msg: "nested txn is not supported",
+ }
+ .fail(),
+ }
+ }
+}
+
+impl TryFrom<EtcdTxnResponse> for TxnResponse {
+ type Error = error::Error;
+
+ fn try_from(resp: EtcdTxnResponse) -> Result<Self> {
+ let succeeded = resp.succeeded();
+ let responses = resp
+ .op_responses()
+ .into_iter()
+ .map(TxnOpResponse::try_from)
+ .collect::<Result<Vec<_>>>()?;
+ Ok(Self {
+ succeeded,
+ responses,
+ })
+ }
+}
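A small editorial sketch of the conversion above (not part of the diff; the function name is made up): only the branches actually set on the builder are forwarded to etcd, and a `None` compare target is encoded as `create_revision == 0`, i.e. "the key does not exist".

use crate::service::store::txn::{Compare, CompareOp, Txn, TxnOp};

/// Build a "put if absent" transaction for etcd. `or_else` is never called,
/// so the resulting etcd transaction has an empty failure branch.
fn put_if_absent_txn() -> etcd_client::Txn {
    Txn::new()
        .when(vec![Compare::with_not_exist_value(
            b"greeting".to_vec(),
            CompareOp::Equal,
        )])
        .and_then(vec![TxnOp::Put(b"greeting".to_vec(), b"hello".to_vec())])
        .into()
}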
|
feat
|
txn for meta (#1828)
|
afd88dd53afe1e3249596e07694b7fd182ad2cc7
|
2022-12-19 08:50:51
|
LFC
|
fix: `test_dist_table_scan` block (#761)
| false
|
diff --git a/src/common/recordbatch/src/adapter.rs b/src/common/recordbatch/src/adapter.rs
index 2b8436ec4e43..14b8fba0dd10 100644
--- a/src/common/recordbatch/src/adapter.rs
+++ b/src/common/recordbatch/src/adapter.rs
@@ -121,7 +121,8 @@ impl Stream for RecordBatchStreamAdapter {
enum AsyncRecordBatchStreamAdapterState {
Uninit(FutureStream),
- Inited(std::result::Result<DfSendableRecordBatchStream, DataFusionError>),
+ Ready(DfSendableRecordBatchStream),
+ Failed,
}
pub struct AsyncRecordBatchStreamAdapter {
@@ -151,28 +152,26 @@ impl Stream for AsyncRecordBatchStreamAdapter {
loop {
match &mut self.state {
AsyncRecordBatchStreamAdapterState::Uninit(stream_future) => {
- self.state = AsyncRecordBatchStreamAdapterState::Inited(ready!(Pin::new(
- stream_future
- )
- .poll(cx)));
- continue;
+ match ready!(Pin::new(stream_future).poll(cx)) {
+ Ok(stream) => {
+ self.state = AsyncRecordBatchStreamAdapterState::Ready(stream);
+ continue;
+ }
+ Err(e) => {
+ self.state = AsyncRecordBatchStreamAdapterState::Failed;
+ return Poll::Ready(Some(
+ Err(e).context(error::InitRecordbatchStreamSnafu),
+ ));
+ }
+ };
}
- AsyncRecordBatchStreamAdapterState::Inited(stream) => match stream {
- Ok(stream) => {
- return Poll::Ready(ready!(Pin::new(stream).poll_next(cx)).map(|df| {
- let df_record_batch = df.context(error::PollStreamSnafu)?;
- RecordBatch::try_from_df_record_batch(self.schema(), df_record_batch)
- }));
- }
- Err(e) => {
- return Poll::Ready(Some(
- error::CreateRecordBatchesSnafu {
- reason: format!("Read error {:?} from stream", e),
- }
- .fail(),
- ))
- }
- },
+ AsyncRecordBatchStreamAdapterState::Ready(stream) => {
+ return Poll::Ready(ready!(Pin::new(stream).poll_next(cx)).map(|x| {
+ let df_record_batch = x.context(error::PollStreamSnafu)?;
+ RecordBatch::try_from_df_record_batch(self.schema(), df_record_batch)
+ }))
+ }
+ AsyncRecordBatchStreamAdapterState::Failed => return Poll::Ready(None),
}
}
}
@@ -183,3 +182,104 @@ impl Stream for AsyncRecordBatchStreamAdapter {
(0, None)
}
}
+
+#[cfg(test)]
+mod test {
+ use common_error::mock::MockError;
+ use common_error::prelude::{BoxedError, StatusCode};
+ use datatypes::prelude::ConcreteDataType;
+ use datatypes::schema::ColumnSchema;
+ use datatypes::vectors::Int32Vector;
+
+ use super::*;
+ use crate::RecordBatches;
+
+ #[tokio::test]
+ async fn test_async_recordbatch_stream_adaptor() {
+ struct MaybeErrorRecordBatchStream {
+ items: Vec<Result<RecordBatch>>,
+ }
+
+ impl RecordBatchStream for MaybeErrorRecordBatchStream {
+ fn schema(&self) -> SchemaRef {
+ unimplemented!()
+ }
+ }
+
+ impl Stream for MaybeErrorRecordBatchStream {
+ type Item = Result<RecordBatch>;
+
+ fn poll_next(
+ mut self: Pin<&mut Self>,
+ _: &mut Context<'_>,
+ ) -> Poll<Option<Self::Item>> {
+ if let Some(batch) = self.items.pop() {
+ Poll::Ready(Some(Ok(batch?)))
+ } else {
+ Poll::Ready(None)
+ }
+ }
+ }
+
+ fn new_future_stream(
+ maybe_recordbatches: Result<Vec<Result<RecordBatch>>>,
+ ) -> FutureStream {
+ Box::pin(async move {
+ maybe_recordbatches
+ .map(|items| {
+ Box::pin(DfRecordBatchStreamAdapter::new(Box::pin(
+ MaybeErrorRecordBatchStream { items },
+ ))) as _
+ })
+ .map_err(|e| DataFusionError::External(Box::new(e)))
+ })
+ }
+
+ let schema = Arc::new(Schema::new(vec![ColumnSchema::new(
+ "a",
+ ConcreteDataType::int32_datatype(),
+ false,
+ )]));
+ let batch1 = RecordBatch::new(
+ schema.clone(),
+ vec![Arc::new(Int32Vector::from_slice(&[1])) as _],
+ )
+ .unwrap();
+ let batch2 = RecordBatch::new(
+ schema.clone(),
+ vec![Arc::new(Int32Vector::from_slice(&[2])) as _],
+ )
+ .unwrap();
+
+ let success_stream = new_future_stream(Ok(vec![Ok(batch1.clone()), Ok(batch2.clone())]));
+ let adapter = AsyncRecordBatchStreamAdapter::new(schema.clone(), success_stream);
+ let collected = RecordBatches::try_collect(Box::pin(adapter)).await.unwrap();
+ assert_eq!(
+ collected,
+ RecordBatches::try_new(schema.clone(), vec![batch2.clone(), batch1.clone()]).unwrap()
+ );
+
+ let poll_err_stream = new_future_stream(Ok(vec![
+ Ok(batch1.clone()),
+ Err(error::Error::External {
+ source: BoxedError::new(MockError::new(StatusCode::Unknown)),
+ }),
+ ]));
+ let adapter = AsyncRecordBatchStreamAdapter::new(schema.clone(), poll_err_stream);
+ let result = RecordBatches::try_collect(Box::pin(adapter)).await;
+ assert_eq!(
+ result.unwrap_err().to_string(),
+ "Failed to poll stream, source: External error: External error, source: Unknown"
+ );
+
+ let failed_to_init_stream = new_future_stream(Err(error::Error::External {
+ source: BoxedError::new(MockError::new(StatusCode::Internal)),
+ }));
+ let adapter = AsyncRecordBatchStreamAdapter::new(schema.clone(), failed_to_init_stream);
+ let result = RecordBatches::try_collect(Box::pin(adapter)).await;
+ assert_eq!(
+ result.unwrap_err().to_string(),
+ "Failed to init Recordbatch stream, source: External error: External error, source: Internal"
+ );
+ }
+}
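The rewrite above is an instance of a general pattern: a stream whose inner stream is produced by a future, with the construction error reported exactly once. The standalone sketch below is editorial (not GreptimeDB code, names are illustrative) and wraps the inner items in `Ok` for brevity instead of flattening nested results.

use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};

use futures::{ready, Stream};

type InitFuture<S, E> = Pin<Box<dyn Future<Output = Result<S, E>> + Send>>;

enum State<S, E> {
    Uninit(InitFuture<S, E>),
    Ready(S),
    Failed,
}

struct InitOnceStream<S, E> {
    state: State<S, E>,
}

impl<S, E> Stream for InitOnceStream<S, E>
where
    S: Stream + Unpin,
    E: Unpin,
{
    type Item = Result<S::Item, E>;

    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        loop {
            match &mut self.state {
                // Still building the inner stream.
                State::Uninit(fut) => match ready!(fut.as_mut().poll(cx)) {
                    Ok(stream) => self.state = State::Ready(stream),
                    Err(e) => {
                        // Surface the construction error once, then terminate.
                        self.state = State::Failed;
                        return Poll::Ready(Some(Err(e)));
                    }
                },
                State::Ready(stream) => {
                    return Poll::Ready(ready!(Pin::new(stream).poll_next(cx)).map(Ok))
                }
                State::Failed => return Poll::Ready(None),
            }
        }
    }
}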
diff --git a/src/common/recordbatch/src/error.rs b/src/common/recordbatch/src/error.rs
index 09374413381a..c77e2f3f48d6 100644
--- a/src/common/recordbatch/src/error.rs
+++ b/src/common/recordbatch/src/error.rs
@@ -64,6 +64,12 @@ pub enum Error {
source: datatypes::arrow::error::ArrowError,
backtrace: Backtrace,
},
+
+ #[snafu(display("Failed to init Recordbatch stream, source: {}", source))]
+ InitRecordbatchStream {
+ source: datafusion_common::DataFusionError,
+ backtrace: Backtrace,
+ },
}
impl ErrorExt for Error {
@@ -74,7 +80,8 @@ impl ErrorExt for Error {
Error::DataTypes { .. }
| Error::CreateRecordBatches { .. }
| Error::PollStream { .. }
- | Error::Format { .. } => StatusCode::Internal,
+ | Error::Format { .. }
+ | Error::InitRecordbatchStream { .. } => StatusCode::Internal,
Error::External { source } => source.status_code(),
|
fix
|
`test_dist_table_scan` block (#761)
|
49310acea1058c24c455fd6ae4f423c6c0f48667
|
2023-09-17 13:26:41
|
Ruihang Xia
|
refactor: rename common-function-macro subcrate (#2418)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index c069c7e3c891..bd62da4e7ac6 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1717,7 +1717,7 @@ dependencies = [
"arc-swap",
"chrono-tz 0.6.3",
"common-error",
- "common-function-macro",
+ "common-macro",
"common-query",
"common-time",
"datafusion",
@@ -1733,22 +1733,6 @@ dependencies = [
"statrs",
]
-[[package]]
-name = "common-function-macro"
-version = "0.4.0-nightly"
-dependencies = [
- "arc-swap",
- "backtrace",
- "common-query",
- "common-telemetry",
- "datatypes",
- "proc-macro2",
- "quote",
- "snafu",
- "static_assertions",
- "syn 1.0.109",
-]
-
[[package]]
name = "common-greptimedb-telemetry"
version = "0.4.0-nightly"
@@ -1815,6 +1799,22 @@ dependencies = [
"table",
]
+[[package]]
+name = "common-macro"
+version = "0.4.0-nightly"
+dependencies = [
+ "arc-swap",
+ "backtrace",
+ "common-query",
+ "common-telemetry",
+ "datatypes",
+ "proc-macro2",
+ "quote",
+ "snafu",
+ "static_assertions",
+ "syn 1.0.109",
+]
+
[[package]]
name = "common-mem-prof"
version = "0.4.0-nightly"
@@ -6926,7 +6926,7 @@ dependencies = [
"catalog",
"common-catalog",
"common-error",
- "common-function-macro",
+ "common-macro",
"common-telemetry",
"datafusion",
"datatypes",
@@ -7197,7 +7197,7 @@ dependencies = [
"common-datasource",
"common-error",
"common-function",
- "common-function-macro",
+ "common-macro",
"common-meta",
"common-query",
"common-recordbatch",
diff --git a/Cargo.toml b/Cargo.toml
index 2d6832e1e036..73375ac4f37d 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -12,7 +12,7 @@ members = [
"src/common/datasource",
"src/common/error",
"src/common/function",
- "src/common/function-macro",
+ "src/common/macro",
"src/common/greptimedb-telemetry",
"src/common/grpc",
"src/common/grpc-expr",
@@ -123,7 +123,7 @@ common-config = { path = "src/common/config" }
common-datasource = { path = "src/common/datasource" }
common-error = { path = "src/common/error" }
common-function = { path = "src/common/function" }
-common-function-macro = { path = "src/common/function-macro" }
+common-macro = { path = "src/common/macro" }
common-greptimedb-telemetry = { path = "src/common/greptimedb-telemetry" }
common-grpc = { path = "src/common/grpc" }
common-grpc-expr = { path = "src/common/grpc-expr" }
diff --git a/src/common/function-macro/src/lib.rs b/src/common/function-macro/src/lib.rs
deleted file mode 100644
index c0cc67045472..000000000000
--- a/src/common/function-macro/src/lib.rs
+++ /dev/null
@@ -1,226 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-mod range_fn;
-
-use proc_macro::TokenStream;
-use quote::{quote, quote_spanned, ToTokens};
-use range_fn::process_range_fn;
-use syn::parse::Parser;
-use syn::spanned::Spanned;
-use syn::{
- parse_macro_input, AttributeArgs, DeriveInput, ItemFn, ItemStruct, Lit, Meta, NestedMeta,
-};
-
-/// Make struct implemented trait [AggrFuncTypeStore], which is necessary when writing UDAF.
-/// This derive macro is expect to be used along with attribute macro [as_aggr_func_creator].
-#[proc_macro_derive(AggrFuncTypeStore)]
-pub fn aggr_func_type_store_derive(input: TokenStream) -> TokenStream {
- let ast = parse_macro_input!(input as DeriveInput);
- impl_aggr_func_type_store(&ast)
-}
-
-fn impl_aggr_func_type_store(ast: &DeriveInput) -> TokenStream {
- let name = &ast.ident;
- let gen = quote! {
- use common_query::logical_plan::accumulator::AggrFuncTypeStore;
- use common_query::error::{InvalidInputStateSnafu, Error as QueryError};
- use datatypes::prelude::ConcreteDataType;
-
- impl AggrFuncTypeStore for #name {
- fn input_types(&self) -> std::result::Result<Vec<ConcreteDataType>, QueryError> {
- let input_types = self.input_types.load();
- snafu::ensure!(input_types.is_some(), InvalidInputStateSnafu);
- Ok(input_types.as_ref().unwrap().as_ref().clone())
- }
-
- fn set_input_types(&self, input_types: Vec<ConcreteDataType>) -> std::result::Result<(), QueryError> {
- let old = self.input_types.swap(Some(std::sync::Arc::new(input_types.clone())));
- if let Some(old) = old {
- snafu::ensure!(old.len() == input_types.len(), InvalidInputStateSnafu);
- for (x, y) in old.iter().zip(input_types.iter()) {
- snafu::ensure!(x == y, InvalidInputStateSnafu);
- }
- }
- Ok(())
- }
- }
- };
- gen.into()
-}
-
-/// A struct can be used as a creator for aggregate function if it has been annotated with this
-/// attribute first. This attribute add a necessary field which is intended to store the input
-/// data's types to the struct.
-/// This attribute is expected to be used along with derive macro [AggrFuncTypeStore].
-#[proc_macro_attribute]
-pub fn as_aggr_func_creator(_args: TokenStream, input: TokenStream) -> TokenStream {
- let mut item_struct = parse_macro_input!(input as ItemStruct);
- if let syn::Fields::Named(ref mut fields) = item_struct.fields {
- let result = syn::Field::parse_named.parse2(quote! {
- input_types: arc_swap::ArcSwapOption<Vec<ConcreteDataType>>
- });
- match result {
- Ok(field) => fields.named.push(field),
- Err(e) => return e.into_compile_error().into(),
- }
- } else {
- return quote_spanned!(
- item_struct.fields.span() => compile_error!(
- "This attribute macro needs to add fields to the its annotated struct, \
- so the struct must have \"{}\".")
- )
- .into();
- }
- quote! {
- #item_struct
- }
- .into()
-}
-
-/// Attribute macro to convert an arithimetic function to a range function. The annotated function
-/// should accept servaral arrays as input and return a single value as output. This procedure
-/// macro can works on any number of input parameters. Return type can be either primitive type
-/// or wrapped in `Option`.
-///
-/// # Example
-/// Take `count_over_time()` in PromQL as an example:
-/// ```rust, ignore
-/// /// The count of all values in the specified interval.
-/// #[range_fn(
-/// name = "CountOverTime",
-/// ret = "Float64Array",
-/// display_name = "prom_count_over_time"
-/// )]
-/// pub fn count_over_time(_: &TimestampMillisecondArray, values: &Float64Array) -> f64 {
-/// values.len() as f64
-/// }
-/// ```
-///
-/// # Arguments
-/// - `name`: The name of the generated [ScalarUDF] struct.
-/// - `ret`: The return type of the generated UDF function.
-/// - `display_name`: The display name of the generated UDF function.
-#[proc_macro_attribute]
-pub fn range_fn(args: TokenStream, input: TokenStream) -> TokenStream {
- process_range_fn(args, input)
-}
-
-/// Attribute macro to print the caller to the annotated function.
-/// The caller is printed as its filename and the call site line number.
-///
-/// This macro works like this: inject the tracking codes as the first statement to the annotated
-/// function body. The tracking codes use [backtrace-rs](https://crates.io/crates/backtrace) to get
-/// the callers. So you must dependent on the `backtrace-rs` crate.
-///
-/// # Arguments
-/// - `depth`: The max depth of call stack to print. Optional, defaults to 1.
-///
-/// # Example
-/// ```rust, ignore
-///
-/// #[print_caller(depth = 3)]
-/// fn foo() {}
-/// ```
-#[proc_macro_attribute]
-pub fn print_caller(args: TokenStream, input: TokenStream) -> TokenStream {
- let mut depth = 1;
-
- let args = parse_macro_input!(args as AttributeArgs);
- for meta in args.iter() {
- if let NestedMeta::Meta(Meta::NameValue(name_value)) = meta {
- let ident = name_value
- .path
- .get_ident()
- .expect("Expected an ident!")
- .to_string();
- if ident == "depth" {
- let Lit::Int(i) = &name_value.lit else {
- panic!("Expected 'depth' to be a valid int!")
- };
- depth = i.base10_parse::<usize>().expect("Invalid 'depth' value");
- break;
- }
- }
- }
-
- let tokens: TokenStream = quote! {
- {
- let curr_file = file!();
-
- let bt = backtrace::Backtrace::new();
- let call_stack = bt
- .frames()
- .iter()
- .skip_while(|f| {
- !f.symbols().iter().any(|s| {
- s.filename()
- .map(|p| p.ends_with(curr_file))
- .unwrap_or(false)
- })
- })
- .skip(1)
- .take(#depth);
-
- let call_stack = call_stack
- .map(|f| {
- f.symbols()
- .iter()
- .map(|s| {
- let filename = s
- .filename()
- .map(|p| format!("{:?}", p))
- .unwrap_or_else(|| "unknown".to_string());
-
- let lineno = s
- .lineno()
- .map(|l| format!("{}", l))
- .unwrap_or_else(|| "unknown".to_string());
-
- format!("filename: {}, lineno: {}", filename, lineno)
- })
- .collect::<Vec<String>>()
- .join(", ")
- })
- .collect::<Vec<_>>();
-
- match call_stack.len() {
- 0 => common_telemetry::info!("unable to find call stack"),
- 1 => common_telemetry::info!("caller: {}", call_stack[0]),
- _ => {
- let mut s = String::new();
- s.push_str("[\n");
- for e in call_stack {
- s.push_str("\t");
- s.push_str(&e);
- s.push_str("\n");
- }
- s.push_str("]");
- common_telemetry::info!("call stack: {}", s)
- }
- }
- }
- }
- .into();
-
- let stmt = match syn::parse(tokens) {
- Ok(stmt) => stmt,
- Err(e) => return e.into_compile_error().into(),
- };
-
- let mut item = parse_macro_input!(input as ItemFn);
- item.block.stmts.insert(0, stmt);
-
- item.into_token_stream().into()
-}
diff --git a/src/common/function/Cargo.toml b/src/common/function/Cargo.toml
index 02adc46d13ab..e5a6433b7c93 100644
--- a/src/common/function/Cargo.toml
+++ b/src/common/function/Cargo.toml
@@ -8,7 +8,7 @@ license.workspace = true
arc-swap = "1.0"
chrono-tz = "0.6"
common-error = { workspace = true }
-common-function-macro = { workspace = true }
+common-macro = { workspace = true }
common-query = { workspace = true }
common-time = { workspace = true }
datafusion.workspace = true
diff --git a/src/common/function/src/scalars/aggregate/argmax.rs b/src/common/function/src/scalars/aggregate/argmax.rs
index de02c02760eb..c5c5264f1994 100644
--- a/src/common/function/src/scalars/aggregate/argmax.rs
+++ b/src/common/function/src/scalars/aggregate/argmax.rs
@@ -15,7 +15,7 @@
use std::cmp::Ordering;
use std::sync::Arc;
-use common_function_macro::{as_aggr_func_creator, AggrFuncTypeStore};
+use common_macro::{as_aggr_func_creator, AggrFuncTypeStore};
use common_query::error::{BadAccumulatorImplSnafu, CreateAccumulatorSnafu, Result};
use common_query::logical_plan::{Accumulator, AggregateFunctionCreator};
use common_query::prelude::*;
diff --git a/src/common/function/src/scalars/aggregate/argmin.rs b/src/common/function/src/scalars/aggregate/argmin.rs
index 30cd51305b2f..7233f43b7708 100644
--- a/src/common/function/src/scalars/aggregate/argmin.rs
+++ b/src/common/function/src/scalars/aggregate/argmin.rs
@@ -15,7 +15,7 @@
use std::cmp::Ordering;
use std::sync::Arc;
-use common_function_macro::{as_aggr_func_creator, AggrFuncTypeStore};
+use common_macro::{as_aggr_func_creator, AggrFuncTypeStore};
use common_query::error::{BadAccumulatorImplSnafu, CreateAccumulatorSnafu, Result};
use common_query::logical_plan::{Accumulator, AggregateFunctionCreator};
use common_query::prelude::*;
diff --git a/src/common/function/src/scalars/aggregate/diff.rs b/src/common/function/src/scalars/aggregate/diff.rs
index 747ff8af2e34..9893d6199b71 100644
--- a/src/common/function/src/scalars/aggregate/diff.rs
+++ b/src/common/function/src/scalars/aggregate/diff.rs
@@ -15,7 +15,7 @@
use std::marker::PhantomData;
use std::sync::Arc;
-use common_function_macro::{as_aggr_func_creator, AggrFuncTypeStore};
+use common_macro::{as_aggr_func_creator, AggrFuncTypeStore};
use common_query::error::{
CreateAccumulatorSnafu, DowncastVectorSnafu, FromScalarValueSnafu, Result,
};
diff --git a/src/common/function/src/scalars/aggregate/mean.rs b/src/common/function/src/scalars/aggregate/mean.rs
index 6f42e11b5e98..3dc3e185351b 100644
--- a/src/common/function/src/scalars/aggregate/mean.rs
+++ b/src/common/function/src/scalars/aggregate/mean.rs
@@ -15,7 +15,7 @@
use std::marker::PhantomData;
use std::sync::Arc;
-use common_function_macro::{as_aggr_func_creator, AggrFuncTypeStore};
+use common_macro::{as_aggr_func_creator, AggrFuncTypeStore};
use common_query::error::{
BadAccumulatorImplSnafu, CreateAccumulatorSnafu, DowncastVectorSnafu, Result,
};
diff --git a/src/common/function/src/scalars/aggregate/percentile.rs b/src/common/function/src/scalars/aggregate/percentile.rs
index 3bdd24b0e8c6..49b981a7ee0e 100644
--- a/src/common/function/src/scalars/aggregate/percentile.rs
+++ b/src/common/function/src/scalars/aggregate/percentile.rs
@@ -16,7 +16,7 @@ use std::cmp::Reverse;
use std::collections::BinaryHeap;
use std::sync::Arc;
-use common_function_macro::{as_aggr_func_creator, AggrFuncTypeStore};
+use common_macro::{as_aggr_func_creator, AggrFuncTypeStore};
use common_query::error::{
self, BadAccumulatorImplSnafu, CreateAccumulatorSnafu, DowncastVectorSnafu,
FromScalarValueSnafu, InvalidInputColSnafu, Result,
diff --git a/src/common/function/src/scalars/aggregate/polyval.rs b/src/common/function/src/scalars/aggregate/polyval.rs
index cd37f3a118d0..b56a692c8df7 100644
--- a/src/common/function/src/scalars/aggregate/polyval.rs
+++ b/src/common/function/src/scalars/aggregate/polyval.rs
@@ -15,7 +15,7 @@
use std::marker::PhantomData;
use std::sync::Arc;
-use common_function_macro::{as_aggr_func_creator, AggrFuncTypeStore};
+use common_macro::{as_aggr_func_creator, AggrFuncTypeStore};
use common_query::error::{
self, BadAccumulatorImplSnafu, CreateAccumulatorSnafu, DowncastVectorSnafu,
FromScalarValueSnafu, InvalidInputColSnafu, Result,
diff --git a/src/common/function/src/scalars/aggregate/scipy_stats_norm_cdf.rs b/src/common/function/src/scalars/aggregate/scipy_stats_norm_cdf.rs
index fb0b19e07f1e..2ec954051341 100644
--- a/src/common/function/src/scalars/aggregate/scipy_stats_norm_cdf.rs
+++ b/src/common/function/src/scalars/aggregate/scipy_stats_norm_cdf.rs
@@ -14,7 +14,7 @@
use std::sync::Arc;
-use common_function_macro::{as_aggr_func_creator, AggrFuncTypeStore};
+use common_macro::{as_aggr_func_creator, AggrFuncTypeStore};
use common_query::error::{
self, BadAccumulatorImplSnafu, CreateAccumulatorSnafu, DowncastVectorSnafu,
FromScalarValueSnafu, GenerateFunctionSnafu, InvalidInputColSnafu, Result,
diff --git a/src/common/function/src/scalars/aggregate/scipy_stats_norm_pdf.rs b/src/common/function/src/scalars/aggregate/scipy_stats_norm_pdf.rs
index 154465cb5b1b..d1bf432c993a 100644
--- a/src/common/function/src/scalars/aggregate/scipy_stats_norm_pdf.rs
+++ b/src/common/function/src/scalars/aggregate/scipy_stats_norm_pdf.rs
@@ -14,7 +14,7 @@
use std::sync::Arc;
-use common_function_macro::{as_aggr_func_creator, AggrFuncTypeStore};
+use common_macro::{as_aggr_func_creator, AggrFuncTypeStore};
use common_query::error::{
self, BadAccumulatorImplSnafu, CreateAccumulatorSnafu, DowncastVectorSnafu,
FromScalarValueSnafu, GenerateFunctionSnafu, InvalidInputColSnafu, Result,
diff --git a/src/common/function-macro/Cargo.toml b/src/common/macro/Cargo.toml
similarity index 92%
rename from src/common/function-macro/Cargo.toml
rename to src/common/macro/Cargo.toml
index 5601549fd7c0..c0ab6b0a5be3 100644
--- a/src/common/function-macro/Cargo.toml
+++ b/src/common/macro/Cargo.toml
@@ -1,5 +1,5 @@
[package]
-name = "common-function-macro"
+name = "common-macro"
version.workspace = true
edition.workspace = true
license.workspace = true
diff --git a/src/common/macro/src/aggr_func.rs b/src/common/macro/src/aggr_func.rs
new file mode 100644
index 000000000000..4c3ccccdeeb5
--- /dev/null
+++ b/src/common/macro/src/aggr_func.rs
@@ -0,0 +1,72 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use proc_macro::TokenStream;
+use quote::{quote, quote_spanned};
+use syn::parse::Parser;
+use syn::spanned::Spanned;
+use syn::{parse_macro_input, DeriveInput, ItemStruct};
+
+pub(crate) fn impl_aggr_func_type_store(ast: &DeriveInput) -> TokenStream {
+ let name = &ast.ident;
+ let gen = quote! {
+ use common_query::logical_plan::accumulator::AggrFuncTypeStore;
+ use common_query::error::{InvalidInputStateSnafu, Error as QueryError};
+ use datatypes::prelude::ConcreteDataType;
+
+ impl AggrFuncTypeStore for #name {
+ fn input_types(&self) -> std::result::Result<Vec<ConcreteDataType>, QueryError> {
+ let input_types = self.input_types.load();
+ snafu::ensure!(input_types.is_some(), InvalidInputStateSnafu);
+ Ok(input_types.as_ref().unwrap().as_ref().clone())
+ }
+
+ fn set_input_types(&self, input_types: Vec<ConcreteDataType>) -> std::result::Result<(), QueryError> {
+ let old = self.input_types.swap(Some(std::sync::Arc::new(input_types.clone())));
+ if let Some(old) = old {
+ snafu::ensure!(old.len() == input_types.len(), InvalidInputStateSnafu);
+ for (x, y) in old.iter().zip(input_types.iter()) {
+ snafu::ensure!(x == y, InvalidInputStateSnafu);
+ }
+ }
+ Ok(())
+ }
+ }
+ };
+ gen.into()
+}
+
+pub(crate) fn impl_as_aggr_func_creator(_args: TokenStream, input: TokenStream) -> TokenStream {
+ let mut item_struct = parse_macro_input!(input as ItemStruct);
+ if let syn::Fields::Named(ref mut fields) = item_struct.fields {
+ let result = syn::Field::parse_named.parse2(quote! {
+ input_types: arc_swap::ArcSwapOption<Vec<ConcreteDataType>>
+ });
+ match result {
+ Ok(field) => fields.named.push(field),
+ Err(e) => return e.into_compile_error().into(),
+ }
+ } else {
+ return quote_spanned!(
+ item_struct.fields.span() => compile_error!(
+ "This attribute macro needs to add fields to the its annotated struct, \
+ so the struct must have \"{}\".")
+ )
+ .into();
+ }
+ quote! {
+ #item_struct
+ }
+ .into()
+}
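As a usage note for the two helpers above, here is an editorial sketch mirroring the renamed `tests/test_derive.rs`. The struct name is hypothetical, and the using crate is assumed to depend on `common-query`, `datatypes`, and `arc-swap`, which the expanded code refers to.

use common_macro::{as_aggr_func_creator, AggrFuncTypeStore};

// The attribute injects an `input_types: arc_swap::ArcSwapOption<Vec<ConcreteDataType>>`
// field; the derive generates the `AggrFuncTypeStore` impl that reads and validates it.
#[as_aggr_func_creator]
#[derive(Debug, Default, AggrFuncTypeStore)]
struct MyAggrFuncCreator {}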
diff --git a/src/common/macro/src/lib.rs b/src/common/macro/src/lib.rs
new file mode 100644
index 000000000000..61c3bc0edc19
--- /dev/null
+++ b/src/common/macro/src/lib.rs
@@ -0,0 +1,89 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+mod aggr_func;
+mod print_caller;
+mod range_fn;
+
+use aggr_func::{impl_aggr_func_type_store, impl_as_aggr_func_creator};
+use print_caller::process_print_caller;
+use proc_macro::TokenStream;
+use range_fn::process_range_fn;
+use syn::{parse_macro_input, DeriveInput};
+
+/// Makes a struct implement the trait [AggrFuncTypeStore], which is necessary when writing a UDAF.
+/// This derive macro is expected to be used along with the attribute macro [as_aggr_func_creator].
+#[proc_macro_derive(AggrFuncTypeStore)]
+pub fn aggr_func_type_store_derive(input: TokenStream) -> TokenStream {
+ let ast = parse_macro_input!(input as DeriveInput);
+ impl_aggr_func_type_store(&ast)
+}
+
+/// A struct can be used as a creator for an aggregate function once it has been annotated with
+/// this attribute. The attribute adds a field to the struct which is intended to store the
+/// input data's types.
+/// This attribute is expected to be used along with the derive macro [AggrFuncTypeStore].
+#[proc_macro_attribute]
+pub fn as_aggr_func_creator(args: TokenStream, input: TokenStream) -> TokenStream {
+ impl_as_aggr_func_creator(args, input)
+}
+
+/// Attribute macro to convert an arithmetic function to a range function. The annotated function
+/// should accept several arrays as input and return a single value as output. This procedural
+/// macro works on any number of input parameters. The return type can be either a primitive type
+/// or one wrapped in `Option`.
+///
+/// # Example
+/// Take `count_over_time()` in PromQL as an example:
+/// ```rust, ignore
+/// /// The count of all values in the specified interval.
+/// #[range_fn(
+/// name = "CountOverTime",
+/// ret = "Float64Array",
+/// display_name = "prom_count_over_time"
+/// )]
+/// pub fn count_over_time(_: &TimestampMillisecondArray, values: &Float64Array) -> f64 {
+/// values.len() as f64
+/// }
+/// ```
+///
+/// # Arguments
+/// - `name`: The name of the generated [ScalarUDF] struct.
+/// - `ret`: The return type of the generated UDF function.
+/// - `display_name`: The display name of the generated UDF function.
+#[proc_macro_attribute]
+pub fn range_fn(args: TokenStream, input: TokenStream) -> TokenStream {
+ process_range_fn(args, input)
+}
+
+/// Attribute macro to print the caller to the annotated function.
+/// The caller is printed as its filename and the call site line number.
+///
+/// This macro works by injecting tracking code as the first statement of the annotated function
+/// body. The tracking code uses [backtrace-rs](https://crates.io/crates/backtrace) to get the
+/// callers, so the annotated crate must depend on the `backtrace-rs` crate.
+///
+/// # Arguments
+/// - `depth`: The max depth of call stack to print. Optional, defaults to 1.
+///
+/// # Example
+/// ```rust, ignore
+///
+/// #[print_caller(depth = 3)]
+/// fn foo() {}
+/// ```
+#[proc_macro_attribute]
+pub fn print_caller(args: TokenStream, input: TokenStream) -> TokenStream {
+ process_print_caller(args, input)
+}
diff --git a/src/common/macro/src/print_caller.rs b/src/common/macro/src/print_caller.rs
new file mode 100644
index 000000000000..c4510ddfa94e
--- /dev/null
+++ b/src/common/macro/src/print_caller.rs
@@ -0,0 +1,108 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use proc_macro::TokenStream;
+use quote::{quote, ToTokens};
+use syn::{parse_macro_input, AttributeArgs, ItemFn, Lit, Meta, NestedMeta};
+
+pub(crate) fn process_print_caller(args: TokenStream, input: TokenStream) -> TokenStream {
+ let mut depth = 1;
+
+ let args = parse_macro_input!(args as AttributeArgs);
+ for meta in args.iter() {
+ if let NestedMeta::Meta(Meta::NameValue(name_value)) = meta {
+ let ident = name_value
+ .path
+ .get_ident()
+ .expect("Expected an ident!")
+ .to_string();
+ if ident == "depth" {
+ let Lit::Int(i) = &name_value.lit else {
+ panic!("Expected 'depth' to be a valid int!")
+ };
+ depth = i.base10_parse::<usize>().expect("Invalid 'depth' value");
+ break;
+ }
+ }
+ }
+
+ let tokens: TokenStream = quote! {
+ {
+ let curr_file = file!();
+
+ let bt = backtrace::Backtrace::new();
+ let call_stack = bt
+ .frames()
+ .iter()
+ .skip_while(|f| {
+ !f.symbols().iter().any(|s| {
+ s.filename()
+ .map(|p| p.ends_with(curr_file))
+ .unwrap_or(false)
+ })
+ })
+ .skip(1)
+ .take(#depth);
+
+ let call_stack = call_stack
+ .map(|f| {
+ f.symbols()
+ .iter()
+ .map(|s| {
+ let filename = s
+ .filename()
+ .map(|p| format!("{:?}", p))
+ .unwrap_or_else(|| "unknown".to_string());
+
+ let lineno = s
+ .lineno()
+ .map(|l| format!("{}", l))
+ .unwrap_or_else(|| "unknown".to_string());
+
+ format!("filename: {}, lineno: {}", filename, lineno)
+ })
+ .collect::<Vec<String>>()
+ .join(", ")
+ })
+ .collect::<Vec<_>>();
+
+ match call_stack.len() {
+ 0 => common_telemetry::info!("unable to find call stack"),
+ 1 => common_telemetry::info!("caller: {}", call_stack[0]),
+ _ => {
+ let mut s = String::new();
+ s.push_str("[\n");
+ for e in call_stack {
+ s.push_str("\t");
+ s.push_str(&e);
+ s.push_str("\n");
+ }
+ s.push_str("]");
+ common_telemetry::info!("call stack: {}", s)
+ }
+ }
+ }
+ }
+ .into();
+
+ let stmt = match syn::parse(tokens) {
+ Ok(stmt) => stmt,
+ Err(e) => return e.into_compile_error().into(),
+ };
+
+ let mut item = parse_macro_input!(input as ItemFn);
+ item.block.stmts.insert(0, stmt);
+
+ item.into_token_stream().into()
+}
diff --git a/src/common/function-macro/src/range_fn.rs b/src/common/macro/src/range_fn.rs
similarity index 100%
rename from src/common/function-macro/src/range_fn.rs
rename to src/common/macro/src/range_fn.rs
diff --git a/src/common/function-macro/tests/test_derive.rs b/src/common/macro/tests/test_derive.rs
similarity index 93%
rename from src/common/function-macro/tests/test_derive.rs
rename to src/common/macro/tests/test_derive.rs
index db2b469e9b36..9c648c788dce 100644
--- a/src/common/function-macro/tests/test_derive.rs
+++ b/src/common/macro/tests/test_derive.rs
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use common_function_macro::{as_aggr_func_creator, AggrFuncTypeStore};
+use common_macro::{as_aggr_func_creator, AggrFuncTypeStore};
use static_assertions::{assert_fields, assert_impl_all};
#[as_aggr_func_creator]
diff --git a/src/promql/Cargo.toml b/src/promql/Cargo.toml
index dab4edd506d6..00f55ce296c5 100644
--- a/src/promql/Cargo.toml
+++ b/src/promql/Cargo.toml
@@ -11,7 +11,7 @@ bytemuck = "1.12"
catalog = { workspace = true }
common-catalog = { workspace = true }
common-error = { workspace = true }
-common-function-macro = { workspace = true }
+common-macro = { workspace = true }
common-telemetry = { workspace = true }
datafusion.workspace = true
datatypes = { workspace = true }
diff --git a/src/promql/src/functions/aggr_over_time.rs b/src/promql/src/functions/aggr_over_time.rs
index 5c9d0578d248..05428db47c2b 100644
--- a/src/promql/src/functions/aggr_over_time.rs
+++ b/src/promql/src/functions/aggr_over_time.rs
@@ -14,7 +14,7 @@
use std::sync::Arc;
-use common_function_macro::range_fn;
+use common_macro::range_fn;
use datafusion::arrow::array::{Float64Array, TimestampMillisecondArray};
use datafusion::arrow::datatypes::TimeUnit;
use datafusion::common::DataFusionError;
diff --git a/src/promql/src/functions/changes.rs b/src/promql/src/functions/changes.rs
index 4039a95f9a04..a8b29c9cbdac 100644
--- a/src/promql/src/functions/changes.rs
+++ b/src/promql/src/functions/changes.rs
@@ -17,7 +17,7 @@
use std::sync::Arc;
-use common_function_macro::range_fn;
+use common_macro::range_fn;
use datafusion::arrow::array::{Float64Array, TimestampMillisecondArray};
use datafusion::arrow::datatypes::TimeUnit;
use datafusion::common::DataFusionError;
diff --git a/src/promql/src/functions/deriv.rs b/src/promql/src/functions/deriv.rs
index 4ba0a30438ae..84e5c2e212de 100644
--- a/src/promql/src/functions/deriv.rs
+++ b/src/promql/src/functions/deriv.rs
@@ -17,7 +17,7 @@
use std::sync::Arc;
-use common_function_macro::range_fn;
+use common_macro::range_fn;
use datafusion::arrow::array::{Float64Array, TimestampMillisecondArray};
use datafusion::arrow::datatypes::TimeUnit;
use datafusion::common::DataFusionError;
diff --git a/src/promql/src/functions/resets.rs b/src/promql/src/functions/resets.rs
index a76ee65510f5..218e1908738a 100644
--- a/src/promql/src/functions/resets.rs
+++ b/src/promql/src/functions/resets.rs
@@ -17,7 +17,7 @@
use std::sync::Arc;
-use common_function_macro::range_fn;
+use common_macro::range_fn;
use datafusion::arrow::array::{Float64Array, TimestampMillisecondArray};
use datafusion::arrow::datatypes::TimeUnit;
use datafusion::common::DataFusionError;
diff --git a/src/query/Cargo.toml b/src/query/Cargo.toml
index 33e4564167cc..19d642e077e4 100644
--- a/src/query/Cargo.toml
+++ b/src/query/Cargo.toml
@@ -57,7 +57,7 @@ tokio.workspace = true
approx_eq = "0.1"
arrow.workspace = true
catalog = { workspace = true, features = ["testing"] }
-common-function-macro.workspace = true
+common-macro.workspace = true
format_num = "0.1"
num = "0.4"
num-traits = "0.2"
diff --git a/src/query/src/tests/my_sum_udaf_example.rs b/src/query/src/tests/my_sum_udaf_example.rs
index 363816b4d638..8220dcf72dfa 100644
--- a/src/query/src/tests/my_sum_udaf_example.rs
+++ b/src/query/src/tests/my_sum_udaf_example.rs
@@ -17,7 +17,7 @@ use std::marker::PhantomData;
use std::sync::Arc;
use common_function::scalars::aggregate::AggregateFunctionMeta;
-use common_function_macro::{as_aggr_func_creator, AggrFuncTypeStore};
+use common_macro::{as_aggr_func_creator, AggrFuncTypeStore};
use common_query::error::{CreateAccumulatorSnafu, Result as QueryResult};
use common_query::logical_plan::{Accumulator, AggregateFunctionCreator};
use common_query::prelude::*;
|
refactor
|
rename common-function-macro subcrate (#2418)
|
d5800d0b60673d662fd872b7aadfc28be276b63f
|
2022-10-20 08:13:15
|
LFC
|
feat: parse partition syntax in "create table" (#298)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index b6ff256ba847..c269433a84b3 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4755,6 +4755,8 @@ dependencies = [
"common-error",
"common-time",
"datatypes",
+ "itertools",
+ "once_cell",
"snafu",
"sqlparser",
"table-engine",
diff --git a/src/sql/Cargo.toml b/src/sql/Cargo.toml
index 5365b5d179ec..07de1893abf3 100644
--- a/src/sql/Cargo.toml
+++ b/src/sql/Cargo.toml
@@ -8,6 +8,8 @@ edition = "2021"
common-error = { path = "../common/error" }
common-time = { path = "../common/time" }
datatypes = { path = "../datatypes" }
+itertools = "0.10"
+once_cell = "1.10"
snafu = { version = "0.7", features = ["backtraces"] }
sqlparser = "0.15.0"
table-engine = { path = "../table-engine" }
diff --git a/src/sql/src/parsers/create_parser.rs b/src/sql/src/parsers/create_parser.rs
index 56daa3712efc..ad6bfd170145 100644
--- a/src/sql/src/parsers/create_parser.rs
+++ b/src/sql/src/parsers/create_parser.rs
@@ -1,16 +1,27 @@
-use snafu::ensure;
+use std::cmp::Ordering;
+
+use itertools::Itertools;
+use once_cell::sync::Lazy;
use snafu::ResultExt;
+use snafu::{ensure, OptionExt};
+use sqlparser::ast::Value;
+use sqlparser::dialect::keywords::Keyword;
use sqlparser::parser::IsOptional::Mandatory;
-use sqlparser::{dialect::keywords::Keyword, tokenizer::Token};
+use sqlparser::tokenizer::{Token, Word};
use table_engine::engine;
-use crate::ast::{ColumnDef, Ident, TableConstraint};
+use crate::ast::{ColumnDef, Ident, TableConstraint, Value as SqlValue};
use crate::error::{self, InvalidTimeIndexSnafu, Result, SyntaxSnafu};
use crate::parser::ParserContext;
-use crate::statements::create_table::{CreateTable, TIME_INDEX};
+use crate::statements::create_table::{CreateTable, PartitionEntry, Partitions, TIME_INDEX};
use crate::statements::statement::Statement;
+use crate::statements::{sql_data_type_to_concrete_data_type, sql_value_to_value};
const ENGINE: &str = "ENGINE";
+const MAXVALUE: &str = "MAXVALUE";
+
+static LESS: Lazy<Token> = Lazy::new(|| Token::make_keyword("LESS"));
+static THAN: Lazy<Token> = Lazy::new(|| Token::make_keyword("THAN"));
/// Parses create [table] statement
impl<'a> ParserContext<'a> {
@@ -27,13 +38,16 @@ impl<'a> ParserContext<'a> {
.parse_object_name()
.context(error::SyntaxSnafu { sql: self.sql })?;
let (columns, constraints) = self.parse_columns()?;
+
+ let partitions = self.parse_partitions()?;
+
let engine = self.parse_table_engine()?;
let options = self
.parser
.parse_options(Keyword::WITH)
.context(error::SyntaxSnafu { sql: self.sql })?;
- Ok(Statement::Create(CreateTable {
+ let create_table = CreateTable {
if_not_exists,
name: table_name,
columns,
@@ -41,9 +55,95 @@ impl<'a> ParserContext<'a> {
constraints,
options,
table_id: 0, // table id is assigned by catalog manager
+ partitions,
+ };
+ validate_create(&create_table)?;
+
+ Ok(Statement::Create(create_table))
+ }
+
+ // "PARTITION BY ..." syntax:
+ // https://dev.mysql.com/doc/refman/8.0/en/partitioning-columns-range.html
+ fn parse_partitions(&mut self) -> Result<Option<Partitions>> {
+ if !self.parser.parse_keyword(Keyword::PARTITION) {
+ return Ok(None);
+ }
+ self.parser
+ .expect_keywords(&[Keyword::BY, Keyword::RANGE, Keyword::COLUMNS])
+ .context(error::SyntaxSnafu { sql: self.sql })?;
+
+ let column_list = self
+ .parser
+ .parse_parenthesized_column_list(Mandatory)
+ .context(error::SyntaxSnafu { sql: self.sql })?;
+
+ let entries = self.parse_comma_separated(Self::parse_partition_entry)?;
+
+ Ok(Some(Partitions {
+ column_list,
+ entries,
}))
}
+ fn parse_partition_entry(&mut self) -> Result<PartitionEntry> {
+ self.parser
+ .expect_keyword(Keyword::PARTITION)
+ .context(error::SyntaxSnafu { sql: self.sql })?;
+
+ let name = self
+ .parser
+ .parse_identifier()
+ .context(error::SyntaxSnafu { sql: self.sql })?;
+
+ self.parser
+ .expect_keyword(Keyword::VALUES)
+ .and_then(|_| self.parser.expect_token(&LESS))
+ .and_then(|_| self.parser.expect_token(&THAN))
+ .context(error::SyntaxSnafu { sql: self.sql })?;
+
+ let value_list = self.parse_comma_separated(Self::parse_value_list)?;
+
+ Ok(PartitionEntry { name, value_list })
+ }
+
+ fn parse_value_list(&mut self) -> Result<SqlValue> {
+ let token = self.parser.peek_token();
+ let value = match token {
+ Token::Word(Word { value, .. }) if value == MAXVALUE => {
+ let _ = self.parser.next_token();
+ SqlValue::Number(MAXVALUE.to_string(), false)
+ }
+ _ => self
+ .parser
+ .parse_value()
+ .context(error::SyntaxSnafu { sql: self.sql })?,
+ };
+ Ok(value)
+ }
+
+    /// Parses a comma-separated list wrapped in "()", where each item is accepted by `F`
+ fn parse_comma_separated<T, F>(&mut self, mut f: F) -> Result<Vec<T>>
+ where
+ F: FnMut(&mut ParserContext<'a>) -> Result<T>,
+ {
+ self.parser
+ .expect_token(&Token::LParen)
+ .context(error::SyntaxSnafu { sql: self.sql })?;
+
+ let mut values = vec![];
+ while self.parser.peek_token() != Token::RParen {
+ values.push(f(self)?);
+ if !self.parser.consume_token(&Token::Comma) {
+ break;
+ }
+ }
+
+ self.parser
+ .expect_token(&Token::RParen)
+ .context(error::SyntaxSnafu { sql: self.sql })?;
+ Ok(values)
+ }
+
fn parse_columns(&mut self) -> Result<(Vec<ColumnDef>, Vec<TableConstraint>)> {
let mut columns = vec![];
let mut constraints = vec![];
@@ -157,6 +257,161 @@ impl<'a> ParserContext<'a> {
}
}
+fn validate_create(create_table: &CreateTable) -> Result<()> {
+ if let Some(partitions) = &create_table.partitions {
+ validate_partitions(&create_table.columns, partitions)?;
+ }
+ Ok(())
+}
+
+fn validate_partitions(columns: &[ColumnDef], partitions: &Partitions) -> Result<()> {
+ let partition_columns = ensure_partition_columns_defined(columns, partitions)?;
+
+ ensure_partition_names_no_duplicate(partitions)?;
+
+ ensure_value_list_len_matches_columns(partitions, &partition_columns)?;
+
+ let value_lists = ensure_value_lists_strictly_increased(partitions, partition_columns)?;
+
+ ensure_value_lists_bounded_by_maxvalue(value_lists)?;
+
+ Ok(())
+}
+
+/// Ensure that partition ranges fully cover all values.
+// Simply check that the last partition is bounded by "MAXVALUE"s.
+// MySQL does not have this restriction. However, I think we'd better have it because:
+// - It might save the user from adding more partitions by hand in the future, which is
+//   often a tedious task. Why not ask for one extra partition upfront and leave all
+//   other partition-related jobs to us? I think that's a reasonable argument to the user.
+// - It might save us from some ugly designs and code. The "MAXVALUE" bound is natural
+//   when dealing with values that are unspecified upfront. Without it, we would have to
+//   store and use the user-defined max bound everywhere, from calculating regions by
+//   partition rule in Frontend to automatically splitting and merging regions in Meta.
+fn ensure_value_lists_bounded_by_maxvalue(value_lists: Vec<&Vec<Value>>) -> Result<()> {
+ let is_maxvalue_bound = value_lists.last().map(|v| {
+ v.iter()
+ .all(|x| matches!(x, SqlValue::Number(s, _) if s == MAXVALUE))
+ });
+ ensure!(
+ matches!(is_maxvalue_bound, Some(true)),
+ error::InvalidSqlSnafu {
+ msg: "Please provide an extra partition that is bounded by 'MAXVALUE'."
+ }
+ );
+ Ok(())
+}
+
+/// Ensure that value lists of partitions are strictly increasing.
+fn ensure_value_lists_strictly_increased<'a>(
+ partitions: &'a Partitions,
+ partition_columns: Vec<&'a ColumnDef>,
+) -> Result<Vec<&'a Vec<Value>>> {
+ let value_lists = partitions
+ .entries
+ .iter()
+ .map(|x| &x.value_list)
+ .collect::<Vec<_>>();
+ for i in 1..value_lists.len() {
+ let mut equal_tuples = 0;
+ for (n, (x, y)) in value_lists[i - 1]
+ .iter()
+ .zip(value_lists[i].iter())
+ .enumerate()
+ {
+ let column = partition_columns[n];
+ let is_x_maxvalue = matches!(x, SqlValue::Number(s, _) if s == MAXVALUE);
+ let is_y_maxvalue = matches!(y, SqlValue::Number(s, _) if s == MAXVALUE);
+ match (is_x_maxvalue, is_y_maxvalue) {
+ (true, true) => {
+ equal_tuples += 1;
+ }
+ (false, false) => {
+ let column_name = &column.name.value;
+ let cdt = sql_data_type_to_concrete_data_type(&column.data_type)?;
+ let x = sql_value_to_value(column_name, &cdt, x)?;
+ let y = sql_value_to_value(column_name, &cdt, y)?;
+ match x.cmp(&y) {
+ Ordering::Less => break,
+ Ordering::Equal => equal_tuples += 1,
+ Ordering::Greater => return error::InvalidSqlSnafu {
+ msg: "VALUES LESS THAN value must be strictly increasing for each partition.",
+ }.fail()
+ }
+ }
+ (true, false) => return error::InvalidSqlSnafu {
+ msg: "VALUES LESS THAN value must be strictly increasing for each partition.",
+ }
+ .fail(),
+ (false, true) => break,
+ }
+ }
+ ensure!(
+ equal_tuples < partition_columns.len(),
+ error::InvalidSqlSnafu {
+ msg: "VALUES LESS THAN value must be strictly increasing for each partition.",
+ }
+ );
+ }
+ Ok(value_lists)
+}
+
+/// Ensure that value list's length matches the column list.
+fn ensure_value_list_len_matches_columns(
+ partitions: &Partitions,
+ partition_columns: &Vec<&ColumnDef>,
+) -> Result<()> {
+ for entry in partitions.entries.iter() {
+ ensure!(
+ entry.value_list.len() == partition_columns.len(),
+ error::InvalidSqlSnafu {
+ msg: "Partition value list does not match column list.",
+ }
+ );
+ }
+ Ok(())
+}
+
+/// Ensure that all columns used in "PARTITION BY RANGE COLUMNS" are defined in create table.
+fn ensure_partition_columns_defined<'a>(
+ columns: &'a [ColumnDef],
+ partitions: &'a Partitions,
+) -> Result<Vec<&'a ColumnDef>> {
+ partitions
+ .column_list
+ .iter()
+ .map(|x| {
+ // Normally the columns in "create table" won't be too many,
+ // a linear search to find the target every time is fine.
+ columns
+ .iter()
+ .find(|c| &c.name == x)
+ .context(error::InvalidSqlSnafu {
+ msg: format!("Partition column {:?} not defined!", x.value),
+ })
+ })
+ .collect::<Result<Vec<&ColumnDef>>>()
+}
+
+/// Ensure that partition names do not duplicate.
+fn ensure_partition_names_no_duplicate(partitions: &Partitions) -> Result<()> {
+ let partition_names = partitions
+ .entries
+ .iter()
+ .map(|x| &x.name.value)
+ .sorted()
+ .collect::<Vec<&String>>();
+ for w in partition_names.windows(2) {
+ ensure!(
+ w[0] != w[1],
+ error::InvalidSqlSnafu {
+ msg: format!("Duplicate partition names: {}", w[0]),
+ }
+ )
+ }
+ Ok(())
+}
+
#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
@@ -165,6 +420,237 @@ mod tests {
use super::*;
+ #[test]
+ fn test_validate_create() {
+ let sql = r"
+CREATE TABLE rcx ( a INT, b STRING, c INT )
+PARTITION BY RANGE COLUMNS(b, a) (
+ PARTITION r0 VALUES LESS THAN ('hz', 1000),
+ PARTITION r1 VALUES LESS THAN ('sh', 2000),
+ PARTITION r3 VALUES LESS THAN (MAXVALUE, MAXVALUE),
+)
+ENGINE=mito";
+ let result = ParserContext::create_with_dialect(sql, &GenericDialect {});
+ assert!(result.is_ok());
+
+ let sql = r"
+CREATE TABLE rcx ( a INT, b STRING, c INT )
+PARTITION BY RANGE COLUMNS(b, x) (
+ PARTITION r0 VALUES LESS THAN ('hz', 1000),
+ PARTITION r1 VALUES LESS THAN ('sh', 2000),
+ PARTITION r3 VALUES LESS THAN (MAXVALUE, MAXVALUE),
+)
+ENGINE=mito";
+ let result = ParserContext::create_with_dialect(sql, &GenericDialect {});
+ assert!(result
+ .unwrap_err()
+ .to_string()
+ .contains("Partition column \"x\" not defined!"));
+
+ let sql = r"
+CREATE TABLE rcx ( a INT, b STRING, c INT )
+PARTITION BY RANGE COLUMNS(b, a) (
+ PARTITION r0 VALUES LESS THAN ('hz', 1000),
+ PARTITION r1 VALUES LESS THAN ('sh', 2000),
+ PARTITION r2 VALUES LESS THAN ('sz', 3000),
+ PARTITION r1 VALUES LESS THAN (MAXVALUE, MAXVALUE),
+)
+ENGINE=mito";
+ let result = ParserContext::create_with_dialect(sql, &GenericDialect {});
+ assert!(result
+ .unwrap_err()
+ .to_string()
+ .contains("Duplicate partition names: r1"));
+
+ let sql = r"
+CREATE TABLE rcx ( a INT, b STRING, c INT )
+PARTITION BY RANGE COLUMNS(b, a) (
+ PARTITION r0 VALUES LESS THAN ('hz', 1000),
+ PARTITION r1 VALUES LESS THAN ('sh'),
+ PARTITION r3 VALUES LESS THAN (MAXVALUE, MAXVALUE),
+)
+ENGINE=mito";
+ let result = ParserContext::create_with_dialect(sql, &GenericDialect {});
+ assert!(result
+ .unwrap_err()
+ .to_string()
+ .contains("Partition value list does not match column list"));
+
+ let cases = vec![
+ r"
+CREATE TABLE rcx ( a INT, b STRING, c INT )
+PARTITION BY RANGE COLUMNS(b, a) (
+ PARTITION r0 VALUES LESS THAN ('sh', 1000),
+ PARTITION r1 VALUES LESS THAN ('hz', 2000),
+ PARTITION r3 VALUES LESS THAN (MAXVALUE, MAXVALUE),
+)
+ENGINE=mito",
+ r"
+CREATE TABLE rcx ( a INT, b STRING, c INT )
+PARTITION BY RANGE COLUMNS(b, a) (
+ PARTITION r0 VALUES LESS THAN ('hz', 2000),
+ PARTITION r1 VALUES LESS THAN ('hz', 1000),
+ PARTITION r3 VALUES LESS THAN (MAXVALUE, MAXVALUE),
+)
+ENGINE=mito",
+ r"
+CREATE TABLE rcx ( a INT, b STRING, c INT )
+PARTITION BY RANGE COLUMNS(b, a) (
+ PARTITION r0 VALUES LESS THAN ('hz', 1000),
+ PARTITION r1 VALUES LESS THAN ('hz', 1000),
+ PARTITION r3 VALUES LESS THAN (MAXVALUE, MAXVALUE),
+)
+ENGINE=mito",
+ r"
+CREATE TABLE rcx ( a INT, b STRING, c INT )
+PARTITION BY RANGE COLUMNS(b, a) (
+ PARTITION r0 VALUES LESS THAN ('hz', 1000),
+ PARTITION r3 VALUES LESS THAN (MAXVALUE, 2000),
+ PARTITION r1 VALUES LESS THAN ('sh', 3000),
+)
+ENGINE=mito",
+ ];
+ for sql in cases {
+ let result = ParserContext::create_with_dialect(sql, &GenericDialect {});
+ assert!(result
+ .unwrap_err()
+ .to_string()
+ .contains("VALUES LESS THAN value must be strictly increasing for each partition"));
+ }
+
+ let sql = r"
+CREATE TABLE rcx ( a INT, b STRING, c INT )
+PARTITION BY RANGE COLUMNS(b, a) (
+ PARTITION r0 VALUES LESS THAN ('hz', 1000),
+ PARTITION r1 VALUES LESS THAN ('sh', 2000),
+ PARTITION r3 VALUES LESS THAN (MAXVALUE, 9999),
+)
+ENGINE=mito";
+ let result = ParserContext::create_with_dialect(sql, &GenericDialect {});
+ assert!(result
+ .unwrap_err()
+ .to_string()
+ .contains("Please provide an extra partition that is bounded by 'MAXVALUE'."));
+ }
+
+ #[test]
+ fn test_parse_create_table_with_partitions() {
+ let sql = r"
+CREATE TABLE monitor (
+ host_id INT,
+ idc STRING,
+ ts TIMESTAMP,
+ cpu DOUBLE DEFAULT 0,
+ memory DOUBLE,
+ TIME INDEX (ts),
+ PRIMARY KEY (host),
+)
+PARTITION BY RANGE COLUMNS(idc, host_id) (
+ PARTITION r0 VALUES LESS THAN ('hz', 1000),
+ PARTITION r1 VALUES LESS THAN ('sh', 2000),
+ PARTITION r2 VALUES LESS THAN ('sh', 3000),
+ PARTITION r3 VALUES LESS THAN (MAXVALUE, MAXVALUE),
+)
+ENGINE=mito";
+ let result = ParserContext::create_with_dialect(sql, &GenericDialect {}).unwrap();
+ assert_eq!(result.len(), 1);
+ match &result[0] {
+ Statement::Create(c) => {
+ assert!(c.partitions.is_some());
+
+ let partitions = c.partitions.as_ref().unwrap();
+ let column_list = partitions
+ .column_list
+ .iter()
+ .map(|x| &x.value)
+ .collect::<Vec<&String>>();
+ assert_eq!(column_list, vec!["idc", "host_id"]);
+
+ let entries = &partitions.entries;
+ let partition_names = entries
+ .iter()
+ .map(|x| &x.name.value)
+ .collect::<Vec<&String>>();
+ assert_eq!(partition_names, vec!["r0", "r1", "r2", "r3"]);
+
+ assert_eq!(
+ entries[0].value_list,
+ vec![
+ SqlValue::SingleQuotedString("hz".to_string()),
+ SqlValue::Number("1000".to_string(), false)
+ ]
+ );
+ assert_eq!(
+ entries[1].value_list,
+ vec![
+ SqlValue::SingleQuotedString("sh".to_string()),
+ SqlValue::Number("2000".to_string(), false)
+ ]
+ );
+ assert_eq!(
+ entries[2].value_list,
+ vec![
+ SqlValue::SingleQuotedString("sh".to_string()),
+ SqlValue::Number("3000".to_string(), false)
+ ]
+ );
+ assert_eq!(
+ entries[3].value_list,
+ vec![
+ SqlValue::Number(MAXVALUE.to_string(), false),
+ SqlValue::Number(MAXVALUE.to_string(), false)
+ ]
+ );
+ }
+ _ => unreachable!(),
+ }
+ }
+
+ #[test]
+ fn test_parse_partitions_with_error_syntax() {
+ let sql = r"
+CREATE TABLE rcx ( a INT, b STRING, c INT )
+PARTITION RANGE COLUMNS(b, a) (
+ PARTITION r0 VALUES LESS THAN ('hz', 1000),
+ PARTITION r1 VALUES LESS THAN ('sh', 2000),
+ PARTITION r3 VALUES LESS THAN (MAXVALUE, MAXVALUE),
+)
+ENGINE=mito";
+ let result = ParserContext::create_with_dialect(sql, &GenericDialect {});
+ assert!(result
+ .unwrap_err()
+ .to_string()
+ .contains("sql parser error: Expected BY, found: RANGE"));
+
+ let sql = r"
+CREATE TABLE rcx ( a INT, b STRING, c INT )
+PARTITION BY RANGE COLUMNS(b, a) (
+ PARTITION r0 VALUES THAN ('hz', 1000),
+ PARTITION r1 VALUES LESS THAN ('sh', 2000),
+ PARTITION r3 VALUES LESS THAN (MAXVALUE, MAXVALUE),
+)
+ENGINE=mito";
+ let result = ParserContext::create_with_dialect(sql, &GenericDialect {});
+ assert!(result
+ .unwrap_err()
+ .to_string()
+ .contains("sql parser error: Expected LESS, found: THAN"));
+
+ let sql = r"
+CREATE TABLE rcx ( a INT, b STRING, c INT )
+PARTITION BY RANGE COLUMNS(b, a) (
+ PARTITION r0 VALUES LESS THAN ('hz', 1000),
+ PARTITION r1 VALUES LESS THAN ('sh', 2000),
+ PARTITION r3 VALUES LESS THAN (MAXVALUE, MAXVALU),
+)
+ENGINE=mito";
+ let result = ParserContext::create_with_dialect(sql, &GenericDialect {});
+ assert!(result
+ .unwrap_err()
+ .to_string()
+ .contains("sql parser error: Expected a concrete value, found: MAXVALU"));
+ }
+
fn assert_column_def(column: &ColumnDef, name: &str, data_type: &str) {
assert_eq!(column.name.to_string(), name);
assert_eq!(column.data_type.to_string(), data_type);
diff --git a/src/sql/src/statements.rs b/src/sql/src/statements.rs
index 0229b547a218..53ce293b6bc1 100644
--- a/src/sql/src/statements.rs
+++ b/src/sql/src/statements.rs
@@ -230,7 +230,7 @@ pub fn column_def_to_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
})
}
-fn sql_data_type_to_concrete_data_type(data_type: &SqlDataType) -> Result<ConcreteDataType> {
+pub fn sql_data_type_to_concrete_data_type(data_type: &SqlDataType) -> Result<ConcreteDataType> {
match data_type {
SqlDataType::BigInt(_) => Ok(ConcreteDataType::int64_datatype()),
SqlDataType::Int(_) => Ok(ConcreteDataType::int32_datatype()),
diff --git a/src/sql/src/statements/create_table.rs b/src/sql/src/statements/create_table.rs
index ec30e287ca84..ba145a6339f3 100644
--- a/src/sql/src/statements/create_table.rs
+++ b/src/sql/src/statements/create_table.rs
@@ -1,4 +1,4 @@
-use crate::ast::{ColumnDef, ObjectName, SqlOption, TableConstraint};
+use crate::ast::{ColumnDef, Ident, ObjectName, SqlOption, TableConstraint, Value as SqlValue};
/// Time index name, used in table constraints.
pub const TIME_INDEX: &str = "__time_index";
@@ -15,4 +15,17 @@ pub struct CreateTable {
pub constraints: Vec<TableConstraint>,
/// Table options in `WITH`.
pub options: Vec<SqlOption>,
+ pub partitions: Option<Partitions>,
+}
+
+#[derive(Debug, PartialEq, Eq, Clone)]
+pub struct Partitions {
+ pub column_list: Vec<Ident>,
+ pub entries: Vec<PartitionEntry>,
+}
+
+#[derive(Debug, PartialEq, Eq, Clone)]
+pub struct PartitionEntry {
+ pub name: Ident,
+ pub value_list: Vec<SqlValue>,
}
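The validation above compares partition bounds tuple by tuple, treating MAXVALUE as an upper bound that is greater than any concrete value. Below is a minimal standalone sketch of that ordering rule; the Bound enum and strictly_increasing helper are invented for illustration and are not part of the parser.

#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
enum Bound {
    // Concrete bounds compare by value; MaxValue compares greater than any of them
    // because derived Ord orders variants by declaration and it is declared last.
    Value(i64),
    MaxValue,
}

// Each partition's VALUES LESS THAN tuple must be strictly greater than the previous one.
// Vec comparison is lexicographic, mirroring the tuple-wise check in the diff.
fn strictly_increasing(bounds: &[Vec<Bound>]) -> bool {
    bounds.windows(2).all(|w| w[0] < w[1])
}

fn main() {
    let ok = vec![
        vec![Bound::Value(1000)],
        vec![Bound::Value(2000)],
        vec![Bound::MaxValue],
    ];
    let not_increasing = vec![
        vec![Bound::Value(2000)],
        vec![Bound::Value(1000)],
        vec![Bound::MaxValue],
    ];
    assert!(strictly_increasing(&ok));
    assert!(!strictly_increasing(&not_increasing));
}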
|
feat
|
parse partition syntax in "create table" (#298)
|
8fb97ea1d8de342e99cc34b988c0c843ec3dfd12
|
2023-03-21 08:49:43
|
dennis zhuang
|
fix: losing region numbers after altering table (#1209)
| false
|
diff --git a/src/mito/src/engine/tests.rs b/src/mito/src/engine/tests.rs
index 1368dc6a2925..417ecf98ee92 100644
--- a/src/mito/src/engine/tests.rs
+++ b/src/mito/src/engine/tests.rs
@@ -523,6 +523,7 @@ async fn test_alter_table_add_column() {
assert_eq!(new_schema.timestamp_column(), old_schema.timestamp_column());
assert_eq!(new_schema.version(), old_schema.version() + 1);
assert_eq!(new_meta.next_column_id, old_meta.next_column_id + 2);
+ assert_eq!(new_meta.region_numbers, old_meta.region_numbers);
}
#[tokio::test]
@@ -572,6 +573,7 @@ async fn test_alter_table_remove_column() {
assert_eq!(&[1, 2], &new_meta.value_indices[..]);
assert_eq!(new_schema.timestamp_column(), old_schema.timestamp_column());
assert_eq!(new_schema.version(), old_schema.version() + 1);
+ assert_eq!(new_meta.region_numbers, old_meta.region_numbers);
}
#[tokio::test]
diff --git a/src/table/src/metadata.rs b/src/table/src/metadata.rs
index 55f660ca7681..c95b0d2c8e3e 100644
--- a/src/table/src/metadata.rs
+++ b/src/table/src/metadata.rs
@@ -89,6 +89,9 @@ pub struct TableIdent {
pub version: TableVersion,
}
+/// The table metadata.
+/// Note: if you add new fields to this struct, please ensure the 'new_meta_builder' function still works.
+/// TODO(dennis): find a better way to ensure 'new_meta_builder' works when adding new fields.
#[derive(Clone, Debug, Builder, PartialEq, Eq)]
#[builder(pattern = "mutable")]
pub struct TableMeta {
@@ -197,6 +200,7 @@ impl TableMeta {
.engine_options(self.engine_options.clone())
.options(self.options.clone())
.created_on(self.created_on)
+ .region_numbers(self.region_numbers.clone())
.next_column_id(self.next_column_id);
builder
@@ -572,6 +576,7 @@ mod tests {
.unwrap();
let new_meta = add_columns_to_meta(&meta);
+ assert_eq!(meta.region_numbers, new_meta.region_numbers);
let names: Vec<String> = new_meta
.schema
@@ -606,6 +611,8 @@ mod tests {
.build()
.unwrap();
+ assert_eq!(meta.region_numbers, new_meta.region_numbers);
+
let names: Vec<String> = new_meta
.schema
.column_schemas()
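The bug class here is a builder that derives a new TableMeta from an old one but forgets to copy a field, silently resetting it to the default. The toy model below illustrates that pitfall; the Meta struct and derive_* methods are invented for illustration and are not the real TableMetaBuilder API.

#[derive(Debug, Default)]
struct Meta {
    next_column_id: u32,
    region_numbers: Vec<u32>,
}

impl Meta {
    // Buggy variant: region_numbers is silently reset to the default (empty).
    fn derive_buggy(&self) -> Meta {
        Meta {
            next_column_id: self.next_column_id + 1,
            ..Meta::default()
        }
    }

    // Fixed variant: every field is explicitly propagated from the old meta.
    fn derive_fixed(&self) -> Meta {
        Meta {
            next_column_id: self.next_column_id + 1,
            region_numbers: self.region_numbers.clone(),
        }
    }
}

fn main() {
    let old = Meta { next_column_id: 5, region_numbers: vec![0, 1, 2] };
    assert_ne!(old.region_numbers, old.derive_buggy().region_numbers);
    assert_eq!(old.region_numbers, old.derive_fixed().region_numbers);
}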
|
fix
|
losing region numbers after altering table (#1209)
|
9271b3b7bd7fecc66aede71b5f4cab5ecce2fcc6
|
2024-07-10 12:06:44
|
Ruihang Xia
|
docs: remove cargo test workspace command (#4325)
| false
|
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index dba6a0f8892f..97da27852030 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -55,7 +55,7 @@ GreptimeDB uses the [Apache 2.0 license](https://github.com/GreptimeTeam/greptim
- To ensure that community is free and confident in its ability to use your contributions, please sign the Contributor License Agreement (CLA) which will be incorporated in the pull request process.
- Make sure all files have proper license header (running `docker run --rm -v $(pwd):/github/workspace ghcr.io/korandoru/hawkeye-native:v3 format` from the project root).
- Make sure all your codes are formatted and follow the [coding style](https://pingcap.github.io/style-guide/rust/) and [style guide](docs/style-guide.md).
-- Make sure all unit tests are passed (using `cargo test --workspace` or [nextest](https://nexte.st/index.html) `cargo nextest run`).
+- Make sure all unit tests are passed using [nextest](https://nexte.st/index.html) `cargo nextest run`.
- Make sure all clippy warnings are fixed (you can check it locally by running `cargo clippy --workspace --all-targets -- -D warnings`).
#### `pre-commit` Hooks
|
docs
|
remove cargo test workspace command (#4325)
|
0270708d6d736f4376cc922b8e90e47bf37996fe
|
2023-08-10 09:29:41
|
Ning Sun
|
fix: correct grpc metric labels (#2136)
| false
|
diff --git a/src/servers/src/grpc/handler.rs b/src/servers/src/grpc/handler.rs
index 81e12862d55d..f110a0b3bc07 100644
--- a/src/servers/src/grpc/handler.rs
+++ b/src/servers/src/grpc/handler.rs
@@ -36,8 +36,8 @@ use crate::error::{
};
use crate::grpc::TonicResult;
use crate::metrics::{
- METRIC_AUTH_FAILURE, METRIC_CODE_LABEL, METRIC_SERVER_GRPC_DB_REQUEST_TIMER,
- METRIC_STATUS_LABEL, METRIC_TYPE_LABEL,
+ METRIC_AUTH_FAILURE, METRIC_CODE_LABEL, METRIC_DB_LABEL, METRIC_SERVER_GRPC_DB_REQUEST_TIMER,
+ METRIC_TYPE_LABEL,
};
use crate::query_handler::grpc::ServerGrpcQueryHandlerRef;
@@ -214,9 +214,9 @@ impl Drop for RequestTimer {
METRIC_SERVER_GRPC_DB_REQUEST_TIMER,
self.start.elapsed(),
&[
- (METRIC_CODE_LABEL, std::mem::take(&mut self.db)),
+ (METRIC_DB_LABEL, std::mem::take(&mut self.db)),
(METRIC_TYPE_LABEL, self.request_type.to_string()),
- (METRIC_STATUS_LABEL, self.status_code.to_string())
+ (METRIC_CODE_LABEL, self.status_code.to_string())
]
);
}
diff --git a/src/servers/src/http.rs b/src/servers/src/http.rs
index 8f8c7b005c44..af083e076740 100644
--- a/src/servers/src/http.rs
+++ b/src/servers/src/http.rs
@@ -70,8 +70,8 @@ use crate::configurator::ConfiguratorRef;
use crate::error::{AlreadyStartedSnafu, Result, StartHttpSnafu};
use crate::http::admin::{compact, flush};
use crate::metrics::{
- METRIC_HTTP_REQUESTS_ELAPSED, METRIC_HTTP_REQUESTS_TOTAL, METRIC_METHOD_LABEL,
- METRIC_PATH_LABEL, METRIC_STATUS_LABEL,
+ METRIC_CODE_LABEL, METRIC_HTTP_REQUESTS_ELAPSED, METRIC_HTTP_REQUESTS_TOTAL,
+ METRIC_METHOD_LABEL, METRIC_PATH_LABEL,
};
use crate::metrics_handler::MetricsHandler;
use crate::query_handler::grpc::ServerGrpcQueryHandlerRef;
@@ -710,7 +710,7 @@ pub(crate) async fn track_metrics<B>(req: Request<B>, next: Next<B>) -> impl Int
let labels = [
(METRIC_METHOD_LABEL, method.to_string()),
(METRIC_PATH_LABEL, path),
- (METRIC_STATUS_LABEL, status),
+ (METRIC_CODE_LABEL, status),
];
metrics::increment_counter!(METRIC_HTTP_REQUESTS_TOTAL, &labels);
diff --git a/src/servers/src/metrics.rs b/src/servers/src/metrics.rs
index 8e206059d824..1c857f9a0a75 100644
--- a/src/servers/src/metrics.rs
+++ b/src/servers/src/metrics.rs
@@ -80,7 +80,6 @@ pub(crate) const METRIC_GRPC_REQUESTS_TOTAL: &str = "servers.grpc_requests_total
pub(crate) const METRIC_GRPC_REQUESTS_ELAPSED: &str = "servers.grpc_requests_elapsed";
pub(crate) const METRIC_METHOD_LABEL: &str = "method";
pub(crate) const METRIC_PATH_LABEL: &str = "path";
-pub(crate) const METRIC_STATUS_LABEL: &str = "status";
pub(crate) const METRIC_JEMALLOC_RESIDENT: &str = "sys.jemalloc.resident";
pub(crate) const METRIC_JEMALLOC_ALLOCATED: &str = "sys.jemalloc.allocated";
@@ -185,7 +184,7 @@ where
let latency = start.elapsed().as_secs_f64();
let status = response.status().as_u16().to_string();
- let labels = [(METRIC_PATH_LABEL, path), (METRIC_STATUS_LABEL, status)];
+ let labels = [(METRIC_PATH_LABEL, path), (METRIC_CODE_LABEL, status)];
metrics::increment_counter!(METRIC_GRPC_REQUESTS_TOTAL, &labels);
metrics::histogram!(METRIC_GRPC_REQUESTS_ELAPSED, latency, &labels);
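The fix boils down to using one shared set of label-key constants for every server metric, so the same dimension is never emitted under two names ("status" vs. "code"). The sketch below shows that convention only; the key strings and the request_labels helper are assumptions for illustration, not the crate's actual constants.

const METRIC_DB_LABEL: &str = "db";
const METRIC_TYPE_LABEL: &str = "type";
const METRIC_CODE_LABEL: &str = "code";

// Build one label set and reuse it for every metric of the request,
// so counters and histograms always share identical dimension names.
fn request_labels(db: &str, request_type: &str, status_code: u16) -> Vec<(&'static str, String)> {
    vec![
        (METRIC_DB_LABEL, db.to_string()),
        (METRIC_TYPE_LABEL, request_type.to_string()),
        (METRIC_CODE_LABEL, status_code.to_string()),
    ]
}

fn main() {
    let labels = request_labels("public", "Inserts", 200);
    assert_eq!(labels[0], ("db", "public".to_string()));
    assert_eq!(labels[2], ("code", "200".to_string()));
}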
|
fix
|
correct grpc metric labels (#2136)
|
8491f65093ca10d52146da3c100a2ceda50ca02c
|
2023-02-14 14:03:55
|
shuiyisong
|
refactor: remove `obj_name_to_tab_ref` (#989)
| false
|
diff --git a/src/frontend/src/instance.rs b/src/frontend/src/instance.rs
index 81ebe805cb6d..4a21874df35b 100644
--- a/src/frontend/src/instance.rs
+++ b/src/frontend/src/instance.rs
@@ -36,7 +36,6 @@ use common_query::Output;
use common_recordbatch::RecordBatches;
use common_telemetry::logging::{debug, info};
use datafusion::sql::sqlparser::ast::ObjectName;
-use datafusion_common::TableReference;
use datanode::instance::sql::table_idents_to_full_name;
use datanode::instance::InstanceRef as DnInstanceRef;
use datatypes::schema::Schema;
@@ -45,7 +44,7 @@ use meta_client::client::{MetaClient, MetaClientBuilder};
use meta_client::MetaClientOpts;
use partition::manager::PartitionRuleManager;
use partition::route::TableRoutes;
-use query::query_engine::options::QueryOptions;
+use query::query_engine::options::{validate_catalog_and_schema, QueryOptions};
use servers::error as server_error;
use servers::interceptor::{SqlQueryInterceptor, SqlQueryInterceptorRef};
use servers::promql::{PromqlHandler, PromqlHandlerRef};
@@ -558,64 +557,34 @@ pub fn check_permission(
Statement::ShowCreateTable(_) | Statement::Alter(_) => {}
Statement::Insert(insert) => {
- let (catalog, schema, _) =
- table_idents_to_full_name(insert.table_name(), query_ctx.clone())
- .map_err(BoxedError::new)
- .context(ExternalSnafu)?;
-
- validate_param(&catalog, &schema, query_ctx)?;
+ validate_param(insert.table_name(), query_ctx)?;
}
Statement::CreateTable(stmt) => {
- let tab_ref = obj_name_to_tab_ref(&stmt.name)?;
- validate_tab_ref(tab_ref, query_ctx)?;
+ validate_param(&stmt.name, query_ctx)?;
}
Statement::DropTable(drop_stmt) => {
- let tab_ref = obj_name_to_tab_ref(drop_stmt.table_name())?;
- validate_tab_ref(tab_ref, query_ctx)?;
+ validate_param(drop_stmt.table_name(), query_ctx)?;
}
Statement::ShowTables(stmt) => {
if let Some(database) = &stmt.database {
- validate_param(&query_ctx.current_catalog(), database, query_ctx)?;
+ validate_catalog_and_schema(&query_ctx.current_catalog(), database, query_ctx)
+ .map_err(BoxedError::new)
+ .context(SqlExecInterceptedSnafu)?;
}
}
Statement::DescribeTable(stmt) => {
- let tab_ref = obj_name_to_tab_ref(stmt.name())?;
- validate_tab_ref(tab_ref, query_ctx)?;
+ validate_param(stmt.name(), query_ctx)?;
}
}
Ok(())
}
-fn obj_name_to_tab_ref(obj: &ObjectName) -> Result<TableReference> {
- match &obj.0[..] {
- [table] => Ok(TableReference::Bare {
- table: &table.value,
- }),
- [schema, table] => Ok(TableReference::Partial {
- schema: &schema.value,
- table: &table.value,
- }),
- [catalog, schema, table] => Ok(TableReference::Full {
- catalog: &catalog.value,
- schema: &schema.value,
- table: &table.value,
- }),
- _ => error::InvalidSqlSnafu {
- err_msg: format!(
- "expect table name to be <catalog>.<schema>.<table>, <schema>.<table> or <table>, actual: {obj}",
- ),
- }.fail(),
- }
-}
-
-fn validate_tab_ref(tab_ref: TableReference, query_ctx: &QueryContextRef) -> Result<()> {
- query::query_engine::options::validate_table_references(tab_ref, query_ctx)
+fn validate_param(name: &ObjectName, query_ctx: &QueryContextRef) -> Result<()> {
+ let (catalog, schema, _) = table_idents_to_full_name(name, query_ctx.clone())
.map_err(BoxedError::new)
- .context(SqlExecInterceptedSnafu)
-}
+ .context(ExternalSnafu)?;
-fn validate_param(catalog: &str, schema: &str, query_ctx: &QueryContextRef) -> Result<()> {
- query::query_engine::options::validate_catalog_and_schema(catalog, schema, query_ctx)
+ validate_catalog_and_schema(&catalog, &schema, query_ctx)
.map_err(BoxedError::new)
.context(SqlExecInterceptedSnafu)
}
diff --git a/src/query/src/query_engine/options.rs b/src/query/src/query_engine/options.rs
index 4bf10401d780..00f584ad0da7 100644
--- a/src/query/src/query_engine/options.rs
+++ b/src/query/src/query_engine/options.rs
@@ -23,6 +23,7 @@ pub struct QueryOptions {
pub disallow_cross_schema_query: bool,
}
+// TODO(shuiyisong): remove one method after #559 is done
pub fn validate_catalog_and_schema(
catalog: &str,
schema: &str,
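table_idents_to_full_name, which the new validate_param delegates to, resolves a possibly-partial object name against the session defaults before the catalog/schema check runs. Here is a self-contained sketch of that resolution shape; the resolve_name helper is hypothetical and is not the real function.

// Resolve <table>, <schema>.<table> or <catalog>.<schema>.<table> against defaults.
fn resolve_name<'a>(
    parts: &'a [&'a str],
    default_catalog: &'a str,
    default_schema: &'a str,
) -> Option<(&'a str, &'a str, &'a str)> {
    match parts {
        &[table] => Some((default_catalog, default_schema, table)),
        &[schema, table] => Some((default_catalog, schema, table)),
        &[catalog, schema, table] => Some((catalog, schema, table)),
        _ => None, // anything longer is rejected
    }
}

fn main() {
    assert_eq!(
        resolve_name(&["monitor"], "greptime", "public"),
        Some(("greptime", "public", "monitor"))
    );
    assert_eq!(
        resolve_name(&["db", "monitor"], "greptime", "public"),
        Some(("greptime", "db", "monitor"))
    );
    assert_eq!(resolve_name(&["a", "b", "c", "d"], "greptime", "public"), None);
}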
|
refactor
|
remove `obj_name_to_tab_ref` (#989)
|
b48c851b96021db1395671e16925e40317c1a7ba
|
2023-02-23 17:56:47
|
Lei, HUANG
|
fix: support datetime type parsing (#1071)
| false
|
diff --git a/src/sql/src/statements.rs b/src/sql/src/statements.rs
index fd2486f724d9..944382daacd7 100644
--- a/src/sql/src/statements.rs
+++ b/src/sql/src/statements.rs
@@ -30,10 +30,8 @@ use std::str::FromStr;
use api::helper::ColumnDataTypeWrapper;
use common_base::bytes::Bytes;
use common_time::Timestamp;
-use datatypes::data_type::DataType;
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::{ColumnDefaultConstraint, ColumnSchema};
-use datatypes::types::DateTimeType;
use datatypes::value::Value;
use snafu::{ensure, OptionExt, ResultExt};
@@ -305,25 +303,7 @@ pub fn sql_data_type_to_concrete_data_type(data_type: &SqlDataType) -> Result<Co
SqlDataType::Boolean => Ok(ConcreteDataType::boolean_datatype()),
SqlDataType::Date => Ok(ConcreteDataType::date_datatype()),
SqlDataType::Varbinary(_) => Ok(ConcreteDataType::binary_datatype()),
- SqlDataType::Custom(obj_name, _) => match &obj_name.0[..] {
- [type_name] => {
- if type_name
- .value
- .eq_ignore_ascii_case(DateTimeType::default().name())
- {
- Ok(ConcreteDataType::datetime_datatype())
- } else {
- error::SqlTypeNotSupportedSnafu {
- t: data_type.clone(),
- }
- .fail()
- }
- }
- _ => error::SqlTypeNotSupportedSnafu {
- t: data_type.clone(),
- }
- .fail(),
- },
+ SqlDataType::Datetime(_) => Ok(ConcreteDataType::datetime_datatype()),
SqlDataType::Timestamp(_, _) => Ok(ConcreteDataType::timestamp_millisecond_datatype()),
_ => error::SqlTypeNotSupportedSnafu {
t: data_type.clone(),
@@ -340,10 +320,9 @@ mod tests {
use common_time::timestamp::TimeUnit;
use datatypes::types::BooleanType;
use datatypes::value::OrderedFloat;
- use sqlparser::ast::ObjectName;
use super::*;
- use crate::ast::{Ident, TimezoneInfo};
+ use crate::ast::TimezoneInfo;
use crate::statements::ColumnOption;
fn check_type(sql_type: SqlDataType, data_type: ConcreteDataType) {
@@ -382,10 +361,6 @@ mod tests {
check_type(SqlDataType::Double, ConcreteDataType::float64_datatype());
check_type(SqlDataType::Boolean, ConcreteDataType::boolean_datatype());
check_type(SqlDataType::Date, ConcreteDataType::date_datatype());
- check_type(
- SqlDataType::Custom(ObjectName(vec![Ident::new("datetime")]), vec![]),
- ConcreteDataType::datetime_datatype(),
- );
check_type(
SqlDataType::Timestamp(None, TimezoneInfo::None),
ConcreteDataType::timestamp_millisecond_datatype(),
@@ -410,6 +385,10 @@ mod tests {
SqlDataType::UnsignedTinyInt(None),
ConcreteDataType::uint8_datatype(),
);
+ check_type(
+ SqlDataType::Datetime(None),
+ ConcreteDataType::datetime_datatype(),
+ )
}
#[test]
|
fix
|
support datetime type parsing (#1071)
|
c78043d526134b6cad19151e5ffd25aaefc778fe
|
2024-05-27 12:45:30
|
tison
|
build(deps): merge tower deps to workspace (#4036)
| false
|
diff --git a/Cargo.toml b/Cargo.toml
index f135177a14b4..a5b1f5aed9cc 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -172,6 +172,7 @@ tokio-stream = { version = "0.1" }
tokio-util = { version = "0.7", features = ["io-util", "compat"] }
toml = "0.8.8"
tonic = { version = "0.11", features = ["tls", "gzip", "zstd"] }
+tower = { version = "0.4" }
uuid = { version = "1.7", features = ["serde", "v4", "fast-rng"] }
zstd = "0.13"
diff --git a/src/common/grpc/Cargo.toml b/src/common/grpc/Cargo.toml
index abf57978186e..a823e427a51c 100644
--- a/src/common/grpc/Cargo.toml
+++ b/src/common/grpc/Cargo.toml
@@ -25,7 +25,7 @@ prost.workspace = true
snafu.workspace = true
tokio.workspace = true
tonic.workspace = true
-tower = "0.4"
+tower.workspace = true
[dev-dependencies]
criterion = "0.4"
diff --git a/src/frontend/Cargo.toml b/src/frontend/Cargo.toml
index 4bc0c52c7a36..2b8d5c746f60 100644
--- a/src/frontend/Cargo.toml
+++ b/src/frontend/Cargo.toml
@@ -68,5 +68,5 @@ datanode.workspace = true
futures = "0.3"
meta-srv = { workspace = true, features = ["mock"] }
strfmt = "0.2"
-tower = "0.4"
+tower.workspace = true
uuid.workspace = true
diff --git a/src/meta-client/Cargo.toml b/src/meta-client/Cargo.toml
index 6ea3bdb73070..c0cb0d3e89b3 100644
--- a/src/meta-client/Cargo.toml
+++ b/src/meta-client/Cargo.toml
@@ -27,6 +27,6 @@ tonic.workspace = true
datatypes.workspace = true
futures = "0.3"
meta-srv = { workspace = true, features = ["mock"] }
-tower = "0.4"
+tower.workspace = true
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
diff --git a/src/meta-srv/Cargo.toml b/src/meta-srv/Cargo.toml
index 923b704a4aec..458832b34d43 100644
--- a/src/meta-srv/Cargo.toml
+++ b/src/meta-srv/Cargo.toml
@@ -55,7 +55,7 @@ tokio.workspace = true
tokio-stream = { workspace = true, features = ["net"] }
toml.workspace = true
tonic.workspace = true
-tower = "0.4"
+tower.workspace = true
typetag = "0.2"
url = "2.3"
diff --git a/src/servers/Cargo.toml b/src/servers/Cargo.toml
index b6dc9c2cd508..b355539ea713 100644
--- a/src/servers/Cargo.toml
+++ b/src/servers/Cargo.toml
@@ -100,7 +100,7 @@ tokio-rustls = "0.25"
tokio-stream = { workspace = true, features = ["net"] }
tonic.workspace = true
tonic-reflection = "0.11"
-tower = { version = "0.4", features = ["full"] }
+tower = { workspace = true, features = ["full"] }
tower-http = { version = "0.4", features = ["full"] }
urlencoding = "2.1"
zstd.workspace = true
diff --git a/tests-integration/Cargo.toml b/tests-integration/Cargo.toml
index fe6e3e3b0600..2473cfc320ad 100644
--- a/tests-integration/Cargo.toml
+++ b/tests-integration/Cargo.toml
@@ -73,7 +73,7 @@ time = "0.3"
tokio.workspace = true
tokio-stream = { workspace = true, features = ["net"] }
tonic.workspace = true
-tower = "0.4"
+tower.workspace = true
uuid.workspace = true
zstd.workspace = true
|
build
|
merge tower deps to workspace (#4036)
|
301656d568d836189046a8416dde41f81e3b07bd
|
2023-02-15 13:29:00
|
shuiyisong
|
fix: rename `schema` to `db` in http param (#1008)
| false
|
diff --git a/src/servers/src/http/handler.rs b/src/servers/src/http/handler.rs
index 0a05d84ac3e1..8cf2f77275fe 100644
--- a/src/servers/src/http/handler.rs
+++ b/src/servers/src/http/handler.rs
@@ -63,13 +63,13 @@ pub async fn sql(
Json(resp.with_execution_time(start.elapsed().as_millis()))
}
-// TODO(ruihang): add db param and form data support
#[derive(Debug, Default, Serialize, Deserialize, JsonSchema)]
pub struct PromqlQuery {
pub query: String,
pub start: String,
pub end: String,
pub step: String,
+ pub db: Option<String>,
}
/// Handler to execute promql
@@ -82,7 +82,7 @@ pub async fn promql(
) -> Json<JsonResponse> {
let sql_handler = &state.sql_handler;
let start = Instant::now();
- let resp = match super::query_context_from_db(sql_handler.clone(), None) {
+ let resp = match super::query_context_from_db(sql_handler.clone(), params.db) {
Ok(query_ctx) => {
JsonResponse::from_output(sql_handler.do_promql_query(¶ms.query, query_ctx).await)
.await
diff --git a/src/servers/src/http/script.rs b/src/servers/src/http/script.rs
index 84bddde435b9..881a4d555165 100644
--- a/src/servers/src/http/script.rs
+++ b/src/servers/src/http/script.rs
@@ -51,7 +51,7 @@ pub async fn scripts(
RawBody(body): RawBody,
) -> Json<JsonResponse> {
if let Some(script_handler) = &state.script_handler {
- let schema = params.schema.as_ref();
+ let schema = params.db.as_ref();
if schema.is_none() || schema.unwrap().is_empty() {
json_err!("invalid schema")
@@ -83,7 +83,7 @@ pub async fn scripts(
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct ScriptQuery {
- pub schema: Option<String>,
+ pub db: Option<String>,
pub name: Option<String>,
}
@@ -95,7 +95,7 @@ pub async fn run_script(
) -> Json<JsonResponse> {
if let Some(script_handler) = &state.script_handler {
let start = Instant::now();
- let schema = params.schema.as_ref();
+ let schema = params.db.as_ref();
if schema.is_none() || schema.unwrap().is_empty() {
json_err!("invalid schema")
diff --git a/src/servers/tests/http/http_handler_test.rs b/src/servers/tests/http/http_handler_test.rs
index 401418908d27..3206c33abd09 100644
--- a/src/servers/tests/http/http_handler_test.rs
+++ b/src/servers/tests/http/http_handler_test.rs
@@ -154,14 +154,14 @@ def test(n):
fn create_script_query() -> Query<script_handler::ScriptQuery> {
Query(script_handler::ScriptQuery {
- schema: Some("test".to_string()),
+ db: Some("test".to_string()),
name: Some("test".to_string()),
})
}
fn create_invalid_script_query() -> Query<script_handler::ScriptQuery> {
Query(script_handler::ScriptQuery {
- schema: None,
+ db: None,
name: None,
})
}
diff --git a/tests-integration/tests/http.rs b/tests-integration/tests/http.rs
index 66999808bbf2..f67458e91a9a 100644
--- a/tests-integration/tests/http.rs
+++ b/tests-integration/tests/http.rs
@@ -317,7 +317,7 @@ pub async fn test_scripts_api(store_type: StorageType) {
let client = TestClient::new(app);
let res = client
- .post("/v1/scripts?schema=schema_test&name=test")
+ .post("/v1/scripts?db=schema_test&name=test")
.body(
r#"
@copr(sql='select number from numbers limit 10', args=['number'], returns=['n'])
@@ -335,7 +335,7 @@ def test(n):
// call script
let res = client
- .post("/v1/run-script?schema=schema_test&name=test")
+ .post("/v1/run-script?db=schema_test&name=test")
.send()
.await;
assert_eq!(res.status(), StatusCode::OK);
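After the rename, clients pass the database through the db query parameter and schema is simply an unknown key. The dependency-free sketch below shows that lookup; the query_param helper is illustrative only, since the real handlers deserialize the query string through serde.

// Pull a single key out of a raw query string such as "db=schema_test&name=test".
fn query_param<'a>(query: &'a str, key: &str) -> Option<&'a str> {
    query
        .split('&')
        .filter_map(|pair| pair.split_once('='))
        .find(|(k, _)| *k == key)
        .map(|(_, v)| v)
}

fn main() {
    let q = "db=schema_test&name=test";
    assert_eq!(query_param(q, "db"), Some("schema_test"));
    assert_eq!(query_param(q, "schema"), None); // the old parameter name is no longer recognized
    assert_eq!(query_param(q, "name"), Some("test"));
}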
|
fix
|
rename `schema` to `db` in http param (#1008)
|
4151d7a8eae4d7f86bba7becdc0c98fbf2b40885
|
2023-05-11 14:24:28
|
Ning Sun
|
fix: allow cross-schema query on information_schema (#1568)
| false
|
diff --git a/src/catalog/src/table_source.rs b/src/catalog/src/table_source.rs
index bce848b78885..72a0c8fe91c7 100644
--- a/src/catalog/src/table_source.rs
+++ b/src/catalog/src/table_source.rs
@@ -62,7 +62,8 @@ impl DfTableSourceProvider {
TableReference::Bare { .. } => (),
TableReference::Partial { schema, .. } => {
ensure!(
- schema.as_ref() == self.default_schema,
+ schema.as_ref() == self.default_schema
+ || schema.as_ref() == INFORMATION_SCHEMA_NAME,
QueryAccessDeniedSnafu {
catalog: &self.default_catalog,
schema: schema.as_ref(),
@@ -74,7 +75,8 @@ impl DfTableSourceProvider {
} => {
ensure!(
catalog.as_ref() == self.default_catalog
- && schema.as_ref() == self.default_schema,
+ && (schema.as_ref() == self.default_schema
+ || schema.as_ref() == INFORMATION_SCHEMA_NAME),
QueryAccessDeniedSnafu {
catalog: catalog.as_ref(),
schema: schema.as_ref()
@@ -191,5 +193,25 @@ mod tests {
};
let result = table_provider.resolve_table_ref(table_ref);
assert!(result.is_err());
+
+ let table_ref = TableReference::Partial {
+ schema: Cow::Borrowed("information_schema"),
+ table: Cow::Borrowed("columns"),
+ };
+ assert!(table_provider.resolve_table_ref(table_ref).is_ok());
+
+ let table_ref = TableReference::Full {
+ catalog: Cow::Borrowed("greptime"),
+ schema: Cow::Borrowed("information_schema"),
+ table: Cow::Borrowed("columns"),
+ };
+ assert!(table_provider.resolve_table_ref(table_ref).is_ok());
+
+ let table_ref = TableReference::Full {
+ catalog: Cow::Borrowed("dummy"),
+ schema: Cow::Borrowed("information_schema"),
+ table: Cow::Borrowed("columns"),
+ };
+ assert!(table_provider.resolve_table_ref(table_ref).is_err());
}
}
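Stripped of the TableReference plumbing, the rule this diff encodes is small: a schema passes the cross-schema check when it is either the session's default schema or the shared information_schema. A minimal restatement, with illustrative names only:

const INFORMATION_SCHEMA: &str = "information_schema";

// A schema is queryable if it is the session default or the shared information_schema.
fn schema_allowed(schema: &str, default_schema: &str) -> bool {
    schema == default_schema || schema == INFORMATION_SCHEMA
}

fn main() {
    assert!(schema_allowed("public", "public"));
    assert!(schema_allowed("information_schema", "public"));
    assert!(!schema_allowed("other", "public"));
}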
|
fix
|
allow cross-schema query on information_schema (#1568)
|
2458b4edd5eb7bb68960b3f3566020372249f9f9
|
2023-04-03 09:32:13
|
Eugene Tolbakov
|
feat(changes): add initial implementation (#1304)
| false
|
diff --git a/src/promql/src/functions/changes.rs b/src/promql/src/functions/changes.rs
index 4ea6d74255cb..4039a95f9a04 100644
--- a/src/promql/src/functions/changes.rs
+++ b/src/promql/src/functions/changes.rs
@@ -12,5 +12,107 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-//! Implementation of [`reset`](https://prometheus.io/docs/prometheus/latest/querying/functions/#changes) in PromQL. Refer to the [original
+//! Implementation of [`changes`](https://prometheus.io/docs/prometheus/latest/querying/functions/#changes) in PromQL. Refer to the [original
//! implementation](https://github.com/prometheus/prometheus/blob/main/promql/functions.go#L1023-L1040).
+
+use std::sync::Arc;
+
+use common_function_macro::range_fn;
+use datafusion::arrow::array::{Float64Array, TimestampMillisecondArray};
+use datafusion::arrow::datatypes::TimeUnit;
+use datafusion::common::DataFusionError;
+use datafusion::logical_expr::{ScalarUDF, Signature, TypeSignature, Volatility};
+use datafusion::physical_plan::ColumnarValue;
+use datatypes::arrow::array::Array;
+use datatypes::arrow::datatypes::DataType;
+
+use crate::functions::extract_array;
+use crate::range_array::RangeArray;
+
+/// used to count the number of value changes that occur within a specific time range
+#[range_fn(name = "Changes", ret = "Float64Array", display_name = "prom_changes")]
+pub fn changes(_: &TimestampMillisecondArray, values: &Float64Array) -> Option<f64> {
+ if values.is_empty() {
+ None
+ } else {
+ let (first, rest) = values.values().split_first().unwrap();
+ let mut num_changes = 0;
+ let mut prev_element = first;
+ for cur_element in rest {
+ if cur_element != prev_element && !(cur_element.is_nan() && prev_element.is_nan()) {
+ num_changes += 1;
+ }
+ prev_element = cur_element;
+ }
+ Some(num_changes as f64)
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use super::*;
+ use crate::functions::test_util::simple_range_udf_runner;
+
+ // build timestamp range and value range arrays for test
+ fn build_test_range_arrays(
+ timestamps: Vec<i64>,
+ values: Vec<f64>,
+ ranges: Vec<(u32, u32)>,
+ ) -> (RangeArray, RangeArray) {
+ let ts_array = Arc::new(TimestampMillisecondArray::from_iter(
+ timestamps.into_iter().map(Some),
+ ));
+ let values_array = Arc::new(Float64Array::from_iter(values));
+
+ let ts_range_array = RangeArray::from_ranges(ts_array, ranges.clone()).unwrap();
+ let value_range_array = RangeArray::from_ranges(values_array, ranges).unwrap();
+
+ (ts_range_array, value_range_array)
+ }
+
+ #[test]
+ fn calculate_changes() {
+ let timestamps = vec![
+ 1000i64, 3000, 5000, 7000, 9000, 11000, 13000, 15000, 17000, 200000, 500000,
+ ];
+ let ranges = vec![
+ (0, 1),
+ (0, 4),
+ (0, 6),
+ (0, 10),
+ (0, 0), // empty range
+ ];
+
+ // assertion 1
+ let values_1 = vec![1.0, 2.0, 3.0, 0.0, 1.0, 0.0, 0.0, 1.0, 2.0, 0.0];
+ let (ts_array_1, value_array_1) =
+ build_test_range_arrays(timestamps.clone(), values_1, ranges.clone());
+ simple_range_udf_runner(
+ Changes::scalar_udf(),
+ ts_array_1,
+ value_array_1,
+ vec![Some(0.0), Some(3.0), Some(5.0), Some(8.0), None],
+ );
+
+ // assertion 2
+ let values_2 = vec![1.0, 2.0, 3.0, 4.0, 5.0, 1.0, 2.0, 3.0, 4.0, 5.0];
+ let (ts_array_2, value_array_2) =
+ build_test_range_arrays(timestamps.clone(), values_2, ranges.clone());
+ simple_range_udf_runner(
+ Changes::scalar_udf(),
+ ts_array_2,
+ value_array_2,
+ vec![Some(0.0), Some(3.0), Some(5.0), Some(9.0), None],
+ );
+
+ // assertion 3
+ let values_3 = vec![0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0];
+ let (ts_array_3, value_array_3) = build_test_range_arrays(timestamps, values_3, ranges);
+ simple_range_udf_runner(
+ Changes::scalar_udf(),
+ ts_array_3,
+ value_array_3,
+ vec![Some(0.0), Some(0.0), Some(1.0), Some(1.0), None],
+ );
+ }
+}
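The counting rule itself is independent of the arrow arrays and the range_fn macro: walk adjacent samples and count the pairs that differ, except when both are NaN. Below is a plain-Rust restatement of just that rule, assumed equivalent and given for illustration only.

// Count value changes in a window; an empty window yields None, like the original.
fn changes(values: &[f64]) -> Option<f64> {
    let (first, rest) = values.split_first()?;
    let mut num_changes = 0u32;
    let mut prev = *first;
    for &cur in rest {
        // Two NaNs in a row do not count as a change.
        if cur != prev && !(cur.is_nan() && prev.is_nan()) {
            num_changes += 1;
        }
        prev = cur;
    }
    Some(num_changes as f64)
}

fn main() {
    assert_eq!(changes(&[]), None);
    assert_eq!(changes(&[1.0]), Some(0.0));
    assert_eq!(changes(&[1.0, 2.0, 2.0, 3.0]), Some(2.0));
    assert_eq!(changes(&[f64::NAN, f64::NAN, 1.0]), Some(1.0));
}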
|
feat
|
add initial implementation (#1304)
|
408dd55a2fb96582d000178b12d8d58a4d0e9f04
|
2025-03-07 06:11:22
|
yihong
|
fix: flaky test in sqlness by fix random port (#5657)
| false
|
diff --git a/tests/runner/src/server_mode.rs b/tests/runner/src/server_mode.rs
index 585c9be272ac..39f47a542443 100644
--- a/tests/runner/src/server_mode.rs
+++ b/tests/runner/src/server_mode.rs
@@ -12,7 +12,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::collections::HashSet;
use std::path::Path;
+use std::sync::{Mutex, OnceLock};
use serde::Serialize;
use tinytemplate::TinyTemplate;
@@ -22,6 +24,31 @@ use crate::{util, ServerAddr};
const DEFAULT_LOG_LEVEL: &str = "--log-level=debug,hyper=warn,tower=warn,datafusion=warn,reqwest=warn,sqlparser=warn,h2=info,opendal=info";
+static USED_PORTS: OnceLock<Mutex<HashSet<u16>>> = OnceLock::new();
+
+fn get_used_ports() -> &'static Mutex<HashSet<u16>> {
+ USED_PORTS.get_or_init(|| Mutex::new(HashSet::new()))
+}
+
+fn get_unique_random_port() -> u16 {
+    // Try at most 100 times to find an unused port instead of looping forever.

+ const MAX_ATTEMPTS: usize = 100;
+
+ for _ in 0..MAX_ATTEMPTS {
+ let p = util::get_random_port();
+ let mut used = get_used_ports().lock().unwrap();
+ if !used.contains(&p) {
+ used.insert(p);
+ return p;
+ }
+ }
+
+ panic!(
+ "Failed to find an unused port after {} attempts",
+ MAX_ATTEMPTS
+ );
+}
+
#[derive(Clone)]
pub enum ServerMode {
Standalone {
@@ -81,10 +108,10 @@ struct ConfigContext {
impl ServerMode {
pub fn random_standalone() -> Self {
- let http_port = util::get_random_port();
- let rpc_port = util::get_random_port();
- let mysql_port = util::get_random_port();
- let postgres_port = util::get_random_port();
+ let http_port = get_unique_random_port();
+ let rpc_port = get_unique_random_port();
+ let mysql_port = get_unique_random_port();
+ let postgres_port = get_unique_random_port();
ServerMode::Standalone {
http_addr: format!("127.0.0.1:{http_port}"),
@@ -95,10 +122,10 @@ impl ServerMode {
}
pub fn random_frontend(metasrv_port: u16) -> Self {
- let http_port = util::get_random_port();
- let rpc_port = util::get_random_port();
- let mysql_port = util::get_random_port();
- let postgres_port = util::get_random_port();
+ let http_port = get_unique_random_port();
+ let rpc_port = get_unique_random_port();
+ let mysql_port = get_unique_random_port();
+ let postgres_port = get_unique_random_port();
ServerMode::Frontend {
http_addr: format!("127.0.0.1:{http_port}"),
@@ -110,8 +137,8 @@ impl ServerMode {
}
pub fn random_metasrv() -> Self {
- let bind_port = util::get_random_port();
- let http_port = util::get_random_port();
+ let bind_port = get_unique_random_port();
+ let http_port = get_unique_random_port();
ServerMode::Metasrv {
rpc_bind_addr: format!("127.0.0.1:{bind_port}"),
@@ -121,8 +148,8 @@ impl ServerMode {
}
pub fn random_datanode(metasrv_port: u16, node_id: u32) -> Self {
- let rpc_port = util::get_random_port();
- let http_port = util::get_random_port();
+ let rpc_port = get_unique_random_port();
+ let http_port = get_unique_random_port();
ServerMode::Datanode {
rpc_bind_addr: format!("127.0.0.1:{rpc_port}"),
@@ -134,8 +161,8 @@ impl ServerMode {
}
pub fn random_flownode(metasrv_port: u16, node_id: u32) -> Self {
- let rpc_port = util::get_random_port();
- let http_port = util::get_random_port();
+ let rpc_port = get_unique_random_port();
+ let http_port = get_unique_random_port();
ServerMode::Flownode {
rpc_bind_addr: format!("127.0.0.1:{rpc_port}"),
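The fix keeps a process-wide set of already-claimed ports so that two concurrently configured servers can never draw the same random port. The simplified model below replaces the random source with an explicit candidate iterator; claim_port is not the runner's actual API.

use std::collections::HashSet;
use std::sync::{Mutex, OnceLock};

// Process-wide record of ports that have already been handed out.
static USED_PORTS: OnceLock<Mutex<HashSet<u16>>> = OnceLock::new();

// Return the first candidate that has not been claimed yet, recording it as used.
fn claim_port(mut candidates: impl Iterator<Item = u16>) -> Option<u16> {
    let used = USED_PORTS.get_or_init(|| Mutex::new(HashSet::new()));
    candidates.find(|p| used.lock().unwrap().insert(*p))
}

fn main() {
    // Feeding the same candidate twice shows the deduplication in action.
    assert_eq!(claim_port([4000, 4000, 4001].into_iter()), Some(4000));
    assert_eq!(claim_port([4000, 4000, 4001].into_iter()), Some(4001));
    assert_eq!(claim_port([4000, 4001].into_iter()), None);
}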
|
fix
|
flaky test in sqlness by fix random port (#5657)
|
dc24c462dc3f5cbc9cca65c494d3b3254b525615
|
2025-03-04 00:11:27
|
Weny Xu
|
fix: prevent failover of regions to the same peer (#5632)
| false
|
diff --git a/src/meta-srv/src/region/supervisor.rs b/src/meta-srv/src/region/supervisor.rs
index c7d1c653121a..4c3725d114c6 100644
--- a/src/meta-srv/src/region/supervisor.rs
+++ b/src/meta-srv/src/region/supervisor.rs
@@ -416,6 +416,12 @@ impl RegionSupervisor {
)
.await?;
let to_peer = peers.remove(0);
+ if to_peer.id == from_peer.id {
+ warn!(
+ "Skip failover for region: {region_id}, from_peer: {from_peer}, trying to failover to the same peer."
+ );
+ return Ok(());
+ }
let task = RegionMigrationProcedureTask {
cluster_id,
region_id,
|
fix
|
prevent failover of regions to the same peer (#5632)
|
29ad16d048d83c8451982cf67a48e73bdce446ce
|
2022-11-16 09:23:25
|
SSebo
|
chore: fix typo (#524)
| false
|
diff --git a/src/common/error/src/format.rs b/src/common/error/src/format.rs
index 4113621cb094..87d8171ce0c5 100644
--- a/src/common/error/src/format.rs
+++ b/src/common/error/src/format.rs
@@ -34,7 +34,7 @@ impl<'a, E: ErrorExt + ?Sized> fmt::Debug for DebugFormat<'a, E> {
write!(f, " Caused by: {:?}", source)?;
}
if let Some(backtrace) = self.0.backtrace_opt() {
- // Add a newline to seperate causes and backtrace.
+ // Add a newline to separate causes and backtrace.
write!(f, "\nBacktrace:\n{}", backtrace)?;
}
diff --git a/src/servers/src/prometheus.rs b/src/servers/src/prometheus.rs
index 7e18b1d29af6..48045b066e98 100644
--- a/src/servers/src/prometheus.rs
+++ b/src/servers/src/prometheus.rs
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-//! promethues protcol supportings
+//! prometheus protocol supportings
use std::cmp::Ordering;
use std::collections::{BTreeMap, HashMap};
use std::hash::{Hash, Hasher};
@@ -77,7 +77,7 @@ pub fn query_to_sql(db: &str, q: &Query) -> Result<(String, String)> {
let value = &m.value;
let m_type =
MatcherType::from_i32(m.r#type).context(error::InvalidPromRemoteRequestSnafu {
- msg: format!("invaid LabelMatcher type: {}", m.r#type),
+ msg: format!("invalid LabelMatcher type: {}", m.r#type),
})?;
match m_type {
@@ -87,11 +87,11 @@ pub fn query_to_sql(db: &str, q: &Query) -> Result<(String, String)> {
MatcherType::Neq => {
conditions.push(format!("{}!='{}'", name, value));
}
- // Case senstive regexp match
+ // Case sensitive regexp match
MatcherType::Re => {
conditions.push(format!("{}~'{}'", name, value));
}
- // Case senstive regexp not match
+ // Case sensitive regexp not match
MatcherType::Nre => {
conditions.push(format!("{}!~'{}'", name, value));
}
diff --git a/src/sql/src/parser.rs b/src/sql/src/parser.rs
index 796f9816afcd..a9103de5c44f 100644
--- a/src/sql/src/parser.rs
+++ b/src/sql/src/parser.rs
@@ -158,7 +158,7 @@ impl<'a> ParserContext<'a> {
}));
}
- // SHOW TABLES [in | FROM] [DATABSE]
+ // SHOW TABLES [in | FROM] [DATABASE]
Token::Word(w) => match w.keyword {
Keyword::IN | Keyword::FROM => {
self.parser.next_token();
|
chore
|
fix typo (#524)
|
96e12e9ee5cb1ca783d93a0a56a00b587b4e347f
|
2023-12-04 20:59:02
|
Zhenchi
|
fix: correct the previously unsuccessful decimal_ops sort result fix (#2869)
| false
|
diff --git a/tests/cases/standalone/common/types/decimal/decimal_ops.result b/tests/cases/standalone/common/types/decimal/decimal_ops.result
index 967d7545db67..3081f3811c24 100644
--- a/tests/cases/standalone/common/types/decimal/decimal_ops.result
+++ b/tests/cases/standalone/common/types/decimal/decimal_ops.result
@@ -7,7 +7,7 @@ INSERT INTO decimals VALUES ('0.1',1000), ('0.2',2000);
Affected Rows: 2
--- SQLNESS SORT 3 1
+-- SQLNESS SORT_RESULT 3 1
SELECT * FROM decimals;
+------+---------------------+
@@ -37,7 +37,7 @@ SELECT * FROM decimals WHERE d = '0.1'::DECIMAL(3,2);
+------+---------------------+
-- greater than equals
--- SQLNESS SORT 3 1
+-- SQLNESS SORT_RESULT 3 1
SELECT * FROM decimals WHERE d >= '0.1'::DECIMAL(3,2);
+------+---------------------+
@@ -409,7 +409,7 @@ INSERT INTO tmp_table VALUES (1, 1000), (2, 2000), (3, 3000);
Affected Rows: 3
--- SQLNESS SORT 3 1
+-- SQLNESS SORT_RESULT 3 1
SELECT * FROM tmp_table;
+---+---------------------+
@@ -420,7 +420,7 @@ SELECT * FROM tmp_table;
| 3 | 1970-01-01T00:00:03 |
+---+---------------------+
--- SQLNESS SORT 3 1
+-- SQLNESS SORT_RESULT 3 1
SELECT * FROM tmp_table JOIN decimals ON decimals.ts = tmp_table.ts;
+---+---------------------+------+---------------------+
diff --git a/tests/cases/standalone/common/types/decimal/decimal_ops.sql b/tests/cases/standalone/common/types/decimal/decimal_ops.sql
index e59a3cefcd80..8682aa18ff7d 100644
--- a/tests/cases/standalone/common/types/decimal/decimal_ops.sql
+++ b/tests/cases/standalone/common/types/decimal/decimal_ops.sql
@@ -4,7 +4,7 @@ CREATE TABLE decimals(d DECIMAL(3, 2), ts timestamp time index);
INSERT INTO decimals VALUES ('0.1',1000), ('0.2',2000);
--- SQLNESS SORT 3 1
+-- SQLNESS SORT_RESULT 3 1
SELECT * FROM decimals;
-- ORDER BY
@@ -17,7 +17,7 @@ SELECT * FROM decimals WHERE d = '0.1'::DECIMAL(3,2);
-- greater than equals
--- SQLNESS SORT 3 1
+-- SQLNESS SORT_RESULT 3 1
SELECT * FROM decimals WHERE d >= '0.1'::DECIMAL(3,2);
-- what about if we use different decimal scales?
@@ -196,10 +196,10 @@ CREATE TABLE tmp_table(i INTEGER, ts timestamp time index);
INSERT INTO tmp_table VALUES (1, 1000), (2, 2000), (3, 3000);
--- SQLNESS SORT 3 1
+-- SQLNESS SORT_RESULT 3 1
SELECT * FROM tmp_table;
--- SQLNESS SORT 3 1
+-- SQLNESS SORT_RESULT 3 1
SELECT * FROM tmp_table JOIN decimals ON decimals.ts = tmp_table.ts;
DROP TABLE decimals;
|
fix
|
correct the previously unsuccessful decimal_ops sort result fix (#2869)
|
39d52f25bf15f51539089aa8054475f37044297f
|
2023-11-03 09:26:28
|
Weny Xu
|
feat: enable metasrv leader cached kv (#2629)
| false
|
diff --git a/src/common/meta/src/key.rs b/src/common/meta/src/key.rs
index c9522a9d64b5..e9e2b0804a98 100644
--- a/src/common/meta/src/key.rs
+++ b/src/common/meta/src/key.rs
@@ -91,12 +91,20 @@ pub const REMOVED_PREFIX: &str = "__removed";
const NAME_PATTERN: &str = r"[a-zA-Z_:-][a-zA-Z0-9_:\-\.]*";
const DATANODE_TABLE_KEY_PREFIX: &str = "__dn_table";
-const TABLE_INFO_KEY_PREFIX: &str = "__table_info";
-const TABLE_NAME_KEY_PREFIX: &str = "__table_name";
const TABLE_REGION_KEY_PREFIX: &str = "__table_region";
-const CATALOG_NAME_KEY_PREFIX: &str = "__catalog_name";
-const SCHEMA_NAME_KEY_PREFIX: &str = "__schema_name";
-const TABLE_ROUTE_PREFIX: &str = "__table_route";
+
+pub const TABLE_INFO_KEY_PREFIX: &str = "__table_info";
+pub const TABLE_NAME_KEY_PREFIX: &str = "__table_name";
+pub const CATALOG_NAME_KEY_PREFIX: &str = "__catalog_name";
+pub const SCHEMA_NAME_KEY_PREFIX: &str = "__schema_name";
+pub const TABLE_ROUTE_PREFIX: &str = "__table_route";
+
+pub const CACHE_KEY_PREFIXES: [&str; 4] = [
+ TABLE_NAME_KEY_PREFIX,
+ CATALOG_NAME_KEY_PREFIX,
+ SCHEMA_NAME_KEY_PREFIX,
+ TABLE_ROUTE_PREFIX,
+];
pub type RegionDistribution = BTreeMap<DatanodeId, Vec<RegionNumber>>;
diff --git a/src/common/meta/src/range_stream.rs b/src/common/meta/src/range_stream.rs
index 5fb70d1b7010..4aa60b00c3a9 100644
--- a/src/common/meta/src/range_stream.rs
+++ b/src/common/meta/src/range_stream.rs
@@ -39,7 +39,16 @@ enum PaginationStreamState<K, V> {
Error,
}
-pub const DEFAULT_PAGE_SIZE: usize = 512;
+/// The Range Request's default page size.
+///
+/// It depends on the upstream KvStore server's gRPC message size limitation
+/// (e.g., etcd's default gRPC message size limit is 4 MiB).
+///
+/// Generally, almost all metadata is smaller than 2700 bytes.
+/// Therefore, we can set the [DEFAULT_PAGE_SIZE] to 1536 statically.
+///
+/// TODO(weny): Considers updating the default page size dynamically.
+pub const DEFAULT_PAGE_SIZE: usize = 1536;
struct PaginationStreamFactory {
kv: KvBackendRef,
diff --git a/src/meta-srv/src/handler/on_leader_start_handler.rs b/src/meta-srv/src/handler/on_leader_start_handler.rs
index 325bcf42a46a..9f32b443b711 100644
--- a/src/meta-srv/src/handler/on_leader_start_handler.rs
+++ b/src/meta-srv/src/handler/on_leader_start_handler.rs
@@ -35,8 +35,10 @@ impl HeartbeatHandler for OnLeaderStartHandler {
if let Some(election) = &ctx.election {
if election.in_infancy() {
ctx.is_infancy = true;
+ // TODO(weny): Unifies the multiple leader state between Context and MetaSrv.
+ // we can't ensure the in-memory kv has already been reset in the outside loop.
+ // We still use heartbeat requests to trigger resetting in-memory kv.
ctx.reset_in_memory();
- ctx.reset_leader_cached_kv_backend();
}
}
Ok(())
diff --git a/src/meta-srv/src/lib.rs b/src/meta-srv/src/lib.rs
index 77f8f3f82145..14af05f8c0c5 100644
--- a/src/meta-srv/src/lib.rs
+++ b/src/meta-srv/src/lib.rs
@@ -14,6 +14,7 @@
#![feature(async_closure)]
#![feature(result_flattening)]
+#![feature(assert_matches)]
pub mod bootstrap;
mod cache_invalidator;
@@ -34,6 +35,7 @@ pub mod pubsub;
pub mod region;
pub mod selector;
pub mod service;
+pub mod state;
pub mod table_meta_alloc;
pub use crate::error::Result;
diff --git a/src/meta-srv/src/metasrv.rs b/src/meta-srv/src/metasrv.rs
index c72e2d59e779..a810a5501981 100644
--- a/src/meta-srv/src/metasrv.rs
+++ b/src/meta-srv/src/metasrv.rs
@@ -24,7 +24,7 @@ use common_greptimedb_telemetry::GreptimeDBTelemetryTask;
use common_grpc::channel_manager;
use common_meta::ddl::DdlTaskExecutorRef;
use common_meta::key::TableMetadataManagerRef;
-use common_meta::kv_backend::{KvBackendRef, ResettableKvBackendRef};
+use common_meta::kv_backend::{KvBackendRef, ResettableKvBackend, ResettableKvBackendRef};
use common_meta::sequence::SequenceRef;
use common_procedure::options::ProcedureConfig;
use common_procedure::ProcedureManagerRef;
@@ -39,7 +39,7 @@ use tokio::sync::broadcast::error::RecvError;
use crate::cluster::MetaPeerClientRef;
use crate::election::{Election, LeaderChangeMessage};
use crate::error::{
- InitMetadataSnafu, Result, StartProcedureManagerSnafu, StartTelemetryTaskSnafu,
+ self, InitMetadataSnafu, Result, StartProcedureManagerSnafu, StartTelemetryTaskSnafu,
StopProcedureManagerSnafu,
};
use crate::handler::HeartbeatHandlerGroup;
@@ -47,6 +47,9 @@ use crate::lock::DistLockRef;
use crate::pubsub::{PublishRef, SubscribeManagerRef};
use crate::selector::{Selector, SelectorType};
use crate::service::mailbox::MailboxRef;
+use crate::service::store::cached_kv::LeaderCachedKvBackend;
+use crate::state::{become_follower, become_leader, StateRef};
+
pub const TABLE_ID_SEQ: &str = "table_id";
pub const METASRV_HOME: &str = "/tmp/metasrv";
@@ -176,10 +179,20 @@ pub struct MetaStateHandler {
procedure_manager: ProcedureManagerRef,
subscribe_manager: Option<SubscribeManagerRef>,
greptimedb_telemetry_task: Arc<GreptimeDBTelemetryTask>,
+ leader_cached_kv_backend: Arc<LeaderCachedKvBackend>,
+ state: StateRef,
}
impl MetaStateHandler {
pub async fn on_become_leader(&self) {
+ self.state.write().unwrap().next_state(become_leader(false));
+
+ if let Err(e) = self.leader_cached_kv_backend.load().await {
+ error!(e; "Failed to load kv into leader cache kv store");
+ } else {
+ self.state.write().unwrap().next_state(become_leader(true));
+ }
+
if let Err(e) = self.procedure_manager.start().await {
error!(e; "Failed to start procedure manager");
}
@@ -187,6 +200,8 @@ impl MetaStateHandler {
}
pub async fn on_become_follower(&self) {
+ self.state.write().unwrap().next_state(become_follower());
+
// Stops the procedures.
if let Err(e) = self.procedure_manager.stop().await {
error!(e; "Failed to stop procedure manager");
@@ -205,13 +220,14 @@ impl MetaStateHandler {
#[derive(Clone)]
pub struct MetaSrv {
+ state: StateRef,
started: Arc<AtomicBool>,
options: MetaSrvOptions,
// It is only valid at the leader node and is used to temporarily
// store some data that will not be persisted.
in_memory: ResettableKvBackendRef,
kv_backend: KvBackendRef,
- leader_cached_kv_backend: ResettableKvBackendRef,
+ leader_cached_kv_backend: Arc<LeaderCachedKvBackend>,
table_id_sequence: SequenceRef,
meta_peer_client: MetaPeerClientRef,
selector: SelectorRef,
@@ -254,6 +270,8 @@ impl MetaSrv {
greptimedb_telemetry_task,
subscribe_manager,
procedure_manager,
+ state: self.state.clone(),
+ leader_cached_kv_backend: leader_cached_kv_backend.clone(),
};
let _handle = common_runtime::spawn_bg(async move {
loop {
@@ -299,6 +317,11 @@ impl MetaSrv {
info!("MetaSrv stopped");
});
} else {
+ // Always load kv into cached kv store.
+ self.leader_cached_kv_backend
+ .load()
+ .await
+ .context(error::KvBackendSnafu)?;
self.procedure_manager
.start()
.await
@@ -337,10 +360,6 @@ impl MetaSrv {
&self.kv_backend
}
- pub fn leader_cached_kv_backend(&self) -> &ResettableKvBackendRef {
- &self.leader_cached_kv_backend
- }
-
pub fn meta_peer_client(&self) -> &MetaPeerClientRef {
&self.meta_peer_client
}
diff --git a/src/meta-srv/src/metasrv/builder.rs b/src/meta-srv/src/metasrv/builder.rs
index 0f2dc0f78da7..90d340466244 100644
--- a/src/meta-srv/src/metasrv/builder.rs
+++ b/src/meta-srv/src/metasrv/builder.rs
@@ -13,7 +13,7 @@
// limitations under the License.
use std::sync::atomic::AtomicBool;
-use std::sync::Arc;
+use std::sync::{Arc, RwLock};
use std::time::Duration;
use client::client_manager::DatanodeClients;
@@ -55,6 +55,7 @@ use crate::pubsub::PublishRef;
use crate::selector::lease_based::LeaseBasedSelector;
use crate::service::mailbox::MailboxRef;
use crate::service::store::cached_kv::{CheckLeader, LeaderCachedKvBackend};
+use crate::state::State;
use crate::table_meta_alloc::MetaSrvTableMetadataAllocator;
// TODO(fys): try use derive_builder macro
@@ -157,7 +158,18 @@ impl MetaSrvBuilder {
let kv_backend = kv_backend.unwrap_or_else(|| Arc::new(MemoryKvBackend::new()));
let in_memory = in_memory.unwrap_or_else(|| Arc::new(MemoryKvBackend::new()));
- let leader_cached_kv_backend = build_leader_cached_kv_backend(&election, &kv_backend);
+
+ let state = Arc::new(RwLock::new(match election {
+ None => State::leader(options.server_addr.to_string(), true),
+ Some(_) => State::follower(options.server_addr.to_string()),
+ }));
+
+ let leader_cached_kv_backend = Arc::new(LeaderCachedKvBackend::new(
+ state.clone(),
+ kv_backend.clone(),
+ ));
+ let kv_backend = leader_cached_kv_backend.clone() as _;
+
let meta_peer_client = meta_peer_client
.unwrap_or_else(|| build_default_meta_peer_client(&election, &in_memory));
let selector = selector.unwrap_or_else(|| Arc::new(LeaseBasedSelector));
@@ -241,6 +253,7 @@ impl MetaSrvBuilder {
let metasrv_home = options.data_home.to_string();
Ok(MetaSrv {
+ state,
started,
options,
in_memory,
@@ -267,16 +280,6 @@ impl MetaSrvBuilder {
}
}
-fn build_leader_cached_kv_backend(
- election: &Option<ElectionRef>,
- kv_backend: &KvBackendRef,
-) -> Arc<LeaderCachedKvBackend> {
- Arc::new(LeaderCachedKvBackend::new(
- Arc::new(CheckLeaderByElection(election.clone())),
- kv_backend.clone(),
- ))
-}
-
fn build_default_meta_peer_client(
election: &Option<ElectionRef>,
in_memory: &ResettableKvBackendRef,
diff --git a/src/meta-srv/src/metrics.rs b/src/meta-srv/src/metrics.rs
index 57d489399578..8c73c9a7c171 100644
--- a/src/meta-srv/src/metrics.rs
+++ b/src/meta-srv/src/metrics.rs
@@ -31,4 +31,7 @@ lazy_static! {
register_histogram_vec!("meta_handler_execute", "meta handler execute", &["name"]).unwrap();
pub static ref METRIC_META_INACTIVE_REGIONS: IntGauge =
register_int_gauge!("meta_inactive_regions", "meta inactive regions").unwrap();
+ pub static ref METRIC_META_LEADER_CACHED_KV_LOAD: HistogramVec =
+ register_histogram_vec!("meta_leader_cache_kv_load", "meta load cache", &["prefix"])
+ .unwrap();
}
diff --git a/src/meta-srv/src/service/store/cached_kv.rs b/src/meta-srv/src/service/store/cached_kv.rs
index f92bd1fb6199..450a967df112 100644
--- a/src/meta-srv/src/service/store/cached_kv.rs
+++ b/src/meta-srv/src/service/store/cached_kv.rs
@@ -15,20 +15,26 @@
use std::any::Any;
use std::collections::HashSet;
use std::sync::atomic::{AtomicUsize, Ordering};
-use std::sync::Arc;
+use std::sync::{Arc, RwLock};
use common_meta::error::{Error, Result};
+use common_meta::key::CACHE_KEY_PREFIXES;
use common_meta::kv_backend::memory::MemoryKvBackend;
use common_meta::kv_backend::txn::{Txn, TxnOp, TxnRequest, TxnResponse};
use common_meta::kv_backend::{
KvBackend, KvBackendRef, ResettableKvBackend, ResettableKvBackendRef, TxnService,
};
+use common_meta::range_stream::{PaginationStream, DEFAULT_PAGE_SIZE};
use common_meta::rpc::store::{
BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest,
BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse, DeleteRangeRequest,
DeleteRangeResponse, PutRequest, PutResponse, RangeRequest, RangeResponse,
};
use common_meta::rpc::KeyValue;
+use futures::TryStreamExt;
+
+use crate::metrics;
+use crate::state::State;
pub type CheckLeaderRef = Arc<dyn CheckLeader>;
@@ -44,6 +50,12 @@ impl CheckLeader for AlwaysLeader {
}
}
+impl CheckLeader for RwLock<State> {
+ fn check(&self) -> bool {
+ self.read().unwrap().enable_leader_cache()
+ }
+}
+
/// A cache dedicated to a Leader node, in order to cache some metadata.
///
/// To use this cache, the following constraints must be followed:
@@ -79,6 +91,37 @@ impl LeaderCachedKvBackend {
Self::new(Arc::new(AlwaysLeader), store)
}
+ /// The caller MUST ensure during the loading, there are no mutation requests reaching the `LeaderCachedKvStore`.
+ pub async fn load(&self) -> Result<()> {
+ for prefix in &CACHE_KEY_PREFIXES[..] {
+ let _timer = metrics::METRIC_META_LEADER_CACHED_KV_LOAD.with_label_values(&[prefix]);
+
+ // TODO(weny): Refactors PaginationStream's output to unary output.
+ let stream = PaginationStream::new(
+ self.store.clone(),
+ RangeRequest::new().with_prefix(prefix.as_bytes()),
+ DEFAULT_PAGE_SIZE,
+ Arc::new(|kv| Ok((kv, ()))),
+ );
+
+ let kvs = stream
+ .try_collect::<Vec<_>>()
+ .await?
+ .into_iter()
+ .map(|(kv, _)| kv)
+ .collect();
+
+ self.cache
+ .batch_put(BatchPutRequest {
+ kvs,
+ prev_kv: false,
+ })
+ .await?;
+ }
+
+ Ok(())
+ }
+
#[inline]
fn is_leader(&self) -> bool {
self.check_leader.check()
@@ -141,7 +184,14 @@ impl KvBackend for LeaderCachedKvBackend {
let ver = self.get_version();
- let res = self.store.range(req.clone()).await?;
+ let res = self
+ .store
+ .range(RangeRequest {
+ // ignores `keys_only`
+ keys_only: false,
+ ..req.clone()
+ })
+ .await?;
if !res.kvs.is_empty() {
let KeyValue { key, value } = res.kvs[0].clone();
let put_req = PutRequest {
diff --git a/src/meta-srv/src/state.rs b/src/meta-srv/src/state.rs
new file mode 100644
index 000000000000..0466644c679c
--- /dev/null
+++ b/src/meta-srv/src/state.rs
@@ -0,0 +1,150 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::sync::{Arc, RwLock};
+
+pub type StateRef = Arc<RwLock<State>>;
+
+/// State transition.
+/// ```text
+/// +------------------------------+
+/// | |
+/// | |
+/// | |
+/// +-------------------v--------------------+ |
+/// | LeaderState{enable_leader_cache:false} | |
+/// +-------------------+--------------------+ |
+/// | |
+/// | |
+/// +---------v---------+ |
+/// | Init Leader Cache | |
+/// +---------+---------+ |
+/// | |
+/// | |
+/// +-------------------v-------------------+ |
+/// | LeaderState{enable_leader_cache:true} | |
+/// +-------------------+-------------------+ |
+/// | |
+/// | |
+/// +-------v-------+ |
+/// | FollowerState | |
+/// +-------+-------+ |
+/// | |
+/// | |
+/// +------------------------------+
+///```
+#[derive(Debug, Clone)]
+pub enum State {
+ Leader(LeaderState),
+ Follower(FollowerState),
+}
+
+#[derive(Debug, Clone)]
+pub struct LeaderState {
+ // Disables the leader cache during initiation
+ pub enable_leader_cache: bool,
+
+ pub server_addr: String,
+}
+
+#[derive(Debug, Clone)]
+pub struct FollowerState {
+ pub server_addr: String,
+}
+
+impl State {
+ pub fn follower(server_addr: String) -> State {
+ Self::Follower(FollowerState { server_addr })
+ }
+
+ pub fn leader(server_addr: String, enable_leader_cache: bool) -> State {
+ Self::Leader(LeaderState {
+ enable_leader_cache,
+ server_addr,
+ })
+ }
+
+ pub fn enable_leader_cache(&self) -> bool {
+ match &self {
+ State::Leader(leader) => leader.enable_leader_cache,
+ State::Follower(_) => false,
+ }
+ }
+
+ pub fn next_state<F>(&mut self, f: F)
+ where
+ F: FnOnce(&State) -> State,
+ {
+ *self = f(self);
+ }
+}
+
+pub fn become_leader(enable_leader_cache: bool) -> impl FnOnce(&State) -> State {
+ move |prev| match prev {
+ State::Leader(leader) => State::Leader(LeaderState { ..leader.clone() }),
+ State::Follower(follower) => State::Leader(LeaderState {
+ server_addr: follower.server_addr.to_string(),
+ enable_leader_cache,
+ }),
+ }
+}
+
+pub fn become_follower() -> impl FnOnce(&State) -> State {
+ move |prev| match prev {
+ State::Leader(leader) => State::Follower(FollowerState {
+ server_addr: leader.server_addr.to_string(),
+ }),
+ State::Follower(follower) => State::Follower(FollowerState { ..follower.clone() }),
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::assert_matches::assert_matches;
+
+ use crate::state::{become_follower, become_leader, FollowerState, LeaderState, State};
+
+ #[tokio::test]
+ async fn test_next_state() {
+ let mut state = State::follower("test".to_string());
+
+ state.next_state(become_leader(false));
+
+ assert_matches!(
+ state,
+ State::Leader(LeaderState {
+ enable_leader_cache: false,
+ ..
+ })
+ );
+
+ state.next_state(become_leader(false));
+
+ assert_matches!(
+ state,
+ State::Leader(LeaderState {
+ enable_leader_cache: false,
+ ..
+ })
+ );
+
+ state.next_state(become_follower());
+
+ assert_matches!(state, State::Follower(FollowerState { .. }));
+
+ state.next_state(become_follower());
+
+ assert_matches!(state, State::Follower(FollowerState { .. }));
+ }
+}
|
feat
|
enable metasrv leader cached kv (#2629)
|
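An illustrative aside (not part of the commit above): the patch gates the leader cache behind a `CheckLeader` impl that simply reads the new `State`. Below is a minimal, self-contained Rust sketch of that pattern; the `State` and `CheckLeader` bodies are simplified stand-ins, not the meta-srv implementation shown in the diff.

use std::sync::{Arc, RwLock};

// Simplified stand-in for the `State` added by the patch: a node is either a
// leader (whose cache may still be warming up) or a follower.
#[derive(Debug)]
enum State {
    Leader { enable_leader_cache: bool },
    Follower,
}

impl State {
    fn enable_leader_cache(&self) -> bool {
        match self {
            State::Leader { enable_leader_cache } => *enable_leader_cache,
            State::Follower => false,
        }
    }
}

// The cached KV backend consults its local cache only when this check passes.
trait CheckLeader {
    fn check(&self) -> bool;
}

impl CheckLeader for RwLock<State> {
    fn check(&self) -> bool {
        self.read().unwrap().enable_leader_cache()
    }
}

fn main() {
    let state = Arc::new(RwLock::new(State::Follower));
    assert!(!state.check()); // followers never serve from the leader cache

    // Newly elected leader: the cache stays disabled until it has been loaded.
    *state.write().unwrap() = State::Leader { enable_leader_cache: false };
    assert!(!state.check());

    // Once the load finishes, flipping the flag makes the check pass.
    *state.write().unwrap() = State::Leader { enable_leader_cache: true };
    assert!(state.check());
}

The two-step transition is the point: a freshly elected leader keeps the cache switched off until `load()` has populated it, so readers never observe a half-filled cache.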
579059d99f485f31e242f089ffccf6c88ce6520b
|
2024-12-13 18:23:11
|
Yingwen
|
ci: use 4xlarge for nightly build (#5158)
| false
|
diff --git a/.github/workflows/nightly-build.yml b/.github/workflows/nightly-build.yml
index 09fcc5c26eba..afe01f11ec27 100644
--- a/.github/workflows/nightly-build.yml
+++ b/.github/workflows/nightly-build.yml
@@ -12,7 +12,7 @@ on:
linux_amd64_runner:
type: choice
description: The runner uses to build linux-amd64 artifacts
- default: ec2-c6i.2xlarge-amd64
+ default: ec2-c6i.4xlarge-amd64
options:
- ubuntu-20.04
- ubuntu-20.04-8-cores
@@ -27,7 +27,7 @@ on:
linux_arm64_runner:
type: choice
description: The runner uses to build linux-arm64 artifacts
- default: ec2-c6g.2xlarge-arm64
+ default: ec2-c6g.4xlarge-arm64
options:
- ec2-c6g.xlarge-arm64 # 4C8G
- ec2-c6g.2xlarge-arm64 # 8C16G
|
ci
|
use 4xlarge for nightly build (#5158)
|
2896e1f8688477d8b09836cf2d4a9e70d20b3b1f
|
2024-04-09 07:56:42
|
tison
|
refactor: pass http method to metasrv http handler (#3667)
| false
|
diff --git a/src/meta-srv/src/service/admin.rs b/src/meta-srv/src/service/admin.rs
index 7bf0d04640de..bf376b1c8068 100644
--- a/src/meta-srv/src/service/admin.rs
+++ b/src/meta-srv/src/service/admin.rs
@@ -98,13 +98,12 @@ pub fn make_admin_service(meta_srv: MetaSrv) -> Admin {
};
let router = router.route("/region-migration", handler);
- let handler = maintenance::MaintenanceHandler {
- kv_backend: meta_srv.kv_backend().clone(),
- };
- let router = router
- .route("/maintenance", handler.clone())
- .route("/maintenance/set", handler);
-
+ let router = router.route(
+ "/maintenance",
+ maintenance::MaintenanceHandler {
+ kv_backend: meta_srv.kv_backend().clone(),
+ },
+ );
let router = Router::nest("/admin", router);
Admin::new(router)
@@ -115,6 +114,7 @@ pub trait HttpHandler: Send + Sync {
async fn handle(
&self,
path: &str,
+ method: http::Method,
params: &HashMap<String, String>,
) -> crate::Result<http::Response<String>>;
}
@@ -163,7 +163,8 @@ where
})
.unwrap_or_default();
let path = req.uri().path().to_owned();
- Box::pin(async move { router.call(&path, query_params).await })
+ let method = req.method().clone();
+ Box::pin(async move { router.call(&path, method, query_params).await })
}
}
@@ -202,6 +203,7 @@ impl Router {
pub async fn call(
&self,
path: &str,
+ method: http::Method,
params: HashMap<String, String>,
) -> Result<http::Response<BoxBody>, Infallible> {
let handler = match self.handlers.get(path) {
@@ -214,7 +216,7 @@ impl Router {
}
};
- let res = match handler.handle(path, &params).await {
+ let res = match handler.handle(path, method, &params).await {
Ok(res) => res.map(boxed),
Err(e) => http::Response::builder()
.status(http::StatusCode::INTERNAL_SERVER_ERROR)
@@ -250,6 +252,7 @@ mod tests {
async fn handle(
&self,
_: &str,
+ _: http::Method,
_: &HashMap<String, String>,
) -> crate::Result<http::Response<String>> {
Ok(http::Response::builder()
@@ -265,6 +268,7 @@ mod tests {
async fn handle(
&self,
_: &str,
+ _: http::Method,
_: &HashMap<String, String>,
) -> crate::Result<http::Response<String>> {
error::EmptyKeySnafu {}.fail()
@@ -300,7 +304,11 @@ mod tests {
let router = Router::nest("/test_root", router);
let res = router
- .call("/test_root/test_node", HashMap::default())
+ .call(
+ "/test_root/test_node",
+ http::Method::GET,
+ HashMap::default(),
+ )
.await
.unwrap();
@@ -312,7 +320,11 @@ mod tests {
let router = Router::new();
let res = router
- .call("/test_root/test_node", HashMap::default())
+ .call(
+ "/test_root/test_node",
+ http::Method::GET,
+ HashMap::default(),
+ )
.await
.unwrap();
@@ -326,7 +338,11 @@ mod tests {
let router = Router::nest("/test_root", router);
let res = router
- .call("/test_root/test_node", HashMap::default())
+ .call(
+ "/test_root/test_node",
+ http::Method::GET,
+ HashMap::default(),
+ )
.await
.unwrap();
diff --git a/src/meta-srv/src/service/admin/health.rs b/src/meta-srv/src/service/admin/health.rs
index 6cde44dfeec7..76f79fc8adcf 100644
--- a/src/meta-srv/src/service/admin/health.rs
+++ b/src/meta-srv/src/service/admin/health.rs
@@ -25,7 +25,12 @@ pub struct HealthHandler;
#[async_trait::async_trait]
impl HttpHandler for HealthHandler {
- async fn handle(&self, _: &str, _: &HashMap<String, String>) -> Result<http::Response<String>> {
+ async fn handle(
+ &self,
+ _: &str,
+ _: http::Method,
+ _: &HashMap<String, String>,
+ ) -> Result<http::Response<String>> {
Ok(http::Response::builder()
.status(http::StatusCode::OK)
.body(HTTP_OK.to_owned())
@@ -42,7 +47,10 @@ mod tests {
let health_handler = HealthHandler {};
let path = "any";
let params = HashMap::default();
- let res = health_handler.handle(path, &params).await.unwrap();
+ let res = health_handler
+ .handle(path, http::Method::GET, &params)
+ .await
+ .unwrap();
assert!(res.status().is_success());
assert_eq!(HTTP_OK.to_owned(), res.body().clone());
diff --git a/src/meta-srv/src/service/admin/heartbeat.rs b/src/meta-srv/src/service/admin/heartbeat.rs
index 3b7c42d729d1..618d6ef6ef53 100644
--- a/src/meta-srv/src/service/admin/heartbeat.rs
+++ b/src/meta-srv/src/service/admin/heartbeat.rs
@@ -33,6 +33,7 @@ impl HttpHandler for HeartBeatHandler {
async fn handle(
&self,
path: &str,
+ _: http::Method,
params: &HashMap<String, String>,
) -> Result<http::Response<String>> {
if path.ends_with("/help") {
diff --git a/src/meta-srv/src/service/admin/leader.rs b/src/meta-srv/src/service/admin/leader.rs
index 6a5427774d2f..207176e6af9a 100644
--- a/src/meta-srv/src/service/admin/leader.rs
+++ b/src/meta-srv/src/service/admin/leader.rs
@@ -27,7 +27,12 @@ pub struct LeaderHandler {
#[async_trait::async_trait]
impl HttpHandler for LeaderHandler {
- async fn handle(&self, _: &str, _: &HashMap<String, String>) -> Result<http::Response<String>> {
+ async fn handle(
+ &self,
+ _: &str,
+ _: http::Method,
+ _: &HashMap<String, String>,
+ ) -> Result<http::Response<String>> {
if let Some(election) = &self.election {
let leader_addr = election.leader().await?.0;
return http::Response::builder()
diff --git a/src/meta-srv/src/service/admin/maintenance.rs b/src/meta-srv/src/service/admin/maintenance.rs
index 01e62aece6ef..b207fecd9f87 100644
--- a/src/meta-srv/src/service/admin/maintenance.rs
+++ b/src/meta-srv/src/service/admin/maintenance.rs
@@ -23,6 +23,7 @@ use tonic::codegen::http::Response;
use crate::error::{
InvalidHttpBodySnafu, KvBackendSnafu, MissingRequiredParameterSnafu, ParseBoolSnafu,
+ UnsupportedSnafu,
};
use crate::service::admin::HttpHandler;
@@ -91,13 +92,17 @@ impl MaintenanceHandler {
impl HttpHandler for MaintenanceHandler {
async fn handle(
&self,
- path: &str,
+ _: &str,
+ method: http::Method,
params: &HashMap<String, String>,
) -> crate::Result<Response<String>> {
- if path.ends_with("/set") {
- self.set_maintenance(params).await
- } else {
- self.get_maintenance().await
+ match method {
+ http::Method::GET => self.get_maintenance().await,
+ http::Method::PUT => self.set_maintenance(params).await,
+ _ => UnsupportedSnafu {
+ operation: format!("http method {method}"),
+ }
+ .fail(),
}
}
}
diff --git a/src/meta-srv/src/service/admin/meta.rs b/src/meta-srv/src/service/admin/meta.rs
index d13ca93b0eec..2d3d09278f33 100644
--- a/src/meta-srv/src/service/admin/meta.rs
+++ b/src/meta-srv/src/service/admin/meta.rs
@@ -48,7 +48,12 @@ pub struct TableHandler {
#[async_trait::async_trait]
impl HttpHandler for CatalogsHandler {
- async fn handle(&self, _: &str, _: &HashMap<String, String>) -> Result<http::Response<String>> {
+ async fn handle(
+ &self,
+ _: &str,
+ _: http::Method,
+ _: &HashMap<String, String>,
+ ) -> Result<http::Response<String>> {
let stream = self
.table_metadata_manager
.catalog_manager()
@@ -69,6 +74,7 @@ impl HttpHandler for SchemasHandler {
async fn handle(
&self,
path: &str,
+ _: http::Method,
params: &HashMap<String, String>,
) -> Result<http::Response<String>> {
if path.ends_with("/help") {
@@ -100,6 +106,7 @@ impl HttpHandler for TablesHandler {
async fn handle(
&self,
path: &str,
+ _: http::Method,
params: &HashMap<String, String>,
) -> Result<http::Response<String>> {
if path.ends_with("/help") {
@@ -135,6 +142,7 @@ impl HttpHandler for TableHandler {
async fn handle(
&self,
path: &str,
+ _: http::Method,
params: &HashMap<String, String>,
) -> Result<http::Response<String>> {
if path.ends_with("/help") {
diff --git a/src/meta-srv/src/service/admin/node_lease.rs b/src/meta-srv/src/service/admin/node_lease.rs
index 4b94674a25eb..a0f60ced56e0 100644
--- a/src/meta-srv/src/service/admin/node_lease.rs
+++ b/src/meta-srv/src/service/admin/node_lease.rs
@@ -33,6 +33,7 @@ impl HttpHandler for NodeLeaseHandler {
async fn handle(
&self,
_: &str,
+ _: http::Method,
params: &HashMap<String, String>,
) -> Result<http::Response<String>> {
let cluster_id = util::extract_cluster_id(params)?;
diff --git a/src/meta-srv/src/service/admin/region_migration.rs b/src/meta-srv/src/service/admin/region_migration.rs
index f8bbba51d4ff..e07bb7adb2bc 100644
--- a/src/meta-srv/src/service/admin/region_migration.rs
+++ b/src/meta-srv/src/service/admin/region_migration.rs
@@ -178,6 +178,7 @@ impl HttpHandler for SubmitRegionMigrationTaskHandler {
async fn handle(
&self,
_: &str,
+ _: http::Method,
params: &HashMap<String, String>,
) -> Result<http::Response<String>> {
let request = SubmitRegionMigrationTaskRequest::try_from(params)?;
diff --git a/src/meta-srv/src/service/admin/route.rs b/src/meta-srv/src/service/admin/route.rs
index 4339e1a56417..cbc2dbe68746 100644
--- a/src/meta-srv/src/service/admin/route.rs
+++ b/src/meta-srv/src/service/admin/route.rs
@@ -37,6 +37,7 @@ impl HttpHandler for RouteHandler {
async fn handle(
&self,
path: &str,
+ _: http::Method,
params: &HashMap<String, String>,
) -> Result<http::Response<String>> {
if path.ends_with("/help") {
|
refactor
|
pass http method to metasrv http handler (#3667)
|
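A rough sketch of the dispatch shape introduced above, assuming the `http` crate these handlers already use; the struct body is hypothetical, with an `AtomicBool` standing in for the real kv-backend-backed maintenance flag.

// Assumed dependency: the `http` crate (the same one these handlers use).
use std::collections::HashMap;
use std::sync::atomic::{AtomicBool, Ordering};

use http::{Method, Response, StatusCode};

// Hypothetical stand-in for MaintenanceHandler: an AtomicBool replaces the
// kv-backend-backed maintenance flag of the real handler.
struct MaintenanceHandler {
    enabled: AtomicBool,
}

impl MaintenanceHandler {
    fn handle(
        &self,
        method: Method,
        params: &HashMap<String, String>,
    ) -> Result<Response<String>, http::Error> {
        match method {
            // GET reads the current maintenance state.
            Method::GET => Response::builder()
                .status(StatusCode::OK)
                .body(format!("maintenance={}", self.enabled.load(Ordering::Relaxed))),
            // PUT mutates it, driven by the same query parameters as before.
            Method::PUT => {
                let enable = params.get("enable").map(|v| v == "true").unwrap_or(false);
                self.enabled.store(enable, Ordering::Relaxed);
                Response::builder()
                    .status(StatusCode::OK)
                    .body(format!("maintenance set to {enable}"))
            }
            // Anything else is rejected instead of silently falling through.
            other => Response::builder()
                .status(StatusCode::METHOD_NOT_ALLOWED)
                .body(format!("unsupported http method {other}")),
        }
    }
}

fn main() {
    let handler = MaintenanceHandler {
        enabled: AtomicBool::new(false),
    };

    let mut params = HashMap::new();
    params.insert("enable".to_string(), "true".to_string());
    assert!(handler.handle(Method::PUT, &params).unwrap().status().is_success());

    let get = handler.handle(Method::GET, &HashMap::new()).unwrap();
    assert_eq!(get.body().as_str(), "maintenance=true");

    let bad = handler.handle(Method::DELETE, &HashMap::new()).unwrap();
    assert_eq!(bad.status(), StatusCode::METHOD_NOT_ALLOWED);
}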
6373bb04f990a926824807a70c6f7a728cc9c1c2
|
2022-11-04 11:52:31
|
LFC
|
fix: insert negative values (#383)
| false
|
diff --git a/src/datanode/src/sql/insert.rs b/src/datanode/src/sql/insert.rs
index b7fbfe39bc98..cbc6f201b7a9 100644
--- a/src/datanode/src/sql/insert.rs
+++ b/src/datanode/src/sql/insert.rs
@@ -34,7 +34,7 @@ impl SqlHandler {
stmt: Insert,
) -> Result<SqlRequest> {
let columns = stmt.columns();
- let values = stmt.values();
+ let values = stmt.values().context(ParseSqlValueSnafu)?;
//TODO(dennis): table name may be in the form of `catalog.schema.table`,
// but we don't process it right now.
let table_name = stmt.table_name();
diff --git a/src/sql/src/statements/insert.rs b/src/sql/src/statements/insert.rs
index ad9064fd0b37..5ab89c7a3f90 100644
--- a/src/sql/src/statements/insert.rs
+++ b/src/sql/src/statements/insert.rs
@@ -1,7 +1,8 @@
-use sqlparser::ast::{SetExpr, Statement, Values};
+use sqlparser::ast::{SetExpr, Statement, UnaryOperator, Values};
use sqlparser::parser::ParserError;
use crate::ast::{Expr, Value};
+use crate::error::{self, Result};
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Insert {
@@ -27,34 +28,59 @@ impl Insert {
}
}
- pub fn values(&self) -> Vec<Vec<Value>> {
- match &self.inner {
+ pub fn values(&self) -> Result<Vec<Vec<Value>>> {
+ let values = match &self.inner {
Statement::Insert { source, .. } => match &source.body {
- SetExpr::Values(Values(values)) => values
- .iter()
- .map(|v| {
- v.iter()
- .map(|expr| match expr {
- Expr::Value(v) => v.clone(),
- Expr::Identifier(ident) => {
- Value::SingleQuotedString(ident.value.clone())
- }
- _ => unreachable!(),
- })
- .collect::<Vec<Value>>()
- })
- .collect(),
+ SetExpr::Values(Values(exprs)) => sql_exprs_to_values(exprs)?,
_ => unreachable!(),
},
_ => unreachable!(),
+ };
+ Ok(values)
+ }
+}
+
+fn sql_exprs_to_values(exprs: &Vec<Vec<Expr>>) -> Result<Vec<Vec<Value>>> {
+ let mut values = Vec::with_capacity(exprs.len());
+ for es in exprs.iter() {
+ let mut vs = Vec::with_capacity(es.len());
+ for expr in es.iter() {
+ vs.push(match expr {
+ Expr::Value(v) => v.clone(),
+ Expr::Identifier(ident) => Value::SingleQuotedString(ident.value.clone()),
+ Expr::UnaryOp { op, expr }
+ if matches!(op, UnaryOperator::Minus | UnaryOperator::Plus) =>
+ {
+ if let Expr::Value(Value::Number(s, b)) = &**expr {
+ match op {
+ UnaryOperator::Minus => Value::Number(format!("-{}", s), *b),
+ UnaryOperator::Plus => Value::Number(s.to_string(), *b),
+ _ => unreachable!(),
+ }
+ } else {
+ return error::ParseSqlValueSnafu {
+ msg: format!("{:?}", expr),
+ }
+ .fail();
+ }
+ }
+ _ => {
+ return error::ParseSqlValueSnafu {
+ msg: format!("{:?}", expr),
+ }
+ .fail()
+ }
+ });
}
+ values.push(vs);
}
+ Ok(values)
}
impl TryFrom<Statement> for Insert {
type Error = ParserError;
- fn try_from(value: Statement) -> Result<Self, Self::Error> {
+ fn try_from(value: Statement) -> std::result::Result<Self, Self::Error> {
match value {
Statement::Insert { .. } => Ok(Insert { inner: value }),
unexp => Err(ParserError::ParserError(format!(
@@ -78,7 +104,37 @@ mod tests {
let mut stmts = ParserContext::create_with_dialect(sql, &GenericDialect {}).unwrap();
assert_eq!(1, stmts.len());
let insert = stmts.pop().unwrap();
- let r: Result<Statement, ParserError> = insert.try_into();
- r.unwrap();
+ let _stmt: Statement = insert.try_into().unwrap();
+ }
+
+ #[test]
+ fn test_insert_value_with_unary_op() {
+ use crate::statements::statement::Statement;
+
+ // insert "-1"
+ let sql = "INSERT INTO my_table VALUES(-1)";
+ let stmt = ParserContext::create_with_dialect(sql, &GenericDialect {})
+ .unwrap()
+ .remove(0);
+ match stmt {
+ Statement::Insert(insert) => {
+ let values = insert.values().unwrap();
+ assert_eq!(values, vec![vec![Value::Number("-1".to_string(), false)]]);
+ }
+ _ => unreachable!(),
+ }
+
+ // insert "+1"
+ let sql = "INSERT INTO my_table VALUES(+1)";
+ let stmt = ParserContext::create_with_dialect(sql, &GenericDialect {})
+ .unwrap()
+ .remove(0);
+ match stmt {
+ Statement::Insert(insert) => {
+ let values = insert.values().unwrap();
+ assert_eq!(values, vec![vec![Value::Number("1".to_string(), false)]]);
+ }
+ _ => unreachable!(),
+ }
}
}
|
fix
|
insert negative values (#383)
|
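The fix above works because SQL parsers represent `-1` as a unary-minus expression wrapping the literal `1`, so the insert path must fold the sign back into the number instead of hitting `unreachable!()`. A self-contained sketch of that sign-folding step, using simplified stand-ins for sqlparser's `Expr`, `Value`, and `UnaryOperator` types:

// Simplified stand-ins for sqlparser's Expr / Value / UnaryOperator types.
#[derive(Debug, Clone, PartialEq)]
enum Value {
    Number(String),
}

#[derive(Debug, Clone)]
enum Expr {
    Value(Value),
    UnaryOp { op: UnaryOperator, expr: Box<Expr> },
}

#[derive(Debug, Clone, Copy)]
enum UnaryOperator {
    Plus,
    Minus,
}

// Fold a leading +/- into the numeric literal instead of treating the unary
// expression as unreachable, which is what the fix above does.
fn expr_to_value(expr: &Expr) -> Result<Value, String> {
    match expr {
        Expr::Value(v) => Ok(v.clone()),
        Expr::UnaryOp { op, expr } => match (&**expr, op) {
            (Expr::Value(Value::Number(s)), UnaryOperator::Minus) => {
                Ok(Value::Number(format!("-{s}")))
            }
            (Expr::Value(Value::Number(s)), UnaryOperator::Plus) => Ok(Value::Number(s.clone())),
            _ => Err(format!("failed to parse value from {expr:?}")),
        },
    }
}

fn main() {
    let minus_one = Expr::UnaryOp {
        op: UnaryOperator::Minus,
        expr: Box::new(Expr::Value(Value::Number("1".to_string()))),
    };
    assert_eq!(expr_to_value(&minus_one).unwrap(), Value::Number("-1".into()));

    let plus_one = Expr::UnaryOp {
        op: UnaryOperator::Plus,
        expr: Box::new(Expr::Value(Value::Number("1".to_string()))),
    };
    assert_eq!(expr_to_value(&plus_one).unwrap(), Value::Number("1".into()));
}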
7746e5b172cd4bda19bf109ed4ddd7769bcd3e57
|
2023-08-24 12:28:05
|
JeremyHi
|
feat: dist row inserter (#2231)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index e714d0e15485..c7ee96b45fbe 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4152,7 +4152,7 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"
[[package]]
name = "greptime-proto"
version = "0.1.0"
-source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=c30a2607be4044502094b25c408171a666a8ff6d#c30a2607be4044502094b25c408171a666a8ff6d"
+source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=3489b4742150abe0a769faf1bb60fbb95b061fc8#3489b4742150abe0a769faf1bb60fbb95b061fc8"
dependencies = [
"prost",
"serde",
diff --git a/Cargo.toml b/Cargo.toml
index 6e17a0fcc0a2..92b89faa6753 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -77,7 +77,7 @@ datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git
derive_builder = "0.12"
futures = "0.3"
futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "c30a2607be4044502094b25c408171a666a8ff6d" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "3489b4742150abe0a769faf1bb60fbb95b061fc8" }
itertools = "0.10"
lazy_static = "1.4"
once_cell = "1.18"
diff --git a/src/client/src/database.rs b/src/client/src/database.rs
index 90228ba55987..b2c2037ae1e2 100644
--- a/src/client/src/database.rs
+++ b/src/client/src/database.rs
@@ -19,7 +19,7 @@ use api::v1::query_request::Query;
use api::v1::{
AlterExpr, AuthHeader, CompactTableExpr, CreateTableExpr, DdlRequest, DeleteRequests,
DropTableExpr, FlushTableExpr, GreptimeRequest, InsertRequests, PromRangeQuery, QueryRequest,
- RequestHeader, TruncateTableExpr,
+ RequestHeader, RowInsertRequests, TruncateTableExpr,
};
use arrow_flight::Ticket;
use async_stream::stream;
@@ -115,6 +115,11 @@ impl Database {
self.handle(Request::Inserts(requests)).await
}
+ pub async fn row_insert(&self, requests: RowInsertRequests) -> Result<u32> {
+ let _timer = timer!(metrics::METRIC_GRPC_INSERT);
+ self.handle(Request::RowInserts(requests)).await
+ }
+
pub fn streaming_inserter(&self) -> Result<StreamInserter> {
self.streaming_inserter_with_channel_size(65536)
}
diff --git a/src/client/src/stream_insert.rs b/src/client/src/stream_insert.rs
index 0701490101cf..e2bfb28760fa 100644
--- a/src/client/src/stream_insert.rs
+++ b/src/client/src/stream_insert.rs
@@ -16,6 +16,7 @@ use api::v1::greptime_database_client::GreptimeDatabaseClient;
use api::v1::greptime_request::Request;
use api::v1::{
AuthHeader, GreptimeRequest, GreptimeResponse, InsertRequest, InsertRequests, RequestHeader,
+ RowInsertRequest, RowInsertRequests,
};
use tokio::sync::mpsc;
use tokio::task::JoinHandle;
@@ -84,6 +85,18 @@ impl StreamInserter {
})
}
+ pub async fn row_insert(&self, requests: Vec<RowInsertRequest>) -> Result<()> {
+ let inserts = RowInsertRequests { inserts: requests };
+ let request = self.to_rpc_request(Request::RowInserts(inserts));
+
+ self.sender.send(request).await.map_err(|e| {
+ error::ClientStreamingSnafu {
+ err_msg: e.to_string(),
+ }
+ .build()
+ })
+ }
+
pub async fn finish(self) -> Result<u32> {
drop(self.sender);
diff --git a/src/frontend/src/instance/distributed.rs b/src/frontend/src/instance/distributed.rs
index 35ddef81f4c9..6fed3bc65e40 100644
--- a/src/frontend/src/instance/distributed.rs
+++ b/src/frontend/src/instance/distributed.rs
@@ -14,6 +14,7 @@
pub mod deleter;
pub(crate) mod inserter;
+pub(crate) mod row_inserter;
use std::collections::HashMap;
use std::sync::Arc;
@@ -23,7 +24,7 @@ use api::v1::ddl_request::Expr as DdlExpr;
use api::v1::greptime_request::Request;
use api::v1::{
column_def, AlterExpr, CompactTableExpr, CreateDatabaseExpr, CreateTableExpr, DeleteRequests,
- FlushTableExpr, InsertRequests, TruncateTableExpr,
+ FlushTableExpr, InsertRequests, RowInsertRequests, TruncateTableExpr,
};
use async_trait::async_trait;
use catalog::{CatalogManager, DeregisterTableRequest, RegisterTableRequest};
@@ -72,6 +73,7 @@ use crate::error::{
use crate::expr_factory;
use crate::instance::distributed::deleter::DistDeleter;
use crate::instance::distributed::inserter::DistInserter;
+use crate::instance::distributed::row_inserter::RowDistInserter;
use crate::table::DistTable;
const MAX_VALUE: &str = "MAXVALUE";
@@ -624,6 +626,20 @@ impl DistInstance {
Ok(Output::AffectedRows(affected_rows as usize))
}
+ async fn handle_row_dist_insert(
+ &self,
+ requests: RowInsertRequests,
+ ctx: QueryContextRef,
+ ) -> Result<Output> {
+ let inserter = RowDistInserter::new(
+ ctx.current_catalog().to_owned(),
+ ctx.current_schema().to_owned(),
+ self.catalog_manager.clone(),
+ );
+ let affected_rows = inserter.insert(requests).await?;
+ Ok(Output::AffectedRows(affected_rows as usize))
+ }
+
async fn handle_dist_delete(
&self,
request: DeleteRequests,
@@ -664,8 +680,9 @@ impl GrpcQueryHandler for DistInstance {
async fn do_query(&self, request: Request, ctx: QueryContextRef) -> Result<Output> {
match request {
Request::Inserts(requests) => self.handle_dist_insert(requests, ctx).await,
- Request::RowInserts(_) | Request::RowDeletes(_) => NotSupportedSnafu {
- feat: "row inserts/deletes",
+ Request::RowInserts(requests) => self.handle_row_dist_insert(requests, ctx).await,
+ Request::RowDeletes(_) => NotSupportedSnafu {
+ feat: "row deletes",
}
.fail(),
Request::Deletes(requests) => self.handle_dist_delete(requests, ctx).await,
diff --git a/src/frontend/src/instance/distributed/row_inserter.rs b/src/frontend/src/instance/distributed/row_inserter.rs
new file mode 100644
index 000000000000..4eabb21de96f
--- /dev/null
+++ b/src/frontend/src/instance/distributed/row_inserter.rs
@@ -0,0 +1,125 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::collections::HashMap;
+use std::sync::Arc;
+
+use api::v1::RowInsertRequests;
+use catalog::CatalogManager;
+use client::Database;
+use common_meta::peer::Peer;
+use futures_util::future;
+use metrics::counter;
+use snafu::{OptionExt, ResultExt};
+use table::metadata::TableId;
+
+use crate::catalog::FrontendCatalogManager;
+use crate::error::{
+ CatalogSnafu, FindDatanodeSnafu, FindTableRouteSnafu, JoinTaskSnafu, RequestDatanodeSnafu,
+ Result, SplitInsertSnafu, TableNotFoundSnafu,
+};
+
+pub struct RowDistInserter {
+ catalog_name: String,
+ schema_name: String,
+ catalog_manager: Arc<FrontendCatalogManager>,
+}
+
+impl RowDistInserter {
+ pub fn new(
+ catalog_name: String,
+ schema_name: String,
+ catalog_manager: Arc<FrontendCatalogManager>,
+ ) -> Self {
+ Self {
+ catalog_name,
+ schema_name,
+ catalog_manager,
+ }
+ }
+
+ pub(crate) async fn insert(&self, requests: RowInsertRequests) -> Result<u32> {
+ let requests = self.split(requests).await?;
+ let results = future::try_join_all(requests.into_iter().map(|(peer, inserts)| {
+ let datanode_clients = self.catalog_manager.datanode_clients();
+ let catalog = self.catalog_name.clone();
+ let schema = self.schema_name.clone();
+
+ common_runtime::spawn_write(async move {
+ let client = datanode_clients.get_client(&peer).await;
+ let database = Database::new(catalog, schema, client);
+ database
+ .row_insert(inserts)
+ .await
+ .context(RequestDatanodeSnafu)
+ })
+ }))
+ .await
+ .context(JoinTaskSnafu)?;
+
+ let affected_rows = results.into_iter().sum::<Result<u32>>()?;
+ counter!(crate::metrics::DIST_INGEST_ROW_COUNT, affected_rows as u64);
+ Ok(affected_rows)
+ }
+
+ async fn split(&self, requests: RowInsertRequests) -> Result<HashMap<Peer, RowInsertRequests>> {
+ let partition_manager = self.catalog_manager.partition_manager();
+ let mut inserts: HashMap<Peer, RowInsertRequests> = HashMap::new();
+
+ for req in requests.inserts {
+ let table_name = req.table_name.clone();
+ let table_id = self.get_table_id(table_name.as_str()).await?;
+
+ let req_splits = partition_manager
+ .split_row_insert_request(table_id, req)
+ .await
+ .context(SplitInsertSnafu)?;
+ let table_route = partition_manager
+ .find_table_route(table_id)
+ .await
+ .context(FindTableRouteSnafu { table_name })?;
+
+ for (region_number, insert) in req_splits {
+ let peer =
+ table_route
+ .find_region_leader(region_number)
+ .context(FindDatanodeSnafu {
+ region: region_number,
+ })?;
+ inserts
+ .entry(peer.clone())
+ .or_default()
+ .inserts
+ .push(insert);
+ }
+ }
+
+ Ok(inserts)
+ }
+
+ async fn get_table_id(&self, table_name: &str) -> Result<TableId> {
+ self.catalog_manager
+ .table(&self.catalog_name, &self.schema_name, table_name)
+ .await
+ .context(CatalogSnafu)?
+ .with_context(|| TableNotFoundSnafu {
+ table_name: common_catalog::format_full_table_name(
+ &self.catalog_name,
+ &self.schema_name,
+ table_name,
+ ),
+ })
+ .map(|table| table.table_info().table_id())
+ }
+}
diff --git a/src/frontend/src/instance/influxdb.rs b/src/frontend/src/instance/influxdb.rs
index ed2088ae9642..36276b71e2e8 100644
--- a/src/frontend/src/instance/influxdb.rs
+++ b/src/frontend/src/instance/influxdb.rs
@@ -38,7 +38,7 @@ impl InfluxdbLineProtocolHandler for Instance {
let requests = request.try_into()?;
let _ = self
- .handle_inserts(requests, ctx)
+ .handle_row_inserts(requests, ctx)
.await
.map_err(BoxedError::new)
.context(servers::error::ExecuteGrpcQuerySnafu)?;
diff --git a/src/partition/src/lib.rs b/src/partition/src/lib.rs
index 9dba350441b8..dd05b7f6d812 100644
--- a/src/partition/src/lib.rs
+++ b/src/partition/src/lib.rs
@@ -21,6 +21,7 @@ pub mod metrics;
pub mod partition;
pub mod range;
pub mod route;
+pub mod row_splitter;
pub mod splitter;
pub use crate::partition::{PartitionRule, PartitionRuleRef};
diff --git a/src/partition/src/manager.rs b/src/partition/src/manager.rs
index e6f3021d5f6a..36353b13f334 100644
--- a/src/partition/src/manager.rs
+++ b/src/partition/src/manager.rs
@@ -15,6 +15,7 @@
use std::collections::{HashMap, HashSet};
use std::sync::Arc;
+use api::v1::RowInsertRequest;
use common_meta::peer::Peer;
use common_meta::rpc::router::TableRoute;
use common_query::prelude::Expr;
@@ -31,6 +32,7 @@ use crate::error::{FindLeaderSnafu, Result};
use crate::partition::{PartitionBound, PartitionDef, PartitionExpr};
use crate::range::RangePartitionRule;
use crate::route::TableRoutes;
+use crate::row_splitter::{RowInsertRequestSplits, RowSplitter};
use crate::splitter::{DeleteRequestSplit, InsertRequestSplit, WriteSplitter};
use crate::{error, PartitionRuleRef};
@@ -247,6 +249,17 @@ impl PartitionRuleManager {
splitter.split_insert(req, schema)
}
+ /// Split [RowInsertRequest] into [RowInsertRequestSplits] according to the partition rule
+ /// of given table.
+ pub async fn split_row_insert_request(
+ &self,
+ table: TableId,
+ req: RowInsertRequest,
+ ) -> Result<RowInsertRequestSplits> {
+ let partition_rule = self.find_table_partition_rule(table).await?;
+ RowSplitter::new(partition_rule).split(req)
+ }
+
pub async fn split_delete_request(
&self,
table: TableId,
diff --git a/src/partition/src/row_splitter.rs b/src/partition/src/row_splitter.rs
new file mode 100644
index 000000000000..e3f0dda83b55
--- /dev/null
+++ b/src/partition/src/row_splitter.rs
@@ -0,0 +1,322 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::collections::HashMap;
+
+use api::helper;
+use api::v1::{ColumnSchema, Row, RowInsertRequest, Rows};
+use datatypes::value::Value;
+use store_api::storage::RegionNumber;
+
+use crate::error::Result;
+use crate::PartitionRuleRef;
+
+pub type RowInsertRequestSplits = HashMap<RegionNumber, RowInsertRequest>;
+
+pub struct RowSplitter {
+ partition_rule: PartitionRuleRef,
+}
+
+impl RowSplitter {
+ pub fn new(partition_rule: PartitionRuleRef) -> Self {
+ Self { partition_rule }
+ }
+
+ pub fn split(&self, req: RowInsertRequest) -> Result<RowInsertRequestSplits> {
+ // No partition
+ let partition_columns = self.partition_rule.partition_columns();
+ if partition_columns.is_empty() {
+ return Ok(HashMap::from([(0, req)]));
+ }
+
+ // No data
+ let Some(rows) = req.rows else {
+ return Ok(HashMap::new());
+ };
+
+ SplitReadRowHelper::new(req.table_name, rows, &self.partition_rule).split_to_requests()
+ }
+}
+
+struct SplitReadRowHelper<'a> {
+ table_name: String,
+ schema: Vec<ColumnSchema>,
+ rows: Vec<Row>,
+ partition_rule: &'a PartitionRuleRef,
+ // Map from partition column name to index in the schema/row.
+ partition_cols_indexes: Vec<Option<usize>>,
+}
+
+impl<'a> SplitReadRowHelper<'a> {
+ fn new(table_name: String, rows: Rows, partition_rule: &'a PartitionRuleRef) -> Self {
+ let col_name_to_idx = rows
+ .schema
+ .iter()
+ .enumerate()
+ .map(|(idx, col)| (&col.column_name, idx))
+ .collect::<HashMap<_, _>>();
+ let partition_cols = partition_rule.partition_columns();
+ let partition_cols_indexes = partition_cols
+ .into_iter()
+ .map(|col_name| col_name_to_idx.get(&col_name).cloned())
+ .collect::<Vec<_>>();
+
+ Self {
+ table_name,
+ schema: rows.schema,
+ rows: rows.rows,
+ partition_rule,
+ partition_cols_indexes,
+ }
+ }
+
+ fn split_to_requests(mut self) -> Result<RowInsertRequestSplits> {
+ let request_splits = self
+ .split_to_regions()?
+ .into_iter()
+ .map(|(region_number, row_indexes)| {
+ let rows = row_indexes
+ .into_iter()
+ .map(|row_idx| std::mem::take(&mut self.rows[row_idx]))
+ .collect();
+ let req = RowInsertRequest {
+ table_name: self.table_name.clone(),
+ rows: Some(Rows {
+ schema: self.schema.clone(),
+ rows,
+ }),
+ region_number,
+ };
+ (region_number, req)
+ })
+ .collect::<HashMap<_, _>>();
+
+ Ok(request_splits)
+ }
+
+ fn split_to_regions(&self) -> Result<HashMap<RegionNumber, Vec<usize>>> {
+ let mut regions_row_indexes: HashMap<RegionNumber, Vec<usize>> = HashMap::new();
+ for (row_idx, values) in self.iter_partition_values().enumerate() {
+ let region_number = self.partition_rule.find_region(&values)?;
+ regions_row_indexes
+ .entry(region_number)
+ .or_default()
+ .push(row_idx);
+ }
+
+ Ok(regions_row_indexes)
+ }
+
+ fn iter_partition_values(&'a self) -> impl Iterator<Item = Vec<Value>> + 'a {
+ self.rows.iter().map(|row| {
+ self.partition_cols_indexes
+ .iter()
+ .map(|idx| {
+ idx.as_ref().map_or(Value::Null, |idx| {
+ helper::pb_value_to_value_ref(&row.values[*idx]).into()
+ })
+ })
+ .collect()
+ })
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::any::Any;
+ use std::sync::Arc;
+
+ use api::v1::value::ValueData;
+ use api::v1::{ColumnDataType, SemanticType};
+ use serde::{Deserialize, Serialize};
+
+ use super::*;
+ use crate::partition::PartitionExpr;
+ use crate::PartitionRule;
+
+ fn mock_insert_request() -> RowInsertRequest {
+ let schema = vec![
+ ColumnSchema {
+ column_name: "id".to_string(),
+ datatype: ColumnDataType::String as i32,
+ semantic_type: SemanticType::Tag as i32,
+ },
+ ColumnSchema {
+ column_name: "name".to_string(),
+ datatype: ColumnDataType::String as i32,
+ semantic_type: SemanticType::Tag as i32,
+ },
+ ColumnSchema {
+ column_name: "age".to_string(),
+ datatype: ColumnDataType::Uint32 as i32,
+ semantic_type: SemanticType::Field as i32,
+ },
+ ];
+ let rows = vec![
+ Row {
+ values: vec![
+ ValueData::StringValue("1".to_string()).into(),
+ ValueData::StringValue("Smith".to_string()).into(),
+ ValueData::U32Value(20).into(),
+ ],
+ },
+ Row {
+ values: vec![
+ ValueData::StringValue("2".to_string()).into(),
+ ValueData::StringValue("Johnson".to_string()).into(),
+ ValueData::U32Value(21).into(),
+ ],
+ },
+ Row {
+ values: vec![
+ ValueData::StringValue("3".to_string()).into(),
+ ValueData::StringValue("Williams".to_string()).into(),
+ ValueData::U32Value(22).into(),
+ ],
+ },
+ ];
+ RowInsertRequest {
+ table_name: "t".to_string(),
+ rows: Some(Rows { schema, rows }),
+ region_number: 0,
+ }
+ }
+
+ #[derive(Debug, Serialize, Deserialize)]
+ struct MockPartitionRule;
+
+ impl PartitionRule for MockPartitionRule {
+ fn as_any(&self) -> &dyn Any {
+ self
+ }
+
+ fn partition_columns(&self) -> Vec<String> {
+ vec!["id".to_string()]
+ }
+
+ fn find_region(&self, values: &[Value]) -> Result<RegionNumber> {
+ let val = values.get(0).unwrap().clone();
+ let val = match val {
+ Value::String(v) => v.as_utf8().to_string(),
+ _ => unreachable!(),
+ };
+
+ Ok(val.parse::<u32>().unwrap() % 2)
+ }
+
+ fn find_regions_by_exprs(&self, _: &[PartitionExpr]) -> Result<Vec<RegionNumber>> {
+ unimplemented!()
+ }
+ }
+
+ #[derive(Debug, Serialize, Deserialize)]
+ struct MockMissedColPartitionRule;
+
+ impl PartitionRule for MockMissedColPartitionRule {
+ fn as_any(&self) -> &dyn Any {
+ self
+ }
+
+ fn partition_columns(&self) -> Vec<String> {
+ vec!["missed_col".to_string()]
+ }
+
+ fn find_region(&self, values: &[Value]) -> Result<RegionNumber> {
+ let val = values.get(0).unwrap().clone();
+ let val = match val {
+ Value::Null => 1,
+ _ => 0,
+ };
+
+ Ok(val)
+ }
+
+ fn find_regions_by_exprs(&self, _: &[PartitionExpr]) -> Result<Vec<RegionNumber>> {
+ unimplemented!()
+ }
+ }
+
+ #[derive(Debug, Serialize, Deserialize)]
+ struct EmptyPartitionRule;
+
+ impl PartitionRule for EmptyPartitionRule {
+ fn as_any(&self) -> &dyn Any {
+ self
+ }
+
+ fn partition_columns(&self) -> Vec<String> {
+ vec![]
+ }
+
+ fn find_region(&self, _values: &[Value]) -> Result<RegionNumber> {
+ Ok(0)
+ }
+
+ fn find_regions_by_exprs(&self, _: &[PartitionExpr]) -> Result<Vec<RegionNumber>> {
+ unimplemented!()
+ }
+ }
+
+ #[test]
+ fn test_writer_splitter() {
+ let insert_request = mock_insert_request();
+ let rule = Arc::new(MockPartitionRule) as PartitionRuleRef;
+ let splitter = RowSplitter::new(rule);
+ let splits = splitter.split(insert_request).unwrap();
+
+ assert_eq!(splits.len(), 2);
+
+ let req0 = &splits[&0];
+ let req1 = &splits[&1];
+ assert_eq!(req0.region_number, 0);
+ assert_eq!(req1.region_number, 1);
+
+ let rows0 = req0.rows.as_ref().unwrap();
+ let rows1 = req1.rows.as_ref().unwrap();
+ assert_eq!(rows0.rows.len(), 1);
+ assert_eq!(rows1.rows.len(), 2);
+ }
+
+ #[test]
+ fn test_missed_col_writer_splitter() {
+ let insert_request = mock_insert_request();
+ let rule = Arc::new(MockMissedColPartitionRule) as PartitionRuleRef;
+ let splitter = RowSplitter::new(rule);
+ let splits = splitter.split(insert_request).unwrap();
+
+ assert_eq!(splits.len(), 1);
+
+ let req = &splits[&1];
+ assert_eq!(req.region_number, 1);
+
+ let rows = req.rows.as_ref().unwrap();
+ assert_eq!(rows.rows.len(), 3);
+ }
+
+ #[test]
+ fn test_empty_partition_rule_writer_splitter() {
+ let insert_request = mock_insert_request();
+ let rule = Arc::new(EmptyPartitionRule) as PartitionRuleRef;
+ let splitter = RowSplitter::new(rule);
+ let splits = splitter.split(insert_request).unwrap();
+
+ assert_eq!(splits.len(), 1);
+
+ let req = &splits[&0];
+ assert_eq!(req.region_number, 0);
+
+ let rows = req.rows.as_ref().unwrap();
+ assert_eq!(rows.rows.len(), 3);
+ }
+}
diff --git a/src/servers/src/influxdb.rs b/src/servers/src/influxdb.rs
index 82dd83239354..1ad648d83f70 100644
--- a/src/servers/src/influxdb.rs
+++ b/src/servers/src/influxdb.rs
@@ -234,7 +234,7 @@ fn parse_tags<'a>(
datatype: ColumnDataType::String as i32,
semantic_type: SemanticType::Tag as i32,
});
- one_row.push(to_value(ValueData::StringValue(v.to_string())));
+ one_row.push(ValueData::StringValue(v.to_string()).into());
} else {
check_schema(ColumnDataType::String, SemanticType::Tag, &schema[*index])?;
one_row[*index].value_data = Some(ValueData::StringValue(v.to_string()));
@@ -269,7 +269,7 @@ fn parse_fields<'a>(
datatype: datatype as i32,
semantic_type: SemanticType::Field as i32,
});
- one_row.push(to_value(value));
+ one_row.push(value.into());
} else {
check_schema(datatype, SemanticType::Field, &schema[*index])?;
one_row[*index].value_data = Some(value);
@@ -309,7 +309,7 @@ fn parse_ts(
datatype: ColumnDataType::TimestampMillisecond as i32,
semantic_type: SemanticType::Timestamp as i32,
});
- one_row.push(to_value(ValueData::TsMillisecondValue(ts)))
+ one_row.push(ValueData::TsMillisecondValue(ts).into())
} else {
check_schema(
ColumnDataType::TimestampMillisecond,
@@ -351,14 +351,6 @@ fn check_schema(
Ok(())
}
-// TODO(jeremy): impl From<ValueData> for Value
-#[inline]
-fn to_value(value: ValueData) -> Value {
- Value {
- value_data: Some(value),
- }
-}
-
#[inline]
fn unwrap_or_default_precision(precision: Option<Precision>) -> Precision {
if let Some(val) = precision {
|
feat
|
dist row inserter (#2231)
|
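The core of the row inserter above is splitting a batch of rows into per-region sub-requests before fanning them out to datanodes. A minimal sketch of that grouping step, with a closure standing in for `PartitionRule::find_region` and a trimmed-down `Row` type:

use std::collections::HashMap;

type RegionNumber = u32;

// Trimmed-down stand-in for api::v1::Row; the real splitter also carries the
// column schema and rebuilds a RowInsertRequest per region.
#[derive(Debug)]
struct Row {
    values: Vec<String>,
}

// Group rows into one batch per region, keyed by whatever the partition rule
// returns for each row (compare SplitReadRowHelper::split_to_regions above).
fn split_rows<F>(rows: Vec<Row>, find_region: F) -> HashMap<RegionNumber, Vec<Row>>
where
    F: Fn(&Row) -> RegionNumber,
{
    let mut splits: HashMap<RegionNumber, Vec<Row>> = HashMap::new();
    for row in rows {
        splits.entry(find_region(&row)).or_default().push(row);
    }
    splits
}

fn main() {
    let rows = vec![
        Row { values: vec!["1".into(), "Smith".into()] },
        Row { values: vec!["2".into(), "Johnson".into()] },
        Row { values: vec!["3".into(), "Williams".into()] },
    ];

    // Partition rule: route by id parity, like MockPartitionRule in the tests.
    let splits = split_rows(rows, |row| row.values[0].parse::<u32>().unwrap() % 2);

    assert_eq!(splits[&0].len(), 1); // id 2
    assert_eq!(splits[&1].len(), 2); // ids 1 and 3
}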
c049ce6ab1ae26b61bb7ef0a029f119a6d2d63d6
|
2024-11-29 16:48:02
|
Ruihang Xia
|
feat: add decolorize processor (#5065)
| false
|
diff --git a/src/pipeline/src/etl/processor.rs b/src/pipeline/src/etl/processor.rs
index 110e3e8736fb..6b00b19793ef 100644
--- a/src/pipeline/src/etl/processor.rs
+++ b/src/pipeline/src/etl/processor.rs
@@ -15,6 +15,7 @@
pub mod cmcd;
pub mod csv;
pub mod date;
+pub mod decolorize;
pub mod dissect;
pub mod epoch;
pub mod gsub;
@@ -29,6 +30,7 @@ use ahash::{HashSet, HashSetExt};
use cmcd::{CmcdProcessor, CmcdProcessorBuilder};
use csv::{CsvProcessor, CsvProcessorBuilder};
use date::{DateProcessor, DateProcessorBuilder};
+use decolorize::{DecolorizeProcessor, DecolorizeProcessorBuilder};
use dissect::{DissectProcessor, DissectProcessorBuilder};
use enum_dispatch::enum_dispatch;
use epoch::{EpochProcessor, EpochProcessorBuilder};
@@ -61,11 +63,6 @@ const TARGET_FIELDS_NAME: &str = "target_fields";
const JSON_PATH_NAME: &str = "json_path";
const JSON_PATH_RESULT_INDEX_NAME: &str = "result_index";
-// const IF_NAME: &str = "if";
-// const IGNORE_FAILURE_NAME: &str = "ignore_failure";
-// const ON_FAILURE_NAME: &str = "on_failure";
-// const TAG_NAME: &str = "tag";
-
/// Processor trait defines the interface for all processors.
///
/// A processor is a transformation that can be applied to a field in a document
@@ -99,6 +96,7 @@ pub enum ProcessorKind {
Epoch(EpochProcessor),
Date(DateProcessor),
JsonPath(JsonPathProcessor),
+ Decolorize(DecolorizeProcessor),
}
/// ProcessorBuilder trait defines the interface for all processor builders
@@ -128,6 +126,7 @@ pub enum ProcessorBuilders {
Epoch(EpochProcessorBuilder),
Date(DateProcessorBuilder),
JsonPath(JsonPathProcessorBuilder),
+ Decolorize(DecolorizeProcessorBuilder),
}
#[derive(Debug, Default)]
@@ -275,6 +274,9 @@ fn parse_processor(doc: &yaml_rust::Yaml) -> Result<ProcessorBuilders> {
json_path::PROCESSOR_JSON_PATH => {
ProcessorBuilders::JsonPath(json_path::JsonPathProcessorBuilder::try_from(value)?)
}
+ decolorize::PROCESSOR_DECOLORIZE => {
+ ProcessorBuilders::Decolorize(DecolorizeProcessorBuilder::try_from(value)?)
+ }
_ => return UnsupportedProcessorSnafu { processor: str_key }.fail(),
};
diff --git a/src/pipeline/src/etl/processor/cmcd.rs b/src/pipeline/src/etl/processor/cmcd.rs
index 06cfeb7c6905..086fe8f3d610 100644
--- a/src/pipeline/src/etl/processor/cmcd.rs
+++ b/src/pipeline/src/etl/processor/cmcd.rs
@@ -12,6 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//! Pipeline Processor for CMCD (Common Media Client Data) data.
+//!
+//! Refer to [`CmcdProcessor`] for more information.
+
use std::collections::BTreeMap;
use ahash::HashSet;
diff --git a/src/pipeline/src/etl/processor/decolorize.rs b/src/pipeline/src/etl/processor/decolorize.rs
new file mode 100644
index 000000000000..e72bc28a1e66
--- /dev/null
+++ b/src/pipeline/src/etl/processor/decolorize.rs
@@ -0,0 +1,195 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Removes ANSI color control codes from the input text.
+//!
+//! Similar to [`decolorize`](https://grafana.com/docs/loki/latest/query/log_queries/#removing-color-codes)
+//! from Grafana Loki and [`strip_ansi_escape_codes`](https://vector.dev/docs/reference/vrl/functions/#strip_ansi_escape_codes)
+//! from Vector VRL.
+
+use ahash::HashSet;
+use once_cell::sync::Lazy;
+use regex::Regex;
+use snafu::OptionExt;
+
+use crate::etl::error::{
+ Error, KeyMustBeStringSnafu, ProcessorExpectStringSnafu, ProcessorMissingFieldSnafu, Result,
+};
+use crate::etl::field::{Fields, OneInputOneOutputField};
+use crate::etl::processor::{
+ yaml_bool, yaml_new_field, yaml_new_fields, ProcessorBuilder, ProcessorKind, FIELDS_NAME,
+ FIELD_NAME, IGNORE_MISSING_NAME,
+};
+use crate::etl::value::Value;
+
+pub(crate) const PROCESSOR_DECOLORIZE: &str = "decolorize";
+
+static RE: Lazy<Regex> = Lazy::new(|| Regex::new(r"\x1b\[[0-9;]*m").unwrap());
+
+#[derive(Debug, Default)]
+pub struct DecolorizeProcessorBuilder {
+ fields: Fields,
+ ignore_missing: bool,
+}
+
+impl ProcessorBuilder for DecolorizeProcessorBuilder {
+ fn output_keys(&self) -> HashSet<&str> {
+ self.fields
+ .iter()
+ .map(|f| f.target_or_input_field())
+ .collect()
+ }
+
+ fn input_keys(&self) -> HashSet<&str> {
+ self.fields.iter().map(|f| f.input_field()).collect()
+ }
+
+ fn build(self, intermediate_keys: &[String]) -> Result<ProcessorKind> {
+ self.build(intermediate_keys).map(ProcessorKind::Decolorize)
+ }
+}
+
+impl DecolorizeProcessorBuilder {
+ fn build(self, intermediate_keys: &[String]) -> Result<DecolorizeProcessor> {
+ let mut real_fields = vec![];
+ for field in self.fields.into_iter() {
+ let input = OneInputOneOutputField::build(
+ "decolorize",
+ intermediate_keys,
+ field.input_field(),
+ field.target_or_input_field(),
+ )?;
+ real_fields.push(input);
+ }
+ Ok(DecolorizeProcessor {
+ fields: real_fields,
+ ignore_missing: self.ignore_missing,
+ })
+ }
+}
+
+/// Remove ANSI color control codes from the input text.
+#[derive(Debug, Default)]
+pub struct DecolorizeProcessor {
+ fields: Vec<OneInputOneOutputField>,
+ ignore_missing: bool,
+}
+
+impl DecolorizeProcessor {
+ fn process_string(&self, val: &str) -> Result<Value> {
+ Ok(Value::String(RE.replace_all(val, "").into_owned()))
+ }
+
+ fn process(&self, val: &Value) -> Result<Value> {
+ match val {
+ Value::String(val) => self.process_string(val),
+ _ => ProcessorExpectStringSnafu {
+ processor: PROCESSOR_DECOLORIZE,
+ v: val.clone(),
+ }
+ .fail(),
+ }
+ }
+}
+
+impl TryFrom<&yaml_rust::yaml::Hash> for DecolorizeProcessorBuilder {
+ type Error = Error;
+
+ fn try_from(value: &yaml_rust::yaml::Hash) -> Result<Self> {
+ let mut fields = Fields::default();
+ let mut ignore_missing = false;
+
+ for (k, v) in value.iter() {
+ let key = k
+ .as_str()
+ .with_context(|| KeyMustBeStringSnafu { k: k.clone() })?;
+
+ match key {
+ FIELD_NAME => {
+ fields = Fields::one(yaml_new_field(v, FIELD_NAME)?);
+ }
+ FIELDS_NAME => {
+ fields = yaml_new_fields(v, FIELDS_NAME)?;
+ }
+ IGNORE_MISSING_NAME => {
+ ignore_missing = yaml_bool(v, IGNORE_MISSING_NAME)?;
+ }
+ _ => {}
+ }
+ }
+
+ Ok(DecolorizeProcessorBuilder {
+ fields,
+ ignore_missing,
+ })
+ }
+}
+
+impl crate::etl::processor::Processor for DecolorizeProcessor {
+ fn kind(&self) -> &str {
+ PROCESSOR_DECOLORIZE
+ }
+
+ fn ignore_missing(&self) -> bool {
+ self.ignore_missing
+ }
+
+ fn exec_mut(&self, val: &mut Vec<Value>) -> Result<()> {
+ for field in self.fields.iter() {
+ let index = field.input_index();
+ match val.get(index) {
+ Some(Value::Null) | None => {
+ if !self.ignore_missing {
+ return ProcessorMissingFieldSnafu {
+ processor: self.kind(),
+ field: field.input_name(),
+ }
+ .fail();
+ }
+ }
+ Some(v) => {
+ let result = self.process(v)?;
+ let output_index = field.output_index();
+ val[output_index] = result;
+ }
+ }
+ }
+ Ok(())
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_decolorize_processor() {
+ let processor = DecolorizeProcessor {
+ fields: vec![],
+ ignore_missing: false,
+ };
+
+ let val = Value::String("\x1b[32mGreen\x1b[0m".to_string());
+ let result = processor.process(&val).unwrap();
+ assert_eq!(result, Value::String("Green".to_string()));
+
+ let val = Value::String("Plain text".to_string());
+ let result = processor.process(&val).unwrap();
+ assert_eq!(result, Value::String("Plain text".to_string()));
+
+ let val = Value::String("\x1b[46mfoo\x1b[0m bar".to_string());
+ let result = processor.process(&val).unwrap();
+ assert_eq!(result, Value::String("foo bar".to_string()));
+ }
+}
diff --git a/src/pipeline/src/etl/processor/dissect.rs b/src/pipeline/src/etl/processor/dissect.rs
index 09c6fc93d069..a9ccf5e8735e 100644
--- a/src/pipeline/src/etl/processor/dissect.rs
+++ b/src/pipeline/src/etl/processor/dissect.rs
@@ -644,7 +644,6 @@ impl DissectProcessor {
let mut pos = 0;
let mut appends: HashMap<usize, Vec<(String, u32)>> = HashMap::new();
- // let mut maps: HashMap<usize, (String,String)> = HashMap::new();
let mut process_name_value = |name: &Name, value: String| {
let name_index = name.index;
@@ -658,22 +657,6 @@ impl DissectProcessor {
.or_default()
.push((value, order.unwrap_or_default()));
}
- // Some(StartModifier::MapKey) => match maps.get(&name_index) {
- // Some(map_val) => {
- // map.insert(value, Value::String(map_val.to_string()));
- // }
- // None => {
- // maps.insert(name_index, value);
- // }
- // },
- // Some(StartModifier::MapVal) => match maps.get(&name_index) {
- // Some(map_key) => {
- // map.insert(map_key, Value::String(value));
- // }
- // None => {
- // maps.insert(name_index, value);
- // }
- // },
Some(_) => {
// do nothing, ignore MapKey and MapVal
// because transform can know the key name
diff --git a/src/pipeline/src/etl/processor/gsub.rs b/src/pipeline/src/etl/processor/gsub.rs
index b5a328c6fa00..54c8306ec4de 100644
--- a/src/pipeline/src/etl/processor/gsub.rs
+++ b/src/pipeline/src/etl/processor/gsub.rs
@@ -132,10 +132,6 @@ impl GsubProcessor {
v: val.clone(),
}
.fail(),
- // Err(format!(
- // "{} processor: expect string or array string, but got {val:?}",
- // self.kind()
- // )),
}
}
}
diff --git a/src/pipeline/tests/pipeline.rs b/src/pipeline/tests/pipeline.rs
index d1df83273185..e68c7b9e6a6e 100644
--- a/src/pipeline/tests/pipeline.rs
+++ b/src/pipeline/tests/pipeline.rs
@@ -674,3 +674,36 @@ transform:
assert_eq!(expected, r);
}
+
+#[test]
+fn test_decolorize() {
+ let input_value = serde_json::json!({
+ "message": "\u{001b}[32mSuccess\u{001b}[0m and \u{001b}[31mError\u{001b}[0m"
+ });
+
+ let pipeline_yaml = r#"
+processors:
+ - decolorize:
+ fields:
+ - message
+transform:
+ - fields:
+ - message
+ type: string
+"#;
+ let yaml_content = Content::Yaml(pipeline_yaml.into());
+ let pipeline: Pipeline<GreptimeTransformer> = parse(&yaml_content).unwrap();
+
+ let mut status = pipeline.init_intermediate_state();
+ pipeline.prepare(input_value, &mut status).unwrap();
+ let row = pipeline.exec_mut(&mut status).unwrap();
+
+ let r = row
+ .values
+ .into_iter()
+ .map(|v| v.value_data.unwrap())
+ .collect::<Vec<_>>();
+
+ let expected = StringValue("Success and Error".into());
+ assert_eq!(expected, r[0]);
+}
|
feat
|
add decolorize processor (#5065)
|
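The decolorize processor above boils down to one regular expression applied to the input string. A standalone sketch of the same stripping logic, assuming the `regex` and `once_cell` crates already used in the patch:

// Assumed dependencies: regex = "1", once_cell = "1" (both used by the patch).
use once_cell::sync::Lazy;
use regex::Regex;

// Same pattern as the processor above: strip ANSI SGR color sequences
// ("ESC [ ... m") and leave everything else untouched.
static ANSI_COLOR: Lazy<Regex> = Lazy::new(|| Regex::new(r"\x1b\[[0-9;]*m").unwrap());

fn decolorize(input: &str) -> String {
    ANSI_COLOR.replace_all(input, "").into_owned()
}

fn main() {
    assert_eq!(decolorize("\x1b[32mGreen\x1b[0m"), "Green");
    assert_eq!(decolorize("\x1b[46mfoo\x1b[0m bar"), "foo bar");
    assert_eq!(decolorize("Plain text"), "Plain text");
}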
1fad67cf4dcc8ec843cb3ad166cb48787df31ac6
|
2022-11-03 09:25:22
|
fys
|
feat: grpc client support multi peers (#380)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 8cb1fd7661bd..ba78692f7fad 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -836,7 +836,10 @@ dependencies = [
"datafusion",
"datanode",
"datatypes",
+ "enum_dispatch",
+ "parking_lot",
"prost 0.9.0",
+ "rand 0.8.5",
"snafu",
"substrait 0.1.0",
"substrait 0.2.0",
@@ -1803,6 +1806,7 @@ dependencies = [
"client",
"common-base",
"common-error",
+ "common-grpc",
"common-query",
"common-recordbatch",
"common-runtime",
diff --git a/src/client/Cargo.toml b/src/client/Cargo.toml
index d54cddba420f..541b6d2dd426 100644
--- a/src/client/Cargo.toml
+++ b/src/client/Cargo.toml
@@ -17,6 +17,9 @@ datafusion = { git = "https://github.com/apache/arrow-datafusion.git", branch =
"simd",
] }
datatypes = { path = "../datatypes" }
+enum_dispatch = "0.3"
+parking_lot = "0.12"
+rand = "0.8"
snafu = { version = "0.7", features = ["backtraces"] }
tonic = "0.8"
diff --git a/src/client/examples/insert.rs b/src/client/examples/insert.rs
index e3e2544842dd..2755ec10b745 100644
--- a/src/client/examples/insert.rs
+++ b/src/client/examples/insert.rs
@@ -12,7 +12,7 @@ fn main() {
#[tokio::main]
async fn run() {
- let client = Client::connect("http://127.0.0.1:3001").await.unwrap();
+ let client = Client::with_urls(vec!["127.0.0.1:3001"]);
let db = Database::new("greptime", client);
let expr = InsertExpr {
diff --git a/src/client/examples/logical.rs b/src/client/examples/logical.rs
index 6b0f8233ccb4..44c967c08fb7 100644
--- a/src/client/examples/logical.rs
+++ b/src/client/examples/logical.rs
@@ -18,7 +18,7 @@ fn main() {
#[tokio::main]
async fn run() {
- let client = Client::connect("http://127.0.0.1:3001").await.unwrap();
+ let client = Client::with_urls(vec!["127.0.0.1:3001"]);
let create_table_expr = CreateExpr {
catalog_name: Some("greptime".to_string()),
diff --git a/src/client/examples/physical.rs b/src/client/examples/physical.rs
index 44b1e22f273c..1e866fd80fbc 100644
--- a/src/client/examples/physical.rs
+++ b/src/client/examples/physical.rs
@@ -16,7 +16,7 @@ fn main() {
#[tokio::main]
async fn run() {
- let client = Client::connect("http://127.0.0.1:3001").await.unwrap();
+ let client = Client::with_urls(vec!["127.0.0.1:3001"]);
let db = Database::new("greptime", client);
let physical = mock_physical_plan();
diff --git a/src/client/examples/select.rs b/src/client/examples/select.rs
index 442360bf37db..160c217fc520 100644
--- a/src/client/examples/select.rs
+++ b/src/client/examples/select.rs
@@ -10,7 +10,7 @@ fn main() {
#[tokio::main]
async fn run() {
- let client = Client::connect("http://127.0.0.1:3001").await.unwrap();
+ let client = Client::with_urls(vec!["127.0.0.1:3001"]);
let db = Database::new("greptime", client);
let sql = Select::Sql("select * from demo".to_string());
diff --git a/src/client/src/admin.rs b/src/client/src/admin.rs
index 717bea7b19ae..30be0bc9a581 100644
--- a/src/client/src/admin.rs
+++ b/src/client/src/admin.rs
@@ -22,10 +22,6 @@ impl Admin {
}
}
- pub async fn start(&mut self, url: impl Into<String>) -> Result<()> {
- self.client.start(url).await
- }
-
pub async fn create(&self, expr: CreateExpr) -> Result<AdminResult> {
let header = ExprHeader {
version: PROTOCOL_VERSION,
diff --git a/src/client/src/client.rs b/src/client/src/client.rs
index d9a55b92f827..05bfb4c0c30c 100644
--- a/src/client/src/client.rs
+++ b/src/client/src/client.rs
@@ -1,47 +1,96 @@
-use api::v1::{greptime_client::GreptimeClient, *};
-use snafu::{OptionExt, ResultExt};
+use std::sync::Arc;
+
+use api::v1::greptime_client::GreptimeClient;
+use api::v1::*;
+use common_grpc::channel_manager::ChannelManager;
+use parking_lot::RwLock;
+use snafu::OptionExt;
+use snafu::ResultExt;
use tonic::transport::Channel;
use crate::error;
+use crate::load_balance::LoadBalance;
+use crate::load_balance::Loadbalancer;
use crate::Result;
#[derive(Clone, Debug, Default)]
pub struct Client {
- client: Option<GreptimeClient<Channel>>,
+ inner: Arc<Inner>,
}
-impl Client {
- pub async fn start(&mut self, url: impl Into<String>) -> Result<()> {
- match self.client.as_ref() {
- None => {
- let url = url.into();
- let client = GreptimeClient::connect(url.clone())
- .await
- .context(error::ConnectFailedSnafu { url })?;
- self.client = Some(client);
- Ok(())
- }
- Some(_) => error::IllegalGrpcClientStateSnafu {
- err_msg: "already started",
- }
- .fail(),
+#[derive(Debug, Default)]
+struct Inner {
+ channel_manager: ChannelManager,
+ peers: Arc<RwLock<Vec<String>>>,
+ load_balance: Loadbalancer,
+}
+
+impl Inner {
+ fn with_manager(channel_manager: ChannelManager) -> Self {
+ Self {
+ channel_manager,
+ ..Default::default()
}
}
- pub fn with_client(client: GreptimeClient<Channel>) -> Self {
+ fn set_peers(&self, peers: Vec<String>) {
+ let mut guard = self.peers.write();
+ *guard = peers;
+ }
+
+ fn get_peer(&self) -> Option<String> {
+ let guard = self.peers.read();
+ self.load_balance.get_peer(&guard).cloned()
+ }
+}
+
+impl Client {
+ pub fn new() -> Self {
+ Default::default()
+ }
+
+ pub fn with_manager(channel_manager: ChannelManager) -> Self {
+ let inner = Arc::new(Inner::with_manager(channel_manager));
+ Self { inner }
+ }
+
+ pub fn with_urls<U, A>(urls: A) -> Self
+ where
+ U: AsRef<str>,
+ A: AsRef<[U]>,
+ {
+ Self::with_manager_and_urls(ChannelManager::new(), urls)
+ }
+
+ pub fn with_manager_and_urls<U, A>(channel_manager: ChannelManager, urls: A) -> Self
+ where
+ U: AsRef<str>,
+ A: AsRef<[U]>,
+ {
+ let inner = Inner::with_manager(channel_manager);
+ let urls: Vec<String> = urls
+ .as_ref()
+ .iter()
+ .map(|peer| peer.as_ref().to_string())
+ .collect();
+ inner.set_peers(urls);
Self {
- client: Some(client),
+ inner: Arc::new(inner),
}
}
- pub async fn connect(url: impl Into<String>) -> Result<Self> {
- let url = url.into();
- let client = GreptimeClient::connect(url.clone())
- .await
- .context(error::ConnectFailedSnafu { url })?;
- Ok(Self {
- client: Some(client),
- })
+ pub fn start<U, A>(&self, urls: A)
+ where
+ U: AsRef<str>,
+ A: AsRef<[U]>,
+ {
+ let urls: Vec<String> = urls
+ .as_ref()
+ .iter()
+ .map(|peer| peer.as_ref().to_string())
+ .collect();
+
+ self.inner.set_peers(urls);
}
pub async fn admin(&self, req: AdminRequest) -> Result<AdminResponse> {
@@ -73,18 +122,59 @@ impl Client {
}
pub async fn batch(&self, req: BatchRequest) -> Result<BatchResponse> {
- if let Some(client) = self.client.as_ref() {
- let res = client
- .clone()
- .batch(req)
- .await
- .context(error::TonicStatusSnafu)?;
- Ok(res.into_inner())
- } else {
- error::IllegalGrpcClientStateSnafu {
- err_msg: "not started",
- }
- .fail()
+ let peer = self
+ .inner
+ .get_peer()
+ .context(error::IllegalGrpcClientStateSnafu {
+ err_msg: "No available peer found",
+ })?;
+ let mut client = self.make_client(peer)?;
+ let result = client.batch(req).await.context(error::TonicStatusSnafu)?;
+ Ok(result.into_inner())
+ }
+
+ fn make_client(&self, addr: impl AsRef<str>) -> Result<GreptimeClient<Channel>> {
+ let addr = addr.as_ref();
+ let channel = self
+ .inner
+ .channel_manager
+ .get(addr)
+ .context(error::CreateChannelSnafu { addr })?;
+ Ok(GreptimeClient::new(channel))
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::collections::HashSet;
+
+ use super::Inner;
+ use crate::load_balance::Loadbalancer;
+
+ fn mock_peers() -> Vec<String> {
+ vec![
+ "127.0.0.1:3001".to_string(),
+ "127.0.0.1:3002".to_string(),
+ "127.0.0.1:3003".to_string(),
+ ]
+ }
+
+ #[test]
+ fn test_inner() {
+ let inner = Inner::default();
+
+ assert!(matches!(
+ inner.load_balance,
+ Loadbalancer::Random(crate::load_balance::Random)
+ ));
+ assert!(inner.get_peer().is_none());
+
+ let peers = mock_peers();
+ inner.set_peers(peers.clone());
+ let all: HashSet<String> = peers.into_iter().collect();
+
+ for _ in 0..20 {
+ assert!(all.contains(&inner.get_peer().unwrap()));
}
}
}
diff --git a/src/client/src/database.rs b/src/client/src/database.rs
index 2a1d8dc76e26..bff78ebda511 100644
--- a/src/client/src/database.rs
+++ b/src/client/src/database.rs
@@ -43,10 +43,6 @@ impl Database {
}
}
- pub async fn start(&mut self, url: impl Into<String>) -> Result<()> {
- self.client.start(url).await
- }
-
pub fn name(&self) -> &str {
&self.name
}
diff --git a/src/client/src/error.rs b/src/client/src/error.rs
index 7169150ae432..c8c451766737 100644
--- a/src/client/src/error.rs
+++ b/src/client/src/error.rs
@@ -85,6 +85,17 @@ pub enum Error {
#[snafu(backtrace)]
source: datatypes::error::Error,
},
+
+ #[snafu(display(
+ "Failed to create gRPC channel, peer address: {}, source: {}",
+ addr,
+ source
+ ))]
+ CreateChannel {
+ addr: String,
+ #[snafu(backtrace)]
+ source: common_grpc::error::Error,
+ },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -107,6 +118,7 @@ impl ErrorExt for Error {
source.status_code()
}
Error::CreateRecordBatches { source } => source.status_code(),
+ Error::CreateChannel { source, .. } => source.status_code(),
Error::IllegalGrpcClientState { .. } => StatusCode::Unexpected,
}
}
diff --git a/src/client/src/lib.rs b/src/client/src/lib.rs
index b39ea34e202f..5eea78394b61 100644
--- a/src/client/src/lib.rs
+++ b/src/client/src/lib.rs
@@ -2,6 +2,7 @@ pub mod admin;
mod client;
mod database;
mod error;
+pub mod load_balance;
pub use self::{
client::Client,
diff --git a/src/client/src/load_balance.rs b/src/client/src/load_balance.rs
new file mode 100644
index 000000000000..60e37a4e1bdc
--- /dev/null
+++ b/src/client/src/load_balance.rs
@@ -0,0 +1,52 @@
+use enum_dispatch::enum_dispatch;
+use rand::seq::SliceRandom;
+
+#[enum_dispatch]
+pub trait LoadBalance {
+ fn get_peer<'a>(&self, peers: &'a [String]) -> Option<&'a String>;
+}
+
+#[enum_dispatch(LoadBalance)]
+#[derive(Debug)]
+pub enum Loadbalancer {
+ Random,
+}
+
+impl Default for Loadbalancer {
+ fn default() -> Self {
+ Loadbalancer::from(Random)
+ }
+}
+
+#[derive(Debug)]
+pub struct Random;
+
+impl LoadBalance for Random {
+ fn get_peer<'a>(&self, peers: &'a [String]) -> Option<&'a String> {
+ peers.choose(&mut rand::thread_rng())
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::collections::HashSet;
+
+ use super::{LoadBalance, Random};
+
+ #[test]
+ fn test_random_lb() {
+ let peers = vec![
+ "127.0.0.1:3001".to_string(),
+ "127.0.0.1:3002".to_string(),
+ "127.0.0.1:3003".to_string(),
+ "127.0.0.1:3004".to_string(),
+ ];
+ let all: HashSet<String> = peers.clone().into_iter().collect();
+
+ let random = Random;
+ for _ in 0..100 {
+ let peer = random.get_peer(&peers).unwrap();
+ assert!(all.contains(peer));
+ }
+ }
+}
diff --git a/src/datanode/src/tests/grpc_test.rs b/src/datanode/src/tests/grpc_test.rs
index b0455b8ce7b5..f5a2aa79a6b8 100644
--- a/src/datanode/src/tests/grpc_test.rs
+++ b/src/datanode/src/tests/grpc_test.rs
@@ -45,7 +45,7 @@ async fn setup_grpc_server(name: &str, port: usize) -> (String, TestGuard, Arc<G
async fn test_auto_create_table() {
let (addr, _guard, grpc_server) = setup_grpc_server("auto_create_table", 3991).await;
- let grpc_client = Client::connect(format!("http://{}", addr)).await.unwrap();
+ let grpc_client = Client::with_urls(vec![addr]);
let db = Database::new("greptime", grpc_client);
insert_and_assert(&db).await;
@@ -111,7 +111,7 @@ fn expect_data() -> (Column, Column, Column, Column) {
async fn test_insert_and_select() {
let (addr, _guard, grpc_server) = setup_grpc_server("insert_and_select", 3990).await;
- let grpc_client = Client::connect(format!("http://{}", addr)).await.unwrap();
+ let grpc_client = Client::with_urls(vec![addr]);
let db = Database::new("greptime", grpc_client.clone());
let admin = Admin::new("greptime", grpc_client);
diff --git a/src/frontend/Cargo.toml b/src/frontend/Cargo.toml
index af83b960c66f..a1ace3bde9e0 100644
--- a/src/frontend/Cargo.toml
+++ b/src/frontend/Cargo.toml
@@ -10,6 +10,7 @@ async-trait = "0.1"
client = { path = "../client" }
common-base = { path = "../common/base" }
common-error = { path = "../common/error" }
+common-grpc = { path = "../common/grpc" }
common-query = { path = "../common/query" }
common-recordbatch = { path = "../common/recordbatch" }
common-runtime = { path = "../common/runtime" }
diff --git a/src/frontend/src/instance.rs b/src/frontend/src/instance.rs
index 29ce0f83db50..5d72af5099e7 100644
--- a/src/frontend/src/instance.rs
+++ b/src/frontend/src/instance.rs
@@ -30,40 +30,37 @@ use crate::frontend::FrontendOptions;
pub(crate) type InstanceRef = Arc<Instance>;
+#[derive(Default)]
pub struct Instance {
- db: Database,
- admin: Admin,
+ client: Client,
}
impl Instance {
pub(crate) fn new() -> Self {
- let client = Client::default();
- let db = Database::new("greptime", client.clone());
- let admin = Admin::new("greptime", client);
- Self { db, admin }
+ Default::default()
}
pub(crate) async fn start(&mut self, opts: &FrontendOptions) -> Result<()> {
let addr = opts.datanode_grpc_addr();
- self.db
- .start(addr.clone())
- .await
- .context(error::ConnectDatanodeSnafu { addr: addr.clone() })?;
- self.admin
- .start(addr.clone())
- .await
- .context(error::ConnectDatanodeSnafu { addr })?;
+ self.client.start(vec![addr]);
Ok(())
}
+
+ // TODO(fys): temporarily hard code
+ pub fn database(&self) -> Database {
+ Database::new("greptime", self.client.clone())
+ }
+
+ // TODO(fys): temporarily hard code
+ pub fn admin(&self) -> Admin {
+ Admin::new("greptime", self.client.clone())
+ }
}
#[cfg(test)]
impl Instance {
pub fn with_client(client: Client) -> Self {
- Self {
- db: Database::new("greptime", client.clone()),
- admin: Admin::new("greptime", client),
- }
+ Self { client }
}
}
@@ -85,7 +82,7 @@ impl SqlQueryHandler for Instance {
match stmt {
Statement::Query(_) => self
- .db
+ .database()
.select(Select::Sql(query.to_string()))
.await
.and_then(|object_result| object_result.try_into()),
@@ -96,7 +93,7 @@ impl SqlQueryHandler for Instance {
expr: Some(insert_expr::Expr::Sql(query.to_string())),
options: HashMap::default(),
};
- self.db
+ self.database()
.insert(expr)
.await
.and_then(|object_result| object_result.try_into())
@@ -105,7 +102,7 @@ impl SqlQueryHandler for Instance {
let expr = create_to_expr(create)
.map_err(BoxedError::new)
.context(server_error::ExecuteQuerySnafu { query })?;
- self.admin
+ self.admin()
.create(expr)
.await
.and_then(admin_result_to_output)
@@ -235,7 +232,7 @@ fn columns_to_expr(column_defs: &[ColumnDef]) -> Result<Vec<GrpcColumnDef>> {
#[async_trait]
impl GrpcQueryHandler for Instance {
async fn do_query(&self, query: ObjectExpr) -> server_error::Result<GrpcObjectResult> {
- self.db
+ self.database()
.object(query.clone())
.await
.map_err(BoxedError::new)
@@ -248,7 +245,7 @@ impl GrpcQueryHandler for Instance {
#[async_trait]
impl GrpcAdminHandler for Instance {
async fn exec_admin_request(&self, expr: AdminExpr) -> server_error::Result<AdminResult> {
- self.admin
+ self.admin()
.do_request(expr.clone())
.await
.map_err(BoxedError::new)
diff --git a/src/frontend/src/instance/influxdb.rs b/src/frontend/src/instance/influxdb.rs
index 74ad15f98cf3..09e26bfa2434 100644
--- a/src/frontend/src/instance/influxdb.rs
+++ b/src/frontend/src/instance/influxdb.rs
@@ -11,7 +11,7 @@ use crate::instance::Instance;
impl InfluxdbLineProtocolHandler for Instance {
async fn exec(&self, request: &InfluxdbRequest) -> servers::error::Result<()> {
let exprs: Vec<InsertExpr> = request.try_into()?;
- self.db
+ self.database()
.batch_insert(exprs)
.await
.map_err(BoxedError::new)
diff --git a/src/frontend/src/instance/opentsdb.rs b/src/frontend/src/instance/opentsdb.rs
index f7ac68993f73..d2b6b13502ac 100644
--- a/src/frontend/src/instance/opentsdb.rs
+++ b/src/frontend/src/instance/opentsdb.rs
@@ -28,7 +28,7 @@ impl Instance {
async fn insert_opentsdb_metric(&self, data_point: &DataPoint) -> Result<()> {
let expr = data_point.as_grpc_insert();
- let result = self.db.insert(expr.clone()).await;
+ let result = self.database().insert(expr.clone()).await;
let object_result = match result {
Ok(result) => result,
diff --git a/src/frontend/src/instance/prometheus.rs b/src/frontend/src/instance/prometheus.rs
index bc87ed3ab8d2..dc466aaa1c5c 100644
--- a/src/frontend/src/instance/prometheus.rs
+++ b/src/frontend/src/instance/prometheus.rs
@@ -92,7 +92,7 @@ impl PrometheusProtocolHandler for Instance {
async fn write(&self, request: WriteRequest) -> ServerResult<()> {
let exprs = prometheus::write_request_to_insert_exprs(request)?;
- self.db
+ self.database()
.batch_insert(exprs)
.await
.map_err(BoxedError::new)
@@ -107,7 +107,7 @@ impl PrometheusProtocolHandler for Instance {
let response_type = negotiate_response_type(&request.accepted_response_types)?;
// TODO(dennis): use read_hints to speedup query if possible
- let results = handle_remote_queries(&self.db, &request.queries).await?;
+ let results = handle_remote_queries(&self.database(), &request.queries).await?;
match response_type {
ResponseType::Samples => {
diff --git a/src/frontend/src/tests.rs b/src/frontend/src/tests.rs
index bc2b473e97e1..a1317d7bbf23 100644
--- a/src/frontend/src/tests.rs
+++ b/src/frontend/src/tests.rs
@@ -1,10 +1,10 @@
use std::sync::Arc;
-use api::v1::greptime_client::GreptimeClient;
use client::Client;
+use common_grpc::channel_manager::ChannelManager;
use datanode::instance::Instance as DatanodeInstance;
use servers::grpc::GrpcServer;
-use tonic::transport::{Endpoint, Server};
+use tonic::transport::Server;
use tower::service_fn;
use crate::instance::Instance;
@@ -37,25 +37,27 @@ pub(crate) async fn create_frontend_instance() -> Arc<Instance> {
// on the first attempt to connect. All other attempts will fail.
let mut client = Some(client);
// "http://[::]:50051" is just a placeholder, does not actually connect to it,
- // see https://github.com/hyperium/tonic/issues/727#issuecomment-881532934
- let channel = Endpoint::try_from("http://[::]:50051")
- .unwrap()
- .connect_with_connector(service_fn(move |_| {
- let client = client.take();
-
- async move {
- if let Some(client) = client {
- Ok(client)
- } else {
- Err(std::io::Error::new(
- std::io::ErrorKind::Other,
- "Client already taken",
- ))
+ let addr = "[::].50051";
+ let channel_manager = ChannelManager::new();
+ channel_manager
+ .reset_with_connector(
+ addr,
+ service_fn(move |_| {
+ let client = client.take();
+
+ async move {
+ if let Some(client) = client {
+ Ok(client)
+ } else {
+ Err(std::io::Error::new(
+ std::io::ErrorKind::Other,
+ "Client already taken",
+ ))
+ }
}
- }
- }))
- .await
+ }),
+ )
.unwrap();
- let client = Client::with_client(GreptimeClient::new(channel));
+ let client = Client::with_manager_and_urls(channel_manager, vec![addr]);
Arc::new(Instance::with_client(client))
}
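The client changes in this diff replace the one-shot `connect` call with a peer list and a random load balancer. Below is a minimal usage sketch, assuming the crate layout shown above; the peer addresses are purely illustrative:

```rust
use client::{Client, Database};

fn main() {
    // Build a client over several peers; the default `Loadbalancer::Random`
    // picks one peer per request.
    let client = Client::with_urls(vec!["127.0.0.1:3001", "127.0.0.1:3002"]);
    let db = Database::new("greptime", client.clone());

    // The peer list can be swapped at runtime without rebuilding the client.
    client.start(vec!["127.0.0.1:3003"]);

    // Issue `insert`/`select` calls through `db`, as the examples above do.
    let _ = db;
}
```

Each request resolves a peer through `Inner::get_peer`, so an updated peer list takes effect on the next call rather than requiring a reconnect.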
type: feat
masked_commit_message: grpc client support multi peers (#380)

hash: b34f26ee07b0b2d07ae8b60aead8b9d23028eeba
date: 2022-11-15 14:07:52
author: Ning Sun
commit_message: docs: fix docs site link in readme (#512)
is_merge: false
git_diff:
diff --git a/README.md b/README.md
index 768af434803f..ca9a2d33a96a 100644
--- a/README.md
+++ b/README.md
@@ -164,8 +164,9 @@ In addition, you may:
## Documentation
-- GreptimeDB [User Guide](https://greptime.com/docs/user-guide)
-- GreptimeDB [Developer Guide](https://greptime.com/docs/developer-guide)
+- GreptimeDB [User Guide](https://docs.greptime.com/user-guide/concepts.html)
+- GreptimeDB [Developer
+ Guide](https://docs.greptime.com/developer-guide/overview.html)
## License
type: docs
masked_commit_message: fix docs site link in readme (#512)

hash: 621c6f371b0ba952a0432c43baf8258d0fe65e7a
date: 2023-09-22 07:37:46
author: LinFeng
commit_message: feat: limit grpc message size (#2459)
is_merge: false
git_diff:
diff --git a/src/client/src/client.rs b/src/client/src/client.rs
index ada1ae92c56a..c5457e58741f 100644
--- a/src/client/src/client.rs
+++ b/src/client/src/client.rs
@@ -138,8 +138,12 @@ impl Client {
Ok((addr, channel))
}
- fn max_grpc_message_size(&self) -> usize {
- self.inner.channel_manager.config().max_message_size
+ fn max_grpc_recv_message_size(&self) -> usize {
+ self.inner.channel_manager.config().max_recv_message_size
+ }
+
+ fn max_grpc_send_message_size(&self) -> usize {
+ self.inner.channel_manager.config().max_send_message_size
}
pub(crate) fn make_flight_client(&self) -> Result<FlightClient> {
@@ -147,7 +151,8 @@ impl Client {
Ok(FlightClient {
addr,
client: FlightServiceClient::new(channel)
- .max_decoding_message_size(self.max_grpc_message_size()),
+ .max_decoding_message_size(self.max_grpc_recv_message_size())
+ .max_encoding_message_size(self.max_grpc_send_message_size()),
})
}
@@ -155,13 +160,16 @@ impl Client {
let (_, channel) = self.find_channel()?;
Ok(DatabaseClient {
inner: GreptimeDatabaseClient::new(channel)
- .max_decoding_message_size(self.max_grpc_message_size()),
+ .max_decoding_message_size(self.max_grpc_recv_message_size())
+ .max_encoding_message_size(self.max_grpc_send_message_size()),
})
}
pub(crate) fn raw_region_client(&self) -> Result<PbRegionClient<Channel>> {
let (_, channel) = self.find_channel()?;
- Ok(PbRegionClient::new(channel).max_decoding_message_size(self.max_grpc_message_size()))
+ Ok(PbRegionClient::new(channel)
+ .max_decoding_message_size(self.max_grpc_recv_message_size())
+ .max_encoding_message_size(self.max_grpc_send_message_size()))
}
pub fn make_prometheus_gateway_client(&self) -> Result<PrometheusGatewayClient<Channel>> {
diff --git a/src/common/grpc/src/channel_manager.rs b/src/common/grpc/src/channel_manager.rs
index 1102ac0fd303..f52177e2890d 100644
--- a/src/common/grpc/src/channel_manager.rs
+++ b/src/common/grpc/src/channel_manager.rs
@@ -31,7 +31,8 @@ use crate::error::{CreateChannelSnafu, InvalidConfigFilePathSnafu, InvalidTlsCon
const RECYCLE_CHANNEL_INTERVAL_SECS: u64 = 60;
pub const DEFAULT_GRPC_REQUEST_TIMEOUT_SECS: u64 = 10;
pub const DEFAULT_GRPC_CONNECT_TIMEOUT_SECS: u64 = 10;
-pub const DEFAULT_MAX_GRPC_MESSAGE_SIZE: usize = 512 * 1024 * 1024;
+pub const DEFAULT_MAX_GRPC_RECV_MESSAGE_SIZE: usize = 512 * 1024 * 1024;
+pub const DEFAULT_MAX_GRPC_SEND_MESSAGE_SIZE: usize = 512 * 1024 * 1024;
lazy_static! {
static ref ID: AtomicU64 = AtomicU64::new(0);
@@ -248,9 +249,10 @@ pub struct ChannelConfig {
pub tcp_keepalive: Option<Duration>,
pub tcp_nodelay: bool,
pub client_tls: Option<ClientTlsOption>,
- // Max gRPC message size
- // TODO(dennis): make it configurable
- pub max_message_size: usize,
+ // Max gRPC receiving(decoding) message size
+ pub max_recv_message_size: usize,
+ // Max gRPC sending(encoding) message size
+ pub max_send_message_size: usize,
}
impl Default for ChannelConfig {
@@ -269,7 +271,8 @@ impl Default for ChannelConfig {
tcp_keepalive: None,
tcp_nodelay: true,
client_tls: None,
- max_message_size: DEFAULT_MAX_GRPC_MESSAGE_SIZE,
+ max_recv_message_size: DEFAULT_MAX_GRPC_RECV_MESSAGE_SIZE,
+ max_send_message_size: DEFAULT_MAX_GRPC_SEND_MESSAGE_SIZE,
}
}
}
@@ -534,7 +537,8 @@ mod tests {
tcp_keepalive: None,
tcp_nodelay: true,
client_tls: None,
- max_message_size: DEFAULT_MAX_GRPC_MESSAGE_SIZE,
+ max_recv_message_size: DEFAULT_MAX_GRPC_RECV_MESSAGE_SIZE,
+ max_send_message_size: DEFAULT_MAX_GRPC_SEND_MESSAGE_SIZE,
},
default_cfg
);
@@ -577,7 +581,8 @@ mod tests {
client_cert_path: "some_cert_path".to_string(),
client_key_path: "some_key_path".to_string(),
}),
- max_message_size: DEFAULT_MAX_GRPC_MESSAGE_SIZE,
+ max_recv_message_size: DEFAULT_MAX_GRPC_RECV_MESSAGE_SIZE,
+ max_send_message_size: DEFAULT_MAX_GRPC_SEND_MESSAGE_SIZE,
},
cfg
);
diff --git a/src/datanode/src/config.rs b/src/datanode/src/config.rs
index 61001f2e39d1..b3835e6e2e84 100644
--- a/src/datanode/src/config.rs
+++ b/src/datanode/src/config.rs
@@ -18,6 +18,9 @@ use std::time::Duration;
use common_base::readable_size::ReadableSize;
use common_config::WalConfig;
+use common_grpc::channel_manager::{
+ DEFAULT_MAX_GRPC_RECV_MESSAGE_SIZE, DEFAULT_MAX_GRPC_SEND_MESSAGE_SIZE,
+};
pub use common_procedure::options::ProcedureConfig;
use common_telemetry::logging::LoggingOptions;
use file_engine::config::EngineConfig as FileEngineConfig;
@@ -324,6 +327,10 @@ pub struct DatanodeOptions {
pub rpc_addr: String,
pub rpc_hostname: Option<String>,
pub rpc_runtime_size: usize,
+ // Max gRPC receiving(decoding) message size
+ pub rpc_max_recv_message_size: usize,
+ // Max gRPC sending(encoding) message size
+ pub rpc_max_send_message_size: usize,
pub heartbeat: HeartbeatOptions,
pub http: HttpOptions,
pub meta_client: Option<MetaClientOptions>,
@@ -344,6 +351,8 @@ impl Default for DatanodeOptions {
rpc_addr: "127.0.0.1:3001".to_string(),
rpc_hostname: None,
rpc_runtime_size: 8,
+ rpc_max_recv_message_size: DEFAULT_MAX_GRPC_RECV_MESSAGE_SIZE,
+ rpc_max_send_message_size: DEFAULT_MAX_GRPC_SEND_MESSAGE_SIZE,
http: HttpOptions::default(),
meta_client: None,
wal: WalConfig::default(),
diff --git a/src/datanode/src/server.rs b/src/datanode/src/server.rs
index 71c050dc0bbc..1847dc4c992a 100644
--- a/src/datanode/src/server.rs
+++ b/src/datanode/src/server.rs
@@ -16,7 +16,7 @@ use std::net::SocketAddr;
use std::sync::Arc;
use futures::future;
-use servers::grpc::GrpcServer;
+use servers::grpc::{GrpcServer, GrpcServerConfig};
use servers::http::{HttpServer, HttpServerBuilder};
use servers::metrics_handler::MetricsHandler;
use servers::server::Server;
@@ -39,9 +39,14 @@ impl Services {
let flight_handler = Some(Arc::new(region_server.clone()) as _);
let region_server_handler = Some(Arc::new(region_server.clone()) as _);
let runtime = region_server.runtime();
+ let grpc_config = GrpcServerConfig {
+ max_recv_message_size: opts.rpc_max_recv_message_size,
+ max_send_message_size: opts.rpc_max_send_message_size,
+ };
Ok(Self {
grpc_server: GrpcServer::new(
+ Some(grpc_config),
None,
None,
flight_handler,
diff --git a/src/frontend/src/server.rs b/src/frontend/src/server.rs
index d96178f1d3ad..5a61c3b48834 100644
--- a/src/frontend/src/server.rs
+++ b/src/frontend/src/server.rs
@@ -22,7 +22,7 @@ use common_runtime::Builder as RuntimeBuilder;
use common_telemetry::info;
use servers::configurator::ConfiguratorRef;
use servers::error::Error::InternalIo;
-use servers::grpc::GrpcServer;
+use servers::grpc::{GrpcServer, GrpcServerConfig};
use servers::http::HttpServerBuilder;
use servers::metrics_handler::MetricsHandler;
use servers::mysql::server::{MysqlServer, MysqlSpawnConfig, MysqlSpawnRef};
@@ -69,7 +69,12 @@ impl Services {
.context(error::RuntimeResourceSnafu)?,
);
+ let grpc_config = GrpcServerConfig {
+ max_recv_message_size: opts.max_recv_message_size,
+ max_send_message_size: opts.max_send_message_size,
+ };
let grpc_server = GrpcServer::new(
+ Some(grpc_config),
Some(ServerGrpcQueryHandlerAdaptor::arc(instance.clone())),
Some(instance.clone()),
None,
diff --git a/src/frontend/src/service_config/grpc.rs b/src/frontend/src/service_config/grpc.rs
index 92d6ea771710..e0a64565015d 100644
--- a/src/frontend/src/service_config/grpc.rs
+++ b/src/frontend/src/service_config/grpc.rs
@@ -12,12 +12,19 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use common_grpc::channel_manager::{
+ DEFAULT_MAX_GRPC_RECV_MESSAGE_SIZE, DEFAULT_MAX_GRPC_SEND_MESSAGE_SIZE,
+};
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct GrpcOptions {
pub addr: String,
pub runtime_size: usize,
+ // Max gRPC receiving(decoding) message size
+ pub max_recv_message_size: usize,
+ // Max gRPC sending(encoding) message size
+ pub max_send_message_size: usize,
}
impl Default for GrpcOptions {
@@ -25,6 +32,8 @@ impl Default for GrpcOptions {
Self {
addr: "127.0.0.1:4001".to_string(),
runtime_size: 8,
+ max_recv_message_size: DEFAULT_MAX_GRPC_RECV_MESSAGE_SIZE,
+ max_send_message_size: DEFAULT_MAX_GRPC_SEND_MESSAGE_SIZE,
}
}
}
diff --git a/src/servers/src/grpc.rs b/src/servers/src/grpc.rs
index 5d5582de7faf..69ea1943a6fa 100644
--- a/src/servers/src/grpc.rs
+++ b/src/servers/src/grpc.rs
@@ -33,7 +33,9 @@ use arrow_flight::flight_service_server::FlightService;
use arrow_flight::flight_service_server::FlightServiceServer;
use async_trait::async_trait;
use auth::UserProviderRef;
-use common_grpc::channel_manager::DEFAULT_MAX_GRPC_MESSAGE_SIZE;
+use common_grpc::channel_manager::{
+ DEFAULT_MAX_GRPC_RECV_MESSAGE_SIZE, DEFAULT_MAX_GRPC_SEND_MESSAGE_SIZE,
+};
use common_runtime::Runtime;
use common_telemetry::logging::info;
use common_telemetry::{error, warn};
@@ -82,21 +84,24 @@ pub struct GrpcServer {
/// Grpc Server configuration
#[derive(Debug, Clone)]
pub struct GrpcServerConfig {
- // Max gRPC message size
- // TODO(dennis): make it configurable
- pub max_message_size: usize,
+ // Max gRPC receiving(decoding) message size
+ pub max_recv_message_size: usize,
+ // Max gRPC sending(encoding) message size
+ pub max_send_message_size: usize,
}
impl Default for GrpcServerConfig {
fn default() -> Self {
Self {
- max_message_size: DEFAULT_MAX_GRPC_MESSAGE_SIZE,
+ max_recv_message_size: DEFAULT_MAX_GRPC_RECV_MESSAGE_SIZE,
+ max_send_message_size: DEFAULT_MAX_GRPC_SEND_MESSAGE_SIZE,
}
}
}
impl GrpcServer {
pub fn new(
+ config: Option<GrpcServerConfig>,
query_handler: Option<ServerGrpcQueryHandlerRef>,
prometheus_handler: Option<PrometheusHandlerRef>,
flight_handler: Option<FlightCraftRef>,
@@ -110,7 +115,7 @@ impl GrpcServer {
let region_server_handler = region_server_handler
.map(|handler| RegionServerRequestHandler::new(handler, runtime.clone()));
Self {
- config: GrpcServerConfig::default(),
+ config: config.unwrap_or_default(),
shutdown_tx: Mutex::new(None),
user_provider,
serve_state: Mutex::new(None),
@@ -201,7 +206,8 @@ impl Server for GrpcServer {
}
async fn start(&self, addr: SocketAddr) -> Result<SocketAddr> {
- let max_message_size = self.config.max_message_size;
+ let max_recv_message_size = self.config.max_recv_message_size;
+ let max_send_message_size = self.config.max_send_message_size;
let (tx, rx) = oneshot::channel();
let (listener, addr) = {
let mut shutdown_tx = self.shutdown_tx.lock().await;
@@ -227,7 +233,8 @@ impl Server for GrpcServer {
if let Some(database_handler) = &self.database_handler {
builder = builder.add_service(
GreptimeDatabaseServer::new(DatabaseService::new(database_handler.clone()))
- .max_decoding_message_size(max_message_size),
+ .max_decoding_message_size(max_recv_message_size)
+ .max_encoding_message_size(max_send_message_size),
)
}
if let Some(prometheus_handler) = &self.prometheus_handler {
@@ -237,18 +244,24 @@ impl Server for GrpcServer {
if let Some(flight_handler) = &self.flight_handler {
builder = builder.add_service(
FlightServiceServer::new(FlightCraftWrapper(flight_handler.clone()))
- .max_decoding_message_size(max_message_size),
+ .max_decoding_message_size(max_recv_message_size)
+ .max_encoding_message_size(max_send_message_size),
)
} else {
// TODO(ruihang): this is a temporary workaround before region server is ready.
- builder = builder.add_service(FlightServiceServer::new(FlightCraftWrapper(
- self.database_handler.clone().unwrap(),
- )))
+ builder = builder.add_service(
+ FlightServiceServer::new(FlightCraftWrapper(
+ self.database_handler.clone().unwrap(),
+ ))
+ .max_decoding_message_size(max_recv_message_size)
+ .max_encoding_message_size(max_send_message_size),
+ )
}
if let Some(region_server_handler) = &self.region_server_handler {
builder = builder.add_service(
RegionServer::new(region_server_handler.clone())
- .max_decoding_message_size(max_message_size),
+ .max_decoding_message_size(max_recv_message_size)
+ .max_encoding_message_size(max_send_message_size),
);
}
diff --git a/tests-integration/src/cluster.rs b/tests-integration/src/cluster.rs
index 9bdb11f90b00..013eeb681ec6 100644
--- a/tests-integration/src/cluster.rs
+++ b/tests-integration/src/cluster.rs
@@ -272,6 +272,7 @@ async fn create_datanode_client(datanode: &Datanode) -> (String, Client) {
let flight_handler = Some(Arc::new(datanode.region_server()) as _);
let region_server_handler = Some(Arc::new(datanode.region_server()) as _);
let grpc_server = GrpcServer::new(
+ None,
None,
None,
flight_handler,
diff --git a/tests-integration/src/test_util.rs b/tests-integration/src/test_util.rs
index a6c0cf72fb65..e3370836eb2f 100644
--- a/tests-integration/src/test_util.rs
+++ b/tests-integration/src/test_util.rs
@@ -39,7 +39,7 @@ use object_store::test_util::TempFolder;
use object_store::ObjectStore;
use secrecy::ExposeSecret;
use servers::grpc::greptime_handler::GreptimeRequestHandler;
-use servers::grpc::GrpcServer;
+use servers::grpc::{GrpcServer, GrpcServerConfig};
use servers::http::{HttpOptions, HttpServerBuilder};
use servers::metrics_handler::MetricsHandler;
use servers::mysql::server::{MysqlServer, MysqlSpawnConfig, MysqlSpawnRef};
@@ -423,13 +423,22 @@ pub async fn setup_grpc_server(
store_type: StorageType,
name: &str,
) -> (String, TestGuard, Arc<GrpcServer>) {
- setup_grpc_server_with_user_provider(store_type, name, None).await
+ setup_grpc_server_with(store_type, name, None, None).await
}
pub async fn setup_grpc_server_with_user_provider(
store_type: StorageType,
name: &str,
user_provider: Option<UserProviderRef>,
+) -> (String, TestGuard, Arc<GrpcServer>) {
+ setup_grpc_server_with(store_type, name, user_provider, None).await
+}
+
+pub async fn setup_grpc_server_with(
+ store_type: StorageType,
+ name: &str,
+ user_provider: Option<UserProviderRef>,
+ grpc_config: Option<GrpcServerConfig>,
) -> (String, TestGuard, Arc<GrpcServer>) {
let instance = setup_standalone_instance(name, store_type).await;
@@ -447,7 +456,9 @@ pub async fn setup_grpc_server_with_user_provider(
user_provider.clone(),
runtime.clone(),
));
+
let fe_grpc_server = Arc::new(GrpcServer::new(
+ grpc_config,
Some(ServerGrpcQueryHandlerAdaptor::arc(fe_instance_ref.clone())),
Some(fe_instance_ref.clone()),
Some(flight_handler),
diff --git a/tests-integration/tests/grpc.rs b/tests-integration/tests/grpc.rs
index 7f2f20d87ec8..3d766a5a37f0 100644
--- a/tests-integration/tests/grpc.rs
+++ b/tests-integration/tests/grpc.rs
@@ -24,10 +24,11 @@ use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_catalog::consts::{MIN_USER_TABLE_ID, MITO_ENGINE};
use common_query::Output;
use common_recordbatch::RecordBatches;
+use servers::grpc::GrpcServerConfig;
use servers::http::prometheus::{PromData, PromSeries, PrometheusJsonResponse, PrometheusResponse};
use servers::server::Server;
use tests_integration::test_util::{
- setup_grpc_server, setup_grpc_server_with_user_provider, StorageType,
+ setup_grpc_server, setup_grpc_server_with, setup_grpc_server_with_user_provider, StorageType,
};
#[macro_export]
@@ -64,6 +65,9 @@ macro_rules! grpc_tests {
test_auto_create_table,
test_insert_and_select,
test_dbname,
+ test_grpc_message_size_ok,
+ test_grpc_message_size_limit_recv,
+ test_grpc_message_size_limit_send,
test_grpc_auth,
test_health_check,
test_prom_gateway_query,
@@ -115,6 +119,66 @@ pub async fn test_dbname(store_type: StorageType) {
guard.remove_all().await;
}
+pub async fn test_grpc_message_size_ok(store_type: StorageType) {
+ let config = GrpcServerConfig {
+ max_recv_message_size: 1024,
+ max_send_message_size: 1024,
+ };
+ let (addr, mut guard, fe_grpc_server) =
+ setup_grpc_server_with(store_type, "auto_create_table", None, Some(config)).await;
+
+ let grpc_client = Client::with_urls(vec![addr]);
+ let db = Database::new_with_dbname(
+ format!("{}-{}", DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME),
+ grpc_client,
+ );
+ db.sql("show tables;").await.unwrap();
+ let _ = fe_grpc_server.shutdown().await;
+ guard.remove_all().await;
+}
+
+pub async fn test_grpc_message_size_limit_send(store_type: StorageType) {
+ let config = GrpcServerConfig {
+ max_recv_message_size: 1024,
+ max_send_message_size: 50,
+ };
+ let (addr, mut guard, fe_grpc_server) =
+ setup_grpc_server_with(store_type, "auto_create_table", None, Some(config)).await;
+
+ let grpc_client = Client::with_urls(vec![addr]);
+ let db = Database::new_with_dbname(
+ format!("{}-{}", DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME),
+ grpc_client,
+ );
+ let err_msg = db.sql("show tables;").await.unwrap_err().to_string();
+ assert!(err_msg.contains("message length too large"), "{}", err_msg);
+ let _ = fe_grpc_server.shutdown().await;
+ guard.remove_all().await;
+}
+
+pub async fn test_grpc_message_size_limit_recv(store_type: StorageType) {
+ let config = GrpcServerConfig {
+ max_recv_message_size: 10,
+ max_send_message_size: 1024,
+ };
+ let (addr, mut guard, fe_grpc_server) =
+ setup_grpc_server_with(store_type, "auto_create_table", None, Some(config)).await;
+
+ let grpc_client = Client::with_urls(vec![addr]);
+ let db = Database::new_with_dbname(
+ format!("{}-{}", DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME),
+ grpc_client,
+ );
+ let err_msg = db.sql("show tables;").await.unwrap_err().to_string();
+ assert!(
+ err_msg.contains("Operation was attempted past the valid range"),
+ "{}",
+ err_msg
+ );
+ let _ = fe_grpc_server.shutdown().await;
+ guard.remove_all().await;
+}
+
pub async fn test_grpc_auth(store_type: StorageType) {
let user_provider = user_provider_from_option(
&"static_user_provider:cmd:greptime_user=greptime_pwd".to_string(),
diff --git a/tests-integration/tests/http.rs b/tests-integration/tests/http.rs
index c186dab8ba7f..0317482da7f4 100644
--- a/tests-integration/tests/http.rs
+++ b/tests-integration/tests/http.rs
@@ -610,6 +610,8 @@ node_id = 0
require_lease_before_startup = true
rpc_addr = "127.0.0.1:3001"
rpc_runtime_size = 8
+rpc_max_recv_message_size = 536870912
+rpc_max_send_message_size = 536870912
enable_telemetry = true
[heartbeat]
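This commit splits the single `max_message_size` limit into separate receive (decode) and send (encode) caps on both the gRPC server and the client channel config. A hedged sketch of constructing the server-side config, assuming only the fields and defaults shown in the diff; the 16 MiB cap is an arbitrary example value:

```rust
use common_grpc::channel_manager::{
    DEFAULT_MAX_GRPC_RECV_MESSAGE_SIZE, DEFAULT_MAX_GRPC_SEND_MESSAGE_SIZE,
};
use servers::grpc::GrpcServerConfig;

fn main() {
    // Tighten the receive cap to 16 MiB while keeping the 512 MiB default for sends.
    let config = GrpcServerConfig {
        max_recv_message_size: 16 * 1024 * 1024,
        max_send_message_size: DEFAULT_MAX_GRPC_SEND_MESSAGE_SIZE,
    };
    assert!(config.max_recv_message_size < DEFAULT_MAX_GRPC_RECV_MESSAGE_SIZE);

    // The config is then passed as the new first argument of `GrpcServer::new`,
    // as the datanode and frontend wiring in this diff does.
}
```

The integration tests above expect "Operation was attempted past the valid range" on the client when the receive cap is exceeded, and "message length too large" when the send cap is hit.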
type: feat
masked_commit_message: limit grpc message size (#2459)

hash: 8289b0dec2096a7a2c52334e5ee748920f18be86
date: 2024-02-22 12:31:15
author: Ruihang Xia
commit_message: ci: align docs workflow jobs with develop.yml (#3356)
is_merge: false
git_diff:
diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
index bdda82fd9a2a..c9c516c576f5 100644
--- a/.github/workflows/docs.yml
+++ b/.github/workflows/docs.yml
@@ -61,6 +61,18 @@ jobs:
sqlness:
name: Sqlness Test
- runs-on: ubuntu-20.04
+ runs-on: ${{ matrix.os }}
+ strategy:
+ matrix:
+ os: [ ubuntu-20.04 ]
+ steps:
+ - run: 'echo "No action required"'
+
+ sqlness-kafka-wal:
+ name: Sqlness Test with Kafka Wal
+ runs-on: ${{ matrix.os }}
+ strategy:
+ matrix:
+ os: [ ubuntu-20.04 ]
steps:
- run: 'echo "No action required"'
type: ci
masked_commit_message: align docs workflow jobs with develop.yml (#3356)

hash: 175929426a8f06c03a087b1d3b380b9f7af2a474
date: 2024-03-26 16:54:28
author: Lei, HUANG
commit_message: feat: support time range in copy table (#3583)
is_merge: false
git_diff:
diff --git a/src/operator/src/statement.rs b/src/operator/src/statement.rs
index f7b7471ec118..1a30c596ce8b 100644
--- a/src/operator/src/statement.rs
+++ b/src/operator/src/statement.rs
@@ -360,6 +360,8 @@ fn to_copy_table_request(stmt: CopyTable, query_ctx: QueryContextRef) -> Result<
.map_err(BoxedError::new)
.context(ExternalSnafu)?;
+ let timestamp_range = timestamp_range_from_option_map(&with, &query_ctx)?;
+
let pattern = with
.get(common_datasource::file_format::FILE_PATTERN)
.cloned();
@@ -373,8 +375,7 @@ fn to_copy_table_request(stmt: CopyTable, query_ctx: QueryContextRef) -> Result<
connection: connection.map,
pattern,
direction,
- // we copy the whole table by default.
- timestamp_range: None,
+ timestamp_range,
})
}
@@ -387,16 +388,7 @@ fn to_copy_database_request(
let (catalog_name, database_name) = idents_to_full_database_name(&arg.database_name, query_ctx)
.map_err(BoxedError::new)
.context(ExternalSnafu)?;
-
- let start_timestamp = extract_timestamp(&arg.with, COPY_DATABASE_TIME_START_KEY, query_ctx)?;
- let end_timestamp = extract_timestamp(&arg.with, COPY_DATABASE_TIME_END_KEY, query_ctx)?;
-
- let time_range = match (start_timestamp, end_timestamp) {
- (Some(start), Some(end)) => TimestampRange::new(start, end),
- (Some(start), None) => Some(TimestampRange::from_start(start)),
- (None, Some(end)) => Some(TimestampRange::until_end(end, false)), // exclusive end
- (None, None) => None,
- };
+ let time_range = timestamp_range_from_option_map(&arg.with, query_ctx)?;
Ok(CopyDatabaseRequest {
catalog_name,
@@ -408,6 +400,24 @@ fn to_copy_database_request(
})
}
+/// Extracts timestamp range from OptionMap with keys `start_time` and `end_time`.
+/// The timestamp ranges should be a valid timestamp string as defined in [Timestamp::from_str].
+/// The timezone used for conversion will respect that inside `query_ctx`.
+fn timestamp_range_from_option_map(
+ options: &OptionMap,
+ query_ctx: &QueryContextRef,
+) -> Result<Option<TimestampRange>> {
+ let start_timestamp = extract_timestamp(options, COPY_DATABASE_TIME_START_KEY, query_ctx)?;
+ let end_timestamp = extract_timestamp(options, COPY_DATABASE_TIME_END_KEY, query_ctx)?;
+ let time_range = match (start_timestamp, end_timestamp) {
+ (Some(start), Some(end)) => TimestampRange::new(start, end),
+ (Some(start), None) => Some(TimestampRange::from_start(start)),
+ (None, Some(end)) => Some(TimestampRange::until_end(end, false)), // exclusive end
+ (None, None) => None,
+ };
+ Ok(time_range)
+}
+
/// Extracts timestamp from a [HashMap<String, String>] with given key.
fn extract_timestamp(
map: &OptionMap,
@@ -440,3 +450,44 @@ fn idents_to_full_database_name(
.fail(),
}
}
+
+#[cfg(test)]
+mod tests {
+ use std::collections::HashMap;
+ use std::sync::Arc;
+
+ use common_time::{Timestamp, Timezone};
+ use session::context::QueryContextBuilder;
+ use sql::statements::OptionMap;
+
+ use crate::statement::copy_database::{
+ COPY_DATABASE_TIME_END_KEY, COPY_DATABASE_TIME_START_KEY,
+ };
+ use crate::statement::timestamp_range_from_option_map;
+
+ #[test]
+ fn test_timestamp_range_from_option_map() {
+ let query_ctx = QueryContextBuilder::default()
+ .timezone(Arc::new(Timezone::from_tz_string("Asia/Shanghai").unwrap()))
+ .build();
+ let map = OptionMap::from(
+ [
+ (
+ COPY_DATABASE_TIME_START_KEY.to_string(),
+ "2022-04-11 08:00:00".to_string(),
+ ),
+ (
+ COPY_DATABASE_TIME_END_KEY.to_string(),
+ "2022-04-11 16:00:00".to_string(),
+ ),
+ ]
+ .into_iter()
+ .collect::<HashMap<_, _>>(),
+ );
+ let range = timestamp_range_from_option_map(&map, &query_ctx)
+ .unwrap()
+ .unwrap();
+ assert_eq!(Timestamp::new_second(1649635200), range.start().unwrap());
+ assert_eq!(Timestamp::new_second(1649664000), range.end().unwrap());
+ }
+}
diff --git a/tests/cases/standalone/common/copy/copy_to_fs.result b/tests/cases/standalone/common/copy/copy_to_fs.result
index 932067ee5788..f6bdc22a20d3 100644
--- a/tests/cases/standalone/common/copy/copy_to_fs.result
+++ b/tests/cases/standalone/common/copy/copy_to_fs.result
@@ -1,4 +1,4 @@
-CREATE TABLE demo(host string, cpu double, memory double, ts TIMESTAMP time index);
+CREATE TABLE demo(host string, cpu DOUBLE, memory DOUBLE, ts TIMESTAMP TIME INDEX);
Affected Rows: 0
@@ -6,17 +6,17 @@ insert into demo(host, cpu, memory, ts) values ('host1', 66.6, 1024, 16552765570
Affected Rows: 2
-Copy demo TO '/tmp/export/demo.parquet';
+COPY demo TO '/tmp/export/demo.parquet' WITH (start_time='2022-06-15 07:02:37', end_time='2022-06-15 07:02:38');
-Affected Rows: 2
+Affected Rows: 1
-Copy demo TO '/tmp/export/demo.csv' with (format='csv');
+COPY demo TO '/tmp/export/demo.csv' WITH (format='csv', start_time='2022-06-15 07:02:37', end_time='2022-06-15 07:02:38');
-Affected Rows: 2
+Affected Rows: 1
-Copy demo TO '/tmp/export/demo.json' with (format='json');
+COPY demo TO '/tmp/export/demo.json' WITH (format='json', start_time='2022-06-15 07:02:37', end_time='2022-06-15 07:02:38');
-Affected Rows: 2
+Affected Rows: 1
drop table demo;
diff --git a/tests/cases/standalone/common/copy/copy_to_fs.sql b/tests/cases/standalone/common/copy/copy_to_fs.sql
index 2329dda7ca8a..d095c2d7957e 100644
--- a/tests/cases/standalone/common/copy/copy_to_fs.sql
+++ b/tests/cases/standalone/common/copy/copy_to_fs.sql
@@ -1,11 +1,11 @@
-CREATE TABLE demo(host string, cpu double, memory double, ts TIMESTAMP time index);
+CREATE TABLE demo(host string, cpu DOUBLE, memory DOUBLE, ts TIMESTAMP TIME INDEX);
insert into demo(host, cpu, memory, ts) values ('host1', 66.6, 1024, 1655276557000), ('host2', 88.8, 333.3, 1655276558000);
-Copy demo TO '/tmp/export/demo.parquet';
+COPY demo TO '/tmp/export/demo.parquet' WITH (start_time='2022-06-15 07:02:37', end_time='2022-06-15 07:02:38');
-Copy demo TO '/tmp/export/demo.csv' with (format='csv');
+COPY demo TO '/tmp/export/demo.csv' WITH (format='csv', start_time='2022-06-15 07:02:37', end_time='2022-06-15 07:02:38');
-Copy demo TO '/tmp/export/demo.json' with (format='json');
+COPY demo TO '/tmp/export/demo.json' WITH (format='json', start_time='2022-06-15 07:02:37', end_time='2022-06-15 07:02:38');
drop table demo;
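The refactor above extracts `timestamp_range_from_option_map` so COPY TABLE and COPY DATABASE share the same range parsing. A sketch of an additional test-style check for the branch where only `start_time` is supplied, placed in the same `tests` module as the one in the diff (the helper is crate-private) and assuming `TimestampRange::from_start` leaves the end of the range unbounded:

```rust
use std::collections::HashMap;
use std::sync::Arc;

use common_time::Timezone;
use session::context::QueryContextBuilder;
use sql::statements::OptionMap;

use crate::statement::copy_database::COPY_DATABASE_TIME_START_KEY;
use crate::statement::timestamp_range_from_option_map;

#[test]
fn test_open_ended_timestamp_range() {
    let query_ctx = QueryContextBuilder::default()
        .timezone(Arc::new(Timezone::from_tz_string("Asia/Shanghai").unwrap()))
        .build();
    // Only `start_time` is set, exercising the (Some(start), None) branch.
    let map = OptionMap::from(
        [(
            COPY_DATABASE_TIME_START_KEY.to_string(),
            "2022-04-11 08:00:00".to_string(),
        )]
        .into_iter()
        .collect::<HashMap<_, _>>(),
    );
    let range = timestamp_range_from_option_map(&map, &query_ctx)
        .unwrap()
        .unwrap();
    assert!(range.start().is_some());
    // Assumes `TimestampRange::from_start` produces a range with no end bound.
    assert!(range.end().is_none());
}
```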
type: feat
masked_commit_message: support time range in copy table (#3583)

hash: 4a3982ca604fccf01adc8243d0a21cba36e15380
date: 2024-08-07 13:13:04
author: Weny Xu
commit_message: chore: use `configData` (#4522)
is_merge: false
git_diff:
diff --git a/.github/actions/setup-greptimedb-cluster/with-disk.yaml b/.github/actions/setup-greptimedb-cluster/with-disk.yaml
index 1cbd22dbba6f..4b33b37ab08e 100644
--- a/.github/actions/setup-greptimedb-cluster/with-disk.yaml
+++ b/.github/actions/setup-greptimedb-cluster/with-disk.yaml
@@ -1,13 +1,13 @@
meta:
- config: |-
+ configData: |-
[runtime]
global_rt_size = 4
datanode:
- config: |-
+ configData: |-
[runtime]
global_rt_size = 4
compact_rt_size = 2
frontend:
- config: |-
+ configData: |-
[runtime]
- global_rt_size = 4
\ No newline at end of file
+ global_rt_size = 4
diff --git a/.github/actions/setup-greptimedb-cluster/with-minio-and-cache.yaml b/.github/actions/setup-greptimedb-cluster/with-minio-and-cache.yaml
index fc89bd542253..8f99c242f4bf 100644
--- a/.github/actions/setup-greptimedb-cluster/with-minio-and-cache.yaml
+++ b/.github/actions/setup-greptimedb-cluster/with-minio-and-cache.yaml
@@ -1,5 +1,5 @@
meta:
- config: |-
+ configData: |-
[runtime]
global_rt_size = 4
@@ -7,7 +7,7 @@ meta:
[datanode.client]
timeout = "60s"
datanode:
- config: |-
+ configData: |-
[runtime]
global_rt_size = 4
compact_rt_size = 2
@@ -16,7 +16,7 @@ datanode:
cache_path = "/data/greptimedb/s3cache"
cache_capacity = "256MB"
frontend:
- config: |-
+ configData: |-
[runtime]
global_rt_size = 4
diff --git a/.github/actions/setup-greptimedb-cluster/with-minio.yaml b/.github/actions/setup-greptimedb-cluster/with-minio.yaml
index b0b1c6b757e4..b8121374ce03 100644
--- a/.github/actions/setup-greptimedb-cluster/with-minio.yaml
+++ b/.github/actions/setup-greptimedb-cluster/with-minio.yaml
@@ -1,5 +1,5 @@
meta:
- config: |-
+ configData: |-
[runtime]
global_rt_size = 4
@@ -7,12 +7,12 @@ meta:
[datanode.client]
timeout = "60s"
datanode:
- config: |-
+ configData: |-
[runtime]
global_rt_size = 4
compact_rt_size = 2
frontend:
- config: |-
+ configData: |-
[runtime]
global_rt_size = 4
diff --git a/.github/actions/setup-greptimedb-cluster/with-remote-wal.yaml b/.github/actions/setup-greptimedb-cluster/with-remote-wal.yaml
index b5a7dec358fe..043b9fe43f85 100644
--- a/.github/actions/setup-greptimedb-cluster/with-remote-wal.yaml
+++ b/.github/actions/setup-greptimedb-cluster/with-remote-wal.yaml
@@ -1,5 +1,5 @@
meta:
- config: |-
+ configData: |-
[runtime]
global_rt_size = 4
@@ -13,7 +13,7 @@ meta:
[datanode.client]
timeout = "60s"
datanode:
- config: |-
+ configData: |-
[runtime]
global_rt_size = 4
compact_rt_size = 2
@@ -23,7 +23,7 @@ datanode:
broker_endpoints = ["kafka.kafka-cluster.svc.cluster.local:9092"]
linger = "2ms"
frontend:
- config: |-
+ configData: |-
[runtime]
global_rt_size = 4
type: chore
masked_commit_message: use `configData` (#4522)

hash: 4ae0b5e1855ba37e0007e9f0c55fc5577b200c06
date: 2023-05-15 09:30:43
author: LFC
commit_message: test: move instances tests to "tests-integration" (#1573)
is_merge: false
git_diff:
diff --git a/Cargo.lock b/Cargo.lock
index 9013fb86ad7c..c883770ef117 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3081,8 +3081,6 @@ dependencies = [
"prost",
"query",
"regex",
- "rstest",
- "rstest_reuse",
"script",
"serde",
"serde_json",
@@ -8764,31 +8762,49 @@ dependencies = [
"axum-test-helper",
"catalog",
"client",
+ "common-base",
"common-catalog",
"common-error",
"common-grpc",
"common-query",
+ "common-recordbatch",
"common-runtime",
"common-telemetry",
"common-test-util",
+ "datafusion",
+ "datafusion-expr",
"datanode",
"datatypes",
"dotenv",
"frontend",
+ "futures",
+ "itertools",
+ "meta-client",
+ "meta-srv",
"mito",
"object-store",
"once_cell",
+ "partition",
"paste",
+ "prost",
+ "query",
"rand",
+ "rstest",
+ "rstest_reuse",
+ "script",
"secrecy",
"serde",
"serde_json",
"servers",
+ "session",
"snafu",
"sql",
+ "store-api",
"table",
"tempfile",
"tokio",
+ "tonic 0.9.2",
+ "tower",
"uuid",
]
diff --git a/src/frontend/Cargo.toml b/src/frontend/Cargo.toml
index 869f514e22a7..03f402289d82 100644
--- a/src/frontend/Cargo.toml
+++ b/src/frontend/Cargo.toml
@@ -7,6 +7,7 @@ license.workspace = true
[features]
default = ["python"]
python = ["dep:script"]
+testing = []
[dependencies]
api = { path = "../api" }
@@ -66,8 +67,6 @@ common-test-util = { path = "../common/test-util" }
datanode = { path = "../datanode" }
futures = "0.3"
meta-srv = { path = "../meta-srv", features = ["mock"] }
-rstest = "0.17"
-rstest_reuse = "0.5"
strfmt = "0.2"
toml = "0.5"
tower = "0.4"
diff --git a/src/frontend/src/catalog.rs b/src/frontend/src/catalog.rs
index ab31162a5c72..d87a0dc91926 100644
--- a/src/frontend/src/catalog.rs
+++ b/src/frontend/src/catalog.rs
@@ -75,19 +75,19 @@ impl FrontendCatalogManager {
}
}
- pub(crate) fn set_dist_instance(&mut self, dist_instance: Arc<DistInstance>) {
+ pub fn set_dist_instance(&mut self, dist_instance: Arc<DistInstance>) {
self.dist_instance = Some(dist_instance)
}
- pub(crate) fn backend(&self) -> KvBackendRef {
+ pub fn backend(&self) -> KvBackendRef {
self.backend.clone()
}
- pub(crate) fn partition_manager(&self) -> PartitionRuleManagerRef {
+ pub fn partition_manager(&self) -> PartitionRuleManagerRef {
self.partition_manager.clone()
}
- pub(crate) fn datanode_clients(&self) -> Arc<DatanodeClients> {
+ pub fn datanode_clients(&self) -> Arc<DatanodeClients> {
self.datanode_clients.clone()
}
}
@@ -406,71 +406,3 @@ impl SchemaProvider for FrontendSchemaProvider {
Ok(self.table_names().await?.contains(&name.to_string()))
}
}
-
-#[cfg(test)]
-mod tests {
- use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MITO_ENGINE};
- use script::table::{build_scripts_schema, SCRIPTS_TABLE_NAME};
- use table::requests::{CreateTableRequest, TableOptions};
-
- use super::*;
-
- #[tokio::test(flavor = "multi_thread")]
- async fn test_register_system_table() {
- let instance =
- crate::tests::create_distributed_instance("test_register_system_table").await;
-
- let catalog_name = DEFAULT_CATALOG_NAME;
- let schema_name = DEFAULT_SCHEMA_NAME;
- let table_name = SCRIPTS_TABLE_NAME;
- let request = CreateTableRequest {
- id: 1,
- catalog_name: catalog_name.to_string(),
- schema_name: schema_name.to_string(),
- table_name: table_name.to_string(),
- desc: Some("Scripts table".to_string()),
- schema: build_scripts_schema(),
- region_numbers: vec![0],
- primary_key_indices: vec![0, 1],
- create_if_not_exists: true,
- table_options: TableOptions::default(),
- engine: MITO_ENGINE.to_string(),
- };
-
- let result = instance
- .catalog_manager
- .register_system_table(RegisterSystemTableRequest {
- create_table_request: request,
- open_hook: None,
- })
- .await;
- assert!(result.is_ok());
-
- assert!(
- instance
- .catalog_manager
- .table(catalog_name, schema_name, table_name)
- .await
- .unwrap()
- .is_some(),
- "the registered system table cannot be found in catalog"
- );
-
- let mut actually_created_table_in_datanode = 0;
- for datanode in instance.datanodes.values() {
- if datanode
- .catalog_manager()
- .table(catalog_name, schema_name, table_name)
- .await
- .unwrap()
- .is_some()
- {
- actually_created_table_in_datanode += 1;
- }
- }
- assert_eq!(
- actually_created_table_in_datanode, 1,
- "system table should be actually created at one and only one datanode"
- )
- }
-}
diff --git a/src/frontend/src/datanode.rs b/src/frontend/src/datanode.rs
index 7be786f4a40c..969026017f02 100644
--- a/src/frontend/src/datanode.rs
+++ b/src/frontend/src/datanode.rs
@@ -62,8 +62,8 @@ impl DatanodeClients {
.await
}
- #[cfg(test)]
- pub(crate) async fn insert_client(&self, datanode: Peer, client: Client) {
+ #[cfg(feature = "testing")]
+ pub async fn insert_client(&self, datanode: Peer, client: Client) {
self.clients.insert(datanode, client).await
}
}
diff --git a/src/frontend/src/expr_factory.rs b/src/frontend/src/expr_factory.rs
index e32cdcfbe298..d59d58906b30 100644
--- a/src/frontend/src/expr_factory.rs
+++ b/src/frontend/src/expr_factory.rs
@@ -117,10 +117,7 @@ pub(crate) async fn create_external_expr(
}
/// Convert `CreateTable` statement to `CreateExpr` gRPC request.
-pub(crate) fn create_to_expr(
- create: &CreateTable,
- query_ctx: QueryContextRef,
-) -> Result<CreateTableExpr> {
+pub fn create_to_expr(create: &CreateTable, query_ctx: QueryContextRef) -> Result<CreateTableExpr> {
let (catalog_name, schema_name, table_name) =
table_idents_to_full_name(&create.name, query_ctx)
.map_err(BoxedError::new)
diff --git a/src/frontend/src/instance.rs b/src/frontend/src/instance.rs
index bec85dcb2d10..fbbed42af7d0 100644
--- a/src/frontend/src/instance.rs
+++ b/src/frontend/src/instance.rs
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-pub(crate) mod distributed;
+pub mod distributed;
mod grpc;
mod influxdb;
mod opentsdb;
@@ -236,8 +236,7 @@ impl Instance {
Ok(())
}
- #[cfg(test)]
- pub(crate) async fn new_distributed(
+ pub async fn new_distributed(
catalog_manager: CatalogManagerRef,
dist_instance: Arc<DistInstance>,
) -> Self {
@@ -423,8 +422,7 @@ impl Instance {
.map(|_| ())
}
- #[cfg(test)]
- pub(crate) fn statement_executor(&self) -> Arc<StatementExecutor> {
+ pub fn statement_executor(&self) -> Arc<StatementExecutor> {
self.statement_executor.clone()
}
}
@@ -651,13 +649,9 @@ fn validate_insert_request(schema: &Schema, request: &InsertRequest) -> Result<(
#[cfg(test)]
mod tests {
- use std::borrow::Cow;
use std::collections::HashMap;
- use std::sync::atomic::AtomicU32;
use api::v1::column::Values;
- use catalog::helper::{TableGlobalKey, TableGlobalValue};
- use common_recordbatch::RecordBatches;
use datatypes::prelude::{ConcreteDataType, Value};
use datatypes::schema::{ColumnDefaultConstraint, ColumnSchema};
use query::query_engine::options::QueryOptions;
@@ -665,9 +659,6 @@ mod tests {
use strfmt::Format;
use super::*;
- use crate::table::DistTable;
- use crate::tests;
- use crate::tests::MockDistributedInstance;
#[test]
fn test_validate_insert_request() {
@@ -842,372 +833,4 @@ mod tests {
let sql = "DESC TABLE {catalog}{schema}demo;";
replace_test(sql, plugins, &query_ctx);
}
-
- #[tokio::test(flavor = "multi_thread")]
- async fn test_standalone_exec_sql() {
- let standalone = tests::create_standalone_instance("test_standalone_exec_sql").await;
- let instance = standalone.instance.as_ref();
-
- let sql = r#"
- CREATE TABLE demo(
- host STRING,
- ts TIMESTAMP,
- cpu DOUBLE NULL,
- memory DOUBLE NULL,
- disk_util DOUBLE DEFAULT 9.9,
- TIME INDEX (ts),
- PRIMARY KEY(host)
- ) engine=mito"#;
- create_table(instance, sql).await;
-
- insert_and_query(instance).await;
-
- drop_table(instance).await;
- }
-
- #[tokio::test(flavor = "multi_thread")]
- async fn test_distributed_exec_sql() {
- let distributed = tests::create_distributed_instance("test_distributed_exec_sql").await;
- let instance = distributed.frontend.as_ref();
-
- let sql = r#"
- CREATE TABLE demo(
- host STRING,
- ts TIMESTAMP,
- cpu DOUBLE NULL,
- memory DOUBLE NULL,
- disk_util DOUBLE DEFAULT 9.9,
- TIME INDEX (ts),
- PRIMARY KEY(host)
- )
- PARTITION BY RANGE COLUMNS (host) (
- PARTITION r0 VALUES LESS THAN ('550-A'),
- PARTITION r1 VALUES LESS THAN ('550-W'),
- PARTITION r2 VALUES LESS THAN ('MOSS'),
- PARTITION r3 VALUES LESS THAN (MAXVALUE),
- )
- engine=mito"#;
- create_table(instance, sql).await;
-
- insert_and_query(instance).await;
-
- verify_data_distribution(
- &distributed,
- HashMap::from([
- (
- 0u32,
- "\
-+---------------------+------+
-| ts | host |
-+---------------------+------+
-| 2013-12-31T16:00:00 | 490 |
-+---------------------+------+",
- ),
- (
- 1u32,
- "\
-+---------------------+-------+
-| ts | host |
-+---------------------+-------+
-| 2022-12-31T16:00:00 | 550-A |
-+---------------------+-------+",
- ),
- (
- 2u32,
- "\
-+---------------------+-------+
-| ts | host |
-+---------------------+-------+
-| 2023-12-31T16:00:00 | 550-W |
-+---------------------+-------+",
- ),
- (
- 3u32,
- "\
-+---------------------+------+
-| ts | host |
-+---------------------+------+
-| 2043-12-31T16:00:00 | MOSS |
-+---------------------+------+",
- ),
- ]),
- )
- .await;
-
- drop_table(instance).await;
-
- verify_table_is_dropped(&distributed).await;
- }
-
- async fn query(instance: &Instance, sql: &str) -> Output {
- SqlQueryHandler::do_query(instance, sql, QueryContext::arc())
- .await
- .remove(0)
- .unwrap()
- }
-
- async fn create_table(instance: &Instance, sql: &str) {
- let output = query(instance, sql).await;
- let Output::AffectedRows(x) = output else { unreachable!() };
- assert_eq!(x, 0);
- }
-
- async fn insert_and_query(instance: &Instance) {
- let sql = r#"INSERT INTO demo(host, cpu, memory, ts) VALUES
- ('490', 0.1, 1, 1388505600000),
- ('550-A', 1, 100, 1672502400000),
- ('550-W', 10000, 1000000, 1704038400000),
- ('MOSS', 100000000, 10000000000, 2335190400000)
- "#;
- let output = query(instance, sql).await;
- let Output::AffectedRows(x) = output else { unreachable!() };
- assert_eq!(x, 4);
-
- let sql = "SELECT * FROM demo WHERE ts > cast(1000000000 as timestamp) ORDER BY host"; // use nanoseconds as where condition
- let output = query(instance, sql).await;
- let Output::Stream(s) = output else { unreachable!() };
- let batches = common_recordbatch::util::collect_batches(s).await.unwrap();
- let pretty_print = batches.pretty_print().unwrap();
- let expected = "\
-+-------+---------------------+-------------+-----------+-----------+
-| host | ts | cpu | memory | disk_util |
-+-------+---------------------+-------------+-----------+-----------+
-| 490 | 2013-12-31T16:00:00 | 0.1 | 1.0 | 9.9 |
-| 550-A | 2022-12-31T16:00:00 | 1.0 | 100.0 | 9.9 |
-| 550-W | 2023-12-31T16:00:00 | 10000.0 | 1000000.0 | 9.9 |
-| MOSS | 2043-12-31T16:00:00 | 100000000.0 | 1.0e10 | 9.9 |
-+-------+---------------------+-------------+-----------+-----------+";
- assert_eq!(pretty_print, expected);
- }
-
- async fn verify_data_distribution(
- instance: &MockDistributedInstance,
- expected_distribution: HashMap<u32, &str>,
- ) {
- let table = instance
- .frontend
- .catalog_manager()
- .table("greptime", "public", "demo")
- .await
- .unwrap()
- .unwrap();
- let table = table.as_any().downcast_ref::<DistTable>().unwrap();
-
- let TableGlobalValue { regions_id_map, .. } = table
- .table_global_value(&TableGlobalKey {
- catalog_name: "greptime".to_string(),
- schema_name: "public".to_string(),
- table_name: "demo".to_string(),
- })
- .await
- .unwrap()
- .unwrap();
- let region_to_dn_map = regions_id_map
- .iter()
- .map(|(k, v)| (v[0], *k))
- .collect::<HashMap<u32, u64>>();
- assert_eq!(region_to_dn_map.len(), expected_distribution.len());
-
- let stmt = QueryLanguageParser::parse_sql("SELECT ts, host FROM demo ORDER BY ts").unwrap();
- for (region, dn) in region_to_dn_map.iter() {
- let dn = instance.datanodes.get(dn).unwrap();
- let engine = dn.query_engine();
- let plan = engine
- .planner()
- .plan(stmt.clone(), QueryContext::arc())
- .await
- .unwrap();
- let output = engine.execute(plan, QueryContext::arc()).await.unwrap();
- let Output::Stream(stream) = output else { unreachable!() };
- let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
- let actual = recordbatches.pretty_print().unwrap();
-
- let expected = expected_distribution.get(region).unwrap();
- assert_eq!(&actual, expected);
- }
- }
-
- async fn drop_table(instance: &Instance) {
- let sql = "DROP TABLE demo";
- let output = query(instance, sql).await;
- let Output::AffectedRows(x) = output else { unreachable!() };
- assert_eq!(x, 1);
- }
-
- async fn verify_table_is_dropped(instance: &MockDistributedInstance) {
- for (_, dn) in instance.datanodes.iter() {
- assert!(dn
- .catalog_manager()
- .table("greptime", "public", "demo")
- .await
- .unwrap()
- .is_none())
- }
- }
-
- #[tokio::test(flavor = "multi_thread")]
- async fn test_sql_interceptor_plugin() {
- #[derive(Default)]
- struct AssertionHook {
- pub(crate) c: AtomicU32,
- }
-
- impl SqlQueryInterceptor for AssertionHook {
- type Error = Error;
-
- fn pre_parsing<'a>(
- &self,
- query: &'a str,
- _query_ctx: QueryContextRef,
- ) -> Result<Cow<'a, str>> {
- self.c.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
- assert!(query.starts_with("CREATE TABLE demo"));
- Ok(Cow::Borrowed(query))
- }
-
- fn post_parsing(
- &self,
- statements: Vec<Statement>,
- _query_ctx: QueryContextRef,
- ) -> Result<Vec<Statement>> {
- self.c.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
- assert!(matches!(statements[0], Statement::CreateTable(_)));
- Ok(statements)
- }
-
- fn pre_execute(
- &self,
- _statement: &Statement,
- _plan: Option<&query::plan::LogicalPlan>,
- _query_ctx: QueryContextRef,
- ) -> Result<()> {
- self.c.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
- Ok(())
- }
-
- fn post_execute(
- &self,
- mut output: Output,
- _query_ctx: QueryContextRef,
- ) -> Result<Output> {
- self.c.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
- match &mut output {
- Output::AffectedRows(rows) => {
- assert_eq!(*rows, 0);
- // update output result
- *rows = 10;
- }
- _ => unreachable!(),
- }
- Ok(output)
- }
- }
-
- let standalone = tests::create_standalone_instance("test_hook").await;
- let mut instance = standalone.instance;
-
- let plugins = Plugins::new();
- let counter_hook = Arc::new(AssertionHook::default());
- plugins.insert::<SqlQueryInterceptorRef<Error>>(counter_hook.clone());
- Arc::make_mut(&mut instance).set_plugins(Arc::new(plugins));
-
- let sql = r#"CREATE TABLE demo(
- host STRING,
- ts TIMESTAMP,
- cpu DOUBLE NULL,
- memory DOUBLE NULL,
- disk_util DOUBLE DEFAULT 9.9,
- TIME INDEX (ts),
- PRIMARY KEY(host)
- ) engine=mito with(regions=1);"#;
- let output = SqlQueryHandler::do_query(&*instance, sql, QueryContext::arc())
- .await
- .remove(0)
- .unwrap();
-
-        // assert that the hook is called 4 times, once for each interceptor method
- assert_eq!(4, counter_hook.c.load(std::sync::atomic::Ordering::Relaxed));
- match output {
- Output::AffectedRows(rows) => assert_eq!(rows, 10),
- _ => unreachable!(),
- }
- }
-
- #[tokio::test(flavor = "multi_thread")]
- async fn test_disable_db_operation_plugin() {
- #[derive(Default)]
- struct DisableDBOpHook;
-
- impl SqlQueryInterceptor for DisableDBOpHook {
- type Error = Error;
-
- fn post_parsing(
- &self,
- statements: Vec<Statement>,
- _query_ctx: QueryContextRef,
- ) -> Result<Vec<Statement>> {
- for s in &statements {
- match s {
- Statement::CreateDatabase(_) | Statement::ShowDatabases(_) => {
- return Err(Error::NotSupported {
- feat: "Database operations".to_owned(),
- })
- }
- _ => {}
- }
- }
-
- Ok(statements)
- }
- }
-
- let query_ctx = Arc::new(QueryContext::new());
-
- let standalone = tests::create_standalone_instance("test_db_hook").await;
- let mut instance = standalone.instance;
-
- let plugins = Plugins::new();
- let hook = Arc::new(DisableDBOpHook::default());
- plugins.insert::<SqlQueryInterceptorRef<Error>>(hook.clone());
- Arc::make_mut(&mut instance).set_plugins(Arc::new(plugins));
-
- let sql = r#"CREATE TABLE demo(
- host STRING,
- ts TIMESTAMP,
- cpu DOUBLE NULL,
- memory DOUBLE NULL,
- disk_util DOUBLE DEFAULT 9.9,
- TIME INDEX (ts),
- PRIMARY KEY(host)
- ) engine=mito with(regions=1);"#;
- let output = SqlQueryHandler::do_query(&*instance, sql, query_ctx.clone())
- .await
- .remove(0)
- .unwrap();
-
- match output {
- Output::AffectedRows(rows) => assert_eq!(rows, 0),
- _ => unreachable!(),
- }
-
- let sql = r#"CREATE DATABASE tomcat"#;
- if let Err(e) = SqlQueryHandler::do_query(&*instance, sql, query_ctx.clone())
- .await
- .remove(0)
- {
- assert!(matches!(e, error::Error::NotSupported { .. }));
- } else {
- unreachable!();
- }
-
- let sql = r#"SELECT 1; SHOW DATABASES"#;
- if let Err(e) = SqlQueryHandler::do_query(&*instance, sql, query_ctx.clone())
- .await
- .remove(0)
- {
- assert!(matches!(e, error::Error::NotSupported { .. }));
- } else {
- unreachable!();
- }
- }
}
diff --git a/src/frontend/src/instance/distributed.rs b/src/frontend/src/instance/distributed.rs
index a314d35747f9..5f925517c9c6 100644
--- a/src/frontend/src/instance/distributed.rs
+++ b/src/frontend/src/instance/distributed.rs
@@ -73,14 +73,14 @@ use crate::table::DistTable;
const MAX_VALUE: &str = "MAXVALUE";
#[derive(Clone)]
-pub(crate) struct DistInstance {
+pub struct DistInstance {
meta_client: Arc<MetaClient>,
catalog_manager: Arc<FrontendCatalogManager>,
datanode_clients: Arc<DatanodeClients>,
}
impl DistInstance {
- pub(crate) fn new(
+ pub fn new(
meta_client: Arc<MetaClient>,
catalog_manager: Arc<FrontendCatalogManager>,
datanode_clients: Arc<DatanodeClients>,
@@ -92,7 +92,7 @@ impl DistInstance {
}
}
- pub(crate) async fn create_table(
+ pub async fn create_table(
&self,
create_table: &mut CreateTableExpr,
partitions: Option<Partitions>,
@@ -579,8 +579,7 @@ impl DistInstance {
Ok(Output::AffectedRows(affected_rows))
}
- #[cfg(test)]
- pub(crate) fn catalog_manager(&self) -> Arc<FrontendCatalogManager> {
+ pub fn catalog_manager(&self) -> Arc<FrontendCatalogManager> {
self.catalog_manager.clone()
}
}
diff --git a/src/frontend/src/instance/grpc.rs b/src/frontend/src/instance/grpc.rs
index 06960b8d43a5..5b16fccd8abe 100644
--- a/src/frontend/src/instance/grpc.rs
+++ b/src/frontend/src/instance/grpc.rs
@@ -81,845 +81,3 @@ impl GrpcQueryHandler for Instance {
Ok(output)
}
}
-
-#[cfg(test)]
-mod test {
- use std::collections::HashMap;
-
- use api::v1::column::{SemanticType, Values};
- use api::v1::ddl_request::Expr as DdlExpr;
- use api::v1::{
- alter_expr, AddColumn, AddColumns, AlterExpr, Column, ColumnDataType, ColumnDef,
- CreateDatabaseExpr, CreateTableExpr, DdlRequest, DeleteRequest, DropTableExpr,
- FlushTableExpr, InsertRequest, QueryRequest,
- };
- use catalog::helper::{TableGlobalKey, TableGlobalValue};
- use common_catalog::consts::MITO_ENGINE;
- use common_query::Output;
- use common_recordbatch::RecordBatches;
- use query::parser::QueryLanguageParser;
- use session::context::QueryContext;
- use tests::{has_parquet_file, test_region_dir};
-
- use super::*;
- use crate::table::DistTable;
- use crate::tests;
- use crate::tests::MockDistributedInstance;
-
- #[tokio::test(flavor = "multi_thread")]
- async fn test_distributed_handle_ddl_request() {
- let instance =
- tests::create_distributed_instance("test_distributed_handle_ddl_request").await;
- let frontend = &instance.frontend;
-
- test_handle_ddl_request(frontend.as_ref()).await;
-
- verify_table_is_dropped(&instance).await;
- }
-
- #[tokio::test(flavor = "multi_thread")]
- async fn test_standalone_handle_ddl_request() {
- let standalone =
- tests::create_standalone_instance("test_standalone_handle_ddl_request").await;
- let instance = &standalone.instance;
-
- test_handle_ddl_request(instance.as_ref()).await;
- }
-
- async fn query(instance: &Instance, request: Request) -> Output {
- GrpcQueryHandler::do_query(instance, request, QueryContext::arc())
- .await
- .unwrap()
- }
-
- async fn test_handle_ddl_request(instance: &Instance) {
- let request = Request::Ddl(DdlRequest {
- expr: Some(DdlExpr::CreateDatabase(CreateDatabaseExpr {
- database_name: "database_created_through_grpc".to_string(),
- create_if_not_exists: true,
- })),
- });
- let output = query(instance, request).await;
- assert!(matches!(output, Output::AffectedRows(1)));
-
- let request = Request::Ddl(DdlRequest {
- expr: Some(DdlExpr::CreateTable(CreateTableExpr {
- catalog_name: "greptime".to_string(),
- schema_name: "database_created_through_grpc".to_string(),
- table_name: "table_created_through_grpc".to_string(),
- column_defs: vec![
- ColumnDef {
- name: "a".to_string(),
- datatype: ColumnDataType::String as _,
- is_nullable: true,
- default_constraint: vec![],
- },
- ColumnDef {
- name: "ts".to_string(),
- datatype: ColumnDataType::TimestampMillisecond as _,
- is_nullable: false,
- default_constraint: vec![],
- },
- ],
- time_index: "ts".to_string(),
- engine: MITO_ENGINE.to_string(),
- ..Default::default()
- })),
- });
- let output = query(instance, request).await;
- assert!(matches!(output, Output::AffectedRows(0)));
-
- let request = Request::Ddl(DdlRequest {
- expr: Some(DdlExpr::Alter(AlterExpr {
- catalog_name: "greptime".to_string(),
- schema_name: "database_created_through_grpc".to_string(),
- table_name: "table_created_through_grpc".to_string(),
- kind: Some(alter_expr::Kind::AddColumns(AddColumns {
- add_columns: vec![AddColumn {
- column_def: Some(ColumnDef {
- name: "b".to_string(),
- datatype: ColumnDataType::Int32 as _,
- is_nullable: true,
- default_constraint: vec![],
- }),
- is_key: false,
- }],
- })),
- })),
- });
- let output = query(instance, request).await;
- assert!(matches!(output, Output::AffectedRows(0)));
-
- let request = Request::Query(QueryRequest {
- query: Some(Query::Sql("INSERT INTO database_created_through_grpc.table_created_through_grpc (a, b, ts) VALUES ('s', 1, 1672816466000)".to_string()))
- });
- let output = query(instance, request).await;
- assert!(matches!(output, Output::AffectedRows(1)));
-
- let request = Request::Query(QueryRequest {
- query: Some(Query::Sql(
- "SELECT ts, a, b FROM database_created_through_grpc.table_created_through_grpc"
- .to_string(),
- )),
- });
- let output = query(instance, request).await;
- let Output::Stream(stream) = output else { unreachable!() };
- let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
- let expected = "\
-+---------------------+---+---+
-| ts | a | b |
-+---------------------+---+---+
-| 2023-01-04T07:14:26 | s | 1 |
-+---------------------+---+---+";
- assert_eq!(recordbatches.pretty_print().unwrap(), expected);
-
- let request = Request::Ddl(DdlRequest {
- expr: Some(DdlExpr::DropTable(DropTableExpr {
- catalog_name: "greptime".to_string(),
- schema_name: "database_created_through_grpc".to_string(),
- table_name: "table_created_through_grpc".to_string(),
- })),
- });
- let output = query(instance, request).await;
- assert!(matches!(output, Output::AffectedRows(1)));
- }
-
- async fn verify_table_is_dropped(instance: &MockDistributedInstance) {
- for (_, dn) in instance.datanodes.iter() {
- assert!(dn
- .catalog_manager()
- .table(
- "greptime",
- "database_created_through_grpc",
- "table_created_through_grpc"
- )
- .await
- .unwrap()
- .is_none());
- }
- }
-
- #[tokio::test(flavor = "multi_thread")]
- async fn test_distributed_insert_delete_and_query() {
- common_telemetry::init_default_ut_logging();
-
- let instance =
- tests::create_distributed_instance("test_distributed_insert_delete_and_query").await;
- let frontend = instance.frontend.as_ref();
-
- let table_name = "my_dist_table";
- let sql = format!(
- r"
-CREATE TABLE {table_name} (
- a INT,
- b STRING PRIMARY KEY,
- ts TIMESTAMP,
- TIME INDEX (ts)
-) PARTITION BY RANGE COLUMNS(a) (
- PARTITION r0 VALUES LESS THAN (10),
- PARTITION r1 VALUES LESS THAN (20),
- PARTITION r2 VALUES LESS THAN (50),
- PARTITION r3 VALUES LESS THAN (MAXVALUE),
-)"
- );
- create_table(frontend, sql).await;
-
- test_insert_delete_and_query_on_existing_table(frontend, table_name).await;
-
- verify_data_distribution(
- &instance,
- table_name,
- HashMap::from([
- (
- 0u32,
- "\
-+---------------------+---+-------------------+
-| ts | a | b |
-+---------------------+---+-------------------+
-| 2023-01-01T07:26:12 | 1 | ts: 1672557972000 |
-| 2023-01-01T07:26:14 | 3 | ts: 1672557974000 |
-| 2023-01-01T07:26:15 | 4 | ts: 1672557975000 |
-| 2023-01-01T07:26:16 | 5 | ts: 1672557976000 |
-| 2023-01-01T07:26:17 | | ts: 1672557977000 |
-+---------------------+---+-------------------+",
- ),
- (
- 1u32,
- "\
-+---------------------+----+-------------------+
-| ts | a | b |
-+---------------------+----+-------------------+
-| 2023-01-01T07:26:18 | 11 | ts: 1672557978000 |
-+---------------------+----+-------------------+",
- ),
- (
- 2u32,
- "\
-+---------------------+----+-------------------+
-| ts | a | b |
-+---------------------+----+-------------------+
-| 2023-01-01T07:26:20 | 20 | ts: 1672557980000 |
-| 2023-01-01T07:26:21 | 21 | ts: 1672557981000 |
-| 2023-01-01T07:26:23 | 23 | ts: 1672557983000 |
-+---------------------+----+-------------------+",
- ),
- (
- 3u32,
- "\
-+---------------------+----+-------------------+
-| ts | a | b |
-+---------------------+----+-------------------+
-| 2023-01-01T07:26:24 | 50 | ts: 1672557984000 |
-| 2023-01-01T07:26:25 | 51 | ts: 1672557985000 |
-| 2023-01-01T07:26:27 | 53 | ts: 1672557987000 |
-+---------------------+----+-------------------+",
- ),
- ]),
- )
- .await;
-
- test_insert_delete_and_query_on_auto_created_table(frontend).await;
-
- // Auto created table has only one region.
- verify_data_distribution(
- &instance,
- "auto_created_table",
- HashMap::from([(
- 0u32,
- "\
-+---------------------+---+---+
-| ts | a | b |
-+---------------------+---+---+
-| 2023-01-01T07:26:16 | | |
-| 2023-01-01T07:26:17 | 6 | |
-| 2023-01-01T07:26:18 | | x |
-| 2023-01-01T07:26:20 | | z |
-+---------------------+---+---+",
- )]),
- )
- .await;
- }
-
- #[tokio::test(flavor = "multi_thread")]
- async fn test_standalone_insert_and_query() {
- common_telemetry::init_default_ut_logging();
-
- let standalone =
- tests::create_standalone_instance("test_standalone_insert_and_query").await;
- let instance = &standalone.instance;
-
- let table_name = "my_table";
- let sql = format!("CREATE TABLE {table_name} (a INT, b STRING, ts TIMESTAMP, TIME INDEX (ts), PRIMARY KEY (a, b))");
- create_table(instance, sql).await;
-
- test_insert_delete_and_query_on_existing_table(instance, table_name).await;
-
- test_insert_delete_and_query_on_auto_created_table(instance).await
- }
-
- #[tokio::test(flavor = "multi_thread")]
- async fn test_distributed_flush_table() {
- common_telemetry::init_default_ut_logging();
-
- let instance = tests::create_distributed_instance("test_distributed_flush_table").await;
- let data_tmp_dirs = instance.data_tmp_dirs();
- let frontend = instance.frontend.as_ref();
-
- let table_name = "my_dist_table";
- let sql = format!(
- r"
-CREATE TABLE {table_name} (
- a INT,
- ts TIMESTAMP,
- TIME INDEX (ts)
-) PARTITION BY RANGE COLUMNS(a) (
- PARTITION r0 VALUES LESS THAN (10),
- PARTITION r1 VALUES LESS THAN (20),
- PARTITION r2 VALUES LESS THAN (50),
- PARTITION r3 VALUES LESS THAN (MAXVALUE),
-)"
- );
- create_table(frontend, sql).await;
-
- test_insert_delete_and_query_on_existing_table(frontend, table_name).await;
-
- flush_table(frontend, "greptime", "public", table_name, None).await;
-        // Flush again to wait for the previous flush task to finish
- flush_table(frontend, "greptime", "public", table_name, None).await;
-
- let table = instance
- .frontend
- .catalog_manager()
- .table("greptime", "public", table_name)
- .await
- .unwrap()
- .unwrap();
- let table = table.as_any().downcast_ref::<DistTable>().unwrap();
-
- let tgv = table
- .table_global_value(&TableGlobalKey {
- catalog_name: "greptime".to_string(),
- schema_name: "public".to_string(),
- table_name: table_name.to_string(),
- })
- .await
- .unwrap()
- .unwrap();
- let table_id = tgv.table_id();
-
- let region_to_dn_map = tgv
- .regions_id_map
- .iter()
- .map(|(k, v)| (v[0], *k))
- .collect::<HashMap<u32, u64>>();
-
- for (region, dn) in region_to_dn_map.iter() {
- // data_tmp_dirs -> dn: 1..4
- let data_tmp_dir = data_tmp_dirs.get((*dn - 1) as usize).unwrap();
- let region_dir = test_region_dir(
- data_tmp_dir.path().to_str().unwrap(),
- "greptime",
- "public",
- table_id,
- *region,
- );
- has_parquet_file(®ion_dir);
- }
- }
-
- #[tokio::test(flavor = "multi_thread")]
- async fn test_standalone_flush_table() {
- common_telemetry::init_default_ut_logging();
-
- let standalone = tests::create_standalone_instance("test_standalone_flush_table").await;
- let instance = &standalone.instance;
- let data_tmp_dir = standalone.data_tmp_dir();
-
- let table_name = "my_table";
- let sql = format!("CREATE TABLE {table_name} (a INT, b STRING, ts TIMESTAMP, TIME INDEX (ts), PRIMARY KEY (a, b))");
-
- create_table(instance, sql).await;
-
- test_insert_delete_and_query_on_existing_table(instance, table_name).await;
-
- let table_id = 1024;
- let region_id = 0;
- let region_dir = test_region_dir(
- data_tmp_dir.path().to_str().unwrap(),
- "greptime",
- "public",
- table_id,
- region_id,
- );
- assert!(!has_parquet_file(®ion_dir));
-
- flush_table(instance, "greptime", "public", "my_table", None).await;
-        // Flush again to wait for the previous flush task to finish
- flush_table(instance, "greptime", "public", "my_table", None).await;
-
- assert!(has_parquet_file(®ion_dir));
- }
-
- async fn create_table(frontend: &Instance, sql: String) {
- let request = Request::Query(QueryRequest {
- query: Some(Query::Sql(sql)),
- });
- let output = query(frontend, request).await;
- assert!(matches!(output, Output::AffectedRows(0)));
- }
-
- async fn flush_table(
- frontend: &Instance,
- catalog_name: &str,
- schema_name: &str,
- table_name: &str,
- region_id: Option<u32>,
- ) {
- let request = Request::Ddl(DdlRequest {
- expr: Some(DdlExpr::FlushTable(FlushTableExpr {
- catalog_name: catalog_name.to_string(),
- schema_name: schema_name.to_string(),
- table_name: table_name.to_string(),
- region_id,
- })),
- });
-
- let output = query(frontend, request).await;
- assert!(matches!(output, Output::AffectedRows(0)));
- }
-
- async fn test_insert_delete_and_query_on_existing_table(instance: &Instance, table_name: &str) {
- let ts_millisecond_values = vec![
- 1672557972000,
- 1672557973000,
- 1672557974000,
- 1672557975000,
- 1672557976000,
- 1672557977000,
- 1672557978000,
- 1672557979000,
- 1672557980000,
- 1672557981000,
- 1672557982000,
- 1672557983000,
- 1672557984000,
- 1672557985000,
- 1672557986000,
- 1672557987000,
- ];
- let insert = InsertRequest {
- table_name: table_name.to_string(),
- columns: vec![
- Column {
- column_name: "a".to_string(),
- values: Some(Values {
- i32_values: vec![1, 2, 3, 4, 5, 11, 12, 20, 21, 22, 23, 50, 51, 52, 53],
- ..Default::default()
- }),
- null_mask: vec![32, 0],
- semantic_type: SemanticType::Field as i32,
- datatype: ColumnDataType::Int32 as i32,
- },
- Column {
- column_name: "b".to_string(),
- values: Some(Values {
- string_values: ts_millisecond_values
- .iter()
- .map(|x| format!("ts: {x}"))
- .collect(),
- ..Default::default()
- }),
- semantic_type: SemanticType::Tag as i32,
- datatype: ColumnDataType::String as i32,
- ..Default::default()
- },
- Column {
- column_name: "ts".to_string(),
- values: Some(Values {
- ts_millisecond_values,
- ..Default::default()
- }),
- semantic_type: SemanticType::Timestamp as i32,
- datatype: ColumnDataType::TimestampMillisecond as i32,
- ..Default::default()
- },
- ],
- row_count: 16,
- ..Default::default()
- };
- let output = query(instance, Request::Insert(insert)).await;
- assert!(matches!(output, Output::AffectedRows(16)));
-
- let request = Request::Query(QueryRequest {
- query: Some(Query::Sql(format!(
- "SELECT ts, a, b FROM {table_name} ORDER BY ts"
- ))),
- });
- let output = query(instance, request.clone()).await;
- let Output::Stream(stream) = output else { unreachable!() };
- let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
- let expected = "\
-+---------------------+----+-------------------+
-| ts | a | b |
-+---------------------+----+-------------------+
-| 2023-01-01T07:26:12 | 1 | ts: 1672557972000 |
-| 2023-01-01T07:26:13 | 2 | ts: 1672557973000 |
-| 2023-01-01T07:26:14 | 3 | ts: 1672557974000 |
-| 2023-01-01T07:26:15 | 4 | ts: 1672557975000 |
-| 2023-01-01T07:26:16 | 5 | ts: 1672557976000 |
-| 2023-01-01T07:26:17 | | ts: 1672557977000 |
-| 2023-01-01T07:26:18 | 11 | ts: 1672557978000 |
-| 2023-01-01T07:26:19 | 12 | ts: 1672557979000 |
-| 2023-01-01T07:26:20 | 20 | ts: 1672557980000 |
-| 2023-01-01T07:26:21 | 21 | ts: 1672557981000 |
-| 2023-01-01T07:26:22 | 22 | ts: 1672557982000 |
-| 2023-01-01T07:26:23 | 23 | ts: 1672557983000 |
-| 2023-01-01T07:26:24 | 50 | ts: 1672557984000 |
-| 2023-01-01T07:26:25 | 51 | ts: 1672557985000 |
-| 2023-01-01T07:26:26 | 52 | ts: 1672557986000 |
-| 2023-01-01T07:26:27 | 53 | ts: 1672557987000 |
-+---------------------+----+-------------------+";
- assert_eq!(recordbatches.pretty_print().unwrap(), expected);
-
- let delete = DeleteRequest {
- table_name: table_name.to_string(),
- region_number: 0,
- key_columns: vec![
- Column {
- column_name: "a".to_string(),
- semantic_type: SemanticType::Field as i32,
- values: Some(Values {
- i32_values: vec![2, 12, 22, 52],
- ..Default::default()
- }),
- datatype: ColumnDataType::Int32 as i32,
- ..Default::default()
- },
- Column {
- column_name: "b".to_string(),
- semantic_type: SemanticType::Tag as i32,
- values: Some(Values {
- string_values: vec![
- "ts: 1672557973000".to_string(),
- "ts: 1672557979000".to_string(),
- "ts: 1672557982000".to_string(),
- "ts: 1672557986000".to_string(),
- ],
- ..Default::default()
- }),
- datatype: ColumnDataType::String as i32,
- ..Default::default()
- },
- Column {
- column_name: "ts".to_string(),
- semantic_type: SemanticType::Timestamp as i32,
- values: Some(Values {
- ts_millisecond_values: vec![
- 1672557973000,
- 1672557979000,
- 1672557982000,
- 1672557986000,
- ],
- ..Default::default()
- }),
- datatype: ColumnDataType::TimestampMillisecond as i32,
- ..Default::default()
- },
- ],
- row_count: 4,
- };
- let output = query(instance, Request::Delete(delete)).await;
- assert!(matches!(output, Output::AffectedRows(4)));
-
- let output = query(instance, request).await;
- let Output::Stream(stream) = output else { unreachable!() };
- let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
- let expected = "\
-+---------------------+----+-------------------+
-| ts | a | b |
-+---------------------+----+-------------------+
-| 2023-01-01T07:26:12 | 1 | ts: 1672557972000 |
-| 2023-01-01T07:26:14 | 3 | ts: 1672557974000 |
-| 2023-01-01T07:26:15 | 4 | ts: 1672557975000 |
-| 2023-01-01T07:26:16 | 5 | ts: 1672557976000 |
-| 2023-01-01T07:26:17 | | ts: 1672557977000 |
-| 2023-01-01T07:26:18 | 11 | ts: 1672557978000 |
-| 2023-01-01T07:26:20 | 20 | ts: 1672557980000 |
-| 2023-01-01T07:26:21 | 21 | ts: 1672557981000 |
-| 2023-01-01T07:26:23 | 23 | ts: 1672557983000 |
-| 2023-01-01T07:26:24 | 50 | ts: 1672557984000 |
-| 2023-01-01T07:26:25 | 51 | ts: 1672557985000 |
-| 2023-01-01T07:26:27 | 53 | ts: 1672557987000 |
-+---------------------+----+-------------------+";
- assert_eq!(recordbatches.pretty_print().unwrap(), expected);
- }
-
- async fn verify_data_distribution(
- instance: &MockDistributedInstance,
- table_name: &str,
- expected_distribution: HashMap<u32, &str>,
- ) {
- let table = instance
- .frontend
- .catalog_manager()
- .table("greptime", "public", table_name)
- .await
- .unwrap()
- .unwrap();
- let table = table.as_any().downcast_ref::<DistTable>().unwrap();
-
- let TableGlobalValue { regions_id_map, .. } = table
- .table_global_value(&TableGlobalKey {
- catalog_name: "greptime".to_string(),
- schema_name: "public".to_string(),
- table_name: table_name.to_string(),
- })
- .await
- .unwrap()
- .unwrap();
- let region_to_dn_map = regions_id_map
- .iter()
- .map(|(k, v)| (v[0], *k))
- .collect::<HashMap<u32, u64>>();
- assert_eq!(region_to_dn_map.len(), expected_distribution.len());
-
- for (region, dn) in region_to_dn_map.iter() {
- let stmt = QueryLanguageParser::parse_sql(&format!(
- "SELECT ts, a, b FROM {table_name} ORDER BY ts"
- ))
- .unwrap();
- let dn = instance.datanodes.get(dn).unwrap();
- let engine = dn.query_engine();
- let plan = engine
- .planner()
- .plan(stmt, QueryContext::arc())
- .await
- .unwrap();
- let output = engine.execute(plan, QueryContext::arc()).await.unwrap();
- let Output::Stream(stream) = output else { unreachable!() };
- let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
- let actual = recordbatches.pretty_print().unwrap();
-
- let expected = expected_distribution.get(region).unwrap();
- assert_eq!(&actual, expected);
- }
- }
-
- async fn test_insert_delete_and_query_on_auto_created_table(instance: &Instance) {
- let insert = InsertRequest {
- table_name: "auto_created_table".to_string(),
- columns: vec![
- Column {
- column_name: "a".to_string(),
- values: Some(Values {
- i32_values: vec![4, 6],
- ..Default::default()
- }),
- null_mask: vec![2],
- semantic_type: SemanticType::Field as i32,
- datatype: ColumnDataType::Int32 as i32,
- },
- Column {
- column_name: "ts".to_string(),
- values: Some(Values {
- ts_millisecond_values: vec![1672557975000, 1672557976000, 1672557977000],
- ..Default::default()
- }),
- semantic_type: SemanticType::Timestamp as i32,
- datatype: ColumnDataType::TimestampMillisecond as i32,
- ..Default::default()
- },
- ],
- row_count: 3,
- ..Default::default()
- };
-
-        // Test that a non-existent table is created automatically upon insertion.
- let request = Request::Insert(insert);
- let output = query(instance, request).await;
- assert!(matches!(output, Output::AffectedRows(3)));
-
- let insert = InsertRequest {
- table_name: "auto_created_table".to_string(),
- columns: vec![
- Column {
- column_name: "b".to_string(),
- values: Some(Values {
- string_values: vec!["x".to_string(), "z".to_string()],
- ..Default::default()
- }),
- null_mask: vec![2],
- semantic_type: SemanticType::Field as i32,
- datatype: ColumnDataType::String as i32,
- },
- Column {
- column_name: "ts".to_string(),
- values: Some(Values {
- ts_millisecond_values: vec![1672557978000, 1672557979000, 1672557980000],
- ..Default::default()
- }),
- semantic_type: SemanticType::Timestamp as i32,
- datatype: ColumnDataType::TimestampMillisecond as i32,
- ..Default::default()
- },
- ],
- row_count: 3,
- ..Default::default()
- };
-
-        // Test that a non-existent column is added automatically upon insertion.
- let request = Request::Insert(insert);
- let output = query(instance, request).await;
- assert!(matches!(output, Output::AffectedRows(3)));
-
- let request = Request::Query(QueryRequest {
- query: Some(Query::Sql(
- "SELECT ts, a, b FROM auto_created_table".to_string(),
- )),
- });
- let output = query(instance, request.clone()).await;
- let Output::Stream(stream) = output else { unreachable!() };
- let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
- let expected = "\
-+---------------------+---+---+
-| ts | a | b |
-+---------------------+---+---+
-| 2023-01-01T07:26:15 | 4 | |
-| 2023-01-01T07:26:16 | | |
-| 2023-01-01T07:26:17 | 6 | |
-| 2023-01-01T07:26:18 | | x |
-| 2023-01-01T07:26:19 | | |
-| 2023-01-01T07:26:20 | | z |
-+---------------------+---+---+";
- assert_eq!(recordbatches.pretty_print().unwrap(), expected);
-
- let delete = DeleteRequest {
- table_name: "auto_created_table".to_string(),
- region_number: 0,
- key_columns: vec![Column {
- column_name: "ts".to_string(),
- values: Some(Values {
- ts_millisecond_values: vec![1672557975000, 1672557979000],
- ..Default::default()
- }),
- semantic_type: SemanticType::Timestamp as i32,
- datatype: ColumnDataType::TimestampMillisecond as i32,
- ..Default::default()
- }],
- row_count: 2,
- };
-
- let output = query(instance, Request::Delete(delete)).await;
- assert!(matches!(output, Output::AffectedRows(2)));
-
- let output = query(instance, request).await;
- let Output::Stream(stream) = output else { unreachable!() };
- let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
- let expected = "\
-+---------------------+---+---+
-| ts | a | b |
-+---------------------+---+---+
-| 2023-01-01T07:26:16 | | |
-| 2023-01-01T07:26:17 | 6 | |
-| 2023-01-01T07:26:18 | | x |
-| 2023-01-01T07:26:20 | | z |
-+---------------------+---+---+";
- assert_eq!(recordbatches.pretty_print().unwrap(), expected);
- }
-
- #[tokio::test(flavor = "multi_thread")]
- async fn test_promql_query() {
- common_telemetry::init_default_ut_logging();
-
- let standalone = tests::create_standalone_instance("test_standalone_promql_query").await;
- let instance = &standalone.instance;
-
- let table_name = "my_table";
- let sql = format!("CREATE TABLE {table_name} (h string, a double, ts TIMESTAMP, TIME INDEX (ts), PRIMARY KEY(h))");
- create_table(instance, sql).await;
-
- let insert = InsertRequest {
- table_name: table_name.to_string(),
- columns: vec![
- Column {
- column_name: "h".to_string(),
- values: Some(Values {
- string_values: vec![
- "t".to_string(),
- "t".to_string(),
- "t".to_string(),
- "t".to_string(),
- "t".to_string(),
- "t".to_string(),
- "t".to_string(),
- "t".to_string(),
- ],
- ..Default::default()
- }),
- semantic_type: SemanticType::Tag as i32,
- datatype: ColumnDataType::String as i32,
- ..Default::default()
- },
- Column {
- column_name: "a".to_string(),
- values: Some(Values {
- f64_values: vec![1f64, 11f64, 20f64, 22f64, 50f64, 55f64, 99f64],
- ..Default::default()
- }),
- null_mask: vec![4],
- semantic_type: SemanticType::Field as i32,
- datatype: ColumnDataType::Float64 as i32,
- },
- Column {
- column_name: "ts".to_string(),
- values: Some(Values {
- ts_millisecond_values: vec![
- 1672557972000,
- 1672557973000,
- 1672557974000,
- 1672557975000,
- 1672557976000,
- 1672557977000,
- 1672557978000,
- 1672557979000,
- ],
- ..Default::default()
- }),
- semantic_type: SemanticType::Timestamp as i32,
- datatype: ColumnDataType::TimestampMillisecond as i32,
- ..Default::default()
- },
- ],
- row_count: 8,
- ..Default::default()
- };
-
- let request = Request::Insert(insert);
- let output = query(instance, request).await;
- assert!(matches!(output, Output::AffectedRows(8)));
-
- let request = Request::Query(QueryRequest {
- query: Some(Query::PromRangeQuery(api::v1::PromRangeQuery {
- query: "my_table".to_owned(),
- start: "1672557973".to_owned(),
- end: "1672557978".to_owned(),
- step: "1s".to_owned(),
- })),
- });
- let output = query(instance, request).await;
- let Output::Stream(stream) = output else { unreachable!() };
- let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
- let expected = "\
-+---+------+---------------------+
-| h | a | ts |
-+---+------+---------------------+
-| t | 11.0 | 2023-01-01T07:26:13 |
-| t | | 2023-01-01T07:26:14 |
-| t | 20.0 | 2023-01-01T07:26:15 |
-| t | 22.0 | 2023-01-01T07:26:16 |
-| t | 50.0 | 2023-01-01T07:26:17 |
-| t | 55.0 | 2023-01-01T07:26:18 |
-+---+------+---------------------+";
- assert_eq!(recordbatches.pretty_print().unwrap(), expected);
- }
-}
diff --git a/src/frontend/src/instance/influxdb.rs b/src/frontend/src/instance/influxdb.rs
index 8073ac740d97..7256f587f2f9 100644
--- a/src/frontend/src/instance/influxdb.rs
+++ b/src/frontend/src/instance/influxdb.rs
@@ -36,65 +36,3 @@ impl InfluxdbLineProtocolHandler for Instance {
Ok(())
}
}
-
-#[cfg(test)]
-mod test {
- use std::sync::Arc;
-
- use common_query::Output;
- use common_recordbatch::RecordBatches;
- use servers::query_handler::sql::SqlQueryHandler;
- use session::context::QueryContext;
-
- use super::*;
- use crate::tests;
-
- #[tokio::test(flavor = "multi_thread")]
- async fn test_standalone_put_influxdb_lines() {
- let standalone =
- tests::create_standalone_instance("test_standalone_put_influxdb_lines").await;
- let instance = &standalone.instance;
-
- test_put_influxdb_lines(instance).await;
- }
-
- #[tokio::test(flavor = "multi_thread")]
- async fn test_distributed_put_influxdb_lines() {
- let instance =
- tests::create_distributed_instance("test_distributed_put_influxdb_lines").await;
- let instance = &instance.frontend;
-
- test_put_influxdb_lines(instance).await;
- }
-
- async fn test_put_influxdb_lines(instance: &Arc<Instance>) {
- let lines = r"
-monitor1,host=host1 cpu=66.6,memory=1024 1663840496100023100
-monitor1,host=host2 memory=1027 1663840496400340001";
- let request = InfluxdbRequest {
- precision: None,
- lines: lines.to_string(),
- };
- instance.exec(&request, QueryContext::arc()).await.unwrap();
-
- let mut output = instance
- .do_query(
- "SELECT ts, host, cpu, memory FROM monitor1 ORDER BY ts",
- QueryContext::arc(),
- )
- .await;
- let output = output.remove(0).unwrap();
- let Output::Stream(stream) = output else { unreachable!() };
- let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
- assert_eq!(
- recordbatches.pretty_print().unwrap(),
- "\
-+-------------------------+-------+------+--------+
-| ts | host | cpu | memory |
-+-------------------------+-------+------+--------+
-| 2022-09-22T09:54:56.100 | host1 | 66.6 | 1024.0 |
-| 2022-09-22T09:54:56.400 | host2 | | 1027.0 |
-+-------------------------+-------+------+--------+"
- );
- }
-}
diff --git a/src/frontend/src/instance/opentsdb.rs b/src/frontend/src/instance/opentsdb.rs
index 3a5f6b9a5a50..42c0ea7000cf 100644
--- a/src/frontend/src/instance/opentsdb.rs
+++ b/src/frontend/src/instance/opentsdb.rs
@@ -35,95 +35,3 @@ impl OpentsdbProtocolHandler for Instance {
Ok(())
}
}
-
-#[cfg(test)]
-mod tests {
- use std::sync::Arc;
-
- use common_query::Output;
- use common_recordbatch::RecordBatches;
- use itertools::Itertools;
- use servers::query_handler::sql::SqlQueryHandler;
- use session::context::QueryContext;
-
- use super::*;
- use crate::tests;
-
- #[tokio::test(flavor = "multi_thread")]
- async fn test_standalone_exec() {
- let standalone = tests::create_standalone_instance("test_standalone_exec").await;
- let instance = &standalone.instance;
-
- test_exec(instance).await;
- }
-
- #[tokio::test(flavor = "multi_thread")]
- async fn test_distributed_exec() {
- let distributed = tests::create_distributed_instance("test_distributed_exec").await;
- let instance = &distributed.frontend;
-
- test_exec(instance).await;
- }
-
- async fn test_exec(instance: &Arc<Instance>) {
- let ctx = QueryContext::arc();
- let data_point1 = DataPoint::new(
- "my_metric_1".to_string(),
- 1000,
- 1.0,
- vec![
- ("tagk1".to_string(), "tagv1".to_string()),
- ("tagk2".to_string(), "tagv2".to_string()),
- ],
- );
- // should create new table "my_metric_1" directly
- let result = instance.exec(&data_point1, ctx.clone()).await;
- assert!(result.is_ok());
-
- let data_point2 = DataPoint::new(
- "my_metric_1".to_string(),
- 2000,
- 2.0,
- vec![
- ("tagk2".to_string(), "tagv2".to_string()),
- ("tagk3".to_string(), "tagv3".to_string()),
- ],
- );
- // should create new column "tagk3" directly
- let result = instance.exec(&data_point2, ctx.clone()).await;
- assert!(result.is_ok());
-
- let data_point3 = DataPoint::new("my_metric_1".to_string(), 3000, 3.0, vec![]);
- // should handle null tags properly
- let result = instance.exec(&data_point3, ctx.clone()).await;
- assert!(result.is_ok());
-
- let output = instance
- .do_query(
- "select * from my_metric_1 order by greptime_timestamp",
- Arc::new(QueryContext::new()),
- )
- .await
- .remove(0)
- .unwrap();
- match output {
- Output::Stream(stream) => {
- let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
- let pretty_print = recordbatches.pretty_print().unwrap();
- let expected = vec![
- "+---------------------+----------------+-------+-------+-------+",
- "| greptime_timestamp | greptime_value | tagk1 | tagk2 | tagk3 |",
- "+---------------------+----------------+-------+-------+-------+",
- "| 1970-01-01T00:00:01 | 1.0 | tagv1 | tagv2 | |",
- "| 1970-01-01T00:00:02 | 2.0 | | tagv2 | tagv3 |",
- "| 1970-01-01T00:00:03 | 3.0 | | | |",
- "+---------------------+----------------+-------+-------+-------+",
- ]
- .into_iter()
- .join("\n");
- assert_eq!(pretty_print, expected);
- }
- _ => unreachable!(),
- };
- }
-}
diff --git a/src/frontend/src/instance/prometheus.rs b/src/frontend/src/instance/prometheus.rs
index de7d38bd2c37..6dd1d0b04c6c 100644
--- a/src/frontend/src/instance/prometheus.rs
+++ b/src/frontend/src/instance/prometheus.rs
@@ -153,168 +153,3 @@ impl PrometheusProtocolHandler for Instance {
todo!();
}
}
-
-#[cfg(test)]
-mod tests {
- use std::sync::Arc;
-
- use api::prometheus::remote::label_matcher::Type as MatcherType;
- use api::prometheus::remote::{Label, LabelMatcher, Sample};
- use common_catalog::consts::DEFAULT_CATALOG_NAME;
- use servers::query_handler::sql::SqlQueryHandler;
- use session::context::QueryContext;
-
- use super::*;
- use crate::tests;
-
- #[tokio::test(flavor = "multi_thread")]
- async fn test_standalone_prometheus_remote_rw() {
- let standalone =
- tests::create_standalone_instance("test_standalone_prometheus_remote_rw").await;
- let instance = &standalone.instance;
-
- test_prometheus_remote_rw(instance).await;
- }
-
- #[tokio::test(flavor = "multi_thread")]
- async fn test_distributed_prometheus_remote_rw() {
- let distributed =
- tests::create_distributed_instance("test_distributed_prometheus_remote_rw").await;
- let instance = &distributed.frontend;
-
- test_prometheus_remote_rw(instance).await;
- }
-
- async fn test_prometheus_remote_rw(instance: &Arc<Instance>) {
- let write_request = WriteRequest {
- timeseries: prometheus::mock_timeseries(),
- ..Default::default()
- };
-
- let db = "prometheus";
- let ctx = Arc::new(QueryContext::with(DEFAULT_CATALOG_NAME, db));
-
- assert!(SqlQueryHandler::do_query(
- instance.as_ref(),
- "CREATE DATABASE IF NOT EXISTS prometheus",
- ctx.clone(),
- )
- .await
- .get(0)
- .unwrap()
- .is_ok());
-
- instance.write(write_request, ctx.clone()).await.unwrap();
-
- let read_request = ReadRequest {
- queries: vec![
- Query {
- start_timestamp_ms: 1000,
- end_timestamp_ms: 2000,
- matchers: vec![LabelMatcher {
- name: prometheus::METRIC_NAME_LABEL.to_string(),
- value: "metric1".to_string(),
- r#type: 0,
- }],
- ..Default::default()
- },
- Query {
- start_timestamp_ms: 1000,
- end_timestamp_ms: 3000,
- matchers: vec![
- LabelMatcher {
- name: prometheus::METRIC_NAME_LABEL.to_string(),
- value: "metric3".to_string(),
- r#type: 0,
- },
- LabelMatcher {
- name: "app".to_string(),
- value: "biz".to_string(),
- r#type: MatcherType::Eq as i32,
- },
- ],
- ..Default::default()
- },
- ],
- ..Default::default()
- };
-
- let resp = instance.read(read_request, ctx).await.unwrap();
- assert_eq!(resp.content_type, "application/x-protobuf");
- assert_eq!(resp.content_encoding, "snappy");
- let body = prometheus::snappy_decompress(&resp.body).unwrap();
- let read_response = ReadResponse::decode(&body[..]).unwrap();
- let query_results = read_response.results;
- assert_eq!(2, query_results.len());
-
- assert_eq!(1, query_results[0].timeseries.len());
- let timeseries = &query_results[0].timeseries[0];
-
- assert_eq!(
- vec![
- Label {
- name: prometheus::METRIC_NAME_LABEL.to_string(),
- value: "metric1".to_string(),
- },
- Label {
- name: "job".to_string(),
- value: "spark".to_string(),
- },
- ],
- timeseries.labels
- );
-
- assert_eq!(
- timeseries.samples,
- vec![
- Sample {
- value: 1.0,
- timestamp: 1000,
- },
- Sample {
- value: 2.0,
- timestamp: 2000,
- }
- ]
- );
-
- assert_eq!(1, query_results[1].timeseries.len());
- let timeseries = &query_results[1].timeseries[0];
-
- assert_eq!(
- vec![
- Label {
- name: prometheus::METRIC_NAME_LABEL.to_string(),
- value: "metric3".to_string(),
- },
- Label {
- name: "idc".to_string(),
- value: "z002".to_string(),
- },
- Label {
- name: "app".to_string(),
- value: "biz".to_string(),
- },
- ],
- timeseries.labels
- );
-
- assert_eq!(
- timeseries.samples,
- vec![
- Sample {
- value: 5.0,
- timestamp: 1000,
- },
- Sample {
- value: 6.0,
- timestamp: 2000,
- },
- Sample {
- value: 7.0,
- timestamp: 3000,
- }
- ]
- );
- }
-}
diff --git a/src/frontend/src/lib.rs b/src/frontend/src/lib.rs
index cbcdd5bbe476..8ba07cf21cf3 100644
--- a/src/frontend/src/lib.rs
+++ b/src/frontend/src/lib.rs
@@ -18,7 +18,7 @@
pub mod catalog;
pub mod datanode;
pub mod error;
-mod expr_factory;
+pub mod expr_factory;
pub mod frontend;
pub mod grpc;
pub mod influxdb;
@@ -32,11 +32,4 @@ pub mod prometheus;
mod script;
mod server;
pub mod statement;
-mod table;
-#[cfg(test)]
-mod tests;
-
-#[cfg(test)]
-// allowed because https://docs.rs/rstest_reuse/0.5.0/rstest_reuse/#use-rstest_reuse-at-the-top-of-your-crate
-#[allow(clippy::single_component_path_imports)]
-use rstest_reuse;
+pub mod table;
diff --git a/src/frontend/src/statement.rs b/src/frontend/src/statement.rs
index f0612756ab14..d3cd1d82aa64 100644
--- a/src/frontend/src/statement.rs
+++ b/src/frontend/src/statement.rs
@@ -59,7 +59,7 @@ impl StatementExecutor {
}
}
- pub(crate) async fn execute_stmt(
+ pub async fn execute_stmt(
&self,
stmt: QueryStatement,
query_ctx: QueryContextRef,
diff --git a/src/frontend/src/table.rs b/src/frontend/src/table.rs
index 89565a07bda3..9e2013419e40 100644
--- a/src/frontend/src/table.rs
+++ b/src/frontend/src/table.rs
@@ -217,7 +217,7 @@ impl Table for DistTable {
}
impl DistTable {
- pub(crate) fn new(
+ pub fn new(
table_name: TableName,
table_info: TableInfoRef,
partition_manager: PartitionRuleManagerRef,
@@ -233,7 +233,7 @@ impl DistTable {
}
}
- pub(crate) async fn table_global_value(
+ pub async fn table_global_value(
&self,
key: &TableGlobalKey,
) -> Result<Option<TableGlobalValue>> {
@@ -496,25 +496,10 @@ mod test {
use std::collections::HashMap;
use std::sync::atomic::{AtomicU32, Ordering};
- use api::v1::column::SemanticType;
- use api::v1::{column, Column, ColumnDataType, InsertRequest as GrpcInsertRequest};
use catalog::error::Result;
use catalog::remote::{KvBackend, ValueIter};
- use common_query::physical_plan::DfPhysicalPlanAdapter;
- use common_query::DfPhysicalPlan;
- use common_recordbatch::adapter::RecordBatchStreamAdapter;
- use datafusion::physical_plan::coalesce_partitions::CoalescePartitionsExec;
- use datafusion::physical_plan::expressions::{col as physical_col, PhysicalSortExpr};
- use datafusion::physical_plan::sorts::sort::SortExec;
- use datafusion::prelude::SessionContext;
- use datafusion::sql::sqlparser;
use datafusion_expr::expr_fn::{and, binary_expr, col, or};
use datafusion_expr::{lit, Operator};
- use datanode::instance::Instance;
- use datatypes::arrow::compute::SortOptions;
- use datatypes::prelude::ConcreteDataType;
- use datatypes::schema::{ColumnSchema, Schema};
- use itertools::Itertools;
use meta_client::client::MetaClient;
use meta_client::rpc::router::RegionRoute;
use meta_client::rpc::{Region, Table, TableRoute};
@@ -528,15 +513,10 @@ mod test {
use partition::range::RangePartitionRule;
use partition::route::TableRoutes;
use partition::PartitionRuleRef;
- use session::context::QueryContext;
- use sql::parser::ParserContext;
- use sql::statements::statement::Statement;
use store_api::storage::RegionNumber;
- use table::metadata::{TableInfoBuilder, TableMetaBuilder};
- use table::{meter_insert_request, TableRef};
+ use table::meter_insert_request;
use super::*;
- use crate::expr_factory;
struct DummyKvBackend;
@@ -745,321 +725,6 @@ mod test {
assert_eq!(range_columns_rule.regions(), &vec![1, 2, 3]);
}
- #[tokio::test(flavor = "multi_thread")]
- async fn test_dist_table_scan() {
- common_telemetry::init_default_ut_logging();
- let table = Arc::new(new_dist_table("test_dist_table_scan").await);
- // should scan all regions
- // select a, row_id from numbers
- let projection = Some(vec![1, 2]);
- let filters = vec![];
- let expected_output = vec![
- "+-----+--------+",
- "| a | row_id |",
- "+-----+--------+",
- "| 0 | 1 |",
- "| 1 | 2 |",
- "| 2 | 3 |",
- "| 3 | 4 |",
- "| 4 | 5 |",
- "| 10 | 1 |",
- "| 11 | 2 |",
- "| 12 | 3 |",
- "| 13 | 4 |",
- "| 14 | 5 |",
- "| 30 | 1 |",
- "| 31 | 2 |",
- "| 32 | 3 |",
- "| 33 | 4 |",
- "| 34 | 5 |",
- "| 100 | 1 |",
- "| 101 | 2 |",
- "| 102 | 3 |",
- "| 103 | 4 |",
- "| 104 | 5 |",
- "+-----+--------+",
- ];
- exec_table_scan(table.clone(), projection, filters, 4, expected_output).await;
-
- // should scan only region 1
- // select a, row_id from numbers where a < 10
- let projection = Some(vec![1, 2]);
- let filters = vec![binary_expr(col("a"), Operator::Lt, lit(10)).into()];
- let expected_output = vec![
- "+---+--------+",
- "| a | row_id |",
- "+---+--------+",
- "| 0 | 1 |",
- "| 1 | 2 |",
- "| 2 | 3 |",
- "| 3 | 4 |",
- "| 4 | 5 |",
- "+---+--------+",
- ];
- exec_table_scan(table.clone(), projection, filters, 1, expected_output).await;
-
- // should scan region 1 and 2
- // select a, row_id from numbers where a < 15
- let projection = Some(vec![1, 2]);
- let filters = vec![binary_expr(col("a"), Operator::Lt, lit(15)).into()];
- let expected_output = vec![
- "+----+--------+",
- "| a | row_id |",
- "+----+--------+",
- "| 0 | 1 |",
- "| 1 | 2 |",
- "| 2 | 3 |",
- "| 3 | 4 |",
- "| 4 | 5 |",
- "| 10 | 1 |",
- "| 11 | 2 |",
- "| 12 | 3 |",
- "| 13 | 4 |",
- "| 14 | 5 |",
- "+----+--------+",
- ];
- exec_table_scan(table.clone(), projection, filters, 2, expected_output).await;
-
- // should scan region 2 and 3
- // select a, row_id from numbers where a < 40 and a >= 10
- let projection = Some(vec![1, 2]);
- let filters = vec![and(
- binary_expr(col("a"), Operator::Lt, lit(40)),
- binary_expr(col("a"), Operator::GtEq, lit(10)),
- )
- .into()];
- let expected_output = vec![
- "+----+--------+",
- "| a | row_id |",
- "+----+--------+",
- "| 10 | 1 |",
- "| 11 | 2 |",
- "| 12 | 3 |",
- "| 13 | 4 |",
- "| 14 | 5 |",
- "| 30 | 1 |",
- "| 31 | 2 |",
- "| 32 | 3 |",
- "| 33 | 4 |",
- "| 34 | 5 |",
- "+----+--------+",
- ];
- exec_table_scan(table.clone(), projection, filters, 2, expected_output).await;
-
- // should scan all regions
- // select a, row_id from numbers where a < 1000 and row_id == 1
- let projection = Some(vec![1, 2]);
- let filters = vec![and(
- binary_expr(col("a"), Operator::Lt, lit(1000)),
- binary_expr(col("row_id"), Operator::Eq, lit(1)),
- )
- .into()];
- let expected_output = vec![
- "+-----+--------+",
- "| a | row_id |",
- "+-----+--------+",
- "| 0 | 1 |",
- "| 10 | 1 |",
- "| 30 | 1 |",
- "| 100 | 1 |",
- "+-----+--------+",
- ];
- exec_table_scan(table.clone(), projection, filters, 4, expected_output).await;
- }
-
- async fn exec_table_scan(
- table: TableRef,
- projection: Option<Vec<usize>>,
- filters: Vec<Expr>,
- expected_partitions: usize,
- expected_output: Vec<&str>,
- ) {
- let expected_output = expected_output.into_iter().join("\n");
- let table_scan = table
- .scan(projection.as_ref(), filters.as_slice(), None)
- .await
- .unwrap();
- assert_eq!(
- table_scan.output_partitioning().partition_count(),
- expected_partitions
- );
-
- let merge =
- CoalescePartitionsExec::new(Arc::new(DfPhysicalPlanAdapter(table_scan.clone())));
-
- let sort = SortExec::new(
- vec![PhysicalSortExpr {
- expr: physical_col("a", table_scan.schema().arrow_schema()).unwrap(),
- options: SortOptions::default(),
- }],
- Arc::new(merge),
- )
- .with_fetch(None);
- assert_eq!(sort.output_partitioning().partition_count(), 1);
-
- let session_ctx = SessionContext::new();
- let stream = sort.execute(0, session_ctx.task_ctx()).unwrap();
- let stream = Box::pin(RecordBatchStreamAdapter::try_new(stream).unwrap());
-
- let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
- assert_eq!(recordbatches.pretty_print().unwrap(), expected_output);
- }
-
- async fn new_dist_table(test_name: &str) -> DistTable {
- let column_schemas = vec![
- ColumnSchema::new("ts", ConcreteDataType::int64_datatype(), false),
- ColumnSchema::new("a", ConcreteDataType::int32_datatype(), true),
- ColumnSchema::new("row_id", ConcreteDataType::int32_datatype(), true),
- ];
- let schema = Arc::new(Schema::new(column_schemas.clone()));
-
- let instance = crate::tests::create_distributed_instance(test_name).await;
- let dist_instance = &instance.dist_instance;
- let datanode_instances = instance.datanodes;
-
- let catalog_manager = dist_instance.catalog_manager();
- let partition_manager = catalog_manager.partition_manager();
- let datanode_clients = catalog_manager.datanode_clients();
-
- let table_name = TableName::new("greptime", "public", "dist_numbers");
-
- let sql = "
- CREATE TABLE greptime.public.dist_numbers (
- ts BIGINT,
- a INT,
- row_id INT,
- TIME INDEX (ts),
- )
- PARTITION BY RANGE COLUMNS (a) (
- PARTITION r0 VALUES LESS THAN (10),
- PARTITION r1 VALUES LESS THAN (20),
- PARTITION r2 VALUES LESS THAN (50),
- PARTITION r3 VALUES LESS THAN (MAXVALUE),
- )
- ENGINE=mito";
-
- let create_table =
- match ParserContext::create_with_dialect(sql, &sqlparser::dialect::GenericDialect {})
- .unwrap()
- .pop()
- .unwrap()
- {
- Statement::CreateTable(c) => c,
- _ => unreachable!(),
- };
-
- let mut expr = expr_factory::create_to_expr(&create_table, QueryContext::arc()).unwrap();
- let _result = dist_instance
- .create_table(&mut expr, create_table.partitions)
- .await
- .unwrap();
-
- let table_route = partition_manager
- .find_table_route(&table_name)
- .await
- .unwrap();
-
- let mut region_to_datanode_mapping = HashMap::new();
- for region_route in table_route.region_routes.iter() {
- let region_id = region_route.region.id as u32;
- let datanode_id = region_route.leader_peer.as_ref().unwrap().id;
- region_to_datanode_mapping.insert(region_id, datanode_id);
- }
-
- let mut global_start_ts = 1;
- let regional_numbers = vec![
- (0, (0..5).collect::<Vec<i32>>()),
- (1, (10..15).collect::<Vec<i32>>()),
- (2, (30..35).collect::<Vec<i32>>()),
- (3, (100..105).collect::<Vec<i32>>()),
- ];
- for (region_number, numbers) in regional_numbers {
- let datanode_id = *region_to_datanode_mapping.get(®ion_number).unwrap();
- let instance = datanode_instances.get(&datanode_id).unwrap().clone();
-
- let start_ts = global_start_ts;
- global_start_ts += numbers.len() as i64;
-
- insert_testing_data(
- &table_name,
- instance.clone(),
- numbers,
- start_ts,
- region_number,
- )
- .await;
- }
-
- let meta = TableMetaBuilder::default()
- .schema(schema)
- .primary_key_indices(vec![])
- .next_column_id(1)
- .build()
- .unwrap();
- let table_info = TableInfoBuilder::default()
- .name(&table_name.table_name)
- .meta(meta)
- .build()
- .unwrap();
- DistTable {
- table_name,
- table_info: Arc::new(table_info),
- partition_manager,
- datanode_clients,
- backend: catalog_manager.backend(),
- }
- }
-
- async fn insert_testing_data(
- table_name: &TableName,
- dn_instance: Arc<Instance>,
- data: Vec<i32>,
- start_ts: i64,
- region_number: RegionNumber,
- ) {
- let row_count = data.len() as u32;
- let columns = vec![
- Column {
- column_name: "ts".to_string(),
- values: Some(column::Values {
- i64_values: (start_ts..start_ts + row_count as i64).collect::<Vec<i64>>(),
- ..Default::default()
- }),
- datatype: ColumnDataType::Int64 as i32,
- semantic_type: SemanticType::Timestamp as i32,
- ..Default::default()
- },
- Column {
- column_name: "a".to_string(),
- values: Some(column::Values {
- i32_values: data,
- ..Default::default()
- }),
- datatype: ColumnDataType::Int32 as i32,
- ..Default::default()
- },
- Column {
- column_name: "row_id".to_string(),
- values: Some(column::Values {
- i32_values: (1..=row_count as i32).collect::<Vec<i32>>(),
- ..Default::default()
- }),
- datatype: ColumnDataType::Int32 as i32,
- ..Default::default()
- },
- ];
- let request = GrpcInsertRequest {
- table_name: table_name.table_name.clone(),
- columns,
- row_count,
- region_number,
- };
- dn_instance
- .handle_insert(request, QueryContext::arc())
- .await
- .unwrap();
- }
-
#[tokio::test(flavor = "multi_thread")]
async fn test_find_regions() {
let partition_manager = Arc::new(PartitionRuleManager::new(Arc::new(TableRoutes::new(
diff --git a/tests-integration/Cargo.toml b/tests-integration/Cargo.toml
index a2abf33b6167..6ba5644f4c02 100644
--- a/tests-integration/Cargo.toml
+++ b/tests-integration/Cargo.toml
@@ -23,7 +23,7 @@ common-test-util = { path = "../src/common/test-util" }
datanode = { path = "../src/datanode" }
datatypes = { path = "../src/datatypes" }
dotenv = "0.15"
-frontend = { path = "../src/frontend" }
+frontend = { path = "../src/frontend", features = ["testing"] }
mito = { path = "../src/mito", features = ["test"] }
object-store = { path = "../src/object-store" }
once_cell = "1.16"
@@ -40,4 +40,22 @@ tokio.workspace = true
uuid.workspace = true
[dev-dependencies]
+common-base = { path = "../src/common/base" }
+common-recordbatch = { path = "../src/common/recordbatch" }
+datafusion.workspace = true
+datafusion-expr.workspace = true
+futures.workspace = true
+itertools = "0.10"
+meta-client = { path = "../src/meta-client" }
+meta-srv = { path = "../src/meta-srv" }
+partition = { path = "../src/partition" }
paste.workspace = true
+prost.workspace = true
+query = { path = "../src/query" }
+script = { path = "../src/script" }
+session = { path = "../src/session" }
+store-api = { path = "../src/store-api" }
+tonic.workspace = true
+tower = "0.4"
+rstest = "0.17"
+rstest_reuse = "0.5"
diff --git a/tests-integration/src/catalog.rs b/tests-integration/src/catalog.rs
new file mode 100644
index 000000000000..5e3187ac2c5f
--- /dev/null
+++ b/tests-integration/src/catalog.rs
@@ -0,0 +1,80 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#[cfg(test)]
+mod tests {
+ use catalog::{CatalogManager, RegisterSystemTableRequest};
+ use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MITO_ENGINE};
+ use script::table::{build_scripts_schema, SCRIPTS_TABLE_NAME};
+ use table::requests::{CreateTableRequest, TableOptions};
+
+ #[tokio::test(flavor = "multi_thread")]
+ async fn test_register_system_table() {
+ let instance =
+ crate::tests::create_distributed_instance("test_register_system_table").await;
+
+ let catalog_name = DEFAULT_CATALOG_NAME;
+ let schema_name = DEFAULT_SCHEMA_NAME;
+ let table_name = SCRIPTS_TABLE_NAME;
+ let request = CreateTableRequest {
+ id: 1,
+ catalog_name: catalog_name.to_string(),
+ schema_name: schema_name.to_string(),
+ table_name: table_name.to_string(),
+ desc: Some("Scripts table".to_string()),
+ schema: build_scripts_schema(),
+ region_numbers: vec![0],
+ primary_key_indices: vec![0, 1],
+ create_if_not_exists: true,
+ table_options: TableOptions::default(),
+ engine: MITO_ENGINE.to_string(),
+ };
+
+ let result = instance
+ .catalog_manager
+ .register_system_table(RegisterSystemTableRequest {
+ create_table_request: request,
+ open_hook: None,
+ })
+ .await;
+ assert!(result.is_ok());
+
+ assert!(
+ instance
+ .catalog_manager
+ .table(catalog_name, schema_name, table_name)
+ .await
+ .unwrap()
+ .is_some(),
+ "the registered system table cannot be found in catalog"
+ );
+
+ let mut actually_created_table_in_datanode = 0;
+ for datanode in instance.datanodes.values() {
+ if datanode
+ .catalog_manager()
+ .table(catalog_name, schema_name, table_name)
+ .await
+ .unwrap()
+ .is_some()
+ {
+ actually_created_table_in_datanode += 1;
+ }
+ }
+ assert_eq!(
+ actually_created_table_in_datanode, 1,
+ "system table should be actually created at one and only one datanode"
+ )
+ }
+}
diff --git a/tests-integration/src/grpc.rs b/tests-integration/src/grpc.rs
new file mode 100644
index 000000000000..01252ec7335b
--- /dev/null
+++ b/tests-integration/src/grpc.rs
@@ -0,0 +1,858 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#[cfg(test)]
+mod test {
+ use std::collections::HashMap;
+
+ use api::v1::column::{SemanticType, Values};
+ use api::v1::ddl_request::Expr as DdlExpr;
+ use api::v1::greptime_request::Request;
+ use api::v1::query_request::Query;
+ use api::v1::{
+ alter_expr, AddColumn, AddColumns, AlterExpr, Column, ColumnDataType, ColumnDef,
+ CreateDatabaseExpr, CreateTableExpr, DdlRequest, DeleteRequest, DropTableExpr,
+ FlushTableExpr, InsertRequest, QueryRequest,
+ };
+ use catalog::helper::{TableGlobalKey, TableGlobalValue};
+ use common_catalog::consts::MITO_ENGINE;
+ use common_query::Output;
+ use common_recordbatch::RecordBatches;
+ use frontend::instance::Instance;
+ use frontend::table::DistTable;
+ use query::parser::QueryLanguageParser;
+ use servers::query_handler::grpc::GrpcQueryHandler;
+ use session::context::QueryContext;
+ use tests::{has_parquet_file, test_region_dir};
+
+ use crate::tests;
+ use crate::tests::MockDistributedInstance;
+
+ #[tokio::test(flavor = "multi_thread")]
+ async fn test_distributed_handle_ddl_request() {
+ let instance =
+ tests::create_distributed_instance("test_distributed_handle_ddl_request").await;
+ let frontend = &instance.frontend;
+
+ test_handle_ddl_request(frontend.as_ref()).await;
+
+ verify_table_is_dropped(&instance).await;
+ }
+
+ #[tokio::test(flavor = "multi_thread")]
+ async fn test_standalone_handle_ddl_request() {
+ let standalone =
+ tests::create_standalone_instance("test_standalone_handle_ddl_request").await;
+ let instance = &standalone.instance;
+
+ test_handle_ddl_request(instance.as_ref()).await;
+ }
+
+ async fn query(instance: &Instance, request: Request) -> Output {
+ GrpcQueryHandler::do_query(instance, request, QueryContext::arc())
+ .await
+ .unwrap()
+ }
+
+ async fn test_handle_ddl_request(instance: &Instance) {
+ let request = Request::Ddl(DdlRequest {
+ expr: Some(DdlExpr::CreateDatabase(CreateDatabaseExpr {
+ database_name: "database_created_through_grpc".to_string(),
+ create_if_not_exists: true,
+ })),
+ });
+ let output = query(instance, request).await;
+ assert!(matches!(output, Output::AffectedRows(1)));
+
+ let request = Request::Ddl(DdlRequest {
+ expr: Some(DdlExpr::CreateTable(CreateTableExpr {
+ catalog_name: "greptime".to_string(),
+ schema_name: "database_created_through_grpc".to_string(),
+ table_name: "table_created_through_grpc".to_string(),
+ column_defs: vec![
+ ColumnDef {
+ name: "a".to_string(),
+ datatype: ColumnDataType::String as _,
+ is_nullable: true,
+ default_constraint: vec![],
+ },
+ ColumnDef {
+ name: "ts".to_string(),
+ datatype: ColumnDataType::TimestampMillisecond as _,
+ is_nullable: false,
+ default_constraint: vec![],
+ },
+ ],
+ time_index: "ts".to_string(),
+ engine: MITO_ENGINE.to_string(),
+ ..Default::default()
+ })),
+ });
+ let output = query(instance, request).await;
+ assert!(matches!(output, Output::AffectedRows(0)));
+
+ let request = Request::Ddl(DdlRequest {
+ expr: Some(DdlExpr::Alter(AlterExpr {
+ catalog_name: "greptime".to_string(),
+ schema_name: "database_created_through_grpc".to_string(),
+ table_name: "table_created_through_grpc".to_string(),
+ kind: Some(alter_expr::Kind::AddColumns(AddColumns {
+ add_columns: vec![AddColumn {
+ column_def: Some(ColumnDef {
+ name: "b".to_string(),
+ datatype: ColumnDataType::Int32 as _,
+ is_nullable: true,
+ default_constraint: vec![],
+ }),
+ is_key: false,
+ }],
+ })),
+ })),
+ });
+ let output = query(instance, request).await;
+ assert!(matches!(output, Output::AffectedRows(0)));
+
+ let request = Request::Query(QueryRequest {
+ query: Some(Query::Sql("INSERT INTO database_created_through_grpc.table_created_through_grpc (a, b, ts) VALUES ('s', 1, 1672816466000)".to_string()))
+ });
+ let output = query(instance, request).await;
+ assert!(matches!(output, Output::AffectedRows(1)));
+
+ let request = Request::Query(QueryRequest {
+ query: Some(Query::Sql(
+ "SELECT ts, a, b FROM database_created_through_grpc.table_created_through_grpc"
+ .to_string(),
+ )),
+ });
+ let output = query(instance, request).await;
+ let Output::Stream(stream) = output else { unreachable!() };
+ let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
+ let expected = "\
++---------------------+---+---+
+| ts | a | b |
++---------------------+---+---+
+| 2023-01-04T07:14:26 | s | 1 |
++---------------------+---+---+";
+ assert_eq!(recordbatches.pretty_print().unwrap(), expected);
+
+ let request = Request::Ddl(DdlRequest {
+ expr: Some(DdlExpr::DropTable(DropTableExpr {
+ catalog_name: "greptime".to_string(),
+ schema_name: "database_created_through_grpc".to_string(),
+ table_name: "table_created_through_grpc".to_string(),
+ })),
+ });
+ let output = query(instance, request).await;
+ assert!(matches!(output, Output::AffectedRows(1)));
+ }
+
+ async fn verify_table_is_dropped(instance: &MockDistributedInstance) {
+ for (_, dn) in instance.datanodes.iter() {
+ assert!(dn
+ .catalog_manager()
+ .table(
+ "greptime",
+ "database_created_through_grpc",
+ "table_created_through_grpc"
+ )
+ .await
+ .unwrap()
+ .is_none());
+ }
+ }
+
+ #[tokio::test(flavor = "multi_thread")]
+ async fn test_distributed_insert_delete_and_query() {
+ common_telemetry::init_default_ut_logging();
+
+ let instance =
+ tests::create_distributed_instance("test_distributed_insert_delete_and_query").await;
+ let frontend = instance.frontend.as_ref();
+
+ let table_name = "my_dist_table";
+ let sql = format!(
+ r"
+CREATE TABLE {table_name} (
+ a INT,
+ b STRING PRIMARY KEY,
+ ts TIMESTAMP,
+ TIME INDEX (ts)
+) PARTITION BY RANGE COLUMNS(a) (
+ PARTITION r0 VALUES LESS THAN (10),
+ PARTITION r1 VALUES LESS THAN (20),
+ PARTITION r2 VALUES LESS THAN (50),
+ PARTITION r3 VALUES LESS THAN (MAXVALUE),
+)"
+ );
+ create_table(frontend, sql).await;
+
+ test_insert_delete_and_query_on_existing_table(frontend, table_name).await;
+
+ verify_data_distribution(
+ &instance,
+ table_name,
+ HashMap::from([
+ (
+ 0u32,
+ "\
++---------------------+---+-------------------+
+| ts | a | b |
++---------------------+---+-------------------+
+| 2023-01-01T07:26:12 | 1 | ts: 1672557972000 |
+| 2023-01-01T07:26:14 | 3 | ts: 1672557974000 |
+| 2023-01-01T07:26:15 | 4 | ts: 1672557975000 |
+| 2023-01-01T07:26:16 | 5 | ts: 1672557976000 |
+| 2023-01-01T07:26:17 | | ts: 1672557977000 |
++---------------------+---+-------------------+",
+ ),
+ (
+ 1u32,
+ "\
++---------------------+----+-------------------+
+| ts | a | b |
++---------------------+----+-------------------+
+| 2023-01-01T07:26:18 | 11 | ts: 1672557978000 |
++---------------------+----+-------------------+",
+ ),
+ (
+ 2u32,
+ "\
++---------------------+----+-------------------+
+| ts | a | b |
++---------------------+----+-------------------+
+| 2023-01-01T07:26:20 | 20 | ts: 1672557980000 |
+| 2023-01-01T07:26:21 | 21 | ts: 1672557981000 |
+| 2023-01-01T07:26:23 | 23 | ts: 1672557983000 |
++---------------------+----+-------------------+",
+ ),
+ (
+ 3u32,
+ "\
++---------------------+----+-------------------+
+| ts | a | b |
++---------------------+----+-------------------+
+| 2023-01-01T07:26:24 | 50 | ts: 1672557984000 |
+| 2023-01-01T07:26:25 | 51 | ts: 1672557985000 |
+| 2023-01-01T07:26:27 | 53 | ts: 1672557987000 |
++---------------------+----+-------------------+",
+ ),
+ ]),
+ )
+ .await;
+
+ test_insert_delete_and_query_on_auto_created_table(frontend).await;
+
+ // Auto created table has only one region.
+ verify_data_distribution(
+ &instance,
+ "auto_created_table",
+ HashMap::from([(
+ 0u32,
+ "\
++---------------------+---+---+
+| ts | a | b |
++---------------------+---+---+
+| 2023-01-01T07:26:16 | | |
+| 2023-01-01T07:26:17 | 6 | |
+| 2023-01-01T07:26:18 | | x |
+| 2023-01-01T07:26:20 | | z |
++---------------------+---+---+",
+ )]),
+ )
+ .await;
+ }
+
+ #[tokio::test(flavor = "multi_thread")]
+ async fn test_standalone_insert_and_query() {
+ common_telemetry::init_default_ut_logging();
+
+ let standalone =
+ tests::create_standalone_instance("test_standalone_insert_and_query").await;
+ let instance = &standalone.instance;
+
+ let table_name = "my_table";
+ let sql = format!("CREATE TABLE {table_name} (a INT, b STRING, ts TIMESTAMP, TIME INDEX (ts), PRIMARY KEY (a, b))");
+ create_table(instance, sql).await;
+
+ test_insert_delete_and_query_on_existing_table(instance, table_name).await;
+
+ test_insert_delete_and_query_on_auto_created_table(instance).await
+ }
+
+ #[tokio::test(flavor = "multi_thread")]
+ async fn test_distributed_flush_table() {
+ common_telemetry::init_default_ut_logging();
+
+ let instance = tests::create_distributed_instance("test_distributed_flush_table").await;
+ let data_tmp_dirs = instance.data_tmp_dirs();
+ let frontend = instance.frontend.as_ref();
+
+ let table_name = "my_dist_table";
+ let sql = format!(
+ r"
+CREATE TABLE {table_name} (
+ a INT,
+ ts TIMESTAMP,
+ TIME INDEX (ts)
+) PARTITION BY RANGE COLUMNS(a) (
+ PARTITION r0 VALUES LESS THAN (10),
+ PARTITION r1 VALUES LESS THAN (20),
+ PARTITION r2 VALUES LESS THAN (50),
+ PARTITION r3 VALUES LESS THAN (MAXVALUE),
+)"
+ );
+ create_table(frontend, sql).await;
+
+ test_insert_delete_and_query_on_existing_table(frontend, table_name).await;
+
+ flush_table(frontend, "greptime", "public", table_name, None).await;
+ // Flush again to make sure the previous flush task has finished
+ flush_table(frontend, "greptime", "public", table_name, None).await;
+
+ let table = instance
+ .frontend
+ .catalog_manager()
+ .table("greptime", "public", table_name)
+ .await
+ .unwrap()
+ .unwrap();
+ let table = table.as_any().downcast_ref::<DistTable>().unwrap();
+
+ let tgv = table
+ .table_global_value(&TableGlobalKey {
+ catalog_name: "greptime".to_string(),
+ schema_name: "public".to_string(),
+ table_name: table_name.to_string(),
+ })
+ .await
+ .unwrap()
+ .unwrap();
+ let table_id = tgv.table_id();
+
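+ // regions_id_map maps datanode id -> its region numbers; invert it (taking each datanode's first region) to find where every region lives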
+ let region_to_dn_map = tgv
+ .regions_id_map
+ .iter()
+ .map(|(k, v)| (v[0], *k))
+ .collect::<HashMap<u32, u64>>();
+
+ for (region, dn) in region_to_dn_map.iter() {
+ // data_tmp_dirs is indexed from 0 while datanode ids run from 1 to 4, hence the `dn - 1`
+ let data_tmp_dir = data_tmp_dirs.get((*dn - 1) as usize).unwrap();
+ let region_dir = test_region_dir(
+ data_tmp_dir.path().to_str().unwrap(),
+ "greptime",
+ "public",
+ table_id,
+ *region,
+ );
+ assert!(has_parquet_file(®ion_dir));
+ }
+ }
+
+ #[tokio::test(flavor = "multi_thread")]
+ async fn test_standalone_flush_table() {
+ common_telemetry::init_default_ut_logging();
+
+ let standalone = tests::create_standalone_instance("test_standalone_flush_table").await;
+ let instance = &standalone.instance;
+ let data_tmp_dir = standalone.data_tmp_dir();
+
+ let table_name = "my_table";
+ let sql = format!("CREATE TABLE {table_name} (a INT, b STRING, ts TIMESTAMP, TIME INDEX (ts), PRIMARY KEY (a, b))");
+
+ create_table(instance, sql).await;
+
+ test_insert_delete_and_query_on_existing_table(instance, table_name).await;
+
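+ // in a fresh standalone instance the first user-created table is expected to get table id 1024 with a single region 0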
+ let table_id = 1024;
+ let region_id = 0;
+ let region_dir = test_region_dir(
+ data_tmp_dir.path().to_str().unwrap(),
+ "greptime",
+ "public",
+ table_id,
+ region_id,
+ );
+ assert!(!has_parquet_file(®ion_dir));
+
+ flush_table(instance, "greptime", "public", "my_table", None).await;
+ // Flush again to make sure the previous flush task has finished
+ flush_table(instance, "greptime", "public", "my_table", None).await;
+
+ assert!(has_parquet_file(®ion_dir));
+ }
+
+ async fn create_table(frontend: &Instance, sql: String) {
+ let request = Request::Query(QueryRequest {
+ query: Some(Query::Sql(sql)),
+ });
+ let output = query(frontend, request).await;
+ assert!(matches!(output, Output::AffectedRows(0)));
+ }
+
+ async fn flush_table(
+ frontend: &Instance,
+ catalog_name: &str,
+ schema_name: &str,
+ table_name: &str,
+ region_id: Option<u32>,
+ ) {
+ let request = Request::Ddl(DdlRequest {
+ expr: Some(DdlExpr::FlushTable(FlushTableExpr {
+ catalog_name: catalog_name.to_string(),
+ schema_name: schema_name.to_string(),
+ table_name: table_name.to_string(),
+ region_id,
+ })),
+ });
+
+ let output = query(frontend, request).await;
+ assert!(matches!(output, Output::AffectedRows(0)));
+ }
+
+ async fn test_insert_delete_and_query_on_existing_table(instance: &Instance, table_name: &str) {
+ let ts_millisecond_values = vec![
+ 1672557972000,
+ 1672557973000,
+ 1672557974000,
+ 1672557975000,
+ 1672557976000,
+ 1672557977000,
+ 1672557978000,
+ 1672557979000,
+ 1672557980000,
+ 1672557981000,
+ 1672557982000,
+ 1672557983000,
+ 1672557984000,
+ 1672557985000,
+ 1672557986000,
+ 1672557987000,
+ ];
+ let insert = InsertRequest {
+ table_name: table_name.to_string(),
+ columns: vec![
+ Column {
+ column_name: "a".to_string(),
+ values: Some(Values {
+ i32_values: vec![1, 2, 3, 4, 5, 11, 12, 20, 21, 22, 23, 50, 51, 52, 53],
+ ..Default::default()
+ }),
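+ // null_mask is a per-row bitmap: 32 = 0b100000 marks the 6th row's "a" as NULL (hence only 15 values for 16 rows)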
+ null_mask: vec![32, 0],
+ semantic_type: SemanticType::Field as i32,
+ datatype: ColumnDataType::Int32 as i32,
+ },
+ Column {
+ column_name: "b".to_string(),
+ values: Some(Values {
+ string_values: ts_millisecond_values
+ .iter()
+ .map(|x| format!("ts: {x}"))
+ .collect(),
+ ..Default::default()
+ }),
+ semantic_type: SemanticType::Tag as i32,
+ datatype: ColumnDataType::String as i32,
+ ..Default::default()
+ },
+ Column {
+ column_name: "ts".to_string(),
+ values: Some(Values {
+ ts_millisecond_values,
+ ..Default::default()
+ }),
+ semantic_type: SemanticType::Timestamp as i32,
+ datatype: ColumnDataType::TimestampMillisecond as i32,
+ ..Default::default()
+ },
+ ],
+ row_count: 16,
+ ..Default::default()
+ };
+ let output = query(instance, Request::Insert(insert)).await;
+ assert!(matches!(output, Output::AffectedRows(16)));
+
+ let request = Request::Query(QueryRequest {
+ query: Some(Query::Sql(format!(
+ "SELECT ts, a, b FROM {table_name} ORDER BY ts"
+ ))),
+ });
+ let output = query(instance, request.clone()).await;
+ let Output::Stream(stream) = output else { unreachable!() };
+ let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
+ let expected = "\
++---------------------+----+-------------------+
+| ts | a | b |
++---------------------+----+-------------------+
+| 2023-01-01T07:26:12 | 1 | ts: 1672557972000 |
+| 2023-01-01T07:26:13 | 2 | ts: 1672557973000 |
+| 2023-01-01T07:26:14 | 3 | ts: 1672557974000 |
+| 2023-01-01T07:26:15 | 4 | ts: 1672557975000 |
+| 2023-01-01T07:26:16 | 5 | ts: 1672557976000 |
+| 2023-01-01T07:26:17 | | ts: 1672557977000 |
+| 2023-01-01T07:26:18 | 11 | ts: 1672557978000 |
+| 2023-01-01T07:26:19 | 12 | ts: 1672557979000 |
+| 2023-01-01T07:26:20 | 20 | ts: 1672557980000 |
+| 2023-01-01T07:26:21 | 21 | ts: 1672557981000 |
+| 2023-01-01T07:26:22 | 22 | ts: 1672557982000 |
+| 2023-01-01T07:26:23 | 23 | ts: 1672557983000 |
+| 2023-01-01T07:26:24 | 50 | ts: 1672557984000 |
+| 2023-01-01T07:26:25 | 51 | ts: 1672557985000 |
+| 2023-01-01T07:26:26 | 52 | ts: 1672557986000 |
+| 2023-01-01T07:26:27 | 53 | ts: 1672557987000 |
++---------------------+----+-------------------+";
+ assert_eq!(recordbatches.pretty_print().unwrap(), expected);
+
+ let delete = DeleteRequest {
+ table_name: table_name.to_string(),
+ region_number: 0,
+ key_columns: vec![
+ Column {
+ column_name: "a".to_string(),
+ semantic_type: SemanticType::Field as i32,
+ values: Some(Values {
+ i32_values: vec![2, 12, 22, 52],
+ ..Default::default()
+ }),
+ datatype: ColumnDataType::Int32 as i32,
+ ..Default::default()
+ },
+ Column {
+ column_name: "b".to_string(),
+ semantic_type: SemanticType::Tag as i32,
+ values: Some(Values {
+ string_values: vec![
+ "ts: 1672557973000".to_string(),
+ "ts: 1672557979000".to_string(),
+ "ts: 1672557982000".to_string(),
+ "ts: 1672557986000".to_string(),
+ ],
+ ..Default::default()
+ }),
+ datatype: ColumnDataType::String as i32,
+ ..Default::default()
+ },
+ Column {
+ column_name: "ts".to_string(),
+ semantic_type: SemanticType::Timestamp as i32,
+ values: Some(Values {
+ ts_millisecond_values: vec![
+ 1672557973000,
+ 1672557979000,
+ 1672557982000,
+ 1672557986000,
+ ],
+ ..Default::default()
+ }),
+ datatype: ColumnDataType::TimestampMillisecond as i32,
+ ..Default::default()
+ },
+ ],
+ row_count: 4,
+ };
+ let output = query(instance, Request::Delete(delete)).await;
+ assert!(matches!(output, Output::AffectedRows(4)));
+
+ let output = query(instance, request).await;
+ let Output::Stream(stream) = output else { unreachable!() };
+ let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
+ let expected = "\
++---------------------+----+-------------------+
+| ts | a | b |
++---------------------+----+-------------------+
+| 2023-01-01T07:26:12 | 1 | ts: 1672557972000 |
+| 2023-01-01T07:26:14 | 3 | ts: 1672557974000 |
+| 2023-01-01T07:26:15 | 4 | ts: 1672557975000 |
+| 2023-01-01T07:26:16 | 5 | ts: 1672557976000 |
+| 2023-01-01T07:26:17 | | ts: 1672557977000 |
+| 2023-01-01T07:26:18 | 11 | ts: 1672557978000 |
+| 2023-01-01T07:26:20 | 20 | ts: 1672557980000 |
+| 2023-01-01T07:26:21 | 21 | ts: 1672557981000 |
+| 2023-01-01T07:26:23 | 23 | ts: 1672557983000 |
+| 2023-01-01T07:26:24 | 50 | ts: 1672557984000 |
+| 2023-01-01T07:26:25 | 51 | ts: 1672557985000 |
+| 2023-01-01T07:26:27 | 53 | ts: 1672557987000 |
++---------------------+----+-------------------+";
+ assert_eq!(recordbatches.pretty_print().unwrap(), expected);
+ }
+
+ async fn verify_data_distribution(
+ instance: &MockDistributedInstance,
+ table_name: &str,
+ expected_distribution: HashMap<u32, &str>,
+ ) {
+ let table = instance
+ .frontend
+ .catalog_manager()
+ .table("greptime", "public", table_name)
+ .await
+ .unwrap()
+ .unwrap();
+ let table = table.as_any().downcast_ref::<DistTable>().unwrap();
+
+ let TableGlobalValue { regions_id_map, .. } = table
+ .table_global_value(&TableGlobalKey {
+ catalog_name: "greptime".to_string(),
+ schema_name: "public".to_string(),
+ table_name: table_name.to_string(),
+ })
+ .await
+ .unwrap()
+ .unwrap();
+ let region_to_dn_map = regions_id_map
+ .iter()
+ .map(|(k, v)| (v[0], *k))
+ .collect::<HashMap<u32, u64>>();
+ assert_eq!(region_to_dn_map.len(), expected_distribution.len());
+
+ for (region, dn) in region_to_dn_map.iter() {
+ let stmt = QueryLanguageParser::parse_sql(&format!(
+ "SELECT ts, a, b FROM {table_name} ORDER BY ts"
+ ))
+ .unwrap();
+ let dn = instance.datanodes.get(dn).unwrap();
+ let engine = dn.query_engine();
+ let plan = engine
+ .planner()
+ .plan(stmt, QueryContext::arc())
+ .await
+ .unwrap();
+ let output = engine.execute(plan, QueryContext::arc()).await.unwrap();
+ let Output::Stream(stream) = output else { unreachable!() };
+ let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
+ let actual = recordbatches.pretty_print().unwrap();
+
+ let expected = expected_distribution.get(region).unwrap();
+ assert_eq!(&actual, expected);
+ }
+ }
+
+ async fn test_insert_delete_and_query_on_auto_created_table(instance: &Instance) {
+ let insert = InsertRequest {
+ table_name: "auto_created_table".to_string(),
+ columns: vec![
+ Column {
+ column_name: "a".to_string(),
+ values: Some(Values {
+ i32_values: vec![4, 6],
+ ..Default::default()
+ }),
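+ // null_mask 2 = 0b10 marks the 2nd of the 3 rows as NULL, so "a" is 4, NULL, 6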
+ null_mask: vec![2],
+ semantic_type: SemanticType::Field as i32,
+ datatype: ColumnDataType::Int32 as i32,
+ },
+ Column {
+ column_name: "ts".to_string(),
+ values: Some(Values {
+ ts_millisecond_values: vec![1672557975000, 1672557976000, 1672557977000],
+ ..Default::default()
+ }),
+ semantic_type: SemanticType::Timestamp as i32,
+ datatype: ColumnDataType::TimestampMillisecond as i32,
+ ..Default::default()
+ },
+ ],
+ row_count: 3,
+ ..Default::default()
+ };
+
+ // Test auto-creating a non-existent table upon insertion.
+ let request = Request::Insert(insert);
+ let output = query(instance, request).await;
+ assert!(matches!(output, Output::AffectedRows(3)));
+
+ let insert = InsertRequest {
+ table_name: "auto_created_table".to_string(),
+ columns: vec![
+ Column {
+ column_name: "b".to_string(),
+ values: Some(Values {
+ string_values: vec!["x".to_string(), "z".to_string()],
+ ..Default::default()
+ }),
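+ // again, the 2nd row is NULL, so "b" is "x", NULL, "z"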
+ null_mask: vec![2],
+ semantic_type: SemanticType::Field as i32,
+ datatype: ColumnDataType::String as i32,
+ },
+ Column {
+ column_name: "ts".to_string(),
+ values: Some(Values {
+ ts_millisecond_values: vec![1672557978000, 1672557979000, 1672557980000],
+ ..Default::default()
+ }),
+ semantic_type: SemanticType::Timestamp as i32,
+ datatype: ColumnDataType::TimestampMillisecond as i32,
+ ..Default::default()
+ },
+ ],
+ row_count: 3,
+ ..Default::default()
+ };
+
+ // Test auto-adding a non-existent column upon insertion.
+ let request = Request::Insert(insert);
+ let output = query(instance, request).await;
+ assert!(matches!(output, Output::AffectedRows(3)));
+
+ let request = Request::Query(QueryRequest {
+ query: Some(Query::Sql(
+ "SELECT ts, a, b FROM auto_created_table".to_string(),
+ )),
+ });
+ let output = query(instance, request.clone()).await;
+ let Output::Stream(stream) = output else { unreachable!() };
+ let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
+ let expected = "\
++---------------------+---+---+
+| ts | a | b |
++---------------------+---+---+
+| 2023-01-01T07:26:15 | 4 | |
+| 2023-01-01T07:26:16 | | |
+| 2023-01-01T07:26:17 | 6 | |
+| 2023-01-01T07:26:18 | | x |
+| 2023-01-01T07:26:19 | | |
+| 2023-01-01T07:26:20 | | z |
++---------------------+---+---+";
+ assert_eq!(recordbatches.pretty_print().unwrap(), expected);
+
+ let delete = DeleteRequest {
+ table_name: "auto_created_table".to_string(),
+ region_number: 0,
+ key_columns: vec![Column {
+ column_name: "ts".to_string(),
+ values: Some(Values {
+ ts_millisecond_values: vec![1672557975000, 1672557979000],
+ ..Default::default()
+ }),
+ semantic_type: SemanticType::Timestamp as i32,
+ datatype: ColumnDataType::TimestampMillisecond as i32,
+ ..Default::default()
+ }],
+ row_count: 2,
+ };
+
+ let output = query(instance, Request::Delete(delete)).await;
+ assert!(matches!(output, Output::AffectedRows(2)));
+
+ let output = query(instance, request).await;
+ let Output::Stream(stream) = output else { unreachable!() };
+ let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
+ let expected = "\
++---------------------+---+---+
+| ts | a | b |
++---------------------+---+---+
+| 2023-01-01T07:26:16 | | |
+| 2023-01-01T07:26:17 | 6 | |
+| 2023-01-01T07:26:18 | | x |
+| 2023-01-01T07:26:20 | | z |
++---------------------+---+---+";
+ assert_eq!(recordbatches.pretty_print().unwrap(), expected);
+ }
+
+ #[tokio::test(flavor = "multi_thread")]
+ async fn test_promql_query() {
+ common_telemetry::init_default_ut_logging();
+
+ let standalone = tests::create_standalone_instance("test_standalone_promql_query").await;
+ let instance = &standalone.instance;
+
+ let table_name = "my_table";
+ let sql = format!("CREATE TABLE {table_name} (h string, a double, ts TIMESTAMP, TIME INDEX (ts), PRIMARY KEY(h))");
+ create_table(instance, sql).await;
+
+ let insert = InsertRequest {
+ table_name: table_name.to_string(),
+ columns: vec![
+ Column {
+ column_name: "h".to_string(),
+ values: Some(Values {
+ string_values: vec![
+ "t".to_string(),
+ "t".to_string(),
+ "t".to_string(),
+ "t".to_string(),
+ "t".to_string(),
+ "t".to_string(),
+ "t".to_string(),
+ "t".to_string(),
+ ],
+ ..Default::default()
+ }),
+ semantic_type: SemanticType::Tag as i32,
+ datatype: ColumnDataType::String as i32,
+ ..Default::default()
+ },
+ Column {
+ column_name: "a".to_string(),
+ values: Some(Values {
+ f64_values: vec![1f64, 11f64, 20f64, 22f64, 50f64, 55f64, 99f64],
+ ..Default::default()
+ }),
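+ // null_mask 4 = 0b100 marks the 3rd of the 8 rows as NULL, matching the 7 values above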
+ null_mask: vec![4],
+ semantic_type: SemanticType::Field as i32,
+ datatype: ColumnDataType::Float64 as i32,
+ },
+ Column {
+ column_name: "ts".to_string(),
+ values: Some(Values {
+ ts_millisecond_values: vec![
+ 1672557972000,
+ 1672557973000,
+ 1672557974000,
+ 1672557975000,
+ 1672557976000,
+ 1672557977000,
+ 1672557978000,
+ 1672557979000,
+ ],
+ ..Default::default()
+ }),
+ semantic_type: SemanticType::Timestamp as i32,
+ datatype: ColumnDataType::TimestampMillisecond as i32,
+ ..Default::default()
+ },
+ ],
+ row_count: 8,
+ ..Default::default()
+ };
+
+ let request = Request::Insert(insert);
+ let output = query(instance, request).await;
+ assert!(matches!(output, Output::AffectedRows(8)));
+
+ let request = Request::Query(QueryRequest {
+ query: Some(Query::PromRangeQuery(api::v1::PromRangeQuery {
+ query: "my_table".to_owned(),
+ start: "1672557973".to_owned(),
+ end: "1672557978".to_owned(),
+ step: "1s".to_owned(),
+ })),
+ });
+ let output = query(instance, request).await;
+ let Output::Stream(stream) = output else { unreachable!() };
+ let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
+ let expected = "\
++---+------+---------------------+
+| h | a | ts |
++---+------+---------------------+
+| t | 11.0 | 2023-01-01T07:26:13 |
+| t | | 2023-01-01T07:26:14 |
+| t | 20.0 | 2023-01-01T07:26:15 |
+| t | 22.0 | 2023-01-01T07:26:16 |
+| t | 50.0 | 2023-01-01T07:26:17 |
+| t | 55.0 | 2023-01-01T07:26:18 |
++---+------+---------------------+";
+ assert_eq!(recordbatches.pretty_print().unwrap(), expected);
+ }
+}
diff --git a/tests-integration/src/influxdb.rs b/tests-integration/src/influxdb.rs
new file mode 100644
index 000000000000..e77880897fcb
--- /dev/null
+++ b/tests-integration/src/influxdb.rs
@@ -0,0 +1,77 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#[cfg(test)]
+mod test {
+ use std::sync::Arc;
+
+ use common_query::Output;
+ use common_recordbatch::RecordBatches;
+ use frontend::instance::Instance;
+ use servers::influxdb::InfluxdbRequest;
+ use servers::query_handler::sql::SqlQueryHandler;
+ use servers::query_handler::InfluxdbLineProtocolHandler;
+ use session::context::QueryContext;
+
+ use crate::tests;
+
+ #[tokio::test(flavor = "multi_thread")]
+ async fn test_standalone_put_influxdb_lines() {
+ let standalone =
+ tests::create_standalone_instance("test_standalone_put_influxdb_lines").await;
+ let instance = &standalone.instance;
+
+ test_put_influxdb_lines(instance).await;
+ }
+
+ #[tokio::test(flavor = "multi_thread")]
+ async fn test_distributed_put_influxdb_lines() {
+ let instance =
+ tests::create_distributed_instance("test_distributed_put_influxdb_lines").await;
+ let instance = &instance.frontend;
+
+ test_put_influxdb_lines(instance).await;
+ }
+
+ async fn test_put_influxdb_lines(instance: &Arc<Instance>) {
+ let lines = r"
+monitor1,host=host1 cpu=66.6,memory=1024 1663840496100023100
+monitor1,host=host2 memory=1027 1663840496400340001";
+ let request = InfluxdbRequest {
+ precision: None,
+ lines: lines.to_string(),
+ };
+ instance.exec(&request, QueryContext::arc()).await.unwrap();
+
+ let mut output = instance
+ .do_query(
+ "SELECT ts, host, cpu, memory FROM monitor1 ORDER BY ts",
+ QueryContext::arc(),
+ )
+ .await;
+ let output = output.remove(0).unwrap();
+ let Output::Stream(stream) = output else { unreachable!() };
+ let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
+ assert_eq!(
+ recordbatches.pretty_print().unwrap(),
+ "\
++-------------------------+-------+------+--------+
+| ts | host | cpu | memory |
++-------------------------+-------+------+--------+
+| 2022-09-22T09:54:56.100 | host1 | 66.6 | 1024.0 |
+| 2022-09-22T09:54:56.400 | host2 | | 1027.0 |
++-------------------------+-------+------+--------+"
+ );
+ }
+}
diff --git a/tests-integration/src/instance.rs b/tests-integration/src/instance.rs
new file mode 100644
index 000000000000..d4876db6d72a
--- /dev/null
+++ b/tests-integration/src/instance.rs
@@ -0,0 +1,405 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#[cfg(test)]
+mod tests {
+ use std::borrow::Cow;
+ use std::collections::HashMap;
+ use std::sync::atomic::AtomicU32;
+ use std::sync::Arc;
+
+ use catalog::helper::{TableGlobalKey, TableGlobalValue};
+ use common_base::Plugins;
+ use common_query::Output;
+ use common_recordbatch::RecordBatches;
+ use frontend::error::{self, Error, Result};
+ use frontend::instance::Instance;
+ use frontend::table::DistTable;
+ use query::parser::QueryLanguageParser;
+ use servers::interceptor::{SqlQueryInterceptor, SqlQueryInterceptorRef};
+ use servers::query_handler::sql::SqlQueryHandler;
+ use session::context::{QueryContext, QueryContextRef};
+ use sql::statements::statement::Statement;
+
+ use crate::tests;
+ use crate::tests::MockDistributedInstance;
+
+ #[tokio::test(flavor = "multi_thread")]
+ async fn test_standalone_exec_sql() {
+ let standalone = tests::create_standalone_instance("test_standalone_exec_sql").await;
+ let instance = standalone.instance.as_ref();
+
+ let sql = r#"
+ CREATE TABLE demo(
+ host STRING,
+ ts TIMESTAMP,
+ cpu DOUBLE NULL,
+ memory DOUBLE NULL,
+ disk_util DOUBLE DEFAULT 9.9,
+ TIME INDEX (ts),
+ PRIMARY KEY(host)
+ ) engine=mito"#;
+ create_table(instance, sql).await;
+
+ insert_and_query(instance).await;
+
+ drop_table(instance).await;
+ }
+
+ #[tokio::test(flavor = "multi_thread")]
+ async fn test_distributed_exec_sql() {
+ let distributed = tests::create_distributed_instance("test_distributed_exec_sql").await;
+ let instance = distributed.frontend.as_ref();
+
+ let sql = r#"
+ CREATE TABLE demo(
+ host STRING,
+ ts TIMESTAMP,
+ cpu DOUBLE NULL,
+ memory DOUBLE NULL,
+ disk_util DOUBLE DEFAULT 9.9,
+ TIME INDEX (ts),
+ PRIMARY KEY(host)
+ )
+ PARTITION BY RANGE COLUMNS (host) (
+ PARTITION r0 VALUES LESS THAN ('550-A'),
+ PARTITION r1 VALUES LESS THAN ('550-W'),
+ PARTITION r2 VALUES LESS THAN ('MOSS'),
+ PARTITION r3 VALUES LESS THAN (MAXVALUE),
+ )
+ engine=mito"#;
+ create_table(instance, sql).await;
+
+ insert_and_query(instance).await;
+
+ verify_data_distribution(
+ &distributed,
+ HashMap::from([
+ (
+ 0u32,
+ "\
++---------------------+------+
+| ts | host |
++---------------------+------+
+| 2013-12-31T16:00:00 | 490 |
++---------------------+------+",
+ ),
+ (
+ 1u32,
+ "\
++---------------------+-------+
+| ts | host |
++---------------------+-------+
+| 2022-12-31T16:00:00 | 550-A |
++---------------------+-------+",
+ ),
+ (
+ 2u32,
+ "\
++---------------------+-------+
+| ts | host |
++---------------------+-------+
+| 2023-12-31T16:00:00 | 550-W |
++---------------------+-------+",
+ ),
+ (
+ 3u32,
+ "\
++---------------------+------+
+| ts | host |
++---------------------+------+
+| 2043-12-31T16:00:00 | MOSS |
++---------------------+------+",
+ ),
+ ]),
+ )
+ .await;
+
+ drop_table(instance).await;
+
+ verify_table_is_dropped(&distributed).await;
+ }
+
+ async fn query(instance: &Instance, sql: &str) -> Output {
+ SqlQueryHandler::do_query(instance, sql, QueryContext::arc())
+ .await
+ .remove(0)
+ .unwrap()
+ }
+
+ async fn create_table(instance: &Instance, sql: &str) {
+ let output = query(instance, sql).await;
+ let Output::AffectedRows(x) = output else { unreachable!() };
+ assert_eq!(x, 0);
+ }
+
+ async fn insert_and_query(instance: &Instance) {
+ let sql = r#"INSERT INTO demo(host, cpu, memory, ts) VALUES
+ ('490', 0.1, 1, 1388505600000),
+ ('550-A', 1, 100, 1672502400000),
+ ('550-W', 10000, 1000000, 1704038400000),
+ ('MOSS', 100000000, 10000000000, 2335190400000)
+ "#;
+ let output = query(instance, sql).await;
+ let Output::AffectedRows(x) = output else { unreachable!() };
+ assert_eq!(x, 4);
+
+ let sql = "SELECT * FROM demo WHERE ts > cast(1000000000 as timestamp) ORDER BY host"; // use nanoseconds as where condition
+ let output = query(instance, sql).await;
+ let Output::Stream(s) = output else { unreachable!() };
+ let batches = common_recordbatch::util::collect_batches(s).await.unwrap();
+ let pretty_print = batches.pretty_print().unwrap();
+ let expected = "\
++-------+---------------------+-------------+-----------+-----------+
+| host | ts | cpu | memory | disk_util |
++-------+---------------------+-------------+-----------+-----------+
+| 490 | 2013-12-31T16:00:00 | 0.1 | 1.0 | 9.9 |
+| 550-A | 2022-12-31T16:00:00 | 1.0 | 100.0 | 9.9 |
+| 550-W | 2023-12-31T16:00:00 | 10000.0 | 1000000.0 | 9.9 |
+| MOSS | 2043-12-31T16:00:00 | 100000000.0 | 1.0e10 | 9.9 |
++-------+---------------------+-------------+-----------+-----------+";
+ assert_eq!(pretty_print, expected);
+ }
+
+ async fn verify_data_distribution(
+ instance: &MockDistributedInstance,
+ expected_distribution: HashMap<u32, &str>,
+ ) {
+ let table = instance
+ .frontend
+ .catalog_manager()
+ .table("greptime", "public", "demo")
+ .await
+ .unwrap()
+ .unwrap();
+ let table = table.as_any().downcast_ref::<DistTable>().unwrap();
+
+ let TableGlobalValue { regions_id_map, .. } = table
+ .table_global_value(&TableGlobalKey {
+ catalog_name: "greptime".to_string(),
+ schema_name: "public".to_string(),
+ table_name: "demo".to_string(),
+ })
+ .await
+ .unwrap()
+ .unwrap();
+ let region_to_dn_map = regions_id_map
+ .iter()
+ .map(|(k, v)| (v[0], *k))
+ .collect::<HashMap<u32, u64>>();
+ assert_eq!(region_to_dn_map.len(), expected_distribution.len());
+
+ let stmt = QueryLanguageParser::parse_sql("SELECT ts, host FROM demo ORDER BY ts").unwrap();
+ for (region, dn) in region_to_dn_map.iter() {
+ let dn = instance.datanodes.get(dn).unwrap();
+ let engine = dn.query_engine();
+ let plan = engine
+ .planner()
+ .plan(stmt.clone(), QueryContext::arc())
+ .await
+ .unwrap();
+ let output = engine.execute(plan, QueryContext::arc()).await.unwrap();
+ let Output::Stream(stream) = output else { unreachable!() };
+ let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
+ let actual = recordbatches.pretty_print().unwrap();
+
+ let expected = expected_distribution.get(region).unwrap();
+ assert_eq!(&actual, expected);
+ }
+ }
+
+ async fn drop_table(instance: &Instance) {
+ let sql = "DROP TABLE demo";
+ let output = query(instance, sql).await;
+ let Output::AffectedRows(x) = output else { unreachable!() };
+ assert_eq!(x, 1);
+ }
+
+ async fn verify_table_is_dropped(instance: &MockDistributedInstance) {
+ for (_, dn) in instance.datanodes.iter() {
+ assert!(dn
+ .catalog_manager()
+ .table("greptime", "public", "demo")
+ .await
+ .unwrap()
+ .is_none())
+ }
+ }
+
+ #[tokio::test(flavor = "multi_thread")]
+ async fn test_sql_interceptor_plugin() {
+ #[derive(Default)]
+ struct AssertionHook {
+ pub(crate) c: AtomicU32,
+ }
+
+ impl SqlQueryInterceptor for AssertionHook {
+ type Error = Error;
+
+ fn pre_parsing<'a>(
+ &self,
+ query: &'a str,
+ _query_ctx: QueryContextRef,
+ ) -> Result<Cow<'a, str>> {
+ self.c.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
+ assert!(query.starts_with("CREATE TABLE demo"));
+ Ok(Cow::Borrowed(query))
+ }
+
+ fn post_parsing(
+ &self,
+ statements: Vec<Statement>,
+ _query_ctx: QueryContextRef,
+ ) -> Result<Vec<Statement>> {
+ self.c.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
+ assert!(matches!(statements[0], Statement::CreateTable(_)));
+ Ok(statements)
+ }
+
+ fn pre_execute(
+ &self,
+ _statement: &Statement,
+ _plan: Option<&query::plan::LogicalPlan>,
+ _query_ctx: QueryContextRef,
+ ) -> Result<()> {
+ self.c.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
+ Ok(())
+ }
+
+ fn post_execute(
+ &self,
+ mut output: Output,
+ _query_ctx: QueryContextRef,
+ ) -> Result<Output> {
+ self.c.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
+ match &mut output {
+ Output::AffectedRows(rows) => {
+ assert_eq!(*rows, 0);
+ // update output result
+ *rows = 10;
+ }
+ _ => unreachable!(),
+ }
+ Ok(output)
+ }
+ }
+
+ let standalone = tests::create_standalone_instance("test_hook").await;
+ let mut instance = standalone.instance;
+
+ let plugins = Plugins::new();
+ let counter_hook = Arc::new(AssertionHook::default());
+ plugins.insert::<SqlQueryInterceptorRef<Error>>(counter_hook.clone());
+ Arc::make_mut(&mut instance).set_plugins(Arc::new(plugins));
+
+ let sql = r#"CREATE TABLE demo(
+ host STRING,
+ ts TIMESTAMP,
+ cpu DOUBLE NULL,
+ memory DOUBLE NULL,
+ disk_util DOUBLE DEFAULT 9.9,
+ TIME INDEX (ts),
+ PRIMARY KEY(host)
+ ) engine=mito with(regions=1);"#;
+ let output = SqlQueryHandler::do_query(&*instance, sql, QueryContext::arc())
+ .await
+ .remove(0)
+ .unwrap();
+
+ // assert that all four hook methods (pre/post parsing and pre/post execute) were each called once
+ assert_eq!(4, counter_hook.c.load(std::sync::atomic::Ordering::Relaxed));
+ match output {
+ Output::AffectedRows(rows) => assert_eq!(rows, 10),
+ _ => unreachable!(),
+ }
+ }
+
+ #[tokio::test(flavor = "multi_thread")]
+ async fn test_disable_db_operation_plugin() {
+ #[derive(Default)]
+ struct DisableDBOpHook;
+
+ impl SqlQueryInterceptor for DisableDBOpHook {
+ type Error = Error;
+
+ fn post_parsing(
+ &self,
+ statements: Vec<Statement>,
+ _query_ctx: QueryContextRef,
+ ) -> Result<Vec<Statement>> {
+ for s in &statements {
+ match s {
+ Statement::CreateDatabase(_) | Statement::ShowDatabases(_) => {
+ return Err(Error::NotSupported {
+ feat: "Database operations".to_owned(),
+ })
+ }
+ _ => {}
+ }
+ }
+
+ Ok(statements)
+ }
+ }
+
+ let query_ctx = Arc::new(QueryContext::new());
+
+ let standalone = tests::create_standalone_instance("test_db_hook").await;
+ let mut instance = standalone.instance;
+
+ let plugins = Plugins::new();
+ let hook = Arc::new(DisableDBOpHook::default());
+ plugins.insert::<SqlQueryInterceptorRef<Error>>(hook.clone());
+ Arc::make_mut(&mut instance).set_plugins(Arc::new(plugins));
+
+ let sql = r#"CREATE TABLE demo(
+ host STRING,
+ ts TIMESTAMP,
+ cpu DOUBLE NULL,
+ memory DOUBLE NULL,
+ disk_util DOUBLE DEFAULT 9.9,
+ TIME INDEX (ts),
+ PRIMARY KEY(host)
+ ) engine=mito with(regions=1);"#;
+ let output = SqlQueryHandler::do_query(&*instance, sql, query_ctx.clone())
+ .await
+ .remove(0)
+ .unwrap();
+
+ match output {
+ Output::AffectedRows(rows) => assert_eq!(rows, 0),
+ _ => unreachable!(),
+ }
+
+ let sql = r#"CREATE DATABASE tomcat"#;
+ if let Err(e) = SqlQueryHandler::do_query(&*instance, sql, query_ctx.clone())
+ .await
+ .remove(0)
+ {
+ assert!(matches!(e, error::Error::NotSupported { .. }));
+ } else {
+ unreachable!();
+ }
+
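+ // the interceptor rejects the whole multi-statement batch at post_parsing because it contains SHOW DATABASES, so even the leading SELECT yields an error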
+ let sql = r#"SELECT 1; SHOW DATABASES"#;
+ if let Err(e) = SqlQueryHandler::do_query(&*instance, sql, query_ctx.clone())
+ .await
+ .remove(0)
+ {
+ assert!(matches!(e, error::Error::NotSupported { .. }));
+ } else {
+ unreachable!();
+ }
+ }
+}
diff --git a/tests-integration/src/lib.rs b/tests-integration/src/lib.rs
index 423bc7f30040..3a3642b49aab 100644
--- a/tests-integration/src/lib.rs
+++ b/tests-integration/src/lib.rs
@@ -12,4 +12,19 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+mod catalog;
+mod grpc;
+mod influxdb;
+mod instance;
+mod opentsdb;
+mod prometheus;
+mod table;
pub mod test_util;
+
+#[cfg(test)]
+mod tests;
+
+#[cfg(test)]
+// allowed because https://docs.rs/rstest_reuse/0.5.0/rstest_reuse/#use-rstest_reuse-at-the-top-of-your-crate
+#[allow(clippy::single_component_path_imports)]
+use rstest_reuse;
diff --git a/tests-integration/src/opentsdb.rs b/tests-integration/src/opentsdb.rs
new file mode 100644
index 000000000000..b61fd88ce12c
--- /dev/null
+++ b/tests-integration/src/opentsdb.rs
@@ -0,0 +1,107 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#[cfg(test)]
+mod tests {
+ use std::sync::Arc;
+
+ use common_query::Output;
+ use common_recordbatch::RecordBatches;
+ use frontend::instance::Instance;
+ use itertools::Itertools;
+ use servers::opentsdb::codec::DataPoint;
+ use servers::query_handler::sql::SqlQueryHandler;
+ use servers::query_handler::OpentsdbProtocolHandler;
+ use session::context::QueryContext;
+
+ use crate::tests;
+
+ #[tokio::test(flavor = "multi_thread")]
+ async fn test_standalone_exec() {
+ let standalone = tests::create_standalone_instance("test_standalone_exec").await;
+ let instance = &standalone.instance;
+
+ test_exec(instance).await;
+ }
+
+ #[tokio::test(flavor = "multi_thread")]
+ async fn test_distributed_exec() {
+ let distributed = tests::create_distributed_instance("test_distributed_exec").await;
+ let instance = &distributed.frontend;
+
+ test_exec(instance).await;
+ }
+
+ async fn test_exec(instance: &Arc<Instance>) {
+ let ctx = QueryContext::arc();
+ let data_point1 = DataPoint::new(
+ "my_metric_1".to_string(),
+ 1000,
+ 1.0,
+ vec![
+ ("tagk1".to_string(), "tagv1".to_string()),
+ ("tagk2".to_string(), "tagv2".to_string()),
+ ],
+ );
+ // should automatically create the new table "my_metric_1"
+ let result = instance.exec(&data_point1, ctx.clone()).await;
+ assert!(result.is_ok());
+
+ let data_point2 = DataPoint::new(
+ "my_metric_1".to_string(),
+ 2000,
+ 2.0,
+ vec![
+ ("tagk2".to_string(), "tagv2".to_string()),
+ ("tagk3".to_string(), "tagv3".to_string()),
+ ],
+ );
+ // should automatically add the new column "tagk3"
+ let result = instance.exec(&data_point2, ctx.clone()).await;
+ assert!(result.is_ok());
+
+ let data_point3 = DataPoint::new("my_metric_1".to_string(), 3000, 3.0, vec![]);
+ // should handle null tags properly
+ let result = instance.exec(&data_point3, ctx.clone()).await;
+ assert!(result.is_ok());
+
+ let output = instance
+ .do_query(
+ "select * from my_metric_1 order by greptime_timestamp",
+ Arc::new(QueryContext::new()),
+ )
+ .await
+ .remove(0)
+ .unwrap();
+ match output {
+ Output::Stream(stream) => {
+ let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
+ let pretty_print = recordbatches.pretty_print().unwrap();
+ let expected = vec![
+ "+---------------------+----------------+-------+-------+-------+",
+ "| greptime_timestamp | greptime_value | tagk1 | tagk2 | tagk3 |",
+ "+---------------------+----------------+-------+-------+-------+",
+ "| 1970-01-01T00:00:01 | 1.0 | tagv1 | tagv2 | |",
+ "| 1970-01-01T00:00:02 | 2.0 | | tagv2 | tagv3 |",
+ "| 1970-01-01T00:00:03 | 3.0 | | | |",
+ "+---------------------+----------------+-------+-------+-------+",
+ ]
+ .into_iter()
+ .join("\n");
+ assert_eq!(pretty_print, expected);
+ }
+ _ => unreachable!(),
+ };
+ }
+}
diff --git a/tests-integration/src/prometheus.rs b/tests-integration/src/prometheus.rs
new file mode 100644
index 000000000000..6bd20c70fce1
--- /dev/null
+++ b/tests-integration/src/prometheus.rs
@@ -0,0 +1,183 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#[cfg(test)]
+mod tests {
+ use std::sync::Arc;
+
+ use api::prometheus::remote::label_matcher::Type as MatcherType;
+ use api::prometheus::remote::{
+ Label, LabelMatcher, Query, ReadRequest, ReadResponse, Sample, WriteRequest,
+ };
+ use common_catalog::consts::DEFAULT_CATALOG_NAME;
+ use frontend::instance::Instance;
+ use prost::Message;
+ use servers::prometheus;
+ use servers::query_handler::sql::SqlQueryHandler;
+ use servers::query_handler::PrometheusProtocolHandler;
+ use session::context::QueryContext;
+
+ use crate::tests;
+
+ #[tokio::test(flavor = "multi_thread")]
+ async fn test_standalone_prometheus_remote_rw() {
+ let standalone =
+ tests::create_standalone_instance("test_standalone_prometheus_remote_rw").await;
+ let instance = &standalone.instance;
+
+ test_prometheus_remote_rw(instance).await;
+ }
+
+ #[tokio::test(flavor = "multi_thread")]
+ async fn test_distributed_prometheus_remote_rw() {
+ let distributed =
+ tests::create_distributed_instance("test_distributed_prometheus_remote_rw").await;
+ let instance = &distributed.frontend;
+
+ test_prometheus_remote_rw(instance).await;
+ }
+
+ async fn test_prometheus_remote_rw(instance: &Arc<Instance>) {
+ let write_request = WriteRequest {
+ timeseries: prometheus::mock_timeseries(),
+ ..Default::default()
+ };
+
+ let db = "prometheus";
+ let ctx = Arc::new(QueryContext::with(DEFAULT_CATALOG_NAME, db));
+
+ assert!(SqlQueryHandler::do_query(
+ instance.as_ref(),
+ "CREATE DATABASE IF NOT EXISTS prometheus",
+ ctx.clone(),
+ )
+ .await
+ .get(0)
+ .unwrap()
+ .is_ok());
+
+ instance.write(write_request, ctx.clone()).await.unwrap();
+
+ let read_request = ReadRequest {
+ queries: vec![
+ Query {
+ start_timestamp_ms: 1000,
+ end_timestamp_ms: 2000,
+ matchers: vec![LabelMatcher {
+ name: prometheus::METRIC_NAME_LABEL.to_string(),
+ value: "metric1".to_string(),
+ r#type: 0,
+ }],
+ ..Default::default()
+ },
+ Query {
+ start_timestamp_ms: 1000,
+ end_timestamp_ms: 3000,
+ matchers: vec![
+ LabelMatcher {
+ name: prometheus::METRIC_NAME_LABEL.to_string(),
+ value: "metric3".to_string(),
+ r#type: 0,
+ },
+ LabelMatcher {
+ name: "app".to_string(),
+ value: "biz".to_string(),
+ r#type: MatcherType::Eq as i32,
+ },
+ ],
+ ..Default::default()
+ },
+ ],
+ ..Default::default()
+ };
+
+ let resp = instance.read(read_request, ctx).await.unwrap();
+ assert_eq!(resp.content_type, "application/x-protobuf");
+ assert_eq!(resp.content_encoding, "snappy");
+ let body = prometheus::snappy_decompress(&resp.body).unwrap();
+ let read_response = ReadResponse::decode(&body[..]).unwrap();
+ let query_results = read_response.results;
+ assert_eq!(2, query_results.len());
+
+ assert_eq!(1, query_results[0].timeseries.len());
+ let timeseries = &query_results[0].timeseries[0];
+
+ assert_eq!(
+ vec![
+ Label {
+ name: prometheus::METRIC_NAME_LABEL.to_string(),
+ value: "metric1".to_string(),
+ },
+ Label {
+ name: "job".to_string(),
+ value: "spark".to_string(),
+ },
+ ],
+ timeseries.labels
+ );
+
+ assert_eq!(
+ timeseries.samples,
+ vec![
+ Sample {
+ value: 1.0,
+ timestamp: 1000,
+ },
+ Sample {
+ value: 2.0,
+ timestamp: 2000,
+ }
+ ]
+ );
+
+ assert_eq!(1, query_results[1].timeseries.len());
+ let timeseries = &query_results[1].timeseries[0];
+
+ assert_eq!(
+ vec![
+ Label {
+ name: prometheus::METRIC_NAME_LABEL.to_string(),
+ value: "metric3".to_string(),
+ },
+ Label {
+ name: "idc".to_string(),
+ value: "z002".to_string(),
+ },
+ Label {
+ name: "app".to_string(),
+ value: "biz".to_string(),
+ },
+ ],
+ timeseries.labels
+ );
+
+ assert_eq!(
+ timeseries.samples,
+ vec![
+ Sample {
+ value: 5.0,
+ timestamp: 1000,
+ },
+ Sample {
+ value: 6.0,
+ timestamp: 2000,
+ },
+ Sample {
+ value: 7.0,
+ timestamp: 3000,
+ }
+ ]
+ );
+ }
+}
diff --git a/tests-integration/src/table.rs b/tests-integration/src/table.rs
new file mode 100644
index 000000000000..42e25ec9a4be
--- /dev/null
+++ b/tests-integration/src/table.rs
@@ -0,0 +1,363 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#[cfg(test)]
+mod test {
+ use std::collections::HashMap;
+ use std::sync::Arc;
+
+ use api::v1::column::SemanticType;
+ use api::v1::{column, Column, ColumnDataType, InsertRequest as GrpcInsertRequest};
+ use common_query::logical_plan::Expr;
+ use common_query::physical_plan::DfPhysicalPlanAdapter;
+ use common_query::DfPhysicalPlan;
+ use common_recordbatch::adapter::RecordBatchStreamAdapter;
+ use common_recordbatch::RecordBatches;
+ use datafusion::physical_plan::coalesce_partitions::CoalescePartitionsExec;
+ use datafusion::physical_plan::expressions::{col as physical_col, PhysicalSortExpr};
+ use datafusion::physical_plan::sorts::sort::SortExec;
+ use datafusion::prelude::SessionContext;
+ use datafusion::sql::sqlparser;
+ use datafusion_expr::expr_fn::{and, binary_expr, col};
+ use datafusion_expr::{lit, Operator};
+ use datanode::instance::Instance;
+ use datatypes::arrow::compute::SortOptions;
+ use datatypes::prelude::ConcreteDataType;
+ use datatypes::schema::{ColumnSchema, Schema};
+ use frontend::expr_factory;
+ use frontend::table::DistTable;
+ use itertools::Itertools;
+ use meta_client::rpc::TableName;
+ use session::context::QueryContext;
+ use sql::parser::ParserContext;
+ use sql::statements::statement::Statement;
+ use store_api::storage::RegionNumber;
+ use table::metadata::{TableInfoBuilder, TableMetaBuilder};
+ use table::TableRef;
+
+ #[tokio::test(flavor = "multi_thread")]
+ async fn test_dist_table_scan() {
+ common_telemetry::init_default_ut_logging();
+ let table = Arc::new(new_dist_table("test_dist_table_scan").await);
+ // should scan all regions
+ // select a, row_id from numbers
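+ // (projection indices refer to the table schema (ts, a, row_id), so [1, 2] selects "a" and "row_id")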
+ let projection = Some(vec![1, 2]);
+ let filters = vec![];
+ let expected_output = vec![
+ "+-----+--------+",
+ "| a | row_id |",
+ "+-----+--------+",
+ "| 0 | 1 |",
+ "| 1 | 2 |",
+ "| 2 | 3 |",
+ "| 3 | 4 |",
+ "| 4 | 5 |",
+ "| 10 | 1 |",
+ "| 11 | 2 |",
+ "| 12 | 3 |",
+ "| 13 | 4 |",
+ "| 14 | 5 |",
+ "| 30 | 1 |",
+ "| 31 | 2 |",
+ "| 32 | 3 |",
+ "| 33 | 4 |",
+ "| 34 | 5 |",
+ "| 100 | 1 |",
+ "| 101 | 2 |",
+ "| 102 | 3 |",
+ "| 103 | 4 |",
+ "| 104 | 5 |",
+ "+-----+--------+",
+ ];
+ exec_table_scan(table.clone(), projection, filters, 4, expected_output).await;
+
+ // should scan only region 1
+ // select a, row_id from numbers where a < 10
+ let projection = Some(vec![1, 2]);
+ let filters = vec![binary_expr(col("a"), Operator::Lt, lit(10)).into()];
+ let expected_output = vec![
+ "+---+--------+",
+ "| a | row_id |",
+ "+---+--------+",
+ "| 0 | 1 |",
+ "| 1 | 2 |",
+ "| 2 | 3 |",
+ "| 3 | 4 |",
+ "| 4 | 5 |",
+ "+---+--------+",
+ ];
+ exec_table_scan(table.clone(), projection, filters, 1, expected_output).await;
+
+ // should scan region 1 and 2
+ // select a, row_id from numbers where a < 15
+ let projection = Some(vec![1, 2]);
+ let filters = vec![binary_expr(col("a"), Operator::Lt, lit(15)).into()];
+ let expected_output = vec![
+ "+----+--------+",
+ "| a | row_id |",
+ "+----+--------+",
+ "| 0 | 1 |",
+ "| 1 | 2 |",
+ "| 2 | 3 |",
+ "| 3 | 4 |",
+ "| 4 | 5 |",
+ "| 10 | 1 |",
+ "| 11 | 2 |",
+ "| 12 | 3 |",
+ "| 13 | 4 |",
+ "| 14 | 5 |",
+ "+----+--------+",
+ ];
+ exec_table_scan(table.clone(), projection, filters, 2, expected_output).await;
+
+ // should scan region 2 and 3
+ // select a, row_id from numbers where a < 40 and a >= 10
+ let projection = Some(vec![1, 2]);
+ let filters = vec![and(
+ binary_expr(col("a"), Operator::Lt, lit(40)),
+ binary_expr(col("a"), Operator::GtEq, lit(10)),
+ )
+ .into()];
+ let expected_output = vec![
+ "+----+--------+",
+ "| a | row_id |",
+ "+----+--------+",
+ "| 10 | 1 |",
+ "| 11 | 2 |",
+ "| 12 | 3 |",
+ "| 13 | 4 |",
+ "| 14 | 5 |",
+ "| 30 | 1 |",
+ "| 31 | 2 |",
+ "| 32 | 3 |",
+ "| 33 | 4 |",
+ "| 34 | 5 |",
+ "+----+--------+",
+ ];
+ exec_table_scan(table.clone(), projection, filters, 2, expected_output).await;
+
+ // should scan all regions
+ // select a, row_id from numbers where a < 1000 and row_id == 1
+ let projection = Some(vec![1, 2]);
+ let filters = vec![and(
+ binary_expr(col("a"), Operator::Lt, lit(1000)),
+ binary_expr(col("row_id"), Operator::Eq, lit(1)),
+ )
+ .into()];
+ let expected_output = vec![
+ "+-----+--------+",
+ "| a | row_id |",
+ "+-----+--------+",
+ "| 0 | 1 |",
+ "| 10 | 1 |",
+ "| 30 | 1 |",
+ "| 100 | 1 |",
+ "+-----+--------+",
+ ];
+ exec_table_scan(table.clone(), projection, filters, 4, expected_output).await;
+ }
+
+ async fn exec_table_scan(
+ table: TableRef,
+ projection: Option<Vec<usize>>,
+ filters: Vec<Expr>,
+ expected_partitions: usize,
+ expected_output: Vec<&str>,
+ ) {
+ let expected_output = expected_output.into_iter().join("\n");
+ let table_scan = table
+ .scan(projection.as_ref(), filters.as_slice(), None)
+ .await
+ .unwrap();
+ assert_eq!(
+ table_scan.output_partitioning().partition_count(),
+ expected_partitions
+ );
+
+ let merge =
+ CoalescePartitionsExec::new(Arc::new(DfPhysicalPlanAdapter(table_scan.clone())));
+
+ let sort = SortExec::new(
+ vec![PhysicalSortExpr {
+ expr: physical_col("a", table_scan.schema().arrow_schema()).unwrap(),
+ options: SortOptions::default(),
+ }],
+ Arc::new(merge),
+ )
+ .with_fetch(None);
+ assert_eq!(sort.output_partitioning().partition_count(), 1);
+
+ let session_ctx = SessionContext::new();
+ let stream = sort.execute(0, session_ctx.task_ctx()).unwrap();
+ let stream = Box::pin(RecordBatchStreamAdapter::try_new(stream).unwrap());
+
+ let recordbatches = RecordBatches::try_collect(stream).await.unwrap();
+ assert_eq!(recordbatches.pretty_print().unwrap(), expected_output);
+ }
+
+ async fn new_dist_table(test_name: &str) -> DistTable {
+ let column_schemas = vec![
+ ColumnSchema::new("ts", ConcreteDataType::int64_datatype(), false),
+ ColumnSchema::new("a", ConcreteDataType::int32_datatype(), true),
+ ColumnSchema::new("row_id", ConcreteDataType::int32_datatype(), true),
+ ];
+ let schema = Arc::new(Schema::new(column_schemas.clone()));
+
+ let instance = crate::tests::create_distributed_instance(test_name).await;
+ let dist_instance = &instance.dist_instance;
+ let datanode_instances = instance.datanodes;
+
+ let catalog_manager = dist_instance.catalog_manager();
+ let partition_manager = catalog_manager.partition_manager();
+ let datanode_clients = catalog_manager.datanode_clients();
+
+ let table_name = TableName::new("greptime", "public", "dist_numbers");
+
+ let sql = "
+ CREATE TABLE greptime.public.dist_numbers (
+ ts BIGINT,
+ a INT,
+ row_id INT,
+ TIME INDEX (ts),
+ )
+ PARTITION BY RANGE COLUMNS (a) (
+ PARTITION r0 VALUES LESS THAN (10),
+ PARTITION r1 VALUES LESS THAN (20),
+ PARTITION r2 VALUES LESS THAN (50),
+ PARTITION r3 VALUES LESS THAN (MAXVALUE),
+ )
+ ENGINE=mito";
+
+ let create_table =
+ match ParserContext::create_with_dialect(sql, &sqlparser::dialect::GenericDialect {})
+ .unwrap()
+ .pop()
+ .unwrap()
+ {
+ Statement::CreateTable(c) => c,
+ _ => unreachable!(),
+ };
+
+ let mut expr = expr_factory::create_to_expr(&create_table, QueryContext::arc()).unwrap();
+ let _result = dist_instance
+ .create_table(&mut expr, create_table.partitions)
+ .await
+ .unwrap();
+
+ let table_route = partition_manager
+ .find_table_route(&table_name)
+ .await
+ .unwrap();
+
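+ // record which datanode hosts each region's leader replica so the test data below is written to the right datanode instance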
+ let mut region_to_datanode_mapping = HashMap::new();
+ for region_route in table_route.region_routes.iter() {
+ let region_id = region_route.region.id as u32;
+ let datanode_id = region_route.leader_peer.as_ref().unwrap().id;
+ region_to_datanode_mapping.insert(region_id, datanode_id);
+ }
+
+ let mut global_start_ts = 1;
+ let regional_numbers = vec![
+ (0, (0..5).collect::<Vec<i32>>()),
+ (1, (10..15).collect::<Vec<i32>>()),
+ (2, (30..35).collect::<Vec<i32>>()),
+ (3, (100..105).collect::<Vec<i32>>()),
+ ];
+ for (region_number, numbers) in regional_numbers {
+ let datanode_id = *region_to_datanode_mapping.get(®ion_number).unwrap();
+ let instance = datanode_instances.get(&datanode_id).unwrap().clone();
+
+ let start_ts = global_start_ts;
+ global_start_ts += numbers.len() as i64;
+
+ insert_testing_data(
+ &table_name,
+ instance.clone(),
+ numbers,
+ start_ts,
+ region_number,
+ )
+ .await;
+ }
+
+ let meta = TableMetaBuilder::default()
+ .schema(schema)
+ .primary_key_indices(vec![])
+ .next_column_id(1)
+ .build()
+ .unwrap();
+ let table_info = TableInfoBuilder::default()
+ .name(&table_name.table_name)
+ .meta(meta)
+ .build()
+ .unwrap();
+ DistTable::new(
+ table_name,
+ Arc::new(table_info),
+ partition_manager,
+ datanode_clients,
+ catalog_manager.backend(),
+ )
+ }
+
+ async fn insert_testing_data(
+ table_name: &TableName,
+ dn_instance: Arc<Instance>,
+ data: Vec<i32>,
+ start_ts: i64,
+ region_number: RegionNumber,
+ ) {
+ let row_count = data.len() as u32;
+ let columns = vec![
+ Column {
+ column_name: "ts".to_string(),
+ values: Some(column::Values {
+ i64_values: (start_ts..start_ts + row_count as i64).collect::<Vec<i64>>(),
+ ..Default::default()
+ }),
+ datatype: ColumnDataType::Int64 as i32,
+ semantic_type: SemanticType::Timestamp as i32,
+ ..Default::default()
+ },
+ Column {
+ column_name: "a".to_string(),
+ values: Some(column::Values {
+ i32_values: data,
+ ..Default::default()
+ }),
+ datatype: ColumnDataType::Int32 as i32,
+ ..Default::default()
+ },
+ Column {
+ column_name: "row_id".to_string(),
+ values: Some(column::Values {
+ i32_values: (1..=row_count as i32).collect::<Vec<i32>>(),
+ ..Default::default()
+ }),
+ datatype: ColumnDataType::Int32 as i32,
+ ..Default::default()
+ },
+ ];
+ let request = GrpcInsertRequest {
+ table_name: table_name.table_name.clone(),
+ columns,
+ row_count,
+ region_number,
+ };
+ dn_instance
+ .handle_insert(request, QueryContext::arc())
+ .await
+ .unwrap();
+ }
+}
diff --git a/src/frontend/src/tests.rs b/tests-integration/src/tests.rs
similarity index 98%
rename from src/frontend/src/tests.rs
rename to tests-integration/src/tests.rs
index 4ae3ed05d6d1..255997cd28fe 100644
--- a/src/frontend/src/tests.rs
+++ b/tests-integration/src/tests.rs
@@ -30,6 +30,10 @@ use datanode::datanode::{
DatanodeOptions, FileConfig, ObjectStoreConfig, ProcedureConfig, StorageConfig, WalConfig,
};
use datanode::instance::Instance as DatanodeInstance;
+use frontend::catalog::FrontendCatalogManager;
+use frontend::datanode::DatanodeClients;
+use frontend::instance::distributed::DistInstance;
+use frontend::instance::Instance;
use meta_client::client::MetaClientBuilder;
use meta_client::rpc::Peer;
use meta_srv::metasrv::MetaSrvOptions;
@@ -45,11 +49,6 @@ use table::engine::{region_name, table_dir};
use tonic::transport::Server;
use tower::service_fn;
-use crate::catalog::FrontendCatalogManager;
-use crate::datanode::DatanodeClients;
-use crate::instance::distributed::DistInstance;
-use crate::instance::Instance;
-
/// Guard against the `TempDir`s that used in unit tests.
/// (The `TempDir` will be deleted once it goes out of scope.)
pub struct TestGuard {
diff --git a/src/frontend/src/tests/instance_test.rs b/tests-integration/src/tests/instance_test.rs
similarity index 99%
rename from src/frontend/src/tests/instance_test.rs
rename to tests-integration/src/tests/instance_test.rs
index 0e97d48e100f..b208a3d4fb02 100644
--- a/src/frontend/src/tests/instance_test.rs
+++ b/tests-integration/src/tests/instance_test.rs
@@ -20,13 +20,13 @@ use common_query::Output;
use common_recordbatch::util;
use common_telemetry::logging;
use datatypes::vectors::{Int64Vector, StringVector, UInt64Vector, VectorRef};
+use frontend::error::{Error, Result};
+use frontend::instance::Instance;
use rstest::rstest;
use rstest_reuse::apply;
use servers::query_handler::sql::SqlQueryHandler;
use session::context::{QueryContext, QueryContextRef};
-use crate::error::{Error, Result};
-use crate::instance::Instance;
use crate::tests::test_util::{
both_instances_cases, check_output_stream, check_unordered_output_stream, distributed,
get_data_dir, standalone, standalone_instance_case, MockInstance,
@@ -516,7 +516,7 @@ async fn test_execute_external_create_without_ts_type(instance: Arc<dyn MockInst
async fn test_execute_query_external_table_parquet(instance: Arc<dyn MockInstance>) {
let instance = instance.frontend();
let format = "parquet";
- let location = get_data_dir("../../tests/data/parquet/various_type.parquet")
+ let location = get_data_dir("../tests/data/parquet/various_type.parquet")
.canonicalize()
.unwrap()
.display()
@@ -586,7 +586,7 @@ async fn test_execute_query_external_table_parquet(instance: Arc<dyn MockInstanc
async fn test_execute_query_external_table_csv(instance: Arc<dyn MockInstance>) {
let instance = instance.frontend();
let format = "csv";
- let location = get_data_dir("../../tests/data/csv/various_type.csv")
+ let location = get_data_dir("../tests/data/csv/various_type.csv")
.canonicalize()
.unwrap()
.display()
@@ -637,7 +637,7 @@ async fn test_execute_query_external_table_csv(instance: Arc<dyn MockInstance>)
async fn test_execute_query_external_table_json(instance: Arc<dyn MockInstance>) {
let instance = instance.frontend();
let format = "json";
- let location = get_data_dir("../../tests/data/json/various_type.json")
+ let location = get_data_dir("../tests/data/json/various_type.json")
.canonicalize()
.unwrap()
.display()
@@ -694,7 +694,7 @@ async fn test_execute_query_external_table_json(instance: Arc<dyn MockInstance>)
async fn test_execute_query_external_table_json_with_schame(instance: Arc<dyn MockInstance>) {
let instance = instance.frontend();
let format = "json";
- let location = get_data_dir("../../tests/data/json/various_type.json")
+ let location = get_data_dir("../tests/data/json/various_type.json")
.canonicalize()
.unwrap()
.display()
diff --git a/src/frontend/src/tests/promql_test.rs b/tests-integration/src/tests/promql_test.rs
similarity index 99%
rename from src/frontend/src/tests/promql_test.rs
rename to tests-integration/src/tests/promql_test.rs
index e1ea15949f3b..9e8fbfb1ad61 100644
--- a/src/frontend/src/tests/promql_test.rs
+++ b/tests-integration/src/tests/promql_test.rs
@@ -15,6 +15,7 @@
use std::sync::Arc;
use std::time::{Duration, SystemTime, UNIX_EPOCH};
+use frontend::instance::Instance;
use query::parser::{PromQuery, QueryLanguageParser, QueryStatement};
use rstest::rstest;
use rstest_reuse::apply;
@@ -22,7 +23,6 @@ use servers::query_handler::sql::SqlQueryHandler;
use session::context::QueryContext;
use super::test_util::{check_unordered_output_stream, standalone, standalone_instance_case};
-use crate::instance::Instance;
use crate::tests::test_util::MockInstance;
#[allow(clippy::too_many_arguments)]
diff --git a/src/frontend/src/tests/test_util.rs b/tests-integration/src/tests/test_util.rs
similarity index 99%
rename from src/frontend/src/tests/test_util.rs
rename to tests-integration/src/tests/test_util.rs
index 668370c9ed35..d5a4d56aaa5a 100644
--- a/src/frontend/src/tests/test_util.rs
+++ b/tests-integration/src/tests/test_util.rs
@@ -17,9 +17,9 @@ use std::sync::Arc;
use common_query::Output;
use common_recordbatch::util;
+use frontend::instance::Instance;
use rstest_reuse::{self, template};
-use crate::instance::Instance;
use crate::tests::{
create_distributed_instance, create_standalone_instance, MockDistributedInstance,
MockStandaloneInstance,
|
test
|
move instances tests to "tests-integration" (#1573)
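A note on the test flow in the diff above: it reduces to deriving a region-to-datanode mapping from the table route and then giving each region a disjoint timestamp window before inserting. The following is a minimal, self-contained sketch of that flow; Peer and RegionRoute here are simplified stand-ins for illustration, not the actual GreptimeDB structs.

use std::collections::HashMap;

#[derive(Clone, Copy)]
struct Peer {
    id: u64,
}

struct RegionRoute {
    region_id: u32,
    leader_peer: Option<Peer>,
}

fn main() {
    // Pretend the table route reported four regions led by two datanodes.
    let region_routes = vec![
        RegionRoute { region_id: 0, leader_peer: Some(Peer { id: 1 }) },
        RegionRoute { region_id: 1, leader_peer: Some(Peer { id: 1 }) },
        RegionRoute { region_id: 2, leader_peer: Some(Peer { id: 2 }) },
        RegionRoute { region_id: 3, leader_peer: Some(Peer { id: 2 }) },
    ];

    // Build the region -> datanode mapping, mirroring the loop in the test.
    let mut region_to_datanode: HashMap<u32, u64> = HashMap::new();
    for route in ®ion_routes {
        let leader = route.leader_peer.as_ref().expect("every region needs a leader");
        region_to_datanode.insert(route.region_id, leader.id);
    }

    // Hand each region a disjoint timestamp window, like `global_start_ts` above.
    let regional_numbers = vec![(0u32, 0..5), (1, 10..15), (2, 30..35), (3, 100..105)];
    let mut global_start_ts: i64 = 1;
    for (region, numbers) in regional_numbers {
        let datanode = region_to_datanode[®ion];
        let start_ts = global_start_ts;
        global_start_ts += numbers.len() as i64;
        println!("region {region} -> datanode {datanode}, timestamps start at {start_ts}");
    }
}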
|
81da18e5dfcf84a2479ef21281c10911ca6d463a
|
2025-02-19 16:11:33
|
Ning Sun
|
refactor: use global type alias for pipeline input (#5568)
| false
|
diff --git a/src/pipeline/src/dispatcher.rs b/src/pipeline/src/dispatcher.rs
index a1c208e85094..909b1afa4282 100644
--- a/src/pipeline/src/dispatcher.rs
+++ b/src/pipeline/src/dispatcher.rs
@@ -12,8 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::collections::BTreeMap;
-
use common_telemetry::debug;
use snafu::OptionExt;
use yaml_rust::Yaml;
@@ -22,7 +20,7 @@ use crate::etl::error::{
Error, FieldRequiredForDispatcherSnafu, Result, TableSuffixRequiredForDispatcherRuleSnafu,
ValueRequiredForDispatcherRuleSnafu,
};
-use crate::Value;
+use crate::{PipelineMap, Value};
const FIELD: &str = "field";
const TABLE_SUFFIX: &str = "table_suffix";
@@ -111,7 +109,7 @@ impl TryFrom<&Yaml> for Dispatcher {
impl Dispatcher {
/// execute dispatcher and returns matched rule if any
- pub(crate) fn exec(&self, data: &BTreeMap<String, Value>) -> Option<&Rule> {
+ pub(crate) fn exec(&self, data: &PipelineMap) -> Option<&Rule> {
if let Some(value) = data.get(&self.field) {
for rule in &self.rules {
if rule.value == *value {
diff --git a/src/pipeline/src/etl.rs b/src/pipeline/src/etl.rs
index deee21d8bbd3..56ec4539a020 100644
--- a/src/pipeline/src/etl.rs
+++ b/src/pipeline/src/etl.rs
@@ -20,14 +20,13 @@ pub mod processor;
pub mod transform;
pub mod value;
-use std::collections::BTreeMap;
use std::sync::Arc;
use error::{
IntermediateKeyIndexSnafu, PrepareValueMustBeObjectSnafu, YamlLoadSnafu, YamlParseSnafu,
};
use itertools::Itertools;
-use processor::{IntermediateStatus, Processor, Processors};
+use processor::{Processor, Processors};
use snafu::{ensure, OptionExt, ResultExt};
use transform::{Transformer, Transforms};
use value::Value;
@@ -43,6 +42,8 @@ const TRANSFORM: &str = "transform";
const TRANSFORMS: &str = "transforms";
const DISPATCHER: &str = "dispatcher";
+pub type PipelineMap = std::collections::BTreeMap<String, Value>;
+
pub enum Content<'a> {
Json(&'a str),
Yaml(&'a str),
@@ -153,10 +154,10 @@ impl<O> PipelineExecOutput<O> {
}
}
-pub fn json_to_intermediate_state(val: serde_json::Value) -> Result<IntermediateStatus> {
+pub fn json_to_intermediate_state(val: serde_json::Value) -> Result<PipelineMap> {
match val {
serde_json::Value::Object(map) => {
- let mut intermediate_state = BTreeMap::new();
+ let mut intermediate_state = PipelineMap::new();
for (k, v) in map {
intermediate_state.insert(k, Value::try_from(v)?);
}
@@ -166,9 +167,7 @@ pub fn json_to_intermediate_state(val: serde_json::Value) -> Result<Intermediate
}
}
-pub fn json_array_to_intermediate_state(
- val: Vec<serde_json::Value>,
-) -> Result<Vec<IntermediateStatus>> {
+pub fn json_array_to_intermediate_state(val: Vec<serde_json::Value>) -> Result<Vec<PipelineMap>> {
val.into_iter().map(json_to_intermediate_state).collect()
}
@@ -176,10 +175,7 @@ impl<T> Pipeline<T>
where
T: Transformer,
{
- pub fn exec_mut(
- &self,
- val: &mut BTreeMap<String, Value>,
- ) -> Result<PipelineExecOutput<T::VecOutput>> {
+ pub fn exec_mut(&self, val: &mut PipelineMap) -> Result<PipelineExecOutput<T::VecOutput>> {
for processor in self.processors.iter() {
processor.exec_mut(val)?;
}
@@ -350,7 +346,7 @@ transform:
type: timestamp, ns
index: time"#;
let pipeline: Pipeline<GreptimeTransformer> = parse(&Content::Yaml(pipeline_str)).unwrap();
- let mut payload = BTreeMap::new();
+ let mut payload = PipelineMap::new();
payload.insert("message".to_string(), Value::String(message));
let result = pipeline
.exec_mut(&mut payload)
diff --git a/src/pipeline/src/etl/processor.rs b/src/pipeline/src/etl/processor.rs
index 005feca3794e..e09e5bdc05c2 100644
--- a/src/pipeline/src/etl/processor.rs
+++ b/src/pipeline/src/etl/processor.rs
@@ -27,8 +27,6 @@ pub mod regex;
pub mod timestamp;
pub mod urlencoding;
-use std::collections::BTreeMap;
-
use cmcd::CmcdProcessor;
use csv::CsvProcessor;
use date::DateProcessor;
@@ -51,8 +49,8 @@ use super::error::{
ProcessorMustBeMapSnafu, ProcessorMustHaveStringKeySnafu,
};
use super::field::{Field, Fields};
+use super::PipelineMap;
use crate::etl::error::{Error, Result};
-use crate::etl::value::Value;
use crate::etl_error::UnsupportedProcessorSnafu;
const FIELD_NAME: &str = "field";
@@ -66,8 +64,6 @@ const TARGET_FIELDS_NAME: &str = "target_fields";
const JSON_PATH_NAME: &str = "json_path";
const JSON_PATH_RESULT_INDEX_NAME: &str = "result_index";
-pub type IntermediateStatus = BTreeMap<String, Value>;
-
/// Processor trait defines the interface for all processors.
///
/// A processor is a transformation that can be applied to a field in a document
@@ -83,7 +79,7 @@ pub trait Processor: std::fmt::Debug + Send + Sync + 'static {
fn ignore_missing(&self) -> bool;
/// Execute the processor on a vector which be preprocessed by the pipeline
- fn exec_mut(&self, val: &mut IntermediateStatus) -> Result<()>;
+ fn exec_mut(&self, val: &mut PipelineMap) -> Result<()>;
}
#[derive(Debug)]
diff --git a/src/pipeline/src/etl/processor/cmcd.rs b/src/pipeline/src/etl/processor/cmcd.rs
index a5da69d0be42..18c6e71998bf 100644
--- a/src/pipeline/src/etl/processor/cmcd.rs
+++ b/src/pipeline/src/etl/processor/cmcd.rs
@@ -16,12 +16,9 @@
//!
//! Refer to [`CmcdProcessor`] for more information.
-use std::collections::BTreeMap;
-
use snafu::{OptionExt, ResultExt};
use urlencoding::decode;
-use super::IntermediateStatus;
use crate::etl::error::{
CmcdMissingKeySnafu, CmcdMissingValueSnafu, Error, FailedToParseFloatKeySnafu,
FailedToParseIntKeySnafu, KeyMustBeStringSnafu, ProcessorExpectStringSnafu,
@@ -33,6 +30,7 @@ use crate::etl::processor::{
IGNORE_MISSING_NAME,
};
use crate::etl::value::Value;
+use crate::etl::PipelineMap;
pub(crate) const PROCESSOR_CMCD: &str = "cmcd";
@@ -161,8 +159,8 @@ impl CmcdProcessor {
format!("{}_{}", prefix, key)
}
- fn parse(&self, name: &str, value: &str) -> Result<BTreeMap<String, Value>> {
- let mut working_set = BTreeMap::new();
+ fn parse(&self, name: &str, value: &str) -> Result<PipelineMap> {
+ let mut working_set = PipelineMap::new();
let parts = value.split(',');
@@ -251,7 +249,7 @@ impl Processor for CmcdProcessor {
self.ignore_missing
}
- fn exec_mut(&self, val: &mut IntermediateStatus) -> Result<()> {
+ fn exec_mut(&self, val: &mut PipelineMap) -> Result<()> {
for field in self.fields.iter() {
let name = field.input_field();
@@ -285,11 +283,9 @@ impl Processor for CmcdProcessor {
#[cfg(test)]
mod tests {
- use std::collections::BTreeMap;
-
use urlencoding::decode;
- use super::CmcdProcessor;
+ use super::*;
use crate::etl::field::{Field, Fields};
use crate::etl::value::Value;
@@ -436,7 +432,7 @@ mod tests {
let expected = vec
.into_iter()
.map(|(k, v)| (k.to_string(), v))
- .collect::<BTreeMap<String, Value>>();
+ .collect::<PipelineMap>();
let actual = processor.parse("prefix", &decoded).unwrap();
assert_eq!(actual, expected);
diff --git a/src/pipeline/src/etl/processor/csv.rs b/src/pipeline/src/etl/processor/csv.rs
index a0fac70de15c..2fe130c600cd 100644
--- a/src/pipeline/src/etl/processor/csv.rs
+++ b/src/pipeline/src/etl/processor/csv.rs
@@ -14,8 +14,6 @@
// Reference: https://www.elastic.co/guide/en/elasticsearch/reference/current/csv-processor.html
-use std::collections::BTreeMap;
-
use csv::{ReaderBuilder, Trim};
use itertools::EitherOrBoth::{Both, Left, Right};
use itertools::Itertools;
@@ -31,6 +29,7 @@ use crate::etl::processor::{
IGNORE_MISSING_NAME,
};
use crate::etl::value::Value;
+use crate::etl::PipelineMap;
pub(crate) const PROCESSOR_CSV: &str = "csv";
@@ -60,7 +59,7 @@ pub struct CsvProcessor {
impl CsvProcessor {
// process the csv format string to a map with target_fields as keys
- fn process(&self, val: &str) -> Result<BTreeMap<String, Value>> {
+ fn process(&self, val: &str) -> Result<PipelineMap> {
let mut reader = self.reader.from_reader(val.as_bytes());
if let Some(result) = reader.records().next() {
@@ -190,7 +189,7 @@ impl Processor for CsvProcessor {
self.ignore_missing
}
- fn exec_mut(&self, val: &mut BTreeMap<String, Value>) -> Result<()> {
+ fn exec_mut(&self, val: &mut PipelineMap) -> Result<()> {
for field in self.fields.iter() {
let name = field.input_field();
@@ -240,7 +239,7 @@ mod tests {
let result = processor.process("1,2").unwrap();
- let values = [
+ let values: PipelineMap = [
("a".into(), Value::String("1".into())),
("b".into(), Value::String("2".into())),
]
@@ -266,7 +265,7 @@ mod tests {
let result = processor.process("1,2").unwrap();
- let values = [
+ let values: PipelineMap = [
("a".into(), Value::String("1".into())),
("b".into(), Value::String("2".into())),
("c".into(), Value::Null),
@@ -291,7 +290,7 @@ mod tests {
let result = processor.process("1,2").unwrap();
- let values = [
+ let values: PipelineMap = [
("a".into(), Value::String("1".into())),
("b".into(), Value::String("2".into())),
("c".into(), Value::String("default".into())),
@@ -317,7 +316,7 @@ mod tests {
let result = processor.process("1,2").unwrap();
- let values = [
+ let values: PipelineMap = [
("a".into(), Value::String("1".into())),
("b".into(), Value::String("2".into())),
]
diff --git a/src/pipeline/src/etl/processor/date.rs b/src/pipeline/src/etl/processor/date.rs
index e080b795402c..0af04244239f 100644
--- a/src/pipeline/src/etl/processor/date.rs
+++ b/src/pipeline/src/etl/processor/date.rs
@@ -19,7 +19,6 @@ use chrono_tz::Tz;
use lazy_static::lazy_static;
use snafu::{OptionExt, ResultExt};
-use super::IntermediateStatus;
use crate::etl::error::{
DateFailedToGetLocalTimezoneSnafu, DateFailedToGetTimestampSnafu, DateParseSnafu,
DateParseTimezoneSnafu, Error, KeyMustBeStringSnafu, ProcessorExpectStringSnafu,
@@ -31,6 +30,7 @@ use crate::etl::processor::{
FIELD_NAME, IGNORE_MISSING_NAME,
};
use crate::etl::value::{Timestamp, Value};
+use crate::etl::PipelineMap;
pub(crate) const PROCESSOR_DATE: &str = "date";
@@ -194,7 +194,7 @@ impl Processor for DateProcessor {
self.ignore_missing
}
- fn exec_mut(&self, val: &mut IntermediateStatus) -> Result<()> {
+ fn exec_mut(&self, val: &mut PipelineMap) -> Result<()> {
for field in self.fields.iter() {
let index = field.input_field();
match val.get(index) {
diff --git a/src/pipeline/src/etl/processor/decolorize.rs b/src/pipeline/src/etl/processor/decolorize.rs
index 2547b99d6824..fa70f4a28876 100644
--- a/src/pipeline/src/etl/processor/decolorize.rs
+++ b/src/pipeline/src/etl/processor/decolorize.rs
@@ -22,7 +22,6 @@ use once_cell::sync::Lazy;
use regex::Regex;
use snafu::OptionExt;
-use super::IntermediateStatus;
use crate::etl::error::{
Error, KeyMustBeStringSnafu, ProcessorExpectStringSnafu, ProcessorMissingFieldSnafu, Result,
};
@@ -31,6 +30,7 @@ use crate::etl::processor::{
yaml_bool, yaml_new_field, yaml_new_fields, FIELDS_NAME, FIELD_NAME, IGNORE_MISSING_NAME,
};
use crate::etl::value::Value;
+use crate::etl::PipelineMap;
pub(crate) const PROCESSOR_DECOLORIZE: &str = "decolorize";
@@ -102,7 +102,7 @@ impl crate::etl::processor::Processor for DecolorizeProcessor {
self.ignore_missing
}
- fn exec_mut(&self, val: &mut IntermediateStatus) -> Result<()> {
+ fn exec_mut(&self, val: &mut PipelineMap) -> Result<()> {
for field in self.fields.iter() {
let index = field.input_field();
match val.get(index) {
diff --git a/src/pipeline/src/etl/processor/digest.rs b/src/pipeline/src/etl/processor/digest.rs
index 64bb2a2f6d8a..b93af08d3c5d 100644
--- a/src/pipeline/src/etl/processor/digest.rs
+++ b/src/pipeline/src/etl/processor/digest.rs
@@ -24,7 +24,6 @@ use std::borrow::Cow;
use regex::Regex;
use snafu::OptionExt;
-use super::IntermediateStatus;
use crate::etl::error::{
Error, KeyMustBeStringSnafu, ProcessorExpectStringSnafu, ProcessorMissingFieldSnafu, Result,
};
@@ -33,6 +32,7 @@ use crate::etl::processor::{
yaml_bool, yaml_new_field, yaml_new_fields, FIELDS_NAME, FIELD_NAME, IGNORE_MISSING_NAME,
};
use crate::etl::value::Value;
+use crate::etl::PipelineMap;
use crate::etl_error::DigestPatternInvalidSnafu;
pub(crate) const PROCESSOR_DIGEST: &str = "digest";
@@ -201,7 +201,7 @@ impl crate::etl::processor::Processor for DigestProcessor {
self.ignore_missing
}
- fn exec_mut(&self, val: &mut IntermediateStatus) -> Result<()> {
+ fn exec_mut(&self, val: &mut PipelineMap) -> Result<()> {
for field in self.fields.iter() {
let index = field.input_field();
match val.get(index) {
diff --git a/src/pipeline/src/etl/processor/dissect.rs b/src/pipeline/src/etl/processor/dissect.rs
index 9ac28f7bf09e..2a41d75923cc 100644
--- a/src/pipeline/src/etl/processor/dissect.rs
+++ b/src/pipeline/src/etl/processor/dissect.rs
@@ -18,7 +18,6 @@ use ahash::{HashMap, HashMapExt, HashSet, HashSetExt};
use itertools::Itertools;
use snafu::OptionExt;
-use super::IntermediateStatus;
use crate::etl::error::{
DissectAppendOrderAlreadySetSnafu, DissectConsecutiveNamesSnafu, DissectEmptyPatternSnafu,
DissectEndModifierAlreadySetSnafu, DissectInvalidPatternSnafu, DissectModifierAlreadySetSnafu,
@@ -32,6 +31,7 @@ use crate::etl::processor::{
Processor, FIELDS_NAME, FIELD_NAME, IGNORE_MISSING_NAME, PATTERNS_NAME, PATTERN_NAME,
};
use crate::etl::value::Value;
+use crate::etl::PipelineMap;
pub(crate) const PROCESSOR_DISSECT: &str = "dissect";
@@ -601,7 +601,7 @@ impl Processor for DissectProcessor {
self.ignore_missing
}
- fn exec_mut(&self, val: &mut IntermediateStatus) -> Result<()> {
+ fn exec_mut(&self, val: &mut PipelineMap) -> Result<()> {
for field in self.fields.iter() {
let index = field.input_field();
match val.get(index) {
diff --git a/src/pipeline/src/etl/processor/epoch.rs b/src/pipeline/src/etl/processor/epoch.rs
index 29ad6bd3d97d..da638def9b80 100644
--- a/src/pipeline/src/etl/processor/epoch.rs
+++ b/src/pipeline/src/etl/processor/epoch.rs
@@ -14,7 +14,6 @@
use snafu::{OptionExt, ResultExt};
-use super::IntermediateStatus;
use crate::etl::error::{
EpochInvalidResolutionSnafu, Error, FailedToParseIntSnafu, KeyMustBeStringSnafu,
ProcessorMissingFieldSnafu, ProcessorUnsupportedValueSnafu, Result,
@@ -30,6 +29,7 @@ use crate::etl::value::time::{
SEC_RESOLUTION, S_RESOLUTION, US_RESOLUTION,
};
use crate::etl::value::{Timestamp, Value};
+use crate::etl::PipelineMap;
pub(crate) const PROCESSOR_EPOCH: &str = "epoch";
const RESOLUTION_NAME: &str = "resolution";
@@ -163,7 +163,7 @@ impl Processor for EpochProcessor {
self.ignore_missing
}
- fn exec_mut(&self, val: &mut IntermediateStatus) -> Result<()> {
+ fn exec_mut(&self, val: &mut PipelineMap) -> Result<()> {
for field in self.fields.iter() {
let index = field.input_field();
match val.get(index) {
diff --git a/src/pipeline/src/etl/processor/gsub.rs b/src/pipeline/src/etl/processor/gsub.rs
index 7f0f601f44f3..8950b418df9e 100644
--- a/src/pipeline/src/etl/processor/gsub.rs
+++ b/src/pipeline/src/etl/processor/gsub.rs
@@ -15,7 +15,6 @@
use regex::Regex;
use snafu::{OptionExt, ResultExt};
-use super::IntermediateStatus;
use crate::etl::error::{
Error, GsubPatternRequiredSnafu, GsubReplacementRequiredSnafu, KeyMustBeStringSnafu,
ProcessorExpectStringSnafu, ProcessorMissingFieldSnafu, RegexSnafu, Result,
@@ -26,6 +25,7 @@ use crate::etl::processor::{
IGNORE_MISSING_NAME, PATTERN_NAME,
};
use crate::etl::value::Value;
+use crate::etl::PipelineMap;
pub(crate) const PROCESSOR_GSUB: &str = "gsub";
@@ -118,7 +118,7 @@ impl crate::etl::processor::Processor for GsubProcessor {
self.ignore_missing
}
- fn exec_mut(&self, val: &mut IntermediateStatus) -> Result<()> {
+ fn exec_mut(&self, val: &mut PipelineMap) -> Result<()> {
for field in self.fields.iter() {
let index = field.input_field();
match val.get(index) {
diff --git a/src/pipeline/src/etl/processor/join.rs b/src/pipeline/src/etl/processor/join.rs
index 72fafdbf7dd1..64cf6d425b01 100644
--- a/src/pipeline/src/etl/processor/join.rs
+++ b/src/pipeline/src/etl/processor/join.rs
@@ -14,7 +14,6 @@
use snafu::OptionExt;
-use super::IntermediateStatus;
use crate::etl::error::{
Error, JoinSeparatorRequiredSnafu, KeyMustBeStringSnafu, ProcessorExpectStringSnafu,
ProcessorMissingFieldSnafu, Result,
@@ -25,6 +24,7 @@ use crate::etl::processor::{
IGNORE_MISSING_NAME, SEPARATOR_NAME,
};
use crate::etl::value::{Array, Value};
+use crate::etl::PipelineMap;
pub(crate) const PROCESSOR_JOIN: &str = "join";
@@ -95,7 +95,7 @@ impl Processor for JoinProcessor {
self.ignore_missing
}
- fn exec_mut(&self, val: &mut IntermediateStatus) -> Result<()> {
+ fn exec_mut(&self, val: &mut PipelineMap) -> Result<()> {
for field in self.fields.iter() {
let index = field.input_field();
match val.get(index) {
diff --git a/src/pipeline/src/etl/processor/json_path.rs b/src/pipeline/src/etl/processor/json_path.rs
index 92916263e4e9..6b0e97f44824 100644
--- a/src/pipeline/src/etl/processor/json_path.rs
+++ b/src/pipeline/src/etl/processor/json_path.rs
@@ -16,8 +16,8 @@ use jsonpath_rust::JsonPath;
use snafu::{OptionExt, ResultExt};
use super::{
- yaml_bool, yaml_new_field, yaml_new_fields, yaml_string, IntermediateStatus, Processor,
- FIELDS_NAME, FIELD_NAME, IGNORE_MISSING_NAME, JSON_PATH_NAME, JSON_PATH_RESULT_INDEX_NAME,
+ yaml_bool, yaml_new_field, yaml_new_fields, yaml_string, PipelineMap, Processor, FIELDS_NAME,
+ FIELD_NAME, IGNORE_MISSING_NAME, JSON_PATH_NAME, JSON_PATH_RESULT_INDEX_NAME,
};
use crate::etl::error::{Error, Result};
use crate::etl::field::Fields;
@@ -126,7 +126,7 @@ impl Processor for JsonPathProcessor {
self.ignore_missing
}
- fn exec_mut(&self, val: &mut IntermediateStatus) -> Result<()> {
+ fn exec_mut(&self, val: &mut PipelineMap) -> Result<()> {
for field in self.fields.iter() {
let index = field.input_field();
match val.get(index) {
diff --git a/src/pipeline/src/etl/processor/letter.rs b/src/pipeline/src/etl/processor/letter.rs
index 960521853e48..1d4d248b8796 100644
--- a/src/pipeline/src/etl/processor/letter.rs
+++ b/src/pipeline/src/etl/processor/letter.rs
@@ -14,7 +14,6 @@
use snafu::OptionExt;
-use super::IntermediateStatus;
use crate::etl::error::{
Error, KeyMustBeStringSnafu, LetterInvalidMethodSnafu, ProcessorExpectStringSnafu,
ProcessorMissingFieldSnafu, Result,
@@ -25,6 +24,7 @@ use crate::etl::processor::{
IGNORE_MISSING_NAME, METHOD_NAME,
};
use crate::etl::value::Value;
+use crate::etl::PipelineMap;
pub(crate) const PROCESSOR_LETTER: &str = "letter";
@@ -126,7 +126,7 @@ impl Processor for LetterProcessor {
self.ignore_missing
}
- fn exec_mut(&self, val: &mut IntermediateStatus) -> Result<()> {
+ fn exec_mut(&self, val: &mut PipelineMap) -> Result<()> {
for field in self.fields.iter() {
let index = field.input_field();
match val.get(index) {
diff --git a/src/pipeline/src/etl/processor/regex.rs b/src/pipeline/src/etl/processor/regex.rs
index 27f30f65d9ae..a08b944725af 100644
--- a/src/pipeline/src/etl/processor/regex.rs
+++ b/src/pipeline/src/etl/processor/regex.rs
@@ -18,13 +18,10 @@ const PATTERNS_NAME: &str = "patterns";
pub(crate) const PROCESSOR_REGEX: &str = "regex";
-use std::collections::BTreeMap;
-
use lazy_static::lazy_static;
use regex::Regex;
use snafu::{OptionExt, ResultExt};
-use super::IntermediateStatus;
use crate::etl::error::{
Error, KeyMustBeStringSnafu, ProcessorExpectStringSnafu, ProcessorMissingFieldSnafu,
RegexNamedGroupNotFoundSnafu, RegexNoValidFieldSnafu, RegexNoValidPatternSnafu, RegexSnafu,
@@ -36,6 +33,7 @@ use crate::etl::processor::{
FIELD_NAME, IGNORE_MISSING_NAME, PATTERN_NAME,
};
use crate::etl::value::Value;
+use crate::etl::PipelineMap;
lazy_static! {
static ref GROUPS_NAME_REGEX: Regex = Regex::new(r"\(\?P?<([[:word:]]+)>.+?\)").unwrap();
@@ -169,8 +167,8 @@ impl RegexProcessor {
Ok(())
}
- fn process(&self, prefix: &str, val: &str) -> Result<BTreeMap<String, Value>> {
- let mut result = BTreeMap::new();
+ fn process(&self, prefix: &str, val: &str) -> Result<PipelineMap> {
+ let mut result = PipelineMap::new();
for gr in self.patterns.iter() {
if let Some(captures) = gr.regex.captures(val) {
for group in gr.groups.iter() {
@@ -194,7 +192,7 @@ impl Processor for RegexProcessor {
self.ignore_missing
}
- fn exec_mut(&self, val: &mut IntermediateStatus) -> Result<()> {
+ fn exec_mut(&self, val: &mut PipelineMap) -> Result<()> {
for field in self.fields.iter() {
let index = field.input_field();
let prefix = field.target_or_input_field();
@@ -227,11 +225,10 @@ impl Processor for RegexProcessor {
}
#[cfg(test)]
mod tests {
- use std::collections::BTreeMap;
-
use ahash::{HashMap, HashMapExt};
use itertools::Itertools;
+ use super::*;
use crate::etl::processor::regex::RegexProcessor;
use crate::etl::value::{Map, Value};
@@ -272,7 +269,7 @@ ignore_missing: false"#;
let cw = "[c=w,n=US_CA_SANJOSE,o=55155]";
let breadcrumbs_str = [cc, cg, co, cp, cw].iter().join(",");
- let temporary_map: BTreeMap<String, Value> = [
+ let temporary_map: PipelineMap = [
("breadcrumbs_parent", Value::String(cc.to_string())),
("breadcrumbs_edge", Value::String(cg.to_string())),
("breadcrumbs_origin", Value::String(co.to_string())),
diff --git a/src/pipeline/src/etl/processor/timestamp.rs b/src/pipeline/src/etl/processor/timestamp.rs
index bf90e78f2165..a4d215ed49bf 100644
--- a/src/pipeline/src/etl/processor/timestamp.rs
+++ b/src/pipeline/src/etl/processor/timestamp.rs
@@ -19,7 +19,6 @@ use chrono_tz::Tz;
use lazy_static::lazy_static;
use snafu::{OptionExt, ResultExt};
-use super::IntermediateStatus;
use crate::etl::error::{
DateFailedToGetLocalTimezoneSnafu, DateFailedToGetTimestampSnafu, DateInvalidFormatSnafu,
DateParseSnafu, DateParseTimezoneSnafu, EpochInvalidResolutionSnafu, Error,
@@ -37,6 +36,7 @@ use crate::etl::value::time::{
SEC_RESOLUTION, S_RESOLUTION, US_RESOLUTION,
};
use crate::etl::value::{Timestamp, Value};
+use crate::etl::PipelineMap;
pub(crate) const PROCESSOR_TIMESTAMP: &str = "timestamp";
const RESOLUTION_NAME: &str = "resolution";
@@ -298,7 +298,7 @@ impl Processor for TimestampProcessor {
self.ignore_missing
}
- fn exec_mut(&self, val: &mut IntermediateStatus) -> Result<()> {
+ fn exec_mut(&self, val: &mut PipelineMap) -> Result<()> {
for field in self.fields.iter() {
let index = field.input_field();
match val.get(index) {
diff --git a/src/pipeline/src/etl/processor/urlencoding.rs b/src/pipeline/src/etl/processor/urlencoding.rs
index c14c7d87b11f..33d3f521a167 100644
--- a/src/pipeline/src/etl/processor/urlencoding.rs
+++ b/src/pipeline/src/etl/processor/urlencoding.rs
@@ -12,8 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::collections::BTreeMap;
-
use snafu::{OptionExt, ResultExt};
use urlencoding::{decode, encode};
@@ -27,6 +25,7 @@ use crate::etl::processor::{
IGNORE_MISSING_NAME, METHOD_NAME,
};
use crate::etl::value::Value;
+use crate::PipelineMap;
pub(crate) const PROCESSOR_URL_ENCODING: &str = "urlencoding";
@@ -127,7 +126,7 @@ impl crate::etl::processor::Processor for UrlEncodingProcessor {
self.ignore_missing
}
- fn exec_mut(&self, val: &mut BTreeMap<String, Value>) -> Result<()> {
+ fn exec_mut(&self, val: &mut PipelineMap) -> Result<()> {
for field in self.fields.iter() {
let index = field.input_field();
match val.get(index) {
diff --git a/src/pipeline/src/etl/transform.rs b/src/pipeline/src/etl/transform.rs
index a61444d9458f..14cfa440fb51 100644
--- a/src/pipeline/src/etl/transform.rs
+++ b/src/pipeline/src/etl/transform.rs
@@ -15,8 +15,6 @@
pub mod index;
pub mod transformer;
-use std::collections::BTreeMap;
-
use snafu::OptionExt;
use crate::etl::error::{Error, Result};
@@ -39,6 +37,7 @@ use super::error::{
use super::field::Fields;
use super::processor::{yaml_new_field, yaml_new_fields, yaml_string};
use super::value::Timestamp;
+use super::PipelineMap;
pub trait Transformer: std::fmt::Debug + Sized + Send + Sync + 'static {
type Output;
@@ -48,7 +47,7 @@ pub trait Transformer: std::fmt::Debug + Sized + Send + Sync + 'static {
fn schemas(&self) -> &Vec<greptime_proto::v1::ColumnSchema>;
fn transforms(&self) -> &Transforms;
fn transforms_mut(&mut self) -> &mut Transforms;
- fn transform_mut(&self, val: &mut BTreeMap<String, Value>) -> Result<Self::VecOutput>;
+ fn transform_mut(&self, val: &mut PipelineMap) -> Result<Self::VecOutput>;
}
/// On Failure behavior when transform fails
diff --git a/src/pipeline/src/etl/transform/transformer/greptime.rs b/src/pipeline/src/etl/transform/transformer/greptime.rs
index 621acc758162..0211e67db15c 100644
--- a/src/pipeline/src/etl/transform/transformer/greptime.rs
+++ b/src/pipeline/src/etl/transform/transformer/greptime.rs
@@ -14,7 +14,7 @@
pub mod coerce;
-use std::collections::{BTreeMap, HashSet};
+use std::collections::HashSet;
use std::sync::Arc;
use ahash::{HashMap, HashMapExt};
@@ -34,10 +34,10 @@ use crate::etl::error::{
UnsupportedNumberTypeSnafu,
};
use crate::etl::field::{Field, Fields};
-use crate::etl::processor::IntermediateStatus;
use crate::etl::transform::index::Index;
use crate::etl::transform::{Transform, Transformer, Transforms};
use crate::etl::value::{Timestamp, Value};
+use crate::etl::PipelineMap;
const DEFAULT_GREPTIME_TIMESTAMP_COLUMN: &str = "greptime_timestamp";
const DEFAULT_MAX_NESTED_LEVELS_FOR_JSON_FLATTENING: usize = 10;
@@ -178,7 +178,7 @@ impl Transformer for GreptimeTransformer {
}
}
- fn transform_mut(&self, val: &mut IntermediateStatus) -> Result<Self::VecOutput> {
+ fn transform_mut(&self, val: &mut PipelineMap) -> Result<Self::VecOutput> {
let mut values = vec![GreptimeValue { value_data: None }; self.schema.len()];
let mut output_index = 0;
for transform in self.transforms.iter() {
@@ -327,7 +327,7 @@ fn resolve_number_schema(
)
}
-fn values_to_row(schema_info: &mut SchemaInfo, values: BTreeMap<String, Value>) -> Result<Row> {
+fn values_to_row(schema_info: &mut SchemaInfo, values: PipelineMap) -> Result<Row> {
let mut row: Vec<GreptimeValue> = Vec::with_capacity(schema_info.schema.len());
for _ in 0..schema_info.schema.len() {
row.push(GreptimeValue { value_data: None });
@@ -513,7 +513,7 @@ fn values_to_row(schema_info: &mut SchemaInfo, values: BTreeMap<String, Value>)
}
fn identity_pipeline_inner<'a>(
- array: Vec<BTreeMap<String, Value>>,
+ array: Vec<PipelineMap>,
tag_column_names: Option<impl Iterator<Item = &'a String>>,
_params: &GreptimePipelineParams,
) -> Result<Rows> {
@@ -569,7 +569,7 @@ fn identity_pipeline_inner<'a>(
/// 4. The pipeline will return an error if the same column datatype is mismatched
/// 5. The pipeline will analyze the schema of each json record and merge them to get the final schema.
pub fn identity_pipeline(
- array: Vec<BTreeMap<String, Value>>,
+ array: Vec<PipelineMap>,
table: Option<Arc<table::Table>>,
params: &GreptimePipelineParams,
) -> Result<Rows> {
@@ -577,7 +577,7 @@ pub fn identity_pipeline(
array
.into_iter()
.map(|item| flatten_object(item, DEFAULT_MAX_NESTED_LEVELS_FOR_JSON_FLATTENING))
- .collect::<Result<Vec<BTreeMap<String, Value>>>>()?
+ .collect::<Result<Vec<PipelineMap>>>()?
} else {
array
};
@@ -596,11 +596,8 @@ pub fn identity_pipeline(
///
/// The `max_nested_levels` parameter is used to limit the nested levels of the JSON object.
/// The error will be returned if the nested levels is greater than the `max_nested_levels`.
-pub fn flatten_object(
- object: BTreeMap<String, Value>,
- max_nested_levels: usize,
-) -> Result<BTreeMap<String, Value>> {
- let mut flattened = BTreeMap::new();
+pub fn flatten_object(object: PipelineMap, max_nested_levels: usize) -> Result<PipelineMap> {
+ let mut flattened = PipelineMap::new();
if !object.is_empty() {
// it will use recursion to flatten the object.
@@ -611,9 +608,9 @@ pub fn flatten_object(
}
fn do_flatten_object(
- dest: &mut BTreeMap<String, Value>,
+ dest: &mut PipelineMap,
base: Option<&str>,
- object: BTreeMap<String, Value>,
+ object: PipelineMap,
current_level: usize,
max_nested_levels: usize,
) -> Result<()> {
diff --git a/src/pipeline/src/etl/value.rs b/src/pipeline/src/etl/value.rs
index b007e665134c..124d598d9b77 100644
--- a/src/pipeline/src/etl/value.rs
+++ b/src/pipeline/src/etl/value.rs
@@ -16,8 +16,6 @@ pub mod array;
pub mod map;
pub mod time;
-use std::collections::BTreeMap;
-
pub use array::Array;
use jsonb::{Number as JsonbNumber, Object as JsonbObject, Value as JsonbValue};
use jsonpath_rust::path::{JsonLike, Path};
@@ -32,6 +30,7 @@ use super::error::{
ValueParseFloatSnafu, ValueParseIntSnafu, ValueParseTypeSnafu, ValueUnsupportedNumberTypeSnafu,
ValueUnsupportedYamlTypeSnafu, ValueYamlKeyMustBeStringSnafu,
};
+use super::PipelineMap;
use crate::etl::error::{Error, Result};
/// Value can be used as type
@@ -347,7 +346,7 @@ impl TryFrom<serde_json::Value> for Value {
Ok(Value::Array(Array { values }))
}
serde_json::Value::Object(v) => {
- let mut values = BTreeMap::new();
+ let mut values = PipelineMap::new();
for (k, v) in v {
values.insert(k, Value::try_from(v)?);
}
@@ -378,7 +377,7 @@ impl TryFrom<&yaml_rust::Yaml> for Value {
Ok(Value::Array(Array { values }))
}
yaml_rust::Yaml::Hash(v) => {
- let mut values = BTreeMap::new();
+ let mut values = PipelineMap::new();
for (k, v) in v {
let key = k
.as_str()
@@ -458,7 +457,7 @@ impl From<Value> for JsonbValue<'_> {
}
Value::Map(obj) => {
let mut map = JsonbObject::new();
- for (k, v) in obj.into_iter() {
+ for (k, v) in obj.values.into_iter() {
let val: JsonbValue = v.into();
map.insert(k, val);
}
diff --git a/src/pipeline/src/etl/value/map.rs b/src/pipeline/src/etl/value/map.rs
index 9e730ef532d8..b406a69343c6 100644
--- a/src/pipeline/src/etl/value/map.rs
+++ b/src/pipeline/src/etl/value/map.rs
@@ -12,15 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::collections::BTreeMap;
-
-use ahash::HashMap;
-
use crate::etl::value::Value;
+use crate::PipelineMap;
#[derive(Debug, Clone, PartialEq, Default)]
pub struct Map {
- pub values: BTreeMap<String, Value>,
+ pub values: PipelineMap,
}
impl Map {
@@ -39,24 +36,14 @@ impl Map {
}
}
-impl From<HashMap<String, Value>> for Map {
- fn from(values: HashMap<String, Value>) -> Self {
- let mut map = Map::default();
- for (k, v) in values.into_iter() {
- map.insert(k, v);
- }
- map
- }
-}
-
-impl From<BTreeMap<String, Value>> for Map {
- fn from(values: BTreeMap<String, Value>) -> Self {
+impl From<PipelineMap> for Map {
+ fn from(values: PipelineMap) -> Self {
Self { values }
}
}
impl std::ops::Deref for Map {
- type Target = BTreeMap<String, Value>;
+ type Target = PipelineMap;
fn deref(&self) -> &Self::Target {
&self.values
@@ -69,16 +56,6 @@ impl std::ops::DerefMut for Map {
}
}
-impl std::iter::IntoIterator for Map {
- type Item = (String, Value);
-
- type IntoIter = std::collections::btree_map::IntoIter<String, Value>;
-
- fn into_iter(self) -> Self::IntoIter {
- self.values.into_iter()
- }
-}
-
impl std::fmt::Display for Map {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
let values = self
diff --git a/src/pipeline/src/lib.rs b/src/pipeline/src/lib.rs
index a6c82f9353cf..2b358c45728b 100644
--- a/src/pipeline/src/lib.rs
+++ b/src/pipeline/src/lib.rs
@@ -25,8 +25,8 @@ pub use etl::transform::{GreptimeTransformer, Transformer};
pub use etl::value::{Array, Map, Value};
pub use etl::{
error as etl_error, json_array_to_intermediate_state, json_to_intermediate_state, parse,
- Content, DispatchedTo, Pipeline, PipelineDefinition, PipelineExecOutput, PipelineWay,
- SelectInfo, GREPTIME_INTERNAL_IDENTITY_PIPELINE_NAME,
+ Content, DispatchedTo, Pipeline, PipelineDefinition, PipelineExecOutput, PipelineMap,
+ PipelineWay, SelectInfo, GREPTIME_INTERNAL_IDENTITY_PIPELINE_NAME,
};
pub use manager::{
error, pipeline_operator, table, util, PipelineInfo, PipelineRef, PipelineTableRef,
diff --git a/src/servers/src/pipeline.rs b/src/servers/src/pipeline.rs
index 27c4d2757aa5..e952e4ba8ac3 100644
--- a/src/servers/src/pipeline.rs
+++ b/src/servers/src/pipeline.rs
@@ -18,7 +18,7 @@ use std::sync::Arc;
use api::v1::{RowInsertRequest, Rows};
use pipeline::{
DispatchedTo, GreptimePipelineParams, GreptimeTransformer, Pipeline, PipelineDefinition,
- PipelineExecOutput, GREPTIME_INTERNAL_IDENTITY_PIPELINE_NAME,
+ PipelineExecOutput, PipelineMap, GREPTIME_INTERNAL_IDENTITY_PIPELINE_NAME,
};
use session::context::QueryContextRef;
use snafu::ResultExt;
@@ -52,7 +52,7 @@ pub(crate) async fn run_pipeline(
state: &PipelineHandlerRef,
pipeline_definition: PipelineDefinition,
pipeline_parameters: &GreptimePipelineParams,
- array: Vec<BTreeMap<String, pipeline::Value>>,
+ array: Vec<PipelineMap>,
table_name: String,
query_ctx: &QueryContextRef,
is_top_level: bool,
@@ -81,8 +81,7 @@ pub(crate) async fn run_pipeline(
let transform_timer = std::time::Instant::now();
let mut transformed = Vec::with_capacity(array.len());
- let mut dispatched: BTreeMap<DispatchedTo, Vec<BTreeMap<String, pipeline::Value>>> =
- BTreeMap::new();
+ let mut dispatched: BTreeMap<DispatchedTo, Vec<PipelineMap>> = BTreeMap::new();
for mut values in array {
let r = pipeline
|
refactor
|
use global type alias for pipeline input (#5568)
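The refactor above collapses the repeated `BTreeMap<String, Value>` signatures into a single `PipelineMap` alias that every processor's `exec_mut` accepts. Below is a minimal sketch of that shape; the `Value` enum and `UppercaseProcessor` are toy stand-ins for illustration, not the real pipeline crate API.

use std::collections::BTreeMap;

#[derive(Debug, Clone, PartialEq)]
enum Value {
    String(String),
    Int(i64),
}

// One named alias for the intermediate state, as in the diff above.
type PipelineMap = BTreeMap<String, Value>;

// A toy processor that uppercases one field in place, mirroring the
// `exec_mut(&self, val: &mut PipelineMap)` shape shared by the real processors.
struct UppercaseProcessor {
    field: String,
}

impl UppercaseProcessor {
    fn exec_mut(&self, val: &mut PipelineMap) -> Result<(), String> {
        let upper = match val.get(&self.field) {
            Some(Value::String(s)) => s.to_uppercase(),
            Some(_) => return Err(format!("field `{}` is not a string", self.field)),
            None => return Err(format!("field `{}` is missing", self.field)),
        };
        val.insert(self.field.clone(), Value::String(upper));
        Ok(())
    }
}

fn main() {
    let mut payload = PipelineMap::new();
    payload.insert("message".to_string(), Value::String("hello".to_string()));
    payload.insert("status".to_string(), Value::Int(200));

    let processor = UppercaseProcessor { field: "message".to_string() };
    processor.exec_mut(&mut payload).unwrap();

    assert_eq!(
        payload.get("message"),
        Some(&Value::String("HELLO".to_string()))
    );
}

One practical benefit of such an alias is that swapping the underlying map type later only touches the single `type` definition instead of every processor signature.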
|
96b2a5fb289e6bc152715a3fd8412d9f0e1d1c5c
|
2025-01-06 13:03:35
|
Weny Xu
|
feat: introduce `ParallelFstValuesMapper` (#5276)
| false
|
diff --git a/src/common/base/Cargo.toml b/src/common/base/Cargo.toml
index 2d35ad5d31ad..44eb5535ea0b 100644
--- a/src/common/base/Cargo.toml
+++ b/src/common/base/Cargo.toml
@@ -4,6 +4,9 @@ version.workspace = true
edition.workspace = true
license.workspace = true
+[features]
+testing = []
+
[lints]
workspace = true
diff --git a/src/common/base/src/range_read.rs b/src/common/base/src/range_read.rs
index 53c26eeebdee..fb0fc61fb036 100644
--- a/src/common/base/src/range_read.rs
+++ b/src/common/base/src/range_read.rs
@@ -17,6 +17,7 @@ use std::io;
use std::ops::Range;
use std::path::Path;
use std::pin::Pin;
+use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;
use std::task::{Context, Poll};
@@ -33,19 +34,22 @@ pub struct Metadata {
pub content_length: u64,
}
-/// `RangeReader` reads a range of bytes from a source.
-#[async_trait]
-pub trait RangeReader: Send + Unpin {
+/// `SizeAwareRangeReader` is a `RangeReader` that supports setting a file size hint.
+pub trait SizeAwareRangeReader: RangeReader {
/// Sets the file size hint for the reader.
///
/// It's used to optimize the reading process by reducing the number of remote requests.
fn with_file_size_hint(&mut self, file_size_hint: u64);
+}
+/// `RangeReader` reads a range of bytes from a source.
+#[async_trait]
+pub trait RangeReader: Sync + Send + Unpin {
/// Returns the metadata of the source.
- async fn metadata(&mut self) -> io::Result<Metadata>;
+ async fn metadata(&self) -> io::Result<Metadata>;
/// Reads the bytes in the given range.
- async fn read(&mut self, range: Range<u64>) -> io::Result<Bytes>;
+ async fn read(&self, range: Range<u64>) -> io::Result<Bytes>;
/// Reads the bytes in the given range into the buffer.
///
@@ -53,18 +57,14 @@ pub trait RangeReader: Send + Unpin {
/// - If the buffer is insufficient to hold the bytes, it will either:
/// - Allocate additional space (e.g., for `Vec<u8>`)
/// - Panic (e.g., for `&mut [u8]`)
- async fn read_into(
- &mut self,
- range: Range<u64>,
- buf: &mut (impl BufMut + Send),
- ) -> io::Result<()> {
+ async fn read_into(&self, range: Range<u64>, buf: &mut (impl BufMut + Send)) -> io::Result<()> {
let bytes = self.read(range).await?;
buf.put_slice(&bytes);
Ok(())
}
/// Reads the bytes in the given ranges.
- async fn read_vec(&mut self, ranges: &[Range<u64>]) -> io::Result<Vec<Bytes>> {
+ async fn read_vec(&self, ranges: &[Range<u64>]) -> io::Result<Vec<Bytes>> {
let mut result = Vec::with_capacity(ranges.len());
for range in ranges {
result.push(self.read(range.clone()).await?);
@@ -74,25 +74,20 @@ pub trait RangeReader: Send + Unpin {
}
#[async_trait]
-impl<R: ?Sized + RangeReader> RangeReader for &mut R {
- fn with_file_size_hint(&mut self, file_size_hint: u64) {
- (*self).with_file_size_hint(file_size_hint)
- }
-
- async fn metadata(&mut self) -> io::Result<Metadata> {
+impl<R: ?Sized + RangeReader> RangeReader for &R {
+ async fn metadata(&self) -> io::Result<Metadata> {
(*self).metadata().await
}
- async fn read(&mut self, range: Range<u64>) -> io::Result<Bytes> {
+
+ async fn read(&self, range: Range<u64>) -> io::Result<Bytes> {
(*self).read(range).await
}
- async fn read_into(
- &mut self,
- range: Range<u64>,
- buf: &mut (impl BufMut + Send),
- ) -> io::Result<()> {
+
+ async fn read_into(&self, range: Range<u64>, buf: &mut (impl BufMut + Send)) -> io::Result<()> {
(*self).read_into(range, buf).await
}
- async fn read_vec(&mut self, ranges: &[Range<u64>]) -> io::Result<Vec<Bytes>> {
+
+ async fn read_vec(&self, ranges: &[Range<u64>]) -> io::Result<Vec<Bytes>> {
(*self).read_vec(ranges).await
}
}
@@ -120,7 +115,7 @@ pub struct AsyncReadAdapter<R> {
impl<R: RangeReader + 'static> AsyncReadAdapter<R> {
pub async fn new(inner: R) -> io::Result<Self> {
- let mut inner = inner;
+ let inner = inner;
let metadata = inner.metadata().await?;
Ok(AsyncReadAdapter {
inner: Arc::new(Mutex::new(inner)),
@@ -160,7 +155,7 @@ impl<R: RangeReader + 'static> AsyncRead for AsyncReadAdapter<R> {
let range = *this.position..(*this.position + size);
let inner = this.inner.clone();
let fut = async move {
- let mut inner = inner.lock().await;
+ let inner = inner.lock().await;
inner.read(range).await
};
@@ -195,27 +190,24 @@ impl<R: RangeReader + 'static> AsyncRead for AsyncReadAdapter<R> {
#[async_trait]
impl RangeReader for Vec<u8> {
- fn with_file_size_hint(&mut self, _file_size_hint: u64) {
- // do nothing
- }
-
- async fn metadata(&mut self) -> io::Result<Metadata> {
+ async fn metadata(&self) -> io::Result<Metadata> {
Ok(Metadata {
content_length: self.len() as u64,
})
}
- async fn read(&mut self, range: Range<u64>) -> io::Result<Bytes> {
+ async fn read(&self, range: Range<u64>) -> io::Result<Bytes> {
let bytes = Bytes::copy_from_slice(&self[range.start as usize..range.end as usize]);
Ok(bytes)
}
}
+// TODO(weny): consider replacing `tokio::fs::File` with opendal reader.
/// `FileReader` is a `RangeReader` for reading a file.
pub struct FileReader {
content_length: u64,
- position: u64,
- file: tokio::fs::File,
+ position: AtomicU64,
+ file: Mutex<tokio::fs::File>,
}
impl FileReader {
@@ -225,32 +217,37 @@ impl FileReader {
let metadata = file.metadata().await?;
Ok(FileReader {
content_length: metadata.len(),
- position: 0,
- file,
+ position: AtomicU64::new(0),
+ file: Mutex::new(file),
})
}
}
-#[async_trait]
-impl RangeReader for FileReader {
+#[cfg(any(test, feature = "testing"))]
+impl SizeAwareRangeReader for FileReader {
fn with_file_size_hint(&mut self, _file_size_hint: u64) {
// do nothing
}
+}
- async fn metadata(&mut self) -> io::Result<Metadata> {
+#[async_trait]
+impl RangeReader for FileReader {
+ async fn metadata(&self) -> io::Result<Metadata> {
Ok(Metadata {
content_length: self.content_length,
})
}
- async fn read(&mut self, mut range: Range<u64>) -> io::Result<Bytes> {
- if range.start != self.position {
- self.file.seek(io::SeekFrom::Start(range.start)).await?;
- self.position = range.start;
+ async fn read(&self, mut range: Range<u64>) -> io::Result<Bytes> {
+ let mut file = self.file.lock().await;
+
+ if range.start != self.position.load(Ordering::Relaxed) {
+ file.seek(io::SeekFrom::Start(range.start)).await?;
+ self.position.store(range.start, Ordering::Relaxed);
}
range.end = range.end.min(self.content_length);
- if range.end <= self.position {
+ if range.end <= self.position.load(Ordering::Relaxed) {
return Err(io::Error::new(
io::ErrorKind::UnexpectedEof,
"Start of range is out of bounds",
@@ -259,8 +256,8 @@ impl RangeReader for FileReader {
let mut buf = vec![0; (range.end - range.start) as usize];
- self.file.read_exact(&mut buf).await?;
- self.position = range.end;
+ file.read_exact(&mut buf).await?;
+ self.position.store(range.end, Ordering::Relaxed);
Ok(Bytes::from(buf))
}
@@ -301,7 +298,7 @@ mod tests {
let data = b"hello world";
tokio::fs::write(path, data).await.unwrap();
- let mut reader = FileReader::new(path).await.unwrap();
+ let reader = FileReader::new(path).await.unwrap();
let metadata = reader.metadata().await.unwrap();
assert_eq!(metadata.content_length, data.len() as u64);
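The `FileReader` change above turns the `&mut self` reader methods into `&self` ones by moving the mutable state behind interior mutability: an `AtomicU64` for the last read position and a `Mutex` around the file handle. A minimal sketch of the same pattern over an in-memory buffer follows; `BufferReader` is a hypothetical stand-in for the real reader, and the bounds check is simplified.

use std::ops::Range;
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Mutex;

struct BufferReader {
    content: Mutex<Vec<u8>>,
    position: AtomicU64,
}

impl BufferReader {
    fn new(content: Vec<u8>) -> Self {
        Self {
            content: Mutex::new(content),
            position: AtomicU64::new(0),
        }
    }

    // `read` takes `&self`, so callers can share the reader without `&mut` access;
    // the lock serializes concurrent reads and the atomic records the last position.
    fn read(&self, range: Range<u64>) -> std::io::Result<Vec<u8>> {
        let content = self.content.lock().unwrap();
        let end = range.end.min(content.len() as u64);
        if end <= range.start {
            return Err(std::io::Error::new(
                std::io::ErrorKind::UnexpectedEof,
                "start of range is out of bounds",
            ));
        }
        self.position.store(end, Ordering::Relaxed);
        Ok(content[range.start as usize..end as usize].to_vec())
    }
}

fn main() {
    let reader = BufferReader::new(b"hello world".to_vec());
    let bytes = reader.read(0..5).unwrap();
    assert_eq!(bytes, b"hello".to_vec());
    assert_eq!(reader.position.load(Ordering::Relaxed), 5);
}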
diff --git a/src/index/src/bloom_filter/applier.rs b/src/index/src/bloom_filter/applier.rs
index 2750cbb92b6b..e9b11a8f1d6f 100644
--- a/src/index/src/bloom_filter/applier.rs
+++ b/src/index/src/bloom_filter/applier.rs
@@ -42,7 +42,7 @@ pub struct BloomFilterApplier {
}
impl BloomFilterApplier {
- pub async fn new(mut reader: Box<dyn BloomFilterReader + Send>) -> Result<Self> {
+ pub async fn new(reader: Box<dyn BloomFilterReader + Send>) -> Result<Self> {
let meta = reader.metadata().await?;
Ok(Self { reader, meta })
diff --git a/src/index/src/bloom_filter/reader.rs b/src/index/src/bloom_filter/reader.rs
index 02085fa671f7..e816ab01c601 100644
--- a/src/index/src/bloom_filter/reader.rs
+++ b/src/index/src/bloom_filter/reader.rs
@@ -29,16 +29,16 @@ use crate::bloom_filter::{BloomFilterMeta, BloomFilterSegmentLocation, SEED};
const BLOOM_META_LEN_SIZE: u64 = 4;
/// Default prefetch size of bloom filter meta.
-pub const DEFAULT_PREFETCH_SIZE: u64 = 1024; // 1KiB
+pub const DEFAULT_PREFETCH_SIZE: u64 = 8192; // 8KiB
/// `BloomFilterReader` reads the bloom filter from the file.
#[async_trait]
-pub trait BloomFilterReader {
+pub trait BloomFilterReader: Sync {
/// Reads range of bytes from the file.
- async fn range_read(&mut self, offset: u64, size: u32) -> Result<Bytes>;
+ async fn range_read(&self, offset: u64, size: u32) -> Result<Bytes>;
/// Reads bunch of ranges from the file.
- async fn read_vec(&mut self, ranges: &[Range<u64>]) -> Result<Vec<Bytes>> {
+ async fn read_vec(&self, ranges: &[Range<u64>]) -> Result<Vec<Bytes>> {
let mut results = Vec::with_capacity(ranges.len());
for range in ranges {
let size = (range.end - range.start) as u32;
@@ -49,10 +49,10 @@ pub trait BloomFilterReader {
}
/// Reads the meta information of the bloom filter.
- async fn metadata(&mut self) -> Result<BloomFilterMeta>;
+ async fn metadata(&self) -> Result<BloomFilterMeta>;
/// Reads a bloom filter with the given location.
- async fn bloom_filter(&mut self, loc: &BloomFilterSegmentLocation) -> Result<BloomFilter> {
+ async fn bloom_filter(&self, loc: &BloomFilterSegmentLocation) -> Result<BloomFilter> {
let bytes = self.range_read(loc.offset, loc.size as _).await?;
let vec = bytes
.chunks_exact(std::mem::size_of::<u64>())
@@ -80,23 +80,23 @@ impl<R: RangeReader> BloomFilterReaderImpl<R> {
#[async_trait]
impl<R: RangeReader> BloomFilterReader for BloomFilterReaderImpl<R> {
- async fn range_read(&mut self, offset: u64, size: u32) -> Result<Bytes> {
+ async fn range_read(&self, offset: u64, size: u32) -> Result<Bytes> {
self.reader
.read(offset..offset + size as u64)
.await
.context(IoSnafu)
}
- async fn read_vec(&mut self, ranges: &[Range<u64>]) -> Result<Vec<Bytes>> {
+ async fn read_vec(&self, ranges: &[Range<u64>]) -> Result<Vec<Bytes>> {
self.reader.read_vec(ranges).await.context(IoSnafu)
}
- async fn metadata(&mut self) -> Result<BloomFilterMeta> {
+ async fn metadata(&self) -> Result<BloomFilterMeta> {
let metadata = self.reader.metadata().await.context(IoSnafu)?;
let file_size = metadata.content_length;
let mut meta_reader =
- BloomFilterMetaReader::new(&mut self.reader, file_size, Some(DEFAULT_PREFETCH_SIZE));
+ BloomFilterMetaReader::new(&self.reader, file_size, Some(DEFAULT_PREFETCH_SIZE));
meta_reader.metadata().await
}
}
@@ -250,7 +250,7 @@ mod tests {
async fn test_bloom_filter_reader() {
let bytes = mock_bloom_filter_bytes().await;
- let mut reader = BloomFilterReaderImpl::new(bytes);
+ let reader = BloomFilterReaderImpl::new(bytes);
let meta = reader.metadata().await.unwrap();
assert_eq!(meta.bloom_filter_segments.len(), 2);
diff --git a/src/index/src/inverted_index/format/reader.rs b/src/index/src/inverted_index/format/reader.rs
index 24f21504d001..268b169979ea 100644
--- a/src/index/src/inverted_index/format/reader.rs
+++ b/src/index/src/inverted_index/format/reader.rs
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::collections::VecDeque;
use std::ops::Range;
use std::sync::Arc;
@@ -33,10 +34,10 @@ mod footer;
#[async_trait]
pub trait InvertedIndexReader: Send + Sync {
/// Seeks to given offset and reads data with exact size as provided.
- async fn range_read(&mut self, offset: u64, size: u32) -> Result<Vec<u8>>;
+ async fn range_read(&self, offset: u64, size: u32) -> Result<Vec<u8>>;
/// Reads the bytes in the given ranges.
- async fn read_vec(&mut self, ranges: &[Range<u64>]) -> Result<Vec<Bytes>> {
+ async fn read_vec(&self, ranges: &[Range<u64>]) -> Result<Vec<Bytes>> {
let mut result = Vec::with_capacity(ranges.len());
for range in ranges {
let data = self
@@ -48,16 +49,35 @@ pub trait InvertedIndexReader: Send + Sync {
}
/// Retrieves metadata of all inverted indices stored within the blob.
- async fn metadata(&mut self) -> Result<Arc<InvertedIndexMetas>>;
+ async fn metadata(&self) -> Result<Arc<InvertedIndexMetas>>;
/// Retrieves the finite state transducer (FST) map from the given offset and size.
- async fn fst(&mut self, offset: u64, size: u32) -> Result<FstMap> {
+ async fn fst(&self, offset: u64, size: u32) -> Result<FstMap> {
let fst_data = self.range_read(offset, size).await?;
FstMap::new(fst_data).context(DecodeFstSnafu)
}
+ /// Retrieves the multiple finite state transducer (FST) maps from the given ranges.
+ async fn fst_vec(&mut self, ranges: &[Range<u64>]) -> Result<Vec<FstMap>> {
+ self.read_vec(ranges)
+ .await?
+ .into_iter()
+ .map(|bytes| FstMap::new(bytes.to_vec()).context(DecodeFstSnafu))
+ .collect::<Result<Vec<_>>>()
+ }
+
/// Retrieves the bitmap from the given offset and size.
- async fn bitmap(&mut self, offset: u64, size: u32) -> Result<BitVec> {
+ async fn bitmap(&self, offset: u64, size: u32) -> Result<BitVec> {
self.range_read(offset, size).await.map(BitVec::from_vec)
}
+
+ /// Retrieves the multiple bitmaps from the given ranges.
+ async fn bitmap_deque(&mut self, ranges: &[Range<u64>]) -> Result<VecDeque<BitVec>> {
+ Ok(self
+ .read_vec(ranges)
+ .await?
+ .into_iter()
+ .map(|bytes| BitVec::from_slice(bytes.as_ref()))
+ .collect::<VecDeque<_>>())
+ }
}
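The new `fst_vec` and `bitmap_deque` helpers above batch many byte ranges into one request and return the results in request order. The sketch below shows that consume-in-order pattern over a plain in-memory buffer; the two-group split is purely illustrative and not the actual index code.

use std::collections::VecDeque;
use std::ops::Range;

// Read every requested range from an in-memory "blob", preserving request order.
fn read_vec(source: &[u8], ranges: &[Range<u64>]) -> Vec<Vec<u8>> {
    ranges
        .iter()
        .map(|r| source[r.start as usize..r.end as usize].to_vec())
        .collect()
}

// Collect into a VecDeque so each consumer can pop its share from the front.
fn read_deque(source: &[u8], ranges: &[Range<u64>]) -> VecDeque<Vec<u8>> {
    read_vec(source, ranges).into_iter().collect()
}

fn main() {
    let blob = (0u8..64).collect::<Vec<u8>>();

    // Two logical groups of ranges flattened into a single batched request.
    let ranges = vec![0..4, 8..12, 32..40];
    let mut results = read_deque(&blob, &ranges);

    // The first group owns two ranges, the second group owns the last one.
    let group1: Vec<_> = (0..2).map(|_| results.pop_front().unwrap()).collect();
    let group2 = results.pop_front().unwrap();

    assert_eq!(group1[0], vec![0, 1, 2, 3]);
    assert_eq!(group1[1], vec![8, 9, 10, 11]);
    assert_eq!(group2, (32u8..40).collect::<Vec<u8>>());
}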
diff --git a/src/index/src/inverted_index/format/reader/blob.rs b/src/index/src/inverted_index/format/reader/blob.rs
index 73d98835794a..b48a3224844c 100644
--- a/src/index/src/inverted_index/format/reader/blob.rs
+++ b/src/index/src/inverted_index/format/reader/blob.rs
@@ -52,7 +52,7 @@ impl<R> InvertedIndexBlobReader<R> {
#[async_trait]
impl<R: RangeReader + Sync> InvertedIndexReader for InvertedIndexBlobReader<R> {
- async fn range_read(&mut self, offset: u64, size: u32) -> Result<Vec<u8>> {
+ async fn range_read(&self, offset: u64, size: u32) -> Result<Vec<u8>> {
let buf = self
.source
.read(offset..offset + size as u64)
@@ -61,16 +61,16 @@ impl<R: RangeReader + Sync> InvertedIndexReader for InvertedIndexBlobReader<R> {
Ok(buf.into())
}
- async fn read_vec(&mut self, ranges: &[Range<u64>]) -> Result<Vec<Bytes>> {
+ async fn read_vec(&self, ranges: &[Range<u64>]) -> Result<Vec<Bytes>> {
self.source.read_vec(ranges).await.context(CommonIoSnafu)
}
- async fn metadata(&mut self) -> Result<Arc<InvertedIndexMetas>> {
+ async fn metadata(&self) -> Result<Arc<InvertedIndexMetas>> {
let metadata = self.source.metadata().await.context(CommonIoSnafu)?;
let blob_size = metadata.content_length;
Self::validate_blob_size(blob_size)?;
- let mut footer_reader = InvertedIndexFooterReader::new(&mut self.source, blob_size)
+ let mut footer_reader = InvertedIndexFooterReader::new(&self.source, blob_size)
.with_prefetch_size(DEFAULT_PREFETCH_SIZE);
footer_reader.metadata().await.map(Arc::new)
}
@@ -160,7 +160,7 @@ mod tests {
#[tokio::test]
async fn test_inverted_index_blob_reader_metadata() {
let blob = create_inverted_index_blob();
- let mut blob_reader = InvertedIndexBlobReader::new(blob);
+ let blob_reader = InvertedIndexBlobReader::new(blob);
let metas = blob_reader.metadata().await.unwrap();
assert_eq!(metas.metas.len(), 2);
@@ -187,7 +187,7 @@ mod tests {
#[tokio::test]
async fn test_inverted_index_blob_reader_fst() {
let blob = create_inverted_index_blob();
- let mut blob_reader = InvertedIndexBlobReader::new(blob);
+ let blob_reader = InvertedIndexBlobReader::new(blob);
let metas = blob_reader.metadata().await.unwrap();
let meta = metas.metas.get("tag0").unwrap();
@@ -219,7 +219,7 @@ mod tests {
#[tokio::test]
async fn test_inverted_index_blob_reader_bitmap() {
let blob = create_inverted_index_blob();
- let mut blob_reader = InvertedIndexBlobReader::new(blob);
+ let blob_reader = InvertedIndexBlobReader::new(blob);
let metas = blob_reader.metadata().await.unwrap();
let meta = metas.metas.get("tag0").unwrap();
diff --git a/src/index/src/inverted_index/format/reader/footer.rs b/src/index/src/inverted_index/format/reader/footer.rs
index c025ecf52ecd..b5a855a45597 100644
--- a/src/index/src/inverted_index/format/reader/footer.rs
+++ b/src/index/src/inverted_index/format/reader/footer.rs
@@ -24,7 +24,7 @@ use crate::inverted_index::error::{
};
use crate::inverted_index::format::FOOTER_PAYLOAD_SIZE_SIZE;
-pub const DEFAULT_PREFETCH_SIZE: u64 = 1024; // 1KiB
+pub const DEFAULT_PREFETCH_SIZE: u64 = 8192; // 8KiB
/// InvertedIndexFooterReader is for reading the footer section of the blob.
pub struct InvertedIndexFooterReader<R> {
@@ -177,11 +177,11 @@ mod tests {
..Default::default()
};
- let mut payload_buf = create_test_payload(meta);
+ let payload_buf = create_test_payload(meta);
let blob_size = payload_buf.len() as u64;
for prefetch in [0, blob_size / 2, blob_size, blob_size + 10] {
- let mut reader = InvertedIndexFooterReader::new(&mut payload_buf, blob_size);
+ let mut reader = InvertedIndexFooterReader::new(&payload_buf, blob_size);
if prefetch > 0 {
reader = reader.with_prefetch_size(prefetch);
}
@@ -205,7 +205,7 @@ mod tests {
for prefetch in [0, blob_size / 2, blob_size, blob_size + 10] {
let blob_size = payload_buf.len() as u64;
- let mut reader = InvertedIndexFooterReader::new(&mut payload_buf, blob_size);
+ let mut reader = InvertedIndexFooterReader::new(&payload_buf, blob_size);
if prefetch > 0 {
reader = reader.with_prefetch_size(prefetch);
}
@@ -224,11 +224,11 @@ mod tests {
..Default::default()
};
- let mut payload_buf = create_test_payload(meta);
+ let payload_buf = create_test_payload(meta);
let blob_size = payload_buf.len() as u64;
for prefetch in [0, blob_size / 2, blob_size, blob_size + 10] {
- let mut reader = InvertedIndexFooterReader::new(&mut payload_buf, blob_size);
+ let mut reader = InvertedIndexFooterReader::new(&payload_buf, blob_size);
if prefetch > 0 {
reader = reader.with_prefetch_size(prefetch);
}
diff --git a/src/index/src/inverted_index/format/writer/blob.rs b/src/index/src/inverted_index/format/writer/blob.rs
index 7b95b8deafa9..d53dfee855ab 100644
--- a/src/index/src/inverted_index/format/writer/blob.rs
+++ b/src/index/src/inverted_index/format/writer/blob.rs
@@ -118,7 +118,7 @@ mod tests {
.await
.unwrap();
- let mut reader = InvertedIndexBlobReader::new(blob);
+ let reader = InvertedIndexBlobReader::new(blob);
let metadata = reader.metadata().await.unwrap();
assert_eq!(metadata.total_row_count, 8);
assert_eq!(metadata.segment_row_count, 1);
@@ -158,7 +158,7 @@ mod tests {
.await
.unwrap();
- let mut reader = InvertedIndexBlobReader::new(blob);
+ let reader = InvertedIndexBlobReader::new(blob);
let metadata = reader.metadata().await.unwrap();
assert_eq!(metadata.total_row_count, 8);
assert_eq!(metadata.segment_row_count, 1);
diff --git a/src/index/src/inverted_index/search/fst_values_mapper.rs b/src/index/src/inverted_index/search/fst_values_mapper.rs
index 716653152120..54a842de0207 100644
--- a/src/index/src/inverted_index/search/fst_values_mapper.rs
+++ b/src/index/src/inverted_index/search/fst_values_mapper.rs
@@ -18,55 +18,75 @@ use greptime_proto::v1::index::InvertedIndexMeta;
use crate::inverted_index::error::Result;
use crate::inverted_index::format::reader::InvertedIndexReader;
-/// `FstValuesMapper` maps FST-encoded u64 values to their corresponding bitmaps
-/// within an inverted index.
+/// `ParallelFstValuesMapper` enables parallel mapping of multiple FST value groups to their
+/// corresponding bitmaps within an inverted index.
///
-/// The higher 32 bits of each u64 value represent the
-/// bitmap offset and the lower 32 bits represent its size. This mapper uses these
-/// combined offset-size pairs to fetch and union multiple bitmaps into a single `BitVec`.
-pub struct FstValuesMapper<'a> {
- /// `reader` retrieves bitmap data using offsets and sizes from FST values.
+/// This mapper processes multiple groups of FST values in parallel, where each group is associated
+/// with its own metadata. It optimizes bitmap retrieval by batching requests across all groups
+/// before combining them into separate result bitmaps.
+pub struct ParallelFstValuesMapper<'a> {
reader: &'a mut dyn InvertedIndexReader,
-
- /// `metadata` provides context for interpreting the index structures.
- metadata: &'a InvertedIndexMeta,
}
-impl<'a> FstValuesMapper<'a> {
- pub fn new(
- reader: &'a mut dyn InvertedIndexReader,
- metadata: &'a InvertedIndexMeta,
- ) -> FstValuesMapper<'a> {
- FstValuesMapper { reader, metadata }
+impl<'a> ParallelFstValuesMapper<'a> {
+ pub fn new(reader: &'a mut dyn InvertedIndexReader) -> Self {
+ Self { reader }
}
- /// Maps an array of FST values to a `BitVec` by retrieving and combining bitmaps.
- pub async fn map_values(&mut self, values: &[u64]) -> Result<BitVec> {
- let mut bitmap = BitVec::new();
-
- for value in values {
- // relative_offset (higher 32 bits), size (lower 32 bits)
- let [relative_offset, size] = bytemuck::cast::<u64, [u32; 2]>(*value);
+ pub async fn map_values_vec(
+ &mut self,
+ value_and_meta_vec: &[(Vec<u64>, &'a InvertedIndexMeta)],
+ ) -> Result<Vec<BitVec>> {
+ let groups = value_and_meta_vec
+ .iter()
+ .map(|(values, _)| values.len())
+ .collect::<Vec<_>>();
+ let len = groups.iter().sum::<usize>();
+ let mut fetch_ranges = Vec::with_capacity(len);
+
+ for (values, meta) in value_and_meta_vec {
+ for value in values {
+ // The higher 32 bits of each u64 value represent the
+ // bitmap offset and the lower 32 bits represent its size. This mapper uses these
+ // combined offset-size pairs to fetch and union multiple bitmaps into a single `BitVec`.
+ let [relative_offset, size] = bytemuck::cast::<u64, [u32; 2]>(*value);
+ fetch_ranges.push(
+ meta.base_offset + relative_offset as u64
+ ..meta.base_offset + relative_offset as u64 + size as u64,
+ );
+ }
+ }
- let bm = self
- .reader
- .bitmap(self.metadata.base_offset + relative_offset as u64, size)
- .await?;
+ if fetch_ranges.is_empty() {
+ return Ok(vec![BitVec::new()]);
+ }
- // Ensure the longest BitVec is the left operand to prevent truncation during OR.
- if bm.len() > bitmap.len() {
- bitmap = bm | bitmap
- } else {
- bitmap |= bm
+ common_telemetry::debug!("fetch ranges: {:?}", fetch_ranges);
+ let mut bitmaps = self.reader.bitmap_deque(&fetch_ranges).await?;
+ let mut output = Vec::with_capacity(groups.len());
+
+ for counter in groups {
+ let mut bitmap = BitVec::new();
+ for _ in 0..counter {
+ let bm = bitmaps.pop_front().unwrap();
+ if bm.len() > bitmap.len() {
+ bitmap = bm | bitmap
+ } else {
+ bitmap |= bm
+ }
}
+
+ output.push(bitmap);
}
- Ok(bitmap)
+ Ok(output)
}
}
#[cfg(test)]
mod tests {
+ use std::collections::VecDeque;
+
use common_base::bit_vec::prelude::*;
use super::*;
@@ -77,38 +97,70 @@ mod tests {
}
#[tokio::test]
- async fn test_map_values() {
+ async fn test_map_values_vec() {
let mut mock_reader = MockInvertedIndexReader::new();
- mock_reader
- .expect_bitmap()
- .returning(|offset, size| match (offset, size) {
- (1, 1) => Ok(bitvec![u8, Lsb0; 1, 0, 1, 0, 1, 0, 1]),
- (2, 1) => Ok(bitvec![u8, Lsb0; 0, 1, 0, 1, 0, 1, 0, 1]),
- _ => unreachable!(),
- });
+ mock_reader.expect_bitmap_deque().returning(|ranges| {
+ let mut output = VecDeque::new();
+ for range in ranges {
+ let offset = range.start;
+ let size = range.end - range.start;
+ match (offset, size) {
+ (1, 1) => output.push_back(bitvec![u8, Lsb0; 1, 0, 1, 0, 1, 0, 1]),
+ (2, 1) => output.push_back(bitvec![u8, Lsb0; 0, 1, 0, 1, 0, 1, 0, 1]),
+ _ => unreachable!(),
+ }
+ }
+ Ok(output)
+ });
let meta = InvertedIndexMeta::default();
- let mut values_mapper = FstValuesMapper::new(&mut mock_reader, &meta);
+ let mut values_mapper = ParallelFstValuesMapper::new(&mut mock_reader);
- let result = values_mapper.map_values(&[]).await.unwrap();
- assert_eq!(result.count_ones(), 0);
+ let result = values_mapper
+ .map_values_vec(&[(vec![], &meta)])
+ .await
+ .unwrap();
+ assert_eq!(result[0].count_ones(), 0);
- let result = values_mapper.map_values(&[value(1, 1)]).await.unwrap();
- assert_eq!(result, bitvec![u8, Lsb0; 1, 0, 1, 0, 1, 0, 1]);
+ let result = values_mapper
+ .map_values_vec(&[(vec![value(1, 1)], &meta)])
+ .await
+ .unwrap();
+ assert_eq!(result[0], bitvec![u8, Lsb0; 1, 0, 1, 0, 1, 0, 1]);
+
+ let result = values_mapper
+ .map_values_vec(&[(vec![value(2, 1)], &meta)])
+ .await
+ .unwrap();
+ assert_eq!(result[0], bitvec![u8, Lsb0; 0, 1, 0, 1, 0, 1, 0, 1]);
+
+ let result = values_mapper
+ .map_values_vec(&[(vec![value(1, 1), value(2, 1)], &meta)])
+ .await
+ .unwrap();
+ assert_eq!(result[0], bitvec![u8, Lsb0; 1, 1, 1, 1, 1, 1, 1, 1]);
- let result = values_mapper.map_values(&[value(2, 1)]).await.unwrap();
- assert_eq!(result, bitvec![u8, Lsb0; 0, 1, 0, 1, 0, 1, 0, 1]);
+ let result = values_mapper
+ .map_values_vec(&[(vec![value(2, 1), value(1, 1)], &meta)])
+ .await
+ .unwrap();
+ assert_eq!(result[0], bitvec![u8, Lsb0; 1, 1, 1, 1, 1, 1, 1, 1]);
let result = values_mapper
- .map_values(&[value(1, 1), value(2, 1)])
+ .map_values_vec(&[(vec![value(2, 1)], &meta), (vec![value(1, 1)], &meta)])
.await
.unwrap();
- assert_eq!(result, bitvec![u8, Lsb0; 1, 1, 1, 1, 1, 1, 1, 1]);
+ assert_eq!(result[0], bitvec![u8, Lsb0; 0, 1, 0, 1, 0, 1, 0, 1]);
+ assert_eq!(result[1], bitvec![u8, Lsb0; 1, 0, 1, 0, 1, 0, 1]);
let result = values_mapper
- .map_values(&[value(2, 1), value(1, 1)])
+ .map_values_vec(&[
+ (vec![value(2, 1), value(1, 1)], &meta),
+ (vec![value(1, 1)], &meta),
+ ])
.await
.unwrap();
- assert_eq!(result, bitvec![u8, Lsb0; 1, 1, 1, 1, 1, 1, 1, 1]);
+ assert_eq!(result[0], bitvec![u8, Lsb0; 1, 1, 1, 1, 1, 1, 1, 1]);
+ assert_eq!(result[1], bitvec![u8, Lsb0; 1, 0, 1, 0, 1, 0, 1]);
}
}
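
The hunk above replaces the single-group `FstValuesMapper` with `ParallelFstValuesMapper::map_values_vec`, which flattens every (values, metadata) group into one list of byte ranges, fetches all bitmaps in a single `bitmap_deque` call, and then walks the returned deque to union `counter` bitmaps back into one result per group. Below is a minimal, dependency-free sketch of that regroup-and-union step, using `Vec<bool>` in place of the crate's `BitVec` purely to keep it self-contained; it is not part of the diff.

use std::collections::VecDeque;

// `Vec<bool>` stands in for `BitVec` so the sketch has no external deps.
fn union_into(acc: &mut Vec<bool>, bm: Vec<bool>) {
    if bm.len() > acc.len() {
        acc.resize(bm.len(), false);
    }
    for (i, bit) in bm.into_iter().enumerate() {
        acc[i] = acc[i] || bit;
    }
}

// Given the number of values per group and the bitmaps returned by one
// batched fetch (in the same order the ranges were pushed), rebuild one
// union bitmap per group.
fn regroup(groups: &[usize], mut bitmaps: VecDeque<Vec<bool>>) -> Vec<Vec<bool>> {
    let mut output = Vec::with_capacity(groups.len());
    for &count in groups {
        let mut acc = Vec::new();
        for _ in 0..count {
            // The reader returns exactly one bitmap per requested range,
            // so popping here mirrors the `unwrap` in `map_values_vec`.
            union_into(&mut acc, bitmaps.pop_front().unwrap());
        }
        output.push(acc);
    }
    output
}

fn main() {
    // Two groups: the first asked for two bitmaps, the second for one.
    let groups = [2, 1];
    let bitmaps = VecDeque::from(vec![
        vec![true, false, true],
        vec![false, true, false, true],
        vec![true, true],
    ]);
    let merged = regroup(&groups, bitmaps);
    assert_eq!(merged[0], vec![true, true, true, true]);
    assert_eq!(merged[1], vec![true, true]);
}

Collecting every range before any read is what lets the reader batch the underlying I/O across groups, which is the stated goal of the new mapper.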
diff --git a/src/index/src/inverted_index/search/index_apply/predicates_apply.rs b/src/index/src/inverted_index/search/index_apply/predicates_apply.rs
index bead0761d7b2..cf8e30085f0c 100644
--- a/src/index/src/inverted_index/search/index_apply/predicates_apply.rs
+++ b/src/index/src/inverted_index/search/index_apply/predicates_apply.rs
@@ -23,7 +23,7 @@ use crate::inverted_index::format::reader::InvertedIndexReader;
use crate::inverted_index::search::fst_apply::{
FstApplier, IntersectionFstApplier, KeysFstApplier,
};
-use crate::inverted_index::search::fst_values_mapper::FstValuesMapper;
+use crate::inverted_index::search::fst_values_mapper::ParallelFstValuesMapper;
use crate::inverted_index::search::index_apply::{
ApplyOutput, IndexApplier, IndexNotFoundStrategy, SearchContext,
};
@@ -57,11 +57,10 @@ impl IndexApplier for PredicatesIndexApplier {
let mut bitmap = Self::bitmap_full_range(&metadata);
// TODO(zhongzc): optimize the order of applying to make it quicker to return empty.
- for (name, fst_applier) in &self.fst_appliers {
- if bitmap.count_ones() == 0 {
- break;
- }
+ let mut appliers = Vec::with_capacity(self.fst_appliers.len());
+ let mut fst_ranges = Vec::with_capacity(self.fst_appliers.len());
+ for (name, fst_applier) in &self.fst_appliers {
let Some(meta) = metadata.metas.get(name) else {
match context.index_not_found_strategy {
IndexNotFoundStrategy::ReturnEmpty => {
@@ -75,14 +74,31 @@ impl IndexApplier for PredicatesIndexApplier {
}
}
};
-
let fst_offset = meta.base_offset + meta.relative_fst_offset as u64;
- let fst_size = meta.fst_size;
- let fst = reader.fst(fst_offset, fst_size).await?;
- let values = fst_applier.apply(&fst);
+ let fst_size = meta.fst_size as u64;
+ appliers.push((fst_applier, meta));
+ fst_ranges.push(fst_offset..fst_offset + fst_size);
+ }
- let mut mapper = FstValuesMapper::new(&mut *reader, meta);
- let bm = mapper.map_values(&values).await?;
+ if fst_ranges.is_empty() {
+ output.matched_segment_ids = bitmap;
+ return Ok(output);
+ }
+
+ let fsts = reader.fst_vec(&fst_ranges).await?;
+ let value_and_meta_vec = fsts
+ .into_iter()
+ .zip(appliers)
+ .map(|(fst, (fst_applier, meta))| (fst_applier.apply(&fst), meta))
+ .collect::<Vec<_>>();
+
+ let mut mapper = ParallelFstValuesMapper::new(reader);
+ let bm_vec = mapper.map_values_vec(&value_and_meta_vec).await?;
+
+ for bm in bm_vec {
+ if bitmap.count_ones() == 0 {
+ break;
+ }
bitmap &= bm;
}
@@ -148,6 +164,7 @@ impl TryFrom<Vec<(String, Vec<Predicate>)>> for PredicatesIndexApplier {
#[cfg(test)]
mod tests {
+ use std::collections::VecDeque;
use std::sync::Arc;
use common_base::bit_vec::prelude::*;
@@ -204,15 +221,19 @@ mod tests {
mock_reader
.expect_metadata()
.returning(|| Ok(mock_metas([("tag-0", 0)])));
- mock_reader.expect_fst().returning(|_offset, _size| {
- Ok(FstMap::from_iter([(b"tag-0_value-0", fst_value(2, 1))]).unwrap())
+ mock_reader.expect_fst_vec().returning(|_ranges| {
+ Ok(vec![FstMap::from_iter([(
+ b"tag-0_value-0",
+ fst_value(2, 1),
+ )])
+ .unwrap()])
+ });
+
+ mock_reader.expect_bitmap_deque().returning(|range| {
+ assert_eq!(range.len(), 1);
+ assert_eq!(range[0], 2..3);
+ Ok(VecDeque::from([bitvec![u8, Lsb0; 1, 0, 1, 0, 1, 0, 1, 0]]))
});
- mock_reader
- .expect_bitmap()
- .returning(|offset, size| match (offset, size) {
- (2, 1) => Ok(bitvec![u8, Lsb0; 1, 0, 1, 0, 1, 0, 1, 0]),
- _ => unreachable!(),
- });
let output = applier
.apply(SearchContext::default(), &mut mock_reader)
.await
@@ -227,8 +248,12 @@ mod tests {
mock_reader
.expect_metadata()
.returning(|| Ok(mock_metas([("tag-0", 0)])));
- mock_reader.expect_fst().returning(|_offset, _size| {
- Ok(FstMap::from_iter([(b"tag-0_value-1", fst_value(2, 1))]).unwrap())
+ mock_reader.expect_fst_vec().returning(|_range| {
+ Ok(vec![FstMap::from_iter([(
+ b"tag-0_value-1",
+ fst_value(2, 1),
+ )])
+ .unwrap()])
});
let output = applier
.apply(SearchContext::default(), &mut mock_reader)
@@ -252,20 +277,33 @@ mod tests {
mock_reader
.expect_metadata()
.returning(|| Ok(mock_metas([("tag-0", 0), ("tag-1", 1)])));
- mock_reader
- .expect_fst()
- .returning(|offset, _size| match offset {
- 0 => Ok(FstMap::from_iter([(b"tag-0_value-0", fst_value(1, 1))]).unwrap()),
- 1 => Ok(FstMap::from_iter([(b"tag-1_value-a", fst_value(2, 1))]).unwrap()),
- _ => unreachable!(),
- });
- mock_reader
- .expect_bitmap()
- .returning(|offset, size| match (offset, size) {
- (1, 1) => Ok(bitvec![u8, Lsb0; 1, 0, 1, 0, 1, 0, 1, 0]),
- (2, 1) => Ok(bitvec![u8, Lsb0; 1, 1, 0, 1, 1, 0, 1, 1]),
- _ => unreachable!(),
- });
+ mock_reader.expect_fst_vec().returning(|ranges| {
+ let mut output = vec![];
+ for range in ranges {
+ match range.start {
+ 0 => output
+ .push(FstMap::from_iter([(b"tag-0_value-0", fst_value(1, 1))]).unwrap()),
+ 1 => output
+ .push(FstMap::from_iter([(b"tag-1_value-a", fst_value(2, 1))]).unwrap()),
+ _ => unreachable!(),
+ }
+ }
+ Ok(output)
+ });
+ mock_reader.expect_bitmap_deque().returning(|ranges| {
+ let mut output = VecDeque::new();
+ for range in ranges {
+ let offset = range.start;
+ let size = range.end - range.start;
+ match (offset, size) {
+ (1, 1) => output.push_back(bitvec![u8, Lsb0; 1, 0, 1, 0, 1, 0, 1, 0]),
+ (2, 1) => output.push_back(bitvec![u8, Lsb0; 1, 1, 0, 1, 1, 0, 1, 1]),
+ _ => unreachable!(),
+ }
+ }
+
+ Ok(output)
+ });
let output = applier
.apply(SearchContext::default(), &mut mock_reader)
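
The applier-side change above is the consumer of that new interface: FST ranges for every predicate are collected first, fetched with one `fst_vec` call, and the resulting bitmaps are intersected afterwards. Because all bitmaps are now materialized up front, the `count_ones() == 0` early exit only skips the remaining AND work rather than any I/O. A small sketch of that final intersection loop, again with `Vec<bool>` standing in for `BitVec`:

// `bitmap` starts as the full-range bitmap and is narrowed by each
// predicate's result.
fn intersect_all(mut bitmap: Vec<bool>, per_predicate: Vec<Vec<bool>>) -> Vec<bool> {
    for bm in per_predicate {
        if bitmap.iter().all(|b| !*b) {
            // Already empty: the remaining ANDs cannot add anything back.
            break;
        }
        for (i, slot) in bitmap.iter_mut().enumerate() {
            // Bits missing from the shorter bitmap are treated as unset.
            *slot = *slot && bm.get(i).copied().unwrap_or(false);
        }
    }
    bitmap
}

fn main() {
    let full = vec![true; 8];
    let result = intersect_all(
        full,
        vec![
            vec![true, false, true, false, true, false, true, false],
            vec![true, true, false, true, true, false, true, true],
        ],
    );
    assert_eq!(
        result,
        vec![true, false, false, false, true, false, true, false]
    );
}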
diff --git a/src/mito2/src/cache/index.rs b/src/mito2/src/cache/index.rs
index 137dc3d87454..c28ac063b322 100644
--- a/src/mito2/src/cache/index.rs
+++ b/src/mito2/src/cache/index.rs
@@ -157,7 +157,7 @@ where
load: F,
) -> Result<Vec<u8>, E>
where
- F: FnOnce(Vec<Range<u64>>) -> Fut,
+ F: Fn(Vec<Range<u64>>) -> Fut,
Fut: Future<Output = Result<Vec<Bytes>, E>>,
E: std::error::Error,
{
diff --git a/src/mito2/src/cache/index/bloom_filter_index.rs b/src/mito2/src/cache/index/bloom_filter_index.rs
index 08ac7e8273d1..c5b95a5b843f 100644
--- a/src/mito2/src/cache/index/bloom_filter_index.rs
+++ b/src/mito2/src/cache/index/bloom_filter_index.rs
@@ -87,8 +87,8 @@ impl<R> CachedBloomFilterIndexBlobReader<R> {
#[async_trait]
impl<R: BloomFilterReader + Send> BloomFilterReader for CachedBloomFilterIndexBlobReader<R> {
- async fn range_read(&mut self, offset: u64, size: u32) -> Result<Bytes> {
- let inner = &mut self.inner;
+ async fn range_read(&self, offset: u64, size: u32) -> Result<Bytes> {
+ let inner = &self.inner;
self.cache
.get_or_load(
(self.file_id, self.column_id),
@@ -102,7 +102,7 @@ impl<R: BloomFilterReader + Send> BloomFilterReader for CachedBloomFilterIndexBl
}
/// Reads the meta information of the bloom filter.
- async fn metadata(&mut self) -> Result<BloomFilterMeta> {
+ async fn metadata(&self) -> Result<BloomFilterMeta> {
if let Some(cached) = self.cache.get_metadata((self.file_id, self.column_id)) {
CACHE_HIT.with_label_values(&[INDEX_METADATA_TYPE]).inc();
Ok((*cached).clone())
diff --git a/src/mito2/src/cache/index/inverted_index.rs b/src/mito2/src/cache/index/inverted_index.rs
index 53929c467a98..aaedcd8f89f9 100644
--- a/src/mito2/src/cache/index/inverted_index.rs
+++ b/src/mito2/src/cache/index/inverted_index.rs
@@ -12,11 +12,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use core::ops::Range;
use std::sync::Arc;
use api::v1::index::InvertedIndexMetas;
use async_trait::async_trait;
use bytes::Bytes;
+use futures::future::try_join_all;
use index::inverted_index::error::Result;
use index::inverted_index::format::reader::InvertedIndexReader;
use prost::Message;
@@ -77,8 +79,8 @@ impl<R> CachedInvertedIndexBlobReader<R> {
#[async_trait]
impl<R: InvertedIndexReader> InvertedIndexReader for CachedInvertedIndexBlobReader<R> {
- async fn range_read(&mut self, offset: u64, size: u32) -> Result<Vec<u8>> {
- let inner = &mut self.inner;
+ async fn range_read(&self, offset: u64, size: u32) -> Result<Vec<u8>> {
+ let inner = &self.inner;
self.cache
.get_or_load(
self.file_id,
@@ -90,7 +92,25 @@ impl<R: InvertedIndexReader> InvertedIndexReader for CachedInvertedIndexBlobRead
.await
}
- async fn metadata(&mut self) -> Result<Arc<InvertedIndexMetas>> {
+ async fn read_vec(&self, ranges: &[Range<u64>]) -> Result<Vec<Bytes>> {
+ let fetch = ranges.iter().map(|range| {
+ let inner = &self.inner;
+ self.cache.get_or_load(
+ self.file_id,
+ self.blob_size,
+ range.start,
+ (range.end - range.start) as u32,
+ move |ranges| async move { inner.read_vec(&ranges).await },
+ )
+ });
+ Ok(try_join_all(fetch)
+ .await?
+ .into_iter()
+ .map(Bytes::from)
+ .collect::<Vec<_>>())
+ }
+
+ async fn metadata(&self) -> Result<Arc<InvertedIndexMetas>> {
if let Some(cached) = self.cache.get_metadata(self.file_id) {
CACHE_HIT.with_label_values(&[INDEX_METADATA_TYPE]).inc();
Ok(cached)
@@ -220,7 +240,7 @@ mod test {
.unwrap();
let reader = InvertedIndexBlobReader::new(range_reader);
- let mut cached_reader = CachedInvertedIndexBlobReader::new(
+ let cached_reader = CachedInvertedIndexBlobReader::new(
FileId::random(),
file_size,
reader,
@@ -304,7 +324,7 @@ mod test {
let offset = rng.gen_range(0..file_size);
let size = rng.gen_range(0..file_size as u32 - offset as u32);
let expected = cached_reader.range_read(offset, size).await.unwrap();
- let inner = &mut cached_reader.inner;
+ let inner = &cached_reader.inner;
let read = cached_reader
.cache
.get_or_load(
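
The cached reader's new `read_vec` maps each requested range to its own `get_or_load` future and awaits them with `try_join_all`, so hits and misses across ranges resolve concurrently while the output keeps the input order. A sketch of that fan-out pattern, with a hypothetical `fetch_one` closure standing in for the cache lookup:

use std::io;
use std::ops::Range;

use futures::future::{try_join_all, BoxFuture};
use futures::FutureExt;

// One future per range, awaited concurrently; `fetch_one` is a stand-in
// for `cache.get_or_load(...)`.
async fn read_ranges<F>(ranges: &[Range<u64>], fetch_one: F) -> io::Result<Vec<Vec<u8>>>
where
    F: Fn(Range<u64>) -> BoxFuture<'static, io::Result<Vec<u8>>>,
{
    try_join_all(ranges.iter().cloned().map(fetch_one)).await
}

fn main() -> io::Result<()> {
    // Fake "blob": each fetch just copies a slice of an in-memory buffer.
    let blob: Vec<u8> = (0u8..100).collect();
    let fetch = move |r: Range<u64>| {
        let data = blob[r.start as usize..r.end as usize].to_vec();
        async move { Ok::<_, io::Error>(data) }.boxed()
    };
    let out = futures::executor::block_on(read_ranges(&[0..4, 10..12], fetch))?;
    assert_eq!(out, vec![vec![0, 1, 2, 3], vec![10, 11]]);
    Ok(())
}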
diff --git a/src/mito2/src/sst/index/bloom_filter/applier.rs b/src/mito2/src/sst/index/bloom_filter/applier.rs
index 1e7533a7044c..2f024d5cb712 100644
--- a/src/mito2/src/sst/index/bloom_filter/applier.rs
+++ b/src/mito2/src/sst/index/bloom_filter/applier.rs
@@ -118,7 +118,7 @@ impl BloomFilterIndexApplier {
.start_timer();
for (column_id, predicates) in &self.filters {
- let mut blob = match self
+ let blob = match self
.blob_reader(file_id, *column_id, file_size_hint)
.await?
{
diff --git a/src/mito2/src/sst/index/bloom_filter/creator.rs b/src/mito2/src/sst/index/bloom_filter/creator.rs
index d1b73a0bde25..01c79b408eed 100644
--- a/src/mito2/src/sst/index/bloom_filter/creator.rs
+++ b/src/mito2/src/sst/index/bloom_filter/creator.rs
@@ -481,7 +481,7 @@ mod tests {
.await
.unwrap();
let reader = blob_guard.reader().await.unwrap();
- let mut bloom_filter = BloomFilterReaderImpl::new(reader);
+ let bloom_filter = BloomFilterReaderImpl::new(reader);
let metadata = bloom_filter.metadata().await.unwrap();
assert_eq!(metadata.bloom_filter_segments.len(), 10);
@@ -510,7 +510,7 @@ mod tests {
.await
.unwrap();
let reader = blob_guard.reader().await.unwrap();
- let mut bloom_filter = BloomFilterReaderImpl::new(reader);
+ let bloom_filter = BloomFilterReaderImpl::new(reader);
let metadata = bloom_filter.metadata().await.unwrap();
assert_eq!(metadata.bloom_filter_segments.len(), 5);
diff --git a/src/mito2/src/sst/index/inverted_index/applier.rs b/src/mito2/src/sst/index/inverted_index/applier.rs
index 73785f3264a0..61865f76f4c9 100644
--- a/src/mito2/src/sst/index/inverted_index/applier.rs
+++ b/src/mito2/src/sst/index/inverted_index/applier.rs
@@ -126,7 +126,7 @@ impl InvertedIndexApplier {
index_not_found_strategy: IndexNotFoundStrategy::ReturnEmpty,
};
- let mut blob = match self.cached_blob_reader(file_id).await {
+ let blob = match self.cached_blob_reader(file_id, file_size_hint).await {
Ok(Some(puffin_reader)) => puffin_reader,
other => {
if let Err(err) = other {
@@ -158,7 +158,11 @@ impl InvertedIndexApplier {
}
/// Creates a blob reader from the cached index file.
- async fn cached_blob_reader(&self, file_id: FileId) -> Result<Option<BlobReader>> {
+ async fn cached_blob_reader(
+ &self,
+ file_id: FileId,
+ file_size_hint: Option<u64>,
+ ) -> Result<Option<BlobReader>> {
let Some(file_cache) = &self.file_cache else {
return Ok(None);
};
@@ -171,10 +175,12 @@ impl InvertedIndexApplier {
let puffin_manager = self.puffin_manager_factory.build(file_cache.local_store());
let puffin_file_name = file_cache.cache_file_path(index_key);
+ // Adds file size hint to the puffin reader to avoid extra metadata read.
let reader = puffin_manager
.reader(&puffin_file_name)
.await
.context(PuffinBuildReaderSnafu)?
+ .with_file_size_hint(file_size_hint)
.blob(INDEX_BLOB_TYPE)
.await
.context(PuffinReadBlobSnafu)?
diff --git a/src/mito2/src/sst/index/puffin_manager.rs b/src/mito2/src/sst/index/puffin_manager.rs
index 8bfa25de1e87..498ed254f2f0 100644
--- a/src/mito2/src/sst/index/puffin_manager.rs
+++ b/src/mito2/src/sst/index/puffin_manager.rs
@@ -191,7 +191,7 @@ mod tests {
let reader = manager.reader(file_name).await.unwrap();
let blob_guard = reader.blob(blob_key).await.unwrap();
- let mut blob_reader = blob_guard.reader().await.unwrap();
+ let blob_reader = blob_guard.reader().await.unwrap();
let meta = blob_reader.metadata().await.unwrap();
let bs = blob_reader.read(0..meta.content_length).await.unwrap();
assert_eq!(&*bs, raw_data);
diff --git a/src/mito2/src/sst/index/store.rs b/src/mito2/src/sst/index/store.rs
index 7322bd4db496..f37fdc5c7a92 100644
--- a/src/mito2/src/sst/index/store.rs
+++ b/src/mito2/src/sst/index/store.rs
@@ -19,7 +19,7 @@ use std::task::{Context, Poll};
use async_trait::async_trait;
use bytes::{BufMut, Bytes};
-use common_base::range_read::{Metadata, RangeReader};
+use common_base::range_read::{Metadata, RangeReader, SizeAwareRangeReader};
use futures::{AsyncRead, AsyncSeek, AsyncWrite};
use object_store::ObjectStore;
use pin_project::pin_project;
@@ -266,13 +266,15 @@ pub(crate) struct InstrumentedRangeReader<'a> {
file_size_hint: Option<u64>,
}
-#[async_trait]
-impl RangeReader for InstrumentedRangeReader<'_> {
+impl SizeAwareRangeReader for InstrumentedRangeReader<'_> {
fn with_file_size_hint(&mut self, file_size_hint: u64) {
self.file_size_hint = Some(file_size_hint);
}
+}
- async fn metadata(&mut self) -> io::Result<Metadata> {
+#[async_trait]
+impl RangeReader for InstrumentedRangeReader<'_> {
+ async fn metadata(&self) -> io::Result<Metadata> {
match self.file_size_hint {
Some(file_size_hint) => Ok(Metadata {
content_length: file_size_hint,
@@ -286,18 +288,14 @@ impl RangeReader for InstrumentedRangeReader<'_> {
}
}
- async fn read(&mut self, range: Range<u64>) -> io::Result<Bytes> {
+ async fn read(&self, range: Range<u64>) -> io::Result<Bytes> {
let buf = self.store.reader(&self.path).await?.read(range).await?;
self.read_byte_count.inc_by(buf.len() as _);
self.read_count.inc_by(1);
Ok(buf.to_bytes())
}
- async fn read_into(
- &mut self,
- range: Range<u64>,
- buf: &mut (impl BufMut + Send),
- ) -> io::Result<()> {
+ async fn read_into(&self, range: Range<u64>, buf: &mut (impl BufMut + Send)) -> io::Result<()> {
let reader = self.store.reader(&self.path).await?;
let size = reader.read_into(buf, range).await?;
self.read_byte_count.inc_by(size as _);
@@ -305,7 +303,7 @@ impl RangeReader for InstrumentedRangeReader<'_> {
Ok(())
}
- async fn read_vec(&mut self, ranges: &[Range<u64>]) -> io::Result<Vec<Bytes>> {
+ async fn read_vec(&self, ranges: &[Range<u64>]) -> io::Result<Vec<Bytes>> {
let bufs = self
.store
.reader(&self.path)
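
Several hunks above trace back to one underlying change: `RangeReader` methods now take `&self`, and the only mutating operation, `with_file_size_hint`, has moved into the separate `SizeAwareRangeReader` trait that `InstrumentedRangeReader` now implements. The traits below are only a rough reconstruction of that split, inferred from this diff rather than copied from `common_base::range_read`:

use std::io;
use std::ops::Range;

use async_trait::async_trait;
use bytes::Bytes;

// Read paths take `&self`; only the size-hint setter still mutates the reader.
#[async_trait]
pub trait RangeReaderSketch: Send + Sync {
    /// Simplified to return only the content length.
    async fn metadata(&self) -> io::Result<u64>;

    async fn read(&self, range: Range<u64>) -> io::Result<Bytes>;

    /// Default multi-range read: fall back to sequential single-range reads.
    async fn read_vec(&self, ranges: &[Range<u64>]) -> io::Result<Vec<Bytes>> {
        let mut out = Vec::with_capacity(ranges.len());
        for range in ranges {
            out.push(self.read(range.clone()).await?);
        }
        Ok(out)
    }
}

/// Setting the file size hint is the one remaining `&mut self` operation.
pub trait SizeAwareRangeReaderSketch: RangeReaderSketch {
    fn with_file_size_hint(&mut self, file_size_hint: u64);
}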
diff --git a/src/puffin/Cargo.toml b/src/puffin/Cargo.toml
index 31c92ba4f972..7116bbef52b9 100644
--- a/src/puffin/Cargo.toml
+++ b/src/puffin/Cargo.toml
@@ -35,4 +35,5 @@ tokio-util.workspace = true
uuid.workspace = true
[dev-dependencies]
+common-base = { workspace = true, features = ["testing"] }
common-test-util.workspace = true
diff --git a/src/puffin/src/file_format/reader/file.rs b/src/puffin/src/file_format/reader/file.rs
index 9ed40a7f181e..27a9853acdf2 100644
--- a/src/puffin/src/file_format/reader/file.rs
+++ b/src/puffin/src/file_format/reader/file.rs
@@ -74,14 +74,14 @@ impl<R> PuffinFileReader<R> {
#[async_trait]
impl<'a, R: RangeReader + 'a> AsyncReader<'a> for PuffinFileReader<R> {
- type Reader = PartialReader<&'a mut R>;
+ type Reader = PartialReader<&'a R>;
async fn metadata(&'a mut self) -> Result<FileMetadata> {
if let Some(metadata) = &self.metadata {
return Ok(metadata.clone());
}
let file_size = self.get_file_size_async().await?;
- let mut reader = PuffinFileFooterReader::new(&mut self.source, file_size)
+ let mut reader = PuffinFileFooterReader::new(&self.source, file_size)
.with_prefetch_size(DEFAULT_PREFETCH_SIZE);
let metadata = reader.metadata().await?;
self.metadata = Some(metadata.clone());
@@ -90,7 +90,7 @@ impl<'a, R: RangeReader + 'a> AsyncReader<'a> for PuffinFileReader<R> {
fn blob_reader(&'a mut self, blob_metadata: &BlobMetadata) -> Result<Self::Reader> {
Ok(PartialReader::new(
- &mut self.source,
+ &self.source,
blob_metadata.offset as _,
blob_metadata.length as _,
))
diff --git a/src/puffin/src/file_format/reader/footer.rs b/src/puffin/src/file_format/reader/footer.rs
index d0cd1e8ed4f0..c7a7eab08b0e 100644
--- a/src/puffin/src/file_format/reader/footer.rs
+++ b/src/puffin/src/file_format/reader/footer.rs
@@ -25,7 +25,7 @@ use crate::file_format::{Flags, FLAGS_SIZE, MAGIC, MAGIC_SIZE, MIN_FILE_SIZE, PA
use crate::file_metadata::FileMetadata;
/// The default prefetch size for the footer reader.
-pub const DEFAULT_PREFETCH_SIZE: u64 = 1024; // 1KiB
+pub const DEFAULT_PREFETCH_SIZE: u64 = 8192; // 8KiB
/// Reader for the footer of a Puffin data file
///
diff --git a/src/puffin/src/partial_reader.rs b/src/puffin/src/partial_reader.rs
index ef4815679440..7954f705ef1d 100644
--- a/src/puffin/src/partial_reader.rs
+++ b/src/puffin/src/partial_reader.rs
@@ -13,8 +13,6 @@
// limitations under the License.
mod r#async;
-mod position;
-mod sync;
use pin_project::pin_project;
@@ -31,13 +29,6 @@ pub struct PartialReader<R> {
/// The `offset` and `size` fields are used to determine the slice of `source` to read.
#[pin]
source: R,
-
- /// The current position within the portion.
- ///
- /// A `None` value indicates that no read operations have been performed yet on this portion.
- /// Before a read operation can be performed, the resource must be positioned at the correct offset in the portion.
- /// After the first read operation, this field will be set to `Some(_)`, representing the current read position in the portion.
- position_in_portion: Option<u64>,
}
impl<R> PartialReader<R> {
@@ -47,15 +38,9 @@ impl<R> PartialReader<R> {
offset,
size,
source,
- position_in_portion: None,
}
}
- /// Returns the current position in the portion.
- pub fn position(&self) -> u64 {
- self.position_in_portion.unwrap_or_default()
- }
-
    /// Returns the size of the portion.
pub fn size(&self) -> u64 {
self.size
@@ -65,11 +50,6 @@ impl<R> PartialReader<R> {
pub fn is_empty(&self) -> bool {
self.size == 0
}
-
- /// Returns whether the current position is at the end of the portion.
- pub fn is_eof(&self) -> bool {
- self.position() == self.size
- }
}
#[cfg(test)]
@@ -83,7 +63,6 @@ mod tests {
let data: Vec<u8> = (0..100).collect();
let reader = PartialReader::new(Cursor::new(data), 10, 0);
assert!(reader.is_empty());
- assert!(reader.is_eof());
}
#[test]
diff --git a/src/puffin/src/partial_reader/async.rs b/src/puffin/src/partial_reader/async.rs
index 4eedd1ee31f5..0b9a5c54c710 100644
--- a/src/puffin/src/partial_reader/async.rs
+++ b/src/puffin/src/partial_reader/async.rs
@@ -23,17 +23,13 @@ use crate::partial_reader::PartialReader;
#[async_trait]
impl<R: RangeReader> RangeReader for PartialReader<R> {
- fn with_file_size_hint(&mut self, _file_size_hint: u64) {
- // do nothing
- }
-
- async fn metadata(&mut self) -> io::Result<Metadata> {
+ async fn metadata(&self) -> io::Result<Metadata> {
Ok(Metadata {
content_length: self.size,
})
}
- async fn read(&mut self, range: Range<u64>) -> io::Result<Bytes> {
+ async fn read(&self, range: Range<u64>) -> io::Result<Bytes> {
let absolute_range_start = self.offset + range.start;
if absolute_range_start >= self.offset + self.size {
return Err(io::Error::new(
@@ -45,15 +41,10 @@ impl<R: RangeReader> RangeReader for PartialReader<R> {
let absolute_range = absolute_range_start..absolute_range_end;
let result = self.source.read(absolute_range.clone()).await?;
- self.position_in_portion = Some(absolute_range.end);
Ok(result)
}
- async fn read_into(
- &mut self,
- range: Range<u64>,
- buf: &mut (impl BufMut + Send),
- ) -> io::Result<()> {
+ async fn read_into(&self, range: Range<u64>, buf: &mut (impl BufMut + Send)) -> io::Result<()> {
let absolute_range_start = self.offset + range.start;
if absolute_range_start >= self.offset + self.size {
return Err(io::Error::new(
@@ -65,11 +56,10 @@ impl<R: RangeReader> RangeReader for PartialReader<R> {
let absolute_range = absolute_range_start..absolute_range_end;
self.source.read_into(absolute_range.clone(), buf).await?;
- self.position_in_portion = Some(absolute_range.end);
Ok(())
}
- async fn read_vec(&mut self, ranges: &[Range<u64>]) -> io::Result<Vec<Bytes>> {
+ async fn read_vec(&self, ranges: &[Range<u64>]) -> io::Result<Vec<Bytes>> {
let absolute_ranges = ranges
.iter()
.map(|range| {
@@ -88,9 +78,6 @@ impl<R: RangeReader> RangeReader for PartialReader<R> {
.collect::<io::Result<Vec<_>>>()?;
let results = self.source.read_vec(&absolute_ranges).await?;
- if let Some(last_range) = absolute_ranges.last() {
- self.position_in_portion = Some(last_range.end);
- }
Ok(results)
}
@@ -103,7 +90,7 @@ mod tests {
#[tokio::test]
async fn read_all_data_in_portion() {
let data: Vec<u8> = (0..100).collect();
- let mut reader = PartialReader::new(data.clone(), 0, 100);
+ let reader = PartialReader::new(data.clone(), 0, 100);
let buf = reader.read(0..100).await.unwrap();
assert_eq!(*buf, data);
}
@@ -111,7 +98,7 @@ mod tests {
#[tokio::test]
async fn read_part_of_data_in_portion() {
let data: Vec<u8> = (0..100).collect();
- let mut reader = PartialReader::new(data, 10, 30);
+ let reader = PartialReader::new(data, 10, 30);
let buf = reader.read(0..30).await.unwrap();
assert_eq!(*buf, (10..40).collect::<Vec<u8>>());
}
@@ -119,7 +106,7 @@ mod tests {
#[tokio::test]
async fn seek_past_end_of_portion_returns_error() {
let data: Vec<u8> = (0..100).collect();
- let mut reader = PartialReader::new(data, 10, 30);
+ let reader = PartialReader::new(data, 10, 30);
// seeking past the portion returns an error
assert!(reader.read(31..32).await.is_err());
}
@@ -127,11 +114,7 @@ mod tests {
#[tokio::test]
async fn is_eof_returns_true_at_end_of_portion() {
let data: Vec<u8> = (0..100).collect();
- let mut reader = PartialReader::new(data, 10, 30);
- // we are not at the end of the portion
- assert!(!reader.is_eof());
+ let reader = PartialReader::new(data, 10, 30);
let _ = reader.read(0..20).await.unwrap();
- // we are at the end of the portion
- assert!(reader.is_eof());
}
}
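
With the `position_in_portion` cursor gone, every `PartialReader` call is a pure translation from portion-relative coordinates to an absolute range in the underlying source, which is what lets the sync `Read`/`Seek` implementations and the seek-position helpers be deleted in the following hunks. A small sketch of that translation (the end-bound clamping here is an illustrative assumption; the exact bounds handling lives in `partial_reader/async.rs`):

use std::ops::Range;

// Translate a portion-relative range into an absolute range in the source.
// Reads starting at or past the end of the portion are rejected, matching
// the error path in the async implementation.
fn to_absolute(portion_offset: u64, portion_size: u64, range: Range<u64>) -> Option<Range<u64>> {
    if range.start >= portion_size {
        return None;
    }
    let start = portion_offset + range.start;
    // Clamping the end to the portion size is an assumption for illustration.
    let end = portion_offset + range.end.min(portion_size);
    Some(start..end)
}

fn main() {
    // A portion covering bytes 10..40 of the source.
    assert_eq!(to_absolute(10, 30, 0..30), Some(10..40));
    assert_eq!(to_absolute(10, 30, 5..10), Some(15..20));
    assert_eq!(to_absolute(10, 30, 31..32), None);
}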
diff --git a/src/puffin/src/partial_reader/position.rs b/src/puffin/src/partial_reader/position.rs
deleted file mode 100644
index e57817c493af..000000000000
--- a/src/puffin/src/partial_reader/position.rs
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use std::io;
-
-/// Calculates the new position after seeking. It checks if the new position
-/// is valid (within the portion bounds) before returning it.
-pub fn position_after_seek(
- seek_from: io::SeekFrom,
- position_in_portion: u64,
- size_of_portion: u64,
-) -> io::Result<u64> {
- let new_position = match seek_from {
- io::SeekFrom::Start(offset) => offset,
- io::SeekFrom::Current(offset) => {
- let next = (position_in_portion as i64) + offset;
- if next < 0 {
- return Err(io::Error::new(
- io::ErrorKind::InvalidInput,
- "invalid seek to a negative or overflowing position",
- ));
- }
- next as u64
- }
- io::SeekFrom::End(offset) => {
- let end = size_of_portion as i64;
- (end + offset) as u64
- }
- };
-
- if new_position > size_of_portion {
- return Err(io::Error::new(
- io::ErrorKind::InvalidInput,
- "invalid seek to a position beyond the end of the portion",
- ));
- }
-
- Ok(new_position)
-}
-
-#[cfg(test)]
-mod tests {
- use std::io::ErrorKind;
-
- use super::*;
-
- #[test]
- fn test_position_after_seek_from_start() {
- let result = position_after_seek(io::SeekFrom::Start(10), 0, 20).unwrap();
- assert_eq!(result, 10);
- }
-
- #[test]
- fn test_position_after_seek_from_start_out_of_bounds() {
- let result = position_after_seek(io::SeekFrom::Start(30), 0, 20);
- assert!(result.is_err());
- assert_eq!(result.unwrap_err().kind(), ErrorKind::InvalidInput);
- }
-
- #[test]
- fn test_position_after_seek_from_current() {
- let result = position_after_seek(io::SeekFrom::Current(10), 10, 30).unwrap();
- assert_eq!(result, 20);
- }
-
- #[test]
- fn test_position_after_seek_from_current_negative_position_within_bounds() {
- let result = position_after_seek(io::SeekFrom::Current(-10), 15, 20).unwrap();
- assert_eq!(result, 5);
- }
-
- #[test]
- fn test_position_after_seek_from_current_negative_position() {
- let result = position_after_seek(io::SeekFrom::Current(-10), 5, 20);
- assert!(result.is_err());
- assert_eq!(result.unwrap_err().kind(), ErrorKind::InvalidInput);
- }
-
- #[test]
- fn test_position_after_seek_from_end() {
- let result = position_after_seek(io::SeekFrom::End(-10), 0, 30).unwrap();
- assert_eq!(result, 20);
- }
-
- #[test]
- fn test_position_after_seek_from_end_out_of_bounds() {
- let result = position_after_seek(io::SeekFrom::End(10), 0, 20);
- assert!(result.is_err());
- assert_eq!(result.unwrap_err().kind(), ErrorKind::InvalidInput);
- }
-}
diff --git a/src/puffin/src/partial_reader/sync.rs b/src/puffin/src/partial_reader/sync.rs
deleted file mode 100644
index 1b7781543973..000000000000
--- a/src/puffin/src/partial_reader/sync.rs
+++ /dev/null
@@ -1,180 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use std::io;
-
-use crate::partial_reader::position::position_after_seek;
-use crate::partial_reader::PartialReader;
-
-impl<R: io::Read + io::Seek> io::Read for PartialReader<R> {
- fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
- // past end of portion
- if self.position() > self.size() {
- return Err(io::Error::new(
- io::ErrorKind::InvalidInput,
- "invalid read past the end of the portion",
- ));
- }
-
- // end of portion
- if self.is_eof() {
- return Ok(0);
- }
-
- // haven't read from the portion yet, need to seek to the start of it.
- if self.position_in_portion.is_none() {
- self.source.seek(io::SeekFrom::Start(self.offset))?;
- self.position_in_portion = Some(0);
- }
-
- // prevent reading over the end
- let max_len = (self.size() - self.position_in_portion.unwrap()) as usize;
- let actual_len = max_len.min(buf.len());
-
- // create a limited reader
- let target_buf = &mut buf[..actual_len];
-
- // perform the actual read from the source and update the position.
- let read_bytes = self.source.read(target_buf)?;
- self.position_in_portion = Some(self.position_in_portion.unwrap() + read_bytes as u64);
-
- Ok(read_bytes)
- }
-}
-
-impl<R: io::Read + io::Seek> io::Seek for PartialReader<R> {
- fn seek(&mut self, pos: io::SeekFrom) -> io::Result<u64> {
- let new_position = position_after_seek(pos, self.position(), self.size())?;
- let pos = io::SeekFrom::Start(self.offset + new_position);
- self.source.seek(pos)?;
-
- self.position_in_portion = Some(new_position);
- Ok(new_position)
- }
-}
-
-#[cfg(test)]
-mod tests {
- use std::io::{Cursor, Read, Seek, SeekFrom};
-
- use super::*;
-
- #[test]
- fn read_all_data_in_portion() {
- let data: Vec<u8> = (0..100).collect();
- let mut reader = PartialReader::new(Cursor::new(data.clone()), 0, 100);
- let mut buf = vec![0; 100];
- assert_eq!(reader.read(&mut buf).unwrap(), 100);
- assert_eq!(buf, data);
- }
-
- #[test]
- fn read_part_of_data_in_portion() {
- let data: Vec<u8> = (0..100).collect();
- let mut reader = PartialReader::new(Cursor::new(data), 10, 30);
- let mut buf = vec![0; 30];
- assert_eq!(reader.read(&mut buf).unwrap(), 30);
- assert_eq!(buf, (10..40).collect::<Vec<u8>>());
- }
-
- #[test]
- fn seek_and_read_data_in_portion() {
- let data: Vec<u8> = (0..100).collect();
- let mut reader = PartialReader::new(Cursor::new(data), 10, 30);
- assert_eq!(reader.seek(SeekFrom::Start(10)).unwrap(), 10);
- let mut buf = vec![0; 10];
- assert_eq!(reader.read(&mut buf).unwrap(), 10);
- assert_eq!(buf, (20..30).collect::<Vec<u8>>());
- }
-
- #[test]
- fn read_past_end_of_portion_is_eof() {
- let data: Vec<u8> = (0..100).collect();
- let mut reader = PartialReader::new(Cursor::new(data), 10, 30);
- let mut buf = vec![0; 50];
- assert_eq!(reader.read(&mut buf).unwrap(), 30);
- assert_eq!(reader.read(&mut buf).unwrap(), 0); // hit EOF
- }
-
- #[test]
- fn seek_past_end_of_portion_returns_error() {
- let data: Vec<u8> = (0..100).collect();
- let mut reader = PartialReader::new(Cursor::new(data), 10, 30);
- // seeking past the portion returns an error
- assert!(reader.seek(SeekFrom::Start(31)).is_err());
- }
-
- #[test]
- fn seek_to_negative_position_returns_error() {
- let data: Vec<u8> = (0..100).collect();
- let mut reader = PartialReader::new(Cursor::new(data), 10, 30);
- assert_eq!(reader.seek(SeekFrom::Start(10)).unwrap(), 10);
- // seeking back to the start of the portion
- assert_eq!(reader.seek(SeekFrom::Current(-10)).unwrap(), 0);
- // seeking to a negative position returns an error
- assert!(reader.seek(SeekFrom::Current(-1)).is_err());
- }
-
- #[test]
- fn seek_from_end_of_portion() {
- let data: Vec<u8> = (0..100).collect();
- let mut reader = PartialReader::new(Cursor::new(data), 10, 30);
- let mut buf = vec![0; 10];
- // seek to 10 bytes before the end of the portion
- assert_eq!(reader.seek(SeekFrom::End(-10)).unwrap(), 20);
- assert_eq!(reader.read(&mut buf).unwrap(), 10);
- // the final 10 bytes of the portion
- assert_eq!(buf, (30..40).collect::<Vec<u8>>());
- assert!(reader.is_eof());
- }
-
- #[test]
- fn seek_from_end_to_negative_position_returns_error() {
- let data: Vec<u8> = (0..100).collect();
- let mut reader = PartialReader::new(Cursor::new(data.clone()), 10, 30);
- // seeking to a negative position returns an error
- assert!(reader.seek(SeekFrom::End(-31)).is_err());
- }
-
- #[test]
- fn zero_length_portion_returns_zero_on_read() {
- let data: Vec<u8> = (0..100).collect();
- let mut reader = PartialReader::new(Cursor::new(data), 10, 0);
- let mut buf = vec![0; 10];
- // reading a portion with zero length returns 0 bytes
- assert_eq!(reader.read(&mut buf).unwrap(), 0);
- }
-
- #[test]
- fn is_eof_returns_true_at_end_of_portion() {
- let data: Vec<u8> = (0..100).collect();
- let mut reader = PartialReader::new(Cursor::new(data), 10, 30);
- // we are not at the end of the portion
- assert!(!reader.is_eof());
- let mut buf = vec![0; 30];
- assert_eq!(reader.read(&mut buf).unwrap(), 30);
- // we are at the end of the portion
- assert!(reader.is_eof());
- }
-
- #[test]
- fn position_resets_after_seek_to_start() {
- let data: Vec<u8> = (0..100).collect();
- let mut reader = PartialReader::new(Cursor::new(data), 10, 30);
- assert_eq!(reader.seek(SeekFrom::Start(10)).unwrap(), 10);
- assert_eq!(reader.position(), 10);
- assert_eq!(reader.seek(SeekFrom::Start(0)).unwrap(), 0);
- assert_eq!(reader.position(), 0);
- }
-}
diff --git a/src/puffin/src/puffin_manager/file_accessor.rs b/src/puffin/src/puffin_manager/file_accessor.rs
index dc32db6db50c..351423b054e7 100644
--- a/src/puffin/src/puffin_manager/file_accessor.rs
+++ b/src/puffin/src/puffin_manager/file_accessor.rs
@@ -13,7 +13,7 @@
// limitations under the License.
use async_trait::async_trait;
-use common_base::range_read::RangeReader;
+use common_base::range_read::SizeAwareRangeReader;
use futures::AsyncWrite;
use crate::error::Result;
@@ -22,7 +22,7 @@ use crate::error::Result;
#[async_trait]
#[auto_impl::auto_impl(Arc)]
pub trait PuffinFileAccessor: Send + Sync + 'static {
- type Reader: RangeReader + Sync;
+ type Reader: SizeAwareRangeReader + Sync;
type Writer: AsyncWrite + Unpin + Send;
/// Opens a reader for the given puffin file.
diff --git a/src/puffin/src/puffin_manager/fs_puffin_manager/reader.rs b/src/puffin/src/puffin_manager/fs_puffin_manager/reader.rs
index a5da2f75f858..9228c0b59424 100644
--- a/src/puffin/src/puffin_manager/fs_puffin_manager/reader.rs
+++ b/src/puffin/src/puffin_manager/fs_puffin_manager/reader.rs
@@ -19,7 +19,7 @@ use std::sync::Arc;
use async_compression::futures::bufread::ZstdDecoder;
use async_trait::async_trait;
use bytes::{BufMut, Bytes};
-use common_base::range_read::{AsyncReadAdapter, Metadata, RangeReader};
+use common_base::range_read::{AsyncReadAdapter, Metadata, RangeReader, SizeAwareRangeReader};
use futures::io::BufReader;
use futures::{AsyncRead, AsyncWrite};
use snafu::{ensure, OptionExt, ResultExt};
@@ -202,7 +202,7 @@ where
.find(|m| m.blob_type == key.as_str())
.context(BlobNotFoundSnafu { blob: key })?;
- let mut reader = file.blob_reader(blob_metadata)?;
+ let reader = file.blob_reader(blob_metadata)?;
let meta = reader.metadata().await.context(MetadataSnafu)?;
let buf = reader
.read(0..meta.content_length)
@@ -315,36 +315,25 @@ where
A: RangeReader,
B: RangeReader,
{
- fn with_file_size_hint(&mut self, file_size_hint: u64) {
- match self {
- Either::L(a) => a.with_file_size_hint(file_size_hint),
- Either::R(b) => b.with_file_size_hint(file_size_hint),
- }
- }
-
- async fn metadata(&mut self) -> io::Result<Metadata> {
+ async fn metadata(&self) -> io::Result<Metadata> {
match self {
Either::L(a) => a.metadata().await,
Either::R(b) => b.metadata().await,
}
}
- async fn read(&mut self, range: Range<u64>) -> io::Result<Bytes> {
+ async fn read(&self, range: Range<u64>) -> io::Result<Bytes> {
match self {
Either::L(a) => a.read(range).await,
Either::R(b) => b.read(range).await,
}
}
- async fn read_into(
- &mut self,
- range: Range<u64>,
- buf: &mut (impl BufMut + Send),
- ) -> io::Result<()> {
+ async fn read_into(&self, range: Range<u64>, buf: &mut (impl BufMut + Send)) -> io::Result<()> {
match self {
Either::L(a) => a.read_into(range, buf).await,
Either::R(b) => b.read_into(range, buf).await,
}
}
- async fn read_vec(&mut self, ranges: &[Range<u64>]) -> io::Result<Vec<Bytes>> {
+ async fn read_vec(&self, ranges: &[Range<u64>]) -> io::Result<Vec<Bytes>> {
match self {
Either::L(a) => a.read_vec(ranges).await,
Either::R(b) => b.read_vec(ranges).await,
diff --git a/src/puffin/src/puffin_manager/stager/bounded_stager.rs b/src/puffin/src/puffin_manager/stager/bounded_stager.rs
index 19fd891438b3..476eaf9bb62a 100644
--- a/src/puffin/src/puffin_manager/stager/bounded_stager.rs
+++ b/src/puffin/src/puffin_manager/stager/bounded_stager.rs
@@ -548,7 +548,7 @@ mod tests {
let puffin_file_name = "test_get_blob";
let key = "key";
- let mut reader = stager
+ let reader = stager
.get_blob(
puffin_file_name,
key,
@@ -685,7 +685,7 @@ mod tests {
.await
.unwrap();
- let mut reader = stager
+ let reader = stager
.get_blob(
puffin_file_name,
blob_key,
@@ -732,7 +732,7 @@ mod tests {
let blob_key = "blob_key";
// First time to get the blob
- let mut reader = stager
+ let reader = stager
.get_blob(
puffin_file_name,
blob_key,
@@ -758,7 +758,7 @@ mod tests {
assert_eq!(&*buf, b"Hello world");
// Second time to get the blob, get from recycle bin
- let mut reader = stager
+ let reader = stager
.get_blob(
puffin_file_name,
blob_key,
diff --git a/src/puffin/src/puffin_manager/tests.rs b/src/puffin/src/puffin_manager/tests.rs
index 37b3ce7c65af..c4057a5f5bcb 100644
--- a/src/puffin/src/puffin_manager/tests.rs
+++ b/src/puffin/src/puffin_manager/tests.rs
@@ -297,7 +297,7 @@ async fn check_blob(
compressed: bool,
) {
let blob = puffin_reader.blob(key).await.unwrap();
- let mut reader = blob.reader().await.unwrap();
+ let reader = blob.reader().await.unwrap();
let meta = reader.metadata().await.unwrap();
let bs = reader.read(0..meta.content_length).await.unwrap();
assert_eq!(&*bs, raw_data);
diff --git a/src/puffin/src/tests.rs b/src/puffin/src/tests.rs
index a3bb48587924..5eb31511c899 100644
--- a/src/puffin/src/tests.rs
+++ b/src/puffin/src/tests.rs
@@ -57,7 +57,7 @@ async fn test_read_puffin_file_metadata_async() {
"src/tests/resources/sample-metric-data-uncompressed.puffin",
];
for path in paths {
- let mut reader = FileReader::new(path).await.unwrap();
+ let reader = FileReader::new(path).await.unwrap();
let file_size = reader.metadata().await.unwrap().content_length;
let mut reader = PuffinFileReader::new(reader);
let metadata = reader.metadata().await.unwrap();
| feat | introduce `ParallelFstValuesMapper` (#5276) |
| acfa229641421abf2319aa951b1c3f9a412b6403 | 2023-06-29 08:15:05 | Weny Xu | chore: bump orc-rust to 0319acd (#1847) | false |
diff --git a/Cargo.lock b/Cargo.lock
index afa26a81abe4..301d645320bd 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -5994,8 +5994,7 @@ checksum = "978aa494585d3ca4ad74929863093e87cac9790d81fe7aba2b3dc2890643a0fc"
[[package]]
name = "orc-rust"
version = "0.2.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e15d3f67795da54d9526e46b7808181ce6236d518f56ca1ee556d3a3fdd77c66"
+source = "git+https://github.com/WenyXu/orc-rs.git?rev=0319acd32456e403c20f135cc012441a76852605#0319acd32456e403c20f135cc012441a76852605"
dependencies = [
"arrow",
"bytes",
@@ -6010,6 +6009,7 @@ dependencies = [
"snafu",
"tokio",
"zigzag",
+ "zstd 0.12.3+zstd.1.5.2",
]
[[package]]
diff --git a/src/common/datasource/Cargo.toml b/src/common/datasource/Cargo.toml
index 8c1d3c793b06..6106e0b6a161 100644
--- a/src/common/datasource/Cargo.toml
+++ b/src/common/datasource/Cargo.toml
@@ -24,7 +24,7 @@ datafusion.workspace = true
derive_builder = "0.12"
futures.workspace = true
object-store = { path = "../../object-store" }
-orc-rust = "0.2.3"
+orc-rust = { git = "https://github.com/WenyXu/orc-rs.git", rev = "0319acd32456e403c20f135cc012441a76852605" }
regex = "1.7"
snafu.workspace = true
tokio.workspace = true
| chore | bump orc-rust to 0319acd (#1847) |
| 3d9df822ad015111b187762ede730c2c94e452a4 | 2025-01-07 12:57:58 | Weny Xu | refactor: refactor `PgStore` (#5309) | false |
diff --git a/.github/workflows/develop.yml b/.github/workflows/develop.yml
index c8f8c681b2de..f446ccfc6c0b 100644
--- a/.github/workflows/develop.yml
+++ b/.github/workflows/develop.yml
@@ -100,7 +100,7 @@ jobs:
- name: Build greptime binaries
shell: bash
# `cargo gc` will invoke `cargo build` with specified args
- run: cargo gc -- --bin greptime --bin sqlness-runner
+ run: cargo gc -- --bin greptime --bin sqlness-runner --features pg_kvbackend
- name: Pack greptime binaries
shell: bash
run: |
@@ -261,7 +261,7 @@ jobs:
      - name: Build greptime binary
shell: bash
# `cargo gc` will invoke `cargo build` with specified args
- run: cargo gc --profile ci -- --bin greptime
+ run: cargo gc --profile ci -- --bin greptime --features pg_kvbackend
- name: Pack greptime binary
shell: bash
run: |
@@ -573,6 +573,9 @@ jobs:
- name: "Remote WAL"
opts: "-w kafka -k 127.0.0.1:9092"
kafka: true
+ - name: "Pg Kvbackend"
+ opts: "--setup-pg"
+ kafka: false
timeout-minutes: 60
steps:
- uses: actions/checkout@v4
diff --git a/Cargo.lock b/Cargo.lock
index 1c5b57834592..d0503c3b9f40 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2174,6 +2174,7 @@ dependencies = [
"async-recursion",
"async-stream",
"async-trait",
+ "backon",
"base64 0.21.7",
"bytes",
"chrono",
diff --git a/Cargo.toml b/Cargo.toml
index 73571315fe34..91b05578d101 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -99,6 +99,7 @@ arrow-schema = { version = "51.0", features = ["serde"] }
async-stream = "0.3"
async-trait = "0.1"
axum = { version = "0.6", features = ["headers"] }
+backon = "1"
base64 = "0.21"
bigdecimal = "0.4.2"
bitflags = "2.4.1"
diff --git a/src/cli/src/bench.rs b/src/cli/src/bench.rs
index c04512548033..c948859b527e 100644
--- a/src/cli/src/bench.rs
+++ b/src/cli/src/bench.rs
@@ -78,7 +78,9 @@ impl BenchTableMetadataCommand {
#[cfg(feature = "pg_kvbackend")]
let kv_backend = if let Some(postgres_addr) = &self.postgres_addr {
info!("Using postgres as kv backend");
- PgStore::with_url(postgres_addr, 128).await.unwrap()
+ PgStore::with_url(postgres_addr, "greptime_metakv", 128)
+ .await
+ .unwrap()
} else {
kv_backend
};
diff --git a/src/common/meta/Cargo.toml b/src/common/meta/Cargo.toml
index 00da3cacca94..231399a578dc 100644
--- a/src/common/meta/Cargo.toml
+++ b/src/common/meta/Cargo.toml
@@ -6,7 +6,7 @@ license.workspace = true
[features]
testing = []
-pg_kvbackend = ["dep:tokio-postgres"]
+pg_kvbackend = ["dep:tokio-postgres", "dep:backon"]
[lints]
workspace = true
@@ -17,6 +17,7 @@ api.workspace = true
async-recursion = "1.0"
async-stream = "0.3"
async-trait.workspace = true
+backon = { workspace = true, optional = true }
base64.workspace = true
bytes.workspace = true
chrono.workspace = true
diff --git a/src/common/meta/src/error.rs b/src/common/meta/src/error.rs
index 8c92146a4624..0fc483879d56 100644
--- a/src/common/meta/src/error.rs
+++ b/src/common/meta/src/error.rs
@@ -639,15 +639,6 @@ pub enum Error {
location: Location,
},
- #[snafu(display("Failed to parse {} from str to utf8", name))]
- StrFromUtf8 {
- name: String,
- #[snafu(source)]
- error: std::str::Utf8Error,
- #[snafu(implicit)]
- location: Location,
- },
-
#[snafu(display("Value not exists"))]
ValueNotExist {
#[snafu(implicit)]
@@ -658,8 +649,9 @@ pub enum Error {
GetCache { source: Arc<Error> },
#[cfg(feature = "pg_kvbackend")]
- #[snafu(display("Failed to execute via Postgres"))]
+ #[snafu(display("Failed to execute via Postgres, sql: {}", sql))]
PostgresExecution {
+ sql: String,
#[snafu(source)]
error: tokio_postgres::Error,
#[snafu(implicit)]
@@ -693,6 +685,13 @@ pub enum Error {
operation: String,
},
+ #[cfg(feature = "pg_kvbackend")]
+ #[snafu(display("Postgres transaction retry failed"))]
+ PostgresTransactionRetryFailed {
+ #[snafu(implicit)]
+ location: Location,
+ },
+
#[snafu(display(
"Datanode table info not found, table id: {}, datanode id: {}",
table_id,
@@ -756,8 +755,7 @@ impl ErrorExt for Error {
| UnexpectedLogicalRouteTable { .. }
| ProcedureOutput { .. }
| FromUtf8 { .. }
- | MetadataCorruption { .. }
- | StrFromUtf8 { .. } => StatusCode::Unexpected,
+ | MetadataCorruption { .. } => StatusCode::Unexpected,
SendMessage { .. } | GetKvCache { .. } | CacheNotGet { .. } => StatusCode::Internal,
@@ -807,7 +805,8 @@ impl ErrorExt for Error {
PostgresExecution { .. }
| CreatePostgresPool { .. }
| GetPostgresConnection { .. }
- | PostgresTransaction { .. } => StatusCode::Internal,
+ | PostgresTransaction { .. }
+ | PostgresTransactionRetryFailed { .. } => StatusCode::Internal,
Error::DatanodeTableInfoNotFound { .. } => StatusCode::Internal,
}
}
@@ -818,6 +817,20 @@ impl ErrorExt for Error {
}
impl Error {
+ #[cfg(feature = "pg_kvbackend")]
+ /// Check if the error is a serialization error.
+ pub fn is_serialization_error(&self) -> bool {
+ match self {
+ Error::PostgresTransaction { error, .. } => {
+ error.code() == Some(&tokio_postgres::error::SqlState::T_R_SERIALIZATION_FAILURE)
+ }
+ Error::PostgresExecution { error, .. } => {
+ error.code() == Some(&tokio_postgres::error::SqlState::T_R_SERIALIZATION_FAILURE)
+ }
+ _ => false,
+ }
+ }
+
/// Creates a new [Error::RetryLater] error from source `err`.
pub fn retry_later<E: ErrorExt + Send + Sync + 'static>(err: E) -> Error {
Error::RetryLater {
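
The new `Error::is_serialization_error` helper and `PostgresTransactionRetryFailed` variant exist so the store can retry transactions that fail with SQLSTATE 40001 (serialization failure) a bounded number of times (see `PG_STORE_TXN_RETRY_COUNT` further down); the real delays come from the `backon` crate's `ExponentialBuilder`, which this commit adds as a dependency. The loop below is only a hand-rolled illustration of that policy with a fixed sleep, not the store's implementation:

use std::time::Duration;

// Retry a transaction a bounded number of times, but only when the error is
// a Postgres serialization failure; any other error surfaces immediately.
fn run_txn_with_retry<T, E>(
    mut txn: impl FnMut() -> Result<T, E>,
    is_serialization_error: impl Fn(&E) -> bool,
    retry_count: usize,
) -> Result<T, E> {
    let mut last_err = None;
    for _ in 0..retry_count {
        match txn() {
            Ok(v) => return Ok(v),
            Err(e) if is_serialization_error(&e) => {
                // The real store uses an exponential backoff here.
                std::thread::sleep(Duration::from_millis(10));
                last_err = Some(e);
            }
            Err(e) => return Err(e),
        }
    }
    // Every attempt hit a serialization failure; the store maps this case to
    // `PostgresTransactionRetryFailed`.
    Err(last_err.expect("retry_count must be greater than zero"))
}

fn main() {
    let mut attempts = 0;
    let result: Result<u32, &str> = run_txn_with_retry(
        || {
            attempts += 1;
            if attempts < 3 {
                Err("serialization failure")
            } else {
                Ok(42)
            }
        },
        |e| *e == "serialization failure",
        3,
    );
    assert_eq!(result, Ok(42));
}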
diff --git a/src/common/meta/src/kv_backend/etcd.rs b/src/common/meta/src/kv_backend/etcd.rs
index 213489a583c7..17c4b7db7107 100644
--- a/src/common/meta/src/kv_backend/etcd.rs
+++ b/src/common/meta/src/kv_backend/etcd.rs
@@ -591,7 +591,7 @@ mod tests {
#[tokio::test]
async fn test_range_2() {
if let Some(kv_backend) = build_kv_backend().await {
- test_kv_range_2_with_prefix(kv_backend, b"range2/".to_vec()).await;
+ test_kv_range_2_with_prefix(&kv_backend, b"range2/".to_vec()).await;
}
}
@@ -618,7 +618,8 @@ mod tests {
if let Some(kv_backend) = build_kv_backend().await {
let prefix = b"deleteRange/";
prepare_kv_with_prefix(&kv_backend, prefix.to_vec()).await;
- test_kv_delete_range_with_prefix(kv_backend, prefix.to_vec()).await;
+ test_kv_delete_range_with_prefix(&kv_backend, prefix.to_vec()).await;
+ unprepare_kv(&kv_backend, prefix).await;
}
}
@@ -627,20 +628,20 @@ mod tests {
if let Some(kv_backend) = build_kv_backend().await {
let prefix = b"batchDelete/";
prepare_kv_with_prefix(&kv_backend, prefix.to_vec()).await;
- test_kv_batch_delete_with_prefix(kv_backend, prefix.to_vec()).await;
+ test_kv_batch_delete_with_prefix(&kv_backend, prefix.to_vec()).await;
+ unprepare_kv(&kv_backend, prefix).await;
}
}
#[tokio::test]
async fn test_etcd_txn() {
if let Some(kv_backend) = build_kv_backend().await {
- let kv_backend_ref = Arc::new(kv_backend);
- test_txn_one_compare_op(kv_backend_ref.clone()).await;
- text_txn_multi_compare_op(kv_backend_ref.clone()).await;
- test_txn_compare_equal(kv_backend_ref.clone()).await;
- test_txn_compare_greater(kv_backend_ref.clone()).await;
- test_txn_compare_less(kv_backend_ref.clone()).await;
- test_txn_compare_not_equal(kv_backend_ref).await;
+ test_txn_one_compare_op(&kv_backend).await;
+ text_txn_multi_compare_op(&kv_backend).await;
+ test_txn_compare_equal(&kv_backend).await;
+ test_txn_compare_greater(&kv_backend).await;
+ test_txn_compare_less(&kv_backend).await;
+ test_txn_compare_not_equal(&kv_backend).await;
}
}
}
diff --git a/src/common/meta/src/kv_backend/memory.rs b/src/common/meta/src/kv_backend/memory.rs
index b236d7b57619..989a91ea168c 100644
--- a/src/common/meta/src/kv_backend/memory.rs
+++ b/src/common/meta/src/kv_backend/memory.rs
@@ -355,7 +355,7 @@ mod tests {
async fn test_range_2() {
let kv = MemoryKvBackend::<Error>::new();
- test_kv_range_2(kv).await;
+ test_kv_range_2(&kv).await;
}
#[tokio::test]
@@ -376,24 +376,24 @@ mod tests {
async fn test_delete_range() {
let kv_backend = mock_mem_store_with_data().await;
- test_kv_delete_range(kv_backend).await;
+ test_kv_delete_range(&kv_backend).await;
}
#[tokio::test]
async fn test_batch_delete() {
let kv_backend = mock_mem_store_with_data().await;
- test_kv_batch_delete(kv_backend).await;
+ test_kv_batch_delete(&kv_backend).await;
}
#[tokio::test]
async fn test_memory_txn() {
- let kv_backend = Arc::new(MemoryKvBackend::<Error>::new());
- test_txn_one_compare_op(kv_backend.clone()).await;
- text_txn_multi_compare_op(kv_backend.clone()).await;
- test_txn_compare_equal(kv_backend.clone()).await;
- test_txn_compare_greater(kv_backend.clone()).await;
- test_txn_compare_less(kv_backend.clone()).await;
- test_txn_compare_not_equal(kv_backend).await;
+ let kv_backend = MemoryKvBackend::<Error>::new();
+ test_txn_one_compare_op(&kv_backend).await;
+ text_txn_multi_compare_op(&kv_backend).await;
+ test_txn_compare_equal(&kv_backend).await;
+ test_txn_compare_greater(&kv_backend).await;
+ test_txn_compare_less(&kv_backend).await;
+ test_txn_compare_not_equal(&kv_backend).await;
}
}
diff --git a/src/common/meta/src/kv_backend/postgres.rs b/src/common/meta/src/kv_backend/postgres.rs
index b75f045314ec..7f3333575ae2 100644
--- a/src/common/meta/src/kv_backend/postgres.rs
+++ b/src/common/meta/src/kv_backend/postgres.rs
@@ -13,17 +13,21 @@
// limitations under the License.
use std::any::Any;
-use std::borrow::Cow;
+use std::collections::HashMap;
+use std::marker::PhantomData;
use std::sync::Arc;
+use std::time::Duration;
+use backon::{BackoffBuilder, ExponentialBuilder};
+use common_telemetry::debug;
use deadpool_postgres::{Config, Pool, Runtime};
use snafu::ResultExt;
use tokio_postgres::types::ToSql;
-use tokio_postgres::NoTls;
+use tokio_postgres::{IsolationLevel, NoTls, Row};
use crate::error::{
CreatePostgresPoolSnafu, Error, GetPostgresConnectionSnafu, PostgresExecutionSnafu,
- PostgresTransactionSnafu, Result, StrFromUtf8Snafu,
+ PostgresTransactionRetryFailedSnafu, PostgresTransactionSnafu, Result,
};
use crate::kv_backend::txn::{
Compare, Txn as KvTxn, TxnOp, TxnOpResponse, TxnResponse as KvTxnResponse,
@@ -32,8 +36,8 @@ use crate::kv_backend::{KvBackend, KvBackendRef, TxnService};
use crate::metrics::METRIC_META_TXN_REQUEST;
use crate::rpc::store::{
BatchDeleteRequest, BatchDeleteResponse, BatchGetRequest, BatchGetResponse, BatchPutRequest,
- BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse, DeleteRangeRequest,
- DeleteRangeResponse, PutRequest, PutResponse, RangeRequest, RangeResponse,
+ BatchPutResponse, DeleteRangeRequest, DeleteRangeResponse, PutRequest, PutResponse,
+ RangeRequest, RangeResponse,
};
use crate::rpc::KeyValue;
@@ -54,11 +58,11 @@ impl PgQueryExecutor<'_> {
PgQueryExecutor::Client(client) => client
.query(query, params)
.await
- .context(PostgresExecutionSnafu),
+ .context(PostgresExecutionSnafu { sql: query }),
PgQueryExecutor::Transaction(txn) => txn
.query(query, params)
.await
- .context(PostgresExecutionSnafu),
+ .context(PostgresExecutionSnafu { sql: query }),
}
}
@@ -74,78 +78,188 @@ impl PgQueryExecutor<'_> {
}
}
+const PG_STORE_TXN_RETRY_COUNT: usize = 3;
+
 /// Postgres backend store for metasrv
-pub struct PgStore {
+pub struct PgStore<T> {
pool: Pool,
max_txn_ops: usize,
+ sql_template_set: SqlTemplateSet<T>,
+ txn_retry_count: usize,
}
const EMPTY: &[u8] = &[0];
-// TODO: allow users to configure metadata table name.
-const METADKV_CREATION: &str =
- "CREATE TABLE IF NOT EXISTS greptime_metakv(k varchar PRIMARY KEY, v varchar)";
+/// Factory for building sql templates.
+struct SqlTemplateFactory<'a, T> {
+ table_name: &'a str,
+ _phantom: PhantomData<T>,
+}
-const FULL_TABLE_SCAN: &str = "SELECT k, v FROM greptime_metakv $1 ORDER BY K";
+impl<'a, T> SqlTemplateFactory<'a, T> {
+ /// Creates a new [`SqlTemplateFactory`] with the given table name.
+ fn new(table_name: &'a str) -> Self {
+ Self {
+ table_name,
+ _phantom: PhantomData,
+ }
+ }
-const POINT_GET: &str = "SELECT k, v FROM greptime_metakv WHERE k = $1";
+ /// Builds the template set for the given table name.
+ fn build(
+ &self,
+ key_value_from_row: fn(Row) -> T,
+ key_value_from_row_key_only: fn(Row) -> T,
+ ) -> SqlTemplateSet<T> {
+ let table_name = self.table_name;
+ SqlTemplateSet {
+ table_name: table_name.to_string(),
+ create_table_statement: format!(
+ "CREATE TABLE IF NOT EXISTS {table_name}(k bytea PRIMARY KEY, v bytea)",
+ ),
+ range_template: RangeTemplate {
+ point: format!("SELECT k, v FROM {table_name} WHERE k = $1"),
+ range: format!("SELECT k, v FROM {table_name} WHERE k >= $1 AND k < $2 ORDER BY k"),
+ full: format!("SELECT k, v FROM {table_name} $1 ORDER BY k"),
+ left_bounded: format!("SELECT k, v FROM {table_name} WHERE k >= $1 ORDER BY k"),
+ prefix: format!("SELECT k, v FROM {table_name} WHERE k LIKE $1 ORDER BY k"),
+ },
+ delete_template: RangeTemplate {
+ point: format!("DELETE FROM {table_name} WHERE k = $1 RETURNING k,v;"),
+ range: format!("DELETE FROM {table_name} WHERE k >= $1 AND k < $2 RETURNING k,v;"),
+ full: format!("DELETE FROM {table_name} RETURNING k,v"),
+ left_bounded: format!("DELETE FROM {table_name} WHERE k >= $1 RETURNING k,v;"),
+ prefix: format!("DELETE FROM {table_name} WHERE k LIKE $1 RETURNING k,v;"),
+ },
+ key_value_from_row,
+ key_value_from_row_key_only,
+ }
+ }
+}
-const PREFIX_SCAN: &str = "SELECT k, v FROM greptime_metakv WHERE k LIKE $1 ORDER BY K";
+/// Templates for the given table name.
+#[derive(Debug, Clone)]
+pub struct SqlTemplateSet<T> {
+ table_name: String,
+ create_table_statement: String,
+ range_template: RangeTemplate,
+ delete_template: RangeTemplate,
+ key_value_from_row: fn(Row) -> T,
+ key_value_from_row_key_only: fn(Row) -> T,
+}
-const RANGE_SCAN_LEFT_BOUNDED: &str = "SELECT k, v FROM greptime_metakv WHERE k >= $1 ORDER BY K";
+impl<T: Sync + Send> SqlTemplateSet<T> {
+ /// Returns the row converter, using the key-only variant when `keys_only` is true.
+ fn key_value_from_row_with_opts(&self, keys_only: bool) -> impl Fn(Row) -> T {
+ if keys_only {
+ self.key_value_from_row_key_only
+ } else {
+ self.key_value_from_row
+ }
+ }
-const RANGE_SCAN_FULL_RANGE: &str =
- "SELECT k, v FROM greptime_metakv WHERE k >= $1 AND K < $2 ORDER BY K";
+ /// Generates the sql for batch get.
+ fn generate_batch_get_query(&self, key_len: usize) -> String {
+ let table_name = &self.table_name;
+ let in_clause = generate_in_placeholders(1, key_len).join(", ");
+ format!("SELECT k, v FROM {table_name} WHERE k in ({});", in_clause)
+ }
-const FULL_TABLE_DELETE: &str = "DELETE FROM greptime_metakv RETURNING k,v";
+ /// Generates the sql for batch delete.
+ fn generate_batch_delete_query(&self, key_len: usize) -> String {
+ let table_name = &self.table_name;
+ let in_clause = generate_in_placeholders(1, key_len).join(", ");
+ format!(
+ "DELETE FROM {table_name} WHERE k in ({}) RETURNING k,v;",
+ in_clause
+ )
+ }
-const POINT_DELETE: &str = "DELETE FROM greptime_metakv WHERE K = $1 RETURNING k,v;";
+ /// Generates the sql for batch upsert.
+ fn generate_batch_upsert_query(&self, kv_len: usize) -> String {
+ let table_name = &self.table_name;
+ let in_placeholders: Vec<String> = (1..=kv_len).map(|i| format!("${}", i)).collect();
+ let in_clause = in_placeholders.join(", ");
+ let mut param_index = kv_len + 1;
+ let mut values_placeholders = Vec::new();
+ for _ in 0..kv_len {
+ values_placeholders.push(format!("(${0}, ${1})", param_index, param_index + 1));
+ param_index += 2;
+ }
+ let values_clause = values_placeholders.join(", ");
-const PREFIX_DELETE: &str = "DELETE FROM greptime_metakv WHERE k LIKE $1 RETURNING k,v;";
+ format!(
+ r#"
+ WITH prev AS (
+ SELECT k,v FROM {table_name} WHERE k IN ({in_clause})
+ ), update AS (
+ INSERT INTO {table_name} (k, v) VALUES
+ {values_clause}
+ ON CONFLICT (
+ k
+ ) DO UPDATE SET
+ v = excluded.v
+ )
-const RANGE_DELETE_LEFT_BOUNDED: &str = "DELETE FROM greptime_metakv WHERE k >= $1 RETURNING k,v;";
+ SELECT k, v FROM prev;
+ "#
+ )
+ }
+}
-const RANGE_DELETE_FULL_RANGE: &str =
- "DELETE FROM greptime_metakv WHERE k >= $1 AND K < $2 RETURNING k,v;";
+/// Default sql template set for [`KeyValue`].
+pub type DefaultSqlTemplateSet = SqlTemplateSet<KeyValue>;
-const CAS: &str = r#"
-WITH prev AS (
- SELECT k,v FROM greptime_metakv WHERE k = $1 AND v = $2
-), update AS (
-UPDATE greptime_metakv
-SET k=$1,
-v=$2
-WHERE
- k=$1 AND v=$3
-)
+/// Default pg store for [`KeyValue`].
+pub type DefaultPgStore = PgStore<KeyValue>;
-SELECT k, v FROM prev;
-"#;
+impl<T> PgStore<T> {
+ async fn client(&self) -> Result<PgClient> {
+ match self.pool.get().await {
+ Ok(client) => Ok(client),
+ Err(e) => GetPostgresConnectionSnafu {
+ reason: e.to_string(),
+ }
+ .fail(),
+ }
+ }
-const PUT_IF_NOT_EXISTS: &str = r#"
-WITH prev AS (
- select k,v from greptime_metakv where k = $1
-), insert AS (
- INSERT INTO greptime_metakv
- VALUES ($1, $2)
- ON CONFLICT (k) DO NOTHING
-)
+ async fn client_executor(&self) -> Result<PgQueryExecutor<'_>> {
+ let client = self.client().await?;
+ Ok(PgQueryExecutor::Client(client))
+ }
-SELECT k, v FROM prev;"#;
+ async fn txn_executor<'a>(&self, client: &'a mut PgClient) -> Result<PgQueryExecutor<'a>> {
+ let txn = client
+ .build_transaction()
+ .isolation_level(IsolationLevel::Serializable)
+ .start()
+ .await
+ .context(PostgresTransactionSnafu {
+ operation: "start".to_string(),
+ })?;
+ Ok(PgQueryExecutor::Transaction(txn))
+ }
+}
-impl PgStore {
+impl DefaultPgStore {
/// Create pgstore impl of KvBackendRef from url.
- pub async fn with_url(url: &str, max_txn_ops: usize) -> Result<KvBackendRef> {
+ pub async fn with_url(url: &str, table_name: &str, max_txn_ops: usize) -> Result<KvBackendRef> {
let mut cfg = Config::new();
cfg.url = Some(url.to_string());
+ // TODO(weny, CookiePie): add tls support
let pool = cfg
.create_pool(Some(Runtime::Tokio1), NoTls)
.context(CreatePostgresPoolSnafu)?;
- Self::with_pg_pool(pool, max_txn_ops).await
+ Self::with_pg_pool(pool, table_name, max_txn_ops).await
}
/// Create pgstore impl of KvBackendRef from tokio-postgres client.
- pub async fn with_pg_pool(pool: Pool, max_txn_ops: usize) -> Result<KvBackendRef> {
+ pub async fn with_pg_pool(
+ pool: Pool,
+ table_name: &str,
+ max_txn_ops: usize,
+ ) -> Result<KvBackendRef> {
// This step ensures the postgres metadata backend is ready to use.
// We check if greptime_metakv table exists, and we will create a new table
// if it does not exist.
@@ -158,144 +272,104 @@ impl PgStore {
.fail();
}
};
+ let template_factory = SqlTemplateFactory::new(table_name);
+ let sql_template_set =
+ template_factory.build(key_value_from_row, key_value_from_row_key_only);
client
- .execute(METADKV_CREATION, &[])
- .await
- .context(PostgresExecutionSnafu)?;
- Ok(Arc::new(Self { pool, max_txn_ops }))
- }
-
- async fn get_client(&self) -> Result<PgClient> {
- match self.pool.get().await {
- Ok(client) => Ok(client),
- Err(e) => GetPostgresConnectionSnafu {
- reason: e.to_string(),
- }
- .fail(),
- }
- }
-
- async fn get_client_executor(&self) -> Result<PgQueryExecutor<'_>> {
- let client = self.get_client().await?;
- Ok(PgQueryExecutor::Client(client))
- }
-
- async fn get_txn_executor<'a>(&self, client: &'a mut PgClient) -> Result<PgQueryExecutor<'a>> {
- let txn = client
- .transaction()
+ .execute(&sql_template_set.create_table_statement, &[])
.await
- .context(PostgresTransactionSnafu {
- operation: "start".to_string(),
+ .with_context(|_| PostgresExecutionSnafu {
+ sql: sql_template_set.create_table_statement.to_string(),
})?;
- Ok(PgQueryExecutor::Transaction(txn))
- }
-
- async fn put_if_not_exists_with_query_executor(
- &self,
- query_executor: &PgQueryExecutor<'_>,
- key: &str,
- value: &str,
- ) -> Result<bool> {
- let res = query_executor
- .query(PUT_IF_NOT_EXISTS, &[&key, &value])
- .await?;
- Ok(res.is_empty())
+ Ok(Arc::new(Self {
+ pool,
+ max_txn_ops,
+ sql_template_set,
+ txn_retry_count: PG_STORE_TXN_RETRY_COUNT,
+ }))
}
}
-fn select_range_template(req: &RangeRequest) -> &str {
- if req.range_end.is_empty() {
- return POINT_GET;
- }
- if req.key == EMPTY && req.range_end == EMPTY {
- FULL_TABLE_SCAN
- } else if req.range_end == EMPTY {
- RANGE_SCAN_LEFT_BOUNDED
- } else if is_prefix_range(&req.key, &req.range_end) {
- PREFIX_SCAN
- } else {
- RANGE_SCAN_FULL_RANGE
- }
+/// Type of range template.
+#[derive(Debug, Clone, Copy)]
+enum RangeTemplateType {
+ Point,
+ Range,
+ Full,
+ LeftBounded,
+ Prefix,
}
-fn select_range_delete_template(req: &DeleteRangeRequest) -> &str {
- if req.range_end.is_empty() {
- return POINT_DELETE;
- }
- if req.key == EMPTY && req.range_end == EMPTY {
- FULL_TABLE_DELETE
- } else if req.range_end == EMPTY {
- RANGE_DELETE_LEFT_BOUNDED
- } else if is_prefix_range(&req.key, &req.range_end) {
- PREFIX_DELETE
- } else {
- RANGE_DELETE_FULL_RANGE
+/// Builds params for the given range template type.
+impl RangeTemplateType {
+ fn build_params(&self, mut key: Vec<u8>, range_end: Vec<u8>) -> Vec<Vec<u8>> {
+ match self {
+ RangeTemplateType::Point => vec![key],
+ RangeTemplateType::Range => vec![key, range_end],
+ RangeTemplateType::Full => vec![],
+ RangeTemplateType::LeftBounded => vec![key],
+ RangeTemplateType::Prefix => {
+ key.push(b'%');
+ vec![key]
+ }
+ }
}
}
-// Generate dynamic parameterized sql for batch get.
-fn generate_batch_get_query(key_len: usize) -> String {
- let in_placeholders: Vec<String> = (1..=key_len).map(|i| format!("${}", i)).collect();
- let in_clause = in_placeholders.join(", ");
- format!(
- "SELECT k, v FROM greptime_metakv WHERE k in ({});",
- in_clause
- )
+/// Templates for range request.
+#[derive(Debug, Clone)]
+struct RangeTemplate {
+ point: String,
+ range: String,
+ full: String,
+ left_bounded: String,
+ prefix: String,
}
-// Generate dynamic parameterized sql for batch delete.
-fn generate_batch_delete_query(key_len: usize) -> String {
- let in_placeholders: Vec<String> = (1..=key_len).map(|i| format!("${}", i)).collect();
- let in_clause = in_placeholders.join(", ");
- format!(
- "DELETE FROM greptime_metakv WHERE k in ({}) RETURNING k, v;",
- in_clause
- )
-}
-
-// Generate dynamic parameterized sql for batch upsert.
-fn generate_batch_upsert_query(kv_len: usize) -> String {
- let in_placeholders: Vec<String> = (1..=kv_len).map(|i| format!("${}", i)).collect();
- let in_clause = in_placeholders.join(", ");
- let mut param_index = kv_len + 1;
- let mut values_placeholders = Vec::new();
- for _ in 0..kv_len {
- values_placeholders.push(format!("(${0}, ${1})", param_index, param_index + 1));
- param_index += 2;
+impl RangeTemplate {
+ /// Gets the template for the given type.
+ fn get(&self, typ: RangeTemplateType) -> &str {
+ match typ {
+ RangeTemplateType::Point => &self.point,
+ RangeTemplateType::Range => &self.range,
+ RangeTemplateType::Full => &self.full,
+ RangeTemplateType::LeftBounded => &self.left_bounded,
+ RangeTemplateType::Prefix => &self.prefix,
+ }
}
- let values_clause = values_placeholders.join(", ");
- format!(
- r#"
- WITH prev AS (
- SELECT k,v FROM greptime_metakv WHERE k IN ({in_clause})
- ), update AS (
- INSERT INTO greptime_metakv (k, v) VALUES
- {values_clause}
- ON CONFLICT (
- k
- ) DO UPDATE SET
- v = excluded.v
- )
-
- SELECT k, v FROM prev;
- "#
- )
+ /// Adds limit to the template.
+ fn with_limit(template: &str, limit: i64) -> String {
+ if limit == 0 {
+ return format!("{};", template);
+ }
+ format!("{} LIMIT {};", template, limit)
+ }
}
-// Trim null byte at the end and convert bytes to string.
-fn process_bytes<'a>(data: &'a [u8], name: &str) -> Result<&'a str> {
- let mut len = data.len();
- // remove trailing null bytes to avoid error in postgres encoding.
- while len > 0 && data[len - 1] == 0 {
- len -= 1;
+/// Determines the template type for a range request.
+fn range_template(key: &[u8], range_end: &[u8]) -> RangeTemplateType {
+ match (key, range_end) {
+ (_, &[]) => RangeTemplateType::Point,
+ (EMPTY, EMPTY) => RangeTemplateType::Full,
+ (_, EMPTY) => RangeTemplateType::LeftBounded,
+ (start, end) => {
+ if is_prefix_range(start, end) {
+ RangeTemplateType::Prefix
+ } else {
+ RangeTemplateType::Range
+ }
+ }
}
- let res = std::str::from_utf8(&data[0..len]).context(StrFromUtf8Snafu { name })?;
- Ok(res)
+}
+
+/// Generates the placeholders for a SQL `IN` clause.
+fn generate_in_placeholders(from: usize, to: usize) -> Vec<String> {
+ (from..=to).map(|i| format!("${}", i)).collect()
}
#[async_trait::async_trait]
-impl KvBackend for PgStore {
+impl KvBackend for DefaultPgStore {
fn name(&self) -> &str {
"Postgres"
}
@@ -305,101 +379,83 @@ impl KvBackend for PgStore {
}
async fn range(&self, req: RangeRequest) -> Result<RangeResponse> {
- let client = self.get_client_executor().await?;
+ let client = self.client_executor().await?;
self.range_with_query_executor(&client, req).await
}
async fn put(&self, req: PutRequest) -> Result<PutResponse> {
- let client = self.get_client_executor().await?;
+ let client = self.client_executor().await?;
self.put_with_query_executor(&client, req).await
}
async fn batch_put(&self, req: BatchPutRequest) -> Result<BatchPutResponse> {
- let client = self.get_client_executor().await?;
+ let client = self.client_executor().await?;
self.batch_put_with_query_executor(&client, req).await
}
async fn batch_get(&self, req: BatchGetRequest) -> Result<BatchGetResponse> {
- let client = self.get_client_executor().await?;
+ let client = self.client_executor().await?;
self.batch_get_with_query_executor(&client, req).await
}
async fn delete_range(&self, req: DeleteRangeRequest) -> Result<DeleteRangeResponse> {
- let client = self.get_client_executor().await?;
+ let client = self.client_executor().await?;
self.delete_range_with_query_executor(&client, req).await
}
async fn batch_delete(&self, req: BatchDeleteRequest) -> Result<BatchDeleteResponse> {
- let client = self.get_client_executor().await?;
+ let client = self.client_executor().await?;
self.batch_delete_with_query_executor(&client, req).await
}
+}
- async fn compare_and_put(&self, req: CompareAndPutRequest) -> Result<CompareAndPutResponse> {
- let client = self.get_client_executor().await?;
- self.compare_and_put_with_query_executor(&client, req).await
+/// Converts a row to a [`KeyValue`] with key only.
+fn key_value_from_row_key_only(r: Row) -> KeyValue {
+ KeyValue {
+ key: r.get(0),
+ value: vec![],
}
}
-impl PgStore {
+/// Converts a row to a [`KeyValue`].
+fn key_value_from_row(r: Row) -> KeyValue {
+ KeyValue {
+ key: r.get(0),
+ value: r.get(1),
+ }
+}
+
+impl DefaultPgStore {
async fn range_with_query_executor(
&self,
query_executor: &PgQueryExecutor<'_>,
req: RangeRequest,
) -> Result<RangeResponse> {
- let mut params = vec![];
- let template = select_range_template(&req);
- if req.key != EMPTY {
- let key = process_bytes(&req.key, "rangeKey")?;
- if template == PREFIX_SCAN {
- let prefix = format!("{key}%");
- params.push(Cow::Owned(prefix))
- } else {
- params.push(Cow::Borrowed(key))
- }
- }
- if template == RANGE_SCAN_FULL_RANGE && req.range_end != EMPTY {
- let range_end = process_bytes(&req.range_end, "rangeEnd")?;
- params.push(Cow::Borrowed(range_end));
- }
+ let template_type = range_template(&req.key, &req.range_end);
+ let template = self.sql_template_set.range_template.get(template_type);
+ let params = template_type.build_params(req.key, req.range_end);
+ let params_ref = params.iter().map(|x| x as _).collect::<Vec<_>>();
+ // Always add 1 to limit to check if there is more data
+ let query =
+ RangeTemplate::with_limit(template, if req.limit == 0 { 0 } else { req.limit + 1 });
let limit = req.limit as usize;
- let limit_cause = match limit > 0 {
- true => format!(" LIMIT {};", limit + 1),
- false => ";".to_string(),
- };
- let template = format!("{}{}", template, limit_cause);
- let params: Vec<&(dyn ToSql + Sync)> = params
- .iter()
- .map(|x| match x {
- Cow::Borrowed(borrowed) => borrowed as &(dyn ToSql + Sync),
- Cow::Owned(owned) => owned as &(dyn ToSql + Sync),
- })
- .collect();
- let res = query_executor.query(&template, ¶ms).await?;
- let kvs: Vec<KeyValue> = res
+ debug!("query: {:?}, params: {:?}", query, params);
+ let res = query_executor.query(&query, ¶ms_ref).await?;
+ let mut kvs: Vec<KeyValue> = res
.into_iter()
- .map(|r| {
- let key: String = r.get(0);
- if req.keys_only {
- return KeyValue {
- key: key.into_bytes(),
- value: vec![],
- };
- }
- let value: String = r.get(1);
- KeyValue {
- key: key.into_bytes(),
- value: value.into_bytes(),
- }
- })
+ .map(
+ self.sql_template_set
+ .key_value_from_row_with_opts(req.keys_only),
+ )
.collect();
- if limit == 0 || limit > kvs.len() {
+ // If limit is 0, we always return all data
+ if limit == 0 || kvs.len() <= limit {
return Ok(RangeResponse { kvs, more: false });
}
- let (filtered_kvs, _) = kvs.split_at(limit);
- Ok(RangeResponse {
- kvs: filtered_kvs.to_vec(),
- more: kvs.len() > limit,
- })
+ // More rows than the limit were fetched, so drop the probe row and set `more` to true
+ let removed = kvs.pop();
+ debug_assert!(removed.is_some());
+ Ok(RangeResponse { kvs, more: true })
}
async fn put_with_query_executor(
@@ -422,11 +478,12 @@ impl PgStore {
.await?;
if !res.prev_kvs.is_empty() {
+ debug_assert!(req.prev_kv);
return Ok(PutResponse {
prev_kv: Some(res.prev_kvs.remove(0)),
});
}
- Ok(PutResponse { prev_kv: None })
+ Ok(PutResponse::default())
}
async fn batch_put_with_query_executor(
@@ -434,41 +491,33 @@ impl PgStore {
query_executor: &PgQueryExecutor<'_>,
req: BatchPutRequest,
) -> Result<BatchPutResponse> {
- let mut in_params = Vec::with_capacity(req.kvs.len());
+ let mut in_params = Vec::with_capacity(req.kvs.len() * 3);
let mut values_params = Vec::with_capacity(req.kvs.len() * 2);
for kv in &req.kvs {
- let processed_key = process_bytes(&kv.key, "BatchPutRequestKey")?;
+ let processed_key = &kv.key;
in_params.push(processed_key);
- let processed_value = process_bytes(&kv.value, "BatchPutRequestValue")?;
+ let processed_value = &kv.value;
values_params.push(processed_key);
values_params.push(processed_value);
}
in_params.extend(values_params);
- let params: Vec<&(dyn ToSql + Sync)> =
- in_params.iter().map(|x| x as &(dyn ToSql + Sync)).collect();
-
- let query = generate_batch_upsert_query(req.kvs.len());
-
+ let params = in_params.iter().map(|x| x as _).collect::<Vec<_>>();
+ let query = self
+ .sql_template_set
+ .generate_batch_upsert_query(req.kvs.len());
let res = query_executor.query(&query, ¶ms).await?;
if req.prev_kv {
- let kvs: Vec<KeyValue> = res
- .into_iter()
- .map(|r| {
- let key: String = r.get(0);
- let value: String = r.get(1);
- KeyValue {
- key: key.into_bytes(),
- value: value.into_bytes(),
- }
- })
- .collect();
- if !kvs.is_empty() {
- return Ok(BatchPutResponse { prev_kvs: kvs });
- }
+ Ok(BatchPutResponse {
+ prev_kvs: res
+ .into_iter()
+ .map(self.sql_template_set.key_value_from_row)
+ .collect(),
+ })
+ } else {
+ Ok(BatchPutResponse::default())
}
- Ok(BatchPutResponse { prev_kvs: vec![] })
}
/// Batch get with certain client. It's needed for a client with transaction.
@@ -480,30 +529,17 @@ impl PgStore {
if req.keys.is_empty() {
return Ok(BatchGetResponse { kvs: vec![] });
}
- let query = generate_batch_get_query(req.keys.len());
- let value_params = req
- .keys
- .iter()
- .map(|k| process_bytes(k, "BatchGetRequestKey"))
- .collect::<Result<Vec<&str>>>()?;
- let params: Vec<&(dyn ToSql + Sync)> = value_params
- .iter()
- .map(|x| x as &(dyn ToSql + Sync))
- .collect();
-
+ let query = self
+ .sql_template_set
+ .generate_batch_get_query(req.keys.len());
+ let params = req.keys.iter().map(|x| x as _).collect::<Vec<_>>();
let res = query_executor.query(&query, ¶ms).await?;
- let kvs: Vec<KeyValue> = res
- .into_iter()
- .map(|r| {
- let key: String = r.get(0);
- let value: String = r.get(1);
- KeyValue {
- key: key.into_bytes(),
- value: value.into_bytes(),
- }
- })
- .collect();
- Ok(BatchGetResponse { kvs })
+ Ok(BatchGetResponse {
+ kvs: res
+ .into_iter()
+ .map(self.sql_template_set.key_value_from_row)
+ .collect(),
+ })
}
async fn delete_range_with_query_executor(
@@ -511,54 +547,20 @@ impl PgStore {
query_executor: &PgQueryExecutor<'_>,
req: DeleteRangeRequest,
) -> Result<DeleteRangeResponse> {
- let mut params = vec![];
- let template = select_range_delete_template(&req);
- if req.key != EMPTY {
- let key = process_bytes(&req.key, "deleteRangeKey")?;
- if template == PREFIX_DELETE {
- let prefix = format!("{key}%");
- params.push(Cow::Owned(prefix));
- } else {
- params.push(Cow::Borrowed(key));
- }
- }
- if template == RANGE_DELETE_FULL_RANGE && req.range_end != EMPTY {
- let range_end = process_bytes(&req.range_end, "deleteRangeEnd")?;
- params.push(Cow::Borrowed(range_end));
- }
- let params: Vec<&(dyn ToSql + Sync)> = params
- .iter()
- .map(|x| match x {
- Cow::Borrowed(borrowed) => borrowed as &(dyn ToSql + Sync),
- Cow::Owned(owned) => owned as &(dyn ToSql + Sync),
- })
- .collect();
-
- let res = query_executor.query(template, ¶ms).await?;
- let deleted = res.len() as i64;
- if !req.prev_kv {
- return Ok({
- DeleteRangeResponse {
- deleted,
- prev_kvs: vec![],
- }
- });
+ let template_type = range_template(&req.key, &req.range_end);
+ let template = self.sql_template_set.delete_template.get(template_type);
+ let params = template_type.build_params(req.key, req.range_end);
+ let params_ref = params.iter().map(|x| x as _).collect::<Vec<_>>();
+ let res = query_executor.query(template, ¶ms_ref).await?;
+ let mut resp = DeleteRangeResponse::new(res.len() as i64);
+ if req.prev_kv {
+ resp.with_prev_kvs(
+ res.into_iter()
+ .map(self.sql_template_set.key_value_from_row)
+ .collect(),
+ );
}
- let kvs: Vec<KeyValue> = res
- .into_iter()
- .map(|r| {
- let key: String = r.get(0);
- let value: String = r.get(1);
- KeyValue {
- key: key.into_bytes(),
- value: value.into_bytes(),
- }
- })
- .collect();
- Ok(DeleteRangeResponse {
- deleted,
- prev_kvs: kvs,
- })
+ Ok(resp)
}
async fn batch_delete_with_query_executor(
@@ -567,78 +569,22 @@ impl PgStore {
req: BatchDeleteRequest,
) -> Result<BatchDeleteResponse> {
if req.keys.is_empty() {
- return Ok(BatchDeleteResponse { prev_kvs: vec![] });
- }
- let query = generate_batch_delete_query(req.keys.len());
- let value_params = req
- .keys
- .iter()
- .map(|k| process_bytes(k, "BatchDeleteRequestKey"))
- .collect::<Result<Vec<&str>>>()?;
- let params: Vec<&(dyn ToSql + Sync)> = value_params
- .iter()
- .map(|x| x as &(dyn ToSql + Sync))
- .collect();
-
- let res = query_executor.query(&query, ¶ms).await?;
- if !req.prev_kv {
- return Ok(BatchDeleteResponse { prev_kvs: vec![] });
+ return Ok(BatchDeleteResponse::default());
}
- let kvs: Vec<KeyValue> = res
- .into_iter()
- .map(|r| {
- let key: String = r.get(0);
- let value: String = r.get(1);
- KeyValue {
- key: key.into_bytes(),
- value: value.into_bytes(),
- }
- })
- .collect();
- Ok(BatchDeleteResponse { prev_kvs: kvs })
- }
-
- async fn compare_and_put_with_query_executor(
- &self,
- query_executor: &PgQueryExecutor<'_>,
- req: CompareAndPutRequest,
- ) -> Result<CompareAndPutResponse> {
- let key = process_bytes(&req.key, "CASKey")?;
- let value = process_bytes(&req.value, "CASValue")?;
- if req.expect.is_empty() {
- let put_res = self
- .put_if_not_exists_with_query_executor(query_executor, key, value)
- .await?;
- return Ok(CompareAndPutResponse {
- success: put_res,
- prev_kv: None,
- });
- }
- let expect = process_bytes(&req.expect, "CASExpect")?;
-
- let res = query_executor.query(CAS, &[&key, &value, &expect]).await?;
- match res.is_empty() {
- true => Ok(CompareAndPutResponse {
- success: false,
- prev_kv: None,
- }),
- false => {
- let mut kvs: Vec<KeyValue> = res
+ let query = self
+ .sql_template_set
+ .generate_batch_delete_query(req.keys.len());
+ let params = req.keys.iter().map(|x| x as _).collect::<Vec<_>>();
+ let res = query_executor.query(&query, ¶ms).await?;
+ if req.prev_kv {
+ Ok(BatchDeleteResponse {
+ prev_kvs: res
.into_iter()
- .map(|r| {
- let key: String = r.get(0);
- let value: String = r.get(1);
- KeyValue {
- key: key.into_bytes(),
- value: value.into_bytes(),
- }
- })
- .collect();
- Ok(CompareAndPutResponse {
- success: true,
- prev_kv: Some(kvs.remove(0)),
- })
- }
+ .map(self.sql_template_set.key_value_from_row)
+ .collect(),
+ })
+ } else {
+ Ok(BatchDeleteResponse::default())
}
}
@@ -653,11 +599,12 @@ impl PgStore {
let res = self
.batch_get_with_query_executor(query_executor, batch_get_req)
.await?;
+ debug!("batch get res: {:?}", res);
let res_map = res
.kvs
.into_iter()
.map(|kv| (kv.key, kv.value))
- .collect::<std::collections::HashMap<Vec<u8>, Vec<u8>>>();
+ .collect::<HashMap<Vec<u8>, Vec<u8>>>();
for c in cmp {
let value = res_map.get(&c.key);
if !c.compare_value(value) {
@@ -676,130 +623,121 @@ impl PgStore {
if !check_txn_ops(txn_ops)? {
return Ok(None);
}
- match txn_ops.first() {
- Some(TxnOp::Delete(_)) => {
- let mut batch_del_req = BatchDeleteRequest {
- keys: vec![],
- prev_kv: false,
- };
- for op in txn_ops {
- if let TxnOp::Delete(key) = op {
- batch_del_req.keys.push(key.clone());
- }
- }
- let res = self
- .batch_delete_with_query_executor(query_executor, batch_del_req)
- .await?;
- let res_map = res
- .prev_kvs
- .into_iter()
- .map(|kv| (kv.key, kv.value))
- .collect::<std::collections::HashMap<Vec<u8>, Vec<u8>>>();
- let mut resps = Vec::with_capacity(txn_ops.len());
- for op in txn_ops {
- if let TxnOp::Delete(key) = op {
- let value = res_map.get(key);
- resps.push(TxnOpResponse::ResponseDelete(DeleteRangeResponse {
- deleted: if value.is_some() { 1 } else { 0 },
- prev_kvs: value
- .map(|v| {
- vec![KeyValue {
- key: key.clone(),
- value: v.clone(),
- }]
- })
- .unwrap_or_default(),
- }));
- }
- }
- Ok(Some(resps))
+ // Safety: txn_ops is not empty
+ match txn_ops.first().unwrap() {
+ TxnOp::Delete(_) => self.handle_batch_delete(query_executor, txn_ops).await,
+ TxnOp::Put(_, _) => self.handle_batch_put(query_executor, txn_ops).await,
+ TxnOp::Get(_) => self.handle_batch_get(query_executor, txn_ops).await,
+ }
+ }
+
+ async fn handle_batch_delete(
+ &self,
+ query_executor: &PgQueryExecutor<'_>,
+ txn_ops: &[TxnOp],
+ ) -> Result<Option<Vec<TxnOpResponse>>> {
+ let mut batch_del_req = BatchDeleteRequest {
+ keys: vec![],
+ prev_kv: true,
+ };
+ for op in txn_ops {
+ if let TxnOp::Delete(key) = op {
+ batch_del_req.keys.push(key.clone());
}
- Some(TxnOp::Put(_, _)) => {
- let mut batch_put_req = BatchPutRequest {
- kvs: vec![],
- prev_kv: false,
- };
- for op in txn_ops {
- if let TxnOp::Put(key, value) = op {
- batch_put_req.kvs.push(KeyValue {
- key: key.clone(),
- value: value.clone(),
- });
- }
- }
- let res = self
- .batch_put_with_query_executor(query_executor, batch_put_req)
- .await?;
- let res_map = res
- .prev_kvs
- .into_iter()
- .map(|kv| (kv.key, kv.value))
- .collect::<std::collections::HashMap<Vec<u8>, Vec<u8>>>();
- let mut resps = Vec::with_capacity(txn_ops.len());
- for op in txn_ops {
- if let TxnOp::Put(key, _) = op {
- let prev_kv = res_map.get(key);
- match prev_kv {
- Some(v) => {
- resps.push(TxnOpResponse::ResponsePut(PutResponse {
- prev_kv: Some(KeyValue {
- key: key.clone(),
- value: v.clone(),
- }),
- }));
- }
- None => {
- resps.push(TxnOpResponse::ResponsePut(PutResponse {
- prev_kv: None,
- }));
- }
- }
- }
- }
- Ok(Some(resps))
+ }
+ let res = self
+ .batch_delete_with_query_executor(query_executor, batch_del_req)
+ .await?;
+ let res_map = res
+ .prev_kvs
+ .into_iter()
+ .map(|kv| (kv.key, kv.value))
+ .collect::<HashMap<Vec<u8>, Vec<u8>>>();
+ let mut resps = Vec::with_capacity(txn_ops.len());
+ for op in txn_ops {
+ if let TxnOp::Delete(key) = op {
+ let value = res_map.get(key);
+ resps.push(TxnOpResponse::ResponseDelete(DeleteRangeResponse {
+ deleted: if value.is_some() { 1 } else { 0 },
+ prev_kvs: vec![],
+ }));
}
- Some(TxnOp::Get(_)) => {
- let mut batch_get_req = BatchGetRequest { keys: vec![] };
- for op in txn_ops {
- if let TxnOp::Get(key) = op {
- batch_get_req.keys.push(key.clone());
- }
- }
- let res = self
- .batch_get_with_query_executor(query_executor, batch_get_req)
- .await?;
- let res_map = res
- .kvs
- .into_iter()
- .map(|kv| (kv.key, kv.value))
- .collect::<std::collections::HashMap<Vec<u8>, Vec<u8>>>();
- let mut resps = Vec::with_capacity(txn_ops.len());
- for op in txn_ops {
- if let TxnOp::Get(key) = op {
- let value = res_map.get(key);
- resps.push(TxnOpResponse::ResponseGet(RangeResponse {
- kvs: value
- .map(|v| {
- vec![KeyValue {
- key: key.clone(),
- value: v.clone(),
- }]
- })
- .unwrap_or_default(),
- more: false,
- }));
- }
- }
- Ok(Some(resps))
+ }
+ Ok(Some(resps))
+ }
+
+ async fn handle_batch_put(
+ &self,
+ query_executor: &PgQueryExecutor<'_>,
+ txn_ops: &[TxnOp],
+ ) -> Result<Option<Vec<TxnOpResponse>>> {
+ let mut batch_put_req = BatchPutRequest {
+ kvs: vec![],
+ prev_kv: false,
+ };
+ for op in txn_ops {
+ if let TxnOp::Put(key, value) = op {
+ batch_put_req.kvs.push(KeyValue {
+ key: key.clone(),
+ value: value.clone(),
+ });
+ }
+ }
+ let _ = self
+ .batch_put_with_query_executor(query_executor, batch_put_req)
+ .await?;
+ let mut resps = Vec::with_capacity(txn_ops.len());
+ for op in txn_ops {
+ if let TxnOp::Put(_, _) = op {
+ resps.push(TxnOpResponse::ResponsePut(PutResponse { prev_kv: None }));
}
- None => Ok(Some(vec![])),
}
+ Ok(Some(resps))
+ }
+
+ async fn handle_batch_get(
+ &self,
+ query_executor: &PgQueryExecutor<'_>,
+ txn_ops: &[TxnOp],
+ ) -> Result<Option<Vec<TxnOpResponse>>> {
+ let mut batch_get_req = BatchGetRequest { keys: vec![] };
+ for op in txn_ops {
+ if let TxnOp::Get(key) = op {
+ batch_get_req.keys.push(key.clone());
+ }
+ }
+ let res = self
+ .batch_get_with_query_executor(query_executor, batch_get_req)
+ .await?;
+ let res_map = res
+ .kvs
+ .into_iter()
+ .map(|kv| (kv.key, kv.value))
+ .collect::<HashMap<Vec<u8>, Vec<u8>>>();
+ let mut resps = Vec::with_capacity(txn_ops.len());
+ for op in txn_ops {
+ if let TxnOp::Get(key) = op {
+ let value = res_map.get(key);
+ resps.push(TxnOpResponse::ResponseGet(RangeResponse {
+ kvs: value
+ .map(|v| {
+ vec![KeyValue {
+ key: key.clone(),
+ value: v.clone(),
+ }]
+ })
+ .unwrap_or_default(),
+ more: false,
+ }));
+ }
+ }
+ Ok(Some(resps))
}
async fn execute_txn_op(
&self,
query_executor: &PgQueryExecutor<'_>,
- op: TxnOp,
+ op: &TxnOp,
) -> Result<TxnOpResponse> {
match op {
TxnOp::Put(key, value) => {
@@ -807,8 +745,8 @@ impl PgStore {
.put_with_query_executor(
query_executor,
PutRequest {
- key,
- value,
+ key: key.clone(),
+ value: value.clone(),
prev_kv: false,
},
)
@@ -820,7 +758,7 @@ impl PgStore {
.range_with_query_executor(
query_executor,
RangeRequest {
- key,
+ key: key.clone(),
range_end: vec![],
limit: 1,
keys_only: false,
@@ -834,7 +772,7 @@ impl PgStore {
.delete_range_with_query_executor(
query_executor,
DeleteRangeRequest {
- key,
+ key: key.clone(),
range_end: vec![],
prev_kv: false,
},
@@ -844,19 +782,10 @@ impl PgStore {
}
}
}
-}
-
-#[async_trait::async_trait]
-impl TxnService for PgStore {
- type Error = Error;
-
- async fn txn(&self, txn: KvTxn) -> Result<KvTxnResponse> {
- let _timer = METRIC_META_TXN_REQUEST
- .with_label_values(&["postgres", "txn"])
- .start_timer();
- let mut client = self.get_client().await?;
- let pg_txn = self.get_txn_executor(&mut client).await?;
+ async fn txn_inner(&self, txn: &KvTxn) -> Result<KvTxnResponse> {
+ let mut client = self.client().await?;
+ let pg_txn = self.txn_executor(&mut client).await?;
let mut success = true;
if txn.c_when {
success = self.execute_txn_cmp(&pg_txn, &txn.req.compare).await?;
@@ -866,7 +795,7 @@ impl TxnService for PgStore {
match self.try_batch_txn(&pg_txn, &txn.req.success).await? {
Some(res) => responses.extend(res),
None => {
- for txnop in txn.req.success {
+ for txnop in &txn.req.success {
let res = self.execute_txn_op(&pg_txn, txnop).await?;
responses.push(res);
}
@@ -876,7 +805,7 @@ impl TxnService for PgStore {
match self.try_batch_txn(&pg_txn, &txn.req.failure).await? {
Some(res) => responses.extend(res),
None => {
- for txnop in txn.req.failure {
+ for txnop in &txn.req.failure {
let res = self.execute_txn_op(&pg_txn, txnop).await?;
responses.push(res);
}
@@ -890,6 +819,43 @@ impl TxnService for PgStore {
succeeded: success,
})
}
+}
+
+#[async_trait::async_trait]
+impl TxnService for DefaultPgStore {
+ type Error = Error;
+
+ async fn txn(&self, txn: KvTxn) -> Result<KvTxnResponse> {
+ let _timer = METRIC_META_TXN_REQUEST
+ .with_label_values(&["postgres", "txn"])
+ .start_timer();
+
+ let mut backoff = ExponentialBuilder::default()
+ .with_min_delay(Duration::from_millis(10))
+ .with_max_delay(Duration::from_millis(200))
+ .with_max_times(self.txn_retry_count)
+ .build();
+
+ loop {
+ match self.txn_inner(&txn).await {
+ Ok(res) => return Ok(res),
+ Err(e) => {
+ if e.is_serialization_error() {
+ let d = backoff.next();
+ if let Some(d) = d {
+ tokio::time::sleep(d).await;
+ continue;
+ }
+ break;
+ } else {
+ return Err(e);
+ }
+ }
+ }
+ }
+
+ PostgresTransactionRetryFailedSnafu {}.fail()
+ }
fn max_txn_ops(&self) -> usize {
self.max_txn_ops
@@ -908,23 +874,20 @@ fn is_prefix_range(start: &[u8], end: &[u8]) -> bool {
false
}
-/// Check if the transaction operations are the same type.
+/// Checks if the transaction operations are the same type.
fn check_txn_ops(txn_ops: &[TxnOp]) -> Result<bool> {
if txn_ops.is_empty() {
return Ok(false);
}
- let first_op = &txn_ops[0];
- for op in txn_ops {
- match (op, first_op) {
- (TxnOp::Put(_, _), TxnOp::Put(_, _)) => {}
- (TxnOp::Get(_), TxnOp::Get(_)) => {}
- (TxnOp::Delete(_), TxnOp::Delete(_)) => {}
- _ => {
- return Ok(false);
- }
- }
- }
- Ok(true)
+ let same = txn_ops.windows(2).all(|a| {
+ matches!(
+ (&a[0], &a[1]),
+ (TxnOp::Put(_, _), TxnOp::Put(_, _))
+ | (TxnOp::Get(_), TxnOp::Get(_))
+ | (TxnOp::Delete(_), TxnOp::Delete(_))
+ )
+ });
+ Ok(same)
}
#[cfg(test)]
@@ -939,7 +902,7 @@ mod tests {
unprepare_kv,
};
- async fn build_pg_kv_backend() -> Option<PgStore> {
+ async fn build_pg_kv_backend(table_name: &str) -> Option<DefaultPgStore> {
let endpoints = std::env::var("GT_POSTGRES_ENDPOINTS").unwrap_or_default();
if endpoints.is_empty() {
return None;
@@ -952,71 +915,110 @@ mod tests {
.context(CreatePostgresPoolSnafu)
.unwrap();
let client = pool.get().await.unwrap();
+ let template_factory = SqlTemplateFactory::new(table_name);
+ let sql_templates = template_factory.build(key_value_from_row, key_value_from_row_key_only);
client
- .execute(METADKV_CREATION, &[])
+ .execute(&sql_templates.create_table_statement, &[])
.await
- .context(PostgresExecutionSnafu)
+ .context(PostgresExecutionSnafu {
+ sql: sql_templates.create_table_statement.to_string(),
+ })
.unwrap();
Some(PgStore {
pool,
max_txn_ops: 128,
+ sql_template_set: sql_templates,
+ txn_retry_count: PG_STORE_TXN_RETRY_COUNT,
})
}
#[tokio::test]
- async fn test_pg_crud() {
- if let Some(kv_backend) = build_pg_kv_backend().await {
+ async fn test_pg_put() {
+ if let Some(kv_backend) = build_pg_kv_backend("put_test").await {
let prefix = b"put/";
prepare_kv_with_prefix(&kv_backend, prefix.to_vec()).await;
test_kv_put_with_prefix(&kv_backend, prefix.to_vec()).await;
unprepare_kv(&kv_backend, prefix).await;
+ }
+ }
+ #[tokio::test]
+ async fn test_pg_range() {
+ if let Some(kv_backend) = build_pg_kv_backend("range_test").await {
let prefix = b"range/";
prepare_kv_with_prefix(&kv_backend, prefix.to_vec()).await;
test_kv_range_with_prefix(&kv_backend, prefix.to_vec()).await;
unprepare_kv(&kv_backend, prefix).await;
+ }
+ }
- let prefix = b"batchGet/";
+ #[tokio::test]
+ async fn test_pg_range_2() {
+ if let Some(kv_backend) = build_pg_kv_backend("range2_test").await {
+ let prefix = b"range2/";
+ test_kv_range_2_with_prefix(&kv_backend, prefix.to_vec()).await;
+ unprepare_kv(&kv_backend, prefix).await;
+ }
+ }
+
+ #[tokio::test]
+ async fn test_pg_batch_get() {
+ if let Some(kv_backend) = build_pg_kv_backend("batch_get_test").await {
+ let prefix = b"batch_get/";
prepare_kv_with_prefix(&kv_backend, prefix.to_vec()).await;
test_kv_batch_get_with_prefix(&kv_backend, prefix.to_vec()).await;
unprepare_kv(&kv_backend, prefix).await;
+ }
+ }
- let prefix = b"deleteRange/";
+ #[tokio::test]
+ async fn test_pg_batch_delete() {
+ if let Some(kv_backend) = build_pg_kv_backend("batch_delete_test").await {
+ let prefix = b"batch_delete/";
prepare_kv_with_prefix(&kv_backend, prefix.to_vec()).await;
- test_kv_delete_range_with_prefix(kv_backend, prefix.to_vec()).await;
+ test_kv_delete_range_with_prefix(&kv_backend, prefix.to_vec()).await;
+ unprepare_kv(&kv_backend, prefix).await;
}
+ }
- if let Some(kv_backend) = build_pg_kv_backend().await {
- test_kv_range_2_with_prefix(kv_backend, b"range2/".to_vec()).await;
+ #[tokio::test]
+ async fn test_pg_batch_delete_with_prefix() {
+ if let Some(kv_backend) = build_pg_kv_backend("batch_delete_prefix_test").await {
+ let prefix = b"batch_delete/";
+ prepare_kv_with_prefix(&kv_backend, prefix.to_vec()).await;
+ test_kv_batch_delete_with_prefix(&kv_backend, prefix.to_vec()).await;
+ unprepare_kv(&kv_backend, prefix).await;
+ }
+ }
+
+ #[tokio::test]
+ async fn test_pg_delete_range() {
+ if let Some(kv_backend) = build_pg_kv_backend("delete_range_test").await {
+ let prefix = b"delete_range/";
+ prepare_kv_with_prefix(&kv_backend, prefix.to_vec()).await;
+ test_kv_delete_range_with_prefix(&kv_backend, prefix.to_vec()).await;
+ unprepare_kv(&kv_backend, prefix).await;
}
+ }
- if let Some(kv_backend) = build_pg_kv_backend().await {
+ #[tokio::test]
+ async fn test_pg_compare_and_put() {
+ if let Some(kv_backend) = build_pg_kv_backend("compare_and_put_test").await {
+ let prefix = b"compare_and_put/";
let kv_backend = Arc::new(kv_backend);
- test_kv_compare_and_put_with_prefix(kv_backend, b"compareAndPut/".to_vec()).await;
+ test_kv_compare_and_put_with_prefix(kv_backend.clone(), prefix.to_vec()).await;
}
+ }
- if let Some(kv_backend) = build_pg_kv_backend().await {
- let prefix = b"batchDelete/";
- prepare_kv_with_prefix(&kv_backend, prefix.to_vec()).await;
- test_kv_batch_delete_with_prefix(kv_backend, prefix.to_vec()).await;
- }
-
- if let Some(kv_backend) = build_pg_kv_backend().await {
- let kv_backend_ref = Arc::new(kv_backend);
- test_txn_one_compare_op(kv_backend_ref.clone()).await;
- text_txn_multi_compare_op(kv_backend_ref.clone()).await;
- test_txn_compare_equal(kv_backend_ref.clone()).await;
- test_txn_compare_greater(kv_backend_ref.clone()).await;
- test_txn_compare_less(kv_backend_ref.clone()).await;
- test_txn_compare_not_equal(kv_backend_ref.clone()).await;
- // Clean up
- kv_backend_ref
- .get_client()
- .await
- .unwrap()
- .execute("DELETE FROM greptime_metakv", &[])
- .await
- .unwrap();
+ #[tokio::test]
+ async fn test_pg_txn() {
+ if let Some(kv_backend) = build_pg_kv_backend("txn_test").await {
+ test_txn_one_compare_op(&kv_backend).await;
+ text_txn_multi_compare_op(&kv_backend).await;
+ test_txn_compare_equal(&kv_backend).await;
+ test_txn_compare_greater(&kv_backend).await;
+ test_txn_compare_less(&kv_backend).await;
+ test_txn_compare_not_equal(&kv_backend).await;
}
}
}
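The batch-upsert template above builds its parameter placeholders in two runs: keys first for the `IN` clause, then interleaved key/value pairs for the `VALUES` clause. A minimal standalone sketch of that layout, assuming a two-entry batch (the function name and the `main` harness are illustrative, not part of the patch):

```rust
// Standalone sketch: reproduce the $n placeholder layout that the new
// batch-upsert template uses for a batch of `kv_len` key-value pairs.
fn batch_upsert_placeholders(kv_len: usize) -> (String, String) {
    // Keys occupy $1..=$kv_len and feed the `WHERE k IN (...)` clause.
    let in_clause = (1..=kv_len)
        .map(|i| format!("${i}"))
        .collect::<Vec<_>>()
        .join(", ");
    // Each (key, value) pair then takes the next two parameters.
    let mut param_index = kv_len + 1;
    let mut values = Vec::with_capacity(kv_len);
    for _ in 0..kv_len {
        values.push(format!("(${}, ${})", param_index, param_index + 1));
        param_index += 2;
    }
    (in_clause, values.join(", "))
}

fn main() {
    // The parameter vector is laid out as [k1, k2, k1, v1, k2, v2],
    // matching how `in_params` is extended with `values_params` in the patch.
    let (in_clause, values_clause) = batch_upsert_placeholders(2);
    assert_eq!(in_clause, "$1, $2");
    assert_eq!(values_clause, "($3, $4), ($5, $6)");
    println!("IN ({in_clause}) / VALUES {values_clause}");
}
```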
diff --git a/src/common/meta/src/kv_backend/test.rs b/src/common/meta/src/kv_backend/test.rs
index d428b6ed224e..bb38c5a20542 100644
--- a/src/common/meta/src/kv_backend/test.rs
+++ b/src/common/meta/src/kv_backend/test.rs
@@ -61,14 +61,18 @@ pub async fn prepare_kv_with_prefix(kv_backend: &impl KvBackend, prefix: Vec<u8>
pub async fn unprepare_kv(kv_backend: &impl KvBackend, prefix: &[u8]) {
let range_end = util::get_prefix_end_key(prefix);
- assert!(kv_backend
- .delete_range(DeleteRangeRequest {
- key: prefix.to_vec(),
- range_end,
- ..Default::default()
- })
- .await
- .is_ok());
+ assert!(
+ kv_backend
+ .delete_range(DeleteRangeRequest {
+ key: prefix.to_vec(),
+ range_end,
+ ..Default::default()
+ })
+ .await
+ .is_ok(),
+ "prefix: {:?}",
+ std::str::from_utf8(prefix).unwrap()
+ );
}
pub async fn test_kv_put(kv_backend: &impl KvBackend) {
@@ -170,11 +174,11 @@ pub async fn test_kv_range_with_prefix(kv_backend: &impl KvBackend, prefix: Vec<
assert_eq!(b"val1", resp.kvs[0].value());
}
-pub async fn test_kv_range_2(kv_backend: impl KvBackend) {
+pub async fn test_kv_range_2(kv_backend: &impl KvBackend) {
test_kv_range_2_with_prefix(kv_backend, vec![]).await;
}
-pub async fn test_kv_range_2_with_prefix(kv_backend: impl KvBackend, prefix: Vec<u8>) {
+pub async fn test_kv_range_2_with_prefix(kv_backend: &impl KvBackend, prefix: Vec<u8>) {
let atest = [prefix.clone(), b"atest".to_vec()].concat();
let test = [prefix.clone(), b"test".to_vec()].concat();
@@ -348,11 +352,11 @@ pub async fn test_kv_compare_and_put_with_prefix(
assert!(resp.is_none());
}
-pub async fn test_kv_delete_range(kv_backend: impl KvBackend) {
+pub async fn test_kv_delete_range(kv_backend: &impl KvBackend) {
test_kv_delete_range_with_prefix(kv_backend, vec![]).await;
}
-pub async fn test_kv_delete_range_with_prefix(kv_backend: impl KvBackend, prefix: Vec<u8>) {
+pub async fn test_kv_delete_range_with_prefix(kv_backend: &impl KvBackend, prefix: Vec<u8>) {
let key3 = [prefix.clone(), b"key3".to_vec()].concat();
let req = DeleteRangeRequest {
key: key3.clone(),
@@ -403,11 +407,11 @@ pub async fn test_kv_delete_range_with_prefix(kv_backend: impl KvBackend, prefix
assert!(resp.kvs.is_empty());
}
-pub async fn test_kv_batch_delete(kv_backend: impl KvBackend) {
+pub async fn test_kv_batch_delete(kv_backend: &impl KvBackend) {
test_kv_batch_delete_with_prefix(kv_backend, vec![]).await;
}
-pub async fn test_kv_batch_delete_with_prefix(kv_backend: impl KvBackend, prefix: Vec<u8>) {
+pub async fn test_kv_batch_delete_with_prefix(kv_backend: &impl KvBackend, prefix: Vec<u8>) {
let key1 = [prefix.clone(), b"key1".to_vec()].concat();
let key100 = [prefix.clone(), b"key100".to_vec()].concat();
assert!(kv_backend.get(&key1).await.unwrap().is_some());
@@ -447,7 +451,7 @@ pub async fn test_kv_batch_delete_with_prefix(kv_backend: impl KvBackend, prefix
assert!(kv_backend.get(&key11).await.unwrap().is_none());
}
-pub async fn test_txn_one_compare_op(kv_backend: KvBackendRef) {
+pub async fn test_txn_one_compare_op(kv_backend: &impl KvBackend) {
let _ = kv_backend
.put(PutRequest {
key: vec![11],
@@ -472,7 +476,7 @@ pub async fn test_txn_one_compare_op(kv_backend: KvBackendRef) {
assert_eq!(txn_response.responses.len(), 1);
}
-pub async fn text_txn_multi_compare_op(kv_backend: KvBackendRef) {
+pub async fn text_txn_multi_compare_op(kv_backend: &impl KvBackend) {
for i in 1..3 {
let _ = kv_backend
.put(PutRequest {
@@ -502,7 +506,7 @@ pub async fn text_txn_multi_compare_op(kv_backend: KvBackendRef) {
assert_eq!(txn_response.responses.len(), 2);
}
-pub async fn test_txn_compare_equal(kv_backend: KvBackendRef) {
+pub async fn test_txn_compare_equal(kv_backend: &impl KvBackend) {
let key = vec![101u8];
kv_backend.delete(&key, false).await.unwrap();
@@ -531,7 +535,7 @@ pub async fn test_txn_compare_equal(kv_backend: KvBackendRef) {
assert!(txn_response.succeeded);
}
-pub async fn test_txn_compare_greater(kv_backend: KvBackendRef) {
+pub async fn test_txn_compare_greater(kv_backend: &impl KvBackend) {
let key = vec![102u8];
kv_backend.delete(&key, false).await.unwrap();
@@ -571,7 +575,7 @@ pub async fn test_txn_compare_greater(kv_backend: KvBackendRef) {
);
}
-pub async fn test_txn_compare_less(kv_backend: KvBackendRef) {
+pub async fn test_txn_compare_less(kv_backend: &impl KvBackend) {
let key = vec![103u8];
kv_backend.delete(&[3], false).await.unwrap();
@@ -611,7 +615,7 @@ pub async fn test_txn_compare_less(kv_backend: KvBackendRef) {
);
}
-pub async fn test_txn_compare_not_equal(kv_backend: KvBackendRef) {
+pub async fn test_txn_compare_not_equal(kv_backend: &impl KvBackend) {
let key = vec![104u8];
kv_backend.delete(&key, false).await.unwrap();
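The refactored range path fetches `limit + 1` rows so it can report `more` without a second query, then trims the extra row. A dependency-free sketch of that trimming step, assuming an in-memory `Vec` stands in for the query result:

```rust
/// Trims a result set that was fetched with `LIMIT limit + 1` down to `limit`
/// rows, reporting whether more data remains (limit == 0 means "no limit").
fn trim_to_limit<T>(mut rows: Vec<T>, limit: usize) -> (Vec<T>, bool) {
    if limit == 0 || rows.len() <= limit {
        return (rows, false);
    }
    // One extra row was fetched purely as a "has more" probe; drop it.
    rows.pop();
    (rows, true)
}

fn main() {
    // Pretend the query ran with LIMIT 3 (limit 2 plus the probe row).
    let (rows, more) = trim_to_limit(vec!["k1", "k2", "k3"], 2);
    assert_eq!(rows, vec!["k1", "k2"]);
    assert!(more);

    let (rows, more) = trim_to_limit(vec!["k1"], 2);
    assert_eq!(rows, vec!["k1"]);
    assert!(!more);
}
```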
diff --git a/src/common/meta/src/rpc/store.rs b/src/common/meta/src/rpc/store.rs
index f763d6b4430d..25536993972f 100644
--- a/src/common/meta/src/rpc/store.rs
+++ b/src/common/meta/src/rpc/store.rs
@@ -266,7 +266,7 @@ impl PutRequest {
}
}
-#[derive(Debug, Clone, PartialEq)]
+#[derive(Debug, Clone, PartialEq, Default)]
pub struct PutResponse {
pub prev_kv: Option<KeyValue>,
}
@@ -425,7 +425,7 @@ impl BatchPutRequest {
}
}
-#[derive(Debug, Clone)]
+#[derive(Debug, Clone, Default)]
pub struct BatchPutResponse {
pub prev_kvs: Vec<KeyValue>,
}
@@ -509,7 +509,7 @@ impl BatchDeleteRequest {
}
}
-#[derive(Debug, Clone)]
+#[derive(Debug, Clone, Default)]
pub struct BatchDeleteResponse {
pub prev_kvs: Vec<KeyValue>,
}
@@ -754,6 +754,19 @@ impl TryFrom<PbDeleteRangeResponse> for DeleteRangeResponse {
}
impl DeleteRangeResponse {
+ /// Creates a new [`DeleteRangeResponse`] with the given deleted count.
+ pub fn new(deleted: i64) -> Self {
+ Self {
+ deleted,
+ prev_kvs: vec![],
+ }
+ }
+
+ /// Sets the previous key-value pairs of this [`DeleteRangeResponse`].
+ pub fn with_prev_kvs(&mut self, prev_kvs: Vec<KeyValue>) {
+ self.prev_kvs = prev_kvs;
+ }
+
pub fn to_proto_resp(self, header: PbResponseHeader) -> PbDeleteRangeResponse {
PbDeleteRangeResponse {
header: Some(header),
diff --git a/src/common/procedure/Cargo.toml b/src/common/procedure/Cargo.toml
index 1d8c6736e3fd..99ed2e56175c 100644
--- a/src/common/procedure/Cargo.toml
+++ b/src/common/procedure/Cargo.toml
@@ -13,7 +13,7 @@ workspace = true
[dependencies]
async-stream.workspace = true
async-trait.workspace = true
-backon = "1"
+backon.workspace = true
common-base.workspace = true
common-error.workspace = true
common-macro.workspace = true
diff --git a/src/log-store/src/raft_engine/backend.rs b/src/log-store/src/raft_engine/backend.rs
index 33cb64a2e881..37a9af18a4c4 100644
--- a/src/log-store/src/raft_engine/backend.rs
+++ b/src/log-store/src/raft_engine/backend.rs
@@ -644,7 +644,7 @@ mod tests {
let dir = create_temp_dir("range2");
let backend = build_kv_backend(dir.path().to_str().unwrap().to_string());
- test_kv_range_2(backend).await;
+ test_kv_range_2(&backend).await;
}
#[tokio::test]
@@ -671,7 +671,7 @@ mod tests {
let backend = build_kv_backend(dir.path().to_str().unwrap().to_string());
prepare_kv(&backend).await;
- test_kv_batch_delete(backend).await;
+ test_kv_batch_delete(&backend).await;
}
#[tokio::test]
@@ -680,7 +680,7 @@ mod tests {
let backend = build_kv_backend(dir.path().to_str().unwrap().to_string());
prepare_kv(&backend).await;
- test_kv_delete_range(backend).await;
+ test_kv_delete_range(&backend).await;
}
#[tokio::test(flavor = "multi_thread")]
diff --git a/src/meta-srv/src/bootstrap.rs b/src/meta-srv/src/bootstrap.rs
index 91a58e7d5be7..9ed6ed66c3ec 100644
--- a/src/meta-srv/src/bootstrap.rs
+++ b/src/meta-srv/src/bootstrap.rs
@@ -229,7 +229,8 @@ pub async fn metasrv_builder(
#[cfg(feature = "pg_kvbackend")]
(None, BackendImpl::PostgresStore) => {
let pool = create_postgres_pool(opts).await?;
- let kv_backend = PgStore::with_pg_pool(pool, opts.max_txn_ops)
+ // TODO(CookiePie): use table name from config.
+ let kv_backend = PgStore::with_pg_pool(pool, "greptime_metakv", opts.max_txn_ops)
.await
.context(error::KvBackendSnafu)?;
// Client for election should be created separately since we need a different session keep-alive idle time.
diff --git a/src/meta-srv/src/election/postgres.rs b/src/meta-srv/src/election/postgres.rs
index 94de90bde925..192fa682bf5c 100644
--- a/src/meta-srv/src/election/postgres.rs
+++ b/src/meta-srv/src/election/postgres.rs
@@ -23,6 +23,7 @@ use itertools::Itertools;
use snafu::{ensure, OptionExt, ResultExt};
use tokio::sync::broadcast;
use tokio::time::MissedTickBehavior;
+use tokio_postgres::types::ToSql;
use tokio_postgres::Client;
use crate::election::{
@@ -39,7 +40,7 @@ const CAMPAIGN: &str = "SELECT pg_try_advisory_lock({})";
const STEP_DOWN: &str = "SELECT pg_advisory_unlock({})";
// Currently the session timeout is longer than the leader lease time, so the leader lease may expire while the session is still alive.
// Either the leader reconnects and step down or the session expires and the lock is released.
-const SET_IDLE_SESSION_TIMEOUT: &str = "SET idle_in_transaction_session_timeout = '10s';";
+const SET_IDLE_SESSION_TIMEOUT: &str = "SET idle_session_timeout = '10s';";
// Separator between value and expire time.
const LEASE_SEP: &str = r#"||__metadata_lease_sep||"#;
@@ -50,7 +51,7 @@ WITH prev AS (
SELECT k, v FROM greptime_metakv WHERE k = $1
), insert AS (
INSERT INTO greptime_metakv
- VALUES($1, $2 || $3 || TO_CHAR(CURRENT_TIMESTAMP + INTERVAL '1 second' * $4, 'YYYY-MM-DD HH24:MI:SS.MS'))
+ VALUES($1, convert_to($2 || $3 || TO_CHAR(CURRENT_TIMESTAMP + INTERVAL '1 second' * $4, 'YYYY-MM-DD HH24:MI:SS.MS'), 'UTF8'))
ON CONFLICT (k) DO NOTHING
)
@@ -61,7 +62,7 @@ SELECT k, v FROM prev;
const CAS_WITH_EXPIRE_TIME: &str = r#"
UPDATE greptime_metakv
SET k=$1,
-v=$3 || $4 || TO_CHAR(CURRENT_TIMESTAMP + INTERVAL '1 second' * $5, 'YYYY-MM-DD HH24:MI:SS.MS')
+v=convert_to($3 || $4 || TO_CHAR(CURRENT_TIMESTAMP + INTERVAL '1 second' * $5, 'YYYY-MM-DD HH24:MI:SS.MS'), 'UTF8')
WHERE
k=$1 AND v=$2
"#;
@@ -329,12 +330,13 @@ impl PgElection {
/// Returns value, expire time and current time. If `with_origin` is true, the origin string is also returned.
async fn get_value_with_lease(
&self,
- key: &String,
+ key: &str,
with_origin: bool,
) -> Result<Option<(String, Timestamp, Timestamp, Option<String>)>> {
+ let key = key.as_bytes().to_vec();
let res = self
.client
- .query(GET_WITH_CURRENT_TIMESTAMP, &[&key])
+ .query(GET_WITH_CURRENT_TIMESTAMP, &[&key as &(dyn ToSql + Sync)])
.await
.context(PostgresExecutionSnafu)?;
@@ -342,7 +344,7 @@ impl PgElection {
Ok(None)
} else {
// Safety: Checked if res is empty above.
- let current_time_str = res[0].get(1);
+ let current_time_str = res[0].try_get(1).unwrap_or_default();
let current_time = match Timestamp::from_str(current_time_str, None) {
Ok(ts) => ts,
Err(_) => UnexpectedSnafu {
@@ -351,8 +353,9 @@ impl PgElection {
.fail()?,
};
// Safety: Checked if res is empty above.
- let value_and_expire_time = res[0].get(0);
- let (value, expire_time) = parse_value_and_expire_time(value_and_expire_time)?;
+ let value_and_expire_time =
+ String::from_utf8_lossy(res[0].try_get(0).unwrap_or_default());
+ let (value, expire_time) = parse_value_and_expire_time(&value_and_expire_time)?;
if with_origin {
Ok(Some((
@@ -372,17 +375,20 @@ impl PgElection {
&self,
key_prefix: &str,
) -> Result<(Vec<(String, Timestamp)>, Timestamp)> {
- let key_prefix = format!("{}%", key_prefix);
+ let key_prefix = format!("{}%", key_prefix).as_bytes().to_vec();
let res = self
.client
- .query(PREFIX_GET_WITH_CURRENT_TIMESTAMP, &[&key_prefix])
+ .query(
+ PREFIX_GET_WITH_CURRENT_TIMESTAMP,
+ &[(&key_prefix as &(dyn ToSql + Sync))],
+ )
.await
.context(PostgresExecutionSnafu)?;
let mut values_with_leases = vec![];
let mut current = Timestamp::default();
for row in res {
- let current_time_str = row.get(1);
+ let current_time_str = row.try_get(1).unwrap_or_default();
current = match Timestamp::from_str(current_time_str, None) {
Ok(ts) => ts,
Err(_) => UnexpectedSnafu {
@@ -391,8 +397,8 @@ impl PgElection {
.fail()?,
};
- let value_and_expire_time = row.get(0);
- let (value, expire_time) = parse_value_and_expire_time(value_and_expire_time)?;
+ let value_and_expire_time = String::from_utf8_lossy(row.try_get(0).unwrap_or_default());
+ let (value, expire_time) = parse_value_and_expire_time(&value_and_expire_time)?;
values_with_leases.push((value, expire_time));
}
@@ -400,13 +406,15 @@ impl PgElection {
}
async fn update_value_with_lease(&self, key: &str, prev: &str, updated: &str) -> Result<()> {
+ let key = key.as_bytes().to_vec();
+ let prev = prev.as_bytes().to_vec();
let res = self
.client
.execute(
CAS_WITH_EXPIRE_TIME,
&[
- &key,
- &prev,
+ &key as &(dyn ToSql + Sync),
+ &prev as &(dyn ToSql + Sync),
&updated,
&LEASE_SEP,
&(self.candidate_lease_ttl_secs as f64),
@@ -418,7 +426,7 @@ impl PgElection {
ensure!(
res == 1,
UnexpectedSnafu {
- violated: format!("Failed to update key: {}", key),
+ violated: format!("Failed to update key: {}", String::from_utf8_lossy(&key)),
}
);
@@ -432,12 +440,17 @@ impl PgElection {
value: &str,
lease_ttl_secs: u64,
) -> Result<bool> {
+ let key = key.as_bytes().to_vec();
+ let lease_ttl_secs = lease_ttl_secs as f64;
+ let params: Vec<&(dyn ToSql + Sync)> = vec![
+ &key as &(dyn ToSql + Sync),
+ &value as &(dyn ToSql + Sync),
+ &LEASE_SEP,
+ &lease_ttl_secs,
+ ];
let res = self
.client
- .query(
- PUT_IF_NOT_EXISTS_WITH_EXPIRE_TIME,
- &[&key, &value, &LEASE_SEP, &(lease_ttl_secs as f64)],
- )
+ .query(PUT_IF_NOT_EXISTS_WITH_EXPIRE_TIME, ¶ms)
.await
.context(PostgresExecutionSnafu)?;
Ok(res.is_empty())
@@ -445,10 +458,11 @@ impl PgElection {
/// Returns `true` if the deletion is successful.
/// Caution: Should only delete the key if the lease is expired.
- async fn delete_value(&self, key: &String) -> Result<bool> {
+ async fn delete_value(&self, key: &str) -> Result<bool> {
+ let key = key.as_bytes().to_vec();
let res = self
.client
- .query(POINT_DELETE, &[&key])
+ .query(POINT_DELETE, &[&key as &(dyn ToSql + Sync)])
.await
.context(PostgresExecutionSnafu)?;
@@ -635,6 +649,8 @@ mod tests {
use super::*;
use crate::error::PostgresExecutionSnafu;
+ const CREATE_TABLE: &str =
+ "CREATE TABLE IF NOT EXISTS greptime_metakv(k bytea PRIMARY KEY, v bytea);";
async fn create_postgres_client() -> Result<Client> {
let endpoint = env::var("GT_POSTGRES_ENDPOINTS").unwrap_or_default();
@@ -650,6 +666,7 @@ mod tests {
tokio::spawn(async move {
connection.await.context(PostgresExecutionSnafu).unwrap();
});
+ client.execute(CREATE_TABLE, &[]).await.unwrap();
Ok(client)
}
@@ -1152,6 +1169,7 @@ mod tests {
#[tokio::test]
async fn test_follower_action() {
+ common_telemetry::init_default_ut_logging();
let candidate_lease_ttl_secs = 5;
let store_key_prefix = uuid::Uuid::new_v4().to_string();
diff --git a/tests/runner/src/env.rs b/tests/runner/src/env.rs
index 81bbe2fb0b07..2782661c25b1 100644
--- a/tests/runner/src/env.rs
+++ b/tests/runner/src/env.rs
@@ -70,6 +70,7 @@ pub enum WalConfig {
pub struct StoreConfig {
pub store_addrs: Vec<String>,
pub setup_etcd: bool,
+ pub setup_pg: bool,
}
#[derive(Clone)]
@@ -159,6 +160,7 @@ impl Env {
self.build_db();
self.setup_wal();
self.setup_etcd();
+ self.setup_pg();
let db_ctx = GreptimeDBContext::new(self.wal.clone(), self.store_config.clone());
@@ -383,7 +385,21 @@ impl Env {
"-c".to_string(),
self.generate_config_file(subcommand, db_ctx),
];
- if db_ctx.store_config().store_addrs.is_empty() {
+ if db_ctx.store_config().setup_pg {
+ let client_ports = self
+ .store_config
+ .store_addrs
+ .iter()
+ .map(|s| s.split(':').nth(1).unwrap().parse::<u16>().unwrap())
+ .collect::<Vec<_>>();
+ let client_port = client_ports.first().unwrap_or(&5432);
+ let pg_server_addr = format!(
+ "postgresql://greptimedb:[email protected]:{}/postgres",
+ client_port
+ );
+ args.extend(vec!["--backend".to_string(), "postgres-store".to_string()]);
+ args.extend(vec!["--store-addrs".to_string(), pg_server_addr]);
+ } else if db_ctx.store_config().store_addrs.is_empty() {
args.extend(vec!["--backend".to_string(), "memory-store".to_string()])
}
(args, vec![METASRV_ADDR.to_string()])
@@ -570,6 +586,20 @@ impl Env {
}
}
+ /// Sets up PostgreSQL if needed.
+ fn setup_pg(&self) {
+ if self.store_config.setup_pg {
+ let client_ports = self
+ .store_config
+ .store_addrs
+ .iter()
+ .map(|s| s.split(':').nth(1).unwrap().parse::<u16>().unwrap())
+ .collect::<Vec<_>>();
+ let client_port = client_ports.first().unwrap_or(&5432);
+ util::setup_pg(*client_port, None);
+ }
+ }
+
/// Generate config file to `/tmp/{subcommand}-{current_time}.toml`
fn generate_config_file(&self, subcommand: &str, db_ctx: &GreptimeDBContext) -> String {
let mut tt = TinyTemplate::new();
diff --git a/tests/runner/src/main.rs b/tests/runner/src/main.rs
index 2e3158e1953b..9701765d0adf 100644
--- a/tests/runner/src/main.rs
+++ b/tests/runner/src/main.rs
@@ -106,6 +106,10 @@ struct Args {
/// Whether to setup etcd, by default it is false.
#[clap(long, default_value = "false")]
setup_etcd: bool,
+
+ /// Whether to setup pg, by default it is false.
+ #[clap(long, default_value = "false")]
+ setup_pg: bool,
}
#[tokio::main]
@@ -154,6 +158,7 @@ async fn main() {
let store = StoreConfig {
store_addrs: args.store_addrs.clone(),
setup_etcd: args.setup_etcd,
+ setup_pg: args.setup_pg,
};
let runner = Runner::new(
diff --git a/tests/runner/src/util.rs b/tests/runner/src/util.rs
index 4bcd482a26bf..5baa0cd80eb0 100644
--- a/tests/runner/src/util.rs
+++ b/tests/runner/src/util.rs
@@ -305,6 +305,53 @@ pub fn stop_rm_etcd() {
}
}
+/// Set up a PostgreSQL server in docker.
+pub fn setup_pg(pg_port: u16, pg_version: Option<&str>) {
+ if std::process::Command::new("docker")
+ .args(["-v"])
+ .status()
+ .is_err()
+ {
+ panic!("Docker is not installed");
+ }
+
+ let pg_image = if let Some(pg_version) = pg_version {
+ format!("postgres:{pg_version}")
+ } else {
+ "postgres:latest".to_string()
+ };
+ let pg_password = "admin";
+ let pg_user = "greptimedb";
+
+ let mut arg_list = vec![];
+ arg_list.extend(["run", "-d"]);
+
+ let pg_password_env = format!("POSTGRES_PASSWORD={pg_password}");
+ let pg_user_env = format!("POSTGRES_USER={pg_user}");
+ let pg_port_forward = format!("{pg_port}:5432");
+ arg_list.extend(["-e", &pg_password_env, "-e", &pg_user_env]);
+ arg_list.extend(["-p", &pg_port_forward]);
+
+ arg_list.extend(["--name", "greptimedb_pg", &pg_image]);
+
+ let mut cmd = std::process::Command::new("docker");
+
+ cmd.args(arg_list);
+
+ println!("Starting PostgreSQL with command: {:?}", cmd);
+
+ let status = cmd.status();
+ if status.is_err() {
+ panic!("Failed to start PostgreSQL: {:?}", status);
+ } else if let Ok(status) = status {
+ if status.success() {
+ println!("Started PostgreSQL with port {}", pg_port);
+ } else {
+ panic!("Failed to start PostgreSQL: {:?}", status);
+ }
+ }
+}
+
/// Get the dir of test cases. This function only works when the runner is run
/// under the project's dir because it depends on some envs set by cargo.
pub fn get_case_dir(case_dir: Option<PathBuf>) -> String {
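Note on the port extraction above: `s.split(':').nth(1).unwrap().parse::<u16>().unwrap()` panics on a malformed store address. A minimal non-panicking sketch of the same idea (the helper name and fallback are illustrative, not part of the patch):

/// Illustrative only: pull the port out of a "host:port" address,
/// falling back to PostgreSQL's default 5432 when the input is malformed.
fn client_port(addr: &str) -> u16 {
    addr.rsplit(':')
        .next()                              // part after the last ':'
        .and_then(|p| p.parse::<u16>().ok()) // non-numeric ports become None
        .unwrap_or(5432)
}

fn main() {
    assert_eq!(client_port("127.0.0.1:5433"), 5433);
    assert_eq!(client_port("no-port-here"), 5432);
}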
|
refactor
|
refactor `PgStore` (#5309)
|
5f8c17514fa8c615f9d68d08cb89be193c814f5f
|
2023-12-20 07:58:29
|
JeremyHi
|
chore: SelectorType use snake_case (#2962)
| false
|
diff --git a/config/metasrv.example.toml b/config/metasrv.example.toml
index aad33ce1afcf..fff978f8c15d 100644
--- a/config/metasrv.example.toml
+++ b/config/metasrv.example.toml
@@ -7,10 +7,10 @@ server_addr = "127.0.0.1:3002"
# Etcd server address, "127.0.0.1:2379" by default.
store_addr = "127.0.0.1:2379"
# Datanode selector type.
-# - "LeaseBased" (default value).
-# - "LoadBased"
+# - "lease_based" (default value).
+# - "load_based"
# For details, please see "https://docs.greptime.com/developer-guide/metasrv/selector".
-selector = "LeaseBased"
+selector = "lease_based"
# Store data in memory, false by default.
use_memory_store = false
# Whether to enable greptimedb telemetry, true by default.
diff --git a/src/meta-srv/src/selector.rs b/src/meta-srv/src/selector.rs
index 140f4e5711d5..44ce0b2c8cb4 100644
--- a/src/meta-srv/src/selector.rs
+++ b/src/meta-srv/src/selector.rs
@@ -67,8 +67,8 @@ impl TryFrom<&str> for SelectorType {
fn try_from(value: &str) -> Result<Self> {
match value {
- "LoadBased" => Ok(SelectorType::LoadBased),
- "LeaseBased" => Ok(SelectorType::LeaseBased),
+ "load_based" | "LoadBased" => Ok(SelectorType::LoadBased),
+ "lease_based" | "LeaseBased" => Ok(SelectorType::LeaseBased),
other => error::UnsupportedSelectorTypeSnafu {
selector_type: other,
}
@@ -89,12 +89,18 @@ mod tests {
#[test]
fn test_convert_str_to_selector_type() {
- let leasebased = "LeaseBased";
- let selector_type = leasebased.try_into().unwrap();
+ let lease_based = "lease_based";
+ let selector_type = lease_based.try_into().unwrap();
+ assert_eq!(SelectorType::LeaseBased, selector_type);
+ let lease_based = "LeaseBased";
+ let selector_type = lease_based.try_into().unwrap();
assert_eq!(SelectorType::LeaseBased, selector_type);
- let loadbased = "LoadBased";
- let selector_type = loadbased.try_into().unwrap();
+ let load_based = "load_based";
+ let selector_type = load_based.try_into().unwrap();
+ assert_eq!(SelectorType::LoadBased, selector_type);
+ let load_based = "LoadBased";
+ let selector_type = load_based.try_into().unwrap();
assert_eq!(SelectorType::LoadBased, selector_type);
let unknown = "unknown";
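Aside on the pattern above: accepting both the new snake_case spelling and the legacy CamelCase one keeps existing config files working. A self-contained sketch of the same idea (the enum and error type here are illustrative stand-ins, not the metasrv types):

#[derive(Debug, PartialEq)]
enum SelectorKind {
    LeaseBased,
    LoadBased,
}

impl std::str::FromStr for SelectorKind {
    type Err = String;

    // Accept the new snake_case names while keeping the old spellings valid.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "lease_based" | "LeaseBased" => Ok(SelectorKind::LeaseBased),
            "load_based" | "LoadBased" => Ok(SelectorKind::LoadBased),
            other => Err(format!("unsupported selector type: {other}")),
        }
    }
}

fn main() {
    assert_eq!("lease_based".parse::<SelectorKind>(), Ok(SelectorKind::LeaseBased));
    assert_eq!("LoadBased".parse::<SelectorKind>(), Ok(SelectorKind::LoadBased));
    assert!("unknown".parse::<SelectorKind>().is_err());
}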
|
chore
|
SelectorType use snake_case (#2962)
|
df01ac05a1ec5f7c3c4630eca7b58e8f0b39f8fe
|
2024-04-24 12:59:10
|
Ruihang Xia
|
feat: add validate method to CreateExpr (#3772)
| false
|
diff --git a/src/metric-engine/src/data_region.rs b/src/metric-engine/src/data_region.rs
index 9207d0f107bf..1dd1b53faad1 100644
--- a/src/metric-engine/src/data_region.rs
+++ b/src/metric-engine/src/data_region.rs
@@ -25,7 +25,7 @@ use store_api::region_request::{
AddColumn, AffectedRows, AlterKind, RegionAlterRequest, RegionPutRequest, RegionRequest,
};
use store_api::storage::consts::ReservedColumnId;
-use store_api::storage::RegionId;
+use store_api::storage::{ConcreteDataType, RegionId};
use crate::error::{
ColumnTypeMismatchSnafu, MitoReadOperationSnafu, MitoWriteOperationSnafu, Result,
@@ -128,7 +128,8 @@ impl DataRegion {
if c.semantic_type == SemanticType::Tag {
if !c.column_schema.data_type.is_string() {
return ColumnTypeMismatchSnafu {
- column_type: c.column_schema.data_type.clone(),
+ expect: ConcreteDataType::string_datatype(),
+ actual: c.column_schema.data_type.clone(),
}
.fail();
}
diff --git a/src/metric-engine/src/engine/create.rs b/src/metric-engine/src/engine/create.rs
index a7e3c5c3647c..c71375299c38 100644
--- a/src/metric-engine/src/engine/create.rs
+++ b/src/metric-engine/src/engine/create.rs
@@ -43,9 +43,11 @@ use crate::engine::options::{
};
use crate::engine::MetricEngineInner;
use crate::error::{
- ColumnNotFoundSnafu, ConflictRegionOptionSnafu, CreateMitoRegionSnafu,
- InternalColumnOccupiedSnafu, MissingRegionOptionSnafu, MitoReadOperationSnafu,
- ParseRegionIdSnafu, PhysicalRegionNotFoundSnafu, Result, SerializeColumnMetadataSnafu,
+ AddingFieldColumnSnafu, ColumnNotFoundSnafu, ColumnTypeMismatchSnafu,
+ ConflictRegionOptionSnafu, CreateMitoRegionSnafu, InternalColumnOccupiedSnafu,
+ InvalidMetadataSnafu, MissingRegionOptionSnafu, MitoReadOperationSnafu,
+ MultipleFieldColumnSnafu, NoFieldColumnSnafu, ParseRegionIdSnafu, PhysicalRegionNotFoundSnafu,
+ Result, SerializeColumnMetadataSnafu,
};
use crate::metrics::{LOGICAL_REGION_COUNT, PHYSICAL_COLUMN_COUNT, PHYSICAL_REGION_COUNT};
use crate::utils::{to_data_region_id, to_metadata_region_id};
@@ -191,6 +193,14 @@ impl MetricEngineInner {
})?;
for col in &request.column_metadatas {
if !physical_columns.contains(&col.column_schema.name) {
+ // Multi-field on physical table is explicitly forbidden at present
+ // TODO(ruihang): support multi-field on both logical and physical column
+ ensure!(
+ col.semantic_type != SemanticType::Field,
+ AddingFieldColumnSnafu {
+ name: col.column_schema.name.clone()
+ }
+ );
new_columns.push(col.clone());
} else {
existing_columns.push(col.column_schema.name.clone());
@@ -290,6 +300,8 @@ impl MetricEngineInner {
/// - required table option is present ([PHYSICAL_TABLE_METADATA_KEY] or
/// [LOGICAL_TABLE_METADATA_KEY])
fn verify_region_create_request(request: &RegionCreateRequest) -> Result<()> {
+ request.validate().context(InvalidMetadataSnafu)?;
+
let name_to_index = request
.column_metadatas
.iter()
@@ -323,6 +335,41 @@ impl MetricEngineInner {
ConflictRegionOptionSnafu {}
);
+ // check if only one field column is declared, and all tag columns are string
+ let mut field_col: Option<&ColumnMetadata> = None;
+ for col in &request.column_metadatas {
+ match col.semantic_type {
+ SemanticType::Tag => ensure!(
+ col.column_schema.data_type == ConcreteDataType::string_datatype(),
+ ColumnTypeMismatchSnafu {
+ expect: ConcreteDataType::string_datatype(),
+ actual: col.column_schema.data_type.clone(),
+ }
+ ),
+ SemanticType::Field => {
+ if field_col.is_some() {
+ MultipleFieldColumnSnafu {
+ previous: field_col.unwrap().column_schema.name.clone(),
+ current: col.column_schema.name.clone(),
+ }
+ .fail()?;
+ }
+ field_col = Some(col)
+ }
+ SemanticType::Timestamp => {}
+ }
+ }
+ let field_col = field_col.context(NoFieldColumnSnafu)?;
+
+ // make sure the field column is float64 type
+ ensure!(
+ field_col.column_schema.data_type == ConcreteDataType::float64_datatype(),
+ ColumnTypeMismatchSnafu {
+ expect: ConcreteDataType::float64_datatype(),
+ actual: field_col.column_schema.data_type.clone(),
+ }
+ );
+
Ok(())
}
@@ -531,6 +578,15 @@ mod test {
false,
),
},
+ ColumnMetadata {
+ column_id: 2,
+ semantic_type: SemanticType::Field,
+ column_schema: ColumnSchema::new(
+ "column2".to_string(),
+ ConcreteDataType::float64_datatype(),
+ false,
+ ),
+ },
],
region_dir: "test_dir".to_string(),
engine: METRIC_ENGINE_NAME.to_string(),
@@ -539,37 +595,51 @@ mod test {
.into_iter()
.collect(),
};
- let result = MetricEngineInner::verify_region_create_request(&request);
- assert!(result.is_ok());
+ MetricEngineInner::verify_region_create_request(&request).unwrap();
}
#[test]
fn test_verify_region_create_request_options() {
let mut request = RegionCreateRequest {
- column_metadatas: vec![],
+ column_metadatas: vec![
+ ColumnMetadata {
+ column_id: 0,
+ semantic_type: SemanticType::Timestamp,
+ column_schema: ColumnSchema::new(
+ METADATA_SCHEMA_TIMESTAMP_COLUMN_NAME,
+ ConcreteDataType::timestamp_millisecond_datatype(),
+ false,
+ ),
+ },
+ ColumnMetadata {
+ column_id: 1,
+ semantic_type: SemanticType::Field,
+ column_schema: ColumnSchema::new(
+ "val".to_string(),
+ ConcreteDataType::float64_datatype(),
+ false,
+ ),
+ },
+ ],
region_dir: "test_dir".to_string(),
engine: METRIC_ENGINE_NAME.to_string(),
primary_key: vec![],
options: HashMap::new(),
};
- let result = MetricEngineInner::verify_region_create_request(&request);
- assert!(result.is_err());
+ MetricEngineInner::verify_region_create_request(&request).unwrap_err();
let mut options = HashMap::new();
options.insert(PHYSICAL_TABLE_METADATA_KEY.to_string(), "value".to_string());
request.options.clone_from(&options);
- let result = MetricEngineInner::verify_region_create_request(&request);
- assert!(result.is_ok());
+ MetricEngineInner::verify_region_create_request(&request).unwrap();
options.insert(LOGICAL_TABLE_METADATA_KEY.to_string(), "value".to_string());
request.options.clone_from(&options);
- let result = MetricEngineInner::verify_region_create_request(&request);
- assert!(result.is_err());
+ MetricEngineInner::verify_region_create_request(&request).unwrap_err();
options.remove(PHYSICAL_TABLE_METADATA_KEY).unwrap();
request.options = options;
- let result = MetricEngineInner::verify_region_create_request(&request);
- assert!(result.is_ok());
+ MetricEngineInner::verify_region_create_request(&request).unwrap();
}
#[tokio::test]
diff --git a/src/metric-engine/src/error.rs b/src/metric-engine/src/error.rs
index b2568947290c..81e680dfd060 100644
--- a/src/metric-engine/src/error.rs
+++ b/src/metric-engine/src/error.rs
@@ -133,9 +133,10 @@ pub enum Error {
location: Location,
},
- #[snafu(display("Column type mismatch. Expect string, got {:?}", column_type))]
+ #[snafu(display("Column type mismatch. Expect {:?}, got {:?}", expect, actual))]
ColumnTypeMismatch {
- column_type: ConcreteDataType,
+ expect: ConcreteDataType,
+ actual: ConcreteDataType,
location: Location,
},
@@ -169,6 +170,19 @@ pub enum Error {
request: RegionRequest,
location: Location,
},
+
+ #[snafu(display("Multiple field column found: {} and {}", previous, current))]
+ MultipleFieldColumn {
+ previous: String,
+ current: String,
+ location: Location,
+ },
+
+ #[snafu(display("Adding field column {} to physical table", name))]
+ AddingFieldColumn { name: String, location: Location },
+
+ #[snafu(display("No field column found"))]
+ NoFieldColumn { location: Location },
}
pub type Result<T, E = Error> = std::result::Result<T, E>;
@@ -182,7 +196,10 @@ impl ErrorExt for Error {
| MissingRegionOption { .. }
| ConflictRegionOption { .. }
| ColumnTypeMismatch { .. }
- | PhysicalRegionBusy { .. } => StatusCode::InvalidArguments,
+ | PhysicalRegionBusy { .. }
+ | MultipleFieldColumn { .. }
+ | NoFieldColumn { .. }
+ | AddingFieldColumn { .. } => StatusCode::InvalidArguments,
ForbiddenPhysicalAlter { .. } | UnsupportedRegionRequest { .. } => {
StatusCode::Unsupported
diff --git a/src/metric-engine/src/test_util.rs b/src/metric-engine/src/test_util.rs
index 0d013ac7b308..2da459419d65 100644
--- a/src/metric-engine/src/test_util.rs
+++ b/src/metric-engine/src/test_util.rs
@@ -210,9 +210,9 @@ pub fn create_logical_region_request(
),
},
];
- for tag in tags {
+ for (bias, tag) in tags.iter().enumerate() {
column_metadatas.push(ColumnMetadata {
- column_id: 2,
+ column_id: 2 + bias as ColumnId,
semantic_type: SemanticType::Tag,
column_schema: ColumnSchema::new(
tag.to_string(),
diff --git a/src/operator/src/expr_factory.rs b/src/operator/src/expr_factory.rs
index 42d8ee3187c2..cc950f6ba7ec 100644
--- a/src/operator/src/expr_factory.rs
+++ b/src/operator/src/expr_factory.rs
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::collections::HashMap;
+use std::collections::{HashMap, HashSet};
use api::helper::ColumnDataTypeWrapper;
use api::v1::alter_expr::Kind;
@@ -31,7 +31,7 @@ use query::sql::{
};
use session::context::QueryContextRef;
use session::table_name::table_idents_to_full_name;
-use snafu::{ensure, ResultExt};
+use snafu::{ensure, OptionExt, ResultExt};
use sql::ast::{ColumnDef, ColumnOption, TableConstraint};
use sql::statements::alter::{AlterTable, AlterTableOperation};
use sql::statements::create::{CreateExternalTable, CreateTable, TIME_INDEX};
@@ -214,9 +214,72 @@ pub fn create_to_expr(create: &CreateTable, query_ctx: QueryContextRef) -> Resul
table_id: None,
engine: create.engine.to_string(),
};
+
+ validate_create_expr(&expr)?;
Ok(expr)
}
+/// Validate the [`CreateTableExpr`] request.
+pub fn validate_create_expr(create: &CreateTableExpr) -> Result<()> {
+ // construct column list
+ let mut column_to_indices = HashMap::with_capacity(create.column_defs.len());
+ for (idx, column) in create.column_defs.iter().enumerate() {
+ if let Some(indices) = column_to_indices.get(&column.name) {
+ return InvalidSqlSnafu {
+ err_msg: format!(
+ "column name `{}` is duplicated at index {} and {}",
+ column.name, indices, idx
+ ),
+ }
+ .fail();
+ }
+ column_to_indices.insert(&column.name, idx);
+ }
+
+ // verify time_index exists
+ let _ = column_to_indices
+ .get(&create.time_index)
+ .with_context(|| InvalidSqlSnafu {
+ err_msg: format!(
+ "column name `{}` is not found in column list",
+ create.time_index
+ ),
+ })?;
+
+ // verify primary_key exists
+ for pk in &create.primary_keys {
+ let _ = column_to_indices
+ .get(&pk)
+ .with_context(|| InvalidSqlSnafu {
+ err_msg: format!("column name `{}` is not found in column list", pk),
+ })?;
+ }
+
+ // construct primary_key set
+ let mut pk_set = HashSet::new();
+ for pk in &create.primary_keys {
+ if !pk_set.insert(pk) {
+ return InvalidSqlSnafu {
+ err_msg: format!("column name `{}` is duplicated in primary keys", pk),
+ }
+ .fail();
+ }
+ }
+
+ // verify time index is not primary key
+ if pk_set.contains(&create.time_index) {
+ return InvalidSqlSnafu {
+ err_msg: format!(
+ "column name `{}` is both primary key and time index",
+ create.time_index
+ ),
+ }
+ .fail();
+ }
+
+ Ok(())
+}
+
fn find_primary_keys(
columns: &[ColumnDef],
constraints: &[TableConstraint],
@@ -457,6 +520,33 @@ mod tests {
);
}
+ #[test]
+ fn test_invalid_create_to_expr() {
+ let cases = [
+ // duplicate column declaration
+ "CREATE TABLE monitor (host STRING primary key, ts TIMESTAMP TIME INDEX, some_column text, some_column string);",
+ // duplicate primary key
+ "CREATE TABLE monitor (host STRING, ts TIMESTAMP TIME INDEX, some_column STRING, PRIMARY KEY (some_column, host, some_column));",
+ // time index is primary key
+ "CREATE TABLE monitor (host STRING, ts TIMESTAMP TIME INDEX, PRIMARY KEY (host, ts));"
+ ];
+
+ for sql in cases {
+ let stmt = ParserContext::create_with_dialect(
+ sql,
+ &GreptimeDbDialect {},
+ ParseOptions::default(),
+ )
+ .unwrap()
+ .pop()
+ .unwrap();
+ let Statement::CreateTable(create_table) = stmt else {
+ unreachable!()
+ };
+ create_to_expr(&create_table, QueryContext::arc()).unwrap_err();
+ }
+ }
+
#[test]
fn test_create_to_expr_with_default_timestamp_value() {
let sql = "CREATE TABLE monitor (v double,ts TIMESTAMP default '2024-01-30T00:01:01',TIME INDEX (ts)) engine=mito;";
diff --git a/src/store-api/src/region_request.rs b/src/store-api/src/region_request.rs
index b98c3951d523..687db126f028 100644
--- a/src/store-api/src/region_request.rs
+++ b/src/store-api/src/region_request.rs
@@ -247,6 +247,53 @@ pub struct RegionCreateRequest {
pub region_dir: String,
}
+impl RegionCreateRequest {
+ /// Checks whether the request is valid, returns an error if it is invalid.
+ pub fn validate(&self) -> Result<()> {
+ // time index must exist
+ ensure!(
+ self.column_metadatas
+ .iter()
+ .any(|x| x.semantic_type == SemanticType::Timestamp),
+ InvalidRegionRequestSnafu {
+ region_id: RegionId::new(0, 0),
+ err: "missing timestamp column in create region request".to_string(),
+ }
+ );
+
+ // build column id to indices
+ let mut column_id_to_indices = HashMap::with_capacity(self.column_metadatas.len());
+ for (i, c) in self.column_metadatas.iter().enumerate() {
+ if let Some(previous) = column_id_to_indices.insert(c.column_id, i) {
+ return InvalidRegionRequestSnafu {
+ region_id: RegionId::new(0, 0),
+ err: format!(
+ "duplicate column id {} (at position {} and {}) in create region request",
+ c.column_id, previous, i
+ ),
+ }
+ .fail();
+ }
+ }
+
+ // primary key must exist
+ for column_id in &self.primary_key {
+ ensure!(
+ column_id_to_indices.contains_key(column_id),
+ InvalidRegionRequestSnafu {
+ region_id: RegionId::new(0, 0),
+ err: format!(
+ "missing primary key column {} in create region request",
+ column_id
+ ),
+ }
+ );
+ }
+
+ Ok(())
+ }
+}
+
#[derive(Debug, Clone, Default)]
pub struct RegionDropRequest {}
@@ -965,4 +1012,46 @@ mod tests {
metadata.schema_version = 1;
request.validate(&metadata).unwrap();
}
+
+ #[test]
+ fn test_validate_create_region() {
+ let column_metadatas = vec![
+ ColumnMetadata {
+ column_schema: ColumnSchema::new(
+ "ts",
+ ConcreteDataType::timestamp_millisecond_datatype(),
+ false,
+ ),
+ semantic_type: SemanticType::Timestamp,
+ column_id: 1,
+ },
+ ColumnMetadata {
+ column_schema: ColumnSchema::new(
+ "tag_0",
+ ConcreteDataType::string_datatype(),
+ true,
+ ),
+ semantic_type: SemanticType::Tag,
+ column_id: 2,
+ },
+ ColumnMetadata {
+ column_schema: ColumnSchema::new(
+ "field_0",
+ ConcreteDataType::string_datatype(),
+ true,
+ ),
+ semantic_type: SemanticType::Field,
+ column_id: 3,
+ },
+ ];
+ let create = RegionCreateRequest {
+ engine: "mito".to_string(),
+ column_metadatas,
+ primary_key: vec![3, 4],
+ options: HashMap::new(),
+ region_dir: "path".to_string(),
+ };
+
+ assert!(create.validate().is_err());
+ }
}
diff --git a/tests-fuzz/targets/fuzz_create_table.rs b/tests-fuzz/targets/fuzz_create_table.rs
index 60bcb1628bd8..ae43e6d6966f 100644
--- a/tests-fuzz/targets/fuzz_create_table.rs
+++ b/tests-fuzz/targets/fuzz_create_table.rs
@@ -61,34 +61,19 @@ impl Arbitrary<'_> for FuzzInput {
fn generate_expr(input: FuzzInput) -> Result<CreateTableExpr> {
let mut rng = ChaChaRng::seed_from_u64(input.seed);
- let metric_engine = rng.gen_bool(0.5);
let if_not_exists = rng.gen_bool(0.5);
- if metric_engine {
- let create_table_generator = CreateTableExprGeneratorBuilder::default()
- .name_generator(Box::new(MappedGenerator::new(
- WordGenerator,
- merge_two_word_map_fn(random_capitalize_map, uppercase_and_keyword_backtick_map),
- )))
- .columns(input.columns)
- .engine("metric")
- .if_not_exists(if_not_exists)
- .with_clause([("physical_metric_table".to_string(), "".to_string())])
- .build()
- .unwrap();
- create_table_generator.generate(&mut rng)
- } else {
- let create_table_generator = CreateTableExprGeneratorBuilder::default()
- .name_generator(Box::new(MappedGenerator::new(
- WordGenerator,
- merge_two_word_map_fn(random_capitalize_map, uppercase_and_keyword_backtick_map),
- )))
- .columns(input.columns)
- .engine("mito")
- .if_not_exists(if_not_exists)
- .build()
- .unwrap();
- create_table_generator.generate(&mut rng)
- }
+
+ let create_table_generator = CreateTableExprGeneratorBuilder::default()
+ .name_generator(Box::new(MappedGenerator::new(
+ WordGenerator,
+ merge_two_word_map_fn(random_capitalize_map, uppercase_and_keyword_backtick_map),
+ )))
+ .columns(input.columns)
+ .engine("mito")
+ .if_not_exists(if_not_exists)
+ .build()
+ .unwrap();
+ create_table_generator.generate(&mut rng)
}
async fn execute_create_table(ctx: FuzzContext, input: FuzzInput) -> Result<()> {
diff --git a/tests/cases/standalone/common/create/create_metric_table.result b/tests/cases/standalone/common/create/create_metric_table.result
index f844c5cbd5c2..37a59598ef91 100644
--- a/tests/cases/standalone/common/create/create_metric_table.result
+++ b/tests/cases/standalone/common/create/create_metric_table.result
@@ -20,6 +20,11 @@ DESC TABLE phy;
| val | Float64 | | YES | | FIELD |
+--------+----------------------+-----+------+---------+---------------+
+-- create table with duplicate column def
+CREATE TABLE t1(ts timestamp time index, val double, host text, host string) engine=metric with ("on_physical_table" = "phy");
+
+Error: 1004(InvalidArguments), Invalid SQL, error: column name `host` is duplicated at index 2 and 3
+
CREATE TABLE t1 (ts timestamp time index, val double, host string primary key) engine = metric with ("on_physical_table" = "phy");
Affected Rows: 0
@@ -28,6 +33,21 @@ CREATE TABLE t2 (ts timestamp time index, job string primary key, val double) en
Affected Rows: 0
+-- create logical table with different data type on field column
+CREATE TABLE t3 (ts timestamp time index, val string, host string, primary key (host)) engine=metric with ("on_physical_table" = "phy");
+
+Error: 1004(InvalidArguments), Column type mismatch. Expect Float64(Float64Type), got String(StringType)
+
+-- create logical table with different data type on tag column
+CREATE TABLE t4 (ts timestamp time index, val double, host double, primary key (host)) engine=metric with ("on_physical_table" = "phy");
+
+Error: 1004(InvalidArguments), Column type mismatch. Expect String(StringType), got Float64(Float64Type)
+
+-- create logical table with different column name on field column
+CREATE TABLE t5 (ts timestamp time index, valval double, host string primary key) engine = metric with ("on_physical_table" = "phy");
+
+Error: 1004(InvalidArguments), Adding field column valval to physical table
+
SELECT table_catalog, table_schema, table_name, table_type, engine FROM information_schema.tables WHERE engine = 'metric' order by table_name;
+---------------+--------------+------------+------------+--------+
@@ -126,18 +146,10 @@ Affected Rows: 0
-- fuzz test case https://github.com/GreptimeTeam/greptimedb/issues/3612
CREATE TABLE `auT`(
incidunt TIMESTAMP(3) TIME INDEX,
- `QuaErAT` BOOLEAN,
- `REPREHenDERIt` BOOLEAN DEFAULT true,
- `Et` INT NULL,
- `AutEM` INT,
- esse DOUBLE,
- `Tempore` BOOLEAN,
- `reruM` BOOLEAN,
- `eRrOR` BOOLEAN NULL,
- `cOMmodi` BOOLEAN,
- `PERfERENdIS` DOUBLE,
- `eSt` FLOAT DEFAULT 0.70978713,
- PRIMARY KEY(`cOMmodi`, `PERfERENdIS`, esse)
+ `REPREHenDERIt` double DEFAULT 0.70978713,
+ `cOMmodi` STRING,
+ `PERfERENdIS` STRING,
+ PRIMARY KEY(`cOMmodi`, `PERfERENdIS`)
) ENGINE = metric with ("physical_metric_table" = "");
Affected Rows: 0
@@ -148,17 +160,9 @@ DESC TABLE `auT`;
| Column | Type | Key | Null | Default | Semantic Type |
+---------------+----------------------+-----+------+------------+---------------+
| incidunt | TimestampMillisecond | PRI | NO | | TIMESTAMP |
-| QuaErAT | Boolean | | YES | | FIELD |
-| REPREHenDERIt | Boolean | | YES | true | FIELD |
-| Et | Int32 | | YES | | FIELD |
-| AutEM | Int32 | | YES | | FIELD |
-| esse | Float64 | PRI | YES | | TAG |
-| Tempore | Boolean | | YES | | FIELD |
-| reruM | Boolean | | YES | | FIELD |
-| eRrOR | Boolean | | YES | | FIELD |
-| cOMmodi | Boolean | PRI | YES | | TAG |
-| PERfERENdIS | Float64 | PRI | YES | | TAG |
-| eSt | Float32 | | YES | 0.70978713 | FIELD |
+| REPREHenDERIt | Float64 | | YES | 0.70978713 | FIELD |
+| cOMmodi | String | PRI | YES | | TAG |
+| PERfERENdIS | String | PRI | YES | | TAG |
+---------------+----------------------+-----+------+------------+---------------+
DROP TABLE `auT`;
diff --git a/tests/cases/standalone/common/create/create_metric_table.sql b/tests/cases/standalone/common/create/create_metric_table.sql
index af1acdac0551..a444986e9ee4 100644
--- a/tests/cases/standalone/common/create/create_metric_table.sql
+++ b/tests/cases/standalone/common/create/create_metric_table.sql
@@ -4,10 +4,22 @@ SHOW TABLES;
DESC TABLE phy;
+-- create table with duplicate column def
+CREATE TABLE t1(ts timestamp time index, val double, host text, host string) engine=metric with ("on_physical_table" = "phy");
+
CREATE TABLE t1 (ts timestamp time index, val double, host string primary key) engine = metric with ("on_physical_table" = "phy");
CREATE TABLE t2 (ts timestamp time index, job string primary key, val double) engine = metric with ("on_physical_table" = "phy");
+-- create logical table with different data type on field column
+CREATE TABLE t3 (ts timestamp time index, val string, host string, primary key (host)) engine=metric with ("on_physical_table" = "phy");
+
+-- create logical table with different data type on tag column
+CREATE TABLE t4 (ts timestamp time index, val double, host double, primary key (host)) engine=metric with ("on_physical_table" = "phy");
+
+-- create logical table with different column name on field column
+CREATE TABLE t5 (ts timestamp time index, valval double, host string primary key) engine = metric with ("on_physical_table" = "phy");
+
SELECT table_catalog, table_schema, table_name, table_type, engine FROM information_schema.tables WHERE engine = 'metric' order by table_name;
DESC TABLE phy;
@@ -38,18 +50,10 @@ DROP TABLE phy2;
-- fuzz test case https://github.com/GreptimeTeam/greptimedb/issues/3612
CREATE TABLE `auT`(
incidunt TIMESTAMP(3) TIME INDEX,
- `QuaErAT` BOOLEAN,
- `REPREHenDERIt` BOOLEAN DEFAULT true,
- `Et` INT NULL,
- `AutEM` INT,
- esse DOUBLE,
- `Tempore` BOOLEAN,
- `reruM` BOOLEAN,
- `eRrOR` BOOLEAN NULL,
- `cOMmodi` BOOLEAN,
- `PERfERENdIS` DOUBLE,
- `eSt` FLOAT DEFAULT 0.70978713,
- PRIMARY KEY(`cOMmodi`, `PERfERENdIS`, esse)
+ `REPREHenDERIt` double DEFAULT 0.70978713,
+ `cOMmodi` STRING,
+ `PERfERENdIS` STRING,
+ PRIMARY KEY(`cOMmodi`, `PERfERENdIS`)
) ENGINE = metric with ("physical_metric_table" = "");
DESC TABLE `auT`;
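The `validate_create_expr` change above boils down to building a name-to-index map and checking duplicates, the time index, and the primary keys against it. A stripped-down sketch over plain strings (illustrative only; the real code works on `CreateTableExpr` with snafu errors):

use std::collections::HashMap;

// Illustrative-only validation over plain column names.
fn validate(columns: &[&str], time_index: &str, primary_keys: &[&str]) -> Result<(), String> {
    let mut index_of = HashMap::with_capacity(columns.len());
    for (idx, name) in columns.iter().enumerate() {
        if let Some(prev) = index_of.insert(*name, idx) {
            return Err(format!("column `{name}` is duplicated at index {prev} and {idx}"));
        }
    }
    if !index_of.contains_key(time_index) {
        return Err(format!("time index `{time_index}` is not in the column list"));
    }
    for pk in primary_keys {
        if !index_of.contains_key(*pk) {
            return Err(format!("primary key `{pk}` is not in the column list"));
        }
        if *pk == time_index {
            return Err(format!("column `{pk}` is both primary key and time index"));
        }
    }
    Ok(())
}

fn main() {
    assert!(validate(&["host", "ts", "val"], "ts", &["host"]).is_ok());
    assert!(validate(&["host", "ts", "host"], "ts", &["host"]).is_err()); // duplicate column
    assert!(validate(&["host", "ts"], "ts", &["ts"]).is_err()); // time index used as primary key
}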
|
feat
|
add validate method to CreateExpr (#3772)
|
067c5ee7ced92b3a2f3ecae53303cc0c8dec450c
|
2023-05-22 16:00:23
|
Ning Sun
|
feat: time_zone variable for mysql connections (#1607)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 32f82ce086e9..0c91587cc0c4 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1874,6 +1874,7 @@ name = "common-time"
version = "0.2.0"
dependencies = [
"chrono",
+ "chrono-tz 0.8.2",
"common-error",
"rand",
"serde",
@@ -7995,6 +7996,7 @@ dependencies = [
"arc-swap",
"common-catalog",
"common-telemetry",
+ "common-time",
]
[[package]]
diff --git a/src/common/time/Cargo.toml b/src/common/time/Cargo.toml
index 49d778a858ba..c8c93bec1de8 100644
--- a/src/common/time/Cargo.toml
+++ b/src/common/time/Cargo.toml
@@ -6,6 +6,7 @@ license.workspace = true
[dependencies]
chrono.workspace = true
+chrono-tz = "0.8"
common-error = { path = "../error" }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
diff --git a/src/common/time/src/error.rs b/src/common/time/src/error.rs
index c39b9dd61d40..fd0a2269f99a 100644
--- a/src/common/time/src/error.rs
+++ b/src/common/time/src/error.rs
@@ -13,7 +13,7 @@
// limitations under the License.
use std::any::Any;
-use std::num::TryFromIntError;
+use std::num::{ParseIntError, TryFromIntError};
use chrono::ParseError;
use common_error::ext::ErrorExt;
@@ -40,14 +40,33 @@ pub enum Error {
#[snafu(display("Timestamp arithmetic overflow, msg: {}", msg))]
ArithmeticOverflow { msg: String, location: Location },
+
+ #[snafu(display("Invalid time zone offset: {hours}:{minutes}"))]
+ InvalidTimeZoneOffset {
+ hours: i32,
+ minutes: u32,
+ location: Location,
+ },
+
+ #[snafu(display("Invalid offset string {raw}: {source}"))]
+ ParseOffsetStr {
+ raw: String,
+ source: ParseIntError,
+ location: Location,
+ },
+
+ #[snafu(display("Invalid time zone string {raw}"))]
+ ParseTimeZoneName { raw: String, location: Location },
}
impl ErrorExt for Error {
fn status_code(&self) -> StatusCode {
match self {
- Error::ParseDateStr { .. } | Error::ParseTimestamp { .. } => {
- StatusCode::InvalidArguments
- }
+ Error::ParseDateStr { .. }
+ | Error::ParseTimestamp { .. }
+ | Error::InvalidTimeZoneOffset { .. }
+ | Error::ParseOffsetStr { .. }
+ | Error::ParseTimeZoneName { .. } => StatusCode::InvalidArguments,
Error::TimestampOverflow { .. } => StatusCode::Internal,
Error::InvalidDateStr { .. } | Error::ArithmeticOverflow { .. } => {
StatusCode::InvalidArguments
@@ -64,7 +83,10 @@ impl ErrorExt for Error {
Error::ParseTimestamp { location, .. }
| Error::TimestampOverflow { location, .. }
| Error::ArithmeticOverflow { location, .. } => Some(*location),
- Error::ParseDateStr { .. } => None,
+ Error::ParseDateStr { .. }
+ | Error::InvalidTimeZoneOffset { .. }
+ | Error::ParseOffsetStr { .. }
+ | Error::ParseTimeZoneName { .. } => None,
Error::InvalidDateStr { location, .. } => Some(*location),
}
}
diff --git a/src/common/time/src/lib.rs b/src/common/time/src/lib.rs
index fdc9033bed98..76558e761074 100644
--- a/src/common/time/src/lib.rs
+++ b/src/common/time/src/lib.rs
@@ -18,6 +18,7 @@ pub mod error;
pub mod range;
pub mod timestamp;
pub mod timestamp_millis;
+pub mod timezone;
pub mod util;
pub use date::Date;
@@ -25,3 +26,4 @@ pub use datetime::DateTime;
pub use range::RangeMillis;
pub use timestamp::Timestamp;
pub use timestamp_millis::TimestampMillis;
+pub use timezone::TimeZone;
diff --git a/src/common/time/src/timestamp.rs b/src/common/time/src/timestamp.rs
index 898da0879002..d1c2aef80229 100644
--- a/src/common/time/src/timestamp.rs
+++ b/src/common/time/src/timestamp.rs
@@ -20,12 +20,13 @@ use std::str::FromStr;
use std::time::Duration;
use chrono::offset::Local;
-use chrono::{DateTime, LocalResult, NaiveDateTime, TimeZone, Utc};
+use chrono::{DateTime, LocalResult, NaiveDateTime, TimeZone as ChronoTimeZone, Utc};
use serde::{Deserialize, Serialize};
use snafu::{OptionExt, ResultExt};
use crate::error;
use crate::error::{ArithmeticOverflowSnafu, Error, ParseTimestampSnafu, TimestampOverflowSnafu};
+use crate::timezone::TimeZone;
use crate::util::div_ceil;
#[derive(Debug, Clone, Default, Copy, Serialize, Deserialize)]
@@ -171,17 +172,33 @@ impl Timestamp {
/// Format timestamp to ISO8601 string. If the timestamp exceeds what chrono timestamp can
/// represent, this function simply print the timestamp unit and value in plain string.
pub fn to_iso8601_string(&self) -> String {
- self.as_formatted_string("%Y-%m-%d %H:%M:%S%.f%z")
+ self.as_formatted_string("%Y-%m-%d %H:%M:%S%.f%z", None)
}
pub fn to_local_string(&self) -> String {
- self.as_formatted_string("%Y-%m-%d %H:%M:%S%.f")
+ self.as_formatted_string("%Y-%m-%d %H:%M:%S%.f", None)
}
- fn as_formatted_string(self, pattern: &str) -> String {
+ /// Format the timestamp for the given timezone.
+ /// When the timezone is None, local time is used by default.
+ pub fn to_timezone_aware_string(&self, tz: Option<TimeZone>) -> String {
+ self.as_formatted_string("%Y-%m-%d %H:%M:%S%.f", tz)
+ }
+
+ fn as_formatted_string(self, pattern: &str, timezone: Option<TimeZone>) -> String {
if let Some(v) = self.to_chrono_datetime() {
- let local = Local {};
- format!("{}", local.from_utc_datetime(&v).format(pattern))
+ match timezone {
+ Some(TimeZone::Offset(offset)) => {
+ format!("{}", offset.from_utc_datetime(&v).format(pattern))
+ }
+ Some(TimeZone::Named(tz)) => {
+ format!("{}", tz.from_utc_datetime(&v).format(pattern))
+ }
+ None => {
+ let local = Local {};
+ format!("{}", local.from_utc_datetime(&v).format(pattern))
+ }
+ }
} else {
format!("[Timestamp{}: {}]", self.unit, self.value)
}
@@ -934,4 +951,54 @@ mod tests {
Timestamp::new_millisecond(58).sub(Timestamp::new_millisecond(100))
);
}
+
+ #[test]
+ fn test_to_timezone_aware_string() {
+ std::env::set_var("TZ", "Asia/Shanghai");
+
+ assert_eq!(
+ "1970-01-01 08:00:00.001",
+ Timestamp::new(1, TimeUnit::Millisecond).to_timezone_aware_string(None)
+ );
+ assert_eq!(
+ "1970-01-01 08:00:00.001",
+ Timestamp::new(1, TimeUnit::Millisecond)
+ .to_timezone_aware_string(TimeZone::from_tz_string("SYSTEM").unwrap())
+ );
+ assert_eq!(
+ "1970-01-01 08:00:00.001",
+ Timestamp::new(1, TimeUnit::Millisecond)
+ .to_timezone_aware_string(TimeZone::from_tz_string("+08:00").unwrap())
+ );
+ assert_eq!(
+ "1970-01-01 07:00:00.001",
+ Timestamp::new(1, TimeUnit::Millisecond)
+ .to_timezone_aware_string(TimeZone::from_tz_string("+07:00").unwrap())
+ );
+ assert_eq!(
+ "1969-12-31 23:00:00.001",
+ Timestamp::new(1, TimeUnit::Millisecond)
+ .to_timezone_aware_string(TimeZone::from_tz_string("-01:00").unwrap())
+ );
+ assert_eq!(
+ "1970-01-01 08:00:00.001",
+ Timestamp::new(1, TimeUnit::Millisecond)
+ .to_timezone_aware_string(TimeZone::from_tz_string("Asia/Shanghai").unwrap())
+ );
+ assert_eq!(
+ "1970-01-01 00:00:00.001",
+ Timestamp::new(1, TimeUnit::Millisecond)
+ .to_timezone_aware_string(TimeZone::from_tz_string("UTC").unwrap())
+ );
+ assert_eq!(
+ "1970-01-01 01:00:00.001",
+ Timestamp::new(1, TimeUnit::Millisecond)
+ .to_timezone_aware_string(TimeZone::from_tz_string("Europe/Berlin").unwrap())
+ );
+ assert_eq!(
+ "1970-01-01 03:00:00.001",
+ Timestamp::new(1, TimeUnit::Millisecond)
+ .to_timezone_aware_string(TimeZone::from_tz_string("Europe/Moscow").unwrap())
+ );
+ }
}
diff --git a/src/common/time/src/timezone.rs b/src/common/time/src/timezone.rs
new file mode 100644
index 000000000000..6feba8b251a0
--- /dev/null
+++ b/src/common/time/src/timezone.rs
@@ -0,0 +1,158 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::fmt::Display;
+use std::str::FromStr;
+
+use chrono::{FixedOffset, Local};
+use chrono_tz::Tz;
+use snafu::{OptionExt, ResultExt};
+
+use crate::error::{
+ InvalidTimeZoneOffsetSnafu, ParseOffsetStrSnafu, ParseTimeZoneNameSnafu, Result,
+};
+
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub enum TimeZone {
+ Offset(FixedOffset),
+ Named(Tz),
+}
+
+impl TimeZone {
+ /// Compute the timezone from the given offset hours and minutes.
+ /// Returns an error if the given offset is out of range.
+ pub fn hours_mins_opt(offset_hours: i32, offset_mins: u32) -> Result<Self> {
+ let offset_secs = if offset_hours > 0 {
+ offset_hours * 3600 + offset_mins as i32 * 60
+ } else {
+ offset_hours * 3600 - offset_mins as i32 * 60
+ };
+
+ FixedOffset::east_opt(offset_secs)
+ .map(Self::Offset)
+ .context(InvalidTimeZoneOffsetSnafu {
+ hours: offset_hours,
+ minutes: offset_mins,
+ })
+ }
+
+ /// Parse a timezone string. Returns an error if the given offset exceeds the
+ /// valid range or the timezone name is unknown.
+ ///
+ /// String examples are available as described in
+ /// https://dev.mysql.com/doc/refman/8.0/en/time-zone-support.html
+ ///
+ /// - `SYSTEM`
+ /// - Offset to UTC: `+08:00` , `-11:30`
+ /// - Named zones: `Asia/Shanghai`, `Europe/Berlin`
+ pub fn from_tz_string(tz_string: &str) -> Result<Option<Self>> {
+ // Use system timezone
+ if tz_string.eq_ignore_ascii_case("SYSTEM") {
+ Ok(None)
+ } else if let Some((hrs, mins)) = tz_string.split_once(':') {
+ let hrs = hrs
+ .parse::<i32>()
+ .context(ParseOffsetStrSnafu { raw: tz_string })?;
+ let mins = mins
+ .parse::<u32>()
+ .context(ParseOffsetStrSnafu { raw: tz_string })?;
+ Self::hours_mins_opt(hrs, mins).map(Some)
+ } else if let Ok(tz) = Tz::from_str(tz_string) {
+ Ok(Some(Self::Named(tz)))
+ } else {
+ ParseTimeZoneNameSnafu { raw: tz_string }.fail()
+ }
+ }
+}
+
+impl Display for TimeZone {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ match self {
+ Self::Named(tz) => write!(f, "{}", tz.name()),
+ Self::Offset(offset) => write!(f, "{}", offset),
+ }
+ }
+}
+
+#[inline]
+pub fn system_time_zone_name() -> String {
+ Local::now().offset().to_string()
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_from_tz_string() {
+ assert_eq!(None, TimeZone::from_tz_string("SYSTEM").unwrap());
+
+ let utc_plus_8 = Some(TimeZone::Offset(FixedOffset::east_opt(3600 * 8).unwrap()));
+ assert_eq!(utc_plus_8, TimeZone::from_tz_string("+8:00").unwrap());
+ assert_eq!(utc_plus_8, TimeZone::from_tz_string("+08:00").unwrap());
+ assert_eq!(utc_plus_8, TimeZone::from_tz_string("08:00").unwrap());
+
+ let utc_minus_8 = Some(TimeZone::Offset(FixedOffset::west_opt(3600 * 8).unwrap()));
+ assert_eq!(utc_minus_8, TimeZone::from_tz_string("-08:00").unwrap());
+ assert_eq!(utc_minus_8, TimeZone::from_tz_string("-8:00").unwrap());
+
+ let utc_minus_8_5 = Some(TimeZone::Offset(
+ FixedOffset::west_opt(3600 * 8 + 60 * 30).unwrap(),
+ ));
+ assert_eq!(utc_minus_8_5, TimeZone::from_tz_string("-8:30").unwrap());
+
+ let utc_plus_max = Some(TimeZone::Offset(FixedOffset::east_opt(3600 * 14).unwrap()));
+ assert_eq!(utc_plus_max, TimeZone::from_tz_string("14:00").unwrap());
+
+ let utc_minus_max = Some(TimeZone::Offset(
+ FixedOffset::west_opt(3600 * 13 + 60 * 59).unwrap(),
+ ));
+ assert_eq!(utc_minus_max, TimeZone::from_tz_string("-13:59").unwrap());
+
+ assert_eq!(
+ Some(TimeZone::Named(Tz::Asia__Shanghai)),
+ TimeZone::from_tz_string("Asia/Shanghai").unwrap()
+ );
+ assert_eq!(
+ Some(TimeZone::Named(Tz::UTC)),
+ TimeZone::from_tz_string("UTC").unwrap()
+ );
+
+ assert!(TimeZone::from_tz_string("WORLD_PEACE").is_err());
+ assert!(TimeZone::from_tz_string("A0:01").is_err());
+ assert!(TimeZone::from_tz_string("20:0A").is_err());
+ assert!(TimeZone::from_tz_string(":::::").is_err());
+ assert!(TimeZone::from_tz_string("Asia/London").is_err());
+ assert!(TimeZone::from_tz_string("Unknown").is_err());
+ }
+
+ #[test]
+ fn test_timezone_to_string() {
+ assert_eq!("UTC", TimeZone::Named(Tz::UTC).to_string());
+ assert_eq!(
+ "+01:00",
+ TimeZone::from_tz_string("01:00")
+ .unwrap()
+ .unwrap()
+ .to_string()
+ );
+ assert_eq!(
+ "Asia/Shanghai",
+ TimeZone::from_tz_string("Asia/Shanghai")
+ .unwrap()
+ .unwrap()
+ .to_string()
+ );
+ }
+}
diff --git a/src/servers/src/mysql/federated.rs b/src/servers/src/mysql/federated.rs
index 036d4ae98bd0..b01033c68a35 100644
--- a/src/servers/src/mysql/federated.rs
+++ b/src/servers/src/mysql/federated.rs
@@ -20,6 +20,8 @@ use std::sync::Arc;
use common_query::Output;
use common_recordbatch::RecordBatches;
+use common_time::timezone::system_time_zone_name;
+use common_time::TimeZone;
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::{ColumnSchema, Schema};
use datatypes::vectors::StringVector;
@@ -54,6 +56,10 @@ static SELECT_TIME_DIFF_FUNC_PATTERN: Lazy<Regex> =
static SHOW_SQL_MODE_PATTERN: Lazy<Regex> =
Lazy::new(|| Regex::new("(?i)^(SHOW VARIABLES LIKE 'sql_mode'(.*))").unwrap());
+// Time zone settings
+static SET_TIME_ZONE_PATTERN: Lazy<Regex> =
+ Lazy::new(|| Regex::new(r"(?i)^SET TIME_ZONE\s*=\s*'(\S+)'").unwrap());
+
static OTHER_NOT_SUPPORTED_STMT: Lazy<RegexSet> = Lazy::new(|| {
RegexSet::new([
// Txn.
@@ -124,8 +130,6 @@ static VAR_VALUES: Lazy<HashMap<&str, &str>> = Lazy::new(|| {
("transaction_isolation", "REPEATABLE-READ"),
("session.transaction_isolation", "REPEATABLE-READ"),
("session.transaction_read_only", "0"),
- ("time_zone", "UTC"),
- ("system_time_zone", "UTC"),
("max_allowed_packet", "134217728"),
("interactive_timeout", "31536000"),
("wait_timeout", "31536000"),
@@ -168,7 +172,7 @@ fn show_variables(name: &str, value: &str) -> RecordBatches {
.unwrap()
}
-fn select_variable(query: &str) -> Option<Output> {
+fn select_variable(query: &str, query_context: QueryContextRef) -> Option<Output> {
let mut fields = vec![];
let mut values = vec![];
@@ -191,12 +195,24 @@ fn select_variable(query: &str) -> Option<Output> {
.unwrap_or("")
})
.collect();
+
+ // get the value of variables from known sources or fall back to defaults
+ let value = match var_as[0] {
+ "time_zone" => query_context
+ .time_zone()
+ .map(|tz| tz.to_string())
+ .unwrap_or_else(|| "".to_owned()),
+ "system_time_zone" => system_time_zone_name(),
+ _ => VAR_VALUES
+ .get(var_as[0])
+ .map(|v| v.to_string())
+ .unwrap_or_else(|| "0".to_owned()),
+ };
+
+ values.push(Arc::new(StringVector::from(vec![value])) as _);
match var_as.len() {
1 => {
// @@aa
- let value = VAR_VALUES.get(var_as[0]).unwrap_or(&"0");
- values.push(Arc::new(StringVector::from(vec![*value])) as _);
-
// field is '@@aa'
fields.push(ColumnSchema::new(
&format!("@@{}", var_as[0]),
@@ -207,9 +223,6 @@ fn select_variable(query: &str) -> Option<Output> {
2 => {
// @@bb as cc:
// var is 'bb'.
- let value = VAR_VALUES.get(var_as[0]).unwrap_or(&"0");
- values.push(Arc::new(StringVector::from(vec![*value])) as _);
-
// field is 'cc'.
fields.push(ColumnSchema::new(
var_as[1],
@@ -227,12 +240,12 @@ fn select_variable(query: &str) -> Option<Output> {
Some(Output::RecordBatches(batches))
}
-fn check_select_variable(query: &str) -> Option<Output> {
+fn check_select_variable(query: &str, query_context: QueryContextRef) -> Option<Output> {
if vec![&SELECT_VAR_PATTERN, &MYSQL_CONN_JAVA_PATTERN]
.iter()
.any(|r| r.is_match(query))
{
- select_variable(query)
+ select_variable(query, query_context)
} else {
None
}
@@ -251,6 +264,20 @@ fn check_show_variables(query: &str) -> Option<Output> {
recordbatches.map(Output::RecordBatches)
}
+// TODO(sunng87): extract this to use sqlparser for more variables
+fn check_set_variables(query: &str, query_ctx: QueryContextRef) -> Option<Output> {
+ if let Some(captures) = SET_TIME_ZONE_PATTERN.captures(query) {
+ // get the capture
+ let tz = captures.get(1).unwrap();
+ if let Ok(timezone) = TimeZone::from_tz_string(tz.as_str()) {
+ query_ctx.set_time_zone(timezone);
+ return Some(Output::AffectedRows(0));
+ }
+ }
+
+ None
+}
+
// Check for SET or others query, this is the final check of the federated query.
fn check_others(query: &str, query_ctx: QueryContextRef) -> Option<Output> {
if OTHER_NOT_SUPPORTED_STMT.is_match(query.as_bytes()) {
@@ -283,19 +310,12 @@ pub(crate) fn check(query: &str, query_ctx: QueryContextRef) -> Option<Output> {
}
// First to check the query is like "select @@variables".
- let output = check_select_variable(query);
- if output.is_some() {
- return output;
- }
-
- // Then to check "show variables like ...".
- let output = check_show_variables(query);
- if output.is_some() {
- return output;
- }
-
- // Last check.
- check_others(query, query_ctx)
+ check_select_variable(query, query_ctx.clone())
+ // Then to check "show variables like ...".
+ .or_else(|| check_show_variables(query))
+ .or_else(|| check_set_variables(query, query_ctx.clone()))
+ // Last check
+ .or_else(|| check_others(query, query_ctx))
}
#[cfg(test)]
@@ -352,13 +372,15 @@ mod test {
+-----------------+------------------------+";
test(query, expected);
+ // set system timezone
+ std::env::set_var("TZ", "Asia/Shanghai");
// complex variables
let query = "/* mysql-connector-java-8.0.17 (Revision: 16a712ddb3f826a1933ab42b0039f7fb9eebc6ec) */SELECT @@session.auto_increment_increment AS auto_increment_increment, @@character_set_client AS character_set_client, @@character_set_connection AS character_set_connection, @@character_set_results AS character_set_results, @@character_set_server AS character_set_server, @@collation_server AS collation_server, @@collation_connection AS collation_connection, @@init_connect AS init_connect, @@interactive_timeout AS interactive_timeout, @@license AS license, @@lower_case_table_names AS lower_case_table_names, @@max_allowed_packet AS max_allowed_packet, @@net_write_timeout AS net_write_timeout, @@performance_schema AS performance_schema, @@sql_mode AS sql_mode, @@system_time_zone AS system_time_zone, @@time_zone AS time_zone, @@transaction_isolation AS transaction_isolation, @@wait_timeout AS wait_timeout;";
let expected = "\
+--------------------------+----------------------+--------------------------+-----------------------+----------------------+------------------+----------------------+--------------+---------------------+---------+------------------------+--------------------+-------------------+--------------------+----------+------------------+-----------+-----------------------+---------------+
| auto_increment_increment | character_set_client | character_set_connection | character_set_results | character_set_server | collation_server | collation_connection | init_connect | interactive_timeout | license | lower_case_table_names | max_allowed_packet | net_write_timeout | performance_schema | sql_mode | system_time_zone | time_zone | transaction_isolation | wait_timeout; |
+--------------------------+----------------------+--------------------------+-----------------------+----------------------+------------------+----------------------+--------------+---------------------+---------+------------------------+--------------------+-------------------+--------------------+----------+------------------+-----------+-----------------------+---------------+
-| 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 31536000 | 0 | 0 | 134217728 | 31536000 | 0 | 0 | UTC | UTC | REPEATABLE-READ | 31536000 |
+| 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 31536000 | 0 | 0 | 134217728 | 31536000 | 0 | 0 | +08:00 | | REPEATABLE-READ | 31536000 |
+--------------------------+----------------------+--------------------------+-----------------------+----------------------+------------------+----------------------+--------------+---------------------+---------+------------------------+--------------------+-------------------+--------------------+----------+------------------+-----------+-----------------------+---------------+";
test(query, expected);
@@ -395,4 +417,31 @@ mod test {
+----------------------------------+";
test(query, expected);
}
+
+ #[test]
+ fn test_set_time_zone() {
+ let query_context = Arc::new(QueryContext::new());
+ let output = check("set time_zone = 'UTC'", query_context.clone());
+ match output.unwrap() {
+ Output::AffectedRows(rows) => {
+ assert_eq!(rows, 0)
+ }
+ _ => unreachable!(),
+ }
+ assert_eq!("UTC", query_context.time_zone().unwrap().to_string());
+
+ let output = check("select @@time_zone", query_context);
+ match output.unwrap() {
+ Output::RecordBatches(r) => {
+ let expected = "\
++-------------+
+| @@time_zone |
++-------------+
+| UTC |
++-------------+";
+ assert_eq!(r.pretty_print().unwrap(), expected);
+ }
+ _ => unreachable!(),
+ }
+ }
}
diff --git a/src/servers/src/mysql/handler.rs b/src/servers/src/mysql/handler.rs
index d49d4fb12fba..1c48ae940152 100644
--- a/src/servers/src/mysql/handler.rs
+++ b/src/servers/src/mysql/handler.rs
@@ -231,7 +231,7 @@ impl<W: AsyncWrite + Send + Sync + Unpin> AsyncMysqlShim<W> for MysqlInstanceShi
log::debug!("execute replaced query: {}", query);
let outputs = self.do_query(&query).await;
- writer::write_output(w, &query, outputs).await?;
+ writer::write_output(w, &query, self.session.context(), outputs).await?;
Ok(())
}
@@ -263,7 +263,7 @@ impl<W: AsyncWrite + Send + Sync + Unpin> AsyncMysqlShim<W> for MysqlInstanceShi
]
);
let outputs = self.do_query(query).await;
- writer::write_output(writer, query, outputs).await?;
+ writer::write_output(writer, query, self.session.context(), outputs).await?;
Ok(())
}
diff --git a/src/servers/src/mysql/writer.rs b/src/servers/src/mysql/writer.rs
index a1109cc7f32d..6a060635b6da 100644
--- a/src/servers/src/mysql/writer.rs
+++ b/src/servers/src/mysql/writer.rs
@@ -22,6 +22,7 @@ use datatypes::schema::{ColumnSchema, SchemaRef};
use opensrv_mysql::{
Column, ColumnFlags, ColumnType, ErrorKind, OkResponse, QueryResultWriter, RowWriter,
};
+use session::context::QueryContextRef;
use snafu::prelude::*;
use tokio::io::AsyncWrite;
@@ -31,9 +32,10 @@ use crate::error::{self, Error, Result};
pub async fn write_output<'a, W: AsyncWrite + Send + Sync + Unpin>(
w: QueryResultWriter<'a, W>,
query: &str,
+ query_context: QueryContextRef,
outputs: Vec<Result<Output>>,
) -> Result<()> {
- let mut writer = Some(MysqlResultWriter::new(w));
+ let mut writer = Some(MysqlResultWriter::new(w, query_context.clone()));
for output in outputs {
let result_writer = writer.take().context(error::InternalSnafu {
err_msg: "Sending multiple result set is unsupported",
@@ -54,11 +56,18 @@ struct QueryResult {
pub struct MysqlResultWriter<'a, W: AsyncWrite + Unpin> {
writer: QueryResultWriter<'a, W>,
+ query_context: QueryContextRef,
}
impl<'a, W: AsyncWrite + Unpin> MysqlResultWriter<'a, W> {
- pub fn new(writer: QueryResultWriter<'a, W>) -> MysqlResultWriter<'a, W> {
- MysqlResultWriter::<'a, W> { writer }
+ pub fn new(
+ writer: QueryResultWriter<'a, W>,
+ query_context: QueryContextRef,
+ ) -> MysqlResultWriter<'a, W> {
+ MysqlResultWriter::<'a, W> {
+ writer,
+ query_context,
+ }
}
/// Try to write one result set. If there are more than one result set, return `Some`.
@@ -80,18 +89,23 @@ impl<'a, W: AsyncWrite + Unpin> MysqlResultWriter<'a, W> {
recordbatches,
schema,
};
- Self::write_query_result(query, query_result, self.writer).await?;
+ Self::write_query_result(query, query_result, self.writer, self.query_context)
+ .await?;
}
Output::RecordBatches(recordbatches) => {
let query_result = QueryResult {
schema: recordbatches.schema(),
recordbatches: recordbatches.take(),
};
- Self::write_query_result(query, query_result, self.writer).await?;
+ Self::write_query_result(query, query_result, self.writer, self.query_context)
+ .await?;
}
Output::AffectedRows(rows) => {
let next_writer = Self::write_affected_rows(self.writer, rows).await?;
- return Ok(Some(MysqlResultWriter::new(next_writer)));
+ return Ok(Some(MysqlResultWriter::new(
+ next_writer,
+ self.query_context,
+ )));
}
},
Err(error) => Self::write_query_error(query, error, self.writer).await?,
@@ -122,6 +136,7 @@ impl<'a, W: AsyncWrite + Unpin> MysqlResultWriter<'a, W> {
query: &str,
query_result: QueryResult,
writer: QueryResultWriter<'a, W>,
+ query_context: QueryContextRef,
) -> Result<()> {
match create_mysql_column_def(&query_result.schema) {
Ok(column_def) => {
@@ -129,7 +144,8 @@ impl<'a, W: AsyncWrite + Unpin> MysqlResultWriter<'a, W> {
// to return a new QueryResultWriter.
let mut row_writer = writer.start(&column_def).await?;
for recordbatch in &query_result.recordbatches {
- Self::write_recordbatch(&mut row_writer, recordbatch).await?;
+ Self::write_recordbatch(&mut row_writer, recordbatch, query_context.clone())
+ .await?;
}
row_writer.finish().await?;
Ok(())
@@ -141,6 +157,7 @@ impl<'a, W: AsyncWrite + Unpin> MysqlResultWriter<'a, W> {
async fn write_recordbatch(
row_writer: &mut RowWriter<'_, W>,
recordbatch: &RecordBatch,
+ query_context: QueryContextRef,
) -> Result<()> {
for row in recordbatch.rows() {
for value in row.into_iter() {
@@ -161,7 +178,8 @@ impl<'a, W: AsyncWrite + Unpin> MysqlResultWriter<'a, W> {
Value::Binary(v) => row_writer.write_col(v.deref())?,
Value::Date(v) => row_writer.write_col(v.val())?,
Value::DateTime(v) => row_writer.write_col(v.val())?,
- Value::Timestamp(v) => row_writer.write_col(v.to_local_string())?,
+ Value::Timestamp(v) => row_writer
+ .write_col(v.to_timezone_aware_string(query_context.time_zone()))?,
Value::List(_) => {
return Err(Error::Internal {
err_msg: format!(
diff --git a/src/session/Cargo.toml b/src/session/Cargo.toml
index f6dff95e464a..06224ac8ef76 100644
--- a/src/session/Cargo.toml
+++ b/src/session/Cargo.toml
@@ -8,3 +8,4 @@ license.workspace = true
arc-swap = "1.5"
common-catalog = { path = "../common/catalog" }
common-telemetry = { path = "../common/telemetry" }
+common-time = { path = "../common/time" }
diff --git a/src/session/src/context.rs b/src/session/src/context.rs
index fbfad7991739..5adbe84ae9e1 100644
--- a/src/session/src/context.rs
+++ b/src/session/src/context.rs
@@ -20,6 +20,7 @@ use arc_swap::ArcSwap;
use common_catalog::build_db_string;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_telemetry::debug;
+use common_time::TimeZone;
pub type QueryContextRef = Arc<QueryContext>;
pub type ConnInfoRef = Arc<ConnInfo>;
@@ -28,6 +29,7 @@ pub type ConnInfoRef = Arc<ConnInfo>;
pub struct QueryContext {
current_catalog: ArcSwap<String>,
current_schema: ArcSwap<String>,
+ time_zone: ArcSwap<Option<TimeZone>>,
}
impl Default for QueryContext {
@@ -56,6 +58,7 @@ impl QueryContext {
Self {
current_catalog: ArcSwap::new(Arc::new(DEFAULT_CATALOG_NAME.to_string())),
current_schema: ArcSwap::new(Arc::new(DEFAULT_SCHEMA_NAME.to_string())),
+ time_zone: ArcSwap::new(Arc::new(None)),
}
}
@@ -63,6 +66,7 @@ impl QueryContext {
Self {
current_catalog: ArcSwap::new(Arc::new(catalog.to_string())),
current_schema: ArcSwap::new(Arc::new(schema.to_string())),
+ time_zone: ArcSwap::new(Arc::new(None)),
}
}
@@ -99,6 +103,16 @@ impl QueryContext {
let schema = self.current_schema();
build_db_string(&catalog, &schema)
}
+
+ #[inline]
+ pub fn time_zone(&self) -> Option<TimeZone> {
+ self.time_zone.load().as_ref().clone()
+ }
+
+ #[inline]
+ pub fn set_time_zone(&self, tz: Option<TimeZone>) {
+ self.time_zone.swap(Arc::new(tz));
+ }
}
pub const DEFAULT_USERNAME: &str = "greptime";
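The offset branch of `TimeZone::from_tz_string` above splits on ':' and feeds the result to `FixedOffset`. A minimal sketch of just that branch, assuming the `chrono` crate (the helper name is illustrative):

use chrono::FixedOffset;

// Illustrative only: parse "+08:00" / "-8:30" style offsets.
fn parse_offset(s: &str) -> Option<FixedOffset> {
    let (hrs, mins) = s.split_once(':')?;
    let hrs: i32 = hrs.parse().ok()?;
    let mins: i32 = mins.parse().ok()?;
    let secs = if hrs < 0 {
        hrs * 3600 - mins * 60
    } else {
        hrs * 3600 + mins * 60
    };
    // `east_opt` rejects offsets outside chrono's supported range.
    FixedOffset::east_opt(secs)
}

fn main() {
    assert_eq!(parse_offset("+08:00").map(|o| o.local_minus_utc()), Some(8 * 3600));
    assert_eq!(parse_offset("-8:30").map(|o| o.local_minus_utc()), Some(-(8 * 3600 + 30 * 60)));
    assert!(parse_offset("not-a-zone").is_none());
}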
|
feat
|
time_zone variable for mysql connections (#1607)
|
c0f080df26572426539b890055cb7a77b41e6776
|
2023-09-25 14:14:49
|
LFC
|
fix: print root cause error message to user facing interface (#2486)
| false
|
diff --git a/src/servers/src/http.rs b/src/servers/src/http.rs
index a86c3133e0e5..e599c724dcbe 100644
--- a/src/servers/src/http.rs
+++ b/src/servers/src/http.rs
@@ -52,7 +52,7 @@ use futures::FutureExt;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use serde_json::Value;
-use snafu::{ensure, ResultExt};
+use snafu::{ensure, ErrorCompat, ResultExt};
use tokio::sync::oneshot::{self, Sender};
use tokio::sync::Mutex;
use tower::timeout::TimeoutLayer;
@@ -315,7 +315,7 @@ impl JsonResponse {
},
Err(e) => {
return Self::with_error(
- format!("Query engine output error: {e}"),
+ e.iter_chain().last().unwrap().to_string(),
e.status_code(),
);
}
diff --git a/src/servers/src/mysql/writer.rs b/src/servers/src/mysql/writer.rs
index d5cf69703f6d..bfbb689aa869 100644
--- a/src/servers/src/mysql/writer.rs
+++ b/src/servers/src/mysql/writer.rs
@@ -26,6 +26,7 @@ use opensrv_mysql::{
};
use session::context::QueryContextRef;
use snafu::prelude::*;
+use snafu::ErrorCompat;
use tokio::io::AsyncWrite;
use crate::error::{self, Error, OtherSnafu, Result};
@@ -211,7 +212,8 @@ impl<'a, W: AsyncWrite + Unpin> MysqlResultWriter<'a, W> {
);
let kind = ErrorKind::ER_INTERNAL_ERROR;
- w.error(kind, error.to_string().as_bytes()).await?;
+ let error = error.iter_chain().last().unwrap().to_string();
+ w.error(kind, error.as_bytes()).await?;
Ok(())
}
}
diff --git a/src/servers/src/postgres/handler.rs b/src/servers/src/postgres/handler.rs
index 536ca604173f..f11900133dd9 100644
--- a/src/servers/src/postgres/handler.rs
+++ b/src/servers/src/postgres/handler.rs
@@ -31,6 +31,7 @@ use pgwire::api::{ClientInfo, Type};
use pgwire::error::{ErrorInfo, PgWireError, PgWireResult};
use query::query_engine::DescribeResult;
use session::Session;
+use snafu::ErrorCompat;
use sql::dialect::PostgreSqlDialect;
use sql::parser::ParserContext;
@@ -90,7 +91,7 @@ fn output_to_query_response<'a>(
Err(e) => Ok(Response::Error(Box::new(ErrorInfo::new(
"ERROR".to_string(),
"XX000".to_string(),
- e.to_string(),
+ e.iter_chain().last().unwrap().to_string(),
)))),
}
}
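The fix above surfaces the last error in snafu's `iter_chain()` instead of the formatted outer error. A dependency-free sketch of the same idea using `std::error::Error::source` (the wrapper type is illustrative, not a GreptimeDB error):

use std::error::Error;
use std::fmt;

#[derive(Debug)]
struct QueryFailed(std::io::Error);

impl fmt::Display for QueryFailed {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "Query engine output error")
    }
}

impl Error for QueryFailed {
    fn source(&self) -> Option<&(dyn Error + 'static)> {
        Some(&self.0)
    }
}

// Walk `source()` links to the innermost error, analogous to `iter_chain().last()`.
fn root_cause(err: &(dyn Error + 'static)) -> &(dyn Error + 'static) {
    let mut current = err;
    while let Some(next) = current.source() {
        current = next;
    }
    current
}

fn main() {
    let err = QueryFailed(std::io::Error::new(std::io::ErrorKind::Other, "table not found"));
    // The user-facing message is the root cause, not the wrapper's text.
    assert_eq!(root_cause(&err).to_string(), "table not found");
}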
|
fix
|
print root cause error message to user facing interface (#2486)
|
163cea81c26ded94b0780840eabbaae022c3b6d4
|
2024-09-20 13:57:20
|
Weny Xu
|
feat: migrate local WAL regions (#4715)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 2183cf593d51..000d6316ecca 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4396,7 +4396,7 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"
[[package]]
name = "greptime-proto"
version = "0.1.0"
-source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=796ce9b003c6689e853825f649e03543c81ede99#796ce9b003c6689e853825f649e03543c81ede99"
+source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=36334744c7020734dcb4a6b8d24d52ae7ed53fe1#36334744c7020734dcb4a6b8d24d52ae7ed53fe1"
dependencies = [
"prost 0.12.6",
"serde",
diff --git a/Cargo.toml b/Cargo.toml
index df70b98332f2..f0b93ac65339 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -120,7 +120,7 @@ etcd-client = { version = "0.13" }
fst = "0.4.7"
futures = "0.3"
futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "796ce9b003c6689e853825f649e03543c81ede99" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "36334744c7020734dcb4a6b8d24d52ae7ed53fe1" }
humantime = "2.1"
humantime-serde = "1.1"
itertools = "0.10"
diff --git a/src/common/function/src/table/migrate_region.rs b/src/common/function/src/table/migrate_region.rs
index b46231eb4523..0a487973d358 100644
--- a/src/common/function/src/table/migrate_region.rs
+++ b/src/common/function/src/table/migrate_region.rs
@@ -25,13 +25,13 @@ use session::context::QueryContextRef;
use crate::handlers::ProcedureServiceHandlerRef;
use crate::helper::cast_u64;
-const DEFAULT_REPLAY_TIMEOUT_SECS: u64 = 10;
+const DEFAULT_TIMEOUT_SECS: u64 = 30;
/// A function to migrate a region from source peer to target peer.
/// Returns the submitted procedure id if success. Only available in cluster mode.
///
-/// - `migrate_region(region_id, from_peer, to_peer)`, with default replay WAL timeout(10 seconds).
-/// - `migrate_region(region_id, from_peer, to_peer, timeout(secs))`
+/// - `migrate_region(region_id, from_peer, to_peer)`, with the default timeout (30 seconds).
+/// - `migrate_region(region_id, from_peer, to_peer, timeout(secs))`.
///
/// The parameters:
/// - `region_id`: the region id
@@ -48,18 +48,13 @@ pub(crate) async fn migrate_region(
_ctx: &QueryContextRef,
params: &[ValueRef<'_>],
) -> Result<Value> {
- let (region_id, from_peer, to_peer, replay_timeout) = match params.len() {
+ let (region_id, from_peer, to_peer, timeout) = match params.len() {
3 => {
let region_id = cast_u64(¶ms[0])?;
let from_peer = cast_u64(¶ms[1])?;
let to_peer = cast_u64(¶ms[2])?;
- (
- region_id,
- from_peer,
- to_peer,
- Some(DEFAULT_REPLAY_TIMEOUT_SECS),
- )
+ (region_id, from_peer, to_peer, Some(DEFAULT_TIMEOUT_SECS))
}
4 => {
@@ -82,14 +77,14 @@ pub(crate) async fn migrate_region(
}
};
- match (region_id, from_peer, to_peer, replay_timeout) {
- (Some(region_id), Some(from_peer), Some(to_peer), Some(replay_timeout)) => {
+ match (region_id, from_peer, to_peer, timeout) {
+ (Some(region_id), Some(from_peer), Some(to_peer), Some(timeout)) => {
let pid = procedure_service_handler
.migrate_region(MigrateRegionRequest {
region_id,
from_peer,
to_peer,
- replay_timeout: Duration::from_secs(replay_timeout),
+ timeout: Duration::from_secs(timeout),
})
.await?;
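
The 3- and 4-argument forms documented above boil down to a match on the parameter slice, with the shorter form falling back to `DEFAULT_TIMEOUT_SECS`. A rough sketch of that shape, using plain `u64` arguments in place of the engine's `ValueRef` parsing:

const DEFAULT_TIMEOUT_SECS: u64 = 30;

// Three arguments use the default timeout; four arguments take an explicit
// timeout in seconds; anything else is rejected.
fn parse_args(args: &[u64]) -> Option<(u64, u64, u64, u64)> {
    match args {
        [region_id, from_peer, to_peer] => {
            Some((*region_id, *from_peer, *to_peer, DEFAULT_TIMEOUT_SECS))
        }
        [region_id, from_peer, to_peer, timeout] => {
            Some((*region_id, *from_peer, *to_peer, *timeout))
        }
        _ => None,
    }
}

fn main() {
    assert_eq!(parse_args(&[4398046511105, 1, 2]), Some((4398046511105, 1, 2, 30)));
    assert_eq!(parse_args(&[4398046511105, 1, 2, 60]), Some((4398046511105, 1, 2, 60)));
    assert_eq!(parse_args(&[1, 2]), None);
}
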
diff --git a/src/common/meta/src/instruction.rs b/src/common/meta/src/instruction.rs
index d620cc3449ed..61e2811e72b2 100644
--- a/src/common/meta/src/instruction.rs
+++ b/src/common/meta/src/instruction.rs
@@ -132,11 +132,20 @@ impl OpenRegion {
pub struct DowngradeRegion {
/// The [RegionId].
pub region_id: RegionId,
+    /// The timeout for flushing the region before downgrading it.
+    ///
+    /// `None` means the region will not be flushed before being downgraded.
+ #[serde(default)]
+ pub flush_timeout: Option<Duration>,
}
impl Display for DowngradeRegion {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
- write!(f, "DowngradeRegion(region_id={})", self.region_id)
+ write!(
+ f,
+ "DowngradeRegion(region_id={}, flush_timeout={:?})",
+ self.region_id, self.flush_timeout,
+ )
}
}
@@ -152,7 +161,7 @@ pub struct UpgradeRegion {
/// `None` stands for no wait,
/// it's helpful to verify whether the leader region is ready.
#[serde(with = "humantime_serde")]
- pub wait_for_replay_timeout: Option<Duration>,
+ pub replay_timeout: Option<Duration>,
/// The hint for replaying memtable.
#[serde(default)]
pub location_id: Option<u64>,
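
The `#[serde(default)]` on `flush_timeout` is what keeps the instruction wire-compatible: payloads produced by peers that predate the field simply decode to `None`. A standalone sketch of that behaviour with a trimmed-down struct (not the real instruction type, which uses `RegionId`):

use std::time::Duration;

use serde::{Deserialize, Serialize};

// Trimmed-down stand-in for the instruction payload; only the serde
// behaviour of the optional field is demonstrated here.
#[derive(Debug, Serialize, Deserialize)]
struct DowngradeRegion {
    region_id: u64,
    #[serde(default)]
    flush_timeout: Option<Duration>,
}

fn main() {
    // A payload from an older sender that does not know about `flush_timeout`.
    let old_payload = r#"{"region_id":4398046511105}"#;
    let decoded: DowngradeRegion = serde_json::from_str(old_payload).unwrap();
    assert!(decoded.flush_timeout.is_none());

    // A new payload round-trips the optional timeout.
    let new = DowngradeRegion {
        region_id: 4398046511105,
        flush_timeout: Some(Duration::from_secs(1)),
    };
    let json = serde_json::to_string(&new).unwrap();
    let back: DowngradeRegion = serde_json::from_str(&json).unwrap();
    assert_eq!(back.flush_timeout, Some(Duration::from_secs(1)));
}
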
diff --git a/src/common/meta/src/rpc/procedure.rs b/src/common/meta/src/rpc/procedure.rs
index 2c2a69b5b603..2e25a4aa5d46 100644
--- a/src/common/meta/src/rpc/procedure.rs
+++ b/src/common/meta/src/rpc/procedure.rs
@@ -31,7 +31,7 @@ pub struct MigrateRegionRequest {
pub region_id: u64,
pub from_peer: u64,
pub to_peer: u64,
- pub replay_timeout: Duration,
+ pub timeout: Duration,
}
/// Cast the protobuf [`ProcedureId`] to common [`ProcedureId`].
diff --git a/src/datanode/src/heartbeat/handler.rs b/src/datanode/src/heartbeat/handler.rs
index 573b94cb1185..d23615eb13d8 100644
--- a/src/datanode/src/heartbeat/handler.rs
+++ b/src/datanode/src/heartbeat/handler.rs
@@ -37,6 +37,7 @@ use crate::region_server::RegionServer;
pub struct RegionHeartbeatResponseHandler {
region_server: RegionServer,
catchup_tasks: TaskTracker<()>,
+ downgrade_tasks: TaskTracker<()>,
}
/// Handler of the instruction.
@@ -47,12 +48,22 @@ pub type InstructionHandler =
pub struct HandlerContext {
region_server: RegionServer,
catchup_tasks: TaskTracker<()>,
+ downgrade_tasks: TaskTracker<()>,
}
impl HandlerContext {
fn region_ident_to_region_id(region_ident: &RegionIdent) -> RegionId {
RegionId::new(region_ident.table_id, region_ident.region_number)
}
+
+ #[cfg(test)]
+ pub fn new_for_test(region_server: RegionServer) -> Self {
+ Self {
+ region_server,
+ catchup_tasks: TaskTracker::new(),
+ downgrade_tasks: TaskTracker::new(),
+ }
+ }
}
impl RegionHeartbeatResponseHandler {
@@ -61,6 +72,7 @@ impl RegionHeartbeatResponseHandler {
Self {
region_server,
catchup_tasks: TaskTracker::new(),
+ downgrade_tasks: TaskTracker::new(),
}
}
@@ -107,11 +119,13 @@ impl HeartbeatResponseHandler for RegionHeartbeatResponseHandler {
let mailbox = ctx.mailbox.clone();
let region_server = self.region_server.clone();
let catchup_tasks = self.catchup_tasks.clone();
+ let downgrade_tasks = self.downgrade_tasks.clone();
let handler = Self::build_handler(instruction)?;
let _handle = common_runtime::spawn_global(async move {
let reply = handler(HandlerContext {
region_server,
catchup_tasks,
+ downgrade_tasks,
})
.await;
@@ -129,6 +143,7 @@ mod tests {
use std::assert_matches::assert_matches;
use std::collections::HashMap;
use std::sync::Arc;
+ use std::time::Duration;
use common_meta::heartbeat::mailbox::{
HeartbeatMailbox, IncomingMessage, MailboxRef, MessageMeta,
@@ -197,6 +212,7 @@ mod tests {
// Downgrade region
let instruction = Instruction::DowngradeRegion(DowngradeRegion {
region_id: RegionId::new(2048, 1),
+ flush_timeout: Some(Duration::from_secs(1)),
});
assert!(heartbeat_handler
.is_acceptable(&heartbeat_env.create_handler_ctx((meta.clone(), instruction))));
@@ -205,7 +221,7 @@ mod tests {
let instruction = Instruction::UpgradeRegion(UpgradeRegion {
region_id,
last_entry_id: None,
- wait_for_replay_timeout: None,
+ replay_timeout: None,
location_id: None,
});
assert!(
@@ -392,7 +408,10 @@ mod tests {
// Should be ok, if we try to downgrade it twice.
for _ in 0..2 {
let meta = MessageMeta::new_test(1, "test", "dn-1", "me-0");
- let instruction = Instruction::DowngradeRegion(DowngradeRegion { region_id });
+ let instruction = Instruction::DowngradeRegion(DowngradeRegion {
+ region_id,
+ flush_timeout: Some(Duration::from_secs(1)),
+ });
let mut ctx = heartbeat_env.create_handler_ctx((meta, instruction));
let control = heartbeat_handler.handle(&mut ctx).await.unwrap();
@@ -413,6 +432,7 @@ mod tests {
let meta = MessageMeta::new_test(1, "test", "dn-1", "me-0");
let instruction = Instruction::DowngradeRegion(DowngradeRegion {
region_id: RegionId::new(2048, 1),
+ flush_timeout: Some(Duration::from_secs(1)),
});
let mut ctx = heartbeat_env.create_handler_ctx((meta, instruction));
let control = heartbeat_handler.handle(&mut ctx).await.unwrap();
diff --git a/src/datanode/src/heartbeat/handler/downgrade_region.rs b/src/datanode/src/heartbeat/handler/downgrade_region.rs
index 4dccdc26aaa2..ac1179280376 100644
--- a/src/datanode/src/heartbeat/handler/downgrade_region.rs
+++ b/src/datanode/src/heartbeat/handler/downgrade_region.rs
@@ -13,38 +13,399 @@
// limitations under the License.
use common_meta::instruction::{DowngradeRegion, DowngradeRegionReply, InstructionReply};
+use common_telemetry::tracing::info;
+use common_telemetry::warn;
use futures_util::future::BoxFuture;
use store_api::region_engine::SetReadonlyResponse;
+use store_api::region_request::{RegionFlushRequest, RegionRequest};
+use store_api::storage::RegionId;
use crate::heartbeat::handler::HandlerContext;
+use crate::heartbeat::task_tracker::WaitResult;
impl HandlerContext {
+ async fn set_readonly_gracefully(&self, region_id: RegionId) -> InstructionReply {
+ match self.region_server.set_readonly_gracefully(region_id).await {
+ Ok(SetReadonlyResponse::Success { last_entry_id }) => {
+ InstructionReply::DowngradeRegion(DowngradeRegionReply {
+ last_entry_id,
+ exists: true,
+ error: None,
+ })
+ }
+ Ok(SetReadonlyResponse::NotFound) => {
+ InstructionReply::DowngradeRegion(DowngradeRegionReply {
+ last_entry_id: None,
+ exists: false,
+ error: None,
+ })
+ }
+ Err(err) => InstructionReply::DowngradeRegion(DowngradeRegionReply {
+ last_entry_id: None,
+ exists: true,
+ error: Some(format!("{err:?}")),
+ }),
+ }
+ }
+
pub(crate) fn handle_downgrade_region_instruction(
self,
- DowngradeRegion { region_id }: DowngradeRegion,
+ DowngradeRegion {
+ region_id,
+ flush_timeout,
+ }: DowngradeRegion,
) -> BoxFuture<'static, InstructionReply> {
Box::pin(async move {
- match self.region_server.set_readonly_gracefully(region_id).await {
- Ok(SetReadonlyResponse::Success { last_entry_id }) => {
- InstructionReply::DowngradeRegion(DowngradeRegionReply {
- last_entry_id,
- exists: true,
- error: None,
- })
+ let Some(writable) = self.region_server.is_writable(region_id) else {
+ return InstructionReply::DowngradeRegion(DowngradeRegionReply {
+ last_entry_id: None,
+ exists: false,
+ error: None,
+ });
+ };
+
+ // Ignores flush request
+ if !writable {
+ return self.set_readonly_gracefully(region_id).await;
+ }
+
+ let region_server_moved = self.region_server.clone();
+ if let Some(flush_timeout) = flush_timeout {
+ let register_result = self
+ .downgrade_tasks
+ .try_register(
+ region_id,
+ Box::pin(async move {
+ info!("Flush region: {region_id} before downgrading region");
+ region_server_moved
+ .handle_request(
+ region_id,
+ RegionRequest::Flush(RegionFlushRequest {
+ row_group_size: None,
+ }),
+ )
+ .await?;
+
+ Ok(())
+ }),
+ )
+ .await;
+
+ if register_result.is_busy() {
+ warn!("Another flush task is running for the region: {region_id}");
}
- Ok(SetReadonlyResponse::NotFound) => {
- InstructionReply::DowngradeRegion(DowngradeRegionReply {
- last_entry_id: None,
- exists: false,
- error: None,
- })
+
+ let mut watcher = register_result.into_watcher();
+ let result = self.catchup_tasks.wait(&mut watcher, flush_timeout).await;
+
+ match result {
+ WaitResult::Timeout => {
+ InstructionReply::DowngradeRegion(DowngradeRegionReply {
+ last_entry_id: None,
+ exists: true,
+ error: Some(format!(
+ "Flush region: {region_id} before downgrading region is timeout"
+ )),
+ })
+ }
+ WaitResult::Finish(Ok(_)) => self.set_readonly_gracefully(region_id).await,
+ WaitResult::Finish(Err(err)) => {
+ InstructionReply::DowngradeRegion(DowngradeRegionReply {
+ last_entry_id: None,
+ exists: true,
+ error: Some(format!("{err:?}")),
+ })
+ }
}
- Err(err) => InstructionReply::DowngradeRegion(DowngradeRegionReply {
- last_entry_id: None,
- exists: true,
- error: Some(format!("{err:?}")),
- }),
+ } else {
+ self.set_readonly_gracefully(region_id).await
}
})
}
}
+
+#[cfg(test)]
+mod tests {
+ use std::assert_matches::assert_matches;
+ use std::time::Duration;
+
+ use common_meta::instruction::{DowngradeRegion, InstructionReply};
+ use mito2::engine::MITO_ENGINE_NAME;
+ use store_api::region_engine::{RegionRole, SetReadonlyResponse};
+ use store_api::region_request::RegionRequest;
+ use store_api::storage::RegionId;
+ use tokio::time::Instant;
+
+ use crate::error;
+ use crate::heartbeat::handler::HandlerContext;
+ use crate::tests::{mock_region_server, MockRegionEngine};
+
+ #[tokio::test]
+ async fn test_region_not_exist() {
+ let mut mock_region_server = mock_region_server();
+ let (mock_engine, _) = MockRegionEngine::new(MITO_ENGINE_NAME);
+ mock_region_server.register_engine(mock_engine);
+ let handler_context = HandlerContext::new_for_test(mock_region_server);
+ let region_id = RegionId::new(1024, 1);
+ let waits = vec![None, Some(Duration::from_millis(100u64))];
+
+ for flush_timeout in waits {
+ let reply = handler_context
+ .clone()
+ .handle_downgrade_region_instruction(DowngradeRegion {
+ region_id,
+ flush_timeout,
+ })
+ .await;
+ assert_matches!(reply, InstructionReply::DowngradeRegion(_));
+
+ if let InstructionReply::DowngradeRegion(reply) = reply {
+ assert!(!reply.exists);
+ assert!(reply.error.is_none());
+ assert!(reply.last_entry_id.is_none());
+ }
+ }
+ }
+
+ #[tokio::test]
+ async fn test_region_readonly() {
+ let mock_region_server = mock_region_server();
+ let region_id = RegionId::new(1024, 1);
+ let (mock_engine, _) =
+ MockRegionEngine::with_custom_apply_fn(MITO_ENGINE_NAME, |region_engine| {
+ region_engine.mock_role = Some(Some(RegionRole::Follower));
+ region_engine.handle_request_mock_fn = Some(Box::new(|_, req| {
+ if let RegionRequest::Flush(_) = req {
+ // Should be unreachable.
+ unreachable!();
+ };
+
+ Ok(0)
+ }));
+ region_engine.handle_set_readonly_gracefully_mock_fn =
+ Some(Box::new(|_| Ok(SetReadonlyResponse::success(Some(1024)))))
+ });
+ mock_region_server.register_test_region(region_id, mock_engine);
+ let handler_context = HandlerContext::new_for_test(mock_region_server);
+
+ let waits = vec![None, Some(Duration::from_millis(100u64))];
+ for flush_timeout in waits {
+ let reply = handler_context
+ .clone()
+ .handle_downgrade_region_instruction(DowngradeRegion {
+ region_id,
+ flush_timeout,
+ })
+ .await;
+ assert_matches!(reply, InstructionReply::DowngradeRegion(_));
+
+ if let InstructionReply::DowngradeRegion(reply) = reply {
+ assert!(reply.exists);
+ assert!(reply.error.is_none());
+ assert_eq!(reply.last_entry_id.unwrap(), 1024);
+ }
+ }
+ }
+
+ #[tokio::test]
+ async fn test_region_flush_timeout() {
+ let mock_region_server = mock_region_server();
+ let region_id = RegionId::new(1024, 1);
+ let (mock_engine, _) =
+ MockRegionEngine::with_custom_apply_fn(MITO_ENGINE_NAME, |region_engine| {
+ region_engine.mock_role = Some(Some(RegionRole::Leader));
+ region_engine.handle_request_delay = Some(Duration::from_secs(100));
+ region_engine.handle_set_readonly_gracefully_mock_fn =
+ Some(Box::new(|_| Ok(SetReadonlyResponse::success(Some(1024)))))
+ });
+ mock_region_server.register_test_region(region_id, mock_engine);
+ let handler_context = HandlerContext::new_for_test(mock_region_server);
+
+ let flush_timeout = Duration::from_millis(100);
+ let reply = handler_context
+ .clone()
+ .handle_downgrade_region_instruction(DowngradeRegion {
+ region_id,
+ flush_timeout: Some(flush_timeout),
+ })
+ .await;
+ assert_matches!(reply, InstructionReply::DowngradeRegion(_));
+
+ if let InstructionReply::DowngradeRegion(reply) = reply {
+ assert!(reply.exists);
+ assert!(reply.error.unwrap().contains("timeout"));
+ assert!(reply.last_entry_id.is_none());
+ }
+ }
+
+ #[tokio::test]
+ async fn test_region_flush_timeout_and_retry() {
+ let mock_region_server = mock_region_server();
+ let region_id = RegionId::new(1024, 1);
+ let (mock_engine, _) =
+ MockRegionEngine::with_custom_apply_fn(MITO_ENGINE_NAME, |region_engine| {
+ region_engine.mock_role = Some(Some(RegionRole::Leader));
+ region_engine.handle_request_delay = Some(Duration::from_millis(300));
+ region_engine.handle_set_readonly_gracefully_mock_fn =
+ Some(Box::new(|_| Ok(SetReadonlyResponse::success(Some(1024)))))
+ });
+ mock_region_server.register_test_region(region_id, mock_engine);
+ let handler_context = HandlerContext::new_for_test(mock_region_server);
+
+ let waits = vec![
+ Some(Duration::from_millis(100u64)),
+ Some(Duration::from_millis(100u64)),
+ ];
+
+ for flush_timeout in waits {
+ let reply = handler_context
+ .clone()
+ .handle_downgrade_region_instruction(DowngradeRegion {
+ region_id,
+ flush_timeout,
+ })
+ .await;
+ assert_matches!(reply, InstructionReply::DowngradeRegion(_));
+ if let InstructionReply::DowngradeRegion(reply) = reply {
+ assert!(reply.exists);
+ assert!(reply.error.unwrap().contains("timeout"));
+ assert!(reply.last_entry_id.is_none());
+ }
+ }
+ let timer = Instant::now();
+ let reply = handler_context
+ .handle_downgrade_region_instruction(DowngradeRegion {
+ region_id,
+ flush_timeout: Some(Duration::from_millis(500)),
+ })
+ .await;
+ assert_matches!(reply, InstructionReply::DowngradeRegion(_));
+        // Must be less than 300 ms.
+ assert!(timer.elapsed().as_millis() < 300);
+
+ if let InstructionReply::DowngradeRegion(reply) = reply {
+ assert!(reply.exists);
+ assert!(reply.error.is_none());
+ assert_eq!(reply.last_entry_id.unwrap(), 1024);
+ }
+ }
+
+ #[tokio::test]
+ async fn test_region_flush_timeout_and_retry_error() {
+ let mock_region_server = mock_region_server();
+ let region_id = RegionId::new(1024, 1);
+ let (mock_engine, _) =
+ MockRegionEngine::with_custom_apply_fn(MITO_ENGINE_NAME, |region_engine| {
+ region_engine.mock_role = Some(Some(RegionRole::Leader));
+ region_engine.handle_request_delay = Some(Duration::from_millis(300));
+ region_engine.handle_request_mock_fn = Some(Box::new(|_, _| {
+ error::UnexpectedSnafu {
+ violated: "mock flush failed",
+ }
+ .fail()
+ }));
+ region_engine.handle_set_readonly_gracefully_mock_fn =
+ Some(Box::new(|_| Ok(SetReadonlyResponse::success(Some(1024)))))
+ });
+ mock_region_server.register_test_region(region_id, mock_engine);
+ let handler_context = HandlerContext::new_for_test(mock_region_server);
+
+ let waits = vec![
+ Some(Duration::from_millis(100u64)),
+ Some(Duration::from_millis(100u64)),
+ ];
+
+ for flush_timeout in waits {
+ let reply = handler_context
+ .clone()
+ .handle_downgrade_region_instruction(DowngradeRegion {
+ region_id,
+ flush_timeout,
+ })
+ .await;
+ assert_matches!(reply, InstructionReply::DowngradeRegion(_));
+ if let InstructionReply::DowngradeRegion(reply) = reply {
+ assert!(reply.exists);
+ assert!(reply.error.unwrap().contains("timeout"));
+ assert!(reply.last_entry_id.is_none());
+ }
+ }
+ let timer = Instant::now();
+ let reply = handler_context
+ .handle_downgrade_region_instruction(DowngradeRegion {
+ region_id,
+ flush_timeout: Some(Duration::from_millis(500)),
+ })
+ .await;
+ assert_matches!(reply, InstructionReply::DowngradeRegion(_));
+        // Must be less than 300 ms.
+ assert!(timer.elapsed().as_millis() < 300);
+
+ if let InstructionReply::DowngradeRegion(reply) = reply {
+ assert!(reply.exists);
+ assert!(reply.error.unwrap().contains("flush failed"));
+ assert!(reply.last_entry_id.is_none());
+ }
+ }
+
+ #[tokio::test]
+ async fn test_set_region_readonly_not_found() {
+ let mock_region_server = mock_region_server();
+ let region_id = RegionId::new(1024, 1);
+ let (mock_engine, _) =
+ MockRegionEngine::with_custom_apply_fn(MITO_ENGINE_NAME, |region_engine| {
+ region_engine.mock_role = Some(Some(RegionRole::Leader));
+ region_engine.handle_set_readonly_gracefully_mock_fn =
+ Some(Box::new(|_| Ok(SetReadonlyResponse::NotFound)));
+ });
+ mock_region_server.register_test_region(region_id, mock_engine);
+ let handler_context = HandlerContext::new_for_test(mock_region_server);
+ let reply = handler_context
+ .clone()
+ .handle_downgrade_region_instruction(DowngradeRegion {
+ region_id,
+ flush_timeout: None,
+ })
+ .await;
+ assert_matches!(reply, InstructionReply::DowngradeRegion(_));
+ if let InstructionReply::DowngradeRegion(reply) = reply {
+ assert!(!reply.exists);
+ assert!(reply.error.is_none());
+ assert!(reply.last_entry_id.is_none());
+ }
+ }
+
+ #[tokio::test]
+ async fn test_set_region_readonly_error() {
+ let mock_region_server = mock_region_server();
+ let region_id = RegionId::new(1024, 1);
+ let (mock_engine, _) =
+ MockRegionEngine::with_custom_apply_fn(MITO_ENGINE_NAME, |region_engine| {
+ region_engine.mock_role = Some(Some(RegionRole::Leader));
+ region_engine.handle_set_readonly_gracefully_mock_fn = Some(Box::new(|_| {
+ error::UnexpectedSnafu {
+ violated: "Failed to set region to readonly",
+ }
+ .fail()
+ }));
+ });
+ mock_region_server.register_test_region(region_id, mock_engine);
+ let handler_context = HandlerContext::new_for_test(mock_region_server);
+ let reply = handler_context
+ .clone()
+ .handle_downgrade_region_instruction(DowngradeRegion {
+ region_id,
+ flush_timeout: None,
+ })
+ .await;
+ assert_matches!(reply, InstructionReply::DowngradeRegion(_));
+ if let InstructionReply::DowngradeRegion(reply) = reply {
+ assert!(reply.exists);
+ assert!(reply
+ .error
+ .unwrap()
+ .contains("Failed to set region to readonly"));
+ assert!(reply.last_entry_id.is_none());
+ }
+ }
+}
diff --git a/src/datanode/src/heartbeat/handler/upgrade_region.rs b/src/datanode/src/heartbeat/handler/upgrade_region.rs
index 614373166315..0d1ef0476c95 100644
--- a/src/datanode/src/heartbeat/handler/upgrade_region.rs
+++ b/src/datanode/src/heartbeat/handler/upgrade_region.rs
@@ -26,7 +26,7 @@ impl HandlerContext {
UpgradeRegion {
region_id,
last_entry_id,
- wait_for_replay_timeout,
+ replay_timeout,
location_id,
}: UpgradeRegion,
) -> BoxFuture<'static, InstructionReply> {
@@ -78,7 +78,7 @@ impl HandlerContext {
}
// Returns immediately
- let Some(wait_for_replay_timeout) = wait_for_replay_timeout else {
+ let Some(replay_timeout) = replay_timeout else {
return InstructionReply::UpgradeRegion(UpgradeRegionReply {
ready: false,
exists: true,
@@ -88,10 +88,7 @@ impl HandlerContext {
// We don't care that it returns a newly registered or running task.
let mut watcher = register_result.into_watcher();
- let result = self
- .catchup_tasks
- .wait(&mut watcher, wait_for_replay_timeout)
- .await;
+ let result = self.catchup_tasks.wait(&mut watcher, replay_timeout).await;
match result {
WaitResult::Timeout => InstructionReply::UpgradeRegion(UpgradeRegionReply {
@@ -129,7 +126,6 @@ mod tests {
use crate::error;
use crate::heartbeat::handler::HandlerContext;
- use crate::heartbeat::task_tracker::TaskTracker;
use crate::tests::{mock_region_server, MockRegionEngine};
#[tokio::test]
@@ -138,21 +134,18 @@ mod tests {
let (mock_engine, _) = MockRegionEngine::new(MITO_ENGINE_NAME);
mock_region_server.register_engine(mock_engine);
- let handler_context = HandlerContext {
- region_server: mock_region_server,
- catchup_tasks: TaskTracker::new(),
- };
+ let handler_context = HandlerContext::new_for_test(mock_region_server);
let region_id = RegionId::new(1024, 1);
let waits = vec![None, Some(Duration::from_millis(100u64))];
- for wait_for_replay_timeout in waits {
+ for replay_timeout in waits {
let reply = handler_context
.clone()
.handle_upgrade_region_instruction(UpgradeRegion {
region_id,
last_entry_id: None,
- wait_for_replay_timeout,
+ replay_timeout,
location_id: None,
})
.await;
@@ -180,20 +173,17 @@ mod tests {
});
mock_region_server.register_test_region(region_id, mock_engine);
- let handler_context = HandlerContext {
- region_server: mock_region_server,
- catchup_tasks: TaskTracker::new(),
- };
+ let handler_context = HandlerContext::new_for_test(mock_region_server);
let waits = vec![None, Some(Duration::from_millis(100u64))];
- for wait_for_replay_timeout in waits {
+ for replay_timeout in waits {
let reply = handler_context
.clone()
.handle_upgrade_region_instruction(UpgradeRegion {
region_id,
last_entry_id: None,
- wait_for_replay_timeout,
+ replay_timeout,
location_id: None,
})
.await;
@@ -222,20 +212,17 @@ mod tests {
});
mock_region_server.register_test_region(region_id, mock_engine);
- let handler_context = HandlerContext {
- region_server: mock_region_server,
- catchup_tasks: TaskTracker::new(),
- };
+ let handler_context = HandlerContext::new_for_test(mock_region_server);
let waits = vec![None, Some(Duration::from_millis(100u64))];
- for wait_for_replay_timeout in waits {
+ for replay_timeout in waits {
let reply = handler_context
.clone()
.handle_upgrade_region_instruction(UpgradeRegion {
region_id,
last_entry_id: None,
- wait_for_replay_timeout,
+ replay_timeout,
location_id: None,
})
.await;
@@ -269,17 +256,14 @@ mod tests {
Some(Duration::from_millis(100u64)),
];
- let handler_context = HandlerContext {
- region_server: mock_region_server,
- catchup_tasks: TaskTracker::new(),
- };
+ let handler_context = HandlerContext::new_for_test(mock_region_server);
- for wait_for_replay_timeout in waits {
+ for replay_timeout in waits {
let reply = handler_context
.clone()
.handle_upgrade_region_instruction(UpgradeRegion {
region_id,
- wait_for_replay_timeout,
+ replay_timeout,
last_entry_id: None,
location_id: None,
})
@@ -298,7 +282,7 @@ mod tests {
.handle_upgrade_region_instruction(UpgradeRegion {
region_id,
last_entry_id: None,
- wait_for_replay_timeout: Some(Duration::from_millis(500)),
+ replay_timeout: Some(Duration::from_millis(500)),
location_id: None,
})
.await;
@@ -333,17 +317,14 @@ mod tests {
});
mock_region_server.register_test_region(region_id, mock_engine);
- let handler_context = HandlerContext {
- region_server: mock_region_server,
- catchup_tasks: TaskTracker::new(),
- };
+ let handler_context = HandlerContext::new_for_test(mock_region_server);
let reply = handler_context
.clone()
.handle_upgrade_region_instruction(UpgradeRegion {
region_id,
last_entry_id: None,
- wait_for_replay_timeout: None,
+ replay_timeout: None,
location_id: None,
})
.await;
@@ -361,7 +342,7 @@ mod tests {
.handle_upgrade_region_instruction(UpgradeRegion {
region_id,
last_entry_id: None,
- wait_for_replay_timeout: Some(Duration::from_millis(200)),
+ replay_timeout: Some(Duration::from_millis(200)),
location_id: None,
})
.await;
diff --git a/src/datanode/src/tests.rs b/src/datanode/src/tests.rs
index 89be76511dfb..8966dc4932c9 100644
--- a/src/datanode/src/tests.rs
+++ b/src/datanode/src/tests.rs
@@ -103,10 +103,14 @@ pub fn mock_region_server() -> RegionServer {
pub type MockRequestHandler =
Box<dyn Fn(RegionId, RegionRequest) -> Result<AffectedRows, Error> + Send + Sync>;
+pub type MockSetReadonlyGracefullyHandler =
+ Box<dyn Fn(RegionId) -> Result<SetReadonlyResponse, Error> + Send + Sync>;
+
pub struct MockRegionEngine {
sender: Sender<(RegionId, RegionRequest)>,
pub(crate) handle_request_delay: Option<Duration>,
pub(crate) handle_request_mock_fn: Option<MockRequestHandler>,
+ pub(crate) handle_set_readonly_gracefully_mock_fn: Option<MockSetReadonlyGracefullyHandler>,
pub(crate) mock_role: Option<Option<RegionRole>>,
engine: String,
}
@@ -120,6 +124,7 @@ impl MockRegionEngine {
handle_request_delay: None,
sender: tx,
handle_request_mock_fn: None,
+ handle_set_readonly_gracefully_mock_fn: None,
mock_role: None,
engine: engine.to_string(),
}),
@@ -138,6 +143,7 @@ impl MockRegionEngine {
handle_request_delay: None,
sender: tx,
handle_request_mock_fn: Some(mock_fn),
+ handle_set_readonly_gracefully_mock_fn: None,
mock_role: None,
engine: engine.to_string(),
}),
@@ -157,6 +163,7 @@ impl MockRegionEngine {
handle_request_delay: None,
sender: tx,
handle_request_mock_fn: None,
+ handle_set_readonly_gracefully_mock_fn: None,
mock_role: None,
engine: engine.to_string(),
};
@@ -217,9 +224,13 @@ impl RegionEngine for MockRegionEngine {
async fn set_readonly_gracefully(
&self,
- _region_id: RegionId,
+ region_id: RegionId,
) -> Result<SetReadonlyResponse, BoxedError> {
- unimplemented!()
+ if let Some(mock_fn) = &self.handle_set_readonly_gracefully_mock_fn {
+ return mock_fn(region_id).map_err(BoxedError::new);
+ };
+
+ unreachable!()
}
fn role(&self, _region_id: RegionId) -> Option<RegionRole> {
diff --git a/src/meta-client/src/client.rs b/src/meta-client/src/client.rs
index 0a9df7293fb9..d5f7c6aaaf8c 100644
--- a/src/meta-client/src/client.rs
+++ b/src/meta-client/src/client.rs
@@ -483,7 +483,7 @@ impl MetaClient {
request.region_id,
request.from_peer,
request.to_peer,
- request.replay_timeout,
+ request.timeout,
)
.await
}
diff --git a/src/meta-client/src/client/procedure.rs b/src/meta-client/src/client/procedure.rs
index f45cfb787988..eccfe90dff93 100644
--- a/src/meta-client/src/client/procedure.rs
+++ b/src/meta-client/src/client/procedure.rs
@@ -77,17 +77,17 @@ impl Client {
/// - `region_id`: the migrated region id
/// - `from_peer`: the source datanode id
/// - `to_peer`: the target datanode id
- /// - `replay_timeout`: replay WAL timeout after migration.
+ /// - `timeout`: timeout for downgrading region and upgrading region operations
pub async fn migrate_region(
&self,
region_id: u64,
from_peer: u64,
to_peer: u64,
- replay_timeout: Duration,
+ timeout: Duration,
) -> Result<MigrateRegionResponse> {
let inner = self.inner.read().await;
inner
- .migrate_region(region_id, from_peer, to_peer, replay_timeout)
+ .migrate_region(region_id, from_peer, to_peer, timeout)
.await
}
@@ -216,13 +216,13 @@ impl Inner {
region_id: u64,
from_peer: u64,
to_peer: u64,
- replay_timeout: Duration,
+ timeout: Duration,
) -> Result<MigrateRegionResponse> {
let mut req = MigrateRegionRequest {
region_id,
from_peer,
to_peer,
- replay_timeout_secs: replay_timeout.as_secs() as u32,
+ timeout_secs: timeout.as_secs() as u32,
..Default::default()
};
diff --git a/src/meta-srv/src/error.rs b/src/meta-srv/src/error.rs
index d0a58d688e68..728f326871af 100644
--- a/src/meta-srv/src/error.rs
+++ b/src/meta-srv/src/error.rs
@@ -32,6 +32,13 @@ use crate::pubsub::Message;
#[snafu(visibility(pub))]
#[stack_trace_debug]
pub enum Error {
+ #[snafu(display("Exceeded deadline, operation: {}", operation))]
+ ExceededDeadline {
+ #[snafu(implicit)]
+ location: Location,
+ operation: String,
+ },
+
#[snafu(display("The target peer is unavailable temporally: {}", peer_id))]
PeerUnavailable {
#[snafu(implicit)]
@@ -783,7 +790,8 @@ impl ErrorExt for Error {
| Error::Join { .. }
| Error::WeightArray { .. }
| Error::NotSetWeightArray { .. }
- | Error::PeerUnavailable { .. } => StatusCode::Internal,
+ | Error::PeerUnavailable { .. }
+ | Error::ExceededDeadline { .. } => StatusCode::Internal,
Error::Unsupported { .. } => StatusCode::Unsupported,
diff --git a/src/meta-srv/src/procedure/region_migration.rs b/src/meta-srv/src/procedure/region_migration.rs
index e56afdbf1177..9b5bdfc0060a 100644
--- a/src/meta-srv/src/procedure/region_migration.rs
+++ b/src/meta-srv/src/procedure/region_migration.rs
@@ -73,13 +73,13 @@ pub struct PersistentContext {
to_peer: Peer,
/// The [RegionId] of migration region.
region_id: RegionId,
- /// The timeout of waiting for a candidate to replay the WAL.
- #[serde(with = "humantime_serde", default = "default_replay_timeout")]
- replay_timeout: Duration,
+ /// The timeout for downgrading leader region and upgrading candidate region operations.
+ #[serde(with = "humantime_serde", default = "default_timeout")]
+ timeout: Duration,
}
-fn default_replay_timeout() -> Duration {
- Duration::from_secs(1)
+fn default_timeout() -> Duration {
+ Duration::from_secs(10)
}
impl PersistentContext {
@@ -123,6 +123,8 @@ pub struct VolatileContext {
leader_region_lease_deadline: Option<Instant>,
/// The last_entry_id of leader region.
leader_region_last_entry_id: Option<u64>,
+ /// Elapsed time of downgrading region and upgrading region.
+ operations_elapsed: Duration,
}
impl VolatileContext {
@@ -211,6 +213,18 @@ pub struct Context {
}
impl Context {
+ /// Returns the next operation's timeout.
+ pub fn next_operation_timeout(&self) -> Option<Duration> {
+ self.persistent_ctx
+ .timeout
+ .checked_sub(self.volatile_ctx.operations_elapsed)
+ }
+
+ /// Updates operations elapsed.
+ pub fn update_operations_elapsed(&mut self, instant: Instant) {
+ self.volatile_ctx.operations_elapsed += instant.elapsed();
+ }
+
/// Returns address of meta server.
pub fn server_addr(&self) -> &str {
&self.server_addr
@@ -441,7 +455,7 @@ impl RegionMigrationProcedure {
region_id: persistent_ctx.region_id,
from_peer: persistent_ctx.from_peer.clone(),
to_peer: persistent_ctx.to_peer.clone(),
- replay_timeout: persistent_ctx.replay_timeout,
+ timeout: persistent_ctx.timeout,
});
let context = context_factory.new_context(persistent_ctx);
@@ -537,7 +551,7 @@ mod tests {
let procedure = RegionMigrationProcedure::new(persistent_context, context, None);
let serialized = procedure.dump().unwrap();
- let expected = r#"{"persistent_ctx":{"catalog":"greptime","schema":"public","cluster_id":0,"from_peer":{"id":1,"addr":""},"to_peer":{"id":2,"addr":""},"region_id":4398046511105,"replay_timeout":"1s"},"state":{"region_migration_state":"RegionMigrationStart"}}"#;
+ let expected = r#"{"persistent_ctx":{"catalog":"greptime","schema":"public","cluster_id":0,"from_peer":{"id":1,"addr":""},"to_peer":{"id":2,"addr":""},"region_id":4398046511105,"timeout":"10s"},"state":{"region_migration_state":"RegionMigrationStart"}}"#;
assert_eq!(expected, serialized);
}
diff --git a/src/meta-srv/src/procedure/region_migration/downgrade_leader_region.rs b/src/meta-srv/src/procedure/region_migration/downgrade_leader_region.rs
index 17c57629067d..836ca4c53212 100644
--- a/src/meta-srv/src/procedure/region_migration/downgrade_leader_region.rs
+++ b/src/meta-srv/src/procedure/region_migration/downgrade_leader_region.rs
@@ -16,24 +16,23 @@ use std::any::Any;
use std::time::Duration;
use api::v1::meta::MailboxMessage;
-use common_meta::distributed_time_constants::{MAILBOX_RTT_SECS, REGION_LEASE_SECS};
+use common_meta::distributed_time_constants::REGION_LEASE_SECS;
use common_meta::instruction::{
DowngradeRegion, DowngradeRegionReply, Instruction, InstructionReply,
};
use common_procedure::Status;
-use common_telemetry::{info, warn};
+use common_telemetry::{error, info, warn};
use serde::{Deserialize, Serialize};
-use snafu::ResultExt;
-use tokio::time::sleep;
+use snafu::{OptionExt, ResultExt};
+use tokio::time::{sleep, Instant};
+use super::update_metadata::UpdateMetadata;
use super::upgrade_candidate_region::UpgradeCandidateRegion;
use crate::error::{self, Result};
use crate::handler::HeartbeatMailbox;
use crate::procedure::region_migration::{Context, State};
use crate::service::mailbox::Channel;
-const DOWNGRADE_LEADER_REGION_TIMEOUT: Duration = Duration::from_secs(MAILBOX_RTT_SECS);
-
#[derive(Debug, Serialize, Deserialize)]
pub struct DowngradeLeaderRegion {
// The optimistic retry times.
@@ -55,25 +54,32 @@ impl Default for DowngradeLeaderRegion {
#[typetag::serde]
impl State for DowngradeLeaderRegion {
async fn next(&mut self, ctx: &mut Context) -> Result<(Box<dyn State>, Status)> {
- let replay_timeout = ctx.persistent_ctx.replay_timeout;
// Ensures the `leader_region_lease_deadline` must exist after recovering.
ctx.volatile_ctx
.set_leader_region_lease_deadline(Duration::from_secs(REGION_LEASE_SECS));
- self.downgrade_region_with_retry(ctx).await;
-
- if let Some(deadline) = ctx.volatile_ctx.leader_region_lease_deadline.as_ref() {
- info!(
- "Running into the downgrade leader slow path, sleep until {:?}",
- deadline
- );
- tokio::time::sleep_until(*deadline).await;
+
+ match self.downgrade_region_with_retry(ctx).await {
+ Ok(_) => {
+ // Do nothing
+ }
+ Err(error::Error::ExceededDeadline { .. }) => {
+ // Rollbacks the metadata if procedure is timeout
+ return Ok((Box::new(UpdateMetadata::Rollback), Status::executing(false)));
+ }
+ Err(err) => {
+                error!(err; "Non-retryable error occurred");
+ if let Some(deadline) = ctx.volatile_ctx.leader_region_lease_deadline.as_ref() {
+ info!(
+ "Running into the downgrade leader slow path, sleep until {:?}",
+ deadline
+ );
+ tokio::time::sleep_until(*deadline).await;
+ }
+ }
}
Ok((
- Box::new(UpgradeCandidateRegion {
- replay_timeout,
- ..Default::default()
- }),
+ Box::new(UpgradeCandidateRegion::default()),
Status::executing(false),
))
}
@@ -85,10 +91,17 @@ impl State for DowngradeLeaderRegion {
impl DowngradeLeaderRegion {
/// Builds downgrade region instruction.
- fn build_downgrade_region_instruction(&self, ctx: &Context) -> Instruction {
+ fn build_downgrade_region_instruction(
+ &self,
+ ctx: &Context,
+ flush_timeout: Duration,
+ ) -> Instruction {
let pc = &ctx.persistent_ctx;
let region_id = pc.region_id;
- Instruction::DowngradeRegion(DowngradeRegion { region_id })
+ Instruction::DowngradeRegion(DowngradeRegion {
+ region_id,
+ flush_timeout: Some(flush_timeout),
+ })
}
/// Tries to downgrade a leader region.
@@ -102,32 +115,32 @@ impl DowngradeLeaderRegion {
/// - [PushMessage](error::Error::PushMessage), The receiver is dropped.
/// - [MailboxReceiver](error::Error::MailboxReceiver), The sender is dropped without sending (impossible).
/// - [UnexpectedInstructionReply](error::Error::UnexpectedInstructionReply).
+ /// - [ExceededDeadline](error::Error::ExceededDeadline)
/// - Invalid JSON.
- async fn downgrade_region(
- &self,
- ctx: &mut Context,
- downgrade_instruction: &Instruction,
- ) -> Result<()> {
+ async fn downgrade_region(&self, ctx: &mut Context) -> Result<()> {
let pc = &ctx.persistent_ctx;
let region_id = pc.region_id;
let leader = &pc.from_peer;
+ let operation_timeout =
+ ctx.next_operation_timeout()
+ .context(error::ExceededDeadlineSnafu {
+ operation: "Downgrade region",
+ })?;
+ let downgrade_instruction = self.build_downgrade_region_instruction(ctx, operation_timeout);
let msg = MailboxMessage::json_message(
&format!("Downgrade leader region: {}", region_id),
&format!("Meta@{}", ctx.server_addr()),
&format!("Datanode-{}@{}", leader.id, leader.addr),
common_time::util::current_time_millis(),
- downgrade_instruction,
+ &downgrade_instruction,
)
.with_context(|_| error::SerializeToJsonSnafu {
input: downgrade_instruction.to_string(),
})?;
let ch = Channel::Datanode(leader.id);
- let receiver = ctx
- .mailbox
- .send(&ch, msg, DOWNGRADE_LEADER_REGION_TIMEOUT)
- .await?;
+ let receiver = ctx.mailbox.send(&ch, msg, operation_timeout).await?;
match receiver.await? {
Ok(msg) => {
@@ -191,26 +204,36 @@ impl DowngradeLeaderRegion {
///
/// Slow path:
/// - Waits for the lease of the leader region expired.
- async fn downgrade_region_with_retry(&self, ctx: &mut Context) {
- let instruction = self.build_downgrade_region_instruction(ctx);
-
+ ///
+ /// Abort:
+ /// - ExceededDeadline
+ async fn downgrade_region_with_retry(&self, ctx: &mut Context) -> Result<()> {
let mut retry = 0;
loop {
- if let Err(err) = self.downgrade_region(ctx, &instruction).await {
+ let timer = Instant::now();
+ if let Err(err) = self.downgrade_region(ctx).await {
+ ctx.update_operations_elapsed(timer);
retry += 1;
- if err.is_retryable() && retry < self.optimistic_retry {
- warn!("Failed to downgrade region, error: {err:?}, retry later");
+ // Throws the error immediately if the procedure exceeded the deadline.
+ if matches!(err, error::Error::ExceededDeadline { .. }) {
+ return Err(err);
+ } else if err.is_retryable() && retry < self.optimistic_retry {
+ error!("Failed to downgrade region, error: {err:?}, retry later");
sleep(self.retry_initial_interval).await;
} else {
+ error!("Failed to downgrade region, error: {err:?}");
break;
}
} else {
+ ctx.update_operations_elapsed(timer);
// Resets the deadline.
ctx.volatile_ctx.reset_leader_region_lease_deadline();
break;
}
}
+
+ Ok(())
}
}
@@ -237,7 +260,7 @@ mod tests {
to_peer: Peer::empty(2),
region_id: RegionId::new(1024, 1),
cluster_id: 0,
- replay_timeout: Duration::from_millis(1000),
+ timeout: Duration::from_millis(1000),
}
}
@@ -248,11 +271,7 @@ mod tests {
let env = TestingEnv::new();
let mut ctx = env.context_factory().new_context(persistent_context);
- let instruction = &state.build_downgrade_region_instruction(&ctx);
- let err = state
- .downgrade_region(&mut ctx, instruction)
- .await
- .unwrap_err();
+ let err = state.downgrade_region(&mut ctx).await.unwrap_err();
assert_matches!(err, Error::PusherNotFound { .. });
assert!(!err.is_retryable());
@@ -276,13 +295,30 @@ mod tests {
drop(rx);
- let instruction = &state.build_downgrade_region_instruction(&ctx);
+ let err = state.downgrade_region(&mut ctx).await.unwrap_err();
+
+ assert_matches!(err, Error::PushMessage { .. });
+ assert!(!err.is_retryable());
+ }
+
+ #[tokio::test]
+ async fn test_procedure_exceeded_deadline() {
+ let state = DowngradeLeaderRegion::default();
+ let persistent_context = new_persistent_context();
+ let env = TestingEnv::new();
+ let mut ctx = env.context_factory().new_context(persistent_context);
+ ctx.volatile_ctx.operations_elapsed = ctx.persistent_ctx.timeout + Duration::from_secs(1);
+
+ let err = state.downgrade_region(&mut ctx).await.unwrap_err();
+
+ assert_matches!(err, Error::ExceededDeadline { .. });
+ assert!(!err.is_retryable());
+
let err = state
- .downgrade_region(&mut ctx, instruction)
+ .downgrade_region_with_retry(&mut ctx)
.await
.unwrap_err();
-
- assert_matches!(err, Error::PushMessage { .. });
+ assert_matches!(err, Error::ExceededDeadline { .. });
assert!(!err.is_retryable());
}
@@ -306,11 +342,7 @@ mod tests {
// Sends an incorrect reply.
send_mock_reply(mailbox, rx, |id| Ok(new_close_region_reply(id)));
- let instruction = &state.build_downgrade_region_instruction(&ctx);
- let err = state
- .downgrade_region(&mut ctx, instruction)
- .await
- .unwrap_err();
+ let err = state.downgrade_region(&mut ctx).await.unwrap_err();
assert_matches!(err, Error::UnexpectedInstructionReply { .. });
assert!(!err.is_retryable());
@@ -337,11 +369,7 @@ mod tests {
Err(error::MailboxTimeoutSnafu { id }.build())
});
- let instruction = &state.build_downgrade_region_instruction(&ctx);
- let err = state
- .downgrade_region(&mut ctx, instruction)
- .await
- .unwrap_err();
+ let err = state.downgrade_region(&mut ctx).await.unwrap_err();
assert_matches!(err, Error::RetryLater { .. });
assert!(err.is_retryable());
@@ -373,11 +401,7 @@ mod tests {
))
});
- let instruction = &state.build_downgrade_region_instruction(&ctx);
- let err = state
- .downgrade_region(&mut ctx, instruction)
- .await
- .unwrap_err();
+ let err = state.downgrade_region(&mut ctx).await.unwrap_err();
assert_matches!(err, Error::RetryLater { .. });
assert!(err.is_retryable());
@@ -425,7 +449,7 @@ mod tests {
.unwrap();
});
- state.downgrade_region_with_retry(&mut ctx).await;
+ state.downgrade_region_with_retry(&mut ctx).await.unwrap();
assert_eq!(ctx.volatile_ctx.leader_region_last_entry_id, Some(1));
assert!(ctx.volatile_ctx.leader_region_lease_deadline.is_none());
}
@@ -467,7 +491,7 @@ mod tests {
ctx.volatile_ctx
.set_leader_region_lease_deadline(Duration::from_secs(5));
let expected_deadline = ctx.volatile_ctx.leader_region_lease_deadline.unwrap();
- state.downgrade_region_with_retry(&mut ctx).await;
+ state.downgrade_region_with_retry(&mut ctx).await.unwrap();
assert_eq!(ctx.volatile_ctx.leader_region_last_entry_id, None);
// Should remain no change.
assert_eq!(
@@ -509,4 +533,30 @@ mod tests {
.downcast_ref::<UpgradeCandidateRegion>()
.unwrap();
}
+
+ #[tokio::test]
+ async fn test_downgrade_region_procedure_exceeded_deadline() {
+ let mut state = Box::<UpgradeCandidateRegion>::default();
+ state.retry_initial_interval = Duration::from_millis(100);
+ let persistent_context = new_persistent_context();
+ let to_peer_id = persistent_context.to_peer.id;
+
+ let mut env = TestingEnv::new();
+ let mut ctx = env.context_factory().new_context(persistent_context);
+ let mailbox_ctx = env.mailbox_context();
+ let mailbox = mailbox_ctx.mailbox().clone();
+ ctx.volatile_ctx.operations_elapsed = ctx.persistent_ctx.timeout + Duration::from_secs(1);
+
+ let (tx, rx) = tokio::sync::mpsc::channel(1);
+ mailbox_ctx
+ .insert_heartbeat_response_receiver(Channel::Datanode(to_peer_id), tx)
+ .await;
+
+ send_mock_reply(mailbox, rx, |id| {
+ Ok(new_downgrade_region_reply(id, None, true, None))
+ });
+ let (next, _) = state.next(&mut ctx).await.unwrap();
+ let update_metadata = next.as_any().downcast_ref::<UpdateMetadata>().unwrap();
+ assert_matches!(update_metadata, UpdateMetadata::Rollback);
+ }
}
diff --git a/src/meta-srv/src/procedure/region_migration/manager.rs b/src/meta-srv/src/procedure/region_migration/manager.rs
index 73aa4371f467..bb3eff80c0b3 100644
--- a/src/meta-srv/src/procedure/region_migration/manager.rs
+++ b/src/meta-srv/src/procedure/region_migration/manager.rs
@@ -104,7 +104,7 @@ pub struct RegionMigrationProcedureTask {
pub(crate) region_id: RegionId,
pub(crate) from_peer: Peer,
pub(crate) to_peer: Peer,
- pub(crate) replay_timeout: Duration,
+ pub(crate) timeout: Duration,
}
impl RegionMigrationProcedureTask {
@@ -113,14 +113,14 @@ impl RegionMigrationProcedureTask {
region_id: RegionId,
from_peer: Peer,
to_peer: Peer,
- replay_timeout: Duration,
+ timeout: Duration,
) -> Self {
Self {
cluster_id,
region_id,
from_peer,
to_peer,
- replay_timeout,
+ timeout,
}
}
}
@@ -328,7 +328,7 @@ impl RegionMigrationManager {
region_id,
from_peer,
to_peer,
- replay_timeout,
+ timeout,
} = task.clone();
let procedure = RegionMigrationProcedure::new(
PersistentContext {
@@ -338,7 +338,7 @@ impl RegionMigrationManager {
region_id,
from_peer,
to_peer,
- replay_timeout,
+ timeout,
},
self.context_factory.clone(),
Some(guard),
@@ -390,7 +390,7 @@ mod test {
region_id,
from_peer: Peer::empty(2),
to_peer: Peer::empty(1),
- replay_timeout: Duration::from_millis(1000),
+ timeout: Duration::from_millis(1000),
};
// Inserts one
manager
@@ -415,7 +415,7 @@ mod test {
region_id,
from_peer: Peer::empty(1),
to_peer: Peer::empty(1),
- replay_timeout: Duration::from_millis(1000),
+ timeout: Duration::from_millis(1000),
};
let err = manager.submit_procedure(task).await.unwrap_err();
@@ -433,7 +433,7 @@ mod test {
region_id,
from_peer: Peer::empty(1),
to_peer: Peer::empty(2),
- replay_timeout: Duration::from_millis(1000),
+ timeout: Duration::from_millis(1000),
};
let err = manager.submit_procedure(task).await.unwrap_err();
@@ -451,7 +451,7 @@ mod test {
region_id,
from_peer: Peer::empty(1),
to_peer: Peer::empty(2),
- replay_timeout: Duration::from_millis(1000),
+ timeout: Duration::from_millis(1000),
};
let table_info = new_test_table_info(1024, vec![1]).into();
@@ -479,7 +479,7 @@ mod test {
region_id,
from_peer: Peer::empty(1),
to_peer: Peer::empty(2),
- replay_timeout: Duration::from_millis(1000),
+ timeout: Duration::from_millis(1000),
};
let table_info = new_test_table_info(1024, vec![1]).into();
@@ -511,7 +511,7 @@ mod test {
region_id,
from_peer: Peer::empty(1),
to_peer: Peer::empty(2),
- replay_timeout: Duration::from_millis(1000),
+ timeout: Duration::from_millis(1000),
};
let table_info = new_test_table_info(1024, vec![1]).into();
@@ -538,7 +538,7 @@ mod test {
region_id,
from_peer: Peer::empty(1),
to_peer: Peer::empty(2),
- replay_timeout: Duration::from_millis(1000),
+ timeout: Duration::from_millis(1000),
};
let err = manager
diff --git a/src/meta-srv/src/procedure/region_migration/test_util.rs b/src/meta-srv/src/procedure/region_migration/test_util.rs
index edfb89515fe7..65e33ab3d99e 100644
--- a/src/meta-srv/src/procedure/region_migration/test_util.rs
+++ b/src/meta-srv/src/procedure/region_migration/test_util.rs
@@ -316,7 +316,7 @@ pub fn new_persistent_context(from: u64, to: u64, region_id: RegionId) -> Persis
to_peer: Peer::empty(to),
region_id,
cluster_id: 0,
- replay_timeout: Duration::from_millis(1000),
+ timeout: Duration::from_secs(10),
}
}
diff --git a/src/meta-srv/src/procedure/region_migration/upgrade_candidate_region.rs b/src/meta-srv/src/procedure/region_migration/upgrade_candidate_region.rs
index 88caf5f08d3e..49100e92f36e 100644
--- a/src/meta-srv/src/procedure/region_migration/upgrade_candidate_region.rs
+++ b/src/meta-srv/src/procedure/region_migration/upgrade_candidate_region.rs
@@ -16,13 +16,12 @@ use std::any::Any;
use std::time::Duration;
use api::v1::meta::MailboxMessage;
-use common_meta::distributed_time_constants::MAILBOX_RTT_SECS;
use common_meta::instruction::{Instruction, InstructionReply, UpgradeRegion, UpgradeRegionReply};
use common_procedure::Status;
-use common_telemetry::warn;
+use common_telemetry::error;
use serde::{Deserialize, Serialize};
-use snafu::{ensure, ResultExt};
-use tokio::time::sleep;
+use snafu::{ensure, OptionExt, ResultExt};
+use tokio::time::{sleep, Instant};
use super::update_metadata::UpdateMetadata;
use crate::error::{self, Result};
@@ -36,8 +35,6 @@ pub struct UpgradeCandidateRegion {
pub(crate) optimistic_retry: usize,
// The retry initial interval.
pub(crate) retry_initial_interval: Duration,
- // The replay timeout of a instruction.
- pub(crate) replay_timeout: Duration,
// If it's true it requires the candidate region MUST replay the WAL to the latest entry id.
// Otherwise, it will rollback to the old leader region.
pub(crate) require_ready: bool,
@@ -48,7 +45,6 @@ impl Default for UpgradeCandidateRegion {
Self {
optimistic_retry: 3,
retry_initial_interval: Duration::from_millis(500),
- replay_timeout: Duration::from_millis(1000),
require_ready: true,
}
}
@@ -71,17 +67,12 @@ impl State for UpgradeCandidateRegion {
}
impl UpgradeCandidateRegion {
- const UPGRADE_CANDIDATE_REGION_RTT: Duration = Duration::from_secs(MAILBOX_RTT_SECS);
-
- /// Returns the timeout of the upgrade candidate region.
- ///
- /// Equals `replay_timeout` + RTT
- fn send_upgrade_candidate_region_timeout(&self) -> Duration {
- self.replay_timeout + UpgradeCandidateRegion::UPGRADE_CANDIDATE_REGION_RTT
- }
-
/// Builds upgrade region instruction.
- fn build_upgrade_region_instruction(&self, ctx: &Context) -> Instruction {
+ fn build_upgrade_region_instruction(
+ &self,
+ ctx: &Context,
+ replay_timeout: Duration,
+ ) -> Instruction {
let pc = &ctx.persistent_ctx;
let region_id = pc.region_id;
let last_entry_id = ctx.volatile_ctx.leader_region_last_entry_id;
@@ -89,7 +80,7 @@ impl UpgradeCandidateRegion {
Instruction::UpgradeRegion(UpgradeRegion {
region_id,
last_entry_id,
- wait_for_replay_timeout: Some(self.replay_timeout),
+ replay_timeout: Some(replay_timeout),
location_id: Some(ctx.persistent_ctx.from_peer.id),
})
}
@@ -106,28 +97,32 @@ impl UpgradeCandidateRegion {
/// - [PushMessage](error::Error::PushMessage), The receiver is dropped.
/// - [MailboxReceiver](error::Error::MailboxReceiver), The sender is dropped without sending (impossible).
/// - [UnexpectedInstructionReply](error::Error::UnexpectedInstructionReply) (impossible).
+ /// - [ExceededDeadline](error::Error::ExceededDeadline)
/// - Invalid JSON (impossible).
- async fn upgrade_region(&self, ctx: &Context, upgrade_instruction: &Instruction) -> Result<()> {
+ async fn upgrade_region(&self, ctx: &Context) -> Result<()> {
let pc = &ctx.persistent_ctx;
let region_id = pc.region_id;
let candidate = &pc.to_peer;
+ let operation_timeout =
+ ctx.next_operation_timeout()
+ .context(error::ExceededDeadlineSnafu {
+ operation: "Upgrade region",
+ })?;
+ let upgrade_instruction = self.build_upgrade_region_instruction(ctx, operation_timeout);
let msg = MailboxMessage::json_message(
&format!("Upgrade candidate region: {}", region_id),
&format!("Meta@{}", ctx.server_addr()),
&format!("Datanode-{}@{}", candidate.id, candidate.addr),
common_time::util::current_time_millis(),
- upgrade_instruction,
+ &upgrade_instruction,
)
.with_context(|_| error::SerializeToJsonSnafu {
input: upgrade_instruction.to_string(),
})?;
let ch = Channel::Datanode(candidate.id);
- let receiver = ctx
- .mailbox
- .send(&ch, msg, self.send_upgrade_candidate_region_timeout())
- .await?;
+ let receiver = ctx.mailbox.send(&ch, msg, operation_timeout).await?;
match receiver.await? {
Ok(msg) => {
@@ -192,22 +187,27 @@ impl UpgradeCandidateRegion {
/// Upgrades a candidate region.
///
/// Returns true if the candidate region is upgraded successfully.
- async fn upgrade_region_with_retry(&self, ctx: &Context) -> bool {
- let upgrade_instruction = self.build_upgrade_region_instruction(ctx);
-
+ async fn upgrade_region_with_retry(&self, ctx: &mut Context) -> bool {
let mut retry = 0;
let mut upgraded = false;
loop {
- if let Err(err) = self.upgrade_region(ctx, &upgrade_instruction).await {
+ let timer = Instant::now();
+ if let Err(err) = self.upgrade_region(ctx).await {
retry += 1;
- if err.is_retryable() && retry < self.optimistic_retry {
- warn!("Failed to upgrade region, error: {err:?}, retry later");
+ ctx.update_operations_elapsed(timer);
+ if matches!(err, error::Error::ExceededDeadline { .. }) {
+ error!("Failed to upgrade region, exceeded deadline");
+ break;
+ } else if err.is_retryable() && retry < self.optimistic_retry {
+ error!("Failed to upgrade region, error: {err:?}, retry later");
sleep(self.retry_initial_interval).await;
} else {
+ error!("Failed to upgrade region, error: {err:?}");
break;
}
} else {
+ ctx.update_operations_elapsed(timer);
upgraded = true;
break;
}
@@ -239,7 +239,7 @@ mod tests {
to_peer: Peer::empty(2),
region_id: RegionId::new(1024, 1),
cluster_id: 0,
- replay_timeout: Duration::from_millis(1000),
+ timeout: Duration::from_millis(1000),
}
}
@@ -250,8 +250,7 @@ mod tests {
let env = TestingEnv::new();
let ctx = env.context_factory().new_context(persistent_context);
- let instruction = &state.build_upgrade_region_instruction(&ctx);
- let err = state.upgrade_region(&ctx, instruction).await.unwrap_err();
+ let err = state.upgrade_region(&ctx).await.unwrap_err();
assert_matches!(err, Error::PusherNotFound { .. });
assert!(!err.is_retryable());
@@ -275,13 +274,26 @@ mod tests {
drop(rx);
- let instruction = &state.build_upgrade_region_instruction(&ctx);
- let err = state.upgrade_region(&ctx, instruction).await.unwrap_err();
+ let err = state.upgrade_region(&ctx).await.unwrap_err();
assert_matches!(err, Error::PushMessage { .. });
assert!(!err.is_retryable());
}
+ #[tokio::test]
+ async fn test_procedure_exceeded_deadline() {
+ let state = UpgradeCandidateRegion::default();
+ let persistent_context = new_persistent_context();
+ let env = TestingEnv::new();
+ let mut ctx = env.context_factory().new_context(persistent_context);
+ ctx.volatile_ctx.operations_elapsed = ctx.persistent_ctx.timeout + Duration::from_secs(1);
+
+ let err = state.upgrade_region(&ctx).await.unwrap_err();
+
+ assert_matches!(err, Error::ExceededDeadline { .. });
+ assert!(!err.is_retryable());
+ }
+
#[tokio::test]
async fn test_unexpected_instruction_reply() {
let state = UpgradeCandidateRegion::default();
@@ -301,8 +313,7 @@ mod tests {
send_mock_reply(mailbox, rx, |id| Ok(new_close_region_reply(id)));
- let instruction = &state.build_upgrade_region_instruction(&ctx);
- let err = state.upgrade_region(&ctx, instruction).await.unwrap_err();
+ let err = state.upgrade_region(&ctx).await.unwrap_err();
assert_matches!(err, Error::UnexpectedInstructionReply { .. });
assert!(!err.is_retryable());
}
@@ -334,8 +345,7 @@ mod tests {
))
});
- let instruction = &state.build_upgrade_region_instruction(&ctx);
- let err = state.upgrade_region(&ctx, instruction).await.unwrap_err();
+ let err = state.upgrade_region(&ctx).await.unwrap_err();
assert_matches!(err, Error::RetryLater { .. });
assert!(err.is_retryable());
@@ -363,8 +373,7 @@ mod tests {
Ok(new_upgrade_region_reply(id, true, false, None))
});
- let instruction = &state.build_upgrade_region_instruction(&ctx);
- let err = state.upgrade_region(&ctx, instruction).await.unwrap_err();
+ let err = state.upgrade_region(&ctx).await.unwrap_err();
assert_matches!(err, Error::Unexpected { .. });
assert!(!err.is_retryable());
@@ -396,8 +405,7 @@ mod tests {
Ok(new_upgrade_region_reply(id, false, true, None))
});
- let instruction = &state.build_upgrade_region_instruction(&ctx);
- let err = state.upgrade_region(&ctx, instruction).await.unwrap_err();
+ let err = state.upgrade_region(&ctx).await.unwrap_err();
assert_matches!(err, Error::RetryLater { .. });
assert!(err.is_retryable());
@@ -417,8 +425,7 @@ mod tests {
Ok(new_upgrade_region_reply(id, false, true, None))
});
- let instruction = &state.build_upgrade_region_instruction(&ctx);
- state.upgrade_region(&ctx, instruction).await.unwrap();
+ state.upgrade_region(&ctx).await.unwrap();
}
#[tokio::test]
@@ -537,4 +544,31 @@ mod tests {
let update_metadata = next.as_any().downcast_ref::<UpdateMetadata>().unwrap();
assert_matches!(update_metadata, UpdateMetadata::Rollback);
}
+
+ #[tokio::test]
+ async fn test_upgrade_region_procedure_exceeded_deadline() {
+ let mut state = Box::<UpgradeCandidateRegion>::default();
+ state.retry_initial_interval = Duration::from_millis(100);
+ let persistent_context = new_persistent_context();
+ let to_peer_id = persistent_context.to_peer.id;
+
+ let mut env = TestingEnv::new();
+ let mut ctx = env.context_factory().new_context(persistent_context);
+ let mailbox_ctx = env.mailbox_context();
+ let mailbox = mailbox_ctx.mailbox().clone();
+ ctx.volatile_ctx.operations_elapsed = ctx.persistent_ctx.timeout + Duration::from_secs(1);
+
+ let (tx, rx) = tokio::sync::mpsc::channel(1);
+ mailbox_ctx
+ .insert_heartbeat_response_receiver(Channel::Datanode(to_peer_id), tx)
+ .await;
+
+ send_mock_reply(mailbox, rx, |id| {
+ Ok(new_upgrade_region_reply(id, false, true, None))
+ });
+
+ let (next, _) = state.next(&mut ctx).await.unwrap();
+ let update_metadata = next.as_any().downcast_ref::<UpdateMetadata>().unwrap();
+ assert_matches!(update_metadata, UpdateMetadata::Rollback);
+ }
}
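
The downgrade and upgrade states above share one time budget: `next_operation_timeout` subtracts the accumulated `operations_elapsed` from the overall `timeout`, and a `None` result is what drives the `ExceededDeadline` rollback. A minimal sketch of that bookkeeping, using a standalone struct rather than the procedure's real `Context`:

use std::time::{Duration, Instant};

struct Budget {
    timeout: Duration,
    operations_elapsed: Duration,
}

impl Budget {
    // `None` means the overall deadline has already been spent.
    fn next_operation_timeout(&self) -> Option<Duration> {
        self.timeout.checked_sub(self.operations_elapsed)
    }

    // Charge the time spent on one remote operation against the budget.
    fn update_operations_elapsed(&mut self, started_at: Instant) {
        self.operations_elapsed += started_at.elapsed();
    }
}

fn main() {
    let mut budget = Budget {
        timeout: Duration::from_secs(10),
        operations_elapsed: Duration::ZERO,
    };
    let timer = Instant::now();
    // ... send the instruction and wait for the reply here ...
    budget.update_operations_elapsed(timer);
    assert!(budget.next_operation_timeout().is_some());
}
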
diff --git a/src/meta-srv/src/region/supervisor.rs b/src/meta-srv/src/region/supervisor.rs
index 8c7dff9b3df8..79b305bb8039 100644
--- a/src/meta-srv/src/region/supervisor.rs
+++ b/src/meta-srv/src/region/supervisor.rs
@@ -402,7 +402,7 @@ impl RegionSupervisor {
region_id,
from_peer,
to_peer,
- replay_timeout: Duration::from_secs(60),
+ timeout: Duration::from_secs(60),
};
if let Err(err) = self.region_migration_manager.submit_procedure(task).await {
diff --git a/src/meta-srv/src/service/procedure.rs b/src/meta-srv/src/service/procedure.rs
index d19d0902ae05..e20bb2c4db33 100644
--- a/src/meta-srv/src/service/procedure.rs
+++ b/src/meta-srv/src/service/procedure.rs
@@ -111,8 +111,7 @@ impl procedure_service_server::ProcedureService for Metasrv {
region_id,
from_peer,
to_peer,
- replay_timeout_secs,
- ..
+ timeout_secs,
} = request.into_inner();
let header = header.context(error::MissingRequestHeaderSnafu)?;
@@ -134,7 +133,7 @@ impl procedure_service_server::ProcedureService for Metasrv {
region_id: region_id.into(),
from_peer,
to_peer,
- replay_timeout: Duration::from_secs(replay_timeout_secs.into()),
+ timeout: Duration::from_secs(timeout_secs.into()),
})
.await?
.map(procedure::pid_to_pb_pid);
diff --git a/src/mito2/src/engine/catchup_test.rs b/src/mito2/src/engine/catchup_test.rs
index de72bb6128b0..5f4dd3b15acf 100644
--- a/src/mito2/src/engine/catchup_test.rs
+++ b/src/mito2/src/engine/catchup_test.rs
@@ -19,6 +19,9 @@ use api::v1::Rows;
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_recordbatch::RecordBatches;
+use common_wal::options::{KafkaWalOptions, WalOptions, WAL_OPTIONS_KEY};
+use rstest::rstest;
+use rstest_reuse::{self, apply};
use store_api::region_engine::{RegionEngine, SetReadonlyResponse};
use store_api::region_request::{RegionCatchupRequest, RegionOpenRequest, RegionRequest};
use store_api::storage::{RegionId, ScanRequest};
@@ -26,7 +29,8 @@ use store_api::storage::{RegionId, ScanRequest};
use crate::config::MitoConfig;
use crate::error::{self, Error};
use crate::test_util::{
- build_rows, flush_region, put_rows, rows_schema, CreateRequestBuilder, TestEnv,
+ build_rows, flush_region, kafka_log_store_factory, prepare_test_for_kafka_log_store, put_rows,
+ rows_schema, single_kafka_log_store_factory, CreateRequestBuilder, LogStoreFactory, TestEnv,
};
use crate::wal::EntryId;
@@ -38,15 +42,23 @@ fn get_last_entry_id(resp: SetReadonlyResponse) -> Option<EntryId> {
}
}
-#[tokio::test]
-async fn test_catchup_with_last_entry_id() {
+#[apply(single_kafka_log_store_factory)]
+
+async fn test_catchup_with_last_entry_id(factory: Option<LogStoreFactory>) {
common_telemetry::init_default_ut_logging();
- let mut env = TestEnv::with_prefix("last_entry_id");
+ let Some(factory) = factory else {
+ return;
+ };
+
+ let mut env = TestEnv::with_prefix("last_entry_id").with_log_store_factory(factory.clone());
+ let topic = prepare_test_for_kafka_log_store(&factory).await;
let leader_engine = env.create_engine(MitoConfig::default()).await;
let follower_engine = env.create_follower_engine(MitoConfig::default()).await;
let region_id = RegionId::new(1, 1);
- let request = CreateRequestBuilder::new().build();
+ let request = CreateRequestBuilder::new()
+ .kafka_topic(topic.clone())
+ .build();
let region_dir = request.region_dir.clone();
let column_schemas = rows_schema(&request);
@@ -55,13 +67,23 @@ async fn test_catchup_with_last_entry_id() {
.await
.unwrap();
+ let mut options = HashMap::new();
+ if let Some(topic) = &topic {
+ options.insert(
+ WAL_OPTIONS_KEY.to_string(),
+ serde_json::to_string(&WalOptions::Kafka(KafkaWalOptions {
+ topic: topic.to_string(),
+ }))
+ .unwrap(),
+ );
+ };
follower_engine
.handle_request(
region_id,
RegionRequest::Open(RegionOpenRequest {
engine: String::new(),
region_dir,
- options: HashMap::default(),
+ options,
skip_wal_replay: false,
}),
)
@@ -135,15 +157,23 @@ async fn test_catchup_with_last_entry_id() {
assert!(resp.is_ok());
}
-#[tokio::test]
-async fn test_catchup_with_incorrect_last_entry_id() {
+#[apply(single_kafka_log_store_factory)]
+async fn test_catchup_with_incorrect_last_entry_id(factory: Option<LogStoreFactory>) {
common_telemetry::init_default_ut_logging();
- let mut env = TestEnv::with_prefix("incorrect_last_entry_id");
+ let Some(factory) = factory else {
+ return;
+ };
+
+ let mut env =
+ TestEnv::with_prefix("incorrect_last_entry_id").with_log_store_factory(factory.clone());
+ let topic = prepare_test_for_kafka_log_store(&factory).await;
let leader_engine = env.create_engine(MitoConfig::default()).await;
let follower_engine = env.create_follower_engine(MitoConfig::default()).await;
let region_id = RegionId::new(1, 1);
- let request = CreateRequestBuilder::new().build();
+ let request = CreateRequestBuilder::new()
+ .kafka_topic(topic.clone())
+ .build();
let region_dir = request.region_dir.clone();
let column_schemas = rows_schema(&request);
@@ -152,13 +182,23 @@ async fn test_catchup_with_incorrect_last_entry_id() {
.await
.unwrap();
+ let mut options = HashMap::new();
+ if let Some(topic) = &topic {
+ options.insert(
+ WAL_OPTIONS_KEY.to_string(),
+ serde_json::to_string(&WalOptions::Kafka(KafkaWalOptions {
+ topic: topic.to_string(),
+ }))
+ .unwrap(),
+ );
+ };
follower_engine
.handle_request(
region_id,
RegionRequest::Open(RegionOpenRequest {
engine: String::new(),
region_dir,
- options: HashMap::default(),
+ options,
skip_wal_replay: false,
}),
)
@@ -217,14 +257,23 @@ async fn test_catchup_with_incorrect_last_entry_id() {
assert!(resp.is_ok());
}
-#[tokio::test]
-async fn test_catchup_without_last_entry_id() {
- let mut env = TestEnv::with_prefix("without_last_entry_id");
+#[apply(single_kafka_log_store_factory)]
+async fn test_catchup_without_last_entry_id(factory: Option<LogStoreFactory>) {
+ common_telemetry::init_default_ut_logging();
+ let Some(factory) = factory else {
+ return;
+ };
+
+ let mut env =
+ TestEnv::with_prefix("without_last_entry_id").with_log_store_factory(factory.clone());
+ let topic = prepare_test_for_kafka_log_store(&factory).await;
let leader_engine = env.create_engine(MitoConfig::default()).await;
let follower_engine = env.create_follower_engine(MitoConfig::default()).await;
let region_id = RegionId::new(1, 1);
- let request = CreateRequestBuilder::new().build();
+ let request = CreateRequestBuilder::new()
+ .kafka_topic(topic.clone())
+ .build();
let region_dir = request.region_dir.clone();
let column_schemas = rows_schema(&request);
@@ -233,13 +282,23 @@ async fn test_catchup_without_last_entry_id() {
.await
.unwrap();
+ let mut options = HashMap::new();
+ if let Some(topic) = &topic {
+ options.insert(
+ WAL_OPTIONS_KEY.to_string(),
+ serde_json::to_string(&WalOptions::Kafka(KafkaWalOptions {
+ topic: topic.to_string(),
+ }))
+ .unwrap(),
+ );
+ };
follower_engine
.handle_request(
region_id,
RegionRequest::Open(RegionOpenRequest {
engine: String::new(),
region_dir,
- options: HashMap::default(),
+ options,
skip_wal_replay: false,
}),
)
@@ -299,14 +358,23 @@ async fn test_catchup_without_last_entry_id() {
assert!(region.is_writable());
}
-#[tokio::test]
-async fn test_catchup_with_manifest_update() {
- let mut env = TestEnv::with_prefix("without_manifest_update");
+#[apply(single_kafka_log_store_factory)]
+async fn test_catchup_with_manifest_update(factory: Option<LogStoreFactory>) {
+ common_telemetry::init_default_ut_logging();
+ let Some(factory) = factory else {
+ return;
+ };
+
+ let mut env =
+ TestEnv::with_prefix("without_manifest_update").with_log_store_factory(factory.clone());
+ let topic = prepare_test_for_kafka_log_store(&factory).await;
let leader_engine = env.create_engine(MitoConfig::default()).await;
let follower_engine = env.create_follower_engine(MitoConfig::default()).await;
let region_id = RegionId::new(1, 1);
- let request = CreateRequestBuilder::new().build();
+ let request = CreateRequestBuilder::new()
+ .kafka_topic(topic.clone())
+ .build();
let region_dir = request.region_dir.clone();
let column_schemas = rows_schema(&request);
@@ -315,13 +383,23 @@ async fn test_catchup_with_manifest_update() {
.await
.unwrap();
+ let mut options = HashMap::new();
+ if let Some(topic) = &topic {
+ options.insert(
+ WAL_OPTIONS_KEY.to_string(),
+ serde_json::to_string(&WalOptions::Kafka(KafkaWalOptions {
+ topic: topic.to_string(),
+ }))
+ .unwrap(),
+ );
+ };
follower_engine
.handle_request(
region_id,
RegionRequest::Open(RegionOpenRequest {
engine: String::new(),
region_dir,
- options: HashMap::default(),
+ options,
skip_wal_replay: false,
}),
)
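The four catchup tests above repeat the same WAL-options block verbatim. A small helper, assuming the same crate paths used in the diff (common_wal::options and serde_json), captures what each repetition builds; it is a sketch of the test setup, not code from the commit.

use std::collections::HashMap;

use common_wal::options::{KafkaWalOptions, WalOptions, WAL_OPTIONS_KEY};

// Builds the region-open options map used by each test: when a Kafka topic is
// present, the serialized Kafka WAL options are stored under WAL_OPTIONS_KEY;
// otherwise the map stays empty.
fn kafka_wal_options(topic: Option<&String>) -> HashMap<String, String> {
    let mut options = HashMap::new();
    if let Some(topic) = topic {
        options.insert(
            WAL_OPTIONS_KEY.to_string(),
            serde_json::to_string(&WalOptions::Kafka(KafkaWalOptions {
                topic: topic.to_string(),
            }))
            .unwrap(),
        );
    }
    options
}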
diff --git a/src/mito2/src/test_util.rs b/src/mito2/src/test_util.rs
index da0451bc1aa8..cd449e53fae6 100644
--- a/src/mito2/src/test_util.rs
+++ b/src/mito2/src/test_util.rs
@@ -112,6 +112,12 @@ pub(crate) fn kafka_log_store_factory() -> Option<LogStoreFactory> {
#[tokio::test]
pub(crate) fn multiple_log_store_factories(#[case] factory: Option<LogStoreFactory>) {}
+#[template]
+#[rstest]
+#[case::with_kafka(kafka_log_store_factory())]
+#[tokio::test]
+pub(crate) fn single_kafka_log_store_factory(#[case] factory: Option<LogStoreFactory>) {}
+
#[derive(Clone)]
pub(crate) struct RaftEngineLogStoreFactory;
diff --git a/src/mito2/src/worker/handle_catchup.rs b/src/mito2/src/worker/handle_catchup.rs
index bee00fae5ef6..505c994d3607 100644
--- a/src/mito2/src/worker/handle_catchup.rs
+++ b/src/mito2/src/worker/handle_catchup.rs
@@ -17,6 +17,7 @@
use std::sync::Arc;
use common_telemetry::info;
+use common_telemetry::tracing::warn;
use snafu::ensure;
use store_api::logstore::LogStore;
use store_api::region_request::{AffectedRows, RegionCatchupRequest};
@@ -72,38 +73,42 @@ impl<S: LogStore> RegionWorkerLoop<S> {
region
};
- let flushed_entry_id = region.version_control.current().last_entry_id;
- info!("Trying to replay memtable for region: {region_id}, flushed entry id: {flushed_entry_id}");
- let timer = Instant::now();
- let wal_entry_reader =
- self.wal
- .wal_entry_reader(®ion.provider, region_id, request.location_id);
- let on_region_opened = self.wal.on_region_opened();
- let last_entry_id = replay_memtable(
- ®ion.provider,
- wal_entry_reader,
- region_id,
- flushed_entry_id,
- ®ion.version_control,
- self.config.allow_stale_entries,
- on_region_opened,
- )
- .await?;
- info!(
- "Elapsed: {:?}, region: {region_id} catchup finished. last entry id: {last_entry_id}, expected: {:?}.",
- timer.elapsed(),
- request.entry_id
- );
- if let Some(expected_last_entry_id) = request.entry_id {
- ensure!(
- // The replayed last entry id may be greater than the `expected_last_entry_id`.
- last_entry_id >= expected_last_entry_id,
- error::UnexpectedReplaySnafu {
- region_id,
- expected_last_entry_id,
- replayed_last_entry_id: last_entry_id,
- }
+ if region.provider.is_remote_wal() {
+ let flushed_entry_id = region.version_control.current().last_entry_id;
+ info!("Trying to replay memtable for region: {region_id}, flushed entry id: {flushed_entry_id}");
+ let timer = Instant::now();
+ let wal_entry_reader =
+ self.wal
+ .wal_entry_reader(®ion.provider, region_id, request.location_id);
+ let on_region_opened = self.wal.on_region_opened();
+ let last_entry_id = replay_memtable(
+ ®ion.provider,
+ wal_entry_reader,
+ region_id,
+ flushed_entry_id,
+ ®ion.version_control,
+ self.config.allow_stale_entries,
+ on_region_opened,
)
+ .await?;
+ info!(
+ "Elapsed: {:?}, region: {region_id} catchup finished. last entry id: {last_entry_id}, expected: {:?}.",
+ timer.elapsed(),
+ request.entry_id
+ );
+ if let Some(expected_last_entry_id) = request.entry_id {
+ ensure!(
+ // The replayed last entry id may be greater than the `expected_last_entry_id`.
+ last_entry_id >= expected_last_entry_id,
+ error::UnexpectedReplaySnafu {
+ region_id,
+ expected_last_entry_id,
+ replayed_last_entry_id: last_entry_id,
+ }
+ )
+ }
+ } else {
+ warn!("Skips to replay memtable for region: {}", region.region_id);
}
if request.set_writable {
diff --git a/src/store-api/src/logstore/provider.rs b/src/store-api/src/logstore/provider.rs
index 16f907f3b439..b37b2de90786 100644
--- a/src/store-api/src/logstore/provider.rs
+++ b/src/store-api/src/logstore/provider.rs
@@ -84,6 +84,11 @@ impl Provider {
Provider::Kafka(Arc::new(KafkaProvider { topic }))
}
+ /// Returns true if it's remote WAL.
+ pub fn is_remote_wal(&self) -> bool {
+ matches!(self, Provider::Kafka(_))
+ }
+
/// Returns the type name.
pub fn type_name(&self) -> &'static str {
match self {
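A minimal runnable sketch of the branch the handle_catchup.rs and provider.rs changes above introduce, using a simplified stand-in for store_api::logstore::provider::Provider (the real Kafka variant carries an Arc<KafkaProvider>):

// Simplified stand-in for the provider enum from the diff.
enum Provider {
    RaftEngine,
    Kafka,
}

impl Provider {
    /// Mirrors the new helper: only the Kafka variant counts as a remote WAL.
    fn is_remote_wal(&self) -> bool {
        matches!(self, Provider::Kafka)
    }
}

fn main() {
    // handle_catchup now replays the memtable only for remote WAL regions;
    // local (raft-engine) WAL regions skip the replay and only log a warning.
    for provider in [Provider::Kafka, Provider::RaftEngine] {
        if provider.is_remote_wal() {
            println!("replay memtable from the remote WAL");
        } else {
            println!("skip memtable replay for the local WAL");
        }
    }
}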
| feat | migrate local WAL regions (#4715) | 8d61e6fe491204b987f682330526744e05f31dfd | 2024-08-23 08:35:52 | Weny Xu | chore: bump rskafka to `75535b` (#4608) | false |
diff --git a/Cargo.lock b/Cargo.lock
index c168fa0f387e..24fc59697ef2 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -9170,8 +9170,9 @@ dependencies = [
[[package]]
name = "rsasl"
-version = "2.0.2"
-source = "git+https://github.com/wenyxu/rsasl.git?rev=06ebb683d5539c3410de4ce9fa37ff9b97e790a4#06ebb683d5539c3410de4ce9fa37ff9b97e790a4"
+version = "2.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "45035615cdd68c71daac89aef75b130d4b2cad29599966e1b4671f8fbb463559"
dependencies = [
"base64 0.22.1",
"core2",
@@ -9188,9 +9189,8 @@ dependencies = [
[[package]]
name = "rskafka"
version = "0.5.0"
-source = "git+https://github.com/WenyXu/rskafka.git?rev=940c6030012c5b746fad819fb72e3325b26e39de#940c6030012c5b746fad819fb72e3325b26e39de"
+source = "git+https://github.com/influxdata/rskafka.git?rev=75535b5ad9bae4a5dbb582c82e44dfd81ec10105#75535b5ad9bae4a5dbb582c82e44dfd81ec10105"
dependencies = [
- "async-trait",
"bytes",
"chrono",
"crc32c",
@@ -9199,7 +9199,6 @@ dependencies = [
"integer-encoding 4.0.0",
"lz4",
"parking_lot 0.12.3",
- "pin-project-lite",
"rand",
"rsasl",
"rustls 0.23.10",
diff --git a/Cargo.toml b/Cargo.toml
index 72763337dcde..b596558713bc 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -152,8 +152,7 @@ reqwest = { version = "0.12", default-features = false, features = [
"stream",
"multipart",
] }
-# SCRAM-SHA-512 requires https://github.com/dequbed/rsasl/pull/48, https://github.com/influxdata/rskafka/pull/247
-rskafka = { git = "https://github.com/WenyXu/rskafka.git", rev = "940c6030012c5b746fad819fb72e3325b26e39de", features = [
+rskafka = { git = "https://github.com/influxdata/rskafka.git", rev = "75535b5ad9bae4a5dbb582c82e44dfd81ec10105", features = [
"transport-tls",
] }
rstest = "0.21"
| chore | bump rskafka to `75535b` (#4608) | c14aa176b5b0e583f8f647cab88de47790e92955 | 2025-03-10 16:13:15 | localhost | chore: impl ref and ref_mut for json like (#5679) | false |
diff --git a/Cargo.lock b/Cargo.lock
index 8ababdf21d88..28f1bac8c949 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -5898,15 +5898,15 @@ dependencies = [
[[package]]
name = "jsonpath-rust"
-version = "0.7.3"
+version = "0.7.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "69a61b87f6a55cc6c28fed5739dd36b9642321ce63e4a5e4a4715d69106f4a10"
+checksum = "0c00ae348f9f8fd2d09f82a98ca381c60df9e0820d8d79fce43e649b4dc3128b"
dependencies = [
"pest",
"pest_derive",
"regex",
"serde_json",
- "thiserror 1.0.64",
+ "thiserror 2.0.12",
]
[[package]]
@@ -8271,7 +8271,7 @@ dependencies = [
"rand",
"ring",
"rust_decimal",
- "thiserror 2.0.6",
+ "thiserror 2.0.12",
"tokio",
"tokio-rustls 0.26.0",
"tokio-util",
@@ -8383,7 +8383,7 @@ dependencies = [
"greptime-proto",
"itertools 0.10.5",
"jsonb",
- "jsonpath-rust 0.7.3",
+ "jsonpath-rust 0.7.5",
"lazy_static",
"moka",
"once_cell",
@@ -11063,7 +11063,7 @@ dependencies = [
"serde_json",
"sha2",
"smallvec",
- "thiserror 2.0.6",
+ "thiserror 2.0.12",
"tokio",
"tokio-stream",
"tracing",
@@ -11148,7 +11148,7 @@ dependencies = [
"smallvec",
"sqlx-core",
"stringprep",
- "thiserror 2.0.6",
+ "thiserror 2.0.12",
"tracing",
"whoami",
]
@@ -11186,7 +11186,7 @@ dependencies = [
"smallvec",
"sqlx-core",
"stringprep",
- "thiserror 2.0.6",
+ "thiserror 2.0.12",
"tracing",
"whoami",
]
@@ -11967,11 +11967,11 @@ dependencies = [
[[package]]
name = "thiserror"
-version = "2.0.6"
+version = "2.0.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8fec2a1820ebd077e2b90c4df007bebf344cd394098a13c563957d0afc83ea47"
+checksum = "567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708"
dependencies = [
- "thiserror-impl 2.0.6",
+ "thiserror-impl 2.0.12",
]
[[package]]
@@ -11987,9 +11987,9 @@ dependencies = [
[[package]]
name = "thiserror-impl"
-version = "2.0.6"
+version = "2.0.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d65750cab40f4ff1929fb1ba509e9914eb756131cef4210da8d5d700d26f6312"
+checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d"
dependencies = [
"proc-macro2",
"quote",
diff --git a/src/pipeline/Cargo.toml b/src/pipeline/Cargo.toml
index ceb3a992b2d3..4c2a5e994555 100644
--- a/src/pipeline/Cargo.toml
+++ b/src/pipeline/Cargo.toml
@@ -41,7 +41,7 @@ futures.workspace = true
greptime-proto.workspace = true
itertools.workspace = true
jsonb.workspace = true
-jsonpath-rust = "0.7.3"
+jsonpath-rust = "0.7.5"
lazy_static.workspace = true
moka = { workspace = true, features = ["sync"] }
once_cell.workspace = true
diff --git a/src/pipeline/src/etl/value.rs b/src/pipeline/src/etl/value.rs
index 124d598d9b77..cfe774f8bf29 100644
--- a/src/pipeline/src/etl/value.rs
+++ b/src/pipeline/src/etl/value.rs
@@ -16,10 +16,13 @@ pub mod array;
pub mod map;
pub mod time;
+use std::result::Result as StdResult;
+
pub use array::Array;
use jsonb::{Number as JsonbNumber, Object as JsonbObject, Value as JsonbValue};
+use jsonpath_rust::parser::{parse_json_path, JsonPathIndex};
use jsonpath_rust::path::{JsonLike, Path};
-use jsonpath_rust::{jsp_idx, jsp_obj};
+use jsonpath_rust::{jsp_idx, jsp_obj, JsonPath, JsonPathParserError, JsonPathStr};
pub use map::Map;
use regex::Regex;
use snafu::{OptionExt, ResultExt};
@@ -286,6 +289,52 @@ impl Value {
_ => None,
}
}
+
+ // ref https://github.com/serde-rs/json/blob/master/src/value/mod.rs#L779
+ pub fn pointer(&self, pointer: &str) -> Option<&Value> {
+ if pointer.is_empty() {
+ return Some(self);
+ }
+ if !pointer.starts_with('/') {
+ return None;
+ }
+ pointer
+ .split('/')
+ .skip(1)
+ .map(|x| x.replace("~1", "/").replace("~0", "~"))
+ .try_fold(self, |target, token| match target {
+ Value::Map(map) => map.get(&token),
+ Value::Array(list) => parse_index(&token).and_then(|x| list.get(x)),
+ _ => None,
+ })
+ }
+
+ // ref https://github.com/serde-rs/json/blob/master/src/value/mod.rs#L834
+ pub fn pointer_mut(&mut self, pointer: &str) -> Option<&mut Value> {
+ if pointer.is_empty() {
+ return Some(self);
+ }
+ if !pointer.starts_with('/') {
+ return None;
+ }
+ pointer
+ .split('/')
+ .skip(1)
+ .map(|x| x.replace("~1", "/").replace("~0", "~"))
+ .try_fold(self, |target, token| match target {
+ Value::Map(map) => map.get_mut(&token),
+ Value::Array(list) => parse_index(&token).and_then(move |x| list.get_mut(x)),
+ _ => None,
+ })
+ }
+}
+
+// ref https://github.com/serde-rs/json/blob/master/src/value/mod.rs#L259
+fn parse_index(s: &str) -> Option<usize> {
+ if s.starts_with('+') || (s.starts_with('0') && s.len() != 1) {
+ return None;
+ }
+ s.parse().ok()
}
impl std::fmt::Display for Value {
@@ -814,4 +863,46 @@ impl JsonLike for Value {
fn null() -> Self {
Value::Null
}
+
+ // ref https://github.com/besok/jsonpath-rust/blob/main/src/path/mod.rs#L423
+ fn reference<T>(
+ &self,
+ path: T,
+ ) -> std::result::Result<std::option::Option<&Value>, JsonPathParserError>
+ where
+ T: Into<JsonPathStr>,
+ {
+ Ok(self.pointer(&path_to_json_path(path.into())?))
+ }
+
+ // https://github.com/besok/jsonpath-rust/blob/main/src/path/mod.rs#L430
+ fn reference_mut<T>(
+ &mut self,
+ path: T,
+ ) -> std::result::Result<std::option::Option<&mut Value>, JsonPathParserError>
+ where
+ T: Into<JsonPathStr>,
+ {
+ Ok(self.pointer_mut(&path_to_json_path(path.into())?))
+ }
+}
+
+// ref https://github.com/besok/jsonpath-rust/blob/main/src/path/mod.rs#L438
+fn path_to_json_path(path: JsonPathStr) -> StdResult<String, JsonPathParserError> {
+ convert_part(&parse_json_path(path.as_str())?)
+}
+
+// https://github.com/besok/jsonpath-rust/blob/main/src/path/mod.rs#L442
+fn convert_part(path: &JsonPath) -> StdResult<String, JsonPathParserError> {
+ match path {
+ JsonPath::Chain(elems) => elems
+ .iter()
+ .map(convert_part)
+ .collect::<StdResult<String, JsonPathParserError>>(),
+
+ JsonPath::Index(JsonPathIndex::Single(v)) => Ok(format!("/{}", v)),
+ JsonPath::Field(e) => Ok(format!("/{}", e)),
+ JsonPath::Root => Ok("".to_string()),
+ e => Err(JsonPathParserError::InvalidJsonPath(e.to_string())),
+ }
}
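The pointer/pointer_mut additions above follow serde_json's JSON Pointer rules: "~1" unescapes to "/", "~0" to "~", and array indices with a leading '+' or a leading zero are rejected. Below is a runnable illustration of those rules using serde_json itself, purely for demonstration; the diff ports the same logic to the pipeline Value.

use serde_json::json;

fn main() {
    let v = json!({ "a/b": { "c~d": [10, 20, 30] } });
    // Escaped key segments resolve through nested maps and arrays.
    assert_eq!(v.pointer("/a~1b/c~0d/1"), Some(&json!(20)));
    // The empty pointer refers to the whole document.
    assert_eq!(v.pointer(""), Some(&v));
    // "01" and "+1" are rejected as array indices by parse_index.
    assert_eq!(v.pointer("/a~1b/c~0d/01"), None);
    assert_eq!(v.pointer("/a~1b/c~0d/+1"), None);
}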
| chore | impl ref and ref_mut for json like (#5679) | df6260d52568592675baad342a3b1cada136b46d | 2024-02-18 18:55:00 | dependabot[bot] | fix: bump libgit2-sys from 0.16.1+1.7.1 to 0.16.2+1.7.2 (#3316) | false |
diff --git a/Cargo.lock b/Cargo.lock
index 4113c6f8c7d4..03e5c25a817d 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4698,9 +4698,9 @@ checksum = "302d7ab3130588088d277783b1e2d2e10c9e9e4a16dd9050e6ec93fb3e7048f4"
[[package]]
name = "libgit2-sys"
-version = "0.16.1+1.7.1"
+version = "0.16.2+1.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f2a2bb3680b094add03bb3732ec520ece34da31a8cd2d633d1389d0f0fb60d0c"
+checksum = "ee4126d8b4ee5c9d9ea891dd875cfdc1e9d0950437179104b183d7d8a74d24e8"
dependencies = [
"cc",
"libc",
| fix | bump libgit2-sys from 0.16.1+1.7.1 to 0.16.2+1.7.2 (#3316) | fe8327fc7840adca1a16ea34f47138defa1e9d41 | 2022-09-29 14:38:08 | fys | feat: support write data via influxdb line protocol in frontend (#280) | false |
diff --git a/Cargo.lock b/Cargo.lock
index b4ef88c40e9b..15786de88dc6 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -918,6 +918,8 @@ dependencies = [
"api",
"arrow2",
"async-trait",
+ "common-base",
+ "common-error",
"datafusion",
"snafu",
]
@@ -2257,6 +2259,17 @@ dependencies = [
"hashbrown",
]
+[[package]]
+name = "influxdb_line_protocol"
+version = "0.1.0"
+source = "git+https://github.com/evenyag/influxdb_iox?branch=feat/line-protocol#10ef0d0b02705ac7518717390939fa3a9bcfcacc"
+dependencies = [
+ "bytes",
+ "nom",
+ "smallvec",
+ "snafu",
+]
+
[[package]]
name = "instant"
version = "0.1.12"
@@ -4693,8 +4706,10 @@ dependencies = [
"axum-test-helper",
"bytes",
"catalog",
+ "client",
"common-base",
"common-error",
+ "common-grpc",
"common-query",
"common-recordbatch",
"common-runtime",
@@ -4704,6 +4719,7 @@ dependencies = [
"futures",
"hex",
"hyper",
+ "influxdb_line_protocol",
"metrics 0.20.1",
"mysql_async",
"num_cpus",
@@ -4715,6 +4731,7 @@ dependencies = [
"serde",
"serde_json",
"snafu",
+ "table",
"test-util",
"tokio",
"tokio-postgres",
diff --git a/src/api/src/helper.rs b/src/api/src/helper.rs
index 57a726c6203b..13c8afc3460f 100644
--- a/src/api/src/helper.rs
+++ b/src/api/src/helper.rs
@@ -2,6 +2,7 @@ use datatypes::prelude::ConcreteDataType;
use snafu::prelude::*;
use crate::error::{self, Result};
+use crate::v1::column::Values;
use crate::v1::ColumnDataType;
#[derive(Debug, PartialEq, Eq)]
@@ -71,10 +72,140 @@ impl TryFrom<ConcreteDataType> for ColumnDataTypeWrapper {
}
}
+impl Values {
+ pub fn with_capacity(datatype: ColumnDataType, capacity: usize) -> Self {
+ match datatype {
+ ColumnDataType::Boolean => Values {
+ bool_values: Vec::with_capacity(capacity),
+ ..Default::default()
+ },
+ ColumnDataType::Int8 => Values {
+ i8_values: Vec::with_capacity(capacity),
+ ..Default::default()
+ },
+ ColumnDataType::Int16 => Values {
+ i16_values: Vec::with_capacity(capacity),
+ ..Default::default()
+ },
+ ColumnDataType::Int32 => Values {
+ i32_values: Vec::with_capacity(capacity),
+ ..Default::default()
+ },
+ ColumnDataType::Int64 => Values {
+ i64_values: Vec::with_capacity(capacity),
+ ..Default::default()
+ },
+ ColumnDataType::Uint8 => Values {
+ u8_values: Vec::with_capacity(capacity),
+ ..Default::default()
+ },
+ ColumnDataType::Uint16 => Values {
+ u16_values: Vec::with_capacity(capacity),
+ ..Default::default()
+ },
+ ColumnDataType::Uint32 => Values {
+ u32_values: Vec::with_capacity(capacity),
+ ..Default::default()
+ },
+ ColumnDataType::Uint64 => Values {
+ u64_values: Vec::with_capacity(capacity),
+ ..Default::default()
+ },
+ ColumnDataType::Float32 => Values {
+ f32_values: Vec::with_capacity(capacity),
+ ..Default::default()
+ },
+ ColumnDataType::Float64 => Values {
+ f64_values: Vec::with_capacity(capacity),
+ ..Default::default()
+ },
+ ColumnDataType::Binary => Values {
+ binary_values: Vec::with_capacity(capacity),
+ ..Default::default()
+ },
+ ColumnDataType::String => Values {
+ string_values: Vec::with_capacity(capacity),
+ ..Default::default()
+ },
+ ColumnDataType::Date => Values {
+ date_values: Vec::with_capacity(capacity),
+ ..Default::default()
+ },
+ ColumnDataType::Datetime => Values {
+ datetime_values: Vec::with_capacity(capacity),
+ ..Default::default()
+ },
+ ColumnDataType::Timestamp => Values {
+ ts_millis_values: Vec::with_capacity(capacity),
+ ..Default::default()
+ },
+ }
+ }
+}
+
#[cfg(test)]
mod tests {
use super::*;
+ #[test]
+ fn test_values_with_capacity() {
+ let values = Values::with_capacity(ColumnDataType::Int8, 2);
+ let values = values.i8_values;
+ assert_eq!(2, values.capacity());
+
+ let values = Values::with_capacity(ColumnDataType::Int32, 2);
+ let values = values.i32_values;
+ assert_eq!(2, values.capacity());
+
+ let values = Values::with_capacity(ColumnDataType::Int64, 2);
+ let values = values.i64_values;
+ assert_eq!(2, values.capacity());
+
+ let values = Values::with_capacity(ColumnDataType::Uint8, 2);
+ let values = values.u8_values;
+ assert_eq!(2, values.capacity());
+
+ let values = Values::with_capacity(ColumnDataType::Uint32, 2);
+ let values = values.u32_values;
+ assert_eq!(2, values.capacity());
+
+ let values = Values::with_capacity(ColumnDataType::Uint64, 2);
+ let values = values.u64_values;
+ assert_eq!(2, values.capacity());
+
+ let values = Values::with_capacity(ColumnDataType::Float32, 2);
+ let values = values.f32_values;
+ assert_eq!(2, values.capacity());
+
+ let values = Values::with_capacity(ColumnDataType::Float64, 2);
+ let values = values.f64_values;
+ assert_eq!(2, values.capacity());
+
+ let values = Values::with_capacity(ColumnDataType::Binary, 2);
+ let values = values.binary_values;
+ assert_eq!(2, values.capacity());
+
+ let values = Values::with_capacity(ColumnDataType::Boolean, 2);
+ let values = values.bool_values;
+ assert_eq!(2, values.capacity());
+
+ let values = Values::with_capacity(ColumnDataType::String, 2);
+ let values = values.string_values;
+ assert_eq!(2, values.capacity());
+
+ let values = Values::with_capacity(ColumnDataType::Date, 2);
+ let values = values.date_values;
+ assert_eq!(2, values.capacity());
+
+ let values = Values::with_capacity(ColumnDataType::Datetime, 2);
+ let values = values.datetime_values;
+ assert_eq!(2, values.capacity());
+
+ let values = Values::with_capacity(ColumnDataType::Timestamp, 2);
+ let values = values.ts_millis_values;
+ assert_eq!(2, values.capacity());
+ }
+
#[test]
fn test_concrete_datatype_from_column_datatype() {
assert_eq!(
diff --git a/src/cmd/src/frontend.rs b/src/cmd/src/frontend.rs
index 4da2dbf925c2..33050b45c165 100644
--- a/src/cmd/src/frontend.rs
+++ b/src/cmd/src/frontend.rs
@@ -1,5 +1,6 @@
use clap::Parser;
use frontend::frontend::{Frontend, FrontendOptions};
+use frontend::influxdb::InfluxdbOptions;
use frontend::mysql::MysqlOptions;
use frontend::opentsdb::OpentsdbOptions;
use frontend::postgres::PostgresOptions;
@@ -47,6 +48,8 @@ struct StartCommand {
opentsdb_addr: Option<String>,
#[clap(short, long)]
config_file: Option<String>,
+ #[clap(short, long)]
+ influxdb_enable: Option<bool>,
}
impl StartCommand {
@@ -91,6 +94,9 @@ impl TryFrom<StartCommand> for FrontendOptions {
..Default::default()
});
}
+ if let Some(enable) = cmd.influxdb_enable {
+ opts.influxdb_options = Some(InfluxdbOptions { enable });
+ }
Ok(opts)
}
}
@@ -107,6 +113,7 @@ mod tests {
mysql_addr: Some("127.0.0.1:5678".to_string()),
postgres_addr: Some("127.0.0.1:5432".to_string()),
opentsdb_addr: Some("127.0.0.1:4321".to_string()),
+ influxdb_enable: Some(false),
config_file: None,
};
@@ -136,5 +143,7 @@ mod tests {
opts.opentsdb_options.as_ref().unwrap().runtime_size,
default_opts.opentsdb_options.as_ref().unwrap().runtime_size
);
+
+ assert!(!opts.influxdb_options.unwrap().enable);
}
}
diff --git a/src/common/grpc/Cargo.toml b/src/common/grpc/Cargo.toml
index c9d49817f735..72f23fe0f2d7 100644
--- a/src/common/grpc/Cargo.toml
+++ b/src/common/grpc/Cargo.toml
@@ -6,6 +6,8 @@ edition = "2021"
[dependencies]
api = { path = "../../api" }
async-trait = "0.1"
+common-base = { path = "../base" }
+common-error = { path = "../error" }
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", branch = "arrow2", features = ["simd"] }
snafu = { version = "0.7", features = ["backtraces"] }
diff --git a/src/common/grpc/src/error.rs b/src/common/grpc/src/error.rs
index 992167e27eb7..9c31c3dad218 100644
--- a/src/common/grpc/src/error.rs
+++ b/src/common/grpc/src/error.rs
@@ -1,6 +1,11 @@
+use std::any::Any;
+
use api::DecodeError;
+use common_error::prelude::{ErrorExt, StatusCode};
use datafusion::error::DataFusionError;
-use snafu::{Backtrace, Snafu};
+use snafu::{Backtrace, ErrorCompat, Snafu};
+
+pub type Result<T> = std::result::Result<T, Error>;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
@@ -31,4 +36,42 @@ pub enum Error {
source: DecodeError,
backtrace: Backtrace,
},
+
+ #[snafu(display(
+ "Write type mismatch, column name: {}, expected: {}, actual: {}",
+ column_name,
+ expected,
+ actual
+ ))]
+ TypeMismatch {
+ column_name: String,
+ expected: String,
+ actual: String,
+ backtrace: Backtrace,
+ },
+}
+
+impl ErrorExt for Error {
+ fn status_code(&self) -> StatusCode {
+ match self {
+ Error::EmptyPhysicalPlan { .. }
+ | Error::EmptyPhysicalExpr { .. }
+ | Error::MissingField { .. }
+ | Error::TypeMismatch { .. } => StatusCode::InvalidArguments,
+ Error::UnsupportedDfPlan { .. } | Error::UnsupportedDfExpr { .. } => {
+ StatusCode::Unsupported
+ }
+ Error::NewProjection { .. } | Error::DecodePhysicalPlanNode { .. } => {
+ StatusCode::Internal
+ }
+ }
+ }
+
+ fn backtrace_opt(&self) -> Option<&Backtrace> {
+ ErrorCompat::backtrace(self)
+ }
+
+ fn as_any(&self) -> &dyn Any {
+ self
+ }
}
diff --git a/src/common/grpc/src/lib.rs b/src/common/grpc/src/lib.rs
index 202f244b9dd4..4ba2ebd375af 100644
--- a/src/common/grpc/src/lib.rs
+++ b/src/common/grpc/src/lib.rs
@@ -1,5 +1,6 @@
pub mod error;
pub mod physical;
+pub mod writer;
pub use error::Error;
pub use physical::{
diff --git a/src/common/grpc/src/writer.rs b/src/common/grpc/src/writer.rs
new file mode 100644
index 000000000000..2404b74cc1f7
--- /dev/null
+++ b/src/common/grpc/src/writer.rs
@@ -0,0 +1,367 @@
+use std::collections::HashMap;
+
+use api::v1::{
+ codec::InsertBatch,
+ column::{SemanticType, Values},
+ Column, ColumnDataType,
+};
+use common_base::BitVec;
+use snafu::ensure;
+
+use crate::error::{Result, TypeMismatchSnafu};
+
+type ColumnName = String;
+
+#[derive(Default)]
+pub struct LinesWriter {
+ column_name_index: HashMap<ColumnName, usize>,
+ null_masks: Vec<BitVec>,
+ batch: InsertBatch,
+ lines: usize,
+}
+
+impl LinesWriter {
+ pub fn with_lines(lines: usize) -> Self {
+ Self {
+ lines,
+ ..Default::default()
+ }
+ }
+
+ pub fn write_ts(&mut self, column_name: &str, value: (i64, Precision)) -> Result<()> {
+ let (idx, column) = self.mut_column(
+ column_name,
+ ColumnDataType::Timestamp,
+ SemanticType::Timestamp,
+ );
+ ensure!(
+ column.datatype == Some(ColumnDataType::Timestamp.into()),
+ TypeMismatchSnafu {
+ column_name,
+ expected: "timestamp",
+ actual: format!("{:?}", column.datatype)
+ }
+ );
+ // It is safe to use unwrap here, because values has been initialized in mut_column()
+ let values = column.values.as_mut().unwrap();
+ values.ts_millis_values.push(to_ms_ts(value.1, value.0));
+ self.null_masks[idx].push(false);
+ Ok(())
+ }
+
+ pub fn write_tag(&mut self, column_name: &str, value: &str) -> Result<()> {
+ let (idx, column) = self.mut_column(column_name, ColumnDataType::String, SemanticType::Tag);
+ ensure!(
+ column.datatype == Some(ColumnDataType::String.into()),
+ TypeMismatchSnafu {
+ column_name,
+ expected: "string",
+ actual: format!("{:?}", column.datatype)
+ }
+ );
+ // It is safe to use unwrap here, because values has been initialized in mut_column()
+ let values = column.values.as_mut().unwrap();
+ values.string_values.push(value.to_string());
+ self.null_masks[idx].push(false);
+ Ok(())
+ }
+
+ pub fn write_u64(&mut self, column_name: &str, value: u64) -> Result<()> {
+ let (idx, column) =
+ self.mut_column(column_name, ColumnDataType::Uint64, SemanticType::Field);
+ ensure!(
+ column.datatype == Some(ColumnDataType::Uint64.into()),
+ TypeMismatchSnafu {
+ column_name,
+ expected: "u64",
+ actual: format!("{:?}", column.datatype)
+ }
+ );
+ // It is safe to use unwrap here, because values has been initialized in mut_column()
+ let values = column.values.as_mut().unwrap();
+ values.u64_values.push(value);
+ self.null_masks[idx].push(false);
+ Ok(())
+ }
+
+ pub fn write_i64(&mut self, column_name: &str, value: i64) -> Result<()> {
+ let (idx, column) =
+ self.mut_column(column_name, ColumnDataType::Int64, SemanticType::Field);
+ ensure!(
+ column.datatype == Some(ColumnDataType::Int64.into()),
+ TypeMismatchSnafu {
+ column_name,
+ expected: "i64",
+ actual: format!("{:?}", column.datatype)
+ }
+ );
+ // It is safe to use unwrap here, because values has been initialized in mut_column()
+ let values = column.values.as_mut().unwrap();
+ values.i64_values.push(value);
+ self.null_masks[idx].push(false);
+ Ok(())
+ }
+
+ pub fn write_f64(&mut self, column_name: &str, value: f64) -> Result<()> {
+ let (idx, column) =
+ self.mut_column(column_name, ColumnDataType::Float64, SemanticType::Field);
+ ensure!(
+ column.datatype == Some(ColumnDataType::Float64.into()),
+ TypeMismatchSnafu {
+ column_name,
+ expected: "f64",
+ actual: format!("{:?}", column.datatype)
+ }
+ );
+ // It is safe to use unwrap here, because values has been initialized in mut_column()
+ let values = column.values.as_mut().unwrap();
+ values.f64_values.push(value);
+ self.null_masks[idx].push(false);
+ Ok(())
+ }
+
+ pub fn write_string(&mut self, column_name: &str, value: &str) -> Result<()> {
+ let (idx, column) =
+ self.mut_column(column_name, ColumnDataType::String, SemanticType::Field);
+ ensure!(
+ column.datatype == Some(ColumnDataType::String.into()),
+ TypeMismatchSnafu {
+ column_name,
+ expected: "string",
+ actual: format!("{:?}", column.datatype)
+ }
+ );
+ // It is safe to use unwrap here, because values has been initialized in mut_column()
+ let values = column.values.as_mut().unwrap();
+ values.string_values.push(value.to_string());
+ self.null_masks[idx].push(false);
+ Ok(())
+ }
+
+ pub fn write_bool(&mut self, column_name: &str, value: bool) -> Result<()> {
+ let (idx, column) =
+ self.mut_column(column_name, ColumnDataType::Boolean, SemanticType::Field);
+ ensure!(
+ column.datatype == Some(ColumnDataType::Boolean.into()),
+ TypeMismatchSnafu {
+ column_name,
+ expected: "boolean",
+ actual: format!("{:?}", column.datatype)
+ }
+ );
+ // It is safe to use unwrap here, because values has been initialized in mut_column()
+ let values = column.values.as_mut().unwrap();
+ values.bool_values.push(value);
+ self.null_masks[idx].push(false);
+ Ok(())
+ }
+
+ pub fn commit(&mut self) {
+ let batch = &mut self.batch;
+ batch.row_count += 1;
+
+ for i in 0..batch.columns.len() {
+ let null_mask = &mut self.null_masks[i];
+ if batch.row_count as usize > null_mask.len() {
+ null_mask.push(true);
+ }
+ }
+ }
+
+ pub fn finish(mut self) -> InsertBatch {
+ let null_masks = self.null_masks;
+ for (i, null_mask) in null_masks.into_iter().enumerate() {
+ let columns = &mut self.batch.columns;
+ columns[i].null_mask = null_mask.into_vec();
+ }
+ self.batch
+ }
+
+ fn mut_column(
+ &mut self,
+ column_name: &str,
+ datatype: ColumnDataType,
+ semantic_type: SemanticType,
+ ) -> (usize, &mut Column) {
+ let column_names = &mut self.column_name_index;
+ let column_idx = match column_names.get(column_name) {
+ Some(i) => *i,
+ None => {
+ let new_idx = column_names.len();
+ let batch = &mut self.batch;
+ let to_insert = self.lines;
+ let mut null_mask = BitVec::with_capacity(to_insert);
+ null_mask.extend(BitVec::repeat(true, batch.row_count as usize));
+ self.null_masks.push(null_mask);
+ batch.columns.push(Column {
+ column_name: column_name.to_string(),
+ semantic_type: semantic_type.into(),
+ values: Some(Values::with_capacity(datatype, to_insert)),
+ datatype: Some(datatype.into()),
+ null_mask: Vec::default(),
+ });
+ column_names.insert(column_name.to_string(), new_idx);
+ new_idx
+ }
+ };
+ (column_idx, &mut self.batch.columns[column_idx])
+ }
+}
+
+fn to_ms_ts(p: Precision, ts: i64) -> i64 {
+ match p {
+ Precision::NANOSECOND => ts / 1_000_000,
+ Precision::MICROSECOND => ts / 1000,
+ Precision::MILLISECOND => ts,
+ Precision::SECOND => ts * 1000,
+ Precision::MINUTE => ts * 1000 * 60,
+ Precision::HOUR => ts * 1000 * 60 * 60,
+ }
+}
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum Precision {
+ NANOSECOND,
+ MICROSECOND,
+ MILLISECOND,
+ SECOND,
+ MINUTE,
+ HOUR,
+}
+
+#[cfg(test)]
+mod tests {
+ use api::v1::{column::SemanticType, ColumnDataType};
+ use common_base::BitVec;
+
+ use super::LinesWriter;
+ use crate::writer::{to_ms_ts, Precision};
+
+ #[test]
+ fn test_lines_writer() {
+ let mut writer = LinesWriter::with_lines(3);
+
+ writer.write_tag("host", "host1").unwrap();
+ writer.write_f64("cpu", 0.5).unwrap();
+ writer.write_f64("memory", 0.4).unwrap();
+ writer.write_string("name", "name1").unwrap();
+ writer
+ .write_ts("ts", (101011000, Precision::MILLISECOND))
+ .unwrap();
+ writer.commit();
+
+ writer.write_tag("host", "host2").unwrap();
+ writer
+ .write_ts("ts", (102011001, Precision::MILLISECOND))
+ .unwrap();
+ writer.write_bool("enable_reboot", true).unwrap();
+ writer.write_u64("year_of_service", 2).unwrap();
+ writer.write_i64("temperature", 4).unwrap();
+ writer.commit();
+
+ writer.write_tag("host", "host3").unwrap();
+ writer.write_f64("cpu", 0.4).unwrap();
+ writer.write_u64("cpu_core_num", 16).unwrap();
+ writer
+ .write_ts("ts", (103011002, Precision::MILLISECOND))
+ .unwrap();
+ writer.commit();
+
+ let insert_batch = writer.finish();
+ assert_eq!(3, insert_batch.row_count);
+
+ let columns = insert_batch.columns;
+ assert_eq!(9, columns.len());
+
+ let column = &columns[0];
+ assert_eq!("host", columns[0].column_name);
+ assert_eq!(Some(ColumnDataType::String as i32), column.datatype);
+ assert_eq!(SemanticType::Tag as i32, column.semantic_type);
+ assert_eq!(
+ vec!["host1", "host2", "host3"],
+ column.values.as_ref().unwrap().string_values
+ );
+ verify_null_mask(&column.null_mask, vec![false, false, false]);
+
+ let column = &columns[1];
+ assert_eq!("cpu", column.column_name);
+ assert_eq!(Some(ColumnDataType::Float64 as i32), column.datatype);
+ assert_eq!(SemanticType::Field as i32, column.semantic_type);
+ assert_eq!(vec![0.5, 0.4], column.values.as_ref().unwrap().f64_values);
+ verify_null_mask(&column.null_mask, vec![false, true, false]);
+
+ let column = &columns[2];
+ assert_eq!("memory", column.column_name);
+ assert_eq!(Some(ColumnDataType::Float64 as i32), column.datatype);
+ assert_eq!(SemanticType::Field as i32, column.semantic_type);
+ assert_eq!(vec![0.4], column.values.as_ref().unwrap().f64_values);
+ verify_null_mask(&column.null_mask, vec![false, true, true]);
+
+ let column = &columns[3];
+ assert_eq!("name", column.column_name);
+ assert_eq!(Some(ColumnDataType::String as i32), column.datatype);
+ assert_eq!(SemanticType::Field as i32, column.semantic_type);
+ assert_eq!(vec!["name1"], column.values.as_ref().unwrap().string_values);
+ verify_null_mask(&column.null_mask, vec![false, true, true]);
+
+ let column = &columns[4];
+ assert_eq!("ts", column.column_name);
+ assert_eq!(Some(ColumnDataType::Timestamp as i32), column.datatype);
+ assert_eq!(SemanticType::Timestamp as i32, column.semantic_type);
+ assert_eq!(
+ vec![101011000, 102011001, 103011002],
+ column.values.as_ref().unwrap().ts_millis_values
+ );
+ verify_null_mask(&column.null_mask, vec![false, false, false]);
+
+ let column = &columns[5];
+ assert_eq!("enable_reboot", column.column_name);
+ assert_eq!(Some(ColumnDataType::Boolean as i32), column.datatype);
+ assert_eq!(SemanticType::Field as i32, column.semantic_type);
+ assert_eq!(vec![true], column.values.as_ref().unwrap().bool_values);
+ verify_null_mask(&column.null_mask, vec![true, false, true]);
+
+ let column = &columns[6];
+ assert_eq!("year_of_service", column.column_name);
+ assert_eq!(Some(ColumnDataType::Uint64 as i32), column.datatype);
+ assert_eq!(SemanticType::Field as i32, column.semantic_type);
+ assert_eq!(vec![2], column.values.as_ref().unwrap().u64_values);
+ verify_null_mask(&column.null_mask, vec![true, false, true]);
+
+ let column = &columns[7];
+ assert_eq!("temperature", column.column_name);
+ assert_eq!(Some(ColumnDataType::Int64 as i32), column.datatype);
+ assert_eq!(SemanticType::Field as i32, column.semantic_type);
+ assert_eq!(vec![4], column.values.as_ref().unwrap().i64_values);
+ verify_null_mask(&column.null_mask, vec![true, false, true]);
+
+ let column = &columns[8];
+ assert_eq!("cpu_core_num", column.column_name);
+ assert_eq!(Some(ColumnDataType::Uint64 as i32), column.datatype);
+ assert_eq!(SemanticType::Field as i32, column.semantic_type);
+ assert_eq!(vec![16], column.values.as_ref().unwrap().u64_values);
+ verify_null_mask(&column.null_mask, vec![true, true, false]);
+ }
+
+ fn verify_null_mask(data: &[u8], expected: Vec<bool>) {
+ let bitvec = BitVec::from_slice(data);
+ for (idx, b) in expected.iter().enumerate() {
+ assert_eq!(b, bitvec.get(idx).unwrap())
+ }
+ }
+
+ #[test]
+ fn test_to_ms() {
+ assert_eq!(100, to_ms_ts(Precision::NANOSECOND, 100110000));
+ assert_eq!(100110, to_ms_ts(Precision::MICROSECOND, 100110000));
+ assert_eq!(100110000, to_ms_ts(Precision::MILLISECOND, 100110000));
+ assert_eq!(
+ 100110000 * 1000 * 60,
+ to_ms_ts(Precision::MINUTE, 100110000)
+ );
+ assert_eq!(
+ 100110000 * 1000 * 60 * 60,
+ to_ms_ts(Precision::HOUR, 100110000)
+ );
+ }
+}
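A short sketch of the bookkeeping in LinesWriter::commit above: after a line is committed, every column that did not receive a value for that line gets a true (null) bit appended, keeping all masks aligned with row_count. Plain Vec<bool> stands in for BitVec here.

// Pads each column's null mask up to the committed row count.
fn pad_nulls(row_count: usize, null_masks: &mut [Vec<bool>]) {
    for mask in null_masks.iter_mut() {
        if row_count > mask.len() {
            mask.push(true);
        }
    }
}

fn main() {
    // Two columns; only the first received a value for the current row.
    let mut masks = vec![vec![false], vec![]];
    pad_nulls(1, &mut masks);
    assert_eq!(masks, vec![vec![false], vec![true]]);
}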
diff --git a/src/datanode/src/server/grpc/insert.rs b/src/datanode/src/server/grpc/insert.rs
index 60bb309c3829..e528ba3f9f90 100644
--- a/src/datanode/src/server/grpc/insert.rs
+++ b/src/datanode/src/server/grpc/insert.rs
@@ -67,12 +67,10 @@ pub fn insertion_expr_to_request(
}
fn insert_batches(bytes_vec: Vec<Vec<u8>>) -> Result<Vec<InsertBatch>> {
- let mut insert_batches = Vec::with_capacity(bytes_vec.len());
-
- for bytes in bytes_vec {
- insert_batches.push(bytes.deref().try_into().context(DecodeInsertSnafu)?);
- }
- Ok(insert_batches)
+ bytes_vec
+ .iter()
+ .map(|bytes| bytes.deref().try_into().context(DecodeInsertSnafu))
+ .collect()
}
fn add_values_to_builder(
diff --git a/src/datanode/src/server/grpc/select.rs b/src/datanode/src/server/grpc/select.rs
index 936b2eede4b2..dab83caff0f4 100644
--- a/src/datanode/src/server/grpc/select.rs
+++ b/src/datanode/src/server/grpc/select.rs
@@ -129,7 +129,6 @@ macro_rules! convert_arrow_array_to_grpc_vals {
_ => unimplemented!(),
}
};
-
}
pub fn values(arrays: &[Arc<dyn Array>]) -> Result<Values> {
diff --git a/src/frontend/src/frontend.rs b/src/frontend/src/frontend.rs
index 9ca7e6008102..29bf944ad440 100644
--- a/src/frontend/src/frontend.rs
+++ b/src/frontend/src/frontend.rs
@@ -4,6 +4,7 @@ use serde::{Deserialize, Serialize};
use snafu::prelude::*;
use crate::error::{self, Result};
+use crate::influxdb::InfluxdbOptions;
use crate::instance::Instance;
use crate::mysql::MysqlOptions;
use crate::opentsdb::OpentsdbOptions;
@@ -17,6 +18,7 @@ pub struct FrontendOptions {
pub mysql_options: Option<MysqlOptions>,
pub postgres_options: Option<PostgresOptions>,
pub opentsdb_options: Option<OpentsdbOptions>,
+ pub influxdb_options: Option<InfluxdbOptions>,
}
impl Default for FrontendOptions {
@@ -27,6 +29,7 @@ impl Default for FrontendOptions {
mysql_options: Some(MysqlOptions::default()),
postgres_options: Some(PostgresOptions::default()),
opentsdb_options: Some(OpentsdbOptions::default()),
+ influxdb_options: Some(InfluxdbOptions::default()),
}
}
}
diff --git a/src/frontend/src/influxdb.rs b/src/frontend/src/influxdb.rs
new file mode 100644
index 000000000000..da96bc99890e
--- /dev/null
+++ b/src/frontend/src/influxdb.rs
@@ -0,0 +1,23 @@
+use serde::{Deserialize, Serialize};
+
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct InfluxdbOptions {
+ pub enable: bool,
+}
+
+impl Default for InfluxdbOptions {
+ fn default() -> Self {
+ Self { enable: true }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::InfluxdbOptions;
+
+ #[test]
+ fn test_influxdb_options() {
+ let default = InfluxdbOptions::default();
+ assert!(default.enable);
+ }
+}
diff --git a/src/frontend/src/instance.rs b/src/frontend/src/instance.rs
index 38ada0f2c0d9..47f1253fd0ec 100644
--- a/src/frontend/src/instance.rs
+++ b/src/frontend/src/instance.rs
@@ -1,3 +1,4 @@
+mod influxdb;
mod opentsdb;
use std::collections::HashMap;
diff --git a/src/frontend/src/instance/influxdb.rs b/src/frontend/src/instance/influxdb.rs
new file mode 100644
index 000000000000..a0832c37277f
--- /dev/null
+++ b/src/frontend/src/instance/influxdb.rs
@@ -0,0 +1,45 @@
+use api::v1::{insert_expr::Expr, InsertExpr};
+use async_trait::async_trait;
+use common_error::prelude::BoxedError;
+use servers::influxdb::InfluxdbRequest;
+use servers::{
+ error::ExecuteQuerySnafu, influxdb::InsertBatches, query_handler::InfluxdbLineProtocolHandler,
+};
+use snafu::ResultExt;
+
+use crate::error::RequestDatanodeSnafu;
+use crate::error::Result;
+use crate::instance::Instance;
+
+#[async_trait]
+impl InfluxdbLineProtocolHandler for Instance {
+ async fn exec(&self, request: &InfluxdbRequest) -> servers::error::Result<()> {
+ // TODO(fys): use batch insert
+ self.do_insert(request.try_into()?)
+ .await
+ .map_err(BoxedError::new)
+ .context(ExecuteQuerySnafu {
+ query: &request.lines,
+ })?;
+ Ok(())
+ }
+}
+
+impl Instance {
+ async fn do_insert(&self, insert_batches: InsertBatches) -> Result<()> {
+ for (table_name, batch) in insert_batches.data {
+ let expr = Expr::Values(api::v1::insert_expr::Values {
+ values: vec![batch.into()],
+ });
+ let _object_result = self
+ .db
+ .insert(InsertExpr {
+ table_name,
+ expr: Some(expr),
+ })
+ .await
+ .context(RequestDatanodeSnafu)?;
+ }
+ Ok(())
+ }
+}
diff --git a/src/frontend/src/lib.rs b/src/frontend/src/lib.rs
index 932e213a87df..1c26a86ee19d 100644
--- a/src/frontend/src/lib.rs
+++ b/src/frontend/src/lib.rs
@@ -2,6 +2,7 @@
pub mod error;
pub mod frontend;
+pub mod influxdb;
pub mod instance;
pub mod mysql;
pub mod opentsdb;
diff --git a/src/frontend/src/server.rs b/src/frontend/src/server.rs
index 44674f791f13..99686041d287 100644
--- a/src/frontend/src/server.rs
+++ b/src/frontend/src/server.rs
@@ -13,6 +13,7 @@ use tokio::try_join;
use crate::error::{self, Result};
use crate::frontend::FrontendOptions;
+use crate::influxdb::InfluxdbOptions;
use crate::instance::InstanceRef;
pub(crate) struct Services;
@@ -91,6 +92,12 @@ impl Services {
if opentsdb_server_and_addr.is_some() {
http_server.set_opentsdb_handler(instance.clone());
}
+ if matches!(
+ opts.influxdb_options,
+ Some(InfluxdbOptions { enable: true })
+ ) {
+ http_server.set_influxdb_handler(instance.clone());
+ }
Some((Box::new(http_server) as _, http_addr))
} else {
diff --git a/src/servers/Cargo.toml b/src/servers/Cargo.toml
index 95852cd68ca4..6767d0fd17a4 100644
--- a/src/servers/Cargo.toml
+++ b/src/servers/Cargo.toml
@@ -9,7 +9,10 @@ async-trait = "0.1"
axum = "0.6.0-rc.2"
axum-macros = "0.3.0-rc.1"
bytes = "1.2"
+client = { path = "../client" }
+common-base = { path = "../common/base" }
common-error = { path = "../common/error" }
+common-grpc = { path = "../common/grpc" }
common-query = { path = "../common/query" }
common-recordbatch = { path = "../common/recordbatch" }
common-runtime = { path = "../common/runtime" }
@@ -19,6 +22,7 @@ datatypes = { path = "../datatypes" }
futures = "0.3"
hex = { version = "0.4" }
hyper = { version = "0.14", features = ["full"] }
+influxdb_line_protocol = { git = "https://github.com/evenyag/influxdb_iox", branch = "feat/line-protocol" }
metrics = "0.20"
num_cpus = "1.13"
opensrv-mysql = "0.1"
@@ -27,6 +31,7 @@ query = { path = "../query" }
serde = "1.0"
serde_json = "1.0"
snafu = { version = "0.7", features = ["backtraces"] }
+table = { path = "../table" }
tokio = { version = "1.20", features = ["full"] }
tokio-stream = { version = "0.1", features = ["net"] }
tonic = "0.8"
diff --git a/src/servers/src/error.rs b/src/servers/src/error.rs
index 946b5e08f746..91312bf9e2b5 100644
--- a/src/servers/src/error.rs
+++ b/src/servers/src/error.rs
@@ -2,8 +2,10 @@ use std::any::Any;
use std::net::SocketAddr;
use axum::http::StatusCode as HttpStatusCode;
-use axum::response::{IntoResponse, Response};
-use axum::Json;
+use axum::{
+ response::{IntoResponse, Response},
+ Json,
+};
use common_error::prelude::*;
use serde_json::json;
@@ -76,6 +78,21 @@ pub enum Error {
backtrace: Backtrace,
},
+ #[snafu(display("Failed to parse InfluxDB line protocol, source: {}", source))]
+ InfluxdbLineProtocol {
+ #[snafu(backtrace)]
+ source: influxdb_line_protocol::Error,
+ },
+
+ #[snafu(display("Failed to write InfluxDB line protocol, source: {}", source))]
+ InfluxdbLinesWrite {
+ #[snafu(backtrace)]
+ source: common_grpc::error::Error,
+ },
+
+ #[snafu(display("Failed to convert time precision, name: {}", name))]
+ TimePrecision { name: String, backtrace: Backtrace },
+
#[snafu(display("Connection reset by peer"))]
ConnResetByPeer { backtrace: Backtrace },
@@ -128,10 +145,13 @@ impl ErrorExt for Error {
NotSupported { .. }
| InvalidQuery { .. }
+ | InfluxdbLineProtocol { .. }
| ConnResetByPeer { .. }
| InvalidOpentsdbLine { .. }
- | InvalidOpentsdbJsonRequest { .. } => StatusCode::InvalidArguments,
+ | InvalidOpentsdbJsonRequest { .. }
+ | TimePrecision { .. } => StatusCode::InvalidArguments,
+ InfluxdbLinesWrite { source, .. } => source.status_code(),
Hyper { .. } => StatusCode::Unknown,
}
}
@@ -160,9 +180,12 @@ impl From<std::io::Error> for Error {
impl IntoResponse for Error {
fn into_response(self) -> Response {
let (status, error_message) = match self {
- Error::InvalidOpentsdbLine { .. }
+ Error::InfluxdbLineProtocol { .. }
+ | Error::InfluxdbLinesWrite { .. }
+ | Error::InvalidOpentsdbLine { .. }
| Error::InvalidOpentsdbJsonRequest { .. }
- | Error::InvalidQuery { .. } => (HttpStatusCode::BAD_REQUEST, self.to_string()),
+ | Error::InvalidQuery { .. }
+ | Error::TimePrecision { .. } => (HttpStatusCode::BAD_REQUEST, self.to_string()),
_ => (HttpStatusCode::INTERNAL_SERVER_ERROR, self.to_string()),
};
let body = Json(json!({
diff --git a/src/servers/src/http.rs b/src/servers/src/http.rs
index a9922f49d1dd..9f31d19f6b22 100644
--- a/src/servers/src/http.rs
+++ b/src/servers/src/http.rs
@@ -1,4 +1,5 @@
pub mod handler;
+pub mod influxdb;
pub mod opentsdb;
use std::net::SocketAddr;
@@ -19,15 +20,17 @@ use snafu::ResultExt;
use tower::{timeout::TimeoutLayer, ServiceBuilder};
use tower_http::trace::TraceLayer;
+use self::influxdb::influxdb_write;
use crate::error::{Result, StartHttpSnafu};
-use crate::query_handler::OpentsdbProtocolHandlerRef;
use crate::query_handler::SqlQueryHandlerRef;
+use crate::query_handler::{InfluxdbLineProtocolHandlerRef, OpentsdbProtocolHandlerRef};
use crate::server::Server;
const HTTP_API_VERSION: &str = "v1";
pub struct HttpServer {
sql_handler: SqlQueryHandlerRef,
+ influxdb_handler: Option<InfluxdbLineProtocolHandlerRef>,
opentsdb_handler: Option<OpentsdbProtocolHandlerRef>,
}
@@ -121,6 +124,7 @@ impl HttpServer {
Self {
sql_handler,
opentsdb_handler: None,
+ influxdb_handler: None,
}
}
@@ -132,6 +136,14 @@ impl HttpServer {
self.opentsdb_handler.get_or_insert(handler);
}
+ pub fn set_influxdb_handler(&mut self, handler: InfluxdbLineProtocolHandlerRef) {
+ debug_assert!(
+ self.influxdb_handler.is_none(),
+ "Influxdb line protocol handler can be set only once!"
+ );
+ self.influxdb_handler.get_or_insert(handler);
+ }
+
pub fn make_app(&self) -> Router {
// TODO(LFC): Use released Axum.
// Axum version 0.6 introduces state within router, making router methods far more elegant
@@ -148,12 +160,20 @@ impl HttpServer {
let mut router = Router::new().nest(&format!("/{}", HTTP_API_VERSION), sql_router);
if let Some(opentsdb_handler) = self.opentsdb_handler.clone() {
- let opentsdb_router = Router::with_state(opentsdb_handler.clone())
+ let opentsdb_router = Router::with_state(opentsdb_handler)
.route("/api/put", routing::post(opentsdb::put));
router = router.nest(&format!("/{}/opentsdb", HTTP_API_VERSION), opentsdb_router);
}
+ // TODO(fys): Creating influxdb's database when we can create greptime schema.
+ if let Some(influxdb_handler) = self.influxdb_handler.clone() {
+ let influxdb_router =
+ Router::with_state(influxdb_handler).route("/write", routing::post(influxdb_write));
+
+ router = router.nest(&format!("/{}/influxdb", HTTP_API_VERSION), influxdb_router);
+ }
+
router
.route("/metrics", routing::get(handler::metrics))
// middlewares
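Given HTTP_API_VERSION = "v1" earlier in this file, the nesting above composes the public paths shown below; this is only a quick, self-contained check of the string composition, the routing itself is axum's.

fn main() {
    let version = "v1";
    // InfluxDB line-protocol writes land under the versioned prefix.
    assert_eq!(
        format!("/{}/influxdb{}", version, "/write"),
        "/v1/influxdb/write"
    );
    // The OpenTSDB put endpoint composes the same way.
    assert_eq!(
        format!("/{}/opentsdb{}", version, "/api/put"),
        "/v1/opentsdb/api/put"
    );
}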
diff --git a/src/servers/src/http/influxdb.rs b/src/servers/src/http/influxdb.rs
new file mode 100644
index 000000000000..b9c68fbd8f49
--- /dev/null
+++ b/src/servers/src/http/influxdb.rs
@@ -0,0 +1,59 @@
+use std::collections::HashMap;
+
+use axum::extract::{Query, State};
+use axum::http::StatusCode;
+use common_grpc::writer::Precision;
+
+use crate::error::Result;
+use crate::error::TimePrecisionSnafu;
+use crate::http::HttpResponse;
+use crate::influxdb::InfluxdbRequest;
+use crate::query_handler::InfluxdbLineProtocolHandlerRef;
+
+#[axum_macros::debug_handler]
+pub async fn influxdb_write(
+ State(handler): State<InfluxdbLineProtocolHandlerRef>,
+ Query(params): Query<HashMap<String, String>>,
+ lines: String,
+) -> Result<(StatusCode, HttpResponse)> {
+ let precision = params
+ .get("precision")
+ .map(|val| parse_time_precision(val))
+ .transpose()?;
+ let request = InfluxdbRequest { precision, lines };
+ handler.exec(&request).await?;
+ Ok((StatusCode::NO_CONTENT, HttpResponse::Text("".to_string())))
+}
+
+fn parse_time_precision(value: &str) -> Result<Precision> {
+ match value {
+ "n" => Ok(Precision::NANOSECOND),
+ "u" => Ok(Precision::MICROSECOND),
+ "ms" => Ok(Precision::MILLISECOND),
+ "s" => Ok(Precision::SECOND),
+ "m" => Ok(Precision::MINUTE),
+ "h" => Ok(Precision::HOUR),
+ unknown => TimePrecisionSnafu {
+ name: unknown.to_string(),
+ }
+ .fail(),
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use common_grpc::writer::Precision;
+
+ use crate::http::influxdb::parse_time_precision;
+
+ #[test]
+ fn test_parse_time_precision() {
+ assert_eq!(Precision::NANOSECOND, parse_time_precision("n").unwrap());
+ assert_eq!(Precision::MICROSECOND, parse_time_precision("u").unwrap());
+ assert_eq!(Precision::MILLISECOND, parse_time_precision("ms").unwrap());
+ assert_eq!(Precision::SECOND, parse_time_precision("s").unwrap());
+ assert_eq!(Precision::MINUTE, parse_time_precision("m").unwrap());
+ assert_eq!(Precision::HOUR, parse_time_precision("h").unwrap());
+ assert!(parse_time_precision("unknown").is_err());
+ }
+}
diff --git a/src/servers/src/influxdb.rs b/src/servers/src/influxdb.rs
new file mode 100644
index 000000000000..e25e0b214e76
--- /dev/null
+++ b/src/servers/src/influxdb.rs
@@ -0,0 +1,269 @@
+use std::collections::HashMap;
+
+use common_grpc::writer::{LinesWriter, Precision};
+use influxdb_line_protocol::{parse_lines, FieldValue};
+use snafu::ResultExt;
+
+use crate::error::{Error, InfluxdbLineProtocolSnafu, InfluxdbLinesWriteSnafu};
+
+pub const INFLUXDB_TIMESTAMP_COLUMN_NAME: &str = "ts";
+pub const DEFAULT_TIME_PRECISION: Precision = Precision::NANOSECOND;
+
+pub struct InfluxdbRequest {
+ pub precision: Option<Precision>,
+ pub lines: String,
+}
+
+type TableName = String;
+
+pub struct InsertBatches {
+ pub data: Vec<(TableName, api::v1::codec::InsertBatch)>,
+}
+
+impl TryFrom<&InfluxdbRequest> for InsertBatches {
+ type Error = Error;
+
+ fn try_from(value: &InfluxdbRequest) -> std::result::Result<Self, Self::Error> {
+ let mut writers: HashMap<TableName, LinesWriter> = HashMap::new();
+ let lines = parse_lines(&value.lines)
+ .collect::<influxdb_line_protocol::Result<Vec<_>>>()
+ .context(InfluxdbLineProtocolSnafu)?;
+ let line_len = lines.len();
+
+ for line in lines {
+ let table_name = line.series.measurement;
+ let writer = writers
+ .entry(table_name.to_string())
+ .or_insert_with(|| LinesWriter::with_lines(line_len));
+
+ let tags = line.series.tag_set;
+ if let Some(tags) = tags {
+ for (k, v) in tags {
+ writer
+ .write_tag(k.as_str(), v.as_str())
+ .context(InfluxdbLinesWriteSnafu)?;
+ }
+ }
+
+ let fields = line.field_set;
+ for (k, v) in fields {
+ let column_name = k.as_str();
+ match v {
+ FieldValue::I64(value) => {
+ writer
+ .write_i64(column_name, value)
+ .context(InfluxdbLinesWriteSnafu)?;
+ }
+ FieldValue::U64(value) => {
+ writer
+ .write_u64(column_name, value)
+ .context(InfluxdbLinesWriteSnafu)?;
+ }
+ FieldValue::F64(value) => {
+ writer
+ .write_f64(column_name, value)
+ .context(InfluxdbLinesWriteSnafu)?;
+ }
+ FieldValue::String(value) => {
+ writer
+ .write_string(column_name, value.as_str())
+ .context(InfluxdbLinesWriteSnafu)?;
+ }
+ FieldValue::Boolean(value) => {
+ writer
+ .write_bool(column_name, value)
+ .context(InfluxdbLinesWriteSnafu)?;
+ }
+ }
+ }
+
+ if let Some(timestamp) = line.timestamp {
+ let precision = if let Some(val) = &value.precision {
+ *val
+ } else {
+ DEFAULT_TIME_PRECISION
+ };
+ writer
+ .write_ts(INFLUXDB_TIMESTAMP_COLUMN_NAME, (timestamp, precision))
+ .context(InfluxdbLinesWriteSnafu)?;
+ }
+
+ writer.commit();
+ }
+
+ Ok(InsertBatches {
+ data: writers
+ .into_iter()
+ .map(|(table_name, writer)| (table_name, writer.finish()))
+ .collect(),
+ })
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use api::v1::{
+ codec::InsertBatch,
+ column::{SemanticType, Values},
+ Column, ColumnDataType,
+ };
+ use common_base::BitVec;
+
+ use super::InsertBatches;
+ use crate::influxdb::InfluxdbRequest;
+
+ #[test]
+ fn test_convert_influxdb_lines() {
+ let lines = r"
+monitor1,host=host1 cpu=66.6,memory=1024 1663840496100023100
+monitor1,host=host2 memory=1027 1663840496400340001
+monitor2,host=host3 cpu=66.5 1663840496100023102
+monitor2,host=host4 cpu=66.3,memory=1029 1663840496400340003";
+
+ let influxdb_req = &InfluxdbRequest {
+ precision: None,
+ lines: lines.to_string(),
+ };
+
+ let insert_batches: InsertBatches = influxdb_req.try_into().unwrap();
+ let insert_batches = insert_batches.data;
+
+ assert_eq!(2, insert_batches.len());
+
+ for (table_name, insert_batch) in &insert_batches {
+ if table_name == "monitor1" {
+ assert_monitor_1(insert_batch);
+ } else if table_name == "monitor2" {
+ assert_monitor_2(insert_batch);
+ } else {
+ panic!()
+ }
+ }
+ }
+
+ fn assert_monitor_1(insert_batch: &InsertBatch) {
+ let columns = &insert_batch.columns;
+ assert_eq!(4, columns.len());
+ verify_column(
+ &columns[0],
+ "host",
+ ColumnDataType::String,
+ SemanticType::Tag,
+ Vec::new(),
+ Values {
+ string_values: vec!["host1".to_string(), "host2".to_string()],
+ ..Default::default()
+ },
+ );
+
+ verify_column(
+ &columns[1],
+ "cpu",
+ ColumnDataType::Float64,
+ SemanticType::Field,
+ vec![false, true],
+ Values {
+ f64_values: vec![66.6],
+ ..Default::default()
+ },
+ );
+
+ verify_column(
+ &columns[2],
+ "memory",
+ ColumnDataType::Float64,
+ SemanticType::Field,
+ Vec::new(),
+ Values {
+ f64_values: vec![1024.0, 1027.0],
+ ..Default::default()
+ },
+ );
+
+ verify_column(
+ &columns[3],
+ "ts",
+ ColumnDataType::Timestamp,
+ SemanticType::Timestamp,
+ Vec::new(),
+ Values {
+ ts_millis_values: vec![1663840496100, 1663840496400],
+ ..Default::default()
+ },
+ );
+ }
+
+ fn assert_monitor_2(insert_batch: &InsertBatch) {
+ let columns = &insert_batch.columns;
+ assert_eq!(4, columns.len());
+ verify_column(
+ &columns[0],
+ "host",
+ ColumnDataType::String,
+ SemanticType::Tag,
+ Vec::new(),
+ Values {
+ string_values: vec!["host3".to_string(), "host4".to_string()],
+ ..Default::default()
+ },
+ );
+
+ verify_column(
+ &columns[1],
+ "cpu",
+ ColumnDataType::Float64,
+ SemanticType::Field,
+ Vec::new(),
+ Values {
+ f64_values: vec![66.5, 66.3],
+ ..Default::default()
+ },
+ );
+
+ verify_column(
+ &columns[2],
+ "ts",
+ ColumnDataType::Timestamp,
+ SemanticType::Timestamp,
+ Vec::new(),
+ Values {
+ ts_millis_values: vec![1663840496100, 1663840496400],
+ ..Default::default()
+ },
+ );
+
+ verify_column(
+ &columns[3],
+ "memory",
+ ColumnDataType::Float64,
+ SemanticType::Field,
+ vec![true, false],
+ Values {
+ f64_values: vec![1029.0],
+ ..Default::default()
+ },
+ );
+ }
+
+ fn verify_column(
+ column: &Column,
+ name: &str,
+ datatype: ColumnDataType,
+ semantic_type: SemanticType,
+ null_mask: Vec<bool>,
+ vals: Values,
+ ) {
+ assert_eq!(name, column.column_name);
+ assert_eq!(Some(datatype as i32), column.datatype);
+ assert_eq!(semantic_type as i32, column.semantic_type);
+ verify_null_mask(&column.null_mask, null_mask);
+ assert_eq!(Some(vals), column.values);
+ }
+
+ fn verify_null_mask(data: &[u8], expected: Vec<bool>) {
+ let bitvec = BitVec::from_slice(data);
+ for (idx, b) in expected.iter().enumerate() {
+ assert_eq!(b, bitvec.get(idx).unwrap())
+ }
+ }
+}
diff --git a/src/servers/src/lib.rs b/src/servers/src/lib.rs
index 5b75f6e4b7ae..af3b2578823e 100644
--- a/src/servers/src/lib.rs
+++ b/src/servers/src/lib.rs
@@ -3,6 +3,7 @@
pub mod error;
pub mod grpc;
pub mod http;
+pub mod influxdb;
pub mod mysql;
pub mod opentsdb;
pub mod postgres;
diff --git a/src/servers/src/query_handler.rs b/src/servers/src/query_handler.rs
index 74400a46b35b..690bbf105093 100644
--- a/src/servers/src/query_handler.rs
+++ b/src/servers/src/query_handler.rs
@@ -5,6 +5,7 @@ use async_trait::async_trait;
use common_query::Output;
use crate::error::Result;
+use crate::influxdb::InfluxdbRequest;
use crate::opentsdb::codec::DataPoint;
/// All query handler traits for various request protocols, like SQL or GRPC.
@@ -21,6 +22,7 @@ pub type SqlQueryHandlerRef = Arc<dyn SqlQueryHandler + Send + Sync>;
pub type GrpcQueryHandlerRef = Arc<dyn GrpcQueryHandler + Send + Sync>;
pub type GrpcAdminHandlerRef = Arc<dyn GrpcAdminHandler + Send + Sync>;
pub type OpentsdbProtocolHandlerRef = Arc<dyn OpentsdbProtocolHandler + Send + Sync>;
+pub type InfluxdbLineProtocolHandlerRef = Arc<dyn InfluxdbLineProtocolHandler + Send + Sync>;
#[async_trait]
pub trait SqlQueryHandler {
@@ -39,6 +41,13 @@ pub trait GrpcAdminHandler {
async fn exec_admin_request(&self, expr: AdminExpr) -> Result<AdminResult>;
}
+#[async_trait]
+pub trait InfluxdbLineProtocolHandler {
+ /// A successful request will not return a response.
+ /// Only on error will the socket return a line of data.
+ async fn exec(&self, request: &InfluxdbRequest) -> Result<()>;
+}
+
#[async_trait]
pub trait OpentsdbProtocolHandler {
/// A successful request will not return a response.
diff --git a/src/servers/tests/http/influxdb_test.rs b/src/servers/tests/http/influxdb_test.rs
new file mode 100644
index 000000000000..662dbafa90f6
--- /dev/null
+++ b/src/servers/tests/http/influxdb_test.rs
@@ -0,0 +1,82 @@
+use std::sync::Arc;
+
+use async_trait::async_trait;
+use axum::Router;
+use axum_test_helper::TestClient;
+use common_query::Output;
+use servers::error::Result;
+use servers::http::HttpServer;
+use servers::influxdb::{InfluxdbRequest, InsertBatches};
+use servers::query_handler::{InfluxdbLineProtocolHandler, SqlQueryHandler};
+use tokio::sync::mpsc;
+
+struct DummyInstance {
+ tx: mpsc::Sender<String>,
+}
+
+#[async_trait]
+impl InfluxdbLineProtocolHandler for DummyInstance {
+ async fn exec(&self, request: &InfluxdbRequest) -> Result<()> {
+ let batches: InsertBatches = request.try_into()?;
+
+ for (table_name, _) in batches.data {
+ let _ = self.tx.send(table_name).await;
+ }
+
+ Ok(())
+ }
+}
+
+#[async_trait]
+impl SqlQueryHandler for DummyInstance {
+ async fn do_query(&self, _query: &str) -> Result<Output> {
+ unimplemented!()
+ }
+
+ async fn insert_script(&self, _name: &str, _script: &str) -> Result<()> {
+ unimplemented!()
+ }
+
+ async fn execute_script(&self, _name: &str) -> Result<Output> {
+ unimplemented!()
+ }
+}
+
+fn make_test_app(tx: mpsc::Sender<String>) -> Router {
+ let instance = Arc::new(DummyInstance { tx });
+ let mut server = HttpServer::new(instance.clone());
+ server.set_influxdb_handler(instance);
+ server.make_app()
+}
+
+#[tokio::test]
+async fn test_influxdb_write() {
+ let (tx, mut rx) = mpsc::channel(100);
+
+ let app = make_test_app(tx);
+ let client = TestClient::new(app);
+
+ // right request
+ let result = client
+ .post("/v1/influxdb/write")
+ .body("monitor,host=host1 cpu=1.2 1664370459457010101")
+ .send()
+ .await;
+ assert_eq!(result.status(), 204);
+ assert!(result.text().await.is_empty());
+
+ // bad request
+ let result = client
+ .post("/v1/influxdb/write")
+ .body("monitor, host=host1 cpu=1.2 1664370459457010101")
+ .send()
+ .await;
+ assert_eq!(result.status(), 400);
+ assert!(!result.text().await.is_empty());
+
+ let mut metrics = vec![];
+ while let Ok(s) = rx.try_recv() {
+ metrics.push(s);
+ }
+ assert_eq!(metrics, vec!["monitor".to_string()]);
+}
diff --git a/src/servers/tests/http/mod.rs b/src/servers/tests/http/mod.rs
index 5e8292dcebf9..eecf87523c57 100644
--- a/src/servers/tests/http/mod.rs
+++ b/src/servers/tests/http/mod.rs
@@ -1,2 +1,3 @@
mod http_handler_test;
+mod influxdb_test;
mod opentsdb_test;
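For orientation, a minimal usage sketch of the InsertBatches conversion added in this diff; the helper name collect_table_names is hypothetical and error handling is simplified, but the types and the TryFrom impl are the ones shown above.

// Hypothetical caller of the conversion defined in src/servers/src/influxdb.rs above.
use servers::error::Result;
use servers::influxdb::{InfluxdbRequest, InsertBatches};

fn collect_table_names(lines: &str) -> Result<Vec<String>> {
    let request = InfluxdbRequest {
        // None falls back to DEFAULT_TIME_PRECISION (nanoseconds).
        precision: None,
        lines: lines.to_string(),
    };
    // The conversion groups parsed lines by measurement, so one InsertBatch
    // is produced per table.
    let batches: InsertBatches = (&request).try_into()?;
    Ok(batches.data.into_iter().map(|(table, _)| table).collect())
}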
|
feat
|
support write data via influxdb line protocol in frontend (#280)
|
191755fc42f45ecc3628c55427e7e5abbecd2ede
|
2024-11-04 13:14:13
|
dennis zhuang
|
fix: data_length, index_length, table_rows in tables (#4927)
| false
|
diff --git a/src/catalog/src/error.rs b/src/catalog/src/error.rs
index 0d9e96ab6a44..c7e6f8b55c01 100644
--- a/src/catalog/src/error.rs
+++ b/src/catalog/src/error.rs
@@ -178,6 +178,12 @@ pub enum Error {
location: Location,
},
+ #[snafu(display("Partition manager not found, it's not expected."))]
+ PartitionManagerNotFound {
+ #[snafu(implicit)]
+ location: Location,
+ },
+
#[snafu(display("Failed to find table partitions"))]
FindPartitions { source: partition::error::Error },
@@ -301,6 +307,7 @@ impl ErrorExt for Error {
| Error::CastManager { .. }
| Error::Json { .. }
| Error::GetInformationExtension { .. }
+ | Error::PartitionManagerNotFound { .. }
| Error::ProcedureIdNotFound { .. } => StatusCode::Unexpected,
Error::ViewPlanColumnsChanged { .. } => StatusCode::InvalidArguments,
diff --git a/src/catalog/src/system_schema/information_schema/partitions.rs b/src/catalog/src/system_schema/information_schema/partitions.rs
index 93d60679901e..4cfeece62637 100644
--- a/src/catalog/src/system_schema/information_schema/partitions.rs
+++ b/src/catalog/src/system_schema/information_schema/partitions.rs
@@ -34,15 +34,14 @@ use datatypes::vectors::{
};
use futures::{StreamExt, TryStreamExt};
use partition::manager::PartitionInfo;
-use partition::partition::PartitionDef;
use snafu::{OptionExt, ResultExt};
-use store_api::storage::{RegionId, ScanRequest, TableId};
+use store_api::storage::{ScanRequest, TableId};
use table::metadata::{TableInfo, TableType};
use super::PARTITIONS;
use crate::error::{
- CreateRecordBatchSnafu, FindPartitionsSnafu, InternalSnafu, Result,
- UpgradeWeakCatalogManagerRefSnafu,
+ CreateRecordBatchSnafu, FindPartitionsSnafu, InternalSnafu, PartitionManagerNotFoundSnafu,
+ Result, UpgradeWeakCatalogManagerRefSnafu,
};
use crate::kvbackend::KvBackendCatalogManager;
use crate::system_schema::information_schema::{InformationTable, Predicates};
@@ -236,7 +235,8 @@ impl InformationSchemaPartitionsBuilder {
let partition_manager = catalog_manager
.as_any()
.downcast_ref::<KvBackendCatalogManager>()
- .map(|catalog_manager| catalog_manager.partition_manager());
+ .map(|catalog_manager| catalog_manager.partition_manager())
+ .context(PartitionManagerNotFoundSnafu)?;
let predicates = Predicates::from_scan_request(&request);
@@ -262,27 +262,10 @@ impl InformationSchemaPartitionsBuilder {
let table_ids: Vec<TableId> =
table_infos.iter().map(|info| info.ident.table_id).collect();
- let mut table_partitions = if let Some(partition_manager) = &partition_manager {
- partition_manager
- .batch_find_table_partitions(&table_ids)
- .await
- .context(FindPartitionsSnafu)?
- } else {
- // Current node must be a standalone instance, contains only one partition by default.
- // TODO(dennis): change it when we support multi-regions for standalone.
- table_ids
- .into_iter()
- .map(|table_id| {
- (
- table_id,
- vec![PartitionInfo {
- id: RegionId::new(table_id, 0),
- partition: PartitionDef::new(vec![], vec![]),
- }],
- )
- })
- .collect()
- };
+ let mut table_partitions = partition_manager
+ .batch_find_table_partitions(&table_ids)
+ .await
+ .context(FindPartitionsSnafu)?;
for table_info in table_infos {
let partitions = table_partitions
diff --git a/src/catalog/src/system_schema/information_schema/tables.rs b/src/catalog/src/system_schema/information_schema/tables.rs
index 976c920b9ab9..b258b857b2db 100644
--- a/src/catalog/src/system_schema/information_schema/tables.rs
+++ b/src/catalog/src/system_schema/information_schema/tables.rs
@@ -12,13 +12,16 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::collections::HashSet;
use std::sync::{Arc, Weak};
use arrow_schema::SchemaRef as ArrowSchemaRef;
-use common_catalog::consts::INFORMATION_SCHEMA_TABLES_TABLE_ID;
+use common_catalog::consts::{INFORMATION_SCHEMA_TABLES_TABLE_ID, MITO_ENGINE};
use common_error::ext::BoxedError;
+use common_meta::datanode::RegionStat;
use common_recordbatch::adapter::RecordBatchStreamAdapter;
use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
+use common_telemetry::error;
use datafusion::execution::TaskContext;
use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
@@ -31,7 +34,7 @@ use datatypes::vectors::{
};
use futures::TryStreamExt;
use snafu::{OptionExt, ResultExt};
-use store_api::storage::{ScanRequest, TableId};
+use store_api::storage::{RegionId, ScanRequest, TableId};
use table::metadata::{TableInfo, TableType};
use super::TABLES;
@@ -39,6 +42,7 @@ use crate::error::{
CreateRecordBatchSnafu, InternalSnafu, Result, UpgradeWeakCatalogManagerRefSnafu,
};
use crate::system_schema::information_schema::{InformationTable, Predicates};
+use crate::system_schema::utils;
use crate::CatalogManager;
pub const TABLE_CATALOG: &str = "table_catalog";
@@ -234,17 +238,50 @@ impl InformationSchemaTablesBuilder {
.context(UpgradeWeakCatalogManagerRefSnafu)?;
let predicates = Predicates::from_scan_request(&request);
+ let information_extension = utils::information_extension(&self.catalog_manager)?;
+
+ // TODO(dennis): the `region_stats` API is not stable in a distributed cluster because of network issues, etc.
+ // But we don't want statements such as `show tables` to fail,
+ // so we use `unwrap_or_else` here instead of the `?` operator.
+ let region_stats = information_extension
+ .region_stats()
+ .await
+ .map_err(|e| {
+ error!(e; "Failed to call region_stats");
+ e
+ })
+ .unwrap_or_else(|_| vec![]);
+
for schema_name in catalog_manager.schema_names(&catalog_name, None).await? {
let mut stream = catalog_manager.tables(&catalog_name, &schema_name, None);
while let Some(table) = stream.try_next().await? {
let table_info = table.table_info();
+
+ // TODO(dennis): make this work for the metric engine
+ let table_region_stats = if table_info.meta.engine == MITO_ENGINE {
+ let region_ids = table_info
+ .meta
+ .region_numbers
+ .iter()
+ .map(|n| RegionId::new(table_info.ident.table_id, *n))
+ .collect::<HashSet<_>>();
+
+ region_stats
+ .iter()
+ .filter(|stat| region_ids.contains(&stat.id))
+ .collect::<Vec<_>>()
+ } else {
+ vec![]
+ };
+
self.add_table(
&predicates,
&catalog_name,
&schema_name,
table_info,
table.table_type(),
+ &table_region_stats,
);
}
}
@@ -260,6 +297,7 @@ impl InformationSchemaTablesBuilder {
schema_name: &str,
table_info: Arc<TableInfo>,
table_type: TableType,
+ region_stats: &[&RegionStat],
) {
let table_name = table_info.name.as_ref();
let table_id = table_info.table_id();
@@ -273,7 +311,9 @@ impl InformationSchemaTablesBuilder {
let row = [
(TABLE_CATALOG, &Value::from(catalog_name)),
+ (TABLE_ID, &Value::from(table_id)),
(TABLE_SCHEMA, &Value::from(schema_name)),
+ (ENGINE, &Value::from(engine)),
(TABLE_NAME, &Value::from(table_name)),
(TABLE_TYPE, &Value::from(table_type_text)),
];
@@ -287,21 +327,39 @@ impl InformationSchemaTablesBuilder {
self.table_names.push(Some(table_name));
self.table_types.push(Some(table_type_text));
self.table_ids.push(Some(table_id));
+
+ let data_length = region_stats.iter().map(|stat| stat.sst_size).sum();
+ let table_rows = region_stats.iter().map(|stat| stat.num_rows).sum();
+ let index_length = region_stats.iter().map(|stat| stat.index_size).sum();
+
+ // It's not precise, but it is acceptable for long-term data storage.
+ let avg_row_length = if table_rows > 0 {
+ let total_data_length = data_length
+ + region_stats
+ .iter()
+ .map(|stat| stat.memtable_size)
+ .sum::<u64>();
+
+ total_data_length / table_rows
+ } else {
+ 0
+ };
+
+ self.data_length.push(Some(data_length));
+ self.index_length.push(Some(index_length));
+ self.table_rows.push(Some(table_rows));
+ self.avg_row_length.push(Some(avg_row_length));
+
// TODO(sunng87): use real data for these fields
- self.data_length.push(Some(0));
self.max_data_length.push(Some(0));
- self.index_length.push(Some(0));
- self.avg_row_length.push(Some(0));
- self.max_index_length.push(Some(0));
self.checksum.push(Some(0));
- self.table_rows.push(Some(0));
+ self.max_index_length.push(Some(0));
self.data_free.push(Some(0));
self.auto_increment.push(Some(0));
self.row_format.push(Some("Fixed"));
self.table_collation.push(Some("utf8_bin"));
self.update_time.push(None);
self.check_time.push(None);
-
// use mariadb default table version number here
self.version.push(Some(11));
self.table_comment.push(table_info.desc.as_deref());
diff --git a/tests/cases/standalone/common/information_schema/region_statistics.result b/tests/cases/standalone/common/information_schema/region_statistics.result
index 6e49679cf172..b4f931300a12 100644
--- a/tests/cases/standalone/common/information_schema/region_statistics.result
+++ b/tests/cases/standalone/common/information_schema/region_statistics.result
@@ -32,6 +32,14 @@ SELECT SUM(region_rows), SUM(disk_size), SUM(sst_size), SUM(index_size)
| 3 | 2145 | 0 | 0 |
+-------------------------------------------------------+-----------------------------------------------------+----------------------------------------------------+------------------------------------------------------+
+SELECT data_length, index_length, avg_row_length, table_rows FROM INFORMATION_SCHEMA.TABLES WHERE table_name = 'test';
+
++-------------+--------------+----------------+------------+
+| data_length | index_length | avg_row_length | table_rows |
++-------------+--------------+----------------+------------+
+| 0 | 0 | 26 | 3 |
++-------------+--------------+----------------+------------+
+
DROP TABLE test;
Affected Rows: 0
diff --git a/tests/cases/standalone/common/information_schema/region_statistics.sql b/tests/cases/standalone/common/information_schema/region_statistics.sql
index 9b6e64890405..ed7a7b0cfcf8 100644
--- a/tests/cases/standalone/common/information_schema/region_statistics.sql
+++ b/tests/cases/standalone/common/information_schema/region_statistics.sql
@@ -21,4 +21,6 @@ SELECT SUM(region_rows), SUM(disk_size), SUM(sst_size), SUM(index_size)
FROM INFORMATION_SCHEMA.REGION_STATISTICS WHERE table_id
IN (SELECT TABLE_ID FROM INFORMATION_SCHEMA.TABLES WHERE table_name = 'test' and table_schema = 'public');
+SELECT data_length, index_length, avg_row_length, table_rows FROM INFORMATION_SCHEMA.TABLES WHERE table_name = 'test';
+
DROP TABLE test;
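For reference, a compact sketch of the per-table statistics aggregation this commit performs; the RegionStat struct below is a simplified, hypothetical stand-in for common_meta::datanode::RegionStat limited to the fields referenced above.

// Hypothetical, simplified stand-in for common_meta::datanode::RegionStat.
struct RegionStat {
    sst_size: u64,
    index_size: u64,
    num_rows: u64,
    memtable_size: u64,
}

// Mirrors the data_length/index_length/table_rows/avg_row_length computation above.
fn table_stats(region_stats: &[RegionStat]) -> (u64, u64, u64, u64) {
    let data_length: u64 = region_stats.iter().map(|s| s.sst_size).sum();
    let index_length: u64 = region_stats.iter().map(|s| s.index_size).sum();
    let table_rows: u64 = region_stats.iter().map(|s| s.num_rows).sum();
    // Include memtable bytes so freshly written rows are not averaged as zero-length.
    let avg_row_length = if table_rows > 0 {
        let total_data_length =
            data_length + region_stats.iter().map(|s| s.memtable_size).sum::<u64>();
        total_data_length / table_rows
    } else {
        0
    };
    (data_length, index_length, table_rows, avg_row_length)
}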
|
fix
|
data_length, index_length, table_rows in tables (#4927)
|
32ad35832383237601069ec450a8766557769865
|
2023-05-22 15:24:02
|
Yingwen
|
fix(table-procedure): Open table in RegisterCatalog state (#1617)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 8daa63e3cec6..32f82ce086e9 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -8688,6 +8688,7 @@ dependencies = [
"common-catalog",
"common-error",
"common-procedure",
+ "common-procedure-test",
"common-telemetry",
"common-test-util",
"datatypes",
diff --git a/src/common/procedure-test/src/lib.rs b/src/common/procedure-test/src/lib.rs
index 45ca79e5f70f..938a2ad91b84 100644
--- a/src/common/procedure-test/src/lib.rs
+++ b/src/common/procedure-test/src/lib.rs
@@ -14,20 +14,32 @@
//! Test utilities for procedures.
+use std::collections::HashMap;
use std::sync::Arc;
use async_trait::async_trait;
use common_procedure::{
- BoxedProcedure, Context, ContextProvider, ProcedureId, ProcedureState, Result, Status,
+ Context, ContextProvider, Procedure, ProcedureId, ProcedureState, ProcedureWithId, Result,
+ Status,
};
-/// A Mock [ContextProvider] that always return [ProcedureState::Done].
-struct MockContextProvider {}
+/// A Mock [ContextProvider].
+#[derive(Default)]
+pub struct MockContextProvider {
+ states: HashMap<ProcedureId, ProcedureState>,
+}
+
+impl MockContextProvider {
+ /// Returns a new provider.
+ pub fn new(states: HashMap<ProcedureId, ProcedureState>) -> MockContextProvider {
+ MockContextProvider { states }
+ }
+}
#[async_trait]
impl ContextProvider for MockContextProvider {
- async fn procedure_state(&self, _procedure_id: ProcedureId) -> Result<Option<ProcedureState>> {
- Ok(Some(ProcedureState::Done))
+ async fn procedure_state(&self, procedure_id: ProcedureId) -> Result<Option<ProcedureState>> {
+ Ok(self.states.get(&procedure_id).cloned())
}
}
@@ -35,10 +47,10 @@ impl ContextProvider for MockContextProvider {
///
/// # Panics
/// Panics if the `procedure` has subprocedure to execute.
-pub async fn execute_procedure_until_done(procedure: &mut BoxedProcedure) {
+pub async fn execute_procedure_until_done(procedure: &mut dyn Procedure) {
let ctx = Context {
procedure_id: ProcedureId::random(),
- provider: Arc::new(MockContextProvider {}),
+ provider: Arc::new(MockContextProvider::default()),
};
loop {
@@ -52,3 +64,53 @@ pub async fn execute_procedure_until_done(procedure: &mut BoxedProcedure) {
}
}
}
+
+/// Executes a procedure once.
+///
+/// Returns whether the procedure is done.
+pub async fn execute_procedure_once(
+ procedure_id: ProcedureId,
+ provider: MockContextProvider,
+ procedure: &mut dyn Procedure,
+) -> bool {
+ let ctx = Context {
+ procedure_id,
+ provider: Arc::new(provider),
+ };
+
+ match procedure.execute(&ctx).await.unwrap() {
+ Status::Executing { .. } => false,
+ Status::Suspended { subprocedures, .. } => {
+ assert!(
+ subprocedures.is_empty(),
+ "Executing subprocedure is unsupported"
+ );
+ false
+ }
+ Status::Done => true,
+ }
+}
+
+/// Executes a procedure until it returns [Status::Suspended] or [Status::Done].
+///
+/// Returns `Some` if it returns [Status::Suspended] or `None` if it returns [Status::Done].
+pub async fn execute_until_suspended_or_done(
+ procedure_id: ProcedureId,
+ provider: MockContextProvider,
+ procedure: &mut dyn Procedure,
+) -> Option<Vec<ProcedureWithId>> {
+ let ctx = Context {
+ procedure_id,
+ provider: Arc::new(provider),
+ };
+
+ loop {
+ match procedure.execute(&ctx).await.unwrap() {
+ Status::Executing { .. } => (),
+ Status::Suspended { subprocedures, .. } => return Some(subprocedures),
+ Status::Done => break,
+ }
+ }
+
+ None
+}
diff --git a/src/common/procedure/src/procedure.rs b/src/common/procedure/src/procedure.rs
index 6eaa075408cf..5cb037fbc0dd 100644
--- a/src/common/procedure/src/procedure.rs
+++ b/src/common/procedure/src/procedure.rs
@@ -97,6 +97,25 @@ pub trait Procedure: Send + Sync {
fn lock_key(&self) -> LockKey;
}
+#[async_trait]
+impl<T: Procedure + ?Sized> Procedure for Box<T> {
+ fn type_name(&self) -> &str {
+ (**self).type_name()
+ }
+
+ async fn execute(&mut self, ctx: &Context) -> Result<Status> {
+ (**self).execute(ctx).await
+ }
+
+ fn dump(&self) -> Result<String> {
+ (**self).dump()
+ }
+
+ fn lock_key(&self) -> LockKey {
+ (**self).lock_key()
+ }
+}
+
/// Keys to identify required locks.
///
/// [LockKey] always sorts keys lexicographically so that they can be acquired
diff --git a/src/mito/src/engine/procedure/create.rs b/src/mito/src/engine/procedure/create.rs
index 7dc87e11bfb5..f6b246eb4029 100644
--- a/src/mito/src/engine/procedure/create.rs
+++ b/src/mito/src/engine/procedure/create.rs
@@ -18,6 +18,7 @@ use std::sync::Arc;
use async_trait::async_trait;
use common_procedure::error::{FromJsonSnafu, ToJsonSnafu};
use common_procedure::{Context, Error, LockKey, Procedure, ProcedureManager, Result, Status};
+use common_telemetry::logging;
use common_telemetry::metric::Timer;
use datatypes::schema::{Schema, SchemaRef};
use serde::{Deserialize, Serialize};
@@ -128,6 +129,8 @@ impl<S: StorageEngine> CreateMitoTable<S> {
/// Checks whether the table exists.
fn on_prepare(&mut self) -> Result<Status> {
let table_ref = self.creator.data.table_ref();
+ logging::debug!("on prepare create table {}", table_ref);
+
if self.creator.engine_inner.get_table(&table_ref).is_some() {
// If the table already exists.
ensure!(
@@ -149,6 +152,7 @@ impl<S: StorageEngine> CreateMitoTable<S> {
async fn on_engine_create_table(&mut self) -> Result<Status> {
// In this state, we can ensure we are able to create a new table.
let table_ref = self.creator.data.table_ref();
+ logging::debug!("on engine create table {}", table_ref);
let _lock = self
.creator
@@ -212,6 +216,8 @@ impl<S: StorageEngine> TableCreator<S> {
return Ok(table.clone());
}
+ logging::debug!("Creator create table {}", table_ref);
+
self.create_regions(&table_dir).await?;
self.write_table_manifest(&table_dir).await
@@ -295,6 +301,13 @@ impl<S: StorageEngine> TableCreator<S> {
.map_err(Error::from_error_ext)?
};
+ logging::debug!(
+ "Create region {} for table {}, region_id: {}",
+ number,
+ self.data.request.table_ref(),
+ region_id
+ );
+
self.regions.insert(*number, region);
}
diff --git a/src/mito/src/table.rs b/src/mito/src/table.rs
index ae573c5384ed..6e65ac70e390 100644
--- a/src/mito/src/table.rs
+++ b/src/mito/src/table.rs
@@ -456,8 +456,13 @@ impl<R: Region> MitoTable<R> {
object_store: ObjectStore,
compress_type: CompressionType,
) -> Result<MitoTable<R>> {
- let manifest =
- TableManifest::create(&table_manifest_dir(table_dir), object_store, compress_type);
+ let manifest_dir = table_manifest_dir(table_dir);
+ let manifest = TableManifest::create(&manifest_dir, object_store, compress_type);
+ logging::info!(
+ "Create table manifest at {}, table_name: {}",
+ manifest_dir,
+ table_name
+ );
let _timer =
common_telemetry::timer!(crate::metrics::MITO_CREATE_TABLE_UPDATE_MANIFEST_ELAPSED);
diff --git a/src/table-procedure/Cargo.toml b/src/table-procedure/Cargo.toml
index e1927afd2eac..b20823e89d45 100644
--- a/src/table-procedure/Cargo.toml
+++ b/src/table-procedure/Cargo.toml
@@ -18,6 +18,7 @@ table = { path = "../table" }
[dev-dependencies]
common-catalog = { path = "../common/catalog" }
+common-procedure-test = { path = "../common/procedure-test" }
common-test-util = { path = "../common/test-util" }
log-store = { path = "../log-store" }
mito = { path = "../mito" }
diff --git a/src/table-procedure/src/create.rs b/src/table-procedure/src/create.rs
index d531ed4370ef..6d65852ce638 100644
--- a/src/table-procedure/src/create.rs
+++ b/src/table-procedure/src/create.rs
@@ -24,7 +24,7 @@ use common_telemetry::logging;
use serde::{Deserialize, Serialize};
use snafu::{OptionExt, ResultExt};
use table::engine::{EngineContext, TableEngineProcedureRef, TableEngineRef, TableReference};
-use table::requests::CreateTableRequest;
+use table::requests::{CreateTableRequest, OpenTableRequest};
use crate::error::{
AccessCatalogSnafu, CatalogNotFoundSnafu, DeserializeProcedureSnafu, SchemaNotFoundSnafu,
@@ -248,12 +248,21 @@ impl CreateTableProcedure {
return Ok(Status::Done);
}
+ // If we recover the procedure from json, the table engine hasn't opened this table yet,
+ // so we need to use `open_table()` instead of `get_table()`.
let engine_ctx = EngineContext::default();
- let table_ref = self.data.table_ref();
- // Safety: The procedure owns the lock so the table should exist.
+ let open_req = OpenTableRequest {
+ catalog_name: self.data.request.catalog_name.clone(),
+ schema_name: self.data.request.schema_name.clone(),
+ table_name: self.data.request.table_name.clone(),
+ table_id: self.data.request.id,
+ region_numbers: self.data.request.region_numbers.clone(),
+ };
+ // Safety: The table is already created.
let table = self
.table_engine
- .get_table(&engine_ctx, &table_ref)
+ .open_table(&engine_ctx, open_req)
+ .await
.map_err(Error::from_error_ext)?
.unwrap();
@@ -274,7 +283,7 @@ impl CreateTableProcedure {
}
/// Represents each step while creating a table in the datanode.
-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
enum CreateTableState {
/// Validate request and prepare to create table.
Prepare,
@@ -310,6 +319,12 @@ impl CreateTableData {
#[cfg(test)]
mod tests {
+ use std::collections::HashMap;
+
+ use common_procedure_test::{
+ execute_procedure_once, execute_procedure_until_done, execute_until_suspended_or_done,
+ MockContextProvider,
+ };
use table::engine::{EngineContext, TableEngine};
use super::*;
@@ -353,4 +368,91 @@ mod tests {
.unwrap()
.is_some());
}
+
+ #[tokio::test]
+ async fn test_recover_register_catalog() {
+ common_telemetry::init_default_ut_logging();
+
+ let TestEnv {
+ dir,
+ table_engine,
+ procedure_manager: _,
+ catalog_manager,
+ } = TestEnv::new("create");
+
+ let table_name = "test_create";
+ let request = test_util::new_create_request(table_name);
+ let procedure = CreateTableProcedure::new(
+ request.clone(),
+ catalog_manager,
+ table_engine.clone(),
+ table_engine.clone(),
+ );
+
+ let table_ref = TableReference {
+ catalog: &request.catalog_name,
+ schema: &request.schema_name,
+ table: &request.table_name,
+ };
+ let engine_ctx = EngineContext::default();
+ assert!(table_engine
+ .get_table(&engine_ctx, &table_ref)
+ .unwrap()
+ .is_none());
+
+ let procedure_id = ProcedureId::random();
+ let mut procedure = Box::new(procedure);
+ // Execute until suspended. We use an empty provider so the parent will submit
+ // a new subprocedure, as it can't find the existing subprocedure.
+ let mut subprocedures = execute_until_suspended_or_done(
+ procedure_id,
+ MockContextProvider::default(),
+ &mut procedure,
+ )
+ .await
+ .unwrap();
+ assert_eq!(1, subprocedures.len());
+ // Execute the subprocedure.
+ let mut subprocedure = subprocedures.pop().unwrap();
+ execute_procedure_until_done(&mut subprocedure.procedure).await;
+ let mut states = HashMap::new();
+ states.insert(subprocedure.id, ProcedureState::Done);
+ // Execute the parent procedure once.
+ execute_procedure_once(
+ procedure_id,
+ MockContextProvider::new(states),
+ &mut procedure,
+ )
+ .await;
+ assert_eq!(CreateTableState::RegisterCatalog, procedure.data.state);
+
+ // Close the table engine and reopen the TestEnv.
+ table_engine.close().await.unwrap();
+ let TestEnv {
+ dir: _dir,
+ table_engine,
+ procedure_manager: _,
+ catalog_manager,
+ } = TestEnv::from_temp_dir(dir);
+
+ // Recover the procedure
+ let json = procedure.dump().unwrap();
+ let procedure = CreateTableProcedure::from_json(
+ &json,
+ catalog_manager,
+ table_engine.clone(),
+ table_engine.clone(),
+ )
+ .unwrap();
+ let mut procedure = Box::new(procedure);
+ assert_eq!(CreateTableState::RegisterCatalog, procedure.data.state);
+ // Execute until done.
+ execute_procedure_until_done(&mut procedure).await;
+
+ // The table is created.
+ assert!(table_engine
+ .get_table(&engine_ctx, &table_ref)
+ .unwrap()
+ .is_some());
+ }
}
diff --git a/src/table-procedure/src/test_util.rs b/src/table-procedure/src/test_util.rs
index 3deff706ecf1..66e99b193669 100644
--- a/src/table-procedure/src/test_util.rs
+++ b/src/table-procedure/src/test_util.rs
@@ -46,6 +46,10 @@ pub struct TestEnv {
impl TestEnv {
pub fn new(prefix: &str) -> TestEnv {
let dir = create_temp_dir(prefix);
+ TestEnv::from_temp_dir(dir)
+ }
+
+ pub fn from_temp_dir(dir: TempDir) -> TestEnv {
let store_dir = format!("{}/db", dir.path().to_string_lossy());
let mut builder = Fs::default();
builder.root(&store_dir);
|
fix
|
Open table in RegisterCatalog state (#1617)
|
d9a96344eecf0d907b2d5c7dff95636a4848fe65
|
2024-02-21 12:31:51
|
tison
|
ci: try fix log location (#3342)
| false
|
diff --git a/.github/actions/build-windows-artifacts/action.yml b/.github/actions/build-windows-artifacts/action.yml
index 879473aa7d93..452bc58c6a0a 100644
--- a/.github/actions/build-windows-artifacts/action.yml
+++ b/.github/actions/build-windows-artifacts/action.yml
@@ -65,7 +65,7 @@ runs:
uses: actions/upload-artifact@v3
with:
name: sqlness-logs
- path: ${{ runner.temp }}/greptime-*.log
+ path: /tmp/greptime-*.log
retention-days: 3
- name: Build greptime binary
diff --git a/.github/workflows/develop.yml b/.github/workflows/develop.yml
index 92f972fbd306..0abdd391c823 100644
--- a/.github/workflows/develop.yml
+++ b/.github/workflows/develop.yml
@@ -1,7 +1,7 @@
on:
merge_group:
pull_request:
- types: [opened, synchronize, reopened, ready_for_review]
+ types: [ opened, synchronize, reopened, ready_for_review ]
paths-ignore:
- 'docs/**'
- 'config/**'
@@ -57,7 +57,7 @@ jobs:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- name: Rust Cache
uses: Swatinem/rust-cache@v2
- with:
+ with:
# Shares across multiple jobs
# Shares with `Clippy` job
shared-key: "check-lint"
@@ -75,7 +75,7 @@ jobs:
toolchain: stable
- name: Rust Cache
uses: Swatinem/rust-cache@v2
- with:
+ with:
# Shares across multiple jobs
shared-key: "check-toml"
- name: Install taplo
@@ -136,13 +136,12 @@ jobs:
run: tar -xvf ./bins.tar.gz
- name: Run sqlness
run: RUST_BACKTRACE=1 ./bins/sqlness-runner -c ./tests/cases --bins-dir ./bins
- # FIXME: Logs cannot found be on failure (or even success). Need to figure out the cause.
- name: Upload sqlness logs
if: always()
uses: actions/upload-artifact@v3
with:
name: sqlness-logs
- path: ${{ runner.temp }}/greptime-*.log
+ path: /tmp/greptime-*.log
retention-days: 3
sqlness-kafka-wal:
@@ -167,13 +166,12 @@ jobs:
run: docker compose -f docker-compose-standalone.yml up -d --wait
- name: Run sqlness
run: RUST_BACKTRACE=1 ./bins/sqlness-runner -w kafka -k 127.0.0.1:9092 -c ./tests/cases --bins-dir ./bins
- # FIXME: Logs cannot be found on failure (or even success). Need to figure out the cause.
- name: Upload sqlness logs
if: always()
uses: actions/upload-artifact@v3
with:
name: sqlness-logs
- path: ${{ runner.temp }}/greptime-*.log
+ path: /tmp/greptime-*.log
retention-days: 3
fmt:
@@ -191,7 +189,7 @@ jobs:
components: rustfmt
- name: Rust Cache
uses: Swatinem/rust-cache@v2
- with:
+ with:
# Shares across multiple jobs
shared-key: "check-rust-fmt"
- name: Run cargo fmt
@@ -212,7 +210,7 @@ jobs:
components: clippy
- name: Rust Cache
uses: Swatinem/rust-cache@v2
- with:
+ with:
# Shares across multiple jobs
# Shares with `Check` job
shared-key: "check-lint"
diff --git a/.github/workflows/nightly-ci.yml b/.github/workflows/nightly-ci.yml
index a11073741f45..71842844782c 100644
--- a/.github/workflows/nightly-ci.yml
+++ b/.github/workflows/nightly-ci.yml
@@ -48,7 +48,7 @@ jobs:
uses: actions/upload-artifact@v3
with:
name: sqlness-logs
- path: ${{ runner.temp }}/greptime-*.log
+ path: /tmp/greptime-*.log
retention-days: 3
test-on-windows:
|
ci
|
try fix log location (#3342)
|
90fcaa84879b26c240c3c4bae32e3c9bea16401d
|
2023-01-10 13:37:26
|
Lei, HUANG
|
feat: expose wal config (#852)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index dcd1024cfb12..15989930a8a5 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1408,6 +1408,7 @@ dependencies = [
"paste",
"serde",
"snafu",
+ "toml",
]
[[package]]
@@ -2169,6 +2170,7 @@ dependencies = [
"datafusion-common",
"datatypes",
"futures",
+ "humantime-serde",
"hyper",
"log-store",
"meta-client",
diff --git a/config/datanode.example.toml b/config/datanode.example.toml
index a8ed29da4b63..6b5c0de8f28a 100644
--- a/config/datanode.example.toml
+++ b/config/datanode.example.toml
@@ -1,12 +1,19 @@
node_id = 42
mode = 'distributed'
rpc_addr = '127.0.0.1:3001'
-wal_dir = '/tmp/greptimedb/wal'
rpc_runtime_size = 8
mysql_addr = '127.0.0.1:4406'
mysql_runtime_size = 4
enable_memory_catalog = false
+[wal]
+dir = "/tmp/greptimedb/wal"
+file_size = '1GB'
+purge_interval = '10m'
+purge_threshold = '50GB'
+read_batch_size = 128
+sync_write = false
+
[storage]
type = 'File'
data_dir = '/tmp/greptimedb/data/'
diff --git a/config/standalone.example.toml b/config/standalone.example.toml
index 54587a6e4d11..af6ca0bcfa83 100644
--- a/config/standalone.example.toml
+++ b/config/standalone.example.toml
@@ -1,12 +1,20 @@
node_id = 0
mode = 'standalone'
-wal_dir = '/tmp/greptimedb/wal/'
enable_memory_catalog = false
[http_options]
addr = '127.0.0.1:4000'
timeout = "30s"
+[wal]
+dir = "/tmp/greptimedb/wal"
+file_size = '1GB'
+purge_interval = '10m'
+purge_threshold = '50GB'
+read_batch_size = 128
+sync_write = false
+
+
[storage]
type = 'File'
data_dir = '/tmp/greptimedb/data/'
diff --git a/src/cmd/src/datanode.rs b/src/cmd/src/datanode.rs
index 00fa25d83c25..299f674437b8 100644
--- a/src/cmd/src/datanode.rs
+++ b/src/cmd/src/datanode.rs
@@ -125,7 +125,7 @@ impl TryFrom<StartCommand> for DatanodeOptions {
}
if let Some(wal_dir) = cmd.wal_dir {
- opts.wal_dir = wal_dir;
+ opts.wal.dir = wal_dir;
}
Ok(opts)
}
@@ -134,6 +134,7 @@ impl TryFrom<StartCommand> for DatanodeOptions {
#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
+ use std::time::Duration;
use datanode::datanode::ObjectStoreConfig;
use servers::Mode;
@@ -151,7 +152,7 @@ mod tests {
};
let options: DatanodeOptions = cmd.try_into().unwrap();
assert_eq!("127.0.0.1:3001".to_string(), options.rpc_addr);
- assert_eq!("/tmp/greptimedb/wal".to_string(), options.wal_dir);
+ assert_eq!("/tmp/greptimedb/wal".to_string(), options.wal.dir);
assert_eq!("127.0.0.1:4406".to_string(), options.mysql_addr);
assert_eq!(4, options.mysql_runtime_size);
let MetaClientOpts {
@@ -216,6 +217,11 @@ mod tests {
..Default::default()
})
.unwrap();
+ assert_eq!("/tmp/greptimedb/wal", dn_opts.wal.dir);
+ assert_eq!(Duration::from_secs(600), dn_opts.wal.purge_interval);
+ assert_eq!(1024 * 1024 * 1024, dn_opts.wal.file_size.0);
+ assert_eq!(1024 * 1024 * 1024 * 50, dn_opts.wal.purge_threshold.0);
+ assert!(!dn_opts.wal.sync_write);
assert_eq!(Some(42), dn_opts.node_id);
let MetaClientOpts {
metasrv_addrs: metasrv_addr,
diff --git a/src/cmd/src/standalone.rs b/src/cmd/src/standalone.rs
index bc16e3ea0ae1..4773bfa9744f 100644
--- a/src/cmd/src/standalone.rs
+++ b/src/cmd/src/standalone.rs
@@ -16,7 +16,7 @@ use std::sync::Arc;
use clap::Parser;
use common_telemetry::info;
-use datanode::datanode::{Datanode, DatanodeOptions, ObjectStoreConfig};
+use datanode::datanode::{Datanode, DatanodeOptions, ObjectStoreConfig, WalConfig};
use datanode::instance::InstanceRef;
use frontend::frontend::{Frontend, FrontendOptions};
use frontend::grpc::GrpcOptions;
@@ -73,7 +73,7 @@ pub struct StandaloneOptions {
pub influxdb_options: Option<InfluxdbOptions>,
pub prometheus_options: Option<PrometheusOptions>,
pub mode: Mode,
- pub wal_dir: String,
+ pub wal: WalConfig,
pub storage: ObjectStoreConfig,
pub enable_memory_catalog: bool,
}
@@ -89,7 +89,7 @@ impl Default for StandaloneOptions {
influxdb_options: Some(InfluxdbOptions::default()),
prometheus_options: Some(PrometheusOptions::default()),
mode: Mode::Standalone,
- wal_dir: "/tmp/greptimedb/wal".to_string(),
+ wal: WalConfig::default(),
storage: ObjectStoreConfig::default(),
enable_memory_catalog: false,
}
@@ -113,7 +113,7 @@ impl StandaloneOptions {
fn datanode_options(self) -> DatanodeOptions {
DatanodeOptions {
- wal_dir: self.wal_dir,
+ wal: self.wal,
storage: self.storage,
enable_memory_catalog: self.enable_memory_catalog,
..Default::default()
diff --git a/src/common/base/Cargo.toml b/src/common/base/Cargo.toml
index cb4e5a7654b0..a4e785ae0be3 100644
--- a/src/common/base/Cargo.toml
+++ b/src/common/base/Cargo.toml
@@ -11,3 +11,6 @@ common-error = { path = "../error" }
paste = "1.0"
serde = { version = "1.0", features = ["derive"] }
snafu.workspace = true
+
+[dev-dependencies]
+toml = "0.5"
diff --git a/src/common/base/src/lib.rs b/src/common/base/src/lib.rs
index c86c2bd472cd..e782b9696717 100644
--- a/src/common/base/src/lib.rs
+++ b/src/common/base/src/lib.rs
@@ -15,5 +15,7 @@
pub mod bit_vec;
pub mod buffer;
pub mod bytes;
+#[allow(clippy::all)]
+pub mod readable_size;
pub use bit_vec::BitVec;
diff --git a/src/common/base/src/readable_size.rs b/src/common/base/src/readable_size.rs
new file mode 100644
index 000000000000..ee428539b4e3
--- /dev/null
+++ b/src/common/base/src/readable_size.rs
@@ -0,0 +1,321 @@
+// Copyright (c) 2017-present, PingCAP, Inc. Licensed under Apache-2.0.
+
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file is copied from https://github.com/tikv/raft-engine/blob/8dd2a39f359ff16f5295f35343f626e0c10132fa/src/util.rs without any modification.
+
+use std::fmt;
+use std::fmt::{Display, Write};
+use std::ops::{Div, Mul};
+use std::str::FromStr;
+
+use serde::de::{Unexpected, Visitor};
+use serde::{de, Deserialize, Deserializer, Serialize, Serializer};
+
+const UNIT: u64 = 1;
+
+const BINARY_DATA_MAGNITUDE: u64 = 1024;
+pub const B: u64 = UNIT;
+pub const KIB: u64 = B * BINARY_DATA_MAGNITUDE;
+pub const MIB: u64 = KIB * BINARY_DATA_MAGNITUDE;
+pub const GIB: u64 = MIB * BINARY_DATA_MAGNITUDE;
+pub const TIB: u64 = GIB * BINARY_DATA_MAGNITUDE;
+pub const PIB: u64 = TIB * BINARY_DATA_MAGNITUDE;
+
+#[derive(Clone, Debug, Copy, PartialEq, Eq, PartialOrd)]
+pub struct ReadableSize(pub u64);
+
+impl ReadableSize {
+ pub const fn kb(count: u64) -> ReadableSize {
+ ReadableSize(count * KIB)
+ }
+
+ pub const fn mb(count: u64) -> ReadableSize {
+ ReadableSize(count * MIB)
+ }
+
+ pub const fn gb(count: u64) -> ReadableSize {
+ ReadableSize(count * GIB)
+ }
+
+ pub const fn as_mb(self) -> u64 {
+ self.0 / MIB
+ }
+}
+
+impl Div<u64> for ReadableSize {
+ type Output = ReadableSize;
+
+ fn div(self, rhs: u64) -> ReadableSize {
+ ReadableSize(self.0 / rhs)
+ }
+}
+
+impl Div<ReadableSize> for ReadableSize {
+ type Output = u64;
+
+ fn div(self, rhs: ReadableSize) -> u64 {
+ self.0 / rhs.0
+ }
+}
+
+impl Mul<u64> for ReadableSize {
+ type Output = ReadableSize;
+
+ fn mul(self, rhs: u64) -> ReadableSize {
+ ReadableSize(self.0 * rhs)
+ }
+}
+
+impl Serialize for ReadableSize {
+ fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+ where
+ S: Serializer,
+ {
+ let size = self.0;
+ let mut buffer = String::new();
+ if size == 0 {
+ write!(buffer, "{}KiB", size).unwrap();
+ } else if size % PIB == 0 {
+ write!(buffer, "{}PiB", size / PIB).unwrap();
+ } else if size % TIB == 0 {
+ write!(buffer, "{}TiB", size / TIB).unwrap();
+ } else if size % GIB as u64 == 0 {
+ write!(buffer, "{}GiB", size / GIB).unwrap();
+ } else if size % MIB as u64 == 0 {
+ write!(buffer, "{}MiB", size / MIB).unwrap();
+ } else if size % KIB as u64 == 0 {
+ write!(buffer, "{}KiB", size / KIB).unwrap();
+ } else {
+ return serializer.serialize_u64(size);
+ }
+ serializer.serialize_str(&buffer)
+ }
+}
+
+impl FromStr for ReadableSize {
+ type Err = String;
+
+ // This method parses the value in binary units.
+ fn from_str(s: &str) -> Result<ReadableSize, String> {
+ let size_str = s.trim();
+ if size_str.is_empty() {
+ return Err(format!("{:?} is not a valid size.", s));
+ }
+
+ if !size_str.is_ascii() {
+ return Err(format!("ASCII string is expected, but got {:?}", s));
+ }
+
+ // size: digits and '.' as decimal separator
+ let size_len = size_str
+ .to_string()
+ .chars()
+ .take_while(|c| char::is_ascii_digit(c) || ['.', 'e', 'E', '-', '+'].contains(c))
+ .count();
+
+ // unit: alphabetic characters
+ let (size, unit) = size_str.split_at(size_len);
+
+ let unit = match unit.trim() {
+ "K" | "KB" | "KiB" => KIB,
+ "M" | "MB" | "MiB" => MIB,
+ "G" | "GB" | "GiB" => GIB,
+ "T" | "TB" | "TiB" => TIB,
+ "P" | "PB" | "PiB" => PIB,
+ "B" | "" => B,
+ _ => {
+ return Err(format!(
+ "only B, KB, KiB, MB, MiB, GB, GiB, TB, TiB, PB, and PiB are supported: {:?}",
+ s
+ ));
+ }
+ };
+
+ match size.parse::<f64>() {
+ Ok(n) => Ok(ReadableSize((n * unit as f64) as u64)),
+ Err(_) => Err(format!("invalid size string: {:?}", s)),
+ }
+ }
+}
+
+impl Display for ReadableSize {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ if self.0 >= PIB {
+ write!(f, "{:.1}PiB", self.0 as f64 / PIB as f64)
+ } else if self.0 >= TIB {
+ write!(f, "{:.1}TiB", self.0 as f64 / TIB as f64)
+ } else if self.0 >= GIB {
+ write!(f, "{:.1}GiB", self.0 as f64 / GIB as f64)
+ } else if self.0 >= MIB {
+ write!(f, "{:.1}MiB", self.0 as f64 / MIB as f64)
+ } else if self.0 >= KIB {
+ write!(f, "{:.1}KiB", self.0 as f64 / KIB as f64)
+ } else {
+ write!(f, "{}B", self.0)
+ }
+ }
+}
+
+impl<'de> Deserialize<'de> for ReadableSize {
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+ where
+ D: Deserializer<'de>,
+ {
+ struct SizeVisitor;
+
+ impl<'de> Visitor<'de> for SizeVisitor {
+ type Value = ReadableSize;
+
+ fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+ formatter.write_str("valid size")
+ }
+
+ fn visit_i64<E>(self, size: i64) -> Result<ReadableSize, E>
+ where
+ E: de::Error,
+ {
+ if size >= 0 {
+ self.visit_u64(size as u64)
+ } else {
+ Err(E::invalid_value(Unexpected::Signed(size), &self))
+ }
+ }
+
+ fn visit_u64<E>(self, size: u64) -> Result<ReadableSize, E>
+ where
+ E: de::Error,
+ {
+ Ok(ReadableSize(size))
+ }
+
+ fn visit_str<E>(self, size_str: &str) -> Result<ReadableSize, E>
+ where
+ E: de::Error,
+ {
+ size_str.parse().map_err(E::custom)
+ }
+ }
+
+ deserializer.deserialize_any(SizeVisitor)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_readable_size() {
+ let s = ReadableSize::kb(2);
+ assert_eq!(s.0, 2048);
+ assert_eq!(s.as_mb(), 0);
+ let s = ReadableSize::mb(2);
+ assert_eq!(s.0, 2 * 1024 * 1024);
+ assert_eq!(s.as_mb(), 2);
+ let s = ReadableSize::gb(2);
+ assert_eq!(s.0, 2 * 1024 * 1024 * 1024);
+ assert_eq!(s.as_mb(), 2048);
+
+ assert_eq!((ReadableSize::mb(2) / 2).0, MIB);
+ assert_eq!((ReadableSize::mb(1) / 2).0, 512 * KIB);
+ assert_eq!(ReadableSize::mb(2) / ReadableSize::kb(1), 2048);
+ }
+
+ #[test]
+ fn test_parse_readable_size() {
+ #[derive(Serialize, Deserialize)]
+ struct SizeHolder {
+ s: ReadableSize,
+ }
+
+ let legal_cases = vec![
+ (0, "0KiB"),
+ (2 * KIB, "2KiB"),
+ (4 * MIB, "4MiB"),
+ (5 * GIB, "5GiB"),
+ (7 * TIB, "7TiB"),
+ (11 * PIB, "11PiB"),
+ ];
+ for (size, exp) in legal_cases {
+ let c = SizeHolder {
+ s: ReadableSize(size),
+ };
+ let res_str = toml::to_string(&c).unwrap();
+ let exp_str = format!("s = {:?}\n", exp);
+ assert_eq!(res_str, exp_str);
+ let res_size: SizeHolder = toml::from_str(&exp_str).unwrap();
+ assert_eq!(res_size.s.0, size);
+ }
+
+ let c = SizeHolder {
+ s: ReadableSize(512),
+ };
+ let res_str = toml::to_string(&c).unwrap();
+ assert_eq!(res_str, "s = 512\n");
+ let res_size: SizeHolder = toml::from_str(&res_str).unwrap();
+ assert_eq!(res_size.s.0, c.s.0);
+
+ let decode_cases = vec![
+ (" 0.5 PB", PIB / 2),
+ ("0.5 TB", TIB / 2),
+ ("0.5GB ", GIB / 2),
+ ("0.5MB", MIB / 2),
+ ("0.5KB", KIB / 2),
+ ("0.5P", PIB / 2),
+ ("0.5T", TIB / 2),
+ ("0.5G", GIB / 2),
+ ("0.5M", MIB / 2),
+ ("0.5K", KIB / 2),
+ ("23", 23),
+ ("1", 1),
+ ("1024B", KIB),
+ // units with binary prefixes
+ (" 0.5 PiB", PIB / 2),
+ ("1PiB", PIB),
+ ("0.5 TiB", TIB / 2),
+ ("2 TiB", TIB * 2),
+ ("0.5GiB ", GIB / 2),
+ ("787GiB ", GIB * 787),
+ ("0.5MiB", MIB / 2),
+ ("3MiB", MIB * 3),
+ ("0.5KiB", KIB / 2),
+ ("1 KiB", KIB),
+ // scientific notation
+ ("0.5e6 B", B * 500000),
+ ("0.5E6 B", B * 500000),
+ ("1e6B", B * 1000000),
+ ("8E6B", B * 8000000),
+ ("8e7", B * 80000000),
+ ("1e-1MB", MIB / 10),
+ ("1e+1MB", MIB * 10),
+ ("0e+10MB", 0),
+ ];
+ for (src, exp) in decode_cases {
+ let src = format!("s = {:?}", src);
+ let res: SizeHolder = toml::from_str(&src).unwrap();
+ assert_eq!(res.s.0, exp);
+ }
+
+ let illegal_cases = vec![
+ "0.5kb", "0.5kB", "0.5Kb", "0.5k", "0.5g", "b", "gb", "1b", "B", "1K24B", " 5_KB",
+ "4B7", "5M_",
+ ];
+ for src in illegal_cases {
+ let src_str = format!("s = {:?}", src);
+ assert!(toml::from_str::<SizeHolder>(&src_str).is_err(), "{}", src);
+ }
+ }
+}
diff --git a/src/datanode/Cargo.toml b/src/datanode/Cargo.toml
index b63daad0211f..3482f1c54e7b 100644
--- a/src/datanode/Cargo.toml
+++ b/src/datanode/Cargo.toml
@@ -30,6 +30,7 @@ datafusion.workspace = true
datatypes = { path = "../datatypes" }
futures = "0.3"
hyper = { version = "0.14", features = ["full"] }
+humantime-serde = "1.1"
log-store = { path = "../log-store" }
meta-client = { path = "../meta-client" }
meta-srv = { path = "../meta-srv", features = ["mock"] }
diff --git a/src/datanode/src/datanode.rs b/src/datanode/src/datanode.rs
index cd5c7e724009..f3827da5301a 100644
--- a/src/datanode/src/datanode.rs
+++ b/src/datanode/src/datanode.rs
@@ -13,7 +13,9 @@
// limitations under the License.
use std::sync::Arc;
+use std::time::Duration;
+use common_base::readable_size::ReadableSize;
use common_telemetry::info;
use meta_client::MetaClientOpts;
use serde::{Deserialize, Serialize};
@@ -47,6 +49,36 @@ impl Default for ObjectStoreConfig {
}
}
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct WalConfig {
+ // wal directory
+ pub dir: String,
+ // wal file size in bytes
+ pub file_size: ReadableSize,
+ // wal purge threshold in bytes
+ pub purge_threshold: ReadableSize,
+ // purge interval in seconds
+ #[serde(with = "humantime_serde")]
+ pub purge_interval: Duration,
+ // read batch size
+ pub read_batch_size: usize,
+ // whether to sync log file after every write
+ pub sync_write: bool,
+}
+
+impl Default for WalConfig {
+ fn default() -> Self {
+ Self {
+ dir: "/tmp/greptimedb/wal".to_string(),
+ file_size: ReadableSize::gb(1), // log file size 1G
+ purge_threshold: ReadableSize::gb(50), // purge threshold 50G
+ purge_interval: Duration::from_secs(600),
+ read_batch_size: 128,
+ sync_write: false,
+ }
+ }
+}
+
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(default)]
pub struct DatanodeOptions {
@@ -56,7 +88,7 @@ pub struct DatanodeOptions {
pub mysql_addr: String,
pub mysql_runtime_size: usize,
pub meta_client_opts: Option<MetaClientOpts>,
- pub wal_dir: String,
+ pub wal: WalConfig,
pub storage: ObjectStoreConfig,
pub enable_memory_catalog: bool,
pub mode: Mode,
@@ -71,7 +103,7 @@ impl Default for DatanodeOptions {
mysql_addr: "127.0.0.1:4406".to_string(),
mysql_runtime_size: 2,
meta_client_opts: None,
- wal_dir: "/tmp/greptimedb/wal".to_string(),
+ wal: WalConfig::default(),
storage: ObjectStoreConfig::default(),
enable_memory_catalog: false,
mode: Mode::Standalone,
diff --git a/src/datanode/src/instance.rs b/src/datanode/src/instance.rs
index 1751211b0b53..d98a59e966fb 100644
--- a/src/datanode/src/instance.rs
+++ b/src/datanode/src/instance.rs
@@ -38,7 +38,7 @@ use storage::config::EngineConfig as StorageEngineConfig;
use storage::EngineImpl;
use table::table::TableIdProviderRef;
-use crate::datanode::{DatanodeOptions, ObjectStoreConfig};
+use crate::datanode::{DatanodeOptions, ObjectStoreConfig, WalConfig};
use crate::error::{
self, CatalogSnafu, MetaClientInitSnafu, MissingMetasrvOptsSnafu, MissingNodeIdSnafu,
NewCatalogSnafu, OpenLogStoreSnafu, Result,
@@ -68,7 +68,7 @@ pub type InstanceRef = Arc<Instance>;
impl Instance {
pub async fn new(opts: &DatanodeOptions) -> Result<Self> {
let object_store = new_object_store(&opts.storage).await?;
- let logstore = Arc::new(create_log_store(&opts.wal_dir).await?);
+ let logstore = Arc::new(create_log_store(&opts.wal).await?);
let meta_client = match opts.mode {
Mode::Standalone => None,
@@ -289,16 +289,19 @@ async fn new_metasrv_client(node_id: u64, meta_config: &MetaClientOpts) -> Resul
Ok(meta_client)
}
-pub(crate) async fn create_log_store(path: impl AsRef<str>) -> Result<RaftEngineLogStore> {
- let path = path.as_ref();
+pub(crate) async fn create_log_store(wal_config: &WalConfig) -> Result<RaftEngineLogStore> {
// create WAL directory
- fs::create_dir_all(path::Path::new(path)).context(error::CreateDirSnafu { dir: path })?;
-
- info!("The WAL directory is: {}", path);
-
+ fs::create_dir_all(path::Path::new(&wal_config.dir)).context(error::CreateDirSnafu {
+ dir: &wal_config.dir,
+ })?;
+ info!("Creating logstore with config: {:?}", wal_config);
let log_config = LogConfig {
- log_file_dir: path.to_string(),
- ..Default::default()
+ file_size: wal_config.file_size.0,
+ log_file_dir: wal_config.dir.clone(),
+ purge_interval: wal_config.purge_interval,
+ purge_threshold: wal_config.purge_threshold.0,
+ read_batch_size: wal_config.read_batch_size,
+ sync_write: wal_config.sync_write,
};
let logstore = RaftEngineLogStore::try_new(log_config)
diff --git a/src/datanode/src/mock.rs b/src/datanode/src/mock.rs
index 053ab289fe7a..9fa2b9bfe345 100644
--- a/src/datanode/src/mock.rs
+++ b/src/datanode/src/mock.rs
@@ -41,7 +41,7 @@ impl Instance {
pub async fn with_mock_meta_server(opts: &DatanodeOptions, meta_srv: MockInfo) -> Result<Self> {
let object_store = new_object_store(&opts.storage).await?;
- let logstore = Arc::new(create_log_store(&opts.wal_dir).await?);
+ let logstore = Arc::new(create_log_store(&opts.wal).await?);
let meta_client = Arc::new(mock_meta_client(meta_srv, opts.node_id.unwrap_or(42)).await);
let table_engine = Arc::new(DefaultEngine::new(
TableEngineConfig::default(),
diff --git a/src/datanode/src/tests/test_util.rs b/src/datanode/src/tests/test_util.rs
index 110cd12a4600..e1911cc110a2 100644
--- a/src/datanode/src/tests/test_util.rs
+++ b/src/datanode/src/tests/test_util.rs
@@ -27,7 +27,7 @@ use table::engine::{EngineContext, TableEngineRef};
use table::requests::CreateTableRequest;
use tempdir::TempDir;
-use crate::datanode::{DatanodeOptions, ObjectStoreConfig};
+use crate::datanode::{DatanodeOptions, ObjectStoreConfig, WalConfig};
use crate::error::{CreateTableSnafu, Result};
use crate::instance::Instance;
use crate::sql::SqlHandler;
@@ -61,7 +61,10 @@ fn create_tmp_dir_and_datanode_opts(name: &str) -> (DatanodeOptions, TestGuard)
let wal_tmp_dir = TempDir::new(&format!("gt_wal_{name}")).unwrap();
let data_tmp_dir = TempDir::new(&format!("gt_data_{name}")).unwrap();
let opts = DatanodeOptions {
- wal_dir: wal_tmp_dir.path().to_str().unwrap().to_string(),
+ wal: WalConfig {
+ dir: wal_tmp_dir.path().to_str().unwrap().to_string(),
+ ..Default::default()
+ },
storage: ObjectStoreConfig::File {
data_dir: data_tmp_dir.path().to_str().unwrap().to_string(),
},
diff --git a/src/frontend/src/tests.rs b/src/frontend/src/tests.rs
index 6a39671034b4..80650d25e229 100644
--- a/src/frontend/src/tests.rs
+++ b/src/frontend/src/tests.rs
@@ -20,7 +20,7 @@ use catalog::remote::MetaKvBackend;
use client::Client;
use common_grpc::channel_manager::ChannelManager;
use common_runtime::Builder as RuntimeBuilder;
-use datanode::datanode::{DatanodeOptions, ObjectStoreConfig};
+use datanode::datanode::{DatanodeOptions, ObjectStoreConfig, WalConfig};
use datanode::instance::Instance as DatanodeInstance;
use meta_client::client::MetaClientBuilder;
use meta_client::rpc::Peer;
@@ -76,7 +76,10 @@ fn create_tmp_dir_and_datanode_opts(name: &str) -> (DatanodeOptions, TestGuard)
let wal_tmp_dir = TempDir::new(&format!("gt_wal_{name}")).unwrap();
let data_tmp_dir = TempDir::new(&format!("gt_data_{name}")).unwrap();
let opts = DatanodeOptions {
- wal_dir: wal_tmp_dir.path().to_str().unwrap().to_string(),
+ wal: WalConfig {
+ dir: wal_tmp_dir.path().to_str().unwrap().to_string(),
+ ..Default::default()
+ },
storage: ObjectStoreConfig::File {
data_dir: data_tmp_dir.path().to_str().unwrap().to_string(),
},
@@ -155,7 +158,10 @@ async fn create_distributed_datanode(
let data_tmp_dir = TempDir::new(&format!("gt_data_{test_name}_dist_dn_{datanode_id}")).unwrap();
let opts = DatanodeOptions {
node_id: Some(datanode_id),
- wal_dir: wal_tmp_dir.path().to_str().unwrap().to_string(),
+ wal: WalConfig {
+ dir: wal_tmp_dir.path().to_str().unwrap().to_string(),
+ ..Default::default()
+ },
storage: ObjectStoreConfig::File {
data_dir: data_tmp_dir.path().to_str().unwrap().to_string(),
},
diff --git a/src/log-store/src/config.rs b/src/log-store/src/config.rs
index 5230e55ea232..1f195f6a2766 100644
--- a/src/log-store/src/config.rs
+++ b/src/log-store/src/config.rs
@@ -16,11 +16,10 @@ use std::time::Duration;
#[derive(Debug, Clone)]
pub struct LogConfig {
- pub append_buffer_size: usize,
- pub max_log_file_size: usize,
+ pub file_size: u64,
pub log_file_dir: String,
- pub gc_interval: Duration,
- pub purge_threshold: usize,
+ pub purge_interval: Duration,
+ pub purge_threshold: u64,
pub read_batch_size: usize,
pub sync_write: bool,
}
@@ -30,10 +29,9 @@ impl Default for LogConfig {
/// in tests.
fn default() -> Self {
Self {
- append_buffer_size: 128,
- max_log_file_size: 1024 * 1024 * 1024,
+ file_size: 1024 * 1024 * 1024,
log_file_dir: "/tmp/greptimedb".to_string(),
- gc_interval: Duration::from_secs(10 * 60),
+ purge_interval: Duration::from_secs(10 * 60),
purge_threshold: 1024 * 1024 * 1024 * 50,
read_batch_size: 128,
sync_write: false,
@@ -52,9 +50,8 @@ mod tests {
common_telemetry::logging::init_default_ut_logging();
let default = LogConfig::default();
info!("LogConfig::default(): {:?}", default);
- assert_eq!(1024 * 1024 * 1024, default.max_log_file_size);
- assert_eq!(128, default.append_buffer_size);
- assert_eq!(Duration::from_secs(600), default.gc_interval);
+ assert_eq!(1024 * 1024 * 1024, default.file_size);
+ assert_eq!(Duration::from_secs(600), default.purge_interval);
assert_eq!(1024 * 1024 * 1024 * 50, default.purge_threshold);
assert_eq!(128, default.read_batch_size);
assert!(!default.sync_write);
diff --git a/src/log-store/src/raft_engine/log_store.rs b/src/log-store/src/raft_engine/log_store.rs
index 8d03594c5557..54e4cbb9b037 100644
--- a/src/log-store/src/raft_engine/log_store.rs
+++ b/src/log-store/src/raft_engine/log_store.rs
@@ -51,10 +51,10 @@ impl RaftEngineLogStore {
// TODO(hl): set according to available disk space
let raft_engine_config = Config {
dir: config.log_file_dir.clone(),
- purge_threshold: ReadableSize(config.purge_threshold as u64),
+ purge_threshold: ReadableSize(config.purge_threshold),
recovery_mode: RecoveryMode::TolerateTailCorruption,
batch_compression_threshold: ReadableSize::kb(8),
- target_file_size: ReadableSize(config.max_log_file_size as u64),
+ target_file_size: ReadableSize(config.file_size),
..Default::default()
};
let engine = Arc::new(Engine::open(raft_engine_config).context(RaftEngineSnafu)?);
@@ -75,7 +75,7 @@ impl RaftEngineLogStore {
async fn start(&self) -> Result<(), Error> {
let engine_clone = self.engine.clone();
- let interval = self.config.gc_interval;
+ let interval = self.config.purge_interval;
let token = CancellationToken::new();
let child = token.child_token();
// TODO(hl): Maybe spawn to a blocking runtime.
@@ -495,9 +495,9 @@ mod tests {
let config = LogConfig {
log_file_dir: dir.path().to_str().unwrap().to_string(),
- max_log_file_size: ReadableSize::mb(2).0 as usize,
- purge_threshold: ReadableSize::mb(4).0 as usize,
- gc_interval: Duration::from_secs(5),
+ file_size: ReadableSize::mb(2).0,
+ purge_threshold: ReadableSize::mb(4).0,
+ purge_interval: Duration::from_secs(5),
..Default::default()
};
@@ -528,9 +528,9 @@ mod tests {
let config = LogConfig {
log_file_dir: dir.path().to_str().unwrap().to_string(),
- max_log_file_size: ReadableSize::mb(2).0 as usize,
- purge_threshold: ReadableSize::mb(4).0 as usize,
- gc_interval: Duration::from_secs(5),
+ file_size: ReadableSize::mb(2).0,
+ purge_threshold: ReadableSize::mb(4).0,
+ purge_interval: Duration::from_secs(5),
..Default::default()
};
diff --git a/src/log-store/src/test_util/log_store_util.rs b/src/log-store/src/test_util/log_store_util.rs
index af20be8b76e3..684f368afdcb 100644
--- a/src/log-store/src/test_util/log_store_util.rs
+++ b/src/log-store/src/test_util/log_store_util.rs
@@ -22,8 +22,7 @@ use crate::LogConfig;
pub async fn create_tmp_local_file_log_store(dir: &str) -> (RaftEngineLogStore, TempDir) {
let dir = TempDir::new(dir).unwrap();
let cfg = LogConfig {
- append_buffer_size: 128,
- max_log_file_size: 128,
+ file_size: 128 * 1024,
log_file_dir: dir.path().to_str().unwrap().to_string(),
..Default::default()
};
diff --git a/tests-integration/src/test_util.rs b/tests-integration/src/test_util.rs
index 4a49f7feaeb2..c16988585f59 100644
--- a/tests-integration/src/test_util.rs
+++ b/tests-integration/src/test_util.rs
@@ -23,7 +23,7 @@ use axum::Router;
use catalog::CatalogManagerRef;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MIN_USER_TABLE_ID};
use common_runtime::Builder as RuntimeBuilder;
-use datanode::datanode::{DatanodeOptions, ObjectStoreConfig};
+use datanode::datanode::{DatanodeOptions, ObjectStoreConfig, WalConfig};
use datanode::error::{CreateTableSnafu, Result};
use datanode::instance::{Instance, InstanceRef};
use datanode::sql::SqlHandler;
@@ -149,7 +149,10 @@ pub fn create_tmp_dir_and_datanode_opts(
let (storage, data_tmp_dir) = get_test_store_config(&store_type, name);
let opts = DatanodeOptions {
- wal_dir: wal_tmp_dir.path().to_str().unwrap().to_string(),
+ wal: WalConfig {
+ dir: wal_tmp_dir.path().to_str().unwrap().to_string(),
+ ..Default::default()
+ },
storage,
mode: Mode::Standalone,
..Default::default()
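Below is a minimal sketch (not part of the commit) of the renamed log/WAL settings in use. It assumes `log_store::LogConfig` is in scope, matching the `use crate::LogConfig` in the test util above, and the values simply mirror the defaults shown in the diff.

use std::time::Duration;

use log_store::LogConfig;

fn example_log_config(dir: &str) -> LogConfig {
    LogConfig {
        log_file_dir: dir.to_string(),
        file_size: 1024 * 1024 * 1024,            // replaces `max_log_file_size`
        purge_interval: Duration::from_secs(600), // replaces `gc_interval`
        purge_threshold: 1024 * 1024 * 1024 * 50,
        ..Default::default()
    }
}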
|
feat
|
expose wal config (#852)
|
7a14db68a66d724afecce0d558dbe5d7ad749408
|
2023-12-04 10:39:27
|
Weny Xu
|
feat: add upgrade candidate region step (#2829)
| false
|
diff --git a/src/common/meta/src/distributed_time_constants.rs b/src/common/meta/src/distributed_time_constants.rs
index 4c4148f184af..2b2b84fff81a 100644
--- a/src/common/meta/src/distributed_time_constants.rs
+++ b/src/common/meta/src/distributed_time_constants.rs
@@ -33,5 +33,8 @@ pub const DATANODE_LEASE_SECS: u64 = REGION_LEASE_SECS;
/// The lease seconds of metasrv leader.
pub const META_LEASE_SECS: u64 = 3;
-// In a lease, there are two opportunities for renewal.
+/// In a lease, there are two opportunities for renewal.
pub const META_KEEP_ALIVE_INTERVAL_SECS: u64 = META_LEASE_SECS / 2;
+
+/// The default mailbox round-trip timeout.
+pub const MAILBOX_RTT_SECS: u64 = 1;
diff --git a/src/common/meta/src/instruction.rs b/src/common/meta/src/instruction.rs
index 7e787b41433e..8ad58552f267 100644
--- a/src/common/meta/src/instruction.rs
+++ b/src/common/meta/src/instruction.rs
@@ -111,6 +111,7 @@ impl OpenRegion {
/// The instruction of downgrading leader region.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DowngradeRegion {
+ /// The [RegionId].
pub region_id: RegionId,
}
@@ -120,20 +121,67 @@ impl Display for DowngradeRegion {
}
}
+/// Upgrades a follower region to leader region.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct UpgradeRegion {
+ /// The [RegionId].
+ pub region_id: RegionId,
+ /// The `last_entry_id` of old leader region.
+ pub last_entry_id: Option<u64>,
+    /// The number of seconds to wait for a WAL replay.
+    ///
+    /// `None` stands for no wait, which is helpful for verifying
+    /// whether the leader region is ready.
+ pub wait_for_replay_secs: Option<u64>,
+}
+
#[derive(Debug, Clone, Serialize, Deserialize, Display)]
pub enum Instruction {
+ /// Opens a region.
+ ///
+ /// - Returns true if a specified region exists.
OpenRegion(OpenRegion),
+ /// Closes a region.
+ ///
+ /// - Returns true if a specified region does not exist.
CloseRegion(RegionIdent),
+ /// Upgrades a region.
+ UpgradeRegion(UpgradeRegion),
+ /// Downgrades a region.
DowngradeRegion(DowngradeRegion),
+ /// Invalidates a specified table cache.
InvalidateTableIdCache(TableId),
+ /// Invalidates a specified table name index cache.
InvalidateTableNameCache(TableName),
}
+/// The reply of [UpgradeRegion].
+#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)]
+pub struct UpgradeRegionReply {
+ /// Returns true if `last_entry_id` has been replayed to the latest.
+ pub ready: bool,
+ /// Indicates whether the region exists.
+ pub exists: bool,
+ /// Returns error if any.
+ pub error: Option<String>,
+}
+
+impl Display for UpgradeRegionReply {
+ fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+ write!(
+ f,
+ "(ready={}, exists={}, error={:?})",
+ self.ready, self.exists, self.error
+ )
+ }
+}
+
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum InstructionReply {
OpenRegion(SimpleReply),
CloseRegion(SimpleReply),
+ UpgradeRegion(UpgradeRegionReply),
InvalidateTableCache(SimpleReply),
DowngradeRegion(DowngradeRegionReply),
}
@@ -143,6 +191,7 @@ impl Display for InstructionReply {
match self {
Self::OpenRegion(reply) => write!(f, "InstructionReply::OpenRegion({})", reply),
Self::CloseRegion(reply) => write!(f, "InstructionReply::CloseRegion({})", reply),
+ Self::UpgradeRegion(reply) => write!(f, "InstructionReply::UpgradeRegion({})", reply),
Self::InvalidateTableCache(reply) => {
write!(f, "InstructionReply::Invalidate({})", reply)
}
diff --git a/src/datanode/src/heartbeat/handler.rs b/src/datanode/src/heartbeat/handler.rs
index 9985fd6cf40e..ee243942e787 100644
--- a/src/datanode/src/heartbeat/handler.rs
+++ b/src/datanode/src/heartbeat/handler.rs
@@ -62,6 +62,9 @@ impl RegionHeartbeatResponseHandler {
let close_region_req = RegionRequest::Close(RegionCloseRequest {});
Ok((region_id, close_region_req))
}
+ Instruction::UpgradeRegion(_) => {
+ todo!()
+ }
Instruction::InvalidateTableIdCache(_) | Instruction::InvalidateTableNameCache(_) => {
InvalidHeartbeatResponseSnafu.fail()
}
@@ -86,6 +89,9 @@ impl RegionHeartbeatResponseHandler {
result: false,
error: None,
}),
+ Instruction::UpgradeRegion(_) => {
+ todo!()
+ }
Instruction::InvalidateTableIdCache(_) | Instruction::InvalidateTableNameCache(_) => {
InstructionReply::InvalidateTableCache(SimpleReply {
result: false,
@@ -118,6 +124,9 @@ impl RegionHeartbeatResponseHandler {
reply.error = error;
}
},
+ InstructionReply::UpgradeRegion(_) => {
+ todo!()
+ }
InstructionReply::InvalidateTableCache(reply) => {
reply.result = success;
reply.error = error;
diff --git a/src/meta-srv/src/procedure/region_migration/downgrade_leader_region.rs b/src/meta-srv/src/procedure/region_migration/downgrade_leader_region.rs
index 51bed22f1260..f3834cc5e9b5 100644
--- a/src/meta-srv/src/procedure/region_migration/downgrade_leader_region.rs
+++ b/src/meta-srv/src/procedure/region_migration/downgrade_leader_region.rs
@@ -16,7 +16,7 @@ use std::any::Any;
use std::time::Duration;
use api::v1::meta::MailboxMessage;
-use common_meta::distributed_time_constants::REGION_LEASE_SECS;
+use common_meta::distributed_time_constants::{MAILBOX_RTT_SECS, REGION_LEASE_SECS};
use common_meta::instruction::{
DowngradeRegion, DowngradeRegionReply, Instruction, InstructionReply,
};
@@ -31,7 +31,7 @@ use crate::handler::HeartbeatMailbox;
use crate::procedure::region_migration::{Context, State};
use crate::service::mailbox::Channel;
-const DOWNGRADE_LEADER_REGION_TIMEOUT: Duration = Duration::from_secs(1);
+const DOWNGRADE_LEADER_REGION_TIMEOUT: Duration = Duration::from_secs(MAILBOX_RTT_SECS);
#[derive(Debug, Serialize, Deserialize)]
pub struct DowngradeLeaderRegion {
@@ -64,7 +64,7 @@ impl State for DowngradeLeaderRegion {
tokio::time::sleep_until(*deadline).await;
}
- Ok(Box::new(UpgradeCandidateRegion))
+ Ok(Box::<UpgradeCandidateRegion>::default())
}
fn as_any(&self) -> &dyn Any {
@@ -159,7 +159,7 @@ impl DowngradeLeaderRegion {
}
Err(error::Error::MailboxTimeout { .. }) => {
let reason = format!(
- "Mailbox received timeout for downgrade leader region {region_id} on Datanode {:?}",
+ "Mailbox received timeout for downgrade leader region {region_id} on datanode {:?}",
leader,
);
error::RetryLaterSnafu { reason }.fail()
diff --git a/src/meta-srv/src/procedure/region_migration/open_candidate_region.rs b/src/meta-srv/src/procedure/region_migration/open_candidate_region.rs
index 33b465f46b35..bd92217d8722 100644
--- a/src/meta-srv/src/procedure/region_migration/open_candidate_region.rs
+++ b/src/meta-srv/src/procedure/region_migration/open_candidate_region.rs
@@ -18,6 +18,7 @@ use std::time::Duration;
use api::v1::meta::MailboxMessage;
use common_meta::ddl::utils::region_storage_path;
+use common_meta::distributed_time_constants::MAILBOX_RTT_SECS;
use common_meta::instruction::{Instruction, InstructionReply, OpenRegion, SimpleReply};
use common_meta::RegionIdent;
use serde::{Deserialize, Serialize};
@@ -29,7 +30,7 @@ use crate::procedure::region_migration::downgrade_leader_region::DowngradeLeader
use crate::procedure::region_migration::{Context, State};
use crate::service::mailbox::Channel;
-const OPEN_CANDIDATE_REGION_TIMEOUT: Duration = Duration::from_secs(1);
+const OPEN_CANDIDATE_REGION_TIMEOUT: Duration = Duration::from_secs(MAILBOX_RTT_SECS);
#[derive(Debug, Serialize, Deserialize)]
pub struct OpenCandidateRegion;
@@ -152,7 +153,7 @@ impl OpenCandidateRegion {
} else {
error::RetryLaterSnafu {
reason: format!(
- "Region {region_id} is not opened by Datanode {:?}, error: {error:?}",
+ "Region {region_id} is not opened by datanode {:?}, error: {error:?}",
candidate,
),
}
@@ -161,7 +162,7 @@ impl OpenCandidateRegion {
}
Err(error::Error::MailboxTimeout { .. }) => {
let reason = format!(
- "Mailbox received timeout for open candidate region {region_id} on Datanode {:?}",
+ "Mailbox received timeout for open candidate region {region_id} on datanode {:?}",
candidate,
);
error::RetryLaterSnafu { reason }.fail()
diff --git a/src/meta-srv/src/procedure/region_migration/update_metadata.rs b/src/meta-srv/src/procedure/region_migration/update_metadata.rs
index 7d56c51390ae..a0ea0fa1f3b9 100644
--- a/src/meta-srv/src/procedure/region_migration/update_metadata.rs
+++ b/src/meta-srv/src/procedure/region_migration/update_metadata.rs
@@ -32,9 +32,9 @@ use crate::procedure::region_migration::{Context, State};
pub enum UpdateMetadata {
/// Downgrades the leader region.
Downgrade,
- /// Upgrade the candidate region.
+ /// Upgrades the candidate region.
Upgrade,
- /// Rollback the downgraded leader region.
+ /// Rolls back the downgraded region.
Rollback,
}
diff --git a/src/meta-srv/src/procedure/region_migration/upgrade_candidate_region.rs b/src/meta-srv/src/procedure/region_migration/upgrade_candidate_region.rs
index 8b15a0730f7a..e3eb6e2f1b9d 100644
--- a/src/meta-srv/src/procedure/region_migration/upgrade_candidate_region.rs
+++ b/src/meta-srv/src/procedure/region_migration/upgrade_candidate_region.rs
@@ -13,19 +13,55 @@
// limitations under the License.
use std::any::Any;
+use std::time::Duration;
+use api::v1::meta::MailboxMessage;
+use common_meta::distributed_time_constants::MAILBOX_RTT_SECS;
+use common_meta::instruction::{Instruction, InstructionReply, UpgradeRegion, UpgradeRegionReply};
+use common_telemetry::warn;
use serde::{Deserialize, Serialize};
+use snafu::{ensure, ResultExt};
+use tokio::time::sleep;
-use crate::error::Result;
+use super::update_metadata::UpdateMetadata;
+use crate::error::{self, Result};
+use crate::handler::HeartbeatMailbox;
use crate::procedure::region_migration::{Context, State};
+use crate::service::mailbox::Channel;
+
#[derive(Debug, Serialize, Deserialize)]
-pub struct UpgradeCandidateRegion;
+pub struct UpgradeCandidateRegion {
+    // The number of optimistic retries.
+    optimistic_retry: usize,
+    // The initial retry interval.
+    retry_initial_interval: Duration,
+    // The replay timeout of an instruction.
+    replay_timeout: Duration,
+    // If true, the candidate region MUST replay the WAL to the latest entry id;
+    // otherwise, the procedure rolls back to the old leader region.
+ require_ready: bool,
+}
+
+impl Default for UpgradeCandidateRegion {
+ fn default() -> Self {
+ Self {
+ optimistic_retry: 3,
+ retry_initial_interval: Duration::from_millis(500),
+ replay_timeout: Duration::from_millis(1000),
+ require_ready: true,
+ }
+ }
+}
#[async_trait::async_trait]
#[typetag::serde]
impl State for UpgradeCandidateRegion {
- async fn next(&mut self, _ctx: &mut Context) -> Result<Box<dyn State>> {
- todo!();
+ async fn next(&mut self, ctx: &mut Context) -> Result<Box<dyn State>> {
+ if self.upgrade_region_with_retry(ctx).await {
+ Ok(Box::new(UpdateMetadata::Upgrade))
+ } else {
+ Ok(Box::new(UpdateMetadata::Rollback))
+ }
}
fn as_any(&self) -> &dyn Any {
@@ -33,4 +69,494 @@ impl State for UpgradeCandidateRegion {
}
}
-impl UpgradeCandidateRegion {}
+impl UpgradeCandidateRegion {
+ const UPGRADE_CANDIDATE_REGION_RTT: Duration = Duration::from_secs(MAILBOX_RTT_SECS);
+
+ /// Returns the timeout of the upgrade candidate region.
+ ///
+ /// Equals `replay_timeout` + RTT
+ fn send_upgrade_candidate_region_timeout(&self) -> Duration {
+ self.replay_timeout + UpgradeCandidateRegion::UPGRADE_CANDIDATE_REGION_RTT
+ }
+
+ /// Builds upgrade region instruction.
+ fn build_upgrade_region_instruction(&self, ctx: &Context) -> Instruction {
+ let pc = &ctx.persistent_ctx;
+ let region_id = pc.region_id;
+ let last_entry_id = ctx.volatile_ctx.leader_region_last_entry_id;
+
+ Instruction::UpgradeRegion(UpgradeRegion {
+ region_id,
+ last_entry_id,
+ wait_for_replay_secs: Some(self.replay_timeout.as_secs()),
+ })
+ }
+
+ /// Tries to upgrade a candidate region.
+ ///
+ /// Retry:
+    /// - If `require_ready` is true but the candidate region reports `ready` as false.
+ /// - [MailboxTimeout](error::Error::MailboxTimeout), Timeout.
+ ///
+ /// Abort:
+ /// - The candidate region doesn't exist.
+ /// - [PusherNotFound](error::Error::PusherNotFound), The datanode is unreachable.
+ /// - [PushMessage](error::Error::PushMessage), The receiver is dropped.
+ /// - [MailboxReceiver](error::Error::MailboxReceiver), The sender is dropped without sending (impossible).
+ /// - [UnexpectedInstructionReply](error::Error::UnexpectedInstructionReply) (impossible).
+ /// - Invalid JSON (impossible).
+ async fn upgrade_region(&self, ctx: &Context, upgrade_instruction: &Instruction) -> Result<()> {
+ let pc = &ctx.persistent_ctx;
+ let region_id = pc.region_id;
+ let candidate = &pc.to_peer;
+
+ let msg = MailboxMessage::json_message(
+ &format!("Upgrade candidate region: {}", region_id),
+ &format!("Meta@{}", ctx.server_addr()),
+ &format!("Datanode-{}@{}", candidate.id, candidate.addr),
+ common_time::util::current_time_millis(),
+ upgrade_instruction,
+ )
+ .with_context(|_| error::SerializeToJsonSnafu {
+ input: upgrade_instruction.to_string(),
+ })?;
+
+ let ch = Channel::Datanode(candidate.id);
+ let receiver = ctx
+ .mailbox
+ .send(&ch, msg, self.send_upgrade_candidate_region_timeout())
+ .await?;
+
+ match receiver.await? {
+ Ok(msg) => {
+ let reply = HeartbeatMailbox::json_reply(&msg)?;
+ let InstructionReply::UpgradeRegion(UpgradeRegionReply {
+ ready,
+ exists,
+ error,
+ }) = reply
+ else {
+ return error::UnexpectedInstructionReplySnafu {
+ mailbox_message: msg.to_string(),
+ reason: "Unexpected reply of the upgrade region instruction",
+ }
+ .fail();
+ };
+
+ // Notes: The order of handling is important.
+ if error.is_some() {
+ return error::RetryLaterSnafu {
+ reason: format!(
+ "Failed to upgrade the region {} on datanode {:?}, error: {:?}",
+ region_id, candidate, error
+ ),
+ }
+ .fail();
+ }
+
+ ensure!(
+ exists,
+ error::UnexpectedSnafu {
+ violated: format!(
+ "Expected region {} doesn't exist on datanode {:?}",
+ region_id, candidate
+ )
+ }
+ );
+
+ if self.require_ready && !ready {
+ return error::RetryLaterSnafu {
+ reason: format!(
+ "Candidate region {} still replaying the wal on datanode {:?}",
+ region_id, candidate
+ ),
+ }
+ .fail();
+ }
+
+ Ok(())
+ }
+ Err(error::Error::MailboxTimeout { .. }) => {
+ let reason = format!(
+ "Mailbox received timeout for upgrade candidate region {region_id} on datanode {:?}",
+ candidate,
+ );
+ error::RetryLaterSnafu { reason }.fail()
+ }
+ Err(err) => Err(err),
+ }
+ }
+
+ /// Upgrades a candidate region.
+ ///
+ /// Returns true if the candidate region is upgraded successfully.
+ async fn upgrade_region_with_retry(&self, ctx: &Context) -> bool {
+ let upgrade_instruction = self.build_upgrade_region_instruction(ctx);
+
+ let mut retry = 0;
+ let mut upgraded = false;
+
+ loop {
+ if let Err(err) = self.upgrade_region(ctx, &upgrade_instruction).await {
+ retry += 1;
+ if err.is_retryable() && retry < self.optimistic_retry {
+ warn!("Failed to upgrade region, error: {err:?}, retry later");
+ sleep(self.retry_initial_interval).await;
+ } else {
+ break;
+ }
+ } else {
+ upgraded = true;
+ break;
+ }
+ }
+
+ upgraded
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::assert_matches::assert_matches;
+
+ use api::v1::meta::mailbox_message::Payload;
+ use common_meta::peer::Peer;
+ use common_time::util::current_time_millis;
+ use store_api::storage::RegionId;
+
+ use super::*;
+ use crate::error::Error;
+ use crate::procedure::region_migration::test_util::{
+ new_close_region_reply, send_mock_reply, TestingEnv,
+ };
+ use crate::procedure::region_migration::{ContextFactory, PersistentContext};
+
+ fn new_persistent_context() -> PersistentContext {
+ PersistentContext {
+ from_peer: Peer::empty(1),
+ to_peer: Peer::empty(2),
+ region_id: RegionId::new(1024, 1),
+ cluster_id: 0,
+ }
+ }
+
+ fn new_upgrade_region_reply(
+ id: u64,
+        ready: bool,
+        exists: bool,
+        error: Option<String>,
+ ) -> MailboxMessage {
+ MailboxMessage {
+ id,
+ subject: "mock".to_string(),
+ from: "datanode".to_string(),
+ to: "meta".to_string(),
+ timestamp_millis: current_time_millis(),
+ payload: Some(Payload::Json(
+ serde_json::to_string(&InstructionReply::UpgradeRegion(UpgradeRegionReply {
+ ready,
+ exists,
+ error,
+ }))
+ .unwrap(),
+ )),
+ }
+ }
+
+ #[tokio::test]
+ async fn test_datanode_is_unreachable() {
+ let state = UpgradeCandidateRegion::default();
+ let persistent_context = new_persistent_context();
+ let env = TestingEnv::new();
+ let ctx = env.context_factory().new_context(persistent_context);
+
+ let instruction = &state.build_upgrade_region_instruction(&ctx);
+ let err = state.upgrade_region(&ctx, instruction).await.unwrap_err();
+
+ assert_matches!(err, Error::PusherNotFound { .. });
+ assert!(!err.is_retryable());
+ }
+
+ #[tokio::test]
+ async fn test_pusher_dropped() {
+ let state = UpgradeCandidateRegion::default();
+ let persistent_context = new_persistent_context();
+ let to_peer_id = persistent_context.to_peer.id;
+
+ let mut env = TestingEnv::new();
+ let ctx = env.context_factory().new_context(persistent_context);
+ let mailbox_ctx = env.mailbox_context();
+
+ let (tx, rx) = tokio::sync::mpsc::channel(1);
+
+ mailbox_ctx
+ .insert_heartbeat_response_receiver(Channel::Datanode(to_peer_id), tx)
+ .await;
+
+ drop(rx);
+
+ let instruction = &state.build_upgrade_region_instruction(&ctx);
+ let err = state.upgrade_region(&ctx, instruction).await.unwrap_err();
+
+ assert_matches!(err, Error::PushMessage { .. });
+ assert!(!err.is_retryable());
+ }
+
+ #[tokio::test]
+ async fn test_unexpected_instruction_reply() {
+ let state = UpgradeCandidateRegion::default();
+ let persistent_context = new_persistent_context();
+ let to_peer_id = persistent_context.to_peer.id;
+
+ let mut env = TestingEnv::new();
+ let ctx = env.context_factory().new_context(persistent_context);
+ let mailbox_ctx = env.mailbox_context();
+ let mailbox = mailbox_ctx.mailbox().clone();
+
+ let (tx, rx) = tokio::sync::mpsc::channel(1);
+
+ mailbox_ctx
+ .insert_heartbeat_response_receiver(Channel::Datanode(to_peer_id), tx)
+ .await;
+
+ send_mock_reply(mailbox, rx, |id| Ok(new_close_region_reply(id)));
+
+ let instruction = &state.build_upgrade_region_instruction(&ctx);
+ let err = state.upgrade_region(&ctx, instruction).await.unwrap_err();
+ assert_matches!(err, Error::UnexpectedInstructionReply { .. });
+ assert!(!err.is_retryable());
+ }
+
+ #[tokio::test]
+ async fn test_upgrade_region_failed() {
+ let state = UpgradeCandidateRegion::default();
+ let persistent_context = new_persistent_context();
+ let to_peer_id = persistent_context.to_peer.id;
+
+ let mut env = TestingEnv::new();
+ let ctx = env.context_factory().new_context(persistent_context);
+ let mailbox_ctx = env.mailbox_context();
+ let mailbox = mailbox_ctx.mailbox().clone();
+
+ let (tx, rx) = tokio::sync::mpsc::channel(1);
+
+ mailbox_ctx
+ .insert_heartbeat_response_receiver(Channel::Datanode(to_peer_id), tx)
+ .await;
+
+ // A reply contains an error.
+ send_mock_reply(mailbox, rx, |id| {
+ Ok(new_upgrade_region_reply(
+ id,
+ true,
+ true,
+ Some("test mocked".to_string()),
+ ))
+ });
+
+ let instruction = &state.build_upgrade_region_instruction(&ctx);
+ let err = state.upgrade_region(&ctx, instruction).await.unwrap_err();
+
+ assert_matches!(err, Error::RetryLater { .. });
+ assert!(err.is_retryable());
+ assert!(err.to_string().contains("test mocked"));
+ }
+
+ #[tokio::test]
+ async fn test_upgrade_region_not_found() {
+ let state = UpgradeCandidateRegion::default();
+ let persistent_context = new_persistent_context();
+ let to_peer_id = persistent_context.to_peer.id;
+
+ let mut env = TestingEnv::new();
+ let ctx = env.context_factory().new_context(persistent_context);
+ let mailbox_ctx = env.mailbox_context();
+ let mailbox = mailbox_ctx.mailbox().clone();
+
+ let (tx, rx) = tokio::sync::mpsc::channel(1);
+
+ mailbox_ctx
+ .insert_heartbeat_response_receiver(Channel::Datanode(to_peer_id), tx)
+ .await;
+
+ send_mock_reply(mailbox, rx, |id| {
+ Ok(new_upgrade_region_reply(id, true, false, None))
+ });
+
+ let instruction = &state.build_upgrade_region_instruction(&ctx);
+ let err = state.upgrade_region(&ctx, instruction).await.unwrap_err();
+
+ assert_matches!(err, Error::Unexpected { .. });
+ assert!(!err.is_retryable());
+ assert!(err.to_string().contains("doesn't exist"));
+ }
+
+ #[tokio::test]
+ async fn test_upgrade_region_require_ready() {
+ let mut state = UpgradeCandidateRegion {
+ require_ready: true,
+ ..Default::default()
+ };
+
+ let persistent_context = new_persistent_context();
+ let to_peer_id = persistent_context.to_peer.id;
+
+ let mut env = TestingEnv::new();
+ let ctx = env.context_factory().new_context(persistent_context);
+ let mailbox_ctx = env.mailbox_context();
+ let mailbox = mailbox_ctx.mailbox().clone();
+
+ let (tx, rx) = tokio::sync::mpsc::channel(1);
+
+ mailbox_ctx
+ .insert_heartbeat_response_receiver(Channel::Datanode(to_peer_id), tx)
+ .await;
+
+ send_mock_reply(mailbox, rx, |id| {
+ Ok(new_upgrade_region_reply(id, false, true, None))
+ });
+
+ let instruction = &state.build_upgrade_region_instruction(&ctx);
+ let err = state.upgrade_region(&ctx, instruction).await.unwrap_err();
+
+ assert_matches!(err, Error::RetryLater { .. });
+ assert!(err.is_retryable());
+ assert!(err.to_string().contains("still replaying the wal"));
+
+ // Sets the `require_ready` to false.
+ state.require_ready = false;
+
+ let mailbox = mailbox_ctx.mailbox().clone();
+ let (tx, rx) = tokio::sync::mpsc::channel(1);
+
+ mailbox_ctx
+ .insert_heartbeat_response_receiver(Channel::Datanode(to_peer_id), tx)
+ .await;
+
+ send_mock_reply(mailbox, rx, |id| {
+ Ok(new_upgrade_region_reply(id, false, true, None))
+ });
+
+ let instruction = &state.build_upgrade_region_instruction(&ctx);
+ state.upgrade_region(&ctx, instruction).await.unwrap();
+ }
+
+ #[tokio::test]
+ async fn test_upgrade_region_with_retry_ok() {
+ let mut state = Box::<UpgradeCandidateRegion>::default();
+ state.retry_initial_interval = Duration::from_millis(100);
+ let persistent_context = new_persistent_context();
+ let to_peer_id = persistent_context.to_peer.id;
+
+ let mut env = TestingEnv::new();
+ let mut ctx = env.context_factory().new_context(persistent_context);
+ let mailbox_ctx = env.mailbox_context();
+ let mailbox = mailbox_ctx.mailbox().clone();
+
+ let (tx, mut rx) = tokio::sync::mpsc::channel(1);
+
+ mailbox_ctx
+ .insert_heartbeat_response_receiver(Channel::Datanode(to_peer_id), tx)
+ .await;
+
+ common_runtime::spawn_bg(async move {
+ let resp = rx.recv().await.unwrap().unwrap();
+ let reply_id = resp.mailbox_message.unwrap().id;
+ mailbox
+ .on_recv(
+ reply_id,
+ Err(error::MailboxTimeoutSnafu { id: reply_id }.build()),
+ )
+ .await
+ .unwrap();
+
+ // retry: 1
+ let resp = rx.recv().await.unwrap().unwrap();
+ let reply_id = resp.mailbox_message.unwrap().id;
+ mailbox
+ .on_recv(
+ reply_id,
+ Ok(new_upgrade_region_reply(reply_id, false, true, None)),
+ )
+ .await
+ .unwrap();
+
+ // retry: 2
+ let resp = rx.recv().await.unwrap().unwrap();
+ let reply_id = resp.mailbox_message.unwrap().id;
+ mailbox
+ .on_recv(
+ reply_id,
+ Ok(new_upgrade_region_reply(reply_id, true, true, None)),
+ )
+ .await
+ .unwrap();
+ });
+
+ let next = state.next(&mut ctx).await.unwrap();
+
+ let update_metadata = next.as_any().downcast_ref::<UpdateMetadata>().unwrap();
+
+ assert_matches!(update_metadata, UpdateMetadata::Upgrade);
+ }
+
+ #[tokio::test]
+ async fn test_upgrade_region_with_retry_failed() {
+ let mut state = Box::<UpgradeCandidateRegion>::default();
+ state.retry_initial_interval = Duration::from_millis(100);
+ let persistent_context = new_persistent_context();
+ let to_peer_id = persistent_context.to_peer.id;
+
+ let mut env = TestingEnv::new();
+ let mut ctx = env.context_factory().new_context(persistent_context);
+ let mailbox_ctx = env.mailbox_context();
+ let mailbox = mailbox_ctx.mailbox().clone();
+
+ let (tx, mut rx) = tokio::sync::mpsc::channel(1);
+
+ mailbox_ctx
+ .insert_heartbeat_response_receiver(Channel::Datanode(to_peer_id), tx)
+ .await;
+
+ common_runtime::spawn_bg(async move {
+ let resp = rx.recv().await.unwrap().unwrap();
+ let reply_id = resp.mailbox_message.unwrap().id;
+ mailbox
+ .on_recv(
+ reply_id,
+ Err(error::MailboxTimeoutSnafu { id: reply_id }.build()),
+ )
+ .await
+ .unwrap();
+
+ // retry: 1
+ let resp = rx.recv().await.unwrap().unwrap();
+ let reply_id = resp.mailbox_message.unwrap().id;
+ mailbox
+ .on_recv(
+ reply_id,
+ Ok(new_upgrade_region_reply(reply_id, false, true, None)),
+ )
+ .await
+ .unwrap();
+
+ // retry: 2
+ let resp = rx.recv().await.unwrap().unwrap();
+ let reply_id = resp.mailbox_message.unwrap().id;
+ mailbox
+ .on_recv(
+ reply_id,
+ Ok(new_upgrade_region_reply(reply_id, false, false, None)),
+ )
+ .await
+ .unwrap();
+ });
+
+ let next = state.next(&mut ctx).await.unwrap();
+
+ let update_metadata = next.as_any().downcast_ref::<UpdateMetadata>().unwrap();
+ assert_matches!(update_metadata, UpdateMetadata::Rollback);
+ }
+}
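The datanode-side handler for the new instruction is still `todo!()` in this commit; the following is only a hypothetical sketch of the reply it would eventually build from the types added above. The `region_exists` and `replay_caught_up` inputs are assumptions for illustration, not real APIs.

use common_meta::instruction::{InstructionReply, UpgradeRegion, UpgradeRegionReply};

fn reply_to_upgrade_region(
    _request: &UpgradeRegion,
    region_exists: bool,
    replay_caught_up: bool,
) -> InstructionReply {
    InstructionReply::UpgradeRegion(UpgradeRegionReply {
        // `ready` means the WAL has been replayed up to `last_entry_id`;
        // with `require_ready` set, a false value makes the metasrv retry.
        ready: region_exists && replay_caught_up,
        // A missing candidate region aborts the migration instead of retrying.
        exists: region_exists,
        error: None,
    })
}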
|
feat
|
add upgrade candidate region step (#2829)
|
f04d3802598aa049fdd93900e180df2dececf7e4
|
2024-12-18 14:21:46
|
Ruihang Xia
|
fix: validate matcher op for __name__ in promql (#5191)
| false
|
diff --git a/src/query/src/promql/planner.rs b/src/query/src/promql/planner.rs
index 1e7bc27dab6a..bfdfb5981ae1 100644
--- a/src/query/src/promql/planner.rs
+++ b/src/query/src/promql/planner.rs
@@ -689,6 +689,13 @@ impl PromPlanner {
let mut matches = label_matchers.find_matchers(METRIC_NAME);
ensure!(!matches.is_empty(), NoMetricMatcherSnafu);
ensure!(matches.len() == 1, MultipleMetricMatchersSnafu);
+ ensure!(
+ matches[0].op == MatchOp::Equal,
+ UnsupportedMatcherOpSnafu {
+ matcher_op: matches[0].op.to_string(),
+ matcher: METRIC_NAME
+ }
+ );
metric_name = matches.pop().map(|m| m.value);
}
diff --git a/tests/cases/standalone/common/tql/basic.result b/tests/cases/standalone/common/tql/basic.result
index 5c6725dbcd07..3015101a5554 100644
--- a/tests/cases/standalone/common/tql/basic.result
+++ b/tests/cases/standalone/common/tql/basic.result
@@ -66,6 +66,10 @@ TQL EVAL (0, 10, '5s') {__name__!="test"};
Error: 2000(InvalidSyntax), vector selector must contain at least one non-empty matcher
+TQL EVAL (0, 10, '5s') {__name__=~"test"};
+
+Error: 1004(InvalidArguments), Matcher operator =~ is not supported for __name__
+
-- the point at 1ms will be shadowed by the point at 2ms
TQL EVAL (0, 10, '5s') test{k="a"};
diff --git a/tests/cases/standalone/common/tql/basic.sql b/tests/cases/standalone/common/tql/basic.sql
index 85f29481486c..afca586ed8a1 100644
--- a/tests/cases/standalone/common/tql/basic.sql
+++ b/tests/cases/standalone/common/tql/basic.sql
@@ -22,6 +22,8 @@ TQL EVAL (0, 10, '5s') {__name__="test", __field__="i"};
-- NOT SUPPORTED: `__name__` matcher without equal condition
TQL EVAL (0, 10, '5s') {__name__!="test"};
+TQL EVAL (0, 10, '5s') {__name__=~"test"};
+
-- the point at 1ms will be shadowed by the point at 2ms
TQL EVAL (0, 10, '5s') test{k="a"};
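Stated on its own, the rule the new `ensure!` enforces is small; here is a sketch under the assumption that `MatchOp` comes from `promql_parser::label`, as used by the planner.

use promql_parser::label::MatchOp;

/// Only an equality matcher may select `__name__`; `!=`, `=~` and `!~`
/// are rejected with an InvalidArguments error.
fn metric_name_matcher_supported(op: &MatchOp) -> bool {
    matches!(op, MatchOp::Equal)
}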
|
fix
|
validate matcher op for __name__ in promql (#5191)
|
b32ea7d84ca8393ced9932c6876c2bad92f660a5
|
2025-03-12 19:15:19
|
Ruihang Xia
|
feat: add Docker image tag information to step summary in dev-build workflow (#5692)
| false
|
diff --git a/.github/workflows/dev-build.yml b/.github/workflows/dev-build.yml
index f325018ffaeb..999da6315859 100644
--- a/.github/workflows/dev-build.yml
+++ b/.github/workflows/dev-build.yml
@@ -238,6 +238,13 @@ jobs:
version: ${{ needs.allocate-runners.outputs.version }}
push-latest-tag: false # Don't push the latest tag to registry.
dev-mode: true # Only build the standard images.
+
+ - name: Echo Docker image tag to step summary
+ run: |
+ echo "## Docker Image Tag" >> $GITHUB_STEP_SUMMARY
+ echo "Image Tag: \`${{ needs.allocate-runners.outputs.version }}\`" >> $GITHUB_STEP_SUMMARY
+ echo "Full Image Name: \`docker.io/${{ vars.IMAGE_NAMESPACE }}/${{ vars.DEV_BUILD_IMAGE_NAME }}:${{ needs.allocate-runners.outputs.version }}\`" >> $GITHUB_STEP_SUMMARY
+ echo "Pull Command: \`docker pull docker.io/${{ vars.IMAGE_NAMESPACE }}/${{ vars.DEV_BUILD_IMAGE_NAME }}:${{ needs.allocate-runners.outputs.version }}\`" >> $GITHUB_STEP_SUMMARY
- name: Set build result
id: set-build-result
|
feat
|
add Docker image tag information to step summary in dev-build workflow (#5692)
|
7f14d407983db20d4ef6e7dff464ba30e22d616a
|
2023-04-25 12:44:46
|
Weny Xu
|
test: add tests for external table (#1460)
| false
|
diff --git a/src/frontend/src/tests/instance_test.rs b/src/frontend/src/tests/instance_test.rs
index 0924436de937..e498655f983f 100644
--- a/src/frontend/src/tests/instance_test.rs
+++ b/src/frontend/src/tests/instance_test.rs
@@ -29,7 +29,7 @@ use crate::error::{Error, Result};
use crate::instance::Instance;
use crate::tests::test_util::{
both_instances_cases, check_output_stream, check_unordered_output_stream, distributed,
- standalone, standalone_instance_case, MockInstance,
+ get_data_dir, standalone, standalone_instance_case, MockInstance,
};
#[apply(both_instances_cases)]
@@ -512,6 +512,179 @@ async fn test_execute_external_create_without_ts_type(instance: Arc<dyn MockInst
assert!(matches!(output, Output::AffectedRows(0)));
}
+#[apply(both_instances_cases)]
+async fn test_execute_query_external_table_csv(instance: Arc<dyn MockInstance>) {
+ let instance = instance.frontend();
+ let format = "csv";
+ let location = get_data_dir("../../tests/data/csv/various_type.csv")
+ .canonicalize()
+ .unwrap()
+ .display()
+ .to_string();
+
+ let table_name = "various_type_csv";
+
+ let output = execute_sql(
+ &instance,
+ &format!(
+ r#"create external table {table_name} with (location='{location}', format='{format}');"#,
+ ),
+ )
+ .await;
+ assert!(matches!(output, Output::AffectedRows(0)));
+
+ let output = execute_sql(&instance, &format!("desc table {table_name};")).await;
+ let expect = "\
++------------+-----------------+------+---------+---------------+
+| Field | Type | Null | Default | Semantic Type |
++------------+-----------------+------+---------+---------------+
+| c_int | Int64 | YES | | FIELD |
+| c_float | Float64 | YES | | FIELD |
+| c_string | Float64 | YES | | FIELD |
+| c_bool | Boolean | YES | | FIELD |
+| c_date | Date | YES | | FIELD |
+| c_datetime | TimestampSecond | YES | | FIELD |
++------------+-----------------+------+---------+---------------+";
+ check_output_stream(output, expect).await;
+
+ let output = execute_sql(&instance, &format!("select * from {table_name};")).await;
+ let expect = "\
++-------+-----------+----------+--------+------------+---------------------+
+| c_int | c_float | c_string | c_bool | c_date | c_datetime |
++-------+-----------+----------+--------+------------+---------------------+
+| 1 | 1.1 | 1.11 | true | 1970-01-01 | 1970-01-01T00:00:00 |
+| 2 | 2.2 | 2.22 | true | 2020-11-08 | 2020-11-08T01:00:00 |
+| 3 | | 3.33 | true | 1969-12-31 | 1969-11-08T02:00:00 |
+| 4 | 4.4 | | false | | |
+| 5 | 6.6 | | false | 1990-01-01 | 1990-01-01T03:00:00 |
+| 4 | 4000000.0 | | false | | |
+| 4 | 4.0e-6 | | false | | |
++-------+-----------+----------+--------+------------+---------------------+";
+ check_output_stream(output, expect).await;
+}
+
+#[apply(both_instances_cases)]
+async fn test_execute_query_external_table_json(instance: Arc<dyn MockInstance>) {
+ let instance = instance.frontend();
+ let format = "json";
+ let location = get_data_dir("../../tests/data/json/various_type.json")
+ .canonicalize()
+ .unwrap()
+ .display()
+ .to_string();
+
+ let table_name = "various_type_json";
+
+ let output = execute_sql(
+ &instance,
+ &format!(
+ r#"create external table {table_name} with (location='{location}', format='{format}');"#,
+ ),
+ )
+ .await;
+ assert!(matches!(output, Output::AffectedRows(0)));
+
+ let output = execute_sql(&instance, &format!("desc table {table_name};")).await;
+ let expect = "\
++-------+---------+------+---------+---------------+
+| Field | Type | Null | Default | Semantic Type |
++-------+---------+------+---------+---------------+
+| a | Int64 | YES | | FIELD |
+| b | Float64 | YES | | FIELD |
+| c | Boolean | YES | | FIELD |
+| d | String | YES | | FIELD |
+| e | Int64 | YES | | FIELD |
+| f | String | YES | | FIELD |
+| g | String | YES | | FIELD |
++-------+---------+------+---------+---------------+";
+ check_output_stream(output, expect).await;
+
+ let output = execute_sql(&instance, &format!("select * from {table_name};")).await;
+ let expect = "\
++-----------------+------+-------+------+------------+----------------+-------------------------+
+| a | b | c | d | e | f | g |
++-----------------+------+-------+------+------------+----------------+-------------------------+
+| 1 | 2.0 | false | 4 | 1681319393 | 1.02 | 2012-04-23T18:25:43.511 |
+| -10 | -3.5 | true | 4 | 1681356393 | -0.3 | 2016-04-23T18:25:43.511 |
+| 2 | 0.6 | false | text | 1681329393 | 1377.223 | |
+| 1 | 2.0 | false | 4 | | 1337.009 | |
+| 7 | -3.5 | true | 4 | | 1 | |
+| 1 | 0.6 | false | text | | 1338 | 2018-10-23T18:33:16.481 |
+| 1 | 2.0 | false | 4 | | 12345829100000 | |
+| 5 | -3.5 | true | 4 | | 99999999.99 | |
+| 1 | 0.6 | false | text | | 1 | |
+| 1 | 2.0 | false | 4 | | 1 | |
+| 1 | -3.5 | true | 4 | | 1 | |
+| 100000000000000 | 0.6 | false | text | | 1 | |
++-----------------+------+-------+------+------------+----------------+-------------------------+";
+ check_output_stream(output, expect).await;
+}
+
+#[apply(both_instances_cases)]
+async fn test_execute_query_external_table_json_with_schame(instance: Arc<dyn MockInstance>) {
+ let instance = instance.frontend();
+ let format = "json";
+ let location = get_data_dir("../../tests/data/json/various_type.json")
+ .canonicalize()
+ .unwrap()
+ .display()
+ .to_string();
+
+ let table_name = "various_type_json_with_schema";
+
+ let output = execute_sql(
+ &instance,
+ &format!(
+ r#"CREATE EXTERNAL TABLE {table_name} (
+ a BIGINT NULL,
+ b DOUBLE NULL,
+ c BOOLEAN NULL,
+ d STRING NULL,
+ e TIMESTAMP(0) NULL,
+ f DOUBLE NULL,
+ g TIMESTAMP(0) NULL,
+ ) WITH (location='{location}', format='{format}');"#,
+ ),
+ )
+ .await;
+ assert!(matches!(output, Output::AffectedRows(0)));
+
+ let output = execute_sql(&instance, &format!("desc table {table_name};")).await;
+ let expect = "\
++-------+-----------------+------+---------+---------------+
+| Field | Type | Null | Default | Semantic Type |
++-------+-----------------+------+---------+---------------+
+| a | Int64 | YES | | FIELD |
+| b | Float64 | YES | | FIELD |
+| c | Boolean | YES | | FIELD |
+| d | String | YES | | FIELD |
+| e | TimestampSecond | YES | | FIELD |
+| f | Float64 | YES | | FIELD |
+| g | TimestampSecond | YES | | FIELD |
++-------+-----------------+------+---------+---------------+";
+ check_output_stream(output, expect).await;
+
+ let output = execute_sql(&instance, &format!("select * from {table_name};")).await;
+ let expect = "\
++-----------------+------+-------+------+---------------------+---------------+---------------------+
+| a | b | c | d | e | f | g |
++-----------------+------+-------+------+---------------------+---------------+---------------------+
+| 1 | 2.0 | false | 4 | 2023-04-12T17:09:53 | 1.02 | 2012-04-23T18:25:43 |
+| -10 | -3.5 | true | 4 | 2023-04-13T03:26:33 | -0.3 | 2016-04-23T18:25:43 |
+| 2 | 0.6 | false | text | 2023-04-12T19:56:33 | 1377.223 | |
+| 1 | 2.0 | false | 4 | | 1337.009 | |
+| 7 | -3.5 | true | 4 | | 1.0 | |
+| 1 | 0.6 | false | text | | 1338.0 | 2018-10-23T18:33:16 |
+| 1 | 2.0 | false | 4 | | 1.23458291e13 | |
+| 5 | -3.5 | true | 4 | | 99999999.99 | |
+| 1 | 0.6 | false | text | | 1.0 | |
+| 1 | 2.0 | false | 4 | | 1.0 | |
+| 1 | -3.5 | true | 4 | | 1.0 | |
+| 100000000000000 | 0.6 | false | text | | 1.0 | |
++-----------------+------+-------+------+---------------------+---------------+---------------------+";
+ check_output_stream(output, expect).await;
+}
+
#[apply(standalone_instance_case)]
async fn test_rename_table(instance: Arc<dyn MockInstance>) {
let instance = instance.frontend();
diff --git a/src/frontend/src/tests/test_util.rs b/src/frontend/src/tests/test_util.rs
index c40e2752f241..668370c9ed35 100644
--- a/src/frontend/src/tests/test_util.rs
+++ b/src/frontend/src/tests/test_util.rs
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::path::PathBuf;
use std::sync::Arc;
use common_query::Output;
@@ -118,3 +119,9 @@ pub(crate) async fn check_unordered_output_stream(output: Output, expected: &str
let expected = sort_table(expected);
assert_eq!(pretty_print, expected);
}
+
+pub fn get_data_dir(path: &str) -> PathBuf {
+ let dir = env!("CARGO_MANIFEST_DIR");
+
+ PathBuf::from(dir).join(path)
+}
diff --git a/tests/data/csv/various_type.csv b/tests/data/csv/various_type.csv
new file mode 100644
index 000000000000..11f4dc5c2afd
--- /dev/null
+++ b/tests/data/csv/various_type.csv
@@ -0,0 +1,8 @@
+c_int,c_float,c_string,c_bool,c_date,c_datetime
+1,1.1,"1.11",true,1970-01-01,1970-01-01T00:00:00
+2,2.2,"2.22",true,2020-11-08,2020-11-08T01:00:00
+3,,"3.33",true,1969-12-31,1969-11-08T02:00:00
+4,4.4,,false,,
+5,6.6,"",false,1990-01-01,1990-01-01T03:00:00
+4,4e6,,false,,
+4,4.0e-6,,false,,
\ No newline at end of file
diff --git a/tests/data/json/various_type.json b/tests/data/json/various_type.json
new file mode 100644
index 000000000000..0a7166186362
--- /dev/null
+++ b/tests/data/json/various_type.json
@@ -0,0 +1,12 @@
+{"a":1, "b":2.0, "c":false, "d":"4", "e":1681319393, "f": "1.02", "g": "2012-04-23T18:25:43.511"}
+{"a":-10, "b":-3.5, "c":true, "d":"4", "e": 1681356393, "f": "-0.3", "g": "2016-04-23T18:25:43.511"}
+{"a":2, "b":0.6, "c":false, "d":"text", "e": 1681329393, "f": "1377.223"}
+{"a":1, "b":2.0, "c":false, "d":"4", "f": "1337.009"}
+{"a":7, "b":-3.5, "c":true, "d":"4", "f": "1"}
+{"a":1, "b":0.6, "c":false, "d":"text", "f": "1338", "g": "2018-10-23T18:33:16.481"}
+{"a":1, "b":2.0, "c":false, "d":"4", "f": "12345829100000"}
+{"a":5, "b":-3.5, "c":true, "d":"4", "f": "99999999.99"}
+{"a":1, "b":0.6, "c":false, "d":"text", "f": "1"}
+{"a":1, "b":2.0, "c":false, "d":"4", "f": "1"}
+{"a":1, "b":-3.5, "c":true, "d":"4", "f": "1"}
+{"a":100000000000000, "b":0.6, "c":false, "d":"text", "f": "1"}
\ No newline at end of file
|
test
|
add tests for external table (#1460)
|
5abe4c141a0d4d82b8a12ce07fadd07c1f360b8c
|
2025-01-25 18:50:25
|
Ruihang Xia
|
feat: expose http endpoint for flownode and metasrv (#5437)
| false
|
diff --git a/docker/docker-compose/cluster-with-etcd.yaml b/docker/docker-compose/cluster-with-etcd.yaml
index e7794662a8d0..8e1773c7d7a5 100644
--- a/docker/docker-compose/cluster-with-etcd.yaml
+++ b/docker/docker-compose/cluster-with-etcd.yaml
@@ -39,14 +39,16 @@ services:
container_name: metasrv
ports:
- 3002:3002
+ - 3000:3000
command:
- metasrv
- start
- --bind-addr=0.0.0.0:3002
- --server-addr=metasrv:3002
- --store-addrs=etcd0:2379
+ - --http-addr=0.0.0.0:3000
healthcheck:
- test: [ "CMD", "curl", "-f", "http://metasrv:3002/health" ]
+ test: [ "CMD", "curl", "-f", "http://metasrv:3000/health" ]
interval: 5s
timeout: 3s
retries: 5
@@ -73,10 +75,10 @@ services:
volumes:
- /tmp/greptimedb-cluster-docker-compose/datanode0:/tmp/greptimedb
healthcheck:
- test: [ "CMD", "curl", "-f", "http://datanode0:5000/health" ]
+ test: [ "CMD", "curl", "-fv", "http://datanode0:5000/health" ]
interval: 5s
timeout: 3s
- retries: 5
+ retries: 10
depends_on:
metasrv:
condition: service_healthy
@@ -115,6 +117,7 @@ services:
container_name: flownode0
ports:
- 4004:4004
+ - 4005:4005
command:
- flownode
- start
@@ -122,9 +125,15 @@ services:
- --metasrv-addrs=metasrv:3002
- --rpc-addr=0.0.0.0:4004
- --rpc-hostname=flownode0:4004
+ - --http-addr=0.0.0.0:4005
depends_on:
frontend0:
condition: service_healthy
+ healthcheck:
+ test: [ "CMD", "curl", "-f", "http://flownode0:4005/health" ]
+ interval: 5s
+ timeout: 3s
+ retries: 5
networks:
- greptimedb
|
feat
|
expose http endpoint for flownode and metasrv (#5437)
|
2ad0b24efad7e2616d2fe087904e4668943b352d
|
2024-03-25 08:43:01
|
x³u³
|
fix: set http response charset to utf-8 when using table format (#3571)
| false
|
diff --git a/src/servers/src/http/table_result.rs b/src/servers/src/http/table_result.rs
index e601213c08bb..a7fac46e89a7 100644
--- a/src/servers/src/http/table_result.rs
+++ b/src/servers/src/http/table_result.rs
@@ -135,7 +135,7 @@ impl IntoResponse for TableResponse {
let mut resp = (
[(
header::CONTENT_TYPE,
- HeaderValue::from_static(mime::PLAIN.as_ref()),
+ HeaderValue::from_static(mime::TEXT_PLAIN_UTF_8.as_ref()),
)],
self.to_string(),
)
diff --git a/src/servers/tests/http/http_handler_test.rs b/src/servers/tests/http/http_handler_test.rs
index afcd44c26296..0f0c8966ca5d 100644
--- a/src/servers/tests/http/http_handler_test.rs
+++ b/src/servers/tests/http/http_handler_test.rs
@@ -46,7 +46,7 @@ async fn test_sql_not_provided() {
script_handler: None,
};
- for format in ["greptimedb_v1", "influxdb_v1", "csv"] {
+ for format in ["greptimedb_v1", "influxdb_v1", "csv", "table"] {
let query = http_handler::SqlQuery {
db: None,
sql: None,
@@ -82,7 +82,7 @@ async fn test_sql_output_rows() {
script_handler: None,
};
- for format in ["greptimedb_v1", "influxdb_v1", "csv"] {
+ for format in ["greptimedb_v1", "influxdb_v1", "csv", "table"] {
let query = create_query(format);
let json = http_handler::sql(
State(api_state.clone()),
@@ -154,6 +154,23 @@ async fn test_sql_output_rows() {
hyper::body::Bytes::from_static(b"4950\n"),
);
}
+ HttpResponse::Table(resp) => {
+ use http_body::Body as HttpBody;
+ let mut resp = resp.into_response();
+ assert_eq!(
+ resp.headers().get(header::CONTENT_TYPE),
+ Some(HeaderValue::from_static(mime::TEXT_PLAIN_UTF_8.as_ref())).as_ref(),
+ );
+ assert_eq!(
+ resp.body_mut().data().await.unwrap().unwrap(),
+ hyper::body::Bytes::from(
+ r#"┌─SUM(numbers.uint32s)─┐
+│ 4950 │
+└──────────────────────┘
+"#
+ ),
+ );
+ }
_ => unreachable!(),
}
}
@@ -172,7 +189,7 @@ async fn test_sql_form() {
script_handler: None,
};
- for format in ["greptimedb_v1", "influxdb_v1", "csv"] {
+ for format in ["greptimedb_v1", "influxdb_v1", "csv", "table"] {
let form = create_form(format);
let json = http_handler::sql(
State(api_state.clone()),
@@ -244,6 +261,23 @@ async fn test_sql_form() {
hyper::body::Bytes::from_static(b"4950\n"),
);
}
+ HttpResponse::Table(resp) => {
+ use http_body::Body as HttpBody;
+ let mut resp = resp.into_response();
+ assert_eq!(
+ resp.headers().get(header::CONTENT_TYPE),
+ Some(HeaderValue::from_static(mime::TEXT_PLAIN_UTF_8.as_ref())).as_ref(),
+ );
+ assert_eq!(
+ resp.body_mut().data().await.unwrap().unwrap(),
+ hyper::body::Bytes::from(
+ r#"┌─SUM(numbers.uint32s)─┐
+│ 4950 │
+└──────────────────────┘
+"#
+ ),
+ );
+ }
_ => unreachable!(),
}
}
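For context, a small sketch of what the two constants resolve to, assuming the standard `mime` crate: `mime::PLAIN` is only the subtype name, while the replacement carries the full type and charset.

fn table_content_type() -> &'static str {
    // `mime::PLAIN.as_ref()` yields just "plain";
    // `mime::TEXT_PLAIN_UTF_8` yields the full header value below.
    mime::TEXT_PLAIN_UTF_8.as_ref() // "text/plain; charset=utf-8"
}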
|
fix
|
set http response charset to utf-8 when using table format (#3571)
|
d4e0dc36859d37139ec79305bb119da0ab51b3b7
|
2023-03-06 08:37:21
|
yuanbohan
|
feat: specify prom server start addr (#1111)
| false
|
diff --git a/config/frontend.example.toml b/config/frontend.example.toml
index 135b6a82e448..2247ebf5dfab 100644
--- a/config/frontend.example.toml
+++ b/config/frontend.example.toml
@@ -46,8 +46,8 @@ enable = true
[prometheus_options]
enable = true
-# PromQL protocol options, see `standalone.example.toml`.
-[promql_options]
+# Prometheus protocol options, see `standalone.example.toml`.
+[prom_options]
addr = "127.0.0.1:4004"
# Metasrv client options, see `datanode.example.toml`.
diff --git a/config/standalone.example.toml b/config/standalone.example.toml
index 69e58780c92e..e05190dc91b6 100644
--- a/config/standalone.example.toml
+++ b/config/standalone.example.toml
@@ -71,9 +71,9 @@ enable = true
# Whether to enable Prometheus remote write and read in HTTP API, true by default.
enable = true
-# PromQL protocol options.
-[promql_options]
-# PromQL server address, "127.0.0.1:4004" by default.
+# Prom protocol options.
+[prom_options]
+# Prometheus API server address, "127.0.0.1:4004" by default.
addr = "127.0.0.1:4004"
# WAL options.
diff --git a/src/cmd/src/frontend.rs b/src/cmd/src/frontend.rs
index 6c9ce4c4d675..2c54a1e896ae 100644
--- a/src/cmd/src/frontend.rs
+++ b/src/cmd/src/frontend.rs
@@ -23,6 +23,7 @@ use frontend::instance::Instance;
use frontend::mysql::MysqlOptions;
use frontend::opentsdb::OpentsdbOptions;
use frontend::postgres::PostgresOptions;
+use frontend::prom::PromOptions;
use meta_client::MetaClientOptions;
use servers::auth::UserProviderRef;
use servers::http::HttpOptions;
@@ -67,6 +68,8 @@ pub struct StartCommand {
#[clap(long)]
mysql_addr: Option<String>,
#[clap(long)]
+ prom_addr: Option<String>,
+ #[clap(long)]
postgres_addr: Option<String>,
#[clap(long)]
opentsdb_addr: Option<String>,
@@ -141,6 +144,9 @@ impl TryFrom<StartCommand> for FrontendOptions {
..Default::default()
});
}
+ if let Some(addr) = cmd.prom_addr {
+ opts.prom_options = Some(PromOptions { addr });
+ }
if let Some(addr) = cmd.postgres_addr {
opts.postgres_options = Some(PostgresOptions {
addr,
@@ -186,6 +192,7 @@ mod tests {
let command = StartCommand {
http_addr: Some("127.0.0.1:1234".to_string()),
grpc_addr: None,
+ prom_addr: Some("127.0.0.1:4444".to_string()),
mysql_addr: Some("127.0.0.1:5678".to_string()),
postgres_addr: Some("127.0.0.1:5432".to_string()),
opentsdb_addr: Some("127.0.0.1:4321".to_string()),
@@ -209,6 +216,7 @@ mod tests {
opts.opentsdb_options.as_ref().unwrap().addr,
"127.0.0.1:4321"
);
+ assert_eq!(opts.prom_options.as_ref().unwrap().addr, "127.0.0.1:4444");
let default_opts = FrontendOptions::default();
assert_eq!(
@@ -247,6 +255,7 @@ mod tests {
http_addr: None,
grpc_addr: None,
mysql_addr: None,
+ prom_addr: None,
postgres_addr: None,
opentsdb_addr: None,
influxdb_enable: None,
@@ -276,6 +285,7 @@ mod tests {
http_addr: None,
grpc_addr: None,
mysql_addr: None,
+ prom_addr: None,
postgres_addr: None,
opentsdb_addr: None,
influxdb_enable: None,
diff --git a/src/cmd/src/standalone.rs b/src/cmd/src/standalone.rs
index 77fef7481b2f..4675f704a198 100644
--- a/src/cmd/src/standalone.rs
+++ b/src/cmd/src/standalone.rs
@@ -28,8 +28,8 @@ use frontend::instance::Instance as FeInstance;
use frontend::mysql::MysqlOptions;
use frontend::opentsdb::OpentsdbOptions;
use frontend::postgres::PostgresOptions;
+use frontend::prom::PromOptions;
use frontend::prometheus::PrometheusOptions;
-use frontend::promql::PromqlOptions;
use serde::{Deserialize, Serialize};
use servers::http::HttpOptions;
use servers::tls::{TlsMode, TlsOption};
@@ -77,7 +77,7 @@ pub struct StandaloneOptions {
pub opentsdb_options: Option<OpentsdbOptions>,
pub influxdb_options: Option<InfluxdbOptions>,
pub prometheus_options: Option<PrometheusOptions>,
- pub promql_options: Option<PromqlOptions>,
+ pub prom_options: Option<PromOptions>,
pub wal: WalConfig,
pub storage: ObjectStoreConfig,
pub compaction: CompactionConfig,
@@ -96,7 +96,7 @@ impl Default for StandaloneOptions {
opentsdb_options: Some(OpentsdbOptions::default()),
influxdb_options: Some(InfluxdbOptions::default()),
prometheus_options: Some(PrometheusOptions::default()),
- promql_options: Some(PromqlOptions::default()),
+ prom_options: Some(PromOptions::default()),
wal: WalConfig::default(),
storage: ObjectStoreConfig::default(),
compaction: CompactionConfig::default(),
@@ -116,7 +116,7 @@ impl StandaloneOptions {
opentsdb_options: self.opentsdb_options,
influxdb_options: self.influxdb_options,
prometheus_options: self.prometheus_options,
- promql_options: self.promql_options,
+ prom_options: self.prom_options,
meta_client_options: None,
}
}
@@ -142,6 +142,8 @@ struct StartCommand {
#[clap(long)]
mysql_addr: Option<String>,
#[clap(long)]
+ prom_addr: Option<String>,
+ #[clap(long)]
postgres_addr: Option<String>,
#[clap(long)]
opentsdb_addr: Option<String>,
@@ -254,6 +256,11 @@ impl TryFrom<StartCommand> for FrontendOptions {
..Default::default()
})
}
+
+ if let Some(addr) = cmd.prom_addr {
+ opts.prom_options = Some(PromOptions { addr })
+ }
+
if let Some(addr) = cmd.postgres_addr {
opts.postgres_options = Some(PostgresOptions {
addr,
@@ -302,6 +309,7 @@ mod tests {
http_addr: None,
rpc_addr: None,
mysql_addr: None,
+ prom_addr: None,
postgres_addr: None,
opentsdb_addr: None,
config_file: Some(format!(
@@ -347,6 +355,7 @@ mod tests {
let command = StartCommand {
http_addr: None,
rpc_addr: None,
+ prom_addr: None,
mysql_addr: None,
postgres_addr: None,
opentsdb_addr: None,
diff --git a/src/datanode/src/instance/sql.rs b/src/datanode/src/instance/sql.rs
index 943c746dc812..3fe5b1ada6d6 100644
--- a/src/datanode/src/instance/sql.rs
+++ b/src/datanode/src/instance/sql.rs
@@ -24,7 +24,7 @@ use datatypes::schema::Schema;
use futures::StreamExt;
use query::parser::{PromQuery, QueryLanguageParser, QueryStatement};
use servers::error as server_error;
-use servers::promql::PromqlHandler;
+use servers::prom::PromHandler;
use servers::query_handler::sql::SqlQueryHandler;
use session::context::{QueryContext, QueryContextRef};
use snafu::prelude::*;
@@ -366,7 +366,7 @@ impl SqlQueryHandler for Instance {
}
#[async_trait]
-impl PromqlHandler for Instance {
+impl PromHandler for Instance {
async fn do_query(&self, query: &PromQuery) -> server_error::Result<Output> {
let _timer = timer!(metric::METRIC_HANDLE_PROMQL_ELAPSED);
diff --git a/src/frontend/src/frontend.rs b/src/frontend/src/frontend.rs
index 2dced030e14b..da862511fa08 100644
--- a/src/frontend/src/frontend.rs
+++ b/src/frontend/src/frontend.rs
@@ -28,8 +28,8 @@ use crate::instance::FrontendInstance;
use crate::mysql::MysqlOptions;
use crate::opentsdb::OpentsdbOptions;
use crate::postgres::PostgresOptions;
+use crate::prom::PromOptions;
use crate::prometheus::PrometheusOptions;
-use crate::promql::PromqlOptions;
use crate::server::Services;
#[derive(Clone, Debug, Serialize, Deserialize)]
@@ -43,7 +43,7 @@ pub struct FrontendOptions {
pub opentsdb_options: Option<OpentsdbOptions>,
pub influxdb_options: Option<InfluxdbOptions>,
pub prometheus_options: Option<PrometheusOptions>,
- pub promql_options: Option<PromqlOptions>,
+ pub prom_options: Option<PromOptions>,
pub meta_client_options: Option<MetaClientOptions>,
}
@@ -58,7 +58,7 @@ impl Default for FrontendOptions {
opentsdb_options: Some(OpentsdbOptions::default()),
influxdb_options: Some(InfluxdbOptions::default()),
prometheus_options: Some(PrometheusOptions::default()),
- promql_options: Some(PromqlOptions::default()),
+ prom_options: Some(PromOptions::default()),
meta_client_options: None,
}
}
diff --git a/src/frontend/src/instance.rs b/src/frontend/src/instance.rs
index f5ea3a89a4df..2bc57a1ff656 100644
--- a/src/frontend/src/instance.rs
+++ b/src/frontend/src/instance.rs
@@ -49,7 +49,7 @@ use query::parser::PromQuery;
use query::query_engine::options::{validate_catalog_and_schema, QueryOptions};
use servers::error as server_error;
use servers::interceptor::{SqlQueryInterceptor, SqlQueryInterceptorRef};
-use servers::promql::{PromqlHandler, PromqlHandlerRef};
+use servers::prom::{PromHandler, PromHandlerRef};
use servers::query_handler::grpc::{GrpcQueryHandler, GrpcQueryHandlerRef};
use servers::query_handler::sql::{SqlQueryHandler, SqlQueryHandlerRef};
use servers::query_handler::{
@@ -81,7 +81,7 @@ pub trait FrontendInstance:
+ InfluxdbLineProtocolHandler
+ PrometheusProtocolHandler
+ ScriptHandler
- + PromqlHandler
+ + PromHandler
+ Send
+ Sync
+ 'static
@@ -99,7 +99,7 @@ pub struct Instance {
script_handler: Option<ScriptHandlerRef>,
sql_handler: SqlQueryHandlerRef<Error>,
grpc_query_handler: GrpcQueryHandlerRef<Error>,
- promql_handler: Option<PromqlHandlerRef>,
+ promql_handler: Option<PromHandlerRef>,
create_expr_factory: CreateExprFactoryRef,
@@ -539,7 +539,7 @@ impl ScriptHandler for Instance {
}
#[async_trait]
-impl PromqlHandler for Instance {
+impl PromHandler for Instance {
async fn do_query(&self, query: &PromQuery) -> server_error::Result<Output> {
if let Some(promql_handler) = &self.promql_handler {
promql_handler.do_query(query).await
diff --git a/src/frontend/src/lib.rs b/src/frontend/src/lib.rs
index 4c33577f2c63..b5d82f93b726 100644
--- a/src/frontend/src/lib.rs
+++ b/src/frontend/src/lib.rs
@@ -25,8 +25,8 @@ pub mod instance;
pub mod mysql;
pub mod opentsdb;
pub mod postgres;
+pub mod prom;
pub mod prometheus;
-pub mod promql;
mod server;
mod sql;
mod table;
diff --git a/src/frontend/src/promql.rs b/src/frontend/src/prom.rs
similarity index 87%
rename from src/frontend/src/promql.rs
rename to src/frontend/src/prom.rs
index a2e18a492220..d617d95f39ad 100644
--- a/src/frontend/src/promql.rs
+++ b/src/frontend/src/prom.rs
@@ -15,11 +15,11 @@
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, Serialize, Deserialize)]
-pub struct PromqlOptions {
+pub struct PromOptions {
pub addr: String,
}
-impl Default for PromqlOptions {
+impl Default for PromOptions {
fn default() -> Self {
Self {
addr: "127.0.0.1:4004".to_string(),
@@ -29,11 +29,11 @@ impl Default for PromqlOptions {
#[cfg(test)]
mod tests {
- use super::PromqlOptions;
+ use super::PromOptions;
#[test]
fn test_prometheus_options() {
- let default = PromqlOptions::default();
+ let default = PromOptions::default();
assert_eq!(default.addr, "127.0.0.1:4004".to_string());
}
}
diff --git a/src/frontend/src/server.rs b/src/frontend/src/server.rs
index a6ef49bddfbd..c6d609e3f800 100644
--- a/src/frontend/src/server.rs
+++ b/src/frontend/src/server.rs
@@ -25,7 +25,7 @@ use servers::http::HttpServer;
use servers::mysql::server::{MysqlServer, MysqlSpawnConfig, MysqlSpawnRef};
use servers::opentsdb::OpentsdbServer;
use servers::postgres::PostgresServer;
-use servers::promql::PromqlServer;
+use servers::prom::PromServer;
use servers::query_handler::grpc::ServerGrpcQueryHandlerAdaptor;
use servers::query_handler::sql::ServerSqlQueryHandlerAdaptor;
use servers::server::Server;
@@ -183,15 +183,15 @@ impl Services {
None
};
- let promql_server_and_addr = if let Some(promql_options) = &opts.promql_options {
- let promql_addr = parse_addr(&promql_options.addr)?;
+ let prom_server_and_addr = if let Some(prom_options) = &opts.prom_options {
+ let prom_addr = parse_addr(&prom_options.addr)?;
- let mut promql_server = PromqlServer::create_server(instance.clone());
+ let mut prom_server = PromServer::create_server(instance.clone());
if let Some(user_provider) = user_provider {
- promql_server.set_user_provider(user_provider);
+ prom_server.set_user_provider(user_provider);
}
- Some((promql_server as _, promql_addr))
+ Some((prom_server as _, prom_addr))
} else {
None
};
@@ -202,7 +202,7 @@ impl Services {
start_server(mysql_server_and_addr),
start_server(postgres_server_and_addr),
start_server(opentsdb_server_and_addr),
- start_server(promql_server_and_addr),
+ start_server(prom_server_and_addr),
)
.context(error::StartServerSnafu)?;
Ok(())
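
The block above gates each protocol server on its options being present and then starts everything together. A minimal standalone sketch of that shape, assuming only the futures crate; `FakeServer` and this `start_server` are illustrative stand-ins, not the real Services API:

```
// Sketch of the "optional server + joint start" pattern used above.
struct FakeServer(&'static str);

async fn start_server(server_and_addr: Option<(FakeServer, &'static str)>) {
    // Only a configured server actually starts.
    if let Some((server, addr)) = server_and_addr {
        println!("starting {} on {}", server.0, addr);
    }
}

fn main() {
    // A configured Prometheus API server and an unconfigured OpenTSDB server.
    let prom_server_and_addr = Some((FakeServer("prometheus-api"), "127.0.0.1:4004"));
    let opentsdb_server_and_addr: Option<(FakeServer, &'static str)> = None;

    futures::executor::block_on(async {
        futures::join!(
            start_server(prom_server_and_addr),
            start_server(opentsdb_server_and_addr),
        );
    });
}
```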
diff --git a/src/servers/src/lib.rs b/src/servers/src/lib.rs
index d759a57bb6a8..dc47fbfba9bd 100644
--- a/src/servers/src/lib.rs
+++ b/src/servers/src/lib.rs
@@ -28,8 +28,8 @@ pub mod line_writer;
pub mod mysql;
pub mod opentsdb;
pub mod postgres;
+pub mod prom;
pub mod prometheus;
-pub mod promql;
pub mod query_handler;
pub mod server;
mod shutdown;
diff --git a/src/servers/src/promql.rs b/src/servers/src/prom.rs
similarity index 89%
rename from src/servers/src/promql.rs
rename to src/servers/src/prom.rs
index 44d52c4af101..6382f372005f 100644
--- a/src/servers/src/promql.rs
+++ b/src/servers/src/prom.rs
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//! prom supplies Prometheus HTTP API server compliance
use std::collections::HashMap;
use std::net::SocketAddr;
use std::sync::Arc;
@@ -52,24 +53,25 @@ use crate::error::{
use crate::http::authorize::HttpAuth;
use crate::server::Server;
-pub const PROMQL_API_VERSION: &str = "v1";
+pub const PROM_API_VERSION: &str = "v1";
-pub type PromqlHandlerRef = Arc<dyn PromqlHandler + Send + Sync>;
+pub type PromHandlerRef = Arc<dyn PromHandler + Send + Sync>;
#[async_trait]
-pub trait PromqlHandler {
+pub trait PromHandler {
async fn do_query(&self, query: &PromQuery) -> Result<Output>;
}
-pub struct PromqlServer {
- query_handler: PromqlHandlerRef,
+/// PromServer represents PrometheusServer which handles the compliance with prometheus HTTP API
+pub struct PromServer {
+ query_handler: PromHandlerRef,
shutdown_tx: Mutex<Option<Sender<()>>>,
user_provider: Option<UserProviderRef>,
}
-impl PromqlServer {
- pub fn create_server(query_handler: PromqlHandlerRef) -> Box<Self> {
- Box::new(PromqlServer {
+impl PromServer {
+ pub fn create_server(query_handler: PromHandlerRef) -> Box<Self> {
+ Box::new(PromServer {
query_handler,
shutdown_tx: Mutex::new(None),
user_provider: None,
@@ -90,7 +92,7 @@ impl PromqlServer {
.with_state(self.query_handler.clone());
Router::new()
- .nest(&format!("/api/{PROMQL_API_VERSION}"), router)
+ .nest(&format!("/api/{PROM_API_VERSION}"), router)
// middlewares
.layer(
ServiceBuilder::new()
@@ -105,15 +107,15 @@ impl PromqlServer {
}
#[async_trait]
-impl Server for PromqlServer {
+impl Server for PromServer {
async fn shutdown(&self) -> Result<()> {
let mut shutdown_tx = self.shutdown_tx.lock().await;
if let Some(tx) = shutdown_tx.take() {
if tx.send(()).is_err() {
- info!("Receiver dropped, the PromQl server has already existed");
+ info!("Receiver dropped, the Prometheus API server has already existed");
}
}
- info!("Shutdown PromQL server");
+ info!("Shutdown Prometheus API server");
Ok(())
}
@@ -124,7 +126,9 @@ impl Server for PromqlServer {
let mut shutdown_tx = self.shutdown_tx.lock().await;
ensure!(
shutdown_tx.is_none(),
- AlreadyStartedSnafu { server: "PromQL" }
+ AlreadyStartedSnafu {
+ server: "Prometheus"
+ }
);
let app = self.make_app();
@@ -135,7 +139,7 @@ impl Server for PromqlServer {
server
};
let listening = server.local_addr();
- info!("PromQL server is bound to {}", listening);
+ info!("Prometheus API server is bound to {}", listening);
let graceful = server.with_graceful_shutdown(rx.map(drop));
graceful.await.context(StartHttpSnafu)?;
@@ -145,22 +149,22 @@ impl Server for PromqlServer {
}
#[derive(Debug, Default, Serialize, Deserialize, JsonSchema)]
-pub struct PromqlSeries {
+pub struct PromSeries {
metric: HashMap<String, String>,
values: Vec<(f64, String)>,
}
#[derive(Debug, Default, Serialize, Deserialize, JsonSchema)]
-pub struct PromqlData {
+pub struct PromData {
#[serde(rename = "resultType")]
result_type: String,
- result: Vec<PromqlSeries>,
+ result: Vec<PromSeries>,
}
#[derive(Debug, Default, Serialize, Deserialize, JsonSchema)]
-pub struct PromqlJsonResponse {
+pub struct PromJsonResponse {
status: String,
- data: PromqlData,
+ data: PromData,
#[serde(skip_serializing_if = "Option::is_none")]
error: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
@@ -170,23 +174,23 @@ pub struct PromqlJsonResponse {
warnings: Option<Vec<String>>,
}
-impl PromqlJsonResponse {
+impl PromJsonResponse {
pub fn error<S1, S2>(error_type: S1, reason: S2) -> Json<Self>
where
S1: Into<String>,
S2: Into<String>,
{
- Json(PromqlJsonResponse {
+ Json(PromJsonResponse {
status: "error".to_string(),
- data: PromqlData::default(),
+ data: PromData::default(),
error: Some(reason.into()),
error_type: Some(error_type.into()),
warnings: None,
})
}
- pub fn success(data: PromqlData) -> Json<Self> {
- Json(PromqlJsonResponse {
+ pub fn success(data: PromData) -> Json<Self> {
+ Json(PromJsonResponse {
status: "success".to_string(),
data,
error: None,
@@ -224,7 +228,7 @@ impl PromqlJsonResponse {
if err.status_code() == StatusCode::TableNotFound
|| err.status_code() == StatusCode::TableColumnNotFound
{
- Self::success(PromqlData {
+ Self::success(PromData {
result_type: "matrix".to_string(),
..Default::default()
})
@@ -235,7 +239,7 @@ impl PromqlJsonResponse {
}
}
- fn record_batches_to_data(batches: RecordBatches, metric_name: String) -> Result<PromqlData> {
+ fn record_batches_to_data(batches: RecordBatches, metric_name: String) -> Result<PromData> {
// infer semantic type of each column from schema.
// TODO(ruihang): wish there is a better way to do this.
let mut timestamp_column_index = None;
@@ -322,13 +326,13 @@ impl PromqlJsonResponse {
let result = buffer
.into_iter()
- .map(|(tags, values)| PromqlSeries {
+ .map(|(tags, values)| PromSeries {
metric: tags.into_iter().collect(),
values,
})
.collect();
- let data = PromqlData {
+ let data = PromData {
result_type: "matrix".to_string(),
result,
};
@@ -346,10 +350,10 @@ pub struct InstantQuery {
#[axum_macros::debug_handler]
pub async fn instant_query(
- State(_handler): State<PromqlHandlerRef>,
+ State(_handler): State<PromHandlerRef>,
Query(_params): Query<InstantQuery>,
-) -> Json<PromqlJsonResponse> {
- PromqlJsonResponse::error(
+) -> Json<PromJsonResponse> {
+ PromJsonResponse::error(
"not implemented",
"instant query api `/query` is not implemented. Use `/query_range` instead.",
)
@@ -366,10 +370,10 @@ pub struct RangeQuery {
#[axum_macros::debug_handler]
pub async fn range_query(
- State(handler): State<PromqlHandlerRef>,
+ State(handler): State<PromHandlerRef>,
Query(params): Query<RangeQuery>,
Form(form_params): Form<RangeQuery>,
-) -> Json<PromqlJsonResponse> {
+) -> Json<PromJsonResponse> {
let prom_query = PromQuery {
query: params.query.or(form_params.query).unwrap_or_default(),
start: params.start.or(form_params.start).unwrap_or_default(),
@@ -378,7 +382,7 @@ pub async fn range_query(
};
let result = handler.do_query(&prom_query).await;
let metric_name = retrieve_metric_name(&prom_query.query).unwrap_or_default();
- PromqlJsonResponse::from_query_result(result, metric_name).await
+ PromJsonResponse::from_query_result(result, metric_name).await
}
fn retrieve_metric_name(promql: &str) -> Option<String> {
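
For context on what these renamed types serialize to: PromJsonResponse keeps the Prometheus-style envelope of `status`, `data.resultType` and `data.result`, with each PromSeries carrying a label map and `(timestamp, value)` pairs. A minimal sketch of that matrix payload, assuming serde_json is available; the metric labels and samples here are invented for illustration:

```
// Sketch only: approximates the JSON emitted by PromJsonResponse::success
// for a "matrix" result. Labels and samples are hypothetical.
fn main() {
    let response = serde_json::json!({
        "status": "success",
        "data": {
            "resultType": "matrix",
            "result": [{
                "metric": { "__name__": "demo_metric", "host": "host-1" },
                "values": [[1690000000.0, "1"], [1690000015.0, "2"]]
            }]
        }
    });
    println!("{}", serde_json::to_string_pretty(&response).unwrap());
}
```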
diff --git a/src/servers/src/prometheus.rs b/src/servers/src/prometheus.rs
index 6c90b2812163..5a96ccb69716 100644
--- a/src/servers/src/prometheus.rs
+++ b/src/servers/src/prometheus.rs
@@ -13,6 +13,7 @@
// limitations under the License.
//! prometheus protocol supportings
+//! handles prometheus remote_write, remote_read logic
use std::cmp::Ordering;
use std::collections::BTreeMap;
use std::hash::{Hash, Hasher};
diff --git a/tests-integration/src/test_util.rs b/tests-integration/src/test_util.rs
index 11ddb3b7b05d..79cdc7173d32 100644
--- a/tests-integration/src/test_util.rs
+++ b/tests-integration/src/test_util.rs
@@ -38,7 +38,7 @@ use once_cell::sync::OnceCell;
use rand::Rng;
use servers::grpc::GrpcServer;
use servers::http::{HttpOptions, HttpServer};
-use servers::promql::PromqlServer;
+use servers::prom::PromServer;
use servers::query_handler::grpc::ServerGrpcQueryHandlerAdaptor;
use servers::query_handler::sql::ServerSqlQueryHandlerAdaptor;
use servers::server::Server;
@@ -305,7 +305,7 @@ pub async fn setup_test_http_app_with_frontend(
(app, guard)
}
-pub async fn setup_test_promql_app_with_frontend(
+pub async fn setup_test_prom_app_with_frontend(
store_type: StorageType,
name: &str,
) -> (Router, TestGuard) {
@@ -320,8 +320,8 @@ pub async fn setup_test_promql_app_with_frontend(
)
.await
.unwrap();
- let promql_server = PromqlServer::create_server(Arc::new(frontend) as _);
- let app = promql_server.make_app();
+ let prom_server = PromServer::create_server(Arc::new(frontend) as _);
+ let app = prom_server.make_app();
(app, guard)
}
diff --git a/tests-integration/tests/http.rs b/tests-integration/tests/http.rs
index 1199f85f5ec8..ad6b498dc9a9 100644
--- a/tests-integration/tests/http.rs
+++ b/tests-integration/tests/http.rs
@@ -19,7 +19,7 @@ use serde_json::json;
use servers::http::handler::HealthResponse;
use servers::http::{JsonOutput, JsonResponse};
use tests_integration::test_util::{
- setup_test_http_app, setup_test_http_app_with_frontend, setup_test_promql_app_with_frontend,
+ setup_test_http_app, setup_test_http_app_with_frontend, setup_test_prom_app_with_frontend,
StorageType,
};
@@ -54,7 +54,7 @@ macro_rules! http_tests {
test_sql_api,
test_prometheus_promql_api,
- test_promql_http_api,
+ test_prom_http_api,
test_metrics_api,
test_scripts_api,
test_health_api,
@@ -284,9 +284,9 @@ pub async fn test_prometheus_promql_api(store_type: StorageType) {
guard.remove_all().await;
}
-pub async fn test_promql_http_api(store_type: StorageType) {
+pub async fn test_prom_http_api(store_type: StorageType) {
common_telemetry::init_default_ut_logging();
- let (app, mut guard) = setup_test_promql_app_with_frontend(store_type, "promql_api").await;
+ let (app, mut guard) = setup_test_prom_app_with_frontend(store_type, "promql_api").await;
let client = TestClient::new(app);
// instant query
| feat | specify prom server start addr (#1111) |
| 86dd19dcc85f7243ea0c631b621f5b33a022e355 | 2022-08-22 09:50:20 | Yong | build: add dockerfile to build greptimedb container image (#194) | false |
diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 000000000000..415d03e0bc3b
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,25 @@
+# macOS trash
+.DS_Store
+
+# Visual Studio Code
+.vscode/
+.devcontainer/
+
+# Eclipse files
+.classpath
+.project
+.settings/**
+
+# Vim swap files
+*.swp
+
+# Files generated by JetBrains IDEs, e.g. IntelliJ IDEA
+.idea/
+*.iml
+out/
+
+# Rust
+target/
+
+# Git
+.git
diff --git a/README.md b/README.md
index e59ff811d3d2..73eb5fdd929c 100644
--- a/README.md
+++ b/README.md
@@ -7,15 +7,18 @@ GreptimeDB: the next-generation hybrid timeseries/analytics processing database
## Getting Started
### Prerequisites
+
To compile GreptimeDB from source, you'll need the following:
- Rust
- Protobuf
- OpenSSL
#### Rust
+
The easiest way to install Rust is to use [`rustup`](https://rustup.rs/), which will check our `rust-toolchain` file and install correct Rust version for you.
#### Protobuf
+
`protoc` is required for compiling `.proto` files. `protobuf` is available from
major package manager on macos and linux distributions. You can find an
installation instructions [here](https://grpc.io/docs/protoc-installation/).
@@ -37,6 +40,12 @@ For macOS:
brew install openssl
```
+### Build the Docker Image
+
+```
+docker build --network host -f docker/Dockerfile -t greptimedb .
+```
+
## Usage
### Start Datanode
@@ -62,6 +71,15 @@ Start datanode with config file:
cargo run -- --log-dir=logs --log-level=debug datanode start -c ./config/datanode.example.toml
```
+Start datanode by running a docker container:
+
+```
+docker run -p 3000:3000 \
+-p 3001:3001 \
+-p 3306:3306 \
+greptimedb
+```
+
### SQL Operations
1. Connecting DB by [mysql client](https://dev.mysql.com/downloads/mysql/):
diff --git a/docker/Dockerfile b/docker/Dockerfile
new file mode 100644
index 000000000000..3a02c1195d48
--- /dev/null
+++ b/docker/Dockerfile
@@ -0,0 +1,32 @@
+FROM ubuntu:22.04 as builder
+
+ENV LANG en_US.utf8
+WORKDIR /greptimedb
+
+# Install dependencies.
+RUN apt-get update && apt-get install -y \
+ libssl-dev \
+ protobuf-compiler \
+ curl \
+ build-essential \
+ pkg-config
+
+# Install Rust.
+SHELL ["/bin/bash", "-c"]
+RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
+ENV PATH /root/.cargo/bin/:$PATH
+
+# Build the project in release mode.
+COPY . .
+RUN cargo build --release
+
+# Export the binary to the clean image.
+# TODO(zyy17): Maybe should use the more secure container image.
+FROM ubuntu:22.04 as base
+
+WORKDIR /greptimedb
+COPY --from=builder /greptimedb/target/release/greptime /greptimedb/bin/
+ENV PATH /greptimedb/bin/:$PATH
+
+ENTRYPOINT [ "greptime" ]
+CMD [ "datanode", "start"]
| build | add dockerfile to build greptimedb container image (#194) |
| 5c72c5ae19f580d24fce9d234c9dc9d17b90d83c | 2022-05-09 12:31:19 | Lei, Huang | tests: add more unit tests for sql mod (#20) | false |
diff --git a/src/sql/src/parsers/insert_parser.rs b/src/sql/src/parsers/insert_parser.rs
index a3d14a348cbe..61fd33c456d0 100644
--- a/src/sql/src/parsers/insert_parser.rs
+++ b/src/sql/src/parsers/insert_parser.rs
@@ -28,3 +28,30 @@ impl<'a> ParserContext<'a> {
}
}
}
+
+#[cfg(test)]
+mod tests {
+ use std::assert_matches::assert_matches;
+
+ use sqlparser::dialect::GenericDialect;
+
+ use super::*;
+
+ #[test]
+ pub fn test_parse_insert() {
+ let sql = r"INSERT INTO table_1 VALUES (
+ 'test1',1,'true',
+ 'test2',2,'false')
+ ";
+ let result = ParserContext::create_with_dialect(sql, &GenericDialect {}).unwrap();
+ assert_eq!(1, result.len());
+ assert_matches!(result[0], Statement::Insert { .. })
+ }
+
+ #[test]
+ pub fn test_parse_invalid_insert() {
+ let sql = r"INSERT INTO table_1 VALUES ("; // intentionally a bad sql
+ let result = ParserContext::create_with_dialect(sql, &GenericDialect {});
+ assert!(result.is_err(), "result is: {:?}", result);
+ }
+}
diff --git a/src/sql/src/statements/insert.rs b/src/sql/src/statements/insert.rs
index afd8c05f04ea..875ccfc92722 100644
--- a/src/sql/src/statements/insert.rs
+++ b/src/sql/src/statements/insert.rs
@@ -20,3 +20,21 @@ impl TryFrom<Statement> for Insert {
}
}
}
+
+#[cfg(test)]
+mod tests {
+ use sqlparser::dialect::GenericDialect;
+
+ use super::*;
+ use crate::parser::ParserContext;
+
+ #[test]
+ pub fn test_insert_convert() {
+ let sql = r"INSERT INTO tables_0 VALUES ( 'field_0', 0) ";
+ let mut stmts = ParserContext::create_with_dialect(sql, &GenericDialect {}).unwrap();
+ assert_eq!(1, stmts.len());
+ let insert = stmts.pop().unwrap();
+ let r: Result<Statement, ParserError> = insert.try_into();
+ r.unwrap();
+ }
+}
diff --git a/src/sql/src/statements/show_kind.rs b/src/sql/src/statements/show_kind.rs
index 228c9e83cfde..ca8e90113c97 100644
--- a/src/sql/src/statements/show_kind.rs
+++ b/src/sql/src/statements/show_kind.rs
@@ -8,3 +8,30 @@ pub enum ShowKind {
Like(Ident),
Where(Expr),
}
+
+#[cfg(test)]
+mod tests {
+ use std::assert_matches::assert_matches;
+
+ use sqlparser::dialect::GenericDialect;
+
+ use crate::parser::ParserContext;
+ use crate::statements::show_kind::ShowKind::All;
+ use crate::statements::statement::Statement;
+
+ #[test]
+ pub fn test_show_database() {
+ let sql = "SHOW DATABASES";
+ let stmts = ParserContext::create_with_dialect(sql, &GenericDialect {}).unwrap();
+ assert_eq!(1, stmts.len());
+ assert_matches!(&stmts[0], Statement::ShowDatabases { .. });
+ match &stmts[0] {
+ Statement::ShowDatabases(show) => {
+ assert_eq!(All, show.kind);
+ }
+ _ => {
+ unreachable!();
+ }
+ }
+ }
+}
diff --git a/src/sql/src/statements/statement.rs b/src/sql/src/statements/statement.rs
index 0a15c4eb4d31..ff7d3c8e4361 100644
--- a/src/sql/src/statements/statement.rs
+++ b/src/sql/src/statements/statement.rs
@@ -42,3 +42,24 @@ pub struct Hint {
pub comment: String,
pub prefix: String,
}
+
+#[cfg(test)]
+mod tests {
+ use std::assert_matches::assert_matches;
+
+ use sqlparser::dialect::GenericDialect;
+
+ use super::*;
+ use crate::parser::ParserContext;
+
+ #[test]
+ pub fn test_statement_convert() {
+ let sql = "SELECT * FROM table_0";
+ let mut stmts = ParserContext::create_with_dialect(sql, &GenericDialect {}).unwrap();
+ assert_eq!(1, stmts.len());
+ let x = stmts.remove(0);
+ let statement = SpStatement::try_from(x).unwrap();
+
+ assert_matches!(statement, SpStatement::Query { .. });
+ }
+}
| tests | add more unit tests for sql mod (#20) |
| a56030e6a5011bcd34d1f6795ed14f10ee2945fd | 2025-03-05 13:52:18 | Lei, HUANG | refactor: remove cluster id field (#5610) | false |
diff --git a/Cargo.lock b/Cargo.lock
index dc70cbc8fbc3..cea12295428e 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4701,7 +4701,7 @@ dependencies = [
[[package]]
name = "greptime-proto"
version = "0.1.0"
-source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=072ce580502e015df1a6b03a185b60309a7c2a7a#072ce580502e015df1a6b03a185b60309a7c2a7a"
+source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=d92c9ac4e90ef4abdcf5c2eaf5a164e18ba09486#d92c9ac4e90ef4abdcf5c2eaf5a164e18ba09486"
dependencies = [
"prost 0.13.3",
"serde",
diff --git a/Cargo.toml b/Cargo.toml
index de351dba9083..67f9649dad47 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -129,7 +129,7 @@ etcd-client = "0.14"
fst = "0.4.7"
futures = "0.3"
futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "072ce580502e015df1a6b03a185b60309a7c2a7a" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "d92c9ac4e90ef4abdcf5c2eaf5a164e18ba09486" }
hex = "0.4"
http = "1"
humantime = "2.1"
diff --git a/src/cmd/src/datanode.rs b/src/cmd/src/datanode.rs
index c26201e7c0fa..18427fef982f 100644
--- a/src/cmd/src/datanode.rs
+++ b/src/cmd/src/datanode.rs
@@ -287,7 +287,6 @@ impl StartCommand {
.await
.context(StartDatanodeSnafu)?;
- let cluster_id = 0; // TODO(hl): read from config
let member_id = opts
.node_id
.context(MissingConfigSnafu { msg: "'node_id'" })?;
@@ -296,13 +295,10 @@ impl StartCommand {
msg: "'meta_client_options'",
})?;
- let meta_client = meta_client::create_meta_client(
- cluster_id,
- MetaClientType::Datanode { member_id },
- meta_config,
- )
- .await
- .context(MetaClientInitSnafu)?;
+ let meta_client =
+ meta_client::create_meta_client(MetaClientType::Datanode { member_id }, meta_config)
+ .await
+ .context(MetaClientInitSnafu)?;
let meta_backend = Arc::new(MetaKvBackend {
client: meta_client.clone(),
diff --git a/src/cmd/src/flownode.rs b/src/cmd/src/flownode.rs
index 9280202471f9..6bd02a6a4651 100644
--- a/src/cmd/src/flownode.rs
+++ b/src/cmd/src/flownode.rs
@@ -241,9 +241,6 @@ impl StartCommand {
let mut opts = opts.component;
opts.grpc.detect_server_addr();
- // TODO(discord9): make it not optionale after cluster id is required
- let cluster_id = opts.cluster_id.unwrap_or(0);
-
let member_id = opts
.node_id
.context(MissingConfigSnafu { msg: "'node_id'" })?;
@@ -252,13 +249,10 @@ impl StartCommand {
msg: "'meta_client_options'",
})?;
- let meta_client = meta_client::create_meta_client(
- cluster_id,
- MetaClientType::Flownode { member_id },
- meta_config,
- )
- .await
- .context(MetaClientInitSnafu)?;
+ let meta_client =
+ meta_client::create_meta_client(MetaClientType::Flownode { member_id }, meta_config)
+ .await
+ .context(MetaClientInitSnafu)?;
let cache_max_capacity = meta_config.metadata_cache_max_capacity;
let cache_ttl = meta_config.metadata_cache_ttl;
diff --git a/src/cmd/src/frontend.rs b/src/cmd/src/frontend.rs
index 3324328a8c78..030783cb2fd4 100644
--- a/src/cmd/src/frontend.rs
+++ b/src/cmd/src/frontend.rs
@@ -295,14 +295,10 @@ impl StartCommand {
let cache_ttl = meta_client_options.metadata_cache_ttl;
let cache_tti = meta_client_options.metadata_cache_tti;
- let cluster_id = 0; // (TODO: jeremy): It is currently a reserved field and has not been enabled.
- let meta_client = meta_client::create_meta_client(
- cluster_id,
- MetaClientType::Frontend,
- meta_client_options,
- )
- .await
- .context(MetaClientInitSnafu)?;
+ let meta_client =
+ meta_client::create_meta_client(MetaClientType::Frontend, meta_client_options)
+ .await
+ .context(MetaClientInitSnafu)?;
// TODO(discord9): add helper function to ease the creation of cache registry&such
let cached_meta_backend =
diff --git a/src/common/meta/src/cluster.rs b/src/common/meta/src/cluster.rs
index f73dcf15372b..27146ea94003 100644
--- a/src/common/meta/src/cluster.rs
+++ b/src/common/meta/src/cluster.rs
@@ -28,7 +28,6 @@ use crate::error::{
InvalidRoleSnafu, ParseNumSnafu, Result,
};
use crate::peer::Peer;
-use crate::ClusterId;
const CLUSTER_NODE_INFO_PREFIX: &str = "__meta_cluster_node_info";
@@ -56,12 +55,9 @@ pub trait ClusterInfo {
// TODO(jeremy): Other info, like region status, etc.
}
-/// The key of [NodeInfo] in the storage. The format is `__meta_cluster_node_info-{cluster_id}-{role}-{node_id}`.
+/// The key of [NodeInfo] in the storage. The format is `__meta_cluster_node_info-0-{role}-{node_id}`.
#[derive(Debug, Clone, Copy, Eq, Hash, PartialEq, Serialize, Deserialize)]
pub struct NodeInfoKey {
- /// The cluster id.
- // todo(hl): remove cluster_id as it is not assigned anywhere.
- pub cluster_id: ClusterId,
/// The role of the node. It can be `[Role::Datanode]` or `[Role::Frontend]`.
pub role: Role,
/// The node id.
@@ -84,24 +80,15 @@ impl NodeInfoKey {
_ => peer.id,
};
- Some(NodeInfoKey {
- cluster_id: header.cluster_id,
- role,
- node_id,
- })
+ Some(NodeInfoKey { role, node_id })
}
- pub fn key_prefix_with_cluster_id(cluster_id: u64) -> String {
- format!("{}-{}-", CLUSTER_NODE_INFO_PREFIX, cluster_id)
+ pub fn key_prefix() -> String {
+ format!("{}-0-", CLUSTER_NODE_INFO_PREFIX)
}
- pub fn key_prefix_with_role(cluster_id: ClusterId, role: Role) -> String {
- format!(
- "{}-{}-{}-",
- CLUSTER_NODE_INFO_PREFIX,
- cluster_id,
- i32::from(role)
- )
+ pub fn key_prefix_with_role(role: Role) -> String {
+ format!("{}-0-{}-", CLUSTER_NODE_INFO_PREFIX, i32::from(role))
}
}
@@ -193,15 +180,10 @@ impl FromStr for NodeInfoKey {
let caps = CLUSTER_NODE_INFO_PREFIX_PATTERN
.captures(key)
.context(InvalidNodeInfoKeySnafu { key })?;
-
ensure!(caps.len() == 4, InvalidNodeInfoKeySnafu { key });
- let cluster_id = caps[1].to_string();
let role = caps[2].to_string();
let node_id = caps[3].to_string();
- let cluster_id: u64 = cluster_id.parse().context(ParseNumSnafu {
- err_msg: format!("invalid cluster_id: {cluster_id}"),
- })?;
let role: i32 = role.parse().context(ParseNumSnafu {
err_msg: format!("invalid role {role}"),
})?;
@@ -210,11 +192,7 @@ impl FromStr for NodeInfoKey {
err_msg: format!("invalid node_id: {node_id}"),
})?;
- Ok(Self {
- cluster_id,
- role,
- node_id,
- })
+ Ok(Self { role, node_id })
}
}
@@ -233,9 +211,8 @@ impl TryFrom<Vec<u8>> for NodeInfoKey {
impl From<&NodeInfoKey> for Vec<u8> {
fn from(key: &NodeInfoKey) -> Self {
format!(
- "{}-{}-{}-{}",
+ "{}-0-{}-{}",
CLUSTER_NODE_INFO_PREFIX,
- key.cluster_id,
i32::from(key.role),
key.node_id
)
@@ -308,7 +285,6 @@ mod tests {
#[test]
fn test_node_info_key_round_trip() {
let key = NodeInfoKey {
- cluster_id: 1,
role: Datanode,
node_id: 2,
};
@@ -316,7 +292,6 @@ mod tests {
let key_bytes: Vec<u8> = (&key).into();
let new_key: NodeInfoKey = key_bytes.try_into().unwrap();
- assert_eq!(1, new_key.cluster_id);
assert_eq!(Datanode, new_key.role);
assert_eq!(2, new_key.node_id);
}
@@ -362,11 +337,11 @@ mod tests {
#[test]
fn test_node_info_key_prefix() {
- let prefix = NodeInfoKey::key_prefix_with_cluster_id(1);
- assert_eq!(prefix, "__meta_cluster_node_info-1-");
+ let prefix = NodeInfoKey::key_prefix();
+ assert_eq!(prefix, "__meta_cluster_node_info-0-");
- let prefix = NodeInfoKey::key_prefix_with_role(2, Frontend);
- assert_eq!(prefix, "__meta_cluster_node_info-2-1-");
+ let prefix = NodeInfoKey::key_prefix_with_role(Frontend);
+ assert_eq!(prefix, "__meta_cluster_node_info-0-1-");
}
#[test]
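
To make the new key layout concrete: after dropping the cluster id field, the node-info key keeps a literal `0` in the slot where the cluster id used to be. A minimal standalone sketch of that format; the numeric role value used here is only an assumption for illustration:

```
// Sketch: mirrors the post-change key layout `__meta_cluster_node_info-0-{role}-{node_id}`.
const CLUSTER_NODE_INFO_PREFIX: &str = "__meta_cluster_node_info";

fn node_info_key(role: i32, node_id: u64) -> String {
    // The cluster id slot is now hard-coded to 0.
    format!("{}-0-{}-{}", CLUSTER_NODE_INFO_PREFIX, role, node_id)
}

fn main() {
    // Hypothetical role value and node id, for illustration only.
    let key = node_info_key(0, 2);
    assert_eq!(key, "__meta_cluster_node_info-0-0-2");
    println!("{}", key);
}
```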
diff --git a/src/common/meta/src/datanode.rs b/src/common/meta/src/datanode.rs
index 03226027ca30..d717d1523e32 100644
--- a/src/common/meta/src/datanode.rs
+++ b/src/common/meta/src/datanode.rs
@@ -25,8 +25,8 @@ use store_api::region_engine::{RegionRole, RegionStatistic};
use store_api::storage::RegionId;
use table::metadata::TableId;
+use crate::error;
use crate::error::Result;
-use crate::{error, ClusterId};
pub(crate) const DATANODE_LEASE_PREFIX: &str = "__meta_datanode_lease";
const INACTIVE_REGION_PREFIX: &str = "__meta_inactive_region";
@@ -48,11 +48,10 @@ lazy_static! {
/// The key of the datanode stat in the storage.
///
-/// The format is `__meta_datanode_stat-{cluster_id}-{node_id}`.
+/// The format is `__meta_datanode_stat-0-{node_id}`.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct Stat {
pub timestamp_millis: i64,
- pub cluster_id: ClusterId,
// The datanode Id.
pub id: u64,
// The datanode address.
@@ -102,10 +101,7 @@ impl Stat {
}
pub fn stat_key(&self) -> DatanodeStatKey {
- DatanodeStatKey {
- cluster_id: self.cluster_id,
- node_id: self.id,
- }
+ DatanodeStatKey { node_id: self.id }
}
/// Returns a tuple array containing [RegionId] and [RegionRole].
@@ -145,7 +141,7 @@ impl TryFrom<&HeartbeatRequest> for Stat {
} = value;
match (header, peer) {
- (Some(header), Some(peer)) => {
+ (Some(_header), Some(peer)) => {
let region_stats = region_stats
.iter()
.map(RegionStat::from)
@@ -153,7 +149,6 @@ impl TryFrom<&HeartbeatRequest> for Stat {
Ok(Self {
timestamp_millis: time_util::current_time_millis(),
- cluster_id: header.cluster_id,
// datanode id
id: peer.id,
// datanode address
@@ -196,32 +191,24 @@ impl From<&api::v1::meta::RegionStat> for RegionStat {
/// The key of the datanode stat in the memory store.
///
-/// The format is `__meta_datanode_stat-{cluster_id}-{node_id}`.
+/// The format is `__meta_datanode_stat-0-{node_id}`.
#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)]
pub struct DatanodeStatKey {
- pub cluster_id: ClusterId,
pub node_id: u64,
}
impl DatanodeStatKey {
/// The key prefix.
pub fn prefix_key() -> Vec<u8> {
- format!("{DATANODE_STAT_PREFIX}-").into_bytes()
- }
-
- /// The key prefix with the cluster id.
- pub fn key_prefix_with_cluster_id(cluster_id: ClusterId) -> String {
- format!("{DATANODE_STAT_PREFIX}-{cluster_id}-")
+ // todo(hl): remove cluster id in prefix
+ format!("{DATANODE_STAT_PREFIX}-0-").into_bytes()
}
}
impl From<DatanodeStatKey> for Vec<u8> {
fn from(value: DatanodeStatKey) -> Self {
- format!(
- "{}-{}-{}",
- DATANODE_STAT_PREFIX, value.cluster_id, value.node_id
- )
- .into_bytes()
+ // todo(hl): remove cluster id in prefix
+ format!("{}-0-{}", DATANODE_STAT_PREFIX, value.node_id).into_bytes()
}
}
@@ -234,20 +221,12 @@ impl FromStr for DatanodeStatKey {
.context(error::InvalidStatKeySnafu { key })?;
ensure!(caps.len() == 3, error::InvalidStatKeySnafu { key });
-
- let cluster_id = caps[1].to_string();
let node_id = caps[2].to_string();
- let cluster_id: u64 = cluster_id.parse().context(error::ParseNumSnafu {
- err_msg: format!("invalid cluster_id: {cluster_id}"),
- })?;
let node_id: u64 = node_id.parse().context(error::ParseNumSnafu {
err_msg: format!("invalid node_id: {node_id}"),
})?;
- Ok(Self {
- cluster_id,
- node_id,
- })
+ Ok(Self { node_id })
}
}
@@ -321,7 +300,6 @@ mod tests {
#[test]
fn test_stat_key() {
let stat = Stat {
- cluster_id: 3,
id: 101,
region_num: 10,
..Default::default()
@@ -329,14 +307,12 @@ mod tests {
let stat_key = stat.stat_key();
- assert_eq!(3, stat_key.cluster_id);
assert_eq!(101, stat_key.node_id);
}
#[test]
fn test_stat_val_round_trip() {
let stat = Stat {
- cluster_id: 0,
id: 101,
region_num: 100,
..Default::default()
@@ -351,7 +327,6 @@ mod tests {
assert_eq!(1, stats.len());
let stat = stats.first().unwrap();
- assert_eq!(0, stat.cluster_id);
assert_eq!(101, stat.id);
assert_eq!(100, stat.region_num);
}
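
The datanode stat key follows the same convention, with the cluster id slot pinned to `0`. A minimal standalone sketch of encoding plus a simplified parse; the real code uses a regex and typed errors:

```
// Sketch: mirrors the post-change stat key layout `__meta_datanode_stat-0-{node_id}`.
const DATANODE_STAT_PREFIX: &str = "__meta_datanode_stat";

fn stat_key(node_id: u64) -> String {
    // The cluster id slot is kept as a literal 0 for key compatibility.
    format!("{}-0-{}", DATANODE_STAT_PREFIX, node_id)
}

fn parse_node_id(key: &str) -> Option<u64> {
    // Simplified: the node id is the last '-'-separated segment.
    key.rsplit('-').next()?.parse().ok()
}

fn main() {
    let key = stat_key(101);
    assert_eq!(key, "__meta_datanode_stat-0-101");
    assert_eq!(parse_node_id(&key), Some(101));
}
```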
diff --git a/src/common/meta/src/ddl.rs b/src/common/meta/src/ddl.rs
index 0753ab51fccd..55a9a64c84a4 100644
--- a/src/common/meta/src/ddl.rs
+++ b/src/common/meta/src/ddl.rs
@@ -30,7 +30,7 @@ use crate::node_manager::NodeManagerRef;
use crate::region_keeper::MemoryRegionKeeperRef;
use crate::rpc::ddl::{SubmitDdlTaskRequest, SubmitDdlTaskResponse};
use crate::rpc::procedure::{MigrateRegionRequest, MigrateRegionResponse, ProcedureStateResponse};
-use crate::{ClusterId, DatanodeId};
+use crate::DatanodeId;
pub mod alter_database;
pub mod alter_logical_tables;
@@ -57,7 +57,6 @@ pub mod utils;
#[derive(Debug, Default)]
pub struct ExecutorContext {
- pub cluster_id: Option<u64>,
pub tracing_context: Option<W3cTrace>,
}
@@ -90,10 +89,6 @@ pub trait ProcedureExecutor: Send + Sync {
pub type ProcedureExecutorRef = Arc<dyn ProcedureExecutor>;
-pub struct TableMetadataAllocatorContext {
- pub cluster_id: ClusterId,
-}
-
/// Metadata allocated to a table.
#[derive(Default)]
pub struct TableMetadata {
@@ -108,7 +103,7 @@ pub struct TableMetadata {
pub type RegionFailureDetectorControllerRef = Arc<dyn RegionFailureDetectorController>;
-pub type DetectingRegion = (ClusterId, DatanodeId, RegionId);
+pub type DetectingRegion = (DatanodeId, RegionId);
/// Used for actively registering Region failure detectors.
///
diff --git a/src/common/meta/src/ddl/alter_database.rs b/src/common/meta/src/ddl/alter_database.rs
index 68f0f5428e08..983222144c9d 100644
--- a/src/common/meta/src/ddl/alter_database.rs
+++ b/src/common/meta/src/ddl/alter_database.rs
@@ -30,7 +30,6 @@ use crate::key::DeserializedValueWithBytes;
use crate::lock_key::{CatalogLock, SchemaLock};
use crate::rpc::ddl::UnsetDatabaseOption::{self};
use crate::rpc::ddl::{AlterDatabaseKind, AlterDatabaseTask, SetDatabaseOption};
-use crate::ClusterId;
pub struct AlterDatabaseProcedure {
pub context: DdlContext,
@@ -65,14 +64,10 @@ fn build_new_schema_value(
impl AlterDatabaseProcedure {
pub const TYPE_NAME: &'static str = "metasrv-procedure::AlterDatabase";
- pub fn new(
- cluster_id: ClusterId,
- task: AlterDatabaseTask,
- context: DdlContext,
- ) -> Result<Self> {
+ pub fn new(task: AlterDatabaseTask, context: DdlContext) -> Result<Self> {
Ok(Self {
context,
- data: AlterDatabaseData::new(task, cluster_id)?,
+ data: AlterDatabaseData::new(task)?,
})
}
@@ -183,7 +178,6 @@ enum AlterDatabaseState {
/// The data of alter database procedure.
#[derive(Debug, Serialize, Deserialize)]
pub struct AlterDatabaseData {
- cluster_id: ClusterId,
state: AlterDatabaseState,
kind: AlterDatabaseKind,
catalog_name: String,
@@ -192,9 +186,8 @@ pub struct AlterDatabaseData {
}
impl AlterDatabaseData {
- pub fn new(task: AlterDatabaseTask, cluster_id: ClusterId) -> Result<Self> {
+ pub fn new(task: AlterDatabaseTask) -> Result<Self> {
Ok(Self {
- cluster_id,
state: AlterDatabaseState::Prepare,
kind: AlterDatabaseKind::try_from(task.alter_expr.kind.unwrap())?,
catalog_name: task.alter_expr.catalog_name,
diff --git a/src/common/meta/src/ddl/alter_logical_tables.rs b/src/common/meta/src/ddl/alter_logical_tables.rs
index d9b318e9e90f..ea741accf319 100644
--- a/src/common/meta/src/ddl/alter_logical_tables.rs
+++ b/src/common/meta/src/ddl/alter_logical_tables.rs
@@ -37,9 +37,9 @@ use crate::key::table_info::TableInfoValue;
use crate::key::table_route::PhysicalTableRouteValue;
use crate::key::DeserializedValueWithBytes;
use crate::lock_key::{CatalogLock, SchemaLock, TableLock};
+use crate::metrics;
use crate::rpc::ddl::AlterTableTask;
use crate::rpc::router::find_leaders;
-use crate::{metrics, ClusterId};
pub struct AlterLogicalTablesProcedure {
pub context: DdlContext,
@@ -50,7 +50,6 @@ impl AlterLogicalTablesProcedure {
pub const TYPE_NAME: &'static str = "metasrv-procedure::AlterLogicalTables";
pub fn new(
- cluster_id: ClusterId,
tasks: Vec<AlterTableTask>,
physical_table_id: TableId,
context: DdlContext,
@@ -58,7 +57,6 @@ impl AlterLogicalTablesProcedure {
Self {
context,
data: AlterTablesData {
- cluster_id,
state: AlterTablesState::Prepare,
tasks,
table_info_values: vec![],
@@ -240,7 +238,6 @@ impl Procedure for AlterLogicalTablesProcedure {
#[derive(Debug, Serialize, Deserialize)]
pub struct AlterTablesData {
- cluster_id: ClusterId,
state: AlterTablesState,
tasks: Vec<AlterTableTask>,
/// Table info values before the alter operation.
diff --git a/src/common/meta/src/ddl/alter_table.rs b/src/common/meta/src/ddl/alter_table.rs
index 55ecdba54549..e18d53229231 100644
--- a/src/common/meta/src/ddl/alter_table.rs
+++ b/src/common/meta/src/ddl/alter_table.rs
@@ -45,9 +45,9 @@ use crate::instruction::CacheIdent;
use crate::key::table_info::TableInfoValue;
use crate::key::{DeserializedValueWithBytes, RegionDistribution};
use crate::lock_key::{CatalogLock, SchemaLock, TableLock, TableNameLock};
+use crate::metrics;
use crate::rpc::ddl::AlterTableTask;
use crate::rpc::router::{find_leader_regions, find_leaders, region_distribution};
-use crate::{metrics, ClusterId};
/// The alter table procedure
pub struct AlterTableProcedure {
@@ -64,16 +64,11 @@ pub struct AlterTableProcedure {
impl AlterTableProcedure {
pub const TYPE_NAME: &'static str = "metasrv-procedure::AlterTable";
- pub fn new(
- cluster_id: ClusterId,
- table_id: TableId,
- task: AlterTableTask,
- context: DdlContext,
- ) -> Result<Self> {
+ pub fn new(table_id: TableId, task: AlterTableTask, context: DdlContext) -> Result<Self> {
task.validate()?;
Ok(Self {
context,
- data: AlterTableData::new(task, table_id, cluster_id),
+ data: AlterTableData::new(task, table_id),
new_table_info: None,
})
}
@@ -307,7 +302,6 @@ enum AlterTableState {
// The serialized data of alter table.
#[derive(Debug, Serialize, Deserialize)]
pub struct AlterTableData {
- cluster_id: ClusterId,
state: AlterTableState,
task: AlterTableTask,
table_id: TableId,
@@ -318,12 +312,11 @@ pub struct AlterTableData {
}
impl AlterTableData {
- pub fn new(task: AlterTableTask, table_id: TableId, cluster_id: u64) -> Self {
+ pub fn new(task: AlterTableTask, table_id: TableId) -> Self {
Self {
state: AlterTableState::Prepare,
task,
table_id,
- cluster_id,
table_info_value: None,
region_distribution: None,
}
diff --git a/src/common/meta/src/ddl/alter_table/region_request.rs b/src/common/meta/src/ddl/alter_table/region_request.rs
index 7de578aced29..ef3038cfbabb 100644
--- a/src/common/meta/src/ddl/alter_table/region_request.rs
+++ b/src/common/meta/src/ddl/alter_table/region_request.rs
@@ -167,10 +167,9 @@ mod tests {
use crate::test_util::{new_ddl_context, MockDatanodeManager};
/// Prepares a region with schema `[ts: Timestamp, host: Tag, cpu: Field]`.
- async fn prepare_ddl_context() -> (DdlContext, u64, TableId, RegionId, String) {
+ async fn prepare_ddl_context() -> (DdlContext, TableId, RegionId, String) {
let datanode_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(datanode_manager);
- let cluster_id = 1;
let table_id = 1024;
let region_id = RegionId::new(table_id, 1);
let table_name = "foo";
@@ -225,19 +224,12 @@ mod tests {
)
.await
.unwrap();
- (
- ddl_context,
- cluster_id,
- table_id,
- region_id,
- table_name.to_string(),
- )
+ (ddl_context, table_id, region_id, table_name.to_string())
}
#[tokio::test]
async fn test_make_alter_region_request() {
- let (ddl_context, cluster_id, table_id, region_id, table_name) =
- prepare_ddl_context().await;
+ let (ddl_context, table_id, region_id, table_name) = prepare_ddl_context().await;
let task = AlterTableTask {
alter_table: AlterTableExpr {
@@ -265,8 +257,7 @@ mod tests {
},
};
- let mut procedure =
- AlterTableProcedure::new(cluster_id, table_id, task, ddl_context).unwrap();
+ let mut procedure = AlterTableProcedure::new(table_id, task, ddl_context).unwrap();
procedure.on_prepare().await.unwrap();
let alter_kind = procedure.make_region_alter_kind().unwrap();
let Some(Body::Alter(alter_region_request)) = procedure
@@ -307,8 +298,7 @@ mod tests {
#[tokio::test]
async fn test_make_alter_column_type_region_request() {
- let (ddl_context, cluster_id, table_id, region_id, table_name) =
- prepare_ddl_context().await;
+ let (ddl_context, table_id, region_id, table_name) = prepare_ddl_context().await;
let task = AlterTableTask {
alter_table: AlterTableExpr {
@@ -325,8 +315,7 @@ mod tests {
},
};
- let mut procedure =
- AlterTableProcedure::new(cluster_id, table_id, task, ddl_context).unwrap();
+ let mut procedure = AlterTableProcedure::new(table_id, task, ddl_context).unwrap();
procedure.on_prepare().await.unwrap();
let alter_kind = procedure.make_region_alter_kind().unwrap();
let Some(Body::Alter(alter_region_request)) = procedure
diff --git a/src/common/meta/src/ddl/create_flow.rs b/src/common/meta/src/ddl/create_flow.rs
index db8a7000592e..3e6f6ff551e0 100644
--- a/src/common/meta/src/ddl/create_flow.rs
+++ b/src/common/meta/src/ddl/create_flow.rs
@@ -46,9 +46,9 @@ use crate::key::flow::flow_route::FlowRouteValue;
use crate::key::table_name::TableNameKey;
use crate::key::{DeserializedValueWithBytes, FlowId, FlowPartitionId};
use crate::lock_key::{CatalogLock, FlowNameLock, TableNameLock};
+use crate::metrics;
use crate::peer::Peer;
use crate::rpc::ddl::{CreateFlowTask, QueryContext};
-use crate::{metrics, ClusterId};
/// The procedure of flow creation.
pub struct CreateFlowProcedure {
@@ -60,16 +60,10 @@ impl CreateFlowProcedure {
pub const TYPE_NAME: &'static str = "metasrv-procedure::CreateFlow";
/// Returns a new [CreateFlowProcedure].
- pub fn new(
- cluster_id: ClusterId,
- task: CreateFlowTask,
- query_context: QueryContext,
- context: DdlContext,
- ) -> Self {
+ pub fn new(task: CreateFlowTask, query_context: QueryContext, context: DdlContext) -> Self {
Self {
context,
data: CreateFlowData {
- cluster_id,
task,
flow_id: None,
peers: vec![],
@@ -363,7 +357,6 @@ impl fmt::Display for FlowType {
/// The serializable data.
#[derive(Debug, Serialize, Deserialize)]
pub struct CreateFlowData {
- pub(crate) cluster_id: ClusterId,
pub(crate) state: CreateFlowState,
pub(crate) task: CreateFlowTask,
pub(crate) flow_id: Option<FlowId>,
diff --git a/src/common/meta/src/ddl/create_flow/metadata.rs b/src/common/meta/src/ddl/create_flow/metadata.rs
index 40cf99ccc4b2..1681479d9173 100644
--- a/src/common/meta/src/ddl/create_flow/metadata.rs
+++ b/src/common/meta/src/ddl/create_flow/metadata.rs
@@ -23,11 +23,10 @@ impl CreateFlowProcedure {
pub(crate) async fn allocate_flow_id(&mut self) -> Result<()> {
//TODO(weny, ruihang): We doesn't support the partitions. It's always be 1, now.
let partitions = 1;
- let cluster_id = self.data.cluster_id;
let (flow_id, peers) = self
.context
.flow_metadata_allocator
- .create(cluster_id, partitions)
+ .create(partitions)
.await?;
self.data.flow_id = Some(flow_id);
self.data.peers = peers;
diff --git a/src/common/meta/src/ddl/create_logical_tables.rs b/src/common/meta/src/ddl/create_logical_tables.rs
index 7a72de63a13b..59882ec49190 100644
--- a/src/common/meta/src/ddl/create_logical_tables.rs
+++ b/src/common/meta/src/ddl/create_logical_tables.rs
@@ -36,9 +36,9 @@ use crate::ddl::DdlContext;
use crate::error::{DecodeJsonSnafu, MetadataCorruptionSnafu, Result};
use crate::key::table_route::TableRouteValue;
use crate::lock_key::{CatalogLock, SchemaLock, TableLock, TableNameLock};
+use crate::metrics;
use crate::rpc::ddl::CreateTableTask;
use crate::rpc::router::{find_leaders, RegionRoute};
-use crate::{metrics, ClusterId};
pub struct CreateLogicalTablesProcedure {
pub context: DdlContext,
@@ -49,7 +49,6 @@ impl CreateLogicalTablesProcedure {
pub const TYPE_NAME: &'static str = "metasrv-procedure::CreateLogicalTables";
pub fn new(
- cluster_id: ClusterId,
tasks: Vec<CreateTableTask>,
physical_table_id: TableId,
context: DdlContext,
@@ -57,7 +56,6 @@ impl CreateLogicalTablesProcedure {
Self {
context,
data: CreateTablesData {
- cluster_id,
state: CreateTablesState::Prepare,
tasks,
table_ids_already_exists: vec![],
@@ -245,7 +243,6 @@ impl Procedure for CreateLogicalTablesProcedure {
#[derive(Debug, Serialize, Deserialize)]
pub struct CreateTablesData {
- cluster_id: ClusterId,
state: CreateTablesState,
tasks: Vec<CreateTableTask>,
table_ids_already_exists: Vec<Option<TableId>>,
diff --git a/src/common/meta/src/ddl/create_table.rs b/src/common/meta/src/ddl/create_table.rs
index 1d171f595e44..3bd97827df30 100644
--- a/src/common/meta/src/ddl/create_table.rs
+++ b/src/common/meta/src/ddl/create_table.rs
@@ -37,17 +37,17 @@ use crate::ddl::utils::{
add_peer_context_if_needed, convert_region_routes_to_detecting_regions, handle_retry_error,
region_storage_path,
};
-use crate::ddl::{DdlContext, TableMetadata, TableMetadataAllocatorContext};
+use crate::ddl::{DdlContext, TableMetadata};
use crate::error::{self, Result};
use crate::key::table_name::TableNameKey;
use crate::key::table_route::{PhysicalTableRouteValue, TableRouteValue};
use crate::lock_key::{CatalogLock, SchemaLock, TableNameLock};
+use crate::metrics;
use crate::region_keeper::OperatingRegionGuard;
use crate::rpc::ddl::CreateTableTask;
use crate::rpc::router::{
find_leader_regions, find_leaders, operating_leader_regions, RegionRoute,
};
-use crate::{metrics, ClusterId};
pub struct CreateTableProcedure {
pub context: DdlContext,
pub creator: TableCreator,
@@ -56,10 +56,10 @@ pub struct CreateTableProcedure {
impl CreateTableProcedure {
pub const TYPE_NAME: &'static str = "metasrv-procedure::CreateTable";
- pub fn new(cluster_id: ClusterId, task: CreateTableTask, context: DdlContext) -> Self {
+ pub fn new(task: CreateTableTask, context: DdlContext) -> Self {
Self {
context,
- creator: TableCreator::new(cluster_id, task),
+ creator: TableCreator::new(task),
}
}
@@ -154,12 +154,7 @@ impl CreateTableProcedure {
} = self
.context
.table_metadata_allocator
- .create(
- &TableMetadataAllocatorContext {
- cluster_id: self.creator.data.cluster_id,
- },
- &self.creator.data.task,
- )
+ .create(&self.creator.data.task)
.await?;
self.creator
.set_allocated_metadata(table_id, table_route, region_wal_options);
@@ -268,7 +263,6 @@ impl CreateTableProcedure {
/// - Failed to create table metadata.
async fn on_create_metadata(&mut self) -> Result<Status> {
let table_id = self.table_id();
- let cluster_id = self.creator.data.cluster_id;
let manager = &self.context.table_metadata_manager;
let raw_table_info = self.table_info().clone();
@@ -276,10 +270,8 @@ impl CreateTableProcedure {
let region_wal_options = self.region_wal_options()?.clone();
// Safety: the table_route must be allocated.
let physical_table_route = self.table_route()?.clone();
- let detecting_regions = convert_region_routes_to_detecting_regions(
- cluster_id,
- &physical_table_route.region_routes,
- );
+ let detecting_regions =
+ convert_region_routes_to_detecting_regions(&physical_table_route.region_routes);
let table_route = TableRouteValue::Physical(physical_table_route);
manager
.create_table_metadata(raw_table_info, table_route, region_wal_options)
@@ -351,11 +343,10 @@ pub struct TableCreator {
}
impl TableCreator {
- pub fn new(cluster_id: ClusterId, task: CreateTableTask) -> Self {
+ pub fn new(task: CreateTableTask) -> Self {
Self {
data: CreateTableData {
state: CreateTableState::Prepare,
- cluster_id,
task,
table_route: None,
region_wal_options: None,
@@ -421,7 +412,6 @@ pub struct CreateTableData {
table_route: Option<PhysicalTableRouteValue>,
/// None stands for not allocated yet.
pub region_wal_options: Option<HashMap<RegionNumber, String>>,
- pub cluster_id: ClusterId,
}
impl CreateTableData {
diff --git a/src/common/meta/src/ddl/create_view.rs b/src/common/meta/src/ddl/create_view.rs
index 093311c4138e..349d2a84ccd3 100644
--- a/src/common/meta/src/ddl/create_view.rs
+++ b/src/common/meta/src/ddl/create_view.rs
@@ -24,13 +24,13 @@ use table::table_reference::TableReference;
use crate::cache_invalidator::Context;
use crate::ddl::utils::handle_retry_error;
-use crate::ddl::{DdlContext, TableMetadata, TableMetadataAllocatorContext};
+use crate::ddl::{DdlContext, TableMetadata};
use crate::error::{self, Result};
use crate::instruction::CacheIdent;
use crate::key::table_name::TableNameKey;
use crate::lock_key::{CatalogLock, SchemaLock, TableNameLock};
+use crate::metrics;
use crate::rpc::ddl::CreateViewTask;
-use crate::{metrics, ClusterId};
// The procedure to execute `[CreateViewTask]`.
pub struct CreateViewProcedure {
@@ -41,12 +41,11 @@ pub struct CreateViewProcedure {
impl CreateViewProcedure {
pub const TYPE_NAME: &'static str = "metasrv-procedure::CreateView";
- pub fn new(cluster_id: ClusterId, task: CreateViewTask, context: DdlContext) -> Self {
+ pub fn new(task: CreateViewTask, context: DdlContext) -> Self {
Self {
context,
data: CreateViewData {
state: CreateViewState::Prepare,
- cluster_id,
task,
need_update: false,
},
@@ -144,12 +143,7 @@ impl CreateViewProcedure {
let TableMetadata { table_id, .. } = self
.context
.table_metadata_allocator
- .create_view(
- &TableMetadataAllocatorContext {
- cluster_id: self.data.cluster_id,
- },
- &None,
- )
+ .create_view(&None)
.await?;
self.data.set_allocated_metadata(table_id, false);
}
@@ -285,7 +279,6 @@ pub enum CreateViewState {
pub struct CreateViewData {
pub state: CreateViewState,
pub task: CreateViewTask,
- pub cluster_id: ClusterId,
/// Whether to update the view info.
pub need_update: bool,
}
diff --git a/src/common/meta/src/ddl/drop_database.rs b/src/common/meta/src/ddl/drop_database.rs
index 578e7744f1a6..ce62b7d0c316 100644
--- a/src/common/meta/src/ddl/drop_database.rs
+++ b/src/common/meta/src/ddl/drop_database.rs
@@ -35,7 +35,6 @@ use crate::ddl::DdlContext;
use crate::error::Result;
use crate::key::table_name::TableNameValue;
use crate::lock_key::{CatalogLock, SchemaLock};
-use crate::ClusterId;
pub struct DropDatabaseProcedure {
/// The context of procedure runtime.
@@ -54,7 +53,6 @@ pub(crate) enum DropTableTarget {
/// Context of [DropDatabaseProcedure] execution.
pub(crate) struct DropDatabaseContext {
- cluster_id: ClusterId,
catalog: String,
schema: String,
drop_if_exists: bool,
@@ -87,7 +85,6 @@ impl DropDatabaseProcedure {
Self {
runtime_context: context,
context: DropDatabaseContext {
- cluster_id: 0,
catalog,
schema,
drop_if_exists,
@@ -108,7 +105,6 @@ impl DropDatabaseProcedure {
Ok(Self {
runtime_context,
context: DropDatabaseContext {
- cluster_id: 0,
catalog,
schema,
drop_if_exists,
diff --git a/src/common/meta/src/ddl/drop_database/cursor.rs b/src/common/meta/src/ddl/drop_database/cursor.rs
index 3b25b4202539..fcfe41f5ff4d 100644
--- a/src/common/meta/src/ddl/drop_database/cursor.rs
+++ b/src/common/meta/src/ddl/drop_database/cursor.rs
@@ -217,11 +217,10 @@ mod tests {
async fn test_next_without_logical_tables() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
- create_physical_table(&ddl_context, 0, "phy").await;
+ create_physical_table(&ddl_context, "phy").await;
// It always starts from Logical
let mut state = DropDatabaseCursor::new(DropTableTarget::Logical);
let mut ctx = DropDatabaseContext {
- cluster_id: 0,
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
drop_if_exists: false,
@@ -252,12 +251,11 @@ mod tests {
async fn test_next_with_logical_tables() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
- let physical_table_id = create_physical_table(&ddl_context, 0, "phy").await;
- create_logical_table(ddl_context.clone(), 0, physical_table_id, "metric_0").await;
+ let physical_table_id = create_physical_table(&ddl_context, "phy").await;
+ create_logical_table(ddl_context.clone(), physical_table_id, "metric_0").await;
// It always starts from Logical
let mut state = DropDatabaseCursor::new(DropTableTarget::Logical);
let mut ctx = DropDatabaseContext {
- cluster_id: 0,
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
drop_if_exists: false,
@@ -286,7 +284,6 @@ mod tests {
let ddl_context = new_ddl_context(node_manager);
let mut state = DropDatabaseCursor::new(DropTableTarget::Physical);
let mut ctx = DropDatabaseContext {
- cluster_id: 0,
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
drop_if_exists: false,
diff --git a/src/common/meta/src/ddl/drop_database/executor.rs b/src/common/meta/src/ddl/drop_database/executor.rs
index 5b57b5cf5796..f662e826e7c5 100644
--- a/src/common/meta/src/ddl/drop_database/executor.rs
+++ b/src/common/meta/src/ddl/drop_database/executor.rs
@@ -98,11 +98,10 @@ impl State for DropDatabaseExecutor {
async fn next(
&mut self,
ddl_ctx: &DdlContext,
- ctx: &mut DropDatabaseContext,
+ _ctx: &mut DropDatabaseContext,
) -> Result<(Box<dyn State>, Status)> {
self.register_dropping_regions(ddl_ctx)?;
- let executor =
- DropTableExecutor::new(ctx.cluster_id, self.table_name.clone(), self.table_id, true);
+ let executor = DropTableExecutor::new(self.table_name.clone(), self.table_id, true);
// Deletes metadata for table permanently.
let table_route_value = TableRouteValue::new(
self.table_id,
@@ -187,7 +186,7 @@ mod tests {
async fn test_next_with_physical_table() {
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let ddl_context = new_ddl_context(node_manager);
- let physical_table_id = create_physical_table(&ddl_context, 0, "phy").await;
+ let physical_table_id = create_physical_table(&ddl_context, "phy").await;
let (_, table_route) = ddl_context
.table_metadata_manager
.table_route_manager()
@@ -203,7 +202,6 @@ mod tests {
DropTableTarget::Physical,
);
let mut ctx = DropDatabaseContext {
- cluster_id: 0,
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
drop_if_exists: false,
@@ -216,7 +214,6 @@ mod tests {
}
// Execute again
let mut ctx = DropDatabaseContext {
- cluster_id: 0,
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
drop_if_exists: false,
@@ -239,8 +236,8 @@ mod tests {
async fn test_next_logical_table() {
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let ddl_context = new_ddl_context(node_manager);
- let physical_table_id = create_physical_table(&ddl_context, 0, "phy").await;
- create_logical_table(ddl_context.clone(), 0, physical_table_id, "metric").await;
+ let physical_table_id = create_physical_table(&ddl_context, "phy").await;
+ create_logical_table(ddl_context.clone(), physical_table_id, "metric").await;
let logical_table_id = physical_table_id + 1;
let (_, table_route) = ddl_context
.table_metadata_manager
@@ -257,7 +254,6 @@ mod tests {
DropTableTarget::Logical,
);
let mut ctx = DropDatabaseContext {
- cluster_id: 0,
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
drop_if_exists: false,
@@ -270,7 +266,6 @@ mod tests {
}
// Execute again
let mut ctx = DropDatabaseContext {
- cluster_id: 0,
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
drop_if_exists: false,
@@ -345,7 +340,7 @@ mod tests {
async fn test_next_retryable_err() {
let node_manager = Arc::new(MockDatanodeManager::new(RetryErrorDatanodeHandler));
let ddl_context = new_ddl_context(node_manager);
- let physical_table_id = create_physical_table(&ddl_context, 0, "phy").await;
+ let physical_table_id = create_physical_table(&ddl_context, "phy").await;
let (_, table_route) = ddl_context
.table_metadata_manager
.table_route_manager()
@@ -360,7 +355,6 @@ mod tests {
DropTableTarget::Physical,
);
let mut ctx = DropDatabaseContext {
- cluster_id: 0,
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
drop_if_exists: false,
@@ -374,7 +368,7 @@ mod tests {
async fn test_on_recovery() {
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let ddl_context = new_ddl_context(node_manager);
- let physical_table_id = create_physical_table(&ddl_context, 0, "phy").await;
+ let physical_table_id = create_physical_table(&ddl_context, "phy").await;
let (_, table_route) = ddl_context
.table_metadata_manager
.table_route_manager()
@@ -390,7 +384,6 @@ mod tests {
DropTableTarget::Physical,
);
let mut ctx = DropDatabaseContext {
- cluster_id: 0,
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_SCHEMA_NAME.to_string(),
drop_if_exists: false,
diff --git a/src/common/meta/src/ddl/drop_database/metadata.rs b/src/common/meta/src/ddl/drop_database/metadata.rs
index 8d338df07c5f..005806146013 100644
--- a/src/common/meta/src/ddl/drop_database/metadata.rs
+++ b/src/common/meta/src/ddl/drop_database/metadata.rs
@@ -118,7 +118,6 @@ mod tests {
.unwrap();
let mut state = DropDatabaseRemoveMetadata;
let mut ctx = DropDatabaseContext {
- cluster_id: 0,
catalog: "foo".to_string(),
schema: "bar".to_string(),
drop_if_exists: true,
@@ -145,7 +144,6 @@ mod tests {
// Schema not exists
let mut state = DropDatabaseRemoveMetadata;
let mut ctx = DropDatabaseContext {
- cluster_id: 0,
catalog: "foo".to_string(),
schema: "bar".to_string(),
drop_if_exists: true,
diff --git a/src/common/meta/src/ddl/drop_database/start.rs b/src/common/meta/src/ddl/drop_database/start.rs
index deeb8ed215ed..792eeac8dda1 100644
--- a/src/common/meta/src/ddl/drop_database/start.rs
+++ b/src/common/meta/src/ddl/drop_database/start.rs
@@ -89,7 +89,6 @@ mod tests {
let ddl_context = new_ddl_context(node_manager);
let mut step = DropDatabaseStart;
let mut ctx = DropDatabaseContext {
- cluster_id: 0,
catalog: "foo".to_string(),
schema: "bar".to_string(),
drop_if_exists: false,
@@ -105,7 +104,6 @@ mod tests {
let ddl_context = new_ddl_context(node_manager);
let mut state = DropDatabaseStart;
let mut ctx = DropDatabaseContext {
- cluster_id: 0,
catalog: "foo".to_string(),
schema: "bar".to_string(),
drop_if_exists: true,
@@ -128,7 +126,6 @@ mod tests {
.unwrap();
let mut state = DropDatabaseStart;
let mut ctx = DropDatabaseContext {
- cluster_id: 0,
catalog: "foo".to_string(),
schema: "bar".to_string(),
drop_if_exists: false,
diff --git a/src/common/meta/src/ddl/drop_flow.rs b/src/common/meta/src/ddl/drop_flow.rs
index 59b813c452ce..38a4622aad23 100644
--- a/src/common/meta/src/ddl/drop_flow.rs
+++ b/src/common/meta/src/ddl/drop_flow.rs
@@ -37,8 +37,8 @@ use crate::instruction::{CacheIdent, DropFlow};
use crate::key::flow::flow_info::FlowInfoValue;
use crate::key::flow::flow_route::FlowRouteValue;
use crate::lock_key::{CatalogLock, FlowLock};
+use crate::metrics;
use crate::rpc::ddl::DropFlowTask;
-use crate::{metrics, ClusterId};
/// The procedure for dropping a flow.
pub struct DropFlowProcedure {
@@ -51,12 +51,11 @@ pub struct DropFlowProcedure {
impl DropFlowProcedure {
pub const TYPE_NAME: &'static str = "metasrv-procedure::DropFlow";
- pub fn new(cluster_id: ClusterId, task: DropFlowTask, context: DdlContext) -> Self {
+ pub fn new(task: DropFlowTask, context: DdlContext) -> Self {
Self {
context,
data: DropFlowData {
state: DropFlowState::Prepare,
- cluster_id,
task,
flow_info_value: None,
flow_route_values: vec![],
@@ -218,7 +217,6 @@ impl Procedure for DropFlowProcedure {
#[derive(Debug, Serialize, Deserialize)]
pub(crate) struct DropFlowData {
state: DropFlowState,
- cluster_id: ClusterId,
task: DropFlowTask,
pub(crate) flow_info_value: Option<FlowInfoValue>,
pub(crate) flow_route_values: Vec<FlowRouteValue>,
diff --git a/src/common/meta/src/ddl/drop_table.rs b/src/common/meta/src/ddl/drop_table.rs
index 9f38e5450f5b..e68cae3382a9 100644
--- a/src/common/meta/src/ddl/drop_table.rs
+++ b/src/common/meta/src/ddl/drop_table.rs
@@ -40,10 +40,10 @@ use crate::ddl::DdlContext;
use crate::error::{self, Result};
use crate::key::table_route::TableRouteValue;
use crate::lock_key::{CatalogLock, SchemaLock, TableLock};
+use crate::metrics;
use crate::region_keeper::OperatingRegionGuard;
use crate::rpc::ddl::DropTableTask;
use crate::rpc::router::{operating_leader_regions, RegionRoute};
-use crate::{metrics, ClusterId};
pub struct DropTableProcedure {
/// The context of procedure runtime.
@@ -59,8 +59,8 @@ pub struct DropTableProcedure {
impl DropTableProcedure {
pub const TYPE_NAME: &'static str = "metasrv-procedure::DropTable";
- pub fn new(cluster_id: ClusterId, task: DropTableTask, context: DdlContext) -> Self {
- let data = DropTableData::new(cluster_id, task);
+ pub fn new(task: DropTableTask, context: DdlContext) -> Self {
+ let data = DropTableData::new(task);
let executor = data.build_executor();
Self {
context,
@@ -268,7 +268,6 @@ impl Procedure for DropTableProcedure {
#[derive(Debug, Serialize, Deserialize)]
pub struct DropTableData {
pub state: DropTableState,
- pub cluster_id: ClusterId,
pub task: DropTableTask,
pub physical_region_routes: Vec<RegionRoute>,
pub physical_table_id: Option<TableId>,
@@ -279,10 +278,9 @@ pub struct DropTableData {
}
impl DropTableData {
- pub fn new(cluster_id: ClusterId, task: DropTableTask) -> Self {
+ pub fn new(task: DropTableTask) -> Self {
Self {
state: DropTableState::Prepare,
- cluster_id,
task,
physical_region_routes: vec![],
physical_table_id: None,
@@ -301,7 +299,6 @@ impl DropTableData {
fn build_executor(&self) -> DropTableExecutor {
DropTableExecutor::new(
- self.cluster_id,
self.task.table_name(),
self.task.table_id,
self.task.drop_if_exists,
diff --git a/src/common/meta/src/ddl/drop_table/executor.rs b/src/common/meta/src/ddl/drop_table/executor.rs
index 7746f8da85ba..43ca7ce5ac85 100644
--- a/src/common/meta/src/ddl/drop_table/executor.rs
+++ b/src/common/meta/src/ddl/drop_table/executor.rs
@@ -36,7 +36,6 @@ use crate::instruction::CacheIdent;
use crate::key::table_name::TableNameKey;
use crate::key::table_route::TableRouteValue;
use crate::rpc::router::{find_leader_regions, find_leaders, RegionRoute};
-use crate::ClusterId;
/// [Control] indicates to the caller whether to go to the next step.
#[derive(Debug)]
@@ -54,14 +53,8 @@ impl<T> Control<T> {
impl DropTableExecutor {
/// Returns the [DropTableExecutor].
- pub fn new(
- cluster_id: ClusterId,
- table: TableName,
- table_id: TableId,
- drop_if_exists: bool,
- ) -> Self {
+ pub fn new(table: TableName, table_id: TableId, drop_if_exists: bool) -> Self {
Self {
- cluster_id,
table,
table_id,
drop_if_exists,
@@ -74,7 +67,6 @@ impl DropTableExecutor {
/// - Invalidates the cache on the Frontend nodes.
/// - Drops the regions on the Datanode nodes.
pub struct DropTableExecutor {
- cluster_id: ClusterId,
table: TableName,
table_id: TableId,
drop_if_exists: bool,
@@ -164,7 +156,7 @@ impl DropTableExecutor {
let detecting_regions = if table_route_value.is_physical() {
// Safety: checked.
let regions = table_route_value.region_routes().unwrap();
- convert_region_routes_to_detecting_regions(self.cluster_id, regions)
+ convert_region_routes_to_detecting_regions(regions)
} else {
vec![]
};
@@ -321,7 +313,6 @@ mod tests {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ctx = new_ddl_context(node_manager);
let executor = DropTableExecutor::new(
- 0,
TableName::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "my_table"),
1024,
true,
@@ -331,7 +322,6 @@ mod tests {
// Drops a non-existent table
let executor = DropTableExecutor::new(
- 0,
TableName::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "my_table"),
1024,
false,
@@ -341,7 +331,6 @@ mod tests {
// Drops an existing table
let executor = DropTableExecutor::new(
- 0,
TableName::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, "my_table"),
1024,
false,
diff --git a/src/common/meta/src/ddl/drop_view.rs b/src/common/meta/src/ddl/drop_view.rs
index b56b39eee091..d803bf68e5c0 100644
--- a/src/common/meta/src/ddl/drop_view.rs
+++ b/src/common/meta/src/ddl/drop_view.rs
@@ -31,8 +31,8 @@ use crate::error::{self, Result};
use crate::instruction::CacheIdent;
use crate::key::table_name::TableNameKey;
use crate::lock_key::{CatalogLock, SchemaLock, TableLock};
+use crate::metrics;
use crate::rpc::ddl::DropViewTask;
-use crate::{metrics, ClusterId};
/// The procedure for dropping a view.
pub struct DropViewProcedure {
@@ -45,12 +45,11 @@ pub struct DropViewProcedure {
impl DropViewProcedure {
pub const TYPE_NAME: &'static str = "metasrv-procedure::DropView";
- pub fn new(cluster_id: ClusterId, task: DropViewTask, context: DdlContext) -> Self {
+ pub fn new(task: DropViewTask, context: DdlContext) -> Self {
Self {
context,
data: DropViewData {
state: DropViewState::Prepare,
- cluster_id,
task,
},
}
@@ -216,7 +215,6 @@ impl Procedure for DropViewProcedure {
#[derive(Debug, Serialize, Deserialize)]
pub(crate) struct DropViewData {
state: DropViewState,
- cluster_id: ClusterId,
task: DropViewTask,
}
diff --git a/src/common/meta/src/ddl/flow_meta.rs b/src/common/meta/src/ddl/flow_meta.rs
index f92f4048822c..2e71c9721709 100644
--- a/src/common/meta/src/ddl/flow_meta.rs
+++ b/src/common/meta/src/ddl/flow_meta.rs
@@ -20,7 +20,6 @@ use crate::error::Result;
use crate::key::FlowId;
use crate::peer::Peer;
use crate::sequence::SequenceRef;
-use crate::ClusterId;
/// The reference of [FlowMetadataAllocator].
pub type FlowMetadataAllocatorRef = Arc<FlowMetadataAllocator>;
@@ -60,16 +59,9 @@ impl FlowMetadataAllocator {
}
/// Allocates the [FlowId] and [Peer]s.
- pub async fn create(
- &self,
- cluster_id: ClusterId,
- partitions: usize,
- ) -> Result<(FlowId, Vec<Peer>)> {
+ pub async fn create(&self, partitions: usize) -> Result<(FlowId, Vec<Peer>)> {
let flow_id = self.allocate_flow_id().await?;
- let peers = self
- .partition_peer_allocator
- .alloc(cluster_id, partitions)
- .await?;
+ let peers = self.partition_peer_allocator.alloc(partitions).await?;
Ok((flow_id, peers))
}
@@ -79,7 +71,7 @@ impl FlowMetadataAllocator {
#[async_trait]
pub trait PartitionPeerAllocator: Send + Sync {
/// Allocates [Peer] nodes for storing partitions.
- async fn alloc(&self, cluster_id: ClusterId, partitions: usize) -> Result<Vec<Peer>>;
+ async fn alloc(&self, partitions: usize) -> Result<Vec<Peer>>;
}
/// [PartitionPeerAllocatorRef] allocates [Peer]s for partitions.
@@ -89,7 +81,7 @@ struct NoopPartitionPeerAllocator;
#[async_trait]
impl PartitionPeerAllocator for NoopPartitionPeerAllocator {
- async fn alloc(&self, _cluster_id: ClusterId, partitions: usize) -> Result<Vec<Peer>> {
+ async fn alloc(&self, partitions: usize) -> Result<Vec<Peer>> {
Ok(vec![Peer::default(); partitions])
}
}
diff --git a/src/common/meta/src/ddl/table_meta.rs b/src/common/meta/src/ddl/table_meta.rs
index 4ce4c1589411..c3ebfcd3f53d 100644
--- a/src/common/meta/src/ddl/table_meta.rs
+++ b/src/common/meta/src/ddl/table_meta.rs
@@ -20,7 +20,7 @@ use common_telemetry::{debug, info};
use snafu::ensure;
use store_api::storage::{RegionId, RegionNumber, TableId};
-use crate::ddl::{TableMetadata, TableMetadataAllocatorContext};
+use crate::ddl::TableMetadata;
use crate::error::{self, Result, UnsupportedSnafu};
use crate::key::table_route::PhysicalTableRouteValue;
use crate::peer::Peer;
@@ -109,7 +109,6 @@ impl TableMetadataAllocator {
async fn create_table_route(
&self,
- ctx: &TableMetadataAllocatorContext,
table_id: TableId,
task: &CreateTableTask,
) -> Result<PhysicalTableRouteValue> {
@@ -121,7 +120,7 @@ impl TableMetadataAllocator {
}
);
- let peers = self.peer_allocator.alloc(ctx, regions).await?;
+ let peers = self.peer_allocator.alloc(regions).await?;
let region_routes = task
.partitions
.iter()
@@ -147,11 +146,7 @@ impl TableMetadataAllocator {
}
/// Create VIEW metadata
- pub async fn create_view(
- &self,
- _ctx: &TableMetadataAllocatorContext,
- table_id: &Option<api::v1::TableId>,
- ) -> Result<TableMetadata> {
+ pub async fn create_view(&self, table_id: &Option<api::v1::TableId>) -> Result<TableMetadata> {
let table_id = self.allocate_table_id(table_id).await?;
Ok(TableMetadata {
@@ -160,13 +155,9 @@ impl TableMetadataAllocator {
})
}
- pub async fn create(
- &self,
- ctx: &TableMetadataAllocatorContext,
- task: &CreateTableTask,
- ) -> Result<TableMetadata> {
+ pub async fn create(&self, task: &CreateTableTask) -> Result<TableMetadata> {
let table_id = self.allocate_table_id(&task.create_table.table_id).await?;
- let table_route = self.create_table_route(ctx, table_id, task).await?;
+ let table_route = self.create_table_route(table_id, task).await?;
let region_wal_options = self.create_wal_options(&table_route)?;
debug!(
@@ -188,19 +179,14 @@ pub type PeerAllocatorRef = Arc<dyn PeerAllocator>;
#[async_trait]
pub trait PeerAllocator: Send + Sync {
/// Allocates `regions` size [`Peer`]s.
- async fn alloc(&self, ctx: &TableMetadataAllocatorContext, regions: usize)
- -> Result<Vec<Peer>>;
+ async fn alloc(&self, regions: usize) -> Result<Vec<Peer>>;
}
struct NoopPeerAllocator;
#[async_trait]
impl PeerAllocator for NoopPeerAllocator {
- async fn alloc(
- &self,
- _ctx: &TableMetadataAllocatorContext,
- regions: usize,
- ) -> Result<Vec<Peer>> {
+ async fn alloc(&self, regions: usize) -> Result<Vec<Peer>> {
Ok(vec![Peer::default(); regions])
}
}
diff --git a/src/common/meta/src/ddl/test_util.rs b/src/common/meta/src/ddl/test_util.rs
index 3a82f644e4fd..4d6a6c63b5f3 100644
--- a/src/common/meta/src/ddl/test_util.rs
+++ b/src/common/meta/src/ddl/test_util.rs
@@ -31,10 +31,9 @@ use crate::ddl::test_util::columns::TestColumnDefBuilder;
use crate::ddl::test_util::create_table::{
build_raw_table_info_from_expr, TestCreateTableExprBuilder,
};
-use crate::ddl::{DdlContext, TableMetadata, TableMetadataAllocatorContext};
+use crate::ddl::{DdlContext, TableMetadata};
use crate::key::table_route::TableRouteValue;
use crate::rpc::ddl::CreateTableTask;
-use crate::ClusterId;
pub async fn create_physical_table_metadata(
ddl_context: &DdlContext,
@@ -48,11 +47,7 @@ pub async fn create_physical_table_metadata(
.unwrap();
}
-pub async fn create_physical_table(
- ddl_context: &DdlContext,
- cluster_id: ClusterId,
- name: &str,
-) -> TableId {
+pub async fn create_physical_table(ddl_context: &DdlContext, name: &str) -> TableId {
// Prepares physical table metadata.
let mut create_physical_table_task = test_create_physical_table_task(name);
let TableMetadata {
@@ -61,10 +56,7 @@ pub async fn create_physical_table(
..
} = ddl_context
.table_metadata_allocator
- .create(
- &TableMetadataAllocatorContext { cluster_id },
- &create_physical_table_task,
- )
+ .create(&create_physical_table_task)
.await
.unwrap();
create_physical_table_task.set_table_id(table_id);
@@ -80,15 +72,13 @@ pub async fn create_physical_table(
pub async fn create_logical_table(
ddl_context: DdlContext,
- cluster_id: ClusterId,
physical_table_id: TableId,
table_name: &str,
) -> TableId {
use std::assert_matches::assert_matches;
let tasks = vec![test_create_logical_table_task(table_name)];
- let mut procedure =
- CreateLogicalTablesProcedure::new(cluster_id, tasks, physical_table_id, ddl_context);
+ let mut procedure = CreateLogicalTablesProcedure::new(tasks, physical_table_id, ddl_context);
let status = procedure.on_prepare().await.unwrap();
assert_matches!(status, Status::Executing { persist: true });
let status = procedure.on_create_metadata().await.unwrap();
diff --git a/src/common/meta/src/ddl/tests/alter_logical_tables.rs b/src/common/meta/src/ddl/tests/alter_logical_tables.rs
index 03348c393052..4fa9992a1766 100644
--- a/src/common/meta/src/ddl/tests/alter_logical_tables.rs
+++ b/src/common/meta/src/ddl/tests/alter_logical_tables.rs
@@ -86,7 +86,6 @@ fn make_alter_logical_table_rename_task(
async fn test_on_prepare_check_schema() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
- let cluster_id = 1;
let tasks = vec![
make_alter_logical_table_add_column_task(
Some("schema1"),
@@ -100,8 +99,7 @@ async fn test_on_prepare_check_schema() {
),
];
let physical_table_id = 1024u32;
- let mut procedure =
- AlterLogicalTablesProcedure::new(cluster_id, tasks, physical_table_id, ddl_context);
+ let mut procedure = AlterLogicalTablesProcedure::new(tasks, physical_table_id, ddl_context);
let err = procedure.on_prepare().await.unwrap_err();
assert_matches!(err, AlterLogicalTablesInvalidArguments { .. });
}
@@ -110,50 +108,46 @@ async fn test_on_prepare_check_schema() {
async fn test_on_prepare_check_alter_kind() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
- let cluster_id = 1;
let tasks = vec![make_alter_logical_table_rename_task(
"schema1",
"table1",
"new_table1",
)];
let physical_table_id = 1024u32;
- let mut procedure =
- AlterLogicalTablesProcedure::new(cluster_id, tasks, physical_table_id, ddl_context);
+ let mut procedure = AlterLogicalTablesProcedure::new(tasks, physical_table_id, ddl_context);
let err = procedure.on_prepare().await.unwrap_err();
assert_matches!(err, AlterLogicalTablesInvalidArguments { .. });
}
#[tokio::test]
async fn test_on_prepare_different_physical_table() {
- let cluster_id = 1;
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
- let phy1_id = create_physical_table(&ddl_context, cluster_id, "phy1").await;
- create_logical_table(ddl_context.clone(), cluster_id, phy1_id, "table1").await;
- let phy2_id = create_physical_table(&ddl_context, cluster_id, "phy2").await;
- create_logical_table(ddl_context.clone(), cluster_id, phy2_id, "table2").await;
+ let phy1_id = create_physical_table(&ddl_context, "phy1").await;
+ create_logical_table(ddl_context.clone(), phy1_id, "table1").await;
+ let phy2_id = create_physical_table(&ddl_context, "phy2").await;
+ create_logical_table(ddl_context.clone(), phy2_id, "table2").await;
let tasks = vec![
make_alter_logical_table_add_column_task(None, "table1", vec!["column1".to_string()]),
make_alter_logical_table_add_column_task(None, "table2", vec!["column2".to_string()]),
];
- let mut procedure = AlterLogicalTablesProcedure::new(cluster_id, tasks, phy1_id, ddl_context);
+ let mut procedure = AlterLogicalTablesProcedure::new(tasks, phy1_id, ddl_context);
let err = procedure.on_prepare().await.unwrap_err();
assert_matches!(err, AlterLogicalTablesInvalidArguments { .. });
}
#[tokio::test]
async fn test_on_prepare_logical_table_not_exists() {
- let cluster_id = 1;
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
// Creates physical table
- let phy_id = create_physical_table(&ddl_context, cluster_id, "phy").await;
+ let phy_id = create_physical_table(&ddl_context, "phy").await;
// Creates a logical table
- create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table1").await;
+ create_logical_table(ddl_context.clone(), phy_id, "table1").await;
let tasks = vec![
make_alter_logical_table_add_column_task(None, "table1", vec!["column1".to_string()]),
@@ -161,23 +155,22 @@ async fn test_on_prepare_logical_table_not_exists() {
make_alter_logical_table_add_column_task(None, "table2", vec!["column2".to_string()]),
];
- let mut procedure = AlterLogicalTablesProcedure::new(cluster_id, tasks, phy_id, ddl_context);
+ let mut procedure = AlterLogicalTablesProcedure::new(tasks, phy_id, ddl_context);
let err = procedure.on_prepare().await.unwrap_err();
assert_matches!(err, TableNotFound { .. });
}
#[tokio::test]
async fn test_on_prepare() {
- let cluster_id = 1;
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
// Creates physical table
- let phy_id = create_physical_table(&ddl_context, cluster_id, "phy").await;
+ let phy_id = create_physical_table(&ddl_context, "phy").await;
// Creates 3 logical tables
- create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table1").await;
- create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table2").await;
- create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table3").await;
+ create_logical_table(ddl_context.clone(), phy_id, "table1").await;
+ create_logical_table(ddl_context.clone(), phy_id, "table2").await;
+ create_logical_table(ddl_context.clone(), phy_id, "table3").await;
let tasks = vec![
make_alter_logical_table_add_column_task(None, "table1", vec!["column1".to_string()]),
@@ -185,25 +178,24 @@ async fn test_on_prepare() {
make_alter_logical_table_add_column_task(None, "table3", vec!["column3".to_string()]),
];
- let mut procedure = AlterLogicalTablesProcedure::new(cluster_id, tasks, phy_id, ddl_context);
+ let mut procedure = AlterLogicalTablesProcedure::new(tasks, phy_id, ddl_context);
let result = procedure.on_prepare().await;
assert_matches!(result, Ok(Status::Executing { persist: true }));
}
#[tokio::test]
async fn test_on_update_metadata() {
- let cluster_id = 1;
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let ddl_context = new_ddl_context(node_manager);
// Creates physical table
- let phy_id = create_physical_table(&ddl_context, cluster_id, "phy").await;
+ let phy_id = create_physical_table(&ddl_context, "phy").await;
// Creates 5 logical tables
- create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table1").await;
- create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table2").await;
- create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table3").await;
- create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table4").await;
- create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table5").await;
+ create_logical_table(ddl_context.clone(), phy_id, "table1").await;
+ create_logical_table(ddl_context.clone(), phy_id, "table2").await;
+ create_logical_table(ddl_context.clone(), phy_id, "table3").await;
+ create_logical_table(ddl_context.clone(), phy_id, "table4").await;
+ create_logical_table(ddl_context.clone(), phy_id, "table5").await;
let tasks = vec![
make_alter_logical_table_add_column_task(None, "table1", vec!["new_col".to_string()]),
@@ -211,7 +203,7 @@ async fn test_on_update_metadata() {
make_alter_logical_table_add_column_task(None, "table3", vec!["new_col".to_string()]),
];
- let mut procedure = AlterLogicalTablesProcedure::new(cluster_id, tasks, phy_id, ddl_context);
+ let mut procedure = AlterLogicalTablesProcedure::new(tasks, phy_id, ddl_context);
let mut status = procedure.on_prepare().await.unwrap();
assert_matches!(status, Status::Executing { persist: true });
@@ -229,23 +221,21 @@ async fn test_on_update_metadata() {
#[tokio::test]
async fn test_on_part_duplicate_alter_request() {
- let cluster_id = 1;
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let ddl_context = new_ddl_context(node_manager);
// Creates physical table
- let phy_id = create_physical_table(&ddl_context, cluster_id, "phy").await;
+ let phy_id = create_physical_table(&ddl_context, "phy").await;
// Creates 2 logical tables
- create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table1").await;
- create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table2").await;
+ create_logical_table(ddl_context.clone(), phy_id, "table1").await;
+ create_logical_table(ddl_context.clone(), phy_id, "table2").await;
let tasks = vec![
make_alter_logical_table_add_column_task(None, "table1", vec!["col_0".to_string()]),
make_alter_logical_table_add_column_task(None, "table2", vec!["col_0".to_string()]),
];
- let mut procedure =
- AlterLogicalTablesProcedure::new(cluster_id, tasks, phy_id, ddl_context.clone());
+ let mut procedure = AlterLogicalTablesProcedure::new(tasks, phy_id, ddl_context.clone());
let mut status = procedure.on_prepare().await.unwrap();
assert_matches!(status, Status::Executing { persist: true });
@@ -278,8 +268,7 @@ async fn test_on_part_duplicate_alter_request() {
),
];
- let mut procedure =
- AlterLogicalTablesProcedure::new(cluster_id, tasks, phy_id, ddl_context.clone());
+ let mut procedure = AlterLogicalTablesProcedure::new(tasks, phy_id, ddl_context.clone());
let mut status = procedure.on_prepare().await.unwrap();
assert_matches!(status, Status::Executing { persist: true });
diff --git a/src/common/meta/src/ddl/tests/alter_table.rs b/src/common/meta/src/ddl/tests/alter_table.rs
index 18294efe00fe..f3abfab91ac0 100644
--- a/src/common/meta/src/ddl/tests/alter_table.rs
+++ b/src/common/meta/src/ddl/tests/alter_table.rs
@@ -59,7 +59,6 @@ fn test_rename_alter_table_task(table_name: &str, new_table_name: &str) -> Alter
async fn test_on_prepare_table_exists_err() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
- let cluster_id = 1;
let task = test_create_table_task("foo", 1024);
// Puts a value to table name key.
ddl_context
@@ -73,7 +72,7 @@ async fn test_on_prepare_table_exists_err() {
.unwrap();
let task = test_rename_alter_table_task("non-exists", "foo");
- let mut procedure = AlterTableProcedure::new(cluster_id, 1024, task, ddl_context).unwrap();
+ let mut procedure = AlterTableProcedure::new(1024, task, ddl_context).unwrap();
let err = procedure.on_prepare().await.unwrap_err();
assert_matches!(err.status_code(), StatusCode::TableAlreadyExists);
}
@@ -82,9 +81,8 @@ async fn test_on_prepare_table_exists_err() {
async fn test_on_prepare_table_not_exists_err() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
- let cluster_id = 1;
let task = test_rename_alter_table_task("non-exists", "foo");
- let mut procedure = AlterTableProcedure::new(cluster_id, 1024, task, ddl_context).unwrap();
+ let mut procedure = AlterTableProcedure::new(1024, task, ddl_context).unwrap();
let err = procedure.on_prepare().await.unwrap_err();
assert_matches!(err.status_code(), StatusCode::TableNotFound);
}
@@ -95,7 +93,6 @@ async fn test_on_submit_alter_request() {
let datanode_handler = DatanodeWatcher(tx);
let node_manager = Arc::new(MockDatanodeManager::new(datanode_handler));
let ddl_context = new_ddl_context(node_manager);
- let cluster_id = 1;
let table_id = 1024;
let table_name = "foo";
let task = test_create_table_task(table_name, table_id);
@@ -144,8 +141,7 @@ async fn test_on_submit_alter_request() {
})),
},
};
- let mut procedure =
- AlterTableProcedure::new(cluster_id, table_id, alter_table_task, ddl_context).unwrap();
+ let mut procedure = AlterTableProcedure::new(table_id, alter_table_task, ddl_context).unwrap();
procedure.on_prepare().await.unwrap();
procedure.submit_alter_region_requests().await.unwrap();
@@ -181,7 +177,6 @@ async fn test_on_submit_alter_request_with_outdated_request() {
RequestOutdatedErrorDatanodeHandler,
));
let ddl_context = new_ddl_context(node_manager);
- let cluster_id = 1;
let table_id = 1024;
let table_name = "foo";
let task = test_create_table_task(table_name, table_id);
@@ -230,8 +225,7 @@ async fn test_on_submit_alter_request_with_outdated_request() {
})),
},
};
- let mut procedure =
- AlterTableProcedure::new(cluster_id, table_id, alter_table_task, ddl_context).unwrap();
+ let mut procedure = AlterTableProcedure::new(table_id, alter_table_task, ddl_context).unwrap();
procedure.on_prepare().await.unwrap();
procedure.submit_alter_region_requests().await.unwrap();
}
@@ -240,7 +234,6 @@ async fn test_on_submit_alter_request_with_outdated_request() {
async fn test_on_update_metadata_rename() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
- let cluster_id = 1;
let table_name = "foo";
let new_table_name = "bar";
let table_id = 1024;
@@ -257,8 +250,7 @@ async fn test_on_update_metadata_rename() {
.unwrap();
let task = test_rename_alter_table_task(table_name, new_table_name);
- let mut procedure =
- AlterTableProcedure::new(cluster_id, table_id, task, ddl_context.clone()).unwrap();
+ let mut procedure = AlterTableProcedure::new(table_id, task, ddl_context.clone()).unwrap();
procedure.on_prepare().await.unwrap();
procedure.on_update_metadata().await.unwrap();
@@ -291,7 +283,6 @@ async fn test_on_update_metadata_rename() {
async fn test_on_update_metadata_add_columns() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
- let cluster_id = 1;
let table_name = "foo";
let table_id = 1024;
let task = test_create_table_task(table_name, table_id);
@@ -335,8 +326,7 @@ async fn test_on_update_metadata_add_columns() {
})),
},
};
- let mut procedure =
- AlterTableProcedure::new(cluster_id, table_id, task, ddl_context.clone()).unwrap();
+ let mut procedure = AlterTableProcedure::new(table_id, task, ddl_context.clone()).unwrap();
procedure.on_prepare().await.unwrap();
procedure.submit_alter_region_requests().await.unwrap();
procedure.on_update_metadata().await.unwrap();
@@ -361,7 +351,6 @@ async fn test_on_update_metadata_add_columns() {
async fn test_on_update_table_options() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
- let cluster_id = 1;
let table_name = "foo";
let table_id = 1024;
let task = test_create_table_task(table_name, table_id);
@@ -398,8 +387,7 @@ async fn test_on_update_table_options() {
})),
},
};
- let mut procedure =
- AlterTableProcedure::new(cluster_id, table_id, task, ddl_context.clone()).unwrap();
+ let mut procedure = AlterTableProcedure::new(table_id, task, ddl_context.clone()).unwrap();
procedure.on_prepare().await.unwrap();
procedure.submit_alter_region_requests().await.unwrap();
procedure.on_update_metadata().await.unwrap();
diff --git a/src/common/meta/src/ddl/tests/create_flow.rs b/src/common/meta/src/ddl/tests/create_flow.rs
index a130e0590c47..4c9f86fe097a 100644
--- a/src/common/meta/src/ddl/tests/create_flow.rs
+++ b/src/common/meta/src/ddl/tests/create_flow.rs
@@ -25,11 +25,11 @@ use crate::ddl::create_flow::CreateFlowProcedure;
use crate::ddl::test_util::create_table::test_create_table_task;
use crate::ddl::test_util::flownode_handler::NaiveFlownodeHandler;
use crate::ddl::DdlContext;
+use crate::error;
use crate::key::table_route::TableRouteValue;
use crate::key::FlowId;
use crate::rpc::ddl::CreateFlowTask;
use crate::test_util::{new_ddl_context, MockFlownodeManager};
-use crate::{error, ClusterId};
pub(crate) fn test_create_flow_task(
name: &str,
@@ -53,7 +53,6 @@ pub(crate) fn test_create_flow_task(
#[tokio::test]
async fn test_create_flow_source_table_not_found() {
- let cluster_id = 1;
let source_table_names = vec![TableName::new(
DEFAULT_CATALOG_NAME,
DEFAULT_SCHEMA_NAME,
@@ -65,14 +64,13 @@ async fn test_create_flow_source_table_not_found() {
let node_manager = Arc::new(MockFlownodeManager::new(NaiveFlownodeHandler));
let ddl_context = new_ddl_context(node_manager);
let query_ctx = QueryContext::arc().into();
- let mut procedure = CreateFlowProcedure::new(cluster_id, task, query_ctx, ddl_context);
+ let mut procedure = CreateFlowProcedure::new(task, query_ctx, ddl_context);
let err = procedure.on_prepare().await.unwrap_err();
assert_matches!(err, error::Error::TableNotFound { .. });
}
pub(crate) async fn create_test_flow(
ddl_context: &DdlContext,
- cluster_id: ClusterId,
flow_name: &str,
source_table_names: Vec<TableName>,
sink_table_name: TableName,
@@ -84,8 +82,7 @@ pub(crate) async fn create_test_flow(
false,
);
let query_ctx = QueryContext::arc().into();
- let mut procedure =
- CreateFlowProcedure::new(cluster_id, task.clone(), query_ctx, ddl_context.clone());
+ let mut procedure = CreateFlowProcedure::new(task.clone(), query_ctx, ddl_context.clone());
let output = execute_procedure_until_done(&mut procedure).await.unwrap();
let flow_id = output.downcast_ref::<FlowId>().unwrap();
@@ -94,7 +91,6 @@ pub(crate) async fn create_test_flow(
#[tokio::test]
async fn test_create_flow() {
- let cluster_id = 1;
let table_id = 1024;
let source_table_names = vec![TableName::new(
DEFAULT_CATALOG_NAME,
@@ -118,7 +114,6 @@ async fn test_create_flow() {
.unwrap();
let flow_id = create_test_flow(
&ddl_context,
- cluster_id,
"my_flow",
source_table_names.clone(),
sink_table_name.clone(),
@@ -134,8 +129,7 @@ async fn test_create_flow() {
true,
);
let query_ctx = QueryContext::arc().into();
- let mut procedure =
- CreateFlowProcedure::new(cluster_id, task.clone(), query_ctx, ddl_context.clone());
+ let mut procedure = CreateFlowProcedure::new(task.clone(), query_ctx, ddl_context.clone());
let output = execute_procedure_until_done(&mut procedure).await.unwrap();
let flow_id = output.downcast_ref::<FlowId>().unwrap();
assert_eq!(*flow_id, 1024);
@@ -143,7 +137,7 @@ async fn test_create_flow() {
// Creates again
let task = test_create_flow_task("my_flow", source_table_names, sink_table_name, false);
let query_ctx = QueryContext::arc().into();
- let mut procedure = CreateFlowProcedure::new(cluster_id, task.clone(), query_ctx, ddl_context);
+ let mut procedure = CreateFlowProcedure::new(task.clone(), query_ctx, ddl_context);
let err = procedure.on_prepare().await.unwrap_err();
assert_matches!(err, error::Error::FlowAlreadyExists { .. });
}
diff --git a/src/common/meta/src/ddl/tests/create_logical_tables.rs b/src/common/meta/src/ddl/tests/create_logical_tables.rs
index c4f65bcac449..a331b32bb0ac 100644
--- a/src/common/meta/src/ddl/tests/create_logical_tables.rs
+++ b/src/common/meta/src/ddl/tests/create_logical_tables.rs
@@ -26,7 +26,7 @@ use crate::ddl::test_util::datanode_handler::NaiveDatanodeHandler;
use crate::ddl::test_util::{
create_physical_table_metadata, test_create_logical_table_task, test_create_physical_table_task,
};
-use crate::ddl::{TableMetadata, TableMetadataAllocatorContext};
+use crate::ddl::TableMetadata;
use crate::error::Error;
use crate::key::table_route::TableRouteValue;
use crate::test_util::{new_ddl_context, MockDatanodeManager};
@@ -35,11 +35,9 @@ use crate::test_util::{new_ddl_context, MockDatanodeManager};
async fn test_on_prepare_physical_table_not_found() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
- let cluster_id = 1;
let tasks = vec![test_create_logical_table_task("foo")];
let physical_table_id = 1024u32;
- let mut procedure =
- CreateLogicalTablesProcedure::new(cluster_id, tasks, physical_table_id, ddl_context);
+ let mut procedure = CreateLogicalTablesProcedure::new(tasks, physical_table_id, ddl_context);
let err = procedure.on_prepare().await.unwrap_err();
assert_matches!(err, Error::TableRouteNotFound { .. });
}
@@ -48,7 +46,6 @@ async fn test_on_prepare_physical_table_not_found() {
async fn test_on_prepare() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
- let cluster_id = 1;
// Prepares physical table metadata.
let mut create_physical_table_task = test_create_physical_table_task("phy_table");
let TableMetadata {
@@ -57,10 +54,7 @@ async fn test_on_prepare() {
..
} = ddl_context
.table_metadata_allocator
- .create(
- &TableMetadataAllocatorContext { cluster_id },
- &create_physical_table_task,
- )
+ .create(&create_physical_table_task)
.await
.unwrap();
create_physical_table_task.set_table_id(table_id);
@@ -73,8 +67,7 @@ async fn test_on_prepare() {
// The create logical table procedure.
let tasks = vec![test_create_logical_table_task("foo")];
let physical_table_id = table_id;
- let mut procedure =
- CreateLogicalTablesProcedure::new(cluster_id, tasks, physical_table_id, ddl_context);
+ let mut procedure = CreateLogicalTablesProcedure::new(tasks, physical_table_id, ddl_context);
let status = procedure.on_prepare().await.unwrap();
assert_matches!(status, Status::Executing { persist: true });
}
@@ -83,7 +76,6 @@ async fn test_on_prepare() {
async fn test_on_prepare_logical_table_exists_err() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
- let cluster_id = 1;
// Prepares physical table metadata.
let mut create_physical_table_task = test_create_physical_table_task("phy_table");
let TableMetadata {
@@ -92,10 +84,7 @@ async fn test_on_prepare_logical_table_exists_err() {
..
} = ddl_context
.table_metadata_allocator
- .create(
- &TableMetadataAllocatorContext { cluster_id },
- &create_physical_table_task,
- )
+ .create(&create_physical_table_task)
.await
.unwrap();
create_physical_table_task.set_table_id(table_id);
@@ -119,7 +108,7 @@ async fn test_on_prepare_logical_table_exists_err() {
// The create logical table procedure.
let physical_table_id = table_id;
let mut procedure =
- CreateLogicalTablesProcedure::new(cluster_id, vec![task], physical_table_id, ddl_context);
+ CreateLogicalTablesProcedure::new(vec![task], physical_table_id, ddl_context);
let err = procedure.on_prepare().await.unwrap_err();
assert_matches!(err, Error::TableAlreadyExists { .. });
assert_eq!(err.status_code(), StatusCode::TableAlreadyExists);
@@ -129,7 +118,6 @@ async fn test_on_prepare_logical_table_exists_err() {
async fn test_on_prepare_with_create_if_table_exists() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
- let cluster_id = 1;
// Prepares physical table metadata.
let mut create_physical_table_task = test_create_physical_table_task("phy_table");
let TableMetadata {
@@ -138,10 +126,7 @@ async fn test_on_prepare_with_create_if_table_exists() {
..
} = ddl_context
.table_metadata_allocator
- .create(
- &TableMetadataAllocatorContext { cluster_id },
- &create_physical_table_task,
- )
+ .create(&create_physical_table_task)
.await
.unwrap();
create_physical_table_task.set_table_id(table_id);
@@ -167,7 +152,7 @@ async fn test_on_prepare_with_create_if_table_exists() {
// Sets `create_if_not_exists`
task.create_table.create_if_not_exists = true;
let mut procedure =
- CreateLogicalTablesProcedure::new(cluster_id, vec![task], physical_table_id, ddl_context);
+ CreateLogicalTablesProcedure::new(vec![task], physical_table_id, ddl_context);
let status = procedure.on_prepare().await.unwrap();
let output = status.downcast_output_ref::<Vec<u32>>().unwrap();
assert_eq!(*output, vec![8192]);
@@ -177,7 +162,6 @@ async fn test_on_prepare_with_create_if_table_exists() {
async fn test_on_prepare_part_logical_tables_exist() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
- let cluster_id = 1;
// Prepares physical table metadata.
let mut create_physical_table_task = test_create_physical_table_task("phy_table");
let TableMetadata {
@@ -186,10 +170,7 @@ async fn test_on_prepare_part_logical_tables_exist() {
..
} = ddl_context
.table_metadata_allocator
- .create(
- &TableMetadataAllocatorContext { cluster_id },
- &create_physical_table_task,
- )
+ .create(&create_physical_table_task)
.await
.unwrap();
create_physical_table_task.set_table_id(table_id);
@@ -216,7 +197,6 @@ async fn test_on_prepare_part_logical_tables_exist() {
task.create_table.create_if_not_exists = true;
let non_exist_task = test_create_logical_table_task("non_exists");
let mut procedure = CreateLogicalTablesProcedure::new(
- cluster_id,
vec![task, non_exist_task],
physical_table_id,
ddl_context,
@@ -229,7 +209,6 @@ async fn test_on_prepare_part_logical_tables_exist() {
async fn test_on_create_metadata() {
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let ddl_context = new_ddl_context(node_manager);
- let cluster_id = 1;
// Prepares physical table metadata.
let mut create_physical_table_task = test_create_physical_table_task("phy_table");
let TableMetadata {
@@ -238,10 +217,7 @@ async fn test_on_create_metadata() {
..
} = ddl_context
.table_metadata_allocator
- .create(
- &TableMetadataAllocatorContext { cluster_id },
- &create_physical_table_task,
- )
+ .create(&create_physical_table_task)
.await
.unwrap();
create_physical_table_task.set_table_id(table_id);
@@ -257,7 +233,6 @@ async fn test_on_create_metadata() {
let task = test_create_logical_table_task("foo");
let yet_another_task = test_create_logical_table_task("bar");
let mut procedure = CreateLogicalTablesProcedure::new(
- cluster_id,
vec![task, yet_another_task],
physical_table_id,
ddl_context,
@@ -279,7 +254,6 @@ async fn test_on_create_metadata() {
async fn test_on_create_metadata_part_logical_tables_exist() {
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let ddl_context = new_ddl_context(node_manager);
- let cluster_id = 1;
// Prepares physical table metadata.
let mut create_physical_table_task = test_create_physical_table_task("phy_table");
let TableMetadata {
@@ -288,10 +262,7 @@ async fn test_on_create_metadata_part_logical_tables_exist() {
..
} = ddl_context
.table_metadata_allocator
- .create(
- &TableMetadataAllocatorContext { cluster_id },
- &create_physical_table_task,
- )
+ .create(&create_physical_table_task)
.await
.unwrap();
create_physical_table_task.set_table_id(table_id);
@@ -318,7 +289,6 @@ async fn test_on_create_metadata_part_logical_tables_exist() {
task.create_table.create_if_not_exists = true;
let non_exist_task = test_create_logical_table_task("non_exists");
let mut procedure = CreateLogicalTablesProcedure::new(
- cluster_id,
vec![task, non_exist_task],
physical_table_id,
ddl_context,
@@ -340,7 +310,6 @@ async fn test_on_create_metadata_part_logical_tables_exist() {
async fn test_on_create_metadata_err() {
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let ddl_context = new_ddl_context(node_manager);
- let cluster_id = 1;
// Prepares physical table metadata.
let mut create_physical_table_task = test_create_physical_table_task("phy_table");
let TableMetadata {
@@ -349,10 +318,7 @@ async fn test_on_create_metadata_err() {
..
} = ddl_context
.table_metadata_allocator
- .create(
- &TableMetadataAllocatorContext { cluster_id },
- &create_physical_table_task,
- )
+ .create(&create_physical_table_task)
.await
.unwrap();
create_physical_table_task.set_table_id(table_id);
@@ -368,7 +334,6 @@ async fn test_on_create_metadata_err() {
let task = test_create_logical_table_task("foo");
let yet_another_task = test_create_logical_table_task("bar");
let mut procedure = CreateLogicalTablesProcedure::new(
- cluster_id,
vec![task.clone(), yet_another_task],
physical_table_id,
ddl_context.clone(),
diff --git a/src/common/meta/src/ddl/tests/create_table.rs b/src/common/meta/src/ddl/tests/create_table.rs
index b2756ceb4056..e62329c78097 100644
--- a/src/common/meta/src/ddl/tests/create_table.rs
+++ b/src/common/meta/src/ddl/tests/create_table.rs
@@ -87,7 +87,6 @@ pub(crate) fn test_create_table_task(name: &str) -> CreateTableTask {
async fn test_on_prepare_table_exists_err() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
- let cluster_id = 1;
let task = test_create_table_task("foo");
assert!(!task.create_table.create_if_not_exists);
// Puts a value to table name key.
@@ -100,7 +99,7 @@ async fn test_on_prepare_table_exists_err() {
)
.await
.unwrap();
- let mut procedure = CreateTableProcedure::new(cluster_id, task, ddl_context);
+ let mut procedure = CreateTableProcedure::new(task, ddl_context);
let err = procedure.on_prepare().await.unwrap_err();
assert_matches!(err, Error::TableAlreadyExists { .. });
assert_eq!(err.status_code(), StatusCode::TableAlreadyExists);
@@ -110,7 +109,6 @@ async fn test_on_prepare_table_exists_err() {
async fn test_on_prepare_with_create_if_table_exists() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
- let cluster_id = 1;
let mut task = test_create_table_task("foo");
task.create_table.create_if_not_exists = true;
task.table_info.ident.table_id = 1024;
@@ -124,7 +122,7 @@ async fn test_on_prepare_with_create_if_table_exists() {
)
.await
.unwrap();
- let mut procedure = CreateTableProcedure::new(cluster_id, task, ddl_context);
+ let mut procedure = CreateTableProcedure::new(task, ddl_context);
let status = procedure.on_prepare().await.unwrap();
assert_matches!(status, Status::Done { output: Some(..) });
let table_id = *status.downcast_output_ref::<u32>().unwrap();
@@ -135,10 +133,9 @@ async fn test_on_prepare_with_create_if_table_exists() {
async fn test_on_prepare_without_create_if_table_exists() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
- let cluster_id = 1;
let mut task = test_create_table_task("foo");
task.create_table.create_if_not_exists = true;
- let mut procedure = CreateTableProcedure::new(cluster_id, task, ddl_context);
+ let mut procedure = CreateTableProcedure::new(task, ddl_context);
let status = procedure.on_prepare().await.unwrap();
assert_matches!(status, Status::Executing { persist: true });
assert_eq!(procedure.table_id(), 1024);
@@ -148,11 +145,10 @@ async fn test_on_prepare_without_create_if_table_exists() {
async fn test_on_prepare_with_no_partition_err() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
- let cluster_id = 1;
let mut task = test_create_table_task("foo");
task.partitions = vec![];
task.create_table.create_if_not_exists = true;
- let mut procedure = CreateTableProcedure::new(cluster_id, task, ddl_context);
+ let mut procedure = CreateTableProcedure::new(task, ddl_context);
let err = procedure.on_prepare().await.unwrap_err();
assert_matches!(err, Error::Unexpected { .. });
assert!(err
@@ -165,10 +161,9 @@ async fn test_on_datanode_create_regions_should_retry() {
common_telemetry::init_default_ut_logging();
let node_manager = Arc::new(MockDatanodeManager::new(RetryErrorDatanodeHandler));
let ddl_context = new_ddl_context(node_manager);
- let cluster_id = 1;
let task = test_create_table_task("foo");
assert!(!task.create_table.create_if_not_exists);
- let mut procedure = CreateTableProcedure::new(cluster_id, task, ddl_context);
+ let mut procedure = CreateTableProcedure::new(task, ddl_context);
procedure.on_prepare().await.unwrap();
let ctx = ProcedureContext {
procedure_id: ProcedureId::random(),
@@ -183,10 +178,9 @@ async fn test_on_datanode_create_regions_should_not_retry() {
common_telemetry::init_default_ut_logging();
let node_manager = Arc::new(MockDatanodeManager::new(UnexpectedErrorDatanodeHandler));
let ddl_context = new_ddl_context(node_manager);
- let cluster_id = 1;
let task = test_create_table_task("foo");
assert!(!task.create_table.create_if_not_exists);
- let mut procedure = CreateTableProcedure::new(cluster_id, task, ddl_context);
+ let mut procedure = CreateTableProcedure::new(task, ddl_context);
procedure.on_prepare().await.unwrap();
let ctx = ProcedureContext {
procedure_id: ProcedureId::random(),
@@ -201,10 +195,9 @@ async fn test_on_create_metadata_error() {
common_telemetry::init_default_ut_logging();
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let ddl_context = new_ddl_context(node_manager);
- let cluster_id = 1;
let task = test_create_table_task("foo");
assert!(!task.create_table.create_if_not_exists);
- let mut procedure = CreateTableProcedure::new(cluster_id, task.clone(), ddl_context.clone());
+ let mut procedure = CreateTableProcedure::new(task.clone(), ddl_context.clone());
procedure.on_prepare().await.unwrap();
let ctx = ProcedureContext {
procedure_id: ProcedureId::random(),
@@ -233,10 +226,9 @@ async fn test_on_create_metadata() {
common_telemetry::init_default_ut_logging();
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let ddl_context = new_ddl_context(node_manager);
- let cluster_id = 1;
let task = test_create_table_task("foo");
assert!(!task.create_table.create_if_not_exists);
- let mut procedure = CreateTableProcedure::new(cluster_id, task, ddl_context);
+ let mut procedure = CreateTableProcedure::new(task, ddl_context);
procedure.on_prepare().await.unwrap();
let ctx = ProcedureContext {
procedure_id: ProcedureId::random(),
@@ -251,14 +243,12 @@ async fn test_on_create_metadata() {
#[tokio::test]
async fn test_memory_region_keeper_guard_dropped_on_procedure_done() {
- let cluster_id = 1;
-
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let kv_backend = Arc::new(MemoryKvBackend::new());
let ddl_context = new_ddl_context_with_kv_backend(node_manager, kv_backend);
let task = test_create_table_task("foo");
- let mut procedure = CreateTableProcedure::new(cluster_id, task, ddl_context.clone());
+ let mut procedure = CreateTableProcedure::new(task, ddl_context.clone());
execute_procedure_until(&mut procedure, |p| {
p.creator.data.state == CreateTableState::CreateMetadata
diff --git a/src/common/meta/src/ddl/tests/create_view.rs b/src/common/meta/src/ddl/tests/create_view.rs
index f20022f49dbc..3f833333d2a3 100644
--- a/src/common/meta/src/ddl/tests/create_view.rs
+++ b/src/common/meta/src/ddl/tests/create_view.rs
@@ -97,7 +97,6 @@ pub(crate) fn test_create_view_task(name: &str) -> CreateViewTask {
async fn test_on_prepare_view_exists_err() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
- let cluster_id = 1;
let task = test_create_view_task("foo");
assert!(!task.create_view.create_if_not_exists);
// Puts a value to table name key.
@@ -113,7 +112,7 @@ async fn test_on_prepare_view_exists_err() {
)
.await
.unwrap();
- let mut procedure = CreateViewProcedure::new(cluster_id, task, ddl_context);
+ let mut procedure = CreateViewProcedure::new(task, ddl_context);
let err = procedure.on_prepare().await.unwrap_err();
assert_matches!(err, Error::ViewAlreadyExists { .. });
assert_eq!(err.status_code(), StatusCode::TableAlreadyExists);
@@ -123,7 +122,6 @@ async fn test_on_prepare_view_exists_err() {
async fn test_on_prepare_with_create_if_view_exists() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
- let cluster_id = 1;
let mut task = test_create_view_task("foo");
task.create_view.create_if_not_exists = true;
task.view_info.ident.table_id = 1024;
@@ -140,7 +138,7 @@ async fn test_on_prepare_with_create_if_view_exists() {
)
.await
.unwrap();
- let mut procedure = CreateViewProcedure::new(cluster_id, task, ddl_context);
+ let mut procedure = CreateViewProcedure::new(task, ddl_context);
let status = procedure.on_prepare().await.unwrap();
assert_matches!(status, Status::Done { output: Some(..) });
let table_id = *status.downcast_output_ref::<u32>().unwrap();
@@ -151,10 +149,9 @@ async fn test_on_prepare_with_create_if_view_exists() {
async fn test_on_prepare_without_create_if_table_exists() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
- let cluster_id = 1;
let mut task = test_create_view_task("foo");
task.create_view.create_if_not_exists = true;
- let mut procedure = CreateViewProcedure::new(cluster_id, task, ddl_context);
+ let mut procedure = CreateViewProcedure::new(task, ddl_context);
let status = procedure.on_prepare().await.unwrap();
assert_matches!(status, Status::Executing { persist: true });
assert_eq!(procedure.view_id(), 1024);
@@ -165,10 +162,9 @@ async fn test_on_create_metadata() {
common_telemetry::init_default_ut_logging();
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let ddl_context = new_ddl_context(node_manager);
- let cluster_id = 1;
let task = test_create_view_task("foo");
assert!(!task.create_view.create_if_not_exists);
- let mut procedure = CreateViewProcedure::new(cluster_id, task, ddl_context);
+ let mut procedure = CreateViewProcedure::new(task, ddl_context);
procedure.on_prepare().await.unwrap();
let ctx = ProcedureContext {
procedure_id: ProcedureId::random(),
@@ -185,10 +181,9 @@ async fn test_replace_view_metadata() {
common_telemetry::init_default_ut_logging();
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let ddl_context = new_ddl_context(node_manager.clone());
- let cluster_id = 1;
let task = test_create_view_task("foo");
assert!(!task.create_view.create_if_not_exists);
- let mut procedure = CreateViewProcedure::new(cluster_id, task.clone(), ddl_context.clone());
+ let mut procedure = CreateViewProcedure::new(task.clone(), ddl_context.clone());
procedure.on_prepare().await.unwrap();
let ctx = ProcedureContext {
procedure_id: ProcedureId::random(),
@@ -213,7 +208,7 @@ async fn test_replace_view_metadata() {
let mut task = test_create_view_task("foo");
// The view already exists, prepare should fail
{
- let mut procedure = CreateViewProcedure::new(cluster_id, task.clone(), ddl_context.clone());
+ let mut procedure = CreateViewProcedure::new(task.clone(), ddl_context.clone());
let err = procedure.on_prepare().await.unwrap_err();
assert_matches!(err, Error::ViewAlreadyExists { .. });
assert_eq!(err.status_code(), StatusCode::TableAlreadyExists);
@@ -224,7 +219,7 @@ async fn test_replace_view_metadata() {
task.create_view.logical_plan = vec![4, 5, 6];
task.create_view.definition = "new_definition".to_string();
- let mut procedure = CreateViewProcedure::new(cluster_id, task, ddl_context.clone());
+ let mut procedure = CreateViewProcedure::new(task, ddl_context.clone());
procedure.on_prepare().await.unwrap();
let ctx = ProcedureContext {
procedure_id: ProcedureId::random(),
@@ -254,12 +249,11 @@ async fn test_replace_table() {
common_telemetry::init_default_ut_logging();
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let ddl_context = new_ddl_context(node_manager.clone());
- let cluster_id = 1;
{
// Create a `foo` table.
let task = test_create_table_task("foo");
- let mut procedure = CreateTableProcedure::new(cluster_id, task, ddl_context.clone());
+ let mut procedure = CreateTableProcedure::new(task, ddl_context.clone());
procedure.on_prepare().await.unwrap();
let ctx = ProcedureContext {
procedure_id: ProcedureId::random(),
@@ -272,7 +266,7 @@ async fn test_replace_table() {
// Try to replace a view named `foo` too.
let mut task = test_create_view_task("foo");
task.create_view.or_replace = true;
- let mut procedure = CreateViewProcedure::new(cluster_id, task.clone(), ddl_context.clone());
+ let mut procedure = CreateViewProcedure::new(task.clone(), ddl_context.clone());
let err = procedure.on_prepare().await.unwrap_err();
assert_matches!(err, Error::TableAlreadyExists { .. });
assert_eq!(err.status_code(), StatusCode::TableAlreadyExists);
diff --git a/src/common/meta/src/ddl/tests/drop_database.rs b/src/common/meta/src/ddl/tests/drop_database.rs
index 66a5d3e7568b..8118cda53dda 100644
--- a/src/common/meta/src/ddl/tests/drop_database.rs
+++ b/src/common/meta/src/ddl/tests/drop_database.rs
@@ -31,7 +31,6 @@ use crate::test_util::{new_ddl_context, MockDatanodeManager};
#[tokio::test]
async fn test_drop_database_with_logical_tables() {
common_telemetry::init_default_ut_logging();
- let cluster_id = 1;
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let ddl_context = new_ddl_context(node_manager);
ddl_context
@@ -45,11 +44,11 @@ async fn test_drop_database_with_logical_tables() {
.await
.unwrap();
// Creates physical table
- let phy_id = create_physical_table(&ddl_context, cluster_id, "phy").await;
+ let phy_id = create_physical_table(&ddl_context, "phy").await;
// Creates 3 logical tables
- create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table1").await;
- create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table2").await;
- create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table3").await;
+ create_logical_table(ddl_context.clone(), phy_id, "table1").await;
+ create_logical_table(ddl_context.clone(), phy_id, "table2").await;
+ create_logical_table(ddl_context.clone(), phy_id, "table3").await;
let mut procedure = DropDatabaseProcedure::new(
DEFAULT_CATALOG_NAME.to_string(),
@@ -80,7 +79,6 @@ async fn test_drop_database_with_logical_tables() {
#[tokio::test]
async fn test_drop_database_retryable_error() {
common_telemetry::init_default_ut_logging();
- let cluster_id = 1;
let node_manager = Arc::new(MockDatanodeManager::new(RetryErrorDatanodeHandler));
let ddl_context = new_ddl_context(node_manager);
ddl_context
@@ -94,11 +92,11 @@ async fn test_drop_database_retryable_error() {
.await
.unwrap();
// Creates physical table
- let phy_id = create_physical_table(&ddl_context, cluster_id, "phy").await;
+ let phy_id = create_physical_table(&ddl_context, "phy").await;
// Creates 3 logical tables
- create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table1").await;
- create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table2").await;
- create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table3").await;
+ create_logical_table(ddl_context.clone(), phy_id, "table1").await;
+ create_logical_table(ddl_context.clone(), phy_id, "table2").await;
+ create_logical_table(ddl_context.clone(), phy_id, "table3").await;
let mut procedure = DropDatabaseProcedure::new(
DEFAULT_CATALOG_NAME.to_string(),
@@ -128,7 +126,6 @@ async fn test_drop_database_retryable_error() {
#[tokio::test]
async fn test_drop_database_recover() {
common_telemetry::init_default_ut_logging();
- let cluster_id = 1;
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let ddl_context = new_ddl_context(node_manager);
ddl_context
@@ -142,9 +139,9 @@ async fn test_drop_database_recover() {
.await
.unwrap();
// Creates a physical table
- let phy_id = create_physical_table(&ddl_context, cluster_id, "phy").await;
+ let phy_id = create_physical_table(&ddl_context, "phy").await;
// Creates a logical table
- create_logical_table(ddl_context.clone(), cluster_id, phy_id, "table1").await;
+ create_logical_table(ddl_context.clone(), phy_id, "table1").await;
let mut procedure = DropDatabaseProcedure::new(
DEFAULT_CATALOG_NAME.to_string(),
DEFAULT_SCHEMA_NAME.to_string(),
diff --git a/src/common/meta/src/ddl/tests/drop_flow.rs b/src/common/meta/src/ddl/tests/drop_flow.rs
index 97b4632a595a..9afb36a7d43f 100644
--- a/src/common/meta/src/ddl/tests/drop_flow.rs
+++ b/src/common/meta/src/ddl/tests/drop_flow.rs
@@ -40,12 +40,11 @@ fn test_drop_flow_task(flow_name: &str, flow_id: u32, drop_if_exists: bool) -> D
#[tokio::test]
async fn test_drop_flow_not_found() {
- let cluster_id = 1;
let flow_id = 1024;
let node_manager = Arc::new(MockFlownodeManager::new(NaiveFlownodeHandler));
let ddl_context = new_ddl_context(node_manager);
let task = test_drop_flow_task("my_flow", flow_id, false);
- let mut procedure = DropFlowProcedure::new(cluster_id, task, ddl_context);
+ let mut procedure = DropFlowProcedure::new(task, ddl_context);
let err = procedure.on_prepare().await.unwrap_err();
assert_matches!(err, error::Error::FlowNotFound { .. });
}
@@ -53,7 +52,6 @@ async fn test_drop_flow_not_found() {
#[tokio::test]
async fn test_drop_flow() {
// create a flow
- let cluster_id = 1;
let table_id = 1024;
let source_table_names = vec![TableName::new(
DEFAULT_CATALOG_NAME,
@@ -75,27 +73,21 @@ async fn test_drop_flow() {
)
.await
.unwrap();
- let flow_id = create_test_flow(
- &ddl_context,
- cluster_id,
- "my_flow",
- source_table_names,
- sink_table_name,
- )
- .await;
+ let flow_id =
+ create_test_flow(&ddl_context, "my_flow", source_table_names, sink_table_name).await;
// Drops the flows
let task = test_drop_flow_task("my_flow", flow_id, false);
- let mut procedure = DropFlowProcedure::new(cluster_id, task, ddl_context.clone());
+ let mut procedure = DropFlowProcedure::new(task, ddl_context.clone());
execute_procedure_until_done(&mut procedure).await;
// Drops again with drop_if_exists = true
let task = test_drop_flow_task("my_flow", flow_id, true);
- let mut procedure = DropFlowProcedure::new(cluster_id, task, ddl_context.clone());
+ let mut procedure = DropFlowProcedure::new(task, ddl_context.clone());
execute_procedure_until_done(&mut procedure).await;
// Drops again
let task = test_drop_flow_task("my_flow", flow_id, false);
- let mut procedure = DropFlowProcedure::new(cluster_id, task, ddl_context);
+ let mut procedure = DropFlowProcedure::new(task, ddl_context);
let err = procedure.on_prepare().await.unwrap_err();
assert_matches!(err, error::Error::FlowNotFound { .. });
}
diff --git a/src/common/meta/src/ddl/tests/drop_table.rs b/src/common/meta/src/ddl/tests/drop_table.rs
index c3a5f5875cad..3e09f65422e2 100644
--- a/src/common/meta/src/ddl/tests/drop_table.rs
+++ b/src/common/meta/src/ddl/tests/drop_table.rs
@@ -35,7 +35,7 @@ use crate::ddl::test_util::{
create_logical_table, create_physical_table, create_physical_table_metadata,
test_create_logical_table_task, test_create_physical_table_task,
};
-use crate::ddl::{TableMetadata, TableMetadataAllocatorContext};
+use crate::ddl::TableMetadata;
use crate::key::table_route::TableRouteValue;
use crate::kv_backend::memory::MemoryKvBackend;
use crate::peer::Peer;
@@ -47,7 +47,6 @@ use crate::test_util::{new_ddl_context, new_ddl_context_with_kv_backend, MockDat
async fn test_on_prepare_table_not_exists_err() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
- let cluster_id = 1;
let table_name = "foo";
let table_id = 1024;
let task = test_create_table_task(table_name, table_id);
@@ -63,7 +62,7 @@ async fn test_on_prepare_table_not_exists_err() {
.unwrap();
let task = new_drop_table_task("bar", table_id, false);
- let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context);
+ let mut procedure = DropTableProcedure::new(task, ddl_context);
let err = procedure.on_prepare().await.unwrap_err();
assert_eq!(err.status_code(), StatusCode::TableNotFound);
}
@@ -72,7 +71,6 @@ async fn test_on_prepare_table_not_exists_err() {
async fn test_on_prepare_table() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
- let cluster_id = 1;
let table_name = "foo";
let table_id = 1024;
let task = test_create_table_task(table_name, table_id);
@@ -89,13 +87,13 @@ async fn test_on_prepare_table() {
let task = new_drop_table_task("bar", table_id, true);
// Drop if exists
- let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context.clone());
+ let mut procedure = DropTableProcedure::new(task, ddl_context.clone());
procedure.on_prepare().await.unwrap();
assert!(!procedure.rollback_supported());
let task = new_drop_table_task(table_name, table_id, false);
// Drop table
- let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context);
+ let mut procedure = DropTableProcedure::new(task, ddl_context);
procedure.on_prepare().await.unwrap();
}
@@ -105,7 +103,6 @@ async fn test_on_datanode_drop_regions() {
let datanode_handler = DatanodeWatcher(tx);
let node_manager = Arc::new(MockDatanodeManager::new(datanode_handler));
let ddl_context = new_ddl_context(node_manager);
- let cluster_id = 1;
let table_id = 1024;
let table_name = "foo";
let task = test_create_table_task(table_name, table_id);
@@ -144,7 +141,7 @@ async fn test_on_datanode_drop_regions() {
let task = new_drop_table_task(table_name, table_id, false);
// Drop table
- let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context);
+ let mut procedure = DropTableProcedure::new(task, ddl_context);
procedure.on_prepare().await.unwrap();
procedure.on_datanode_drop_regions().await.unwrap();
@@ -179,7 +176,6 @@ async fn test_on_rollback() {
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let kv_backend = Arc::new(MemoryKvBackend::new());
let ddl_context = new_ddl_context_with_kv_backend(node_manager, kv_backend.clone());
- let cluster_id = 1;
// Prepares physical table metadata.
let mut create_physical_table_task = test_create_physical_table_task("phy_table");
let TableMetadata {
@@ -188,10 +184,7 @@ async fn test_on_rollback() {
..
} = ddl_context
.table_metadata_allocator
- .create(
- &TableMetadataAllocatorContext { cluster_id },
- &create_physical_table_task,
- )
+ .create(&create_physical_table_task)
.await
.unwrap();
create_physical_table_task.set_table_id(table_id);
@@ -205,12 +198,8 @@ async fn test_on_rollback() {
let physical_table_id = table_id;
// Creates the logical table metadata.
let task = test_create_logical_table_task("foo");
- let mut procedure = CreateLogicalTablesProcedure::new(
- cluster_id,
- vec![task],
- physical_table_id,
- ddl_context.clone(),
- );
+ let mut procedure =
+ CreateLogicalTablesProcedure::new(vec![task], physical_table_id, ddl_context.clone());
procedure.on_prepare().await.unwrap();
let ctx = new_test_procedure_context();
procedure.execute(&ctx).await.unwrap();
@@ -223,7 +212,7 @@ async fn test_on_rollback() {
// Drops the physical table
{
let task = new_drop_table_task("phy_table", physical_table_id, false);
- let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context.clone());
+ let mut procedure = DropTableProcedure::new(task, ddl_context.clone());
procedure.on_prepare().await.unwrap();
assert!(procedure.rollback_supported());
procedure.on_delete_metadata().await.unwrap();
@@ -238,7 +227,7 @@ async fn test_on_rollback() {
// Drops the logical table
let task = new_drop_table_task("foo", table_ids[0], false);
- let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context.clone());
+ let mut procedure = DropTableProcedure::new(task, ddl_context.clone());
procedure.on_prepare().await.unwrap();
assert!(!procedure.rollback_supported());
}
@@ -255,18 +244,15 @@ fn new_drop_table_task(table_name: &str, table_id: TableId, drop_if_exists: bool
#[tokio::test]
async fn test_memory_region_keeper_guard_dropped_on_procedure_done() {
- let cluster_id = 1;
-
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let kv_backend = Arc::new(MemoryKvBackend::new());
let ddl_context = new_ddl_context_with_kv_backend(node_manager, kv_backend);
- let physical_table_id = create_physical_table(&ddl_context, cluster_id, "t").await;
- let logical_table_id =
- create_logical_table(ddl_context.clone(), cluster_id, physical_table_id, "s").await;
+ let physical_table_id = create_physical_table(&ddl_context, "t").await;
+ let logical_table_id = create_logical_table(ddl_context.clone(), physical_table_id, "s").await;
let inner_test = |task: DropTableTask| async {
- let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context.clone());
+ let mut procedure = DropTableProcedure::new(task, ddl_context.clone());
execute_procedure_until(&mut procedure, |p| {
p.data.state == DropTableState::InvalidateTableCache
})
@@ -304,14 +290,13 @@ async fn test_from_json() {
(DropTableState::DatanodeDropRegions, 1, 1),
(DropTableState::DeleteTombstone, 1, 0),
] {
- let cluster_id = 1;
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let kv_backend = Arc::new(MemoryKvBackend::new());
let ddl_context = new_ddl_context_with_kv_backend(node_manager, kv_backend);
- let physical_table_id = create_physical_table(&ddl_context, cluster_id, "t").await;
+ let physical_table_id = create_physical_table(&ddl_context, "t").await;
let task = new_drop_table_task("t", physical_table_id, false);
- let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context.clone());
+ let mut procedure = DropTableProcedure::new(task, ddl_context.clone());
execute_procedure_until(&mut procedure, |p| p.data.state == state).await;
let data = procedure.dump().unwrap();
assert_eq!(
@@ -334,14 +319,13 @@ async fn test_from_json() {
let num_operating_regions = 0;
let num_operating_regions_after_recovery = 0;
- let cluster_id = 1;
let node_manager = Arc::new(MockDatanodeManager::new(NaiveDatanodeHandler));
let kv_backend = Arc::new(MemoryKvBackend::new());
let ddl_context = new_ddl_context_with_kv_backend(node_manager, kv_backend);
- let physical_table_id = create_physical_table(&ddl_context, cluster_id, "t").await;
+ let physical_table_id = create_physical_table(&ddl_context, "t").await;
let task = new_drop_table_task("t", physical_table_id, false);
- let mut procedure = DropTableProcedure::new(cluster_id, task, ddl_context.clone());
+ let mut procedure = DropTableProcedure::new(task, ddl_context.clone());
execute_procedure_until_done(&mut procedure).await;
let data = procedure.dump().unwrap();
assert_eq!(
diff --git a/src/common/meta/src/ddl/tests/drop_view.rs b/src/common/meta/src/ddl/tests/drop_view.rs
index 1e0cb668597a..f6e8391e35d2 100644
--- a/src/common/meta/src/ddl/tests/drop_view.rs
+++ b/src/common/meta/src/ddl/tests/drop_view.rs
@@ -41,7 +41,6 @@ fn new_drop_view_task(view: &str, view_id: TableId, drop_if_exists: bool) -> Dro
async fn test_on_prepare_view_not_exists_err() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
- let cluster_id = 1;
let view_id = 1024;
let mut task = test_create_view_task("foo");
task.view_info.ident.table_id = view_id;
@@ -60,7 +59,7 @@ async fn test_on_prepare_view_not_exists_err() {
.unwrap();
let task = new_drop_view_task("bar", view_id, false);
- let mut procedure = DropViewProcedure::new(cluster_id, task, ddl_context);
+ let mut procedure = DropViewProcedure::new(task, ddl_context);
let err = procedure.on_prepare().await.unwrap_err();
assert_eq!(err.status_code(), StatusCode::TableNotFound);
}
@@ -69,7 +68,6 @@ async fn test_on_prepare_view_not_exists_err() {
async fn test_on_prepare_not_view_err() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
- let cluster_id = 1;
let view_id = 1024;
let view_name = "foo";
let task = test_create_table_task(view_name, view_id);
@@ -85,7 +83,7 @@ async fn test_on_prepare_not_view_err() {
.unwrap();
let task = new_drop_view_task(view_name, view_id, false);
- let mut procedure = DropViewProcedure::new(cluster_id, task, ddl_context);
+ let mut procedure = DropViewProcedure::new(task, ddl_context);
// It's not a view, expect error
let err = procedure.on_prepare().await.unwrap_err();
assert_eq!(err.status_code(), StatusCode::InvalidArguments);
@@ -95,7 +93,6 @@ async fn test_on_prepare_not_view_err() {
async fn test_on_prepare_success() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
- let cluster_id = 1;
let view_id = 1024;
let view_name = "foo";
let mut task = test_create_view_task("foo");
@@ -116,12 +113,12 @@ async fn test_on_prepare_success() {
let task = new_drop_view_task("bar", view_id, true);
// Drop if exists
- let mut procedure = DropViewProcedure::new(cluster_id, task, ddl_context.clone());
+ let mut procedure = DropViewProcedure::new(task, ddl_context.clone());
procedure.on_prepare().await.unwrap();
let task = new_drop_view_task(view_name, view_id, false);
// Prepare success
- let mut procedure = DropViewProcedure::new(cluster_id, task, ddl_context);
+ let mut procedure = DropViewProcedure::new(task, ddl_context);
procedure.on_prepare().await.unwrap();
assert_eq!(DropViewState::DeleteMetadata, procedure.state());
}
@@ -130,7 +127,6 @@ async fn test_on_prepare_success() {
async fn test_drop_view_success() {
let node_manager = Arc::new(MockDatanodeManager::new(()));
let ddl_context = new_ddl_context(node_manager);
- let cluster_id = 1;
let view_id = 1024;
let view_name = "foo";
let mut task = test_create_view_task("foo");
@@ -159,7 +155,7 @@ async fn test_drop_view_success() {
let task = new_drop_view_task(view_name, view_id, false);
// Prepare success
- let mut procedure = DropViewProcedure::new(cluster_id, task, ddl_context.clone());
+ let mut procedure = DropViewProcedure::new(task, ddl_context.clone());
execute_procedure_until_done(&mut procedure).await;
assert_eq!(DropViewState::InvalidateViewCache, procedure.state());
@@ -174,7 +170,7 @@ async fn test_drop_view_success() {
// Drop again
let task = new_drop_view_task(view_name, view_id, false);
- let mut procedure = DropViewProcedure::new(cluster_id, task, ddl_context);
+ let mut procedure = DropViewProcedure::new(task, ddl_context);
let err = procedure.on_prepare().await.unwrap_err();
assert_eq!(err.status_code(), StatusCode::TableNotFound);
}
diff --git a/src/common/meta/src/ddl/truncate_table.rs b/src/common/meta/src/ddl/truncate_table.rs
index edc7321e091c..c0608dc3b334 100644
--- a/src/common/meta/src/ddl/truncate_table.rs
+++ b/src/common/meta/src/ddl/truncate_table.rs
@@ -39,9 +39,9 @@ use crate::key::table_info::TableInfoValue;
use crate::key::table_name::TableNameKey;
use crate::key::DeserializedValueWithBytes;
use crate::lock_key::{CatalogLock, SchemaLock, TableLock};
+use crate::metrics;
use crate::rpc::ddl::TruncateTableTask;
use crate::rpc::router::{find_leader_regions, find_leaders, RegionRoute};
-use crate::{metrics, ClusterId};
pub struct TruncateTableProcedure {
context: DdlContext,
@@ -91,7 +91,6 @@ impl TruncateTableProcedure {
pub(crate) const TYPE_NAME: &'static str = "metasrv-procedure::TruncateTable";
pub(crate) fn new(
- cluster_id: ClusterId,
task: TruncateTableTask,
table_info_value: DeserializedValueWithBytes<TableInfoValue>,
region_routes: Vec<RegionRoute>,
@@ -99,7 +98,7 @@ impl TruncateTableProcedure {
) -> Self {
Self {
context,
- data: TruncateTableData::new(cluster_id, task, table_info_value, region_routes),
+ data: TruncateTableData::new(task, table_info_value, region_routes),
}
}
@@ -189,7 +188,6 @@ impl TruncateTableProcedure {
#[derive(Debug, Serialize, Deserialize)]
pub struct TruncateTableData {
state: TruncateTableState,
- cluster_id: ClusterId,
task: TruncateTableTask,
table_info_value: DeserializedValueWithBytes<TableInfoValue>,
region_routes: Vec<RegionRoute>,
@@ -197,14 +195,12 @@ pub struct TruncateTableData {
impl TruncateTableData {
pub fn new(
- cluster_id: ClusterId,
task: TruncateTableTask,
table_info_value: DeserializedValueWithBytes<TableInfoValue>,
region_routes: Vec<RegionRoute>,
) -> Self {
Self {
state: TruncateTableState::Prepare,
- cluster_id,
task,
table_info_value,
region_routes,
diff --git a/src/common/meta/src/ddl/utils.rs b/src/common/meta/src/ddl/utils.rs
index f6852db753a5..a0973855f65c 100644
--- a/src/common/meta/src/ddl/utils.rs
+++ b/src/common/meta/src/ddl/utils.rs
@@ -34,7 +34,6 @@ use crate::key::TableMetadataManagerRef;
use crate::peer::Peer;
use crate::rpc::ddl::CreateTableTask;
use crate::rpc::router::RegionRoute;
-use crate::ClusterId;
/// Adds [Peer] context if the error is unretryable.
pub fn add_peer_context_if_needed(datanode: Peer) -> impl FnOnce(Error) -> Error {
@@ -144,7 +143,6 @@ pub async fn get_physical_table_id(
/// Converts a list of [`RegionRoute`] to a list of [`DetectingRegion`].
pub fn convert_region_routes_to_detecting_regions(
- cluster_id: ClusterId,
region_routes: &[RegionRoute],
) -> Vec<DetectingRegion> {
region_routes
@@ -153,7 +151,7 @@ pub fn convert_region_routes_to_detecting_regions(
route
.leader_peer
.as_ref()
- .map(|peer| (cluster_id, peer.id, route.region.id))
+ .map(|peer| (peer.id, route.region.id))
})
.collect::<Vec<_>>()
}
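
Editor's aside, not part of the patch: a minimal, self-contained sketch of the new detecting-region shape after the cluster id is dropped. The types below are simplified stand-ins for the crate's `Peer`, `Region`, `RegionRoute`, and `DetectingRegion`, which this illustration does not import.

// Illustration only: simplified stand-ins for the common_meta types involved.
type DetectingRegion = (u64 /* datanode id */, u64 /* region id */);

struct Peer { id: u64 }
struct Region { id: u64 }
struct RegionRoute { region: Region, leader_peer: Option<Peer> }

fn convert_region_routes_to_detecting_regions(region_routes: &[RegionRoute]) -> Vec<DetectingRegion> {
    region_routes
        .iter()
        .filter_map(|route| {
            route.leader_peer.as_ref().map(|peer| (peer.id, route.region.id))
        })
        .collect()
}

fn main() {
    let routes = vec![RegionRoute {
        region: Region { id: 1 },
        leader_peer: Some(Peer { id: 42 }),
    }];
    let expected: Vec<DetectingRegion> = vec![(42, 1)];
    // Only the leader peer id and the region id identify a detecting region now.
    assert_eq!(convert_region_routes_to_detecting_regions(&routes), expected);
}
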
diff --git a/src/common/meta/src/ddl_manager.rs b/src/common/meta/src/ddl_manager.rs
index bac640d401a6..fa01e9f700f0 100644
--- a/src/common/meta/src/ddl_manager.rs
+++ b/src/common/meta/src/ddl_manager.rs
@@ -60,7 +60,6 @@ use crate::rpc::ddl::{
use crate::rpc::procedure;
use crate::rpc::procedure::{MigrateRegionRequest, MigrateRegionResponse, ProcedureStateResponse};
use crate::rpc::router::RegionRoute;
-use crate::ClusterId;
pub type DdlManagerRef = Arc<DdlManager>;
@@ -154,13 +153,12 @@ impl DdlManager {
#[tracing::instrument(skip_all)]
pub async fn submit_alter_table_task(
&self,
- cluster_id: ClusterId,
table_id: TableId,
alter_table_task: AlterTableTask,
) -> Result<(ProcedureId, Option<Output>)> {
let context = self.create_context();
- let procedure = AlterTableProcedure::new(cluster_id, table_id, alter_table_task, context)?;
+ let procedure = AlterTableProcedure::new(table_id, alter_table_task, context)?;
let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));
@@ -171,12 +169,11 @@ impl DdlManager {
#[tracing::instrument(skip_all)]
pub async fn submit_create_table_task(
&self,
- cluster_id: ClusterId,
create_table_task: CreateTableTask,
) -> Result<(ProcedureId, Option<Output>)> {
let context = self.create_context();
- let procedure = CreateTableProcedure::new(cluster_id, create_table_task, context);
+ let procedure = CreateTableProcedure::new(create_table_task, context);
let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));
@@ -187,12 +184,11 @@ impl DdlManager {
#[tracing::instrument(skip_all)]
pub async fn submit_create_view_task(
&self,
- cluster_id: ClusterId,
create_view_task: CreateViewTask,
) -> Result<(ProcedureId, Option<Output>)> {
let context = self.create_context();
- let procedure = CreateViewProcedure::new(cluster_id, create_view_task, context);
+ let procedure = CreateViewProcedure::new(create_view_task, context);
let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));
@@ -203,18 +199,13 @@ impl DdlManager {
#[tracing::instrument(skip_all)]
pub async fn submit_create_logical_table_tasks(
&self,
- cluster_id: ClusterId,
create_table_tasks: Vec<CreateTableTask>,
physical_table_id: TableId,
) -> Result<(ProcedureId, Option<Output>)> {
let context = self.create_context();
- let procedure = CreateLogicalTablesProcedure::new(
- cluster_id,
- create_table_tasks,
- physical_table_id,
- context,
- );
+ let procedure =
+ CreateLogicalTablesProcedure::new(create_table_tasks, physical_table_id, context);
let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));
@@ -225,18 +216,13 @@ impl DdlManager {
#[tracing::instrument(skip_all)]
pub async fn submit_alter_logical_table_tasks(
&self,
- cluster_id: ClusterId,
alter_table_tasks: Vec<AlterTableTask>,
physical_table_id: TableId,
) -> Result<(ProcedureId, Option<Output>)> {
let context = self.create_context();
- let procedure = AlterLogicalTablesProcedure::new(
- cluster_id,
- alter_table_tasks,
- physical_table_id,
- context,
- );
+ let procedure =
+ AlterLogicalTablesProcedure::new(alter_table_tasks, physical_table_id, context);
let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));
@@ -247,12 +233,11 @@ impl DdlManager {
#[tracing::instrument(skip_all)]
pub async fn submit_drop_table_task(
&self,
- cluster_id: ClusterId,
drop_table_task: DropTableTask,
) -> Result<(ProcedureId, Option<Output>)> {
let context = self.create_context();
- let procedure = DropTableProcedure::new(cluster_id, drop_table_task, context);
+ let procedure = DropTableProcedure::new(drop_table_task, context);
let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));
@@ -263,7 +248,6 @@ impl DdlManager {
#[tracing::instrument(skip_all)]
pub async fn submit_create_database(
&self,
- _cluster_id: ClusterId,
CreateDatabaseTask {
catalog,
schema,
@@ -283,7 +267,6 @@ impl DdlManager {
#[tracing::instrument(skip_all)]
pub async fn submit_drop_database(
&self,
- _cluster_id: ClusterId,
DropDatabaseTask {
catalog,
schema,
@@ -299,11 +282,10 @@ impl DdlManager {
pub async fn submit_alter_database(
&self,
- cluster_id: ClusterId,
alter_database_task: AlterDatabaseTask,
) -> Result<(ProcedureId, Option<Output>)> {
let context = self.create_context();
- let procedure = AlterDatabaseProcedure::new(cluster_id, alter_database_task, context)?;
+ let procedure = AlterDatabaseProcedure::new(alter_database_task, context)?;
let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));
self.submit_procedure(procedure_with_id).await
@@ -313,12 +295,11 @@ impl DdlManager {
#[tracing::instrument(skip_all)]
pub async fn submit_create_flow_task(
&self,
- cluster_id: ClusterId,
create_flow: CreateFlowTask,
query_context: QueryContext,
) -> Result<(ProcedureId, Option<Output>)> {
let context = self.create_context();
- let procedure = CreateFlowProcedure::new(cluster_id, create_flow, query_context, context);
+ let procedure = CreateFlowProcedure::new(create_flow, query_context, context);
let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));
self.submit_procedure(procedure_with_id).await
@@ -328,11 +309,10 @@ impl DdlManager {
#[tracing::instrument(skip_all)]
pub async fn submit_drop_flow_task(
&self,
- cluster_id: ClusterId,
drop_flow: DropFlowTask,
) -> Result<(ProcedureId, Option<Output>)> {
let context = self.create_context();
- let procedure = DropFlowProcedure::new(cluster_id, drop_flow, context);
+ let procedure = DropFlowProcedure::new(drop_flow, context);
let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));
self.submit_procedure(procedure_with_id).await
@@ -342,11 +322,10 @@ impl DdlManager {
#[tracing::instrument(skip_all)]
pub async fn submit_drop_view_task(
&self,
- cluster_id: ClusterId,
drop_view: DropViewTask,
) -> Result<(ProcedureId, Option<Output>)> {
let context = self.create_context();
- let procedure = DropViewProcedure::new(cluster_id, drop_view, context);
+ let procedure = DropViewProcedure::new(drop_view, context);
let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));
self.submit_procedure(procedure_with_id).await
@@ -356,14 +335,12 @@ impl DdlManager {
#[tracing::instrument(skip_all)]
pub async fn submit_truncate_table_task(
&self,
- cluster_id: ClusterId,
truncate_table_task: TruncateTableTask,
table_info_value: DeserializedValueWithBytes<TableInfoValue>,
region_routes: Vec<RegionRoute>,
) -> Result<(ProcedureId, Option<Output>)> {
let context = self.create_context();
let procedure = TruncateTableProcedure::new(
- cluster_id,
truncate_table_task,
table_info_value,
region_routes,
@@ -397,7 +374,6 @@ impl DdlManager {
async fn handle_truncate_table_task(
ddl_manager: &DdlManager,
- cluster_id: ClusterId,
truncate_table_task: TruncateTableTask,
) -> Result<SubmitDdlTaskResponse> {
let table_id = truncate_table_task.table_id;
@@ -416,12 +392,7 @@ async fn handle_truncate_table_task(
let table_route = table_route_value.into_inner().region_routes()?.clone();
let (id, _) = ddl_manager
- .submit_truncate_table_task(
- cluster_id,
- truncate_table_task,
- table_info_value,
- table_route,
- )
+ .submit_truncate_table_task(truncate_table_task, table_info_value, table_route)
.await?;
info!("Table: {table_id} is truncated via procedure_id {id:?}");
@@ -434,7 +405,6 @@ async fn handle_truncate_table_task(
async fn handle_alter_table_task(
ddl_manager: &DdlManager,
- cluster_id: ClusterId,
alter_table_task: AlterTableTask,
) -> Result<SubmitDdlTaskResponse> {
let table_ref = alter_table_task.table_ref();
@@ -468,7 +438,7 @@ async fn handle_alter_table_task(
);
let (id, _) = ddl_manager
- .submit_alter_table_task(cluster_id, table_id, alter_table_task)
+ .submit_alter_table_task(table_id, alter_table_task)
.await?;
info!("Table: {table_id} is altered via procedure_id {id:?}");
@@ -481,13 +451,10 @@ async fn handle_alter_table_task(
async fn handle_drop_table_task(
ddl_manager: &DdlManager,
- cluster_id: ClusterId,
drop_table_task: DropTableTask,
) -> Result<SubmitDdlTaskResponse> {
let table_id = drop_table_task.table_id;
- let (id, _) = ddl_manager
- .submit_drop_table_task(cluster_id, drop_table_task)
- .await?;
+ let (id, _) = ddl_manager.submit_drop_table_task(drop_table_task).await?;
info!("Table: {table_id} is dropped via procedure_id {id:?}");
@@ -499,11 +466,10 @@ async fn handle_drop_table_task(
async fn handle_create_table_task(
ddl_manager: &DdlManager,
- cluster_id: ClusterId,
create_table_task: CreateTableTask,
) -> Result<SubmitDdlTaskResponse> {
let (id, output) = ddl_manager
- .submit_create_table_task(cluster_id, create_table_task)
+ .submit_create_table_task(create_table_task)
.await?;
let procedure_id = id.to_string();
@@ -525,7 +491,6 @@ async fn handle_create_table_task(
async fn handle_create_logical_table_tasks(
ddl_manager: &DdlManager,
- cluster_id: ClusterId,
create_table_tasks: Vec<CreateTableTask>,
) -> Result<SubmitDdlTaskResponse> {
ensure!(
@@ -542,7 +507,7 @@ async fn handle_create_logical_table_tasks(
let num_logical_tables = create_table_tasks.len();
let (id, output) = ddl_manager
- .submit_create_logical_table_tasks(cluster_id, create_table_tasks, physical_table_id)
+ .submit_create_logical_table_tasks(create_table_tasks, physical_table_id)
.await?;
info!("{num_logical_tables} logical tables on physical table: {physical_table_id:?} is created via procedure_id {id:?}");
@@ -568,11 +533,10 @@ async fn handle_create_logical_table_tasks(
async fn handle_create_database_task(
ddl_manager: &DdlManager,
- cluster_id: ClusterId,
create_database_task: CreateDatabaseTask,
) -> Result<SubmitDdlTaskResponse> {
let (id, _) = ddl_manager
- .submit_create_database(cluster_id, create_database_task.clone())
+ .submit_create_database(create_database_task.clone())
.await?;
let procedure_id = id.to_string();
@@ -589,11 +553,10 @@ async fn handle_create_database_task(
async fn handle_drop_database_task(
ddl_manager: &DdlManager,
- cluster_id: ClusterId,
drop_database_task: DropDatabaseTask,
) -> Result<SubmitDdlTaskResponse> {
let (id, _) = ddl_manager
- .submit_drop_database(cluster_id, drop_database_task.clone())
+ .submit_drop_database(drop_database_task.clone())
.await?;
let procedure_id = id.to_string();
@@ -610,11 +573,10 @@ async fn handle_drop_database_task(
async fn handle_alter_database_task(
ddl_manager: &DdlManager,
- cluster_id: ClusterId,
alter_database_task: AlterDatabaseTask,
) -> Result<SubmitDdlTaskResponse> {
let (id, _) = ddl_manager
- .submit_alter_database(cluster_id, alter_database_task.clone())
+ .submit_alter_database(alter_database_task.clone())
.await?;
let procedure_id = id.to_string();
@@ -632,11 +594,10 @@ async fn handle_alter_database_task(
async fn handle_drop_flow_task(
ddl_manager: &DdlManager,
- cluster_id: ClusterId,
drop_flow_task: DropFlowTask,
) -> Result<SubmitDdlTaskResponse> {
let (id, _) = ddl_manager
- .submit_drop_flow_task(cluster_id, drop_flow_task.clone())
+ .submit_drop_flow_task(drop_flow_task.clone())
.await?;
let procedure_id = id.to_string();
@@ -653,11 +614,10 @@ async fn handle_drop_flow_task(
async fn handle_drop_view_task(
ddl_manager: &DdlManager,
- cluster_id: ClusterId,
drop_view_task: DropViewTask,
) -> Result<SubmitDdlTaskResponse> {
let (id, _) = ddl_manager
- .submit_drop_view_task(cluster_id, drop_view_task.clone())
+ .submit_drop_view_task(drop_view_task.clone())
.await?;
let procedure_id = id.to_string();
@@ -675,12 +635,11 @@ async fn handle_drop_view_task(
async fn handle_create_flow_task(
ddl_manager: &DdlManager,
- cluster_id: ClusterId,
create_flow_task: CreateFlowTask,
query_context: QueryContext,
) -> Result<SubmitDdlTaskResponse> {
let (id, output) = ddl_manager
- .submit_create_flow_task(cluster_id, create_flow_task.clone(), query_context)
+ .submit_create_flow_task(create_flow_task.clone(), query_context)
.await?;
let procedure_id = id.to_string();
@@ -712,7 +671,6 @@ async fn handle_create_flow_task(
async fn handle_alter_logical_table_tasks(
ddl_manager: &DdlManager,
- cluster_id: ClusterId,
alter_table_tasks: Vec<AlterTableTask>,
) -> Result<SubmitDdlTaskResponse> {
ensure!(
@@ -733,7 +691,7 @@ async fn handle_alter_logical_table_tasks(
let num_logical_tables = alter_table_tasks.len();
let (id, _) = ddl_manager
- .submit_alter_logical_table_tasks(cluster_id, alter_table_tasks, physical_table_id)
+ .submit_alter_logical_table_tasks(alter_table_tasks, physical_table_id)
.await?;
info!("{num_logical_tables} logical tables on physical table: {physical_table_id:?} is altered via procedure_id {id:?}");
@@ -749,11 +707,10 @@ async fn handle_alter_logical_table_tasks(
/// Handle the `[CreateViewTask]` and returns the DDL response when success.
async fn handle_create_view_task(
ddl_manager: &DdlManager,
- cluster_id: ClusterId,
create_view_task: CreateViewTask,
) -> Result<SubmitDdlTaskResponse> {
let (id, output) = ddl_manager
- .submit_create_view_task(cluster_id, create_view_task)
+ .submit_create_view_task(create_view_task)
.await?;
let procedure_id = id.to_string();
@@ -788,55 +745,43 @@ impl ProcedureExecutor for DdlManager {
.unwrap_or(TracingContext::from_current_span())
.attach(tracing::info_span!("DdlManager::submit_ddl_task"));
async move {
- let cluster_id = ctx.cluster_id.unwrap_or_default();
debug!("Submitting Ddl task: {:?}", request.task);
match request.task {
CreateTable(create_table_task) => {
- handle_create_table_task(self, cluster_id, create_table_task).await
- }
- DropTable(drop_table_task) => {
- handle_drop_table_task(self, cluster_id, drop_table_task).await
+ handle_create_table_task(self, create_table_task).await
}
+ DropTable(drop_table_task) => handle_drop_table_task(self, drop_table_task).await,
AlterTable(alter_table_task) => {
- handle_alter_table_task(self, cluster_id, alter_table_task).await
+ handle_alter_table_task(self, alter_table_task).await
}
TruncateTable(truncate_table_task) => {
- handle_truncate_table_task(self, cluster_id, truncate_table_task).await
+ handle_truncate_table_task(self, truncate_table_task).await
}
CreateLogicalTables(create_table_tasks) => {
- handle_create_logical_table_tasks(self, cluster_id, create_table_tasks).await
+ handle_create_logical_table_tasks(self, create_table_tasks).await
}
AlterLogicalTables(alter_table_tasks) => {
- handle_alter_logical_table_tasks(self, cluster_id, alter_table_tasks).await
+ handle_alter_logical_table_tasks(self, alter_table_tasks).await
}
DropLogicalTables(_) => todo!(),
CreateDatabase(create_database_task) => {
- handle_create_database_task(self, cluster_id, create_database_task).await
+ handle_create_database_task(self, create_database_task).await
}
DropDatabase(drop_database_task) => {
- handle_drop_database_task(self, cluster_id, drop_database_task).await
+ handle_drop_database_task(self, drop_database_task).await
}
AlterDatabase(alter_database_task) => {
- handle_alter_database_task(self, cluster_id, alter_database_task).await
+ handle_alter_database_task(self, alter_database_task).await
}
CreateFlow(create_flow_task) => {
- handle_create_flow_task(
- self,
- cluster_id,
- create_flow_task,
- request.query_context.into(),
- )
- .await
- }
- DropFlow(drop_flow_task) => {
- handle_drop_flow_task(self, cluster_id, drop_flow_task).await
+ handle_create_flow_task(self, create_flow_task, request.query_context.into())
+ .await
}
+ DropFlow(drop_flow_task) => handle_drop_flow_task(self, drop_flow_task).await,
CreateView(create_view_task) => {
- handle_create_view_task(self, cluster_id, create_view_task).await
- }
- DropView(drop_view_task) => {
- handle_drop_view_task(self, cluster_id, drop_view_task).await
+ handle_create_view_task(self, create_view_task).await
}
+ DropView(drop_view_task) => handle_drop_view_task(self, drop_view_task).await,
}
}
.trace(span)
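
Editor's aside, not part of the patch: a caller-side sketch of the slimmed-down submit API. The helper name is hypothetical, and the snippet assumes the workspace crates `common_meta` and `common_telemetry` as dependencies.

use common_meta::ddl_manager::DdlManager;
use common_meta::error::Result;
use common_meta::rpc::ddl::DropTableTask;

// Hypothetical helper: submits a drop-table DDL with the cluster-id-free API.
async fn submit_drop_table(ddl_manager: &DdlManager, task: DropTableTask) -> Result<()> {
    // No cluster id argument any more; the task plus the manager's context is enough.
    let (procedure_id, _output) = ddl_manager.submit_drop_table_task(task).await?;
    common_telemetry::info!("Drop table submitted via procedure_id {procedure_id:?}");
    Ok(())
}
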
diff --git a/src/common/meta/src/instruction.rs b/src/common/meta/src/instruction.rs
index 4864f7562d10..8cfc06e882aa 100644
--- a/src/common/meta/src/instruction.rs
+++ b/src/common/meta/src/instruction.rs
@@ -26,11 +26,10 @@ use crate::flow_name::FlowName;
use crate::key::schema_name::SchemaName;
use crate::key::FlowId;
use crate::peer::Peer;
-use crate::{ClusterId, DatanodeId, FlownodeId};
+use crate::{DatanodeId, FlownodeId};
#[derive(Eq, Hash, PartialEq, Clone, Debug, Serialize, Deserialize)]
pub struct RegionIdent {
- pub cluster_id: ClusterId,
pub datanode_id: DatanodeId,
pub table_id: TableId,
pub region_number: RegionNumber,
@@ -47,8 +46,8 @@ impl Display for RegionIdent {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(
f,
- "RegionIdent(datanode_id='{}.{}', table_id={}, region_number={}, engine = {})",
- self.cluster_id, self.datanode_id, self.table_id, self.region_number, self.engine
+ "RegionIdent(datanode_id='{}', table_id={}, region_number={}, engine = {})",
+ self.datanode_id, self.table_id, self.region_number, self.engine
)
}
}
@@ -262,7 +261,6 @@ mod tests {
fn test_serialize_instruction() {
let open_region = Instruction::OpenRegion(OpenRegion::new(
RegionIdent {
- cluster_id: 1,
datanode_id: 2,
table_id: 1024,
region_number: 1,
@@ -277,12 +275,11 @@ mod tests {
let serialized = serde_json::to_string(&open_region).unwrap();
assert_eq!(
- r#"{"OpenRegion":{"region_ident":{"cluster_id":1,"datanode_id":2,"table_id":1024,"region_number":1,"engine":"mito2"},"region_storage_path":"test/foo","region_options":{},"region_wal_options":{},"skip_wal_replay":false}}"#,
+ r#"{"OpenRegion":{"region_ident":{"datanode_id":2,"table_id":1024,"region_number":1,"engine":"mito2"},"region_storage_path":"test/foo","region_options":{},"region_wal_options":{},"skip_wal_replay":false}}"#,
serialized
);
let close_region = Instruction::CloseRegion(RegionIdent {
- cluster_id: 1,
datanode_id: 2,
table_id: 1024,
region_number: 1,
@@ -292,7 +289,7 @@ mod tests {
let serialized = serde_json::to_string(&close_region).unwrap();
assert_eq!(
- r#"{"CloseRegion":{"cluster_id":1,"datanode_id":2,"table_id":1024,"region_number":1,"engine":"mito2"}}"#,
+ r#"{"CloseRegion":{"datanode_id":2,"table_id":1024,"region_number":1,"engine":"mito2"}}"#,
serialized
);
}
@@ -307,7 +304,6 @@ mod tests {
#[test]
fn test_compatible_serialize_open_region() {
let region_ident = RegionIdent {
- cluster_id: 1,
datanode_id: 2,
table_id: 1024,
region_number: 1,
diff --git a/src/common/meta/src/lib.rs b/src/common/meta/src/lib.rs
index 7479a1433769..ccd00ab8901c 100644
--- a/src/common/meta/src/lib.rs
+++ b/src/common/meta/src/lib.rs
@@ -47,8 +47,6 @@ pub mod test_util;
pub mod util;
pub mod wal_options_allocator;
-// The id of the cluster.
-pub type ClusterId = u64;
// The id of the datanode.
pub type DatanodeId = u64;
// The id of the flownode.
diff --git a/src/common/meta/src/node_expiry_listener.rs b/src/common/meta/src/node_expiry_listener.rs
index c5da2936a59d..7bc69f906563 100644
--- a/src/common/meta/src/node_expiry_listener.rs
+++ b/src/common/meta/src/node_expiry_listener.rs
@@ -99,7 +99,7 @@ impl NodeExpiryListener {
in_memory: &ResettableKvBackendRef,
max_idle_time: Duration,
) -> error::Result<impl Iterator<Item = NodeInfoKey>> {
- let prefix = NodeInfoKey::key_prefix_with_cluster_id(0);
+ let prefix = NodeInfoKey::key_prefix();
let req = RangeRequest::new().with_prefix(prefix);
let current_time_millis = common_time::util::current_time_millis();
let resp = in_memory.range(req).await?;
diff --git a/src/common/meta/src/peer.rs b/src/common/meta/src/peer.rs
index af1739ef91bf..daa64e36e716 100644
--- a/src/common/meta/src/peer.rs
+++ b/src/common/meta/src/peer.rs
@@ -19,7 +19,7 @@ use api::v1::meta::Peer as PbPeer;
use serde::{Deserialize, Serialize};
use crate::error::Error;
-use crate::{ClusterId, DatanodeId, FlownodeId};
+use crate::{DatanodeId, FlownodeId};
#[derive(Debug, Default, Clone, Hash, Eq, PartialEq, Deserialize, Serialize)]
pub struct Peer {
@@ -72,8 +72,8 @@ impl Display for Peer {
/// can query peer given a node id
#[async_trait::async_trait]
pub trait PeerLookupService {
- async fn datanode(&self, cluster_id: ClusterId, id: DatanodeId) -> Result<Option<Peer>, Error>;
- async fn flownode(&self, cluster_id: ClusterId, id: FlownodeId) -> Result<Option<Peer>, Error>;
+ async fn datanode(&self, id: DatanodeId) -> Result<Option<Peer>, Error>;
+ async fn flownode(&self, id: FlownodeId) -> Result<Option<Peer>, Error>;
}
pub type PeerLookupServiceRef = Arc<dyn PeerLookupService + Send + Sync>;
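
Editor's aside, not part of the patch: a usage sketch of the narrowed trait. The helper is hypothetical and assumes `common_meta` as a dependency; callers now resolve peers by node id alone, with no cluster id to thread through.

use common_meta::error::Result;
use common_meta::peer::{Peer, PeerLookupService, PeerLookupServiceRef};

// Hypothetical helper showing the caller side: the node id is the whole lookup key now.
async fn resolve_datanode(lookup: &PeerLookupServiceRef, datanode_id: u64) -> Result<Option<Peer>> {
    lookup.datanode(datanode_id).await
}
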
diff --git a/src/common/meta/src/rpc.rs b/src/common/meta/src/rpc.rs
index a11c5164b87b..4996df11f3fa 100644
--- a/src/common/meta/src/rpc.rs
+++ b/src/common/meta/src/rpc.rs
@@ -31,11 +31,6 @@ impl ResponseHeader {
self.0.protocol_version
}
- #[inline]
- pub fn cluster_id(&self) -> u64 {
- self.0.cluster_id
- }
-
#[inline]
pub fn error_code(&self) -> i32 {
match self.0.error.as_ref() {
@@ -143,7 +138,6 @@ mod tests {
fn test_response_header_trans() {
let pb_header = PbResponseHeader {
protocol_version: 101,
- cluster_id: 1,
error: Some(Error {
code: 100,
err_msg: "test".to_string(),
@@ -152,7 +146,6 @@ mod tests {
let header = ResponseHeader(pb_header);
assert_eq!(101, header.protocol_version());
- assert_eq!(1, header.cluster_id());
assert_eq!(100, header.error_code());
assert_eq!("test".to_string(), header.error_msg());
}
diff --git a/src/common/meta/src/test_util.rs b/src/common/meta/src/test_util.rs
index 3ceb47310885..2c4ba59c7b21 100644
--- a/src/common/meta/src/test_util.rs
+++ b/src/common/meta/src/test_util.rs
@@ -37,7 +37,7 @@ use crate::peer::{Peer, PeerLookupService};
use crate::region_keeper::MemoryRegionKeeper;
use crate::sequence::SequenceBuilder;
use crate::wal_options_allocator::WalOptionsAllocator;
-use crate::{ClusterId, DatanodeId, FlownodeId};
+use crate::{DatanodeId, FlownodeId};
#[async_trait::async_trait]
pub trait MockDatanodeHandler: Sync + Send + Clone {
@@ -189,11 +189,11 @@ pub struct NoopPeerLookupService;
#[async_trait::async_trait]
impl PeerLookupService for NoopPeerLookupService {
- async fn datanode(&self, _cluster_id: ClusterId, id: DatanodeId) -> Result<Option<Peer>> {
+ async fn datanode(&self, id: DatanodeId) -> Result<Option<Peer>> {
Ok(Some(Peer::empty(id)))
}
- async fn flownode(&self, _cluster_id: ClusterId, id: FlownodeId) -> Result<Option<Peer>> {
+ async fn flownode(&self, id: FlownodeId) -> Result<Option<Peer>> {
Ok(Some(Peer::empty(id)))
}
}
diff --git a/src/datanode/src/heartbeat/handler.rs b/src/datanode/src/heartbeat/handler.rs
index b5c99e57eec1..34b568550dcb 100644
--- a/src/datanode/src/heartbeat/handler.rs
+++ b/src/datanode/src/heartbeat/handler.rs
@@ -235,7 +235,6 @@ mod tests {
Instruction::CloseRegion(RegionIdent {
table_id: region_id.table_id(),
region_number: region_id.region_number(),
- cluster_id: 1,
datanode_id: 2,
engine: MITO_ENGINE_NAME.to_string(),
})
@@ -246,7 +245,6 @@ mod tests {
RegionIdent {
table_id: region_id.table_id(),
region_number: region_id.region_number(),
- cluster_id: 1,
datanode_id: 2,
engine: MITO_ENGINE_NAME.to_string(),
},
diff --git a/src/flow/src/adapter.rs b/src/flow/src/adapter.rs
index 47557752f988..8fe859ff06bc 100644
--- a/src/flow/src/adapter.rs
+++ b/src/flow/src/adapter.rs
@@ -103,7 +103,6 @@ impl Default for FlowConfig {
#[serde(default)]
pub struct FlownodeOptions {
pub mode: Mode,
- pub cluster_id: Option<u64>,
pub node_id: Option<u64>,
pub flow: FlowConfig,
pub grpc: GrpcOptions,
@@ -118,7 +117,6 @@ impl Default for FlownodeOptions {
fn default() -> Self {
Self {
mode: servers::Mode::Standalone,
- cluster_id: None,
node_id: None,
flow: FlowConfig::default(),
grpc: GrpcOptions::default().with_bind_addr("127.0.0.1:3004"),
diff --git a/src/meta-client/examples/meta_client.rs b/src/meta-client/examples/meta_client.rs
index 1e4043b5d68e..e365eba2202c 100644
--- a/src/meta-client/examples/meta_client.rs
+++ b/src/meta-client/examples/meta_client.rs
@@ -31,13 +31,13 @@ fn main() {
#[tokio::main]
async fn run() {
- let id = (1000u64, 2000u64);
+ let id = 2000u64;
let config = ChannelConfig::new()
.timeout(Duration::from_secs(3))
.connect_timeout(Duration::from_secs(5))
.tcp_nodelay(true);
let channel_manager = ChannelManager::with_config(config);
- let mut meta_client = MetaClientBuilder::datanode_default_options(id.0, id.1)
+ let mut meta_client = MetaClientBuilder::datanode_default_options(id)
.channel_manager(channel_manager)
.build();
meta_client.start(&["127.0.0.1:3002"]).await.unwrap();
diff --git a/src/meta-client/src/client.rs b/src/meta-client/src/client.rs
index ee7aebba527a..38e2ee2800c5 100644
--- a/src/meta-client/src/client.rs
+++ b/src/meta-client/src/client.rs
@@ -47,7 +47,6 @@ use common_meta::rpc::store::{
DeleteRangeResponse, PutRequest, PutResponse, RangeRequest, RangeResponse,
};
use common_meta::rpc::KeyValue;
-use common_meta::ClusterId;
use common_telemetry::info;
use futures::TryStreamExt;
use heartbeat::Client as HeartbeatClient;
@@ -61,7 +60,7 @@ use crate::error::{
Result,
};
-pub type Id = (u64, u64);
+pub type Id = u64;
const DEFAULT_ASK_LEADER_MAX_RETRY: usize = 3;
const DEFAULT_SUBMIT_DDL_MAX_RETRY: usize = 3;
@@ -81,18 +80,18 @@ pub struct MetaClientBuilder {
}
impl MetaClientBuilder {
- pub fn new(cluster_id: ClusterId, member_id: u64, role: Role) -> Self {
+ pub fn new(member_id: u64, role: Role) -> Self {
Self {
- id: (cluster_id, member_id),
+ id: member_id,
role,
..Default::default()
}
}
/// Returns the role of Frontend's default options.
- pub fn frontend_default_options(cluster_id: ClusterId) -> Self {
+ pub fn frontend_default_options() -> Self {
// Frontend does not need a member id.
- Self::new(cluster_id, 0, Role::Frontend)
+ Self::new(0, Role::Frontend)
.enable_store()
.enable_heartbeat()
.enable_procedure()
@@ -100,15 +99,15 @@ impl MetaClientBuilder {
}
/// Returns the role of Datanode's default options.
- pub fn datanode_default_options(cluster_id: ClusterId, member_id: u64) -> Self {
- Self::new(cluster_id, member_id, Role::Datanode)
+ pub fn datanode_default_options(member_id: u64) -> Self {
+ Self::new(member_id, Role::Datanode)
.enable_store()
.enable_heartbeat()
}
/// Returns the role of Flownode's default options.
- pub fn flownode_default_options(cluster_id: ClusterId, member_id: u64) -> Self {
- Self::new(cluster_id, member_id, Role::Flownode)
+ pub fn flownode_default_options(member_id: u64) -> Self {
+ Self::new(member_id, Role::Flownode)
.enable_store()
.enable_heartbeat()
.enable_procedure()
@@ -273,15 +272,9 @@ impl ClusterInfo for MetaClient {
let cluster_client = self.cluster_client()?;
let (get_metasrv_nodes, nodes_key_prefix) = match role {
- None => (
- true,
- Some(NodeInfoKey::key_prefix_with_cluster_id(self.id.0)),
- ),
+ None => (true, Some(NodeInfoKey::key_prefix())),
Some(ClusterRole::Metasrv) => (true, None),
- Some(role) => (
- false,
- Some(NodeInfoKey::key_prefix_with_role(self.id.0, role)),
- ),
+ Some(role) => (false, Some(NodeInfoKey::key_prefix_with_role(role))),
};
let mut nodes = if get_metasrv_nodes {
@@ -324,7 +317,7 @@ impl ClusterInfo for MetaClient {
async fn list_region_stats(&self) -> Result<Vec<RegionStat>> {
let cluster_kv_backend = Arc::new(self.cluster_client()?);
- let range_prefix = DatanodeStatKey::key_prefix_with_cluster_id(self.id.0);
+ let range_prefix = DatanodeStatKey::prefix_key();
let req = RangeRequest::new().with_prefix(range_prefix);
let stream =
PaginationStream::new(cluster_kv_backend, req, 256, decode_stats).into_stream();
@@ -555,6 +548,8 @@ impl MetaClient {
#[cfg(test)]
mod tests {
+ use std::sync::atomic::{AtomicUsize, Ordering};
+
use api::v1::meta::{HeartbeatRequest, Peer};
use common_meta::kv_backend::{KvBackendRef, ResettableKvBackendRef};
use rand::Rng;
@@ -624,31 +619,31 @@ mod tests {
async fn test_meta_client_builder() {
let urls = &["127.0.0.1:3001", "127.0.0.1:3002"];
- let mut meta_client = MetaClientBuilder::new(0, 0, Role::Datanode)
+ let mut meta_client = MetaClientBuilder::new(0, Role::Datanode)
.enable_heartbeat()
.build();
let _ = meta_client.heartbeat_client().unwrap();
assert!(meta_client.store_client().is_err());
meta_client.start(urls).await.unwrap();
- let mut meta_client = MetaClientBuilder::new(0, 0, Role::Datanode).build();
+ let mut meta_client = MetaClientBuilder::new(0, Role::Datanode).build();
assert!(meta_client.heartbeat_client().is_err());
assert!(meta_client.store_client().is_err());
meta_client.start(urls).await.unwrap();
- let mut meta_client = MetaClientBuilder::new(0, 0, Role::Datanode)
+ let mut meta_client = MetaClientBuilder::new(0, Role::Datanode)
.enable_store()
.build();
assert!(meta_client.heartbeat_client().is_err());
let _ = meta_client.store_client().unwrap();
meta_client.start(urls).await.unwrap();
- let mut meta_client = MetaClientBuilder::new(1, 2, Role::Datanode)
+ let mut meta_client = MetaClientBuilder::new(2, Role::Datanode)
.enable_heartbeat()
.enable_store()
.build();
- assert_eq!(1, meta_client.id().0);
- assert_eq!(2, meta_client.id().1);
+ assert_eq!(2, meta_client.id());
+ assert_eq!(2, meta_client.id());
let _ = meta_client.heartbeat_client().unwrap();
let _ = meta_client.store_client().unwrap();
meta_client.start(urls).await.unwrap();
@@ -657,7 +652,7 @@ mod tests {
#[tokio::test]
async fn test_not_start_heartbeat_client() {
let urls = &["127.0.0.1:3001", "127.0.0.1:3002"];
- let mut meta_client = MetaClientBuilder::new(0, 0, Role::Datanode)
+ let mut meta_client = MetaClientBuilder::new(0, Role::Datanode)
.enable_store()
.build();
meta_client.start(urls).await.unwrap();
@@ -668,7 +663,7 @@ mod tests {
#[tokio::test]
async fn test_not_start_store_client() {
let urls = &["127.0.0.1:3001", "127.0.0.1:3002"];
- let mut meta_client = MetaClientBuilder::new(0, 0, Role::Datanode)
+ let mut meta_client = MetaClientBuilder::new(0, Role::Datanode)
.enable_heartbeat()
.build();
@@ -688,6 +683,9 @@ mod tests {
let tc = new_client("test_heartbeat").await;
let (sender, mut receiver) = tc.client.heartbeat().await.unwrap();
// send heartbeats
+
+ let request_sent = Arc::new(AtomicUsize::new(0));
+ let request_sent_clone = request_sent.clone();
let _handle = tokio::spawn(async move {
for _ in 0..5 {
let req = HeartbeatRequest {
@@ -698,14 +696,24 @@ mod tests {
..Default::default()
};
sender.send(req).await.unwrap();
+ request_sent_clone.fetch_add(1, Ordering::Relaxed);
}
});
- let _handle = tokio::spawn(async move {
- while let Some(res) = receiver.message().await.unwrap() {
- assert_eq!(1000, res.header.unwrap().cluster_id);
+ let heartbeat_count = Arc::new(AtomicUsize::new(0));
+ let heartbeat_count_clone = heartbeat_count.clone();
+ let handle = tokio::spawn(async move {
+ while let Some(_resp) = receiver.message().await.unwrap() {
+ heartbeat_count_clone.fetch_add(1, Ordering::Relaxed);
}
});
+
+ handle.await.unwrap();
+        // +1 for the initial response
+ assert_eq!(
+ request_sent.load(Ordering::Relaxed) + 1,
+ heartbeat_count.load(Ordering::Relaxed)
+ );
}
#[tokio::test]
diff --git a/src/meta-client/src/client/heartbeat.rs b/src/meta-client/src/client/heartbeat.rs
index 81d75977501b..fd5a6da46f0b 100644
--- a/src/meta-client/src/client/heartbeat.rs
+++ b/src/meta-client/src/client/heartbeat.rs
@@ -272,7 +272,7 @@ mod test {
#[tokio::test]
async fn test_already_start() {
- let mut client = Client::new((0, 0), Role::Datanode, ChannelManager::default(), 3);
+ let mut client = Client::new(0, Role::Datanode, ChannelManager::default(), 3);
client
.start(&["127.0.0.1:1000", "127.0.0.1:1001"])
.await
@@ -288,7 +288,7 @@ mod test {
#[tokio::test]
async fn test_heartbeat_stream() {
let (sender, mut receiver) = mpsc::channel::<HeartbeatRequest>(100);
- let sender = HeartbeatSender::new((8, 8), Role::Datanode, sender);
+ let sender = HeartbeatSender::new(8, Role::Datanode, sender);
let _handle = tokio::spawn(async move {
for _ in 0..10 {
sender.send(HeartbeatRequest::default()).await.unwrap();
@@ -296,7 +296,6 @@ mod test {
});
while let Some(req) = receiver.recv().await {
let header = req.header.unwrap();
- assert_eq!(8, header.cluster_id);
assert_eq!(8, header.member_id);
}
}
diff --git a/src/meta-client/src/client/store.rs b/src/meta-client/src/client/store.rs
index 4f0fea7e0f23..302692055522 100644
--- a/src/meta-client/src/client/store.rs
+++ b/src/meta-client/src/client/store.rs
@@ -255,7 +255,7 @@ mod test {
#[tokio::test]
async fn test_already_start() {
- let mut client = Client::new((0, 0), Role::Frontend, ChannelManager::default());
+ let mut client = Client::new(0, Role::Frontend, ChannelManager::default());
client
.start(&["127.0.0.1:1000", "127.0.0.1:1001"])
.await
@@ -270,7 +270,7 @@ mod test {
#[tokio::test]
async fn test_start_with_duplicate_peers() {
- let mut client = Client::new((0, 0), Role::Frontend, ChannelManager::default());
+ let mut client = Client::new(0, Role::Frontend, ChannelManager::default());
client
.start(&["127.0.0.1:1000", "127.0.0.1:1000", "127.0.0.1:1000"])
.await
diff --git a/src/meta-client/src/lib.rs b/src/meta-client/src/lib.rs
index 0a19539977e2..87eab997bc70 100644
--- a/src/meta-client/src/lib.rs
+++ b/src/meta-client/src/lib.rs
@@ -71,23 +71,22 @@ pub enum MetaClientType {
pub type MetaClientRef = Arc<client::MetaClient>;
pub async fn create_meta_client(
- cluster_id: u64,
client_type: MetaClientType,
meta_client_options: &MetaClientOptions,
) -> error::Result<MetaClientRef> {
info!(
- "Creating {:?} instance from cluster {} with Metasrv addrs {:?}",
- client_type, cluster_id, meta_client_options.metasrv_addrs
+ "Creating {:?} instance with Metasrv addrs {:?}",
+ client_type, meta_client_options.metasrv_addrs
);
let mut builder = match client_type {
MetaClientType::Datanode { member_id } => {
- MetaClientBuilder::datanode_default_options(cluster_id, member_id)
+ MetaClientBuilder::datanode_default_options(member_id)
}
MetaClientType::Flownode { member_id } => {
- MetaClientBuilder::flownode_default_options(cluster_id, member_id)
+ MetaClientBuilder::flownode_default_options(member_id)
}
- MetaClientType::Frontend => MetaClientBuilder::frontend_default_options(cluster_id),
+ MetaClientType::Frontend => MetaClientBuilder::frontend_default_options(),
};
let base_config = ChannelConfig::new()
diff --git a/src/meta-client/src/mocks.rs b/src/meta-client/src/mocks.rs
index 2643c44927ac..294ac16ef94a 100644
--- a/src/meta-client/src/mocks.rs
+++ b/src/meta-client/src/mocks.rs
@@ -60,8 +60,8 @@ pub async fn mock_client_with_etcdstore(addr: &str) -> (MetaClient, MockMetaCont
}
pub async fn mock_client_by(server_addr: String, channel_manager: ChannelManager) -> MetaClient {
- let id = (1000u64, 2000u64);
- let mut meta_client = MetaClientBuilder::datanode_default_options(id.0, id.1)
+ let id = 2000u64;
+ let mut meta_client = MetaClientBuilder::datanode_default_options(id)
.enable_access_cluster_info()
.channel_manager(channel_manager)
.build();
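
Editor's aside, not part of the patch: a minimal sketch of the updated builder entry points, where the `(cluster_id, member_id)` pair collapses into a bare member id. The snippet is hypothetical; it assumes the `meta-client` and `api` workspace crates as dependencies, and `member_id` is a placeholder value supplied by the caller.

use api::v1::meta::Role;
use meta_client::client::MetaClientBuilder;

fn build_clients(member_id: u64) {
    // Frontends still need no member id of their own.
    let _frontend = MetaClientBuilder::frontend_default_options().build();
    // Datanodes and flownodes identify themselves by member id alone.
    let _datanode = MetaClientBuilder::datanode_default_options(member_id).build();
    let _flownode = MetaClientBuilder::flownode_default_options(member_id).build();
    // The general constructor now takes the member id and the role directly.
    let _custom = MetaClientBuilder::new(member_id, Role::Datanode)
        .enable_store()
        .build();
}
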
diff --git a/src/meta-srv/src/cluster.rs b/src/meta-srv/src/cluster.rs
index 9a6cecbd36f7..23d8cd05dcae 100644
--- a/src/meta-srv/src/cluster.rs
+++ b/src/meta-srv/src/cluster.rs
@@ -375,13 +375,9 @@ mod tests {
#[test]
fn test_to_stat_kv_map() {
- let stat_key = DatanodeStatKey {
- cluster_id: 0,
- node_id: 100,
- };
+ let stat_key = DatanodeStatKey { node_id: 100 };
let stat = Stat {
- cluster_id: 0,
id: 100,
addr: "127.0.0.1:3001".to_string(),
..Default::default()
@@ -400,7 +396,6 @@ mod tests {
let stat_val = kv_map.get(&stat_key).unwrap();
let stat = stat_val.stats.first().unwrap();
- assert_eq!(0, stat.cluster_id);
assert_eq!(100, stat.id);
assert_eq!("127.0.0.1:3001", stat.addr);
}
diff --git a/src/meta-srv/src/flow_meta_alloc.rs b/src/meta-srv/src/flow_meta_alloc.rs
index 1fac6efab11a..bdfac158aa32 100644
--- a/src/meta-srv/src/flow_meta_alloc.rs
+++ b/src/meta-srv/src/flow_meta_alloc.rs
@@ -15,7 +15,6 @@
use common_error::ext::BoxedError;
use common_meta::ddl::flow_meta::PartitionPeerAllocator;
use common_meta::peer::Peer;
-use common_meta::ClusterId;
use snafu::ResultExt;
use crate::metasrv::{SelectorContext, SelectorRef};
@@ -34,14 +33,9 @@ impl FlowPeerAllocator {
#[async_trait::async_trait]
impl PartitionPeerAllocator for FlowPeerAllocator {
- async fn alloc(
- &self,
- cluster_id: ClusterId,
- partitions: usize,
- ) -> common_meta::error::Result<Vec<Peer>> {
+ async fn alloc(&self, partitions: usize) -> common_meta::error::Result<Vec<Peer>> {
self.selector
.select(
- cluster_id,
&self.ctx,
SelectorOptions {
min_required_items: partitions,
diff --git a/src/meta-srv/src/handler.rs b/src/meta-srv/src/handler.rs
index 4eb9fef91d29..6d06f328c1e5 100644
--- a/src/meta-srv/src/handler.rs
+++ b/src/meta-srv/src/handler.rs
@@ -20,8 +20,8 @@ use std::time::{Duration, Instant};
use api::v1::meta::mailbox_message::Payload;
use api::v1::meta::{
- HeartbeatRequest, HeartbeatResponse, MailboxMessage, RegionLease, RequestHeader,
- ResponseHeader, Role, PROTOCOL_VERSION,
+ HeartbeatRequest, HeartbeatResponse, MailboxMessage, RegionLease, ResponseHeader, Role,
+ PROTOCOL_VERSION,
};
use check_leader_handler::CheckLeaderHandler;
use collect_cluster_info_handler::{
@@ -153,13 +153,9 @@ pub struct Pusher {
}
impl Pusher {
- pub fn new(
- sender: Sender<std::result::Result<HeartbeatResponse, tonic::Status>>,
- req_header: &RequestHeader,
- ) -> Self {
+ pub fn new(sender: Sender<std::result::Result<HeartbeatResponse, tonic::Status>>) -> Self {
let res_header = ResponseHeader {
protocol_version: PROTOCOL_VERSION,
- cluster_id: req_header.cluster_id,
..Default::default()
};
@@ -772,7 +768,7 @@ mod tests {
use std::sync::Arc;
use std::time::Duration;
- use api::v1::meta::{MailboxMessage, RequestHeader, Role, PROTOCOL_VERSION};
+ use api::v1::meta::{MailboxMessage, Role};
use common_meta::kv_backend::memory::MemoryKvBackend;
use common_meta::sequence::SequenceBuilder;
use tokio::sync::mpsc;
@@ -814,12 +810,8 @@ mod tests {
async fn push_msg_via_mailbox() -> (MailboxRef, MailboxReceiver) {
let datanode_id = 12;
let (pusher_tx, mut pusher_rx) = mpsc::channel(16);
- let res_header = RequestHeader {
- protocol_version: PROTOCOL_VERSION,
- ..Default::default()
- };
let pusher_id = PusherId::new(Role::Datanode, datanode_id);
- let pusher: Pusher = Pusher::new(pusher_tx, &res_header);
+ let pusher: Pusher = Pusher::new(pusher_tx);
let handler_group = HeartbeatHandlerGroup::default();
handler_group.register_pusher(pusher_id, pusher).await;
diff --git a/src/meta-srv/src/handler/collect_stats_handler.rs b/src/meta-srv/src/handler/collect_stats_handler.rs
index 7b57ab9e5543..20f803cb1e2b 100644
--- a/src/meta-srv/src/handler/collect_stats_handler.rs
+++ b/src/meta-srv/src/handler/collect_stats_handler.rs
@@ -262,15 +262,11 @@ mod tests {
let handler = CollectStatsHandler::default();
handle_request_many_times(ctx.clone(), &handler, 1).await;
- let key = DatanodeStatKey {
- cluster_id: 3,
- node_id: 101,
- };
+ let key = DatanodeStatKey { node_id: 101 };
let key: Vec<u8> = key.into();
let res = ctx.in_memory.get(&key).await.unwrap();
let kv = res.unwrap();
let key: DatanodeStatKey = kv.key.clone().try_into().unwrap();
- assert_eq!(3, key.cluster_id);
assert_eq!(101, key.node_id);
let val: DatanodeStatValue = kv.value.try_into().unwrap();
// first new stat must be set in kv store immediately
@@ -295,7 +291,6 @@ mod tests {
for i in 1..=loop_times {
let mut acc = HeartbeatAccumulator {
stat: Some(Stat {
- cluster_id: 3,
id: 101,
region_num: i as _,
..Default::default()
diff --git a/src/meta-srv/src/handler/failure_handler.rs b/src/meta-srv/src/handler/failure_handler.rs
index ae38f887f4aa..203cf4af333c 100644
--- a/src/meta-srv/src/handler/failure_handler.rs
+++ b/src/meta-srv/src/handler/failure_handler.rs
@@ -101,7 +101,6 @@ mod tests {
}
}
acc.stat = Some(Stat {
- cluster_id: 1,
id: 42,
region_stats: vec![new_region_stat(1), new_region_stat(2), new_region_stat(3)],
timestamp_millis: 1000,
diff --git a/src/meta-srv/src/handler/keep_lease_handler.rs b/src/meta-srv/src/handler/keep_lease_handler.rs
index 76669cd76b2b..553963aaa81d 100644
--- a/src/meta-srv/src/handler/keep_lease_handler.rs
+++ b/src/meta-srv/src/handler/keep_lease_handler.rs
@@ -38,17 +38,14 @@ impl HeartbeatHandler for DatanodeKeepLeaseHandler {
_acc: &mut HeartbeatAccumulator,
) -> Result<HandleControl> {
let HeartbeatRequest { header, peer, .. } = req;
- let Some(header) = &header else {
+ let Some(_header) = &header else {
return Ok(HandleControl::Continue);
};
let Some(peer) = &peer else {
return Ok(HandleControl::Continue);
};
- let key = DatanodeLeaseKey {
- cluster_id: header.cluster_id,
- node_id: peer.id,
- };
+ let key = DatanodeLeaseKey { node_id: peer.id };
let value = LeaseValue {
timestamp_millis: time_util::current_time_millis(),
node_addr: peer.addr.clone(),
@@ -80,17 +77,14 @@ impl HeartbeatHandler for FlownodeKeepLeaseHandler {
_acc: &mut HeartbeatAccumulator,
) -> Result<HandleControl> {
let HeartbeatRequest { header, peer, .. } = req;
- let Some(header) = &header else {
+ let Some(_header) = &header else {
return Ok(HandleControl::Continue);
};
let Some(peer) = &peer else {
return Ok(HandleControl::Continue);
};
- let key = FlownodeLeaseKey {
- cluster_id: header.cluster_id,
- node_id: peer.id,
- };
+ let key = FlownodeLeaseKey { node_id: peer.id };
let value = LeaseValue {
timestamp_millis: time_util::current_time_millis(),
node_addr: peer.addr.clone(),
diff --git a/src/meta-srv/src/handler/region_lease_handler.rs b/src/meta-srv/src/handler/region_lease_handler.rs
index 98a74f67bba7..64ec1f01e4d1 100644
--- a/src/meta-srv/src/handler/region_lease_handler.rs
+++ b/src/meta-srv/src/handler/region_lease_handler.rs
@@ -64,7 +64,6 @@ impl HeartbeatHandler for RegionLeaseHandler {
};
let regions = stat.regions();
- let cluster_id = stat.cluster_id;
let datanode_id = stat.id;
let RenewRegionLeasesResponse {
@@ -72,7 +71,7 @@ impl HeartbeatHandler for RegionLeaseHandler {
renewed,
} = self
.region_lease_keeper
- .renew_region_leases(cluster_id, datanode_id, ®ions)
+ .renew_region_leases(datanode_id, ®ions)
.await?;
let renewed = renewed
@@ -153,7 +152,6 @@ mod test {
let peer = Peer::empty(datanode_id);
let follower_peer = Peer::empty(datanode_id + 1);
let table_info = new_test_table_info(table_id, vec![region_number]).into();
- let cluster_id = 1;
let region_routes = vec![RegionRoute {
region: Region::new_test(region_id),
@@ -181,7 +179,6 @@ mod test {
let acc = &mut HeartbeatAccumulator::default();
acc.stat = Some(Stat {
- cluster_id,
id: peer.id,
region_stats: vec![
new_empty_region_stat(region_id, RegionRole::Follower),
@@ -215,7 +212,6 @@ mod test {
let acc = &mut HeartbeatAccumulator::default();
acc.stat = Some(Stat {
- cluster_id,
id: follower_peer.id,
region_stats: vec![
new_empty_region_stat(region_id, RegionRole::Follower),
@@ -249,7 +245,6 @@ mod test {
let acc = &mut HeartbeatAccumulator::default();
acc.stat = Some(Stat {
- cluster_id,
id: follower_peer.id,
region_stats: vec![
new_empty_region_stat(region_id, RegionRole::Follower),
@@ -292,7 +287,6 @@ mod test {
let peer = Peer::empty(datanode_id);
let follower_peer = Peer::empty(datanode_id + 1);
let table_info = new_test_table_info(table_id, vec![region_number]).into();
- let cluster_id = 1;
let region_routes = vec![
RegionRoute {
@@ -333,7 +327,6 @@ mod test {
let acc = &mut HeartbeatAccumulator::default();
acc.stat = Some(Stat {
- cluster_id,
id: peer.id,
region_stats: vec![
new_empty_region_stat(region_id, RegionRole::Leader),
diff --git a/src/meta-srv/src/handler/response_header_handler.rs b/src/meta-srv/src/handler/response_header_handler.rs
index baa7e7ee291d..1cd6201598ee 100644
--- a/src/meta-srv/src/handler/response_header_handler.rs
+++ b/src/meta-srv/src/handler/response_header_handler.rs
@@ -28,18 +28,15 @@ impl HeartbeatHandler for ResponseHeaderHandler {
async fn handle(
&self,
- req: &HeartbeatRequest,
+ _req: &HeartbeatRequest,
_ctx: &mut Context,
acc: &mut HeartbeatAccumulator,
) -> Result<HandleControl> {
- let HeartbeatRequest { header, .. } = req;
let res_header = ResponseHeader {
protocol_version: PROTOCOL_VERSION,
- cluster_id: header.as_ref().map_or(0, |h| h.cluster_id),
..Default::default()
};
acc.header = Some(res_header);
-
Ok(HandleControl::Continue)
}
}
@@ -48,7 +45,7 @@ impl HeartbeatHandler for ResponseHeaderHandler {
mod tests {
use std::sync::Arc;
- use api::v1::meta::{HeartbeatResponse, RequestHeader};
+ use api::v1::meta::RequestHeader;
use common_meta::cache_invalidator::DummyCacheInvalidator;
use common_meta::key::TableMetadataManager;
use common_meta::kv_backend::memory::MemoryKvBackend;
@@ -90,7 +87,7 @@ mod tests {
};
let req = HeartbeatRequest {
- header: Some(RequestHeader::new((1, 2), Role::Datanode, W3cTrace::new())),
+ header: Some(RequestHeader::new(2, Role::Datanode, W3cTrace::new())),
..Default::default()
};
let mut acc = HeartbeatAccumulator::default();
@@ -100,12 +97,5 @@ mod tests {
.handle(&req, &mut ctx, &mut acc)
.await
.unwrap();
- let header = std::mem::take(&mut acc.header);
- let res = HeartbeatResponse {
- header,
- mailbox_message: acc.into_mailbox_message(),
- ..Default::default()
- };
- assert_eq!(1, res.header.unwrap().cluster_id);
}
}
diff --git a/src/meta-srv/src/key.rs b/src/meta-srv/src/key.rs
index 243efe69c870..aabebb8bbc02 100644
--- a/src/meta-srv/src/key.rs
+++ b/src/meta-srv/src/key.rs
@@ -35,20 +35,12 @@ macro_rules! impl_from_str_lease_key {
.context(error::InvalidLeaseKeySnafu { key })?;
ensure!(caps.len() == 3, error::InvalidLeaseKeySnafu { key });
-
- let cluster_id = caps[1].to_string();
let node_id = caps[2].to_string();
- let cluster_id: u64 = cluster_id.parse().context(error::ParseNumSnafu {
- err_msg: format!("invalid cluster_id: {cluster_id}"),
- })?;
let node_id: u64 = node_id.parse().context(error::ParseNumSnafu {
err_msg: format!("invalid node_id: {node_id}"),
})?;
- Ok(Self {
- cluster_id,
- node_id,
- })
+ Ok(Self { node_id })
}
}
};
@@ -73,7 +65,7 @@ macro_rules! impl_try_from_lease_key {
type Error = error::Error;
fn try_from(key: $key_type) -> error::Result<Self> {
- Ok(format!("{}-{}-{}", $prefix, key.cluster_id, key.node_id).into_bytes())
+ Ok(format!("{}-0-{}", $prefix, key.node_id).into_bytes())
}
}
};
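
The two key.rs hunks above keep the serialized lease key three-segmented: the slot that used to hold the cluster id is pinned to a literal `0`, so keys written before this change still parse. A minimal standalone sketch of that layout follows; the prefix constant here is a stand-in, not the crate's real `DATANODE_LEASE_PREFIX`/`FLOWNODE_LEASE_PREFIX` value, and the real parser uses a regex rather than string splitting.

// Standalone sketch of the "<prefix>-0-<node_id>" layout produced by
// `impl_try_from_lease_key!` above. PREFIX is a placeholder value.
const PREFIX: &str = "__lease_prefix";

fn encode(node_id: u64) -> String {
    // The middle segment is always "0" now; it only exists so that keys written
    // when it still carried a cluster id remain parseable.
    format!("{PREFIX}-0-{node_id}")
}

fn decode(key: &str) -> Option<u64> {
    // Still expect three dash-separated segments; read and discard the legacy
    // cluster-id segment, keep only the node id.
    let rest = key.strip_prefix(PREFIX)?.strip_prefix('-')?;
    let (_legacy_cluster_id, node_id) = rest.split_once('-')?;
    node_id.parse().ok()
}

fn main() {
    let key = encode(42);
    assert_eq!(decode(&key), Some(42));
}
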
diff --git a/src/meta-srv/src/key/datanode.rs b/src/meta-srv/src/key/datanode.rs
index 1c4583c2332a..ef4c89c34d73 100644
--- a/src/meta-srv/src/key/datanode.rs
+++ b/src/meta-srv/src/key/datanode.rs
@@ -15,7 +15,6 @@
use std::str::FromStr;
use common_meta::datanode::DatanodeStatKey;
-use common_meta::ClusterId;
use lazy_static::lazy_static;
use regex::Regex;
use serde::{Deserialize, Serialize};
@@ -42,20 +41,18 @@ lazy_static! {
#[derive(Debug, Clone, Eq, Hash, PartialEq, Serialize, Deserialize)]
pub struct DatanodeLeaseKey {
- pub cluster_id: ClusterId,
pub node_id: u64,
}
impl DatanodeLeaseKey {
- pub fn prefix_key_by_cluster(cluster_id: ClusterId) -> Vec<u8> {
- format!("{DATANODE_LEASE_PREFIX}-{cluster_id}-").into_bytes()
+ pub fn prefix_key() -> Vec<u8> {
+ format!("{DATANODE_LEASE_PREFIX}-0-").into_bytes()
}
}
impl From<&DatanodeLeaseKey> for DatanodeStatKey {
fn from(lease_key: &DatanodeLeaseKey) -> Self {
DatanodeStatKey {
- cluster_id: lease_key.cluster_id,
node_id: lease_key.node_id,
}
}
@@ -63,22 +60,21 @@ impl From<&DatanodeLeaseKey> for DatanodeStatKey {
#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash, Serialize, Deserialize)]
pub struct InactiveRegionKey {
- pub cluster_id: ClusterId,
pub node_id: u64,
pub region_id: u64,
}
impl InactiveRegionKey {
- pub fn get_prefix_by_cluster(cluster_id: u64) -> Vec<u8> {
- format!("{}-{}-", INACTIVE_REGION_PREFIX, cluster_id).into_bytes()
+ pub fn get_prefix_by_cluster() -> Vec<u8> {
+ format!("{}-0-", INACTIVE_REGION_PREFIX).into_bytes()
}
}
impl From<InactiveRegionKey> for Vec<u8> {
fn from(value: InactiveRegionKey) -> Self {
format!(
- "{}-{}-{}-{}",
- INACTIVE_REGION_PREFIX, value.cluster_id, value.node_id, value.region_id
+ "{}-0-{}-{}",
+ INACTIVE_REGION_PREFIX, value.node_id, value.region_id
)
.into_bytes()
}
@@ -97,13 +93,8 @@ impl FromStr for InactiveRegionKey {
error::InvalidInactiveRegionKeySnafu { key }
);
- let cluster_id = caps[1].to_string();
let node_id = caps[2].to_string();
let region_id = caps[3].to_string();
-
- let cluster_id: u64 = cluster_id.parse().context(error::ParseNumSnafu {
- err_msg: format!("invalid cluster_id: {cluster_id}"),
- })?;
let node_id: u64 = node_id.parse().context(error::ParseNumSnafu {
err_msg: format!("invalid node_id: {node_id}"),
})?;
@@ -111,11 +102,7 @@ impl FromStr for InactiveRegionKey {
err_msg: format!("invalid region_id: {region_id}"),
})?;
- Ok(Self {
- cluster_id,
- node_id,
- region_id,
- })
+ Ok(Self { node_id, region_id })
}
}
@@ -135,24 +122,17 @@ mod tests {
#[test]
fn test_stat_key_round_trip() {
- let key = DatanodeStatKey {
- cluster_id: 0,
- node_id: 1,
- };
+ let key = DatanodeStatKey { node_id: 1 };
let key_bytes: Vec<u8> = key.into();
let new_key: DatanodeStatKey = key_bytes.try_into().unwrap();
- assert_eq!(0, new_key.cluster_id);
assert_eq!(1, new_key.node_id);
}
#[test]
fn test_lease_key_round_trip() {
- let key = DatanodeLeaseKey {
- cluster_id: 0,
- node_id: 1,
- };
+ let key = DatanodeLeaseKey { node_id: 1 };
let key_bytes: Vec<u8> = key.clone().try_into().unwrap();
let new_key: DatanodeLeaseKey = key_bytes.try_into().unwrap();
@@ -162,21 +142,16 @@ mod tests {
#[test]
fn test_lease_key_to_stat_key() {
- let lease_key = DatanodeLeaseKey {
- cluster_id: 1,
- node_id: 101,
- };
+ let lease_key = DatanodeLeaseKey { node_id: 101 };
let stat_key: DatanodeStatKey = (&lease_key).into();
- assert_eq!(1, stat_key.cluster_id);
assert_eq!(101, stat_key.node_id);
}
#[test]
fn test_inactive_region_key_round_trip() {
let key = InactiveRegionKey {
- cluster_id: 0,
node_id: 1,
region_id: 2,
};
diff --git a/src/meta-srv/src/key/flownode.rs b/src/meta-srv/src/key/flownode.rs
index acb36cbf7582..0255c36df5ae 100644
--- a/src/meta-srv/src/key/flownode.rs
+++ b/src/meta-srv/src/key/flownode.rs
@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use common_meta::ClusterId;
use lazy_static::lazy_static;
use regex::Regex;
use serde::{Deserialize, Serialize};
@@ -26,13 +25,12 @@ lazy_static! {
#[derive(Debug, Clone, Eq, Hash, PartialEq, Serialize, Deserialize)]
pub struct FlownodeLeaseKey {
- pub cluster_id: ClusterId,
pub node_id: u64,
}
impl FlownodeLeaseKey {
- pub fn prefix_key_by_cluster(cluster_id: ClusterId) -> Vec<u8> {
- format!("{FLOWNODE_LEASE_PREFIX}-{cluster_id}-").into_bytes()
+ pub fn prefix_key_by_cluster() -> Vec<u8> {
+ format!("{FLOWNODE_LEASE_PREFIX}-0-").into_bytes()
}
}
@@ -42,10 +40,7 @@ mod tests {
#[test]
fn test_lease_key_round_trip() {
- let key = FlownodeLeaseKey {
- cluster_id: 0,
- node_id: 1,
- };
+ let key = FlownodeLeaseKey { node_id: 1 };
let key_bytes: Vec<u8> = key.clone().try_into().unwrap();
let new_key: FlownodeLeaseKey = key_bytes.try_into().unwrap();
diff --git a/src/meta-srv/src/lease.rs b/src/meta-srv/src/lease.rs
index ef28c2ed7431..063c5233c721 100644
--- a/src/meta-srv/src/lease.rs
+++ b/src/meta-srv/src/lease.rs
@@ -18,7 +18,7 @@ use std::hash::Hash;
use common_error::ext::BoxedError;
use common_meta::kv_backend::KvBackend;
use common_meta::peer::{Peer, PeerLookupService};
-use common_meta::{util, ClusterId, DatanodeId, FlownodeId};
+use common_meta::{util, DatanodeId, FlownodeId};
use common_time::util as time_util;
use snafu::ResultExt;
@@ -35,14 +35,12 @@ fn build_lease_filter(lease_secs: u64) -> impl Fn(&LeaseValue) -> bool {
/// look up [`Peer`] given [`ClusterId`] and [`DatanodeId`], will only return if it's alive under given `lease_secs`
pub async fn lookup_datanode_peer(
- cluster_id: ClusterId,
datanode_id: DatanodeId,
meta_peer_client: &MetaPeerClientRef,
lease_secs: u64,
) -> Result<Option<Peer>> {
let lease_filter = build_lease_filter(lease_secs);
let lease_key = DatanodeLeaseKey {
- cluster_id,
node_id: datanode_id,
};
let lease_key_bytes: Vec<u8> = lease_key.clone().try_into()?;
@@ -63,29 +61,24 @@ pub async fn lookup_datanode_peer(
/// Find all alive datanodes
pub async fn alive_datanodes(
- cluster_id: ClusterId,
meta_peer_client: &MetaPeerClientRef,
lease_secs: u64,
) -> Result<HashMap<DatanodeLeaseKey, LeaseValue>> {
let predicate = build_lease_filter(lease_secs);
- filter(
- DatanodeLeaseKey::prefix_key_by_cluster(cluster_id),
- meta_peer_client,
- |v| predicate(v),
- )
+ filter(DatanodeLeaseKey::prefix_key(), meta_peer_client, |v| {
+ predicate(v)
+ })
.await
}
/// look up [`Peer`] given [`ClusterId`] and [`DatanodeId`], only return if it's alive under given `lease_secs`
pub async fn lookup_flownode_peer(
- cluster_id: ClusterId,
flownode_id: FlownodeId,
meta_peer_client: &MetaPeerClientRef,
lease_secs: u64,
) -> Result<Option<Peer>> {
let lease_filter = build_lease_filter(lease_secs);
let lease_key = FlownodeLeaseKey {
- cluster_id,
node_id: flownode_id,
};
let lease_key_bytes: Vec<u8> = lease_key.clone().try_into()?;
@@ -107,13 +100,12 @@ pub async fn lookup_flownode_peer(
/// Find all alive flownodes
pub async fn alive_flownodes(
- cluster_id: ClusterId,
meta_peer_client: &MetaPeerClientRef,
lease_secs: u64,
) -> Result<HashMap<FlownodeLeaseKey, LeaseValue>> {
let predicate = build_lease_filter(lease_secs);
filter(
- FlownodeLeaseKey::prefix_key_by_cluster(cluster_id),
+ FlownodeLeaseKey::prefix_key_by_cluster(),
meta_peer_client,
|v| predicate(v),
)
@@ -163,22 +155,14 @@ impl MetaPeerLookupService {
#[async_trait::async_trait]
impl PeerLookupService for MetaPeerLookupService {
- async fn datanode(
- &self,
- cluster_id: ClusterId,
- id: DatanodeId,
- ) -> common_meta::error::Result<Option<Peer>> {
- lookup_datanode_peer(cluster_id, id, &self.meta_peer_client, u64::MAX)
+ async fn datanode(&self, id: DatanodeId) -> common_meta::error::Result<Option<Peer>> {
+ lookup_datanode_peer(id, &self.meta_peer_client, u64::MAX)
.await
.map_err(BoxedError::new)
.context(common_meta::error::ExternalSnafu)
}
- async fn flownode(
- &self,
- cluster_id: ClusterId,
- id: FlownodeId,
- ) -> common_meta::error::Result<Option<Peer>> {
- lookup_flownode_peer(cluster_id, id, &self.meta_peer_client, u64::MAX)
+ async fn flownode(&self, id: FlownodeId) -> common_meta::error::Result<Option<Peer>> {
+ lookup_flownode_peer(id, &self.meta_peer_client, u64::MAX)
.await
.map_err(BoxedError::new)
.context(common_meta::error::ExternalSnafu)
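
After these lease.rs changes, callers identify a node by its id alone. A sketch of the resulting call sites inside the meta-srv crate, assuming the import paths visible elsewhere in this patch and a `MetaPeerClientRef` already in hand:

// Sketch of the simplified lookups; module paths follow the imports shown in
// this patch, the surrounding context is assumed.
use common_meta::distributed_time_constants::DATANODE_LEASE_SECS;
use common_meta::peer::Peer;

use crate::cluster::MetaPeerClientRef;
use crate::error::Result;
use crate::lease::{alive_datanodes, lookup_datanode_peer};

async fn find_datanode(client: &MetaPeerClientRef, id: u64) -> Result<Option<Peer>> {
    // Only the datanode id is needed; the lease key no longer carries a cluster id.
    lookup_datanode_peer(id, client, DATANODE_LEASE_SECS).await
}

async fn count_alive(client: &MetaPeerClientRef) -> Result<usize> {
    // Scans the single "<prefix>-0-" keyspace instead of a per-cluster prefix.
    Ok(alive_datanodes(client, DATANODE_LEASE_SECS).await?.len())
}
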
diff --git a/src/meta-srv/src/metasrv.rs b/src/meta-srv/src/metasrv.rs
index b8c29d988a85..b15e8ece31cb 100644
--- a/src/meta-srv/src/metasrv.rs
+++ b/src/meta-srv/src/metasrv.rs
@@ -26,6 +26,7 @@ use common_config::Configurable;
use common_greptimedb_telemetry::GreptimeDBTelemetryTask;
use common_meta::cache_invalidator::CacheInvalidatorRef;
use common_meta::ddl::ProcedureExecutorRef;
+use common_meta::distributed_time_constants;
use common_meta::key::maintenance::MaintenanceModeManagerRef;
use common_meta::key::TableMetadataManagerRef;
use common_meta::kv_backend::{KvBackendRef, ResettableKvBackend, ResettableKvBackendRef};
@@ -36,7 +37,6 @@ use common_meta::node_expiry_listener::NodeExpiryListener;
use common_meta::peer::Peer;
use common_meta::region_keeper::MemoryRegionKeeperRef;
use common_meta::wal_options_allocator::WalOptionsAllocatorRef;
-use common_meta::{distributed_time_constants, ClusterId};
use common_options::datanode::DatanodeClientOptions;
use common_procedure::options::ProcedureConfig;
use common_procedure::ProcedureManagerRef;
@@ -572,13 +572,8 @@ impl Metasrv {
}
/// Lookup a peer by peer_id, return it only when it's alive.
- pub(crate) async fn lookup_peer(
- &self,
- cluster_id: ClusterId,
- peer_id: u64,
- ) -> Result<Option<Peer>> {
+ pub(crate) async fn lookup_peer(&self, peer_id: u64) -> Result<Option<Peer>> {
lookup_datanode_peer(
- cluster_id,
peer_id,
&self.meta_peer_client,
distributed_time_constants::DATANODE_LEASE_SECS,
diff --git a/src/meta-srv/src/metrics.rs b/src/meta-srv/src/metrics.rs
index 7a7bdce9a887..1ed34bcd3b47 100644
--- a/src/meta-srv/src/metrics.rs
+++ b/src/meta-srv/src/metrics.rs
@@ -20,7 +20,7 @@ lazy_static! {
pub static ref METRIC_META_KV_REQUEST_ELAPSED: HistogramVec = register_histogram_vec!(
"greptime_meta_kv_request_elapsed",
"meta kv request",
- &["target", "op", "cluster_id"]
+ &["target", "op"]
)
.unwrap();
/// The heartbeat connection gauge.
diff --git a/src/meta-srv/src/procedure/region_migration.rs b/src/meta-srv/src/procedure/region_migration.rs
index 3b27d33f2270..59f476ce68e0 100644
--- a/src/meta-srv/src/procedure/region_migration.rs
+++ b/src/meta-srv/src/procedure/region_migration.rs
@@ -39,7 +39,6 @@ use common_meta::key::{DeserializedValueWithBytes, TableMetadataManagerRef};
use common_meta::lock_key::{CatalogLock, RegionLock, SchemaLock, TableLock};
use common_meta::peer::Peer;
use common_meta::region_keeper::{MemoryRegionKeeperRef, OperatingRegionGuard};
-use common_meta::ClusterId;
use common_procedure::error::{
Error as ProcedureError, FromJsonSnafu, Result as ProcedureResult, ToJsonSnafu,
};
@@ -70,8 +69,6 @@ pub struct PersistentContext {
catalog: String,
/// The table schema.
schema: String,
- /// The Id of the cluster.
- cluster_id: ClusterId,
/// The [Peer] of migration source.
from_peer: Peer,
/// The [Peer] of migration destination.
@@ -273,12 +270,11 @@ impl Context {
/// The original failure detector was removed once the procedure was triggered.
/// Now, we need to register the failure detector for the failed region again.
pub async fn register_failure_detectors(&self) {
- let cluster_id = self.persistent_ctx.cluster_id;
let datanode_id = self.persistent_ctx.from_peer.id;
let region_id = self.persistent_ctx.region_id;
self.region_failure_detector_controller
- .register_failure_detectors(vec![(cluster_id, datanode_id, region_id)])
+ .register_failure_detectors(vec![(datanode_id, region_id)])
.await;
}
@@ -287,12 +283,11 @@ impl Context {
/// The original failure detectors was removed once the procedure was triggered.
/// However, the `from_peer` may still send the heartbeats contains the failed region.
pub async fn deregister_failure_detectors(&self) {
- let cluster_id = self.persistent_ctx.cluster_id;
let datanode_id = self.persistent_ctx.from_peer.id;
let region_id = self.persistent_ctx.region_id;
self.region_failure_detector_controller
- .deregister_failure_detectors(vec![(cluster_id, datanode_id, region_id)])
+ .deregister_failure_detectors(vec![(datanode_id, region_id)])
.await;
}
@@ -458,7 +453,6 @@ impl RegionMigrationProcedure {
} = serde_json::from_str(json).context(FromJsonSnafu)?;
let guard = tracker.insert_running_procedure(&RegionMigrationProcedureTask {
- cluster_id: persistent_ctx.cluster_id,
region_id: persistent_ctx.region_id,
from_peer: persistent_ctx.from_peer.clone(),
to_peer: persistent_ctx.to_peer.clone(),
@@ -580,7 +574,6 @@ mod tests {
use common_meta::key::test_utils::new_test_table_info;
use common_meta::rpc::router::{Region, RegionRoute};
- use super::migration_end::RegionMigrationEnd;
use super::update_metadata::UpdateMetadata;
use super::*;
use crate::handler::HeartbeatMailbox;
@@ -620,7 +613,7 @@ mod tests {
let procedure = RegionMigrationProcedure::new(persistent_context, context, None);
let serialized = procedure.dump().unwrap();
- let expected = r#"{"persistent_ctx":{"catalog":"greptime","schema":"public","cluster_id":0,"from_peer":{"id":1,"addr":""},"to_peer":{"id":2,"addr":""},"region_id":4398046511105,"timeout":"10s"},"state":{"region_migration_state":"RegionMigrationStart"}}"#;
+ let expected = r#"{"persistent_ctx":{"catalog":"greptime","schema":"public","from_peer":{"id":1,"addr":""},"to_peer":{"id":2,"addr":""},"region_id":4398046511105,"timeout":"10s"},"state":{"region_migration_state":"RegionMigrationStart"}}"#;
assert_eq!(expected, serialized);
}
@@ -628,7 +621,7 @@ mod tests {
fn test_backward_compatibility() {
let persistent_ctx = test_util::new_persistent_context(1, 2, RegionId::new(1024, 1));
// NOTES: Changes it will break backward compatibility.
- let serialized = r#"{"catalog":"greptime","schema":"public","cluster_id":0,"from_peer":{"id":1,"addr":""},"to_peer":{"id":2,"addr":""},"region_id":4398046511105}"#;
+ let serialized = r#"{"catalog":"greptime","schema":"public","from_peer":{"id":1,"addr":""},"to_peer":{"id":2,"addr":""},"region_id":4398046511105}"#;
let deserialized: PersistentContext = serde_json::from_str(serialized).unwrap();
assert_eq!(persistent_ctx, deserialized);
@@ -640,15 +633,8 @@ mod tests {
#[async_trait::async_trait]
#[typetag::serde]
impl State for MockState {
- async fn next(&mut self, ctx: &mut Context) -> Result<(Box<dyn State>, Status)> {
- let pc = &mut ctx.persistent_ctx;
-
- if pc.cluster_id == 2 {
- Ok((Box::new(RegionMigrationEnd), Status::done()))
- } else {
- pc.cluster_id += 1;
- Ok((Box::new(MockState), Status::executing(false)))
- }
+ async fn next(&mut self, _ctx: &mut Context) -> Result<(Box<dyn State>, Status)> {
+ Ok((Box::new(MockState), Status::done()))
}
fn as_any(&self) -> &dyn Any {
@@ -692,7 +678,6 @@ mod tests {
for _ in 1..3 {
status = Some(procedure.execute(&ctx).await.unwrap());
}
- assert_eq!(procedure.context.persistent_ctx.cluster_id, 2);
assert!(status.unwrap().is_done());
}
diff --git a/src/meta-srv/src/procedure/region_migration/close_downgraded_region.rs b/src/meta-srv/src/procedure/region_migration/close_downgraded_region.rs
index 9113607681cc..ca451e62387a 100644
--- a/src/meta-srv/src/procedure/region_migration/close_downgraded_region.rs
+++ b/src/meta-srv/src/procedure/region_migration/close_downgraded_region.rs
@@ -62,7 +62,6 @@ impl CloseDowngradedRegion {
async fn build_close_region_instruction(&self, ctx: &mut Context) -> Result<Instruction> {
let pc = &ctx.persistent_ctx;
let downgrade_leader_datanode_id = pc.from_peer.id;
- let cluster_id = pc.cluster_id;
let table_id = pc.region_id.table_id();
let region_number = pc.region_id.region_number();
let datanode_table_value = ctx.get_from_peer_datanode_table_value().await?;
@@ -70,7 +69,6 @@ impl CloseDowngradedRegion {
let RegionInfo { engine, .. } = datanode_table_value.region_info.clone();
Ok(Instruction::CloseRegion(RegionIdent {
- cluster_id,
datanode_id: downgrade_leader_datanode_id,
table_id,
region_number,
diff --git a/src/meta-srv/src/procedure/region_migration/downgrade_leader_region.rs b/src/meta-srv/src/procedure/region_migration/downgrade_leader_region.rs
index 51b55d2be117..d1dfcd3e05d1 100644
--- a/src/meta-srv/src/procedure/region_migration/downgrade_leader_region.rs
+++ b/src/meta-srv/src/procedure/region_migration/downgrade_leader_region.rs
@@ -294,7 +294,6 @@ mod tests {
from_peer: Peer::empty(1),
to_peer: Peer::empty(2),
region_id: RegionId::new(1024, 1),
- cluster_id: 0,
timeout: Duration::from_millis(1000),
}
}
diff --git a/src/meta-srv/src/procedure/region_migration/manager.rs b/src/meta-srv/src/procedure/region_migration/manager.rs
index c61a9a4b6c3f..e2345559d081 100644
--- a/src/meta-srv/src/procedure/region_migration/manager.rs
+++ b/src/meta-srv/src/procedure/region_migration/manager.rs
@@ -22,7 +22,6 @@ use common_meta::key::table_info::TableInfoValue;
use common_meta::key::table_route::TableRouteValue;
use common_meta::peer::Peer;
use common_meta::rpc::router::RegionRoute;
-use common_meta::ClusterId;
use common_procedure::{watcher, ProcedureId, ProcedureManagerRef, ProcedureWithId};
use common_telemetry::{error, info};
use snafu::{ensure, OptionExt, ResultExt};
@@ -101,7 +100,6 @@ impl Drop for RegionMigrationProcedureGuard {
#[derive(Debug, Clone)]
pub struct RegionMigrationProcedureTask {
- pub(crate) cluster_id: ClusterId,
pub(crate) region_id: RegionId,
pub(crate) from_peer: Peer,
pub(crate) to_peer: Peer,
@@ -109,15 +107,8 @@ pub struct RegionMigrationProcedureTask {
}
impl RegionMigrationProcedureTask {
- pub fn new(
- cluster_id: ClusterId,
- region_id: RegionId,
- from_peer: Peer,
- to_peer: Peer,
- timeout: Duration,
- ) -> Self {
+ pub fn new(region_id: RegionId, from_peer: Peer, to_peer: Peer, timeout: Duration) -> Self {
Self {
- cluster_id,
region_id,
from_peer,
to_peer,
@@ -130,8 +121,8 @@ impl Display for RegionMigrationProcedureTask {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
- "cluster: {}, region: {}, from_peer: {}, to_peer: {}",
- self.cluster_id, self.region_id, self.from_peer, self.to_peer
+ "region: {}, from_peer: {}, to_peer: {}",
+ self.region_id, self.from_peer, self.to_peer
)
}
}
@@ -331,7 +322,6 @@ impl RegionMigrationManager {
.with_label_values(&["desc", &task.to_peer.id.to_string()])
.inc();
let RegionMigrationProcedureTask {
- cluster_id,
region_id,
from_peer,
to_peer,
@@ -341,7 +331,6 @@ impl RegionMigrationManager {
PersistentContext {
catalog: catalog_name,
schema: schema_name,
- cluster_id,
region_id,
from_peer,
to_peer,
@@ -394,7 +383,6 @@ mod test {
let manager = RegionMigrationManager::new(env.procedure_manager().clone(), context_factory);
let region_id = RegionId::new(1024, 1);
let task = RegionMigrationProcedureTask {
- cluster_id: 1,
region_id,
from_peer: Peer::empty(2),
to_peer: Peer::empty(1),
@@ -419,7 +407,6 @@ mod test {
let manager = RegionMigrationManager::new(env.procedure_manager().clone(), context_factory);
let region_id = RegionId::new(1024, 1);
let task = RegionMigrationProcedureTask {
- cluster_id: 1,
region_id,
from_peer: Peer::empty(1),
to_peer: Peer::empty(1),
@@ -437,7 +424,6 @@ mod test {
let manager = RegionMigrationManager::new(env.procedure_manager().clone(), context_factory);
let region_id = RegionId::new(1024, 1);
let task = RegionMigrationProcedureTask {
- cluster_id: 1,
region_id,
from_peer: Peer::empty(1),
to_peer: Peer::empty(2),
@@ -455,7 +441,6 @@ mod test {
let manager = RegionMigrationManager::new(env.procedure_manager().clone(), context_factory);
let region_id = RegionId::new(1024, 1);
let task = RegionMigrationProcedureTask {
- cluster_id: 1,
region_id,
from_peer: Peer::empty(1),
to_peer: Peer::empty(2),
@@ -483,7 +468,6 @@ mod test {
let manager = RegionMigrationManager::new(env.procedure_manager().clone(), context_factory);
let region_id = RegionId::new(1024, 1);
let task = RegionMigrationProcedureTask {
- cluster_id: 1,
region_id,
from_peer: Peer::empty(1),
to_peer: Peer::empty(2),
@@ -515,7 +499,6 @@ mod test {
let manager = RegionMigrationManager::new(env.procedure_manager().clone(), context_factory);
let region_id = RegionId::new(1024, 1);
let task = RegionMigrationProcedureTask {
- cluster_id: 1,
region_id,
from_peer: Peer::empty(1),
to_peer: Peer::empty(2),
@@ -542,7 +525,6 @@ mod test {
let manager = RegionMigrationManager::new(env.procedure_manager().clone(), context_factory);
let region_id = RegionId::new(1024, 1);
let task = RegionMigrationProcedureTask {
- cluster_id: 1,
region_id,
from_peer: Peer::empty(1),
to_peer: Peer::empty(2),
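
With the constructor shown in the manager.rs hunks above, a migration task is identified purely by region and peers. A sketch of building one; the `store_api::storage::RegionId` import and the module path for the task type are assumptions taken from the file layout, the rest mirrors the hunks.

// Sketch of the new four-argument constructor from manager.rs above.
use std::time::Duration;

use common_meta::peer::Peer;
use store_api::storage::RegionId;

use crate::procedure::region_migration::manager::RegionMigrationProcedureTask;

fn example_task() -> RegionMigrationProcedureTask {
    RegionMigrationProcedureTask::new(
        RegionId::new(1024, 1), // region to move
        Peer::empty(1),         // from_peer: current leader datanode
        Peer::empty(2),         // to_peer: migration target
        Duration::from_secs(10),
    )
}
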
diff --git a/src/meta-srv/src/procedure/region_migration/open_candidate_region.rs b/src/meta-srv/src/procedure/region_migration/open_candidate_region.rs
index 454c0bf9c028..679dfd1355f5 100644
--- a/src/meta-srv/src/procedure/region_migration/open_candidate_region.rs
+++ b/src/meta-srv/src/procedure/region_migration/open_candidate_region.rs
@@ -62,7 +62,6 @@ impl OpenCandidateRegion {
/// - Datanode Table is not found.
async fn build_open_region_instruction(&self, ctx: &mut Context) -> Result<Instruction> {
let pc = &ctx.persistent_ctx;
- let cluster_id = pc.cluster_id;
let table_id = pc.region_id.table_id();
let region_number = pc.region_id.region_number();
let candidate_id = pc.to_peer.id;
@@ -77,7 +76,6 @@ impl OpenCandidateRegion {
let open_instruction = Instruction::OpenRegion(OpenRegion::new(
RegionIdent {
- cluster_id,
datanode_id: candidate_id,
table_id,
region_number,
@@ -214,7 +212,6 @@ mod tests {
fn new_mock_open_instruction(datanode_id: DatanodeId, region_id: RegionId) -> Instruction {
Instruction::OpenRegion(OpenRegion {
region_ident: RegionIdent {
- cluster_id: 0,
datanode_id,
table_id: region_id.table_id(),
region_number: region_id.region_number(),
diff --git a/src/meta-srv/src/procedure/region_migration/test_util.rs b/src/meta-srv/src/procedure/region_migration/test_util.rs
index 2fe55edcab41..40d8325c8924 100644
--- a/src/meta-srv/src/procedure/region_migration/test_util.rs
+++ b/src/meta-srv/src/procedure/region_migration/test_util.rs
@@ -19,7 +19,7 @@ use std::sync::Arc;
use std::time::Duration;
use api::v1::meta::mailbox_message::Payload;
-use api::v1::meta::{HeartbeatResponse, MailboxMessage, RequestHeader};
+use api::v1::meta::{HeartbeatResponse, MailboxMessage};
use common_meta::ddl::NoopRegionFailureDetectorControl;
use common_meta::instruction::{
DowngradeRegionReply, InstructionReply, SimpleReply, UpgradeRegionReply,
@@ -85,7 +85,7 @@ impl MailboxContext {
tx: Sender<std::result::Result<HeartbeatResponse, tonic::Status>>,
) {
let pusher_id = channel.pusher_id();
- let pusher = Pusher::new(tx, &RequestHeader::default());
+ let pusher = Pusher::new(tx);
let _ = self.pushers.insert(pusher_id.string_key(), pusher).await;
}
@@ -317,7 +317,6 @@ pub fn new_persistent_context(from: u64, to: u64, region_id: RegionId) -> Persis
from_peer: Peer::empty(from),
to_peer: Peer::empty(to),
region_id,
- cluster_id: 0,
timeout: Duration::from_secs(10),
}
}
diff --git a/src/meta-srv/src/procedure/region_migration/update_metadata/rollback_downgraded_region.rs b/src/meta-srv/src/procedure/region_migration/update_metadata/rollback_downgraded_region.rs
index 0d568ab7b0bb..9e038bebc6cd 100644
--- a/src/meta-srv/src/procedure/region_migration/update_metadata/rollback_downgraded_region.rs
+++ b/src/meta-srv/src/procedure/region_migration/update_metadata/rollback_downgraded_region.rs
@@ -172,11 +172,7 @@ mod tests {
let detecting_regions = event.into_region_failure_detectors();
assert_eq!(
detecting_regions,
- vec![(
- ctx.persistent_ctx.cluster_id,
- from_peer.id,
- ctx.persistent_ctx.region_id
- )]
+ vec![(from_peer.id, ctx.persistent_ctx.region_id)]
);
let table_route = table_metadata_manager
diff --git a/src/meta-srv/src/procedure/region_migration/upgrade_candidate_region.rs b/src/meta-srv/src/procedure/region_migration/upgrade_candidate_region.rs
index fa989274b44e..6ed8e4905bf2 100644
--- a/src/meta-srv/src/procedure/region_migration/upgrade_candidate_region.rs
+++ b/src/meta-srv/src/procedure/region_migration/upgrade_candidate_region.rs
@@ -238,7 +238,6 @@ mod tests {
from_peer: Peer::empty(1),
to_peer: Peer::empty(2),
region_id: RegionId::new(1024, 1),
- cluster_id: 0,
timeout: Duration::from_millis(1000),
}
}
diff --git a/src/meta-srv/src/procedure/tests.rs b/src/meta-srv/src/procedure/tests.rs
index 5690b8feddf7..2d25094c9888 100644
--- a/src/meta-srv/src/procedure/tests.rs
+++ b/src/meta-srv/src/procedure/tests.rs
@@ -97,7 +97,6 @@ fn create_table_task(table_name: Option<&str>) -> CreateTableTask {
#[test]
fn test_region_request_builder() {
let mut procedure = CreateTableProcedure::new(
- 1,
create_table_task(None),
test_data::new_ddl_context(Arc::new(NodeClients::default())),
);
@@ -192,7 +191,6 @@ async fn test_on_datanode_create_regions() {
let node_manager = new_node_manager(®ion_server, ®ion_routes).await;
let mut procedure = CreateTableProcedure::new(
- 1,
create_table_task(None),
test_data::new_ddl_context(node_manager),
);
@@ -260,7 +258,7 @@ async fn test_on_datanode_create_logical_regions() {
.0;
let _ = kv_backend.txn(physical_route_txn).await.unwrap();
let mut procedure =
- CreateLogicalTablesProcedure::new(1, vec![task1, task2, task3], physical_table_id, ctx);
+ CreateLogicalTablesProcedure::new(vec![task1, task2, task3], physical_table_id, ctx);
let expected_created_regions = Arc::new(Mutex::new(HashMap::from([(1, 3), (2, 3), (3, 3)])));
diff --git a/src/meta-srv/src/region/failure_detector.rs b/src/meta-srv/src/region/failure_detector.rs
index 8533d27f30ac..a795e2e3e8c9 100644
--- a/src/meta-srv/src/region/failure_detector.rs
+++ b/src/meta-srv/src/region/failure_detector.rs
@@ -129,7 +129,7 @@ mod tests {
#[test]
fn test_default_failure_detector_container() {
let container = RegionFailureDetector::new(Default::default());
- let detecting_region = (0, 2, RegionId::new(1, 1));
+ let detecting_region = (2, RegionId::new(1, 1));
let _ = container.region_failure_detector(detecting_region);
assert!(container.contains(&detecting_region));
diff --git a/src/meta-srv/src/region/lease_keeper.rs b/src/meta-srv/src/region/lease_keeper.rs
index 194f3710c853..68e492406bbd 100644
--- a/src/meta-srv/src/region/lease_keeper.rs
+++ b/src/meta-srv/src/region/lease_keeper.rs
@@ -19,7 +19,7 @@ use common_meta::key::table_route::TableRouteValue;
use common_meta::key::TableMetadataManagerRef;
use common_meta::region_keeper::MemoryRegionKeeperRef;
use common_meta::rpc::router::RegionRoute;
-use common_meta::{ClusterId, DatanodeId};
+use common_meta::DatanodeId;
use common_telemetry::warn;
use snafu::ResultExt;
use store_api::region_engine::RegionRole;
@@ -167,7 +167,6 @@ impl RegionLeaseKeeper {
/// and corresponding regions will be added to `non_exists` of [RenewRegionLeasesResponse].
pub async fn renew_region_leases(
&self,
- _cluster_id: ClusterId,
datanode_id: DatanodeId,
regions: &[(RegionId, RegionRole)],
) -> Result<RenewRegionLeasesResponse> {
@@ -282,7 +281,6 @@ mod tests {
renewed,
} = keeper
.renew_region_leases(
- 0,
1,
&[
(RegionId::new(1024, 1), RegionRole::Follower),
@@ -384,7 +382,7 @@ mod tests {
non_exists,
renewed,
} = keeper
- .renew_region_leases(0, 1, &[(region_id, RegionRole::Follower)])
+ .renew_region_leases(1, &[(region_id, RegionRole::Follower)])
.await
.unwrap();
assert!(renewed.is_empty());
@@ -397,7 +395,7 @@ mod tests {
non_exists,
renewed,
} = keeper
- .renew_region_leases(0, leader_peer_id, &[(region_id, role)])
+ .renew_region_leases(leader_peer_id, &[(region_id, role)])
.await
.unwrap();
@@ -411,7 +409,7 @@ mod tests {
non_exists,
renewed,
} = keeper
- .renew_region_leases(0, follower_peer_id, &[(region_id, role)])
+ .renew_region_leases(follower_peer_id, &[(region_id, role)])
.await
.unwrap();
@@ -432,7 +430,7 @@ mod tests {
non_exists,
renewed,
} = keeper
- .renew_region_leases(0, leader_peer_id, &[(opening_region_id, role)])
+ .renew_region_leases(leader_peer_id, &[(opening_region_id, role)])
.await
.unwrap();
@@ -465,7 +463,6 @@ mod tests {
renewed,
} = keeper
.renew_region_leases(
- 0,
1,
&[
(region_id, RegionRole::Follower),
@@ -513,7 +510,7 @@ mod tests {
non_exists,
renewed,
} = keeper
- .renew_region_leases(0, follower_peer_id, &[(region_id, role)])
+ .renew_region_leases(follower_peer_id, &[(region_id, role)])
.await
.unwrap();
diff --git a/src/meta-srv/src/region/supervisor.rs b/src/meta-srv/src/region/supervisor.rs
index 4c3725d114c6..44b2a6e7f0b4 100644
--- a/src/meta-srv/src/region/supervisor.rs
+++ b/src/meta-srv/src/region/supervisor.rs
@@ -22,7 +22,7 @@ use common_meta::ddl::{DetectingRegion, RegionFailureDetectorController};
use common_meta::key::maintenance::MaintenanceModeManagerRef;
use common_meta::leadership_notifier::LeadershipChangeListener;
use common_meta::peer::PeerLookupServiceRef;
-use common_meta::{ClusterId, DatanodeId};
+use common_meta::DatanodeId;
use common_runtime::JoinHandle;
use common_telemetry::{error, info, warn};
use common_time::util::current_time_millis;
@@ -45,7 +45,6 @@ use crate::selector::SelectorOptions;
/// and a timestamp indicating when the heartbeat was sent.
#[derive(Debug)]
pub(crate) struct DatanodeHeartbeat {
- cluster_id: ClusterId,
datanode_id: DatanodeId,
// TODO(weny): Considers collecting the memtable size in regions.
regions: Vec<RegionId>,
@@ -55,7 +54,6 @@ pub(crate) struct DatanodeHeartbeat {
impl From<&Stat> for DatanodeHeartbeat {
fn from(value: &Stat) -> Self {
DatanodeHeartbeat {
- cluster_id: value.cluster_id,
datanode_id: value.id,
regions: value.region_stats.iter().map(|x| x.id).collect(),
timestamp: value.timestamp_millis,
@@ -341,7 +339,7 @@ impl RegionSupervisor {
}
}
- async fn handle_region_failures(&self, mut regions: Vec<(ClusterId, DatanodeId, RegionId)>) {
+ async fn handle_region_failures(&self, mut regions: Vec<(DatanodeId, RegionId)>) {
if regions.is_empty() {
return;
}
@@ -358,22 +356,19 @@ impl RegionSupervisor {
}
let migrating_regions = regions
- .extract_if(.., |(_, _, region_id)| {
+ .extract_if(.., |(_, region_id)| {
self.region_migration_manager.tracker().contains(*region_id)
})
.collect::<Vec<_>>();
- for (cluster_id, datanode_id, region_id) in migrating_regions {
- self.failure_detector
- .remove(&(cluster_id, datanode_id, region_id));
+ for (datanode_id, region_id) in migrating_regions {
+ self.failure_detector.remove(&(datanode_id, region_id));
}
warn!("Detects region failures: {:?}", regions);
- for (cluster_id, datanode_id, region_id) in regions {
- match self.do_failover(cluster_id, datanode_id, region_id).await {
- Ok(_) => self
- .failure_detector
- .remove(&(cluster_id, datanode_id, region_id)),
+ for (datanode_id, region_id) in regions {
+ match self.do_failover(datanode_id, region_id).await {
+ Ok(_) => self.failure_detector.remove(&(datanode_id, region_id)),
Err(err) => {
error!(err; "Failed to execute region failover for region: {region_id}, datanode: {datanode_id}");
}
@@ -388,15 +383,10 @@ impl RegionSupervisor {
.context(error::MaintenanceModeManagerSnafu)
}
- async fn do_failover(
- &self,
- cluster_id: ClusterId,
- datanode_id: DatanodeId,
- region_id: RegionId,
- ) -> Result<()> {
+ async fn do_failover(&self, datanode_id: DatanodeId, region_id: RegionId) -> Result<()> {
let from_peer = self
.peer_lookup
- .datanode(cluster_id, datanode_id)
+ .datanode(datanode_id)
.await
.context(error::LookupPeerSnafu {
peer_id: datanode_id,
@@ -407,7 +397,6 @@ impl RegionSupervisor {
let mut peers = self
.selector
.select(
- cluster_id,
&self.selector_context,
SelectorOptions {
min_required_items: 1,
@@ -423,7 +412,6 @@ impl RegionSupervisor {
return Ok(());
}
let task = RegionMigrationProcedureTask {
- cluster_id,
region_id,
from_peer,
to_peer,
@@ -442,7 +430,7 @@ impl RegionSupervisor {
}
/// Detects the failure of regions.
- fn detect_region_failure(&self) -> Vec<(ClusterId, DatanodeId, RegionId)> {
+ fn detect_region_failure(&self) -> Vec<(DatanodeId, RegionId)> {
self.failure_detector
.iter()
.filter_map(|e| {
@@ -464,7 +452,7 @@ impl RegionSupervisor {
/// Updates the state of corresponding failure detectors.
fn on_heartbeat_arrived(&self, heartbeat: DatanodeHeartbeat) {
for region_id in heartbeat.regions {
- let detecting_region = (heartbeat.cluster_id, heartbeat.datanode_id, region_id);
+ let detecting_region = (heartbeat.datanode_id, region_id);
let mut detector = self
.failure_detector
.region_failure_detector(detecting_region);
@@ -537,7 +525,6 @@ pub(crate) mod tests {
sender
.send(Event::HeartbeatArrived(DatanodeHeartbeat {
- cluster_id: 0,
datanode_id: 0,
regions: vec![RegionId::new(1, 1)],
timestamp: 100,
@@ -547,7 +534,7 @@ pub(crate) mod tests {
let (tx, rx) = oneshot::channel();
sender.send(Event::Dump(tx)).await.unwrap();
let detector = rx.await.unwrap();
- assert!(detector.contains(&(0, 0, RegionId::new(1, 1))));
+ assert!(detector.contains(&(0, RegionId::new(1, 1))));
// Clear up
sender.send(Event::Clear).await.unwrap();
@@ -561,7 +548,6 @@ pub(crate) mod tests {
(0..2000)
.map(|i| DatanodeHeartbeat {
timestamp: start + i * 1000 + rng.gen_range(0..100),
- cluster_id: 0,
datanode_id,
regions: region_ids
.iter()
@@ -630,7 +616,7 @@ pub(crate) mod tests {
let (mut supervisor, sender) = new_test_supervisor();
let controller = RegionFailureDetectorControl::new(sender.clone());
tokio::spawn(async move { supervisor.run().await });
- let detecting_region = (0, 1, RegionId::new(1, 1));
+ let detecting_region = (1, RegionId::new(1, 1));
controller
.register_failure_detectors(vec![detecting_region])
.await;
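
The supervisor.rs hunks above shrink a detecting region to a `(DatanodeId, RegionId)` pair. A sketch of registering and querying a detector for such a pair, shaped after the test shown in region/failure_detector.rs; the module path and the config type behind `Default::default()` are assumptions.

// Sketch: failure detectors are now keyed by (datanode_id, region_id) only.
use store_api::storage::RegionId;

use crate::region::failure_detector::RegionFailureDetector;

fn track_region() {
    let detectors = RegionFailureDetector::new(Default::default());
    let detecting_region = (2, RegionId::new(1, 1)); // (DatanodeId, RegionId)

    // Creates (or fetches) the per-region detector; subsequent heartbeats
    // feed it, and a stale detector marks the region as failed.
    let _detector = detectors.region_failure_detector(detecting_region);
    assert!(detectors.contains(&detecting_region));
}
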
diff --git a/src/meta-srv/src/selector.rs b/src/meta-srv/src/selector.rs
index 0795bccd9b82..c197f04e59b5 100644
--- a/src/meta-srv/src/selector.rs
+++ b/src/meta-srv/src/selector.rs
@@ -25,19 +25,12 @@ use serde::{Deserialize, Serialize};
use crate::error;
use crate::error::Result;
-pub type Namespace = u64;
-
#[async_trait::async_trait]
pub trait Selector: Send + Sync {
type Context;
type Output;
- async fn select(
- &self,
- ns: Namespace,
- ctx: &Self::Context,
- opts: SelectorOptions,
- ) -> Result<Self::Output>;
+ async fn select(&self, ctx: &Self::Context, opts: SelectorOptions) -> Result<Self::Output>;
}
#[derive(Debug)]
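
The trait above now takes only a context and options. A sketch of an implementation against the new signature, modeled on the `RandomNodeSelector` that appears later in this patch in selector/test_utils.rs; the fixed-list behavior here is illustrative only.

// Sketch of a Selector impl against the namespace-free signature above.
use common_meta::peer::Peer;

use crate::error::Result;
use crate::metasrv::SelectorContext;
use crate::selector::{Selector, SelectorOptions};

pub struct FixedSelector {
    nodes: Vec<Peer>,
}

#[async_trait::async_trait]
impl Selector for FixedSelector {
    type Context = SelectorContext;
    type Output = Vec<Peer>;

    async fn select(&self, _ctx: &Self::Context, opts: SelectorOptions) -> Result<Self::Output> {
        // No `ns: Namespace` argument anymore: selection depends only on the
        // context and the requested item count.
        Ok(self
            .nodes
            .iter()
            .take(opts.min_required_items)
            .cloned()
            .collect())
    }
}
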
diff --git a/src/meta-srv/src/selector/lease_based.rs b/src/meta-srv/src/selector/lease_based.rs
index d9af63da6555..a7ce7c73218b 100644
--- a/src/meta-srv/src/selector/lease_based.rs
+++ b/src/meta-srv/src/selector/lease_based.rs
@@ -19,7 +19,7 @@ use crate::lease;
use crate::metasrv::SelectorContext;
use crate::selector::common::choose_items;
use crate::selector::weighted_choose::{RandomWeightedChoose, WeightedItem};
-use crate::selector::{Namespace, Selector, SelectorOptions};
+use crate::selector::{Selector, SelectorOptions};
/// Select all alive datanodes based using a random weighted choose.
pub struct LeaseBasedSelector;
@@ -29,15 +29,10 @@ impl Selector for LeaseBasedSelector {
type Context = SelectorContext;
type Output = Vec<Peer>;
- async fn select(
- &self,
- ns: Namespace,
- ctx: &Self::Context,
- opts: SelectorOptions,
- ) -> Result<Self::Output> {
+ async fn select(&self, ctx: &Self::Context, opts: SelectorOptions) -> Result<Self::Output> {
// 1. get alive datanodes.
let lease_kvs =
- lease::alive_datanodes(ns, &ctx.meta_peer_client, ctx.datanode_lease_secs).await?;
+ lease::alive_datanodes(&ctx.meta_peer_client, ctx.datanode_lease_secs).await?;
// 2. compute weight array, but the weight of each item is the same.
let weight_array = lease_kvs
diff --git a/src/meta-srv/src/selector/load_based.rs b/src/meta-srv/src/selector/load_based.rs
index 8a00c7fdb7bd..2628990bf48e 100644
--- a/src/meta-srv/src/selector/load_based.rs
+++ b/src/meta-srv/src/selector/load_based.rs
@@ -29,7 +29,7 @@ use crate::metasrv::SelectorContext;
use crate::selector::common::choose_items;
use crate::selector::weight_compute::{RegionNumsBasedWeightCompute, WeightCompute};
use crate::selector::weighted_choose::RandomWeightedChoose;
-use crate::selector::{Namespace, Selector, SelectorOptions};
+use crate::selector::{Selector, SelectorOptions};
pub struct LoadBasedSelector<C> {
weight_compute: C,
@@ -57,15 +57,10 @@ where
type Context = SelectorContext;
type Output = Vec<Peer>;
- async fn select(
- &self,
- ns: Namespace,
- ctx: &Self::Context,
- opts: SelectorOptions,
- ) -> Result<Self::Output> {
+ async fn select(&self, ctx: &Self::Context, opts: SelectorOptions) -> Result<Self::Output> {
// 1. get alive datanodes.
let lease_kvs =
- lease::alive_datanodes(ns, &ctx.meta_peer_client, ctx.datanode_lease_secs).await?;
+ lease::alive_datanodes(&ctx.meta_peer_client, ctx.datanode_lease_secs).await?;
// 2. get stat kvs and filter out expired datanodes.
let stat_keys = lease_kvs.keys().map(|k| k.into()).collect();
@@ -97,8 +92,8 @@ where
let selected = choose_items(&opts, &mut weighted_choose)?;
debug!(
- "LoadBasedSelector select peers: {:?}, namespace: {}, opts: {:?}.",
- selected, ns, opts,
+ "LoadBasedSelector select peers: {:?}, opts: {:?}.",
+ selected, opts,
);
Ok(selected)
@@ -165,33 +160,21 @@ mod tests {
fn test_filter_out_expired_datanode() {
let mut stat_kvs = HashMap::new();
stat_kvs.insert(
- DatanodeStatKey {
- cluster_id: 1,
- node_id: 0,
- },
+ DatanodeStatKey { node_id: 0 },
DatanodeStatValue { stats: vec![] },
);
stat_kvs.insert(
- DatanodeStatKey {
- cluster_id: 1,
- node_id: 1,
- },
+ DatanodeStatKey { node_id: 1 },
DatanodeStatValue { stats: vec![] },
);
stat_kvs.insert(
- DatanodeStatKey {
- cluster_id: 1,
- node_id: 2,
- },
+ DatanodeStatKey { node_id: 2 },
DatanodeStatValue { stats: vec![] },
);
let mut lease_kvs = HashMap::new();
lease_kvs.insert(
- DatanodeLeaseKey {
- cluster_id: 1,
- node_id: 1,
- },
+ DatanodeLeaseKey { node_id: 1 },
LeaseValue {
timestamp_millis: 0,
node_addr: "127.0.0.1:3002".to_string(),
@@ -201,9 +184,6 @@ mod tests {
let alive_stat_kvs = filter_out_expired_datanode(stat_kvs, &lease_kvs);
assert_eq!(1, alive_stat_kvs.len());
- assert!(alive_stat_kvs.contains_key(&DatanodeStatKey {
- cluster_id: 1,
- node_id: 1
- }));
+ assert!(alive_stat_kvs.contains_key(&DatanodeStatKey { node_id: 1 }));
}
}
diff --git a/src/meta-srv/src/selector/round_robin.rs b/src/meta-srv/src/selector/round_robin.rs
index b50823cb02ee..f11a36555fa9 100644
--- a/src/meta-srv/src/selector/round_robin.rs
+++ b/src/meta-srv/src/selector/round_robin.rs
@@ -20,7 +20,7 @@ use snafu::ensure;
use crate::error::{NoEnoughAvailableNodeSnafu, Result};
use crate::lease;
use crate::metasrv::{SelectTarget, SelectorContext};
-use crate::selector::{Namespace, Selector, SelectorOptions};
+use crate::selector::{Selector, SelectorOptions};
/// Round-robin selector that returns the next peer in the list in sequence.
/// Datanodes are ordered by their node_id.
@@ -53,7 +53,6 @@ impl RoundRobinSelector {
async fn get_peers(
&self,
- ns: Namespace,
min_required_items: usize,
ctx: &SelectorContext,
) -> Result<Vec<Peer>> {
@@ -61,8 +60,7 @@ impl RoundRobinSelector {
SelectTarget::Datanode => {
// 1. get alive datanodes.
let lease_kvs =
- lease::alive_datanodes(ns, &ctx.meta_peer_client, ctx.datanode_lease_secs)
- .await?;
+ lease::alive_datanodes(&ctx.meta_peer_client, ctx.datanode_lease_secs).await?;
// 2. map into peers
lease_kvs
@@ -73,8 +71,7 @@ impl RoundRobinSelector {
SelectTarget::Flownode => {
// 1. get alive flownodes.
let lease_kvs =
- lease::alive_flownodes(ns, &ctx.meta_peer_client, ctx.flownode_lease_secs)
- .await?;
+ lease::alive_flownodes(&ctx.meta_peer_client, ctx.flownode_lease_secs).await?;
// 2. map into peers
lease_kvs
@@ -105,13 +102,8 @@ impl Selector for RoundRobinSelector {
type Context = SelectorContext;
type Output = Vec<Peer>;
- async fn select(
- &self,
- ns: Namespace,
- ctx: &Self::Context,
- opts: SelectorOptions,
- ) -> Result<Vec<Peer>> {
- let peers = self.get_peers(ns, opts.min_required_items, ctx).await?;
+ async fn select(&self, ctx: &Self::Context, opts: SelectorOptions) -> Result<Vec<Peer>> {
+ let peers = self.get_peers(opts.min_required_items, ctx).await?;
// choose peers
let mut selected = Vec::with_capacity(opts.min_required_items);
for _ in 0..opts.min_required_items {
@@ -135,8 +127,6 @@ mod test {
async fn test_round_robin_selector() {
let selector = RoundRobinSelector::default();
let ctx = create_selector_context();
- let ns = 0;
-
// add three nodes
let peer1 = Peer {
id: 2,
@@ -151,11 +141,10 @@ mod test {
addr: "node3".to_string(),
};
let peers = vec![peer1.clone(), peer2.clone(), peer3.clone()];
- put_datanodes(ns, &ctx.meta_peer_client, peers).await;
+ put_datanodes(&ctx.meta_peer_client, peers).await;
let peers = selector
.select(
- ns,
&ctx,
SelectorOptions {
min_required_items: 4,
@@ -172,7 +161,6 @@ mod test {
let peers = selector
.select(
- ns,
&ctx,
SelectorOptions {
min_required_items: 2,
diff --git a/src/meta-srv/src/selector/test_utils.rs b/src/meta-srv/src/selector/test_utils.rs
index 0c3b4e3f21d6..edf0f8d89755 100644
--- a/src/meta-srv/src/selector/test_utils.rs
+++ b/src/meta-srv/src/selector/test_utils.rs
@@ -22,7 +22,7 @@ use rand::prelude::SliceRandom;
use crate::cluster::MetaPeerClientBuilder;
use crate::error::Result;
use crate::metasrv::SelectorContext;
-use crate::selector::{Namespace, Selector, SelectorOptions};
+use crate::selector::{Selector, SelectorOptions};
/// Returns [SelectorContext] for test purpose.
pub fn new_test_selector_context() -> SelectorContext {
@@ -60,12 +60,7 @@ impl Selector for RandomNodeSelector {
type Context = SelectorContext;
type Output = Vec<Peer>;
- async fn select(
- &self,
- _ns: Namespace,
- _ctx: &Self::Context,
- _opts: SelectorOptions,
- ) -> Result<Self::Output> {
+ async fn select(&self, _ctx: &Self::Context, _opts: SelectorOptions) -> Result<Self::Output> {
let mut rng = rand::thread_rng();
let mut nodes = self.nodes.clone();
nodes.shuffle(&mut rng);
diff --git a/src/meta-srv/src/selector/weight_compute.rs b/src/meta-srv/src/selector/weight_compute.rs
index 16289bc3bd33..cd8b0409cd01 100644
--- a/src/meta-srv/src/selector/weight_compute.rs
+++ b/src/meta-srv/src/selector/weight_compute.rs
@@ -104,26 +104,17 @@ mod tests {
#[test]
fn test_weight_compute() {
let mut stat_kvs: HashMap<DatanodeStatKey, DatanodeStatValue> = HashMap::default();
- let stat_key = DatanodeStatKey {
- cluster_id: 1,
- node_id: 1,
- };
+ let stat_key = DatanodeStatKey { node_id: 1 };
let stat_val = DatanodeStatValue {
stats: vec![mock_stat_1()],
};
stat_kvs.insert(stat_key, stat_val);
- let stat_key = DatanodeStatKey {
- cluster_id: 1,
- node_id: 2,
- };
+ let stat_key = DatanodeStatKey { node_id: 2 };
let stat_val = DatanodeStatValue {
stats: vec![mock_stat_2()],
};
stat_kvs.insert(stat_key, stat_val);
- let stat_key = DatanodeStatKey {
- cluster_id: 1,
- node_id: 3,
- };
+ let stat_key = DatanodeStatKey { node_id: 3 };
let stat_val = DatanodeStatValue {
stats: vec![mock_stat_3()],
};
diff --git a/src/meta-srv/src/service/admin/node_lease.rs b/src/meta-srv/src/service/admin/node_lease.rs
index a3736d181844..f7b4855dac29 100644
--- a/src/meta-srv/src/service/admin/node_lease.rs
+++ b/src/meta-srv/src/service/admin/node_lease.rs
@@ -22,7 +22,7 @@ use crate::cluster::MetaPeerClientRef;
use crate::error::{self, Result};
use crate::key::{DatanodeLeaseKey, LeaseValue};
use crate::lease;
-use crate::service::admin::{util, HttpHandler};
+use crate::service::admin::HttpHandler;
pub struct NodeLeaseHandler {
pub meta_peer_client: MetaPeerClientRef,
@@ -34,11 +34,9 @@ impl HttpHandler for NodeLeaseHandler {
&self,
_: &str,
_: http::Method,
- params: &HashMap<String, String>,
+ _: &HashMap<String, String>,
) -> Result<http::Response<String>> {
- let cluster_id = util::extract_cluster_id(params)?;
-
- let leases = lease::alive_datanodes(cluster_id, &self.meta_peer_client, u64::MAX).await?;
+ let leases = lease::alive_datanodes(&self.meta_peer_client, u64::MAX).await?;
let leases = leases
.into_iter()
.map(|(k, v)| HumanLease {
diff --git a/src/meta-srv/src/service/admin/util.rs b/src/meta-srv/src/service/admin/util.rs
index 0ea46f6702b3..cdabf38a63e8 100644
--- a/src/meta-srv/src/service/admin/util.rs
+++ b/src/meta-srv/src/service/admin/util.rs
@@ -12,24 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::collections::HashMap;
-
-use snafu::{OptionExt, ResultExt};
+use snafu::ResultExt;
use tonic::codegen::http;
-use crate::error::{self, MissingRequiredParameterSnafu, ParseNumSnafu, Result};
-
-pub fn extract_cluster_id(params: &HashMap<String, String>) -> Result<u64> {
- params
- .get("cluster_id")
- .map(|id| id.parse::<u64>())
- .context(MissingRequiredParameterSnafu {
- param: "cluster_id",
- })?
- .context(ParseNumSnafu {
- err_msg: "`cluster_id` is not a valid number",
- })
-}
+use crate::error::{self, Result};
pub fn to_text_response(text: &str) -> Result<http::Response<String>> {
http::Response::builder()
diff --git a/src/meta-srv/src/service/cluster.rs b/src/meta-srv/src/service/cluster.rs
index f5f5661b01d5..64e6eb9b6df1 100644
--- a/src/meta-srv/src/service/cluster.rs
+++ b/src/meta-srv/src/service/cluster.rs
@@ -29,7 +29,7 @@ use crate::{error, metasrv};
impl cluster_server::Cluster for Metasrv {
async fn batch_get(&self, req: Request<PbBatchGetRequest>) -> GrpcResult<PbBatchGetResponse> {
if !self.is_leader() {
- let is_not_leader = ResponseHeader::failed(0, Error::is_not_leader());
+ let is_not_leader = ResponseHeader::failed(Error::is_not_leader());
let resp = PbBatchGetResponse {
header: Some(is_not_leader),
..Default::default()
@@ -46,13 +46,13 @@ impl cluster_server::Cluster for Metasrv {
.await
.context(error::KvBackendSnafu)?;
- let resp = resp.to_proto_resp(ResponseHeader::success(0));
+ let resp = resp.to_proto_resp(ResponseHeader::success());
Ok(Response::new(resp))
}
async fn range(&self, req: Request<PbRangeRequest>) -> GrpcResult<PbRangeResponse> {
if !self.is_leader() {
- let is_not_leader = ResponseHeader::failed(0, Error::is_not_leader());
+ let is_not_leader = ResponseHeader::failed(Error::is_not_leader());
let resp = PbRangeResponse {
header: Some(is_not_leader),
..Default::default()
@@ -69,7 +69,7 @@ impl cluster_server::Cluster for Metasrv {
.await
.context(error::KvBackendSnafu)?;
- let resp = res.to_proto_resp(ResponseHeader::success(0));
+ let resp = res.to_proto_resp(ResponseHeader::success());
Ok(Response::new(resp))
}
@@ -78,7 +78,7 @@ impl cluster_server::Cluster for Metasrv {
req: Request<MetasrvPeersRequest>,
) -> GrpcResult<MetasrvPeersResponse> {
if !self.is_leader() {
- let is_not_leader = ResponseHeader::failed(0, Error::is_not_leader());
+ let is_not_leader = ResponseHeader::failed(Error::is_not_leader());
let resp = MetasrvPeersResponse {
header: Some(is_not_leader),
..Default::default()
@@ -103,7 +103,7 @@ impl cluster_server::Cluster for Metasrv {
};
let resp = MetasrvPeersResponse {
- header: Some(ResponseHeader::success(0)),
+ header: Some(ResponseHeader::success()),
leader: Some(leader),
followers,
};
diff --git a/src/meta-srv/src/service/heartbeat.rs b/src/meta-srv/src/service/heartbeat.rs
index 45adb5f57e4f..c450e17139b2 100644
--- a/src/meta-srv/src/service/heartbeat.rs
+++ b/src/meta-srv/src/service/heartbeat.rs
@@ -134,9 +134,7 @@ impl heartbeat_server::Heartbeat for Metasrv {
}
}
-async fn handle_ask_leader(req: AskLeaderRequest, ctx: Context) -> Result<AskLeaderResponse> {
- let cluster_id = req.header.as_ref().map_or(0, |h| h.cluster_id);
-
+async fn handle_ask_leader(_req: AskLeaderRequest, ctx: Context) -> Result<AskLeaderResponse> {
let addr = match ctx.election {
Some(election) => {
if election.is_leader() {
@@ -153,7 +151,7 @@ async fn handle_ask_leader(req: AskLeaderRequest, ctx: Context) -> Result<AskLea
addr,
});
- let header = Some(ResponseHeader::success(cluster_id));
+ let header = Some(ResponseHeader::success());
Ok(AskLeaderResponse { header, leader })
}
@@ -179,7 +177,7 @@ async fn register_pusher(
let role = header.role();
let id = get_node_id(header);
let pusher_id = PusherId::new(role, id);
- let pusher = Pusher::new(sender, header);
+ let pusher = Pusher::new(sender);
handler_group.register_pusher(pusher_id, pusher).await;
pusher_id
}
@@ -213,12 +211,11 @@ mod tests {
.unwrap();
let req = AskLeaderRequest {
- header: Some(RequestHeader::new((1, 1), Role::Datanode, W3cTrace::new())),
+ header: Some(RequestHeader::new(1, Role::Datanode, W3cTrace::new())),
};
let res = metasrv.ask_leader(req.into_request()).await.unwrap();
let res = res.into_inner();
- assert_eq!(1, res.header.unwrap().cluster_id);
assert_eq!(metasrv.options().bind_addr, res.leader.unwrap().addr);
}
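
The test hunks in this patch also show that `RequestHeader::new` now takes a single node id instead of a `(cluster_id, node_id)` pair. A tiny sketch of building such a header; the import paths for `Role` and `W3cTrace` and the exact integer type of the id are assumptions, only `api::v1::meta::RequestHeader` is taken from the imports shown above.

// Sketch: headers carry just the member/node id after the change.
use api::v1::meta::{RequestHeader, Role};
use common_telemetry::tracing_context::W3cTrace;

fn datanode_header(node_id: u64) -> RequestHeader {
    // Previously `RequestHeader::new((cluster_id, node_id), ...)`.
    RequestHeader::new(node_id, Role::Datanode, W3cTrace::new())
}
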
diff --git a/src/meta-srv/src/service/procedure.rs b/src/meta-srv/src/service/procedure.rs
index 5fc438d174a8..bb41dd3ba926 100644
--- a/src/meta-srv/src/service/procedure.rs
+++ b/src/meta-srv/src/service/procedure.rs
@@ -41,7 +41,7 @@ impl procedure_service_server::ProcedureService for Metasrv {
) -> GrpcResult<ProcedureStateResponse> {
if !self.is_leader() {
let resp = ProcedureStateResponse {
- header: Some(ResponseHeader::failed(0, Error::is_not_leader())),
+ header: Some(ResponseHeader::failed(Error::is_not_leader())),
..Default::default()
};
@@ -71,7 +71,7 @@ impl procedure_service_server::ProcedureService for Metasrv {
async fn ddl(&self, request: Request<PbDdlTaskRequest>) -> GrpcResult<PbDdlTaskResponse> {
if !self.is_leader() {
let resp = PbDdlTaskResponse {
- header: Some(ResponseHeader::failed(0, Error::is_not_leader())),
+ header: Some(ResponseHeader::failed(Error::is_not_leader())),
..Default::default()
};
@@ -87,7 +87,6 @@ impl procedure_service_server::ProcedureService for Metasrv {
} = request.into_inner();
let header = header.context(error::MissingRequestHeaderSnafu)?;
- let cluster_id = header.cluster_id;
let query_context = query_context
.context(error::MissingRequiredParameterSnafu {
param: "query_context",
@@ -102,7 +101,6 @@ impl procedure_service_server::ProcedureService for Metasrv {
.procedure_executor()
.submit_ddl_task(
&ExecutorContext {
- cluster_id: Some(cluster_id),
tracing_context: Some(header.tracing_context),
},
SubmitDdlTaskRequest {
@@ -123,7 +121,7 @@ impl procedure_service_server::ProcedureService for Metasrv {
) -> GrpcResult<MigrateRegionResponse> {
if !self.is_leader() {
let resp = MigrateRegionResponse {
- header: Some(ResponseHeader::failed(0, Error::is_not_leader())),
+ header: Some(ResponseHeader::failed(Error::is_not_leader())),
..Default::default()
};
@@ -139,22 +137,19 @@ impl procedure_service_server::ProcedureService for Metasrv {
timeout_secs,
} = request.into_inner();
- let header = header.context(error::MissingRequestHeaderSnafu)?;
- let cluster_id = header.cluster_id;
-
+ let _header = header.context(error::MissingRequestHeaderSnafu)?;
let from_peer = self
- .lookup_peer(cluster_id, from_peer)
+ .lookup_peer(from_peer)
.await?
.context(error::PeerUnavailableSnafu { peer_id: from_peer })?;
let to_peer = self
- .lookup_peer(cluster_id, to_peer)
+ .lookup_peer(to_peer)
.await?
.context(error::PeerUnavailableSnafu { peer_id: to_peer })?;
let pid = self
.region_migration_manager()
.submit_procedure(RegionMigrationProcedureTask {
- cluster_id,
region_id: region_id.into(),
from_peer,
to_peer,
@@ -177,7 +172,7 @@ impl procedure_service_server::ProcedureService for Metasrv {
) -> GrpcResult<ProcedureDetailResponse> {
if !self.is_leader() {
let resp = ProcedureDetailResponse {
- header: Some(ResponseHeader::failed(0, Error::is_not_leader())),
+ header: Some(ResponseHeader::failed(Error::is_not_leader())),
..Default::default()
};
diff --git a/src/meta-srv/src/service/store.rs b/src/meta-srv/src/service/store.rs
index acbc090e5c89..bb410849a29b 100644
--- a/src/meta-srv/src/service/store.rs
+++ b/src/meta-srv/src/service/store.rs
@@ -28,10 +28,10 @@ use common_meta::rpc::store::{
BatchDeleteRequest, BatchGetRequest, BatchPutRequest, CompareAndPutRequest, DeleteRangeRequest,
PutRequest, RangeRequest,
};
-use snafu::{OptionExt, ResultExt};
+use snafu::ResultExt;
use tonic::{Request, Response};
-use crate::error::{self, MissingRequestHeaderSnafu};
+use crate::error::{self};
use crate::metasrv::Metasrv;
use crate::metrics::METRIC_META_KV_REQUEST_ELAPSED;
use crate::service::GrpcResult;
@@ -41,15 +41,8 @@ impl store_server::Store for Metasrv {
async fn range(&self, req: Request<PbRangeRequest>) -> GrpcResult<PbRangeResponse> {
let req = req.into_inner();
- let cluster_id = req
- .header
- .as_ref()
- .context(MissingRequestHeaderSnafu)?
- .cluster_id;
- let cluster_id_str = cluster_id.to_string();
-
let _timer = METRIC_META_KV_REQUEST_ELAPSED
- .with_label_values(&[self.kv_backend().name(), "range", cluster_id_str.as_str()])
+ .with_label_values(&[self.kv_backend().name(), "range"])
.start_timer();
let req: RangeRequest = req.into();
@@ -60,22 +53,14 @@ impl store_server::Store for Metasrv {
.await
.context(error::KvBackendSnafu)?;
- let res = res.to_proto_resp(ResponseHeader::success(cluster_id));
+ let res = res.to_proto_resp(ResponseHeader::success());
Ok(Response::new(res))
}
async fn put(&self, req: Request<PbPutRequest>) -> GrpcResult<PbPutResponse> {
let req = req.into_inner();
-
- let cluster_id = req
- .header
- .as_ref()
- .context(MissingRequestHeaderSnafu)?
- .cluster_id;
- let cluster_id_str = cluster_id.to_string();
-
let _timer = METRIC_META_KV_REQUEST_ELAPSED
- .with_label_values(&[self.kv_backend().name(), "put", cluster_id_str.as_str()])
+ .with_label_values(&[self.kv_backend().name(), "put"])
.start_timer();
let req: PutRequest = req.into();
@@ -86,26 +71,14 @@ impl store_server::Store for Metasrv {
.await
.context(error::KvBackendSnafu)?;
- let res = res.to_proto_resp(ResponseHeader::success(cluster_id));
+ let res = res.to_proto_resp(ResponseHeader::success());
Ok(Response::new(res))
}
async fn batch_get(&self, req: Request<PbBatchGetRequest>) -> GrpcResult<PbBatchGetResponse> {
let req = req.into_inner();
-
- let cluster_id = req
- .header
- .as_ref()
- .context(MissingRequestHeaderSnafu)?
- .cluster_id;
- let cluster_id_str = cluster_id.to_string();
-
let _timer = METRIC_META_KV_REQUEST_ELAPSED
- .with_label_values(&[
- self.kv_backend().name(),
- "batch_get",
- cluster_id_str.as_str(),
- ])
+ .with_label_values(&[self.kv_backend().name(), "batch_get"])
.start_timer();
let req: BatchGetRequest = req.into();
@@ -116,26 +89,15 @@ impl store_server::Store for Metasrv {
.await
.context(error::KvBackendSnafu)?;
- let res = res.to_proto_resp(ResponseHeader::success(cluster_id));
+ let res = res.to_proto_resp(ResponseHeader::success());
Ok(Response::new(res))
}
async fn batch_put(&self, req: Request<PbBatchPutRequest>) -> GrpcResult<PbBatchPutResponse> {
let req = req.into_inner();
- let cluster_id = req
- .header
- .as_ref()
- .context(MissingRequestHeaderSnafu)?
- .cluster_id;
- let cluster_id_str = cluster_id.to_string();
-
let _timer = METRIC_META_KV_REQUEST_ELAPSED
- .with_label_values(&[
- self.kv_backend().name(),
- "batch_pub",
- cluster_id_str.as_str(),
- ])
+ .with_label_values(&[self.kv_backend().name(), "batch_pub"])
.start_timer();
let req: BatchPutRequest = req.into();
@@ -146,7 +108,7 @@ impl store_server::Store for Metasrv {
.await
.context(error::KvBackendSnafu)?;
- let res = res.to_proto_resp(ResponseHeader::success(cluster_id));
+ let res = res.to_proto_resp(ResponseHeader::success());
Ok(Response::new(res))
}
@@ -156,19 +118,8 @@ impl store_server::Store for Metasrv {
) -> GrpcResult<PbBatchDeleteResponse> {
let req = req.into_inner();
- let cluster_id = req
- .header
- .as_ref()
- .context(MissingRequestHeaderSnafu)?
- .cluster_id;
- let cluster_id_str = cluster_id.to_string();
-
let _timer = METRIC_META_KV_REQUEST_ELAPSED
- .with_label_values(&[
- self.kv_backend().name(),
- "batch_delete",
- cluster_id_str.as_str(),
- ])
+ .with_label_values(&[self.kv_backend().name(), "batch_delete"])
.start_timer();
let req: BatchDeleteRequest = req.into();
@@ -179,7 +130,7 @@ impl store_server::Store for Metasrv {
.await
.context(error::KvBackendSnafu)?;
- let res = res.to_proto_resp(ResponseHeader::success(cluster_id));
+ let res = res.to_proto_resp(ResponseHeader::success());
Ok(Response::new(res))
}
@@ -189,19 +140,8 @@ impl store_server::Store for Metasrv {
) -> GrpcResult<PbCompareAndPutResponse> {
let req = req.into_inner();
- let cluster_id = req
- .header
- .as_ref()
- .context(MissingRequestHeaderSnafu)?
- .cluster_id;
- let cluster_id_str = cluster_id.to_string();
-
let _timer = METRIC_META_KV_REQUEST_ELAPSED
- .with_label_values(&[
- self.kv_backend().name(),
- "compare_and_put",
- cluster_id_str.as_str(),
- ])
+ .with_label_values(&[self.kv_backend().name(), "compare_and_put"])
.start_timer();
let req: CompareAndPutRequest = req.into();
@@ -212,7 +152,7 @@ impl store_server::Store for Metasrv {
.await
.context(error::KvBackendSnafu)?;
- let res = res.to_proto_resp(ResponseHeader::success(cluster_id));
+ let res = res.to_proto_resp(ResponseHeader::success());
Ok(Response::new(res))
}
@@ -222,19 +162,8 @@ impl store_server::Store for Metasrv {
) -> GrpcResult<PbDeleteRangeResponse> {
let req = req.into_inner();
- let cluster_id = req
- .header
- .as_ref()
- .context(MissingRequestHeaderSnafu)?
- .cluster_id;
- let cluster_id_str = cluster_id.to_string();
-
let _timer = METRIC_META_KV_REQUEST_ELAPSED
- .with_label_values(&[
- self.kv_backend().name(),
- "delete_range",
- cluster_id_str.as_str(),
- ])
+ .with_label_values(&[self.kv_backend().name(), "delete_range"])
.start_timer();
let req: DeleteRangeRequest = req.into();
@@ -245,7 +174,7 @@ impl store_server::Store for Metasrv {
.await
.context(error::KvBackendSnafu)?;
- let res = res.to_proto_resp(ResponseHeader::success(cluster_id));
+ let res = res.to_proto_resp(ResponseHeader::success());
Ok(Response::new(res))
}
}
@@ -276,7 +205,7 @@ mod tests {
let metasrv = new_metasrv().await;
let mut req = RangeRequest::default();
- req.set_header((1, 1), Role::Datanode, W3cTrace::new());
+ req.set_header(1, Role::Datanode, W3cTrace::new());
let res = metasrv.range(req.into_request()).await;
let _ = res.unwrap();
@@ -287,7 +216,7 @@ mod tests {
let metasrv = new_metasrv().await;
let mut req = PutRequest::default();
- req.set_header((1, 1), Role::Datanode, W3cTrace::new());
+ req.set_header(1, Role::Datanode, W3cTrace::new());
let res = metasrv.put(req.into_request()).await;
let _ = res.unwrap();
@@ -298,7 +227,7 @@ mod tests {
let metasrv = new_metasrv().await;
let mut req = BatchGetRequest::default();
- req.set_header((1, 1), Role::Datanode, W3cTrace::new());
+ req.set_header(1, Role::Datanode, W3cTrace::new());
let res = metasrv.batch_get(req.into_request()).await;
let _ = res.unwrap();
@@ -310,7 +239,7 @@ mod tests {
let metasrv = new_metasrv().await;
let mut req = BatchPutRequest::default();
- req.set_header((1, 1), Role::Datanode, W3cTrace::new());
+ req.set_header(1, Role::Datanode, W3cTrace::new());
let res = metasrv.batch_put(req.into_request()).await;
let _ = res.unwrap();
@@ -321,7 +250,7 @@ mod tests {
let metasrv = new_metasrv().await;
let mut req = BatchDeleteRequest::default();
- req.set_header((1, 1), Role::Datanode, W3cTrace::new());
+ req.set_header(1, Role::Datanode, W3cTrace::new());
let res = metasrv.batch_delete(req.into_request()).await;
let _ = res.unwrap();
@@ -332,7 +261,7 @@ mod tests {
let metasrv = new_metasrv().await;
let mut req = CompareAndPutRequest::default();
- req.set_header((1, 1), Role::Datanode, W3cTrace::new());
+ req.set_header(1, Role::Datanode, W3cTrace::new());
let res = metasrv.compare_and_put(req.into_request()).await;
let _ = res.unwrap();
@@ -343,7 +272,7 @@ mod tests {
let metasrv = new_metasrv().await;
let mut req = DeleteRangeRequest::default();
- req.set_header((1, 1), Role::Datanode, W3cTrace::new());
+ req.set_header(1, Role::Datanode, W3cTrace::new());
let res = metasrv.delete_range(req.into_request()).await;
let _ = res.unwrap();
diff --git a/src/meta-srv/src/table_meta_alloc.rs b/src/meta-srv/src/table_meta_alloc.rs
index a8f4b4406967..8578e6cd19a7 100644
--- a/src/meta-srv/src/table_meta_alloc.rs
+++ b/src/meta-srv/src/table_meta_alloc.rs
@@ -15,7 +15,6 @@
use async_trait::async_trait;
use common_error::ext::BoxedError;
use common_meta::ddl::table_meta::PeerAllocator;
-use common_meta::ddl::TableMetadataAllocatorContext;
use common_meta::error::{ExternalSnafu, Result as MetaResult};
use common_meta::peer::Peer;
use snafu::{ensure, ResultExt};
@@ -36,24 +35,18 @@ impl MetasrvPeerAllocator {
Self { ctx, selector }
}
- /// Allocates a specified number (by `regions`) of [`Peer`] instances based on the given
- /// [`TableMetadataAllocatorContext`] and number of regions. The returned peers will have
- /// the same length as the number of regions.
+ /// Allocates a specified number (by `regions`) of [`Peer`] instances based on the number of
+ /// regions. The returned peers will have the same length as the number of regions.
///
/// This method is mainly a wrapper around the [`SelectorRef`]::`select` method. There is
/// no guarantee that how the returned peers are used, like whether they are from the same
/// table or not. So this method isn't idempotent.
- async fn alloc(
- &self,
- ctx: &TableMetadataAllocatorContext,
- regions: usize,
- ) -> Result<Vec<Peer>> {
+ async fn alloc(&self, regions: usize) -> Result<Vec<Peer>> {
ensure!(regions <= MAX_REGION_SEQ as usize, TooManyPartitionsSnafu);
let mut peers = self
.selector
.select(
- ctx.cluster_id,
&self.ctx,
SelectorOptions {
min_required_items: regions,
@@ -79,12 +72,8 @@ impl MetasrvPeerAllocator {
#[async_trait]
impl PeerAllocator for MetasrvPeerAllocator {
- async fn alloc(
- &self,
- ctx: &TableMetadataAllocatorContext,
- regions: usize,
- ) -> MetaResult<Vec<Peer>> {
- self.alloc(ctx, regions)
+ async fn alloc(&self, regions: usize) -> MetaResult<Vec<Peer>> {
+ self.alloc(regions)
.await
.map_err(BoxedError::new)
.context(ExternalSnafu)
diff --git a/src/meta-srv/src/test_util.rs b/src/meta-srv/src/test_util.rs
index 6b9ccc99a0fa..b12e11fd1904 100644
--- a/src/meta-srv/src/test_util.rs
+++ b/src/meta-srv/src/test_util.rs
@@ -17,7 +17,6 @@ use std::sync::Arc;
use common_meta::kv_backend::memory::MemoryKvBackend;
use common_meta::peer::Peer;
use common_meta::rpc::router::{Region, RegionRoute};
-use common_meta::ClusterId;
use common_time::util as time_util;
use crate::cluster::{MetaPeerClientBuilder, MetaPeerClientRef};
@@ -63,15 +62,10 @@ pub(crate) fn create_selector_context() -> SelectorContext {
}
}
-pub(crate) async fn put_datanodes(
- cluster_id: ClusterId,
- meta_peer_client: &MetaPeerClientRef,
- datanodes: Vec<Peer>,
-) {
+pub(crate) async fn put_datanodes(meta_peer_client: &MetaPeerClientRef, datanodes: Vec<Peer>) {
let backend = meta_peer_client.memory_backend();
for datanode in datanodes {
let lease_key = DatanodeLeaseKey {
- cluster_id,
node_id: datanode.id,
};
let lease_value = LeaseValue {
diff --git a/tests-integration/src/cluster.rs b/tests-integration/src/cluster.rs
index d0051db7c569..d1bb7c64d5c5 100644
--- a/tests-integration/src/cluster.rs
+++ b/tests-integration/src/cluster.rs
@@ -308,11 +308,10 @@ impl GreptimeDbClusterBuilder {
expected_datanodes: usize,
) {
for _ in 0..10 {
- let alive_datanodes =
- meta_srv::lease::alive_datanodes(1000, meta_peer_client, u64::MAX)
- .await
- .unwrap()
- .len();
+ let alive_datanodes = meta_srv::lease::alive_datanodes(meta_peer_client, u64::MAX)
+ .await
+ .unwrap()
+ .len();
if alive_datanodes == expected_datanodes {
return;
}
@@ -322,10 +321,9 @@ impl GreptimeDbClusterBuilder {
}
async fn create_datanode(&self, opts: DatanodeOptions, metasrv: MockInfo) -> Datanode {
- let mut meta_client =
- MetaClientBuilder::datanode_default_options(1000, opts.node_id.unwrap())
- .channel_manager(metasrv.channel_manager)
- .build();
+ let mut meta_client = MetaClientBuilder::datanode_default_options(opts.node_id.unwrap())
+ .channel_manager(metasrv.channel_manager)
+ .build();
meta_client.start(&[&metasrv.server_addr]).await.unwrap();
let meta_client = Arc::new(meta_client);
@@ -357,7 +355,7 @@ impl GreptimeDbClusterBuilder {
metasrv: MockInfo,
datanode_clients: Arc<NodeClients>,
) -> Arc<FeInstance> {
- let mut meta_client = MetaClientBuilder::frontend_default_options(1000)
+ let mut meta_client = MetaClientBuilder::frontend_default_options()
.channel_manager(metasrv.channel_manager)
.enable_access_cluster_info()
.build();
diff --git a/tests-integration/tests/region_migration.rs b/tests-integration/tests/region_migration.rs
index 3f72ee0ccad7..8f54e253f7ff 100644
--- a/tests-integration/tests/region_migration.rs
+++ b/tests-integration/tests/region_migration.rs
@@ -35,7 +35,7 @@ use futures::future::BoxFuture;
use meta_srv::error::Result as MetaResult;
use meta_srv::metasrv::SelectorContext;
use meta_srv::procedure::region_migration::RegionMigrationProcedureTask;
-use meta_srv::selector::{Namespace, Selector, SelectorOptions};
+use meta_srv::selector::{Selector, SelectorOptions};
use servers::query_handler::sql::SqlQueryHandler;
use session::context::{QueryContext, QueryContextRef};
use store_api::storage::RegionId;
@@ -169,7 +169,6 @@ pub async fn test_region_migration(store_type: StorageType, endpoints: Vec<Strin
// Trigger region migration.
let procedure = region_migration_manager
.submit_procedure(RegionMigrationProcedureTask::new(
- 0,
region_id,
peer_factory(from_peer_id),
peer_factory(to_peer_id),
@@ -216,7 +215,6 @@ pub async fn test_region_migration(store_type: StorageType, endpoints: Vec<Strin
// Triggers again.
let procedure = region_migration_manager
.submit_procedure(RegionMigrationProcedureTask::new(
- 0,
region_id,
peer_factory(from_peer_id),
peer_factory(to_peer_id),
@@ -473,7 +471,6 @@ pub async fn test_region_migration_by_sql(store_type: StorageType, endpoints: Ve
// Triggers again.
let procedure = region_migration_manager
.submit_procedure(RegionMigrationProcedureTask::new(
- 0,
region_id,
peer_factory(from_peer_id),
peer_factory(to_peer_id),
@@ -578,7 +575,6 @@ pub async fn test_region_migration_multiple_regions(
// Trigger region migration.
let procedure = region_migration_manager
.submit_procedure(RegionMigrationProcedureTask::new(
- 0,
region_id,
peer_factory(from_peer_id),
peer_factory(to_peer_id),
@@ -625,7 +621,6 @@ pub async fn test_region_migration_multiple_regions(
// Triggers again.
let procedure = region_migration_manager
.submit_procedure(RegionMigrationProcedureTask::new(
- 0,
region_id,
peer_factory(from_peer_id),
peer_factory(to_peer_id),
@@ -715,7 +710,6 @@ pub async fn test_region_migration_all_regions(store_type: StorageType, endpoint
// Trigger region migration.
let procedure = region_migration_manager
.submit_procedure(RegionMigrationProcedureTask::new(
- 0,
region_id,
peer_factory(from_peer_id),
peer_factory(to_peer_id),
@@ -763,7 +757,6 @@ pub async fn test_region_migration_all_regions(store_type: StorageType, endpoint
// Triggers again.
let procedure = region_migration_manager
.submit_procedure(RegionMigrationProcedureTask::new(
- 0,
region_id,
peer_factory(from_peer_id),
peer_factory(to_peer_id),
@@ -842,7 +835,6 @@ pub async fn test_region_migration_incorrect_from_peer(
// Trigger region migration.
let err = region_migration_manager
.submit_procedure(RegionMigrationProcedureTask::new(
- 0,
region_id,
peer_factory(5),
peer_factory(1),
@@ -925,7 +917,6 @@ pub async fn test_region_migration_incorrect_region_id(
// Trigger region migration.
let err = region_migration_manager
.submit_procedure(RegionMigrationProcedureTask::new(
- 0,
region_id,
peer_factory(2),
peer_factory(1),
@@ -957,7 +948,6 @@ impl Selector for ConstNodeSelector {
async fn select(
&self,
- _ns: Namespace,
_ctx: &Self::Context,
_opts: SelectorOptions,
) -> MetaResult<Self::Output> {
type: refactor
masked_commit_message: remove cluster id field (#5610)
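Editor's note: the diff above strips the per-request cluster id out of the kv-store path end to end: handlers no longer read it from the request header, `ResponseHeader::success()` takes no argument, and the request-latency metric drops its third label. The sketch below reproduces only that last point in isolation, using the `prometheus` crate directly; the metric name and label values are illustrative stand-ins, not the real `METRIC_META_KV_REQUEST_ELAPSED` definition.

use prometheus::{register_histogram_vec, HistogramVec};

// Time one kv request; after the change the label set is just [backend, op].
fn timed_range_call(elapsed: &HistogramVec) {
    let _timer = elapsed
        .with_label_values(&["memory", "range"]) // no cluster-id label anymore
        .start_timer();
    // ... perform the kv range request here ...
}

fn main() {
    // Illustrative metric; the real one lives in meta-srv's metrics module.
    let elapsed = register_histogram_vec!(
        "example_meta_kv_request_elapsed",
        "Elapsed time of kv requests (illustrative)",
        &["backend", "op"]
    )
    .unwrap();
    timed_range_call(&elapsed);
}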

hash: d7f1150098828110c878fa623ee9f2e49161ba70
date: 2024-05-20 09:29:05
author: tison
commit_message: ci: fixup strings in check ci status (#3987)
is_merge: false
git_diff:
diff --git a/.github/workflows/nightly-build.yml b/.github/workflows/nightly-build.yml
index 05ddf232c345..e3311f2902ea 100644
--- a/.github/workflows/nightly-build.yml
+++ b/.github/workflows/nightly-build.yml
@@ -305,7 +305,7 @@ jobs:
run: pnpm tsx bin/report-ci-failure.ts
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- CI_REPORT_STATUS: ${{ needs.release-images-to-dockerhub.outputs.build-result == 'success' }}
+ CI_REPORT_STATUS: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' }}
- name: Notify nightly build successful result
uses: slackapi/[email protected]
if: ${{ needs.release-images-to-dockerhub.outputs.nightly-build-result == 'success' }}
diff --git a/cyborg/bin/report-ci-failure.ts b/cyborg/bin/report-ci-failure.ts
index bc8e69b9eaa2..ece8ebb387b6 100644
--- a/cyborg/bin/report-ci-failure.ts
+++ b/cyborg/bin/report-ci-failure.ts
@@ -24,7 +24,7 @@ async function main() {
core.info(`CI_REPORT_STATUS=${process.env["CI_REPORT_STATUS"]}, resolved to ${success}`)
const client = obtainClient("GITHUB_TOKEN")
- const title = `Workflow run '${context.action}' failed`
+ const title = `Workflow run '${context.workflow}' failed`
const url = `${process.env["GITHUB_SERVER_URL"]}/${process.env["GITHUB_REPOSITORY"]}/actions/runs/${process.env["GITHUB_RUN_ID"]}`
const failure_comment = `@GreptimeTeam/db-approver\nNew failure: ${url} `
const success_comment = `@GreptimeTeam/db-approver\nBack to success: ${url}`
type: ci
masked_commit_message: fixup strings in check ci status (#3987)

hash: d51b65a8bf37a7f48a20e7b1931c938f97ee0544
date: 2024-12-24 10:40:30
author: Zhenchi
commit_message: feat(index-cache): abstract `IndexCache` to be shared by multi types of indexes (#5219)
is_merge: false
git_diff:
diff --git a/src/index/src/inverted_index/format/reader.rs b/src/index/src/inverted_index/format/reader.rs
index 21e5487d1e42..24f21504d001 100644
--- a/src/index/src/inverted_index/format/reader.rs
+++ b/src/index/src/inverted_index/format/reader.rs
@@ -31,12 +31,21 @@ mod footer;
/// InvertedIndexReader defines an asynchronous reader of inverted index data
#[mockall::automock]
#[async_trait]
-pub trait InvertedIndexReader: Send {
+pub trait InvertedIndexReader: Send + Sync {
/// Seeks to given offset and reads data with exact size as provided.
async fn range_read(&mut self, offset: u64, size: u32) -> Result<Vec<u8>>;
/// Reads the bytes in the given ranges.
- async fn read_vec(&mut self, ranges: &[Range<u64>]) -> Result<Vec<Bytes>>;
+ async fn read_vec(&mut self, ranges: &[Range<u64>]) -> Result<Vec<Bytes>> {
+ let mut result = Vec::with_capacity(ranges.len());
+ for range in ranges {
+ let data = self
+ .range_read(range.start, (range.end - range.start) as u32)
+ .await?;
+ result.push(Bytes::from(data));
+ }
+ Ok(result)
+ }
/// Retrieves metadata of all inverted indices stored within the blob.
async fn metadata(&mut self) -> Result<Arc<InvertedIndexMetas>>;
diff --git a/src/index/src/inverted_index/format/reader/blob.rs b/src/index/src/inverted_index/format/reader/blob.rs
index fcaa63773d93..73d98835794a 100644
--- a/src/index/src/inverted_index/format/reader/blob.rs
+++ b/src/index/src/inverted_index/format/reader/blob.rs
@@ -51,7 +51,7 @@ impl<R> InvertedIndexBlobReader<R> {
}
#[async_trait]
-impl<R: RangeReader> InvertedIndexReader for InvertedIndexBlobReader<R> {
+impl<R: RangeReader + Sync> InvertedIndexReader for InvertedIndexBlobReader<R> {
async fn range_read(&mut self, offset: u64, size: u32) -> Result<Vec<u8>> {
let buf = self
.source
diff --git a/src/mito2/src/cache.rs b/src/mito2/src/cache.rs
index 03cf9136245a..a1864c55179e 100644
--- a/src/mito2/src/cache.rs
+++ b/src/mito2/src/cache.rs
@@ -37,7 +37,7 @@ use store_api::storage::{ConcreteDataType, RegionId, TimeSeriesRowSelector};
use crate::cache::cache_size::parquet_meta_size;
use crate::cache::file_cache::{FileType, IndexKey};
-use crate::cache::index::{InvertedIndexCache, InvertedIndexCacheRef};
+use crate::cache::index::inverted_index::{InvertedIndexCache, InvertedIndexCacheRef};
use crate::cache::write_cache::WriteCacheRef;
use crate::metrics::{CACHE_BYTES, CACHE_EVICTION, CACHE_HIT, CACHE_MISS};
use crate::read::Batch;
diff --git a/src/mito2/src/cache/index.rs b/src/mito2/src/cache/index.rs
index de39ea3784b6..c8bd7a8f329b 100644
--- a/src/mito2/src/cache/index.rs
+++ b/src/mito2/src/cache/index.rs
@@ -12,168 +12,29 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+pub mod inverted_index;
+
+use std::future::Future;
+use std::hash::Hash;
use std::ops::Range;
use std::sync::Arc;
-use api::v1::index::InvertedIndexMetas;
-use async_trait::async_trait;
use bytes::Bytes;
-use common_base::BitVec;
-use index::inverted_index::error::DecodeFstSnafu;
-use index::inverted_index::format::reader::InvertedIndexReader;
-use index::inverted_index::FstMap;
use object_store::Buffer;
-use prost::Message;
-use snafu::ResultExt;
use crate::metrics::{CACHE_BYTES, CACHE_HIT, CACHE_MISS};
-use crate::sst::file::FileId;
/// Metrics for index metadata.
const INDEX_METADATA_TYPE: &str = "index_metadata";
/// Metrics for index content.
const INDEX_CONTENT_TYPE: &str = "index_content";
-/// Inverted index blob reader with cache.
-pub struct CachedInvertedIndexBlobReader<R> {
- file_id: FileId,
- file_size: u64,
- inner: R,
- cache: InvertedIndexCacheRef,
-}
-
-impl<R> CachedInvertedIndexBlobReader<R> {
- pub fn new(file_id: FileId, file_size: u64, inner: R, cache: InvertedIndexCacheRef) -> Self {
- Self {
- file_id,
- file_size,
- inner,
- cache,
- }
- }
-}
-
-impl<R> CachedInvertedIndexBlobReader<R>
-where
- R: InvertedIndexReader,
-{
- /// Gets given range of index data from cache, and loads from source if the file
- /// is not already cached.
- async fn get_or_load(
- &mut self,
- offset: u64,
- size: u32,
- ) -> index::inverted_index::error::Result<Vec<u8>> {
- let keys =
- IndexDataPageKey::generate_page_keys(self.file_id, offset, size, self.cache.page_size);
- // Size is 0, return empty data.
- if keys.is_empty() {
- return Ok(Vec::new());
- }
- let mut data = Vec::with_capacity(keys.len());
- data.resize(keys.len(), Bytes::new());
- let mut cache_miss_range = vec![];
- let mut cache_miss_idx = vec![];
- let last_index = keys.len() - 1;
- // TODO: Avoid copy as much as possible.
- for (i, index) in keys.iter().enumerate() {
- match self.cache.get_index(index) {
- Some(page) => {
- CACHE_HIT.with_label_values(&[INDEX_CONTENT_TYPE]).inc();
- data[i] = page;
- }
- None => {
- CACHE_MISS.with_label_values(&[INDEX_CONTENT_TYPE]).inc();
- let base_offset = index.page_id * self.cache.page_size;
- let pruned_size = if i == last_index {
- prune_size(&keys, self.file_size, self.cache.page_size)
- } else {
- self.cache.page_size
- };
- cache_miss_range.push(base_offset..base_offset + pruned_size);
- cache_miss_idx.push(i);
- }
- }
- }
- if !cache_miss_range.is_empty() {
- let pages = self.inner.read_vec(&cache_miss_range).await?;
- for (i, page) in cache_miss_idx.into_iter().zip(pages.into_iter()) {
- let key = keys[i].clone();
- data[i] = page.clone();
- self.cache.put_index(key, page.clone());
- }
- }
- let buffer = Buffer::from_iter(data.into_iter());
- Ok(buffer
- .slice(IndexDataPageKey::calculate_range(
- offset,
- size,
- self.cache.page_size,
- ))
- .to_vec())
- }
-}
-
-#[async_trait]
-impl<R: InvertedIndexReader> InvertedIndexReader for CachedInvertedIndexBlobReader<R> {
- async fn range_read(
- &mut self,
- offset: u64,
- size: u32,
- ) -> index::inverted_index::error::Result<Vec<u8>> {
- self.inner.range_read(offset, size).await
- }
-
- async fn read_vec(
- &mut self,
- ranges: &[Range<u64>],
- ) -> index::inverted_index::error::Result<Vec<Bytes>> {
- self.inner.read_vec(ranges).await
- }
-
- async fn metadata(&mut self) -> index::inverted_index::error::Result<Arc<InvertedIndexMetas>> {
- if let Some(cached) = self.cache.get_index_metadata(self.file_id) {
- CACHE_HIT.with_label_values(&[INDEX_METADATA_TYPE]).inc();
- Ok(cached)
- } else {
- let meta = self.inner.metadata().await?;
- self.cache.put_index_metadata(self.file_id, meta.clone());
- CACHE_MISS.with_label_values(&[INDEX_METADATA_TYPE]).inc();
- Ok(meta)
- }
- }
-
- async fn fst(
- &mut self,
- offset: u64,
- size: u32,
- ) -> index::inverted_index::error::Result<FstMap> {
- self.get_or_load(offset, size)
- .await
- .and_then(|r| FstMap::new(r).context(DecodeFstSnafu))
- }
-
- async fn bitmap(
- &mut self,
- offset: u64,
- size: u32,
- ) -> index::inverted_index::error::Result<BitVec> {
- self.get_or_load(offset, size).await.map(BitVec::from_vec)
- }
-}
-
-#[derive(Debug, Clone, PartialEq, Eq, Hash)]
-pub struct IndexMetadataKey {
- file_id: FileId,
-}
-
-#[derive(Debug, Clone, PartialEq, Eq, Hash)]
-pub struct IndexDataPageKey {
- file_id: FileId,
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
+pub struct PageKey {
page_id: u64,
}
-impl IndexDataPageKey {
+impl PageKey {
/// Converts an offset to a page ID based on the page size.
fn calculate_page_id(offset: u64, page_size: u64) -> u64 {
offset / page_size
@@ -199,49 +60,60 @@ impl IndexDataPageKey {
start..end
}
- /// Generates a vector of IndexKey instances for the pages that a given offset and size span.
- fn generate_page_keys(file_id: FileId, offset: u64, size: u32, page_size: u64) -> Vec<Self> {
+ /// Generates a iterator of `IndexKey` for the pages that a given offset and size span.
+ fn generate_page_keys(offset: u64, size: u32, page_size: u64) -> impl Iterator<Item = Self> {
let start_page = Self::calculate_page_id(offset, page_size);
let total_pages = Self::calculate_page_count(offset, size, page_size);
- (0..total_pages)
- .map(|i| Self {
- file_id,
- page_id: start_page + i as u64,
- })
- .collect()
+ (0..total_pages).map(move |i| Self {
+ page_id: start_page + i as u64,
+ })
}
}
-pub type InvertedIndexCacheRef = Arc<InvertedIndexCache>;
-
-pub struct InvertedIndexCache {
- /// Cache for inverted index metadata
- index_metadata: moka::sync::Cache<IndexMetadataKey, Arc<InvertedIndexMetas>>,
- /// Cache for inverted index content.
- index: moka::sync::Cache<IndexDataPageKey, Bytes>,
+/// Cache for index metadata and content.
+pub struct IndexCache<K, M> {
+ /// Cache for index metadata
+ index_metadata: moka::sync::Cache<K, Arc<M>>,
+ /// Cache for index content.
+ index: moka::sync::Cache<(K, PageKey), Bytes>,
// Page size for index content.
page_size: u64,
+
+ /// Weighter for metadata.
+ weight_of_metadata: fn(&K, &Arc<M>) -> u32,
+ /// Weighter for content.
+ weight_of_content: fn(&(K, PageKey), &Bytes) -> u32,
}
-impl InvertedIndexCache {
- /// Creates `InvertedIndexCache` with provided `index_metadata_cap` and `index_content_cap`.
- pub fn new(index_metadata_cap: u64, index_content_cap: u64, page_size: u64) -> Self {
- common_telemetry::debug!("Building InvertedIndexCache with metadata size: {index_metadata_cap}, content size: {index_content_cap}");
+impl<K, M> IndexCache<K, M>
+where
+ K: Hash + Eq + Send + Sync + 'static,
+ M: Send + Sync + 'static,
+{
+ pub fn new_with_weighter(
+ index_metadata_cap: u64,
+ index_content_cap: u64,
+ page_size: u64,
+ index_type: &'static str,
+ weight_of_metadata: fn(&K, &Arc<M>) -> u32,
+ weight_of_content: fn(&(K, PageKey), &Bytes) -> u32,
+ ) -> Self {
+ common_telemetry::debug!("Building IndexCache with metadata size: {index_metadata_cap}, content size: {index_content_cap}, page size: {page_size}, index type: {index_type}");
let index_metadata = moka::sync::CacheBuilder::new(index_metadata_cap)
- .name("inverted_index_metadata")
- .weigher(index_metadata_weight)
- .eviction_listener(|k, v, _cause| {
- let size = index_metadata_weight(&k, &v);
+ .name(&format!("index_metadata_{}", index_type))
+ .weigher(weight_of_metadata)
+ .eviction_listener(move |k, v, _cause| {
+ let size = weight_of_metadata(&k, &v);
CACHE_BYTES
.with_label_values(&[INDEX_METADATA_TYPE])
.sub(size.into());
})
.build();
let index_cache = moka::sync::CacheBuilder::new(index_content_cap)
- .name("inverted_index_content")
- .weigher(index_content_weight)
- .eviction_listener(|k, v, _cause| {
- let size = index_content_weight(&k, &v);
+ .name(&format!("index_content_{}", index_type))
+ .weigher(weight_of_content)
+ .eviction_listener(move |k, v, _cause| {
+ let size = weight_of_content(&k, &v);
CACHE_BYTES
.with_label_values(&[INDEX_CONTENT_TYPE])
.sub(size.into());
@@ -251,259 +123,109 @@ impl InvertedIndexCache {
index_metadata,
index: index_cache,
page_size,
+ weight_of_content,
+ weight_of_metadata,
}
}
}
-impl InvertedIndexCache {
- pub fn get_index_metadata(&self, file_id: FileId) -> Option<Arc<InvertedIndexMetas>> {
- self.index_metadata.get(&IndexMetadataKey { file_id })
+impl<K, M> IndexCache<K, M>
+where
+ K: Hash + Eq + Clone + Copy + Send + Sync + 'static,
+ M: Send + Sync + 'static,
+{
+ pub fn get_metadata(&self, key: K) -> Option<Arc<M>> {
+ self.index_metadata.get(&key)
}
- pub fn put_index_metadata(&self, file_id: FileId, metadata: Arc<InvertedIndexMetas>) {
- let key = IndexMetadataKey { file_id };
+ pub fn put_metadata(&self, key: K, metadata: Arc<M>) {
CACHE_BYTES
.with_label_values(&[INDEX_METADATA_TYPE])
- .add(index_metadata_weight(&key, &metadata).into());
+ .add((self.weight_of_metadata)(&key, &metadata).into());
self.index_metadata.insert(key, metadata)
}
- pub fn get_index(&self, key: &IndexDataPageKey) -> Option<Bytes> {
- self.index.get(key)
+ /// Gets given range of index data from cache, and loads from source if the file
+ /// is not already cached.
+ async fn get_or_load<F, Fut, E>(
+ &self,
+ key: K,
+ file_size: u64,
+ offset: u64,
+ size: u32,
+ load: F,
+ ) -> Result<Vec<u8>, E>
+ where
+ F: FnOnce(Vec<Range<u64>>) -> Fut,
+ Fut: Future<Output = Result<Vec<Bytes>, E>>,
+ E: std::error::Error,
+ {
+ let page_keys =
+ PageKey::generate_page_keys(offset, size, self.page_size).collect::<Vec<_>>();
+ // Size is 0, return empty data.
+ if page_keys.is_empty() {
+ return Ok(Vec::new());
+ }
+ let mut data = Vec::with_capacity(page_keys.len());
+ data.resize(page_keys.len(), Bytes::new());
+ let mut cache_miss_range = vec![];
+ let mut cache_miss_idx = vec![];
+ let last_index = page_keys.len() - 1;
+ // TODO: Avoid copy as much as possible.
+ for (i, page_key) in page_keys.iter().enumerate() {
+ match self.get_page(key, *page_key) {
+ Some(page) => {
+ CACHE_HIT.with_label_values(&[INDEX_CONTENT_TYPE]).inc();
+ data[i] = page;
+ }
+ None => {
+ CACHE_MISS.with_label_values(&[INDEX_CONTENT_TYPE]).inc();
+ let base_offset = page_key.page_id * self.page_size;
+ let pruned_size = if i == last_index {
+ prune_size(page_keys.iter(), file_size, self.page_size)
+ } else {
+ self.page_size
+ };
+ cache_miss_range.push(base_offset..base_offset + pruned_size);
+ cache_miss_idx.push(i);
+ }
+ }
+ }
+ if !cache_miss_range.is_empty() {
+ let pages = load(cache_miss_range).await?;
+ for (i, page) in cache_miss_idx.into_iter().zip(pages.into_iter()) {
+ let page_key = page_keys[i];
+ data[i] = page.clone();
+ self.put_page(key, page_key, page.clone());
+ }
+ }
+ let buffer = Buffer::from_iter(data.into_iter());
+ Ok(buffer
+ .slice(PageKey::calculate_range(offset, size, self.page_size))
+ .to_vec())
+ }
+
+ fn get_page(&self, key: K, page_key: PageKey) -> Option<Bytes> {
+ self.index.get(&(key, page_key))
}
- pub fn put_index(&self, key: IndexDataPageKey, value: Bytes) {
+ fn put_page(&self, key: K, page_key: PageKey, value: Bytes) {
CACHE_BYTES
.with_label_values(&[INDEX_CONTENT_TYPE])
- .add(index_content_weight(&key, &value).into());
- self.index.insert(key, value);
+ .add((self.weight_of_content)(&(key, page_key), &value).into());
+ self.index.insert((key, page_key), value);
}
}
-/// Calculates weight for index metadata.
-fn index_metadata_weight(k: &IndexMetadataKey, v: &Arc<InvertedIndexMetas>) -> u32 {
- (k.file_id.as_bytes().len() + v.encoded_len()) as u32
-}
-
-/// Calculates weight for index content.
-fn index_content_weight(k: &IndexDataPageKey, v: &Bytes) -> u32 {
- (k.file_id.as_bytes().len() + v.len()) as u32
-}
-
/// Prunes the size of the last page based on the indexes.
/// We have following cases:
/// 1. The rest file size is less than the page size, read to the end of the file.
/// 2. Otherwise, read the page size.
-fn prune_size(indexes: &[IndexDataPageKey], file_size: u64, page_size: u64) -> u64 {
+fn prune_size<'a>(
+ indexes: impl Iterator<Item = &'a PageKey>,
+ file_size: u64,
+ page_size: u64,
+) -> u64 {
let last_page_start = indexes.last().map(|i| i.page_id * page_size).unwrap_or(0);
page_size.min(file_size - last_page_start)
}
-
-#[cfg(test)]
-mod test {
- use std::num::NonZeroUsize;
-
- use common_base::BitVec;
- use futures::stream;
- use index::inverted_index::format::reader::{InvertedIndexBlobReader, InvertedIndexReader};
- use index::inverted_index::format::writer::{InvertedIndexBlobWriter, InvertedIndexWriter};
- use index::inverted_index::Bytes;
- use prometheus::register_int_counter_vec;
- use rand::{Rng, RngCore};
-
- use super::*;
- use crate::sst::index::store::InstrumentedStore;
- use crate::test_util::TestEnv;
-
- // Repeat times for following little fuzz tests.
- const FUZZ_REPEAT_TIMES: usize = 100;
-
- // Fuzz test for index data page key
- #[test]
- fn fuzz_index_calculation() {
- // randomly generate a large u8 array
- let mut rng = rand::thread_rng();
- let mut data = vec![0u8; 1024 * 1024];
- rng.fill_bytes(&mut data);
- let file_id = FileId::random();
-
- for _ in 0..FUZZ_REPEAT_TIMES {
- let offset = rng.gen_range(0..data.len() as u64);
- let size = rng.gen_range(0..data.len() as u32 - offset as u32);
- let page_size: usize = rng.gen_range(1..1024);
-
- let indexes =
- IndexDataPageKey::generate_page_keys(file_id, offset, size, page_size as u64);
- let page_num = indexes.len();
- let mut read = Vec::with_capacity(size as usize);
- for key in indexes.into_iter() {
- let start = key.page_id as usize * page_size;
- let page = if start + page_size < data.len() {
- &data[start..start + page_size]
- } else {
- &data[start..]
- };
- read.extend_from_slice(page);
- }
- let expected_range = offset as usize..(offset + size as u64 as u64) as usize;
- let read =
- read[IndexDataPageKey::calculate_range(offset, size, page_size as u64)].to_vec();
- if read != data.get(expected_range).unwrap() {
- panic!(
- "fuzz_read_index failed, offset: {}, size: {}, page_size: {}\nread len: {}, expected len: {}\nrange: {:?}, page num: {}",
- offset, size, page_size, read.len(), size as usize,
- IndexDataPageKey::calculate_range(offset, size, page_size as u64),
- page_num
- );
- }
- }
- }
-
- fn unpack(fst_value: u64) -> [u32; 2] {
- bytemuck::cast::<u64, [u32; 2]>(fst_value)
- }
-
- async fn create_inverted_index_blob() -> Vec<u8> {
- let mut blob = Vec::new();
- let mut writer = InvertedIndexBlobWriter::new(&mut blob);
- writer
- .add_index(
- "tag0".to_string(),
- BitVec::from_slice(&[0b0000_0001, 0b0000_0000]),
- Box::new(stream::iter(vec![
- Ok((Bytes::from("a"), BitVec::from_slice(&[0b0000_0001]))),
- Ok((Bytes::from("b"), BitVec::from_slice(&[0b0010_0000]))),
- Ok((Bytes::from("c"), BitVec::from_slice(&[0b0000_0001]))),
- ])),
- )
- .await
- .unwrap();
- writer
- .add_index(
- "tag1".to_string(),
- BitVec::from_slice(&[0b0000_0001, 0b0000_0000]),
- Box::new(stream::iter(vec![
- Ok((Bytes::from("x"), BitVec::from_slice(&[0b0000_0001]))),
- Ok((Bytes::from("y"), BitVec::from_slice(&[0b0010_0000]))),
- Ok((Bytes::from("z"), BitVec::from_slice(&[0b0000_0001]))),
- ])),
- )
- .await
- .unwrap();
- writer
- .finish(8, NonZeroUsize::new(1).unwrap())
- .await
- .unwrap();
-
- blob
- }
-
- #[tokio::test]
- async fn test_inverted_index_cache() {
- let blob = create_inverted_index_blob().await;
-
- // Init a test range reader in local fs.
- let mut env = TestEnv::new();
- let file_size = blob.len() as u64;
- let store = env.init_object_store_manager();
- let temp_path = "data";
- store.write(temp_path, blob).await.unwrap();
- let store = InstrumentedStore::new(store);
- let metric =
- register_int_counter_vec!("test_bytes", "a counter for test", &["test"]).unwrap();
- let counter = metric.with_label_values(&["test"]);
- let range_reader = store
- .range_reader("data", &counter, &counter)
- .await
- .unwrap();
-
- let reader = InvertedIndexBlobReader::new(range_reader);
- let mut cached_reader = CachedInvertedIndexBlobReader::new(
- FileId::random(),
- file_size,
- reader,
- Arc::new(InvertedIndexCache::new(8192, 8192, 50)),
- );
- let metadata = cached_reader.metadata().await.unwrap();
- assert_eq!(metadata.total_row_count, 8);
- assert_eq!(metadata.segment_row_count, 1);
- assert_eq!(metadata.metas.len(), 2);
- // tag0
- let tag0 = metadata.metas.get("tag0").unwrap();
- let stats0 = tag0.stats.as_ref().unwrap();
- assert_eq!(stats0.distinct_count, 3);
- assert_eq!(stats0.null_count, 1);
- assert_eq!(stats0.min_value, Bytes::from("a"));
- assert_eq!(stats0.max_value, Bytes::from("c"));
- let fst0 = cached_reader
- .fst(
- tag0.base_offset + tag0.relative_fst_offset as u64,
- tag0.fst_size,
- )
- .await
- .unwrap();
- assert_eq!(fst0.len(), 3);
- let [offset, size] = unpack(fst0.get(b"a").unwrap());
- let bitmap = cached_reader
- .bitmap(tag0.base_offset + offset as u64, size)
- .await
- .unwrap();
- assert_eq!(bitmap, BitVec::from_slice(&[0b0000_0001]));
- let [offset, size] = unpack(fst0.get(b"b").unwrap());
- let bitmap = cached_reader
- .bitmap(tag0.base_offset + offset as u64, size)
- .await
- .unwrap();
- assert_eq!(bitmap, BitVec::from_slice(&[0b0010_0000]));
- let [offset, size] = unpack(fst0.get(b"c").unwrap());
- let bitmap = cached_reader
- .bitmap(tag0.base_offset + offset as u64, size)
- .await
- .unwrap();
- assert_eq!(bitmap, BitVec::from_slice(&[0b0000_0001]));
-
- // tag1
- let tag1 = metadata.metas.get("tag1").unwrap();
- let stats1 = tag1.stats.as_ref().unwrap();
- assert_eq!(stats1.distinct_count, 3);
- assert_eq!(stats1.null_count, 1);
- assert_eq!(stats1.min_value, Bytes::from("x"));
- assert_eq!(stats1.max_value, Bytes::from("z"));
- let fst1 = cached_reader
- .fst(
- tag1.base_offset + tag1.relative_fst_offset as u64,
- tag1.fst_size,
- )
- .await
- .unwrap();
- assert_eq!(fst1.len(), 3);
- let [offset, size] = unpack(fst1.get(b"x").unwrap());
- let bitmap = cached_reader
- .bitmap(tag1.base_offset + offset as u64, size)
- .await
- .unwrap();
- assert_eq!(bitmap, BitVec::from_slice(&[0b0000_0001]));
- let [offset, size] = unpack(fst1.get(b"y").unwrap());
- let bitmap = cached_reader
- .bitmap(tag1.base_offset + offset as u64, size)
- .await
- .unwrap();
- assert_eq!(bitmap, BitVec::from_slice(&[0b0010_0000]));
- let [offset, size] = unpack(fst1.get(b"z").unwrap());
- let bitmap = cached_reader
- .bitmap(tag1.base_offset + offset as u64, size)
- .await
- .unwrap();
- assert_eq!(bitmap, BitVec::from_slice(&[0b0000_0001]));
-
- // fuzz test
- let mut rng = rand::thread_rng();
- for _ in 0..FUZZ_REPEAT_TIMES {
- let offset = rng.gen_range(0..file_size);
- let size = rng.gen_range(0..file_size as u32 - offset as u32);
- let expected = cached_reader.range_read(offset, size).await.unwrap();
- let read = cached_reader.get_or_load(offset, size).await.unwrap();
- assert_eq!(read, expected);
- }
- }
-}
diff --git a/src/mito2/src/cache/index/inverted_index.rs b/src/mito2/src/cache/index/inverted_index.rs
new file mode 100644
index 000000000000..3c399a37cc82
--- /dev/null
+++ b/src/mito2/src/cache/index/inverted_index.rs
@@ -0,0 +1,322 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::sync::Arc;
+
+use api::v1::index::InvertedIndexMetas;
+use async_trait::async_trait;
+use bytes::Bytes;
+use index::inverted_index::error::Result;
+use index::inverted_index::format::reader::InvertedIndexReader;
+use prost::Message;
+
+use crate::cache::index::{IndexCache, PageKey, INDEX_METADATA_TYPE};
+use crate::metrics::{CACHE_HIT, CACHE_MISS};
+use crate::sst::file::FileId;
+
+const INDEX_TYPE_INVERTED_INDEX: &str = "inverted_index";
+
+/// Cache for inverted index.
+pub type InvertedIndexCache = IndexCache<FileId, InvertedIndexMetas>;
+pub type InvertedIndexCacheRef = Arc<InvertedIndexCache>;
+
+impl InvertedIndexCache {
+ /// Creates a new inverted index cache.
+ pub fn new(index_metadata_cap: u64, index_content_cap: u64, page_size: u64) -> Self {
+ Self::new_with_weighter(
+ index_metadata_cap,
+ index_content_cap,
+ page_size,
+ INDEX_TYPE_INVERTED_INDEX,
+ inverted_index_metadata_weight,
+ inverted_index_content_weight,
+ )
+ }
+}
+
+/// Calculates weight for inverted index metadata.
+fn inverted_index_metadata_weight(k: &FileId, v: &Arc<InvertedIndexMetas>) -> u32 {
+ (k.as_bytes().len() + v.encoded_len()) as u32
+}
+
+/// Calculates weight for inverted index content.
+fn inverted_index_content_weight((k, _): &(FileId, PageKey), v: &Bytes) -> u32 {
+ (k.as_bytes().len() + v.len()) as u32
+}
+
+/// Inverted index blob reader with cache.
+pub struct CachedInvertedIndexBlobReader<R> {
+ file_id: FileId,
+ file_size: u64,
+ inner: R,
+ cache: InvertedIndexCacheRef,
+}
+
+impl<R> CachedInvertedIndexBlobReader<R> {
+ /// Creates a new inverted index blob reader with cache.
+ pub fn new(file_id: FileId, file_size: u64, inner: R, cache: InvertedIndexCacheRef) -> Self {
+ Self {
+ file_id,
+ file_size,
+ inner,
+ cache,
+ }
+ }
+}
+
+#[async_trait]
+impl<R: InvertedIndexReader> InvertedIndexReader for CachedInvertedIndexBlobReader<R> {
+ async fn range_read(&mut self, offset: u64, size: u32) -> Result<Vec<u8>> {
+ let inner = &mut self.inner;
+ self.cache
+ .get_or_load(
+ self.file_id,
+ self.file_size,
+ offset,
+ size,
+ move |ranges| async move { inner.read_vec(&ranges).await },
+ )
+ .await
+ }
+
+ async fn metadata(&mut self) -> Result<Arc<InvertedIndexMetas>> {
+ if let Some(cached) = self.cache.get_metadata(self.file_id) {
+ CACHE_HIT.with_label_values(&[INDEX_METADATA_TYPE]).inc();
+ Ok(cached)
+ } else {
+ let meta = self.inner.metadata().await?;
+ self.cache.put_metadata(self.file_id, meta.clone());
+ CACHE_MISS.with_label_values(&[INDEX_METADATA_TYPE]).inc();
+ Ok(meta)
+ }
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use std::num::NonZeroUsize;
+
+ use common_base::BitVec;
+ use futures::stream;
+ use index::inverted_index::format::reader::{InvertedIndexBlobReader, InvertedIndexReader};
+ use index::inverted_index::format::writer::{InvertedIndexBlobWriter, InvertedIndexWriter};
+ use index::inverted_index::Bytes;
+ use prometheus::register_int_counter_vec;
+ use rand::{Rng, RngCore};
+
+ use super::*;
+ use crate::sst::index::store::InstrumentedStore;
+ use crate::test_util::TestEnv;
+
+ // Repeat times for following little fuzz tests.
+ const FUZZ_REPEAT_TIMES: usize = 100;
+
+ // Fuzz test for index data page key
+ #[test]
+ fn fuzz_index_calculation() {
+ // randomly generate a large u8 array
+ let mut rng = rand::thread_rng();
+ let mut data = vec![0u8; 1024 * 1024];
+ rng.fill_bytes(&mut data);
+
+ for _ in 0..FUZZ_REPEAT_TIMES {
+ let offset = rng.gen_range(0..data.len() as u64);
+ let size = rng.gen_range(0..data.len() as u32 - offset as u32);
+ let page_size: usize = rng.gen_range(1..1024);
+
+ let indexes =
+ PageKey::generate_page_keys(offset, size, page_size as u64).collect::<Vec<_>>();
+ let page_num = indexes.len();
+ let mut read = Vec::with_capacity(size as usize);
+ for key in indexes.into_iter() {
+ let start = key.page_id as usize * page_size;
+ let page = if start + page_size < data.len() {
+ &data[start..start + page_size]
+ } else {
+ &data[start..]
+ };
+ read.extend_from_slice(page);
+ }
+ let expected_range = offset as usize..(offset + size as u64 as u64) as usize;
+ let read = read[PageKey::calculate_range(offset, size, page_size as u64)].to_vec();
+ if read != data.get(expected_range).unwrap() {
+ panic!(
+ "fuzz_read_index failed, offset: {}, size: {}, page_size: {}\nread len: {}, expected len: {}\nrange: {:?}, page num: {}",
+ offset, size, page_size, read.len(), size as usize,
+ PageKey::calculate_range(offset, size, page_size as u64),
+ page_num
+ );
+ }
+ }
+ }
+
+ fn unpack(fst_value: u64) -> [u32; 2] {
+ bytemuck::cast::<u64, [u32; 2]>(fst_value)
+ }
+
+ async fn create_inverted_index_blob() -> Vec<u8> {
+ let mut blob = Vec::new();
+ let mut writer = InvertedIndexBlobWriter::new(&mut blob);
+ writer
+ .add_index(
+ "tag0".to_string(),
+ BitVec::from_slice(&[0b0000_0001, 0b0000_0000]),
+ Box::new(stream::iter(vec![
+ Ok((Bytes::from("a"), BitVec::from_slice(&[0b0000_0001]))),
+ Ok((Bytes::from("b"), BitVec::from_slice(&[0b0010_0000]))),
+ Ok((Bytes::from("c"), BitVec::from_slice(&[0b0000_0001]))),
+ ])),
+ )
+ .await
+ .unwrap();
+ writer
+ .add_index(
+ "tag1".to_string(),
+ BitVec::from_slice(&[0b0000_0001, 0b0000_0000]),
+ Box::new(stream::iter(vec![
+ Ok((Bytes::from("x"), BitVec::from_slice(&[0b0000_0001]))),
+ Ok((Bytes::from("y"), BitVec::from_slice(&[0b0010_0000]))),
+ Ok((Bytes::from("z"), BitVec::from_slice(&[0b0000_0001]))),
+ ])),
+ )
+ .await
+ .unwrap();
+ writer
+ .finish(8, NonZeroUsize::new(1).unwrap())
+ .await
+ .unwrap();
+
+ blob
+ }
+
+ #[tokio::test]
+ async fn test_inverted_index_cache() {
+ let blob = create_inverted_index_blob().await;
+
+ // Init a test range reader in local fs.
+ let mut env = TestEnv::new();
+ let file_size = blob.len() as u64;
+ let store = env.init_object_store_manager();
+ let temp_path = "data";
+ store.write(temp_path, blob).await.unwrap();
+ let store = InstrumentedStore::new(store);
+ let metric =
+ register_int_counter_vec!("test_bytes", "a counter for test", &["test"]).unwrap();
+ let counter = metric.with_label_values(&["test"]);
+ let range_reader = store
+ .range_reader("data", &counter, &counter)
+ .await
+ .unwrap();
+
+ let reader = InvertedIndexBlobReader::new(range_reader);
+ let mut cached_reader = CachedInvertedIndexBlobReader::new(
+ FileId::random(),
+ file_size,
+ reader,
+ Arc::new(InvertedIndexCache::new(8192, 8192, 50)),
+ );
+ let metadata = cached_reader.metadata().await.unwrap();
+ assert_eq!(metadata.total_row_count, 8);
+ assert_eq!(metadata.segment_row_count, 1);
+ assert_eq!(metadata.metas.len(), 2);
+ // tag0
+ let tag0 = metadata.metas.get("tag0").unwrap();
+ let stats0 = tag0.stats.as_ref().unwrap();
+ assert_eq!(stats0.distinct_count, 3);
+ assert_eq!(stats0.null_count, 1);
+ assert_eq!(stats0.min_value, Bytes::from("a"));
+ assert_eq!(stats0.max_value, Bytes::from("c"));
+ let fst0 = cached_reader
+ .fst(
+ tag0.base_offset + tag0.relative_fst_offset as u64,
+ tag0.fst_size,
+ )
+ .await
+ .unwrap();
+ assert_eq!(fst0.len(), 3);
+ let [offset, size] = unpack(fst0.get(b"a").unwrap());
+ let bitmap = cached_reader
+ .bitmap(tag0.base_offset + offset as u64, size)
+ .await
+ .unwrap();
+ assert_eq!(bitmap, BitVec::from_slice(&[0b0000_0001]));
+ let [offset, size] = unpack(fst0.get(b"b").unwrap());
+ let bitmap = cached_reader
+ .bitmap(tag0.base_offset + offset as u64, size)
+ .await
+ .unwrap();
+ assert_eq!(bitmap, BitVec::from_slice(&[0b0010_0000]));
+ let [offset, size] = unpack(fst0.get(b"c").unwrap());
+ let bitmap = cached_reader
+ .bitmap(tag0.base_offset + offset as u64, size)
+ .await
+ .unwrap();
+ assert_eq!(bitmap, BitVec::from_slice(&[0b0000_0001]));
+
+ // tag1
+ let tag1 = metadata.metas.get("tag1").unwrap();
+ let stats1 = tag1.stats.as_ref().unwrap();
+ assert_eq!(stats1.distinct_count, 3);
+ assert_eq!(stats1.null_count, 1);
+ assert_eq!(stats1.min_value, Bytes::from("x"));
+ assert_eq!(stats1.max_value, Bytes::from("z"));
+ let fst1 = cached_reader
+ .fst(
+ tag1.base_offset + tag1.relative_fst_offset as u64,
+ tag1.fst_size,
+ )
+ .await
+ .unwrap();
+ assert_eq!(fst1.len(), 3);
+ let [offset, size] = unpack(fst1.get(b"x").unwrap());
+ let bitmap = cached_reader
+ .bitmap(tag1.base_offset + offset as u64, size)
+ .await
+ .unwrap();
+ assert_eq!(bitmap, BitVec::from_slice(&[0b0000_0001]));
+ let [offset, size] = unpack(fst1.get(b"y").unwrap());
+ let bitmap = cached_reader
+ .bitmap(tag1.base_offset + offset as u64, size)
+ .await
+ .unwrap();
+ assert_eq!(bitmap, BitVec::from_slice(&[0b0010_0000]));
+ let [offset, size] = unpack(fst1.get(b"z").unwrap());
+ let bitmap = cached_reader
+ .bitmap(tag1.base_offset + offset as u64, size)
+ .await
+ .unwrap();
+ assert_eq!(bitmap, BitVec::from_slice(&[0b0000_0001]));
+
+ // fuzz test
+ let mut rng = rand::thread_rng();
+ for _ in 0..FUZZ_REPEAT_TIMES {
+ let offset = rng.gen_range(0..file_size);
+ let size = rng.gen_range(0..file_size as u32 - offset as u32);
+ let expected = cached_reader.range_read(offset, size).await.unwrap();
+ let inner = &mut cached_reader.inner;
+ let read = cached_reader
+ .cache
+ .get_or_load(
+ cached_reader.file_id,
+ file_size,
+ offset,
+ size,
+ |ranges| async move { inner.read_vec(&ranges).await },
+ )
+ .await
+ .unwrap();
+ assert_eq!(read, expected);
+ }
+ }
+}
diff --git a/src/mito2/src/read/scan_region.rs b/src/mito2/src/read/scan_region.rs
index 091b9bc48c14..2dfa22f9f1c9 100644
--- a/src/mito2/src/read/scan_region.rs
+++ b/src/mito2/src/read/scan_region.rs
@@ -399,7 +399,7 @@ impl ScanRegion {
});
}
- /// Use the latest schema to build the inveretd index applier.
+ /// Use the latest schema to build the inverted index applier.
fn build_invereted_index_applier(&self) -> Option<InvertedIndexApplierRef> {
if self.ignore_inverted_index {
return None;
diff --git a/src/mito2/src/sst/index/inverted_index/applier.rs b/src/mito2/src/sst/index/inverted_index/applier.rs
index 0542fd7a59ea..6ad116dae035 100644
--- a/src/mito2/src/sst/index/inverted_index/applier.rs
+++ b/src/mito2/src/sst/index/inverted_index/applier.rs
@@ -29,7 +29,7 @@ use snafu::ResultExt;
use store_api::storage::RegionId;
use crate::cache::file_cache::{FileCacheRef, FileType, IndexKey};
-use crate::cache::index::{CachedInvertedIndexBlobReader, InvertedIndexCacheRef};
+use crate::cache::index::inverted_index::{CachedInvertedIndexBlobReader, InvertedIndexCacheRef};
use crate::error::{
ApplyInvertedIndexSnafu, MetadataSnafu, PuffinBuildReaderSnafu, PuffinReadBlobSnafu, Result,
};
diff --git a/src/mito2/src/sst/index/inverted_index/applier/builder.rs b/src/mito2/src/sst/index/inverted_index/applier/builder.rs
index 653679b9fca8..c2f90b293003 100644
--- a/src/mito2/src/sst/index/inverted_index/applier/builder.rs
+++ b/src/mito2/src/sst/index/inverted_index/applier/builder.rs
@@ -34,7 +34,7 @@ use store_api::metadata::RegionMetadata;
use store_api::storage::ColumnId;
use crate::cache::file_cache::FileCacheRef;
-use crate::cache::index::InvertedIndexCacheRef;
+use crate::cache::index::inverted_index::InvertedIndexCacheRef;
use crate::error::{BuildIndexApplierSnafu, ColumnNotFoundSnafu, ConvertValueSnafu, Result};
use crate::row_converter::SortField;
use crate::sst::index::inverted_index::applier::InvertedIndexApplier;
diff --git a/src/mito2/src/sst/index/inverted_index/creator.rs b/src/mito2/src/sst/index/inverted_index/creator.rs
index 15cba55c4437..0076322fccbd 100644
--- a/src/mito2/src/sst/index/inverted_index/creator.rs
+++ b/src/mito2/src/sst/index/inverted_index/creator.rs
@@ -316,7 +316,7 @@ mod tests {
use store_api::storage::RegionId;
use super::*;
- use crate::cache::index::InvertedIndexCache;
+ use crate::cache::index::inverted_index::InvertedIndexCache;
use crate::metrics::CACHE_BYTES;
use crate::read::BatchColumn;
use crate::row_converter::{McmpRowCodec, RowCodec, SortField};
diff --git a/src/mito2/src/test_util/memtable_util.rs b/src/mito2/src/test_util/memtable_util.rs
index 1a0eacecf823..72e32c3f0ae5 100644
--- a/src/mito2/src/test_util/memtable_util.rs
+++ b/src/mito2/src/test_util/memtable_util.rs
@@ -14,7 +14,6 @@
//! Memtable test utilities.
-use std::collections::BTreeMap;
use std::sync::Arc;
use api::helper::ColumnDataTypeWrapper;
@@ -34,8 +33,8 @@ use crate::error::Result;
use crate::memtable::key_values::KeyValue;
use crate::memtable::partition_tree::data::{timestamp_array_to_i64_slice, DataBatch, DataBuffer};
use crate::memtable::{
- BoxedBatchIterator, BulkPart, KeyValues, Memtable, MemtableBuilder, MemtableId, MemtableRange,
- MemtableRanges, MemtableRef, MemtableStats,
+ BoxedBatchIterator, BulkPart, KeyValues, Memtable, MemtableBuilder, MemtableId, MemtableRanges,
+ MemtableRef, MemtableStats,
};
use crate::row_converter::{McmpRowCodec, RowCodec, SortField};
type: feat
masked_commit_message: abstract `IndexCache` to be shared by multi types of indexes (#5219)
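Editor's note: the commit above generalizes the page cache into `IndexCache<K, M>`, whose `get_or_load` fetches missing byte ranges through a caller-supplied async closure, as the `CachedInvertedIndexBlobReader::range_read` hunk shows. Below is a self-contained toy reduction of that loader-closure pattern (a hypothetical `ToyCache` handling one page per call, with no weighers or metrics, and assuming a `tokio` runtime); it sketches the calling convention, it is not the commit's code.

use std::collections::HashMap;
use std::future::Future;
use std::ops::Range;

struct ToyCache {
    pages: HashMap<u64, Vec<u8>>, // page_id -> cached page bytes
    page_size: u64,
}

impl ToyCache {
    // Return the page at `page_id`, calling `load` for the byte range on a miss.
    async fn get_or_load<F, Fut, E>(&mut self, page_id: u64, load: F) -> Result<Vec<u8>, E>
    where
        F: FnOnce(Range<u64>) -> Fut,
        Fut: Future<Output = Result<Vec<u8>, E>>,
    {
        if let Some(page) = self.pages.get(&page_id) {
            return Ok(page.clone()); // cache hit
        }
        let start = page_id * self.page_size;
        let page = load(start..start + self.page_size).await?; // miss: caller does the I/O
        self.pages.insert(page_id, page.clone());
        Ok(page)
    }
}

#[tokio::main]
async fn main() {
    let backing = vec![7u8; 256]; // stands in for the index blob
    let mut cache = ToyCache { pages: HashMap::new(), page_size: 64 };

    // The caller supplies the read, much like the blob reader passes
    // `inner.read_vec(&ranges)` to the real cache.
    let page = cache
        .get_or_load(1, move |range| async move {
            Ok::<_, std::io::Error>(backing[range.start as usize..range.end as usize].to_vec())
        })
        .await
        .unwrap();
    assert_eq!(page, vec![7u8; 64]);
}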

hash: 830367b8f426e8b4b0acb1f909d39b7111375d5a
date: 2023-04-20 13:27:56
author: Yingwen
commit_message: feat: Drop table by procedure (#1401)
is_merge: false
git_diff:
diff --git a/src/datanode/src/sql/alter.rs b/src/datanode/src/sql/alter.rs
index caae6535d7b6..6ec8669d4642 100644
--- a/src/datanode/src/sql/alter.rs
+++ b/src/datanode/src/sql/alter.rs
@@ -221,7 +221,7 @@ mod tests {
}
#[tokio::test(flavor = "multi_thread")]
- async fn alter_table_by_procedure() {
+ async fn test_alter_table_by_procedure() {
let instance = MockInstance::with_procedure_enabled("alter_table_by_procedure").await;
// Create table first.
diff --git a/src/datanode/src/sql/drop_table.rs b/src/datanode/src/sql/drop_table.rs
index 5ad684a2452c..0a24d4cb9a67 100644
--- a/src/datanode/src/sql/drop_table.rs
+++ b/src/datanode/src/sql/drop_table.rs
@@ -15,17 +15,23 @@
use catalog::error::TableNotExistSnafu;
use catalog::DeregisterTableRequest;
use common_error::prelude::BoxedError;
+use common_procedure::{watcher, ProcedureManagerRef, ProcedureWithId};
use common_query::Output;
use common_telemetry::info;
use snafu::{OptionExt, ResultExt};
use table::engine::{EngineContext, TableReference};
use table::requests::DropTableRequest;
+use table_procedure::DropTableProcedure;
use crate::error::{self, Result};
use crate::sql::SqlHandler;
impl SqlHandler {
pub async fn drop_table(&self, req: DropTableRequest) -> Result<Output> {
+ if let Some(procedure_manager) = &self.procedure_manager {
+ return self.drop_table_by_procedure(procedure_manager, req).await;
+ }
+
let deregister_table_req = DeregisterTableRequest {
catalog: req.catalog_name.clone(),
schema: req.schema_name.clone(),
@@ -76,4 +82,80 @@ impl SqlHandler {
Ok(Output::AffectedRows(1))
}
+
+ pub(crate) async fn drop_table_by_procedure(
+ &self,
+ procedure_manager: &ProcedureManagerRef,
+ req: DropTableRequest,
+ ) -> Result<Output> {
+ let table_name = req.table_name.clone();
+ let procedure = DropTableProcedure::new(
+ req,
+ self.catalog_manager.clone(),
+ self.engine_procedure.clone(),
+ );
+
+ let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));
+ let procedure_id = procedure_with_id.id;
+
+ info!("Drop table {} by procedure {}", table_name, procedure_id);
+
+ let mut watcher = procedure_manager
+ .submit(procedure_with_id)
+ .await
+ .context(error::SubmitProcedureSnafu { procedure_id })?;
+
+ watcher::wait(&mut watcher)
+ .await
+ .context(error::WaitProcedureSnafu { procedure_id })?;
+
+ Ok(Output::AffectedRows(1))
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use query::parser::{QueryLanguageParser, QueryStatement};
+ use query::query_engine::SqlStatementExecutor;
+ use session::context::QueryContext;
+
+ use super::*;
+ use crate::tests::test_util::MockInstance;
+
+ #[tokio::test(flavor = "multi_thread")]
+ async fn test_drop_table_by_procedure() {
+ let instance = MockInstance::with_procedure_enabled("alter_table_by_procedure").await;
+
+ // Create table first.
+ let sql = r#"create table test_drop(
+ host string,
+ ts timestamp,
+ cpu double default 0,
+ TIME INDEX (ts),
+ PRIMARY KEY(host)
+ ) engine=mito with(regions=1);"#;
+ let stmt = match QueryLanguageParser::parse_sql(sql).unwrap() {
+ QueryStatement::Sql(sql) => sql,
+ _ => unreachable!(),
+ };
+ let output = instance
+ .inner()
+ .execute_sql(stmt, QueryContext::arc())
+ .await
+ .unwrap();
+ assert!(matches!(output, Output::AffectedRows(0)));
+
+ // Drop table.
+ let sql = r#"drop table test_drop"#;
+ let stmt = match QueryLanguageParser::parse_sql(sql).unwrap() {
+ QueryStatement::Sql(sql) => sql,
+ _ => unreachable!(),
+ };
+ let output = instance
+ .inner()
+ .execute_sql(stmt, QueryContext::arc())
+ .await
+ .unwrap();
+ assert!(matches!(output, Output::AffectedRows(1)));
+ }
}
diff --git a/src/table-procedure/src/alter.rs b/src/table-procedure/src/alter.rs
index 81cf202edd7d..f6b772a357bb 100644
--- a/src/table-procedure/src/alter.rs
+++ b/src/table-procedure/src/alter.rs
@@ -32,6 +32,7 @@ use crate::error::{
SerializeProcedureSnafu, SubprocedureFailedSnafu, TableExistsSnafu, TableNotFoundSnafu,
};
+/// Procedure to alter a table.
pub struct AlterTableProcedure {
data: AlterTableData,
catalog_manager: CatalogManagerRef,
@@ -88,8 +89,8 @@ impl AlterTableProcedure {
data: AlterTableData {
state: AlterTableState::Prepare,
request,
- subprocedure_id: None,
table_id: None,
+ subprocedure_id: None,
},
catalog_manager,
engine_procedure,
@@ -137,13 +138,13 @@ impl AlterTableProcedure {
.catalog_manager
.catalog(&self.data.request.catalog_name)
.context(AccessCatalogSnafu)?
- .with_context(|| CatalogNotFoundSnafu {
+ .context(CatalogNotFoundSnafu {
name: &self.data.request.catalog_name,
})?;
let schema = catalog
.schema(&self.data.request.schema_name)
.context(AccessCatalogSnafu)?
- .with_context(|| SchemaNotFoundSnafu {
+ .context(SchemaNotFoundSnafu {
name: &self.data.request.schema_name,
})?;
@@ -276,7 +277,7 @@ struct AlterTableData {
///
/// Available after [AlterTableState::Prepare] state.
table_id: Option<TableId>,
- /// Id of the subprocedure to create this table in the engine.
+ /// Id of the subprocedure to alter this table in the engine.
///
/// This id is `Some` while the procedure is in [AlterTableState::EngineAlterTable]
/// state.
diff --git a/src/table-procedure/src/drop.rs b/src/table-procedure/src/drop.rs
new file mode 100644
index 000000000000..252a136a427d
--- /dev/null
+++ b/src/table-procedure/src/drop.rs
@@ -0,0 +1,309 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Procedure to drop a table.
+
+use async_trait::async_trait;
+use catalog::{CatalogManagerRef, DeregisterTableRequest};
+use common_procedure::{
+ Context, Error, LockKey, Procedure, ProcedureId, ProcedureManager, ProcedureState,
+ ProcedureWithId, Result, Status,
+};
+use common_telemetry::logging;
+use serde::{Deserialize, Serialize};
+use snafu::{OptionExt, ResultExt};
+use table::engine::{EngineContext, TableEngineProcedureRef, TableReference};
+use table::requests::DropTableRequest;
+
+use crate::error::{
+ AccessCatalogSnafu, DeserializeProcedureSnafu, SerializeProcedureSnafu,
+ SubprocedureFailedSnafu, TableNotFoundSnafu,
+};
+
+/// Procedure to drop a table.
+pub struct DropTableProcedure {
+ data: DropTableData,
+ catalog_manager: CatalogManagerRef,
+ engine_procedure: TableEngineProcedureRef,
+}
+
+#[async_trait]
+impl Procedure for DropTableProcedure {
+ fn type_name(&self) -> &str {
+ Self::TYPE_NAME
+ }
+
+ async fn execute(&mut self, ctx: &Context) -> Result<Status> {
+ match self.data.state {
+ DropTableState::Prepare => self.on_prepare().await,
+ DropTableState::RemoveFromCatalog => self.on_remove_from_catalog().await,
+ DropTableState::EngineDropTable => self.on_engine_drop_table(ctx).await,
+ }
+ }
+
+ fn dump(&self) -> Result<String> {
+ let json = serde_json::to_string(&self.data).context(SerializeProcedureSnafu)?;
+ Ok(json)
+ }
+
+ fn lock_key(&self) -> LockKey {
+ // We lock the whole table.
+ let table_name = self.data.table_ref().to_string();
+ LockKey::single(table_name)
+ }
+}
+
+impl DropTableProcedure {
+ const TYPE_NAME: &str = "table-procedure::DropTableProcedure";
+
+ /// Returns a new [DropTableProcedure].
+ pub fn new(
+ request: DropTableRequest,
+ catalog_manager: CatalogManagerRef,
+ engine_procedure: TableEngineProcedureRef,
+ ) -> DropTableProcedure {
+ DropTableProcedure {
+ data: DropTableData {
+ state: DropTableState::Prepare,
+ request,
+ subprocedure_id: None,
+ },
+ catalog_manager,
+ engine_procedure,
+ }
+ }
+
+ /// Register the loader of this procedure to the `procedure_manager`.
+ ///
+ /// # Panics
+ /// Panics on error.
+ pub fn register_loader(
+ catalog_manager: CatalogManagerRef,
+ engine_procedure: TableEngineProcedureRef,
+ procedure_manager: &dyn ProcedureManager,
+ ) {
+ procedure_manager
+ .register_loader(
+ Self::TYPE_NAME,
+ Box::new(move |data| {
+ Self::from_json(data, catalog_manager.clone(), engine_procedure.clone())
+ .map(|p| Box::new(p) as _)
+ }),
+ )
+ .unwrap()
+ }
+
+ /// Recover the procedure from json.
+ fn from_json(
+ json: &str,
+ catalog_manager: CatalogManagerRef,
+ engine_procedure: TableEngineProcedureRef,
+ ) -> Result<Self> {
+ let data: DropTableData = serde_json::from_str(json).context(DeserializeProcedureSnafu)?;
+
+ Ok(DropTableProcedure {
+ data,
+ catalog_manager,
+ engine_procedure,
+ })
+ }
+
+ async fn on_prepare(&mut self) -> Result<Status> {
+ let request = &self.data.request;
+ // Ensure the table exists.
+ self.catalog_manager
+ .table(
+ &request.catalog_name,
+ &request.schema_name,
+ &request.table_name,
+ )
+ .await
+ .context(AccessCatalogSnafu)?
+ .context(TableNotFoundSnafu {
+ name: &request.table_name,
+ })?;
+
+ self.data.state = DropTableState::RemoveFromCatalog;
+
+ Ok(Status::executing(true))
+ }
+
+ async fn on_remove_from_catalog(&mut self) -> Result<Status> {
+ let request = &self.data.request;
+ let has_table = self
+ .catalog_manager
+ .table(
+ &request.catalog_name,
+ &request.schema_name,
+ &request.table_name,
+ )
+ .await
+ .context(AccessCatalogSnafu)?
+ .is_some();
+ if has_table {
+ // The table is still in the catalog.
+ let deregister_table_req = DeregisterTableRequest {
+ catalog: self.data.request.catalog_name.clone(),
+ schema: self.data.request.schema_name.clone(),
+ table_name: self.data.request.table_name.clone(),
+ };
+ self.catalog_manager
+ .deregister_table(deregister_table_req)
+ .await
+ .context(AccessCatalogSnafu)?;
+ }
+
+ self.data.state = DropTableState::EngineDropTable;
+ // Assign procedure id to the subprocedure.
+ self.data.subprocedure_id = Some(ProcedureId::random());
+
+ Ok(Status::executing(true))
+ }
+
+ async fn on_engine_drop_table(&mut self, ctx: &Context) -> Result<Status> {
+ // Safety: subprocedure id is always set in this state.
+ let sub_id = self.data.subprocedure_id.unwrap();
+
+ // Query subprocedure state.
+ let Some(sub_state) = ctx.provider.procedure_state(sub_id).await? else {
+ logging::info!(
+ "On engine drop table {}, subprocedure not found, sub_id: {}",
+ self.data.request.table_name,
+ sub_id
+ );
+
+ // If the subprocedure is not found, we create a new subprocedure with the same id.
+ let engine_ctx = EngineContext::default();
+ let procedure = self
+ .engine_procedure
+ .drop_table_procedure(&engine_ctx, self.data.request.clone())
+ .map_err(Error::from_error_ext)?;
+ return Ok(Status::Suspended {
+ subprocedures: vec![ProcedureWithId {
+ id: sub_id,
+ procedure,
+ }],
+ persist: true,
+ });
+ };
+
+ match sub_state {
+ ProcedureState::Running | ProcedureState::Retrying { .. } => Ok(Status::Suspended {
+ subprocedures: Vec::new(),
+ persist: false,
+ }),
+ ProcedureState::Done => {
+ logging::info!(
+ "On engine drop table {}, done, sub_id: {}",
+ self.data.request.table_name,
+ sub_id
+ );
+
+ Ok(Status::Done)
+ }
+ ProcedureState::Failed { .. } => {
+ // Return error if the subprocedure is failed.
+ SubprocedureFailedSnafu {
+ subprocedure_id: sub_id,
+ }
+ .fail()?
+ }
+ }
+ }
+}
+
+/// Represents each step while dropping a table in the datanode.
+#[derive(Debug, Serialize, Deserialize)]
+enum DropTableState {
+ /// Validate request and prepare to drop table.
+ Prepare,
+ /// Remove the table from the catalog.
+ RemoveFromCatalog,
+ /// Drop table in the table engine.
+ EngineDropTable,
+}
+
+/// Serializable data of [DropTableProcedure].
+#[derive(Debug, Serialize, Deserialize)]
+struct DropTableData {
+ /// Current state.
+ state: DropTableState,
+ /// Request to drop this table.
+ request: DropTableRequest,
+ /// Id of the subprocedure to drop this table from the engine.
+ ///
+ /// This id is `Some` while the procedure is in [DropTableState::EngineDropTable]
+ /// state.
+ subprocedure_id: Option<ProcedureId>,
+}
+
+impl DropTableData {
+ fn table_ref(&self) -> TableReference {
+ TableReference {
+ catalog: &self.request.catalog_name,
+ schema: &self.request.schema_name,
+ table: &self.request.table_name,
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
+ use table::engine::TableEngine;
+
+ use super::*;
+ use crate::test_util::TestEnv;
+
+ #[tokio::test]
+ async fn test_drop_table_procedure() {
+ let env = TestEnv::new("drop");
+ let table_name = "test_drop";
+ env.create_table(table_name).await;
+
+ let request = DropTableRequest {
+ catalog_name: DEFAULT_CATALOG_NAME.to_string(),
+ schema_name: DEFAULT_SCHEMA_NAME.to_string(),
+ table_name: table_name.to_string(),
+ };
+ let TestEnv {
+ dir: _dir,
+ table_engine,
+ procedure_manager,
+ catalog_manager,
+ } = env;
+ let procedure =
+ DropTableProcedure::new(request, catalog_manager.clone(), table_engine.clone());
+ let procedure_with_id = ProcedureWithId::with_random_id(Box::new(procedure));
+
+ let mut watcher = procedure_manager.submit(procedure_with_id).await.unwrap();
+ watcher.changed().await.unwrap();
+
+ let catalog = catalog_manager
+ .catalog(DEFAULT_CATALOG_NAME)
+ .unwrap()
+ .unwrap();
+ let schema = catalog.schema(DEFAULT_SCHEMA_NAME).unwrap().unwrap();
+ assert!(schema.table(table_name).await.unwrap().is_none());
+ let ctx = EngineContext::default();
+ assert!(!table_engine.table_exists(
+ &ctx,
+ &TableReference {
+ catalog: DEFAULT_CATALOG_NAME,
+ schema: DEFAULT_SCHEMA_NAME,
+ table: table_name,
+ }
+ ));
+ }
+}
diff --git a/src/table-procedure/src/lib.rs b/src/table-procedure/src/lib.rs
index 7c1b7eaeacf9..ae6713b0b00f 100644
--- a/src/table-procedure/src/lib.rs
+++ b/src/table-procedure/src/lib.rs
@@ -16,6 +16,7 @@
mod alter;
mod create;
+mod drop;
pub mod error;
#[cfg(test)]
mod test_util;
@@ -24,6 +25,7 @@ pub use alter::AlterTableProcedure;
use catalog::CatalogManagerRef;
use common_procedure::ProcedureManager;
pub use create::CreateTableProcedure;
+pub use drop::DropTableProcedure;
use table::engine::{TableEngineProcedureRef, TableEngineRef};
/// Register all procedure loaders to the procedure manager.
@@ -42,5 +44,10 @@ pub fn register_procedure_loaders(
table_engine,
procedure_manager,
);
- AlterTableProcedure::register_loader(catalog_manager, engine_procedure, procedure_manager);
+ AlterTableProcedure::register_loader(
+ catalog_manager.clone(),
+ engine_procedure.clone(),
+ procedure_manager,
+ );
+ DropTableProcedure::register_loader(catalog_manager, engine_procedure, procedure_manager);
}
|
feat
|
Drop table by procedure (#1401)
|
8e7e68708f97bd698fb4e5eb867489c7bf2f2751
|
2023-03-01 14:29:11
|
Ning Sun
|
docs: correct readme format (#1105)
| false
|
diff --git a/.github/pr-title-breaking-change-lable-config.json b/.github/pr-title-breaking-change-label-config.json
similarity index 100%
rename from .github/pr-title-breaking-change-lable-config.json
rename to .github/pr-title-breaking-change-label-config.json
diff --git a/README.md b/README.md
index d258339d225c..16cc9ead4f84 100644
--- a/README.md
+++ b/README.md
@@ -61,11 +61,12 @@ To compile GreptimeDB from source, you'll need:
find an installation instructions [here](https://grpc.io/docs/protoc-installation/).
**Note that `protoc` version needs to be >= 3.15** because we have used the `optional`
keyword. You can check it with `protoc --version`.
-- python3-dev or python3-devel(Optional, only needed if you want to run scripts in cpython): this install a Python shared library required for running python scripting engine(In CPython Mode).
- This is available as `python3-dev` on ubuntu, you can install it with `sudo apt install python3-dev`, or `python3-devel` on RPM based distributions (e.g. Fedora, Red Hat, SuSE), Mac 's Python3 package should have this shared library by default.
-Then, you can build GreptimeDB from source code:
-
-```
+- python3-dev or python3-devel (optional, only needed if you want to run scripts
+  in CPython): this installs the Python shared library required for running the
+  Python scripting engine (in CPython mode). It is available as `python3-dev` on
+  Ubuntu, where you can install it with `sudo apt install python3-dev`, or as
+  `python3-devel` on RPM-based distributions (e.g. Fedora, Red Hat, SuSE). macOS's
+  `Python3` package should have this shared library by default.
#### Build with Docker
diff --git a/src/script/src/python/ffi_types/vector/tests.rs b/src/script/src/python/ffi_types/vector/tests.rs
index 66007b5b53fb..e43380e04ffe 100644
--- a/src/script/src/python/ffi_types/vector/tests.rs
+++ b/src/script/src/python/ffi_types/vector/tests.rs
@@ -57,8 +57,8 @@ fn test_eval_py_vector_in_pairs() {
fn sample_py_vector() -> HashMap<String, VectorRef> {
let b1 = Arc::new(BooleanVector::from_slice(&[false, false, true, true])) as VectorRef;
let b2 = Arc::new(BooleanVector::from_slice(&[false, true, false, true])) as VectorRef;
- let f1 = Arc::new(Float64Vector::from_slice(&[0.0f64, 2.0, 10.0, 42.0])) as VectorRef;
- let f2 = Arc::new(Float64Vector::from_slice(&[-0.1f64, -42.0, 2., 7.0])) as VectorRef;
+ let f1 = Arc::new(Float64Vector::from_slice([0.0f64, 2.0, 10.0, 42.0])) as VectorRef;
+ let f2 = Arc::new(Float64Vector::from_slice([-0.1f64, -42.0, 2., 7.0])) as VectorRef;
HashMap::from([
("b1".to_owned(), b1),
("b2".to_owned(), b2),
@@ -85,24 +85,20 @@ fn get_test_cases() -> Vec<TestCase> {
},
TestCase {
eval: "f1+f2".to_string(),
- result: Arc::new(Float64Vector::from_slice(&[-0.1f64, -40.0, 12., 49.0])) as VectorRef,
+ result: Arc::new(Float64Vector::from_slice([-0.1f64, -40.0, 12., 49.0])) as VectorRef,
},
TestCase {
eval: "f1-f2".to_string(),
- result: Arc::new(Float64Vector::from_slice(&[0.1f64, 44.0, 8., 35.0])) as VectorRef,
+ result: Arc::new(Float64Vector::from_slice([0.1f64, 44.0, 8., 35.0])) as VectorRef,
},
TestCase {
eval: "f1*f2".to_string(),
- result: Arc::new(Float64Vector::from_slice(&[
- -0.0f64,
- -84.0,
- 20.,
- 42.0 * 7.0,
- ])) as VectorRef,
+ result: Arc::new(Float64Vector::from_slice([-0.0f64, -84.0, 20., 42.0 * 7.0]))
+ as VectorRef,
},
TestCase {
eval: "f1/f2".to_string(),
- result: Arc::new(Float64Vector::from_slice(&[
+ result: Arc::new(Float64Vector::from_slice([
0.0 / -0.1f64,
2. / -42.,
10. / 2.,
|
docs
|
correct readme format (#1105)
|