hash (string, 40 chars) | date (timestamp, 2022-04-19 15:26:27 to 2025-03-21 10:49:23) | author (string, 86 distinct values) | commit_message (string, 12 to 115 chars) | is_merge (bool, 1 value) | git_diff (string, 214 to 553k chars, nullable) | type (string, 15 distinct values) | masked_commit_message (string, 8 to 110 chars)
|---|---|---|---|---|---|---|---|
d52d1eb12217f01e8a46196be1bba3edf491a7ca
|
2022-09-09 15:18:59
|
evenyag
|
fix: Only convert LogicalTypeId to ConcreteDataType in tests (#241)
| false
|
diff --git a/src/common/function/src/scalars/aggregate/diff.rs b/src/common/function/src/scalars/aggregate/diff.rs
index de23c3945717..e414434a9ccf 100644
--- a/src/common/function/src/scalars/aggregate/diff.rs
+++ b/src/common/function/src/scalars/aggregate/diff.rs
@@ -168,7 +168,7 @@ impl AggregateFunctionCreator for DiffAccumulatorCreator {
with_match_primitive_type_id!(
input_types[0].logical_type_id(),
|$S| {
- Ok(ConcreteDataType::list_datatype(PrimitiveType::<<$S as Primitive>::LargestType>::default().logical_type_id().data_type()))
+ Ok(ConcreteDataType::list_datatype(PrimitiveType::<<$S as Primitive>::LargestType>::default().into()))
},
{
unreachable!()
@@ -182,7 +182,7 @@ impl AggregateFunctionCreator for DiffAccumulatorCreator {
with_match_primitive_type_id!(
input_types[0].logical_type_id(),
|$S| {
- Ok(vec![ConcreteDataType::list_datatype(PrimitiveType::<$S>::default().logical_type_id().data_type())])
+ Ok(vec![ConcreteDataType::list_datatype(PrimitiveType::<$S>::default().into())])
},
{
unreachable!()
diff --git a/src/common/function/src/scalars/aggregate/polyval.rs b/src/common/function/src/scalars/aggregate/polyval.rs
index 6f0eef499238..d7b37ecbb4a9 100644
--- a/src/common/function/src/scalars/aggregate/polyval.rs
+++ b/src/common/function/src/scalars/aggregate/polyval.rs
@@ -237,7 +237,7 @@ impl AggregateFunctionCreator for PolyvalAccumulatorCreator {
with_match_primitive_type_id!(
input_type,
|$S| {
- Ok(PrimitiveType::<<$S as Primitive>::LargestType>::default().logical_type_id().data_type())
+ Ok(PrimitiveType::<<$S as Primitive>::LargestType>::default().into())
},
{
unreachable!()
diff --git a/src/datatypes/Cargo.toml b/src/datatypes/Cargo.toml
index 39ecbf682b11..a022b1c6c5cd 100644
--- a/src/datatypes/Cargo.toml
+++ b/src/datatypes/Cargo.toml
@@ -3,6 +3,10 @@ name = "datatypes"
version = "0.1.0"
edition = "2021"
+[features]
+default = []
+test = []
+
[dependencies.arrow]
package = "arrow2"
version = "0.10"
diff --git a/src/datatypes/src/type_id.rs b/src/datatypes/src/type_id.rs
index 5bd4e17463ac..28a81d13f3ec 100644
--- a/src/datatypes/src/type_id.rs
+++ b/src/datatypes/src/type_id.rs
@@ -36,8 +36,12 @@ pub enum LogicalTypeId {
}
impl LogicalTypeId {
+ /// Create ConcreteDataType based on this id. This method is for test only as it
+ /// would lost some info.
+ ///
/// # Panics
/// Panics if data type is not supported.
+ #[cfg(any(test, feature = "test"))]
pub fn data_type(&self) -> ConcreteDataType {
match self {
LogicalTypeId::Null => ConcreteDataType::null_datatype(),
diff --git a/src/storage/Cargo.toml b/src/storage/Cargo.toml
index e61552e195f2..c80e6abef2b7 100644
--- a/src/storage/Cargo.toml
+++ b/src/storage/Cargo.toml
@@ -37,6 +37,7 @@ uuid = { version = "1.1" , features=["v4"]}
[dev-dependencies]
atomic_float="0.1"
criterion = "0.3"
+datatypes = { path = "../datatypes", features = ["test"] }
rand = "0.8"
tempdir = "0.3"
diff --git a/src/table-engine/Cargo.toml b/src/table-engine/Cargo.toml
index 5469e76ce8d4..635a1151bc18 100644
--- a/src/table-engine/Cargo.toml
+++ b/src/table-engine/Cargo.toml
@@ -32,6 +32,5 @@ tempdir = { version = "0.3", optional = true }
tokio = { version = "1.0", features = ["full"] }
[dev-dependencies]
-datatypes = { path = "../datatypes" }
tempdir = { version = "0.3" }
tokio = { version = "1.18", features = ["full"] }
|
fix
|
Only convert LogicalTypeId to ConcreteDataType in tests (#241)
|
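The pattern in the commit above is a Cargo feature used to expose test-only helpers across crates: the `datatypes` crate declares an empty `test` feature, gates the lossy `LogicalTypeId::data_type` conversion behind `#[cfg(any(test, feature = "test"))]`, and dependent crates enable the feature only in `[dev-dependencies]`. Below is a minimal, self-contained sketch of that gating; the crate layout and function names are illustrative, not taken from the diff.

```rust
// Cargo.toml of the library crate (illustrative):
//
// [features]
// default = []
// test = []

/// Test-only helper: compiled for this crate's own unit tests (`cfg(test)`) or
/// when a dependent crate opts in via `features = ["test"]` in dev-dependencies.
#[cfg(any(test, feature = "test"))]
pub fn make_fixture_value() -> u64 {
    42
}

/// Ordinary API, always compiled.
pub fn double(x: u64) -> u64 {
    x * 2
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn doubles_the_fixture() {
        // Inside the crate's own tests, `cfg(test)` already enables the helper.
        assert_eq!(double(make_fixture_value()), 84);
    }
}
```

Downstream crates that need the helper in their tests would depend on the library with `features = ["test"]` under `[dev-dependencies]`, so the helper never reaches release builds.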
b91c77b86237295e086fe2ebd5edc6f28332f685
|
2023-01-12 07:50:18
|
shuiyisong
|
chore: add path check to http auth (#866)
| false
|
diff --git a/src/servers/src/http.rs b/src/servers/src/http.rs
index 2dd64d760a85..42dc29eb1a8b 100644
--- a/src/servers/src/http.rs
+++ b/src/servers/src/http.rs
@@ -93,6 +93,7 @@ pub(crate) fn query_context_from_db(
}
const HTTP_API_VERSION: &str = "v1";
+const HTTP_API_PREFIX: &str = "/v1/";
pub struct HttpServer {
sql_handler: SqlQueryHandlerRef,
diff --git a/src/servers/src/http/authorize.rs b/src/servers/src/http/authorize.rs
index 08353d280770..05acc538f380 100644
--- a/src/servers/src/http/authorize.rs
+++ b/src/servers/src/http/authorize.rs
@@ -25,6 +25,7 @@ use tower_http::auth::AsyncAuthorizeRequest;
use crate::auth::{Identity, UserProviderRef};
use crate::error::{self, Result};
+use crate::http::HTTP_API_PREFIX;
pub struct HttpAuth<RespBody> {
user_provider: Option<UserProviderRef>,
@@ -61,7 +62,8 @@ where
fn authorize(&mut self, mut request: Request<B>) -> Self::Future {
let user_provider = self.user_provider.clone();
Box::pin(async move {
- let user_provider = if let Some(user_provider) = &user_provider {
+ let need_auth = request.uri().path().starts_with(HTTP_API_PREFIX);
+ let user_provider = if let Some(user_provider) = user_provider.filter(|_| need_auth) {
user_provider
} else {
request.extensions_mut().insert(UserInfo::default());
@@ -192,7 +194,7 @@ mod tests {
};
// base64encode("username:password") == "dXNlcm5hbWU6cGFzc3dvcmQ="
- let req = mock_http_request("Basic dXNlcm5hbWU6cGFzc3dvcmQ=").unwrap();
+ let req = mock_http_request(Some("Basic dXNlcm5hbWU6cGFzc3dvcmQ="), None).unwrap();
let auth_res = http_auth.authorize(req).await.unwrap();
let user_info: &UserInfo = auth_res.extensions().get().unwrap();
let default = UserInfo::default();
@@ -206,22 +208,43 @@ mod tests {
};
// base64encode("greptime:greptime") == "Z3JlcHRpbWU6Z3JlcHRpbWU="
- let req = mock_http_request("Basic Z3JlcHRpbWU6Z3JlcHRpbWU=").unwrap();
+ let req = mock_http_request(Some("Basic Z3JlcHRpbWU6Z3JlcHRpbWU="), None).unwrap();
let req = http_auth.authorize(req).await.unwrap();
let user_info: &UserInfo = req.extensions().get().unwrap();
let default = UserInfo::default();
assert_eq!(default.username(), user_info.username());
- let req = mock_http_request_no_auth().unwrap();
+ let req = mock_http_request(None, None).unwrap();
let auth_res = http_auth.authorize(req).await;
assert!(auth_res.is_err());
// base64encode("username:password") == "dXNlcm5hbWU6cGFzc3dvcmQ="
- let wrong_req = mock_http_request("Basic dXNlcm5hbWU6cGFzc3dvcmQ=").unwrap();
+ let wrong_req = mock_http_request(Some("Basic dXNlcm5hbWU6cGFzc3dvcmQ="), None).unwrap();
let auth_res = http_auth.authorize(wrong_req).await;
assert!(auth_res.is_err());
}
+ #[tokio::test]
+ async fn test_whitelist_no_auth() {
+ // In mock user provider, right username:password == "greptime:greptime"
+ let mock_user_provider = Some(Arc::new(MockUserProvider {}) as Arc<dyn UserProvider>);
+ let mut http_auth: HttpAuth<BoxBody> = HttpAuth {
+ user_provider: mock_user_provider,
+ _ty: PhantomData,
+ };
+
+ // base64encode("greptime:greptime") == "Z3JlcHRpbWU6Z3JlcHRpbWU="
+ // try auth path first
+ let req = mock_http_request(None, None).unwrap();
+ let req = http_auth.authorize(req).await;
+ assert!(req.is_err());
+
+ // try whitelist path
+ let req = mock_http_request(None, Some("http://localhost/health")).unwrap();
+ let req = http_auth.authorize(req).await;
+ assert!(req.is_ok());
+ }
+
#[test]
fn test_decode_basic() {
// base64encode("username:password") == "dXNlcm5hbWU6cGFzc3dvcmQ="
@@ -249,36 +272,32 @@ mod tests {
#[test]
fn test_auth_header() {
// base64encode("username:password") == "dXNlcm5hbWU6cGFzc3dvcmQ="
- let req = mock_http_request("Basic dXNlcm5hbWU6cGFzc3dvcmQ=").unwrap();
+ let req = mock_http_request(Some("Basic dXNlcm5hbWU6cGFzc3dvcmQ="), None).unwrap();
let (auth_scheme, credential) = auth_header(&req).unwrap();
matches!(auth_scheme, AuthScheme::Basic);
assert_eq!("dXNlcm5hbWU6cGFzc3dvcmQ=", credential);
- let wrong_req = mock_http_request("Basic dXNlcm5hbWU6 cGFzc3dvcmQ=").unwrap();
+ let wrong_req = mock_http_request(Some("Basic dXNlcm5hbWU6 cGFzc3dvcmQ="), None).unwrap();
let res = auth_header(&wrong_req);
matches!(
res.err(),
Some(error::Error::InvalidAuthorizationHeader { .. })
);
- let wrong_req = mock_http_request("Digest dXNlcm5hbWU6cGFzc3dvcmQ=").unwrap();
+ let wrong_req = mock_http_request(Some("Digest dXNlcm5hbWU6cGFzc3dvcmQ="), None).unwrap();
let res = auth_header(&wrong_req);
matches!(res.err(), Some(error::Error::UnsupportedAuthScheme { .. }));
}
- fn mock_http_request(auth_header: &str) -> Result<Request<()>> {
- Ok(Request::builder()
- .uri("https://www.rust-lang.org/")
- .header(http::header::AUTHORIZATION, auth_header)
- .body(())
- .unwrap())
- }
+ fn mock_http_request(auth_header: Option<&str>, uri: Option<&str>) -> Result<Request<()>> {
+ let http_api_version = crate::http::HTTP_API_VERSION;
+ let mut req = Request::builder()
+ .uri(uri.unwrap_or(format!("http://localhost/{http_api_version}/sql").as_str()));
+ if let Some(auth_header) = auth_header {
+ req = req.header(http::header::AUTHORIZATION, auth_header);
+ }
- fn mock_http_request_no_auth() -> Result<Request<()>> {
- Ok(Request::builder()
- .uri("https://www.rust-lang.org/")
- .body(())
- .unwrap())
+ Ok(req.body(()).unwrap())
}
}
|
chore
|
add path check to http auth (#866)
|
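The core of the commit above is a path-prefix check that decides whether HTTP basic auth runs at all: requests under `/v1/` go through the user provider, while other paths such as `/health` fall through to a default user. A minimal sketch of that check, built on the `http` crate alone; the tower middleware plumbing from the diff is omitted and the helper name is illustrative.

```rust
use http::Request;

/// Prefix under which authenticated API endpoints live (mirrors HTTP_API_PREFIX
/// in the diff above); everything outside it, e.g. /health, bypasses auth.
const API_PREFIX: &str = "/v1/";

/// Simplified version of the check the commit adds before consulting the
/// user provider.
fn needs_auth<B>(request: &Request<B>) -> bool {
    request.uri().path().starts_with(API_PREFIX)
}

fn main() {
    let api_req = Request::builder()
        .uri("http://localhost/v1/sql")
        .body(())
        .unwrap();
    let health_req = Request::builder()
        .uri("http://localhost/health")
        .body(())
        .unwrap();

    assert!(needs_auth(&api_req));     // /v1/... must authenticate
    assert!(!needs_auth(&health_req)); // /health is whitelisted
    println!("path-prefix auth check behaves as expected");
}
```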
c56106b883b4a0f483d51ae80eea9e3a2d3fa664
|
2025-02-14 13:41:48
|
Weny Xu
|
perf: optimize table alteration speed in metric engine (#5526)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index e4091bcf5384..d70ff012e4bb 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -6737,6 +6737,7 @@ version = "0.12.0"
dependencies = [
"api",
"aquamarine",
+ "async-stream",
"async-trait",
"base64 0.21.7",
"common-base",
@@ -6749,6 +6750,7 @@ dependencies = [
"common-time",
"datafusion",
"datatypes",
+ "futures-util",
"itertools 0.10.5",
"lazy_static",
"mito2",
diff --git a/src/metric-engine/Cargo.toml b/src/metric-engine/Cargo.toml
index f8d21c35d400..5fe5ed3cb5a0 100644
--- a/src/metric-engine/Cargo.toml
+++ b/src/metric-engine/Cargo.toml
@@ -10,6 +10,7 @@ workspace = true
[dependencies]
api.workspace = true
aquamarine.workspace = true
+async-stream.workspace = true
async-trait.workspace = true
base64.workspace = true
common-base.workspace = true
@@ -21,6 +22,7 @@ common-telemetry.workspace = true
common-time.workspace = true
datafusion.workspace = true
datatypes.workspace = true
+futures-util.workspace = true
itertools.workspace = true
lazy_static = "1.4"
mito2.workspace = true
diff --git a/src/metric-engine/src/data_region.rs b/src/metric-engine/src/data_region.rs
index f903587d1087..22ce8f526186 100644
--- a/src/metric-engine/src/data_region.rs
+++ b/src/metric-engine/src/data_region.rs
@@ -13,8 +13,6 @@
// limitations under the License.
use api::v1::SemanticType;
-use common_error::ext::ErrorExt;
-use common_error::status_code::StatusCode;
use common_telemetry::{debug, info, warn};
use datatypes::schema::{SkippingIndexOptions, SkippingIndexType};
use mito2::engine::MitoEngine;
@@ -32,11 +30,9 @@ use crate::error::{
ColumnTypeMismatchSnafu, ForbiddenPhysicalAlterSnafu, MitoReadOperationSnafu,
MitoWriteOperationSnafu, Result, SetSkippingIndexOptionSnafu,
};
-use crate::metrics::{FORBIDDEN_OPERATION_COUNT, MITO_DDL_DURATION};
+use crate::metrics::{FORBIDDEN_OPERATION_COUNT, MITO_DDL_DURATION, PHYSICAL_COLUMN_COUNT};
use crate::utils;
-const MAX_RETRIES: usize = 5;
-
/// This is a generic handler like [MetricEngine](crate::engine::MetricEngine). It
/// will handle all the data related operations across physical tables. Thus
/// every operation should be associated to a [RegionId], which is the physical
@@ -65,33 +61,30 @@ impl DataRegion {
pub async fn add_columns(
&self,
region_id: RegionId,
- columns: &mut [ColumnMetadata],
+ columns: Vec<ColumnMetadata>,
index_options: IndexOptions,
) -> Result<()> {
+ // Return early if no new columns are added.
+ if columns.is_empty() {
+ return Ok(());
+ }
+
let region_id = utils::to_data_region_id(region_id);
- let mut retries = 0;
- // submit alter request
- while retries < MAX_RETRIES {
- let request = self
- .assemble_alter_request(region_id, columns, index_options)
- .await?;
+ let num_columns = columns.len();
+ let request = self
+ .assemble_alter_request(region_id, columns, index_options)
+ .await?;
- let _timer = MITO_DDL_DURATION.start_timer();
+ let _timer = MITO_DDL_DURATION.start_timer();
- let result = self.mito.handle_request(region_id, request).await;
- match result {
- Ok(_) => return Ok(()),
- Err(e) if e.status_code() == StatusCode::RequestOutdated => {
- info!("Retrying alter {region_id} due to outdated schema version, times {retries}");
- retries += 1;
- continue;
- }
- Err(e) => {
- return Err(e).context(MitoWriteOperationSnafu)?;
- }
- }
- }
+ let _ = self
+ .mito
+ .handle_request(region_id, request)
+ .await
+ .context(MitoWriteOperationSnafu)?;
+
+ PHYSICAL_COLUMN_COUNT.add(num_columns as _);
Ok(())
}
@@ -101,7 +94,7 @@ impl DataRegion {
async fn assemble_alter_request(
&self,
region_id: RegionId,
- columns: &mut [ColumnMetadata],
+ columns: Vec<ColumnMetadata>,
index_options: IndexOptions,
) -> Result<RegionRequest> {
// retrieve underlying version
@@ -128,9 +121,9 @@ impl DataRegion {
// overwrite semantic type
let new_columns = columns
- .iter_mut()
+ .into_iter()
.enumerate()
- .map(|(delta, c)| {
+ .map(|(delta, mut c)| {
if c.semantic_type == SemanticType::Tag {
if !c.column_schema.data_type.is_string() {
return ColumnTypeMismatchSnafu {
@@ -254,7 +247,7 @@ mod test {
// TestEnv will create a logical region which changes the version to 1.
assert_eq!(current_version, 1);
- let mut new_columns = vec![
+ let new_columns = vec![
ColumnMetadata {
column_id: 0,
semantic_type: SemanticType::Tag,
@@ -277,7 +270,7 @@ mod test {
env.data_region()
.add_columns(
env.default_physical_region_id(),
- &mut new_columns,
+ new_columns,
IndexOptions::Inverted,
)
.await
@@ -311,7 +304,7 @@ mod test {
let env = TestEnv::new().await;
env.init_metric_region().await;
- let mut new_columns = vec![ColumnMetadata {
+ let new_columns = vec![ColumnMetadata {
column_id: 0,
semantic_type: SemanticType::Tag,
column_schema: ColumnSchema::new("tag2", ConcreteDataType::int64_datatype(), false),
@@ -320,7 +313,7 @@ mod test {
.data_region()
.add_columns(
env.default_physical_region_id(),
- &mut new_columns,
+ new_columns,
IndexOptions::Inverted,
)
.await;
diff --git a/src/metric-engine/src/engine.rs b/src/metric-engine/src/engine.rs
index 8e69458c12fd..95261580bdff 100644
--- a/src/metric-engine/src/engine.rs
+++ b/src/metric-engine/src/engine.rs
@@ -146,12 +146,17 @@ impl RegionEngine for MetricEngine {
})
}
BatchRegionDdlRequest::Alter(requests) => {
- self.handle_requests(
- requests
- .into_iter()
- .map(|(region_id, req)| (region_id, RegionRequest::Alter(req))),
- )
- .await
+ let mut extension_return_value = HashMap::new();
+ let rows = self
+ .inner
+ .alter_regions(requests, &mut extension_return_value)
+ .await
+ .map_err(BoxedError::new)?;
+
+ Ok(RegionResponse {
+ affected_rows: rows,
+ extensions: extension_return_value,
+ })
}
BatchRegionDdlRequest::Drop(requests) => {
self.handle_requests(
@@ -184,7 +189,7 @@ impl RegionEngine for MetricEngine {
RegionRequest::Close(close) => self.inner.close_region(region_id, close).await,
RegionRequest::Alter(alter) => {
self.inner
- .alter_region(region_id, alter, &mut extension_return_value)
+ .alter_regions(vec![(region_id, alter)], &mut extension_return_value)
.await
}
RegionRequest::Compact(_) => {
diff --git a/src/metric-engine/src/engine/alter.rs b/src/metric-engine/src/engine/alter.rs
index 5fd0c13e78a7..9bc5f56251c0 100644
--- a/src/metric-engine/src/engine/alter.rs
+++ b/src/metric-engine/src/engine/alter.rs
@@ -12,148 +12,158 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::collections::HashMap;
+mod extract_new_columns;
+mod validate;
+
+use std::collections::{HashMap, HashSet};
use common_telemetry::error;
-use snafu::{OptionExt, ResultExt};
+use extract_new_columns::extract_new_columns;
+use snafu::{ensure, OptionExt, ResultExt};
use store_api::metadata::ColumnMetadata;
use store_api::metric_engine_consts::ALTER_PHYSICAL_EXTENSION_KEY;
use store_api::region_request::{AffectedRows, AlterKind, RegionAlterRequest};
use store_api::storage::RegionId;
+use validate::validate_alter_region_requests;
use crate::engine::MetricEngineInner;
use crate::error::{
LogicalRegionNotFoundSnafu, PhysicalRegionNotFoundSnafu, Result, SerializeColumnMetadataSnafu,
+ UnexpectedRequestSnafu,
};
-use crate::utils::{to_data_region_id, to_metadata_region_id};
+use crate::utils::to_data_region_id;
impl MetricEngineInner {
- /// Dispatch region alter request
- pub async fn alter_region(
+ pub async fn alter_regions(
&self,
- region_id: RegionId,
- request: RegionAlterRequest,
+ mut requests: Vec<(RegionId, RegionAlterRequest)>,
extension_return_value: &mut HashMap<String, Vec<u8>>,
) -> Result<AffectedRows> {
- let is_altering_physical_region = self.is_physical_region(region_id);
+ if requests.is_empty() {
+ return Ok(0);
+ }
- let result = if is_altering_physical_region {
- self.alter_physical_region(region_id, request).await
+ let first_region_id = &requests.first().unwrap().0;
+ if self.is_physical_region(*first_region_id) {
+ ensure!(
+ requests.len() == 1,
+ UnexpectedRequestSnafu {
+ reason: "Physical table must be altered with single request".to_string(),
+ }
+ );
+ let (region_id, request) = requests.pop().unwrap();
+ self.alter_physical_region(region_id, request).await?;
} else {
- let physical_region_id = self.alter_logical_region(region_id, request).await?;
-
- // Add physical table's column to extension map.
- // It's ok to overwrite existing key, as the latter come schema is more up-to-date
- let physical_columns = self
- .data_region
- .physical_columns(physical_region_id)
+ self.alter_logical_regions(requests, extension_return_value)
.await?;
- extension_return_value.insert(
- ALTER_PHYSICAL_EXTENSION_KEY.to_string(),
- ColumnMetadata::encode_list(&physical_columns)
- .context(SerializeColumnMetadataSnafu)?,
- );
-
- Ok(())
- };
-
- result.map(|_| 0)
+ }
+ Ok(0)
}
- /// Return the physical region id behind this logical region
- async fn alter_logical_region(
+ /// Alter multiple logical regions on the same physical region.
+ pub async fn alter_logical_regions(
&self,
- logical_region_id: RegionId,
- request: RegionAlterRequest,
- ) -> Result<RegionId> {
+ requests: Vec<(RegionId, RegionAlterRequest)>,
+ extension_return_value: &mut HashMap<String, Vec<u8>>,
+ ) -> Result<AffectedRows> {
+ // Checks all alter requests are add columns.
+ validate_alter_region_requests(&requests)?;
+
+ let first_logical_region_id = requests[0].0;
+
+ // Finds new columns to add
+ let mut new_column_names = HashSet::new();
+ let mut new_columns_to_add = vec![];
+
let (physical_region_id, index_options) = {
let state = &self.state.read().unwrap();
let physical_region_id = state
- .get_physical_region_id(logical_region_id)
+ .get_physical_region_id(first_logical_region_id)
.with_context(|| {
- error!("Trying to alter an nonexistent region {logical_region_id}");
+ error!("Trying to alter an nonexistent region {first_logical_region_id}");
LogicalRegionNotFoundSnafu {
- region_id: logical_region_id,
+ region_id: first_logical_region_id,
}
})?;
-
- let index_options = state
+ let region_state = state
.physical_region_states()
.get(&physical_region_id)
.with_context(|| PhysicalRegionNotFoundSnafu {
region_id: physical_region_id,
- })?
- .options()
- .index;
+ })?;
+ let physical_columns = region_state.physical_columns();
- (physical_region_id, index_options)
- };
+ extract_new_columns(
+ &requests,
+ physical_columns,
+ &mut new_column_names,
+ &mut new_columns_to_add,
+ )?;
- // only handle adding column
- let AlterKind::AddColumns { columns } = request.kind else {
- return Ok(physical_region_id);
+ (physical_region_id, region_state.options().index)
};
+ let data_region_id = to_data_region_id(physical_region_id);
- // lock metadata region for this logical region id
- let _write_guard = self
- .metadata_region
- .write_lock_logical_region(logical_region_id)
- .await;
-
- let metadata_region_id = to_metadata_region_id(physical_region_id);
- let mut columns_to_add = vec![];
- // columns that already exist in physical region
- let mut existing_columns = vec![];
+ let mut write_guards = HashMap::with_capacity(requests.len());
+ for (region_id, _) in requests.iter() {
+ if write_guards.contains_key(region_id) {
+ continue;
+ }
+ let _write_guard = self
+ .metadata_region
+ .write_lock_logical_region(*region_id)
+ .await;
+ write_guards.insert(*region_id, _write_guard);
+ }
- let pre_existing_physical_columns = self
- .data_region
- .physical_columns(physical_region_id)
+ self.data_region
+ .add_columns(data_region_id, new_columns_to_add, index_options)
.await?;
- let pre_exist_cols = pre_existing_physical_columns
+ let physical_columns = self.data_region.physical_columns(data_region_id).await?;
+ let physical_schema_map = physical_columns
.iter()
- .map(|col| (col.column_schema.name.as_str(), col))
+ .map(|metadata| (metadata.column_schema.name.as_str(), metadata))
.collect::<HashMap<_, _>>();
- // check pre-existing physical columns so if any columns to add is already exist,
- // we can skip it in physical alter operation
- // (but still need to update them in logical alter operation)
- for col in &columns {
- if let Some(exist_column) =
- pre_exist_cols.get(&col.column_metadata.column_schema.name.as_str())
- {
- // push the correct column schema with correct column id
- existing_columns.push(*exist_column);
- } else {
- columns_to_add.push(col.column_metadata.clone());
- }
- }
+ let logical_region_columns = requests.iter().map(|(region_id, request)| {
+ let AlterKind::AddColumns { columns } = &request.kind else {
+ unreachable!()
+ };
+ (
+ *region_id,
+ columns
+ .iter()
+ .map(|col| {
+ let column_name = col.column_metadata.column_schema.name.as_str();
+ let column_metadata = *physical_schema_map.get(column_name).unwrap();
+ (column_name, column_metadata)
+ })
+ .collect::<HashMap<_, _>>(),
+ )
+ });
- // alter data region
- let data_region_id = to_data_region_id(physical_region_id);
- self.add_columns_to_physical_data_region(
- data_region_id,
- logical_region_id,
- &mut columns_to_add,
- index_options,
- )
- .await?;
+ let new_add_columns = new_column_names.iter().map(|name| {
+ // Safety: previous steps ensure the physical region exist
+ let column_metadata = *physical_schema_map.get(name).unwrap();
+ (name.to_string(), column_metadata.column_id)
+ });
- // note here we don't use `columns` directly but concat `existing_columns` with `columns_to_add` to get correct metadata
- // about already existing columns
- for metadata in existing_columns.into_iter().chain(columns_to_add.iter()) {
- self.metadata_region
- .add_column(metadata_region_id, logical_region_id, metadata)
- .await?;
- }
+ // Writes logical regions metadata to metadata region
+ self.metadata_region
+ .add_logical_regions(physical_region_id, false, logical_region_columns)
+ .await?;
- // invalid logical column cache
- self.state
- .write()
- .unwrap()
- .invalid_logical_column_cache(logical_region_id);
+ extension_return_value.insert(
+ ALTER_PHYSICAL_EXTENSION_KEY.to_string(),
+ ColumnMetadata::encode_list(&physical_columns).context(SerializeColumnMetadataSnafu)?,
+ );
+
+ let mut state = self.state.write().unwrap();
+ state.add_physical_columns(data_region_id, new_add_columns);
+ state.invalid_logical_regions_cache(requests.iter().map(|(region_id, _)| *region_id));
- Ok(physical_region_id)
+ Ok(0)
}
async fn alter_physical_region(
@@ -241,7 +251,7 @@ mod test {
let region_id = env.default_logical_region_id();
engine_inner
- .alter_logical_region(region_id, request)
+ .alter_logical_regions(vec![(region_id, request)], &mut HashMap::new())
.await
.unwrap();
let semantic_type = metadata_region
diff --git a/src/metric-engine/src/engine/alter/extract_new_columns.rs b/src/metric-engine/src/engine/alter/extract_new_columns.rs
new file mode 100644
index 000000000000..fdb1ef6126e4
--- /dev/null
+++ b/src/metric-engine/src/engine/alter/extract_new_columns.rs
@@ -0,0 +1,51 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::collections::{HashMap, HashSet};
+
+use store_api::metadata::ColumnMetadata;
+use store_api::region_request::{AlterKind, RegionAlterRequest};
+use store_api::storage::{ColumnId, RegionId};
+
+use crate::error::Result;
+
+/// Extract new columns from the create requests.
+///
+/// # Panics
+///
+/// This function will panic if the alter kind is not `AddColumns`.
+pub fn extract_new_columns<'a>(
+ requests: &'a [(RegionId, RegionAlterRequest)],
+ physical_columns: &HashMap<String, ColumnId>,
+ new_column_names: &mut HashSet<&'a str>,
+ new_columns: &mut Vec<ColumnMetadata>,
+) -> Result<()> {
+ for (_, request) in requests {
+ let AlterKind::AddColumns { columns } = &request.kind else {
+ unreachable!()
+ };
+ for col in columns {
+ let column_name = col.column_metadata.column_schema.name.as_str();
+ if !physical_columns.contains_key(column_name)
+ && !new_column_names.contains(column_name)
+ {
+ new_column_names.insert(column_name);
+ // TODO(weny): avoid clone
+ new_columns.push(col.column_metadata.clone());
+ }
+ }
+ }
+
+ Ok(())
+}
diff --git a/src/metric-engine/src/engine/alter/validate.rs b/src/metric-engine/src/engine/alter/validate.rs
new file mode 100644
index 000000000000..2e2d91eccfc6
--- /dev/null
+++ b/src/metric-engine/src/engine/alter/validate.rs
@@ -0,0 +1,33 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use snafu::ensure;
+use store_api::region_request::{AlterKind, RegionAlterRequest};
+use store_api::storage::RegionId;
+
+use crate::error::{Result, UnsupportedAlterKindSnafu};
+
+/// Validate the alter region requests.
+pub fn validate_alter_region_requests(requests: &[(RegionId, RegionAlterRequest)]) -> Result<()> {
+ for (_, request) in requests {
+ ensure!(
+ matches!(request.kind, AlterKind::AddColumns { .. }),
+ UnsupportedAlterKindSnafu {
+ kind: request.kind.as_ref()
+ }
+ );
+ }
+
+ Ok(())
+}
diff --git a/src/metric-engine/src/engine/create.rs b/src/metric-engine/src/engine/create.rs
index b76f22586eac..8ebd29be23ad 100644
--- a/src/metric-engine/src/engine/create.rs
+++ b/src/metric-engine/src/engine/create.rs
@@ -12,15 +12,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-mod add_columns;
mod extract_new_columns;
mod validate;
use std::collections::{HashMap, HashSet};
-use add_columns::add_columns_to_physical_data_region;
use api::v1::SemanticType;
-use common_telemetry::{info, warn};
+use common_telemetry::info;
use common_time::{Timestamp, FOREVER};
use datatypes::data_type::ConcreteDataType;
use datatypes::schema::ColumnSchema;
@@ -46,15 +44,15 @@ use store_api::storage::RegionId;
use validate::validate_create_logical_regions;
use crate::engine::create::extract_new_columns::extract_new_columns;
-use crate::engine::options::{set_data_region_options, IndexOptions, PhysicalRegionOptions};
+use crate::engine::options::{set_data_region_options, PhysicalRegionOptions};
use crate::engine::MetricEngineInner;
use crate::error::{
- ColumnNotFoundSnafu, ColumnTypeMismatchSnafu, ConflictRegionOptionSnafu, CreateMitoRegionSnafu,
- EmptyRequestSnafu, InternalColumnOccupiedSnafu, InvalidMetadataSnafu, MissingRegionOptionSnafu,
+ ColumnTypeMismatchSnafu, ConflictRegionOptionSnafu, CreateMitoRegionSnafu,
+ InternalColumnOccupiedSnafu, InvalidMetadataSnafu, MissingRegionOptionSnafu,
MultipleFieldColumnSnafu, NoFieldColumnSnafu, PhysicalRegionNotFoundSnafu, Result,
SerializeColumnMetadataSnafu, UnexpectedRequestSnafu,
};
-use crate::metrics::{PHYSICAL_COLUMN_COUNT, PHYSICAL_REGION_COUNT};
+use crate::metrics::PHYSICAL_REGION_COUNT;
use crate::utils::{self, to_data_region_id, to_metadata_region_id};
impl MetricEngineInner {
@@ -87,16 +85,8 @@ impl MetricEngineInner {
.options
.contains_key(LOGICAL_TABLE_METADATA_KEY)
{
- let physical_region_id = self.create_logical_regions(requests).await?;
- let physical_columns = self
- .data_region
- .physical_columns(physical_region_id)
+ self.create_logical_regions(requests, extension_return_value)
.await?;
- extension_return_value.insert(
- ALTER_PHYSICAL_EXTENSION_KEY.to_string(),
- ColumnMetadata::encode_list(&physical_columns)
- .context(SerializeColumnMetadataSnafu)?,
- );
} else {
return MissingRegionOptionSnafu {}.fail();
}
@@ -162,14 +152,11 @@ impl MetricEngineInner {
}
/// Create multiple logical regions on the same physical region.
- ///
- /// Returns the physical region id of the created logical regions.
async fn create_logical_regions(
&self,
requests: Vec<(RegionId, RegionCreateRequest)>,
- ) -> Result<RegionId> {
- ensure!(!requests.is_empty(), EmptyRequestSnafu {});
-
+ extension_return_value: &mut HashMap<String, Vec<u8>>,
+ ) -> Result<()> {
let physical_region_id = validate_create_logical_regions(&requests)?;
let data_region_id = utils::to_data_region_id(physical_region_id);
@@ -208,13 +195,9 @@ impl MetricEngineInner {
};
// TODO(weny): we dont need to pass a mutable new_columns here.
- add_columns_to_physical_data_region(
- data_region_id,
- index_option,
- &mut new_columns,
- &self.data_region,
- )
- .await?;
+ self.data_region
+ .add_columns(data_region_id, new_columns, index_option)
+ .await?;
let physical_columns = self.data_region.physical_columns(data_region_id).await?;
let physical_schema_map = physical_columns
@@ -225,7 +208,7 @@ impl MetricEngineInner {
.iter()
.map(|(region_id, _)| (*region_id))
.collect::<Vec<_>>();
- let logical_regions_column_names = requests.iter().map(|(region_id, request)| {
+ let logical_region_columns = requests.iter().map(|(region_id, request)| {
(
*region_id,
request
@@ -242,91 +225,26 @@ impl MetricEngineInner {
)
});
- let new_add_columns = new_columns.iter().map(|metadata| {
+ let new_add_columns = new_column_names.iter().map(|name| {
// Safety: previous steps ensure the physical region exist
- let column_metadata = *physical_schema_map
- .get(metadata.column_schema.name.as_str())
- .unwrap();
- (
- metadata.column_schema.name.to_string(),
- column_metadata.column_id,
- )
+ let column_metadata = *physical_schema_map.get(name).unwrap();
+ (name.to_string(), column_metadata.column_id)
});
+ extension_return_value.insert(
+ ALTER_PHYSICAL_EXTENSION_KEY.to_string(),
+ ColumnMetadata::encode_list(&physical_columns).context(SerializeColumnMetadataSnafu)?,
+ );
+
// Writes logical regions metadata to metadata region
self.metadata_region
- .add_logical_regions(physical_region_id, logical_regions_column_names)
+ .add_logical_regions(physical_region_id, true, logical_region_columns)
.await?;
let mut state = self.state.write().unwrap();
state.add_physical_columns(data_region_id, new_add_columns);
state.add_logical_regions(physical_region_id, logical_regions);
- Ok(data_region_id)
- }
-
- /// Execute corresponding alter requests to mito region. After calling this, `new_columns` will be assign a new column id
- /// which should be correct if the following requirements are met:
- ///
- /// # NOTE
- ///
- /// `new_columns` MUST NOT pre-exist in the physical region. Or the results will be wrong column id for the new columns.
- ///
- pub(crate) async fn add_columns_to_physical_data_region(
- &self,
- data_region_id: RegionId,
- logical_region_id: RegionId,
- new_columns: &mut [ColumnMetadata],
- index_options: IndexOptions,
- ) -> Result<()> {
- // Return early if no new columns are added.
- if new_columns.is_empty() {
- return Ok(());
- }
-
- // alter data region
- self.data_region
- .add_columns(data_region_id, new_columns, index_options)
- .await?;
-
- // correct the column id
- let after_alter_physical_schema = self.data_region.physical_columns(data_region_id).await?;
- let after_alter_physical_schema_map = after_alter_physical_schema
- .iter()
- .map(|metadata| (metadata.column_schema.name.as_str(), metadata))
- .collect::<HashMap<_, _>>();
-
- // double check to make sure column ids are not mismatched
- // shouldn't be a expensive operation, given it only query for physical columns
- for col in new_columns.iter_mut() {
- let column_metadata = after_alter_physical_schema_map
- .get(&col.column_schema.name.as_str())
- .with_context(|| ColumnNotFoundSnafu {
- name: &col.column_schema.name,
- region_id: data_region_id,
- })?;
- if col != *column_metadata {
- warn!(
- "Add already existing columns with different column metadata to physical region({:?}): new column={:?}, old column={:?}",
- data_region_id,
- col,
- column_metadata
- );
- // update to correct metadata
- *col = (*column_metadata).clone();
- }
- }
-
- // safety: previous step has checked this
- self.state.write().unwrap().add_physical_columns(
- data_region_id,
- new_columns
- .iter()
- .map(|meta| (meta.column_schema.name.clone(), meta.column_id)),
- );
- info!("Create region {logical_region_id} leads to adding columns {new_columns:?} to physical region {data_region_id}");
- PHYSICAL_COLUMN_COUNT.add(new_columns.len() as _);
-
Ok(())
}
diff --git a/src/metric-engine/src/engine/create/add_columns.rs b/src/metric-engine/src/engine/create/add_columns.rs
deleted file mode 100644
index 78c66ac9ee92..000000000000
--- a/src/metric-engine/src/engine/create/add_columns.rs
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use store_api::metadata::ColumnMetadata;
-use store_api::storage::RegionId;
-
-use crate::data_region::DataRegion;
-use crate::engine::IndexOptions;
-use crate::error::Result;
-use crate::metrics::PHYSICAL_COLUMN_COUNT;
-
-/// Add new columns to the physical data region.
-pub(crate) async fn add_columns_to_physical_data_region(
- data_region_id: RegionId,
- index_options: IndexOptions,
- new_columns: &mut [ColumnMetadata],
- data_region: &DataRegion,
-) -> Result<()> {
- // Return early if no new columns are added.
- if new_columns.is_empty() {
- return Ok(());
- }
-
- data_region
- .add_columns(data_region_id, new_columns, index_options)
- .await?;
-
- PHYSICAL_COLUMN_COUNT.add(new_columns.len() as _);
-
- Ok(())
-}
diff --git a/src/metric-engine/src/engine/create/validate.rs b/src/metric-engine/src/engine/create/validate.rs
index df98294480fc..943e42af52c4 100644
--- a/src/metric-engine/src/engine/create/validate.rs
+++ b/src/metric-engine/src/engine/create/validate.rs
@@ -18,8 +18,7 @@ use store_api::region_request::RegionCreateRequest;
use store_api::storage::RegionId;
use crate::error::{
- ConflictRegionOptionSnafu, EmptyRequestSnafu, MissingRegionOptionSnafu, ParseRegionIdSnafu,
- Result,
+ ConflictRegionOptionSnafu, MissingRegionOptionSnafu, ParseRegionIdSnafu, Result,
};
/// Validate the create logical regions request.
@@ -28,8 +27,6 @@ use crate::error::{
pub fn validate_create_logical_regions(
requests: &[(RegionId, RegionCreateRequest)],
) -> Result<RegionId> {
- ensure!(!requests.is_empty(), EmptyRequestSnafu {});
-
let (_, request) = requests.first().unwrap();
let first_physical_region_id_raw = request
.options
diff --git a/src/metric-engine/src/engine/state.rs b/src/metric-engine/src/engine/state.rs
index 3c273372fe0b..42975e83e643 100644
--- a/src/metric-engine/src/engine/state.rs
+++ b/src/metric-engine/src/engine/state.rs
@@ -139,6 +139,15 @@ impl MetricEngineState {
}
}
+ pub fn invalid_logical_regions_cache(
+ &mut self,
+ logical_region_ids: impl IntoIterator<Item = RegionId>,
+ ) {
+ for logical_region_id in logical_region_ids {
+ self.logical_columns.remove(&logical_region_id);
+ }
+ }
+
/// # Panic
/// if the physical region does not exist
pub fn add_logical_region(
@@ -233,10 +242,6 @@ impl MetricEngineState {
Ok(())
}
- pub fn invalid_logical_column_cache(&mut self, logical_region_id: RegionId) {
- self.logical_columns.remove(&logical_region_id);
- }
-
pub fn is_logical_region_exist(&self, logical_region_id: RegionId) -> bool {
self.logical_regions().contains_key(&logical_region_id)
}
diff --git a/src/metric-engine/src/error.rs b/src/metric-engine/src/error.rs
index 4e082da305f0..de0d935ee081 100644
--- a/src/metric-engine/src/error.rs
+++ b/src/metric-engine/src/error.rs
@@ -218,6 +218,13 @@ pub enum Error {
location: Location,
},
+ #[snafu(display("Unsupported alter kind: {}", kind))]
+ UnsupportedAlterKind {
+ kind: String,
+ #[snafu(implicit)]
+ location: Location,
+ },
+
#[snafu(display("Multiple field column found: {} and {}", previous, current))]
MultipleFieldColumn {
previous: String,
@@ -246,12 +253,6 @@ pub enum Error {
location: Location,
},
- #[snafu(display("Empty request"))]
- EmptyRequest {
- #[snafu(implicit)]
- location: Location,
- },
-
#[snafu(display("Unexpected request: {}", reason))]
UnexpectedRequest {
reason: String,
@@ -276,8 +277,8 @@ impl ErrorExt for Error {
| NoFieldColumn { .. }
| AddingFieldColumn { .. }
| ParseRegionOptions { .. }
- | EmptyRequest { .. }
- | UnexpectedRequest { .. } => StatusCode::InvalidArguments,
+ | UnexpectedRequest { .. }
+ | UnsupportedAlterKind { .. } => StatusCode::InvalidArguments,
ForbiddenPhysicalAlter { .. } | UnsupportedRegionRequest { .. } => {
StatusCode::Unsupported
diff --git a/src/metric-engine/src/metadata_region.rs b/src/metric-engine/src/metadata_region.rs
index 71a3a0e3ce8d..753251c72bb9 100644
--- a/src/metric-engine/src/metadata_region.rs
+++ b/src/metric-engine/src/metadata_region.rs
@@ -17,10 +17,13 @@ use std::sync::Arc;
use api::v1::value::ValueData;
use api::v1::{ColumnDataType, ColumnSchema, Row, Rows, SemanticType, Value};
+use async_stream::try_stream;
use base64::engine::general_purpose::STANDARD_NO_PAD;
use base64::Engine;
-use common_recordbatch::util::collect;
+use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
use datafusion::prelude::{col, lit};
+use futures_util::stream::BoxStream;
+use futures_util::TryStreamExt;
use mito2::engine::MitoEngine;
use snafu::{OptionExt, ResultExt};
use store_api::metadata::ColumnMetadata;
@@ -80,28 +83,6 @@ impl MetadataRegion {
.insert(logical_region_id, Arc::new(RwLock::new(())));
}
- /// Add a new column key to metadata.
- ///
- /// This method won't check if the column already exists. But
- /// will return if the column is successfully added.
- pub async fn add_column(
- &self,
- physical_region_id: RegionId,
- logical_region_id: RegionId,
- column_metadata: &ColumnMetadata,
- ) -> Result<bool> {
- let region_id = utils::to_metadata_region_id(physical_region_id);
- let column_key =
- Self::concat_column_key(logical_region_id, &column_metadata.column_schema.name);
-
- self.put_if_absent(
- region_id,
- column_key,
- Self::serialize_column_metadata(column_metadata),
- )
- .await
- }
-
/// Retrieve a read lock guard of given logical region id.
pub async fn read_lock_logical_region(
&self,
@@ -169,22 +150,6 @@ impl MetadataRegion {
Ok(())
}
- /// Check if the given column exists. Return the semantic type if exists.
- #[cfg(test)]
- pub async fn column_semantic_type(
- &self,
- physical_region_id: RegionId,
- logical_region_id: RegionId,
- column_name: &str,
- ) -> Result<Option<SemanticType>> {
- let region_id = utils::to_metadata_region_id(physical_region_id);
- let column_key = Self::concat_column_key(logical_region_id, column_name);
- let semantic_type = self.get(region_id, &column_key).await?;
- semantic_type
- .map(|s| Self::deserialize_column_metadata(&s).map(|c| c.semantic_type))
- .transpose()
- }
-
// TODO(ruihang): avoid using `get_all`
/// Get all the columns of a given logical region.
/// Return a list of (column_name, column_metadata).
@@ -197,7 +162,10 @@ impl MetadataRegion {
let region_column_prefix = Self::concat_column_key_prefix(logical_region_id);
let mut columns = vec![];
- for (k, v) in self.get_all(metadata_region_id).await? {
+ for (k, v) in self
+ .get_all_with_prefix(metadata_region_id, ®ion_column_prefix)
+ .await?
+ {
if !k.starts_with(®ion_column_prefix) {
continue;
}
@@ -214,7 +182,10 @@ impl MetadataRegion {
let metadata_region_id = utils::to_metadata_region_id(physical_region_id);
let mut regions = vec![];
- for (k, _) in self.get_all(metadata_region_id).await? {
+ for k in self
+ .get_all_key_with_prefix(metadata_region_id, REGION_PREFIX)
+ .await?
+ {
if !k.starts_with(REGION_PREFIX) {
continue;
}
@@ -286,120 +257,122 @@ impl MetadataRegion {
}
}
+/// Decode a record batch stream to a stream of items.
+pub fn decode_batch_stream<T: Send + 'static>(
+ mut record_batch_stream: SendableRecordBatchStream,
+ decode: fn(RecordBatch) -> Vec<T>,
+) -> BoxStream<'static, Result<T>> {
+ let stream = try_stream! {
+ while let Some(batch) = record_batch_stream.try_next().await.context(CollectRecordBatchStreamSnafu)? {
+ for item in decode(batch) {
+ yield item;
+ }
+ }
+ };
+ Box::pin(stream)
+}
+
+/// Decode a record batch to a list of key and value.
+fn decode_record_batch_to_key_and_value(batch: RecordBatch) -> Vec<(String, String)> {
+ let key_col = batch.column(0);
+ let val_col = batch.column(1);
+
+ (0..batch.num_rows())
+ .flat_map(move |row_index| {
+ let key = key_col
+ .get_ref(row_index)
+ .as_string()
+ .unwrap()
+ .map(|s| s.to_string());
+
+ key.map(|k| {
+ (
+ k,
+ val_col
+ .get_ref(row_index)
+ .as_string()
+ .unwrap()
+ .map(|s| s.to_string())
+ .unwrap_or_default(),
+ )
+ })
+ })
+ .collect()
+}
+
+/// Decode a record batch to a list of key.
+fn decode_record_batch_to_key(batch: RecordBatch) -> Vec<String> {
+ let key_col = batch.column(0);
+
+ (0..batch.num_rows())
+ .flat_map(move |row_index| {
+ let key = key_col
+ .get_ref(row_index)
+ .as_string()
+ .unwrap()
+ .map(|s| s.to_string());
+ key
+ })
+ .collect()
+}
+
// simulate to `KvBackend`
//
// methods in this block assume the given region id is transformed.
impl MetadataRegion {
- /// Put if not exist, return if this put operation is successful (error other
- /// than "key already exist" will be wrapped in [Err]).
- pub async fn put_if_absent(
- &self,
- region_id: RegionId,
- key: String,
- value: String,
- ) -> Result<bool> {
- if self.exists(region_id, &key).await? {
- return Ok(false);
- }
+ fn build_prefix_read_request(prefix: &str, key_only: bool) -> ScanRequest {
+ let filter_expr = col(METADATA_SCHEMA_KEY_COLUMN_NAME).like(lit(prefix));
- let put_request = Self::build_put_request(&key, &value);
- self.mito
- .handle_request(
- region_id,
- store_api::region_request::RegionRequest::Put(put_request),
- )
- .await
- .context(MitoWriteOperationSnafu)?;
- Ok(true)
+ let projection = if key_only {
+ vec![METADATA_SCHEMA_KEY_COLUMN_INDEX]
+ } else {
+ vec![
+ METADATA_SCHEMA_KEY_COLUMN_INDEX,
+ METADATA_SCHEMA_VALUE_COLUMN_INDEX,
+ ]
+ };
+ ScanRequest {
+ projection: Some(projection),
+ filters: vec![filter_expr],
+ output_ordering: None,
+ limit: None,
+ series_row_selector: None,
+ sequence: None,
+ }
}
- /// Check if the given key exists.
- ///
- /// Notice that due to mito doesn't support transaction, TOCTTOU is possible.
- pub async fn exists(&self, region_id: RegionId, key: &str) -> Result<bool> {
- let scan_req = Self::build_read_request(key);
+ pub async fn get_all_with_prefix(
+ &self,
+ region_id: RegionId,
+ prefix: &str,
+ ) -> Result<HashMap<String, String>> {
+ let scan_req = MetadataRegion::build_prefix_read_request(prefix, false);
let record_batch_stream = self
.mito
.scan_to_stream(region_id, scan_req)
.await
.context(MitoReadOperationSnafu)?;
- let scan_result = collect(record_batch_stream)
- .await
- .context(CollectRecordBatchStreamSnafu)?;
- let exist = !scan_result.is_empty() && scan_result.first().unwrap().num_rows() != 0;
- Ok(exist)
- }
-
- /// Retrieves the value associated with the given key in the specified region.
- /// Returns `Ok(None)` if the key is not found.
- #[cfg(test)]
- pub async fn get(&self, region_id: RegionId, key: &str) -> Result<Option<String>> {
- let scan_req = Self::build_read_request(key);
- let record_batch_stream = self
- .mito
- .scan_to_stream(region_id, scan_req)
+ decode_batch_stream(record_batch_stream, decode_record_batch_to_key_and_value)
+ .try_collect::<HashMap<_, _>>()
.await
- .context(MitoReadOperationSnafu)?;
- let scan_result = collect(record_batch_stream)
- .await
- .context(CollectRecordBatchStreamSnafu)?;
-
- let Some(first_batch) = scan_result.first() else {
- return Ok(None);
- };
-
- let val = first_batch
- .column(0)
- .get_ref(0)
- .as_string()
- .unwrap()
- .map(|s| s.to_string());
-
- Ok(val)
}
- /// Load all metadata from a given region.
- pub async fn get_all(&self, region_id: RegionId) -> Result<HashMap<String, String>> {
- let scan_req = ScanRequest {
- projection: Some(vec![
- METADATA_SCHEMA_KEY_COLUMN_INDEX,
- METADATA_SCHEMA_VALUE_COLUMN_INDEX,
- ]),
- filters: vec![],
- output_ordering: None,
- limit: None,
- series_row_selector: None,
- sequence: None,
- };
+ pub async fn get_all_key_with_prefix(
+ &self,
+ region_id: RegionId,
+ prefix: &str,
+ ) -> Result<Vec<String>> {
+ let scan_req = MetadataRegion::build_prefix_read_request(prefix, true);
let record_batch_stream = self
.mito
.scan_to_stream(region_id, scan_req)
.await
.context(MitoReadOperationSnafu)?;
- let scan_result = collect(record_batch_stream)
- .await
- .context(CollectRecordBatchStreamSnafu)?;
- let mut result = HashMap::new();
- for batch in scan_result {
- let key_col = batch.column(0);
- let val_col = batch.column(1);
- for row_index in 0..batch.num_rows() {
- let key = key_col
- .get_ref(row_index)
- .as_string()
- .unwrap()
- .map(|s| s.to_string());
- let val = val_col
- .get_ref(row_index)
- .as_string()
- .unwrap()
- .map(|s| s.to_string());
- result.insert(key.unwrap(), val.unwrap_or_default());
- }
- }
- Ok(result)
+ decode_batch_stream(record_batch_stream, decode_record_batch_to_key)
+ .try_collect::<Vec<_>>()
+ .await
}
/// Delete the given keys. For performance consideration, this method
@@ -416,23 +389,6 @@ impl MetadataRegion {
Ok(())
}
- /// Builds a [ScanRequest] to read metadata for a given key.
- /// The request will contains a EQ filter on the key column.
- ///
- /// Only the value column is projected.
- fn build_read_request(key: &str) -> ScanRequest {
- let filter_expr = col(METADATA_SCHEMA_KEY_COLUMN_NAME).eq(lit(key));
-
- ScanRequest {
- projection: Some(vec![METADATA_SCHEMA_VALUE_COLUMN_INDEX]),
- filters: vec![filter_expr],
- output_ordering: None,
- limit: None,
- series_row_selector: None,
- sequence: None,
- }
- }
-
pub(crate) fn build_put_request_from_iter(
kv: impl Iterator<Item = (String, String)>,
) -> RegionPutRequest {
@@ -479,47 +435,6 @@ impl MetadataRegion {
RegionPutRequest { rows, hint: None }
}
- fn build_put_request(key: &str, value: &str) -> RegionPutRequest {
- let cols = vec![
- ColumnSchema {
- column_name: METADATA_SCHEMA_TIMESTAMP_COLUMN_NAME.to_string(),
- datatype: ColumnDataType::TimestampMillisecond as _,
- semantic_type: SemanticType::Timestamp as _,
- ..Default::default()
- },
- ColumnSchema {
- column_name: METADATA_SCHEMA_KEY_COLUMN_NAME.to_string(),
- datatype: ColumnDataType::String as _,
- semantic_type: SemanticType::Tag as _,
- ..Default::default()
- },
- ColumnSchema {
- column_name: METADATA_SCHEMA_VALUE_COLUMN_NAME.to_string(),
- datatype: ColumnDataType::String as _,
- semantic_type: SemanticType::Field as _,
- ..Default::default()
- },
- ];
- let rows = Rows {
- schema: cols,
- rows: vec![Row {
- values: vec![
- Value {
- value_data: Some(ValueData::TimestampMillisecondValue(0)),
- },
- Value {
- value_data: Some(ValueData::StringValue(key.to_string())),
- },
- Value {
- value_data: Some(ValueData::StringValue(value.to_string())),
- },
- ],
- }],
- };
-
- RegionPutRequest { rows, hint: None }
- }
-
fn build_delete_request(keys: &[String]) -> RegionDeleteRequest {
let cols = vec![
ColumnSchema {
@@ -557,16 +472,21 @@ impl MetadataRegion {
pub async fn add_logical_regions(
&self,
physical_region_id: RegionId,
+ write_region_id: bool,
logical_regions: impl Iterator<Item = (RegionId, HashMap<&str, &ColumnMetadata>)>,
) -> Result<()> {
let region_id = utils::to_metadata_region_id(physical_region_id);
let iter = logical_regions
.into_iter()
.flat_map(|(logical_region_id, column_metadatas)| {
- Some((
- MetadataRegion::concat_region_key(logical_region_id),
- String::new(),
- ))
+ if write_region_id {
+ Some((
+ MetadataRegion::concat_region_key(logical_region_id),
+ String::new(),
+ ))
+ } else {
+ None
+ }
.into_iter()
.chain(column_metadatas.into_iter().map(
move |(name, column_metadata)| {
@@ -592,11 +512,65 @@ impl MetadataRegion {
}
}
+#[cfg(test)]
+impl MetadataRegion {
+ /// Retrieves the value associated with the given key in the specified region.
+ /// Returns `Ok(None)` if the key is not found.
+ pub async fn get(&self, region_id: RegionId, key: &str) -> Result<Option<String>> {
+ let filter_expr = datafusion::prelude::col(METADATA_SCHEMA_KEY_COLUMN_NAME)
+ .eq(datafusion::prelude::lit(key));
+
+ let scan_req = ScanRequest {
+ projection: Some(vec![METADATA_SCHEMA_VALUE_COLUMN_INDEX]),
+ filters: vec![filter_expr],
+ output_ordering: None,
+ limit: None,
+ series_row_selector: None,
+ sequence: None,
+ };
+ let record_batch_stream = self
+ .mito
+ .scan_to_stream(region_id, scan_req)
+ .await
+ .context(MitoReadOperationSnafu)?;
+ let scan_result = common_recordbatch::util::collect(record_batch_stream)
+ .await
+ .context(CollectRecordBatchStreamSnafu)?;
+
+ let Some(first_batch) = scan_result.first() else {
+ return Ok(None);
+ };
+
+ let val = first_batch
+ .column(0)
+ .get_ref(0)
+ .as_string()
+ .unwrap()
+ .map(|s| s.to_string());
+
+ Ok(val)
+ }
+
+ /// Check if the given column exists. Return the semantic type if exists.
+ pub async fn column_semantic_type(
+ &self,
+ physical_region_id: RegionId,
+ logical_region_id: RegionId,
+ column_name: &str,
+ ) -> Result<Option<SemanticType>> {
+ let region_id = utils::to_metadata_region_id(physical_region_id);
+ let column_key = Self::concat_column_key(logical_region_id, column_name);
+ let semantic_type = self.get(region_id, &column_key).await?;
+ semantic_type
+ .map(|s| Self::deserialize_column_metadata(&s).map(|c| c.semantic_type))
+ .transpose()
+ }
+}
+
#[cfg(test)]
mod test {
use datatypes::data_type::ConcreteDataType;
use datatypes::schema::ColumnSchema;
- use store_api::region_request::RegionRequest;
use super::*;
use crate::test_util::TestEnv;
@@ -665,150 +639,6 @@ mod test {
assert!(MetadataRegion::deserialize_column_metadata(semantic_type).is_err());
}
- #[test]
- fn test_build_read_request() {
- let key = "test_key";
- let expected_filter_expr = col(METADATA_SCHEMA_KEY_COLUMN_NAME).eq(lit(key));
- let expected_scan_request = ScanRequest {
- projection: Some(vec![METADATA_SCHEMA_VALUE_COLUMN_INDEX]),
- filters: vec![expected_filter_expr],
- output_ordering: None,
- limit: None,
- series_row_selector: None,
- sequence: None,
- };
- let actual_scan_request = MetadataRegion::build_read_request(key);
- assert_eq!(actual_scan_request, expected_scan_request);
- }
-
- #[tokio::test]
- async fn test_put_conditionally() {
- let env = TestEnv::new().await;
- env.init_metric_region().await;
- let metadata_region = env.metadata_region();
- let region_id = to_metadata_region_id(env.default_physical_region_id());
-
- // Test inserting a new key-value pair
- let key = "test_key".to_string();
- let value = "test_value".to_string();
- let result = metadata_region
- .put_if_absent(region_id, key.clone(), value.clone())
- .await;
- assert!(result.is_ok());
- assert!(result.unwrap());
-
- // Verify that the key-value pair was actually inserted
- let scan_req = MetadataRegion::build_read_request("test_key");
- let record_batch_stream = metadata_region
- .mito
- .scan_to_stream(region_id, scan_req)
- .await
- .unwrap();
- let scan_result = collect(record_batch_stream).await.unwrap();
- assert_eq!(scan_result.len(), 1);
-
- // Test inserting the same key-value pair again
- let result = metadata_region
- .put_if_absent(region_id, key.clone(), value.clone())
- .await;
- assert!(result.is_ok());
- assert!(!result.unwrap(),);
- }
-
- #[tokio::test]
- async fn test_exist() {
- let env = TestEnv::new().await;
- env.init_metric_region().await;
- let metadata_region = env.metadata_region();
- let region_id = to_metadata_region_id(env.default_physical_region_id());
-
- // Test checking for a non-existent key
- let key = "test_key".to_string();
- let result = metadata_region.exists(region_id, &key).await;
- assert!(result.is_ok());
- assert!(!result.unwrap());
-
- // Test inserting a key and then checking for its existence
- let value = "test_value".to_string();
- let put_request = MetadataRegion::build_put_request(&key, &value);
- metadata_region
- .mito
- .handle_request(region_id, RegionRequest::Put(put_request))
- .await
- .unwrap();
- let result = metadata_region.exists(region_id, &key).await;
- assert!(result.is_ok());
- assert!(result.unwrap(),);
- }
-
- #[tokio::test]
- async fn test_get() {
- let env = TestEnv::new().await;
- env.init_metric_region().await;
- let metadata_region = env.metadata_region();
- let region_id = to_metadata_region_id(env.default_physical_region_id());
-
- // Test getting a non-existent key
- let key = "test_key".to_string();
- let result = metadata_region.get(region_id, &key).await;
- assert!(result.is_ok());
- assert_eq!(result.unwrap(), None);
-
- // Test inserting a key and then getting its value
- let value = "test_value".to_string();
- let put_request = MetadataRegion::build_put_request(&key, &value);
- metadata_region
- .mito
- .handle_request(region_id, RegionRequest::Put(put_request))
- .await
- .unwrap();
- let result = metadata_region.get(region_id, &key).await;
- assert!(result.is_ok());
- assert_eq!(result.unwrap(), Some(value));
- }
-
- #[tokio::test]
- async fn test_add_column() {
- let env = TestEnv::new().await;
- env.init_metric_region().await;
- let metadata_region = env.metadata_region();
- let physical_region_id = to_metadata_region_id(env.default_physical_region_id());
-
- let logical_region_id = RegionId::new(868, 8390);
- let column_name = "column1";
- let semantic_type = SemanticType::Tag;
- let column_metadata = ColumnMetadata {
- column_schema: ColumnSchema::new(
- column_name,
- ConcreteDataType::string_datatype(),
- false,
- ),
- semantic_type,
- column_id: 5,
- };
- metadata_region
- .add_column(physical_region_id, logical_region_id, &column_metadata)
- .await
- .unwrap();
- let actual_semantic_type = metadata_region
- .column_semantic_type(physical_region_id, logical_region_id, column_name)
- .await
- .unwrap();
- assert_eq!(actual_semantic_type, Some(semantic_type));
-
- // duplicate column won't be updated
- let is_updated = metadata_region
- .add_column(physical_region_id, logical_region_id, &column_metadata)
- .await
- .unwrap();
- assert!(!is_updated);
- let actual_semantic_type = metadata_region
- .column_semantic_type(physical_region_id, logical_region_id, column_name)
- .await
- .unwrap();
- assert_eq!(actual_semantic_type, Some(semantic_type));
- }
-
fn test_column_metadatas() -> HashMap<String, ColumnMetadata> {
HashMap::from([
(
@@ -855,7 +685,7 @@ mod test {
.collect::<HashMap<_, _>>(),
)];
metadata_region
- .add_logical_regions(physical_region_id, iter.into_iter())
+ .add_logical_regions(physical_region_id, true, iter.into_iter())
.await
.unwrap();
// Add logical region again.
@@ -867,7 +697,7 @@ mod test {
.collect::<HashMap<_, _>>(),
)];
metadata_region
- .add_logical_regions(physical_region_id, iter.into_iter())
+ .add_logical_regions(physical_region_id, true, iter.into_iter())
.await
.unwrap();
@@ -877,17 +707,6 @@ mod test {
.await
.unwrap();
assert_eq!(logical_regions.len(), 2);
- assert_eq!(logical_regions[1], logical_region_id);
-
- // Check if the logical region exists.
- let result = metadata_region
- .exists(
- physical_region_id,
- &MetadataRegion::concat_region_key(logical_region_id),
- )
- .await
- .unwrap();
- assert!(result);
// Check if the logical region columns are added.
let logical_columns = metadata_region
diff --git a/src/store-api/src/region_request.rs b/src/store-api/src/region_request.rs
index 58afdaf128b5..af82cd1deb86 100644
--- a/src/store-api/src/region_request.rs
+++ b/src/store-api/src/region_request.rs
@@ -30,7 +30,7 @@ use datatypes::data_type::ConcreteDataType;
use datatypes::schema::FulltextOptions;
use serde::{Deserialize, Serialize};
use snafu::{ensure, OptionExt, ResultExt};
-use strum::IntoStaticStr;
+use strum::{AsRefStr, IntoStaticStr};
use crate::logstore::entry;
use crate::metadata::{
@@ -475,7 +475,7 @@ impl TryFrom<AlterRequest> for RegionAlterRequest {
}
/// Kind of the alteration.
-#[derive(Debug, PartialEq, Eq, Clone)]
+#[derive(Debug, PartialEq, Eq, Clone, AsRefStr)]
pub enum AlterKind {
/// Add columns to the region.
AddColumns {
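
A short aside on the `AsRefStr` derive added to `AlterKind` in the diff above: strum generates an `AsRef<str>` impl that yields the variant name, which is convenient for logging or metrics labels without a hand-written match. A minimal sketch, assuming a simplified enum (the real `AlterKind` variants carry fields, which `AsRefStr` also supports):

use strum::AsRefStr;

#[derive(AsRefStr)]
enum AlterKind {
    // Simplified: the real variants are struct variants with payloads.
    AddColumns,
    DropColumns,
}

fn kind_name(kind: &AlterKind) -> &str {
    // Yields "AddColumns" or "DropColumns".
    kind.as_ref()
}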
|
perf
|
optimize table alteration speed in metric engine (#5526)
|
8b869642b88cd6feafe2e3191a67f37b90c974fc
|
2023-02-15 14:53:59
|
yuanbohan
|
feat: update promql-parser to v0.1.0 (#994)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 0eed6f2c1d07..e55db3a4dfe2 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -5260,8 +5260,9 @@ dependencies = [
[[package]]
name = "promql-parser"
-version = "0.0.1"
-source = "git+https://github.com/GreptimeTeam/promql-parser.git?rev=fa186978a1234baf5a3e372da03aa663d859cdd2#fa186978a1234baf5a3e372da03aa663d859cdd2"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "24766dbb98852e704a98fc2c003d2b3ffa48317ab09b4ae184925f0e60385764"
dependencies = [
"cfgrammar",
"lazy_static",
diff --git a/src/promql/Cargo.toml b/src/promql/Cargo.toml
index e3c1d0e1c6ac..db5599f34bde 100644
--- a/src/promql/Cargo.toml
+++ b/src/promql/Cargo.toml
@@ -13,7 +13,7 @@ common-catalog = { path = "../common/catalog" }
datafusion.workspace = true
datatypes = { path = "../datatypes" }
futures = "0.3"
-promql-parser = { git = "https://github.com/GreptimeTeam/promql-parser.git", rev = "fa186978a1234baf5a3e372da03aa663d859cdd2" }
+promql-parser = "0.1.0"
session = { path = "../session" }
snafu = { version = "0.7", features = ["backtraces"] }
table = { path = "../table" }
diff --git a/src/promql/src/error.rs b/src/promql/src/error.rs
index a42c4b2d3e0b..0f422a46295e 100644
--- a/src/promql/src/error.rs
+++ b/src/promql/src/error.rs
@@ -25,7 +25,7 @@ pub enum Error {
#[snafu(display("Unsupported expr type: {}", name))]
UnsupportedExpr { name: String, backtrace: Backtrace },
- #[snafu(display("Unexpected token: {}", token))]
+ #[snafu(display("Unexpected token: {:?}", token))]
UnexpectedToken {
token: TokenType,
backtrace: Backtrace,
diff --git a/src/promql/src/planner.rs b/src/promql/src/planner.rs
index c7ab57c89dc0..7ec70e38ada2 100644
--- a/src/promql/src/planner.rs
+++ b/src/promql/src/planner.rs
@@ -103,7 +103,10 @@ impl<S: ContextProvider> PromPlanner<S> {
let input = self.prom_expr_to_plan(*expr.clone())?;
// calculate columns to group by
- let group_exprs = self.agg_modifier_to_col(input.schema(), modifier)?;
+ let group_exprs = modifier.as_ref().map_or(Ok(Vec::new()), |m| {
+ self.agg_modifier_to_col(input.schema(), m)
+ })?;
+
// convert op and value columns to aggregate exprs
let aggr_exprs = self.create_aggregate_exprs(*op, &input)?;
@@ -629,7 +632,7 @@ impl<S: ContextProvider> PromPlanner<S> {
token::T_STDVAR => AggregateFunctionEnum::Variance,
token::T_TOPK | token::T_BOTTOMK | token::T_COUNT_VALUES | token::T_QUANTILE => {
UnsupportedExprSnafu {
- name: op.to_string(),
+ name: format!("{op:?}"),
}
.fail()?
}
@@ -1135,7 +1138,9 @@ mod test {
// test group without
if let PromExpr::Aggregate(AggregateExpr { modifier, .. }) = &mut eval_stmt.expr {
- *modifier = AggModifier::Without(vec![String::from("tag_1")].into_iter().collect());
+ *modifier = Some(AggModifier::Without(
+ vec![String::from("tag_1")].into_iter().collect(),
+ ));
}
let context_provider = build_test_context_provider("some_metric".to_string(), 2, 2).await;
let plan = PromPlanner::stmt_to_plan(eval_stmt, context_provider).unwrap();
diff --git a/src/query/Cargo.toml b/src/query/Cargo.toml
index 20ebe7d44331..94f941b31ae9 100644
--- a/src/query/Cargo.toml
+++ b/src/query/Cargo.toml
@@ -29,7 +29,7 @@ futures-util.workspace = true
metrics = "0.20"
once_cell = "1.10"
promql = { path = "../promql" }
-promql-parser = { git = "https://github.com/GreptimeTeam/promql-parser.git", rev = "fa186978a1234baf5a3e372da03aa663d859cdd2" }
+promql-parser = "0.1.0"
serde.workspace = true
serde_json = "1.0"
session = { path = "../session" }
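
The planner change in the diff above copes with promql-parser 0.1.0 making the aggregate modifier an `Option`. The `map_or` pattern it uses defaults to an empty group-by list when no modifier is present and still propagates errors from the fallible closure via `?`; a minimal sketch with stand-in types (the real code calls `agg_modifier_to_col` on the input schema):

fn group_columns(modifier: Option<&Vec<String>>) -> Result<Vec<String>, String> {
    // None -> no grouping columns; Some(m) -> derive columns, which may fail.
    let group_exprs = modifier.map_or(Ok(Vec::new()), |m| {
        if m.iter().any(|c| c.is_empty()) {
            Err("empty label name".to_string())
        } else {
            Ok(m.clone())
        }
    })?;
    Ok(group_exprs)
}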
|
feat
|
update promql-parser to v0.1.0 (#994)
|
69a53130c223a821bf8ca1b9a028667c5a1c9bf9
|
2023-12-30 13:02:32
|
Zhenchi
|
feat(inverted_index): Add applier builder to convert Expr to Predicates (Part 1) (#3034)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index d179ea6c8c54..b042227a293b 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4029,7 +4029,7 @@ dependencies = [
"prost 0.12.3",
"rand",
"regex",
- "regex-automata 0.1.10",
+ "regex-automata 0.2.0",
"snafu",
"tokio",
"tokio-util",
@@ -4977,6 +4977,7 @@ dependencies = [
"datatypes",
"futures",
"humantime-serde",
+ "index",
"lazy_static",
"log-store",
"memcomparable",
@@ -7134,8 +7135,18 @@ name = "regex-automata"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132"
+dependencies = [
+ "regex-syntax 0.6.29",
+]
+
+[[package]]
+name = "regex-automata"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e9368763f5a9b804326f3af749e16f9abf378d227bcdee7634b13d8f17793782"
dependencies = [
"fst",
+ "memchr",
"regex-syntax 0.6.29",
]
diff --git a/Cargo.toml b/Cargo.toml
index 0e38d914eccb..a3413aa9d48d 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -111,7 +111,7 @@ prost = "0.12"
raft-engine = { git = "https://github.com/tikv/raft-engine.git", rev = "22dfb426cd994602b57725ef080287d3e53db479" }
rand = "0.8"
regex = "1.8"
-regex-automata = { version = "0.1", features = ["transducer"] }
+regex-automata = { version = "0.2", features = ["transducer"] }
reqwest = { version = "0.11", default-features = false, features = [
"json",
"rustls-tls-native-roots",
@@ -169,6 +169,7 @@ datanode = { path = "src/datanode" }
datatypes = { path = "src/datatypes" }
file-engine = { path = "src/file-engine" }
frontend = { path = "src/frontend" }
+index = { path = "src/index" }
log-store = { path = "src/log-store" }
meta-client = { path = "src/meta-client" }
meta-srv = { path = "src/meta-srv" }
diff --git a/src/common/config/src/wal/kafka.rs b/src/common/config/src/wal/kafka.rs
index e93aa6cb2271..858991264bb6 100644
--- a/src/common/config/src/wal/kafka.rs
+++ b/src/common/config/src/wal/kafka.rs
@@ -42,7 +42,7 @@ pub struct KafkaConfig {
#[serde(skip)]
#[serde(default)]
pub compression: RsKafkaCompression,
- /// The maximum log size a kakfa batch producer could buffer.
+ /// The maximum log size a kafka batch producer could buffer.
pub max_batch_size: ReadableSize,
/// The linger duration of a kafka batch producer.
#[serde(with = "humantime_serde")]
diff --git a/src/index/src/inverted_index/error.rs b/src/index/src/inverted_index/error.rs
index b795e33003b7..6e5f39006eb9 100644
--- a/src/index/src/inverted_index/error.rs
+++ b/src/index/src/inverted_index/error.rs
@@ -113,7 +113,7 @@ pub enum Error {
#[snafu(display("Failed to parse regex DFA"))]
ParseDFA {
#[snafu(source)]
- error: regex_automata::Error,
+ error: Box<regex_automata::dfa::Error>,
location: Location,
},
diff --git a/src/index/src/inverted_index/search/fst_apply/intersection_apply.rs b/src/index/src/inverted_index/search/fst_apply/intersection_apply.rs
index a0ae0d7b9afb..a608acd0bab5 100644
--- a/src/index/src/inverted_index/search/fst_apply/intersection_apply.rs
+++ b/src/index/src/inverted_index/search/fst_apply/intersection_apply.rs
@@ -14,7 +14,7 @@
use fst::map::OpBuilder;
use fst::{IntoStreamer, Streamer};
-use regex_automata::DenseDFA;
+use regex_automata::dfa::dense::DFA;
use snafu::{ensure, ResultExt};
use crate::inverted_index::error::{
@@ -24,15 +24,13 @@ use crate::inverted_index::search::fst_apply::FstApplier;
use crate::inverted_index::search::predicate::{Predicate, Range};
use crate::inverted_index::FstMap;
-type Dfa = DenseDFA<Vec<usize>, usize>;
-
/// `IntersectionFstApplier` applies intersection operations on an FstMap using specified ranges and regex patterns.
pub struct IntersectionFstApplier {
/// A list of `Range` which define inclusive or exclusive ranges for keys to be queried in the FstMap.
ranges: Vec<Range>,
/// A list of `Dfa` compiled from regular expression patterns.
- dfas: Vec<Dfa>,
+ dfas: Vec<DFA<Vec<u32>>>,
}
impl FstApplier for IntersectionFstApplier {
@@ -88,8 +86,8 @@ impl IntersectionFstApplier {
match predicate {
Predicate::Range(range) => ranges.push(range.range),
Predicate::RegexMatch(regex) => {
- let dfa = DenseDFA::new(&regex.pattern);
- let dfa = dfa.context(ParseDFASnafu)?;
+ let dfa = DFA::new(&regex.pattern);
+ let dfa = dfa.map_err(Box::new).context(ParseDFASnafu)?;
dfas.push(dfa);
}
// Rejection of `InList` predicates is enforced here.
@@ -210,47 +208,67 @@ mod tests {
#[test]
fn test_intersection_fst_applier_with_valid_pattern() {
- let test_fst = FstMap::from_iter([("aa", 1), ("bb", 2), ("cc", 3)]).unwrap();
-
- let applier = create_applier_from_pattern("a.?").unwrap();
- let results = applier.apply(&test_fst);
- assert_eq!(results, vec![1]);
-
- let applier = create_applier_from_pattern("b.?").unwrap();
- let results = applier.apply(&test_fst);
- assert_eq!(results, vec![2]);
-
- let applier = create_applier_from_pattern("c.?").unwrap();
- let results = applier.apply(&test_fst);
- assert_eq!(results, vec![3]);
-
- let applier = create_applier_from_pattern("a.*").unwrap();
- let results = applier.apply(&test_fst);
- assert_eq!(results, vec![1]);
-
- let applier = create_applier_from_pattern("b.*").unwrap();
- let results = applier.apply(&test_fst);
- assert_eq!(results, vec![2]);
-
- let applier = create_applier_from_pattern("c.*").unwrap();
- let results = applier.apply(&test_fst);
- assert_eq!(results, vec![3]);
-
- let applier = create_applier_from_pattern("d.?").unwrap();
- let results = applier.apply(&test_fst);
- assert!(results.is_empty());
-
- let applier = create_applier_from_pattern("a.?|b.?").unwrap();
- let results = applier.apply(&test_fst);
- assert_eq!(results, vec![1, 2]);
-
- let applier = create_applier_from_pattern("d.?|a.?").unwrap();
- let results = applier.apply(&test_fst);
- assert_eq!(results, vec![1]);
-
- let applier = create_applier_from_pattern(".*").unwrap();
- let results = applier.apply(&test_fst);
- assert_eq!(results, vec![1, 2, 3]);
+ let test_fst = FstMap::from_iter([("123", 1), ("abc", 2)]).unwrap();
+
+ let cases = vec![
+ ("1", vec![1]),
+ ("2", vec![1]),
+ ("3", vec![1]),
+ ("^1", vec![1]),
+ ("^2", vec![]),
+ ("^3", vec![]),
+ ("^1.*", vec![1]),
+ ("^.*2", vec![1]),
+ ("^.*3", vec![1]),
+ ("1$", vec![]),
+ ("2$", vec![]),
+ ("3$", vec![1]),
+ ("1.*$", vec![1]),
+ ("2.*$", vec![1]),
+ ("3.*$", vec![1]),
+ ("^1..$", vec![1]),
+ ("^.2.$", vec![1]),
+ ("^..3$", vec![1]),
+ ("^[0-9]", vec![1]),
+ ("^[0-9]+$", vec![1]),
+ ("^[0-9][0-9]$", vec![]),
+ ("^[0-9][0-9][0-9]$", vec![1]),
+ ("^123$", vec![1]),
+ ("a", vec![2]),
+ ("b", vec![2]),
+ ("c", vec![2]),
+ ("^a", vec![2]),
+ ("^b", vec![]),
+ ("^c", vec![]),
+ ("^a.*", vec![2]),
+ ("^.*b", vec![2]),
+ ("^.*c", vec![2]),
+ ("a$", vec![]),
+ ("b$", vec![]),
+ ("c$", vec![2]),
+ ("a.*$", vec![2]),
+ ("b.*$", vec![2]),
+ ("c.*$", vec![2]),
+ ("^.[a-z]", vec![2]),
+ ("^abc$", vec![2]),
+ ("^ab$", vec![]),
+ ("abc$", vec![2]),
+ ("^a.c$", vec![2]),
+ ("^..c$", vec![2]),
+ ("ab", vec![2]),
+ (".*", vec![1, 2]),
+ ("", vec![1, 2]),
+ ("^$", vec![]),
+ ("1|a", vec![1, 2]),
+ ("^123$|^abc$", vec![1, 2]),
+ ("^123$|d", vec![1]),
+ ];
+
+ for (pattern, expected) in cases {
+ let applier = create_applier_from_pattern(pattern).unwrap();
+ let results = applier.apply(&test_fst);
+ assert_eq!(results, expected);
+ }
}
#[test]
diff --git a/src/mito2/Cargo.toml b/src/mito2/Cargo.toml
index 8c3ef50ec2c7..a28e4f0426ea 100644
--- a/src/mito2/Cargo.toml
+++ b/src/mito2/Cargo.toml
@@ -39,6 +39,7 @@ datafusion.workspace = true
datatypes.workspace = true
futures.workspace = true
humantime-serde.workspace = true
+index.workspace = true
lazy_static = "1.4"
log-store = { workspace = true, optional = true }
memcomparable = "0.2"
diff --git a/src/mito2/src/error.rs b/src/mito2/src/error.rs
index 39457281d76b..68a35123ea39 100644
--- a/src/mito2/src/error.rs
+++ b/src/mito2/src/error.rs
@@ -423,6 +423,23 @@ pub enum Error {
#[snafu(source)]
error: parquet::errors::ParquetError,
},
+
+ #[snafu(display("Column not found, column: {column}"))]
+ ColumnNotFound { column: String, location: Location },
+
+ #[snafu(display("Failed to build index applier"))]
+ BuildIndexApplier {
+ #[snafu(source)]
+ source: index::inverted_index::error::Error,
+ location: Location,
+ },
+
+ #[snafu(display("Failed to convert value"))]
+ ConvertValue {
+ #[snafu(source)]
+ source: datatypes::error::Error,
+ location: Location,
+ },
}
pub type Result<T, E = Error> = std::result::Result<T, E>;
@@ -468,6 +485,7 @@ impl ErrorExt for Error {
| InvalidRequest { .. }
| FillDefault { .. }
| ConvertColumnDataType { .. }
+ | ColumnNotFound { .. }
| InvalidMetadata { .. } => StatusCode::InvalidArguments,
RegionMetadataNotFound { .. }
| Join { .. }
@@ -504,6 +522,8 @@ impl ErrorExt for Error {
JsonOptions { .. } => StatusCode::InvalidArguments,
EmptyRegionDir { .. } | EmptyManifestDir { .. } => StatusCode::RegionNotFound,
ArrowReader { .. } => StatusCode::StorageUnavailable,
+ BuildIndexApplier { source, .. } => source.status_code(),
+ ConvertValue { source, .. } => source.status_code(),
}
}
diff --git a/src/mito2/src/row_converter.rs b/src/mito2/src/row_converter.rs
index 4cc6fd3274ac..33ef05433521 100644
--- a/src/mito2/src/row_converter.rs
+++ b/src/mito2/src/row_converter.rs
@@ -84,7 +84,11 @@ impl SortField {
}
impl SortField {
- fn serialize(&self, serializer: &mut Serializer<&mut Vec<u8>>, value: &ValueRef) -> Result<()> {
+ pub(crate) fn serialize(
+ &self,
+ serializer: &mut Serializer<&mut Vec<u8>>,
+ value: &ValueRef,
+ ) -> Result<()> {
macro_rules! cast_value_and_serialize {
(
$self: ident;
diff --git a/src/mito2/src/sst.rs b/src/mito2/src/sst.rs
index 32c7b4951a55..55939c2d246a 100644
--- a/src/mito2/src/sst.rs
+++ b/src/mito2/src/sst.rs
@@ -16,5 +16,6 @@
pub mod file;
pub mod file_purger;
+mod index;
pub mod parquet;
pub(crate) mod version;
diff --git a/src/mito2/src/sst/index.rs b/src/mito2/src/sst/index.rs
new file mode 100644
index 000000000000..baffda27aa6e
--- /dev/null
+++ b/src/mito2/src/sst/index.rs
@@ -0,0 +1,18 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#![allow(dead_code)]
+
+pub mod applier;
+mod codec;
diff --git a/src/mito2/src/sst/index/applier.rs b/src/mito2/src/sst/index/applier.rs
new file mode 100644
index 000000000000..95ca25ba003d
--- /dev/null
+++ b/src/mito2/src/sst/index/applier.rs
@@ -0,0 +1,47 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+pub mod builder;
+
+use index::inverted_index::search::index_apply::IndexApplier;
+use object_store::ObjectStore;
+
+/// The [`SstIndexApplier`] is responsible for applying predicates to the provided SST files
+/// and returning the relevant row group ids for further scan.
+pub struct SstIndexApplier {
+ /// The root directory of the region.
+ region_dir: String,
+
+ /// Object store responsible for accessing SST files.
+ object_store: ObjectStore,
+
+ /// Predefined index applier used to apply predicates to index files
+ /// and return the relevant row group ids for further scan.
+ index_applier: Box<dyn IndexApplier>,
+}
+
+impl SstIndexApplier {
+ /// Creates a new [`SstIndexApplier`].
+ pub fn new(
+ region_dir: String,
+ object_store: ObjectStore,
+ index_applier: Box<dyn IndexApplier>,
+ ) -> Self {
+ Self {
+ region_dir,
+ object_store,
+ index_applier,
+ }
+ }
+}
diff --git a/src/mito2/src/sst/index/applier/builder.rs b/src/mito2/src/sst/index/applier/builder.rs
new file mode 100644
index 000000000000..52af22effb18
--- /dev/null
+++ b/src/mito2/src/sst/index/applier/builder.rs
@@ -0,0 +1,261 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+mod between;
+
+// TODO(zhongzc): This PR is too large. The following modules are coming soon.
+
+// mod comparison;
+// mod eq_list;
+// mod in_list;
+// mod regex_match;
+
+use std::collections::HashMap;
+
+use api::v1::SemanticType;
+use common_query::logical_plan::Expr;
+use common_telemetry::warn;
+use datafusion_common::ScalarValue;
+use datafusion_expr::Expr as DfExpr;
+use datatypes::data_type::ConcreteDataType;
+use datatypes::value::Value;
+use index::inverted_index::search::index_apply::PredicatesIndexApplier;
+use index::inverted_index::search::predicate::Predicate;
+use object_store::ObjectStore;
+use snafu::{OptionExt, ResultExt};
+use store_api::metadata::RegionMetadata;
+
+use crate::error::{BuildIndexApplierSnafu, ColumnNotFoundSnafu, ConvertValueSnafu, Result};
+use crate::row_converter::SortField;
+use crate::sst::index::applier::SstIndexApplier;
+use crate::sst::index::codec::IndexValueCodec;
+
+type ColumnName = String;
+
+/// Constructs an [`SstIndexApplier`] which applies predicates to SST files during scan.
+pub struct SstIndexApplierBuilder<'a> {
+ /// Directory of the region, required argument for constructing [`SstIndexApplier`].
+ region_dir: String,
+
+ /// Object store, required argument for constructing [`SstIndexApplier`].
+ object_store: ObjectStore,
+
+ /// Metadata of the region, used to get metadata like column type.
+ metadata: &'a RegionMetadata,
+
+ /// Stores predicates during traversal on the Expr tree.
+ output: HashMap<ColumnName, Vec<Predicate>>,
+}
+
+impl<'a> SstIndexApplierBuilder<'a> {
+ /// Creates a new [`SstIndexApplierBuilder`].
+ pub fn new(
+ region_dir: String,
+ object_store: ObjectStore,
+ metadata: &'a RegionMetadata,
+ ) -> Self {
+ Self {
+ region_dir,
+ object_store,
+ metadata,
+ output: HashMap::default(),
+ }
+ }
+
+ /// Consumes the builder to construct an [`SstIndexApplier`], optionally returned based on
+ /// the expressions provided. If no predicates match, returns `None`.
+ pub fn build(mut self, exprs: &[Expr]) -> Result<Option<SstIndexApplier>> {
+ for expr in exprs {
+ self.traverse_and_collect(expr.df_expr());
+ }
+
+ if self.output.is_empty() {
+ return Ok(None);
+ }
+
+ let predicates = self.output.into_iter().collect();
+ let applier = PredicatesIndexApplier::try_from(predicates);
+ Ok(Some(SstIndexApplier::new(
+ self.region_dir,
+ self.object_store,
+ Box::new(applier.context(BuildIndexApplierSnafu)?),
+ )))
+ }
+
+ /// Recursively traverses expressions to collect predicates.
+ /// Results are stored in `self.output`.
+ fn traverse_and_collect(&mut self, expr: &DfExpr) {
+ let res = match expr {
+ DfExpr::Between(between) => self.collect_between(between),
+
+ // TODO(zhongzc): This PR is too large. The following arms are coming soon.
+
+ // DfExpr::InList(in_list) => self.collect_inlist(in_list),
+ // DfExpr::BinaryExpr(BinaryExpr { left, op, right }) => match op {
+ // Operator::And => {
+ // self.traverse_and_collect(left);
+ // self.traverse_and_collect(right);
+ // Ok(())
+ // }
+ // Operator::Or => self.collect_or_eq_list(left, right),
+ // Operator::Eq => self.collect_eq(left, right),
+ // Operator::Lt | Operator::LtEq | Operator::Gt | Operator::GtEq => {
+ // self.collect_comparison_expr(left, op, right)
+ // }
+ // Operator::RegexMatch => self.collect_regex_match(left, right),
+ // _ => Ok(()),
+ // },
+
+ // TODO(zhongzc): support more expressions, e.g. IsNull, IsNotNull, ...
+ _ => Ok(()),
+ };
+
+ if let Err(err) = res {
+ warn!(err; "Failed to collect predicates, ignore it. expr: {expr}");
+ }
+ }
+
+ /// Helper function to add a predicate to the output.
+ fn add_predicate(&mut self, column_name: &str, predicate: Predicate) {
+ match self.output.get_mut(column_name) {
+ Some(predicates) => predicates.push(predicate),
+ None => {
+ self.output.insert(column_name.to_string(), vec![predicate]);
+ }
+ }
+ }
+
+ /// Helper function to get the column type of a tag column.
+ /// Returns `None` if the column is not a tag column.
+ fn tag_column_type(&self, column_name: &str) -> Result<Option<ConcreteDataType>> {
+ let column = self
+ .metadata
+ .column_by_name(column_name)
+ .context(ColumnNotFoundSnafu {
+ column: column_name,
+ })?;
+
+ Ok((column.semantic_type == SemanticType::Tag)
+ .then(|| column.column_schema.data_type.clone()))
+ }
+
+ /// Helper function to get a non-null literal.
+ fn nonnull_lit(expr: &DfExpr) -> Option<&ScalarValue> {
+ match expr {
+ DfExpr::Literal(lit) if !lit.is_null() => Some(lit),
+ _ => None,
+ }
+ }
+
+ /// Helper function to get the column name of a column expression.
+ fn column_name(expr: &DfExpr) -> Option<&str> {
+ match expr {
+ DfExpr::Column(column) => Some(&column.name),
+ _ => None,
+ }
+ }
+
+ /// Helper function to encode a literal into bytes.
+ fn encode_lit(lit: &ScalarValue, data_type: ConcreteDataType) -> Result<Vec<u8>> {
+ let value = Value::try_from(lit.clone()).context(ConvertValueSnafu)?;
+ let mut bytes = vec![];
+ let field = SortField::new(data_type);
+ IndexValueCodec::encode_value(value.as_value_ref(), &field, &mut bytes)?;
+ Ok(bytes)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use api::v1::SemanticType;
+ use datafusion_common::Column;
+ use datatypes::data_type::ConcreteDataType;
+ use datatypes::schema::ColumnSchema;
+ use object_store::services::Memory;
+ use object_store::ObjectStore;
+ use store_api::metadata::{ColumnMetadata, RegionMetadata, RegionMetadataBuilder};
+ use store_api::storage::RegionId;
+
+ use super::*;
+
+ pub(crate) fn test_region_metadata() -> RegionMetadata {
+ let mut builder = RegionMetadataBuilder::new(RegionId::new(1234, 5678));
+ builder
+ .push_column_metadata(ColumnMetadata {
+ column_schema: ColumnSchema::new("a", ConcreteDataType::string_datatype(), false),
+ semantic_type: SemanticType::Tag,
+ column_id: 1,
+ })
+ .push_column_metadata(ColumnMetadata {
+ column_schema: ColumnSchema::new("b", ConcreteDataType::string_datatype(), false),
+ semantic_type: SemanticType::Field,
+ column_id: 2,
+ })
+ .push_column_metadata(ColumnMetadata {
+ column_schema: ColumnSchema::new(
+ "c",
+ ConcreteDataType::timestamp_millisecond_datatype(),
+ false,
+ ),
+ semantic_type: SemanticType::Timestamp,
+ column_id: 3,
+ })
+ .primary_key(vec![1]);
+ builder.build().unwrap()
+ }
+
+ pub(crate) fn test_object_store() -> ObjectStore {
+ ObjectStore::new(Memory::default()).unwrap().finish()
+ }
+
+ pub(crate) fn tag_column() -> DfExpr {
+ DfExpr::Column(Column {
+ relation: None,
+ name: "a".to_string(),
+ })
+ }
+
+ pub(crate) fn field_column() -> DfExpr {
+ DfExpr::Column(Column {
+ relation: None,
+ name: "b".to_string(),
+ })
+ }
+
+ pub(crate) fn nonexistent_column() -> DfExpr {
+ DfExpr::Column(Column {
+ relation: None,
+ name: "nonexistent".to_string(),
+ })
+ }
+
+ pub(crate) fn string_lit(s: impl Into<String>) -> DfExpr {
+ DfExpr::Literal(ScalarValue::Utf8(Some(s.into())))
+ }
+
+ pub(crate) fn int64_lit(i: impl Into<i64>) -> DfExpr {
+ DfExpr::Literal(ScalarValue::Int64(Some(i.into())))
+ }
+
+ pub(crate) fn encoded_string(s: impl Into<String>) -> Vec<u8> {
+ let mut bytes = vec![];
+ IndexValueCodec::encode_value(
+ Value::from(s.into()).as_value_ref(),
+ &SortField::new(ConcreteDataType::string_datatype()),
+ &mut bytes,
+ )
+ .unwrap();
+ bytes
+ }
+}
diff --git a/src/mito2/src/sst/index/applier/builder/between.rs b/src/mito2/src/sst/index/applier/builder/between.rs
new file mode 100644
index 000000000000..50ae7073b2db
--- /dev/null
+++ b/src/mito2/src/sst/index/applier/builder/between.rs
@@ -0,0 +1,171 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use datafusion_expr::Between;
+use index::inverted_index::search::predicate::{Bound, Predicate, Range, RangePredicate};
+
+use crate::error::Result;
+use crate::sst::index::applier::builder::SstIndexApplierBuilder;
+
+impl<'a> SstIndexApplierBuilder<'a> {
+ /// Collects a `BETWEEN` expression in the form of `column BETWEEN lit AND lit`.
+ pub(crate) fn collect_between(&mut self, between: &Between) -> Result<()> {
+ if between.negated {
+ return Ok(());
+ }
+
+ let Some(column_name) = Self::column_name(&between.expr) else {
+ return Ok(());
+ };
+ let Some(data_type) = self.tag_column_type(column_name)? else {
+ return Ok(());
+ };
+ let Some(low) = Self::nonnull_lit(&between.low) else {
+ return Ok(());
+ };
+ let Some(high) = Self::nonnull_lit(&between.high) else {
+ return Ok(());
+ };
+
+ let predicate = Predicate::Range(RangePredicate {
+ range: Range {
+ lower: Some(Bound {
+ inclusive: true,
+ value: Self::encode_lit(low, data_type.clone())?,
+ }),
+ upper: Some(Bound {
+ inclusive: true,
+ value: Self::encode_lit(high, data_type)?,
+ }),
+ },
+ });
+
+ self.add_predicate(column_name, predicate);
+ Ok(())
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::error::Error;
+ use crate::sst::index::applier::builder::tests::{
+ encoded_string, field_column, int64_lit, nonexistent_column, string_lit, tag_column,
+ test_object_store, test_region_metadata,
+ };
+
+ #[test]
+ fn test_collect_between_basic() {
+ let metadata = test_region_metadata();
+ let mut builder =
+ SstIndexApplierBuilder::new("test".to_string(), test_object_store(), &metadata);
+
+ let between = Between {
+ negated: false,
+ expr: Box::new(tag_column()),
+ low: Box::new(string_lit("abc")),
+ high: Box::new(string_lit("def")),
+ };
+
+ builder.collect_between(&between).unwrap();
+
+ let predicates = builder.output.get("a").unwrap();
+ assert_eq!(predicates.len(), 1);
+ assert_eq!(
+ predicates[0],
+ Predicate::Range(RangePredicate {
+ range: Range {
+ lower: Some(Bound {
+ inclusive: true,
+ value: encoded_string("abc"),
+ }),
+ upper: Some(Bound {
+ inclusive: true,
+ value: encoded_string("def"),
+ }),
+ }
+ })
+ );
+ }
+
+ #[test]
+ fn test_collect_between_negated() {
+ let metadata = test_region_metadata();
+ let mut builder =
+ SstIndexApplierBuilder::new("test".to_string(), test_object_store(), &metadata);
+
+ let between = Between {
+ negated: true,
+ expr: Box::new(tag_column()),
+ low: Box::new(string_lit("abc")),
+ high: Box::new(string_lit("def")),
+ };
+
+ builder.collect_between(&between).unwrap();
+ assert!(builder.output.is_empty());
+ }
+
+ #[test]
+ fn test_collect_between_field_column() {
+ let metadata = test_region_metadata();
+ let mut builder =
+ SstIndexApplierBuilder::new("test".to_string(), test_object_store(), &metadata);
+
+ let between = Between {
+ negated: false,
+ expr: Box::new(field_column()),
+ low: Box::new(string_lit("abc")),
+ high: Box::new(string_lit("def")),
+ };
+
+ builder.collect_between(&between).unwrap();
+ assert!(builder.output.is_empty());
+ }
+
+ #[test]
+ fn test_collect_between_type_mismatch() {
+ let metadata = test_region_metadata();
+ let mut builder =
+ SstIndexApplierBuilder::new("test".to_string(), test_object_store(), &metadata);
+
+ let between = Between {
+ negated: false,
+ expr: Box::new(tag_column()),
+ low: Box::new(int64_lit(123)),
+ high: Box::new(int64_lit(456)),
+ };
+
+ let res = builder.collect_between(&between);
+ assert!(matches!(res, Err(Error::FieldTypeMismatch { .. })));
+ assert!(builder.output.is_empty());
+ }
+
+ #[test]
+ fn test_collect_between_nonexistent_column() {
+ let metadata = test_region_metadata();
+ let mut builder =
+ SstIndexApplierBuilder::new("test".to_string(), test_object_store(), &metadata);
+
+ let between = Between {
+ negated: false,
+ expr: Box::new(nonexistent_column()),
+ low: Box::new(string_lit("abc")),
+ high: Box::new(string_lit("def")),
+ };
+
+ let res = builder.collect_between(&between);
+ assert!(matches!(res, Err(Error::ColumnNotFound { .. })));
+ assert!(builder.output.is_empty());
+ }
+}
diff --git a/src/mito2/src/sst/index/codec.rs b/src/mito2/src/sst/index/codec.rs
new file mode 100644
index 000000000000..ada5ac07cbfc
--- /dev/null
+++ b/src/mito2/src/sst/index/codec.rs
@@ -0,0 +1,65 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use datatypes::value::ValueRef;
+use memcomparable::Serializer;
+
+use crate::error::Result;
+use crate::row_converter::SortField;
+
+/// Encodes index values according to their data types for sorting and storage use.
+pub struct IndexValueCodec;
+
+impl IndexValueCodec {
+ /// Serializes a `ValueRef` using the data type defined in `SortField` and writes
+ /// the result into a buffer.
+ ///
+ /// # Arguments
+ /// * `value` - The value to be encoded.
+ /// * `field` - Contains data type to guide serialization.
+ /// * `buffer` - Destination buffer for the serialized value.
+ pub fn encode_value(value: ValueRef, field: &SortField, buffer: &mut Vec<u8>) -> Result<()> {
+ buffer.reserve(field.estimated_size());
+ let mut serializer = Serializer::new(buffer);
+ field.serialize(&mut serializer, &value)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use datatypes::data_type::ConcreteDataType;
+
+ use super::*;
+ use crate::error::Error;
+
+ #[test]
+ fn test_encode_value_basic() {
+ let value = ValueRef::from("hello");
+ let field = SortField::new(ConcreteDataType::string_datatype());
+
+ let mut buffer = Vec::new();
+ IndexValueCodec::encode_value(value, &field, &mut buffer).unwrap();
+ assert!(!buffer.is_empty());
+ }
+
+ #[test]
+ fn test_encode_value_type_mismatch() {
+ let value = ValueRef::from("hello");
+ let field = SortField::new(ConcreteDataType::int64_datatype());
+
+ let mut buffer = Vec::new();
+ let res = IndexValueCodec::encode_value(value, &field, &mut buffer);
+ assert!(matches!(res, Err(Error::FieldTypeMismatch { .. })));
+ }
+}
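
The regex part of this commit migrates from regex-automata 0.1's `DenseDFA` to 0.2's `dfa::dense::DFA`, with the error type boxed as `regex_automata::dfa::Error`. The apply path of `IntersectionFstApplier` is not shown in the diff, so the following is only a sketch of how a compiled DFA can drive an FST lookup, assuming the `transducer` feature (kept in the Cargo.toml change) provides the `fst::Automaton` impl for the dense DFA:

use fst::{IntoStreamer, Map, Streamer};
use regex_automata::dfa::dense::DFA;

// Returns the FST values whose keys match `pattern`.
fn regex_matches(
    map: &Map<Vec<u8>>,
    pattern: &str,
) -> Result<Vec<u64>, Box<regex_automata::dfa::Error>> {
    // regex-automata 0.2: DFA::new replaces the old DenseDFA::new; the error
    // is boxed, mirroring the ParseDFA variant in the diff.
    let dfa = DFA::new(pattern).map_err(Box::new)?;
    let mut stream = map.search(dfa).into_stream();
    let mut values = Vec::new();
    while let Some((_key, value)) = stream.next() {
        values.push(value);
    }
    Ok(values)
}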
|
feat
|
Add applier builder to convert Expr to Predicates (Part 1) (#3034)
|
7c135c0ef959f1ae47bc4d8f780ef71ebd72bdaa
|
2024-11-18 12:40:40
|
Weny Xu
|
feat: introduce `DynamicTimeoutLayer` (#5006)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 894d2c607690..d5bfbf953070 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1859,6 +1859,7 @@ dependencies = [
"frontend",
"futures",
"human-panic",
+ "humantime",
"lazy_static",
"meta-client",
"meta-srv",
@@ -10976,11 +10977,13 @@ dependencies = [
"datatypes",
"derive_builder 0.12.0",
"futures",
+ "futures-util",
"hashbrown 0.14.5",
"headers 0.3.9",
"hostname",
"http 0.2.12",
"http-body 0.4.6",
+ "humantime",
"humantime-serde",
"hyper 0.14.30",
"influxdb_line_protocol",
diff --git a/src/cmd/Cargo.toml b/src/cmd/Cargo.toml
index 95937567c371..c1f20cc9c526 100644
--- a/src/cmd/Cargo.toml
+++ b/src/cmd/Cargo.toml
@@ -53,6 +53,7 @@ flow.workspace = true
frontend = { workspace = true, default-features = false }
futures.workspace = true
human-panic = "2.0"
+humantime.workspace = true
lazy_static.workspace = true
meta-client.workspace = true
meta-srv.workspace = true
diff --git a/src/cmd/src/cli/database.rs b/src/cmd/src/cli/database.rs
index eb5647699ef0..d313e93acf7c 100644
--- a/src/cmd/src/cli/database.rs
+++ b/src/cmd/src/cli/database.rs
@@ -12,11 +12,15 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::time::Duration;
+
use base64::engine::general_purpose;
use base64::Engine;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
+use humantime::format_duration;
use serde_json::Value;
use servers::http::greptime_result_v1::GreptimedbV1Response;
+use servers::http::header::constants::GREPTIME_DB_HEADER_TIMEOUT;
use servers::http::GreptimeQueryOutput;
use snafu::ResultExt;
@@ -26,10 +30,16 @@ pub(crate) struct DatabaseClient {
addr: String,
catalog: String,
auth_header: Option<String>,
+ timeout: Option<Duration>,
}
impl DatabaseClient {
- pub fn new(addr: String, catalog: String, auth_basic: Option<String>) -> Self {
+ pub fn new(
+ addr: String,
+ catalog: String,
+ auth_basic: Option<String>,
+ timeout: Option<Duration>,
+ ) -> Self {
let auth_header = if let Some(basic) = auth_basic {
let encoded = general_purpose::STANDARD.encode(basic);
Some(format!("basic {}", encoded))
@@ -41,6 +51,7 @@ impl DatabaseClient {
addr,
catalog,
auth_header,
+ timeout,
}
}
@@ -62,6 +73,12 @@ impl DatabaseClient {
if let Some(ref auth) = self.auth_header {
request = request.header("Authorization", auth);
}
+ if let Some(ref timeout) = self.timeout {
+ request = request.header(
+ GREPTIME_DB_HEADER_TIMEOUT,
+ format_duration(*timeout).to_string(),
+ );
+ }
let response = request.send().await.with_context(|_| HttpQuerySqlSnafu {
reason: format!("bad url: {}", url),
diff --git a/src/cmd/src/cli/export.rs b/src/cmd/src/cli/export.rs
index ee5f5329cdd5..760fdbbc02f6 100644
--- a/src/cmd/src/cli/export.rs
+++ b/src/cmd/src/cli/export.rs
@@ -15,6 +15,7 @@
use std::collections::HashSet;
use std::path::PathBuf;
use std::sync::Arc;
+use std::time::Duration;
use async_trait::async_trait;
use clap::{Parser, ValueEnum};
@@ -83,14 +84,22 @@ pub struct ExportCommand {
/// The basic authentication for connecting to the server
#[clap(long)]
auth_basic: Option<String>,
+
+ /// The timeout of invoking the database.
+ #[clap(long, value_parser = humantime::parse_duration)]
+ timeout: Option<Duration>,
}
impl ExportCommand {
pub async fn build(&self, guard: Vec<WorkerGuard>) -> Result<Instance> {
let (catalog, schema) = database::split_database(&self.database)?;
- let database_client =
- DatabaseClient::new(self.addr.clone(), catalog.clone(), self.auth_basic.clone());
+ let database_client = DatabaseClient::new(
+ self.addr.clone(),
+ catalog.clone(),
+ self.auth_basic.clone(),
+ self.timeout,
+ );
Ok(Instance::new(
Box::new(Export {
diff --git a/src/cmd/src/cli/import.rs b/src/cmd/src/cli/import.rs
index b1d27fb0e058..908fc944bd03 100644
--- a/src/cmd/src/cli/import.rs
+++ b/src/cmd/src/cli/import.rs
@@ -14,6 +14,7 @@
use std::path::PathBuf;
use std::sync::Arc;
+use std::time::Duration;
use async_trait::async_trait;
use clap::{Parser, ValueEnum};
@@ -68,13 +69,21 @@ pub struct ImportCommand {
/// The basic authentication for connecting to the server
#[clap(long)]
auth_basic: Option<String>,
+
+ /// The timeout of invoking the database.
+ #[clap(long, value_parser = humantime::parse_duration)]
+ timeout: Option<Duration>,
}
impl ImportCommand {
pub async fn build(&self, guard: Vec<WorkerGuard>) -> Result<Instance> {
let (catalog, schema) = database::split_database(&self.database)?;
- let database_client =
- DatabaseClient::new(self.addr.clone(), catalog.clone(), self.auth_basic.clone());
+ let database_client = DatabaseClient::new(
+ self.addr.clone(),
+ catalog.clone(),
+ self.auth_basic.clone(),
+ self.timeout,
+ );
Ok(Instance::new(
Box::new(Import {
diff --git a/src/servers/Cargo.toml b/src/servers/Cargo.toml
index a2803ae03572..47bbfc4f7382 100644
--- a/src/servers/Cargo.toml
+++ b/src/servers/Cargo.toml
@@ -52,11 +52,13 @@ datafusion-expr.workspace = true
datatypes.workspace = true
derive_builder.workspace = true
futures = "0.3"
+futures-util.workspace = true
hashbrown = "0.14"
headers = "0.3"
hostname = "0.3"
http = "0.2"
http-body = "0.4"
+humantime.workspace = true
humantime-serde.workspace = true
hyper = { version = "0.14", features = ["full"] }
influxdb_line_protocol = { git = "https://github.com/evenyag/influxdb_iox", branch = "feat/line-protocol" }
diff --git a/src/servers/src/http.rs b/src/servers/src/http.rs
index 12ac06db9070..34cff5de6fe4 100644
--- a/src/servers/src/http.rs
+++ b/src/servers/src/http.rs
@@ -45,7 +45,6 @@ use serde_json::Value;
use snafu::{ensure, ResultExt};
use tokio::sync::oneshot::{self, Sender};
use tokio::sync::Mutex;
-use tower::timeout::TimeoutLayer;
use tower::ServiceBuilder;
use tower_http::decompression::RequestDecompressionLayer;
use tower_http::trace::TraceLayer;
@@ -101,6 +100,9 @@ pub mod greptime_result_v1;
pub mod influxdb_result_v1;
pub mod json_result;
pub mod table_result;
+mod timeout;
+
+pub(crate) use timeout::DynamicTimeoutLayer;
#[cfg(any(test, feature = "testing"))]
pub mod test_helpers;
@@ -704,7 +706,7 @@ impl HttpServer {
pub fn build(&self, router: Router) -> Router {
let timeout_layer = if self.options.timeout != Duration::default() {
- Some(ServiceBuilder::new().layer(TimeoutLayer::new(self.options.timeout)))
+ Some(ServiceBuilder::new().layer(DynamicTimeoutLayer::new(self.options.timeout)))
} else {
info!("HTTP server timeout is disabled");
None
@@ -997,10 +999,12 @@ mod test {
use datatypes::prelude::*;
use datatypes::schema::{ColumnSchema, Schema};
use datatypes::vectors::{StringVector, UInt32Vector};
+ use header::constants::GREPTIME_DB_HEADER_TIMEOUT;
use query::parser::PromQuery;
use query::query_engine::DescribeResult;
use session::context::QueryContextRef;
use tokio::sync::mpsc;
+ use tokio::time::Instant;
use super::*;
use crate::error::Error;
@@ -1062,8 +1066,8 @@ mod test {
}
}
- fn timeout() -> TimeoutLayer {
- TimeoutLayer::new(Duration::from_millis(10))
+ fn timeout() -> DynamicTimeoutLayer {
+ DynamicTimeoutLayer::new(Duration::from_millis(10))
}
async fn forever() {
@@ -1102,6 +1106,16 @@ mod test {
let client = TestClient::new(app);
let res = client.get("/test/timeout").send().await;
assert_eq!(res.status(), StatusCode::REQUEST_TIMEOUT);
+
+ let now = Instant::now();
+ let res = client
+ .get("/test/timeout")
+ .header(GREPTIME_DB_HEADER_TIMEOUT, "20ms")
+ .send()
+ .await;
+ assert_eq!(res.status(), StatusCode::REQUEST_TIMEOUT);
+ let elapsed = now.elapsed();
+ assert!(elapsed > Duration::from_millis(15));
}
#[tokio::test]
diff --git a/src/servers/src/http/header.rs b/src/servers/src/http/header.rs
index 16962a56395a..bf5b0a4ebc14 100644
--- a/src/servers/src/http/header.rs
+++ b/src/servers/src/http/header.rs
@@ -39,6 +39,7 @@ pub mod constants {
// LEGACY HEADERS - KEEP IT UNMODIFIED
pub const GREPTIME_DB_HEADER_FORMAT: &str = "x-greptime-format";
+ pub const GREPTIME_DB_HEADER_TIMEOUT: &str = "x-greptime-timeout";
pub const GREPTIME_DB_HEADER_EXECUTION_TIME: &str = "x-greptime-execution-time";
pub const GREPTIME_DB_HEADER_METRICS: &str = "x-greptime-metrics";
pub const GREPTIME_DB_HEADER_NAME: &str = "x-greptime-db-name";
diff --git a/src/servers/src/http/timeout.rs b/src/servers/src/http/timeout.rs
new file mode 100644
index 000000000000..7a42918124d4
--- /dev/null
+++ b/src/servers/src/http/timeout.rs
@@ -0,0 +1,144 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::future::Future;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+use std::time::Duration;
+
+use axum::body::Body;
+use axum::http::Request;
+use axum::response::Response;
+use pin_project::pin_project;
+use tokio::time::Sleep;
+use tower::timeout::error::Elapsed;
+use tower::{BoxError, Layer, Service};
+
+use crate::http::header::constants::GREPTIME_DB_HEADER_TIMEOUT;
+
+/// [`Timeout`] response future
+///
+/// [`Timeout`]: crate::timeout::Timeout
+///
+/// Modified from https://github.com/tower-rs/tower/blob/8b84b98d93a2493422a0ecddb6251f292a904cff/tower/src/timeout/future.rs
+#[derive(Debug)]
+#[pin_project]
+pub struct ResponseFuture<T> {
+ #[pin]
+ response: T,
+ #[pin]
+ sleep: Sleep,
+}
+
+impl<T> ResponseFuture<T> {
+ pub(crate) fn new(response: T, sleep: Sleep) -> Self {
+ ResponseFuture { response, sleep }
+ }
+}
+
+impl<F, T, E> Future for ResponseFuture<F>
+where
+ F: Future<Output = Result<T, E>>,
+ E: Into<BoxError>,
+{
+ type Output = Result<T, BoxError>;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let this = self.project();
+
+ // First, try polling the future
+ match this.response.poll(cx) {
+ Poll::Ready(v) => return Poll::Ready(v.map_err(Into::into)),
+ Poll::Pending => {}
+ }
+
+ // Now check the sleep
+ match this.sleep.poll(cx) {
+ Poll::Pending => Poll::Pending,
+ Poll::Ready(_) => Poll::Ready(Err(Elapsed::new().into())),
+ }
+ }
+}
+
+/// Applies a timeout to requests via the supplied inner service.
+///
+/// Modified from https://github.com/tower-rs/tower/blob/8b84b98d93a2493422a0ecddb6251f292a904cff/tower/src/timeout/layer.rs
+#[derive(Debug, Clone)]
+pub struct DynamicTimeoutLayer {
+ default_timeout: Duration,
+}
+
+impl DynamicTimeoutLayer {
+ /// Create a timeout from a duration
+ pub fn new(default_timeout: Duration) -> Self {
+ DynamicTimeoutLayer { default_timeout }
+ }
+}
+
+impl<S> Layer<S> for DynamicTimeoutLayer {
+ type Service = DynamicTimeout<S>;
+
+ fn layer(&self, service: S) -> Self::Service {
+ DynamicTimeout::new(service, self.default_timeout)
+ }
+}
+
+/// Modified from https://github.com/tower-rs/tower/blob/8b84b98d93a2493422a0ecddb6251f292a904cff/tower/src/timeout/mod.rs
+#[derive(Clone)]
+pub struct DynamicTimeout<S> {
+ inner: S,
+ default_timeout: Duration,
+}
+
+impl<S> DynamicTimeout<S> {
+ /// Create a new [`DynamicTimeout`] with the given timeout
+ pub fn new(inner: S, default_timeout: Duration) -> Self {
+ DynamicTimeout {
+ inner,
+ default_timeout,
+ }
+ }
+}
+
+impl<S> Service<Request<Body>> for DynamicTimeout<S>
+where
+ S: Service<Request<Body>, Response = Response> + Send + 'static,
+ S::Error: Into<BoxError>,
+{
+ type Response = S::Response;
+ type Error = BoxError;
+ type Future = ResponseFuture<S::Future>;
+
+ fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ match self.inner.poll_ready(cx) {
+ Poll::Pending => Poll::Pending,
+ Poll::Ready(r) => Poll::Ready(r.map_err(Into::into)),
+ }
+ }
+
+ fn call(&mut self, request: Request<Body>) -> Self::Future {
+ let user_timeout = request
+ .headers()
+ .get(GREPTIME_DB_HEADER_TIMEOUT)
+ .and_then(|value| {
+ value
+ .to_str()
+ .ok()
+ .and_then(|value| humantime::parse_duration(value).ok())
+ });
+ let response = self.inner.call(request);
+ let sleep = tokio::time::sleep(user_timeout.unwrap_or(self.default_timeout));
+ ResponseFuture::new(response, sleep)
+ }
+}
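
The core of `DynamicTimeout::call` above is resolving a per-request timeout from the `x-greptime-timeout` header, falling back to the server default when the header is absent or unparsable. A minimal sketch of just that resolution, using the same `humantime::parse_duration` the layer relies on:

use std::time::Duration;

fn effective_timeout(header_value: Option<&str>, default_timeout: Duration) -> Duration {
    header_value
        .and_then(|value| humantime::parse_duration(value).ok())
        .unwrap_or(default_timeout)
}

// effective_timeout(Some("20ms"), Duration::from_secs(30)) == Duration::from_millis(20)
// effective_timeout(None, Duration::from_secs(30))         == Duration::from_secs(30)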
|
feat
|
introduce `DynamicTimeoutLayer` (#5006)
|
d1dfffcdaff8be61032dddf2dfabd5060f2b0b0a
|
2024-09-18 08:31:30
|
Weny Xu
|
chore: enable fuzz test for append table (#4702)
| false
|
diff --git a/tests-fuzz/src/generator/create_expr.rs b/tests-fuzz/src/generator/create_expr.rs
index 93809c06d4cd..200485e266c9 100644
--- a/tests-fuzz/src/generator/create_expr.rs
+++ b/tests-fuzz/src/generator/create_expr.rs
@@ -243,12 +243,20 @@ pub struct CreatePhysicalTableExprGenerator<R: Rng + 'static> {
name_generator: Box<dyn Random<Ident, R>>,
#[builder(default = "false")]
if_not_exists: bool,
+ #[builder(default, setter(into))]
+ with_clause: HashMap<String, String>,
}
impl<R: Rng + 'static> Generator<CreateTableExpr, R> for CreatePhysicalTableExprGenerator<R> {
type Error = Error;
fn generate(&self, rng: &mut R) -> Result<CreateTableExpr> {
+ let mut options = HashMap::with_capacity(self.with_clause.len() + 1);
+ options.insert("physical_metric_table".to_string(), Value::from(""));
+ for (key, value) in &self.with_clause {
+ options.insert(key.to_string(), Value::from(value.to_string()));
+ }
+
Ok(CreateTableExpr {
table_name: self.name_generator.gen(rng),
columns: vec![
@@ -266,7 +274,7 @@ impl<R: Rng + 'static> Generator<CreateTableExpr, R> for CreatePhysicalTableExpr
if_not_exists: self.if_not_exists,
partition: None,
engine: "metric".to_string(),
- options: [("physical_metric_table".to_string(), "".into())].into(),
+ options,
primary_keys: vec![],
})
}
diff --git a/tests-fuzz/src/translator/mysql/create_expr.rs b/tests-fuzz/src/translator/mysql/create_expr.rs
index 3ce659bf6e3d..073643b05967 100644
--- a/tests-fuzz/src/translator/mysql/create_expr.rs
+++ b/tests-fuzz/src/translator/mysql/create_expr.rs
@@ -151,7 +151,7 @@ impl CreateTableExprTranslator {
for (key, value) in &input.options {
output.push(format!("\"{key}\" = \"{value}\""));
}
- format!(" with ({})", output.join("\n"))
+ format!(" with ({})", output.join(",\n"))
}
}
}
diff --git a/tests-fuzz/targets/fuzz_alter_logical_table.rs b/tests-fuzz/targets/fuzz_alter_logical_table.rs
index 3ceb5b8b4572..80f017a35397 100644
--- a/tests-fuzz/targets/fuzz_alter_logical_table.rs
+++ b/tests-fuzz/targets/fuzz_alter_logical_table.rs
@@ -14,6 +14,7 @@
#![no_main]
+use std::collections::HashMap;
use std::sync::Arc;
use arbitrary::{Arbitrary, Unstructured};
@@ -76,12 +77,17 @@ impl Arbitrary<'_> for FuzzInput {
fn generate_create_physical_table_expr<R: Rng + 'static>(rng: &mut R) -> Result<CreateTableExpr> {
let physical_table_if_not_exists = rng.gen_bool(0.5);
+ let mut with_clause = HashMap::new();
+ if rng.gen_bool(0.5) {
+ with_clause.insert("append_mode".to_string(), "true".to_string());
+ }
let create_physical_table_expr = CreatePhysicalTableExprGeneratorBuilder::default()
.name_generator(Box::new(MappedGenerator::new(
WordGenerator,
merge_two_word_map_fn(random_capitalize_map, uppercase_and_keyword_backtick_map),
)))
.if_not_exists(physical_table_if_not_exists)
+ .with_clause(with_clause)
.build()
.unwrap();
create_physical_table_expr.generate(rng)
diff --git a/tests-fuzz/targets/fuzz_alter_table.rs b/tests-fuzz/targets/fuzz_alter_table.rs
index 8c17612d0443..7f2a809c9e14 100644
--- a/tests-fuzz/targets/fuzz_alter_table.rs
+++ b/tests-fuzz/targets/fuzz_alter_table.rs
@@ -14,6 +14,7 @@
#![no_main]
+use std::collections::HashMap;
use std::sync::Arc;
use arbitrary::{Arbitrary, Unstructured};
@@ -71,6 +72,10 @@ enum AlterTableOption {
fn generate_create_table_expr<R: Rng + 'static>(rng: &mut R) -> Result<CreateTableExpr> {
let max_columns = get_gt_fuzz_input_max_columns();
let columns = rng.gen_range(2..max_columns);
+ let mut with_clause = HashMap::new();
+ if rng.gen_bool(0.5) {
+ with_clause.insert("append_mode".to_string(), "true".to_string());
+ }
let create_table_generator = CreateTableExprGeneratorBuilder::default()
.name_generator(Box::new(MappedGenerator::new(
WordGenerator,
@@ -78,6 +83,7 @@ fn generate_create_table_expr<R: Rng + 'static>(rng: &mut R) -> Result<CreateTab
)))
.columns(columns)
.engine("mito")
+ .with_clause(with_clause)
.build()
.unwrap();
create_table_generator.generate(rng)
diff --git a/tests-fuzz/targets/fuzz_create_logical_table.rs b/tests-fuzz/targets/fuzz_create_logical_table.rs
index 39251732f2db..64ed26ba7e9a 100644
--- a/tests-fuzz/targets/fuzz_create_logical_table.rs
+++ b/tests-fuzz/targets/fuzz_create_logical_table.rs
@@ -14,6 +14,7 @@
#![no_main]
+use std::collections::HashMap;
use std::sync::Arc;
use common_telemetry::info;
@@ -68,12 +69,17 @@ async fn execute_create_logic_table(ctx: FuzzContext, input: FuzzInput) -> Resul
// Create physical table
let physical_table_if_not_exists = rng.gen_bool(0.5);
+ let mut with_clause = HashMap::new();
+ if rng.gen_bool(0.5) {
+ with_clause.insert("append_mode".to_string(), "true".to_string());
+ }
let create_physical_table_expr = CreatePhysicalTableExprGeneratorBuilder::default()
.name_generator(Box::new(MappedGenerator::new(
WordGenerator,
merge_two_word_map_fn(random_capitalize_map, uppercase_and_keyword_backtick_map),
)))
.if_not_exists(physical_table_if_not_exists)
+ .with_clause(with_clause)
.build()
.unwrap()
.generate(&mut rng)?;
diff --git a/tests-fuzz/targets/fuzz_create_table.rs b/tests-fuzz/targets/fuzz_create_table.rs
index 0d2458c15eb0..6d03b0dffab9 100644
--- a/tests-fuzz/targets/fuzz_create_table.rs
+++ b/tests-fuzz/targets/fuzz_create_table.rs
@@ -14,6 +14,8 @@
#![no_main]
+use std::collections::HashMap;
+
use common_telemetry::info;
use libfuzzer_sys::arbitrary::{Arbitrary, Unstructured};
use libfuzzer_sys::fuzz_target;
@@ -65,6 +67,10 @@ impl Arbitrary<'_> for FuzzInput {
fn generate_expr(input: FuzzInput) -> Result<CreateTableExpr> {
let mut rng = ChaChaRng::seed_from_u64(input.seed);
let if_not_exists = rng.gen_bool(0.5);
+ let mut with_clause = HashMap::new();
+ if rng.gen_bool(0.5) {
+ with_clause.insert("append_mode".to_string(), "true".to_string());
+ }
let create_table_generator = CreateTableExprGeneratorBuilder::default()
.name_generator(Box::new(MappedGenerator::new(
@@ -74,6 +80,7 @@ fn generate_expr(input: FuzzInput) -> Result<CreateTableExpr> {
.columns(input.columns)
.engine("mito")
.if_not_exists(if_not_exists)
+ .with_clause(with_clause)
.build()
.unwrap();
create_table_generator.generate(&mut rng)
diff --git a/tests-fuzz/targets/fuzz_insert.rs b/tests-fuzz/targets/fuzz_insert.rs
index a5db2bfffe45..739d6af7a386 100644
--- a/tests-fuzz/targets/fuzz_insert.rs
+++ b/tests-fuzz/targets/fuzz_insert.rs
@@ -14,6 +14,7 @@
#![no_main]
+use std::collections::HashMap;
use std::sync::Arc;
use common_telemetry::info;
@@ -83,6 +84,11 @@ fn generate_create_expr<R: Rng + 'static>(
input: FuzzInput,
rng: &mut R,
) -> Result<CreateTableExpr> {
+ let mut with_clause = HashMap::new();
+ if rng.gen_bool(0.5) {
+ with_clause.insert("append_mode".to_string(), "true".to_string());
+ }
+
let create_table_generator = CreateTableExprGeneratorBuilder::default()
.name_generator(Box::new(MappedGenerator::new(
WordGenerator,
@@ -90,6 +96,7 @@ fn generate_create_expr<R: Rng + 'static>(
)))
.columns(input.columns)
.engine("mito")
+ .with_clause(with_clause)
.ts_column_type_generator(Box::new(MySQLTsColumnTypeGenerator))
.build()
.unwrap();
diff --git a/tests-fuzz/targets/fuzz_insert_logical_table.rs b/tests-fuzz/targets/fuzz_insert_logical_table.rs
index 6efb6ea50955..abafdef9a8b7 100644
--- a/tests-fuzz/targets/fuzz_insert_logical_table.rs
+++ b/tests-fuzz/targets/fuzz_insert_logical_table.rs
@@ -79,12 +79,17 @@ impl Arbitrary<'_> for FuzzInput {
fn generate_create_physical_table_expr<R: Rng + 'static>(rng: &mut R) -> Result<CreateTableExpr> {
let physical_table_if_not_exists = rng.gen_bool(0.5);
+ let mut with_clause = HashMap::new();
+ if rng.gen_bool(0.5) {
+ with_clause.insert("append_mode".to_string(), "true".to_string());
+ }
let create_physical_table_expr = CreatePhysicalTableExprGeneratorBuilder::default()
.name_generator(Box::new(MappedGenerator::new(
WordGenerator,
merge_two_word_map_fn(random_capitalize_map, uppercase_and_keyword_backtick_map),
)))
.if_not_exists(physical_table_if_not_exists)
+ .with_clause(with_clause)
.build()
.unwrap();
create_physical_table_expr.generate(rng)
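
Alongside the new `with_clause` option, the MySQL translator's join changes from `"\n"` to `",\n"`; with a single implicit `physical_metric_table` option a newline join happened to work, but once `append_mode` (or anything else) can be added, the pairs must be comma separated to stay valid SQL. A small sketch of the rendering, using plain string pairs in place of the real expression's options map:

fn render_with_clause(options: &[(&str, &str)]) -> String {
    let pairs: Vec<String> = options
        .iter()
        .map(|(key, value)| format!("\"{key}\" = \"{value}\""))
        .collect();
    format!(" with ({})", pairs.join(",\n"))
}

// render_with_clause(&[("physical_metric_table", ""), ("append_mode", "true")])
// -> ` with ("physical_metric_table" = "",
//    "append_mode" = "true")`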
|
chore
|
enable fuzz test for append table (#4702)
|
6d1dd5e7af9caf6bebba4f63fd29c6e196877ba1
|
2022-11-03 16:05:30
|
Ruihang Xia
|
fix: also run CI in develop branch (#387)
| false
|
diff --git a/.github/workflows/develop.yml b/.github/workflows/develop.yml
index 93aa3bb1270b..be55b6beaf54 100644
--- a/.github/workflows/develop.yml
+++ b/.github/workflows/develop.yml
@@ -1,6 +1,18 @@
on:
pull_request:
types: [opened, synchronize, reopened, ready_for_review]
+ push:
+ branches:
+ - develop
+ - main
+ paths-ignore:
+ - 'docs/**'
+ - 'config/**'
+ - '.github/**'
+ - '**.md'
+ - '**.yml'
+ - '.dockerignore'
+ - 'docker/**'
name: Continuous integration for developing
|
fix
|
also run CI in develop branch (#387)
|
5b6279f191f44f3460583f03a39e9265fa1c7047
|
2025-01-26 11:27:23
|
yihong
|
fix: no need for special case since datafusion updated (#5458)
| false
|
diff --git a/src/servers/src/postgres/fixtures.rs b/src/servers/src/postgres/fixtures.rs
index 34761d5d35ec..4774c4ece052 100644
--- a/src/servers/src/postgres/fixtures.rs
+++ b/src/servers/src/postgres/fixtures.rs
@@ -115,11 +115,7 @@ pub(crate) fn process<'a>(query: &str, query_ctx: QueryContextRef) -> Option<Vec
}
}
-static LIMIT_CAST_PATTERN: Lazy<Regex> =
- Lazy::new(|| Regex::new("(?i)(LIMIT\\s+\\d+)::bigint").unwrap());
pub(crate) fn rewrite_sql(query: &str) -> Cow<'_, str> {
- //TODO(sunng87): remove this when we upgraded datafusion to 43 or newer
- let query = LIMIT_CAST_PATTERN.replace_all(query, "$1");
// DBeaver tricky replacement for datafusion not support sql
// TODO: add more here
query
@@ -218,11 +214,6 @@ mod test {
#[test]
fn test_rewrite() {
- let sql = "SELECT * FROM number LIMIT 1::bigint";
- let sql2 = "SELECT * FROM number limit 1::BIGINT";
-
- assert_eq!("SELECT * FROM number LIMIT 1", rewrite_sql(sql));
- assert_eq!("SELECT * FROM number limit 1", rewrite_sql(sql2));
assert_eq!(
"SELECT db.oid as _oid,db.* FROM pg_catalog.pg_database db",
rewrite_sql("SELECT db.oid,db.* FROM pg_catalog.pg_database db")
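
For context on what was removed: the old `LIMIT_CAST_PATTERN` pre-pass rewrote `LIMIT <n>::bigint` to `LIMIT <n>` before handing SQL to DataFusion, and newer DataFusion accepts the cast form directly, so both the regex and its tests can go. A sketch reproducing the removed behaviour, using the same pattern and capture-group replacement:

use regex::Regex;

fn strip_limit_cast(query: &str) -> String {
    // Case-insensitive `LIMIT <n>::bigint`, keeping only the `LIMIT <n>` group.
    let limit_cast = Regex::new(r"(?i)(LIMIT\s+\d+)::bigint").unwrap();
    limit_cast.replace_all(query, "$1").into_owned()
}

// strip_limit_cast("SELECT * FROM number LIMIT 1::bigint")
// == "SELECT * FROM number LIMIT 1"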
|
fix
|
no need for special case since datafusion updated (#5458)
|
8786624515b85cdcf238ae88f92b7cfbb80abab0
|
2024-09-19 11:00:56
|
Ning Sun
|
feat: improve support for postgres extended protocol (#4721)
| false
|
diff --git a/src/query/src/datafusion.rs b/src/query/src/datafusion.rs
index 03eadfde970d..6ed5844de09b 100644
--- a/src/query/src/datafusion.rs
+++ b/src/query/src/datafusion.rs
@@ -398,11 +398,19 @@ impl QueryEngine for DatafusionQueryEngine {
query_ctx: QueryContextRef,
) -> Result<DescribeResult> {
let ctx = self.engine_context(query_ctx);
- let optimised_plan = self.optimize(&ctx, &plan)?;
- Ok(DescribeResult {
- schema: optimised_plan.schema()?,
- logical_plan: optimised_plan,
- })
+ if let Ok(optimised_plan) = self.optimize(&ctx, &plan) {
+ Ok(DescribeResult {
+ schema: optimised_plan.schema()?,
+ logical_plan: optimised_plan,
+ })
+ } else {
+ // Table's like those in information_schema cannot be optimized when
+ // it contains parameters. So we fallback to original plans.
+ Ok(DescribeResult {
+ schema: plan.schema()?,
+ logical_plan: plan,
+ })
+ }
}
async fn execute(&self, plan: LogicalPlan, query_ctx: QueryContextRef) -> Result<Output> {
diff --git a/src/servers/src/postgres/fixtures.rs b/src/servers/src/postgres/fixtures.rs
index 5b02480da941..18c3661b9334 100644
--- a/src/servers/src/postgres/fixtures.rs
+++ b/src/servers/src/postgres/fixtures.rs
@@ -54,17 +54,19 @@ static SET_TRANSACTION_PATTERN: Lazy<Regex> =
static TRANSACTION_PATTERN: Lazy<Regex> =
Lazy::new(|| Regex::new("(?i)^(BEGIN|ROLLBACK|COMMIT);?").unwrap());
+/// Tests whether the given query statement matches any of the predefined patterns
+pub(crate) fn matches(query: &str) -> bool {
+ TRANSACTION_PATTERN.captures(query).is_some()
+ || SHOW_PATTERN.captures(query).is_some()
+ || SET_TRANSACTION_PATTERN.is_match(query)
+}
+
/// Process unsupported SQL and return fixed result as a compatibility solution
-pub(crate) fn process<'a>(
- query: &str,
- _query_ctx: QueryContextRef,
-) -> Option<PgWireResult<Vec<Response<'a>>>> {
+pub(crate) fn process<'a>(query: &str, _query_ctx: QueryContextRef) -> Option<Vec<Response<'a>>> {
// Transaction directives:
if let Some(tx) = TRANSACTION_PATTERN.captures(query) {
let tx_tag = &tx[1];
- Some(Ok(vec![Response::Execution(Tag::new(
- &tx_tag.to_uppercase(),
- ))]))
+ Some(vec![Response::Execution(Tag::new(&tx_tag.to_uppercase()))])
} else if let Some(show_var) = SHOW_PATTERN.captures(query) {
let show_var = show_var[1].to_lowercase();
if let Some(value) = VAR_VALUES.get(&show_var.as_ref()) {
@@ -81,12 +83,12 @@ pub(crate) fn process<'a>(
vec![vec![value.to_string()]],
));
- Some(Ok(vec![Response::Query(QueryResponse::new(schema, data))]))
+ Some(vec![Response::Query(QueryResponse::new(schema, data))])
} else {
None
}
} else if SET_TRANSACTION_PATTERN.is_match(query) {
- Some(Ok(vec![Response::Execution(Tag::new("SET"))]))
+ Some(vec![Response::Execution(Tag::new("SET"))])
} else {
None
}
@@ -101,7 +103,6 @@ mod test {
fn assert_tag(q: &str, t: &str, query_context: QueryContextRef) {
if let Response::Execution(tag) = process(q, query_context.clone())
.unwrap_or_else(|| panic!("fail to match {}", q))
- .expect("unexpected error")
.remove(0)
{
assert_eq!(Tag::new(t), tag);
@@ -113,7 +114,6 @@ mod test {
fn get_data<'a>(q: &str, query_context: QueryContextRef) -> QueryResponse<'a> {
if let Response::Query(resp) = process(q, query_context.clone())
.unwrap_or_else(|| panic!("fail to match {}", q))
- .expect("unexpected error")
.remove(0)
{
resp
diff --git a/src/servers/src/postgres/handler.rs b/src/servers/src/postgres/handler.rs
index 190684ed34fc..53d907d814db 100644
--- a/src/servers/src/postgres/handler.rs
+++ b/src/servers/src/postgres/handler.rs
@@ -59,8 +59,13 @@ impl SimpleQueryHandler for PostgresServerHandler {
.with_label_values(&[crate::metrics::METRIC_POSTGRES_SIMPLE_QUERY, db.as_str()])
.start_timer();
+ if query.is_empty() {
+ // early return if query is empty
+ return Ok(vec![Response::EmptyQuery]);
+ }
+
if let Some(resps) = fixtures::process(query, query_ctx.clone()) {
- resps
+ Ok(resps)
} else {
let outputs = self.query_handler.do_query(query, query_ctx.clone()).await;
@@ -184,6 +189,16 @@ impl QueryParser for DefaultQueryParser {
async fn parse_sql(&self, sql: &str, _types: &[Type]) -> PgWireResult<Self::Statement> {
crate::metrics::METRIC_POSTGRES_PREPARED_COUNT.inc();
let query_ctx = self.session.new_query_context();
+
+ // do not parse if query is empty or matches rules
+ if sql.is_empty() || fixtures::matches(sql) {
+ return Ok(SqlPlan {
+ query: sql.to_owned(),
+ plan: None,
+ schema: None,
+ });
+ }
+
let mut stmts =
ParserContext::create_with_dialect(sql, &PostgreSqlDialect {}, ParseOptions::default())
.map_err(|e| PgWireError::ApiError(Box::new(e)))?;
@@ -193,6 +208,7 @@ impl QueryParser for DefaultQueryParser {
))))
} else {
let stmt = stmts.remove(0);
+
let describe_result = self
.query_handler
.do_describe(stmt, query_ctx)
@@ -244,6 +260,16 @@ impl ExtendedQueryHandler for PostgresServerHandler {
let sql_plan = &portal.statement.statement;
+ if sql_plan.query.is_empty() {
+ // early return if query is empty
+ return Ok(Response::EmptyQuery);
+ }
+
+ if let Some(mut resps) = fixtures::process(&sql_plan.query, query_ctx.clone()) {
+ // if the statement matches our predefined rules, return it early
+ return Ok(resps.remove(0));
+ }
+
let output = if let Some(plan) = &sql_plan.plan {
let plan = plan
.replace_params_with_values(parameters_to_scalar_values(plan, portal)?.as_ref())
@@ -297,6 +323,17 @@ impl ExtendedQueryHandler for PostgresServerHandler {
.map(|fields| DescribeStatementResponse::new(param_types, fields))
.map_err(|e| PgWireError::ApiError(Box::new(e)))
} else {
+ if let Some(mut resp) =
+ fixtures::process(&sql_plan.query, self.session.new_query_context())
+ {
+ if let Response::Query(query_response) = resp.remove(0) {
+ return Ok(DescribeStatementResponse::new(
+ param_types,
+ (*query_response.row_schema()).clone(),
+ ));
+ }
+ }
+
Ok(DescribeStatementResponse::new(param_types, vec![]))
}
}
@@ -317,6 +354,16 @@ impl ExtendedQueryHandler for PostgresServerHandler {
.map(DescribePortalResponse::new)
.map_err(|e| PgWireError::ApiError(Box::new(e)))
} else {
+ if let Some(mut resp) =
+ fixtures::process(&sql_plan.query, self.session.new_query_context())
+ {
+ if let Response::Query(query_response) = resp.remove(0) {
+ return Ok(DescribePortalResponse::new(
+ (*query_response.row_schema()).clone(),
+ ));
+ }
+ }
+
Ok(DescribePortalResponse::new(vec![]))
}
}
diff --git a/src/servers/src/postgres/types.rs b/src/servers/src/postgres/types.rs
index 2bec6c2999f5..9f9d94905e4a 100644
--- a/src/servers/src/postgres/types.rs
+++ b/src/servers/src/postgres/types.rs
@@ -239,14 +239,14 @@ pub(super) fn parameter_to_string(portal: &Portal<SqlPlan>, idx: usize) -> PgWir
.unwrap_or_else(|| "".to_owned())),
_ => Err(invalid_parameter_error(
"unsupported_parameter_type",
- Some(¶m_type.to_string()),
+ Some(param_type.to_string()),
)),
}
}
-pub(super) fn invalid_parameter_error(msg: &str, detail: Option<&str>) -> PgWireError {
+pub(super) fn invalid_parameter_error(msg: &str, detail: Option<String>) -> PgWireError {
let mut error_info = PgErrorCode::Ec22023.to_err_info(msg.to_string());
- error_info.detail = detail.map(|s| s.to_owned());
+ error_info.detail = detail;
PgWireError::UserError(Box::new(error_info))
}
@@ -279,303 +279,314 @@ pub(super) fn parameters_to_scalar_values(
.get_param_types()
.map_err(|e| PgWireError::ApiError(Box::new(e)))?;
- // ensure parameter count consistent for: client parameter types, server
- // parameter types and parameter count
- if param_types.len() != param_count {
- return Err(invalid_parameter_error(
- "invalid_parameter_count",
- Some(&format!(
- "Expected: {}, found: {}",
- param_types.len(),
- param_count
- )),
- ));
- }
-
for idx in 0..param_count {
- let server_type =
- if let Some(Some(server_infer_type)) = param_types.get(&format!("${}", idx + 1)) {
- server_infer_type
- } else {
- // at the moment we require type information inferenced by
- // server so here we return error if the type is unknown from
- // server-side.
- //
- // It might be possible to parse the parameter just using client
- // specified type, we will implement that if there is a case.
- return Err(invalid_parameter_error("unknown_parameter_type", None));
- };
+ let server_type = param_types
+ .get(&format!("${}", idx + 1))
+ .and_then(|t| t.as_ref());
let client_type = if let Some(client_given_type) = client_param_types.get(idx) {
client_given_type.clone()
+ } else if let Some(server_provided_type) = &server_type {
+ type_gt_to_pg(server_provided_type).map_err(|e| PgWireError::ApiError(Box::new(e)))?
} else {
- type_gt_to_pg(server_type).map_err(|e| PgWireError::ApiError(Box::new(e)))?
+ return Err(invalid_parameter_error(
+ "unknown_parameter_type",
+ Some(format!(
+ "Cannot get parameter type information for parameter {}",
+ idx
+ )),
+ ));
};
let value = match &client_type {
&Type::VARCHAR | &Type::TEXT => {
let data = portal.parameter::<String>(idx, &client_type)?;
- match server_type {
- ConcreteDataType::String(_) => ScalarValue::Utf8(data),
- _ => {
- return Err(invalid_parameter_error(
- "invalid_parameter_type",
- Some(&format!(
- "Expected: {}, found: {}",
- server_type, client_type
- )),
- ))
+ if let Some(server_type) = &server_type {
+ match server_type {
+ ConcreteDataType::String(_) => ScalarValue::Utf8(data),
+ _ => {
+ return Err(invalid_parameter_error(
+ "invalid_parameter_type",
+ Some(format!("Expected: {}, found: {}", server_type, client_type)),
+ ))
+ }
}
+ } else {
+ ScalarValue::Utf8(data)
}
}
&Type::BOOL => {
let data = portal.parameter::<bool>(idx, &client_type)?;
- match server_type {
- ConcreteDataType::Boolean(_) => ScalarValue::Boolean(data),
- _ => {
- return Err(invalid_parameter_error(
- "invalid_parameter_type",
- Some(&format!(
- "Expected: {}, found: {}",
- server_type, client_type
- )),
- ))
+ if let Some(server_type) = &server_type {
+ match server_type {
+ ConcreteDataType::Boolean(_) => ScalarValue::Boolean(data),
+ _ => {
+ return Err(invalid_parameter_error(
+ "invalid_parameter_type",
+ Some(format!("Expected: {}, found: {}", server_type, client_type)),
+ ))
+ }
}
+ } else {
+ ScalarValue::Boolean(data)
}
}
&Type::INT2 => {
let data = portal.parameter::<i16>(idx, &client_type)?;
- match server_type {
- ConcreteDataType::Int8(_) => ScalarValue::Int8(data.map(|n| n as i8)),
- ConcreteDataType::Int16(_) => ScalarValue::Int16(data),
- ConcreteDataType::Int32(_) => ScalarValue::Int32(data.map(|n| n as i32)),
- ConcreteDataType::Int64(_) => ScalarValue::Int64(data.map(|n| n as i64)),
- ConcreteDataType::UInt8(_) => ScalarValue::UInt8(data.map(|n| n as u8)),
- ConcreteDataType::UInt16(_) => ScalarValue::UInt16(data.map(|n| n as u16)),
- ConcreteDataType::UInt32(_) => ScalarValue::UInt32(data.map(|n| n as u32)),
- ConcreteDataType::UInt64(_) => ScalarValue::UInt64(data.map(|n| n as u64)),
- ConcreteDataType::Timestamp(unit) => {
- to_timestamp_scalar_value(data, unit, server_type)?
- }
- ConcreteDataType::DateTime(_) => ScalarValue::Date64(data.map(|d| d as i64)),
- _ => {
- return Err(invalid_parameter_error(
- "invalid_parameter_type",
- Some(&format!(
- "Expected: {}, found: {}",
- server_type, client_type
- )),
- ))
+ if let Some(server_type) = &server_type {
+ match server_type {
+ ConcreteDataType::Int8(_) => ScalarValue::Int8(data.map(|n| n as i8)),
+ ConcreteDataType::Int16(_) => ScalarValue::Int16(data),
+ ConcreteDataType::Int32(_) => ScalarValue::Int32(data.map(|n| n as i32)),
+ ConcreteDataType::Int64(_) => ScalarValue::Int64(data.map(|n| n as i64)),
+ ConcreteDataType::UInt8(_) => ScalarValue::UInt8(data.map(|n| n as u8)),
+ ConcreteDataType::UInt16(_) => ScalarValue::UInt16(data.map(|n| n as u16)),
+ ConcreteDataType::UInt32(_) => ScalarValue::UInt32(data.map(|n| n as u32)),
+ ConcreteDataType::UInt64(_) => ScalarValue::UInt64(data.map(|n| n as u64)),
+ ConcreteDataType::Timestamp(unit) => {
+ to_timestamp_scalar_value(data, unit, server_type)?
+ }
+ ConcreteDataType::DateTime(_) => {
+ ScalarValue::Date64(data.map(|d| d as i64))
+ }
+ _ => {
+ return Err(invalid_parameter_error(
+ "invalid_parameter_type",
+ Some(format!("Expected: {}, found: {}", server_type, client_type)),
+ ))
+ }
}
+ } else {
+ ScalarValue::Int16(data)
}
}
&Type::INT4 => {
let data = portal.parameter::<i32>(idx, &client_type)?;
- match server_type {
- ConcreteDataType::Int8(_) => ScalarValue::Int8(data.map(|n| n as i8)),
- ConcreteDataType::Int16(_) => ScalarValue::Int16(data.map(|n| n as i16)),
- ConcreteDataType::Int32(_) => ScalarValue::Int32(data),
- ConcreteDataType::Int64(_) => ScalarValue::Int64(data.map(|n| n as i64)),
- ConcreteDataType::UInt8(_) => ScalarValue::UInt8(data.map(|n| n as u8)),
- ConcreteDataType::UInt16(_) => ScalarValue::UInt16(data.map(|n| n as u16)),
- ConcreteDataType::UInt32(_) => ScalarValue::UInt32(data.map(|n| n as u32)),
- ConcreteDataType::UInt64(_) => ScalarValue::UInt64(data.map(|n| n as u64)),
- ConcreteDataType::Timestamp(unit) => {
- to_timestamp_scalar_value(data, unit, server_type)?
- }
- ConcreteDataType::DateTime(_) => ScalarValue::Date64(data.map(|d| d as i64)),
- _ => {
- return Err(invalid_parameter_error(
- "invalid_parameter_type",
- Some(&format!(
- "Expected: {}, found: {}",
- server_type, client_type
- )),
- ))
+ if let Some(server_type) = &server_type {
+ match server_type {
+ ConcreteDataType::Int8(_) => ScalarValue::Int8(data.map(|n| n as i8)),
+ ConcreteDataType::Int16(_) => ScalarValue::Int16(data.map(|n| n as i16)),
+ ConcreteDataType::Int32(_) => ScalarValue::Int32(data),
+ ConcreteDataType::Int64(_) => ScalarValue::Int64(data.map(|n| n as i64)),
+ ConcreteDataType::UInt8(_) => ScalarValue::UInt8(data.map(|n| n as u8)),
+ ConcreteDataType::UInt16(_) => ScalarValue::UInt16(data.map(|n| n as u16)),
+ ConcreteDataType::UInt32(_) => ScalarValue::UInt32(data.map(|n| n as u32)),
+ ConcreteDataType::UInt64(_) => ScalarValue::UInt64(data.map(|n| n as u64)),
+ ConcreteDataType::Timestamp(unit) => {
+ to_timestamp_scalar_value(data, unit, server_type)?
+ }
+ ConcreteDataType::DateTime(_) => {
+ ScalarValue::Date64(data.map(|d| d as i64))
+ }
+ _ => {
+ return Err(invalid_parameter_error(
+ "invalid_parameter_type",
+ Some(format!("Expected: {}, found: {}", server_type, client_type)),
+ ))
+ }
}
+ } else {
+ ScalarValue::Int32(data)
}
}
&Type::INT8 => {
let data = portal.parameter::<i64>(idx, &client_type)?;
- match server_type {
- ConcreteDataType::Int8(_) => ScalarValue::Int8(data.map(|n| n as i8)),
- ConcreteDataType::Int16(_) => ScalarValue::Int16(data.map(|n| n as i16)),
- ConcreteDataType::Int32(_) => ScalarValue::Int32(data.map(|n| n as i32)),
- ConcreteDataType::Int64(_) => ScalarValue::Int64(data),
- ConcreteDataType::UInt8(_) => ScalarValue::UInt8(data.map(|n| n as u8)),
- ConcreteDataType::UInt16(_) => ScalarValue::UInt16(data.map(|n| n as u16)),
- ConcreteDataType::UInt32(_) => ScalarValue::UInt32(data.map(|n| n as u32)),
- ConcreteDataType::UInt64(_) => ScalarValue::UInt64(data.map(|n| n as u64)),
- ConcreteDataType::Timestamp(unit) => {
- to_timestamp_scalar_value(data, unit, server_type)?
- }
- ConcreteDataType::DateTime(_) => ScalarValue::Date64(data),
- _ => {
- return Err(invalid_parameter_error(
- "invalid_parameter_type",
- Some(&format!(
- "Expected: {}, found: {}",
- server_type, client_type
- )),
- ))
+ if let Some(server_type) = &server_type {
+ match server_type {
+ ConcreteDataType::Int8(_) => ScalarValue::Int8(data.map(|n| n as i8)),
+ ConcreteDataType::Int16(_) => ScalarValue::Int16(data.map(|n| n as i16)),
+ ConcreteDataType::Int32(_) => ScalarValue::Int32(data.map(|n| n as i32)),
+ ConcreteDataType::Int64(_) => ScalarValue::Int64(data),
+ ConcreteDataType::UInt8(_) => ScalarValue::UInt8(data.map(|n| n as u8)),
+ ConcreteDataType::UInt16(_) => ScalarValue::UInt16(data.map(|n| n as u16)),
+ ConcreteDataType::UInt32(_) => ScalarValue::UInt32(data.map(|n| n as u32)),
+ ConcreteDataType::UInt64(_) => ScalarValue::UInt64(data.map(|n| n as u64)),
+ ConcreteDataType::Timestamp(unit) => {
+ to_timestamp_scalar_value(data, unit, server_type)?
+ }
+ ConcreteDataType::DateTime(_) => ScalarValue::Date64(data),
+ _ => {
+ return Err(invalid_parameter_error(
+ "invalid_parameter_type",
+ Some(format!("Expected: {}, found: {}", server_type, client_type)),
+ ))
+ }
}
+ } else {
+ ScalarValue::Int64(data)
}
}
&Type::FLOAT4 => {
let data = portal.parameter::<f32>(idx, &client_type)?;
- match server_type {
- ConcreteDataType::Int8(_) => ScalarValue::Int8(data.map(|n| n as i8)),
- ConcreteDataType::Int16(_) => ScalarValue::Int16(data.map(|n| n as i16)),
- ConcreteDataType::Int32(_) => ScalarValue::Int32(data.map(|n| n as i32)),
- ConcreteDataType::Int64(_) => ScalarValue::Int64(data.map(|n| n as i64)),
- ConcreteDataType::UInt8(_) => ScalarValue::UInt8(data.map(|n| n as u8)),
- ConcreteDataType::UInt16(_) => ScalarValue::UInt16(data.map(|n| n as u16)),
- ConcreteDataType::UInt32(_) => ScalarValue::UInt32(data.map(|n| n as u32)),
- ConcreteDataType::UInt64(_) => ScalarValue::UInt64(data.map(|n| n as u64)),
- ConcreteDataType::Float32(_) => ScalarValue::Float32(data),
- ConcreteDataType::Float64(_) => ScalarValue::Float64(data.map(|n| n as f64)),
- _ => {
- return Err(invalid_parameter_error(
- "invalid_parameter_type",
- Some(&format!(
- "Expected: {}, found: {}",
- server_type, client_type
- )),
- ))
+ if let Some(server_type) = &server_type {
+ match server_type {
+ ConcreteDataType::Int8(_) => ScalarValue::Int8(data.map(|n| n as i8)),
+ ConcreteDataType::Int16(_) => ScalarValue::Int16(data.map(|n| n as i16)),
+ ConcreteDataType::Int32(_) => ScalarValue::Int32(data.map(|n| n as i32)),
+ ConcreteDataType::Int64(_) => ScalarValue::Int64(data.map(|n| n as i64)),
+ ConcreteDataType::UInt8(_) => ScalarValue::UInt8(data.map(|n| n as u8)),
+ ConcreteDataType::UInt16(_) => ScalarValue::UInt16(data.map(|n| n as u16)),
+ ConcreteDataType::UInt32(_) => ScalarValue::UInt32(data.map(|n| n as u32)),
+ ConcreteDataType::UInt64(_) => ScalarValue::UInt64(data.map(|n| n as u64)),
+ ConcreteDataType::Float32(_) => ScalarValue::Float32(data),
+ ConcreteDataType::Float64(_) => {
+ ScalarValue::Float64(data.map(|n| n as f64))
+ }
+ _ => {
+ return Err(invalid_parameter_error(
+ "invalid_parameter_type",
+ Some(format!("Expected: {}, found: {}", server_type, client_type)),
+ ))
+ }
}
+ } else {
+ ScalarValue::Float32(data)
}
}
&Type::FLOAT8 => {
let data = portal.parameter::<f64>(idx, &client_type)?;
- match server_type {
- ConcreteDataType::Int8(_) => ScalarValue::Int8(data.map(|n| n as i8)),
- ConcreteDataType::Int16(_) => ScalarValue::Int16(data.map(|n| n as i16)),
- ConcreteDataType::Int32(_) => ScalarValue::Int32(data.map(|n| n as i32)),
- ConcreteDataType::Int64(_) => ScalarValue::Int64(data.map(|n| n as i64)),
- ConcreteDataType::UInt8(_) => ScalarValue::UInt8(data.map(|n| n as u8)),
- ConcreteDataType::UInt16(_) => ScalarValue::UInt16(data.map(|n| n as u16)),
- ConcreteDataType::UInt32(_) => ScalarValue::UInt32(data.map(|n| n as u32)),
- ConcreteDataType::UInt64(_) => ScalarValue::UInt64(data.map(|n| n as u64)),
- ConcreteDataType::Float32(_) => ScalarValue::Float32(data.map(|n| n as f32)),
- ConcreteDataType::Float64(_) => ScalarValue::Float64(data),
- _ => {
- return Err(invalid_parameter_error(
- "invalid_parameter_type",
- Some(&format!(
- "Expected: {}, found: {}",
- server_type, client_type
- )),
- ))
+ if let Some(server_type) = &server_type {
+ match server_type {
+ ConcreteDataType::Int8(_) => ScalarValue::Int8(data.map(|n| n as i8)),
+ ConcreteDataType::Int16(_) => ScalarValue::Int16(data.map(|n| n as i16)),
+ ConcreteDataType::Int32(_) => ScalarValue::Int32(data.map(|n| n as i32)),
+ ConcreteDataType::Int64(_) => ScalarValue::Int64(data.map(|n| n as i64)),
+ ConcreteDataType::UInt8(_) => ScalarValue::UInt8(data.map(|n| n as u8)),
+ ConcreteDataType::UInt16(_) => ScalarValue::UInt16(data.map(|n| n as u16)),
+ ConcreteDataType::UInt32(_) => ScalarValue::UInt32(data.map(|n| n as u32)),
+ ConcreteDataType::UInt64(_) => ScalarValue::UInt64(data.map(|n| n as u64)),
+ ConcreteDataType::Float32(_) => {
+ ScalarValue::Float32(data.map(|n| n as f32))
+ }
+ ConcreteDataType::Float64(_) => ScalarValue::Float64(data),
+ _ => {
+ return Err(invalid_parameter_error(
+ "invalid_parameter_type",
+ Some(format!("Expected: {}, found: {}", server_type, client_type)),
+ ))
+ }
}
+ } else {
+ ScalarValue::Float64(data)
}
}
&Type::TIMESTAMP => {
let data = portal.parameter::<NaiveDateTime>(idx, &client_type)?;
- match server_type {
- ConcreteDataType::Timestamp(unit) => match *unit {
- TimestampType::Second(_) => ScalarValue::TimestampSecond(
- data.map(|ts| ts.and_utc().timestamp()),
- None,
- ),
- TimestampType::Millisecond(_) => ScalarValue::TimestampMillisecond(
- data.map(|ts| ts.and_utc().timestamp_millis()),
- None,
- ),
- TimestampType::Microsecond(_) => ScalarValue::TimestampMicrosecond(
- data.map(|ts| ts.and_utc().timestamp_micros()),
- None,
- ),
- TimestampType::Nanosecond(_) => ScalarValue::TimestampNanosecond(
- data.map(|ts| ts.and_utc().timestamp_micros()),
- None,
- ),
- },
- ConcreteDataType::DateTime(_) => {
- ScalarValue::Date64(data.map(|d| d.and_utc().timestamp_millis()))
- }
- _ => {
- return Err(invalid_parameter_error(
- "invalid_parameter_type",
- Some(&format!(
- "Expected: {}, found: {}",
- server_type, client_type
- )),
- ))
+ if let Some(server_type) = &server_type {
+ match server_type {
+ ConcreteDataType::Timestamp(unit) => match *unit {
+ TimestampType::Second(_) => ScalarValue::TimestampSecond(
+ data.map(|ts| ts.and_utc().timestamp()),
+ None,
+ ),
+ TimestampType::Millisecond(_) => ScalarValue::TimestampMillisecond(
+ data.map(|ts| ts.and_utc().timestamp_millis()),
+ None,
+ ),
+ TimestampType::Microsecond(_) => ScalarValue::TimestampMicrosecond(
+ data.map(|ts| ts.and_utc().timestamp_micros()),
+ None,
+ ),
+ TimestampType::Nanosecond(_) => ScalarValue::TimestampNanosecond(
+ data.map(|ts| ts.and_utc().timestamp_micros()),
+ None,
+ ),
+ },
+ ConcreteDataType::DateTime(_) => {
+ ScalarValue::Date64(data.map(|d| d.and_utc().timestamp_millis()))
+ }
+ _ => {
+ return Err(invalid_parameter_error(
+ "invalid_parameter_type",
+ Some(format!("Expected: {}, found: {}", server_type, client_type)),
+ ))
+ }
}
+ } else {
+ ScalarValue::TimestampMillisecond(
+ data.map(|ts| ts.and_utc().timestamp_millis()),
+ None,
+ )
}
}
&Type::DATE => {
let data = portal.parameter::<NaiveDate>(idx, &client_type)?;
- match server_type {
- ConcreteDataType::Date(_) => ScalarValue::Date32(data.map(|d| {
- (d - NaiveDate::from_ymd_opt(1970, 1, 1).unwrap()).num_days() as i32
- })),
- _ => {
- return Err(invalid_parameter_error(
- "invalid_parameter_type",
- Some(&format!(
- "Expected: {}, found: {}",
- server_type, client_type
- )),
- ));
+ if let Some(server_type) = &server_type {
+ match server_type {
+ ConcreteDataType::Date(_) => ScalarValue::Date32(data.map(|d| {
+ (d - NaiveDate::from(NaiveDateTime::UNIX_EPOCH)).num_days() as i32
+ })),
+ _ => {
+ return Err(invalid_parameter_error(
+ "invalid_parameter_type",
+ Some(format!("Expected: {}, found: {}", server_type, client_type)),
+ ));
+ }
}
+ } else {
+ ScalarValue::Date32(data.map(|d| {
+ (d - NaiveDate::from(NaiveDateTime::UNIX_EPOCH)).num_days() as i32
+ }))
}
}
&Type::INTERVAL => {
let data = portal.parameter::<PgInterval>(idx, &client_type)?;
- match server_type {
- ConcreteDataType::Interval(_) => {
- ScalarValue::IntervalMonthDayNano(data.map(|i| Interval::from(i).to_i128()))
- }
- _ => {
- return Err(invalid_parameter_error(
- "invalid_parameter_type",
- Some(&format!(
- "Expected: {}, found: {}",
- server_type, client_type
- )),
- ));
+ if let Some(server_type) = &server_type {
+ match server_type {
+ ConcreteDataType::Interval(_) => ScalarValue::IntervalMonthDayNano(
+ data.map(|i| Interval::from(i).to_i128()),
+ ),
+ _ => {
+ return Err(invalid_parameter_error(
+ "invalid_parameter_type",
+ Some(format!("Expected: {}, found: {}", server_type, client_type)),
+ ));
+ }
}
+ } else {
+ ScalarValue::IntervalMonthDayNano(data.map(|i| Interval::from(i).to_i128()))
}
}
&Type::BYTEA => {
let data = portal.parameter::<Vec<u8>>(idx, &client_type)?;
- match server_type {
- ConcreteDataType::String(_) => {
- ScalarValue::Utf8(data.map(|d| String::from_utf8_lossy(&d).to_string()))
- }
- ConcreteDataType::Binary(_) => ScalarValue::Binary(data),
- _ => {
- return Err(invalid_parameter_error(
- "invalid_parameter_type",
- Some(&format!(
- "Expected: {}, found: {}",
- server_type, client_type
- )),
- ));
+ if let Some(server_type) = &server_type {
+ match server_type {
+ ConcreteDataType::String(_) => {
+ ScalarValue::Utf8(data.map(|d| String::from_utf8_lossy(&d).to_string()))
+ }
+ ConcreteDataType::Binary(_) => ScalarValue::Binary(data),
+ _ => {
+ return Err(invalid_parameter_error(
+ "invalid_parameter_type",
+ Some(format!("Expected: {}, found: {}", server_type, client_type)),
+ ));
+ }
}
+ } else {
+ ScalarValue::Binary(data)
}
}
&Type::JSONB => {
let data = portal.parameter::<serde_json::Value>(idx, &client_type)?;
- match server_type {
- ConcreteDataType::Binary(_) => {
- ScalarValue::Binary(data.map(|d| jsonb::Value::from(d).to_vec()))
- }
- _ => {
- return Err(invalid_parameter_error(
- "invalid_parameter_type",
- Some(&format!(
- "Expected: {}, found: {}",
- server_type, client_type
- )),
- ));
+ if let Some(server_type) = &server_type {
+ match server_type {
+ ConcreteDataType::Binary(_) => {
+ ScalarValue::Binary(data.map(|d| jsonb::Value::from(d).to_vec()))
+ }
+ _ => {
+ return Err(invalid_parameter_error(
+ "invalid_parameter_type",
+ Some(format!("Expected: {}, found: {}", server_type, client_type)),
+ ));
+ }
}
+ } else {
+ ScalarValue::Binary(data.map(|d| jsonb::Value::from(d).to_vec()))
}
}
_ => Err(invalid_parameter_error(
"unsupported_parameter_value",
- Some(&format!("Found type: {}", client_type)),
+ Some(format!("Found type: {}", client_type)),
))?,
};
|
feat
|
improve support for postgres extended protocol (#4721)
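
A hedged, simplified sketch of the parameter-type resolution order the hunks above introduce (the names here are illustrative, not the real server API): a client-declared type is used when present, otherwise the server-inferred type, and only when both are missing does the handler report unknown_parameter_type; when a server type is also known it further constrains the target ScalarValue variant.

    // Illustrative only: resolution order for a prepared-statement parameter type.
    fn resolve<'a>(
        client_declared: Option<&'a str>,
        server_inferred: Option<&'a str>,
    ) -> Result<&'a str, &'static str> {
        match (client_declared, server_inferred) {
            (Some(client), _) => Ok(client),
            (None, Some(server)) => Ok(server),
            (None, None) => Err("unknown_parameter_type"),
        }
    }

    fn main() {
        assert_eq!(Ok("INT4"), resolve(Some("INT4"), None));
        assert_eq!(Ok("TEXT"), resolve(None, Some("TEXT")));
        assert!(resolve(None, None).is_err());
    }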
|
1578c004b095ba7aa8a58024755e60affbb34de0
|
2024-11-22 08:01:53
|
Yohan Wal
|
fix: prepare param mismatch (#5025)
| false
|
diff --git a/src/servers/src/mysql/handler.rs b/src/servers/src/mysql/handler.rs
index 587742687d82..d84a5945cabb 100644
--- a/src/servers/src/mysql/handler.rs
+++ b/src/servers/src/mysql/handler.rs
@@ -189,8 +189,6 @@ impl MysqlInstanceShim {
dummy_params(param_num)?
};
- debug_assert_eq!(params.len(), param_num - 1);
-
let columns = schema
.as_ref()
.map(|schema| {
@@ -205,14 +203,26 @@ impl MysqlInstanceShim {
.transpose()?
.unwrap_or_default();
- self.save_plan(
- SqlPlan {
- query: query.to_string(),
- plan,
- schema,
- },
- stmt_key,
- );
+ // DataFusion may optimize the plan so that some parameters are not used.
+ if params.len() != param_num - 1 {
+ self.save_plan(
+ SqlPlan {
+ query: query.to_string(),
+ plan: None,
+ schema: None,
+ },
+ stmt_key,
+ );
+ } else {
+ self.save_plan(
+ SqlPlan {
+ query: query.to_string(),
+ plan,
+ schema,
+ },
+ stmt_key,
+ );
+ }
Ok((params, columns))
}
diff --git a/tests/cases/standalone/common/prepare/mysql_prepare.result b/tests/cases/standalone/common/prepare/mysql_prepare.result
new file mode 100644
index 000000000000..ae6ca4400cab
--- /dev/null
+++ b/tests/cases/standalone/common/prepare/mysql_prepare.result
@@ -0,0 +1,72 @@
+-- invalid prepare, from
+-- https://github.com/duckdb/duckdb/blob/00a605270719941ca0412ad5d0a14b1bdfbf9eb5/test/sql/prepared/invalid_prepare.test
+-- SQLNESS PROTOCOL MYSQL
+SELECT ?;
+
+Failed to execute query, err: MySqlError { ERROR 1815 (HY000): (PlanQuery): Failed to plan SQL: Error during planning: Placeholder type could not be resolved. Make sure that the placeholder is bound to a concrete type, e.g. by providing parameter values. }
+
+-- SQLNESS PROTOCOL MYSQL
+PREPARE stmt FROM 'SELECT ?::int;';
+
+affected_rows: 0
+
+-- SQLNESS PROTOCOL MYSQL
+EXECUTE stmt USING 1;
+
++----------+
+| Int64(1) |
++----------+
+| 1 |
++----------+
+
+-- SQLNESS PROTOCOL MYSQL
+EXECUTE stmt USING 'a';
+
+Failed to parse query result, err: MySqlError { ERROR 1815 (HY000): (EngineExecuteQuery): Cast error: Cannot cast string 'a' to value of Int32 type }
+
+-- SQLNESS PROTOCOL MYSQL
+DEALLOCATE stmt;
+
+affected_rows: 0
+
+-- SQLNESS PROTOCOL MYSQL
+PREPARE stmt FROM 'SELECT ?::int WHERE 1=0;';
+
+affected_rows: 0
+
+-- SQLNESS PROTOCOL MYSQL
+EXECUTE stmt USING 1;
+
+affected_rows: 0
+
+-- SQLNESS PROTOCOL MYSQL
+EXECUTE stmt USING 'a';
+
+affected_rows: 0
+
+-- SQLNESS PROTOCOL MYSQL
+DEALLOCATE stmt;
+
+affected_rows: 0
+
+-- parameter variants, from:
+-- https://github.com/duckdb/duckdb/blob/2360dd00f193b5d0850f9379d0c3794eb2084f36/test/sql/prepared/parameter_variants.test
+-- SQLNESS PROTOCOL MYSQL
+PREPARE stmt FROM 'SELECT CAST(? AS INTEGER), CAST(? AS STRING);';
+
+affected_rows: 0
+
+-- SQLNESS PROTOCOL MYSQL
+EXECUTE stmt USING 1, 'hello';
+
++----------+---------------+
+| Int64(1) | Utf8("hello") |
++----------+---------------+
+| 1 | hello |
++----------+---------------+
+
+-- SQLNESS PROTOCOL MYSQL
+DEALLOCATE stmt;
+
+affected_rows: 0
+
diff --git a/tests/cases/standalone/common/prepare/mysql_prepare.sql b/tests/cases/standalone/common/prepare/mysql_prepare.sql
new file mode 100644
index 000000000000..da1681a790ff
--- /dev/null
+++ b/tests/cases/standalone/common/prepare/mysql_prepare.sql
@@ -0,0 +1,39 @@
+-- invalid prepare, from
+-- https://github.com/duckdb/duckdb/blob/00a605270719941ca0412ad5d0a14b1bdfbf9eb5/test/sql/prepared/invalid_prepare.test
+-- SQLNESS PROTOCOL MYSQL
+SELECT ?;
+
+-- SQLNESS PROTOCOL MYSQL
+PREPARE stmt FROM 'SELECT ?::int;';
+
+-- SQLNESS PROTOCOL MYSQL
+EXECUTE stmt USING 1;
+
+-- SQLNESS PROTOCOL MYSQL
+EXECUTE stmt USING 'a';
+
+-- SQLNESS PROTOCOL MYSQL
+DEALLOCATE stmt;
+
+-- SQLNESS PROTOCOL MYSQL
+PREPARE stmt FROM 'SELECT ?::int WHERE 1=0;';
+
+-- SQLNESS PROTOCOL MYSQL
+EXECUTE stmt USING 1;
+
+-- SQLNESS PROTOCOL MYSQL
+EXECUTE stmt USING 'a';
+
+-- SQLNESS PROTOCOL MYSQL
+DEALLOCATE stmt;
+
+-- parameter variants, from:
+-- https://github.com/duckdb/duckdb/blob/2360dd00f193b5d0850f9379d0c3794eb2084f36/test/sql/prepared/parameter_variants.test
+-- SQLNESS PROTOCOL MYSQL
+PREPARE stmt FROM 'SELECT CAST(? AS INTEGER), CAST(? AS STRING);';
+
+-- SQLNESS PROTOCOL MYSQL
+EXECUTE stmt USING 1, 'hello';
+
+-- SQLNESS PROTOCOL MYSQL
+DEALLOCATE stmt;
|
fix
|
prepare param mismatch (#5025)
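
A minimal sketch of the guard the handler change above adds (the helper name is hypothetical): DataFusion may optimize a placeholder away entirely, so the prepared plan is only cached when the number of inferred parameters still matches the number the client declared; otherwise the raw query is kept and re-planned at execution time.

    // Cache the optimized plan only if no placeholder was optimized away.
    fn should_cache_plan(inferred_params: usize, declared_params: usize) -> bool {
        inferred_params == declared_params
    }

    fn main() {
        assert!(should_cache_plan(2, 2));
        assert!(!should_cache_plan(1, 2)); // e.g. `SELECT ?::int WHERE 1=0`
    }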
|
ce139c8a232f1be43c2998f372a2b5d17732db83
|
2022-08-26 08:29:23
|
Lei, Huang
|
fix: impl scalar value helper and remove range limit (#205)
| false
|
diff --git a/src/common/time/src/date.rs b/src/common/time/src/date.rs
index deb4893c7ef6..0f1beb735d97 100644
--- a/src/common/time/src/date.rs
+++ b/src/common/time/src/date.rs
@@ -4,17 +4,15 @@ use std::str::FromStr;
use chrono::{Datelike, NaiveDate};
use serde::{Deserialize, Serialize};
use serde_json::Value;
-use snafu::{ensure, ResultExt};
+use snafu::ResultExt;
use crate::error::Result;
-use crate::error::{DateOverflowSnafu, Error, ParseDateStrSnafu};
+use crate::error::{Error, ParseDateStrSnafu};
const UNIX_EPOCH_FROM_CE: i32 = 719_163;
/// ISO 8601 [Date] values. The inner representation is a signed 32 bit integer that represents the
/// **days since "1970-01-01 00:00:00 UTC" (UNIX Epoch)**.
-///
-/// [Date] value ranges between "0000-01-01" to "9999-12-31".
#[derive(
Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default, Deserialize, Serialize,
)]
@@ -44,23 +42,13 @@ impl Display for Date {
}
impl Date {
- pub fn try_new(val: i32) -> Result<Self> {
- ensure!(
- val >= Self::MIN.0 && val <= Self::MAX.0,
- DateOverflowSnafu { value: val }
- );
-
- Ok(Self(val))
+ pub fn new(val: i32) -> Self {
+ Self(val)
}
pub fn val(&self) -> i32 {
self.0
}
-
- /// Max valid Date value: "9999-12-31"
- pub const MAX: Date = Date(2932896);
- /// Min valid Date value: "1000-01-01"
- pub const MIN: Date = Date(-354285);
}
#[cfg(test)]
@@ -71,9 +59,9 @@ mod tests {
#[test]
pub fn test_print_date2() {
- assert_eq!("1969-12-31", Date::try_new(-1).unwrap().to_string());
- assert_eq!("1970-01-01", Date::try_new(0).unwrap().to_string());
- assert_eq!("1970-02-12", Date::try_new(42).unwrap().to_string());
+ assert_eq!("1969-12-31", Date::new(-1).to_string());
+ assert_eq!("1970-01-01", Date::new(0).to_string());
+ assert_eq!("1970-02-12", Date::new(42).to_string());
}
#[test]
@@ -93,19 +81,9 @@ mod tests {
}
#[test]
- pub fn test_illegal_date_values() {
- assert!(Date::try_new(Date::MAX.0 + 1).is_err());
- assert!(Date::try_new(Date::MIN.0 - 1).is_err());
- }
-
- #[test]
- pub fn test_edge_date_values() {
- let date = Date::from_str("9999-12-31").unwrap();
- assert_eq!(Date::MAX.0, date.0);
- assert_eq!(date, Date::try_new(date.0).unwrap());
-
- let date = Date::from_str("1000-01-01").unwrap();
- assert_eq!(Date::MIN.0, date.0);
- assert_eq!(date, Date::try_new(date.0).unwrap());
+ pub fn test_min_max() {
+ let mut date = Date::from_str("9999-12-31").unwrap();
+ date.0 += 1000;
+ assert_eq!(date, Date::from_str(&date.to_string()).unwrap());
}
}
diff --git a/src/common/time/src/datetime.rs b/src/common/time/src/datetime.rs
index d927a4b04ee7..f7feaf6e89ed 100644
--- a/src/common/time/src/datetime.rs
+++ b/src/common/time/src/datetime.rs
@@ -3,15 +3,13 @@ use std::str::FromStr;
use chrono::NaiveDateTime;
use serde::{Deserialize, Serialize};
-use snafu::{ensure, ResultExt};
+use snafu::ResultExt;
-use crate::error::{DateTimeOverflowSnafu, Error, ParseDateStrSnafu, Result};
+use crate::error::{Error, ParseDateStrSnafu, Result};
const DATETIME_FORMAT: &str = "%F %T";
/// [DateTime] represents the **seconds elapsed since "1970-01-01 00:00:00 UTC" (UNIX Epoch)**.
-///
-/// Valid [DateTime] value ranges from "1000-01-01 00:00:00" to "9999-12-31 23:59:59".
#[derive(
Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default, Serialize, Deserialize,
)]
@@ -41,23 +39,13 @@ impl FromStr for DateTime {
}
impl DateTime {
- pub fn try_new(val: i64) -> Result<Self> {
- ensure!(
- val >= Self::MIN.0 && val <= Self::MAX.0,
- DateTimeOverflowSnafu { value: val }
- );
-
- Ok(Self(val))
+ pub fn new(val: i64) -> Self {
+ Self(val)
}
pub fn val(&self) -> i64 {
self.0
}
-
- /// Max valid DateTime value: 9999-12-31 23:59:59
- pub const MAX: DateTime = DateTime(253402300799);
- /// Min valid DateTime value: 0000-01-01 00:00:00
- pub const MIN: DateTime = DateTime(-30610224000);
}
#[cfg(test)]
@@ -66,24 +54,9 @@ mod tests {
#[test]
pub fn test_new_date_time() {
- assert_eq!(
- "1970-01-01 00:00:00",
- DateTime::try_new(0).unwrap().to_string()
- );
- assert_eq!(
- "1970-01-01 00:00:01",
- DateTime::try_new(1).unwrap().to_string()
- );
- assert_eq!(
- "1969-12-31 23:59:59",
- DateTime::try_new(-1).unwrap().to_string()
- );
- }
-
- #[test]
- pub fn test_max_min() {
- assert_eq!("9999-12-31 23:59:59", DateTime::MAX.to_string());
- assert_eq!("1000-01-01 00:00:00", DateTime::MIN.to_string());
+ assert_eq!("1970-01-01 00:00:00", DateTime::new(0).to_string());
+ assert_eq!("1970-01-01 00:00:01", DateTime::new(1).to_string());
+ assert_eq!("1969-12-31 23:59:59", DateTime::new(-1).to_string());
}
#[test]
diff --git a/src/common/time/src/error.rs b/src/common/time/src/error.rs
index b9c9fa28b666..2760785cfde0 100644
--- a/src/common/time/src/error.rs
+++ b/src/common/time/src/error.rs
@@ -1,17 +1,11 @@
use chrono::ParseError;
-use snafu::{Backtrace, Snafu};
+use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
pub enum Error {
#[snafu(display("Failed to parse string to date, raw: {}, source: {}", raw, source))]
ParseDateStr { raw: String, source: ParseError },
-
- #[snafu(display("Failed to parse i32 value to Date: {}", value))]
- DateOverflow { value: i32, backtrace: Backtrace },
-
- #[snafu(display("Failed to parse i64 value to DateTime: {}", value))]
- DateTimeOverflow { value: i64, backtrace: Backtrace },
}
pub type Result<T> = std::result::Result<T, Error>;
diff --git a/src/datatypes/src/scalars.rs b/src/datatypes/src/scalars.rs
index 6f8cd9fbbf8f..5b7a49b78a7c 100644
--- a/src/datatypes/src/scalars.rs
+++ b/src/datatypes/src/scalars.rs
@@ -335,12 +335,10 @@ mod tests {
#[test]
pub fn test_build_date_vector() {
let expect: Vec<Option<Date>> = vec![
- Some(Date::try_new(0).unwrap()),
- Some(Date::try_new(-1).unwrap()),
- Some(Date::try_new(1).unwrap()),
+ Some(Date::new(0)),
+ Some(Date::new(-1)),
None,
- Some(Date::MAX),
- Some(Date::MIN),
+ Some(Date::new(1)),
];
let vector: DateVector = build_vector_from_slice(&expect);
assert_vector_eq(&expect, &vector);
@@ -348,7 +346,7 @@ mod tests {
#[test]
pub fn test_date_scalar() {
- let date = Date::try_new(1).unwrap();
+ let date = Date::new(1);
assert_eq!(date, date.as_scalar_ref());
assert_eq!(date, date.to_owned_scalar());
}
diff --git a/src/datatypes/src/value.rs b/src/datatypes/src/value.rs
index 36b44d921e59..381da8512cc2 100644
--- a/src/datatypes/src/value.rs
+++ b/src/datatypes/src/value.rs
@@ -406,11 +406,11 @@ mod tests {
);
assert_eq!(
serde_json::Value::Number(5000i32.into()),
- to_json(Value::Date(common_time::date::Date::try_new(5000).unwrap()))
+ to_json(Value::Date(common_time::date::Date::new(5000)))
);
assert_eq!(
serde_json::Value::Number(5000i64.into()),
- to_json(Value::DateTime(DateTime::try_new(5000).unwrap()))
+ to_json(Value::DateTime(DateTime::new(5000)))
);
let json_value: serde_json::Value =
diff --git a/src/datatypes/src/vectors/builder.rs b/src/datatypes/src/vectors/builder.rs
index 2f379ee3aa47..fea07e0a8a21 100644
--- a/src/datatypes/src/vectors/builder.rs
+++ b/src/datatypes/src/vectors/builder.rs
@@ -138,11 +138,9 @@ impl VectorBuilder {
(VectorBuilder::String(b), Value::String(v)) => b.push(Some(v.as_utf8())),
(VectorBuilder::Binary(b), Value::Binary(v)) => b.push(Some(v)),
(VectorBuilder::Date(b), Value::Date(v)) => b.push(Some(*v)),
- (VectorBuilder::Date(b), Value::Int32(v)) => b.push(Some(Date::try_new(*v).unwrap())),
+ (VectorBuilder::Date(b), Value::Int32(v)) => b.push(Some(Date::new(*v))),
(VectorBuilder::DateTime(b), Value::DateTime(v)) => b.push(Some(*v)),
- (VectorBuilder::DateTime(b), Value::Int64(v)) => {
- b.push(Some(DateTime::try_new(*v).unwrap()))
- }
+ (VectorBuilder::DateTime(b), Value::Int64(v)) => b.push(Some(DateTime::new(*v))),
_ => panic!(
"Value {:?} does not match builder type {:?}",
value,
@@ -292,11 +290,11 @@ mod tests {
let mut builder = VectorBuilder::with_capacity(ConcreteDataType::date_datatype(), 3);
assert_eq!(ConcreteDataType::date_datatype(), builder.data_type());
builder.push_null();
- builder.push(&Value::Date(Date::try_new(123).unwrap()));
+ builder.push(&Value::Date(Date::new(123)));
let v = builder.finish();
let v = v.as_any().downcast_ref::<DateVector>().unwrap();
assert_eq!(Value::Null, v.get(0));
- assert_eq!(Value::Date(Date::try_new(123).unwrap()), v.get(1));
+ assert_eq!(Value::Date(Date::new(123)), v.get(1));
assert_eq!(
&arrow::datatypes::DataType::Date32,
v.to_arrow_array().data_type()
@@ -308,11 +306,11 @@ mod tests {
let mut builder = VectorBuilder::with_capacity(ConcreteDataType::datetime_datatype(), 3);
assert_eq!(ConcreteDataType::datetime_datatype(), builder.data_type());
builder.push_null();
- builder.push(&Value::DateTime(DateTime::try_new(123).unwrap()));
+ builder.push(&Value::DateTime(DateTime::new(123)));
let v = builder.finish();
let v = v.as_any().downcast_ref::<DateTimeVector>().unwrap();
assert_eq!(Value::Null, v.get(0));
- assert_eq!(Value::DateTime(DateTime::try_new(123).unwrap()), v.get(1));
+ assert_eq!(Value::DateTime(DateTime::new(123)), v.get(1));
assert_eq!(
&arrow::datatypes::DataType::Date64,
v.to_arrow_array().data_type()
diff --git a/src/datatypes/src/vectors/date.rs b/src/datatypes/src/vectors/date.rs
index 37cfb5998f2e..3cddd1d80117 100644
--- a/src/datatypes/src/vectors/date.rs
+++ b/src/datatypes/src/vectors/date.rs
@@ -83,9 +83,7 @@ impl Vector for DateVector {
fn get(&self, index: usize) -> Value {
match self.array.get(index) {
- Value::Int32(v) => {
- Value::Date(Date::try_new(v).expect("Not expected to overflow here"))
- }
+ Value::Int32(v) => Value::Date(Date::new(v)),
Value::Null => Value::Null,
_ => {
unreachable!()
@@ -114,9 +112,7 @@ impl<'a> Iterator for DateIter<'a> {
type Item = Option<Date>;
fn next(&mut self) -> Option<Self::Item> {
- self.iter
- .next()
- .map(|v| v.map(|v| Date::try_new(v).unwrap()))
+ self.iter.next().map(|v| v.map(Date::new))
}
}
@@ -128,7 +124,7 @@ impl ScalarVector for DateVector {
type Builder = DateVectorBuilder;
fn get_data(&self, idx: usize) -> Option<Self::RefItem<'_>> {
- self.array.get_data(idx).map(|v| Date::try_new(v).unwrap())
+ self.array.get_data(idx).map(Date::new)
}
fn iter_data(&self) -> Self::Iter<'_> {
@@ -143,7 +139,7 @@ impl Serializable for DateVector {
Ok(self
.array
.iter_data()
- .map(|v| v.map(|d| Date::try_new(d).unwrap()))
+ .map(|v| v.map(Date::new))
.map(|v| match v {
None => serde_json::Value::Null,
Some(v) => v.into(),
@@ -205,26 +201,25 @@ mod tests {
#[test]
pub fn test_build_date_vector() {
let mut builder = DateVectorBuilder::with_capacity(4);
- builder.push(Some(Date::try_new(1).unwrap()));
+ builder.push(Some(Date::new(1)));
builder.push(None);
- builder.push(Some(Date::try_new(-1).unwrap()));
+ builder.push(Some(Date::new(-1)));
let vector = builder.finish();
assert_eq!(3, vector.len());
- assert_eq!(Some(Date::try_new(1).unwrap()), vector.get_data(0));
+ assert_eq!(Some(Date::new(1)), vector.get_data(0));
assert_eq!(None, vector.get_data(1));
- assert_eq!(Some(Date::try_new(-1).unwrap()), vector.get_data(2));
+ assert_eq!(Some(Date::new(-1)), vector.get_data(2));
let mut iter = vector.iter_data();
- assert_eq!(Some(Date::try_new(1).unwrap()), iter.next().unwrap());
+ assert_eq!(Some(Date::new(1)), iter.next().unwrap());
assert_eq!(None, iter.next().unwrap());
- assert_eq!(Some(Date::try_new(-1).unwrap()), iter.next().unwrap());
+ assert_eq!(Some(Date::new(-1)), iter.next().unwrap());
}
#[test]
pub fn test_date_scalar() {
- let vector =
- DateVector::from_slice(&[Date::try_new(1).unwrap(), Date::try_new(2).unwrap()]);
+ let vector = DateVector::from_slice(&[Date::new(1), Date::new(2)]);
assert_eq!(2, vector.len());
- assert_eq!(Some(Date::try_new(1).unwrap()), vector.get_data(0));
- assert_eq!(Some(Date::try_new(2).unwrap()), vector.get_data(1));
+ assert_eq!(Some(Date::new(1)), vector.get_data(0));
+ assert_eq!(Some(Date::new(2)), vector.get_data(1));
}
}
diff --git a/src/datatypes/src/vectors/datetime.rs b/src/datatypes/src/vectors/datetime.rs
index 1f51775c5edc..130a1e6f1139 100644
--- a/src/datatypes/src/vectors/datetime.rs
+++ b/src/datatypes/src/vectors/datetime.rs
@@ -84,9 +84,7 @@ impl Vector for DateTimeVector {
fn get(&self, index: usize) -> Value {
match self.array.get(index) {
- Value::Int64(v) => {
- Value::DateTime(DateTime::try_new(v).expect("Not expected to overflow here"))
- }
+ Value::Int64(v) => Value::DateTime(DateTime::new(v)),
Value::Null => Value::Null,
_ => {
unreachable!()
@@ -104,7 +102,7 @@ impl Serializable for DateTimeVector {
Ok(self
.array
.iter_data()
- .map(|v| v.map(|d| DateTime::try_new(d).unwrap()))
+ .map(|v| v.map(DateTime::new))
.map(|v| match v {
None => serde_json::Value::Null,
Some(v) => v.into(),
@@ -113,6 +111,14 @@ impl Serializable for DateTimeVector {
}
}
+impl From<Vec<Option<i64>>> for DateTimeVector {
+ fn from(data: Vec<Option<i64>>) -> Self {
+ Self {
+ array: PrimitiveVector::<i64>::from(data),
+ }
+ }
+}
+
pub struct DateTimeVectorBuilder {
buffer: PrimitiveVectorBuilder<i64>,
}
@@ -167,9 +173,7 @@ impl<'a> Iterator for DateTimeIter<'a> {
type Item = Option<DateTime>;
fn next(&mut self) -> Option<Self::Item> {
- self.iter
- .next()
- .map(|v| v.map(|v| DateTime::try_new(v).unwrap()))
+ self.iter.next().map(|v| v.map(DateTime::new))
}
}
@@ -180,9 +184,7 @@ impl ScalarVector for DateTimeVector {
type Builder = DateTimeVectorBuilder;
fn get_data(&self, idx: usize) -> Option<Self::RefItem<'_>> {
- self.array
- .get_data(idx)
- .map(|v| DateTime::try_new(v).unwrap())
+ self.array.get_data(idx).map(DateTime::new)
}
fn iter_data(&self) -> Self::Iter<'_> {
@@ -209,9 +211,9 @@ mod tests {
v.to_arrow_array().data_type()
);
let mut iter = v.iter_data();
- assert_eq!(Some(DateTime::try_new(1).unwrap()), iter.next().unwrap());
- assert_eq!(Some(DateTime::try_new(2).unwrap()), iter.next().unwrap());
- assert_eq!(Some(DateTime::try_new(3).unwrap()), iter.next().unwrap());
+ assert_eq!(Some(DateTime::new(1)), iter.next().unwrap());
+ assert_eq!(Some(DateTime::new(2)), iter.next().unwrap());
+ assert_eq!(Some(DateTime::new(3)), iter.next().unwrap());
assert!(!v.is_null(0));
assert_eq!(24, v.memory_size()); // size of i64 * 3
@@ -230,14 +232,14 @@ mod tests {
#[test]
pub fn test_datetime_vector_builder() {
let mut builder = DateTimeVectorBuilder::with_capacity(3);
- builder.push(Some(DateTime::try_new(1).unwrap()));
+ builder.push(Some(DateTime::new(1)));
builder.push(None);
- builder.push(Some(DateTime::try_new(-1).unwrap()));
+ builder.push(Some(DateTime::new(-1)));
let v = builder.finish();
assert_eq!(ConcreteDataType::datetime_datatype(), v.data_type());
- assert_eq!(Value::DateTime(DateTime::try_new(1).unwrap()), v.get(0));
+ assert_eq!(Value::DateTime(DateTime::new(1)), v.get(0));
assert_eq!(Value::Null, v.get(1));
- assert_eq!(Value::DateTime(DateTime::try_new(-1).unwrap()), v.get(2));
+ assert_eq!(Value::DateTime(DateTime::new(-1)), v.get(2));
}
}
diff --git a/src/datatypes/src/vectors/helper.rs b/src/datatypes/src/vectors/helper.rs
index cc2bcd790e45..d7e682a68707 100644
--- a/src/datatypes/src/vectors/helper.rs
+++ b/src/datatypes/src/vectors/helper.rs
@@ -142,6 +142,9 @@ impl Helper {
ScalarValue::Date32(v) => {
ConstantVector::new(Arc::new(DateVector::from(vec![v])), length)
}
+ ScalarValue::Date64(v) => {
+ ConstantVector::new(Arc::new(DateTimeVector::from(vec![v])), length)
+ }
_ => {
return ConversionSnafu {
from: format!("Unsupported scalar value: {}", value),
@@ -193,6 +196,7 @@ impl Helper {
mod tests {
use arrow::array::Int32Array;
use common_time::date::Date;
+ use common_time::datetime::DateTime;
use super::*;
@@ -230,7 +234,17 @@ mod tests {
assert_eq!(ConcreteDataType::date_datatype(), vector.data_type());
assert_eq!(3, vector.len());
for i in 0..vector.len() {
- assert_eq!(Value::Date(Date::try_new(42).unwrap()), vector.get(i));
+ assert_eq!(Value::Date(Date::new(42)), vector.get(i));
+ }
+ }
+
+ #[test]
+ pub fn test_try_from_scalar_datetime_value() {
+ let vector = Helper::try_from_scalar_value(ScalarValue::Date64(Some(42)), 3).unwrap();
+ assert_eq!(ConcreteDataType::datetime_datatype(), vector.data_type());
+ assert_eq!(3, vector.len());
+ for i in 0..vector.len() {
+ assert_eq!(Value::DateTime(DateTime::new(42)), vector.get(i));
}
}
}
|
fix
|
impl scalar value helper and remove range limit (#205)
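
As a side note on the representation behind Date::new above (a sketch assuming the chrono crate, not code from this commit): the inner i32 is the number of days since the Unix epoch, so converting a calendar date is a plain day difference, which is also why Date::new(42) prints as "1970-02-12" in the test.

    use chrono::NaiveDate;

    // Days elapsed since 1970-01-01 for a given calendar date.
    fn days_since_unix_epoch(date: NaiveDate) -> i32 {
        let epoch = NaiveDate::from_ymd_opt(1970, 1, 1).unwrap();
        (date - epoch).num_days() as i32
    }

    fn main() {
        let date = NaiveDate::from_ymd_opt(1970, 2, 12).unwrap();
        assert_eq!(42, days_since_unix_epoch(date));
    }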
|
4460af800f344676f18749ed0a5205ba2e84844b
|
2023-12-30 18:32:26
|
AntiTopQuark
|
feat(TableRouteValue): add panic notes and type checks (#3031)
| false
|
diff --git a/src/common/meta/src/ddl/alter_table.rs b/src/common/meta/src/ddl/alter_table.rs
index 092d4dd24263..c3b1f7c31121 100644
--- a/src/common/meta/src/ddl/alter_table.rs
+++ b/src/common/meta/src/ddl/alter_table.rs
@@ -191,7 +191,7 @@ impl AlterTableProcedure {
.await?
.context(TableRouteNotFoundSnafu { table_id })?
.into_inner();
- let region_routes = table_route.region_routes();
+ let region_routes = table_route.region_routes()?;
let leaders = find_leaders(region_routes);
let mut alter_region_tasks = Vec::with_capacity(leaders.len());
diff --git a/src/common/meta/src/ddl/create_table.rs b/src/common/meta/src/ddl/create_table.rs
index c73844fc8337..c6e09006b470 100644
--- a/src/common/meta/src/ddl/create_table.rs
+++ b/src/common/meta/src/ddl/create_table.rs
@@ -217,7 +217,7 @@ impl CreateTableProcedure {
.context(TableRouteNotFoundSnafu {
table_id: physical_table_id,
})?;
- let region_routes = physical_table_route.region_routes();
+ let region_routes = physical_table_route.region_routes()?;
let request_builder = self.new_region_request_builder(Some(physical_table_id))?;
diff --git a/src/common/meta/src/ddl/drop_table.rs b/src/common/meta/src/ddl/drop_table.rs
index 94c6cdf0a06a..7fac47e62cb1 100644
--- a/src/common/meta/src/ddl/drop_table.rs
+++ b/src/common/meta/src/ddl/drop_table.rs
@@ -116,7 +116,7 @@ impl DropTableProcedure {
/// Register dropping regions if doesn't exist.
fn register_dropping_regions(&mut self) -> Result<()> {
- let region_routes = self.data.region_routes();
+ let region_routes = self.data.region_routes()?;
let dropping_regions = operating_leader_regions(region_routes);
@@ -190,7 +190,7 @@ impl DropTableProcedure {
pub async fn on_datanode_drop_regions(&self) -> Result<Status> {
let table_id = self.data.table_id();
- let region_routes = &self.data.region_routes();
+ let region_routes = &self.data.region_routes()?;
let leaders = find_leaders(region_routes);
let mut drop_region_tasks = Vec::with_capacity(leaders.len());
@@ -306,7 +306,7 @@ impl DropTableData {
self.task.table_ref()
}
- fn region_routes(&self) -> &Vec<RegionRoute> {
+ fn region_routes(&self) -> Result<&Vec<RegionRoute>> {
self.table_route_value.region_routes()
}
diff --git a/src/common/meta/src/ddl_manager.rs b/src/common/meta/src/ddl_manager.rs
index af669797f4d4..7876d2a8a793 100644
--- a/src/common/meta/src/ddl_manager.rs
+++ b/src/common/meta/src/ddl_manager.rs
@@ -278,7 +278,7 @@ async fn handle_truncate_table_task(
let table_route_value =
table_route_value.context(error::TableRouteNotFoundSnafu { table_id })?;
- let table_route = table_route_value.into_inner().region_routes().clone();
+ let table_route = table_route_value.into_inner().region_routes()?.clone();
let id = ddl_manager
.submit_truncate_table_task(
diff --git a/src/common/meta/src/error.rs b/src/common/meta/src/error.rs
index 323d922b9cda..2a0db2abbb08 100644
--- a/src/common/meta/src/error.rs
+++ b/src/common/meta/src/error.rs
@@ -351,6 +351,9 @@ pub enum Error {
#[snafu(display("The topic pool is empty"))]
EmptyTopicPool { location: Location },
+
+ #[snafu(display("Unexpected table route type: {}", err_msg))]
+ UnexpectedLogicalRouteTable { location: Location, err_msg: String },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -392,7 +395,8 @@ impl ErrorExt for Error {
| BuildKafkaPartitionClient { .. }
| ProduceRecord { .. }
| CreateKafkaWalTopic { .. }
- | EmptyTopicPool { .. } => StatusCode::Unexpected,
+ | EmptyTopicPool { .. }
+ | UnexpectedLogicalRouteTable { .. } => StatusCode::Unexpected,
SendMessage { .. }
| GetKvCache { .. }
diff --git a/src/common/meta/src/key.rs b/src/common/meta/src/key.rs
index bb2b87a973f5..57de421be202 100644
--- a/src/common/meta/src/key.rs
+++ b/src/common/meta/src/key.rs
@@ -483,7 +483,7 @@ impl TableMetadataManager {
.build_delete_txn(table_id, table_info_value)?;
// Deletes datanode table key value pairs.
- let distribution = region_distribution(table_route_value.region_routes())?;
+ let distribution = region_distribution(table_route_value.region_routes()?)?;
let delete_datanode_txn = self
.datanode_table_manager()
.build_delete_txn(table_id, distribution)?;
@@ -608,7 +608,7 @@ impl TableMetadataManager {
) -> Result<()> {
// Updates the datanode table key value pairs.
let current_region_distribution =
- region_distribution(current_table_route_value.region_routes())?;
+ region_distribution(current_table_route_value.region_routes()?)?;
let new_region_distribution = region_distribution(&new_region_routes)?;
let update_datanode_table_txn = self.datanode_table_manager().build_update_txn(
@@ -621,7 +621,7 @@ impl TableMetadataManager {
)?;
// Updates the table_route.
- let new_table_route_value = current_table_route_value.update(new_region_routes);
+ let new_table_route_value = current_table_route_value.update(new_region_routes)?;
let (update_table_route_txn, on_update_table_route_failure) = self
.table_route_manager()
@@ -656,7 +656,7 @@ impl TableMetadataManager {
where
F: Fn(&RegionRoute) -> Option<Option<RegionStatus>>,
{
- let mut new_region_routes = current_table_route_value.region_routes().clone();
+ let mut new_region_routes = current_table_route_value.region_routes()?.clone();
let mut updated = 0;
for route in &mut new_region_routes {
@@ -673,7 +673,7 @@ impl TableMetadataManager {
}
// Updates the table_route.
- let new_table_route_value = current_table_route_value.update(new_region_routes);
+ let new_table_route_value = current_table_route_value.update(new_region_routes)?;
let (update_table_route_txn, on_update_table_route_failure) = self
.table_route_manager()
@@ -897,7 +897,11 @@ mod tests {
table_info
);
assert_eq!(
- remote_table_route.unwrap().into_inner().region_routes(),
+ remote_table_route
+ .unwrap()
+ .into_inner()
+ .region_routes()
+ .unwrap(),
region_routes
);
}
@@ -978,7 +982,7 @@ mod tests {
.unwrap()
.unwrap()
.into_inner();
- assert_eq!(removed_table_route.region_routes(), region_routes);
+ assert_eq!(removed_table_route.region_routes().unwrap(), region_routes);
}
#[tokio::test]
@@ -1173,11 +1177,11 @@ mod tests {
.unwrap();
assert_eq!(
- updated_route_value.region_routes()[0].leader_status,
+ updated_route_value.region_routes().unwrap()[0].leader_status,
Some(RegionStatus::Downgraded)
);
assert_eq!(
- updated_route_value.region_routes()[1].leader_status,
+ updated_route_value.region_routes().unwrap()[1].leader_status,
Some(RegionStatus::Downgraded)
);
}
@@ -1271,7 +1275,8 @@ mod tests {
let current_table_route_value = DeserializedValueWithBytes::from_inner(
current_table_route_value
.inner
- .update(new_region_routes.clone()),
+ .update(new_region_routes.clone())
+ .unwrap(),
);
let new_region_routes = vec![new_region_route(2, 4), new_region_route(5, 5)];
// it should be ok.
@@ -1295,13 +1300,16 @@ mod tests {
// if the current_table_route_value is wrong, it should return an error.
// The ABA problem.
- let wrong_table_route_value =
- DeserializedValueWithBytes::from_inner(current_table_route_value.update(vec![
- new_region_route(1, 1),
- new_region_route(2, 2),
- new_region_route(3, 3),
- new_region_route(4, 4),
- ]));
+ let wrong_table_route_value = DeserializedValueWithBytes::from_inner(
+ current_table_route_value
+ .update(vec![
+ new_region_route(1, 1),
+ new_region_route(2, 2),
+ new_region_route(3, 3),
+ new_region_route(4, 4),
+ ])
+ .unwrap(),
+ );
assert!(table_metadata_manager
.update_table_route(
table_id,
diff --git a/src/common/meta/src/key/table_route.rs b/src/common/meta/src/key/table_route.rs
index d767d098a79f..4d2ac35001f3 100644
--- a/src/common/meta/src/key/table_route.rs
+++ b/src/common/meta/src/key/table_route.rs
@@ -16,12 +16,12 @@ use std::collections::HashMap;
use std::fmt::Display;
use serde::{Deserialize, Serialize};
-use snafu::ResultExt;
+use snafu::{ensure, ResultExt};
use store_api::storage::{RegionId, RegionNumber};
use table::metadata::TableId;
use super::{DeserializedValueWithBytes, TableMetaValue};
-use crate::error::{Result, SerdeJsonSnafu};
+use crate::error::{Result, SerdeJsonSnafu, UnexpectedLogicalRouteTableSnafu};
use crate::key::{to_removed_key, RegionDistribution, TableMetaKey, TABLE_ROUTE_PREFIX};
use crate::kv_backend::txn::{Compare, CompareOp, Txn, TxnOp, TxnOpResponse};
use crate::kv_backend::KvBackendRef;
@@ -62,29 +62,48 @@ impl TableRouteValue {
}
/// Returns a new version [TableRouteValue] with `region_routes`.
- pub fn update(&self, region_routes: Vec<RegionRoute>) -> Self {
+ pub fn update(&self, region_routes: Vec<RegionRoute>) -> Result<Self> {
+ ensure!(
+ self.is_physical(),
+ UnexpectedLogicalRouteTableSnafu {
+ err_msg: "{self:?} is a non-physical TableRouteValue.",
+ }
+ );
let version = self.physical_table_route().version;
- Self::Physical(PhysicalTableRouteValue {
+ Ok(Self::Physical(PhysicalTableRouteValue {
region_routes,
version: version + 1,
- })
+ }))
}
/// Returns the version.
///
/// For test purpose.
#[cfg(any(test, feature = "testing"))]
- pub fn version(&self) -> u64 {
- self.physical_table_route().version
+ pub fn version(&self) -> Result<u64> {
+ ensure!(
+ self.is_physical(),
+ UnexpectedLogicalRouteTableSnafu {
+ err_msg: "{self:?} is a non-physical TableRouteValue.",
+ }
+ );
+ Ok(self.physical_table_route().version)
}
/// Returns the corresponding [RegionRoute].
- pub fn region_route(&self, region_id: RegionId) -> Option<RegionRoute> {
- self.physical_table_route()
+ pub fn region_route(&self, region_id: RegionId) -> Result<Option<RegionRoute>> {
+ ensure!(
+ self.is_physical(),
+ UnexpectedLogicalRouteTableSnafu {
+ err_msg: "{self:?} is a non-physical TableRouteValue.",
+ }
+ );
+ Ok(self
+ .physical_table_route()
.region_routes
.iter()
.find(|route| route.region.id == region_id)
- .cloned()
+ .cloned())
}
/// Returns true if it's [TableRouteValue::Physical].
@@ -93,11 +112,14 @@ impl TableRouteValue {
}
/// Gets the [RegionRoute]s of this [TableRouteValue::Physical].
- ///
- /// # Panics
- /// The route type is not the [TableRouteValue::Physical].
- pub fn region_routes(&self) -> &Vec<RegionRoute> {
- &self.physical_table_route().region_routes
+ pub fn region_routes(&self) -> Result<&Vec<RegionRoute>> {
+ ensure!(
+ self.is_physical(),
+ UnexpectedLogicalRouteTableSnafu {
+ err_msg: "{self:?} is a non-physical TableRouteValue.",
+ }
+ );
+ Ok(&self.physical_table_route().region_routes)
}
fn physical_table_route(&self) -> &PhysicalTableRouteValue {
@@ -354,7 +376,7 @@ impl TableRouteManager {
) -> Result<Option<RegionDistribution>> {
self.get(table_id)
.await?
- .map(|table_route| region_distribution(table_route.region_routes()))
+ .map(|table_route| region_distribution(table_route.region_routes()?))
.transpose()
}
}
diff --git a/src/meta-srv/src/error.rs b/src/meta-srv/src/error.rs
index 530fba83aa2e..5272c3abe77a 100644
--- a/src/meta-srv/src/error.rs
+++ b/src/meta-srv/src/error.rs
@@ -602,6 +602,13 @@ pub enum Error {
#[snafu(display("Weight array is not set"))]
NotSetWeightArray { location: Location },
+
+ #[snafu(display("Unexpected table route type: {}", err_msg))]
+ UnexpectedLogicalRouteTable {
+ location: Location,
+ err_msg: String,
+ source: common_meta::error::Error,
+ },
}
impl Error {
@@ -717,7 +724,8 @@ impl ErrorExt for Error {
| Error::TableMetadataManager { source, .. }
| Error::KvBackend { source, .. }
| Error::UpdateTableRoute { source, .. }
- | Error::GetFullTableInfo { source, .. } => source.status_code(),
+ | Error::GetFullTableInfo { source, .. }
+ | Error::UnexpectedLogicalRouteTable { source, .. } => source.status_code(),
Error::InitMetadata { source, .. } | Error::InitDdlManager { source, .. } => {
source.status_code()
diff --git a/src/meta-srv/src/procedure/region_failover/deactivate_region.rs b/src/meta-srv/src/procedure/region_failover/deactivate_region.rs
index c2d06590aec2..650c794126a6 100644
--- a/src/meta-srv/src/procedure/region_failover/deactivate_region.rs
+++ b/src/meta-srv/src/procedure/region_failover/deactivate_region.rs
@@ -208,6 +208,7 @@ mod tests {
let should_downgraded = table_route_value
.region_routes()
+ .unwrap()
.iter()
.find(|route| route.region.id.region_number() == failed_region.region_number)
.unwrap();
diff --git a/src/meta-srv/src/procedure/region_failover/update_metadata.rs b/src/meta-srv/src/procedure/region_failover/update_metadata.rs
index 23ade1a2a1fe..c2218c6afede 100644
--- a/src/meta-srv/src/procedure/region_failover/update_metadata.rs
+++ b/src/meta-srv/src/procedure/region_failover/update_metadata.rs
@@ -85,7 +85,12 @@ impl UpdateRegionMetadata {
.context(error::TableMetadataManagerSnafu)?
.context(TableRouteNotFoundSnafu { table_id })?;
- let mut new_region_routes = table_route_value.region_routes().clone();
+ let mut new_region_routes = table_route_value
+ .region_routes()
+ .context(error::UnexpectedLogicalRouteTableSnafu {
+ err_msg: "{self:?} is a non-physical TableRouteValue.",
+ })?
+ .clone();
for region_route in new_region_routes.iter_mut() {
if region_route.region.id.region_number() == failed_region.region_number {
@@ -234,6 +239,7 @@ mod tests {
.unwrap()
.into_inner()
.region_routes()
+ .unwrap()
.clone()
}
@@ -396,8 +402,8 @@ mod tests {
.unwrap()
.into_inner();
- let peers = &extract_all_peers(table_route_value.region_routes());
- let actual = table_route_value.region_routes();
+ let peers = &extract_all_peers(table_route_value.region_routes().unwrap());
+ let actual = table_route_value.region_routes().unwrap();
let expected = &vec![
new_region_route(1, peers, 2),
new_region_route(2, peers, 3),
@@ -416,7 +422,7 @@ mod tests {
.unwrap()
.into_inner();
- let map = region_distribution(table_route_value.region_routes()).unwrap();
+ let map = region_distribution(table_route_value.region_routes().unwrap()).unwrap();
assert_eq!(map.len(), 2);
assert_eq!(map.get(&2), Some(&vec![1, 3]));
assert_eq!(map.get(&3), Some(&vec![2, 4]));
diff --git a/src/meta-srv/src/procedure/region_migration.rs b/src/meta-srv/src/procedure/region_migration.rs
index a1e92277d60b..b187a026723a 100644
--- a/src/meta-srv/src/procedure/region_migration.rs
+++ b/src/meta-srv/src/procedure/region_migration.rs
@@ -753,7 +753,7 @@ mod tests {
.unwrap()
.version();
// Should be unchanged.
- assert_eq!(table_routes_version, 0);
+ assert_eq!(table_routes_version.unwrap(), 0);
}
#[tokio::test]
diff --git a/src/meta-srv/src/procedure/region_migration/manager.rs b/src/meta-srv/src/procedure/region_migration/manager.rs
index 03794ed85d11..dd034ba3e7d2 100644
--- a/src/meta-srv/src/procedure/region_migration/manager.rs
+++ b/src/meta-srv/src/procedure/region_migration/manager.rs
@@ -244,6 +244,9 @@ impl RegionMigrationManager {
// Safety: checked before.
let region_route = table_route
.region_route(region_id)
+ .context(error::UnexpectedLogicalRouteTableSnafu {
+ err_msg: "{self:?} is a non-physical TableRouteValue.",
+ })?
.context(error::RegionRouteNotFoundSnafu { region_id })?;
if self.has_migrated(&region_route, &task)? {
diff --git a/src/meta-srv/src/procedure/region_migration/migration_start.rs b/src/meta-srv/src/procedure/region_migration/migration_start.rs
index fa84a1a6dd5e..68b291cb87c1 100644
--- a/src/meta-srv/src/procedure/region_migration/migration_start.rs
+++ b/src/meta-srv/src/procedure/region_migration/migration_start.rs
@@ -18,7 +18,7 @@ use common_meta::peer::Peer;
use common_meta::rpc::router::RegionRoute;
use common_procedure::Status;
use serde::{Deserialize, Serialize};
-use snafu::OptionExt;
+use snafu::{OptionExt, ResultExt};
use store_api::storage::RegionId;
use super::migration_end::RegionMigrationEnd;
@@ -85,6 +85,9 @@ impl RegionMigrationStart {
let region_route = table_route
.region_routes()
+ .context(error::UnexpectedLogicalRouteTableSnafu {
+ err_msg: "{self:?} is a non-physical TableRouteValue.",
+ })?
.iter()
.find(|route| route.region.id == region_id)
.cloned()
diff --git a/src/meta-srv/src/procedure/region_migration/test_util.rs b/src/meta-srv/src/procedure/region_migration/test_util.rs
index 4431791ff70f..4e9bb3939525 100644
--- a/src/meta-srv/src/procedure/region_migration/test_util.rs
+++ b/src/meta-srv/src/procedure/region_migration/test_util.rs
@@ -419,7 +419,7 @@ impl ProcedureMigrationTestSuite {
.unwrap()
.unwrap()
.into_inner();
- let region_routes = table_route.region_routes();
+ let region_routes = table_route.region_routes().unwrap();
let expected_leader_id = self.context.persistent_ctx.to_peer.id;
let removed_follower_id = self.context.persistent_ctx.from_peer.id;
diff --git a/src/meta-srv/src/procedure/region_migration/update_metadata/downgrade_leader_region.rs b/src/meta-srv/src/procedure/region_migration/update_metadata/downgrade_leader_region.rs
index cc67aa7ca8e9..818aadd9cda6 100644
--- a/src/meta-srv/src/procedure/region_migration/update_metadata/downgrade_leader_region.rs
+++ b/src/meta-srv/src/procedure/region_migration/update_metadata/downgrade_leader_region.rs
@@ -208,8 +208,8 @@ mod tests {
.unwrap();
// It should remain unchanged.
- assert_eq!(latest_table_route.version(), 0);
- assert!(!latest_table_route.region_routes()[0].is_leader_downgraded());
+ assert_eq!(latest_table_route.version().unwrap(), 0);
+ assert!(!latest_table_route.region_routes().unwrap()[0].is_leader_downgraded());
assert!(ctx.volatile_ctx.table_route.is_none());
}
@@ -249,7 +249,7 @@ mod tests {
.unwrap()
.unwrap();
- assert!(latest_table_route.region_routes()[0].is_leader_downgraded());
+ assert!(latest_table_route.region_routes().unwrap()[0].is_leader_downgraded());
assert!(ctx.volatile_ctx.table_route.is_none());
}
}
diff --git a/src/meta-srv/src/procedure/region_migration/update_metadata/rollback_downgraded_region.rs b/src/meta-srv/src/procedure/region_migration/update_metadata/rollback_downgraded_region.rs
index 7281737752a4..844188f2f1f9 100644
--- a/src/meta-srv/src/procedure/region_migration/update_metadata/rollback_downgraded_region.rs
+++ b/src/meta-srv/src/procedure/region_migration/update_metadata/rollback_downgraded_region.rs
@@ -170,7 +170,10 @@ mod tests {
.unwrap()
.unwrap()
.into_inner();
- assert_eq!(&expected_region_routes, table_route.region_routes());
+ assert_eq!(
+ &expected_region_routes,
+ table_route.region_routes().unwrap()
+ );
}
#[tokio::test]
@@ -231,6 +234,9 @@ mod tests {
.unwrap()
.unwrap()
.into_inner();
- assert_eq!(&expected_region_routes, table_route.region_routes());
+ assert_eq!(
+ &expected_region_routes,
+ table_route.region_routes().unwrap()
+ );
}
}
diff --git a/src/meta-srv/src/procedure/region_migration/update_metadata/upgrade_candidate_region.rs b/src/meta-srv/src/procedure/region_migration/update_metadata/upgrade_candidate_region.rs
index 597d9afe9a7b..745b8487a8f3 100644
--- a/src/meta-srv/src/procedure/region_migration/update_metadata/upgrade_candidate_region.rs
+++ b/src/meta-srv/src/procedure/region_migration/update_metadata/upgrade_candidate_region.rs
@@ -33,7 +33,12 @@ impl UpdateMetadata {
let region_id = ctx.region_id();
let table_route_value = ctx.get_table_route_value().await?.clone();
- let mut region_routes = table_route_value.region_routes().clone();
+ let mut region_routes = table_route_value
+ .region_routes()
+ .context(error::UnexpectedLogicalRouteTableSnafu {
+ err_msg: "{self:?} is a non-physical TableRouteValue.",
+ })?
+ .clone();
let region_route = region_routes
.iter_mut()
.find(|route| route.region.id == region_id)
@@ -81,7 +86,12 @@ impl UpdateMetadata {
let region_id = ctx.region_id();
let table_route_value = ctx.get_table_route_value().await?.clone();
- let region_routes = table_route_value.region_routes().clone();
+ let region_routes = table_route_value
+ .region_routes()
+ .context(error::UnexpectedLogicalRouteTableSnafu {
+ err_msg: "{self:?} is a non-physical TableRouteValue.",
+ })?
+ .clone();
let region_route = region_routes
.into_iter()
.find(|route| route.region.id == region_id)
@@ -465,7 +475,7 @@ mod tests {
.unwrap()
.unwrap()
.into_inner();
- let region_routes = table_route.region_routes();
+ let region_routes = table_route.region_routes().unwrap();
assert!(ctx.volatile_ctx.table_route.is_none());
assert!(ctx.volatile_ctx.opening_region_guard.is_none());
diff --git a/src/meta-srv/src/region/lease_keeper.rs b/src/meta-srv/src/region/lease_keeper.rs
index cbd2451896b1..9b066065b427 100644
--- a/src/meta-srv/src/region/lease_keeper.rs
+++ b/src/meta-srv/src/region/lease_keeper.rs
@@ -127,7 +127,7 @@ impl RegionLeaseKeeper {
}
if let Some(table_route) = table_metadata.get(&region_id.table_id()) {
- if let Some(region_route) = table_route.region_route(region_id) {
+ if let Ok(Some(region_route)) = table_route.region_route(region_id) {
return renew_region_lease_via_region_route(®ion_route, datanode_id, region_id);
}
}
diff --git a/src/meta-srv/src/selector/load_based.rs b/src/meta-srv/src/selector/load_based.rs
index e8b3dcdf9e97..9573757a3ffc 100644
--- a/src/meta-srv/src/selector/load_based.rs
+++ b/src/meta-srv/src/selector/load_based.rs
@@ -142,13 +142,19 @@ async fn get_leader_peer_ids(
.await
.context(error::TableMetadataManagerSnafu)
.map(|route| {
- route.map_or_else(Vec::new, |route| {
- find_leaders(route.region_routes())
- .into_iter()
- .map(|peer| peer.id)
- .collect()
- })
- })
+ route.map_or_else(
+ || Ok(Vec::new()),
+ |route| {
+ let region_routes = route
+ .region_routes()
+ .context(error::UnexpectedLogicalRouteTableSnafu { err_msg: "" })?;
+ Ok(find_leaders(region_routes)
+ .into_iter()
+ .map(|peer| peer.id)
+ .collect())
+ },
+ )
+ })?
}
#[cfg(test)]
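
The `get_leader_peer_ids` hunk above makes the closure passed to `map` fallible, so the call now yields a nested `Result` that the trailing `?` flattens. A minimal sketch of that control flow, with simplified types rather than the real meta-srv code:

```rust
// Sketch of the nested-Result flattening used in `get_leader_peer_ids` above.
// The route lookup may fail, the route may be absent, and extracting leaders
// is itself fallible in the real code, hence the inner `Ok(...)`.
fn leader_ids(route: Result<Option<Vec<u64>>, String>) -> Result<Vec<u64>, String> {
    route.map(|maybe_route| {
        maybe_route.map_or_else(
            // No table route found: an empty peer list is still a success.
            || Ok(Vec::new()),
            // A route was found: modelled as an always-Ok step here, but fallible
            // in the real code (checking the route type).
            |regions| Ok(regions.into_iter().filter(|id| id % 2 == 0).collect()),
        )
    })? // Result<Result<Vec<u64>, _>, _> -> Result<Vec<u64>, _>
}

fn main() {
    assert_eq!(leader_ids(Ok(None)), Ok(vec![]));
    assert_eq!(leader_ids(Ok(Some(vec![1, 2, 3, 4]))), Ok(vec![2, 4]));
    assert!(leader_ids(Err("kv backend error".to_string())).is_err());
}
```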
diff --git a/src/partition/src/error.rs b/src/partition/src/error.rs
index 7765a77c9796..6bfe76fa5b8f 100644
--- a/src/partition/src/error.rs
+++ b/src/partition/src/error.rs
@@ -119,6 +119,13 @@ pub enum Error {
region_id: RegionId,
location: Location,
},
+
+ #[snafu(display("Unexpected table route type: {}", err_msg))]
+ UnexpectedLogicalRouteTable {
+ location: Location,
+ err_msg: String,
+ source: common_meta::error::Error,
+ },
}
impl ErrorExt for Error {
@@ -138,6 +145,7 @@ impl ErrorExt for Error {
Error::FindDatanode { .. } => StatusCode::InvalidArguments,
Error::TableRouteManager { source, .. } => source.status_code(),
Error::MissingDefaultValue { .. } => StatusCode::Internal,
+ Error::UnexpectedLogicalRouteTable { source, .. } => source.status_code(),
}
}
diff --git a/src/partition/src/manager.rs b/src/partition/src/manager.rs
index ad15c62cc1dd..2963ac8e2b45 100644
--- a/src/partition/src/manager.rs
+++ b/src/partition/src/manager.rs
@@ -75,8 +75,13 @@ impl PartitionRuleManager {
.context(error::TableRouteManagerSnafu)?
.context(error::FindTableRoutesSnafu { table_id })?
.into_inner();
-
- Ok(RegionRoutes(route.region_routes().clone()))
+ let region_routes =
+ route
+ .region_routes()
+ .context(error::UnexpectedLogicalRouteTableSnafu {
+ err_msg: "{self:?} is a non-physical TableRouteValue.",
+ })?;
+ Ok(RegionRoutes(region_routes.clone()))
}
pub async fn find_table_partitions(&self, table_id: TableId) -> Result<Vec<PartitionInfo>> {
@@ -87,7 +92,12 @@ impl PartitionRuleManager {
.context(error::TableRouteManagerSnafu)?
.context(error::FindTableRoutesSnafu { table_id })?
.into_inner();
- let region_routes = route.region_routes();
+ let region_routes =
+ route
+ .region_routes()
+ .context(error::UnexpectedLogicalRouteTableSnafu {
+ err_msg: "{self:?} is a non-physical TableRouteValue.",
+ })?;
ensure!(
!region_routes.is_empty(),
diff --git a/tests-integration/src/grpc.rs b/tests-integration/src/grpc.rs
index e9731cc336fa..24cd470c3905 100644
--- a/tests-integration/src/grpc.rs
+++ b/tests-integration/src/grpc.rs
@@ -521,11 +521,15 @@ CREATE TABLE {table_name} (
.unwrap()
.into_inner();
- let region_to_dn_map = region_distribution(table_route_value.region_routes())
- .unwrap()
- .iter()
- .map(|(k, v)| (v[0], *k))
- .collect::<HashMap<u32, u64>>();
+ let region_to_dn_map = region_distribution(
+ table_route_value
+ .region_routes()
+ .expect("physical table route"),
+ )
+ .unwrap()
+ .iter()
+ .map(|(k, v)| (v[0], *k))
+ .collect::<HashMap<u32, u64>>();
assert!(region_to_dn_map.len() <= instance.datanodes().len());
let stmt = QueryLanguageParser::parse_sql(&format!(
diff --git a/tests-integration/src/instance.rs b/tests-integration/src/instance.rs
index 05253dc0a236..5b7ed080d9d9 100644
--- a/tests-integration/src/instance.rs
+++ b/tests-integration/src/instance.rs
@@ -216,11 +216,15 @@ mod tests {
.unwrap()
.into_inner();
- let region_to_dn_map = region_distribution(table_route_value.region_routes())
- .unwrap()
- .iter()
- .map(|(k, v)| (v[0], *k))
- .collect::<HashMap<u32, u64>>();
+ let region_to_dn_map = region_distribution(
+ table_route_value
+ .region_routes()
+ .expect("region routes should be physical"),
+ )
+ .unwrap()
+ .iter()
+ .map(|(k, v)| (v[0], *k))
+ .collect::<HashMap<u32, u64>>();
assert!(region_to_dn_map.len() <= instance.datanodes().len());
let stmt = QueryLanguageParser::parse_sql("SELECT ts, host FROM demo ORDER BY ts").unwrap();
| feat | add panic notes and type checks (#3031) |
| bf16422cee715d9c1f190be3f23cc0f66ea657ae | 2024-08-21 11:50:09 | localhost | fix: pipeline prepare loop break detects a conditional error (#4593) | false |
diff --git a/src/pipeline/src/etl.rs b/src/pipeline/src/etl.rs
index 1a75866656a6..b2c8802dd52a 100644
--- a/src/pipeline/src/etl.rs
+++ b/src/pipeline/src/etl.rs
@@ -284,7 +284,7 @@ where
let mut search_from = 0;
// because of the key in the json map is ordered
for (payload_key, payload_value) in map.into_iter() {
- if search_from >= self.required_keys.len() - 1 {
+ if search_from >= self.required_keys.len() {
break;
}
@@ -359,15 +359,16 @@ mod tests {
#[test]
fn test_pipeline_prepare() {
- let input_value_str = r#"
+ {
+ let input_value_str = r#"
{
"my_field": "1,2",
"foo": "bar"
}
"#;
- let input_value: serde_json::Value = serde_json::from_str(input_value_str).unwrap();
+ let input_value: serde_json::Value = serde_json::from_str(input_value_str).unwrap();
- let pipeline_yaml = r#"
+ let pipeline_yaml = r#"
---
description: Pipeline for Apache Tomcat
@@ -381,32 +382,73 @@ transform:
- field: field2
type: uint32
"#;
- let pipeline: Pipeline<GreptimeTransformer> =
- parse(&Content::Yaml(pipeline_yaml.into())).unwrap();
- let mut payload = pipeline.init_intermediate_state();
- pipeline.prepare(input_value, &mut payload).unwrap();
- assert_eq!(
- &["greptime_timestamp", "my_field"].to_vec(),
- pipeline.required_keys()
- );
- assert_eq!(
- payload,
- vec![
- Value::Null,
- Value::String("1,2".to_string()),
- Value::Null,
- Value::Null
- ]
- );
- let result = pipeline.exec_mut(&mut payload).unwrap();
+ let pipeline: Pipeline<GreptimeTransformer> =
+ parse(&Content::Yaml(pipeline_yaml.into())).unwrap();
+ let mut payload = pipeline.init_intermediate_state();
+ pipeline.prepare(input_value, &mut payload).unwrap();
+ assert_eq!(
+ &["greptime_timestamp", "my_field"].to_vec(),
+ pipeline.required_keys()
+ );
+ assert_eq!(
+ payload,
+ vec![
+ Value::Null,
+ Value::String("1,2".to_string()),
+ Value::Null,
+ Value::Null
+ ]
+ );
+ let result = pipeline.exec_mut(&mut payload).unwrap();
- assert_eq!(result.values[0].value_data, Some(ValueData::U32Value(1)));
- assert_eq!(result.values[1].value_data, Some(ValueData::U32Value(2)));
- match &result.values[2].value_data {
- Some(ValueData::TimestampNanosecondValue(v)) => {
- assert_ne!(*v, 0);
+ assert_eq!(result.values[0].value_data, Some(ValueData::U32Value(1)));
+ assert_eq!(result.values[1].value_data, Some(ValueData::U32Value(2)));
+ match &result.values[2].value_data {
+ Some(ValueData::TimestampNanosecondValue(v)) => {
+ assert_ne!(*v, 0);
+ }
+ _ => panic!("expect null value"),
}
- _ => panic!("expect null value"),
+ }
+ {
+ let input_value_str = r#"
+ {
+ "reqTimeSec": "1573840000.000"
+ }
+ "#;
+
+ let pipeline_yaml = r#"
+---
+description: Pipeline for Demo Log
+
+processors:
+ - gsub:
+ field: reqTimeSec
+ pattern: "\\."
+ replacement: ""
+ - epoch:
+ field: reqTimeSec
+ resolution: millisecond
+ ignore_missing: true
+
+transform:
+ - field: reqTimeSec
+ type: epoch, millisecond
+ index: timestamp
+"#;
+ let input_value: serde_json::Value = serde_json::from_str(input_value_str).unwrap();
+ let pipeline: Pipeline<GreptimeTransformer> =
+ parse(&Content::Yaml(pipeline_yaml.into())).unwrap();
+ let mut payload = pipeline.init_intermediate_state();
+ pipeline.prepare(input_value, &mut payload).unwrap();
+ assert_eq!(&["reqTimeSec"].to_vec(), pipeline.required_keys());
+ assert_eq!(payload, vec![Value::String("1573840000.000".to_string())]);
+ let result = pipeline.exec_mut(&mut payload).unwrap();
+
+ assert_eq!(
+ result.values[0].value_data,
+ Some(ValueData::TimestampMillisecondValue(1573840000000))
+ );
}
}
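
The one-line change at the top of this diff fixes an off-by-one in the loop's break condition: stopping at `required_keys.len() - 1` abandons the scan before the last required key is consumed, which in particular breaks pipelines with a single required key such as the new `reqTimeSec` case. A hypothetical, self-contained sketch of the difference (a simplification, not the real pipeline code):

```rust
// Minimal model of the loop-break fix above: breaking at `len() - 1` skips the
// last required key, while breaking at `len()` only stops once every key is done.
fn consume(required_keys: &[&str], payload_keys: &[&str], fixed: bool) -> usize {
    let mut search_from = 0;
    let mut matched = 0;
    for key in payload_keys {
        let limit = if fixed {
            required_keys.len()
        } else {
            required_keys.len().saturating_sub(1) // old, off-by-one condition
        };
        if search_from >= limit {
            break;
        }
        if required_keys[search_from..].contains(key) {
            matched += 1;
            search_from += 1;
        }
    }
    matched
}

fn main() {
    let required = ["greptime_timestamp", "my_field"];
    let payload = ["foo", "greptime_timestamp", "my_field"];
    // Old condition stops after the first required key is consumed.
    assert_eq!(consume(&required, &payload, false), 1);
    // Fixed condition consumes both required keys.
    assert_eq!(consume(&required, &payload, true), 2);
}
```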
| fix | pipeline prepare loop break detects a conditional error (#4593) |
| 5abff7a536ece42cbe53e96c4205b9625e923a49 | 2022-11-02 20:06:32 | LFC | feat: range columns partitioning rule (#374) | false |
diff --git a/src/frontend/src/partitioning.rs b/src/frontend/src/partitioning.rs
index 7a0b3081464c..678705e37966 100644
--- a/src/frontend/src/partitioning.rs
+++ b/src/frontend/src/partitioning.rs
@@ -1,3 +1,4 @@
+mod columns;
mod range;
use std::fmt::Debug;
@@ -6,8 +7,6 @@ pub use datafusion_expr::Operator;
use datatypes::prelude::Value;
use store_api::storage::RegionId;
-pub(crate) type ValueList = Vec<Value>;
-
pub trait PartitionRule {
type Error: Debug;
@@ -15,11 +14,20 @@ pub trait PartitionRule {
// TODO(LFC): Unify `find_region` and `find_regions` methods when distributed read and write features are both merged into develop.
// Or find better names since one is mainly for writes and the other is for reads.
- fn find_region(&self, values: &ValueList) -> Result<RegionId, Self::Error>;
+ fn find_region(&self, values: &[Value]) -> Result<RegionId, Self::Error>;
fn find_regions(&self, exprs: &[PartitionExpr]) -> Result<Vec<RegionId>, Self::Error>;
}
+/// The right bound(exclusive) of partition range.
+#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
+enum PartitionBound {
+ Value(Value),
+ // FIXME(LFC): no allow, for clippy temporarily
+ #[allow(dead_code)]
+ MaxValue,
+}
+
#[derive(Debug, PartialEq, Eq)]
pub struct PartitionExpr {
column: String,
@@ -32,3 +40,17 @@ impl PartitionExpr {
&self.value
}
}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_partition_bound() {
+ let b1 = PartitionBound::Value(1_i32.into());
+ let b2 = PartitionBound::Value(100_i32.into());
+ let b3 = PartitionBound::MaxValue;
+ assert!(b1 < b2);
+ assert!(b2 < b3);
+ }
+}
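
The `test_partition_bound` test above relies on the derived `Ord`: for enums, derived ordering compares the variant's declaration order before its payload, so `MaxValue` is greater than every `Value(_)`. A standalone illustration (simplified types, not the real frontend crate):

```rust
use std::cmp::Ordering;

// Derived `Ord` on an enum compares the discriminant (declaration order) first,
// then the variant payload, which is exactly what makes MAXVALUE the largest bound.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
enum Bound {
    Value(i32),
    MaxValue,
}

fn main() {
    assert_eq!(Bound::Value(1).cmp(&Bound::Value(100)), Ordering::Less);
    // `MaxValue` is declared after `Value`, so it is greater than any `Value(_)`.
    assert_eq!(Bound::Value(i32::MAX).cmp(&Bound::MaxValue), Ordering::Less);
    assert!(Bound::MaxValue > Bound::Value(100));
}
```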
diff --git a/src/frontend/src/partitioning/columns.rs b/src/frontend/src/partitioning/columns.rs
new file mode 100644
index 000000000000..542e26cf8358
--- /dev/null
+++ b/src/frontend/src/partitioning/columns.rs
@@ -0,0 +1,382 @@
+use datafusion_expr::Operator;
+use datatypes::value::Value;
+use snafu::ensure;
+
+use crate::error::{self, Error};
+use crate::partitioning::{PartitionBound, PartitionExpr, PartitionRule, RegionId};
+
+/// A [RangeColumnsPartitionRule] is very similar to [RangePartitionRule] except that it allows
+/// partitioning by multiple columns.
+///
+/// This rule is generated from create table request, using MySQL's syntax:
+///
+/// ```SQL
+/// CREATE TABLE table_name (
+/// columns definition
+/// )
+/// PARTITION BY RANGE COLUMNS(column_list) (
+/// PARTITION region_name VALUES LESS THAN (value_list)[,
+/// PARTITION region_name VALUES LESS THAN (value_list)][,
+/// ...]
+/// )
+///
+/// column_list:
+/// column_name[, column_name][, ...]
+///
+/// value_list:
+/// value[, value][, ...]
+/// ```
+///
+/// Please refer to MySQL's ["RANGE COLUMNS Partitioning"](https://dev.mysql.com/doc/refman/8.0/en/partitioning-columns-range.html)
+/// document for more details.
+struct RangeColumnsPartitionRule {
+ column_list: Vec<String>,
+ value_lists: Vec<Vec<PartitionBound>>,
+ regions: Vec<RegionId>,
+
+ // TODO(LFC): Implement finding regions by all partitioning columns, not by the first one only.
+ // Singled out the first partitioning column's bounds for finding regions by range.
+ //
+ // Theoretically, finding regions in `value_list`s should use all the partition column values
+ // as a whole in the comparison (think of how Rust vectors are compared to each other). And
+ // this is how we do it if provided with concrete values (see `find_region` method).
+ //
+ // However, when we need to find regions by range, for example, a filter of "x < 100" defined
+ // in SQL, currently I'm not quite sure how that could be implemented. Especially facing the complex
+ // filter expression like "a < 1 AND (b > 2 OR c != 3)".
+ //
+ // So I decided to use the first partitioning column temporarily in finding regions by range,
+ // and further investigate how MySQL (and others) implemented this feature in detail.
+ //
+ // Finding regions only using the first partitioning column is fine. It might return regions that
+ // actually do not contain the range's value (causing unnecessary table scans), but will
+ // not lose any data that should have been scanned.
+ //
+ // The following two fields are acted as caches, so we don't need to recalculate them every time.
+ first_column_bounds: Vec<PartitionBound>,
+ first_column_regions: Vec<Vec<RegionId>>,
+}
+
+impl RangeColumnsPartitionRule {
+ // It's assured that input arguments are valid because they are checked in SQL parsing stage.
+ // So we can skip validating them.
+ // FIXME(LFC): no allow, for clippy temporarily
+ #[allow(dead_code)]
+ fn new(
+ column_list: Vec<String>,
+ value_lists: Vec<Vec<PartitionBound>>,
+ regions: Vec<RegionId>,
+ ) -> Self {
+ let first_column_bounds = value_lists
+ .iter()
+ .map(|x| &x[0])
+ .collect::<Vec<&PartitionBound>>();
+
+ let mut distinct_bounds = Vec::<PartitionBound>::new();
+ distinct_bounds.push(first_column_bounds[0].clone());
+ let mut first_column_regions = Vec::<Vec<RegionId>>::new();
+ first_column_regions.push(vec![regions[0]]);
+
+ for i in 1..first_column_bounds.len() {
+ if first_column_bounds[i] == &distinct_bounds[distinct_bounds.len() - 1] {
+ first_column_regions[distinct_bounds.len() - 1].push(regions[i]);
+ } else {
+ distinct_bounds.push(first_column_bounds[i].clone());
+ first_column_regions.push(vec![regions[i]]);
+ }
+ }
+
+ Self {
+ column_list,
+ value_lists,
+ regions,
+ first_column_bounds: distinct_bounds,
+ first_column_regions,
+ }
+ }
+}
+
+impl PartitionRule for RangeColumnsPartitionRule {
+ type Error = Error;
+
+ fn partition_columns(&self) -> Vec<String> {
+ self.column_list.clone()
+ }
+
+ fn find_region(&self, values: &[Value]) -> Result<RegionId, Self::Error> {
+ ensure!(
+ values.len() == self.column_list.len(),
+ error::RegionKeysSizeSnafu {
+ expect: self.column_list.len(),
+ actual: values.len(),
+ }
+ );
+
+ // How tuple is compared:
+ // (a, b) < (x, y) <= (a < x) || ((a == x) && (b < y))
+ let values = values
+ .iter()
+ .map(|v| PartitionBound::Value(v.clone()))
+ .collect::<Vec<PartitionBound>>();
+ Ok(match self.value_lists.binary_search(&values) {
+ Ok(i) => self.regions[i + 1],
+ Err(i) => self.regions[i],
+ })
+ }
+
+ fn find_regions(&self, exprs: &[PartitionExpr]) -> Result<Vec<RegionId>, Self::Error> {
+ let regions = if exprs.iter().all(|x| self.column_list.contains(&x.column)) {
+ let PartitionExpr {
+ column: _,
+ op,
+ value,
+ } = exprs
+ .iter()
+ .find(|x| x.column == self.column_list[0])
+ // "unwrap" is safe because we have checked that "self.column_list" contains all columns in "exprs"
+ .unwrap();
+
+ // an example of bounds and regions:
+ // SQL:
+ // PARTITION p1 VALUES LESS THAN (10, 'c'),
+ // PARTITION p2 VALUES LESS THAN (20, 'h'),
+ // PARTITION p3 VALUES LESS THAN (20, 'm'),
+ // PARTITION p4 VALUES LESS THAN (50, 'p'),
+ // PARTITION p5 VALUES LESS THAN (MAXVALUE, 'x'),
+ // PARTITION p6 VALUES LESS THAN (MAXVALUE, MAXVALUE),
+ // bounds: [10, 20, 50, MAXVALUE]
+ // regions: [[1], [2, 3], [4], [5, 6]]
+ let regions = &self.first_column_regions;
+ match self
+ .first_column_bounds
+ .binary_search(&PartitionBound::Value(value.clone()))
+ {
+ Ok(i) => match op {
+ Operator::Lt => &regions[..=i],
+ Operator::LtEq => &regions[..=(i + 1)],
+ Operator::Eq => &regions[(i + 1)..=(i + 1)],
+ Operator::Gt | Operator::GtEq => &regions[(i + 1)..],
+ Operator::NotEq => &regions[..],
+ _ => unimplemented!(),
+ },
+ Err(i) => match op {
+ Operator::Lt | Operator::LtEq => &regions[..=i],
+ Operator::Eq => &regions[i..=i],
+ Operator::Gt | Operator::GtEq => &regions[i..],
+ Operator::NotEq => &regions[..],
+ _ => unimplemented!(),
+ },
+ }
+ .iter()
+ .flatten()
+ .cloned()
+ .collect::<Vec<RegionId>>()
+ } else {
+ self.regions.clone()
+ };
+ Ok(regions)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::assert_matches::assert_matches;
+
+ use super::*;
+
+ #[test]
+ fn test_find_regions() {
+ // PARTITION BY RANGE COLUMNS(a, b)
+ // PARTITION p1 VALUES LESS THAN ('hz', 10),
+ // PARTITION p2 VALUES LESS THAN ('sh', 20),
+ // PARTITION p3 VALUES LESS THAN ('sh', 50),
+ // PARTITION p4 VALUES LESS THAN ('sz', 100),
+ // PARTITION p5 VALUES LESS THAN (MAXVALUE, 200),
+ // PARTITION p6 VALUES LESS THAN (MAXVALUE, MAXVALUE),
+ let rule = RangeColumnsPartitionRule::new(
+ vec!["a".to_string(), "b".to_string()],
+ vec![
+ vec![
+ PartitionBound::Value("hz".into()),
+ PartitionBound::Value(10_i32.into()),
+ ],
+ vec![
+ PartitionBound::Value("sh".into()),
+ PartitionBound::Value(20_i32.into()),
+ ],
+ vec![
+ PartitionBound::Value("sh".into()),
+ PartitionBound::Value(50_i32.into()),
+ ],
+ vec![
+ PartitionBound::Value("sz".into()),
+ PartitionBound::Value(100_i32.into()),
+ ],
+ vec![
+ PartitionBound::MaxValue,
+ PartitionBound::Value(200_i32.into()),
+ ],
+ vec![PartitionBound::MaxValue, PartitionBound::MaxValue],
+ ],
+ vec![1, 2, 3, 4, 5, 6],
+ );
+
+ let test = |op: Operator, value: &str, expected_regions: Vec<u64>| {
+ let exprs = vec![
+ // Intentionally fix column b's partition expr to "b < 1". If we support finding
+ // regions by both columns("a" and "b") in the future, some test cases should fail.
+ PartitionExpr {
+ column: "b".to_string(),
+ op: Operator::Lt,
+ value: 1_i32.into(),
+ },
+ PartitionExpr {
+ column: "a".to_string(),
+ op,
+ value: value.into(),
+ },
+ ];
+ let regions = rule.find_regions(&exprs).unwrap();
+ assert_eq!(
+ regions,
+ expected_regions.into_iter().collect::<Vec<RegionId>>()
+ );
+ };
+
+ test(Operator::NotEq, "hz", vec![1, 2, 3, 4, 5, 6]);
+ test(Operator::NotEq, "what", vec![1, 2, 3, 4, 5, 6]);
+
+ test(Operator::GtEq, "ab", vec![1, 2, 3, 4, 5, 6]);
+ test(Operator::GtEq, "hz", vec![2, 3, 4, 5, 6]);
+ test(Operator::GtEq, "ijk", vec![2, 3, 4, 5, 6]);
+ test(Operator::GtEq, "sh", vec![4, 5, 6]);
+ test(Operator::GtEq, "ssh", vec![4, 5, 6]);
+ test(Operator::GtEq, "sz", vec![5, 6]);
+ test(Operator::GtEq, "zz", vec![5, 6]);
+
+ test(Operator::Gt, "ab", vec![1, 2, 3, 4, 5, 6]);
+ test(Operator::Gt, "hz", vec![2, 3, 4, 5, 6]);
+ test(Operator::Gt, "ijk", vec![2, 3, 4, 5, 6]);
+ test(Operator::Gt, "sh", vec![4, 5, 6]);
+ test(Operator::Gt, "ssh", vec![4, 5, 6]);
+ test(Operator::Gt, "sz", vec![5, 6]);
+ test(Operator::Gt, "zz", vec![5, 6]);
+
+ test(Operator::Eq, "ab", vec![1]);
+ test(Operator::Eq, "hz", vec![2, 3]);
+ test(Operator::Eq, "ijk", vec![2, 3]);
+ test(Operator::Eq, "sh", vec![4]);
+ test(Operator::Eq, "ssh", vec![4]);
+ test(Operator::Eq, "sz", vec![5, 6]);
+ test(Operator::Eq, "zz", vec![5, 6]);
+
+ test(Operator::Lt, "ab", vec![1]);
+ test(Operator::Lt, "hz", vec![1]);
+ test(Operator::Lt, "ijk", vec![1, 2, 3]);
+ test(Operator::Lt, "sh", vec![1, 2, 3]);
+ test(Operator::Lt, "ssh", vec![1, 2, 3, 4]);
+ test(Operator::Lt, "sz", vec![1, 2, 3, 4]);
+ test(Operator::Lt, "zz", vec![1, 2, 3, 4, 5, 6]);
+
+ test(Operator::LtEq, "ab", vec![1]);
+ test(Operator::LtEq, "hz", vec![1, 2, 3]);
+ test(Operator::LtEq, "ijk", vec![1, 2, 3]);
+ test(Operator::LtEq, "sh", vec![1, 2, 3, 4]);
+ test(Operator::LtEq, "ssh", vec![1, 2, 3, 4]);
+ test(Operator::LtEq, "sz", vec![1, 2, 3, 4, 5, 6]);
+ test(Operator::LtEq, "zz", vec![1, 2, 3, 4, 5, 6]);
+
+ // If trying to find regions that is not partitioning column, return all regions.
+ let exprs = vec![
+ PartitionExpr {
+ column: "c".to_string(),
+ op: Operator::Lt,
+ value: 1_i32.into(),
+ },
+ PartitionExpr {
+ column: "a".to_string(),
+ op: Operator::Lt,
+ value: "hz".into(),
+ },
+ ];
+ let regions = rule.find_regions(&exprs).unwrap();
+ assert_eq!(regions, vec![1, 2, 3, 4, 5, 6]);
+ }
+
+ #[test]
+ fn test_find_region() {
+ // PARTITION BY RANGE COLUMNS(a) (
+ // PARTITION r1 VALUES LESS THAN ('hz'),
+ // PARTITION r2 VALUES LESS THAN ('sh'),
+ // PARTITION r3 VALUES LESS THAN (MAXVALUE),
+ // )
+ let rule = RangeColumnsPartitionRule::new(
+ vec!["a".to_string()],
+ vec![
+ vec![PartitionBound::Value("hz".into())],
+ vec![PartitionBound::Value("sh".into())],
+ vec![PartitionBound::MaxValue],
+ ],
+ vec![1, 2, 3],
+ );
+ assert_matches!(
+ rule.find_region(&["foo".into(), 1000_i32.into()]),
+ Err(error::Error::RegionKeysSize {
+ expect: 1,
+ actual: 2,
+ ..
+ })
+ );
+ assert_matches!(rule.find_region(&["foo".into()]), Ok(1));
+ assert_matches!(rule.find_region(&["bar".into()]), Ok(1));
+ assert_matches!(rule.find_region(&["hz".into()]), Ok(2));
+ assert_matches!(rule.find_region(&["hzz".into()]), Ok(2));
+ assert_matches!(rule.find_region(&["sh".into()]), Ok(3));
+ assert_matches!(rule.find_region(&["zzzz".into()]), Ok(3));
+
+ // PARTITION BY RANGE COLUMNS(a, b) (
+ // PARTITION r1 VALUES LESS THAN ('hz', 10),
+ // PARTITION r2 VALUES LESS THAN ('hz', 20),
+ // PARTITION r3 VALUES LESS THAN ('sh', 50),
+ // PARTITION r4 VALUES LESS THAN (MAXVALUE, MAXVALUE),
+ // )
+ let rule = RangeColumnsPartitionRule::new(
+ vec!["a".to_string(), "b".to_string()],
+ vec![
+ vec![
+ PartitionBound::Value("hz".into()),
+ PartitionBound::Value(10_i32.into()),
+ ],
+ vec![
+ PartitionBound::Value("hz".into()),
+ PartitionBound::Value(20_i32.into()),
+ ],
+ vec![
+ PartitionBound::Value("sh".into()),
+ PartitionBound::Value(50_i32.into()),
+ ],
+ vec![PartitionBound::MaxValue, PartitionBound::MaxValue],
+ ],
+ vec![1, 2, 3, 4],
+ );
+ assert_matches!(
+ rule.find_region(&["foo".into()]),
+ Err(error::Error::RegionKeysSize {
+ expect: 2,
+ actual: 1,
+ ..
+ })
+ );
+ assert_matches!(rule.find_region(&["foo".into(), 1_i32.into()]), Ok(1));
+ assert_matches!(rule.find_region(&["bar".into(), 11_i32.into()]), Ok(1));
+ assert_matches!(rule.find_region(&["hz".into(), 2_i32.into()]), Ok(1));
+ assert_matches!(rule.find_region(&["hz".into(), 12_i32.into()]), Ok(2));
+ assert_matches!(rule.find_region(&["hz".into(), 22_i32.into()]), Ok(3));
+ assert_matches!(rule.find_region(&["hz".into(), 999_i32.into()]), Ok(3));
+ assert_matches!(rule.find_region(&["hzz".into(), 1_i32.into()]), Ok(3));
+ assert_matches!(rule.find_region(&["hzz".into(), 999_i32.into()]), Ok(3));
+ assert_matches!(rule.find_region(&["sh".into(), 49_i32.into()]), Ok(3));
+ assert_matches!(rule.find_region(&["sh".into(), 50_i32.into()]), Ok(4));
+ assert_matches!(rule.find_region(&["zzz".into(), 1_i32.into()]), Ok(4));
+ }
+}
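
The `find_region` implementation above leans on two facts spelled out in its comments: bound lists compare lexicographically like tuples, and `binary_search` returns `Ok(i)` for an exact hit on an (exclusive) bound or `Err(i)` for the insertion point. A self-contained sketch with illustrative names (integer columns only, no MAXVALUE handling):

```rust
// Self-contained model of the lookup used in `find_region` above: bounds are
// exclusive upper limits sorted ascending, and values compare like tuples.
fn find_region(bounds: &[Vec<i32>], regions: &[u64], values: &[i32]) -> u64 {
    let values = values.to_vec();
    match bounds.binary_search(&values) {
        // Exact hit on a bound: the bound is exclusive, so the value belongs to
        // the next partition.
        Ok(i) => regions[i + 1],
        // No exact hit: `Err(i)` is the insertion point, i.e. the first bound
        // greater than the value, which is exactly the partition we want.
        Err(i) => regions[i],
    }
}

fn main() {
    // PARTITION r1 VALUES LESS THAN (10, 5), r2 LESS THAN (20, 5), r3 otherwise.
    let bounds = vec![vec![10, 5], vec![20, 5], vec![i32::MAX, i32::MAX]];
    let regions = [1, 2, 3];
    assert_eq!(find_region(&bounds, &regions, &[9, 100]), 1); // (9, _) < (10, 5)
    assert_eq!(find_region(&bounds, &regions, &[10, 4]), 1); // second column decides
    assert_eq!(find_region(&bounds, &regions, &[10, 5]), 2); // exact bound -> next region
    assert_eq!(find_region(&bounds, &regions, &[15, 0]), 2);
    assert_eq!(find_region(&bounds, &regions, &[500, 0]), 3);
}
```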
diff --git a/src/frontend/src/partitioning/range.rs b/src/frontend/src/partitioning/range.rs
index 10fc4fa6fc60..572ff81a3799 100644
--- a/src/frontend/src/partitioning/range.rs
+++ b/src/frontend/src/partitioning/range.rs
@@ -2,7 +2,7 @@ use datatypes::prelude::*;
use snafu::OptionExt;
use crate::error::{self, Error};
-use crate::partitioning::{Operator, PartitionExpr, PartitionRule, RegionId, ValueList};
+use crate::partitioning::{Operator, PartitionExpr, PartitionRule, RegionId};
/// [RangePartitionRule] manages the distribution of partitions partitioning by some column's value
/// range. It's generated from create table request, using MySQL's syntax:
@@ -26,6 +26,7 @@ use crate::partitioning::{Operator, PartitionExpr, PartitionRule, RegionId, Valu
/// - partition name must be unique
/// - range bounds(the "value"s) must be strictly increased
/// - the last partition range must be bounded by "MAXVALUE"
+///
/// are all been done in the create table SQL parsing stage. So we can safely skip some checks on the
/// input arguments.
///
@@ -66,7 +67,7 @@ impl PartitionRule for RangePartitionRule {
vec![self.column_name().to_string()]
}
- fn find_region(&self, _values: &ValueList) -> Result<RegionId, Self::Error> {
+ fn find_region(&self, _values: &[Value]) -> Result<RegionId, Self::Error> {
unimplemented!()
}
@@ -100,7 +101,7 @@ impl PartitionRule for RangePartitionRule {
Operator::NotEq => &self.regions[..],
_ => unimplemented!(),
},
- Err(i) => match *op {
+ Err(i) => match op {
Operator::Lt | Operator::LtEq => &self.regions[..=i],
Operator::Eq => &self.regions[i..=i],
Operator::Gt | Operator::GtEq => &self.regions[i..],
diff --git a/src/frontend/src/spliter.rs b/src/frontend/src/spliter.rs
index 5740c2a9ebf6..bb55b85eff6c 100644
--- a/src/frontend/src/spliter.rs
+++ b/src/frontend/src/spliter.rs
@@ -1,5 +1,6 @@
use std::collections::HashMap;
+use datatypes::value::Value;
use datatypes::vectors::VectorBuilder;
use datatypes::vectors::VectorRef;
use snafu::ensure;
@@ -11,7 +12,7 @@ use crate::error::FindPartitionColumnSnafu;
use crate::error::FindRegionSnafu;
use crate::error::InvalidInsertRequestSnafu;
use crate::error::Result;
-use crate::partitioning::{PartitionRule, ValueList};
+use crate::partitioning::PartitionRule;
pub type DistInsertRequest = HashMap<RegionId, InsertRequest>;
@@ -105,7 +106,7 @@ fn find_partitioning_values(
.collect()
}
-fn partition_values(partition_columns: &[VectorRef], idx: usize) -> ValueList {
+fn partition_values(partition_columns: &[VectorRef], idx: usize) -> Vec<Value> {
partition_columns
.iter()
.map(|column| column.get(idx))
@@ -411,7 +412,7 @@ mod tests {
vec!["id".to_string()]
}
- fn find_region(&self, values: &super::ValueList) -> Result<RegionId, Self::Error> {
+ fn find_region(&self, values: &[Value]) -> Result<RegionId, Self::Error> {
let val = values.get(0).unwrap().to_owned();
let id_1: Value = 1_i16.into();
let id_2: Value = 2_i16.into();
| feat | range columns partitioning rule (#374) |
| a954ba862a14f8ad6591c527e14f2b9cb78dd9ed | 2022-09-21 15:19:53 | evenyag | feat: Implement dedup reader (#270) | false |
diff --git a/src/datatypes/src/vectors/boolean.rs b/src/datatypes/src/vectors/boolean.rs
index d28a825ee98d..c1d98fa0f27c 100644
--- a/src/datatypes/src/vectors/boolean.rs
+++ b/src/datatypes/src/vectors/boolean.rs
@@ -4,6 +4,8 @@ use std::sync::Arc;
use arrow::array::{Array, ArrayRef, BooleanArray, MutableArray, MutableBooleanArray};
use arrow::bitmap::utils::{BitmapIter, ZipValidity};
+use arrow::bitmap::MutableBitmap;
+use arrow::datatypes::DataType as ArrowDataType;
use snafu::{OptionExt, ResultExt};
use crate::data_type::ConcreteDataType;
@@ -59,6 +61,14 @@ impl<Ptr: Borrow<Option<bool>>> FromIterator<Ptr> for BooleanVector {
}
}
+impl From<MutableBitmap> for BooleanVector {
+ fn from(bitmap: MutableBitmap) -> BooleanVector {
+ BooleanVector {
+ array: BooleanArray::new(ArrowDataType::Boolean, bitmap.into(), None),
+ }
+ }
+}
+
impl Vector for BooleanVector {
fn data_type(&self) -> ConcreteDataType {
ConcreteDataType::boolean_datatype()
@@ -327,4 +337,16 @@ mod tests {
let expect: VectorRef = Arc::new(BooleanVector::from_slice(&[true, false, true]));
assert_eq!(expect, vector);
}
+
+ #[test]
+ fn test_from_mutable_bitmap() {
+ let mut bitmap = MutableBitmap::new();
+ let values = [false, true, true, false, true];
+ for v in values {
+ bitmap.push(v);
+ }
+ let vector = BooleanVector::from(bitmap);
+ let expect = BooleanVector::from_slice(&values);
+ assert_eq!(expect, vector);
+ }
}
diff --git a/src/datatypes/src/vectors/null.rs b/src/datatypes/src/vectors/null.rs
index 329210886e59..84dd8f75672b 100644
--- a/src/datatypes/src/vectors/null.rs
+++ b/src/datatypes/src/vectors/null.rs
@@ -175,6 +175,10 @@ impl MutableVector for NullVectorBuilder {
pub(crate) fn replicate_null(vector: &NullVector, offsets: &[usize]) -> VectorRef {
assert_eq!(offsets.len(), vector.len());
+ if offsets.is_empty() {
+ return vector.slice(0, 0);
+ }
+
Arc::new(NullVector::new(*offsets.last().unwrap()))
}
diff --git a/src/datatypes/src/vectors/operations/replicate.rs b/src/datatypes/src/vectors/operations/replicate.rs
index 8ed712fd4052..1715777b3e71 100644
--- a/src/datatypes/src/vectors/operations/replicate.rs
+++ b/src/datatypes/src/vectors/operations/replicate.rs
@@ -72,6 +72,11 @@ mod tests {
#[test]
fn test_replicate_null() {
+ let v = NullVector::new(0);
+ let offsets = [];
+ let v = v.replicate(&offsets);
+ assert!(v.is_empty());
+
let v = NullVector::new(3);
let offsets = [1, 3, 5];
diff --git a/src/storage/src/chunk.rs b/src/storage/src/chunk.rs
index 3a94fb34d53b..e816d6f60cee 100644
--- a/src/storage/src/chunk.rs
+++ b/src/storage/src/chunk.rs
@@ -8,7 +8,7 @@ use table::predicate::Predicate;
use crate::error::{self, Error, Result};
use crate::memtable::{IterContext, MemtableRef, MemtableSet};
-use crate::read::{BoxedBatchReader, MergeReaderBuilder};
+use crate::read::{BoxedBatchReader, DedupReader, MergeReaderBuilder};
use crate::schema::{ProjectedSchema, ProjectedSchemaRef, RegionSchemaRef};
use crate::sst::{AccessLayerRef, FileHandle, LevelMetas, ReadOptions, Visitor};
@@ -18,7 +18,7 @@ use crate::sst::{AccessLayerRef, FileHandle, LevelMetas, ReadOptions, Visitor};
// necessary to do so.
pub struct ChunkReaderImpl {
schema: ProjectedSchemaRef,
- sst_reader: BoxedBatchReader,
+ batch_reader: BoxedBatchReader,
}
#[async_trait]
@@ -30,7 +30,7 @@ impl ChunkReader for ChunkReaderImpl {
}
async fn next_chunk(&mut self) -> Result<Option<Chunk>> {
- let batch = match self.sst_reader.next_batch().await? {
+ let batch = match self.batch_reader.next_batch().await? {
Some(b) => b,
None => return Ok(None),
};
@@ -42,8 +42,11 @@ impl ChunkReader for ChunkReaderImpl {
}
impl ChunkReaderImpl {
- pub fn new(schema: ProjectedSchemaRef, sst_reader: BoxedBatchReader) -> ChunkReaderImpl {
- ChunkReaderImpl { schema, sst_reader }
+ pub fn new(schema: ProjectedSchemaRef, batch_reader: BoxedBatchReader) -> ChunkReaderImpl {
+ ChunkReaderImpl {
+ schema,
+ batch_reader,
+ }
}
}
@@ -142,6 +145,7 @@ impl ChunkReaderBuilder {
}
let reader = reader_builder.build();
+ let reader = DedupReader::new(schema.clone(), reader);
Ok(ChunkReaderImpl::new(schema, Box::new(reader)))
}
diff --git a/src/storage/src/error.rs b/src/storage/src/error.rs
index 2a8bf019e882..af6cc3e1ad7a 100644
--- a/src/storage/src/error.rs
+++ b/src/storage/src/error.rs
@@ -246,6 +246,13 @@ pub enum Error {
#[snafu(display("Failed to build batch, {}", msg))]
BuildBatch { msg: String, backtrace: Backtrace },
+
+ #[snafu(display("Failed to filter column {}, source: {}", name, source))]
+ FilterColumn {
+ name: String,
+ #[snafu(backtrace)]
+ source: datatypes::error::Error,
+ },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -276,7 +283,8 @@ impl ErrorExt for Error {
| VersionNotFound { .. }
| SequenceNotMonotonic { .. }
| ConvertStoreSchema { .. }
- | InvalidRawRegion { .. } => StatusCode::Unexpected,
+ | InvalidRawRegion { .. }
+ | FilterColumn { .. } => StatusCode::Unexpected,
FlushIo { .. }
| WriteParquet { .. }
diff --git a/src/storage/src/read.rs b/src/storage/src/read.rs
index 16102cbb9c98..08b91f8077e8 100644
--- a/src/storage/src/read.rs
+++ b/src/storage/src/read.rs
@@ -1,21 +1,24 @@
//! Common structs and utilities for read.
+mod dedup;
mod merge;
+use std::cmp::Ordering;
+
use async_trait::async_trait;
+use datatypes::arrow::bitmap::MutableBitmap;
use datatypes::data_type::DataType;
use datatypes::prelude::ConcreteDataType;
-use datatypes::vectors::{MutableVector, VectorRef};
+use datatypes::vectors::{BooleanVector, MutableVector, VectorRef};
+pub use dedup::DedupReader;
pub use merge::{MergeReader, MergeReaderBuilder};
use snafu::{ensure, ResultExt};
use crate::error::{self, Result};
/// Storage internal representation of a batch of rows.
-///
-/// `Batch` must contain at least one column, but might not hold any row.
// Now the structure of `Batch` is still unstable, all pub fields may be changed.
-#[derive(Debug, Default, PartialEq, Eq)]
+#[derive(Debug, Default, PartialEq, Eq, Clone)]
pub struct Batch {
/// Rows organized in columnar format.
///
@@ -28,9 +31,7 @@ impl Batch {
/// Create a new `Batch` from `columns`.
///
/// # Panics
- /// Panics if
- /// - `columns` is empty.
- /// - vectors in `columns` have different length.
+ /// Panics if vectors in `columns` have different length.
pub fn new(columns: Vec<VectorRef>) -> Batch {
Self::assert_columns(&columns);
@@ -44,8 +45,7 @@ impl Batch {
#[inline]
pub fn num_rows(&self) -> usize {
- // The invariant of `Batch::new()` ensure columns isn't empty.
- self.columns[0].len()
+ self.columns.get(0).map(|v| v.len()).unwrap_or(0)
}
#[inline]
@@ -77,12 +77,50 @@ impl Batch {
}
fn assert_columns(columns: &[VectorRef]) {
- assert!(!columns.is_empty());
+ if columns.is_empty() {
+ return;
+ }
+
let length = columns[0].len();
assert!(columns.iter().all(|col| col.len() == length));
}
}
+/// Compute operations for Batch.
+pub trait BatchOp {
+ /// Compare `i-th` in `left` to `j-th` row in `right` by key (row key + internal columns).
+ ///
+ /// The caller should ensure `left` and `right` have same schema as `self`.
+ ///
+ /// # Panics
+ /// Panics if
+ /// - `i` or `j` is out of bound.
+ /// - `left` or `right` has insufficient column num.
+ fn compare_row(&self, left: &Batch, i: usize, right: &Batch, j: usize) -> Ordering;
+
+ /// Dedup rows in `batch` by row key.
+ ///
+ /// If `prev` is `Some` and not empty, the last row of `prev` would be used to dedup
+ /// current `batch`. Set `i-th` bit of `selected` to `true` if we need to keep `i-th`
+ /// row. So the caller could use `selected` to build a [BooleanVector] to filter the
+ /// batch.
+ ///
+ /// The caller must ensure `selected` is initialized by filling `batch.num_rows()` bits
+ /// to zero.
+ ///
+ /// # Panics
+ /// Panics if `batch` and `prev` have different number of columns (unless `prev` is
+ /// empty).
+ fn dedup(&self, batch: &Batch, selected: &mut MutableBitmap, prev: Option<&Batch>);
+
+ /// Filters the `batch`, returns elements matching the `filter` (i.e. where the values
+ /// are true).
+ ///
+ /// Note that nulls in `filter` are interpreted as `false`, which causes those elements
+ /// to be masked out.
+ fn filter(&self, batch: &Batch, filter: &BooleanVector) -> Result<Batch>;
+}
+
/// Reusable [Batch] builder.
pub struct BatchBuilder {
builders: Vec<Box<dyn MutableVector>>,
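
The `BatchOp::dedup` contract documented above marks a row as selected when it starts a new key, comparing the first row of the batch against the last row of the previous batch when one is given. A simplified model using a plain `Vec<bool>` and single integer keys in place of arrow2's `MutableBitmap` and the real key columns:

```rust
// Simplified model of the dedup selection described in the `BatchOp::dedup` docs:
// bit `i` is set when row `i` starts a new key.
fn dedup(keys: &[i64], selected: &mut [bool], prev_last_key: Option<i64>) {
    for i in 0..keys.len() {
        let prev = if i == 0 { prev_last_key } else { Some(keys[i - 1]) };
        selected[i] = Some(keys[i]) != prev;
    }
}

fn main() {
    let keys = [1000, 2000, 2000];
    let mut selected = vec![false; keys.len()];

    dedup(&keys, &mut selected, None);
    assert_eq!(selected, [true, true, false]);

    // With a previous batch ending in key 1000, row 0 is now a duplicate.
    dedup(&keys, &mut selected, Some(1000));
    assert_eq!(selected, [false, true, false]);
}
```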
diff --git a/src/storage/src/read/dedup.rs b/src/storage/src/read/dedup.rs
new file mode 100644
index 000000000000..6d6f93bdf757
--- /dev/null
+++ b/src/storage/src/read/dedup.rs
@@ -0,0 +1,164 @@
+use async_trait::async_trait;
+use datatypes::arrow::bitmap::MutableBitmap;
+use datatypes::vectors::BooleanVector;
+
+use crate::error::Result;
+use crate::read::{Batch, BatchOp, BatchReader};
+use crate::schema::ProjectedSchemaRef;
+
+/// A reader that dedup rows from inner reader.
+pub struct DedupReader<R> {
+ /// Projected schema to read.
+ schema: ProjectedSchemaRef,
+ /// The inner reader.
+ reader: R,
+ /// Previous batch from the reader.
+ prev_batch: Option<Batch>,
+}
+
+impl<R> DedupReader<R> {
+ pub fn new(schema: ProjectedSchemaRef, reader: R) -> DedupReader<R> {
+ DedupReader {
+ schema,
+ reader,
+ prev_batch: None,
+ }
+ }
+
+ /// Takes `batch` and returns a new batch with no duplicated rows.
+ ///
+ /// This method may return an empty `Batch`.
+ fn dedup_batch(&mut self, batch: Batch) -> Result<Batch> {
+ if batch.is_empty() {
+ // No need to update `prev_batch` if current batch is empty.
+ return Ok(batch);
+ }
+
+ // The `arrow` filter needs `BooleanArray` as input so there is no convenient
+ // and efficient way to reuse the bitmap. We could use `MutableBooleanArray`,
+ // but we couldn't easily zero all bits in the mutable array.
+ let mut selected = MutableBitmap::from_len_zeroed(batch.num_rows());
+ self.schema
+ .dedup(&batch, &mut selected, self.prev_batch.as_ref());
+
+ // Store current batch to `prev_batch` so we could compare the next batch
+ // with this batch. We store batch before filtering it mainly for correctness, as
+ // once we support `DELETE`, rows with `OpType::Delete` would be removed from the
+ // batch after filter, then we may store an incorrect `last row` of previous batch.
+ self.prev_batch
+ .get_or_insert_with(Batch::default)
+ .clone_from(&batch); // Use `clone_from` to reuse allocated memory if possible.
+
+ // TODO(yingwen): To support `DELETE`, we could find all rows whose op_types are equal
+ // to `OpType::Delete`, mark their `selected` to false, then filter the batch.
+
+ let filter = BooleanVector::from(selected);
+ // Filter duplicate rows.
+ self.schema.filter(&batch, &filter)
+ }
+}
+
+#[async_trait]
+impl<R: BatchReader> BatchReader for DedupReader<R> {
+ async fn next_batch(&mut self) -> Result<Option<Batch>> {
+ while let Some(batch) = self.reader.next_batch().await? {
+ let filtered = self.dedup_batch(batch)?;
+ // Skip empty batch.
+ if !filtered.is_empty() {
+ return Ok(Some(filtered));
+ }
+ }
+
+ Ok(None)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use store_api::storage::OpType;
+
+ use super::*;
+ use crate::test_util::read_util;
+
+ #[tokio::test]
+ async fn test_dedup_reader_empty() {
+ let schema = read_util::new_projected_schema();
+ let reader = read_util::build_vec_reader(&[]);
+ let mut reader = DedupReader::new(schema, reader);
+
+ assert!(reader.next_batch().await.unwrap().is_none());
+ // Call next_batch() again is allowed.
+ assert!(reader.next_batch().await.unwrap().is_none());
+ }
+
+ #[tokio::test]
+ async fn test_dedup_by_sequence() {
+ let schema = read_util::new_projected_schema();
+ let reader = read_util::build_full_vec_reader(&[
+ // key, value, sequence, op_type
+ &[
+ (100, 1, 1000, OpType::Put),
+ (100, 2, 999, OpType::Put),
+ (100, 3, 998, OpType::Put),
+ (101, 1, 1000, OpType::Put),
+ ],
+ &[
+ (101, 2, 999, OpType::Put),
+ (102, 12, 1000, OpType::Put),
+ (103, 13, 1000, OpType::Put),
+ ],
+ &[(103, 2, 999, OpType::Put)],
+ ]);
+ let mut reader = DedupReader::new(schema, reader);
+
+ let result = read_util::collect_kv_batch(&mut reader).await;
+ let expect = [
+ (100, Some(1)),
+ (101, Some(1)),
+ (102, Some(12)),
+ (103, Some(13)),
+ ];
+ assert_eq!(&expect, &result[..]);
+ }
+
+ #[tokio::test]
+ async fn test_dedup_contains_empty_input() {
+ let schema = read_util::new_projected_schema();
+ let reader = read_util::build_full_vec_reader(&[
+ // key, value, sequence, op_type
+ &[
+ (100, 1, 1000, OpType::Put),
+ (100, 2, 999, OpType::Put),
+ (101, 1, 1000, OpType::Put),
+ ],
+ &[],
+ &[(101, 2, 999, OpType::Put), (102, 12, 1000, OpType::Put)],
+ ]);
+ let mut reader = DedupReader::new(schema, reader);
+
+ let result = read_util::collect_kv_batch(&mut reader).await;
+ let expect = [(100, Some(1)), (101, Some(1)), (102, Some(12))];
+ assert_eq!(&expect, &result[..]);
+ }
+
+ #[tokio::test]
+ async fn test_dedup_contains_empty_output() {
+ let schema = read_util::new_projected_schema();
+ let reader = read_util::build_full_vec_reader(&[
+ // key, value, sequence, op_type
+ &[
+ (100, 1, 1000, OpType::Put),
+ (100, 2, 999, OpType::Put),
+ (101, 1, 1000, OpType::Put),
+ ],
+ &[(101, 2, 999, OpType::Put)],
+ &[(101, 3, 998, OpType::Put), (101, 4, 997, OpType::Put)],
+ &[(102, 12, 998, OpType::Put)],
+ ]);
+ let mut reader = DedupReader::new(schema, reader);
+
+ let result = read_util::collect_kv_batch(&mut reader).await;
+ let expect = [(100, Some(1)), (101, Some(1)), (102, Some(12))];
+ assert_eq!(&expect, &result[..]);
+ }
+}
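
`DedupReader::next_batch` above loops until the inner reader yields a batch that is still non-empty after filtering, so callers never see an empty batch. A minimal synchronous analogue (an `Iterator`, not the real async `BatchReader` trait) of that skip-empty, carry-the-last-key loop:

```rust
// Synchronous sketch of the skip-empty loop in `DedupReader::next_batch`:
// keep pulling from the inner source until a filtered batch is non-empty.
struct Dedup<I: Iterator<Item = Vec<i64>>> {
    inner: I,
    prev_last: Option<i64>,
}

impl<I: Iterator<Item = Vec<i64>>> Iterator for Dedup<I> {
    type Item = Vec<i64>;

    fn next(&mut self) -> Option<Vec<i64>> {
        while let Some(batch) = self.inner.next() {
            let mut out = Vec::new();
            for key in batch {
                if Some(key) != self.prev_last {
                    out.push(key);
                }
                self.prev_last = Some(key);
            }
            if !out.is_empty() {
                return Some(out); // skip batches that dedup to nothing
            }
        }
        None
    }
}

fn main() {
    let batches = vec![vec![100, 100, 101], vec![101], vec![101, 102]];
    let reader = Dedup { inner: batches.into_iter(), prev_last: None };
    let out: Vec<Vec<i64>> = reader.collect();
    // The all-duplicate middle batch disappears instead of surfacing as empty.
    assert_eq!(out, vec![vec![100, 101], vec![102]]);
}
```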
diff --git a/src/storage/src/read/merge.rs b/src/storage/src/read/merge.rs
index a1f69b2e34f2..a62014183016 100644
--- a/src/storage/src/read/merge.rs
+++ b/src/storage/src/read/merge.rs
@@ -49,7 +49,7 @@ use store_api::storage::consts;
use crate::error::Result;
use crate::memtable::BoxedBatchIterator;
-use crate::read::{Batch, BatchBuilder, BatchReader, BoxedBatchReader};
+use crate::read::{Batch, BatchBuilder, BatchOp, BatchReader, BoxedBatchReader};
use crate::schema::{ProjectedSchema, ProjectedSchemaRef};
/// Batch data source.
@@ -98,7 +98,7 @@ struct RowCursor<'a> {
impl<'a> RowCursor<'a> {
#[inline]
fn compare(&self, schema: &ProjectedSchema, other: &RowCursor) -> Ordering {
- schema.compare_row_of_batch(self.batch, self.pos, other.batch, other.pos)
+ schema.compare_row(self.batch, self.pos, other.batch, other.pos)
}
}
diff --git a/src/storage/src/region/tests/flush.rs b/src/storage/src/region/tests/flush.rs
index 17d94c273f4a..fdf4e2528d32 100644
--- a/src/storage/src/region/tests/flush.rs
+++ b/src/storage/src/region/tests/flush.rs
@@ -122,7 +122,6 @@ async fn test_flush_and_stall() {
let store_dir = dir.path().to_str().unwrap();
let flush_switch = Arc::new(FlushSwitch::default());
- // Always trigger flush before write.
let tester = FlushTester::new(store_dir, flush_switch.clone()).await;
let data = [(1000, Some(100))];
@@ -182,7 +181,6 @@ async fn test_read_after_flush() {
let store_dir = dir.path().to_str().unwrap();
let flush_switch = Arc::new(FlushSwitch::default());
- // Always trigger flush before write.
let tester = FlushTester::new(store_dir, flush_switch.clone()).await;
// Put elements so we have content to flush.
@@ -209,3 +207,48 @@ async fn test_read_after_flush() {
let output = tester.full_scan().await;
assert_eq!(expect, output);
}
+
+#[tokio::test]
+async fn test_merge_read_after_flush() {
+ let dir = TempDir::new("merge-read-flush").unwrap();
+ let store_dir = dir.path().to_str().unwrap();
+
+ let flush_switch = Arc::new(FlushSwitch::default());
+ let tester = FlushTester::new(store_dir, flush_switch.clone()).await;
+
+ // Put elements so we have content to flush (In SST1).
+ tester.put(&[(3000, Some(300))]).await;
+ tester.put(&[(2000, Some(200))]).await;
+
+ // Now set should flush to true to trigger flush.
+ flush_switch.set_should_flush(true);
+
+ // Put element to trigger flush (In SST2).
+ tester.put(&[(2000, Some(201))]).await;
+ tester.wait_flush_done().await;
+
+ // Disable flush.
+ flush_switch.set_should_flush(false);
+ // In SST2.
+ tester.put(&[(2000, Some(202))]).await;
+ tester.put(&[(1000, Some(100))]).await;
+
+ // Enable flush.
+ flush_switch.set_should_flush(true);
+ // Trigger flush and overwrite row (In memtable).
+ tester.put(&[(2000, Some(203))]).await;
+ tester.wait_flush_done().await;
+
+ let expect = vec![(1000, Some(100)), (2000, Some(203)), (3000, Some(300))];
+
+ let output = tester.full_scan().await;
+ assert_eq!(expect, output);
+
+ // Reopen
+ let mut tester = tester;
+ tester.reopen().await;
+
+ // Scan after reopen.
+ let output = tester.full_scan().await;
+ assert_eq!(expect, output);
+}
diff --git a/src/storage/src/schema.rs b/src/storage/src/schema.rs
index 0e5ae4f50a39..29c0a16f9ed2 100644
--- a/src/storage/src/schema.rs
+++ b/src/storage/src/schema.rs
@@ -4,16 +4,18 @@ use std::sync::Arc;
use common_error::prelude::*;
use datatypes::arrow::array::Array;
+use datatypes::arrow::bitmap::MutableBitmap;
use datatypes::arrow::chunk::Chunk as ArrowChunk;
use datatypes::arrow::datatypes::Schema as ArrowSchema;
use datatypes::schema::Metadata;
-use datatypes::vectors::{Helper, VectorRef};
+use datatypes::vectors::{BooleanVector, Helper, VectorRef};
use serde::{Deserialize, Serialize};
use snafu::ensure;
use store_api::storage::{consts, Chunk, ColumnId, ColumnSchema, Schema, SchemaBuilder, SchemaRef};
+use crate::error;
use crate::metadata::{ColumnMetadata, ColumnsMetadata, ColumnsMetadataRef};
-use crate::read::Batch;
+use crate::read::{Batch, BatchOp};
const ROW_KEY_END_KEY: &str = "greptime:storage:row_key_end";
const USER_COLUMN_END_KEY: &str = "greptime:storage:user_column_end";
@@ -231,20 +233,6 @@ impl StoreSchema {
Ok(Batch::new(columns))
}
- fn compare_row_of_batch(&self, left: &Batch, i: usize, right: &Batch, j: usize) -> Ordering {
- let indices = self.full_key_indices();
- for idx in indices {
- let (left_col, right_col) = (left.column(idx), right.column(idx));
- // Comparision of vector is done by virtual method calls currently. Consider using
- // enum dispatch if this becomes bottleneck.
- let order = left_col.get_ref(i).cmp(&right_col.get_ref(j));
- if order != Ordering::Equal {
- return order;
- }
- }
- Ordering::Equal
- }
-
fn from_columns_metadata(columns: &ColumnsMetadata, version: u32) -> Result<StoreSchema> {
let column_schemas: Vec<_> = columns
.iter_all_columns()
@@ -315,13 +303,6 @@ impl StoreSchema {
fn num_columns(&self) -> usize {
self.schema.num_columns()
}
-
- fn full_key_indices(&self) -> impl Iterator<Item = usize> {
- // row key, sequence, op_type
- (0..self.row_key_end)
- .chain(std::iter::once(self.sequence_index()))
- .chain(std::iter::once(self.op_type_index()))
- }
}
impl TryFrom<ArrowSchema> for StoreSchema {
@@ -552,25 +533,6 @@ impl ProjectedSchema {
Batch::new(columns)
}
- /// Compare `i-th` in `left` to `j-th` row in `right` by key (row key + internal columns).
- ///
- /// The caller should ensure `left` and `right` have same schema as `self.schema_to_read()`.
- ///
- /// # Panics
- /// Panics if
- /// - `i` or `j` is out of bound.
- /// - `left` or `right` has insufficient column num.
- #[inline]
- pub fn compare_row_of_batch(
- &self,
- left: &Batch,
- i: usize,
- right: &Batch,
- j: usize,
- ) -> Ordering {
- self.schema_to_read.compare_row_of_batch(left, i, right, j)
- }
-
fn build_schema_to_read(
region_schema: &RegionSchema,
projection: &Projection,
@@ -652,6 +614,65 @@ impl ProjectedSchema {
}
}
+impl BatchOp for ProjectedSchema {
+ fn compare_row(&self, left: &Batch, i: usize, right: &Batch, j: usize) -> Ordering {
+ // Ordered by (row_key asc, sequence desc, op_type desc).
+ let indices = self.schema_to_read.row_key_indices();
+ for idx in indices {
+ let (left_col, right_col) = (left.column(idx), right.column(idx));
+ // Comparison of vectors is done by virtual method calls currently. Consider using
+ // enum dispatch if this becomes bottleneck.
+ let order = left_col.get_ref(i).cmp(&right_col.get_ref(j));
+ if order != Ordering::Equal {
+ return order;
+ }
+ }
+ let (sequence_index, op_type_index) = (
+ self.schema_to_read.sequence_index(),
+ self.schema_to_read.op_type_index(),
+ );
+ right
+ .column(sequence_index)
+ .get_ref(j)
+ .cmp(&left.column(sequence_index).get_ref(i))
+ .then_with(|| {
+ right
+ .column(op_type_index)
+ .get_ref(j)
+ .cmp(&left.column(op_type_index).get_ref(i))
+ })
+ }
+
+ fn dedup(&self, batch: &Batch, selected: &mut MutableBitmap, prev: Option<&Batch>) {
+ if let Some(prev) = prev {
+ assert_eq!(batch.num_columns(), prev.num_columns());
+ }
+ let indices = self.schema_to_read.row_key_indices();
+ for idx in indices {
+ let (current, prev_col) = (
+ batch.column(idx),
+ prev.map(|prev| prev.column(idx).as_ref()),
+ );
+ current.dedup(selected, prev_col);
+ }
+ }
+
+ fn filter(&self, batch: &Batch, filter: &BooleanVector) -> error::Result<Batch> {
+ let columns = batch
+ .columns()
+ .iter()
+ .enumerate()
+ .map(|(i, v)| {
+ v.filter(filter).context(error::FilterColumnSnafu {
+ name: self.schema_to_read.column_name(i),
+ })
+ })
+ .collect::<error::Result<Vec<_>>>()?;
+
+ Ok(Batch::new(columns))
+ }
+}
+
fn parse_index_from_metadata(metadata: &Metadata, key: &str) -> Result<usize> {
let value = metadata.get(key).context(MissingMetaSnafu { key })?;
value.parse().context(ParseIndexSnafu { value })
@@ -673,12 +694,14 @@ fn build_user_schema(columns: &ColumnsMetadata, version: u32) -> Result<Schema>
#[cfg(test)]
mod tests {
+ use datatypes::prelude::ScalarVector;
use datatypes::type_id::LogicalTypeId;
- use datatypes::vectors::{Int64Vector, UInt64Vector, UInt8Vector};
+ use datatypes::vectors::{Int64Vector, TimestampVector, UInt64Vector, UInt8Vector};
+ use store_api::storage::OpType;
use super::*;
use crate::metadata::RegionMetadata;
- use crate::test_util::{descriptor_util, schema_util};
+ use crate::test_util::{descriptor_util, read_util, schema_util};
fn new_batch() -> Batch {
let k0 = Int64Vector::from_slice(&[1, 2, 3]);
@@ -934,4 +957,48 @@ mod tests {
.unwrap();
assert!(matches!(err, Error::InvalidProjection { .. }));
}
+
+ #[test]
+ fn test_compare_batch() {
+ let schema = read_util::new_projected_schema();
+ let left = read_util::new_full_kv_batch(&[(1000, 1, 1000, OpType::Put)]);
+ let right = read_util::new_full_kv_batch(&[
+ (999, 1, 1000, OpType::Put),
+ (1000, 1, 999, OpType::Put),
+ (1000, 1, 1000, OpType::Put),
+ ]);
+
+ assert_eq!(Ordering::Greater, schema.compare_row(&left, 0, &right, 0));
+ assert_eq!(Ordering::Less, schema.compare_row(&left, 0, &right, 1));
+ assert_eq!(Ordering::Equal, schema.compare_row(&left, 0, &right, 2));
+ }
+
+ #[test]
+ fn test_dedup_batch() {
+ let schema = read_util::new_projected_schema();
+ let batch = read_util::new_kv_batch(&[(1000, Some(1)), (2000, Some(2)), (2000, Some(2))]);
+ let mut selected = MutableBitmap::from_len_zeroed(3);
+
+ schema.dedup(&batch, &mut selected, None);
+ assert!(selected.get(0));
+ assert!(selected.get(1));
+ assert!(!selected.get(2));
+
+ let prev = read_util::new_kv_batch(&[(1000, Some(1))]);
+ schema.dedup(&batch, &mut selected, Some(&prev));
+ assert!(!selected.get(0));
+ assert!(selected.get(1));
+ assert!(!selected.get(2));
+ }
+
+ #[test]
+ fn test_filter_batch() {
+ let schema = read_util::new_projected_schema();
+ let batch = read_util::new_kv_batch(&[(1000, Some(1)), (2000, Some(2)), (3000, Some(3))]);
+ let filter = BooleanVector::from_slice(&[true, false, true]);
+
+ let res = schema.filter(&batch, &filter).unwrap();
+ let expect: VectorRef = Arc::new(TimestampVector::from_values([1000, 3000]));
+ assert_eq!(expect, *res.column(0));
+ }
}
diff --git a/src/storage/src/test_util/read_util.rs b/src/storage/src/test_util/read_util.rs
index d18868682b4d..b15b43a00421 100644
--- a/src/storage/src/test_util/read_util.rs
+++ b/src/storage/src/test_util/read_util.rs
@@ -4,6 +4,7 @@ use async_trait::async_trait;
use datatypes::prelude::ScalarVector;
use datatypes::type_id::LogicalTypeId;
use datatypes::vectors::{Int64Vector, TimestampVector, UInt64Vector, UInt8Vector};
+use store_api::storage::OpType;
use crate::error::Result;
use crate::memtable::{BatchIterator, BoxedBatchIterator, RowOrdering};
@@ -29,7 +30,7 @@ pub fn new_projected_schema() -> ProjectedSchemaRef {
}
/// Build a new batch, with 0 sequence and op_type.
-fn new_kv_batch(key_values: &[(i64, Option<i64>)]) -> Batch {
+pub fn new_kv_batch(key_values: &[(i64, Option<i64>)]) -> Batch {
let key = Arc::new(TimestampVector::from_values(key_values.iter().map(|v| v.0)));
let value = Arc::new(Int64Vector::from_iter(key_values.iter().map(|v| v.1)));
let sequences = Arc::new(UInt64Vector::from_vec(vec![0; key_values.len()]));
@@ -38,6 +39,18 @@ fn new_kv_batch(key_values: &[(i64, Option<i64>)]) -> Batch {
Batch::new(vec![key, value, sequences, op_types])
}
+/// Build a new batch from (key, value, sequence, op_type)
+pub fn new_full_kv_batch(all_values: &[(i64, i64, u64, OpType)]) -> Batch {
+ let key = Arc::new(TimestampVector::from_values(all_values.iter().map(|v| v.0)));
+ let value = Arc::new(Int64Vector::from_values(all_values.iter().map(|v| v.1)));
+ let sequences = Arc::new(UInt64Vector::from_values(all_values.iter().map(|v| v.2)));
+ let op_types = Arc::new(UInt8Vector::from_values(
+ all_values.iter().map(|v| v.3.as_u8()),
+ ));
+
+ Batch::new(vec![key, value, sequences, op_types])
+}
+
fn check_kv_batch(batches: &[Batch], expect: &[&[(i64, Option<i64>)]]) {
for (batch, key_values) in batches.iter().zip(expect.iter()) {
let key = batch
@@ -145,6 +158,15 @@ pub fn build_vec_reader(batches: &[&[(i64, Option<i64>)]]) -> VecBatchReader {
VecBatchReader::new(batches)
}
+pub fn build_full_vec_reader(batches: &[&[(i64, i64, u64, OpType)]]) -> VecBatchReader {
+ let batches: Vec<_> = batches
+ .iter()
+ .map(|key_values| new_full_kv_batch(key_values))
+ .collect();
+
+ VecBatchReader::new(batches)
+}
+
pub fn build_boxed_reader(batches: &[&[(i64, Option<i64>)]]) -> BoxedBatchReader {
Box::new(build_vec_reader(batches))
}
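
A minimal standalone sketch of the two rules the BatchOp impl above encodes, using plain integer keys instead of the storage engine's Batch and MutableBitmap types (an illustrative sketch, not the actual API): rows compare by key ascending then sequence descending, and dedup keeps only the first row per key, optionally checking against the tail of the previous batch.

use std::cmp::Ordering;

// Rows are ordered by (key asc, sequence desc): for duplicate keys the row with
// the larger sequence (the newer write) sorts first. op_type is omitted here.
fn compare_row(left: (i64, u64), right: (i64, u64)) -> Ordering {
    left.0.cmp(&right.0).then_with(|| right.1.cmp(&left.1))
}

// Keep a row only when its key differs from the previous row's key;
// `prev_last_key` models the last key of the previous batch, if any.
fn dedup_keys(keys: &[i64], prev_last_key: Option<i64>) -> Vec<bool> {
    let mut selected = Vec::with_capacity(keys.len());
    let mut last = prev_last_key;
    for &key in keys {
        selected.push(Some(key) != last);
        last = Some(key);
    }
    selected
}

fn main() {
    // Same key: the row with sequence 2 sorts before the row with sequence 1.
    assert_eq!(compare_row((1000, 2), (1000, 1)), Ordering::Less);
    // Mirrors test_dedup_batch: keys [1000, 2000, 2000], no previous batch.
    assert_eq!(dedup_keys(&[1000, 2000, 2000], None), vec![true, true, false]);
    // With a previous batch ending in key 1000, the first row is also a duplicate.
    assert_eq!(dedup_keys(&[1000, 2000, 2000], Some(1000)), vec![false, true, false]);
    println!("ordering and dedup sketch ok");
}
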
|
feat
|
Implement dedup reader (#270)
|
57c02af55b67e1917e44b753de31125e11057a99
|
2023-05-17 15:18:13
|
fys
|
feat: change default selector in meta from "LeaseBased" to "LoadBased" (#1598)
| false
|
diff --git a/src/meta-srv/src/selector.rs b/src/meta-srv/src/selector.rs
index 19057bca05d2..58a83429c7b1 100644
--- a/src/meta-srv/src/selector.rs
+++ b/src/meta-srv/src/selector.rs
@@ -32,8 +32,8 @@ pub trait Selector: Send + Sync {
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize, Default)]
pub enum SelectorType {
- LoadBased,
#[default]
+ LoadBased,
LeaseBased,
}
@@ -59,7 +59,7 @@ mod tests {
#[test]
fn test_default_selector_type() {
- assert_eq!(SelectorType::LeaseBased, SelectorType::default());
+ assert_eq!(SelectorType::LoadBased, SelectorType::default());
}
#[test]
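
The change above only moves the `#[default]` attribute; with `#[derive(Default)]` on an enum, the variant carrying that attribute is what `SelectorType::default()` returns. A trimmed-down sketch of the mechanism (derives reduced to the minimum, not the full meta-srv definition):

// Trimmed-down sketch: with `#[derive(Default)]` on an enum, `default()` returns
// the variant marked `#[default]`, so moving the attribute flips the default.
#[allow(dead_code)]
#[derive(Debug, Default, PartialEq, Eq)]
enum SelectorType {
    #[default]
    LoadBased,
    LeaseBased,
}

fn main() {
    assert_eq!(SelectorType::default(), SelectorType::LoadBased);
    println!("default selector: {:?}", SelectorType::default());
}
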
|
feat
|
change default selector in meta from "LeaseBased" to "LoadBased" (#1598)
|
abf1680d144a825c754c54db0355d5164e267a2a
|
2025-03-04 17:25:25
|
Lei, HUANG
|
fix: interval rewrite rule that messes up show create flow function (#5642)
| false
|
diff --git a/src/sql/src/parsers/create_parser.rs b/src/sql/src/parsers/create_parser.rs
index 60d94247206e..b99b88b7214b 100644
--- a/src/sql/src/parsers/create_parser.rs
+++ b/src/sql/src/parsers/create_parser.rs
@@ -2475,4 +2475,13 @@ CREATE TABLE log (
assert!(extensions.fulltext_index_options.is_some());
}
}
+
+ #[test]
+ fn test_parse_interval_cast() {
+ let s = "select '10s'::INTERVAL";
+ let stmts =
+ ParserContext::create_with_dialect(s, &GreptimeDbDialect {}, ParseOptions::default())
+ .unwrap();
+ assert_eq!("SELECT '10 seconds'::INTERVAL", &stmts[0].to_string());
+ }
}
diff --git a/src/sql/src/statements/transform/expand_interval.rs b/src/sql/src/statements/transform/expand_interval.rs
index 9761dcb0d7bc..8cddaf8b7339 100644
--- a/src/sql/src/statements/transform/expand_interval.rs
+++ b/src/sql/src/statements/transform/expand_interval.rs
@@ -99,15 +99,21 @@ impl TransformRule for ExpandIntervalTransformRule {
Expr::Cast {
expr: cast_exp,
data_type,
- ..
+ kind,
+ format,
} => {
if DataType::Interval == *data_type {
match &**cast_exp {
Expr::Value(Value::SingleQuotedString(value))
| Expr::Value(Value::DoubleQuotedString(value)) => {
- let interval_name =
+ let interval_value =
normalize_interval_name(value).unwrap_or_else(|| value.to_string());
- *expr = create_interval(single_quoted_string_expr(interval_name));
+ *expr = Expr::Cast {
+ kind: kind.clone(),
+ expr: single_quoted_string_expr(interval_value),
+ data_type: DataType::Interval,
+ format: std::mem::take(format),
+ }
}
_ => {}
}
@@ -123,16 +129,6 @@ fn single_quoted_string_expr(string: String) -> Box<Expr> {
Box::new(Expr::Value(Value::SingleQuotedString(string)))
}
-fn create_interval(value: Box<Expr>) -> Expr {
- Expr::Interval(Interval {
- value,
- leading_field: None,
- leading_precision: None,
- last_field: None,
- fractional_seconds_precision: None,
- })
-}
-
fn update_existing_interval_with_value(interval: &Interval, value: Box<Expr>) -> Expr {
Expr::Interval(Interval {
value,
@@ -199,14 +195,23 @@ fn expand_interval_abbreviation(interval_str: &str) -> Option<String> {
mod tests {
use std::ops::ControlFlow;
- use sqlparser::ast::{BinaryOperator, DataType, Expr, Interval, Value};
+ use sqlparser::ast::{BinaryOperator, CastKind, DataType, Expr, Interval, Value};
use crate::statements::transform::expand_interval::{
- create_interval, normalize_interval_name, single_quoted_string_expr,
- ExpandIntervalTransformRule,
+ normalize_interval_name, single_quoted_string_expr, ExpandIntervalTransformRule,
};
use crate::statements::transform::TransformRule;
+ fn create_interval(value: Box<Expr>) -> Expr {
+ Expr::Interval(Interval {
+ value,
+ leading_field: None,
+ leading_precision: None,
+ last_field: None,
+ fractional_seconds_precision: None,
+ })
+ }
+
#[test]
fn test_transform_interval_basic_conversions() {
let test_cases = vec![
@@ -379,15 +384,14 @@ mod tests {
assert_eq!(control_flow, ControlFlow::Continue(()));
assert_eq!(
cast_to_interval_expr,
- Expr::Interval(Interval {
- value: Box::new(Expr::Value(Value::SingleQuotedString(
+ Expr::Cast {
+ kind: CastKind::Cast,
+ expr: Box::new(Expr::Value(Value::SingleQuotedString(
"3 years 2 months".to_string()
))),
- leading_field: None,
- leading_precision: None,
- last_field: None,
- fractional_seconds_precision: None,
- })
+ data_type: DataType::Interval,
+ format: None,
+ }
);
let mut cast_to_i64_expr = Expr::Cast {
diff --git a/tests/cases/standalone/common/types/interval/interval.result b/tests/cases/standalone/common/types/interval/interval.result
index c98b42283887..2a5bd555256b 100644
--- a/tests/cases/standalone/common/types/interval/interval.result
+++ b/tests/cases/standalone/common/types/interval/interval.result
@@ -25,11 +25,11 @@ SELECT INTERVAL '-2 months';
SELECT '3 hours'::INTERVAL;
-+--------------------------------------------------------------------------------------------------+
-| IntervalMonthDayNano("IntervalMonthDayNano { months: 0, days: 0, nanoseconds: 10800000000000 }") |
-+--------------------------------------------------------------------------------------------------+
-| 3 hours |
-+--------------------------------------------------------------------------------------------------+
++-----------------+
+| Utf8("3 hours") |
++-----------------+
+| 3 hours |
++-----------------+
SELECT INTERVAL '1 year 2 months 3 days 4 hours' + INTERVAL '1 year';
@@ -128,11 +128,11 @@ SELECT INTERVAL '1y2w3d4h';
SELECT '3y2mon'::INTERVAL;
-+--------------------------------------------------------------------------------------+
-| IntervalMonthDayNano("IntervalMonthDayNano { months: 38, days: 0, nanoseconds: 0 }") |
-+--------------------------------------------------------------------------------------+
-| 38 mons |
-+--------------------------------------------------------------------------------------+
++--------------------------+
+| Utf8("3 years 2 months") |
++--------------------------+
+| 38 mons |
++--------------------------+
SELECT INTERVAL '7 days' - INTERVAL '1d';
@@ -169,11 +169,11 @@ SELECT INTERVAL '-P3Y3M700DT133H17M36.789S';
SELECT 'P3Y3M700DT133H17M36.789S'::INTERVAL;
-+------------------------------------------------------------------------------------------------------+
-| IntervalMonthDayNano("IntervalMonthDayNano { months: 0, days: 0, nanoseconds: 163343856789000000 }") |
-+------------------------------------------------------------------------------------------------------+
-| 45373 hours 17 mins 36.789000000 secs |
-+------------------------------------------------------------------------------------------------------+
++---------------------------------------+
+| Utf8("163343856789 milliseconds") |
++---------------------------------------+
+| 45373 hours 17 mins 36.789000000 secs |
++---------------------------------------+
SELECT INTERVAL '2h' + INTERVAL 'P3Y3M700DT133H17M36.789S';
@@ -185,115 +185,115 @@ SELECT INTERVAL '2h' + INTERVAL 'P3Y3M700DT133H17M36.789S';
select '2022-01-01T00:00:01'::timestamp + '1 days'::interval;
-+--------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| arrow_cast(Utf8("2022-01-01T00:00:01"),Utf8("Timestamp(Millisecond, None)")) + IntervalMonthDayNano("IntervalMonthDayNano { months: 0, days: 1, nanoseconds: 0 }") |
-+--------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| 2022-01-02T00:00:01 |
-+--------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++-----------------------------------------------------------------------------------------------+
+| arrow_cast(Utf8("2022-01-01T00:00:01"),Utf8("Timestamp(Millisecond, None)")) + Utf8("1 days") |
++-----------------------------------------------------------------------------------------------+
+| 2022-01-02T00:00:01 |
++-----------------------------------------------------------------------------------------------+
select '2022-01-01T00:00:01'::timestamp + '2 days'::interval;
-+--------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| arrow_cast(Utf8("2022-01-01T00:00:01"),Utf8("Timestamp(Millisecond, None)")) + IntervalMonthDayNano("IntervalMonthDayNano { months: 0, days: 2, nanoseconds: 0 }") |
-+--------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| 2022-01-03T00:00:01 |
-+--------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++-----------------------------------------------------------------------------------------------+
+| arrow_cast(Utf8("2022-01-01T00:00:01"),Utf8("Timestamp(Millisecond, None)")) + Utf8("2 days") |
++-----------------------------------------------------------------------------------------------+
+| 2022-01-03T00:00:01 |
++-----------------------------------------------------------------------------------------------+
select '2022-01-01T00:00:01'::timestamp - '1 days'::interval;
-+--------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| arrow_cast(Utf8("2022-01-01T00:00:01"),Utf8("Timestamp(Millisecond, None)")) - IntervalMonthDayNano("IntervalMonthDayNano { months: 0, days: 1, nanoseconds: 0 }") |
-+--------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| 2021-12-31T00:00:01 |
-+--------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++-----------------------------------------------------------------------------------------------+
+| arrow_cast(Utf8("2022-01-01T00:00:01"),Utf8("Timestamp(Millisecond, None)")) - Utf8("1 days") |
++-----------------------------------------------------------------------------------------------+
+| 2021-12-31T00:00:01 |
++-----------------------------------------------------------------------------------------------+
select '2022-01-01T00:00:01'::timestamp - '2 days'::interval;
-+--------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| arrow_cast(Utf8("2022-01-01T00:00:01"),Utf8("Timestamp(Millisecond, None)")) - IntervalMonthDayNano("IntervalMonthDayNano { months: 0, days: 2, nanoseconds: 0 }") |
-+--------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| 2021-12-30T00:00:01 |
-+--------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++-----------------------------------------------------------------------------------------------+
+| arrow_cast(Utf8("2022-01-01T00:00:01"),Utf8("Timestamp(Millisecond, None)")) - Utf8("2 days") |
++-----------------------------------------------------------------------------------------------+
+| 2021-12-30T00:00:01 |
++-----------------------------------------------------------------------------------------------+
select '2022-01-01T00:00:01'::timestamp + '1 month'::interval;
-+--------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| arrow_cast(Utf8("2022-01-01T00:00:01"),Utf8("Timestamp(Millisecond, None)")) + IntervalMonthDayNano("IntervalMonthDayNano { months: 1, days: 0, nanoseconds: 0 }") |
-+--------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| 2022-02-01T00:00:01 |
-+--------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++------------------------------------------------------------------------------------------------+
+| arrow_cast(Utf8("2022-01-01T00:00:01"),Utf8("Timestamp(Millisecond, None)")) + Utf8("1 month") |
++------------------------------------------------------------------------------------------------+
+| 2022-02-01T00:00:01 |
++------------------------------------------------------------------------------------------------+
select '2022-01-01T00:00:01'::timestamp + '2 months'::interval;
-+--------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| arrow_cast(Utf8("2022-01-01T00:00:01"),Utf8("Timestamp(Millisecond, None)")) + IntervalMonthDayNano("IntervalMonthDayNano { months: 2, days: 0, nanoseconds: 0 }") |
-+--------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| 2022-03-01T00:00:01 |
-+--------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++-------------------------------------------------------------------------------------------------+
+| arrow_cast(Utf8("2022-01-01T00:00:01"),Utf8("Timestamp(Millisecond, None)")) + Utf8("2 months") |
++-------------------------------------------------------------------------------------------------+
+| 2022-03-01T00:00:01 |
++-------------------------------------------------------------------------------------------------+
select '2022-01-01T00:00:01'::timestamp + '1 year'::interval;
-+---------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| arrow_cast(Utf8("2022-01-01T00:00:01"),Utf8("Timestamp(Millisecond, None)")) + IntervalMonthDayNano("IntervalMonthDayNano { months: 12, days: 0, nanoseconds: 0 }") |
-+---------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| 2023-01-01T00:00:01 |
-+---------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++-----------------------------------------------------------------------------------------------+
+| arrow_cast(Utf8("2022-01-01T00:00:01"),Utf8("Timestamp(Millisecond, None)")) + Utf8("1 year") |
++-----------------------------------------------------------------------------------------------+
+| 2023-01-01T00:00:01 |
++-----------------------------------------------------------------------------------------------+
select '2023-01-01T00:00:01'::timestamp + '2 years'::interval;
-+---------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| arrow_cast(Utf8("2023-01-01T00:00:01"),Utf8("Timestamp(Millisecond, None)")) + IntervalMonthDayNano("IntervalMonthDayNano { months: 24, days: 0, nanoseconds: 0 }") |
-+---------------------------------------------------------------------------------------------------------------------------------------------------------------------+
-| 2025-01-01T00:00:01 |
-+---------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++------------------------------------------------------------------------------------------------+
+| arrow_cast(Utf8("2023-01-01T00:00:01"),Utf8("Timestamp(Millisecond, None)")) + Utf8("2 years") |
++------------------------------------------------------------------------------------------------+
+| 2025-01-01T00:00:01 |
++------------------------------------------------------------------------------------------------+
-- DATE + INTERVAL
SELECT DATE '2000-10-30' + '1 days'::interval;
-+----------------------------------------------------------------------------------------------------------+
-| Utf8("2000-10-30") + IntervalMonthDayNano("IntervalMonthDayNano { months: 0, days: 1, nanoseconds: 0 }") |
-+----------------------------------------------------------------------------------------------------------+
-| 2000-10-31 |
-+----------------------------------------------------------------------------------------------------------+
++-------------------------------------+
+| Utf8("2000-10-30") + Utf8("1 days") |
++-------------------------------------+
+| 2000-10-31 |
++-------------------------------------+
SELECT DATE '2000-10-30' + '2 months'::interval;
-+----------------------------------------------------------------------------------------------------------+
-| Utf8("2000-10-30") + IntervalMonthDayNano("IntervalMonthDayNano { months: 2, days: 0, nanoseconds: 0 }") |
-+----------------------------------------------------------------------------------------------------------+
-| 2000-12-30 |
-+----------------------------------------------------------------------------------------------------------+
++---------------------------------------+
+| Utf8("2000-10-30") + Utf8("2 months") |
++---------------------------------------+
+| 2000-12-30 |
++---------------------------------------+
SELECT DATE '2000-10-30' + '2 years'::interval;
-+-----------------------------------------------------------------------------------------------------------+
-| Utf8("2000-10-30") + IntervalMonthDayNano("IntervalMonthDayNano { months: 24, days: 0, nanoseconds: 0 }") |
-+-----------------------------------------------------------------------------------------------------------+
-| 2002-10-30 |
-+-----------------------------------------------------------------------------------------------------------+
++--------------------------------------+
+| Utf8("2000-10-30") + Utf8("2 years") |
++--------------------------------------+
+| 2002-10-30 |
++--------------------------------------+
-- DATE - INTERVAL
SELECT DATE '2000-10-30' - '1 days'::interval;
-+----------------------------------------------------------------------------------------------------------+
-| Utf8("2000-10-30") - IntervalMonthDayNano("IntervalMonthDayNano { months: 0, days: 1, nanoseconds: 0 }") |
-+----------------------------------------------------------------------------------------------------------+
-| 2000-10-29 |
-+----------------------------------------------------------------------------------------------------------+
++-------------------------------------+
+| Utf8("2000-10-30") - Utf8("1 days") |
++-------------------------------------+
+| 2000-10-29 |
++-------------------------------------+
SELECT DATE '2000-10-30' - '2 months'::interval;
-+----------------------------------------------------------------------------------------------------------+
-| Utf8("2000-10-30") - IntervalMonthDayNano("IntervalMonthDayNano { months: 2, days: 0, nanoseconds: 0 }") |
-+----------------------------------------------------------------------------------------------------------+
-| 2000-08-30 |
-+----------------------------------------------------------------------------------------------------------+
++---------------------------------------+
+| Utf8("2000-10-30") - Utf8("2 months") |
++---------------------------------------+
+| 2000-08-30 |
++---------------------------------------+
SELECT DATE '2000-10-30' - '2 years'::interval;
-+-----------------------------------------------------------------------------------------------------------+
-| Utf8("2000-10-30") - IntervalMonthDayNano("IntervalMonthDayNano { months: 24, days: 0, nanoseconds: 0 }") |
-+-----------------------------------------------------------------------------------------------------------+
-| 1998-10-30 |
-+-----------------------------------------------------------------------------------------------------------+
++--------------------------------------+
+| Utf8("2000-10-30") - Utf8("2 years") |
++--------------------------------------+
+| 1998-10-30 |
++--------------------------------------+
|
fix
|
interval rewrite rule that messes up show create flow function (#5642)
|
b58296de22ec774c48e420d86d5d2bb80c82e06f
|
2023-12-28 12:26:17
|
Ruihang Xia
|
feat: Implement OR for PromQL (#3024)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 97e7f48e922e..abe0acb61213 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -6541,6 +6541,7 @@ dependencies = [
name = "promql"
version = "0.5.0"
dependencies = [
+ "ahash 0.8.6",
"async-recursion",
"async-trait",
"bytemuck",
diff --git a/src/promql/Cargo.toml b/src/promql/Cargo.toml
index a10973d4ebc1..6be12de4e343 100644
--- a/src/promql/Cargo.toml
+++ b/src/promql/Cargo.toml
@@ -5,6 +5,7 @@ edition.workspace = true
license.workspace = true
[dependencies]
+ahash.workspace = true
async-recursion = "1.0"
async-trait.workspace = true
bytemuck.workspace = true
diff --git a/src/promql/src/extension_plan.rs b/src/promql/src/extension_plan.rs
index 49a9199bf0cc..ff2195e532ee 100644
--- a/src/promql/src/extension_plan.rs
+++ b/src/promql/src/extension_plan.rs
@@ -19,6 +19,9 @@ mod normalize;
mod planner;
mod range_manipulate;
mod series_divide;
+#[cfg(test)]
+mod test_util;
+mod union_distinct_on;
use datafusion::arrow::datatypes::{ArrowPrimitiveType, TimestampMillisecondType};
pub use empty_metric::{build_special_time_expr, EmptyMetric, EmptyMetricExec, EmptyMetricStream};
@@ -28,5 +31,6 @@ pub use normalize::{SeriesNormalize, SeriesNormalizeExec, SeriesNormalizeStream}
pub use planner::PromExtensionPlanner;
pub use range_manipulate::{RangeManipulate, RangeManipulateExec, RangeManipulateStream};
pub use series_divide::{SeriesDivide, SeriesDivideExec, SeriesDivideStream};
+pub use union_distinct_on::{UnionDistinctOn, UnionDistinctOnExec, UnionDistinctOnStream};
pub(crate) type Millisecond = <TimestampMillisecondType as ArrowPrimitiveType>::Native;
diff --git a/src/promql/src/extension_plan/instant_manipulate.rs b/src/promql/src/extension_plan/instant_manipulate.rs
index ba155627d2c5..e65592bb374e 100644
--- a/src/promql/src/extension_plan/instant_manipulate.rs
+++ b/src/promql/src/extension_plan/instant_manipulate.rs
@@ -445,40 +445,12 @@ impl InstantManipulateStream {
#[cfg(test)]
mod test {
- use datafusion::arrow::array::Float64Array;
- use datafusion::arrow::datatypes::{
- ArrowPrimitiveType, DataType, Field, Schema, TimestampMillisecondType,
- };
- use datafusion::physical_plan::memory::MemoryExec;
use datafusion::prelude::SessionContext;
- use datatypes::arrow::array::TimestampMillisecondArray;
- use datatypes::arrow_array::StringArray;
use super::*;
-
- const TIME_INDEX_COLUMN: &str = "timestamp";
-
- fn prepare_test_data() -> MemoryExec {
- let schema = Arc::new(Schema::new(vec![
- Field::new(TIME_INDEX_COLUMN, TimestampMillisecondType::DATA_TYPE, true),
- Field::new("value", DataType::Float64, true),
- Field::new("path", DataType::Utf8, true),
- ]));
- let timestamp_column = Arc::new(TimestampMillisecondArray::from(vec![
- 0, 30_000, 60_000, 90_000, 120_000, // every 30s
- 180_000, 240_000, // every 60s
- 241_000, 271_000, 291_000, // others
- ])) as _;
- let field_column = Arc::new(Float64Array::from(vec![1.0; 10])) as _;
- let path_column = Arc::new(StringArray::from(vec!["foo"; 10])) as _;
- let data = RecordBatch::try_new(
- schema.clone(),
- vec![timestamp_column, field_column, path_column],
- )
- .unwrap();
-
- MemoryExec::try_new(&[vec![data]], schema, None).unwrap()
- }
+ use crate::extension_plan::test_util::{
+ prepare_test_data, prepare_test_data_with_nan, TIME_INDEX_COLUMN,
+ };
async fn do_normalize_test(
start: Millisecond,
@@ -749,22 +721,6 @@ mod test {
do_normalize_test(190_000, 300_000, 30_000, 10_000, expected, false).await;
}
- fn prepare_test_data_with_nan() -> MemoryExec {
- let schema = Arc::new(Schema::new(vec![
- Field::new(TIME_INDEX_COLUMN, TimestampMillisecondType::DATA_TYPE, true),
- Field::new("value", DataType::Float64, true),
- ]));
- let timestamp_column = Arc::new(TimestampMillisecondArray::from(vec![
- 0, 30_000, 60_000, 90_000, 120_000, // every 30s
- ])) as _;
- let field_column =
- Arc::new(Float64Array::from(vec![0.0, f64::NAN, 6.0, f64::NAN, 12.0])) as _;
- let data =
- RecordBatch::try_new(schema.clone(), vec![timestamp_column, field_column]).unwrap();
-
- MemoryExec::try_new(&[vec![data]], schema, None).unwrap()
- }
-
#[tokio::test]
async fn lookback_10s_interval_10s_with_nan() {
let expected = String::from(
diff --git a/src/promql/src/extension_plan/planner.rs b/src/promql/src/extension_plan/planner.rs
index 7798c9b32193..80cd565bd20a 100644
--- a/src/promql/src/extension_plan/planner.rs
+++ b/src/promql/src/extension_plan/planner.rs
@@ -21,7 +21,7 @@ use datafusion::logical_expr::{LogicalPlan, UserDefinedLogicalNode};
use datafusion::physical_plan::ExecutionPlan;
use datafusion::physical_planner::{ExtensionPlanner, PhysicalPlanner};
-use super::HistogramFold;
+use super::{HistogramFold, UnionDistinctOn};
use crate::extension_plan::{
EmptyMetric, InstantManipulate, RangeManipulate, SeriesDivide, SeriesNormalize,
};
@@ -50,6 +50,11 @@ impl ExtensionPlanner for PromExtensionPlanner {
Ok(Some(node.to_execution_plan(session_state, planner)?))
} else if let Some(node) = node.as_any().downcast_ref::<HistogramFold>() {
Ok(Some(node.to_execution_plan(physical_inputs[0].clone())))
+ } else if let Some(node) = node.as_any().downcast_ref::<UnionDistinctOn>() {
+ Ok(Some(node.to_execution_plan(
+ physical_inputs[0].clone(),
+ physical_inputs[1].clone(),
+ )))
} else {
Ok(None)
}
diff --git a/src/promql/src/extension_plan/test_util.rs b/src/promql/src/extension_plan/test_util.rs
new file mode 100644
index 000000000000..f751cb9fa84b
--- /dev/null
+++ b/src/promql/src/extension_plan/test_util.rs
@@ -0,0 +1,64 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Utils for testing extension plan
+
+use std::sync::Arc;
+
+use common_recordbatch::DfRecordBatch as RecordBatch;
+use datafusion::arrow::array::Float64Array;
+use datafusion::arrow::datatypes::{
+ ArrowPrimitiveType, DataType, Field, Schema, TimestampMillisecondType,
+};
+use datafusion::physical_plan::memory::MemoryExec;
+use datatypes::arrow::array::TimestampMillisecondArray;
+use datatypes::arrow_array::StringArray;
+
+pub(crate) const TIME_INDEX_COLUMN: &str = "timestamp";
+
+pub(crate) fn prepare_test_data() -> MemoryExec {
+ let schema = Arc::new(Schema::new(vec![
+ Field::new(TIME_INDEX_COLUMN, TimestampMillisecondType::DATA_TYPE, true),
+ Field::new("value", DataType::Float64, true),
+ Field::new("path", DataType::Utf8, true),
+ ]));
+ let timestamp_column = Arc::new(TimestampMillisecondArray::from(vec![
+ 0, 30_000, 60_000, 90_000, 120_000, // every 30s
+ 180_000, 240_000, // every 60s
+ 241_000, 271_000, 291_000, // others
+ ])) as _;
+ let field_column = Arc::new(Float64Array::from(vec![1.0; 10])) as _;
+ let path_column = Arc::new(StringArray::from(vec!["foo"; 10])) as _;
+ let data = RecordBatch::try_new(
+ schema.clone(),
+ vec![timestamp_column, field_column, path_column],
+ )
+ .unwrap();
+
+ MemoryExec::try_new(&[vec![data]], schema, None).unwrap()
+}
+
+pub(crate) fn prepare_test_data_with_nan() -> MemoryExec {
+ let schema = Arc::new(Schema::new(vec![
+ Field::new(TIME_INDEX_COLUMN, TimestampMillisecondType::DATA_TYPE, true),
+ Field::new("value", DataType::Float64, true),
+ ]));
+ let timestamp_column = Arc::new(TimestampMillisecondArray::from(vec![
+ 0, 30_000, 60_000, 90_000, 120_000, // every 30s
+ ])) as _;
+ let field_column = Arc::new(Float64Array::from(vec![0.0, f64::NAN, 6.0, f64::NAN, 12.0])) as _;
+ let data = RecordBatch::try_new(schema.clone(), vec![timestamp_column, field_column]).unwrap();
+
+ MemoryExec::try_new(&[vec![data]], schema, None).unwrap()
+}
diff --git a/src/promql/src/extension_plan/union_distinct_on.rs b/src/promql/src/extension_plan/union_distinct_on.rs
new file mode 100644
index 000000000000..22551b73f810
--- /dev/null
+++ b/src/promql/src/extension_plan/union_distinct_on.rs
@@ -0,0 +1,576 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::any::Any;
+use std::pin::Pin;
+use std::sync::Arc;
+use std::task::{Context, Poll};
+
+use ahash::{HashMap, RandomState};
+use datafusion::arrow::array::UInt64Array;
+use datafusion::arrow::datatypes::SchemaRef;
+use datafusion::arrow::record_batch::RecordBatch;
+use datafusion::common::DFSchemaRef;
+use datafusion::error::{DataFusionError, Result as DataFusionResult};
+use datafusion::execution::context::TaskContext;
+use datafusion::logical_expr::{Expr, LogicalPlan, UserDefinedLogicalNodeCore};
+use datafusion::physical_expr::PhysicalSortExpr;
+use datafusion::physical_plan::metrics::{BaselineMetrics, ExecutionPlanMetricsSet, MetricsSet};
+use datafusion::physical_plan::{
+ hash_utils, DisplayAs, DisplayFormatType, Distribution, ExecutionPlan, Partitioning,
+ RecordBatchStream, SendableRecordBatchStream, Statistics,
+};
+use datatypes::arrow::compute;
+use futures::future::BoxFuture;
+use futures::{ready, Stream, StreamExt, TryStreamExt};
+
+/// A special kind of `UNION`(`OR` in PromQL) operator, for PromQL specific use case.
+///
+/// This operator is similar to `UNION` from SQL, but it only accepts two inputs. The
+/// main difference is that it treats the left and right children differently:
+/// - All columns from the left child are output.
+/// - Only check collisions (when not distinct) on the columns specified by `compare_keys`.
+/// - When there is a collision:
+/// - If the collision is from right child itself, only the first observed row will be
+/// preserved. All others are discarded.
+/// - If the collision is from left child, the row in right child will be discarded.
+/// - The output order is not maintained. This plan will output left child first, then right child.
+/// - The output schema contains all columns from left or right child plans.
+///
+/// From the implementation perspective, this operator is similar to `HashJoin`, but the
+/// probe side is the right child, and the build side is the left child. Another difference
+/// is that the probe is opt-out: rows that find a match are discarded rather than emitted.
+///
+/// This plan exhausts the right child first to build the probe hash table, then streams the
+/// left side and uses it to "mask" the hash table.
+#[derive(Debug, PartialEq, Eq, Hash)]
+pub struct UnionDistinctOn {
+ left: LogicalPlan,
+ right: LogicalPlan,
+ /// The columns to compare for equality.
+ /// TIME INDEX is included.
+ compare_keys: Vec<String>,
+ ts_col: String,
+ output_schema: DFSchemaRef,
+}
+
+impl UnionDistinctOn {
+ pub fn name() -> &'static str {
+ "UnionDistinctOn"
+ }
+
+ pub fn new(
+ left: LogicalPlan,
+ right: LogicalPlan,
+ compare_keys: Vec<String>,
+ ts_col: String,
+ output_schema: DFSchemaRef,
+ ) -> Self {
+ Self {
+ left,
+ right,
+ compare_keys,
+ ts_col,
+ output_schema,
+ }
+ }
+
+ pub fn to_execution_plan(
+ &self,
+ left_exec: Arc<dyn ExecutionPlan>,
+ right_exec: Arc<dyn ExecutionPlan>,
+ ) -> Arc<dyn ExecutionPlan> {
+ Arc::new(UnionDistinctOnExec {
+ left: left_exec,
+ right: right_exec,
+ compare_keys: self.compare_keys.clone(),
+ ts_col: self.ts_col.clone(),
+ output_schema: Arc::new(self.output_schema.as_ref().into()),
+ metric: ExecutionPlanMetricsSet::new(),
+ random_state: RandomState::new(),
+ })
+ }
+}
+
+impl UserDefinedLogicalNodeCore for UnionDistinctOn {
+ fn name(&self) -> &str {
+ Self::name()
+ }
+
+ fn inputs(&self) -> Vec<&LogicalPlan> {
+ vec![&self.left, &self.right]
+ }
+
+ fn schema(&self) -> &DFSchemaRef {
+ &self.output_schema
+ }
+
+ fn expressions(&self) -> Vec<Expr> {
+ vec![]
+ }
+
+ fn fmt_for_explain(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(
+ f,
+ "UnionDistinctOn: on col=[{:?}], ts_col=[{}]",
+ self.compare_keys, self.ts_col
+ )
+ }
+
+ fn from_template(&self, _exprs: &[Expr], inputs: &[LogicalPlan]) -> Self {
+ assert_eq!(inputs.len(), 2);
+
+ let left = inputs[0].clone();
+ let right = inputs[1].clone();
+ Self {
+ left,
+ right,
+ compare_keys: self.compare_keys.clone(),
+ ts_col: self.ts_col.clone(),
+ output_schema: self.output_schema.clone(),
+ }
+ }
+}
+
+#[derive(Debug)]
+pub struct UnionDistinctOnExec {
+ left: Arc<dyn ExecutionPlan>,
+ right: Arc<dyn ExecutionPlan>,
+ compare_keys: Vec<String>,
+ ts_col: String,
+ output_schema: SchemaRef,
+ metric: ExecutionPlanMetricsSet,
+
+    /// Shared `RandomState` for the hashing algorithm
+ random_state: RandomState,
+}
+
+impl ExecutionPlan for UnionDistinctOnExec {
+ fn as_any(&self) -> &dyn Any {
+ self
+ }
+
+ fn schema(&self) -> SchemaRef {
+ self.output_schema.clone()
+ }
+
+ fn required_input_distribution(&self) -> Vec<Distribution> {
+ vec![Distribution::SinglePartition, Distribution::SinglePartition]
+ }
+
+ fn output_partitioning(&self) -> Partitioning {
+ Partitioning::UnknownPartitioning(1)
+ }
+
+ /// [UnionDistinctOnExec] will output left first, then right.
+ /// So the order of the output is not maintained.
+ fn output_ordering(&self) -> Option<&[PhysicalSortExpr]> {
+ None
+ }
+
+ fn children(&self) -> Vec<Arc<dyn ExecutionPlan>> {
+ vec![self.left.clone(), self.right.clone()]
+ }
+
+ fn with_new_children(
+ self: Arc<Self>,
+ children: Vec<Arc<dyn ExecutionPlan>>,
+ ) -> DataFusionResult<Arc<dyn ExecutionPlan>> {
+ assert_eq!(children.len(), 2);
+
+ let left = children[0].clone();
+ let right = children[1].clone();
+ Ok(Arc::new(UnionDistinctOnExec {
+ left,
+ right,
+ compare_keys: self.compare_keys.clone(),
+ ts_col: self.ts_col.clone(),
+ output_schema: self.output_schema.clone(),
+ metric: self.metric.clone(),
+ random_state: self.random_state.clone(),
+ }))
+ }
+
+ fn execute(
+ &self,
+ partition: usize,
+ context: Arc<TaskContext>,
+ ) -> DataFusionResult<SendableRecordBatchStream> {
+ let left_stream = self.left.execute(partition, context.clone())?;
+ let right_stream = self.right.execute(partition, context.clone())?;
+
+ // Convert column name to column index. Add one for the time column.
+ let mut key_indices = Vec::with_capacity(self.compare_keys.len() + 1);
+ for key in &self.compare_keys {
+ let index = self
+ .output_schema
+ .column_with_name(key)
+ .map(|(i, _)| i)
+ .ok_or_else(|| DataFusionError::Internal(format!("Column {} not found", key)))?;
+ key_indices.push(index);
+ }
+ let ts_index = self
+ .output_schema
+ .column_with_name(&self.ts_col)
+ .map(|(i, _)| i)
+ .ok_or_else(|| {
+ DataFusionError::Internal(format!("Column {} not found", self.ts_col))
+ })?;
+ key_indices.push(ts_index);
+
+ // Build right hash table future.
+ let hashed_data_future = HashedDataFut::Pending(Box::pin(HashedData::new(
+ right_stream,
+ self.random_state.clone(),
+ key_indices.clone(),
+ )));
+
+ let baseline_metric = BaselineMetrics::new(&self.metric, partition);
+ Ok(Box::pin(UnionDistinctOnStream {
+ left: left_stream,
+ right: hashed_data_future,
+ compare_keys: key_indices,
+ output_schema: self.output_schema.clone(),
+ metric: baseline_metric,
+ }))
+ }
+
+ fn metrics(&self) -> Option<MetricsSet> {
+ Some(self.metric.clone_inner())
+ }
+
+ fn statistics(&self) -> Statistics {
+ Statistics::default()
+ }
+}
+
+impl DisplayAs for UnionDistinctOnExec {
+ fn fmt_as(&self, t: DisplayFormatType, f: &mut std::fmt::Formatter) -> std::fmt::Result {
+ match t {
+ DisplayFormatType::Default | DisplayFormatType::Verbose => {
+ write!(
+ f,
+ "UnionDistinctOnExec: on col=[{:?}], ts_col=[{}]",
+ self.compare_keys, self.ts_col
+ )
+ }
+ }
+ }
+}
+
+// TODO(ruihang): some unused fields are for metrics, which will be implemented later.
+#[allow(dead_code)]
+pub struct UnionDistinctOnStream {
+ left: SendableRecordBatchStream,
+ right: HashedDataFut,
+ /// Include time index
+ compare_keys: Vec<usize>,
+ output_schema: SchemaRef,
+ metric: BaselineMetrics,
+}
+
+impl UnionDistinctOnStream {
+ fn poll_impl(&mut self, cx: &mut Context<'_>) -> Poll<Option<<Self as Stream>::Item>> {
+ // resolve the right stream
+ let right = match self.right {
+ HashedDataFut::Pending(ref mut fut) => {
+ let right = ready!(fut.as_mut().poll(cx))?;
+ self.right = HashedDataFut::Ready(right);
+ let HashedDataFut::Ready(right_ref) = &mut self.right else {
+ unreachable!()
+ };
+ right_ref
+ }
+ HashedDataFut::Ready(ref mut right) => right,
+ HashedDataFut::Empty => return Poll::Ready(None),
+ };
+
+ // poll left and probe with right
+ let next_left = ready!(self.left.poll_next_unpin(cx));
+ match next_left {
+ Some(Ok(left)) => {
+ // observe left batch and return it
+ right.update_map(&left)?;
+ Poll::Ready(Some(Ok(left)))
+ }
+ Some(Err(e)) => Poll::Ready(Some(Err(e))),
+ None => {
+ // left stream is exhausted, so we can send the right part
+ let right = std::mem::replace(&mut self.right, HashedDataFut::Empty);
+ let HashedDataFut::Ready(data) = right else {
+ unreachable!()
+ };
+ Poll::Ready(Some(data.finish()))
+ }
+ }
+ }
+}
+
+impl RecordBatchStream for UnionDistinctOnStream {
+ fn schema(&self) -> SchemaRef {
+ self.output_schema.clone()
+ }
+}
+
+impl Stream for UnionDistinctOnStream {
+ type Item = DataFusionResult<RecordBatch>;
+
+ fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ self.poll_impl(cx)
+ }
+}
+
+/// Simple future state for [HashedData]
+enum HashedDataFut {
+ /// The result is not ready
+ Pending(BoxFuture<'static, DataFusionResult<HashedData>>),
+ /// The result is ready
+ Ready(HashedData),
+ /// The result is taken
+ Empty,
+}
+
+/// All input batches (interleaved into one batch) and their hash table
+struct HashedData {
+ // TODO(ruihang): use `JoinHashMap` instead after upgrading to DF 34.0
+    /// Hash table over all input batches. The key is the hash value, and the value
+    /// is the row index in `batch`.
+ hash_map: HashMap<u64, usize>,
+ /// Output batch.
+ batch: RecordBatch,
+ /// The indices of the columns to be hashed.
+ hash_key_indices: Vec<usize>,
+ random_state: RandomState,
+}
+
+impl HashedData {
+ pub async fn new(
+ input: SendableRecordBatchStream,
+ random_state: RandomState,
+ hash_key_indices: Vec<usize>,
+ ) -> DataFusionResult<Self> {
+ // Collect all batches from the input stream
+ let initial = (Vec::new(), 0);
+ let (batches, _num_rows) = input
+ .try_fold(initial, |mut acc, batch| async {
+ // Update rowcount
+ acc.1 += batch.num_rows();
+ // Push batch to output
+ acc.0.push(batch);
+ Ok(acc)
+ })
+ .await?;
+
+ // Create hash for each batch
+ let mut hash_map = HashMap::default();
+ let mut hashes_buffer = Vec::new();
+ let mut interleave_indices = Vec::new();
+ for (batch_number, batch) in batches.iter().enumerate() {
+ hashes_buffer.resize(batch.num_rows(), 0);
+ // get columns for hashing
+ let arrays = hash_key_indices
+ .iter()
+ .map(|i| batch.column(*i).clone())
+ .collect::<Vec<_>>();
+
+ // compute hash
+ let hash_values =
+ hash_utils::create_hashes(&arrays, &random_state, &mut hashes_buffer)?;
+ for (row_number, hash_value) in hash_values.iter().enumerate() {
+ // Only keeps the first observed row for each hash value
+ if hash_map
+ .try_insert(*hash_value, interleave_indices.len())
+ .is_ok()
+ {
+ interleave_indices.push((batch_number, row_number));
+ }
+ }
+ }
+
+        // Finalize the hash map
+ let batch = interleave_batches(batches, interleave_indices)?;
+
+ Ok(Self {
+ hash_map,
+ batch,
+ hash_key_indices,
+ random_state,
+ })
+ }
+
+    /// Remove rows whose hash values are present in the input
+    /// record batch from the hash map.
+ pub fn update_map(&mut self, input: &RecordBatch) -> DataFusionResult<()> {
+ // get columns for hashing
+ let mut hashes_buffer = Vec::new();
+ let arrays = self
+ .hash_key_indices
+ .iter()
+ .map(|i| input.column(*i).clone())
+ .collect::<Vec<_>>();
+
+ // compute hash
+ hashes_buffer.resize(input.num_rows(), 0);
+ let hash_values =
+ hash_utils::create_hashes(&arrays, &self.random_state, &mut hashes_buffer)?;
+
+ // remove those hashes
+ for hash in hash_values {
+ self.hash_map.remove(hash);
+ }
+
+ Ok(())
+ }
+
+ pub fn finish(self) -> DataFusionResult<RecordBatch> {
+ let valid_indices = self.hash_map.values().copied().collect::<Vec<_>>();
+ let result = take_batch(&self.batch, &valid_indices)?;
+ Ok(result)
+ }
+}
+
+/// Utility function to interleave batches. Based on [interleave](datafusion::arrow::compute::interleave)
+fn interleave_batches(
+ batches: Vec<RecordBatch>,
+ indices: Vec<(usize, usize)>,
+) -> DataFusionResult<RecordBatch> {
+ let schema = batches[0].schema();
+
+ // transform batches into arrays
+ let mut arrays = vec![vec![]; schema.fields().len()];
+ for batch in &batches {
+ for (i, array) in batch.columns().iter().enumerate() {
+ arrays[i].push(array.as_ref());
+ }
+ }
+
+ // interleave arrays
+ let mut interleaved_arrays = Vec::with_capacity(arrays.len());
+ for array in arrays {
+ interleaved_arrays.push(compute::interleave(&array, &indices)?);
+ }
+
+ // assemble new record batch
+ RecordBatch::try_new(schema.clone(), interleaved_arrays).map_err(DataFusionError::ArrowError)
+}
+
+/// Utility function to take rows from a record batch. Based on [take](datafusion::arrow::compute::take)
+fn take_batch(batch: &RecordBatch, indices: &[usize]) -> DataFusionResult<RecordBatch> {
+ // fast path
+ if batch.num_rows() == indices.len() {
+ return Ok(batch.clone());
+ }
+
+ let schema = batch.schema();
+
+ let indices_array = UInt64Array::from_iter(indices.iter().map(|i| *i as u64));
+ let arrays = batch
+ .columns()
+ .iter()
+ .map(|array| compute::take(array, &indices_array, None))
+ .collect::<std::result::Result<Vec<_>, _>>()
+ .map_err(DataFusionError::ArrowError)?;
+
+ let result = RecordBatch::try_new(schema, arrays).map_err(DataFusionError::ArrowError)?;
+ Ok(result)
+}
+
+#[cfg(test)]
+mod test {
+ use datafusion::arrow::array::Int32Array;
+ use datafusion::arrow::datatypes::{DataType, Field, Schema};
+
+ use super::*;
+
+ #[test]
+ fn test_interleave_batches() {
+ let schema = Schema::new(vec![
+ Field::new("a", DataType::Int32, false),
+ Field::new("b", DataType::Int32, false),
+ ]);
+
+ let batch1 = RecordBatch::try_new(
+ Arc::new(schema.clone()),
+ vec![
+ Arc::new(Int32Array::from(vec![1, 2, 3])),
+ Arc::new(Int32Array::from(vec![4, 5, 6])),
+ ],
+ )
+ .unwrap();
+
+ let batch2 = RecordBatch::try_new(
+ Arc::new(schema.clone()),
+ vec![
+ Arc::new(Int32Array::from(vec![7, 8, 9])),
+ Arc::new(Int32Array::from(vec![10, 11, 12])),
+ ],
+ )
+ .unwrap();
+
+ let batch3 = RecordBatch::try_new(
+ Arc::new(schema.clone()),
+ vec![
+ Arc::new(Int32Array::from(vec![13, 14, 15])),
+ Arc::new(Int32Array::from(vec![16, 17, 18])),
+ ],
+ )
+ .unwrap();
+
+ let batches = vec![batch1, batch2, batch3];
+ let indices = vec![(0, 0), (1, 0), (2, 0), (0, 1), (1, 1), (2, 1)];
+ let result = interleave_batches(batches, indices).unwrap();
+
+ let expected = RecordBatch::try_new(
+ Arc::new(schema),
+ vec![
+ Arc::new(Int32Array::from(vec![1, 7, 13, 2, 8, 14])),
+ Arc::new(Int32Array::from(vec![4, 10, 16, 5, 11, 17])),
+ ],
+ )
+ .unwrap();
+
+ assert_eq!(result, expected);
+ }
+
+ #[test]
+ fn test_take_batch() {
+ let schema = Schema::new(vec![
+ Field::new("a", DataType::Int32, false),
+ Field::new("b", DataType::Int32, false),
+ ]);
+
+ let batch = RecordBatch::try_new(
+ Arc::new(schema.clone()),
+ vec![
+ Arc::new(Int32Array::from(vec![1, 2, 3])),
+ Arc::new(Int32Array::from(vec![4, 5, 6])),
+ ],
+ )
+ .unwrap();
+
+ let indices = vec![0, 2];
+ let result = take_batch(&batch, &indices).unwrap();
+
+ let expected = RecordBatch::try_new(
+ Arc::new(schema),
+ vec![
+ Arc::new(Int32Array::from(vec![1, 3])),
+ Arc::new(Int32Array::from(vec![4, 6])),
+ ],
+ )
+ .unwrap();
+
+ assert_eq!(result, expected);
+ }
+}
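
A simplified model of what UnionDistinctOnExec computes, using plain (key, value) tuples and std HashMap in place of record batches, ahash, and create_hashes (an illustrative sketch, not the executor itself): hash the right input on the compare columns keeping the first row per key, stream the left input while removing its keys from that table, then append the surviving right rows after the left rows.

use std::collections::HashMap;

// `key` stands for the hashed compare columns (tags + time index),
// `val` for the remaining columns of the row.
fn union_distinct_on(left: &[(u64, &str)], right: &[(u64, &str)]) -> Vec<(u64, String)> {
    // Build side: keep only the first right-hand row observed for each key.
    let mut right_map: HashMap<u64, &str> = HashMap::new();
    for &(key, val) in right {
        right_map.entry(key).or_insert(val);
    }

    let mut out = Vec::new();
    // Probe side: every left row is emitted and "masks" the same key on the right.
    for &(key, val) in left {
        right_map.remove(&key);
        out.push((key, val.to_string()));
    }
    // Finally emit the right rows that were not masked by the left input.
    for (key, val) in right_map {
        out.push((key, val.to_string()));
    }
    out
}

fn main() {
    let left = [(1, "left-1"), (3, "left-3")];
    let right = [(1, "right-1"), (2, "right-2"), (2, "right-2-dup")];
    let mut result = union_distinct_on(&left, &right);
    result.sort();

    // Key 1 keeps the left row, key 2 keeps only the first right row, key 3 is left-only.
    let expected: Vec<(u64, String)> = vec![
        (1, "left-1".to_string()),
        (2, "right-2".to_string()),
        (3, "left-3".to_string()),
    ];
    assert_eq!(result, expected);
    println!("{result:?}");
}
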
diff --git a/src/promql/src/lib.rs b/src/promql/src/lib.rs
index 9514a015380b..127bf45d5f1a 100644
--- a/src/promql/src/lib.rs
+++ b/src/promql/src/lib.rs
@@ -14,6 +14,7 @@
#![feature(option_get_or_insert_default)]
#![feature(let_chains)]
+#![feature(map_try_insert)]
pub mod error;
pub mod extension_plan;
diff --git a/src/promql/src/planner.rs b/src/promql/src/planner.rs
index 137035755bd5..7c8176d7b95e 100644
--- a/src/promql/src/planner.rs
+++ b/src/promql/src/planner.rs
@@ -51,7 +51,7 @@ use crate::error::{
};
use crate::extension_plan::{
build_special_time_expr, EmptyMetric, HistogramFold, InstantManipulate, Millisecond,
- RangeManipulate, SeriesDivide, SeriesNormalize,
+ RangeManipulate, SeriesDivide, SeriesNormalize, UnionDistinctOn,
};
use crate::functions::{
AbsentOverTime, AvgOverTime, Changes, CountOverTime, Delta, Deriv, HoltWinters, IDelta,
@@ -1489,6 +1489,7 @@ impl PromPlanner {
.context(DataFusionPlanningSnafu)
}
+ /// Build a set operator (AND/OR/UNLESS)
fn set_op_on_non_field_columns(
&self,
left: LogicalPlan,
@@ -1501,6 +1502,10 @@ impl PromPlanner {
let mut left_tag_col_set = left_tag_cols.into_iter().collect::<HashSet<_>>();
let mut right_tag_col_set = right_tag_cols.into_iter().collect::<HashSet<_>>();
+ if matches!(op.id(), token::T_LOR) {
+ return self.or_operator(left, right, left_tag_col_set, right_tag_col_set, modifier);
+ }
+
// apply modifier
if let Some(modifier) = modifier {
// one-to-many and many-to-one are not supported
@@ -1545,7 +1550,8 @@ impl PromPlanner {
)
};
let join_keys = left_tag_col_set
- .into_iter()
+ .iter()
+ .cloned()
.chain([self.ctx.time_index_column.clone().unwrap()])
.collect::<Vec<_>>();
@@ -1579,17 +1585,122 @@ impl PromPlanner {
.build()
.context(DataFusionPlanningSnafu),
token::T_LOR => {
- // `OR` can not be expressed by `UNION` precisely.
- // it will generate unexpceted result when schemas don't match
- UnsupportedExprSnafu {
- name: "set operation `OR`",
- }
- .fail()
+ self.or_operator(left, right, left_tag_col_set, right_tag_col_set, modifier)
}
_ => UnexpectedTokenSnafu { token: op }.fail(),
}
}
+ // TODO(ruihang): change function name
+ fn or_operator(
+ &self,
+ left: LogicalPlan,
+ right: LogicalPlan,
+ left_tag_cols_set: HashSet<String>,
+ right_tag_cols_set: HashSet<String>,
+ modifier: &Option<BinModifier>,
+ ) -> Result<LogicalPlan> {
+ // prepare hash sets
+ let all_tags = left_tag_cols_set
+ .union(&right_tag_cols_set)
+ .cloned()
+ .collect::<HashSet<_>>();
+ let tags_not_in_left = all_tags
+ .difference(&left_tag_cols_set)
+ .cloned()
+ .collect::<Vec<_>>();
+ let tags_not_in_right = all_tags
+ .difference(&right_tag_cols_set)
+ .cloned()
+ .collect::<Vec<_>>();
+ let left_qualifier = left.schema().field(0).qualifier().cloned();
+ let right_qualifier = right.schema().field(0).qualifier().cloned();
+ let left_qualifier_string = left_qualifier
+ .as_ref()
+ .map(|l| l.to_string())
+ .unwrap_or_default();
+ let right_qualifier_string = right_qualifier
+ .as_ref()
+ .map(|r| r.to_string())
+ .unwrap_or_default();
+
+ // step 0: fill all columns in output schema
+ let all_columns_set = left
+ .schema()
+ .fields()
+ .iter()
+ .chain(right.schema().fields().iter())
+ .map(|field| field.name().clone())
+ .collect::<HashSet<_>>();
+ let mut all_columns = all_columns_set.into_iter().collect::<Vec<_>>();
+ // sort to ensure the generated schema is not volatile
+ all_columns.sort_unstable();
+
+ // step 1: align schema using project, fill non-exist columns with null
+ let left_proj_exprs = all_columns.iter().map(|col| {
+ if tags_not_in_left.contains(col) {
+ DfExpr::Literal(ScalarValue::Utf8(None)).alias(col.to_string())
+ } else {
+ DfExpr::Column(Column::new(left_qualifier.clone(), col))
+ }
+ });
+ let right_proj_exprs = all_columns.iter().map(|col| {
+ if tags_not_in_right.contains(col) {
+ DfExpr::Literal(ScalarValue::Utf8(None)).alias(col.to_string())
+ } else {
+ DfExpr::Column(Column::new(right_qualifier.clone(), col))
+ }
+ });
+
+ let left_projected = LogicalPlanBuilder::from(left)
+ .project(left_proj_exprs)
+ .context(DataFusionPlanningSnafu)?
+ .alias(left_qualifier_string.clone())
+ .context(DataFusionPlanningSnafu)?
+ .build()
+ .context(DataFusionPlanningSnafu)?;
+ let right_projected = LogicalPlanBuilder::from(right)
+ .project(right_proj_exprs)
+ .context(DataFusionPlanningSnafu)?
+ .alias(right_qualifier_string.clone())
+ .context(DataFusionPlanningSnafu)?
+ .build()
+ .context(DataFusionPlanningSnafu)?;
+
+ // step 2: compute match columns
+ let mut match_columns = if let Some(modifier) = modifier
+ && let Some(matching) = &modifier.matching
+ {
+ match matching {
+ // keeps columns mentioned in `on`
+ LabelModifier::Include(on) => on.labels.clone(),
+                // removes columns mentioned in `ignoring`
+ LabelModifier::Exclude(ignoring) => {
+ let ignoring = ignoring.labels.iter().cloned().collect::<HashSet<_>>();
+ all_tags.difference(&ignoring).cloned().collect()
+ }
+ }
+ } else {
+ all_tags.iter().cloned().collect()
+ };
+ // sort to ensure the generated plan is not volatile
+ match_columns.sort_unstable();
+ // step 3: build `UnionDistinctOn` plan
+ let schema = left_projected.schema().clone();
+ let union_distinct_on = UnionDistinctOn::new(
+ left_projected,
+ right_projected,
+ match_columns,
+ self.ctx.time_index_column.clone().unwrap(),
+ schema,
+ );
+ let result = LogicalPlan::Extension(Extension {
+ node: Arc::new(union_distinct_on),
+ });
+
+ Ok(result)
+ }
+
/// Build a projection that project and perform operation expr for every value columns.
/// Non-value columns (tag and timestamp) will be preserved in the projection.
///
diff --git a/tests/cases/standalone/common/promql/set_operation.result b/tests/cases/standalone/common/promql/set_operation.result
index d14b6fe88bc1..15a7a865a317 100644
--- a/tests/cases/standalone/common/promql/set_operation.result
+++ b/tests/cases/standalone/common/promql/set_operation.result
@@ -130,10 +130,21 @@ tql eval (3000, 3000, '1s') (http_requests{g="canary"} + 1) and ignoring(g, job)
-- http_requests{group="production", instance="0", job="app-server"} 500
-- http_requests{group="production", instance="1", job="api-server"} 200
-- http_requests{group="production", instance="1", job="app-server"} 600
--- NOT SUPPORTED: `or`
+-- SQLNESS SORT_RESULT 3 1
tql eval (3000, 3000, '1s') http_requests{g="canary"} or http_requests{g="production"};
-Error: 1004(InvalidArguments), Unsupported expr type: set operation `OR`
++------------+----------+-----+---------------------+-------+
+| g | instance | job | ts | val |
++------------+----------+-----+---------------------+-------+
+| canary | 0 | api | 1970-01-01T00:50:00 | 300.0 |
+| canary | 0 | app | 1970-01-01T00:50:00 | 700.0 |
+| canary | 1 | api | 1970-01-01T00:50:00 | 400.0 |
+| canary | 1 | app | 1970-01-01T00:50:00 | 800.0 |
+| production | 0 | api | 1970-01-01T00:50:00 | 100.0 |
+| production | 0 | app | 1970-01-01T00:50:00 | 500.0 |
+| production | 1 | api | 1970-01-01T00:50:00 | 200.0 |
+| production | 1 | app | 1970-01-01T00:50:00 | 600.0 |
++------------+----------+-----+---------------------+-------+
-- # On overlap the rhs samples must be dropped.
-- eval instant at 50m (http_requests{group="canary"} + 1) or http_requests{instance="1"}
@@ -143,10 +154,10 @@ Error: 1004(InvalidArguments), Unsupported expr type: set operation `OR`
-- {group="canary", instance="1", job="app-server"} 801
-- http_requests{group="production", instance="1", job="api-server"} 200
-- http_requests{group="production", instance="1", job="app-server"} 600
--- NOT SUPPORTED: `or`
+-- SQLNESS SORT_RESULT 3 1
tql eval (3000, 3000, '1s') (http_requests{g="canary"} + 1) or http_requests{instance="1"};
-Error: 1004(InvalidArguments), Unsupported expr type: set operation `OR`
+Error: 1004(InvalidArguments), Internal error during building DataFusion plan: No field named http_requests.val. Valid fields are http_requests.job, http_requests.instance, http_requests.g, http_requests.ts, "val + Float64(1)".
-- # Matching only on instance excludes everything that has instance=0/1 but includes
-- # entries without the instance label.
@@ -161,7 +172,7 @@ Error: 1004(InvalidArguments), Unsupported expr type: set operation `OR`
-- NOT SUPPORTED: `or`
tql eval (3000, 3000, '1s') (http_requests{g="canary"} + 1) or on(instance) (http_requests or cpu_count or vector_matching_a);
-Error: 1004(InvalidArguments), Unsupported expr type: set operation `OR`
+Error: 1004(InvalidArguments), Internal error during building DataFusion plan: No field named cpu_count.val. Valid fields are cpu_count.ts.
-- eval instant at 50m (http_requests{group="canary"} + 1) or ignoring(l, group, job) (http_requests or cpu_count or vector_matching_a)
-- {group="canary", instance="0", job="api-server"} 301
@@ -174,7 +185,7 @@ Error: 1004(InvalidArguments), Unsupported expr type: set operation `OR`
-- NOT SUPPORTED: `or`
tql eval (3000, 3000, '1s') (http_requests{g="canary"} + 1) or ignoring(l, g, job) (http_requests or cpu_count or vector_matching_a);
-Error: 1004(InvalidArguments), Unsupported expr type: set operation `OR`
+Error: 1004(InvalidArguments), Internal error during building DataFusion plan: No field named cpu_count.val. Valid fields are cpu_count.ts.
-- eval instant at 50m http_requests{group="canary"} unless http_requests{instance="0"}
-- http_requests{group="canary", instance="1", job="api-server"} 400
@@ -268,3 +279,128 @@ drop table vector_matching_a;
Affected Rows: 0
+-- the following cases are not from Prometheus.
+create table t1 (ts timestamp time index, job string primary key, val double);
+
+Affected Rows: 0
+
+insert into t1 values (0, "a", 1.0), (500000, "b", 2.0), (1000000, "a", 3.0), (1500000, "c", 4.0);
+
+Affected Rows: 4
+
+create table t2 (ts timestamp time index, val double);
+
+Affected Rows: 0
+
+insert into t2 values (0, 0), (300000, 0), (600000, 0), (900000, 0), (1200000, 0), (1500000, 0), (1800000, 0);
+
+Affected Rows: 7
+
+-- SQLNESS SORT_RESULT 3 1
+tql eval (0, 2000, '400') t1 or t2;
+
++-----+---------------------+-----+
+| job | ts | val |
++-----+---------------------+-----+
+| | 1970-01-01T00:00:00 | 0.0 |
+| | 1970-01-01T00:06:40 | 0.0 |
+| | 1970-01-01T00:13:20 | 0.0 |
+| | 1970-01-01T00:20:00 | 0.0 |
+| | 1970-01-01T00:26:40 | 0.0 |
+| | 1970-01-01T00:33:20 | 0.0 |
+| a | 1970-01-01T00:00:00 | 1.0 |
+| a | 1970-01-01T00:20:00 | 3.0 |
+| b | 1970-01-01T00:13:20 | 2.0 |
+| c | 1970-01-01T00:26:40 | 4.0 |
++-----+---------------------+-----+
+
+-- SQLNESS SORT_RESULT 3 1
+tql eval (0, 2000, '400') t1 or on () t2;
+
++-----+---------------------+-----+
+| job | ts | val |
++-----+---------------------+-----+
+| | 1970-01-01T00:06:40 | 0.0 |
+| | 1970-01-01T00:33:20 | 0.0 |
+| a | 1970-01-01T00:00:00 | 1.0 |
+| a | 1970-01-01T00:20:00 | 3.0 |
+| b | 1970-01-01T00:13:20 | 2.0 |
+| c | 1970-01-01T00:26:40 | 4.0 |
++-----+---------------------+-----+
+
+-- SQLNESS SORT_RESULT 3 1
+tql eval (0, 2000, '400') t1 or on (job) t2;
+
++-----+---------------------+-----+
+| job | ts | val |
++-----+---------------------+-----+
+| | 1970-01-01T00:00:00 | 0.0 |
+| | 1970-01-01T00:06:40 | 0.0 |
+| | 1970-01-01T00:13:20 | 0.0 |
+| | 1970-01-01T00:20:00 | 0.0 |
+| | 1970-01-01T00:26:40 | 0.0 |
+| | 1970-01-01T00:33:20 | 0.0 |
+| a | 1970-01-01T00:00:00 | 1.0 |
+| a | 1970-01-01T00:20:00 | 3.0 |
+| b | 1970-01-01T00:13:20 | 2.0 |
+| c | 1970-01-01T00:26:40 | 4.0 |
++-----+---------------------+-----+
+
+-- SQLNESS SORT_RESULT 3 1
+tql eval (0, 2000, '400') t2 or t1;
+
++-----+---------------------+-----+
+| job | ts | val |
++-----+---------------------+-----+
+| | 1970-01-01T00:00:00 | 0.0 |
+| | 1970-01-01T00:06:40 | 0.0 |
+| | 1970-01-01T00:13:20 | 0.0 |
+| | 1970-01-01T00:20:00 | 0.0 |
+| | 1970-01-01T00:26:40 | 0.0 |
+| | 1970-01-01T00:33:20 | 0.0 |
+| a | 1970-01-01T00:00:00 | 1.0 |
+| a | 1970-01-01T00:20:00 | 3.0 |
+| b | 1970-01-01T00:13:20 | 2.0 |
+| c | 1970-01-01T00:26:40 | 4.0 |
++-----+---------------------+-----+
+
+-- SQLNESS SORT_RESULT 3 1
+tql eval (0, 2000, '400') t2 or on () t1;
+
++-----+---------------------+-----+
+| job | ts | val |
++-----+---------------------+-----+
+| | 1970-01-01T00:00:00 | 0.0 |
+| | 1970-01-01T00:06:40 | 0.0 |
+| | 1970-01-01T00:13:20 | 0.0 |
+| | 1970-01-01T00:20:00 | 0.0 |
+| | 1970-01-01T00:26:40 | 0.0 |
+| | 1970-01-01T00:33:20 | 0.0 |
++-----+---------------------+-----+
+
+-- SQLNESS SORT_RESULT 3 1
+tql eval (0, 2000, '400') t2 or on(job) t1;
+
++-----+---------------------+-----+
+| job | ts | val |
++-----+---------------------+-----+
+| | 1970-01-01T00:00:00 | 0.0 |
+| | 1970-01-01T00:06:40 | 0.0 |
+| | 1970-01-01T00:13:20 | 0.0 |
+| | 1970-01-01T00:20:00 | 0.0 |
+| | 1970-01-01T00:26:40 | 0.0 |
+| | 1970-01-01T00:33:20 | 0.0 |
+| a | 1970-01-01T00:00:00 | 1.0 |
+| a | 1970-01-01T00:20:00 | 3.0 |
+| b | 1970-01-01T00:13:20 | 2.0 |
+| c | 1970-01-01T00:26:40 | 4.0 |
++-----+---------------------+-----+
+
+drop table t1;
+
+Affected Rows: 0
+
+drop table t2;
+
+Affected Rows: 0
+
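The results above exercise the row-level semantics of `or`: every left-hand row is kept, and a right-hand row survives only if its match-column/timestamp key is absent on the left. A toy, std-only sketch of that rule (not the engine's actual UnionDistinctOn operator) over (label, timestamp, value) triples:

use std::collections::HashSet;

fn union_distinct_on<'a>(left: &[(&'a str, i64, f64)], right: &[(&'a str, i64, f64)]) -> Vec<(&'a str, i64, f64)> {
    // keys already present on the left win; overlapping right-hand rows are dropped
    let seen: HashSet<(&str, i64)> = left.iter().map(|(k, t, _)| (*k, *t)).collect();
    let mut out: Vec<_> = left.to_vec();
    for row in right {
        if !seen.contains(&(row.0, row.1)) {
            out.push(*row);
        }
    }
    out
}

fn main() {
    let t1 = [("a", 0, 1.0), ("b", 800, 2.0)];
    let t2 = [("a", 0, 9.0), ("c", 400, 0.0)];
    // the overlapping ("a", 0) row from the right side is dropped
    assert_eq!(
        union_distinct_on(&t1, &t2),
        vec![("a", 0, 1.0), ("b", 800, 2.0), ("c", 400, 0.0)]
    );
}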
diff --git a/tests/cases/standalone/common/promql/set_operation.sql b/tests/cases/standalone/common/promql/set_operation.sql
index e91460df3478..6a71711bd896 100644
--- a/tests/cases/standalone/common/promql/set_operation.sql
+++ b/tests/cases/standalone/common/promql/set_operation.sql
@@ -79,7 +79,7 @@ tql eval (3000, 3000, '1s') (http_requests{g="canary"} + 1) and ignoring(g, job)
-- http_requests{group="production", instance="0", job="app-server"} 500
-- http_requests{group="production", instance="1", job="api-server"} 200
-- http_requests{group="production", instance="1", job="app-server"} 600
--- NOT SUPPORTED: `or`
+-- SQLNESS SORT_RESULT 3 1
tql eval (3000, 3000, '1s') http_requests{g="canary"} or http_requests{g="production"};
-- # On overlap the rhs samples must be dropped.
@@ -90,7 +90,7 @@ tql eval (3000, 3000, '1s') http_requests{g="canary"} or http_requests{g="produc
-- {group="canary", instance="1", job="app-server"} 801
-- http_requests{group="production", instance="1", job="api-server"} 200
-- http_requests{group="production", instance="1", job="app-server"} 600
--- NOT SUPPORTED: `or`
+-- SQLNESS SORT_RESULT 3 1
tql eval (3000, 3000, '1s') (http_requests{g="canary"} + 1) or http_requests{instance="1"};
@@ -173,3 +173,35 @@ drop table http_requests;
drop table cpu_count;
drop table vector_matching_a;
+
+-- the following cases are not from Prometheus.
+
+create table t1 (ts timestamp time index, job string primary key, val double);
+
+insert into t1 values (0, "a", 1.0), (500000, "b", 2.0), (1000000, "a", 3.0), (1500000, "c", 4.0);
+
+create table t2 (ts timestamp time index, val double);
+
+insert into t2 values (0, 0), (300000, 0), (600000, 0), (900000, 0), (1200000, 0), (1500000, 0), (1800000, 0);
+
+-- SQLNESS SORT_RESULT 3 1
+tql eval (0, 2000, '400') t1 or t2;
+
+-- SQLNESS SORT_RESULT 3 1
+tql eval (0, 2000, '400') t1 or on () t2;
+
+-- SQLNESS SORT_RESULT 3 1
+tql eval (0, 2000, '400') t1 or on (job) t2;
+
+-- SQLNESS SORT_RESULT 3 1
+tql eval (0, 2000, '400') t2 or t1;
+
+-- SQLNESS SORT_RESULT 3 1
+tql eval (0, 2000, '400') t2 or on () t1;
+
+-- SQLNESS SORT_RESULT 3 1
+tql eval (0, 2000, '400') t2 or on(job) t1;
+
+drop table t1;
+
+drop table t2;
|
feat
|
Implement OR for PromQL (#3024)
|
bf21527f18fe3b6a6756939c836e74fbed1c92c6
|
2024-04-22 18:07:25
|
Ruihang Xia
|
fix: set is_time_index properly on updating physical table's schema (#3770)
| false
|
diff --git a/src/common/meta/src/ddl/physical_table_metadata.rs b/src/common/meta/src/ddl/physical_table_metadata.rs
index df66995bd883..376a1431338d 100644
--- a/src/common/meta/src/ddl/physical_table_metadata.rs
+++ b/src/common/meta/src/ddl/physical_table_metadata.rs
@@ -52,5 +52,9 @@ pub(crate) fn build_new_physical_table_info(
columns.push(col.column_schema.clone());
}
+ if let Some(time_index) = *time_index {
+ raw_table_info.meta.schema.column_schemas[time_index].set_time_index();
+ }
+
raw_table_info
}
diff --git a/src/datatypes/src/schema/column_schema.rs b/src/datatypes/src/schema/column_schema.rs
index f515370f0e98..b263fed9a743 100644
--- a/src/datatypes/src/schema/column_schema.rs
+++ b/src/datatypes/src/schema/column_schema.rs
@@ -54,6 +54,10 @@ impl fmt::Debug for ColumnSchema {
if self.is_nullable { "null" } else { "not null" },
)?;
+ if self.is_time_index {
+ write!(f, " time_index")?;
+ }
+
// Add default constraint if present
if let Some(default_constraint) = &self.default_constraint {
write!(f, " default={:?}", default_constraint)?;
@@ -159,6 +163,14 @@ impl ColumnSchema {
self.is_nullable = true;
}
+ /// Set the `is_time_index` to `true` of the column.
+ /// Similar to [with_time_index] but doesn't take ownership.
+ ///
+ /// [with_time_index]: Self::with_time_index
+ pub fn set_time_index(&mut self) {
+ self.is_time_index = true;
+ }
+
/// Creates a new [`ColumnSchema`] with given metadata.
pub fn with_metadata(mut self, metadata: Metadata) -> Self {
self.metadata = metadata;
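A standalone illustration of the API pattern the patch adds (this is not the datatypes crate itself): a consuming with_time_index builder next to an in-place set_time_index, so code that only holds a mutable reference, as the physical-table update above does, can flip the flag:

#[derive(Debug, Default)]
struct ColumnSchemaLike {
    is_time_index: bool,
}

impl ColumnSchemaLike {
    /// builder-style: takes ownership and returns the modified value
    fn with_time_index(mut self, value: bool) -> Self {
        self.is_time_index = value;
        self
    }

    /// in-place: works through a mutable reference, like the new setter
    fn set_time_index(&mut self) {
        self.is_time_index = true;
    }
}

fn main() {
    let owned = ColumnSchemaLike::default().with_time_index(true);
    let mut borrowed = ColumnSchemaLike::default();
    borrowed.set_time_index();
    assert!(owned.is_time_index && borrowed.is_time_index);
}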
diff --git a/tests/cases/standalone/common/alter/alter_metric_table.result b/tests/cases/standalone/common/alter/alter_metric_table.result
index 8ae541b71e07..d9808265af89 100644
--- a/tests/cases/standalone/common/alter/alter_metric_table.result
+++ b/tests/cases/standalone/common/alter/alter_metric_table.result
@@ -11,6 +11,15 @@ SHOW TABLES;
| phy |
+---------+
+DESC TABLE phy;
+
++--------+----------------------+-----+------+---------+---------------+
+| Column | Type | Key | Null | Default | Semantic Type |
++--------+----------------------+-----+------+---------+---------------+
+| ts | TimestampMillisecond | PRI | NO | | TIMESTAMP |
+| val | Float64 | | YES | | FIELD |
++--------+----------------------+-----+------+---------+---------------+
+
CREATE TABLE t1 (ts timestamp time index, val double, host string primary key) engine = metric with ("on_physical_table" = "phy");
Affected Rows: 0
@@ -44,7 +53,7 @@ DESC TABLE phy;
+------------+----------------------+-----+------+---------+---------------+
| Column | Type | Key | Null | Default | Semantic Type |
+------------+----------------------+-----+------+---------+---------------+
-| ts | TimestampMillisecond | | NO | | FIELD |
+| ts | TimestampMillisecond | PRI | NO | | TIMESTAMP |
| val | Float64 | | YES | | FIELD |
| __table_id | UInt32 | PRI | NO | | TAG |
| __tsid | UInt64 | PRI | NO | | TAG |
@@ -87,7 +96,7 @@ DESC TABLE phy;
+------------+----------------------+-----+------+---------+---------------+
| Column | Type | Key | Null | Default | Semantic Type |
+------------+----------------------+-----+------+---------+---------------+
-| ts | TimestampMillisecond | | NO | | FIELD |
+| ts | TimestampMillisecond | PRI | NO | | TIMESTAMP |
| val | Float64 | | YES | | FIELD |
| __table_id | UInt32 | PRI | NO | | TAG |
| __tsid | UInt64 | PRI | NO | | TAG |
diff --git a/tests/cases/standalone/common/alter/alter_metric_table.sql b/tests/cases/standalone/common/alter/alter_metric_table.sql
index 579dd90c4896..be3d7db53ed3 100644
--- a/tests/cases/standalone/common/alter/alter_metric_table.sql
+++ b/tests/cases/standalone/common/alter/alter_metric_table.sql
@@ -2,6 +2,8 @@ CREATE TABLE phy (ts timestamp time index, val double) engine=metric with ("phys
SHOW TABLES;
+DESC TABLE phy;
+
CREATE TABLE t1 (ts timestamp time index, val double, host string primary key) engine = metric with ("on_physical_table" = "phy");
CREATE TABLE t2 (ts timestamp time index, job string primary key, val double) engine = metric with ("on_physical_table" = "phy");
diff --git a/tests/cases/standalone/common/create/create_metric_table.result b/tests/cases/standalone/common/create/create_metric_table.result
index 5384723ca7bd..f844c5cbd5c2 100644
--- a/tests/cases/standalone/common/create/create_metric_table.result
+++ b/tests/cases/standalone/common/create/create_metric_table.result
@@ -43,7 +43,7 @@ DESC TABLE phy;
+------------+----------------------+-----+------+---------+---------------+
| Column | Type | Key | Null | Default | Semantic Type |
+------------+----------------------+-----+------+---------+---------------+
-| ts | TimestampMillisecond | | NO | | FIELD |
+| ts | TimestampMillisecond | PRI | NO | | TIMESTAMP |
| val | Float64 | | YES | | FIELD |
| __table_id | UInt32 | PRI | NO | | TAG |
| __tsid | UInt64 | PRI | NO | | TAG |
@@ -83,7 +83,7 @@ DESC TABLE phy;
+------------+----------------------+-----+------+---------+---------------+
| Column | Type | Key | Null | Default | Semantic Type |
+------------+----------------------+-----+------+---------+---------------+
-| ts | TimestampMillisecond | | NO | | FIELD |
+| ts | TimestampMillisecond | PRI | NO | | TIMESTAMP |
| val | Float64 | | YES | | FIELD |
| __table_id | UInt32 | PRI | NO | | TAG |
| __tsid | UInt64 | PRI | NO | | TAG |
diff --git a/tests/cases/standalone/common/insert/logical_metric_table.result b/tests/cases/standalone/common/insert/logical_metric_table.result
index a6e958d4a008..ff32ee185b2e 100644
--- a/tests/cases/standalone/common/insert/logical_metric_table.result
+++ b/tests/cases/standalone/common/insert/logical_metric_table.result
@@ -54,7 +54,7 @@ DESC TABLE phy;
+------------+----------------------+-----+------+---------+---------------+
| Column | Type | Key | Null | Default | Semantic Type |
+------------+----------------------+-----+------+---------+---------------+
-| ts | TimestampMillisecond | | NO | | FIELD |
+| ts | TimestampMillisecond | PRI | NO | | TIMESTAMP |
| val | Float64 | | YES | | FIELD |
| __table_id | UInt32 | PRI | NO | | TAG |
| __tsid | UInt64 | PRI | NO | | TAG |
|
fix
|
set is_time_index properly on updating physical table's schema (#3770)
|
d6b8672e638293936edfaadb7f5d13d5bdee83e4
|
2025-02-05 08:31:45
|
yihong
|
docs: the year is better to show in 2025 (#5468)
| false
|
diff --git a/README.md b/README.md
index ba94fdbbb2e2..51434ff8372b 100644
--- a/README.md
+++ b/README.md
@@ -13,7 +13,7 @@
<a href="https://greptime.com/product/cloud">GreptimeCloud</a> |
<a href="https://docs.greptime.com/">User Guide</a> |
<a href="https://greptimedb.rs/">API Docs</a> |
- <a href="https://github.com/GreptimeTeam/greptimedb/issues/3412">Roadmap 2024</a>
+ <a href="https://github.com/GreptimeTeam/greptimedb/issues/5446">Roadmap 2025</a>
</h4>
<a href="https://github.com/GreptimeTeam/greptimedb/releases/latest">
|
docs
|
the year is better to show in 2025 (#5468)
|
a8b426aebe4d8268d3216e53009588733b587f96
|
2024-11-05 09:14:25
|
Ning Sun
|
feat: add more geo functions (#4888)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index aeacc90ba398..392115e1222b 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2070,6 +2070,8 @@ dependencies = [
"datafusion",
"datatypes",
"derive_more",
+ "geo",
+ "geo-types",
"geohash",
"h3o",
"jsonb",
@@ -2088,6 +2090,7 @@ dependencies = [
"store-api",
"table",
"tokio",
+ "wkt",
]
[[package]]
@@ -3706,6 +3709,16 @@ version = "1.0.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0d6ef0072f8a535281e4876be788938b528e9a1d43900b82c2569af7da799125"
+[[package]]
+name = "earcutr"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "79127ed59a85d7687c409e9978547cffb7dc79675355ed22da6b66fd5f6ead01"
+dependencies = [
+ "itertools 0.11.0",
+ "num-traits",
+]
+
[[package]]
name = "either"
version = "1.13.0"
@@ -4014,6 +4027,12 @@ dependencies = [
"libc",
]
+[[package]]
+name = "float_next_after"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8bf7cc16383c4b8d58b9905a8509f02926ce3058053c056376248d958c9df1e8"
+
[[package]]
name = "flow"
version = "0.9.5"
@@ -4438,6 +4457,24 @@ dependencies = [
"version_check",
]
+[[package]]
+name = "geo"
+version = "0.29.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "81d088357a9cc60cec8253b3578f6834b4a3aa20edb55f5d1c030c36d8143f11"
+dependencies = [
+ "earcutr",
+ "float_next_after",
+ "geo-types",
+ "geographiclib-rs",
+ "i_overlay",
+ "log",
+ "num-traits",
+ "robust",
+ "rstar",
+ "spade",
+]
+
[[package]]
name = "geo-types"
version = "0.7.13"
@@ -4446,9 +4483,19 @@ checksum = "9ff16065e5720f376fbced200a5ae0f47ace85fd70b7e54269790281353b6d61"
dependencies = [
"approx 0.5.1",
"num-traits",
+ "rstar",
"serde",
]
+[[package]]
+name = "geographiclib-rs"
+version = "0.2.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e6e5ed84f8089c70234b0a8e0aedb6dc733671612ddc0d37c6066052f9781960"
+dependencies = [
+ "libm",
+]
+
[[package]]
name = "geohash"
version = "0.13.1"
@@ -4597,6 +4644,15 @@ dependencies = [
"num-traits",
]
+[[package]]
+name = "hash32"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "47d60b12902ba28e2730cd37e95b8c9223af2808df9e902d4df49588d1470606"
+dependencies = [
+ "byteorder",
+]
+
[[package]]
name = "hashbrown"
version = "0.12.3"
@@ -4692,6 +4748,16 @@ dependencies = [
"http 1.1.0",
]
+[[package]]
+name = "heapless"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0bfb9eb618601c89945a70e254898da93b13be0388091d42117462b265bb3fad"
+dependencies = [
+ "hash32",
+ "stable_deref_trait",
+]
+
[[package]]
name = "heck"
version = "0.4.1"
@@ -5117,6 +5183,50 @@ dependencies = [
"tracing",
]
+[[package]]
+name = "i_float"
+version = "1.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f5fe043aae28ce70bd2f78b2f5f82a3654d63607c82594da4dabb8b6cb81f2b2"
+dependencies = [
+ "serde",
+]
+
+[[package]]
+name = "i_key_sort"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "347c253b4748a1a28baf94c9ce133b6b166f08573157e05afe718812bc599fcd"
+
+[[package]]
+name = "i_overlay"
+version = "1.7.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a469f68cb8a7cef375b2b0f581faf5859b4b50600438c00d46b71acc25ebbd0c"
+dependencies = [
+ "i_float",
+ "i_key_sort",
+ "i_shape",
+ "i_tree",
+ "rayon",
+]
+
+[[package]]
+name = "i_shape"
+version = "1.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1b44852d57a991c7dedaf76c55bc44f677f547ff899a430d29e13efd6133d7d8"
+dependencies = [
+ "i_float",
+ "serde",
+]
+
+[[package]]
+name = "i_tree"
+version = "0.8.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "155181bc97d770181cf9477da51218a19ee92a8e5be642e796661aee2b601139"
+
[[package]]
name = "iana-time-zone"
version = "0.1.61"
@@ -9570,6 +9680,12 @@ dependencies = [
"syn 1.0.109",
]
+[[package]]
+name = "robust"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cbf4a6aa5f6d6888f39e980649f3ad6b666acdce1d78e95b8a2cb076e687ae30"
+
[[package]]
name = "ron"
version = "0.7.1"
@@ -9664,6 +9780,17 @@ dependencies = [
"zstd 0.13.2",
]
+[[package]]
+name = "rstar"
+version = "0.12.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "133315eb94c7b1e8d0cb097e5a710d850263372fd028fff18969de708afc7008"
+dependencies = [
+ "heapless",
+ "num-traits",
+ "smallvec",
+]
+
[[package]]
name = "rstest"
version = "0.21.0"
@@ -11156,6 +11283,18 @@ dependencies = [
"windows-sys 0.52.0",
]
+[[package]]
+name = "spade"
+version = "2.12.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "93f5ef1f863aca7d1d7dda7ccfc36a0a4279bd6d3c375176e5e0712e25cb4889"
+dependencies = [
+ "hashbrown 0.14.5",
+ "num-traits",
+ "robust",
+ "smallvec",
+]
+
[[package]]
name = "sparsevec"
version = "0.2.0"
@@ -14149,6 +14288,18 @@ dependencies = [
"winapi",
]
+[[package]]
+name = "wkt"
+version = "0.11.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "54f7f1ff4ea4c18936d6cd26a6fd24f0003af37e951a8e0e8b9e9a2d0bd0a46d"
+dependencies = [
+ "geo-types",
+ "log",
+ "num-traits",
+ "thiserror",
+]
+
[[package]]
name = "wyz"
version = "0.5.1"
diff --git a/src/common/function/Cargo.toml b/src/common/function/Cargo.toml
index 6b23762d90c2..6c1ecc2d381e 100644
--- a/src/common/function/Cargo.toml
+++ b/src/common/function/Cargo.toml
@@ -9,7 +9,7 @@ workspace = true
[features]
default = ["geo"]
-geo = ["geohash", "h3o", "s2"]
+geo = ["geohash", "h3o", "s2", "wkt", "geo-types", "dep:geo"]
[dependencies]
api.workspace = true
@@ -28,6 +28,8 @@ common-version.workspace = true
datafusion.workspace = true
datatypes.workspace = true
derive_more = { version = "1", default-features = false, features = ["display"] }
+geo = { version = "0.29", optional = true }
+geo-types = { version = "0.7", optional = true }
geohash = { version = "0.13", optional = true }
h3o = { version = "0.6", optional = true }
jsonb.workspace = true
@@ -44,6 +46,7 @@ sql.workspace = true
statrs = "0.16"
store-api.workspace = true
table.workspace = true
+wkt = { version = "0.11", optional = true }
[dev-dependencies]
ron = "0.7"
diff --git a/src/common/function/src/scalars/geo.rs b/src/common/function/src/scalars/geo.rs
index 866acddc4dc9..37a7b3eb5537 100644
--- a/src/common/function/src/scalars/geo.rs
+++ b/src/common/function/src/scalars/geo.rs
@@ -17,7 +17,10 @@ pub(crate) mod encoding;
mod geohash;
mod h3;
mod helpers;
+mod measure;
+mod relation;
mod s2;
+mod wkt;
use crate::function_registry::FunctionRegistry;
@@ -48,6 +51,7 @@ impl GeoFunctions {
registry.register(Arc::new(h3::H3CellToChildrenSize));
registry.register(Arc::new(h3::H3CellToChildPos));
registry.register(Arc::new(h3::H3ChildPosToCell));
+ registry.register(Arc::new(h3::H3CellContains));
// h3 grid traversal
registry.register(Arc::new(h3::H3GridDisk));
@@ -55,10 +59,27 @@ impl GeoFunctions {
registry.register(Arc::new(h3::H3GridDistance));
registry.register(Arc::new(h3::H3GridPathCells));
+ // h3 measurement
+ registry.register(Arc::new(h3::H3CellDistanceSphereKm));
+ registry.register(Arc::new(h3::H3CellDistanceEuclideanDegree));
+
// s2
registry.register(Arc::new(s2::S2LatLngToCell));
registry.register(Arc::new(s2::S2CellLevel));
registry.register(Arc::new(s2::S2CellToToken));
registry.register(Arc::new(s2::S2CellParent));
+
+ // spatial data type
+ registry.register(Arc::new(wkt::LatLngToPointWkt));
+
+ // spatial relation
+ registry.register(Arc::new(relation::STContains));
+ registry.register(Arc::new(relation::STWithin));
+ registry.register(Arc::new(relation::STIntersects));
+
+ // spatial measure
+ registry.register(Arc::new(measure::STDistance));
+ registry.register(Arc::new(measure::STDistanceSphere));
+ registry.register(Arc::new(measure::STArea));
}
}
diff --git a/src/common/function/src/scalars/geo/h3.rs b/src/common/function/src/scalars/geo/h3.rs
index 7f98c3147a61..e86c903dc2c9 100644
--- a/src/common/function/src/scalars/geo/h3.rs
+++ b/src/common/function/src/scalars/geo/h3.rs
@@ -23,8 +23,8 @@ use datatypes::prelude::ConcreteDataType;
use datatypes::scalars::{Scalar, ScalarVectorBuilder};
use datatypes::value::{ListValue, Value};
use datatypes::vectors::{
- BooleanVectorBuilder, Int32VectorBuilder, ListVectorBuilder, MutableVector,
- StringVectorBuilder, UInt64VectorBuilder, UInt8VectorBuilder, VectorRef,
+ BooleanVectorBuilder, Float64VectorBuilder, Int32VectorBuilder, ListVectorBuilder,
+ MutableVector, StringVectorBuilder, UInt64VectorBuilder, UInt8VectorBuilder, VectorRef,
};
use derive_more::Display;
use h3o::{CellIndex, LatLng, Resolution};
@@ -38,6 +38,7 @@ static CELL_TYPES: Lazy<Vec<ConcreteDataType>> = Lazy::new(|| {
vec![
ConcreteDataType::int64_datatype(),
ConcreteDataType::uint64_datatype(),
+ ConcreteDataType::string_datatype(),
]
});
@@ -952,6 +953,181 @@ impl Function for H3GridPathCells {
}
}
+/// Tests if any of the given cells contains the given cell
+#[derive(Clone, Debug, Default, Display)]
+#[display("{}", self.name())]
+pub struct H3CellContains;
+
+impl Function for H3CellContains {
+ fn name(&self) -> &str {
+ "h3_cells_contains"
+ }
+
+ fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
+ Ok(ConcreteDataType::boolean_datatype())
+ }
+
+ fn signature(&self) -> Signature {
+ let multi_cell_types = vec![
+ ConcreteDataType::list_datatype(ConcreteDataType::int64_datatype()),
+ ConcreteDataType::list_datatype(ConcreteDataType::uint64_datatype()),
+ ConcreteDataType::list_datatype(ConcreteDataType::string_datatype()),
+ ConcreteDataType::string_datatype(),
+ ];
+
+ let mut signatures = Vec::with_capacity(multi_cell_types.len() * CELL_TYPES.len());
+ for multi_cell_type in &multi_cell_types {
+ for cell_type in CELL_TYPES.as_slice() {
+ signatures.push(TypeSignature::Exact(vec![
+ multi_cell_type.clone(),
+ cell_type.clone(),
+ ]));
+ }
+ }
+
+ Signature::one_of(signatures, Volatility::Stable)
+ }
+
+ fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
+ ensure_columns_n!(columns, 2);
+
+ let cells_vec = &columns[0];
+ let cell_this_vec = &columns[1];
+
+ let size = cell_this_vec.len();
+ let mut results = BooleanVectorBuilder::with_capacity(size);
+
+ for i in 0..size {
+ let mut result = None;
+ if let (cells, Some(cell_this)) = (
+ cells_from_value(cells_vec.get(i))?,
+ cell_from_value(cell_this_vec.get(i))?,
+ ) {
+ result = Some(false);
+
+ for cell_that in cells.iter() {
+ // get cell resolution, and find cell_this's parent at
+ // this resolution, test if cell_that equals the parent
+ let resolution = cell_that.resolution();
+ if let Some(cell_this_parent) = cell_this.parent(resolution) {
+ if cell_this_parent == *cell_that {
+ result = Some(true);
+ break;
+ }
+ }
+ }
+ }
+
+ results.push(result);
+ }
+
+ Ok(results.to_vector())
+ }
+}
+
+/// Get the WGS84 great-circle distance between two cell centroids
+#[derive(Clone, Debug, Default, Display)]
+#[display("{}", self.name())]
+pub struct H3CellDistanceSphereKm;
+
+impl Function for H3CellDistanceSphereKm {
+ fn name(&self) -> &str {
+ "h3_distance_sphere_km"
+ }
+ fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
+ Ok(ConcreteDataType::float64_datatype())
+ }
+
+ fn signature(&self) -> Signature {
+ signature_of_double_cells()
+ }
+
+ fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
+ ensure_columns_n!(columns, 2);
+
+ let cell_this_vec = &columns[0];
+ let cell_that_vec = &columns[1];
+ let size = cell_this_vec.len();
+
+ let mut results = Float64VectorBuilder::with_capacity(size);
+
+ for i in 0..size {
+ let result = match (
+ cell_from_value(cell_this_vec.get(i))?,
+ cell_from_value(cell_that_vec.get(i))?,
+ ) {
+ (Some(cell_this), Some(cell_that)) => {
+ let centroid_this = LatLng::from(cell_this);
+ let centroid_that = LatLng::from(cell_that);
+
+ Some(centroid_this.distance_km(centroid_that))
+ }
+ _ => None,
+ };
+
+ results.push(result);
+ }
+
+ Ok(results.to_vector())
+ }
+}
+
+/// Get the Euclidean distance between two cell centroids
+#[derive(Clone, Debug, Default, Display)]
+#[display("{}", self.name())]
+pub struct H3CellDistanceEuclideanDegree;
+
+impl H3CellDistanceEuclideanDegree {
+ fn distance(centroid_this: LatLng, centroid_that: LatLng) -> f64 {
+ ((centroid_this.lat() - centroid_that.lat()).powi(2)
+ + (centroid_this.lng() - centroid_that.lng()).powi(2))
+ .sqrt()
+ }
+}
+
+impl Function for H3CellDistanceEuclideanDegree {
+ fn name(&self) -> &str {
+ "h3_distance_degree"
+ }
+ fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
+ Ok(ConcreteDataType::float64_datatype())
+ }
+
+ fn signature(&self) -> Signature {
+ signature_of_double_cells()
+ }
+
+ fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
+ ensure_columns_n!(columns, 2);
+
+ let cell_this_vec = &columns[0];
+ let cell_that_vec = &columns[1];
+ let size = cell_this_vec.len();
+
+ let mut results = Float64VectorBuilder::with_capacity(size);
+
+ for i in 0..size {
+ let result = match (
+ cell_from_value(cell_this_vec.get(i))?,
+ cell_from_value(cell_that_vec.get(i))?,
+ ) {
+ (Some(cell_this), Some(cell_that)) => {
+ let centroid_this = LatLng::from(cell_this);
+ let centroid_that = LatLng::from(cell_that);
+
+ let dist = Self::distance(centroid_this, centroid_that);
+ Some(dist)
+ }
+ _ => None,
+ };
+
+ results.push(result);
+ }
+
+ Ok(results.to_vector())
+ }
+}
+
fn value_to_resolution(v: Value) -> Result<Resolution> {
let r = match v {
Value::Int8(v) => v as u8,
@@ -1073,7 +1249,126 @@ fn cell_from_value(v: Value) -> Result<Option<CellIndex>> {
})
.context(error::ExecuteSnafu)?,
),
+ Value::String(s) => Some(
+ CellIndex::from_str(s.as_utf8())
+ .map_err(|e| {
+ BoxedError::new(PlainError::new(
+ format!("H3 error: {}", e),
+ StatusCode::EngineExecuteQuery,
+ ))
+ })
+ .context(error::ExecuteSnafu)?,
+ ),
_ => None,
};
Ok(cell)
}
+
+/// extract cell array from all possible types including:
+/// - int64 list
+/// - uint64 list
+/// - string list
+/// - comma-separated string
+fn cells_from_value(v: Value) -> Result<Vec<CellIndex>> {
+ match v {
+ Value::List(list) => match list.datatype() {
+ ConcreteDataType::Int64(_) => list
+ .items()
+ .iter()
+ .map(|v| {
+ if let Value::Int64(v) = v {
+ CellIndex::try_from(*v as u64)
+ .map_err(|e| {
+ BoxedError::new(PlainError::new(
+ format!("H3 error: {}", e),
+ StatusCode::EngineExecuteQuery,
+ ))
+ })
+ .context(error::ExecuteSnafu)
+ } else {
+ Err(BoxedError::new(PlainError::new(
+ "Invalid data type in array".to_string(),
+ StatusCode::EngineExecuteQuery,
+ )))
+ .context(error::ExecuteSnafu)
+ }
+ })
+ .collect::<Result<Vec<CellIndex>>>(),
+ ConcreteDataType::UInt64(_) => list
+ .items()
+ .iter()
+ .map(|v| {
+ if let Value::UInt64(v) = v {
+ CellIndex::try_from(*v)
+ .map_err(|e| {
+ BoxedError::new(PlainError::new(
+ format!("H3 error: {}", e),
+ StatusCode::EngineExecuteQuery,
+ ))
+ })
+ .context(error::ExecuteSnafu)
+ } else {
+ Err(BoxedError::new(PlainError::new(
+ "Invalid data type in array".to_string(),
+ StatusCode::EngineExecuteQuery,
+ )))
+ .context(error::ExecuteSnafu)
+ }
+ })
+ .collect::<Result<Vec<CellIndex>>>(),
+ ConcreteDataType::String(_) => list
+ .items()
+ .iter()
+ .map(|v| {
+ if let Value::String(v) = v {
+ CellIndex::from_str(v.as_utf8().trim())
+ .map_err(|e| {
+ BoxedError::new(PlainError::new(
+ format!("H3 error: {}", e),
+ StatusCode::EngineExecuteQuery,
+ ))
+ })
+ .context(error::ExecuteSnafu)
+ } else {
+ Err(BoxedError::new(PlainError::new(
+ "Invalid data type in array".to_string(),
+ StatusCode::EngineExecuteQuery,
+ )))
+ .context(error::ExecuteSnafu)
+ }
+ })
+ .collect::<Result<Vec<CellIndex>>>(),
+ _ => Ok(vec![]),
+ },
+ Value::String(csv) => {
+ let str_seq = csv.as_utf8().split(',');
+ str_seq
+ .map(|v| {
+ CellIndex::from_str(v.trim())
+ .map_err(|e| {
+ BoxedError::new(PlainError::new(
+ format!("H3 error: {}", e),
+ StatusCode::EngineExecuteQuery,
+ ))
+ })
+ .context(error::ExecuteSnafu)
+ })
+ .collect::<Result<Vec<CellIndex>>>()
+ }
+ _ => Ok(vec![]),
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_h3_euclidean_distance() {
+ let point_this = LatLng::new(42.3521, -72.1235).expect("incorrect lat lng");
+ let point_that = LatLng::new(42.45, -72.1260).expect("incorrect lat lng");
+
+ let dist = H3CellDistanceEuclideanDegree::distance(point_this, point_that);
+ assert_eq!(dist, 0.09793191512474639);
+ }
+}
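For reference, h3_distance_degree is plain planar arithmetic on degree coordinates. A dependency-free restatement using the same coordinates as the unit test above:

fn euclidean_degree(lat1: f64, lng1: f64, lat2: f64, lng2: f64) -> f64 {
    // sqrt((delta lat)^2 + (delta lng)^2), both in degrees
    ((lat1 - lat2).powi(2) + (lng1 - lng2).powi(2)).sqrt()
}

fn main() {
    let d = euclidean_degree(42.3521, -72.1235, 42.45, -72.1260);
    // matches the expected value from the test module, up to round-tripping through h3o
    assert!((d - 0.09793191512474639).abs() < 1e-9);
}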
diff --git a/src/common/function/src/scalars/geo/measure.rs b/src/common/function/src/scalars/geo/measure.rs
new file mode 100644
index 000000000000..a18225990384
--- /dev/null
+++ b/src/common/function/src/scalars/geo/measure.rs
@@ -0,0 +1,195 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use common_error::ext::{BoxedError, PlainError};
+use common_error::status_code::StatusCode;
+use common_query::error::{self, Result};
+use common_query::prelude::{Signature, TypeSignature};
+use datafusion::logical_expr::Volatility;
+use datatypes::prelude::ConcreteDataType;
+use datatypes::scalars::ScalarVectorBuilder;
+use datatypes::vectors::{Float64VectorBuilder, MutableVector, VectorRef};
+use derive_more::Display;
+use geo::algorithm::line_measures::metric_spaces::Euclidean;
+use geo::{Area, Distance, Haversine};
+use geo_types::Geometry;
+use snafu::ResultExt;
+
+use super::helpers::{ensure_columns_len, ensure_columns_n};
+use super::wkt::parse_wkt;
+use crate::function::{Function, FunctionContext};
+
+/// Return the WGS84 (SRID: 4326) euclidean distance between two geometry objects, in degrees
+#[derive(Clone, Debug, Default, Display)]
+#[display("{}", self.name())]
+pub struct STDistance;
+
+impl Function for STDistance {
+ fn name(&self) -> &str {
+ "st_distance"
+ }
+
+ fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
+ Ok(ConcreteDataType::float64_datatype())
+ }
+
+ fn signature(&self) -> Signature {
+ Signature::new(
+ TypeSignature::Exact(vec![
+ ConcreteDataType::string_datatype(),
+ ConcreteDataType::string_datatype(),
+ ]),
+ Volatility::Stable,
+ )
+ }
+
+ fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
+ ensure_columns_n!(columns, 2);
+
+ let wkt_this_vec = &columns[0];
+ let wkt_that_vec = &columns[1];
+
+ let size = wkt_this_vec.len();
+ let mut results = Float64VectorBuilder::with_capacity(size);
+
+ for i in 0..size {
+ let wkt_this = wkt_this_vec.get(i).as_string();
+ let wkt_that = wkt_that_vec.get(i).as_string();
+
+ let result = match (wkt_this, wkt_that) {
+ (Some(wkt_this), Some(wkt_that)) => {
+ let geom_this = parse_wkt(&wkt_this)?;
+ let geom_that = parse_wkt(&wkt_that)?;
+
+ Some(Euclidean::distance(&geom_this, &geom_that))
+ }
+ _ => None,
+ };
+
+ results.push(result);
+ }
+
+ Ok(results.to_vector())
+ }
+}
+
+/// Return the great-circle distance between two geometry objects, in meters
+#[derive(Clone, Debug, Default, Display)]
+#[display("{}", self.name())]
+pub struct STDistanceSphere;
+
+impl Function for STDistanceSphere {
+ fn name(&self) -> &str {
+ "st_distance_sphere_m"
+ }
+
+ fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
+ Ok(ConcreteDataType::float64_datatype())
+ }
+
+ fn signature(&self) -> Signature {
+ Signature::new(
+ TypeSignature::Exact(vec![
+ ConcreteDataType::string_datatype(),
+ ConcreteDataType::string_datatype(),
+ ]),
+ Volatility::Stable,
+ )
+ }
+
+ fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
+ ensure_columns_n!(columns, 2);
+
+ let wkt_this_vec = &columns[0];
+ let wkt_that_vec = &columns[1];
+
+ let size = wkt_this_vec.len();
+ let mut results = Float64VectorBuilder::with_capacity(size);
+
+ for i in 0..size {
+ let wkt_this = wkt_this_vec.get(i).as_string();
+ let wkt_that = wkt_that_vec.get(i).as_string();
+
+ let result = match (wkt_this, wkt_that) {
+ (Some(wkt_this), Some(wkt_that)) => {
+ let geom_this = parse_wkt(&wkt_this)?;
+ let geom_that = parse_wkt(&wkt_that)?;
+
+ match (geom_this, geom_that) {
+ (Geometry::Point(this), Geometry::Point(that)) => {
+ Some(Haversine::distance(this, that))
+ }
+ _ => {
+ Err(BoxedError::new(PlainError::new(
+ "Great circle distance between non-point objects are not supported for now.".to_string(),
+ StatusCode::Unsupported,
+ ))).context(error::ExecuteSnafu)?
+ }
+ }
+ }
+ _ => None,
+ };
+
+ results.push(result);
+ }
+
+ Ok(results.to_vector())
+ }
+}
+
+/// Return the area of the given geometry object
+#[derive(Clone, Debug, Default, Display)]
+#[display("{}", self.name())]
+pub struct STArea;
+
+impl Function for STArea {
+ fn name(&self) -> &str {
+ "st_area"
+ }
+
+ fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
+ Ok(ConcreteDataType::float64_datatype())
+ }
+
+ fn signature(&self) -> Signature {
+ Signature::new(
+ TypeSignature::Exact(vec![ConcreteDataType::string_datatype()]),
+ Volatility::Stable,
+ )
+ }
+
+ fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
+ ensure_columns_n!(columns, 1);
+
+ let wkt_vec = &columns[0];
+
+ let size = wkt_vec.len();
+ let mut results = Float64VectorBuilder::with_capacity(size);
+
+ for i in 0..size {
+ let wkt = wkt_vec.get(i).as_string();
+
+ let result = if let Some(wkt) = wkt {
+ let geom = parse_wkt(&wkt)?;
+ Some(geom.unsigned_area())
+ } else {
+ None
+ };
+
+ results.push(result);
+ }
+
+ Ok(results.to_vector())
+ }
+}
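st_distance_sphere_m delegates to the geo crate's Haversine distance. A dependency-free sketch of the same great-circle formula follows; the mean Earth radius of 6,371,008.8 m is an assumption here, and the crate's constant may differ slightly:

fn haversine_m(lat1: f64, lng1: f64, lat2: f64, lng2: f64) -> f64 {
    const R: f64 = 6_371_008.8; // assumed mean Earth radius in meters
    let (p1, p2) = (lat1.to_radians(), lat2.to_radians());
    let dp = (lat2 - lat1).to_radians();
    let dl = (lng2 - lng1).to_radians();
    let a = (dp / 2.0).sin().powi(2) + p1.cos() * p2.cos() * (dl / 2.0).sin().powi(2);
    2.0 * R * a.sqrt().atan2((1.0 - a).sqrt())
}

fn main() {
    // one degree of latitude is roughly 111.2 km on a spherical Earth
    let d = haversine_m(0.0, 0.0, 1.0, 0.0);
    assert!((d - 111_195.0).abs() < 100.0);
}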
diff --git a/src/common/function/src/scalars/geo/relation.rs b/src/common/function/src/scalars/geo/relation.rs
new file mode 100644
index 000000000000..570a7c7f569c
--- /dev/null
+++ b/src/common/function/src/scalars/geo/relation.rs
@@ -0,0 +1,190 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use common_query::error::Result;
+use common_query::prelude::{Signature, TypeSignature};
+use datafusion::logical_expr::Volatility;
+use datatypes::prelude::ConcreteDataType;
+use datatypes::scalars::ScalarVectorBuilder;
+use datatypes::vectors::{BooleanVectorBuilder, MutableVector, VectorRef};
+use derive_more::Display;
+use geo::algorithm::contains::Contains;
+use geo::algorithm::intersects::Intersects;
+use geo::algorithm::within::Within;
+
+use super::helpers::{ensure_columns_len, ensure_columns_n};
+use super::wkt::parse_wkt;
+use crate::function::{Function, FunctionContext};
+
+/// Test if spatial relationship: contains
+#[derive(Clone, Debug, Default, Display)]
+#[display("{}", self.name())]
+pub struct STContains;
+
+impl Function for STContains {
+ fn name(&self) -> &str {
+ "st_contains"
+ }
+
+ fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
+ Ok(ConcreteDataType::boolean_datatype())
+ }
+
+ fn signature(&self) -> Signature {
+ Signature::new(
+ TypeSignature::Exact(vec![
+ ConcreteDataType::string_datatype(),
+ ConcreteDataType::string_datatype(),
+ ]),
+ Volatility::Stable,
+ )
+ }
+
+ fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
+ ensure_columns_n!(columns, 2);
+
+ let wkt_this_vec = &columns[0];
+ let wkt_that_vec = &columns[1];
+
+ let size = wkt_this_vec.len();
+ let mut results = BooleanVectorBuilder::with_capacity(size);
+
+ for i in 0..size {
+ let wkt_this = wkt_this_vec.get(i).as_string();
+ let wkt_that = wkt_that_vec.get(i).as_string();
+
+ let result = match (wkt_this, wkt_that) {
+ (Some(wkt_this), Some(wkt_that)) => {
+ let geom_this = parse_wkt(&wkt_this)?;
+ let geom_that = parse_wkt(&wkt_that)?;
+
+ Some(geom_this.contains(&geom_that))
+ }
+ _ => None,
+ };
+
+ results.push(result);
+ }
+
+ Ok(results.to_vector())
+ }
+}
+
+/// Test if spatial relationship: within
+#[derive(Clone, Debug, Default, Display)]
+#[display("{}", self.name())]
+pub struct STWithin;
+
+impl Function for STWithin {
+ fn name(&self) -> &str {
+ "st_within"
+ }
+
+ fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
+ Ok(ConcreteDataType::boolean_datatype())
+ }
+
+ fn signature(&self) -> Signature {
+ Signature::new(
+ TypeSignature::Exact(vec![
+ ConcreteDataType::string_datatype(),
+ ConcreteDataType::string_datatype(),
+ ]),
+ Volatility::Stable,
+ )
+ }
+
+ fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
+ ensure_columns_n!(columns, 2);
+
+ let wkt_this_vec = &columns[0];
+ let wkt_that_vec = &columns[1];
+
+ let size = wkt_this_vec.len();
+ let mut results = BooleanVectorBuilder::with_capacity(size);
+
+ for i in 0..size {
+ let wkt_this = wkt_this_vec.get(i).as_string();
+ let wkt_that = wkt_that_vec.get(i).as_string();
+
+ let result = match (wkt_this, wkt_that) {
+ (Some(wkt_this), Some(wkt_that)) => {
+ let geom_this = parse_wkt(&wkt_this)?;
+ let geom_that = parse_wkt(&wkt_that)?;
+
+ Some(geom_this.is_within(&geom_that))
+ }
+ _ => None,
+ };
+
+ results.push(result);
+ }
+
+ Ok(results.to_vector())
+ }
+}
+
+/// Test if spatial relationship: intersects
+#[derive(Clone, Debug, Default, Display)]
+#[display("{}", self.name())]
+pub struct STIntersects;
+
+impl Function for STIntersects {
+ fn name(&self) -> &str {
+ "st_intersects"
+ }
+
+ fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
+ Ok(ConcreteDataType::boolean_datatype())
+ }
+
+ fn signature(&self) -> Signature {
+ Signature::new(
+ TypeSignature::Exact(vec![
+ ConcreteDataType::string_datatype(),
+ ConcreteDataType::string_datatype(),
+ ]),
+ Volatility::Stable,
+ )
+ }
+
+ fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
+ ensure_columns_n!(columns, 2);
+
+ let wkt_this_vec = &columns[0];
+ let wkt_that_vec = &columns[1];
+
+ let size = wkt_this_vec.len();
+ let mut results = BooleanVectorBuilder::with_capacity(size);
+
+ for i in 0..size {
+ let wkt_this = wkt_this_vec.get(i).as_string();
+ let wkt_that = wkt_that_vec.get(i).as_string();
+
+ let result = match (wkt_this, wkt_that) {
+ (Some(wkt_this), Some(wkt_that)) => {
+ let geom_this = parse_wkt(&wkt_this)?;
+ let geom_that = parse_wkt(&wkt_that)?;
+
+ Some(geom_this.intersects(&geom_that))
+ }
+ _ => None,
+ };
+
+ results.push(result);
+ }
+
+ Ok(results.to_vector())
+ }
+}
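A small usage sketch of the same geo traits these functions wrap, assuming the geo and geo-types versions added to Cargo.toml above; it also shows that st_contains and st_within are duals of each other:

use geo::algorithm::contains::Contains;
use geo::algorithm::within::Within;
use geo_types::{polygon, Point};

fn main() {
    let square = polygon![(x: 0.0, y: 0.0), (x: 0.0, y: 2.0), (x: 2.0, y: 2.0), (x: 2.0, y: 0.0)];
    let point = Point::new(1.0, 1.0);
    // contains(a, b) == within(b, a)
    assert!(square.contains(&point));
    assert!(point.is_within(&square));
}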
diff --git a/src/common/function/src/scalars/geo/wkt.rs b/src/common/function/src/scalars/geo/wkt.rs
new file mode 100644
index 000000000000..3602eb5d3631
--- /dev/null
+++ b/src/common/function/src/scalars/geo/wkt.rs
@@ -0,0 +1,100 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use common_error::ext::{BoxedError, PlainError};
+use common_error::status_code::StatusCode;
+use common_query::error::{self, Result};
+use common_query::prelude::{Signature, TypeSignature};
+use datafusion::logical_expr::Volatility;
+use datatypes::prelude::ConcreteDataType;
+use datatypes::scalars::ScalarVectorBuilder;
+use datatypes::vectors::{MutableVector, StringVectorBuilder, VectorRef};
+use derive_more::Display;
+use geo_types::{Geometry, Point};
+use once_cell::sync::Lazy;
+use snafu::ResultExt;
+use wkt::{ToWkt, TryFromWkt};
+
+use super::helpers::{ensure_columns_len, ensure_columns_n};
+use crate::function::{Function, FunctionContext};
+
+static COORDINATE_TYPES: Lazy<Vec<ConcreteDataType>> = Lazy::new(|| {
+ vec![
+ ConcreteDataType::float32_datatype(),
+ ConcreteDataType::float64_datatype(),
+ ]
+});
+
+/// Convert a latitude/longitude pair into a WKT point string
+#[derive(Clone, Debug, Default, Display)]
+#[display("{}", self.name())]
+pub struct LatLngToPointWkt;
+
+impl Function for LatLngToPointWkt {
+ fn name(&self) -> &str {
+ "wkt_point_from_latlng"
+ }
+
+ fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
+ Ok(ConcreteDataType::string_datatype())
+ }
+
+ fn signature(&self) -> Signature {
+ let mut signatures = Vec::new();
+ for coord_type in COORDINATE_TYPES.as_slice() {
+ signatures.push(TypeSignature::Exact(vec![
+ // latitude
+ coord_type.clone(),
+ // longitude
+ coord_type.clone(),
+ ]));
+ }
+ Signature::one_of(signatures, Volatility::Stable)
+ }
+
+ fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
+ ensure_columns_n!(columns, 2);
+
+ let lat_vec = &columns[0];
+ let lng_vec = &columns[1];
+
+ let size = lat_vec.len();
+ let mut results = StringVectorBuilder::with_capacity(size);
+
+ for i in 0..size {
+ let lat = lat_vec.get(i).as_f64_lossy();
+ let lng = lng_vec.get(i).as_f64_lossy();
+
+ let result = match (lat, lng) {
+ (Some(lat), Some(lng)) => Some(Point::new(lng, lat).wkt_string()),
+ _ => None,
+ };
+
+ results.push(result.as_deref());
+ }
+
+ Ok(results.to_vector())
+ }
+}
+
+pub(super) fn parse_wkt(s: &str) -> Result<Geometry> {
+ Geometry::try_from_wkt_str(s)
+ .map_err(|e| {
+ BoxedError::new(PlainError::new(
+ format!("Fail to parse WKT: {}", e),
+ StatusCode::EngineExecuteQuery,
+ ))
+ })
+ .context(error::ExecuteSnafu)
+}
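A round-trip sketch using the same wkt-crate APIs as above (ToWkt and TryFromWkt), noting the longitude-first ordering that wkt_point_from_latlng applies when building the point:

use geo_types::{Geometry, Point};
use wkt::{ToWkt, TryFromWkt};

fn main() {
    // wkt_point_from_latlng(lat, lng) stores the point as (lng, lat)
    let wkt_string = Point::new(-122.3889, 37.76938).wkt_string();
    let geom: Geometry = Geometry::try_from_wkt_str(&wkt_string).expect("valid WKT");
    assert!(matches!(geom, Geometry::Point(_)));
}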
diff --git a/src/servers/src/postgres/types.rs b/src/servers/src/postgres/types.rs
index a7ce2252b955..0bd8964a82f6 100644
--- a/src/servers/src/postgres/types.rs
+++ b/src/servers/src/postgres/types.rs
@@ -24,6 +24,7 @@ use chrono::{NaiveDate, NaiveDateTime, NaiveTime};
use common_time::{IntervalDayTime, IntervalMonthDayNano, IntervalYearMonth};
use datafusion_common::ScalarValue;
use datafusion_expr::LogicalPlan;
+use datatypes::arrow::datatypes::DataType as ArrowDataType;
use datatypes::prelude::{ConcreteDataType, Value};
use datatypes::schema::Schema;
use datatypes::types::{IntervalType, TimestampType};
@@ -529,6 +530,21 @@ pub(super) fn type_pg_to_gt(origin: &Type) -> Result<ConcreteDataType> {
)),
&Type::DATE => Ok(ConcreteDataType::date_datatype()),
&Type::TIME => Ok(ConcreteDataType::datetime_datatype()),
+ &Type::CHAR_ARRAY => Ok(ConcreteDataType::list_datatype(
+ ConcreteDataType::int8_datatype(),
+ )),
+ &Type::INT2_ARRAY => Ok(ConcreteDataType::list_datatype(
+ ConcreteDataType::int16_datatype(),
+ )),
+ &Type::INT4_ARRAY => Ok(ConcreteDataType::list_datatype(
+ ConcreteDataType::int32_datatype(),
+ )),
+ &Type::INT8_ARRAY => Ok(ConcreteDataType::list_datatype(
+ ConcreteDataType::int64_datatype(),
+ )),
+ &Type::VARCHAR_ARRAY => Ok(ConcreteDataType::list_datatype(
+ ConcreteDataType::string_datatype(),
+ )),
_ => server_error::InternalSnafu {
err_msg: format!("unimplemented datatype {origin:?}"),
}
@@ -974,6 +990,42 @@ pub(super) fn parameters_to_scalar_values(
ScalarValue::Binary(data.map(|d| d.to_string().into_bytes()))
}
}
+ &Type::INT2_ARRAY => {
+ let data = portal.parameter::<Vec<i16>>(idx, &client_type)?;
+ if let Some(data) = data {
+ let values = data.into_iter().map(|i| i.into()).collect::<Vec<_>>();
+ ScalarValue::List(ScalarValue::new_list(&values, &ArrowDataType::Int16))
+ } else {
+ ScalarValue::Null
+ }
+ }
+ &Type::INT4_ARRAY => {
+ let data = portal.parameter::<Vec<i32>>(idx, &client_type)?;
+ if let Some(data) = data {
+ let values = data.into_iter().map(|i| i.into()).collect::<Vec<_>>();
+ ScalarValue::List(ScalarValue::new_list(&values, &ArrowDataType::Int32))
+ } else {
+ ScalarValue::Null
+ }
+ }
+ &Type::INT8_ARRAY => {
+ let data = portal.parameter::<Vec<i64>>(idx, &client_type)?;
+ if let Some(data) = data {
+ let values = data.into_iter().map(|i| i.into()).collect::<Vec<_>>();
+ ScalarValue::List(ScalarValue::new_list(&values, &ArrowDataType::Int64))
+ } else {
+ ScalarValue::Null
+ }
+ }
+ &Type::VARCHAR_ARRAY => {
+ let data = portal.parameter::<Vec<String>>(idx, &client_type)?;
+ if let Some(data) = data {
+ let values = data.into_iter().map(|i| i.into()).collect::<Vec<_>>();
+ ScalarValue::List(ScalarValue::new_list(&values, &ArrowDataType::Utf8))
+ } else {
+ ScalarValue::Null
+ }
+ }
_ => Err(invalid_parameter_error(
"unsupported_parameter_value",
Some(format!("Found type: {}", client_type)),
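The array branches above all follow the same shape; a minimal sketch of the INT8_ARRAY case as a free function, assuming the workspace's datafusion_common and datatypes crates exactly as imported in the patch:

use datafusion_common::ScalarValue;
use datatypes::arrow::datatypes::DataType as ArrowDataType;

/// Convert an optional Postgres INT8[] parameter into a DataFusion list scalar.
fn int8_array_to_scalar(data: Option<Vec<i64>>) -> ScalarValue {
    match data {
        Some(data) => {
            let values: Vec<ScalarValue> = data.into_iter().map(|i| i.into()).collect();
            ScalarValue::List(ScalarValue::new_list(&values, &ArrowDataType::Int64))
        }
        None => ScalarValue::Null,
    }
}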
diff --git a/tests/cases/standalone/common/function/geo.result b/tests/cases/standalone/common/function/geo.result
index 8c9460c738fa..2f0d08b851c9 100644
--- a/tests/cases/standalone/common/function/geo.result
+++ b/tests/cases/standalone/common/function/geo.result
@@ -142,18 +142,53 @@ FROM (SELECT h3_latlng_to_cell(37.76938, -122.3889, 8::UInt64) AS cell);
SELECT
h3_grid_distance(cell1, cell2) AS distance,
h3_grid_path_cells(cell1, cell2) AS path_cells,
+ round(h3_distance_sphere_km(cell1, cell2), 5) AS sphere_distance,
+ h3_distance_degree(cell1, cell2) AS euclidean_distance,
FROM
(
SELECT
- h3_latlng_to_cell(37.76938, -122.3889, 8::UInt64) AS cell1,
- h3_latlng_to_cell(39.634, -104.999, 8::UInt64) AS cell2
+ h3_string_to_cell('86283082fffffff') AS cell1,
+ h3_string_to_cell('86283470fffffff') AS cell2
);
-+----------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
-+----------+------------+
-| distance | path_cells |
-+----------+------------+
-| 1612 | [613196570438926335, 613196569755254783, 613196569744769023, 613196569748963327, 613196569669271551, 613196569673465855, 613196569763643391, 613196569767837695, 613196570023690239, 613196570021593087, 613196570025787391, 613196569998524415, 613196570002718719, 613196570040467455, 613196570029981695, 613196570034175999, 613196572437512191, 613196572441706495, 613196572414443519, 613196572418637823, 613196572456386559, 613196572445900799, 613196572450095103, 613196572705947647, 613196572710141951, 613196572691267583, 613196572680781823, 613196572684976127, 613196572722724863, 613196572726919167, 613196592932978687, 613196592937172991, 613196592421273599, 613196592410787839, 613196592414982143, 613196592452730879, 613196592456925183, 613196592261890047, 613196592266084351, 613196592689709055, 613196592679223295, 613196592683417599, 613196592496771071, 613196592500965375, 613196592538714111, 613196592528228351, 613196592532422655, 613196587587338239, 613196587591532543, 613196587396497407, 613196587400691711, 613196587438440447, 613196587427954687, 613196587432148991, 613196586916249599, 613196586920443903, 613196587664932863, 613196587669127167, 613196587706875903, 613196587704778751, 613196587708973055, 613196587681710079, 613196587685904383, 613196593444683775, 613196593434198015, 613196593438392319, 613196593476141055, 613196593480335359, 613196593453072383, 613196593457266687, 613196593713119231, 613196593702633471, 613196593706827775, 613196593744576511, 613196593748770815, 613196593729896447, 613196593719410687, 613196593723604991, 613196962272903167, 613196962277097471, 613196962249834495, 613196962254028799, 613196962291777535, 613196962281291775, 613196962285486079, 613196961601814527, 613196961606008831, 613196961578745855, 613196961582940159, 613196961620688895, 613196961610203135, 613196961614397439, 613196961931067391, 613196961935261695, 613196961855569919, 613196961845084159, 613196961849278463, 613196961887027199, 613196961891221503, 613196956830793727, 613196956834988031, 613196956755296255, 613196956744810495, 613196956749004799, 613196956786753535, 613196956790947839, 613196957099229183, 613196957103423487, 613196957023731711, 613196957021634559, 613196957025828863, 613196957116006399, 613196957120200703, 613196962878980095, 613196962868494335, 613196962872688639, 613196962792996863, 613196962797191167, 613196962887368703, 613196962891563007, 613196963147415551, 613196963136929791, 613196963141124095, 613196963061432319, 613196963065626623, 613196963164192767, 613196963153707007, 613196963157901311, 613196979519881215, 613196979524075519, 613196979496812543, 613196979501006847, 613196979538755583, 613196979528269823, 613196979532464127, 613196978848792575, 613196978852986879, 613196978825723903, 613196978829918207, 613196978867666943, 613196978857181183, 613196978861375487, 613196979614253055, 613196979618447359, 613196979102547967, 613196979092062207, 613196979096256511, 613196979134005247, 613196979138199551, 613196977869422591, 613196977873616895, 613196974002274303, 613196973991788543, 613196973995982847, 613196974033731583, 613196974037925887, 613196978137858047, 613196978142052351, 613196978139955199, 613196974268612607, 613196974272806911, 613196980520222719, 613196980524417023, 613196980562165759, 613196980551679999, 613196980555874303, 613196980039974911, 613196980044169215, 613196979849134079, 613196979853328383, 613196979891077119, 613196979880591359, 613196979884785663, 613196980308410367, 613196980312604671, 613196980125958143, 613196980115472383, 
613196980119666687, 613196980157415423, 613196980161609727, 613196980134346751, 613196980138541055, 613220567281041407, 613220567270555647, 613220567274749951, 613220567312498687, 613220567316692991, 613220567289430015, 613220567293624319, 613220567549476863, 613220567538991103, 613220567543185407, 613220567524311039, 613220567528505343, 613220567566254079, 613220567555768319, 613220567559962623, 613220565802549247, 613220565806743551, 613220565779480575, 613220565783674879, 613220565821423615, 613220565810937855, 613220565815132159, 613220566070984703, 613220566075179007, 613220566047916031, 613220566052110335, 613220566050013183, 613220566087761919, 613220566091956223, 613220568547721215, 613220568551915519, 613220568472223743, 613220568461737983, 613220568465932287, 613220568503681023, 613220568507875327, 613220567876632575, 613220567880826879, 613220567801135103, 613220567790649343, 613220567794843647, 613220567832592383, 613220567836786687, 613220568153456639, 613220568142970879, 613220568147165183, 613220568067473407, 613220568071667711, 613220568161845247, 613220568166039551, 613220524398477311, 613220524387991551, 613220524392185855, 613220524312494079, 613220524316688383, 613220524406865919, 613220524411060223, 613220524666912767, 613220524656427007, 613220524669009919, 613220524641746943, 613220524645941247, 613220524683689983, 613220524673204223, 613220524677398527, 613220522919985151, 613220522924179455, 613220522896916479, 613220522901110783, 613220522938859519, 613220522928373759, 613220522932568063, 613220523188420607, 613220523192614911, 613220523165351935, 613220523169546239, 613220523167449087, 613220523205197823, 613220523209392127, 613220523014356991, 613220523018551295, 613220525589659647, 613220525579173887, 613220525583368191, 613220525621116927, 613220525625311231, 613220525430276095, 613220525434470399, 613220524918571007, 613220524908085247, 613220524912279551, 613220524950028287, 613220524954222591, 613220525707100159, 613220525696614399, 613220525700808703, 613220525184909311, 613220525189103615, 613220573312450559, 613220573316644863, 613220573354393599, 613220573343907839, 613220573348102143, 613220541559472127, 613220541563666431, 613220573580886015, 613220573585080319, 613220573622829055, 613220573612343295, 613220573624926207, 613220573597663231, 613220573601857535, 613220540571713535, 613220540561227775, 613220540565422079, 613220540603170815, 613220540607365119, 613220540580102143, 613220540584296447, 613220539900624895, 613220539890139135, 613220539894333439, 613220539932082175, 613220539936276479, 613220539909013503, 613220539913207807, 613220539911110655, 613220540166963199, 613220540171157503, 613220540143894527, 613220540148088831, 613220540185837567, 613220540175351807, 613220540179546111, 613220542582882303, 613220542587076607, 613220542559813631, 613220542564007935, 613220542601756671, 613220542591270911, 613220542595465215, 613220542851317759, 613220542855512063, 613220542836637695, 613220542826151935, 613220542830346239, 613220542868094975, 613220542872289279, 613220461710409727, 613220461714604031, 613220461634912255, 613220461624426495, 613220461628620799, 613220461666369535, 613220461670563839, 613220461978845183, 613220461983039487, 613220461903347711, 613220461892861951, 613220461905444863, 613220461995622399, 613220461999816703, 613220214221307903, 613220214210822143, 613220214215016447, 613220214135324671, 613220214139518975, 613220214229696511, 613220214233890815, 613220213550219263, 613220213539733503, 613220213543927807, 
613220213464236031, 613220213468430335, 613220213558607871, 613220213562802175, 613220213560705023, 613220213816557567, 613220213820751871, 613220213793488895, 613220213797683199, 613220213835431935, 613220213824946175, 613220213829140479, 613220216232476671, 613220216236670975, 613220216209407999, 613220216213602303, 613220216251351039, 613220216240865279, 613220216245059583, 613220216500912127, 613220476981870591, 613220216486232063, 613220216475746303, 613220216479940607, 613220216517689343, 613220216521883647, 613220479393595391, 613220479397789695, 613220478881890303, 613220478871404543, 613220478875598847, 613220478913347583, 613220478917541887, 613220478722506751, 613220478726701055, 613220479150325759, 613220479139839999, 613220479152422911, 613220478957387775, 613220478961582079, 613220478999330815, 613220478988845055, 613220478993039359, 613220231382302719, 613220231386497023, 613220231191461887, 613220231195656191, 613220231233404927, 613220231222919167, 613220231227113471, 613220230711214079, 613220230715408383, 613220231459897343, 613220231464091647, 613220231461994495, 613220231499743231, 613220231503937535, 613220231476674559, 613220231480868863, 613220631302897663, 613220631292411903, 613220631296606207, 613220631334354943, 613220631338549247, 613220631311286271, 613220631315480575, 613220631571333119, 613220631560847359, 613220631565041663, 613220631602790399, 613220631550361599, 613220631588110335, 613220631577624575, 613220631581818879, 613220633985155071, 613220633989349375, 613220633962086399, 613220633966280703, 613220634004029439, 613220633993543679, 613220633997737983, 613220633314066431, 613220633318260735, 613220633290997759, 613220633295192063, 613220633332940799, 613220633322455039, 613220633335037951, 613220633643319295, 613220633647513599, 613220633567821823, 613220633557336063, 613220633561530367, 613220633599279103, 613220633603473407, 613220600625758207, 613220600629952511, 613220600550260735, 613220600539774975, 613220600543969279, 613220600581718015, 613220600585912319, 613220600894193663, 613220600898387967, 613220600896290815, 613220600816599039, 613220600820793343, 613220600910970879, 613220600915165183, 613220588420333567, 613220588409847807, 613220588414042111, 613220588334350335, 613220588338544639, 613220588428722175, 613220588432916479, 613220588688769023, 613220588678283263, 613220588682477567, 613220588602785791, 613220588667797503, 613220588705546239, 613220588695060479, 613220588699254783, 613220591102590975, 613220591106785279, 613220591079522303, 613220591083716607, 613220591121465343, 613220591110979583, 613220591115173887, 613220590431502335, 613220590435696639, 613220590408433663, 613220590412627967, 613220590450376703, 613220590439890943, 613220590452473855, 613220591196962815, 613220591201157119, 613220590685257727, 613220590674771967, 613220590678966271, 613220590716715007, 613220590720909311, 613220589452132351, 613220589456326655, 613220617797238783, 613220617786753023, 613220617790947327, 613220617828696063, 613220617832890367, 613220589720567807, 613220589718470655, 613220589722664959, 613220618063577087, 613220618067771391, 613220606061576191, 613220606065770495, 613220606103519231, 613220606093033471, 613220606097227775, 613220605581328383, 613220605585522687, 613220605390487551, 613220605394681855, 613220605432430591, 613220605421944831, 613220605426139135, 613220605849763839, 613220605411459071, 613220605667311615, 613220605656825855, 613220605661020159, 613220605698768895, 613220605702963199, 613220605675700223, 
613220605679894527, 613220608083230719, 613220608072744959, 613220608076939263, 613220608114687999, 613220608118882303, 613220608091619327, 613220608095813631, 613220608351666175, 613220608341180415, 613220608353763327, 613220608326500351, 613220608330694655, 613220608368443391, 613220608357957631, 613220608362151935, 613220606604738559, 613220606608932863, 613220606581669887, 613220606585864191, 613220606623612927, 613220606613127167, 613220606617321471, 613220606873174015, 613220606877368319, 613220606850105343, 613220606848008191, 613220606852202495, 613220606889951231, 613220606894145535, 613221654100705279, 613221654104899583, 613221654025207807, 613221654014722047, 613221654018916351, 613221654056665087, 613221654060859391, 613221653429616639, 613221653433810943, 613221653354119167, 613221653343633407, 613221653347827711, 613221653385576447, 613221653450588159, 613221653706440703, 613221653695954943, 613221653700149247, 613221653620457471, 613221653624651775, 613221653714829311, 613221653719023615, 613221656122359807, 613221656111874047, 613221656116068351, 613221656036376575, 613221656040570879, 613221656130748415, 613221656134942719, 613221656390795263, 613221656388698111, 613221656392892415, 613221656365629439, 613221656369823743, 613221656407572479, 613221656397086719, 613221656401281023, 613221654643867647, 613221654648061951, 613221654620798975, 613221654624993279, 613221654662742015, 613221654652256255, 613221654656450559, 613221654912303103, 613221654916497407, 613221654889234431, 613221654887137279, 613221654891331583, 613221654929080319, 613221654933274623, 613221654738239487, 613221654742433791, 613221671272185855, 613221671261700095, 613221671265894399, 613221671303643135, 613221671307837439, 613221671112802303, 613221671116996607, 613221670601097215, 613221670590611455, 613221670594805759, 613221670632554495, 613221671351877631, 613221671389626367, 613221671379140607, 613221671383334911, 613221670867435519, 613221670871629823, 613221669602852863, 613221669607047167, 613221669644795903, 613221669634310143, 613221669638504447, 613221673283354623, 613221673287548927, 613221669871288319, 613221669875482623, 613221669913231359, 613221669911134207, 613221669915328511, 613221669888065535, 613221669892259839, 613221672295596031, 613221672285110271, 613221672289304575, 613221672327053311, 613221672331247615, 613221672303984639, 613221672308178943, 613221671624507391, 613221671614021631, 613221671618215935, 613221671655964671, 613221671660158975, 613221671632895999, 613221671630798847, 613221671634993151, 613221671890845695, 613221671895039999, 613221671867777023, 613221671871971327, 613221671909720063, 613221671899234303, 613221671903428607, 613221559416389631, 613221559420583935, 613221559393320959, 613221559397515263, 613221559435263999, 613221559424778239, 613221559428972543, 613221559684825087, 613221559749836799, 613221559670145023, 613221559659659263, 613221559663853567, 613221559701602303, 613221559705796607, 613221558000812031, 613221558005006335, 613221557925314559, 613221557914828799, 613221557919023103, 613221557956771839, 613221557960966143, 613221558269247487, 613221558273441791, 613221558193750015, 613221558191652863, 613221558195847167, 613221558286024703, 613221558290219007, 613221560693555199, 613221560683069439, 613221560687263743, 613221560607571967, 613221560611766271, 613221560701943807, 613221560706138111, 613221560022466559, 613221560011980799, 613221560016175103, 613221559936483327, 613221559940677631, 613221560030855167, 613221560028758015, 
613221560032952319, 613221560288804863, 613221560292999167, 613221560265736191, 613221560269930495, 613221560307679231, 613221560297193471, 613221560301387775, 613221576663367679, 613221576667561983, 613221576640299007, 613221576644493311, 613221576682242047, 613221576671756287, 613221576675950591, 613221580784271359, 613221580788465663, 613221576917123071, 613221576906637311, 613221576910831615, 613221576948580351, 613221576952774655, 613221575683997695, 613221575688191999, 613221575172292607, 613221575161806847, 613221575166001151, 613221575203749887, 613221575207944191, 613221575012909055, 613221575017103359, 613221575440728063, 613221575438630911, 613221575442825215, 613221575247790079, 613221575251984383, 613221575289733119, 613221575279247359, 613221575283441663, 613221577854550015, 613221577858744319, 613221577663709183, 613221577667903487, 613221577705652223, 613221577695166463, 613221577699360767, 613221577183461375, 613221577187655679, 613221577932144639, 613221577930047487, 613221577934241791, 613221577971990527, 613221577976184831, 613221577948921855, 613221577953116159, 613221735109492735, 613221735099006975, 613221735103201279, 613221735140950015, 613221735145144319, 613221735117881343, 613221735122075647, 613221735377928191, 613221735367442431, 613221735371636735, 613221735352762367, 613221735356956671, 613221735394705407, 613221735384219647, 613221735388413951, 613221730275557375, 613221730279751679, 613221730252488703, 613221730256683007, 613221730294431743, 613221730283945983, 613221730288140287, 613221729604468735, 613221729608663039, 613221729581400063, 613221729585594367, 613221729623343103, 613221729621245951, 613221729625440255, 613221729933721599, 613221729937915903, 613221729858224127, 613221729847738367, 613221729851932671, 613221729889681407, 613221729893875711, 613221732349640703, 613221732353835007, 613221732274143231, 613221732263657471, 613221732267851775, 613221732305600511, 613221732309794815, 613221732626464767, 613221732615979007, 613221732620173311, 613221732540481535, 613221732544675839, 613221732634853375, 613221732639047679, 613221692226928639, 613221692216442879, 613221692220637183, 613221692140945407, 613221692145139711, 613221692235317247, 613221692239511551, 613221692495364095, 613221692484878335, 613221692489072639, 613221692470198271, 613221692474392575, 613221692512141311, 613221692501655551, 613221692505849855, 613221687392993279, 613221687397187583, 613221687369924607, 613221687374118911, 613221687411867647, 613221687401381887, 613221687405576191, 613221686721904639, 613221686726098943, 613221686698835967, 613221686703030271, 613221686740779007, 613221686738681855, 613221686742876159, 613221687487365119, 613221687491559423, 613221686975660031, 613221686965174271, 613221686969368575, 613221687007117311, 613221687011311615, 613221693258727423, 613221693262921727, 613221689391579135, 613221689381093375, 613221689385287679, 613221689423036415, 613221689427230719, 613221693535551487, 613221693525065727, 613221693529260031, 613221689657917439, 613221689662111743, 613221709868171263, 613221709872365567, 613221709910114303, 613221709899628543, 613221709903822847, 613221709387923455, 613221709392117759, 613221709197082623, 613221709201276927, 613221709239025663, 613221709228539903, 613221709232734207, 613221709213859839, 613221709218054143, 613221709473906687, 613221709463420927, 613221709467615231, 613221709505363967, 613221709509558271, 613221709482295295, 613221709486489599, 613221704373633023, 613221704363147263, 613221704367341567, 
613221704405090303, 613221704409284607, 613221704382021631, 613221704386215935, 613221704642068479, 613221704639971327, 613221704644165631, 613221704616902655, 613221704621096959, 613221704658845695, 613221704648359935, 613221704652554239, 613221710411333631, 613221710415527935, 613221710388264959, 613221710392459263, 613221710430207999, 613221710419722239, 613221710423916543, 613221710679769087, 613221710683963391, 613221710665089023, 613221710654603263, 613221710658797567, 613221710696546303, 613221710700740607, 613168113669636095, 613168113665441791, 613168113627693055, 613168113623498751, 613168113633984511, 613168113713676287, 613168113709481983, 613168113401200639, 613168113397006335, 613168113359257599, 613168113355063295, 613168113365549055, 613168113384423423, 613168113380229119, 613168114063900671, 613168114059706367, 613168114070192127, 613168114032443391, 613168114028249087, 613168114055512063, 613168114051317759, 613168111647981567, 613168111643787263, 613168111654273023, 613168111616524287, 613168111612329983, 613168111639592959, 613168111635398655, 613168111637495807, 613168111381643263, 613168111377448959, 613168111404711935, 613168111400517631, 613168111362768895, 613168111358574591, 613168111369060351, 613168113126473727, 613168113122279423, 613168113149542399, 613168113145348095, 613168113107599359, 613168113103405055, 613168113113890815, 613168112858038271, 613168112853843967, 613168112872718335, 613168112868524031, 613168112879009791, 613168112841261055, 613168112837066751, 613168113032101887, 613168113027907583, 613168096498155519, 613168096493961215, 613168096504446975, 613168096466698239, 613168096462503935, 613168096657539071, 613168096653344767, 613168096229720063, 613168096225525759, 613168096236011519, 613168096422658047, 613168096418463743, 613168096380715007, 613168096376520703, 613168096387006463, 613168096902905855, 613168096898711551, 613168098167488511, 613168098163294207, 613168098125545471, 613168098121351167, 613168098131836927, 613168094486986751, 613168094482792447, 613168097899053055, 613168097894858751, 613168097896955903, 613168097859207167, 613168097855012863, 613168097882275839, 613168097878081535, 613168095474745343, 613168095470551039, 613168095481036799, 613168095443288063, 613168095439093759, 613168095466356735, 613168095462162431, 613168095206309887, 613168095202115583, 613168095212601343, 613168095174852607, 613168095170658303, 613168095189532671, 613168095185338367, 613168095195824127, 613168095879495679, 613168095875301375, 613168095785123839, 613168095780929535, 613168095860621311, 613168095856427007, 613168095866912767, 613168208353951743, 613168208349757439, 613168208259579903, 613168208255385599, 613168208335077375, 613168208330883071, 613168208341368831, 613168208024698879, 613168208020504575, 613168207982755839, 613168207978561535, 613168207989047295, 613168208068739071, 613168208064544767, 613168209769529343, 613168209765335039, 613168209727586303, 613168209723391999, 613168209733877759, 613168209813569535, 613168209809375231, 613168209501093887, 613168209496899583, 613168209498996735, 613168209461247999, 613168209457053695, 613168209484316671, 613168209480122367, 613168207076786175, 613168207072591871, 613168207083077631, 613168207045328895, 613168207041134591, 613168207068397567, 613168207064203263, 613168206808350719, 613168206804156415, 613168206814642175, 613168206776893439, 613168206772699135, 613168206791573503, 613168206787379199, 613168206797864959, 613168207481536511, 613168207477342207, 613168207504605183, 
613168207500410879, 613168207462662143, 613168207458467839, 613168207468953599, 613168191106973695, 613168191102779391, 613168191130042367, 613168191125848063, 613168191088099327, 613168191083905023, 613168191086002175, 613168159068782591, 613168159064588287, 613168190853218303, 613168190849023999, 613168190859509759, 613168190821761023, 613168190817566719, 613168192086343679, 613168192082149375, 613168192598048767, 613168192593854463, 613168192604340223, 613168192566591487, 613168192562397183, 613168191817908223, 613168191813713919, 613168191815811071, 613168192331710463, 613168192327516159, 613168192522551295, 613168192518356991, 613168192480608255, 613168192476413951, 613168192486899711, 613168189915791359, 613168189911597055, 613168190106632191, 613168190102437887, 613168190064689151, 613168190060494847, 613168190070980607, 613168189647355903, 613168189643161599, 613168189829808127, 613168189825613823, 613168189836099583, 613168189798350847, 613168189794156543, 613168189821419519, 613168189817225215, 613168202312056831, 613168202307862527, 613168202318348287, 613168202280599551, 613168202276405247, 613168202303668223, 613168202299473919, 613168202043621375, 613168202039427071, 613168202041524223, 613168201951346687, 613168201947152383, 613168202026844159, 613168202022649855, 613168202033135615, 613168037494783999, 613168037490589695, 613168037400412159, 613168037396217855, 613168037475909631, 613168037471715327, 613168037482201087, 613168037226348543, 613168037222154239, 613168037131976703, 613168037127782399, 613168037129879551, 613168037209571327, 613168037205377023, 613168037836619775, 613168037832425471, 613168037794676735, 613168037790482431, 613168037800968191, 613168037880659967, 613168037876465663, 613168035420700671, 613168035416506367, 613168035378757631, 613168035374563327, 613168035385049087, 613168035464740863, 613168035399729151, 613168035143876607, 613168035139682303, 613168035150168063, 613168035112419327, 613168035108225023, 613168035135487999, 613168035131293695, 613168047626125311, 613168047621931007, 613168047632416767, 613168047594668031, 613168047590473727, 613168047617736703, 613168047613542399, 613168047357689855, 613168047353495551, 613168047355592703, 613168047382855679, 613168047378661375, 613168047340912639, 613168047336718335, 613168047347204095, 613168020247805951, 613168020243611647, 613168020270874623, 613168020266680319, 613168020228931583, 613168020224737279, 613168020235223039, 613168019979370495, 613168019975176191, 613168020002439167, 613168019998244863, 613168020000342015, 613168019962593279, 613168019958398975, 613168020153434111, 613168020149239807, 613168020665139199, 613168020660944895, 613168020671430655, 613168020633681919, 613168020629487615, 613168032635682815, 613168032631488511, 613168018249220095, 613168018245025791, 613168018255511551, 613168018217762815, 613168032396607487, 613168032358858751, 613168032354664447, 613168032365150207, 613168017982881791, 613168017978687487, 613168029984882687, 613168029980688383, 613168029942939647, 613168029938745343, 613168029949231103, 613168030465130495, 613168030460936191, 613168029716447231, 613168029712252927, 613168029674504191, 613168029670309887, 613168029672407039, 613168029699670015, 613168029695475711, 613168030379147263, 613168030374952959, 613168030385438719, 613168030347689983, 613168030343495679, 613168030370758655, 613168030366564351, 613168063396708351, 613168063392514047, 613168063402999807, 613168063365251071, 613168063361056767, 613168063388319743, 613168063384125439, 
613168063386222591, 613168063130370047, 613168063126175743, 613168063035998207, 613168063031803903, 613168063111495679, 613168063107301375, 613168063117787135, 613168831526862847, 613168831522668543, 613168831432491007, 613168831428296703, 613168831507988479, 613168831503794175, 613168831514279935, 613168831258427391, 613168831193415679, 613168831155666943, 613168831151472639, 613168831161958399, 613168831241650175, 613168831237455871, 613168828781690879, 613168828777496575, 613168828739747839, 613168828735553535, 613168828746039295, 613168828825731071, 613168828821536767, 613168828513255423, 613168828509061119, 613168828471312383, 613168828467118079, 613168828469215231, 613168828496478207, 613168828492283903, 613168829175955455, 613168829171761151, 613168829182246911, 613168829144498175, 613168829140303871, 613168829167566847, 613168829163372543, 613167015357579263, 613167015353384959, 613167015363870719, 613167015326121983, 613167015321927679, 613167015349190655, 613167015336607743, 613167015347093503, 613167015091240959, 613167015087046655, 613167015114309631, 613167015110115327, 613167015072366591, 613167015068172287, 613167015078658047, 613168814279884799, 613168814275690495, 613168814302953471, 613168814298759167, 613168814261010431, 613168814256816127, 613168814267301887, 613168814011449343, 613168813510230015, 613168814026129407, 613168814021935103, 613168814032420863, 613168813994672127, 613168813990477823, 613168814185512959, 613168814181318655, 613168811610210303, 613168811606015999, 613168811616501759, 613168811578753023, 613168811574558719, 613168811769593855, 613168811765399551, 613168811341774847, 613168811337580543, 613168811339677695, 613168811534712831, 613168811530518527, 613168811492769791, 613168811488575487, 613168811499061247, 613168812014960639, 613168812010766335, 613168813279543295, 613168813275348991, 613168813237600255, 613168813233405951, 613168813243891711, 613166998196584447, 613166998192390143, 613168813011107839, 613168812998524927, 613168813009010687, 613168812971261951, 613168812967067647, 613168812994330623, 613168812990136319, 613167078641238015, 613167078637043711, 613167078647529471, 613167078609780735, 613167078605586431, 613167078632849407, 613167078628655103, 613167078372802559, 613167078368608255, 613167078379094015, 613167078341345279, 613167078276333567, 613167078356025343, 613167078351831039, 613167078362316799, 613167079045988351, 613167079041794047, 613167078951616511, 613167078947422207, 613167079027113983, 613167079022919679, 613167079033405439, 613167076630069247, 613167076625874943, 613167076535697407, 613167076531503103, 613167076611194879, 613167076607000575, 613167076609097727, 613167076300816383, 613167076296622079, 613167076258873343, 613167076254679039, 613167076265164799, 613167076344856575, 613167076340662271, 613167078045646847, 613167078041452543, 613167078003703807, 613167077999509503, 613167078009995263, 613167078089687039, 613167078085492735, 613167077777211391, 613167077764628479, 613167077775114239, 613167077737365503, 613167077733171199, 613167077760434175, 613167077756239871, 613167061394259967, 613167061390065663, 613167061400551423, 613167061362802687, 613167061358608383, 613167061385871359, 613167061381677055, 613167061125824511, 613167061121630207, 613167061132115967, 613167061094367231, 613167061146796031, 613167061109047295, 613167061104852991, 613167061115338751, 613167061799010303, 613167061794815999, 613167061822078975, 613167061817884671, 613167061780135935, 613167061775941631, 613167061786427391, 
613167059383091199, 613167059378896895, 613167059406159871, 613167059401965567, 613167059364216831, 613167059366313983, 613167059362119679, 613167055262187519, 613167055257993215, 613167059129335807, 613167059125141503, 613167059135627263, 613167059097878527, 613167059093684223, 613167060362461183, 613167060358266879, 613167060874166271, 613167060869971967, 613167060880457727, 613167060842708991, 613167060838514687, 613167060094025727, 613167060081442815, 613167060091928575, 613167060607827967, 613167060603633663, 613167060798668799, 613167060794474495, 613167060756725759, 613167060752531455, 613167060763017215, 613167104362807295, 613167104358612991, 613167104553648127, 613167104549453823, 613167104511705087, 613167104507510783, 613167104517996543, 613167104094371839, 613167104532676607, 613167104276824063, 613167104272629759, 613167104283115519, 613167104245366783, 613167104241172479, 613167104268435455, 613167104264241151, 613167098505461759, 613167098501267455, 613167098511753215, 613167098474004479, 613167098469810175, 613167098497073151, 613167098492878847, 613167098237026303, 613167098239123455, 613167098234929151, 613167098144751615, 613167098140557311, 613167098220249087, 613167098216054783, 613167098226540543, 613167103339397119, 613167103335202815, 613167103245025279, 613167103240830975, 613167103320522751, 613167103316328447, 613167103326814207, 613167103070961663, 613167103066767359, 613167102976589823, 613167102964006911, 613167102974492671, 613167103054184447, 613167103049990143, 613167103681232895, 613167103677038591, 613167103639289855, 613167103635095551, 613167103645581311, 613167103725273087, 613167103721078783, 613166949867716607, 613166949863522303, 613166949825773567, 613166949821579263, 613166949832065023, 613166949850939391, 613166949846745087, 613166949590892543, 613166949586698239, 613166949597183999, 613166949559435263, 613166949555240959, 613166949582503935, 613166949578309631, 613166943819530239, 613166943815335935, 613166943825821695, 613166943788072959, 613166943783878655, 613166943811141631, 613166943806947327, 613166943551094783, 613166943553191935, 613166943548997631, 613166943576260607, 613166943572066303, 613166943534317567, 613166943530123263, 613166943540609023, 613166948653465599, 613166948649271295, 613166948676534271, 613166948672339967, 613166948634591231, 613166948630396927, 613166948640882687, 613166948385030143, 613166948380835839, 613166948408098815, 613166948395515903, 613166948406001663, 613166948368252927, 613166948364058623, 613166948559093759, 613166948554899455, 613166949070798847, 613166949066604543, 613166949077090303, 613166949039341567, 613166949035147263, 613166928829087743, 613166928824893439, 613166932696236031, 613166932692041727, 613166932702527487, 613166928594206719, 613166928590012415, 613166928552263679, 613166928548069375, 613166928558555135, 613166932429897727, 613166932425703423, 613166926178287615, 613166926174093311, 613166926136344575, 613166926132150271, 613166926142636031, 613166926658535423, 613166926654341119, 613166925909852159, 613166925905657855, 613166925867909119, 613166925870006271, 613166925865811967, 613166925893074943, 613166925888880639, 613166926572552191, 613166926568357887, 613166926578843647, 613166926541094911, 613166926536900607, 613166926564163583, 613166926559969279, 613166931672825855, 613166931668631551, 613166931679117311, 613166931641368575, 613166931637174271, 613166931664437247, 613166931651854335, 613166931662340095, 613166931406487551, 613166931402293247, 613166931312115711, 
613166931307921407, 613166931387613183, 613166931383418879, 613166931393904639, 613167727720267775, 613167727716073471, 613167727625895935, 613167727621701631, 613167727701393407, 613167727697199103, 613167727707684863, 613167727391014911, 613167727386820607, 613167727349071871, 613167727344877567, 613167727355363327, 613167727435055103, 613167727430860799, 613167724975095807, 613167724970901503] |
-+----------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
++----------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------------+--------------------+
+| distance | path_cells | sphere_distance | euclidean_distance |
++----------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------------+--------------------+
+| 9 | [604189371209351167, 604189371075133439, 604189375235883007, 604189375101665279, 604189638034194431, 604189638571065343, 604189638436847615, 604189642597597183, 604189642463379455, 604189641255419903] | 55.05017 | 0.5493924369709845 |
++----------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------------+--------------------+
+
+SELECT
+ h3_cells_contains('86283470fffffff,862834777ffffff, 862834757ffffff, 86283471fffffff, 862834707ffffff', '8b283470d112fff') AS R00,
+ h3_cells_contains('86283470fffffff,862834777ffffff, 862834757ffffff, 86283471fffffff, 862834707ffffff', 604189641792290815) AS R01,
+ h3_cells_contains('86283470fffffff,862834777ffffff, 862834757ffffff, 86283471fffffff, 862834707ffffff', 626707639343067135) AS R02;
+
++------+-------+------+
+| r00 | r01 | r02 |
++------+-------+------+
+| true | false | true |
++------+-------+------+
+
+SELECT
+ h3_cells_contains(['86283470fffffff', '862834777ffffff', '862834757ffffff', '86283471fffffff', '862834707ffffff'], '86283472fffffff') AS R10,
+ h3_cells_contains(['86283470fffffff', '862834777ffffff', '862834757ffffff', '86283471fffffff', '862834707ffffff'], '8b283470d112fff') AS R11,
+ h3_cells_contains(['86283470fffffff', '862834777ffffff', '862834757ffffff', '86283471fffffff', '862834707ffffff'], 626707639343067135) AS R12;
+
++-------+------+------+
+| r10 | r11 | r12 |
++-------+------+------+
+| false | true | true |
++-------+------+------+
+
+SELECT
+ h3_cells_contains([604189641255419903, 604189643000250367, 604189642463379455, 604189641523855359, 604189641121202175], '8b283470d112fff') AS R20,
+ h3_cells_contains([604189641255419903, 604189643000250367, 604189642463379455, 604189641523855359, 604189641121202175], 604189641792290815) AS R21,
+ h3_cells_contains([604189641255419903, 604189643000250367, 604189642463379455, 604189641523855359, 604189641121202175], 626707639343067135) AS R22;
+
++------+-------+------+
+| r20 | r21 | r22 |
++------+-------+------+
+| true | false | true |
++------+-------+------+
SELECT geohash(37.76938, -122.3889, 9);
@@ -299,3 +334,57 @@ FROM(
| [[-122.3888,37.77001],[-122.3839,37.76928],[-122.3889,37.76938],[-122.382,37.7693]] |
+-------------------------------------------------------------------------------------+
+SELECT wkt_point_from_latlng(37.76938, -122.3889) AS point;
+
++---------------------------+
+| point |
++---------------------------+
+| POINT(-122.3889 37.76938) |
++---------------------------+
+
+SELECT
+ st_distance(p1, p2) AS euclidean_dist,
+ st_distance_sphere_m(p1, p2) AS sphere_dist_m,
+ st_distance(p1, polygon1) AS euclidean_dist_pp,
+ st_area(p1) as area_point,
+ st_area(polygon1) as area_polygon,
+FROM
+ (
+ SELECT
+ wkt_point_from_latlng(37.76938, -122.3889) AS p1,
+ wkt_point_from_latlng(38.5216, -121.4247) AS p2,
+ 'POLYGON ((-121.491698 38.653343, -121.582353 38.556757, -121.469721 38.449287, -121.315883 38.541721, -121.491698 38.653343))' AS polygon1,
+ );
+
++--------------------+--------------------+--------------------+------------+----------------------+
+| euclidean_dist | sphere_dist_m | euclidean_dist_pp | area_point | area_polygon |
++--------------------+--------------------+--------------------+------------+----------------------+
+| 1.2229131483470166 | 118766.03647159638 | 1.1271559800391486 | 0.0 | 0.027022178074000106 |
++--------------------+--------------------+--------------------+------------+----------------------+
+
+SELECT st_distance_sphere_m(wkt_point_from_latlng(37.76938, -122.3889), 'POLYGON ((-121.491698 38.653343, -121.582353 38.556757, -121.469721 38.449287, -121.315883 38.541721, -121.491698 38.653343))');
+
+Error: 3001(EngineExecuteQuery), Great circle distance between non-point objects are not supported for now.
+
+SELECT
+ st_contains(polygon1, p1),
+ st_contains(polygon2, p1),
+ st_within(p1, polygon1),
+ st_within(p1, polygon2),
+ st_intersects(polygon1, polygon2),
+ st_intersects(polygon1, polygon3),
+FROM
+ (
+ SELECT
+ wkt_point_from_latlng(37.383287, -122.01325) AS p1,
+ 'POLYGON ((-122.031661 37.428252, -122.139829 37.387072, -122.135365 37.361971, -122.057759 37.332222, -121.987707 37.328946, -121.943754 37.333041, -121.919373 37.349145, -121.945814 37.376705, -121.975689 37.417345, -121.998696 37.409164, -122.031661 37.428252))' AS polygon1,
+ 'POLYGON ((-121.491698 38.653343, -121.582353 38.556757, -121.469721 38.449287, -121.315883 38.541721, -121.491698 38.653343))' AS polygon2,
+ 'POLYGON ((-122.089628 37.450332, -122.20535 37.378342, -122.093062 37.36088, -122.044301 37.372886, -122.089628 37.450332))' AS polygon3,
+ );
+
++--------------------------+--------------------------+------------------------+------------------------+----------------------------------+----------------------------------+
+| st_contains(polygon1,p1) | st_contains(polygon2,p1) | st_within(p1,polygon1) | st_within(p1,polygon2) | st_intersects(polygon1,polygon2) | st_intersects(polygon1,polygon3) |
++--------------------------+--------------------------+------------------------+------------------------+----------------------------------+----------------------------------+
+| true | false | true | false | false | true |
++--------------------------+--------------------------+------------------------+------------------------+----------------------------------+----------------------------------+
+
diff --git a/tests/cases/standalone/common/function/geo.sql b/tests/cases/standalone/common/function/geo.sql
index 205d45ddd5f5..93565f30fcda 100644
--- a/tests/cases/standalone/common/function/geo.sql
+++ b/tests/cases/standalone/common/function/geo.sql
@@ -48,13 +48,31 @@ FROM (SELECT h3_latlng_to_cell(37.76938, -122.3889, 8::UInt64) AS cell);
SELECT
h3_grid_distance(cell1, cell2) AS distance,
h3_grid_path_cells(cell1, cell2) AS path_cells,
+ round(h3_distance_sphere_km(cell1, cell2), 5) AS sphere_distance,
+ h3_distance_degree(cell1, cell2) AS euclidean_distance,
FROM
(
SELECT
- h3_latlng_to_cell(37.76938, -122.3889, 8::UInt64) AS cell1,
- h3_latlng_to_cell(39.634, -104.999, 8::UInt64) AS cell2
+ h3_string_to_cell('86283082fffffff') AS cell1,
+ h3_string_to_cell('86283470fffffff') AS cell2
);
+SELECT
+ h3_cells_contains('86283470fffffff,862834777ffffff, 862834757ffffff, 86283471fffffff, 862834707ffffff', '8b283470d112fff') AS R00,
+ h3_cells_contains('86283470fffffff,862834777ffffff, 862834757ffffff, 86283471fffffff, 862834707ffffff', 604189641792290815) AS R01,
+ h3_cells_contains('86283470fffffff,862834777ffffff, 862834757ffffff, 86283471fffffff, 862834707ffffff', 626707639343067135) AS R02;
+
+SELECT
+ h3_cells_contains(['86283470fffffff', '862834777ffffff', '862834757ffffff', '86283471fffffff', '862834707ffffff'], '86283472fffffff') AS R10,
+ h3_cells_contains(['86283470fffffff', '862834777ffffff', '862834757ffffff', '86283471fffffff', '862834707ffffff'], '8b283470d112fff') AS R11,
+ h3_cells_contains(['86283470fffffff', '862834777ffffff', '862834757ffffff', '86283471fffffff', '862834707ffffff'], 626707639343067135) AS R12;
+
+SELECT
+ h3_cells_contains([604189641255419903, 604189643000250367, 604189642463379455, 604189641523855359, 604189641121202175], '8b283470d112fff') AS R20,
+ h3_cells_contains([604189641255419903, 604189643000250367, 604189642463379455, 604189641523855359, 604189641121202175], 604189641792290815) AS R21,
+ h3_cells_contains([604189641255419903, 604189643000250367, 604189642463379455, 604189641523855359, 604189641121202175], 626707639343067135) AS R22;
+
+
SELECT geohash(37.76938, -122.3889, 9);
SELECT geohash(37.76938, -122.3889, 10);
@@ -104,3 +122,39 @@ FROM(
UNION ALL
SELECT 37.77001 AS lat, -122.3888 AS lon, 1728083372::TimestampSecond AS ts
);
+
+SELECT wkt_point_from_latlng(37.76938, -122.3889) AS point;
+
+SELECT
+ st_distance(p1, p2) AS euclidean_dist,
+ st_distance_sphere_m(p1, p2) AS sphere_dist_m,
+ st_distance(p1, polygon1) AS euclidean_dist_pp,
+ st_area(p1) as area_point,
+ st_area(polygon1) as area_polygon,
+FROM
+ (
+ SELECT
+ wkt_point_from_latlng(37.76938, -122.3889) AS p1,
+ wkt_point_from_latlng(38.5216, -121.4247) AS p2,
+ 'POLYGON ((-121.491698 38.653343, -121.582353 38.556757, -121.469721 38.449287, -121.315883 38.541721, -121.491698 38.653343))' AS polygon1,
+ );
+
+
+SELECT st_distance_sphere_m(wkt_point_from_latlng(37.76938, -122.3889), 'POLYGON ((-121.491698 38.653343, -121.582353 38.556757, -121.469721 38.449287, -121.315883 38.541721, -121.491698 38.653343))');
+
+
+SELECT
+ st_contains(polygon1, p1),
+ st_contains(polygon2, p1),
+ st_within(p1, polygon1),
+ st_within(p1, polygon2),
+ st_intersects(polygon1, polygon2),
+ st_intersects(polygon1, polygon3),
+FROM
+ (
+ SELECT
+ wkt_point_from_latlng(37.383287, -122.01325) AS p1,
+ 'POLYGON ((-122.031661 37.428252, -122.139829 37.387072, -122.135365 37.361971, -122.057759 37.332222, -121.987707 37.328946, -121.943754 37.333041, -121.919373 37.349145, -121.945814 37.376705, -121.975689 37.417345, -121.998696 37.409164, -122.031661 37.428252))' AS polygon1,
+ 'POLYGON ((-121.491698 38.653343, -121.582353 38.556757, -121.469721 38.449287, -121.315883 38.541721, -121.491698 38.653343))' AS polygon2,
+ 'POLYGON ((-122.089628 37.450332, -122.20535 37.378342, -122.093062 37.36088, -122.044301 37.372886, -122.089628 37.450332))' AS polygon3,
+ );
|
feat
|
add more geo functions (#4888)
|
7700a167f22dabb642636425da10ba2b4edb96b8
|
2022-06-14 14:23:12
|
evenyag
|
chores: Address CR comment
| false
|
diff --git a/src/datatypes/src/vectors/mutable.rs b/src/datatypes/src/vectors/mutable.rs
index 39a7bcc370c1..ff9ed18a382c 100644
--- a/src/datatypes/src/vectors/mutable.rs
+++ b/src/datatypes/src/vectors/mutable.rs
@@ -15,11 +15,5 @@ pub trait MutableVector: Send + Sync {
fn as_mut_any(&mut self) -> &mut dyn Any;
- // /// Push a value into the mutable vector.
- // ///
- // /// # Panics
- // /// Panics if the data type of the value differs from the mutable vector's data type.
- // fn push_value(&mut self, value: &Value);
-
fn to_vector(&mut self) -> VectorRef;
}
diff --git a/src/storage/src/memtable.rs b/src/storage/src/memtable.rs
index ea0e10c4df42..d828398adf9f 100644
--- a/src/storage/src/memtable.rs
+++ b/src/storage/src/memtable.rs
@@ -26,7 +26,7 @@ pub trait Memtable: Send + Sync {
/// Panics if the schema of key/value differs from memtable's schema.
fn write(&self, kvs: &KeyValues) -> Result<()>;
- /// Iterators the memtable.
+ /// Iterates the memtable.
// TODO(yingwen): Consider passing a projector (does column projection).
fn iter(&self, ctx: IterContext) -> Result<BatchIteratorPtr>;
diff --git a/src/storage/src/memtable/btree.rs b/src/storage/src/memtable/btree.rs
index bd13dcf724b2..4d3464a4d802 100644
--- a/src/storage/src/memtable/btree.rs
+++ b/src/storage/src/memtable/btree.rs
@@ -119,64 +119,12 @@ impl BTreeIterator {
self.last_key = keys.last().map(|k| (*k).clone());
Some(Batch {
- keys: Self::keys_to_vectors(&keys),
+ keys: rows_to_vectors(keys.as_slice()),
sequences: sequences.finish(),
value_types: value_types.finish(),
- values: Self::values_to_vectors(&values),
+ values: rows_to_vectors(values.as_slice()),
})
}
-
- // Assumes column num of all row key is equal.
- fn keys_to_vectors(keys: &[&InnerKey]) -> Vec<VectorRef> {
- if keys.is_empty() {
- return Vec::new();
- }
-
- let column_num = keys[0].row_key.len();
- let row_num = keys.len();
- let mut builders = Vec::with_capacity(column_num);
- for v in &keys[0].row_key {
- builders.push(VectorBuilder::with_capacity(v.data_type(), row_num));
- }
-
- let mut vectors = Vec::with_capacity(column_num);
- for (col_idx, builder) in builders.iter_mut().enumerate() {
- for row_key in keys {
- let value = &row_key.row_key[col_idx];
- builder.push(value);
- }
-
- vectors.push(builder.finish());
- }
-
- vectors
- }
-
- // Assumes column num of all row value is equal.
- fn values_to_vectors(values: &[&RowValue]) -> Vec<VectorRef> {
- if values.is_empty() {
- return Vec::new();
- }
-
- let column_num = values[0].values.len();
- let row_num = values.len();
- let mut builders = Vec::with_capacity(column_num);
- for v in &values[0].values {
- builders.push(VectorBuilder::with_capacity(v.data_type(), row_num));
- }
-
- let mut vectors = Vec::with_capacity(column_num);
- for (col_idx, builder) in builders.iter_mut().enumerate() {
- for row_value in values {
- let value = &row_value.values[col_idx];
- builder.push(value);
- }
-
- vectors.push(builder.finish());
- }
-
- vectors
- }
}
/// `MapIterWrapper` removes same user key with elder sequence.
@@ -317,3 +265,63 @@ impl InnerKey {
struct RowValue {
values: Vec<Value>,
}
+
+trait RowsProvider {
+ fn row_num(&self) -> usize;
+
+ fn column_num(&self) -> usize {
+ self.row_by_index(0).len()
+ }
+
+ fn is_empty(&self) -> bool {
+ self.row_num() == 0
+ }
+
+ fn row_by_index(&self, idx: usize) -> &Vec<Value>;
+}
+
+impl<'a> RowsProvider for &'a [&InnerKey] {
+ fn row_num(&self) -> usize {
+ self.len()
+ }
+
+ fn row_by_index(&self, idx: usize) -> &Vec<Value> {
+ &self[idx].row_key
+ }
+}
+
+impl<'a> RowsProvider for &'a [&RowValue] {
+ fn row_num(&self) -> usize {
+ self.len()
+ }
+
+ fn row_by_index(&self, idx: usize) -> &Vec<Value> {
+ &self[idx].values
+ }
+}
+
+fn rows_to_vectors<T: RowsProvider>(provider: T) -> Vec<VectorRef> {
+ if provider.is_empty() {
+ return Vec::new();
+ }
+
+ let column_num = provider.column_num();
+ let row_num = provider.row_num();
+ let mut builders = Vec::with_capacity(column_num);
+ for v in provider.row_by_index(0) {
+ builders.push(VectorBuilder::with_capacity(v.data_type(), row_num));
+ }
+
+ let mut vectors = Vec::with_capacity(column_num);
+ for (col_idx, builder) in builders.iter_mut().enumerate() {
+ for row_idx in 0..row_num {
+ let row = provider.row_by_index(row_idx);
+ let value = &row[col_idx];
+ builder.push(value);
+ }
+
+ vectors.push(builder.finish());
+ }
+
+ vectors
+}
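
The refactor above replaces the two near-duplicate `keys_to_vectors`/`values_to_vectors` helpers with one generic `rows_to_vectors` driven by the small `RowsProvider` trait. A minimal, self-contained sketch of the same row-to-column transposition pattern, using a hypothetical `Rows` holder and plain `String` columns instead of the real `InnerKey`/`RowValue` and `VectorRef` types, might look like this:

// Sketch only: `Rows` and plain `String` columns are hypothetical stand-ins
// for the real `InnerKey`/`RowValue` rows and `VectorRef` columns above.
trait RowsProvider {
    fn row_num(&self) -> usize;
    fn row_by_index(&self, idx: usize) -> &Vec<String>;
    fn column_num(&self) -> usize {
        self.row_by_index(0).len()
    }
}

struct Rows(Vec<Vec<String>>);

impl RowsProvider for Rows {
    fn row_num(&self) -> usize {
        self.0.len()
    }

    fn row_by_index(&self, idx: usize) -> &Vec<String> {
        &self.0[idx]
    }
}

// Transposes row-oriented data into column-oriented "vectors",
// mirroring the shape of `rows_to_vectors` above.
fn rows_to_columns<T: RowsProvider>(provider: &T) -> Vec<Vec<String>> {
    if provider.row_num() == 0 {
        return Vec::new();
    }
    let mut columns = vec![Vec::with_capacity(provider.row_num()); provider.column_num()];
    for row_idx in 0..provider.row_num() {
        for (col_idx, value) in provider.row_by_index(row_idx).iter().enumerate() {
            columns[col_idx].push(value.clone());
        }
    }
    columns
}

fn main() {
    let rows = Rows(vec![
        vec!["a".to_string(), "1".to_string()],
        vec!["b".to_string(), "2".to_string()],
    ]);
    // Prints [["a", "b"], ["1", "2"]].
    println!("{:?}", rows_to_columns(&rows));
}

The trait keeps the transposition loop in one place while each row holder only supplies indexing.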
|
chores
|
Address CR comment
|
c8cf3b1677e40702f2e5dde152d89974814b3f6f
|
2024-07-19 18:54:10
|
Lei, HUANG
|
fix(wal): handle WAL deletion on region drop (#4400)
| false
|
diff --git a/src/mito2/src/worker/handle_drop.rs b/src/mito2/src/worker/handle_drop.rs
index c307e437c502..06a439cc5ea8 100644
--- a/src/mito2/src/worker/handle_drop.rs
+++ b/src/mito2/src/worker/handle_drop.rs
@@ -22,6 +22,7 @@ use futures::TryStreamExt;
use object_store::util::join_path;
use object_store::{EntryMode, ObjectStore};
use snafu::ResultExt;
+use store_api::logstore::LogStore;
use store_api::region_request::AffectedRows;
use store_api::storage::RegionId;
use tokio::time::sleep;
@@ -34,7 +35,10 @@ use crate::worker::{RegionWorkerLoop, DROPPING_MARKER_FILE};
const GC_TASK_INTERVAL_SEC: u64 = 5 * 60; // 5 minutes
const MAX_RETRY_TIMES: u64 = 288; // 24 hours (5m * 288)
-impl<S> RegionWorkerLoop<S> {
+impl<S> RegionWorkerLoop<S>
+where
+ S: LogStore,
+{
pub(crate) async fn handle_drop_request(
&mut self,
region_id: RegionId,
@@ -58,7 +62,7 @@ impl<S> RegionWorkerLoop<S> {
error!(e; "Failed to write the drop marker file for region {}", region_id);
// Sets the state back to writable. It's possible that the marker file has been written.
- // We sets the state back to writable so we can retry the drop operation.
+ // We set the state back to writable so we can retry the drop operation.
region.switch_state_to_writable(RegionState::Dropping);
})?;
@@ -66,6 +70,15 @@ impl<S> RegionWorkerLoop<S> {
// Removes this region from region map to prevent other requests from accessing this region
self.regions.remove_region(region_id);
self.dropping_regions.insert_region(region.clone());
+
+ // Delete region data in WAL.
+ self.wal
+ .obsolete(
+ region_id,
+ region.version_control.current().last_entry_id,
+ ®ion.provider,
+ )
+ .await?;
// Notifies flush scheduler.
self.flush_scheduler.on_region_dropped(region_id);
// Notifies compaction scheduler.
|
fix
|
handle WAL deletion on region drop (#4400)
|
4ca5387bb84460bf393347a32fa259390e3e9fe6
|
2022-04-19 15:26:27
|
dennis zhuang
|
chore: Initial commit
| false
|
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 000000000000..088ba6ba7d34
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,10 @@
+# Generated by Cargo
+# will have compiled files and executables
+/target/
+
+# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
+# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
+Cargo.lock
+
+# These are backup files generated by rustfmt
+**/*.rs.bk
diff --git a/Cargo.toml b/Cargo.toml
new file mode 100644
index 000000000000..926483c43e60
--- /dev/null
+++ b/Cargo.toml
@@ -0,0 +1,8 @@
+[package]
+name = "GrepTimeDB"
+version = "0.1.0"
+edition = "2021"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
diff --git a/README.md b/README.md
new file mode 100644
index 000000000000..a20635aff012
--- /dev/null
+++ b/README.md
@@ -0,0 +1,2 @@
+# GrepTimeDB
+GrepTimeDB: the next-generation hybrid timeseries/analytics processing database in the cloud.
diff --git a/src/main.rs b/src/main.rs
new file mode 100644
index 000000000000..e7a11a969c03
--- /dev/null
+++ b/src/main.rs
@@ -0,0 +1,3 @@
+fn main() {
+ println!("Hello, world!");
+}
|
chore
|
Initial commit
|
b0c56a3e23639ca4c302d3f208c79b4c92e0d369
|
2023-09-18 15:13:02
|
dennis zhuang
|
feat: type alias (#2331)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 7ca7d99b95c3..51052b6e5072 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1923,6 +1923,8 @@ dependencies = [
"datatypes",
"serde",
"snafu",
+ "sqlparser 0.34.0",
+ "sqlparser_derive 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"statrs",
"tokio",
]
@@ -9043,10 +9045,12 @@ dependencies = [
"datatypes",
"hex",
"itertools 0.10.5",
+ "lazy_static",
"once_cell",
"regex",
"snafu",
"sqlparser 0.34.0",
+ "sqlparser_derive 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"table",
]
diff --git a/src/common/query/Cargo.toml b/src/common/query/Cargo.toml
index e4c44b95861f..60f6a8003d81 100644
--- a/src/common/query/Cargo.toml
+++ b/src/common/query/Cargo.toml
@@ -16,6 +16,8 @@ datafusion.workspace = true
datatypes = { workspace = true }
serde.workspace = true
snafu.workspace = true
+sqlparser.workspace = true
+sqlparser_derive = "0.1"
statrs = "0.16"
[dev-dependencies]
diff --git a/src/common/query/src/lib.rs b/src/common/query/src/lib.rs
index 5d7922ade2df..95d1aed2fe59 100644
--- a/src/common/query/src/lib.rs
+++ b/src/common/query/src/lib.rs
@@ -26,6 +26,7 @@ pub mod logical_plan;
pub mod physical_plan;
pub mod prelude;
mod signature;
+use sqlparser_derive::{Visit, VisitMut};
// sql output
pub enum Output {
@@ -48,7 +49,7 @@ impl Debug for Output {
pub use datafusion::physical_plan::ExecutionPlan as DfPhysicalPlan;
-#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Visit, VisitMut)]
pub enum AddColumnLocation {
First,
After { column_name: String },
diff --git a/src/operator/src/expr_factory.rs b/src/operator/src/expr_factory.rs
index b9fda8224e79..c45ab659c60a 100644
--- a/src/operator/src/expr_factory.rs
+++ b/src/operator/src/expr_factory.rs
@@ -128,11 +128,11 @@ pub(crate) async fn create_external_expr(
let mut table_options = create.options;
- let (object_store, files) = prepare_file_table_files(&table_options)
+ let (object_store, files) = prepare_file_table_files(&table_options.map)
.await
.context(PrepareFileTableSnafu)?;
- let file_column_schemas = infer_file_table_schema(&object_store, &files, &table_options)
+ let file_column_schemas = infer_file_table_schema(&object_store, &files, &table_options.map)
.await
.context(InferFileTableSchemaSnafu)?
.column_schemas;
@@ -157,7 +157,7 @@ pub(crate) async fn create_external_expr(
files,
file_column_schemas,
};
- let _ = table_options.insert(
+ table_options.insert(
FILE_TABLE_META_KEY.to_string(),
serde_json::to_string(&meta).context(EncodeJsonSnafu)?,
);
@@ -172,7 +172,7 @@ pub(crate) async fn create_external_expr(
time_index,
primary_keys,
create_if_not_exists: create.if_not_exists,
- table_options,
+ table_options: table_options.map,
table_id: None,
engine: create.engine.to_string(),
};
diff --git a/src/operator/src/statement.rs b/src/operator/src/statement.rs
index 5cb0a2e4d385..e6b46d26a601 100644
--- a/src/operator/src/statement.rs
+++ b/src/operator/src/statement.rs
@@ -21,7 +21,6 @@ mod dml;
mod show;
mod tql;
-use std::collections::HashMap;
use std::str::FromStr;
use std::sync::Arc;
@@ -43,6 +42,7 @@ use session::context::QueryContextRef;
use snafu::{OptionExt, ResultExt};
use sql::statements::copy::{CopyDatabaseArgument, CopyTable, CopyTableArgument};
use sql::statements::statement::Statement;
+use sql::statements::OptionMap;
use sqlparser::ast::ObjectName;
use table::engine::TableReference;
use table::requests::{CopyDatabaseRequest, CopyDirection, CopyTableRequest};
@@ -259,8 +259,8 @@ fn to_copy_table_request(stmt: CopyTable, query_ctx: QueryContextRef) -> Result<
schema_name,
table_name,
location,
- with,
- connection,
+ with: with.map,
+ connection: connection.map,
pattern,
direction,
// we copy the whole table by default.
@@ -292,14 +292,14 @@ fn to_copy_database_request(
catalog_name,
schema_name: database_name,
location: arg.location,
- with: arg.with,
- connection: arg.connection,
+ with: arg.with.map,
+ connection: arg.connection.map,
time_range,
})
}
/// Extracts timestamp from a [HashMap<String, String>] with given key.
-fn extract_timestamp(map: &HashMap<String, String>, key: &str) -> Result<Option<Timestamp>> {
+fn extract_timestamp(map: &OptionMap, key: &str) -> Result<Option<Timestamp>> {
map.get(key)
.map(|v| {
Timestamp::from_str(v)
diff --git a/src/servers/src/error.rs b/src/servers/src/error.rs
index bec041d2108e..049ded45c69d 100644
--- a/src/servers/src/error.rs
+++ b/src/servers/src/error.rs
@@ -65,12 +65,7 @@ pub enum Error {
source: std::io::Error,
},
- #[snafu(display(
- "Failed to execute query, source: {}, query: {}, location: {}",
- source,
- query,
- location
- ))]
+ #[snafu(display("Failed to execute query, source: {}, query: {}", source, query))]
ExecuteQuery {
query: String,
location: Location,
@@ -279,7 +274,7 @@ pub enum Error {
source: query::error::Error,
},
- #[snafu(display("Failed to get param types, source: {source}, location: {location}"))]
+ #[snafu(display("Failed to get param types, source: {source}"))]
GetPreparedStmtParams {
source: query::error::Error,
location: Location,
diff --git a/src/sql/Cargo.toml b/src/sql/Cargo.toml
index 5d271470aa12..3e17e1fbe36f 100644
--- a/src/sql/Cargo.toml
+++ b/src/sql/Cargo.toml
@@ -15,10 +15,12 @@ datafusion-sql.workspace = true
datatypes = { workspace = true }
hex = "0.4"
itertools.workspace = true
+lazy_static.workspace = true
once_cell.workspace = true
regex.workspace = true
snafu = { version = "0.7", features = ["backtraces"] }
sqlparser.workspace = true
+sqlparser_derive = "0.1"
table = { workspace = true }
[dev-dependencies]
diff --git a/src/sql/src/ast.rs b/src/sql/src/ast.rs
index a72d7965b772..e1944de98ab6 100644
--- a/src/sql/src/ast.rs
+++ b/src/sql/src/ast.rs
@@ -13,7 +13,7 @@
// limitations under the License.
pub use sqlparser::ast::{
- visit_expressions_mut, BinaryOperator, ColumnDef, ColumnOption, ColumnOptionDef, DataType,
- Expr, Function, FunctionArg, FunctionArgExpr, Ident, ObjectName, SqlOption, TableConstraint,
- TimezoneInfo, Value, VisitMut, Visitor,
+ visit_expressions_mut, visit_statements_mut, BinaryOperator, ColumnDef, ColumnOption,
+ ColumnOptionDef, DataType, Expr, Function, FunctionArg, FunctionArgExpr, Ident, ObjectName,
+ SqlOption, TableConstraint, TimezoneInfo, Value, Visit, VisitMut, Visitor, VisitorMut,
};
diff --git a/src/sql/src/parser.rs b/src/sql/src/parser.rs
index 9b9409b2f562..913d2ce760f6 100644
--- a/src/sql/src/parser.rs
+++ b/src/sql/src/parser.rs
@@ -22,6 +22,7 @@ use crate::ast::{Expr, ObjectName};
use crate::error::{self, Result, SyntaxSnafu};
use crate::parsers::tql_parser;
use crate::statements::statement::Statement;
+use crate::statements::transform_statements;
/// GrepTime SQL parser context, a simple wrapper for Datafusion SQL parser.
pub struct ParserContext<'a> {
@@ -58,6 +59,8 @@ impl<'a> ParserContext<'a> {
expecting_statement_delimiter = true;
}
+ transform_statements(&mut stmts)?;
+
Ok(stmts)
}
diff --git a/src/sql/src/parsers/copy_parser.rs b/src/sql/src/parsers/copy_parser.rs
index fc3573fa198d..34b3a80cfdf7 100644
--- a/src/sql/src/parsers/copy_parser.rs
+++ b/src/sql/src/parsers/copy_parser.rs
@@ -62,8 +62,8 @@ impl<'a> ParserContext<'a> {
let (with, connection, location) = self.parse_copy_to()?;
Ok(CopyDatabaseArgument {
database_name,
- with,
- connection,
+ with: with.into(),
+ connection: connection.into(),
location,
})
}
@@ -82,8 +82,8 @@ impl<'a> ParserContext<'a> {
let (with, connection, location) = self.parse_copy_to()?;
Ok(CopyTable::To(CopyTableArgument {
table_name,
- with,
- connection,
+ with: with.into(),
+ connection: connection.into(),
location,
}))
} else {
@@ -308,7 +308,10 @@ mod tests {
if let Some(expected_pattern) = test.expected_pattern {
assert_eq!(copy_table.pattern().unwrap(), expected_pattern);
}
- assert_eq!(copy_table.connection.clone(), test.expected_connection);
+ assert_eq!(
+ copy_table.connection.clone(),
+ test.expected_connection.into()
+ );
}
_ => unreachable!(),
}
@@ -348,7 +351,10 @@ mod tests {
Statement::Copy(crate::statements::copy::Copy::CopyTable(CopyTable::To(
copy_table,
))) => {
- assert_eq!(copy_table.connection.clone(), test.expected_connection);
+ assert_eq!(
+ copy_table.connection.clone(),
+ test.expected_connection.into()
+ );
}
_ => unreachable!(),
}
@@ -374,7 +380,7 @@ mod tests {
[("format".to_string(), "parquet".to_string())]
.into_iter()
.collect::<HashMap<_, _>>(),
- stmt.with
+ stmt.with.map
);
assert_eq!(
@@ -384,7 +390,7 @@ mod tests {
]
.into_iter()
.collect::<HashMap<_, _>>(),
- stmt.connection
+ stmt.connection.map
);
}
}
diff --git a/src/sql/src/parsers/create_parser.rs b/src/sql/src/parsers/create_parser.rs
index 15ca790bd297..51b5ac48b536 100644
--- a/src/sql/src/parsers/create_parser.rs
+++ b/src/sql/src/parsers/create_parser.rs
@@ -37,7 +37,9 @@ use crate::statements::create::{
CreateDatabase, CreateExternalTable, CreateTable, PartitionEntry, Partitions, TIME_INDEX,
};
use crate::statements::statement::Statement;
-use crate::statements::{sql_data_type_to_concrete_data_type, sql_value_to_value};
+use crate::statements::{
+ get_data_type_by_alias_name, sql_data_type_to_concrete_data_type, sql_value_to_value,
+};
use crate::util::parse_option_string;
pub const ENGINE: &str = "ENGINE";
@@ -106,7 +108,7 @@ impl<'a> ParserContext<'a> {
name: table_name,
columns,
constraints,
- options,
+ options: options.into(),
if_not_exists,
engine,
}))
@@ -374,8 +376,11 @@ impl<'a> ParserContext<'a> {
msg: "time index column can't be null",
}
);
+
+        // The timestamp type may be an alias type, so we have to retrieve the actual type.
+ let data_type = get_real_timestamp_type(&column.data_type);
ensure!(
- matches!(column.data_type, DataType::Timestamp(_, _)),
+ matches!(data_type, DataType::Timestamp(_, _)),
InvalidColumnOptionSnafu {
name: column.name.to_string(),
msg: "time index column data type should be timestamp",
@@ -653,8 +658,9 @@ fn validate_time_index(create_table: &CreateTable) -> Result<()> {
),
})?;
+ let time_index_data_type = get_real_timestamp_type(&time_index_column.data_type);
ensure!(
- matches!(time_index_column.data_type, DataType::Timestamp(_, _)),
+ matches!(time_index_data_type, DataType::Timestamp(_, _)),
InvalidColumnOptionSnafu {
name: time_index_column.name.to_string(),
msg: "time index column data type should be timestamp",
@@ -664,6 +670,19 @@ fn validate_time_index(create_table: &CreateTable) -> Result<()> {
Ok(())
}
+fn get_real_timestamp_type(data_type: &DataType) -> DataType {
+ match data_type {
+ DataType::Custom(name, tokens) if name.0.len() == 1 && tokens.is_empty() => {
+ if let Some(real_type) = get_data_type_by_alias_name(name.0[0].value.as_str()) {
+ real_type
+ } else {
+ data_type.clone()
+ }
+ }
+ _ => data_type.clone(),
+ }
+}
+
fn validate_partitions(columns: &[ColumnDef], partitions: &Partitions) -> Result<()> {
let partition_columns = ensure_partition_columns_defined(columns, partitions)?;
@@ -881,7 +900,7 @@ mod tests {
match &stmts[0] {
Statement::CreateExternalTable(c) => {
assert_eq!(c.name.to_string(), test.expected_table_name.to_string());
- assert_eq!(c.options, test.expected_options);
+ assert_eq!(c.options, test.expected_options.into());
assert_eq!(c.if_not_exists, test.expected_if_not_exist);
assert_eq!(c.engine, test.expected_engine);
}
@@ -895,7 +914,7 @@ mod tests {
let sql = "CREATE EXTERNAL TABLE city (
host string,
ts int64,
- cpu float64 default 0,
+ cpu float32 default 0,
memory float64,
TIME INDEX (ts),
PRIMARY KEY(ts, host)
@@ -911,13 +930,13 @@ mod tests {
match &stmts[0] {
Statement::CreateExternalTable(c) => {
assert_eq!(c.name.to_string(), "city");
- assert_eq!(c.options, options);
+ assert_eq!(c.options, options.into());
let columns = &c.columns;
assert_column_def(&columns[0], "host", "STRING");
- assert_column_def(&columns[1], "ts", "int64");
- assert_column_def(&columns[2], "cpu", "float64");
- assert_column_def(&columns[3], "memory", "float64");
+ assert_column_def(&columns[1], "ts", "BIGINT");
+ assert_column_def(&columns[2], "cpu", "FLOAT");
+ assert_column_def(&columns[3], "memory", "DOUBLE");
let constraints = &c.constraints;
assert_matches!(
@@ -1423,7 +1442,7 @@ ENGINE=mito";
let sql = r"create table demo(
host string,
ts timestamp,
- cpu float64 default 0,
+ cpu float32 default 0,
memory float64,
TIME INDEX (ts),
PRIMARY KEY(ts, host)) engine=mito
@@ -1440,8 +1459,9 @@ ENGINE=mito";
let columns = &c.columns;
assert_column_def(&columns[0], "host", "STRING");
assert_column_def(&columns[1], "ts", "TIMESTAMP");
- assert_column_def(&columns[2], "cpu", "float64");
- assert_column_def(&columns[3], "memory", "float64");
+ assert_column_def(&columns[2], "cpu", "FLOAT");
+ assert_column_def(&columns[3], "memory", "DOUBLE");
+
let constraints = &c.constraints;
assert_matches!(
&constraints[0],
diff --git a/src/sql/src/statements.rs b/src/sql/src/statements.rs
index 1510335c77cd..1e4f03ad9f5f 100644
--- a/src/sql/src/statements.rs
+++ b/src/sql/src/statements.rs
@@ -20,10 +20,12 @@ pub mod describe;
pub mod drop;
pub mod explain;
pub mod insert;
+mod option_map;
pub mod query;
pub mod show;
pub mod statement;
pub mod tql;
+mod transform;
pub mod truncate;
use std::str::FromStr;
@@ -38,7 +40,9 @@ use datatypes::prelude::ConcreteDataType;
use datatypes::schema::{ColumnDefaultConstraint, ColumnSchema, COMMENT_KEY};
use datatypes::types::TimestampType;
use datatypes::value::{OrderedF32, OrderedF64, Value};
+pub use option_map::OptionMap;
use snafu::{ensure, OptionExt, ResultExt};
+pub use transform::{get_data_type_by_alias_name, transform_statements};
use crate::ast::{
ColumnDef, ColumnOption, ColumnOptionDef, DataType as SqlDataType, Expr, TimezoneInfo,
diff --git a/src/sql/src/statements/alter.rs b/src/sql/src/statements/alter.rs
index 132f9368c19c..cf3dc1bf9179 100644
--- a/src/sql/src/statements/alter.rs
+++ b/src/sql/src/statements/alter.rs
@@ -14,8 +14,9 @@
use common_query::AddColumnLocation;
use sqlparser::ast::{ColumnDef, Ident, ObjectName, TableConstraint};
+use sqlparser_derive::{Visit, VisitMut};
-#[derive(Debug, Clone, PartialEq, Eq)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
pub struct AlterTable {
table_name: ObjectName,
alter_operation: AlterTableOperation,
@@ -38,7 +39,7 @@ impl AlterTable {
}
}
-#[derive(Debug, Clone, PartialEq, Eq)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
pub enum AlterTableOperation {
/// `ADD <table_constraint>`
AddConstraint(TableConstraint),
diff --git a/src/sql/src/statements/copy.rs b/src/sql/src/statements/copy.rs
index 494c07c70723..cd3893300ba8 100644
--- a/src/sql/src/statements/copy.rs
+++ b/src/sql/src/statements/copy.rs
@@ -12,35 +12,36 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::collections::HashMap;
-
use sqlparser::ast::ObjectName;
+use sqlparser_derive::{Visit, VisitMut};
+
+use crate::statements::OptionMap;
-#[derive(Debug, Clone, PartialEq, Eq)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
pub enum Copy {
CopyTable(CopyTable),
CopyDatabase(CopyDatabaseArgument),
}
-#[derive(Debug, Clone, PartialEq, Eq)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
pub enum CopyTable {
To(CopyTableArgument),
From(CopyTableArgument),
}
-#[derive(Debug, Clone, PartialEq, Eq)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
pub struct CopyDatabaseArgument {
pub database_name: ObjectName,
- pub with: HashMap<String, String>,
- pub connection: HashMap<String, String>,
+ pub with: OptionMap,
+ pub connection: OptionMap,
pub location: String,
}
-#[derive(Debug, Clone, PartialEq, Eq)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
pub struct CopyTableArgument {
pub table_name: ObjectName,
- pub with: HashMap<String, String>,
- pub connection: HashMap<String, String>,
+ pub with: OptionMap,
+ pub connection: OptionMap,
/// Copy tbl [To|From] 'location'.
pub location: String,
}
diff --git a/src/sql/src/statements/create.rs b/src/sql/src/statements/create.rs
index f2075a15f5be..47b511be5616 100644
--- a/src/sql/src/statements/create.rs
+++ b/src/sql/src/statements/create.rs
@@ -12,13 +12,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::collections::HashMap;
use std::fmt::{Display, Formatter};
use common_catalog::consts::FILE_ENGINE;
use itertools::Itertools;
+use sqlparser_derive::{Visit, VisitMut};
use crate::ast::{ColumnDef, Ident, ObjectName, SqlOption, TableConstraint, Value as SqlValue};
+use crate::statements::OptionMap;
const LINE_SEP: &str = ",\n";
const COMMA_SEP: &str = ", ";
@@ -57,7 +58,7 @@ pub fn is_time_index(constraint: &TableConstraint) -> bool {
} if name.value == TIME_INDEX)
}
-#[derive(Debug, PartialEq, Eq, Clone)]
+#[derive(Debug, PartialEq, Eq, Clone, Visit, VisitMut)]
pub struct CreateTable {
/// Create if not exists
pub if_not_exists: bool,
@@ -124,7 +125,7 @@ impl CreateTable {
}
}
-#[derive(Debug, PartialEq, Eq, Clone)]
+#[derive(Debug, PartialEq, Eq, Clone, Visit, VisitMut)]
pub struct Partitions {
pub column_list: Vec<Ident>,
pub entries: Vec<PartitionEntry>,
@@ -139,7 +140,7 @@ impl Partitions {
}
}
-#[derive(Debug, PartialEq, Eq, Clone)]
+#[derive(Debug, PartialEq, Eq, Clone, Visit, VisitMut)]
pub struct PartitionEntry {
pub name: Ident,
pub value_list: Vec<SqlValue>,
@@ -197,14 +198,14 @@ impl Display for CreateTable {
}
}
-#[derive(Debug, PartialEq, Eq, Clone)]
+#[derive(Debug, PartialEq, Eq, Clone, Visit, VisitMut)]
pub struct CreateDatabase {
pub name: ObjectName,
/// Create if not exists
pub if_not_exists: bool,
}
-#[derive(Debug, PartialEq, Eq, Clone)]
+#[derive(Debug, PartialEq, Eq, Clone, Visit, VisitMut)]
pub struct CreateExternalTable {
/// Table name
pub name: ObjectName,
@@ -212,7 +213,7 @@ pub struct CreateExternalTable {
pub constraints: Vec<TableConstraint>,
/// Table options in `WITH`.
/// All keys are lowercase.
- pub options: HashMap<String, String>,
+ pub options: OptionMap,
pub if_not_exists: bool,
pub engine: String,
}
diff --git a/src/sql/src/statements/delete.rs b/src/sql/src/statements/delete.rs
index 47ba87a011f7..4346610b7d19 100644
--- a/src/sql/src/statements/delete.rs
+++ b/src/sql/src/statements/delete.rs
@@ -13,8 +13,9 @@
// limitations under the License.
use sqlparser::ast::Statement;
+use sqlparser_derive::{Visit, VisitMut};
-#[derive(Debug, Clone, PartialEq, Eq)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
pub struct Delete {
pub inner: Statement,
}
diff --git a/src/sql/src/statements/describe.rs b/src/sql/src/statements/describe.rs
index 006dd83986a9..ee865093055e 100644
--- a/src/sql/src/statements/describe.rs
+++ b/src/sql/src/statements/describe.rs
@@ -13,9 +13,10 @@
// limitations under the License.
use sqlparser::ast::ObjectName;
+use sqlparser_derive::{Visit, VisitMut};
/// SQL structure for `DESCRIBE TABLE`.
-#[derive(Debug, Clone, PartialEq, Eq)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
pub struct DescribeTable {
name: ObjectName,
}
diff --git a/src/sql/src/statements/drop.rs b/src/sql/src/statements/drop.rs
index b8fc0401fa63..b8e46ac3f2a7 100644
--- a/src/sql/src/statements/drop.rs
+++ b/src/sql/src/statements/drop.rs
@@ -13,9 +13,10 @@
// limitations under the License.
use sqlparser::ast::ObjectName;
+use sqlparser_derive::{Visit, VisitMut};
/// DROP TABLE statement.
-#[derive(Debug, Clone, PartialEq, Eq)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
pub struct DropTable {
table_name: ObjectName,
}
diff --git a/src/sql/src/statements/explain.rs b/src/sql/src/statements/explain.rs
index 82b757d193fa..fe953d66184d 100644
--- a/src/sql/src/statements/explain.rs
+++ b/src/sql/src/statements/explain.rs
@@ -13,11 +13,12 @@
// limitations under the License.
use sqlparser::ast::Statement as SpStatement;
+use sqlparser_derive::{Visit, VisitMut};
use crate::error::Error;
/// Explain statement.
-#[derive(Debug, Clone, PartialEq, Eq)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
pub struct Explain {
pub inner: SpStatement,
}
diff --git a/src/sql/src/statements/insert.rs b/src/sql/src/statements/insert.rs
index b5bdf55b09c1..96af1a8de910 100644
--- a/src/sql/src/statements/insert.rs
+++ b/src/sql/src/statements/insert.rs
@@ -13,12 +13,13 @@
// limitations under the License.
use sqlparser::ast::{ObjectName, Query, SetExpr, Statement, UnaryOperator, Values};
use sqlparser::parser::ParserError;
+use sqlparser_derive::{Visit, VisitMut};
use crate::ast::{Expr, Value};
use crate::error::Result;
use crate::statements::query::Query as GtQuery;
-#[derive(Debug, Clone, PartialEq, Eq)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
pub struct Insert {
// Can only be sqlparser::ast::Statement::Insert variant
pub inner: Statement,
diff --git a/src/sql/src/statements/option_map.rs b/src/sql/src/statements/option_map.rs
new file mode 100644
index 000000000000..63069f6fc09a
--- /dev/null
+++ b/src/sql/src/statements/option_map.rs
@@ -0,0 +1,63 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+mod visit;
+mod visit_mut;
+
+use std::borrow::Borrow;
+use std::collections::HashMap;
+use std::iter::FromIterator;
+
+/// Options hashmap.
+/// Because the traits `Visit` and `VisitMut` are not implemented for `HashMap<String, String>`, we have to wrap it and implement them ourselves.
+#[derive(Clone, Eq, PartialEq, Debug)]
+pub struct OptionMap {
+ pub map: HashMap<String, String>,
+}
+
+impl OptionMap {
+ pub fn insert(&mut self, k: String, v: String) {
+ self.map.insert(k, v);
+ }
+
+ pub fn get(&self, k: &str) -> Option<&String> {
+ self.map.get(k)
+ }
+}
+
+impl From<HashMap<String, String>> for OptionMap {
+ fn from(map: HashMap<String, String>) -> Self {
+ Self { map }
+ }
+}
+
+impl AsRef<HashMap<String, String>> for OptionMap {
+ fn as_ref(&self) -> &HashMap<String, String> {
+ &self.map
+ }
+}
+
+impl Borrow<HashMap<String, String>> for OptionMap {
+ fn borrow(&self) -> &HashMap<String, String> {
+ &self.map
+ }
+}
+
+impl FromIterator<(String, String)> for OptionMap {
+ fn from_iter<I: IntoIterator<Item = (String, String)>>(iter: I) -> Self {
+ Self {
+ map: iter.into_iter().collect(),
+ }
+ }
+}
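
As the comment above notes, `Visit`/`VisitMut` cannot be implemented directly on `HashMap<String, String>` here because both the traits (from sqlparser) and the type (from std) are foreign to this crate, so the map is wrapped in a local newtype. A rough, self-contained sketch of that newtype workaround, with a hypothetical local `Describe` trait standing in for the foreign visitor traits, is:

use std::collections::HashMap;

// Hypothetical stand-in for sqlparser's `Visit`; in the real code the trait
// is foreign too, which is exactly why the wrapper is needed.
trait Describe {
    fn describe(&self) -> String;
}

// Local newtype around the foreign `HashMap` type; trait impls can now live here.
struct OptionMap {
    map: HashMap<String, String>,
}

impl From<HashMap<String, String>> for OptionMap {
    fn from(map: HashMap<String, String>) -> Self {
        Self { map }
    }
}

impl Describe for OptionMap {
    fn describe(&self) -> String {
        let mut keys: Vec<_> = self.map.keys().cloned().collect();
        keys.sort();
        format!("options: {}", keys.join(", "))
    }
}

fn main() {
    let map: HashMap<String, String> = [("format".to_string(), "parquet".to_string())]
        .into_iter()
        .collect();
    let options = OptionMap::from(map);
    // Prints "options: format".
    println!("{}", options.describe());
}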
diff --git a/src/sql/src/statements/option_map/visit.rs b/src/sql/src/statements/option_map/visit.rs
new file mode 100644
index 000000000000..1242ae69d6f7
--- /dev/null
+++ b/src/sql/src/statements/option_map/visit.rs
@@ -0,0 +1,29 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::ops::ControlFlow;
+
+use sqlparser::ast::{Visit, Visitor};
+
+use crate::statements::OptionMap;
+
+impl Visit for OptionMap {
+ fn visit<V: Visitor>(&self, visitor: &mut V) -> ControlFlow<V::Break> {
+ for (k, v) in &self.map {
+ k.visit(visitor)?;
+ v.visit(visitor)?;
+ }
+ ControlFlow::Continue(())
+ }
+}
diff --git a/src/sql/src/statements/option_map/visit_mut.rs b/src/sql/src/statements/option_map/visit_mut.rs
new file mode 100644
index 000000000000..0c6143056072
--- /dev/null
+++ b/src/sql/src/statements/option_map/visit_mut.rs
@@ -0,0 +1,28 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::ops::ControlFlow;
+
+use sqlparser::ast::{VisitMut, VisitorMut};
+
+use crate::statements::OptionMap;
+
+impl VisitMut for OptionMap {
+ fn visit<V: VisitorMut>(&mut self, visitor: &mut V) -> ControlFlow<V::Break> {
+ for (_, v) in self.map.iter_mut() {
+ v.visit(visitor)?;
+ }
+ ControlFlow::Continue(())
+ }
+}
diff --git a/src/sql/src/statements/query.rs b/src/sql/src/statements/query.rs
index c2b720ce1593..e03372d26845 100644
--- a/src/sql/src/statements/query.rs
+++ b/src/sql/src/statements/query.rs
@@ -15,11 +15,12 @@
use std::fmt;
use sqlparser::ast::Query as SpQuery;
+use sqlparser_derive::{Visit, VisitMut};
use crate::error::Error;
/// Query statement instance.
-#[derive(Debug, Clone, PartialEq, Eq)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
pub struct Query {
pub inner: SpQuery,
}
diff --git a/src/sql/src/statements/show.rs b/src/sql/src/statements/show.rs
index 409398227960..eed546fba907 100644
--- a/src/sql/src/statements/show.rs
+++ b/src/sql/src/statements/show.rs
@@ -14,10 +14,12 @@
use std::fmt;
+use sqlparser_derive::{Visit, VisitMut};
+
use crate::ast::{Expr, Ident, ObjectName};
/// Show kind for SQL expressions like `SHOW DATABASE` or `SHOW TABLE`
-#[derive(Debug, Clone, PartialEq, Eq)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
pub enum ShowKind {
All,
Like(Ident),
@@ -35,7 +37,7 @@ impl fmt::Display for ShowKind {
}
/// SQL structure for `SHOW DATABASES`.
-#[derive(Debug, Clone, PartialEq, Eq)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
pub struct ShowDatabases {
pub kind: ShowKind,
}
@@ -48,14 +50,14 @@ impl ShowDatabases {
}
/// SQL structure for `SHOW TABLES`.
-#[derive(Debug, Clone, PartialEq, Eq)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
pub struct ShowTables {
pub kind: ShowKind,
pub database: Option<String>,
}
/// SQL structure for `SHOW CREATE TABLE`.
-#[derive(Debug, Clone, PartialEq, Eq)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
pub struct ShowCreateTable {
pub table_name: ObjectName,
}
diff --git a/src/sql/src/statements/statement.rs b/src/sql/src/statements/statement.rs
index 5263f5558d34..462535be4c1a 100644
--- a/src/sql/src/statements/statement.rs
+++ b/src/sql/src/statements/statement.rs
@@ -14,6 +14,7 @@
use datafusion_sql::parser::Statement as DfStatement;
use sqlparser::ast::Statement as SpStatement;
+use sqlparser_derive::{Visit, VisitMut};
use crate::error::{ConvertToDfStatementSnafu, Error};
use crate::statements::alter::AlterTable;
@@ -30,7 +31,7 @@ use crate::statements::truncate::TruncateTable;
/// Tokens parsed by `DFParser` are converted into these values.
#[allow(clippy::large_enum_variant)]
-#[derive(Debug, Clone, PartialEq, Eq)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
pub enum Statement {
// Query
Query(Box<Query>),
diff --git a/src/sql/src/statements/tql.rs b/src/sql/src/statements/tql.rs
index d9b7187bf733..24bfeb187087 100644
--- a/src/sql/src/statements/tql.rs
+++ b/src/sql/src/statements/tql.rs
@@ -11,14 +11,16 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
-#[derive(Debug, Clone, PartialEq, Eq)]
+use sqlparser_derive::{Visit, VisitMut};
+
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
pub enum Tql {
Eval(TqlEval),
Explain(TqlExplain),
Analyze(TqlAnalyze),
}
-#[derive(Debug, Clone, PartialEq, Eq)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
pub struct TqlEval {
pub start: String,
pub end: String,
@@ -27,7 +29,7 @@ pub struct TqlEval {
}
/// TQL EXPLAIN (like SQL EXPLAIN): doesn't execute the query but tells how the query would be executed.
-#[derive(Debug, Clone, PartialEq, Eq)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
pub struct TqlExplain {
pub start: String,
pub end: String,
@@ -36,7 +38,7 @@ pub struct TqlExplain {
}
/// TQL ANALYZE (like SQL ANALYZE): executes the plan and tells the detailed per-step execution time.
-#[derive(Debug, Clone, PartialEq, Eq)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
pub struct TqlAnalyze {
pub start: String,
pub end: String,
diff --git a/src/sql/src/statements/transform.rs b/src/sql/src/statements/transform.rs
new file mode 100644
index 000000000000..e48cb29f1b70
--- /dev/null
+++ b/src/sql/src/statements/transform.rs
@@ -0,0 +1,64 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::ops::ControlFlow;
+use std::sync::Arc;
+
+use lazy_static::lazy_static;
+use sqlparser::ast::{visit_expressions_mut, Expr};
+
+use crate::error::Result;
+use crate::statements::statement::Statement;
+mod type_alias;
+pub use type_alias::get_data_type_by_alias_name;
+use type_alias::TypeAliasTransformRule;
+
+lazy_static! {
+ /// [TransformRule] registry
+ static ref RULES: Vec<Arc<dyn TransformRule>> = vec![
+ Arc::new(TypeAliasTransformRule{}),
+ ];
+}
+
+/// Transform rule to transform statement or expr
+pub(crate) trait TransformRule: Send + Sync {
+ /// Visit a [Statement]
+ fn visit_statement(&self, _stmt: &mut Statement) -> Result<()> {
+ Ok(())
+ }
+
+ /// Visit an [Expr]
+ fn visit_expr(&self, _expr: &mut Expr) -> ControlFlow<()> {
+ ControlFlow::<()>::Continue(())
+ }
+}
+
+/// Transform statements by rules
+pub fn transform_statements(stmts: &mut Vec<Statement>) -> Result<()> {
+ for stmt in &mut *stmts {
+ for rule in RULES.iter() {
+ rule.visit_statement(stmt)?;
+ }
+ }
+
+ visit_expressions_mut(stmts, |expr| {
+ for rule in RULES.iter() {
+ rule.visit_expr(expr)?;
+ }
+
+ ControlFlow::<()>::Continue(())
+ });
+
+ Ok(())
+}
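
The transform module above pairs a static rule registry with a single pass over parsed statements and their expressions. A condensed sketch of that rule-registry pattern, with a hypothetical `Stmt` string wrapper standing in for the real `Statement` AST and only the statement-level hook shown, could be:

// Hypothetical simplified statement; the real code walks sqlparser AST nodes.
struct Stmt {
    text: String,
}

// Each rule overrides only the hooks it cares about; defaults are no-ops.
trait TransformRule: Send + Sync {
    fn visit_statement(&self, _stmt: &mut Stmt) {}
}

struct UppercaseSelect;

impl TransformRule for UppercaseSelect {
    fn visit_statement(&self, stmt: &mut Stmt) {
        stmt.text = stmt.text.replace("select", "SELECT");
    }
}

// Applies every registered rule to every statement, like `transform_statements`.
fn apply_rules(stmts: &mut [Stmt], rules: &[Box<dyn TransformRule>]) {
    for stmt in stmts.iter_mut() {
        for rule in rules {
            rule.visit_statement(stmt);
        }
    }
}

fn main() {
    let rules: Vec<Box<dyn TransformRule>> = vec![Box::new(UppercaseSelect)];
    let mut stmts = vec![Stmt { text: "select 1".to_string() }];
    apply_rules(&mut stmts, &rules);
    // Prints "SELECT 1".
    println!("{}", stmts[0].text);
}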
diff --git a/src/sql/src/statements/transform/type_alias.rs b/src/sql/src/statements/transform/type_alias.rs
new file mode 100644
index 000000000000..871b38a81f94
--- /dev/null
+++ b/src/sql/src/statements/transform/type_alias.rs
@@ -0,0 +1,353 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+use std::ops::ControlFlow;
+
+use datatypes::data_type::DataType as GreptimeDataType;
+use sqlparser::ast::{
+ ColumnDef, DataType, Expr, Function, FunctionArg, FunctionArgExpr, Ident, ObjectName, Value,
+};
+
+use crate::error::Result;
+use crate::statements::create::{CreateExternalTable, CreateTable};
+use crate::statements::statement::Statement;
+use crate::statements::transform::TransformRule;
+use crate::statements::{sql_data_type_to_concrete_data_type, TimezoneInfo};
+
+/// SQL data type alias transformer:
+/// - `TimestampSecond`, `Timestamp_s`, `Timestamp_sec` for `Timestamp(0)`.
+/// - `TimestampMillisecond`, `Timestamp_ms` for `Timestamp(3)`.
+/// - `TimestampMicrosecond`, `Timestamp_us` for `Timestamp(6)`.
+/// - `TimestampNanosecond`, `Timestamp_ns` for `Timestamp(9)`.
+/// - `INT8` for `tinyint`
+/// - `INT16` for `smallint`
+/// - `INT32` for `int`
+/// - `INT64` for `bigint`
+/// - And `UINT8`, `UINT16` etc. for `UnsignedTinyint` etc.
+pub(crate) struct TypeAliasTransformRule;
+
+impl TransformRule for TypeAliasTransformRule {
+ fn visit_statement(&self, stmt: &mut Statement) -> Result<()> {
+ match stmt {
+ Statement::CreateTable(CreateTable { columns, .. }) => {
+ columns
+ .iter_mut()
+ .for_each(|ColumnDef { data_type, .. }| replace_type_alias(data_type));
+ }
+ Statement::CreateExternalTable(CreateExternalTable { columns, .. }) => {
+ columns
+ .iter_mut()
+ .for_each(|ColumnDef { data_type, .. }| replace_type_alias(data_type));
+ }
+ _ => {}
+ }
+
+ Ok(())
+ }
+
+ fn visit_expr(&self, expr: &mut Expr) -> ControlFlow<()> {
+ match expr {
+ // Type alias
+ Expr::Cast {
+ data_type: DataType::Custom(name, tokens),
+ expr: cast_expr,
+ } if name.0.len() == 1 && tokens.is_empty() => {
+ if let Some(new_type) = get_data_type_by_alias_name(name.0[0].value.as_str()) {
+ if let Ok(concrete_type) = sql_data_type_to_concrete_data_type(&new_type) {
+ let new_type = concrete_type.as_arrow_type();
+ *expr = Expr::Function(Function {
+ name: ObjectName(vec![Ident::new("arrow_cast")]),
+ args: vec![
+ FunctionArg::Unnamed(FunctionArgExpr::Expr((**cast_expr).clone())),
+ FunctionArg::Unnamed(FunctionArgExpr::Expr(Expr::Value(
+ Value::SingleQuotedString(new_type.to_string()),
+ ))),
+ ],
+ over: None,
+ distinct: false,
+ special: false,
+ order_by: vec![],
+ });
+ }
+ }
+ }
+
+ // Timestamp(precision) in cast, datafusion doesn't support Timestamp(9) etc.
+ // We have to transform it into arrow_cast(expr, type).
+ Expr::Cast {
+ data_type: DataType::Timestamp(precision, zone),
+ expr: cast_expr,
+ } => {
+ if let Ok(concrete_type) =
+ sql_data_type_to_concrete_data_type(&DataType::Timestamp(*precision, *zone))
+ {
+ let new_type = concrete_type.as_arrow_type();
+ *expr = Expr::Function(Function {
+ name: ObjectName(vec![Ident::new("arrow_cast")]),
+ args: vec![
+ FunctionArg::Unnamed(FunctionArgExpr::Expr((**cast_expr).clone())),
+ FunctionArg::Unnamed(FunctionArgExpr::Expr(Expr::Value(
+ Value::SingleQuotedString(new_type.to_string()),
+ ))),
+ ],
+ over: None,
+ distinct: false,
+ special: false,
+ order_by: vec![],
+ });
+ }
+ }
+
+ // TODO(dennis): supports try_cast
+ _ => {}
+ }
+
+ ControlFlow::<()>::Continue(())
+ }
+}
+
+fn replace_type_alias(data_type: &mut DataType) {
+ match data_type {
+    // TODO(dennis): The latest sqlparser version contains the Int8 alias for Postgres Bigint,
+    // which means 8 bytes in Postgres (not 8 bits). If we upgrade sqlparser, we need to handle it.
+ // See https://docs.rs/sqlparser/latest/sqlparser/ast/enum.DataType.html#variant.Int8
+ DataType::Custom(name, tokens) if name.0.len() == 1 && tokens.is_empty() => {
+ if let Some(new_type) = get_data_type_by_alias_name(name.0[0].value.as_str()) {
+ *data_type = new_type;
+ }
+ }
+ _ => {}
+ }
+}
+
+pub fn get_data_type_by_alias_name(name: &str) -> Option<DataType> {
+ match name.to_uppercase().as_ref() {
+ // Timestamp type alias
+ "TIMESTAMP_S" | "TIMESTAMP_SEC" | "TIMESTAMPSECOND" => {
+ Some(DataType::Timestamp(Some(0), TimezoneInfo::None))
+ }
+
+ "TIMESTAMP_MS" | "TIMESTAMPMILLISECOND" => {
+ Some(DataType::Timestamp(Some(3), TimezoneInfo::None))
+ }
+ "TIMESTAMP_US" | "TIMESTAMPMICROSECOND" => {
+ Some(DataType::Timestamp(Some(6), TimezoneInfo::None))
+ }
+ "TIMESTAMP_NS" | "TIMESTAMPNANOSECOND" => {
+ Some(DataType::Timestamp(Some(9), TimezoneInfo::None))
+ }
+ // Number type alias
+ "INT8" => Some(DataType::TinyInt(None)),
+ "INT16" => Some(DataType::SmallInt(None)),
+ "INT32" => Some(DataType::Int(None)),
+ "INT64" => Some(DataType::BigInt(None)),
+ "UINT8" => Some(DataType::UnsignedTinyInt(None)),
+ "UINT16" => Some(DataType::UnsignedSmallInt(None)),
+ "UINT32" => Some(DataType::UnsignedInt(None)),
+ "UINT64" => Some(DataType::UnsignedBigInt(None)),
+ "FLOAT32" => Some(DataType::Float(None)),
+ "FLOAT64" => Some(DataType::Double),
+ _ => None,
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use sqlparser::dialect::GenericDialect;
+
+ use super::*;
+ use crate::parser::ParserContext;
+ use crate::statements::transform_statements;
+
+ #[test]
+ fn test_get_data_type_by_alias_name() {
+ assert_eq!(
+ get_data_type_by_alias_name("float64"),
+ Some(DataType::Double)
+ );
+ assert_eq!(
+ get_data_type_by_alias_name("Float64"),
+ Some(DataType::Double)
+ );
+ assert_eq!(
+ get_data_type_by_alias_name("FLOAT64"),
+ Some(DataType::Double)
+ );
+
+ assert_eq!(
+ get_data_type_by_alias_name("float32"),
+ Some(DataType::Float(None))
+ );
+ assert_eq!(
+ get_data_type_by_alias_name("int8"),
+ Some(DataType::TinyInt(None))
+ );
+ assert_eq!(
+ get_data_type_by_alias_name("INT16"),
+ Some(DataType::SmallInt(None))
+ );
+ assert_eq!(
+ get_data_type_by_alias_name("INT32"),
+ Some(DataType::Int(None))
+ );
+ assert_eq!(
+ get_data_type_by_alias_name("INT64"),
+ Some(DataType::BigInt(None))
+ );
+ assert_eq!(
+ get_data_type_by_alias_name("Uint8"),
+ Some(DataType::UnsignedTinyInt(None))
+ );
+ assert_eq!(
+ get_data_type_by_alias_name("UINT16"),
+ Some(DataType::UnsignedSmallInt(None))
+ );
+ assert_eq!(
+ get_data_type_by_alias_name("UINT32"),
+ Some(DataType::UnsignedInt(None))
+ );
+ assert_eq!(
+ get_data_type_by_alias_name("uint64"),
+ Some(DataType::UnsignedBigInt(None))
+ );
+
+ assert_eq!(
+ get_data_type_by_alias_name("TimestampSecond"),
+ Some(DataType::Timestamp(Some(0), TimezoneInfo::None))
+ );
+ assert_eq!(
+ get_data_type_by_alias_name("Timestamp_s"),
+ Some(DataType::Timestamp(Some(0), TimezoneInfo::None))
+ );
+ assert_eq!(
+ get_data_type_by_alias_name("Timestamp_sec"),
+ Some(DataType::Timestamp(Some(0), TimezoneInfo::None))
+ );
+
+ assert_eq!(
+ get_data_type_by_alias_name("TimestampMilliSecond"),
+ Some(DataType::Timestamp(Some(3), TimezoneInfo::None))
+ );
+ assert_eq!(
+ get_data_type_by_alias_name("Timestamp_ms"),
+ Some(DataType::Timestamp(Some(3), TimezoneInfo::None))
+ );
+
+ assert_eq!(
+ get_data_type_by_alias_name("TimestampMicroSecond"),
+ Some(DataType::Timestamp(Some(6), TimezoneInfo::None))
+ );
+ assert_eq!(
+ get_data_type_by_alias_name("Timestamp_us"),
+ Some(DataType::Timestamp(Some(6), TimezoneInfo::None))
+ );
+
+ assert_eq!(
+ get_data_type_by_alias_name("TimestampNanoSecond"),
+ Some(DataType::Timestamp(Some(9), TimezoneInfo::None))
+ );
+ assert_eq!(
+ get_data_type_by_alias_name("Timestamp_ns"),
+ Some(DataType::Timestamp(Some(9), TimezoneInfo::None))
+ );
+ }
+
+ fn test_timestamp_alias(alias: &str, expected: &str) {
+ let sql = format!("SELECT TIMESTAMP '2020-01-01 01:23:45.12345678'::{alias}");
+ let mut stmts = ParserContext::create_with_dialect(&sql, &GenericDialect {}).unwrap();
+ transform_statements(&mut stmts).unwrap();
+
+ match &stmts[0] {
+ Statement::Query(q) => assert_eq!(format!("SELECT arrow_cast(TIMESTAMP '2020-01-01 01:23:45.12345678', 'Timestamp({expected}, None)')"), q.to_string()),
+ _ => unreachable!(),
+ }
+ }
+
+ fn test_timestamp_precision_type(precision: i32, expected: &str) {
+ test_timestamp_alias(&format!("Timestamp({precision})"), expected);
+ }
+
+ #[test]
+ fn test_transform_timestamp_alias() {
+ // Timestamp[Second | Millisecond | Microsecond | Nanosecond]
+ test_timestamp_alias("TimestampSecond", "Second");
+ test_timestamp_alias("Timestamp_s", "Second");
+ test_timestamp_alias("TimestampMillisecond", "Millisecond");
+ test_timestamp_alias("Timestamp_ms", "Millisecond");
+ test_timestamp_alias("TimestampMicrosecond", "Microsecond");
+ test_timestamp_alias("Timestamp_us", "Microsecond");
+ test_timestamp_alias("TimestampNanosecond", "Nanosecond");
+ test_timestamp_alias("Timestamp_ns", "Nanosecond");
+ // Timestamp(precision)
+ test_timestamp_precision_type(0, "Second");
+ test_timestamp_precision_type(3, "Millisecond");
+ test_timestamp_precision_type(6, "Microsecond");
+ test_timestamp_precision_type(9, "Nanosecond");
+ }
+
+ #[test]
+ fn test_create_sql_with_type_alias() {
+ let sql = r#"
+CREATE TABLE data_types (
+ s string,
+ tint int8,
+ sint int16,
+ i int32,
+ bint int64,
+ v varchar,
+ f float32,
+ d float64,
+ b boolean,
+ vb varbinary,
+ dt date,
+ dtt datetime,
+ ts0 TimestampSecond,
+ ts3 TimestampMillisecond,
+ ts6 TimestampMicrosecond,
+ ts9 TimestampNanosecond DEFAULT CURRENT_TIMESTAMP TIME INDEX,
+ PRIMARY KEY(s));"#;
+
+ let mut stmts = ParserContext::create_with_dialect(sql, &GenericDialect {}).unwrap();
+ transform_statements(&mut stmts).unwrap();
+
+ match &stmts[0] {
+ Statement::CreateTable(c) => {
+ let expected = r#"CREATE TABLE data_types (
+ s STRING,
+ tint TINYINT,
+ sint SMALLINT,
+ i INT,
+ bint BIGINT,
+ v VARCHAR,
+ f FLOAT,
+ d DOUBLE,
+ b BOOLEAN,
+ vb VARBINARY,
+ dt DATE,
+ dtt DATETIME,
+ ts0 TIMESTAMP(0),
+ ts3 TIMESTAMP(3),
+ ts6 TIMESTAMP(6),
+ ts9 TIMESTAMP(9) DEFAULT CURRENT_TIMESTAMP() NOT NULL,
+ TIME INDEX (ts9),
+ PRIMARY KEY (s)
+)
+ENGINE=mito
+"#;
+
+ assert_eq!(expected, c.to_string());
+ }
+ _ => unreachable!(),
+ }
+ }
+}
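For readers skimming the transform above, here is a minimal, self-contained sketch of the same idea: map a type alias to an Arrow type name and rewrite `expr::ALIAS` into `arrow_cast(expr, 'ArrowType')`. The helper names and the string-level rewrite are illustrative assumptions only; the real rule works on the sqlparser AST as shown in the diff.

// Illustrative sketch only; the actual rule rewrites the sqlparser AST, not SQL strings.
fn alias_to_arrow_type(alias: &str) -> Option<&'static str> {
    match alias.to_uppercase().as_str() {
        "TIMESTAMP_S" | "TIMESTAMP_SEC" | "TIMESTAMPSECOND" => Some("Timestamp(Second, None)"),
        "TIMESTAMP_MS" | "TIMESTAMPMILLISECOND" => Some("Timestamp(Millisecond, None)"),
        "TIMESTAMP_US" | "TIMESTAMPMICROSECOND" => Some("Timestamp(Microsecond, None)"),
        "TIMESTAMP_NS" | "TIMESTAMPNANOSECOND" => Some("Timestamp(Nanosecond, None)"),
        _ => None,
    }
}

// Rewrites `expr::ALIAS` into `arrow_cast(expr, 'ArrowType')`, mirroring visit_expr above.
fn rewrite_cast(expr_sql: &str, alias: &str) -> Option<String> {
    alias_to_arrow_type(alias).map(|ty| format!("arrow_cast({expr_sql}, '{ty}')"))
}

fn main() {
    assert_eq!(
        rewrite_cast("TIMESTAMP '2020-01-01 01:23:45.12345678'", "Timestamp_ms").as_deref(),
        Some("arrow_cast(TIMESTAMP '2020-01-01 01:23:45.12345678', 'Timestamp(Millisecond, None)')")
    );
}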
diff --git a/src/sql/src/statements/truncate.rs b/src/sql/src/statements/truncate.rs
index e7eae4e18572..aa08cde559b4 100644
--- a/src/sql/src/statements/truncate.rs
+++ b/src/sql/src/statements/truncate.rs
@@ -13,9 +13,10 @@
// limitations under the License.
use sqlparser::ast::ObjectName;
+use sqlparser_derive::{Visit, VisitMut};
/// TRUNCATE TABLE statement.
-#[derive(Debug, Clone, PartialEq, Eq)]
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
pub struct TruncateTable {
table_name: ObjectName,
}
diff --git a/tests/cases/standalone/common/create/create_type_alias.result b/tests/cases/standalone/common/create/create_type_alias.result
new file mode 100644
index 000000000000..6e192a4e5f05
--- /dev/null
+++ b/tests/cases/standalone/common/create/create_type_alias.result
@@ -0,0 +1,80 @@
+CREATE TABLE data_types (
+ s string,
+ tint int8,
+ sint int16,
+ i INT32,
+ bint INT64,
+ v varchar,
+ f FLOAT32,
+ d FLOAT64,
+ b boolean,
+ vb varbinary,
+ dt date,
+ dtt datetime,
+ ts0 TimestampSecond,
+ ts3 Timestamp_MS,
+ ts6 Timestamp_US,
+ ts9 TimestampNanosecond DEFAULT CURRENT_TIMESTAMP TIME INDEX,
+ PRIMARY KEY(s));
+
+Affected Rows: 0
+
+SHOW CREATE TABLE data_types;
+
++------------+------------------------------------------------------------+
+| Table | Create Table |
++------------+------------------------------------------------------------+
+| data_types | CREATE TABLE IF NOT EXISTS "data_types" ( |
+| | "s" STRING NULL, |
+| | "tint" TINYINT NULL, |
+| | "sint" SMALLINT NULL, |
+| | "i" INT NULL, |
+| | "bint" BIGINT NULL, |
+| | "v" STRING NULL, |
+| | "f" FLOAT NULL, |
+| | "d" DOUBLE NULL, |
+| | "b" BOOLEAN NULL, |
+| | "vb" VARBINARY NULL, |
+| | "dt" DATE NULL, |
+| | "dtt" DATETIME NULL, |
+| | "ts0" TIMESTAMP(0) NULL, |
+| | "ts3" TIMESTAMP(3) NULL, |
+| | "ts6" TIMESTAMP(6) NULL, |
+| | "ts9" TIMESTAMP(9) NOT NULL DEFAULT current_timestamp(), |
+| | TIME INDEX ("ts9"), |
+| | PRIMARY KEY ("s") |
+| | ) |
+| | |
+| | ENGINE=mito |
+| | WITH( |
+| | regions = 1 |
+| | ) |
++------------+------------------------------------------------------------+
+
+DESC TABLE data_types;
+
++--------+----------------------+-----+------+---------------------+---------------+
+| Column | Type | Key | Null | Default | Semantic Type |
++--------+----------------------+-----+------+---------------------+---------------+
+| s | String | PRI | YES | | TAG |
+| tint | Int8 | | YES | | FIELD |
+| sint | Int16 | | YES | | FIELD |
+| i | Int32 | | YES | | FIELD |
+| bint | Int64 | | YES | | FIELD |
+| v | String | | YES | | FIELD |
+| f | Float32 | | YES | | FIELD |
+| d | Float64 | | YES | | FIELD |
+| b | Boolean | | YES | | FIELD |
+| vb | Binary | | YES | | FIELD |
+| dt | Date | | YES | | FIELD |
+| dtt | DateTime | | YES | | FIELD |
+| ts0 | TimestampSecond | | YES | | FIELD |
+| ts3 | TimestampMillisecond | | YES | | FIELD |
+| ts6 | TimestampMicrosecond | | YES | | FIELD |
+| ts9 | TimestampNanosecond | PRI | NO | current_timestamp() | TIMESTAMP |
++--------+----------------------+-----+------+---------------------+---------------+
+
+DROP TABLE data_types;
+
+Affected Rows: 1
+
diff --git a/tests/cases/standalone/common/create/create_type_alias.sql b/tests/cases/standalone/common/create/create_type_alias.sql
new file mode 100644
index 000000000000..e4ef33824563
--- /dev/null
+++ b/tests/cases/standalone/common/create/create_type_alias.sql
@@ -0,0 +1,24 @@
+CREATE TABLE data_types (
+ s string,
+ tint int8,
+ sint int16,
+ i INT32,
+ bint INT64,
+ v varchar,
+ f FLOAT32,
+ d FLOAT64,
+ b boolean,
+ vb varbinary,
+ dt date,
+ dtt datetime,
+ ts0 TimestampSecond,
+ ts3 Timestamp_MS,
+ ts6 Timestamp_US,
+ ts9 TimestampNanosecond DEFAULT CURRENT_TIMESTAMP TIME INDEX,
+ PRIMARY KEY(s));
+
+SHOW CREATE TABLE data_types;
+
+DESC TABLE data_types;
+
+DROP TABLE data_types;
diff --git a/tests/cases/standalone/common/delete/delete.result b/tests/cases/standalone/common/delete/delete.result
index 4c517017d105..0a593f3f5c70 100644
--- a/tests/cases/standalone/common/delete/delete.result
+++ b/tests/cases/standalone/common/delete/delete.result
@@ -31,7 +31,7 @@ SELECT ts, host, cpu, memory FROM monitor ORDER BY ts;
| 2022-06-15T07:02:39 | host3 | 88.8 | 4096.0 |
+---------------------+-------+------+--------+
-DELETE FROM monitor WHERE host = 'host1' AND ts = 1655276557000000000::timestamp;
+DELETE FROM monitor WHERE host = 'host1' AND ts = 1655276557000::timestamp;
Affected Rows: 1
diff --git a/tests/cases/standalone/common/delete/delete.sql b/tests/cases/standalone/common/delete/delete.sql
index 8cb9a2559c0d..f59eb73df81b 100644
--- a/tests/cases/standalone/common/delete/delete.sql
+++ b/tests/cases/standalone/common/delete/delete.sql
@@ -13,7 +13,7 @@ INSERT INTO monitor(ts, host, cpu, memory) VALUES
SELECT ts, host, cpu, memory FROM monitor ORDER BY ts;
-DELETE FROM monitor WHERE host = 'host1' AND ts = 1655276557000000000::timestamp;
+DELETE FROM monitor WHERE host = 'host1' AND ts = 1655276557000::timestamp;
DELETE FROM monitor WHERE host = 'host2';
diff --git a/tests/cases/standalone/common/types/timestamp/timestamp_precision.result b/tests/cases/standalone/common/types/timestamp/timestamp_precision.result
index 441c9192bc61..6c6877020031 100644
--- a/tests/cases/standalone/common/types/timestamp/timestamp_precision.result
+++ b/tests/cases/standalone/common/types/timestamp/timestamp_precision.result
@@ -29,31 +29,59 @@ Error: 1003(Internal), Execution error: Date part 'MICROSECONDS' not supported
-- any other precision is rounded up (e.g. 1/2 -> 3, 4/5 -> 6, 7/8 -> 9)
SELECT TIMESTAMP '2020-01-01 01:23:45.123456789'::TIMESTAMP(0);
-Error: 3000(PlanQuery), This feature is not implemented: Unsupported SQL type Timestamp(Some(0), None)
++---------------------------------------+
+| Utf8("2020-01-01 01:23:45.123456789") |
++---------------------------------------+
+| 2020-01-01T01:23:45 |
++---------------------------------------+
SELECT TIMESTAMP '2020-01-01 01:23:45.123456789'::TIMESTAMP(3);
-Error: 3000(PlanQuery), This feature is not implemented: Unsupported SQL type Timestamp(Some(3), None)
++---------------------------------------+
+| Utf8("2020-01-01 01:23:45.123456789") |
++---------------------------------------+
+| 2020-01-01T01:23:45.123 |
++---------------------------------------+
SELECT TIMESTAMP '2020-01-01 01:23:45.123456789'::TIMESTAMP(6);
-Error: 3000(PlanQuery), This feature is not implemented: Unsupported SQL type Timestamp(Some(6), None)
++---------------------------------------+
+| Utf8("2020-01-01 01:23:45.123456789") |
++---------------------------------------+
+| 2020-01-01T01:23:45.123456 |
++---------------------------------------+
SELECT TIMESTAMP '2020-01-01 01:23:45.123456789'::TIMESTAMP(9);
-Error: 3000(PlanQuery), This feature is not implemented: Unsupported SQL type Timestamp(Some(9), None)
++---------------------------------------+
+| Utf8("2020-01-01 01:23:45.123456789") |
++---------------------------------------+
+| 2020-01-01T01:23:45.123456789 |
++---------------------------------------+
SELECT TIMESTAMP '2020-01-01 01:23:45.12'::TIMESTAMP(3);
-Error: 3000(PlanQuery), This feature is not implemented: Unsupported SQL type Timestamp(Some(3), None)
++--------------------------------+
+| Utf8("2020-01-01 01:23:45.12") |
++--------------------------------+
+| 2020-01-01T01:23:45.120 |
++--------------------------------+
SELECT TIMESTAMP '2020-01-01 01:23:45.12345'::TIMESTAMP(6);
-Error: 3000(PlanQuery), This feature is not implemented: Unsupported SQL type Timestamp(Some(6), None)
++-----------------------------------+
+| Utf8("2020-01-01 01:23:45.12345") |
++-----------------------------------+
+| 2020-01-01T01:23:45.123450 |
++-----------------------------------+
SELECT TIMESTAMP '2020-01-01 01:23:45.12345678'::TIMESTAMP(9);
-Error: 3000(PlanQuery), This feature is not implemented: Unsupported SQL type Timestamp(Some(9), None)
++--------------------------------------+
+| Utf8("2020-01-01 01:23:45.12345678") |
++--------------------------------------+
+| 2020-01-01T01:23:45.123456780 |
++--------------------------------------+
DROP TABLE ts_precision;
diff --git a/tests/cases/standalone/common/types/timestamp/timestamp_types.result b/tests/cases/standalone/common/types/timestamp/timestamp_types.result
new file mode 100644
index 000000000000..349caedcb0ee
--- /dev/null
+++ b/tests/cases/standalone/common/types/timestamp/timestamp_types.result
@@ -0,0 +1,380 @@
+--description: Test TIMESTAMP types
+CREATE TABLE IF NOT EXISTS timestamp (sec TIMESTAMP_S, milli TIMESTAMP_MS,micro TIMESTAMP_US, nano TIMESTAMP_NS, ts TIMESTAMP TIME INDEX);
+
+Affected Rows: 0
+
+INSERT INTO timestamp VALUES ('2008-01-01 00:00:01','2008-01-01 00:00:01.594','2008-01-01 00:00:01.88926','2008-01-01 00:00:01.889268321', 1);
+
+Affected Rows: 1
+
+SELECT * from timestamp;
+
++---------------------+-------------------------+----------------------------+-------------------------------+-------------------------+
+| sec | milli | micro | nano | ts |
++---------------------+-------------------------+----------------------------+-------------------------------+-------------------------+
+| 2008-01-01T00:00:01 | 2008-01-01T00:00:01.594 | 2008-01-01T00:00:01.889260 | 2008-01-01T00:00:01.889268321 | 1970-01-01T00:00:00.001 |
++---------------------+-------------------------+----------------------------+-------------------------------+-------------------------+
+
+SELECT extract(YEAR from sec),extract( YEAR from milli),extract(YEAR from nano) from timestamp;
+
++---------------------------------------+-----------------------------------------+----------------------------------------+
+| date_part(Utf8("YEAR"),timestamp.sec) | date_part(Utf8("YEAR"),timestamp.milli) | date_part(Utf8("YEAR"),timestamp.nano) |
++---------------------------------------+-----------------------------------------+----------------------------------------+
+| 2008.0 | 2008.0 | 2008.0 |
++---------------------------------------+-----------------------------------------+----------------------------------------+
+
+SELECT nano::TIMESTAMP, milli::TIMESTAMP,sec::TIMESTAMP from timestamp;
+
++-------------------------+-------------------------+---------------------+
+| timestamp.nano | milli | timestamp.sec |
++-------------------------+-------------------------+---------------------+
+| 2008-01-01T00:00:01.889 | 2008-01-01T00:00:01.594 | 2008-01-01T00:00:01 |
++-------------------------+-------------------------+---------------------+
+
+SELECT micro::TIMESTAMP_S as m1, micro::TIMESTAMP_MS as m2,micro::TIMESTAMP_NS as m3 from timestamp;
+
++---------------------+-------------------------+----------------------------+
+| m1 | m2 | m3 |
++---------------------+-------------------------+----------------------------+
+| 2008-01-01T00:00:01 | 2008-01-01T00:00:01.889 | 2008-01-01T00:00:01.889260 |
++---------------------+-------------------------+----------------------------+
+
+INSERT INTO timestamp VALUES ('2008-01-01 00:00:51','2008-01-01 00:00:01.894','2008-01-01 00:00:01.99926','2008-01-01 00:00:01.999268321', 2);
+
+Affected Rows: 1
+
+INSERT INTO timestamp VALUES ('2008-01-01 00:00:11','2008-01-01 00:00:01.794','2008-01-01 00:00:01.98926','2008-01-01 00:00:01.899268321', 3);
+
+Affected Rows: 1
+
+select '90000-01-19 03:14:07.999999'::TIMESTAMP_US::TIMESTAMP_NS;
+
+Error: 3001(EngineExecuteQuery), Parser error: Error parsing timestamp from '90000-01-19 03:14:07.999999': error parsing date
+
+select sec::DATE from timestamp;
+
++---------------+
+| timestamp.sec |
++---------------+
+| 2008-01-01 |
+| 2008-01-01 |
+| 2008-01-01 |
++---------------+
+
+select milli::DATE from timestamp;
+
++-----------------+
+| timestamp.milli |
++-----------------+
+| 2008-01-01 |
+| 2008-01-01 |
+| 2008-01-01 |
++-----------------+
+
+select nano::DATE from timestamp;
+
++----------------+
+| timestamp.nano |
++----------------+
+| 2008-01-01 |
+| 2008-01-01 |
+| 2008-01-01 |
++----------------+
+
+select sec::TIME from timestamp;
+
++---------------+
+| timestamp.sec |
++---------------+
+| 00:00:01 |
+| 00:00:51 |
+| 00:00:11 |
++---------------+
+
+select milli::TIME from timestamp;
+
++-----------------+
+| timestamp.milli |
++-----------------+
+| 00:00:01.594 |
+| 00:00:01.894 |
+| 00:00:01.794 |
++-----------------+
+
+select nano::TIME from timestamp;
+
++--------------------+
+| timestamp.nano |
++--------------------+
+| 00:00:01.889268321 |
+| 00:00:01.999268321 |
+| 00:00:01.899268321 |
++--------------------+
+
+select sec::TIMESTAMP_MS from timestamp;
+
++---------------------+
+| timestamp.sec |
++---------------------+
+| 2008-01-01T00:00:01 |
+| 2008-01-01T00:00:51 |
+| 2008-01-01T00:00:11 |
++---------------------+
+
+select sec::TIMESTAMP_NS from timestamp;
+
++---------------------+
+| timestamp.sec |
++---------------------+
+| 2008-01-01T00:00:01 |
+| 2008-01-01T00:00:51 |
+| 2008-01-01T00:00:11 |
++---------------------+
+
+select milli::TIMESTAMP_SEC from timestamp;
+
++---------------------+
+| timestamp.milli |
++---------------------+
+| 2008-01-01T00:00:01 |
+| 2008-01-01T00:00:01 |
+| 2008-01-01T00:00:01 |
++---------------------+
+
+select milli::TIMESTAMP_NS from timestamp;
+
++-------------------------+
+| timestamp.milli |
++-------------------------+
+| 2008-01-01T00:00:01.594 |
+| 2008-01-01T00:00:01.894 |
+| 2008-01-01T00:00:01.794 |
++-------------------------+
+
+select nano::TIMESTAMP_SEC from timestamp;
+
++---------------------+
+| timestamp.nano |
++---------------------+
+| 2008-01-01T00:00:01 |
+| 2008-01-01T00:00:01 |
+| 2008-01-01T00:00:01 |
++---------------------+
+
+select nano::TIMESTAMP_MS from timestamp;
+
++-------------------------+
+| timestamp.nano |
++-------------------------+
+| 2008-01-01T00:00:01.889 |
+| 2008-01-01T00:00:01.999 |
+| 2008-01-01T00:00:01.899 |
++-------------------------+
+
+select sec from timestamp order by sec;
+
++---------------------+
+| sec |
++---------------------+
+| 2008-01-01T00:00:01 |
+| 2008-01-01T00:00:11 |
+| 2008-01-01T00:00:51 |
++---------------------+
+
+select milli from timestamp order by milli;
+
++-------------------------+
+| milli |
++-------------------------+
+| 2008-01-01T00:00:01.594 |
+| 2008-01-01T00:00:01.794 |
+| 2008-01-01T00:00:01.894 |
++-------------------------+
+
+select nano from timestamp order by nano;
+
++-------------------------------+
+| nano |
++-------------------------------+
+| 2008-01-01T00:00:01.889268321 |
+| 2008-01-01T00:00:01.899268321 |
+| 2008-01-01T00:00:01.999268321 |
++-------------------------------+
+
+INSERT INTO timestamp VALUES ('2008-01-01 00:00:51','2008-01-01 00:00:01.894','2008-01-01 00:00:01.99926','2008-01-01 00:00:01.999268321', 4);
+
+Affected Rows: 1
+
+INSERT INTO timestamp VALUES ('2008-01-01 00:00:11','2008-01-01 00:00:01.794','2008-01-01 00:00:01.98926','2008-01-01 00:00:01.899268321', 5);
+
+Affected Rows: 1
+
+select count(*), nano from timestamp group by nano order by nano;
+
++-----------------+-------------------------------+
+| COUNT(UInt8(1)) | nano |
++-----------------+-------------------------------+
+| 1 | 2008-01-01T00:00:01.889268321 |
+| 2 | 2008-01-01T00:00:01.899268321 |
+| 2 | 2008-01-01T00:00:01.999268321 |
++-----------------+-------------------------------+
+
+select count(*), sec from timestamp group by sec order by sec;
+
++-----------------+---------------------+
+| COUNT(UInt8(1)) | sec |
++-----------------+---------------------+
+| 1 | 2008-01-01T00:00:01 |
+| 2 | 2008-01-01T00:00:11 |
+| 2 | 2008-01-01T00:00:51 |
++-----------------+---------------------+
+
+select count(*), milli from timestamp group by milli order by milli;
+
++-----------------+-------------------------+
+| COUNT(UInt8(1)) | milli |
++-----------------+-------------------------+
+| 1 | 2008-01-01T00:00:01.594 |
+| 2 | 2008-01-01T00:00:01.794 |
+| 2 | 2008-01-01T00:00:01.894 |
++-----------------+-------------------------+
+
+CREATE TABLE IF NOT EXISTS timestamp_two (sec TIMESTAMP_S, milli TIMESTAMP_MS,micro TIMESTAMP_US, nano TIMESTAMP_NS TIME INDEX);
+
+Affected Rows: 0
+
+INSERT INTO timestamp_two VALUES ('2008-01-01 00:00:11','2008-01-01 00:00:01.794','2008-01-01 00:00:01.98926','2008-01-01 00:00:01.899268321');
+
+Affected Rows: 1
+
+select timestamp.sec from timestamp inner join timestamp_two on (timestamp.sec = timestamp_two.sec);
+
++---------------------+
+| sec |
++---------------------+
+| 2008-01-01T00:00:11 |
+| 2008-01-01T00:00:11 |
++---------------------+
+
+select timestamp.milli from timestamp inner join timestamp_two on (timestamp.milli = timestamp_two.milli);
+
++-------------------------+
+| milli |
++-------------------------+
+| 2008-01-01T00:00:01.794 |
+| 2008-01-01T00:00:01.794 |
++-------------------------+
+
+select timestamp.nano from timestamp inner join timestamp_two on (timestamp.nano = timestamp_two.nano);
+
++-------------------------------+
+| nano |
++-------------------------------+
+| 2008-01-01T00:00:01.899268321 |
+| 2008-01-01T00:00:01.899268321 |
++-------------------------------+
+
+select '2008-01-01 00:00:11'::TIMESTAMP_US = '2008-01-01 00:00:11'::TIMESTAMP_MS;
+
++-----------------------------------------------------------+
+| Utf8("2008-01-01 00:00:11") = Utf8("2008-01-01 00:00:11") |
++-----------------------------------------------------------+
+| true |
++-----------------------------------------------------------+
+
+select '2008-01-01 00:00:11'::TIMESTAMP_US = '2008-01-01 00:00:11'::TIMESTAMP_NS;
+
++-----------------------------------------------------------+
+| Utf8("2008-01-01 00:00:11") = Utf8("2008-01-01 00:00:11") |
++-----------------------------------------------------------+
+| true |
++-----------------------------------------------------------+
+
+select '2008-01-01 00:00:11'::TIMESTAMP_US = '2008-01-01 00:00:11'::TIMESTAMP_S;
+
++-----------------------------------------------------------+
+| Utf8("2008-01-01 00:00:11") = Utf8("2008-01-01 00:00:11") |
++-----------------------------------------------------------+
+| true |
++-----------------------------------------------------------+
+
+select '2008-01-01 00:00:11.1'::TIMESTAMP_US = '2008-01-01 00:00:11'::TIMESTAMP_MS;
+
++-------------------------------------------------------------+
+| Utf8("2008-01-01 00:00:11.1") = Utf8("2008-01-01 00:00:11") |
++-------------------------------------------------------------+
+| false |
++-------------------------------------------------------------+
+
+select '2008-01-01 00:00:11.1'::TIMESTAMP_US = '2008-01-01 00:00:11'::TIMESTAMP_NS;
+
++-------------------------------------------------------------+
+| Utf8("2008-01-01 00:00:11.1") = Utf8("2008-01-01 00:00:11") |
++-------------------------------------------------------------+
+| false |
++-------------------------------------------------------------+
+
+select '2008-01-01 00:00:11.1'::TIMESTAMP_US = '2008-01-01 00:00:11.1'::TIMESTAMP_S;
+
++---------------------------------------------------------------+
+| Utf8("2008-01-01 00:00:11.1") = Utf8("2008-01-01 00:00:11.1") |
++---------------------------------------------------------------+
+| true |
++---------------------------------------------------------------+
+
+select '2008-01-01 00:00:11.1'::TIMESTAMP_MS = '2008-01-01 00:00:11'::TIMESTAMP_NS;
+
++-------------------------------------------------------------+
+| Utf8("2008-01-01 00:00:11.1") = Utf8("2008-01-01 00:00:11") |
++-------------------------------------------------------------+
+| false |
++-------------------------------------------------------------+
+
+select '2008-01-01 00:00:11.1'::TIMESTAMP_MS = '2008-01-01 00:00:11'::TIMESTAMP_S;
+
++-------------------------------------------------------------+
+| Utf8("2008-01-01 00:00:11.1") = Utf8("2008-01-01 00:00:11") |
++-------------------------------------------------------------+
+| true |
++-------------------------------------------------------------+
+
+select '2008-01-01 00:00:11.1'::TIMESTAMP_NS = '2008-01-01 00:00:11'::TIMESTAMP_S;
+
++-------------------------------------------------------------+
+| Utf8("2008-01-01 00:00:11.1") = Utf8("2008-01-01 00:00:11") |
++-------------------------------------------------------------+
+| true |
++-------------------------------------------------------------+
+
+select '2008-01-01 00:00:11'::TIMESTAMP_MS = '2008-01-01 00:00:11'::TIMESTAMP_NS;
+
++-----------------------------------------------------------+
+| Utf8("2008-01-01 00:00:11") = Utf8("2008-01-01 00:00:11") |
++-----------------------------------------------------------+
+| true |
++-----------------------------------------------------------+
+
+select '2008-01-01 00:00:11'::TIMESTAMP_MS = '2008-01-01 00:00:11'::TIMESTAMP_S;
+
++-----------------------------------------------------------+
+| Utf8("2008-01-01 00:00:11") = Utf8("2008-01-01 00:00:11") |
++-----------------------------------------------------------+
+| true |
++-----------------------------------------------------------+
+
+select '2008-01-01 00:00:11'::TIMESTAMP_NS = '2008-01-01 00:00:11'::TIMESTAMP_S;
+
++-----------------------------------------------------------+
+| Utf8("2008-01-01 00:00:11") = Utf8("2008-01-01 00:00:11") |
++-----------------------------------------------------------+
+| true |
++-----------------------------------------------------------+
+
+DROP TABLE timestamp;
+
+Affected Rows: 1
+
+DROP TABLE timestamp_two;
+
+Affected Rows: 1
+
diff --git a/tests/cases/standalone/common/types/timestamp/timestamp_types.sql b/tests/cases/standalone/common/types/timestamp/timestamp_types.sql
new file mode 100644
index 000000000000..6d73a453d17e
--- /dev/null
+++ b/tests/cases/standalone/common/types/timestamp/timestamp_types.sql
@@ -0,0 +1,99 @@
+--description: Test TIMESTAMP types
+
+CREATE TABLE IF NOT EXISTS timestamp (sec TIMESTAMP_S, milli TIMESTAMP_MS,micro TIMESTAMP_US, nano TIMESTAMP_NS, ts TIMESTAMP TIME INDEX);
+
+INSERT INTO timestamp VALUES ('2008-01-01 00:00:01','2008-01-01 00:00:01.594','2008-01-01 00:00:01.88926','2008-01-01 00:00:01.889268321', 1);
+
+SELECT * from timestamp;
+
+SELECT extract(YEAR from sec),extract( YEAR from milli),extract(YEAR from nano) from timestamp;
+
+SELECT nano::TIMESTAMP, milli::TIMESTAMP,sec::TIMESTAMP from timestamp;
+
+SELECT micro::TIMESTAMP_S as m1, micro::TIMESTAMP_MS as m2,micro::TIMESTAMP_NS as m3 from timestamp;
+
+
+INSERT INTO timestamp VALUES ('2008-01-01 00:00:51','2008-01-01 00:00:01.894','2008-01-01 00:00:01.99926','2008-01-01 00:00:01.999268321', 2);
+
+INSERT INTO timestamp VALUES ('2008-01-01 00:00:11','2008-01-01 00:00:01.794','2008-01-01 00:00:01.98926','2008-01-01 00:00:01.899268321', 3);
+
+
+select '90000-01-19 03:14:07.999999'::TIMESTAMP_US::TIMESTAMP_NS;
+
+select sec::DATE from timestamp;
+
+select milli::DATE from timestamp;
+
+select nano::DATE from timestamp;
+
+select sec::TIME from timestamp;
+
+select milli::TIME from timestamp;
+
+select nano::TIME from timestamp;
+
+select sec::TIMESTAMP_MS from timestamp;
+
+select sec::TIMESTAMP_NS from timestamp;
+
+select milli::TIMESTAMP_SEC from timestamp;
+
+select milli::TIMESTAMP_NS from timestamp;
+
+select nano::TIMESTAMP_SEC from timestamp;
+
+select nano::TIMESTAMP_MS from timestamp;
+
+select sec from timestamp order by sec;
+
+select milli from timestamp order by milli;
+
+select nano from timestamp order by nano;
+
+INSERT INTO timestamp VALUES ('2008-01-01 00:00:51','2008-01-01 00:00:01.894','2008-01-01 00:00:01.99926','2008-01-01 00:00:01.999268321', 4);
+
+INSERT INTO timestamp VALUES ('2008-01-01 00:00:11','2008-01-01 00:00:01.794','2008-01-01 00:00:01.98926','2008-01-01 00:00:01.899268321', 5);
+
+select count(*), nano from timestamp group by nano order by nano;
+
+select count(*), sec from timestamp group by sec order by sec;
+
+select count(*), milli from timestamp group by milli order by milli;
+
+CREATE TABLE IF NOT EXISTS timestamp_two (sec TIMESTAMP_S, milli TIMESTAMP_MS,micro TIMESTAMP_US, nano TIMESTAMP_NS TIME INDEX);
+
+INSERT INTO timestamp_two VALUES ('2008-01-01 00:00:11','2008-01-01 00:00:01.794','2008-01-01 00:00:01.98926','2008-01-01 00:00:01.899268321');
+
+select timestamp.sec from timestamp inner join timestamp_two on (timestamp.sec = timestamp_two.sec);
+
+select timestamp.milli from timestamp inner join timestamp_two on (timestamp.milli = timestamp_two.milli);
+
+select timestamp.nano from timestamp inner join timestamp_two on (timestamp.nano = timestamp_two.nano);
+
+select '2008-01-01 00:00:11'::TIMESTAMP_US = '2008-01-01 00:00:11'::TIMESTAMP_MS;
+
+select '2008-01-01 00:00:11'::TIMESTAMP_US = '2008-01-01 00:00:11'::TIMESTAMP_NS;
+
+select '2008-01-01 00:00:11'::TIMESTAMP_US = '2008-01-01 00:00:11'::TIMESTAMP_S;
+
+select '2008-01-01 00:00:11.1'::TIMESTAMP_US = '2008-01-01 00:00:11'::TIMESTAMP_MS;
+
+select '2008-01-01 00:00:11.1'::TIMESTAMP_US = '2008-01-01 00:00:11'::TIMESTAMP_NS;
+
+select '2008-01-01 00:00:11.1'::TIMESTAMP_US = '2008-01-01 00:00:11.1'::TIMESTAMP_S;
+
+select '2008-01-01 00:00:11.1'::TIMESTAMP_MS = '2008-01-01 00:00:11'::TIMESTAMP_NS;
+
+select '2008-01-01 00:00:11.1'::TIMESTAMP_MS = '2008-01-01 00:00:11'::TIMESTAMP_S;
+
+select '2008-01-01 00:00:11.1'::TIMESTAMP_NS = '2008-01-01 00:00:11'::TIMESTAMP_S;
+
+select '2008-01-01 00:00:11'::TIMESTAMP_MS = '2008-01-01 00:00:11'::TIMESTAMP_NS;
+
+select '2008-01-01 00:00:11'::TIMESTAMP_MS = '2008-01-01 00:00:11'::TIMESTAMP_S;
+
+select '2008-01-01 00:00:11'::TIMESTAMP_NS = '2008-01-01 00:00:11'::TIMESTAMP_S;
+
+DROP TABLE timestamp;
+
+DROP TABLE timestamp_two;
diff --git a/tests/cases/standalone/common/types/timestamp/timestamp_tz.result b/tests/cases/standalone/common/types/timestamp/timestamp_tz.result
index f863d1e02a99..8084c4645749 100644
--- a/tests/cases/standalone/common/types/timestamp/timestamp_tz.result
+++ b/tests/cases/standalone/common/types/timestamp/timestamp_tz.result
@@ -11,7 +11,7 @@ select '2021-11-15 02:30:00'::TIMESTAMP::TIMESTAMPTZ;
+-----------------------------+
| Utf8("2021-11-15 02:30:00") |
+-----------------------------+
-| 2021-11-15T02:30:00Z |
+| 2021-11-15T02:30:00 |
+-----------------------------+
SELECT '2021-04-29 10:50:09-05'::TIMESTAMPTZ::DATE;
|
feat
|
type alias (#2331)
|
aa22f9c94a80904a71a8297fc810b701099ab366
|
2024-01-03 13:35:45
|
Weny Xu
|
refactor: allow procedure to acquire share lock (#3061)
| false
|
diff --git a/src/common/meta/src/ddl/alter_table.rs b/src/common/meta/src/ddl/alter_table.rs
index c3b1f7c31121..e196ed70c6d6 100644
--- a/src/common/meta/src/ddl/alter_table.rs
+++ b/src/common/meta/src/ddl/alter_table.rs
@@ -394,7 +394,7 @@ impl Procedure for AlterTableProcedure {
fn lock_key(&self) -> LockKey {
let key = self.lock_key_inner();
- LockKey::new(key)
+ LockKey::new_exclusive(key)
}
}
diff --git a/src/common/meta/src/ddl/create_table.rs b/src/common/meta/src/ddl/create_table.rs
index b480c82acdfd..9bdb6929c66e 100644
--- a/src/common/meta/src/ddl/create_table.rs
+++ b/src/common/meta/src/ddl/create_table.rs
@@ -349,7 +349,7 @@ impl Procedure for CreateTableProcedure {
table_ref.table,
);
- LockKey::single(key)
+ LockKey::single_exclusive(key)
}
}
diff --git a/src/common/meta/src/ddl/drop_table.rs b/src/common/meta/src/ddl/drop_table.rs
index 7fac47e62cb1..dfd674d13938 100644
--- a/src/common/meta/src/ddl/drop_table.rs
+++ b/src/common/meta/src/ddl/drop_table.rs
@@ -273,7 +273,7 @@ impl Procedure for DropTableProcedure {
table_ref.table,
);
- LockKey::single(key)
+ LockKey::single_exclusive(key)
}
}
diff --git a/src/common/meta/src/ddl/truncate_table.rs b/src/common/meta/src/ddl/truncate_table.rs
index ec5a7897cd63..90f746104c99 100644
--- a/src/common/meta/src/ddl/truncate_table.rs
+++ b/src/common/meta/src/ddl/truncate_table.rs
@@ -81,7 +81,7 @@ impl Procedure for TruncateTableProcedure {
table_ref.table,
);
- LockKey::single(key)
+ LockKey::single_exclusive(key)
}
}
diff --git a/src/common/procedure/src/local.rs b/src/common/procedure/src/local.rs
index ae01022c9cc4..30c0403f683b 100644
--- a/src/common/procedure/src/local.rs
+++ b/src/common/procedure/src/local.rs
@@ -12,8 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-mod lock;
mod runner;
+mod rwlock;
use std::collections::{HashMap, VecDeque};
use std::sync::atomic::{AtomicBool, Ordering};
@@ -29,11 +29,11 @@ use snafu::{ensure, ResultExt};
use tokio::sync::watch::{self, Receiver, Sender};
use tokio::sync::{Mutex as TokioMutex, Notify};
+use self::rwlock::KeyRwLock;
use crate::error::{
DuplicateProcedureSnafu, Error, LoaderConflictSnafu, ManagerNotStartSnafu, Result,
StartRemoveOutdatedMetaTaskSnafu, StopRemoveOutdatedMetaTaskSnafu,
};
-use crate::local::lock::LockMap;
use crate::local::runner::Runner;
use crate::procedure::BoxedProcedureLoader;
use crate::store::{ProcedureMessage, ProcedureStore, StateStoreRef};
@@ -57,8 +57,6 @@ const META_TTL: Duration = Duration::from_secs(60 * 10);
pub(crate) struct ProcedureMeta {
/// Id of this procedure.
id: ProcedureId,
- /// Notify to wait for a lock.
- lock_notify: Notify,
/// Parent procedure id.
parent_id: Option<ProcedureId>,
/// Notify to wait for subprocedures.
@@ -78,7 +76,6 @@ impl ProcedureMeta {
let (state_sender, state_receiver) = watch::channel(ProcedureState::Running);
ProcedureMeta {
id,
- lock_notify: Notify::new(),
parent_id,
child_notify: Notify::new(),
lock_key,
@@ -131,7 +128,7 @@ struct LoadedProcedure {
pub(crate) struct ManagerContext {
/// Procedure loaders. The key is the type name of the procedure which the loader returns.
loaders: Mutex<HashMap<String, BoxedProcedureLoader>>,
- lock_map: LockMap,
+ key_lock: KeyRwLock<String>,
procedures: RwLock<HashMap<ProcedureId, ProcedureMetaRef>>,
/// Messages loaded from the procedure store.
messages: Mutex<HashMap<ProcedureId, ProcedureMessage>>,
@@ -152,8 +149,8 @@ impl ManagerContext {
/// Returns a new [ManagerContext].
fn new() -> ManagerContext {
ManagerContext {
+ key_lock: KeyRwLock::new(),
loaders: Mutex::new(HashMap::new()),
- lock_map: LockMap::new(),
procedures: RwLock::new(HashMap::new()),
messages: Mutex::new(HashMap::new()),
finished_procedures: Mutex::new(VecDeque::new()),
@@ -850,7 +847,7 @@ mod tests {
assert!(manager.procedure_watcher(procedure_id).is_none());
let mut procedure = ProcedureToLoad::new("submit");
- procedure.lock_key = LockKey::single("test.submit");
+ procedure.lock_key = LockKey::single_exclusive("test.submit");
assert!(manager
.submit(ProcedureWithId {
id: procedure_id,
@@ -918,7 +915,7 @@ mod tests {
}
fn lock_key(&self) -> LockKey {
- LockKey::single("test.submit")
+ LockKey::single_exclusive("test.submit")
}
}
@@ -955,7 +952,7 @@ mod tests {
let manager = LocalManager::new(config, state_store);
let mut procedure = ProcedureToLoad::new("submit");
- procedure.lock_key = LockKey::single("test.submit");
+ procedure.lock_key = LockKey::single_exclusive("test.submit");
let procedure_id = ProcedureId::random();
assert_matches!(
manager
@@ -986,7 +983,7 @@ mod tests {
manager.start().await.unwrap();
let mut procedure = ProcedureToLoad::new("submit");
- procedure.lock_key = LockKey::single("test.submit");
+ procedure.lock_key = LockKey::single_exclusive("test.submit");
let procedure_id = ProcedureId::random();
assert!(manager
.submit(ProcedureWithId {
@@ -1018,7 +1015,7 @@ mod tests {
manager.manager_ctx.set_running();
let mut procedure = ProcedureToLoad::new("submit");
- procedure.lock_key = LockKey::single("test.submit");
+ procedure.lock_key = LockKey::single_exclusive("test.submit");
let procedure_id = ProcedureId::random();
assert!(manager
.submit(ProcedureWithId {
@@ -1041,7 +1038,7 @@ mod tests {
// The remove_outdated_meta method has been stopped, so any procedure meta-data will not be automatically removed.
manager.stop().await.unwrap();
let mut procedure = ProcedureToLoad::new("submit");
- procedure.lock_key = LockKey::single("test.submit");
+ procedure.lock_key = LockKey::single_exclusive("test.submit");
let procedure_id = ProcedureId::random();
manager.manager_ctx.set_running();
@@ -1063,7 +1060,7 @@ mod tests {
// After restart
let mut procedure = ProcedureToLoad::new("submit");
- procedure.lock_key = LockKey::single("test.submit");
+ procedure.lock_key = LockKey::single_exclusive("test.submit");
let procedure_id = ProcedureId::random();
assert!(manager
.submit(ProcedureWithId {
diff --git a/src/common/procedure/src/local/lock.rs b/src/common/procedure/src/local/lock.rs
deleted file mode 100644
index 59e197d951bb..000000000000
--- a/src/common/procedure/src/local/lock.rs
+++ /dev/null
@@ -1,214 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use std::collections::{HashMap, VecDeque};
-use std::sync::RwLock;
-
-use crate::local::ProcedureMetaRef;
-use crate::ProcedureId;
-
-/// A lock entry.
-#[derive(Debug)]
-struct Lock {
- /// Current lock owner.
- owner: ProcedureMetaRef,
- /// Waiter procedures.
- waiters: VecDeque<ProcedureMetaRef>,
-}
-
-impl Lock {
- /// Returns a [Lock] with specific `owner` procedure.
- fn from_owner(owner: ProcedureMetaRef) -> Lock {
- Lock {
- owner,
- waiters: VecDeque::new(),
- }
- }
-
- /// Try to pop a waiter from the waiter list, set it as owner
- /// and wake up the new owner.
- ///
- /// Returns false if there is no waiter in the waiter list.
- fn switch_owner(&mut self) -> bool {
- if let Some(waiter) = self.waiters.pop_front() {
- // Update owner.
- self.owner = waiter.clone();
- // We need to use notify_one() since the waiter may have not called `notified()` yet.
- waiter.lock_notify.notify_one();
- true
- } else {
- false
- }
- }
-}
-
-/// Manages lock entries for procedures.
-pub(crate) struct LockMap {
- locks: RwLock<HashMap<String, Lock>>,
-}
-
-impl LockMap {
- /// Returns a new [LockMap].
- pub(crate) fn new() -> LockMap {
- LockMap {
- locks: RwLock::new(HashMap::new()),
- }
- }
-
- /// Acquire lock by `key` for procedure with specific `meta`.
- ///
- /// Though `meta` is cloneable, callers must ensure that only one `meta`
- /// is acquiring and holding the lock at the same time.
- ///
- /// # Panics
- /// Panics if the procedure acquires the lock recursively.
- pub(crate) async fn acquire_lock(&self, key: &str, meta: ProcedureMetaRef) {
- assert!(!self.hold_lock(key, meta.id));
-
- {
- let mut locks = self.locks.write().unwrap();
- if let Some(lock) = locks.get_mut(key) {
- // Lock already exists, but we don't expect that a procedure acquires
- // the same lock again.
- assert_ne!(lock.owner.id, meta.id);
-
- // Add this procedure to the waiter list. Here we don't check
- // whether the procedure is already in the waiter list as we
- // expect that a procedure should not wait for two lock simultaneously.
- lock.waiters.push_back(meta.clone());
- } else {
- let _ = locks.insert(key.to_string(), Lock::from_owner(meta));
-
- return;
- }
- }
-
- // Wait for notify.
- meta.lock_notify.notified().await;
-
- assert!(self.hold_lock(key, meta.id));
- }
-
- /// Release lock by `key`.
- pub(crate) fn release_lock(&self, key: &str, procedure_id: ProcedureId) {
- let mut locks = self.locks.write().unwrap();
- if let Some(lock) = locks.get_mut(key) {
- if lock.owner.id != procedure_id {
- // This is not the lock owner.
- return;
- }
-
- if !lock.switch_owner() {
- // No body waits for this lock, we can remove the lock entry.
- let _ = locks.remove(key);
- }
- }
- }
-
- /// Returns true if the procedure with specific `procedure_id` holds the
- /// lock of `key`.
- fn hold_lock(&self, key: &str, procedure_id: ProcedureId) -> bool {
- let locks = self.locks.read().unwrap();
- locks
- .get(key)
- .map(|lock| lock.owner.id == procedure_id)
- .unwrap_or(false)
- }
-
- /// Returns true if the procedure is waiting for the lock `key`.
- #[cfg(test)]
- fn waiting_lock(&self, key: &str, procedure_id: ProcedureId) -> bool {
- let locks = self.locks.read().unwrap();
- locks
- .get(key)
- .map(|lock| lock.waiters.iter().any(|meta| meta.id == procedure_id))
- .unwrap_or(false)
- }
-}
-
-#[cfg(test)]
-mod tests {
- use std::sync::Arc;
-
- use super::*;
- use crate::local::test_util;
-
- #[test]
- fn test_lock_no_waiter() {
- let meta = Arc::new(test_util::procedure_meta_for_test());
- let mut lock = Lock::from_owner(meta);
-
- assert!(!lock.switch_owner());
- }
-
- #[tokio::test]
- async fn test_lock_with_waiter() {
- let owner = Arc::new(test_util::procedure_meta_for_test());
- let mut lock = Lock::from_owner(owner);
-
- let waiter = Arc::new(test_util::procedure_meta_for_test());
- lock.waiters.push_back(waiter.clone());
-
- assert!(lock.switch_owner());
- assert!(lock.waiters.is_empty());
-
- waiter.lock_notify.notified().await;
- assert_eq!(lock.owner.id, waiter.id);
- }
-
- #[tokio::test]
- async fn test_lock_map() {
- let key = "hello";
-
- let owner = Arc::new(test_util::procedure_meta_for_test());
- let lock_map = Arc::new(LockMap::new());
- lock_map.acquire_lock(key, owner.clone()).await;
-
- let waiter = Arc::new(test_util::procedure_meta_for_test());
- let waiter_id = waiter.id;
-
- // Waiter release the lock, this should not take effect.
- lock_map.release_lock(key, waiter_id);
-
- let lock_map2 = lock_map.clone();
- let owner_id = owner.id;
- let handle = tokio::spawn(async move {
- assert!(lock_map2.hold_lock(key, owner_id));
- assert!(!lock_map2.hold_lock(key, waiter_id));
-
- // Waiter wait for lock.
- lock_map2.acquire_lock(key, waiter.clone()).await;
-
- assert!(lock_map2.hold_lock(key, waiter_id));
- });
-
- // Owner still holds the lock.
- assert!(lock_map.hold_lock(key, owner_id));
-
- // Wait until the waiter acquired the lock
- while !lock_map.waiting_lock(key, waiter_id) {
- tokio::time::sleep(std::time::Duration::from_millis(5)).await;
- }
- // Release lock
- lock_map.release_lock(key, owner_id);
- assert!(!lock_map.hold_lock(key, owner_id));
-
- // Wait for task.
- handle.await.unwrap();
- // The waiter should hold the lock now.
- assert!(lock_map.hold_lock(key, waiter_id));
-
- lock_map.release_lock(key, waiter_id);
- }
-}
diff --git a/src/common/procedure/src/local/runner.rs b/src/common/procedure/src/local/runner.rs
index 0b50f4497f03..87f2e2f635b1 100644
--- a/src/common/procedure/src/local/runner.rs
+++ b/src/common/procedure/src/local/runner.rs
@@ -19,8 +19,10 @@ use backon::{BackoffBuilder, ExponentialBuilder};
use common_telemetry::logging;
use tokio::time;
+use super::rwlock::OwnedKeyRwLockGuard;
use crate::error::{self, ProcedurePanicSnafu, Result};
use crate::local::{ManagerContext, ProcedureMeta, ProcedureMetaRef};
+use crate::procedure::StringKey;
use crate::store::ProcedureStore;
use crate::ProcedureState::Retrying;
use crate::{BoxedProcedure, Context, Error, ProcedureId, ProcedureState, ProcedureWithId, Status};
@@ -56,6 +58,7 @@ impl ExecResult {
struct ProcedureGuard {
meta: ProcedureMetaRef,
manager_ctx: Arc<ManagerContext>,
+ key_guards: Vec<OwnedKeyRwLockGuard>,
finish: bool,
}
@@ -65,6 +68,7 @@ impl ProcedureGuard {
ProcedureGuard {
meta,
manager_ctx,
+ key_guards: vec![],
finish: false,
}
}
@@ -95,10 +99,15 @@ impl Drop for ProcedureGuard {
self.manager_ctx.notify_by_subprocedure(parent_id);
}
- // Release lock in reverse order.
- for key in self.meta.lock_key.keys_to_unlock() {
- self.manager_ctx.lock_map.release_lock(key, self.meta.id);
+ // Drops the key guards in the reverse order.
+ while !self.key_guards.is_empty() {
+ self.key_guards.pop();
}
+
+        // Clean up the stale locks.
+ self.manager_ctx
+ .key_lock
+ .clean_keys(self.meta.lock_key.keys_to_lock().map(|k| k.as_string()));
}
}
@@ -121,7 +130,7 @@ impl Runner {
/// Run the procedure.
pub(crate) async fn run(mut self) {
// Ensure we can update the procedure state.
- let guard = ProcedureGuard::new(self.meta.clone(), self.manager_ctx.clone());
+ let mut guard = ProcedureGuard::new(self.meta.clone(), self.manager_ctx.clone());
logging::info!(
"Runner {}-{} starts",
@@ -133,10 +142,14 @@ impl Runner {
// recursive locking by adding a root procedure id to the meta.
for key in self.meta.lock_key.keys_to_lock() {
// Acquire lock for each key.
- self.manager_ctx
- .lock_map
- .acquire_lock(key, self.meta.clone())
- .await;
+ let key_guard = match key {
+ StringKey::Share(key) => self.manager_ctx.key_lock.read(key.clone()).await.into(),
+ StringKey::Exclusive(key) => {
+ self.manager_ctx.key_lock.write(key.clone()).await.into()
+ }
+ };
+
+ guard.key_guards.push(key_guard);
}
        // Execute the procedure. We need to release the lock whenever the execution
@@ -604,7 +617,7 @@ mod tests {
};
let normal = ProcedureAdapter {
data: "normal".to_string(),
- lock_key: LockKey::single("catalog.schema.table"),
+ lock_key: LockKey::single_exclusive("catalog.schema.table"),
exec_fn,
};
@@ -665,7 +678,7 @@ mod tests {
};
let suspend = ProcedureAdapter {
data: "suspend".to_string(),
- lock_key: LockKey::single("catalog.schema.table"),
+ lock_key: LockKey::single_exclusive("catalog.schema.table"),
exec_fn,
};
@@ -697,7 +710,7 @@ mod tests {
};
let child = ProcedureAdapter {
data: "child".to_string(),
- lock_key: LockKey::new(keys.iter().map(|k| k.to_string())),
+ lock_key: LockKey::new_exclusive(keys.iter().map(|k| k.to_string())),
exec_fn,
};
@@ -765,7 +778,7 @@ mod tests {
};
let parent = ProcedureAdapter {
data: "parent".to_string(),
- lock_key: LockKey::single("catalog.schema.table"),
+ lock_key: LockKey::single_exclusive("catalog.schema.table"),
exec_fn,
};
@@ -784,6 +797,7 @@ mod tests {
runner.manager_ctx = manager_ctx.clone();
runner.run().await;
+ assert!(manager_ctx.key_lock.is_empty());
// Check child procedures.
for child_id in children_ids {
@@ -810,7 +824,7 @@ mod tests {
let exec_fn = move |_| async move { Ok(Status::Executing { persist: true }) }.boxed();
let normal = ProcedureAdapter {
data: "normal".to_string(),
- lock_key: LockKey::single("catalog.schema.table"),
+ lock_key: LockKey::single_exclusive("catalog.schema.table"),
exec_fn,
};
@@ -851,7 +865,7 @@ mod tests {
|_| async { Err(Error::external(MockError::new(StatusCode::Unexpected))) }.boxed();
let normal = ProcedureAdapter {
data: "fail".to_string(),
- lock_key: LockKey::single("catalog.schema.table"),
+ lock_key: LockKey::single_exclusive("catalog.schema.table"),
exec_fn,
};
@@ -875,7 +889,7 @@ mod tests {
|_| async { Err(Error::external(MockError::new(StatusCode::Unexpected))) }.boxed();
let fail = ProcedureAdapter {
data: "fail".to_string(),
- lock_key: LockKey::single("catalog.schema.table"),
+ lock_key: LockKey::single_exclusive("catalog.schema.table"),
exec_fn,
};
@@ -917,7 +931,7 @@ mod tests {
let retry_later = ProcedureAdapter {
data: "retry_later".to_string(),
- lock_key: LockKey::single("catalog.schema.table"),
+ lock_key: LockKey::single_exclusive("catalog.schema.table"),
exec_fn,
};
@@ -952,7 +966,7 @@ mod tests {
let exceed_max_retry_later = ProcedureAdapter {
data: "exceed_max_retry_later".to_string(),
- lock_key: LockKey::single("catalog.schema.table"),
+ lock_key: LockKey::single_exclusive("catalog.schema.table"),
exec_fn,
};
@@ -993,7 +1007,7 @@ mod tests {
};
let fail = ProcedureAdapter {
data: "fail".to_string(),
- lock_key: LockKey::single("catalog.schema.table.region-0"),
+ lock_key: LockKey::single_exclusive("catalog.schema.table.region-0"),
exec_fn,
};
@@ -1027,7 +1041,7 @@ mod tests {
};
let parent = ProcedureAdapter {
data: "parent".to_string(),
- lock_key: LockKey::single("catalog.schema.table"),
+ lock_key: LockKey::single_exclusive("catalog.schema.table"),
exec_fn,
};
@@ -1042,10 +1056,11 @@ mod tests {
// Manually add this procedure to the manager ctx.
assert!(manager_ctx.try_insert_procedure(meta.clone()));
// Replace the manager ctx.
- runner.manager_ctx = manager_ctx;
+ runner.manager_ctx = manager_ctx.clone();
// Run the runner and execute the procedure.
runner.run().await;
+ assert!(manager_ctx.key_lock.is_empty());
let err = meta.state().error().unwrap().output_msg();
assert!(err.contains("subprocedure failed"), "{err}");
}
diff --git a/src/common/procedure/src/local/rwlock.rs b/src/common/procedure/src/local/rwlock.rs
new file mode 100644
index 000000000000..a1701320364c
--- /dev/null
+++ b/src/common/procedure/src/local/rwlock.rs
@@ -0,0 +1,247 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::collections::HashMap;
+use std::hash::Hash;
+use std::sync::{Arc, Mutex};
+
+use tokio::sync::{OwnedRwLockReadGuard, OwnedRwLockWriteGuard, RwLock};
+
+pub enum OwnedKeyRwLockGuard {
+ Read(OwnedRwLockReadGuard<()>),
+ Write(OwnedRwLockWriteGuard<()>),
+}
+
+impl From<OwnedRwLockReadGuard<()>> for OwnedKeyRwLockGuard {
+ fn from(guard: OwnedRwLockReadGuard<()>) -> Self {
+ OwnedKeyRwLockGuard::Read(guard)
+ }
+}
+
+impl From<OwnedRwLockWriteGuard<()>> for OwnedKeyRwLockGuard {
+ fn from(guard: OwnedRwLockWriteGuard<()>) -> Self {
+ OwnedKeyRwLockGuard::Write(guard)
+ }
+}
+
+/// Locks based on a key, allowing other keys to lock independently.
+#[derive(Debug)]
+pub struct KeyRwLock<K> {
+ /// The inner map of locks for specific keys.
+ inner: Mutex<HashMap<K, Arc<RwLock<()>>>>,
+}
+
+impl<K> KeyRwLock<K>
+where
+ K: Eq + Hash + Clone,
+{
+ pub fn new() -> Self {
+ KeyRwLock {
+ inner: Default::default(),
+ }
+ }
+
+ /// Locks the key with shared read access, returning a guard.
+ pub async fn read(&self, key: K) -> OwnedRwLockReadGuard<()> {
+ let lock = {
+ let mut locks = self.inner.lock().unwrap();
+ locks.entry(key).or_default().clone()
+ };
+
+ lock.read_owned().await
+ }
+
+ /// Locks the key with exclusive write access, returning a guard.
+ pub async fn write(&self, key: K) -> OwnedRwLockWriteGuard<()> {
+ let lock = {
+ let mut locks = self.inner.lock().unwrap();
+ locks.entry(key).or_default().clone()
+ };
+
+ lock.write_owned().await
+ }
+
+ /// Clean up stale locks.
+ ///
+    /// Note: It only cleans up a lock if
+    /// - its strong ref count equals one, and
+    /// - the write lock can be acquired.
+ pub fn clean_keys<'a>(&'a self, iter: impl IntoIterator<Item = &'a K>) {
+ let mut locks = self.inner.lock().unwrap();
+ let mut keys = Vec::new();
+ for key in iter {
+ if let Some(lock) = locks.get(key) {
+ if lock.try_write().is_ok() {
+ debug_assert_eq!(Arc::weak_count(lock), 0);
+ // Ensures nobody keeps this ref.
+ if Arc::strong_count(lock) == 1 {
+ keys.push(key);
+ }
+ }
+ }
+ }
+
+ for key in keys {
+ locks.remove(key);
+ }
+ }
+}
+
+#[cfg(test)]
+impl<K> KeyRwLock<K>
+where
+ K: Eq + Hash + Clone,
+{
+ /// Tries to lock the key with shared read access, returning immediately.
+ pub fn try_read(&self, key: K) -> Result<OwnedRwLockReadGuard<()>, tokio::sync::TryLockError> {
+ let lock = {
+ let mut locks = self.inner.lock().unwrap();
+ locks.entry(key).or_default().clone()
+ };
+
+ lock.try_read_owned()
+ }
+
+    /// Tries to lock this key with exclusive write access, returning immediately.
+ pub fn try_write(
+ &self,
+ key: K,
+ ) -> Result<OwnedRwLockWriteGuard<()>, tokio::sync::TryLockError> {
+ let lock = {
+ let mut locks = self.inner.lock().unwrap();
+ locks.entry(key).or_default().clone()
+ };
+
+ lock.try_write_owned()
+ }
+
+    /// Returns the number of keys.
+ pub fn len(&self) -> usize {
+ self.inner.lock().unwrap().len()
+ }
+
+    /// Returns true if the inner map is empty.
+ pub fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[tokio::test]
+ async fn test_naive() {
+ let lock_key = KeyRwLock::new();
+
+ {
+ let _guard = lock_key.read("test1").await;
+ assert_eq!(lock_key.len(), 1);
+ assert!(lock_key.try_read("test1").is_ok());
+ assert!(lock_key.try_write("test1").is_err());
+ }
+
+ {
+ let _guard0 = lock_key.write("test2").await;
+ let _guard = lock_key.write("test1").await;
+ assert_eq!(lock_key.len(), 2);
+ assert!(lock_key.try_read("test1").is_err());
+ assert!(lock_key.try_write("test1").is_err());
+ }
+
+ assert_eq!(lock_key.len(), 2);
+
+ lock_key.clean_keys(&vec!["test1", "test2"]);
+ assert!(lock_key.is_empty());
+
+ let mut guards = Vec::new();
+ for key in ["test1", "test2"] {
+ guards.push(lock_key.read(key).await);
+ }
+ while !guards.is_empty() {
+ guards.pop();
+ }
+ lock_key.clean_keys(vec![&"test1", &"test2"]);
+ assert_eq!(lock_key.len(), 0);
+ }
+
+ #[tokio::test]
+ async fn test_clean_keys() {
+ let lock_key = KeyRwLock::<&str>::new();
+ {
+ let rwlock = {
+ lock_key
+ .inner
+ .lock()
+ .unwrap()
+ .entry("test")
+ .or_default()
+ .clone()
+ };
+ assert_eq!(Arc::strong_count(&rwlock), 2);
+ let _guard = rwlock.read_owned().await;
+
+ {
+ let inner = lock_key.inner.lock().unwrap();
+ let rwlock = inner.get("test").unwrap();
+ assert_eq!(Arc::strong_count(rwlock), 2);
+ }
+ }
+
+ {
+ let rwlock = {
+ lock_key
+ .inner
+ .lock()
+ .unwrap()
+ .entry("test")
+ .or_default()
+ .clone()
+ };
+ assert_eq!(Arc::strong_count(&rwlock), 2);
+ let _guard = rwlock.write_owned().await;
+
+ {
+ let inner = lock_key.inner.lock().unwrap();
+ let rwlock = inner.get("test").unwrap();
+ assert_eq!(Arc::strong_count(rwlock), 2);
+ }
+ }
+
+ {
+ let inner = lock_key.inner.lock().unwrap();
+ let rwlock = inner.get("test").unwrap();
+ assert_eq!(Arc::strong_count(rwlock), 1);
+ }
+
+    // Someone still holds a ref to the rwlock but has not yet been granted the lock.
+ let rwlock = {
+ lock_key
+ .inner
+ .lock()
+ .unwrap()
+ .entry("test")
+ .or_default()
+ .clone()
+ };
+ assert_eq!(Arc::strong_count(&rwlock), 2);
+    // However, a thread trying to remove the "test" key should have no effect.
+ lock_key.clean_keys(vec![&"test"]);
+ // Should get the rwlock.
+ {
+ let inner = lock_key.inner.lock().unwrap();
+ inner.get("test").unwrap();
+ }
+ }
+}
diff --git a/src/common/procedure/src/procedure.rs b/src/common/procedure/src/procedure.rs
index 54f34b7d7ccf..2df005bdf042 100644
--- a/src/common/procedure/src/procedure.rs
+++ b/src/common/procedure/src/procedure.rs
@@ -116,22 +116,49 @@ impl<T: Procedure + ?Sized> Procedure for Box<T> {
}
}
+#[derive(Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
+pub enum StringKey {
+ Share(String),
+ Exclusive(String),
+}
+
/// Keys to identify required locks.
///
/// [LockKey] always sorts keys lexicographically so that they can be acquired
/// in the same order.
-// Most procedures should only acquire 1 ~ 2 locks so we use smallvec to hold keys.
+/// Most procedures should only acquire 1 ~ 2 locks so we use smallvec to hold keys.
#[derive(Clone, Debug, Default, PartialEq, Eq)]
-pub struct LockKey(SmallVec<[String; 2]>);
+pub struct LockKey(SmallVec<[StringKey; 2]>);
+
+impl StringKey {
+ pub fn into_string(self) -> String {
+ match self {
+ StringKey::Share(s) => s,
+ StringKey::Exclusive(s) => s,
+ }
+ }
+
+ pub fn as_string(&self) -> &String {
+ match self {
+ StringKey::Share(s) => s,
+ StringKey::Exclusive(s) => s,
+ }
+ }
+}
impl LockKey {
/// Returns a new [LockKey] with only one key.
- pub fn single(key: impl Into<String>) -> LockKey {
+ pub fn single(key: impl Into<StringKey>) -> LockKey {
LockKey(smallvec![key.into()])
}
+ /// Returns a new [LockKey] with only one exclusive key.
+ pub fn single_exclusive(key: impl Into<String>) -> LockKey {
+ LockKey(smallvec![StringKey::Exclusive(key.into())])
+ }
+
/// Returns a new [LockKey] with keys from specific `iter`.
- pub fn new(iter: impl IntoIterator<Item = String>) -> LockKey {
+ pub fn new(iter: impl IntoIterator<Item = StringKey>) -> LockKey {
let mut vec: SmallVec<_> = iter.into_iter().collect();
vec.sort();
// Dedup keys to avoid acquiring the same key multiple times.
@@ -139,14 +166,14 @@ impl LockKey {
LockKey(vec)
}
- /// Returns the keys to lock.
- pub fn keys_to_lock(&self) -> impl Iterator<Item = &String> {
- self.0.iter()
+ /// Returns a new [LockKey] with exclusive keys from specific `iter`.
+ pub fn new_exclusive(iter: impl IntoIterator<Item = String>) -> LockKey {
+ Self::new(iter.into_iter().map(StringKey::Exclusive))
}
- /// Returns the keys to unlock.
- pub fn keys_to_unlock(&self) -> impl Iterator<Item = &String> {
- self.0.iter().rev()
+ /// Returns the keys to lock.
+ pub fn keys_to_lock(&self) -> impl Iterator<Item = &StringKey> {
+ self.0.iter()
}
}
@@ -340,20 +367,25 @@ mod tests {
#[test]
fn test_lock_key() {
let entity = "catalog.schema.my_table";
- let key = LockKey::single(entity);
- assert_eq!(vec![entity], key.keys_to_lock().collect::<Vec<_>>());
- assert_eq!(vec![entity], key.keys_to_unlock().collect::<Vec<_>>());
+ let key = LockKey::single_exclusive(entity);
+ assert_eq!(
+ vec![&StringKey::Exclusive(entity.to_string())],
+ key.keys_to_lock().collect::<Vec<_>>()
+ );
- let key = LockKey::new([
+ let key = LockKey::new_exclusive([
"b".to_string(),
"c".to_string(),
"a".to_string(),
"c".to_string(),
]);
- assert_eq!(vec!["a", "b", "c"], key.keys_to_lock().collect::<Vec<_>>());
assert_eq!(
- vec!["c", "b", "a"],
- key.keys_to_unlock().collect::<Vec<_>>()
+ vec![
+ &StringKey::Exclusive("a".to_string()),
+ &StringKey::Exclusive("b".to_string()),
+ &StringKey::Exclusive("c".to_string())
+ ],
+ key.keys_to_lock().collect::<Vec<_>>()
);
}
diff --git a/src/common/procedure/src/watcher.rs b/src/common/procedure/src/watcher.rs
index 75cf777beece..584aae520df7 100644
--- a/src/common/procedure/src/watcher.rs
+++ b/src/common/procedure/src/watcher.rs
@@ -98,7 +98,7 @@ mod tests {
}
fn lock_key(&self) -> LockKey {
- LockKey::single("test.submit")
+ LockKey::single_exclusive("test.submit")
}
}
diff --git a/src/meta-srv/src/procedure/region_failover.rs b/src/meta-srv/src/procedure/region_failover.rs
index 50ab0e742307..37468437b2aa 100644
--- a/src/meta-srv/src/procedure/region_failover.rs
+++ b/src/meta-srv/src/procedure/region_failover.rs
@@ -373,7 +373,7 @@ impl Procedure for RegionFailoverProcedure {
fn lock_key(&self) -> LockKey {
let region_ident = &self.node.failed_region;
let region_key = region_lock_key(region_ident.table_id, region_ident.region_number);
- LockKey::single(region_key)
+ LockKey::single_exclusive(region_key)
}
}
diff --git a/src/meta-srv/src/procedure/region_migration.rs b/src/meta-srv/src/procedure/region_migration.rs
index de1dd46d4fca..ec46686f4735 100644
--- a/src/meta-srv/src/procedure/region_migration.rs
+++ b/src/meta-srv/src/procedure/region_migration.rs
@@ -419,7 +419,7 @@ impl Procedure for RegionMigrationProcedure {
fn lock_key(&self) -> LockKey {
let key = self.context.persistent_ctx.lock_key();
- LockKey::single(key)
+ LockKey::single_exclusive(key)
}
}
@@ -455,7 +455,11 @@ mod tests {
let procedure = RegionMigrationProcedure::new(persistent_context, context);
let key = procedure.lock_key();
- let keys = key.keys_to_lock().cloned().collect::<Vec<_>>();
+ let keys = key
+ .keys_to_lock()
+ .cloned()
+ .map(|s| s.into_string())
+ .collect::<Vec<_>>();
assert!(keys.contains(&expected_key));
}
|
refactor
|
allow procedure to acquire share lock (#3061)
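An illustrative sketch, not taken from the repository: the share/exclusive key shape this change introduces, with the smallvec storage and the KeyRwLock wiring left out and the example keys made up. Sorting before acquisition is what keeps two procedures that need the same pair of locks from deadlocking each other.

// A minimal sketch of the shared/exclusive lock keys shown in the diff above;
// the real LockKey stores keys in a SmallVec, a plain Vec is used here for brevity.
#[derive(Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
enum StringKey {
    Share(String),
    Exclusive(String),
}

#[derive(Clone, Debug, Default, PartialEq, Eq)]
struct LockKey(Vec<StringKey>);

impl LockKey {
    fn new(iter: impl IntoIterator<Item = StringKey>) -> LockKey {
        let mut keys: Vec<_> = iter.into_iter().collect();
        // Sort so every procedure acquires its locks in the same order,
        // then dedup to avoid acquiring the same key twice.
        keys.sort();
        keys.dedup();
        LockKey(keys)
    }

    fn keys_to_lock(&self) -> impl Iterator<Item = &StringKey> {
        self.0.iter()
    }
}

fn main() {
    // A hypothetical procedure that reads catalog/schema metadata but mutates a
    // single table could take share locks on the parents and an exclusive lock
    // on the table itself.
    let key = LockKey::new([
        StringKey::Exclusive("catalog.schema.my_table".to_string()),
        StringKey::Share("catalog".to_string()),
        StringKey::Share("catalog.schema".to_string()),
    ]);
    for k in key.keys_to_lock() {
        println!("{k:?}");
    }
}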
|
0b4ac987cd979f24d3096e8b7fb925bce8695a61
|
2023-07-25 13:02:10
|
fys
|
refactor: arrange lease kvs randomly in lease_based selector (#2028)
| false
|
diff --git a/src/meta-srv/src/selector/lease_based.rs b/src/meta-srv/src/selector/lease_based.rs
index 3e45e76c6105..83a42605103e 100644
--- a/src/meta-srv/src/selector/lease_based.rs
+++ b/src/meta-srv/src/selector/lease_based.rs
@@ -13,6 +13,8 @@
// limitations under the License.
use api::v1::meta::Peer;
+use rand::seq::SliceRandom;
+use rand::thread_rng;
use crate::error::Result;
use crate::lease;
@@ -34,9 +36,7 @@ impl Selector for LeaseBasedSelector {
.into_iter()
.collect();
- // TODO(jiachun): At the moment we are just pushing the latest to the forefront,
- // and it is better to use load-based strategies in the future.
- lease_kvs.sort_by(|a, b| b.1.timestamp_millis.cmp(&a.1.timestamp_millis));
+ lease_kvs.shuffle(&mut thread_rng());
let peers = lease_kvs
.into_iter()
|
refactor
|
arrange lease kvs randomly in lease_based selector (#2028)
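A tiny self-contained sketch of the rand usage the selector switches to, assuming rand 0.8; the peer names are made up.

use rand::seq::SliceRandom;
use rand::thread_rng;

fn main() {
    // Candidate datanodes; in the selector these come from the lease key-values.
    let mut peers = vec!["datanode-0", "datanode-1", "datanode-2"];
    // Shuffle in place so no single node is systematically preferred.
    peers.shuffle(&mut thread_rng());
    println!("{peers:?}");
}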
|
cbc2620a5945aed9bf2226d94acfbd1091be4ad0
|
2023-06-20 13:15:29
|
LFC
|
feat: start region alive keepers (#1796)
| false
|
diff --git a/src/catalog/src/remote/manager.rs b/src/catalog/src/remote/manager.rs
index 6cc2c787997e..dc8b52179319 100644
--- a/src/catalog/src/remote/manager.rs
+++ b/src/catalog/src/remote/manager.rs
@@ -85,6 +85,7 @@ impl RemoteCatalogManager {
catalog_name: catalog_name.to_string(),
backend: self.backend.clone(),
engine_manager: self.engine_manager.clone(),
+ region_alive_keepers: self.region_alive_keepers.clone(),
}) as _
}
@@ -132,10 +133,17 @@ impl RemoteCatalogManager {
increment_gauge!(crate::metrics::METRIC_CATALOG_MANAGER_CATALOG_COUNT, 1.0);
+ let region_alive_keepers = self.region_alive_keepers.clone();
joins.push(common_runtime::spawn_bg(async move {
- let max_table_id =
- initiate_schemas(node_id, backend, engine_manager, &catalog_name, catalog)
- .await?;
+ let max_table_id = initiate_schemas(
+ node_id,
+ backend,
+ engine_manager,
+ &catalog_name,
+ catalog,
+ region_alive_keepers,
+ )
+ .await?;
info!(
"Catalog name: {}, max table id allocated: {}",
&catalog_name, max_table_id
@@ -164,6 +172,7 @@ impl RemoteCatalogManager {
self.engine_manager.clone(),
catalog_name,
schema_name,
+ self.region_alive_keepers.clone(),
);
let catalog_provider = self.new_catalog_provider(catalog_name);
@@ -209,6 +218,7 @@ fn new_schema_provider(
engine_manager: TableEngineManagerRef,
catalog_name: &str,
schema_name: &str,
+ region_alive_keepers: Arc<RegionAliveKeepers>,
) -> SchemaProviderRef {
Arc::new(RemoteSchemaProvider {
catalog_name: catalog_name.to_string(),
@@ -216,6 +226,7 @@ fn new_schema_provider(
node_id,
backend,
engine_manager,
+ region_alive_keepers,
}) as _
}
@@ -249,6 +260,7 @@ async fn initiate_schemas(
engine_manager: TableEngineManagerRef,
catalog_name: &str,
catalog: CatalogProviderRef,
+ region_alive_keepers: Arc<RegionAliveKeepers>,
) -> Result<u32> {
let mut schemas = iter_remote_schemas(&backend, catalog_name).await;
let mut joins = Vec::new();
@@ -268,6 +280,7 @@ async fn initiate_schemas(
engine_manager.clone(),
&catalog_name,
&schema_name,
+ region_alive_keepers.clone(),
);
catalog
.register_schema(schema_name.clone(), schema.clone())
@@ -611,18 +624,7 @@ impl CatalogManager for RemoteCatalogManager {
&[crate::metrics::db_label(catalog, schema)],
);
schema_provider
- .register_table(table_name.to_string(), request.table.clone())
- .await?;
-
- let table_ident = TableIdent {
- catalog: request.catalog,
- schema: request.schema,
- table: request.table_name,
- table_id: request.table_id,
- engine: request.table.table_info().meta.engine.clone(),
- };
- self.region_alive_keepers
- .register_table(table_ident, request.table)
+ .register_table(table_name.to_string(), request.table)
.await?;
Ok(true)
@@ -678,6 +680,7 @@ impl CatalogManager for RemoteCatalogManager {
self.engine_manager.clone(),
&catalog_name,
&schema_name,
+ self.region_alive_keepers.clone(),
);
catalog_provider
.register_schema(schema_name, schema_provider)
@@ -813,6 +816,7 @@ pub struct RemoteCatalogProvider {
catalog_name: String,
backend: KvBackendRef,
engine_manager: TableEngineManagerRef,
+ region_alive_keepers: Arc<RegionAliveKeepers>,
}
impl RemoteCatalogProvider {
@@ -821,12 +825,14 @@ impl RemoteCatalogProvider {
backend: KvBackendRef,
engine_manager: TableEngineManagerRef,
node_id: u64,
+ region_alive_keepers: Arc<RegionAliveKeepers>,
) -> Self {
Self {
node_id,
catalog_name,
backend,
engine_manager,
+ region_alive_keepers,
}
}
@@ -844,6 +850,7 @@ impl RemoteCatalogProvider {
node_id: self.node_id,
backend: self.backend.clone(),
engine_manager: self.engine_manager.clone(),
+ region_alive_keepers: self.region_alive_keepers.clone(),
};
Arc::new(provider) as Arc<_>
}
@@ -906,6 +913,7 @@ pub struct RemoteSchemaProvider {
node_id: u64,
backend: KvBackendRef,
engine_manager: TableEngineManagerRef,
+ region_alive_keepers: Arc<RegionAliveKeepers>,
}
impl RemoteSchemaProvider {
@@ -915,6 +923,7 @@ impl RemoteSchemaProvider {
node_id: u64,
engine_manager: TableEngineManagerRef,
backend: KvBackendRef,
+ region_alive_keepers: Arc<RegionAliveKeepers>,
) -> Self {
Self {
catalog_name,
@@ -922,6 +931,7 @@ impl RemoteSchemaProvider {
node_id,
backend,
engine_manager,
+ region_alive_keepers,
}
}
@@ -1004,6 +1014,18 @@ impl SchemaProvider for RemoteSchemaProvider {
&table_value.as_bytes().context(InvalidCatalogValueSnafu)?,
)
.await?;
+
+ let table_ident = TableIdent {
+ catalog: table_info.catalog_name.clone(),
+ schema: table_info.schema_name.clone(),
+ table: table_info.name.clone(),
+ table_id: table_info.ident.table_id,
+ engine: table_info.meta.engine.clone(),
+ };
+ self.region_alive_keepers
+ .register_table(table_ident, table)
+ .await?;
+
debug!(
"Successfully set catalog table entry, key: {}, table value: {:?}",
table_key, table_value
diff --git a/src/catalog/src/remote/region_alive_keeper.rs b/src/catalog/src/remote/region_alive_keeper.rs
index 327e846b3b7a..61daee4cf1fd 100644
--- a/src/catalog/src/remote/region_alive_keeper.rs
+++ b/src/catalog/src/remote/region_alive_keeper.rs
@@ -14,6 +14,7 @@
use std::collections::HashMap;
use std::future::Future;
+use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use async_trait::async_trait;
@@ -30,7 +31,7 @@ use table::engine::manager::TableEngineManagerRef;
use table::engine::{CloseTableResult, EngineContext, TableEngineRef};
use table::requests::CloseTableRequest;
use table::TableRef;
-use tokio::sync::{mpsc, Mutex};
+use tokio::sync::{mpsc, oneshot, Mutex};
use tokio::task::JoinHandle;
use tokio::time::{Duration, Instant};
@@ -40,6 +41,8 @@ use crate::error::{Result, TableEngineNotFoundSnafu};
pub struct RegionAliveKeepers {
table_engine_manager: TableEngineManagerRef,
keepers: Arc<Mutex<HashMap<TableIdent, Arc<RegionAliveKeeper>>>>,
+ heartbeat_interval_millis: u64,
+ started: AtomicBool,
/// The epoch when [RegionAliveKeepers] is created. It's used to get a monotonically non-decreasing
/// elapsed time when submitting heartbeats to Metasrv (because [Instant] is monotonically
@@ -49,23 +52,24 @@ pub struct RegionAliveKeepers {
}
impl RegionAliveKeepers {
- pub fn new(table_engine_manager: TableEngineManagerRef) -> Self {
+ pub fn new(
+ table_engine_manager: TableEngineManagerRef,
+ heartbeat_interval_millis: u64,
+ ) -> Self {
Self {
table_engine_manager,
keepers: Arc::new(Mutex::new(HashMap::new())),
+ heartbeat_interval_millis,
+ started: AtomicBool::new(false),
epoch: Instant::now(),
}
}
- async fn find_keeper(&self, table_ident: &TableIdent) -> Option<Arc<RegionAliveKeeper>> {
+ pub async fn find_keeper(&self, table_ident: &TableIdent) -> Option<Arc<RegionAliveKeeper>> {
self.keepers.lock().await.get(table_ident).cloned()
}
- pub(crate) async fn register_table(
- &self,
- table_ident: TableIdent,
- table: TableRef,
- ) -> Result<()> {
+ pub async fn register_table(&self, table_ident: TableIdent, table: TableRef) -> Result<()> {
let keeper = self.find_keeper(&table_ident).await;
if keeper.is_some() {
return Ok(());
@@ -78,17 +82,29 @@ impl RegionAliveKeepers {
engine_name: &table_ident.engine,
})?;
- let keeper = Arc::new(RegionAliveKeeper::new(table_engine, table_ident.clone()));
+ let keeper = Arc::new(RegionAliveKeeper::new(
+ table_engine,
+ table_ident.clone(),
+ self.heartbeat_interval_millis,
+ ));
for r in table.table_info().meta.region_numbers.iter() {
keeper.register_region(*r).await;
}
- info!("Register RegionAliveKeeper for table {table_ident}");
- self.keepers.lock().await.insert(table_ident, keeper);
+ let mut keepers = self.keepers.lock().await;
+ keepers.insert(table_ident.clone(), keeper.clone());
+
+ if self.started.load(Ordering::Relaxed) {
+ keeper.start().await;
+
+ info!("RegionAliveKeeper for table {table_ident} is started!");
+ } else {
+ info!("RegionAliveKeeper for table {table_ident} is registered but not started yet!");
+ }
Ok(())
}
- pub(crate) async fn deregister_table(&self, table_ident: &TableIdent) {
+ pub async fn deregister_table(&self, table_ident: &TableIdent) {
if self.keepers.lock().await.remove(table_ident).is_some() {
info!("Deregister RegionAliveKeeper for table {table_ident}");
}
@@ -114,10 +130,17 @@ impl RegionAliveKeepers {
keeper.deregister_region(region_ident.region_number).await
}
- pub async fn start(&self, heartbeat_interval_millis: u64) {
- for keeper in self.keepers.lock().await.values() {
- keeper.start(heartbeat_interval_millis).await;
+ pub async fn start(&self) {
+ let keepers = self.keepers.lock().await;
+ for keeper in keepers.values() {
+ keeper.start().await;
}
+ self.started.store(true, Ordering::Relaxed);
+
+ info!(
+ "RegionAliveKeepers for tables {:?} are started!",
+ keepers.keys().map(|x| x.to_string()).collect::<Vec<_>>(),
+ );
}
pub fn epoch(&self) -> Instant {
@@ -171,18 +194,26 @@ impl HeartbeatResponseHandler for RegionAliveKeepers {
/// opened regions to Metasrv, in heartbeats. If Metasrv decides some region could reside on this
/// Datanode, it will "extend" the region's "lease", with a deadline for [RegionAliveKeeper] to
/// count down.
-struct RegionAliveKeeper {
+pub struct RegionAliveKeeper {
table_engine: TableEngineRef,
table_ident: TableIdent,
countdown_task_handles: Arc<Mutex<HashMap<RegionNumber, Arc<CountdownTaskHandle>>>>,
+ heartbeat_interval_millis: u64,
+ started: AtomicBool,
}
impl RegionAliveKeeper {
- fn new(table_engine: TableEngineRef, table_ident: TableIdent) -> Self {
+ fn new(
+ table_engine: TableEngineRef,
+ table_ident: TableIdent,
+ heartbeat_interval_millis: u64,
+ ) -> Self {
Self {
table_engine,
table_ident,
countdown_task_handles: Arc::new(Mutex::new(HashMap::new())),
+ heartbeat_interval_millis,
+ started: AtomicBool::new(false),
}
}
@@ -210,14 +241,22 @@ impl RegionAliveKeeper {
|| on_task_finished,
));
- self.countdown_task_handles
- .lock()
- .await
- .insert(region, handle);
- info!(
- "Register alive countdown for new region {region} in table {}",
- self.table_ident
- )
+ let mut handles = self.countdown_task_handles.lock().await;
+ handles.insert(region, handle.clone());
+
+ if self.started.load(Ordering::Relaxed) {
+ handle.start(self.heartbeat_interval_millis).await;
+
+ info!(
+ "Region alive countdown for region {region} in table {} is started!",
+ self.table_ident
+ );
+ } else {
+ info!(
+ "Region alive countdown for region {region} in table {} is registered but not started yet!",
+ self.table_ident
+ );
+ }
}
async fn deregister_region(&self, region: RegionNumber) {
@@ -235,14 +274,18 @@ impl RegionAliveKeeper {
}
}
- async fn start(&self, heartbeat_interval_millis: u64) {
- for handle in self.countdown_task_handles.lock().await.values() {
- handle.start(heartbeat_interval_millis).await;
+ async fn start(&self) {
+ let handles = self.countdown_task_handles.lock().await;
+ for handle in handles.values() {
+ handle.start(self.heartbeat_interval_millis).await;
}
+
+ self.started.store(true, Ordering::Relaxed);
info!(
- "RegionAliveKeeper for table {} is started!",
+ "Region alive countdowns for regions {:?} in table {} are started!",
+ handles.keys().copied().collect::<Vec<_>>(),
self.table_ident
- )
+ );
}
async fn keep_lived(&self, designated_regions: Vec<RegionNumber>, deadline: Instant) {
@@ -253,15 +296,24 @@ impl RegionAliveKeeper {
// Else the region alive keeper might be triggered by lagging messages, which we can safely ignore.
}
}
+
+ pub async fn deadline(&self, region: RegionNumber) -> Option<Instant> {
+ let mut deadline = None;
+ if let Some(handle) = self.find_handle(®ion).await {
+ let (s, r) = oneshot::channel();
+ if handle.tx.send(CountdownCommand::Deadline(s)).await.is_ok() {
+ deadline = r.await.ok()
+ }
+ }
+ deadline
+ }
}
#[derive(Debug)]
enum CountdownCommand {
Start(u64),
Reset(Instant),
-
- #[cfg(test)]
- Deadline(tokio::sync::oneshot::Sender<Instant>),
+ Deadline(oneshot::Sender<Instant>),
}
struct CountdownTaskHandle {
@@ -362,7 +414,10 @@ impl CountdownTask {
},
Some(CountdownCommand::Reset(deadline)) => {
if countdown.deadline() < deadline {
- debug!("Reset deadline to region {region} of table {table_ident} to {deadline:?}");
+ debug!(
+ "Reset deadline of region {region} of table {table_ident} to approximately {} seconds later",
+ (deadline - Instant::now()).as_secs_f32(),
+ );
countdown.set(tokio::time::sleep_until(deadline));
}
// Else the countdown could be either:
@@ -378,10 +433,8 @@ impl CountdownTask {
);
break;
},
-
- #[cfg(test)]
Some(CountdownCommand::Deadline(tx)) => {
- tx.send(countdown.deadline()).unwrap()
+ let _ = tx.send(countdown.deadline());
}
}
}
@@ -433,7 +486,6 @@ mod test {
use table::engine::{TableEngine, TableReference};
use table::requests::{CreateTableRequest, TableOptions};
use table::test_util::EmptyTable;
- use tokio::sync::oneshot;
use super::*;
use crate::remote::mock::MockTableEngine;
@@ -441,7 +493,7 @@ mod test {
async fn prepare_keepers() -> (TableIdent, RegionAliveKeepers) {
let table_engine = Arc::new(MockTableEngine::default());
let table_engine_manager = Arc::new(MemoryTableEngineManager::new(table_engine));
- let keepers = RegionAliveKeepers::new(table_engine_manager);
+ let keepers = RegionAliveKeepers::new(table_engine_manager, 5000);
let catalog = "my_catalog";
let schema = "my_schema";
@@ -483,7 +535,7 @@ mod test {
async fn test_handle_heartbeat_response() {
let (table_ident, keepers) = prepare_keepers().await;
- keepers.start(5000).await;
+ keepers.start().await;
let startup_protection_until = Instant::now() + Duration::from_secs(21);
let duration_since_epoch = (Instant::now() - keepers.epoch).as_millis() as _;
@@ -517,8 +569,7 @@ mod test {
keep_alive_until: Instant,
is_kept_live: bool,
) {
- let handles = keeper.countdown_task_handles.lock().await;
- let deadline = deadline(&handles.get(®ion_number).unwrap().tx).await;
+ let deadline = keeper.deadline(region_number).await.unwrap();
if is_kept_live {
assert!(deadline > startup_protection_until && deadline == keep_alive_until);
} else {
@@ -555,11 +606,16 @@ mod test {
})
.await;
- keepers.start(5000).await;
+ keepers.start().await;
for keeper in keepers.keepers.lock().await.values() {
- for handle in keeper.countdown_task_handles.lock().await.values() {
+ let regions = {
+ let handles = keeper.countdown_task_handles.lock().await;
+ handles.keys().copied().collect::<Vec<_>>()
+ };
+ for region in regions {
// assert countdown tasks are started
- assert!(deadline(&handle.tx).await <= Instant::now() + Duration::from_secs(20));
+ let deadline = keeper.deadline(region).await.unwrap();
+ assert!(deadline <= Instant::now() + Duration::from_secs(20));
}
}
@@ -598,22 +654,13 @@ mod test {
table_id: 1024,
engine: "mito".to_string(),
};
- let keeper = RegionAliveKeeper::new(table_engine, table_ident);
+ let keeper = RegionAliveKeeper::new(table_engine, table_ident, 1000);
let region = 1;
assert!(keeper.find_handle(®ion).await.is_none());
keeper.register_region(region).await;
assert!(keeper.find_handle(®ion).await.is_some());
- let sender = &keeper
- .countdown_task_handles
- .lock()
- .await
- .get(®ion)
- .unwrap()
- .tx
- .clone();
-
let ten_seconds_later = || Instant::now() + Duration::from_secs(10);
keeper.keep_lived(vec![1, 2, 3], ten_seconds_later()).await;
@@ -622,12 +669,12 @@ mod test {
let far_future = Instant::now() + Duration::from_secs(86400 * 365 * 29);
// assert if keeper is not started, keep_lived is of no use
- assert!(deadline(sender).await > far_future);
+ assert!(keeper.deadline(region).await.unwrap() > far_future);
- keeper.start(1000).await;
+ keeper.start().await;
keeper.keep_lived(vec![1, 2, 3], ten_seconds_later()).await;
// assert keep_lived works if keeper is started
- assert!(deadline(sender).await <= ten_seconds_later());
+ assert!(keeper.deadline(region).await.unwrap() <= ten_seconds_later());
keeper.deregister_region(region).await;
assert!(keeper.find_handle(®ion).await.is_none());
@@ -726,6 +773,12 @@ mod test {
task.run().await;
});
+ async fn deadline(tx: &mpsc::Sender<CountdownCommand>) -> Instant {
+ let (s, r) = oneshot::channel();
+ tx.send(CountdownCommand::Deadline(s)).await.unwrap();
+ r.await.unwrap()
+ }
+
// if countdown task is not started, its deadline is set to far future
assert!(deadline(&tx).await > Instant::now() + Duration::from_secs(86400 * 365 * 29));
@@ -747,10 +800,4 @@ mod test {
tokio::time::sleep(Duration::from_millis(2000)).await;
assert!(!table_engine.table_exists(ctx, &table_ref));
}
-
- async fn deadline(tx: &mpsc::Sender<CountdownCommand>) -> Instant {
- let (s, r) = oneshot::channel();
- tx.send(CountdownCommand::Deadline(s)).await.unwrap();
- r.await.unwrap()
- }
}
diff --git a/src/catalog/tests/remote_catalog_tests.rs b/src/catalog/tests/remote_catalog_tests.rs
index 776c6be6c901..9d3f539f830e 100644
--- a/src/catalog/tests/remote_catalog_tests.rs
+++ b/src/catalog/tests/remote_catalog_tests.rs
@@ -19,6 +19,7 @@ mod tests {
use std::assert_matches::assert_matches;
use std::collections::HashSet;
use std::sync::Arc;
+ use std::time::Duration;
use catalog::helper::{CatalogKey, CatalogValue, SchemaKey, SchemaValue};
use catalog::remote::mock::{MockKvBackend, MockTableEngine};
@@ -29,11 +30,27 @@ mod tests {
};
use catalog::{CatalogManager, RegisterTableRequest};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MITO_ENGINE};
+ use common_meta::ident::TableIdent;
use datatypes::schema::RawSchema;
use futures_util::StreamExt;
use table::engine::manager::{MemoryTableEngineManager, TableEngineManagerRef};
use table::engine::{EngineContext, TableEngineRef};
use table::requests::CreateTableRequest;
+ use table::test_util::EmptyTable;
+ use tokio::time::Instant;
+
+ struct TestingComponents {
+ kv_backend: KvBackendRef,
+ catalog_manager: Arc<RemoteCatalogManager>,
+ table_engine_manager: TableEngineManagerRef,
+ region_alive_keepers: Arc<RegionAliveKeepers>,
+ }
+
+ impl TestingComponents {
+ fn table_engine(&self) -> TableEngineRef {
+ self.table_engine_manager.engine(MITO_ENGINE).unwrap()
+ }
+ }
#[tokio::test]
async fn test_backend() {
@@ -121,14 +138,7 @@ mod tests {
assert!(ret.is_none());
}
- async fn prepare_components(
- node_id: u64,
- ) -> (
- KvBackendRef,
- TableEngineRef,
- Arc<RemoteCatalogManager>,
- TableEngineManagerRef,
- ) {
+ async fn prepare_components(node_id: u64) -> TestingComponents {
let cached_backend = Arc::new(CachedMetaKvBackend::wrap(
Arc::new(MockKvBackend::default()),
));
@@ -136,30 +146,34 @@ mod tests {
let table_engine = Arc::new(MockTableEngine::default());
let engine_manager = Arc::new(MemoryTableEngineManager::alias(
MITO_ENGINE.to_string(),
- table_engine.clone(),
+ table_engine,
));
+ let region_alive_keepers = Arc::new(RegionAliveKeepers::new(engine_manager.clone(), 5000));
+
let catalog_manager = RemoteCatalogManager::new(
engine_manager.clone(),
node_id,
cached_backend.clone(),
- Arc::new(RegionAliveKeepers::new(engine_manager.clone())),
+ region_alive_keepers.clone(),
);
catalog_manager.start().await.unwrap();
- (
- cached_backend,
- table_engine,
- Arc::new(catalog_manager),
- engine_manager as Arc<_>,
- )
+ TestingComponents {
+ kv_backend: cached_backend,
+ catalog_manager: Arc::new(catalog_manager),
+ table_engine_manager: engine_manager,
+ region_alive_keepers,
+ }
}
#[tokio::test]
async fn test_remote_catalog_default() {
common_telemetry::init_default_ut_logging();
let node_id = 42;
- let (_, _, catalog_manager, _) = prepare_components(node_id).await;
+ let TestingComponents {
+ catalog_manager, ..
+ } = prepare_components(node_id).await;
assert_eq!(
vec![DEFAULT_CATALOG_NAME.to_string()],
catalog_manager.catalog_names().await.unwrap()
@@ -180,14 +194,16 @@ mod tests {
async fn test_remote_catalog_register_nonexistent() {
common_telemetry::init_default_ut_logging();
let node_id = 42;
- let (_, table_engine, catalog_manager, _) = prepare_components(node_id).await;
+ let components = prepare_components(node_id).await;
+
// register a new table with a nonexistent catalog
let catalog_name = "nonexistent_catalog".to_string();
let schema_name = "nonexistent_schema".to_string();
let table_name = "fail_table".to_string();
// this schema has no effect
let table_schema = RawSchema::new(vec![]);
- let table = table_engine
+ let table = components
+ .table_engine()
.create_table(
&EngineContext {},
CreateTableRequest {
@@ -213,7 +229,7 @@ mod tests {
table_id: 1,
table,
};
- let res = catalog_manager.register_table(reg_req).await;
+ let res = components.catalog_manager.register_table(reg_req).await;
// because nonexistent_catalog does not exist yet.
assert_matches!(
@@ -225,7 +241,8 @@ mod tests {
#[tokio::test]
async fn test_register_table() {
let node_id = 42;
- let (_, table_engine, catalog_manager, _) = prepare_components(node_id).await;
+ let components = prepare_components(node_id).await;
+ let catalog_manager = &components.catalog_manager;
let default_catalog = catalog_manager
.catalog(DEFAULT_CATALOG_NAME)
.await
@@ -249,7 +266,8 @@ mod tests {
let table_id = 1;
// this schema has no effect
let table_schema = RawSchema::new(vec![]);
- let table = table_engine
+ let table = components
+ .table_engine()
.create_table(
&EngineContext {},
CreateTableRequest {
@@ -285,8 +303,10 @@ mod tests {
#[tokio::test]
async fn test_register_catalog_schema_table() {
let node_id = 42;
- let (backend, table_engine, catalog_manager, engine_manager) =
- prepare_components(node_id).await;
+ let components = prepare_components(node_id).await;
+ let backend = &components.kv_backend;
+ let catalog_manager = components.catalog_manager.clone();
+ let engine_manager = components.table_engine_manager.clone();
let catalog_name = "test_catalog".to_string();
let schema_name = "nonexistent_schema".to_string();
@@ -295,6 +315,7 @@ mod tests {
backend.clone(),
engine_manager.clone(),
node_id,
+ components.region_alive_keepers.clone(),
));
// register catalog to catalog manager
@@ -308,7 +329,8 @@ mod tests {
HashSet::from_iter(catalog_manager.catalog_names().await.unwrap().into_iter())
);
- let table_to_register = table_engine
+ let table_to_register = components
+ .table_engine()
.create_table(
&EngineContext {},
CreateTableRequest {
@@ -355,6 +377,7 @@ mod tests {
node_id,
engine_manager,
backend.clone(),
+ components.region_alive_keepers.clone(),
));
let prev = new_catalog
@@ -374,4 +397,94 @@ mod tests {
.collect()
)
}
+
+ #[tokio::test]
+ async fn test_register_table_before_and_after_region_alive_keeper_started() {
+ let components = prepare_components(42).await;
+ let catalog_manager = &components.catalog_manager;
+ let region_alive_keepers = &components.region_alive_keepers;
+
+ let table_before = TableIdent {
+ catalog: DEFAULT_CATALOG_NAME.to_string(),
+ schema: DEFAULT_SCHEMA_NAME.to_string(),
+ table: "table_before".to_string(),
+ table_id: 1,
+ engine: MITO_ENGINE.to_string(),
+ };
+ let request = RegisterTableRequest {
+ catalog: table_before.catalog.clone(),
+ schema: table_before.schema.clone(),
+ table_name: table_before.table.clone(),
+ table_id: table_before.table_id,
+ table: Arc::new(EmptyTable::new(CreateTableRequest {
+ id: table_before.table_id,
+ catalog_name: table_before.catalog.clone(),
+ schema_name: table_before.schema.clone(),
+ table_name: table_before.table.clone(),
+ desc: None,
+ schema: RawSchema::new(vec![]),
+ region_numbers: vec![0],
+ primary_key_indices: vec![],
+ create_if_not_exists: false,
+ table_options: Default::default(),
+ engine: MITO_ENGINE.to_string(),
+ })),
+ };
+ assert!(catalog_manager.register_table(request).await.unwrap());
+
+ let keeper = region_alive_keepers
+ .find_keeper(&table_before)
+ .await
+ .unwrap();
+ let deadline = keeper.deadline(0).await.unwrap();
+ let far_future = Instant::now() + Duration::from_secs(86400 * 365 * 29);
+ // assert region alive countdown is not started
+ assert!(deadline > far_future);
+
+ region_alive_keepers.start().await;
+
+ let table_after = TableIdent {
+ catalog: DEFAULT_CATALOG_NAME.to_string(),
+ schema: DEFAULT_SCHEMA_NAME.to_string(),
+ table: "table_after".to_string(),
+ table_id: 2,
+ engine: MITO_ENGINE.to_string(),
+ };
+ let request = RegisterTableRequest {
+ catalog: table_after.catalog.clone(),
+ schema: table_after.schema.clone(),
+ table_name: table_after.table.clone(),
+ table_id: table_after.table_id,
+ table: Arc::new(EmptyTable::new(CreateTableRequest {
+ id: table_after.table_id,
+ catalog_name: table_after.catalog.clone(),
+ schema_name: table_after.schema.clone(),
+ table_name: table_after.table.clone(),
+ desc: None,
+ schema: RawSchema::new(vec![]),
+ region_numbers: vec![0],
+ primary_key_indices: vec![],
+ create_if_not_exists: false,
+ table_options: Default::default(),
+ engine: MITO_ENGINE.to_string(),
+ })),
+ };
+ assert!(catalog_manager.register_table(request).await.unwrap());
+
+ let keeper = region_alive_keepers
+ .find_keeper(&table_after)
+ .await
+ .unwrap();
+ let deadline = keeper.deadline(0).await.unwrap();
+ // assert countdown is started for the table registered after [RegionAliveKeepers] started
+ assert!(deadline <= Instant::now() + Duration::from_secs(20));
+
+ let keeper = region_alive_keepers
+ .find_keeper(&table_before)
+ .await
+ .unwrap();
+ let deadline = keeper.deadline(0).await.unwrap();
+ // assert countdown is started for the table registered before [RegionAliveKeepers] started, too
+ assert!(deadline <= Instant::now() + Duration::from_secs(20));
+ }
}
diff --git a/src/datanode/src/heartbeat.rs b/src/datanode/src/heartbeat.rs
index efd0c1ec2061..6c9e3e036513 100644
--- a/src/datanode/src/heartbeat.rs
+++ b/src/datanode/src/heartbeat.rs
@@ -30,6 +30,7 @@ use snafu::ResultExt;
use tokio::sync::mpsc;
use tokio::time::Instant;
+use crate::datanode::DatanodeOptions;
use crate::error::{self, MetaClientInitSnafu, Result};
pub(crate) mod handler;
@@ -57,23 +58,23 @@ impl HeartbeatTask {
/// Create a new heartbeat task instance.
pub fn new(
node_id: u64,
- server_addr: String,
- server_hostname: Option<String>,
+ opts: &DatanodeOptions,
meta_client: Arc<MetaClient>,
catalog_manager: CatalogManagerRef,
resp_handler_executor: HeartbeatResponseHandlerExecutorRef,
+ heartbeat_interval_millis: u64,
region_alive_keepers: Arc<RegionAliveKeepers>,
) -> Self {
Self {
node_id,
// We use datanode's start time millis as the node's epoch.
node_epoch: common_time::util::current_time_millis() as u64,
- server_addr,
- server_hostname,
+ server_addr: opts.rpc_addr.clone(),
+ server_hostname: opts.rpc_hostname.clone(),
running: Arc::new(AtomicBool::new(false)),
meta_client,
catalog_manager,
- interval: 5_000, // default interval is set to 5 secs
+ interval: heartbeat_interval_millis,
resp_handler_executor,
region_alive_keepers,
}
@@ -140,7 +141,7 @@ impl HeartbeatTask {
let addr = resolve_addr(&self.server_addr, &self.server_hostname);
info!("Starting heartbeat to Metasrv with interval {interval}. My node id is {node_id}, address is {addr}.");
- self.region_alive_keepers.start(interval).await;
+ self.region_alive_keepers.start().await;
let meta_client = self.meta_client.clone();
let catalog_manager_clone = self.catalog_manager.clone();
diff --git a/src/datanode/src/heartbeat/handler/close_region.rs b/src/datanode/src/heartbeat/handler/close_region.rs
index 1dc0157fe723..abc492d40f03 100644
--- a/src/datanode/src/heartbeat/handler/close_region.rs
+++ b/src/datanode/src/heartbeat/handler/close_region.rs
@@ -23,6 +23,7 @@ use common_meta::heartbeat::handler::{
HandleControl, HeartbeatResponseHandler, HeartbeatResponseHandlerContext,
};
use common_meta::instruction::{Instruction, InstructionReply, SimpleReply};
+use common_meta::RegionIdent;
use common_telemetry::{error, info, warn};
use snafu::ResultExt;
use store_api::storage::RegionNumber;
@@ -55,25 +56,8 @@ impl HeartbeatResponseHandler for CloseRegionHandler {
let mailbox = ctx.mailbox.clone();
let self_ref = Arc::new(self.clone());
- let region_alive_keepers = self.region_alive_keepers.clone();
common_runtime::spawn_bg(async move {
- let table_ident = ®ion_ident.table_ident;
- let table_ref = TableReference::full(
- &table_ident.catalog,
- &table_ident.schema,
- &table_ident.table,
- );
- let result = self_ref
- .close_region_inner(
- table_ident.engine.clone(),
- &table_ref,
- vec![region_ident.region_number],
- )
- .await;
-
- if matches!(result, Ok(true)) {
- region_alive_keepers.deregister_region(®ion_ident).await;
- }
+ let result = self_ref.close_region_inner(region_ident).await;
if let Err(e) = mailbox
.send((meta, CloseRegionHandler::map_result(result)))
@@ -152,20 +136,21 @@ impl CloseRegionHandler {
Ok(true)
}
- async fn close_region_inner(
- &self,
- engine: String,
- table_ref: &TableReference<'_>,
- region_numbers: Vec<RegionNumber>,
- ) -> Result<bool> {
- let engine =
- self.table_engine_manager
- .engine(&engine)
- .context(error::TableEngineNotFoundSnafu {
- engine_name: &engine,
- })?;
+ async fn close_region_inner(&self, region_ident: RegionIdent) -> Result<bool> {
+ let table_ident = ®ion_ident.table_ident;
+ let engine_name = &table_ident.engine;
+ let engine = self
+ .table_engine_manager
+ .engine(engine_name)
+ .context(error::TableEngineNotFoundSnafu { engine_name })?;
let ctx = EngineContext::default();
+ let table_ref = &TableReference::full(
+ &table_ident.catalog,
+ &table_ident.schema,
+ &table_ident.table,
+ );
+ let region_numbers = vec![region_ident.region_number];
if self
.regions_closed(
table_ref.catalog,
@@ -203,7 +188,15 @@ impl CloseRegionHandler {
})? {
CloseTableResult::NotFound | CloseTableResult::Released(_) => {
// Deregister the table if it was released.
- self.deregister_table(table_ref).await
+ let deregistered = self.deregister_table(table_ref).await?;
+
+ if deregistered {
+ self.region_alive_keepers
+ .deregister_table(table_ident)
+ .await;
+ }
+
+ Ok(deregistered)
}
CloseTableResult::PartialClosed(regions) => {
// Requires caller to update the region_numbers
@@ -211,6 +204,11 @@ impl CloseRegionHandler {
"Close partial regions: {:?} in table: {}",
regions, table_ref
);
+
+ self.region_alive_keepers
+ .deregister_region(®ion_ident)
+ .await;
+
Ok(true)
}
};
diff --git a/src/datanode/src/instance.rs b/src/datanode/src/instance.rs
index 5e5a63006fbf..3cd1e57e6d2a 100644
--- a/src/datanode/src/instance.rs
+++ b/src/datanode/src/instance.rs
@@ -195,8 +195,12 @@ impl Instance {
let kv_backend = Arc::new(CachedMetaKvBackend::new(meta_client.clone()));
- let region_alive_keepers =
- Arc::new(RegionAliveKeepers::new(engine_manager.clone()));
+ let heartbeat_interval_millis = 5000;
+
+ let region_alive_keepers = Arc::new(RegionAliveKeepers::new(
+ engine_manager.clone(),
+ heartbeat_interval_millis,
+ ));
let catalog_manager = Arc::new(RemoteCatalogManager::new(
engine_manager.clone(),
@@ -222,11 +226,11 @@ impl Instance {
let heartbeat_task = Some(HeartbeatTask::new(
opts.node_id.context(MissingNodeIdSnafu)?,
- opts.rpc_addr.clone(),
- opts.rpc_hostname.clone(),
+ opts,
meta_client,
catalog_manager.clone(),
Arc::new(handlers_executor),
+ heartbeat_interval_millis,
region_alive_keepers,
));
diff --git a/src/datanode/src/tests.rs b/src/datanode/src/tests.rs
index 1796a6c8755b..5b4ba4de3d5f 100644
--- a/src/datanode/src/tests.rs
+++ b/src/datanode/src/tests.rs
@@ -14,6 +14,7 @@
use std::assert_matches::assert_matches;
use std::sync::Arc;
+use std::time::Duration;
use api::v1::greptime_request::Request as GrpcRequest;
use api::v1::meta::HeartbeatResponse;
@@ -32,8 +33,10 @@ use datatypes::prelude::ConcreteDataType;
use servers::query_handler::grpc::GrpcQueryHandler;
use session::context::QueryContext;
use table::engine::manager::TableEngineManagerRef;
+use table::TableRef;
use test_util::MockInstance;
use tokio::sync::mpsc::{self, Receiver};
+use tokio::time::Instant;
use crate::heartbeat::handler::close_region::CloseRegionHandler;
use crate::heartbeat::handler::open_region::OpenRegionHandler;
@@ -64,7 +67,7 @@ async fn test_close_region_handler() {
CloseRegionHandler::new(
catalog_manager_ref.clone(),
engine_manager_ref.clone(),
- Arc::new(RegionAliveKeepers::new(engine_manager_ref.clone())),
+ Arc::new(RegionAliveKeepers::new(engine_manager_ref.clone(), 5000)),
),
)]));
@@ -134,43 +137,57 @@ async fn test_open_region_handler() {
..
} = prepare_handler_test("test_open_region_handler").await;
- let region_alive_keeper = Arc::new(RegionAliveKeepers::new(engine_manager_ref.clone()));
+ let region_alive_keepers = Arc::new(RegionAliveKeepers::new(engine_manager_ref.clone(), 5000));
+ region_alive_keepers.start().await;
let executor = Arc::new(HandlerGroupExecutor::new(vec![
Arc::new(OpenRegionHandler::new(
catalog_manager_ref.clone(),
engine_manager_ref.clone(),
- region_alive_keeper.clone(),
+ region_alive_keepers.clone(),
)),
Arc::new(CloseRegionHandler::new(
catalog_manager_ref.clone(),
engine_manager_ref.clone(),
- region_alive_keeper,
+ region_alive_keepers.clone(),
)),
]));
- prepare_table(instance.inner()).await;
+ let instruction = open_region_instruction();
+ let Instruction::OpenRegion(region_ident) = instruction.clone() else { unreachable!() };
+ let table_ident = ®ion_ident.table_ident;
+
+ let table = prepare_table(instance.inner()).await;
+ region_alive_keepers
+ .register_table(table_ident.clone(), table)
+ .await
+ .unwrap();
// Opens an already opened table
- handle_instruction(executor.clone(), mailbox.clone(), open_region_instruction()).await;
+ handle_instruction(executor.clone(), mailbox.clone(), instruction.clone()).await;
let (_, reply) = rx.recv().await.unwrap();
assert_matches!(
reply,
InstructionReply::OpenRegion(SimpleReply { result: true, .. })
);
+ let keeper = region_alive_keepers.find_keeper(table_ident).await.unwrap();
+ let deadline = keeper.deadline(0).await.unwrap();
+ assert!(deadline <= Instant::now() + Duration::from_secs(20));
+
// Opens a nonexistent table
+ let non_exist_table_ident = TableIdent {
+ catalog: "greptime".to_string(),
+ schema: "public".to_string(),
+ table: "non-exist".to_string(),
+ table_id: 2024,
+ engine: "mito".to_string(),
+ };
handle_instruction(
executor.clone(),
mailbox.clone(),
Instruction::OpenRegion(RegionIdent {
- table_ident: TableIdent {
- catalog: "greptime".to_string(),
- schema: "public".to_string(),
- table: "non-exist".to_string(),
- table_id: 2024,
- engine: "mito".to_string(),
- },
+ table_ident: non_exist_table_ident.clone(),
region_number: 0,
cluster_id: 1,
datanode_id: 2,
@@ -183,6 +200,11 @@ async fn test_open_region_handler() {
InstructionReply::OpenRegion(SimpleReply { result: false, .. })
);
+ assert!(region_alive_keepers
+ .find_keeper(&non_exist_table_ident)
+ .await
+ .is_none());
+
// Closes demo table
handle_instruction(
executor.clone(),
@@ -197,8 +219,13 @@ async fn test_open_region_handler() {
);
assert_test_table_not_found(instance.inner()).await;
+ assert!(region_alive_keepers
+ .find_keeper(table_ident)
+ .await
+ .is_none());
+
// Opens demo table
- handle_instruction(executor.clone(), mailbox.clone(), open_region_instruction()).await;
+ handle_instruction(executor.clone(), mailbox.clone(), instruction).await;
let (_, reply) = rx.recv().await.unwrap();
assert_matches!(
reply,
@@ -275,10 +302,10 @@ fn open_region_instruction() -> Instruction {
})
}
-async fn prepare_table(instance: &Instance) {
+async fn prepare_table(instance: &Instance) -> TableRef {
test_util::create_test_table(instance, ConcreteDataType::timestamp_millisecond_datatype())
.await
- .unwrap();
+ .unwrap()
}
async fn assert_test_table_not_found(instance: &Instance) {
diff --git a/src/datanode/src/tests/test_util.rs b/src/datanode/src/tests/test_util.rs
index 91e344a00dfe..d59f9f5670ae 100644
--- a/src/datanode/src/tests/test_util.rs
+++ b/src/datanode/src/tests/test_util.rs
@@ -22,6 +22,7 @@ use servers::Mode;
use snafu::ResultExt;
use table::engine::{EngineContext, TableEngineRef};
use table::requests::{CreateTableRequest, TableOptions};
+use table::TableRef;
use crate::datanode::{
DatanodeOptions, FileConfig, ObjectStoreConfig, ProcedureConfig, StorageConfig, WalConfig,
@@ -84,7 +85,7 @@ fn create_tmp_dir_and_datanode_opts(name: &str) -> (DatanodeOptions, TestGuard)
pub(crate) async fn create_test_table(
instance: &Instance,
ts_type: ConcreteDataType,
-) -> Result<()> {
+) -> Result<TableRef> {
let column_schemas = vec![
ColumnSchema::new("host", ConcreteDataType::string_datatype(), true),
ColumnSchema::new("cpu", ConcreteDataType::float64_datatype(), true),
@@ -125,8 +126,8 @@ pub(crate) async fn create_test_table(
.unwrap()
.unwrap();
schema_provider
- .register_table(table_name.to_string(), table)
+ .register_table(table_name.to_string(), table.clone())
.await
.unwrap();
- Ok(())
+ Ok(table)
}
diff --git a/src/table/src/test_util/empty_table.rs b/src/table/src/test_util/empty_table.rs
index 679ace68876b..0503515642df 100644
--- a/src/table/src/test_util/empty_table.rs
+++ b/src/table/src/test_util/empty_table.rs
@@ -37,8 +37,10 @@ impl EmptyTable {
.next_column_id(0)
.options(req.table_options)
.region_numbers(req.region_numbers)
+ .engine(req.engine)
.build();
let table_info = TableInfoBuilder::default()
+ .table_id(req.id)
.catalog_name(req.catalog_name)
.schema_name(req.schema_name)
.name(req.table_name)
diff --git a/src/table/src/test_util/memtable.rs b/src/table/src/test_util/memtable.rs
index f2e942ce8d9b..3d27650f2099 100644
--- a/src/table/src/test_util/memtable.rs
+++ b/src/table/src/test_util/memtable.rs
@@ -56,7 +56,7 @@ impl MemTable {
Self::new_with_catalog(
table_name,
recordbatch,
- 0,
+ 1,
"greptime".to_string(),
"public".to_string(),
regions,
|
feat
|
start region alive keepers (#1796)
|
f74a955504b18c2a5b95964664ec25b3ce370d48
|
2025-01-22 05:03:11
|
Zhenchi
|
feat: bloom filter as fulltext index v2 (Part 1) (#5406)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index f0b659d66e14..efe48899a1fb 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -5301,9 +5301,11 @@ dependencies = [
"futures",
"greptime-proto",
"itertools 0.10.5",
+ "jieba-rs",
"mockall",
"pin-project",
"prost 0.12.6",
+ "puffin",
"rand",
"regex",
"regex-automata 0.4.8",
diff --git a/src/common/base/src/range_read.rs b/src/common/base/src/range_read.rs
index fb0fc61fb036..5fabc8cacbcf 100644
--- a/src/common/base/src/range_read.rs
+++ b/src/common/base/src/range_read.rs
@@ -223,7 +223,6 @@ impl FileReader {
}
}
-#[cfg(any(test, feature = "testing"))]
impl SizeAwareRangeReader for FileReader {
fn with_file_size_hint(&mut self, _file_size_hint: u64) {
// do nothing
diff --git a/src/index/Cargo.toml b/src/index/Cargo.toml
index 4186834d465d..f149c76565ad 100644
--- a/src/index/Cargo.toml
+++ b/src/index/Cargo.toml
@@ -22,9 +22,11 @@ fst.workspace = true
futures.workspace = true
greptime-proto.workspace = true
itertools.workspace = true
+jieba-rs = "0.7"
mockall.workspace = true
pin-project.workspace = true
prost.workspace = true
+puffin.workspace = true
regex.workspace = true
regex-automata.workspace = true
serde.workspace = true
@@ -33,6 +35,7 @@ snafu.workspace = true
tantivy = { version = "0.22", features = ["zstd-compression"] }
tantivy-jieba = "0.11.0"
tokio.workspace = true
+tokio-util.workspace = true
uuid.workspace = true
[dev-dependencies]
diff --git a/src/index/src/bloom_filter.rs b/src/index/src/bloom_filter.rs
index 69f6cb94e05e..eb818a0f5a09 100644
--- a/src/index/src/bloom_filter.rs
+++ b/src/index/src/bloom_filter.rs
@@ -17,8 +17,5 @@ pub mod creator;
pub mod error;
pub mod reader;
-pub type Bytes = Vec<u8>;
-pub type BytesRef<'a> = &'a [u8];
-
/// The seed used for the Bloom filter.
pub const SEED: u128 = 42;
diff --git a/src/index/src/bloom_filter/applier.rs b/src/index/src/bloom_filter/applier.rs
index b847edb18f20..c60b99d008c4 100644
--- a/src/index/src/bloom_filter/applier.rs
+++ b/src/index/src/bloom_filter/applier.rs
@@ -20,7 +20,7 @@ use itertools::Itertools;
use crate::bloom_filter::error::Result;
use crate::bloom_filter::reader::BloomFilterReader;
-use crate::bloom_filter::Bytes;
+use crate::Bytes;
pub struct BloomFilterApplier {
reader: Box<dyn BloomFilterReader + Send>,
diff --git a/src/index/src/bloom_filter/creator.rs b/src/index/src/bloom_filter/creator.rs
index b4030d28fd7c..0b6810a688c3 100644
--- a/src/index/src/bloom_filter/creator.rs
+++ b/src/index/src/bloom_filter/creator.rs
@@ -26,8 +26,9 @@ use prost::Message;
use snafu::ResultExt;
use crate::bloom_filter::error::{IoSnafu, Result};
-use crate::bloom_filter::{Bytes, SEED};
+use crate::bloom_filter::SEED;
use crate::external_provider::ExternalTempFileProvider;
+use crate::Bytes;
/// The false positive rate of the Bloom filter.
pub const FALSE_POSITIVE_RATE: f64 = 0.01;
diff --git a/src/index/src/bloom_filter/creator/finalize_segment.rs b/src/index/src/bloom_filter/creator/finalize_segment.rs
index 072d661f5691..84f358f05304 100644
--- a/src/index/src/bloom_filter/creator/finalize_segment.rs
+++ b/src/index/src/bloom_filter/creator/finalize_segment.rs
@@ -25,8 +25,8 @@ use snafu::ResultExt;
use super::intermediate_codec::IntermediateBloomFilterCodecV1;
use crate::bloom_filter::creator::{FALSE_POSITIVE_RATE, SEED};
use crate::bloom_filter::error::{IntermediateSnafu, IoSnafu, Result};
-use crate::bloom_filter::Bytes;
use crate::external_provider::ExternalTempFileProvider;
+use crate::Bytes;
/// The minimum memory usage threshold for flushing in-memory Bloom filters to disk.
const MIN_MEMORY_USAGE_THRESHOLD: usize = 1024 * 1024; // 1MB
diff --git a/src/index/src/fulltext_index.rs b/src/index/src/fulltext_index.rs
index 8b0bde3d64e9..3a7f58c8ab4d 100644
--- a/src/index/src/fulltext_index.rs
+++ b/src/index/src/fulltext_index.rs
@@ -17,6 +17,7 @@ use serde::{Deserialize, Serialize};
pub mod create;
pub mod error;
pub mod search;
+pub mod tokenizer;
#[cfg(test)]
mod tests;
diff --git a/src/index/src/fulltext_index/create.rs b/src/index/src/fulltext_index/create.rs
index 99567a3f723e..46f18999cc0a 100644
--- a/src/index/src/fulltext_index/create.rs
+++ b/src/index/src/fulltext_index/create.rs
@@ -12,11 +12,16 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+mod bloom_filter;
mod tantivy;
use async_trait::async_trait;
-pub use tantivy::{TantivyFulltextIndexCreator, ROWID_FIELD_NAME, TEXT_FIELD_NAME};
+use puffin::puffin_manager::{PuffinWriter, PutOptions};
+pub use crate::fulltext_index::create::bloom_filter::BloomFilterFulltextIndexCreator;
+pub use crate::fulltext_index::create::tantivy::{
+ TantivyFulltextIndexCreator, ROWID_FIELD_NAME, TEXT_FIELD_NAME,
+};
use crate::fulltext_index::error::Result;
/// `FulltextIndexCreator` is for creating a fulltext index.
@@ -26,7 +31,15 @@ pub trait FulltextIndexCreator: Send {
async fn push_text(&mut self, text: &str) -> Result<()>;
/// Finalizes the creation of the index.
- async fn finish(&mut self) -> Result<()>;
+ async fn finish(
+ &mut self,
+ puffin_writer: &mut (impl PuffinWriter + Send),
+ blob_key: &str,
+ put_options: PutOptions,
+ ) -> Result<u64>;
+
+ /// Aborts the creation of the index.
+ async fn abort(&mut self) -> Result<()>;
/// Returns the memory usage in bytes during the creation of the index.
fn memory_usage(&self) -> usize;
diff --git a/src/index/src/fulltext_index/create/bloom_filter.rs b/src/index/src/fulltext_index/create/bloom_filter.rs
new file mode 100644
index 000000000000..ba6d4eceedf1
--- /dev/null
+++ b/src/index/src/fulltext_index/create/bloom_filter.rs
@@ -0,0 +1,127 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::sync::atomic::AtomicUsize;
+use std::sync::Arc;
+
+use async_trait::async_trait;
+use common_error::ext::BoxedError;
+use puffin::puffin_manager::{PuffinWriter, PutOptions};
+use snafu::{OptionExt, ResultExt};
+use tokio_util::compat::{TokioAsyncReadCompatExt, TokioAsyncWriteCompatExt};
+
+use crate::bloom_filter::creator::BloomFilterCreator;
+use crate::external_provider::ExternalTempFileProvider;
+use crate::fulltext_index::create::FulltextIndexCreator;
+use crate::fulltext_index::error::{
+ AbortedSnafu, BiErrorsSnafu, BloomFilterFinishSnafu, ExternalSnafu, PuffinAddBlobSnafu, Result,
+};
+use crate::fulltext_index::tokenizer::{Analyzer, ChineseTokenizer, EnglishTokenizer};
+use crate::fulltext_index::Config;
+
+const PIPE_BUFFER_SIZE_FOR_SENDING_BLOB: usize = 8192;
+
+/// `BloomFilterFulltextIndexCreator` is for creating a fulltext index using a bloom filter.
+pub struct BloomFilterFulltextIndexCreator {
+ inner: Option<BloomFilterCreator>,
+ analyzer: Analyzer,
+}
+
+impl BloomFilterFulltextIndexCreator {
+ pub fn new(
+ config: Config,
+ rows_per_segment: usize,
+ intermediate_provider: Arc<dyn ExternalTempFileProvider>,
+ global_memory_usage: Arc<AtomicUsize>,
+ global_memory_usage_threshold: Option<usize>,
+ ) -> Self {
+ let tokenizer = match config.analyzer {
+ crate::fulltext_index::Analyzer::English => Box::new(EnglishTokenizer) as _,
+ crate::fulltext_index::Analyzer::Chinese => Box::new(ChineseTokenizer) as _,
+ };
+ let analyzer = Analyzer::new(tokenizer, config.case_sensitive);
+
+ let inner = BloomFilterCreator::new(
+ rows_per_segment,
+ intermediate_provider,
+ global_memory_usage,
+ global_memory_usage_threshold,
+ );
+ Self {
+ inner: Some(inner),
+ analyzer,
+ }
+ }
+}
+
+#[async_trait]
+impl FulltextIndexCreator for BloomFilterFulltextIndexCreator {
+ async fn push_text(&mut self, text: &str) -> Result<()> {
+ let tokens = self.analyzer.analyze_text(text)?;
+ self.inner
+ .as_mut()
+ .context(AbortedSnafu)?
+ .push_row_elems(tokens)
+ .await
+ .map_err(BoxedError::new)
+ .context(ExternalSnafu)?;
+ Ok(())
+ }
+
+ async fn finish(
+ &mut self,
+ puffin_writer: &mut (impl PuffinWriter + Send),
+ blob_key: &str,
+ put_options: PutOptions,
+ ) -> Result<u64> {
+ let creator = self.inner.as_mut().context(AbortedSnafu)?;
+
+ let (tx, rx) = tokio::io::duplex(PIPE_BUFFER_SIZE_FOR_SENDING_BLOB);
+
+ let (index_finish, puffin_add_blob) = futures::join!(
+ creator.finish(tx.compat_write()),
+ puffin_writer.put_blob(blob_key, rx.compat(), put_options)
+ );
+
+ match (
+ puffin_add_blob.context(PuffinAddBlobSnafu),
+ index_finish.context(BloomFilterFinishSnafu),
+ ) {
+ (Err(e1), Err(e2)) => BiErrorsSnafu {
+ first: Box::new(e1),
+ second: Box::new(e2),
+ }
+ .fail()?,
+
+ (Ok(_), e @ Err(_)) => e?,
+ (e @ Err(_), Ok(_)) => e.map(|_| ())?,
+ (Ok(written_bytes), Ok(_)) => {
+ return Ok(written_bytes);
+ }
+ }
+ Ok(0)
+ }
+
+ async fn abort(&mut self) -> Result<()> {
+ self.inner.take().context(AbortedSnafu)?;
+ Ok(())
+ }
+
+ fn memory_usage(&self) -> usize {
+ self.inner
+ .as_ref()
+ .map(|i| i.memory_usage())
+ .unwrap_or_default()
+ }
+}
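An illustrative sketch of the duplex trick used by finish above, assuming only tokio and futures; the payload bytes are made up and the puffin writer side is replaced with a plain read_to_end.

use tokio::io::{duplex, AsyncReadExt, AsyncWriteExt};

#[tokio::main]
async fn main() {
    // An in-memory pipe: whatever the producer half writes, the consumer half reads.
    let (mut writer, mut reader) = duplex(8192);

    let produce = async move {
        writer.write_all(b"serialized bloom filter bytes").await.unwrap();
        // Dropping the writer signals EOF to the reader side.
        drop(writer);
    };
    let consume = async move {
        let mut blob = Vec::new();
        reader.read_to_end(&mut blob).await.unwrap();
        blob
    };

    // Drive both halves concurrently, as the creator does with finish and put_blob.
    let ((), blob) = futures::join!(produce, consume);
    assert_eq!(blob, b"serialized bloom filter bytes");
}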
diff --git a/src/index/src/fulltext_index/create/tantivy.rs b/src/index/src/fulltext_index/create/tantivy.rs
index aa5966b218c3..2ddc8299aee7 100644
--- a/src/index/src/fulltext_index/create/tantivy.rs
+++ b/src/index/src/fulltext_index/create/tantivy.rs
@@ -12,9 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::path::Path;
+use std::path::{Path, PathBuf};
use async_trait::async_trait;
+use common_error::ext::BoxedError;
+use puffin::puffin_manager::{PuffinWriter, PutOptions};
use snafu::{OptionExt, ResultExt};
use tantivy::indexer::NoMergePolicy;
use tantivy::schema::{Schema, STORED, TEXT};
@@ -24,7 +26,9 @@ use tantivy::{doc, Index, IndexWriter};
use tantivy_jieba::JiebaTokenizer;
use crate::fulltext_index::create::FulltextIndexCreator;
-use crate::fulltext_index::error::{FinishedSnafu, IoSnafu, JoinSnafu, Result, TantivySnafu};
+use crate::fulltext_index::error::{
+ ExternalSnafu, FinishedSnafu, IoSnafu, JoinSnafu, Result, TantivySnafu,
+};
use crate::fulltext_index::{Analyzer, Config};
pub const TEXT_FIELD_NAME: &str = "greptime_fulltext_text";
@@ -43,6 +47,9 @@ pub struct TantivyFulltextIndexCreator {
/// The current max row id.
max_rowid: u64,
+
+ /// The directory path in filesystem to store the index.
+ path: PathBuf,
}
impl TantivyFulltextIndexCreator {
@@ -59,7 +66,7 @@ impl TantivyFulltextIndexCreator {
let rowid_field = schema_builder.add_u64_field(ROWID_FIELD_NAME, STORED);
let schema = schema_builder.build();
- let mut index = Index::create_in_dir(path, schema).context(TantivySnafu)?;
+ let mut index = Index::create_in_dir(&path, schema).context(TantivySnafu)?;
index.settings_mut().docstore_compression = Compressor::Zstd(ZstdCompressor::default());
index.set_tokenizers(Self::build_tokenizer(&config));
@@ -76,6 +83,7 @@ impl TantivyFulltextIndexCreator {
text_field,
rowid_field,
max_rowid: 0,
+ path: path.as_ref().to_path_buf(),
})
}
@@ -115,14 +123,37 @@ impl FulltextIndexCreator for TantivyFulltextIndexCreator {
Ok(())
}
- async fn finish(&mut self) -> Result<()> {
+ async fn finish(
+ &mut self,
+ puffin_writer: &mut (impl PuffinWriter + Send),
+ blob_key: &str,
+ put_options: PutOptions,
+ ) -> Result<u64> {
+ let mut writer = self.writer.take().context(FinishedSnafu)?;
+ common_runtime::spawn_blocking_global(move || {
+ writer.commit().context(TantivySnafu)?;
+ writer.wait_merging_threads().context(TantivySnafu)
+ })
+ .await
+ .context(JoinSnafu)??;
+
+ puffin_writer
+ .put_dir(blob_key, self.path.clone(), put_options)
+ .await
+ .map_err(BoxedError::new)
+ .context(ExternalSnafu)
+ }
+
+ async fn abort(&mut self) -> Result<()> {
let mut writer = self.writer.take().context(FinishedSnafu)?;
common_runtime::spawn_blocking_global(move || {
writer.commit().context(TantivySnafu)?;
writer.wait_merging_threads().context(TantivySnafu)
})
.await
- .context(JoinSnafu)?
+ .context(JoinSnafu)??;
+
+ tokio::fs::remove_dir_all(&self.path).await.context(IoSnafu)
}
fn memory_usage(&self) -> usize {
@@ -134,6 +165,7 @@ impl FulltextIndexCreator for TantivyFulltextIndexCreator {
#[cfg(test)]
mod tests {
use common_test_util::temp_dir::create_temp_dir;
+ use futures::AsyncRead;
use tantivy::collector::DocSetCollector;
use tantivy::query::QueryParser;
use tantivy::schema::Value;
@@ -141,6 +173,39 @@ mod tests {
use super::*;
+ struct MockPuffinWriter;
+
+ #[async_trait]
+ impl PuffinWriter for MockPuffinWriter {
+ async fn put_blob<R>(
+ &mut self,
+ _key: &str,
+ _raw_data: R,
+ _options: PutOptions,
+ ) -> puffin::error::Result<u64>
+ where
+ R: AsyncRead + Send,
+ {
+ unreachable!()
+ }
+
+ async fn put_dir(
+ &mut self,
+ _key: &str,
+ _dir: PathBuf,
+ _options: PutOptions,
+ ) -> puffin::error::Result<u64> {
+ Ok(0)
+ }
+ fn set_footer_lz4_compressed(&mut self, _lz4_compressed: bool) {
+ unreachable!()
+ }
+
+ async fn finish(self) -> puffin::error::Result<u64> {
+ Ok(0)
+ }
+ }
+
#[tokio::test]
async fn test_creator_basic() {
let memory_limits = [1, 64_000_000, usize::MAX];
@@ -241,7 +306,10 @@ mod tests {
for text in texts {
creator.push_text(text).await.unwrap();
}
- creator.finish().await.unwrap();
+ creator
+ .finish(&mut MockPuffinWriter, "", PutOptions::default())
+ .await
+ .unwrap();
}
async fn query_and_check(path: &Path, cases: &[(&str, Vec<u32>)]) {
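// --- Editor's illustrative sketch (not part of the commit above) ---
// A std-only reduction of the nested-Result flattening that the new
// `finish()`/`abort()` perform with `.await.context(JoinSnafu)??`: the outer
// Result models a failed join of the blocking task, the inner one the error
// raised inside the closure. The error variants below are stand-ins.
#[derive(Debug)]
enum Error {
    Join,
    Tantivy,
}

// Pretend "spawn a blocking task and wait for it": outer Result = join outcome,
// inner Result = the closure's own outcome.
fn run_blocking(join_ok: bool, task_ok: bool) -> Result<Result<(), Error>, Error> {
    if !join_ok {
        return Err(Error::Join);
    }
    Ok(if task_ok { Ok(()) } else { Err(Error::Tantivy) })
}

fn finish(join_ok: bool, task_ok: bool) -> Result<(), Error> {
    // The first `?` surfaces join failures, the second one the task's error,
    // mirroring the double `??` in the diff.
    run_blocking(join_ok, task_ok)??;
    Ok(())
}

fn main() {
    assert!(finish(true, true).is_ok());
    assert!(matches!(finish(true, false), Err(Error::Tantivy)));
    assert!(matches!(finish(false, true), Err(Error::Join)));
}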
diff --git a/src/index/src/fulltext_index/error.rs b/src/index/src/fulltext_index/error.rs
index 26a433110449..6cf7f7494393 100644
--- a/src/index/src/fulltext_index/error.rs
+++ b/src/index/src/fulltext_index/error.rs
@@ -76,6 +76,34 @@ pub enum Error {
#[snafu(implicit)]
location: Location,
},
+
+ #[snafu(display("Aborted creator"))]
+ Aborted {
+ #[snafu(implicit)]
+ location: Location,
+ },
+
+ #[snafu(display("Failed to add blob to puffin file"))]
+ PuffinAddBlob {
+ source: puffin::error::Error,
+ #[snafu(implicit)]
+ location: Location,
+ },
+
+ #[snafu(display("Failed to finish bloom filter"))]
+ BloomFilterFinish {
+ source: crate::bloom_filter::error::Error,
+ #[snafu(implicit)]
+ location: Location,
+ },
+
+ #[snafu(display("BiErrors, first: {first}, second: {second}"))]
+ BiErrors {
+ first: Box<Error>,
+ second: Box<Error>,
+ #[snafu(implicit)]
+ location: Location,
+ },
}
impl ErrorExt for Error {
@@ -86,7 +114,12 @@ impl ErrorExt for Error {
Tantivy { .. } | TantivyDocNotFound { .. } => StatusCode::Internal,
TantivyParser { .. } => StatusCode::InvalidSyntax,
- Io { .. } | Finished { .. } | Join { .. } => StatusCode::Unexpected,
+ BiErrors { .. } => StatusCode::Internal,
+
+ Io { .. } | Finished { .. } | Join { .. } | Aborted { .. } => StatusCode::Unexpected,
+
+ BloomFilterFinish { source, .. } => source.status_code(),
+ PuffinAddBlob { source, .. } => source.status_code(),
External { source, .. } => source.status_code(),
}
diff --git a/src/index/src/fulltext_index/tests.rs b/src/index/src/fulltext_index/tests.rs
index 3e7c88c6a25b..90449f9dde32 100644
--- a/src/index/src/fulltext_index/tests.rs
+++ b/src/index/src/fulltext_index/tests.rs
@@ -13,17 +13,37 @@
// limitations under the License.
use std::collections::BTreeSet;
+use std::sync::Arc;
use common_test_util::temp_dir::{create_temp_dir, TempDir};
+use puffin::puffin_manager::file_accessor::MockFileAccessor;
+use puffin::puffin_manager::fs_puffin_manager::FsPuffinManager;
+use puffin::puffin_manager::stager::BoundedStager;
+use puffin::puffin_manager::{DirGuard, PuffinManager, PuffinReader, PuffinWriter, PutOptions};
use crate::fulltext_index::create::{FulltextIndexCreator, TantivyFulltextIndexCreator};
use crate::fulltext_index::search::{FulltextIndexSearcher, RowId, TantivyFulltextIndexSearcher};
use crate::fulltext_index::{Analyzer, Config};
-async fn create_index(prefix: &str, texts: Vec<&str>, config: Config) -> TempDir {
- let tempdir = create_temp_dir(prefix);
+async fn new_bounded_stager(prefix: &str) -> (TempDir, Arc<BoundedStager>) {
+ let staging_dir = create_temp_dir(prefix);
+ let path = staging_dir.path().to_path_buf();
+ (
+ staging_dir,
+ Arc::new(BoundedStager::new(path, 102400).await.unwrap()),
+ )
+}
+
+async fn create_index(
+ prefix: &str,
+ puffin_writer: &mut (impl PuffinWriter + Send),
+ blob_key: &str,
+ texts: Vec<&str>,
+ config: Config,
+) {
+ let tantivy_path = create_temp_dir(prefix);
- let mut creator = TantivyFulltextIndexCreator::new(tempdir.path(), config, 1024 * 1024)
+ let mut creator = TantivyFulltextIndexCreator::new(tantivy_path.path(), config, 1024 * 1024)
.await
.unwrap();
@@ -31,8 +51,10 @@ async fn create_index(prefix: &str, texts: Vec<&str>, config: Config) -> TempDir
creator.push_text(text).await.unwrap();
}
- creator.finish().await.unwrap();
- tempdir
+ creator
+ .finish(puffin_writer, blob_key, PutOptions::default())
+ .await
+ .unwrap();
}
async fn test_search(
@@ -42,9 +64,18 @@ async fn test_search(
query: &str,
expected: impl IntoIterator<Item = RowId>,
) {
- let index_path = create_index(prefix, texts, config).await;
-
- let searcher = TantivyFulltextIndexSearcher::new(index_path.path()).unwrap();
+ let (_staging_dir, stager) = new_bounded_stager(prefix).await;
+ let file_accessor = Arc::new(MockFileAccessor::new(prefix));
+ let puffin_manager = FsPuffinManager::new(stager, file_accessor);
+
+ let file_name = "fulltext_index";
+ let blob_key = "fulltext_index";
+ let mut writer = puffin_manager.writer(file_name).await.unwrap();
+ create_index(prefix, &mut writer, blob_key, texts, config).await;
+
+ let reader = puffin_manager.reader(file_name).await.unwrap();
+ let index_dir = reader.dir(blob_key).await.unwrap();
+ let searcher = TantivyFulltextIndexSearcher::new(index_dir.path()).unwrap();
let results = searcher.search(query).await.unwrap();
let expected = expected.into_iter().collect::<BTreeSet<_>>();
diff --git a/src/index/src/fulltext_index/tokenizer.rs b/src/index/src/fulltext_index/tokenizer.rs
new file mode 100644
index 000000000000..721ffdd3b9be
--- /dev/null
+++ b/src/index/src/fulltext_index/tokenizer.rs
@@ -0,0 +1,125 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use jieba_rs::Jieba;
+
+use crate::fulltext_index::error::Result;
+use crate::Bytes;
+
+/// `Tokenizer` tokenizes a text into a list of tokens.
+pub trait Tokenizer: Send {
+ fn tokenize<'a>(&self, text: &'a str) -> Vec<&'a str>;
+}
+
+/// `EnglishTokenizer` tokenizes an English text.
+///
+/// It splits the text on non-alphanumeric characters.
+#[derive(Debug, Default)]
+pub struct EnglishTokenizer;
+
+impl Tokenizer for EnglishTokenizer {
+ fn tokenize<'a>(&self, text: &'a str) -> Vec<&'a str> {
+ text.split(|c: char| !c.is_alphanumeric())
+ .filter(|s| !s.is_empty())
+ .collect()
+ }
+}
+
+/// `ChineseTokenizer` tokenizes a Chinese text.
+///
+/// It uses the Jieba tokenizer to split the text into Chinese words.
+#[derive(Debug, Default)]
+pub struct ChineseTokenizer;
+
+impl Tokenizer for ChineseTokenizer {
+ fn tokenize<'a>(&self, text: &'a str) -> Vec<&'a str> {
+ let jieba = Jieba::new();
+ jieba.cut(text, false)
+ }
+}
+
+/// `Analyzer` analyzes a text into a list of tokens.
+///
+/// It uses a `Tokenizer` to tokenize the text and optionally lowercases the tokens.
+pub struct Analyzer {
+ tokenizer: Box<dyn Tokenizer>,
+ case_sensitive: bool,
+}
+
+impl Analyzer {
+ /// Creates a new `Analyzer` with the given `Tokenizer` and case sensitivity.
+ pub fn new(tokenizer: Box<dyn Tokenizer>, case_sensitive: bool) -> Self {
+ Self {
+ tokenizer,
+ case_sensitive,
+ }
+ }
+
+ /// Analyzes the given text into a list of tokens.
+ pub fn analyze_text(&self, text: &str) -> Result<Vec<Bytes>> {
+ let res = self
+ .tokenizer
+ .tokenize(text)
+ .iter()
+ .map(|s| {
+ if self.case_sensitive {
+ s.as_bytes().to_vec()
+ } else {
+ s.to_lowercase().as_bytes().to_vec()
+ }
+ })
+ .collect();
+ Ok(res)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_english_tokenizer() {
+ let tokenizer = EnglishTokenizer;
+ let text = "Hello, world! This is a test0.";
+ let tokens = tokenizer.tokenize(text);
+ assert_eq!(tokens, vec!["Hello", "world", "This", "is", "a", "test0"]);
+ }
+
+ #[test]
+ fn test_chinese_tokenizer() {
+ let tokenizer = ChineseTokenizer;
+ let text = "我喜欢苹果";
+ let tokens = tokenizer.tokenize(text);
+ assert_eq!(tokens, vec!["我", "喜欢", "苹果"]);
+ }
+
+ #[test]
+ fn test_analyzer() {
+ let tokenizer = EnglishTokenizer;
+ let analyzer = Analyzer::new(Box::new(tokenizer), false);
+ let text = "Hello, world! This is a test.";
+ let tokens = analyzer.analyze_text(text).unwrap();
+ assert_eq!(
+ tokens,
+ vec![
+ b"hello".to_vec(),
+ b"world".to_vec(),
+ b"this".to_vec(),
+ b"is".to_vec(),
+ b"a".to_vec(),
+ b"test".to_vec()
+ ]
+ );
+ }
+}
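// --- Editor's illustrative sketch (not part of the commit above) ---
// A std-only reduction of the tokenize-then-lowercase pipeline that
// `Analyzer::analyze_text` performs for the English path. The jieba-backed
// Chinese path is omitted; the function names here are hypothetical stand-ins.
fn tokenize_english(text: &str) -> Vec<&str> {
    // Split on anything that is not alphanumeric and drop empty pieces,
    // matching the EnglishTokenizer in the diff.
    text.split(|c: char| !c.is_alphanumeric())
        .filter(|s| !s.is_empty())
        .collect()
}

fn analyze(text: &str, case_sensitive: bool) -> Vec<Vec<u8>> {
    tokenize_english(text)
        .iter()
        .map(|s| {
            if case_sensitive {
                s.as_bytes().to_vec()
            } else {
                s.to_lowercase().into_bytes()
            }
        })
        .collect()
}

fn main() {
    assert_eq!(
        analyze("Hello, world! This is a test0.", false),
        vec![
            b"hello".to_vec(),
            b"world".to_vec(),
            b"this".to_vec(),
            b"is".to_vec(),
            b"a".to_vec(),
            b"test0".to_vec()
        ]
    );
}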
diff --git a/src/index/src/inverted_index.rs b/src/index/src/inverted_index.rs
index 7a34bae21381..9dc5c87014d5 100644
--- a/src/index/src/inverted_index.rs
+++ b/src/index/src/inverted_index.rs
@@ -18,5 +18,3 @@ pub mod format;
pub mod search;
pub type FstMap = fst::Map<Vec<u8>>;
-pub type Bytes = Vec<u8>;
-pub type BytesRef<'a> = &'a [u8];
diff --git a/src/index/src/inverted_index/create.rs b/src/index/src/inverted_index/create.rs
index 15674d696cd6..b56d09dc9984 100644
--- a/src/index/src/inverted_index/create.rs
+++ b/src/index/src/inverted_index/create.rs
@@ -19,7 +19,7 @@ use async_trait::async_trait;
use crate::inverted_index::error::Result;
use crate::inverted_index::format::writer::InvertedIndexWriter;
-use crate::inverted_index::BytesRef;
+use crate::BytesRef;
/// `InvertedIndexCreator` provides functionality to construct an inverted index
#[async_trait]
diff --git a/src/index/src/inverted_index/create/sort.rs b/src/index/src/inverted_index/create/sort.rs
index 81ca9aeca690..cb92bfa1adb8 100644
--- a/src/index/src/inverted_index/create/sort.rs
+++ b/src/index/src/inverted_index/create/sort.rs
@@ -21,7 +21,7 @@ use common_base::BitVec;
use futures::Stream;
use crate::inverted_index::error::Result;
-use crate::inverted_index::{Bytes, BytesRef};
+use crate::{Bytes, BytesRef};
/// A stream of sorted values along with their associated bitmap
pub type SortedStream = Box<dyn Stream<Item = Result<(Bytes, BitVec)>> + Send + Unpin>;
diff --git a/src/index/src/inverted_index/create/sort/external_sort.rs b/src/index/src/inverted_index/create/sort/external_sort.rs
index f4e1d9f9101d..cdd6e848c94e 100644
--- a/src/index/src/inverted_index/create/sort/external_sort.rs
+++ b/src/index/src/inverted_index/create/sort/external_sort.rs
@@ -33,7 +33,7 @@ use crate::inverted_index::create::sort::merge_stream::MergeSortedStream;
use crate::inverted_index::create::sort::{SortOutput, SortedStream, Sorter};
use crate::inverted_index::create::sort_create::SorterFactory;
use crate::inverted_index::error::{IntermediateSnafu, Result};
-use crate::inverted_index::{Bytes, BytesRef};
+use crate::{Bytes, BytesRef};
/// `ExternalSorter` manages the sorting of data using both in-memory structures and external files.
/// It dumps data to external files when the in-memory buffer crosses a certain memory threshold.
diff --git a/src/index/src/inverted_index/create/sort/intermediate_rw.rs b/src/index/src/inverted_index/create/sort/intermediate_rw.rs
index dbadd5498b4d..85fc76e9518d 100644
--- a/src/index/src/inverted_index/create/sort/intermediate_rw.rs
+++ b/src/index/src/inverted_index/create/sort/intermediate_rw.rs
@@ -46,7 +46,7 @@ use crate::inverted_index::create::sort::SortedStream;
use crate::inverted_index::error::{
CloseSnafu, FlushSnafu, ReadSnafu, Result, UnknownIntermediateCodecMagicSnafu, WriteSnafu,
};
-use crate::inverted_index::Bytes;
+use crate::Bytes;
/// `IntermediateWriter` serializes and writes intermediate data to the wrapped `writer`
pub struct IntermediateWriter<W> {
diff --git a/src/index/src/inverted_index/create/sort/intermediate_rw/codec_v1.rs b/src/index/src/inverted_index/create/sort/intermediate_rw/codec_v1.rs
index 8e4feb9902fa..05a4eeb57d7b 100644
--- a/src/index/src/inverted_index/create/sort/intermediate_rw/codec_v1.rs
+++ b/src/index/src/inverted_index/create/sort/intermediate_rw/codec_v1.rs
@@ -20,7 +20,7 @@ use common_base::BitVec;
use snafu::ResultExt;
use crate::inverted_index::error::{CommonIoSnafu, Error, Result};
-use crate::inverted_index::Bytes;
+use crate::Bytes;
const U64_LENGTH: usize = std::mem::size_of::<u64>();
diff --git a/src/index/src/inverted_index/create/sort/merge_stream.rs b/src/index/src/inverted_index/create/sort/merge_stream.rs
index 84debecb8ada..0e60f7d8af84 100644
--- a/src/index/src/inverted_index/create/sort/merge_stream.rs
+++ b/src/index/src/inverted_index/create/sort/merge_stream.rs
@@ -22,7 +22,7 @@ use pin_project::pin_project;
use crate::inverted_index::create::sort::SortedStream;
use crate::inverted_index::error::Result;
-use crate::inverted_index::Bytes;
+use crate::Bytes;
/// A [`Stream`] implementation that merges two sorted streams into a single sorted stream
#[pin_project]
diff --git a/src/index/src/inverted_index/create/sort_create.rs b/src/index/src/inverted_index/create/sort_create.rs
index b491c0a8b444..46c0c76269d1 100644
--- a/src/index/src/inverted_index/create/sort_create.rs
+++ b/src/index/src/inverted_index/create/sort_create.rs
@@ -22,7 +22,7 @@ use crate::inverted_index::create::sort::{SortOutput, Sorter};
use crate::inverted_index::create::InvertedIndexCreator;
use crate::inverted_index::error::{InconsistentRowCountSnafu, Result};
use crate::inverted_index::format::writer::InvertedIndexWriter;
-use crate::inverted_index::BytesRef;
+use crate::BytesRef;
type IndexName = String;
type SegmentRowCount = NonZeroUsize;
@@ -120,7 +120,7 @@ mod tests {
use crate::inverted_index::create::sort::SortedStream;
use crate::inverted_index::error::Error;
use crate::inverted_index::format::writer::MockInvertedIndexWriter;
- use crate::inverted_index::Bytes;
+ use crate::Bytes;
#[tokio::test]
async fn test_sort_index_creator_basic() {
diff --git a/src/index/src/inverted_index/format/writer.rs b/src/index/src/inverted_index/format/writer.rs
index 176b1f1561f1..f167766f6feb 100644
--- a/src/index/src/inverted_index/format/writer.rs
+++ b/src/index/src/inverted_index/format/writer.rs
@@ -23,7 +23,7 @@ use futures::Stream;
use crate::inverted_index::error::Result;
pub use crate::inverted_index::format::writer::blob::InvertedIndexBlobWriter;
-use crate::inverted_index::Bytes;
+use crate::Bytes;
pub type ValueStream = Box<dyn Stream<Item = Result<(Bytes, BitVec)>> + Send + Unpin>;
diff --git a/src/index/src/inverted_index/format/writer/blob.rs b/src/index/src/inverted_index/format/writer/blob.rs
index d53dfee855ab..ff4898d0dd68 100644
--- a/src/index/src/inverted_index/format/writer/blob.rs
+++ b/src/index/src/inverted_index/format/writer/blob.rs
@@ -103,7 +103,7 @@ mod tests {
use super::*;
use crate::inverted_index::format::reader::{InvertedIndexBlobReader, InvertedIndexReader};
- use crate::inverted_index::Bytes;
+ use crate::Bytes;
fn unpack(fst_value: u64) -> [u32; 2] {
bytemuck::cast::<u64, [u32; 2]>(fst_value)
diff --git a/src/index/src/inverted_index/format/writer/single.rs b/src/index/src/inverted_index/format/writer/single.rs
index 07d10b388016..e1018732034a 100644
--- a/src/index/src/inverted_index/format/writer/single.rs
+++ b/src/index/src/inverted_index/format/writer/single.rs
@@ -19,7 +19,7 @@ use greptime_proto::v1::index::{InvertedIndexMeta, InvertedIndexStats};
use snafu::ResultExt;
use crate::inverted_index::error::{FstCompileSnafu, FstInsertSnafu, Result, WriteSnafu};
-use crate::inverted_index::Bytes;
+use crate::Bytes;
/// `SingleIndexWriter` writes values to the blob storage for an individual inverted index
pub struct SingleIndexWriter<W, S> {
@@ -149,7 +149,7 @@ mod tests {
use super::*;
use crate::inverted_index::error::Error;
- use crate::inverted_index::Bytes;
+ use crate::Bytes;
#[tokio::test]
async fn test_single_index_writer_write_empty() {
diff --git a/src/index/src/inverted_index/search/fst_apply/keys_apply.rs b/src/index/src/inverted_index/search/fst_apply/keys_apply.rs
index 118ba1edba05..79da9b0e0c58 100644
--- a/src/index/src/inverted_index/search/fst_apply/keys_apply.rs
+++ b/src/index/src/inverted_index/search/fst_apply/keys_apply.rs
@@ -23,7 +23,8 @@ use crate::inverted_index::error::{
};
use crate::inverted_index::search::fst_apply::FstApplier;
use crate::inverted_index::search::predicate::Predicate;
-use crate::inverted_index::{Bytes, FstMap};
+use crate::inverted_index::FstMap;
+use crate::Bytes;
/// `KeysFstApplier` is responsible for applying a search using a set of predefined keys
/// against an FstMap to fetch associated values.
diff --git a/src/index/src/inverted_index/search/predicate.rs b/src/index/src/inverted_index/search/predicate.rs
index 25101e0ece5b..dbbc36127081 100644
--- a/src/index/src/inverted_index/search/predicate.rs
+++ b/src/index/src/inverted_index/search/predicate.rs
@@ -14,7 +14,7 @@
use std::collections::HashSet;
-use crate::inverted_index::Bytes;
+use crate::Bytes;
/// Enumerates types of predicates for value filtering.
#[derive(Debug, Clone, PartialEq, Eq)]
diff --git a/src/index/src/lib.rs b/src/index/src/lib.rs
index e490dbc06464..91850424adce 100644
--- a/src/index/src/lib.rs
+++ b/src/index/src/lib.rs
@@ -20,3 +20,6 @@ pub mod error;
pub mod external_provider;
pub mod fulltext_index;
pub mod inverted_index;
+
+pub type Bytes = Vec<u8>;
+pub type BytesRef<'a> = &'a [u8];
diff --git a/src/mito2/src/cache/index/inverted_index.rs b/src/mito2/src/cache/index/inverted_index.rs
index aaedcd8f89f9..1cca175f5941 100644
--- a/src/mito2/src/cache/index/inverted_index.rs
+++ b/src/mito2/src/cache/index/inverted_index.rs
@@ -131,7 +131,7 @@ mod test {
use futures::stream;
use index::inverted_index::format::reader::{InvertedIndexBlobReader, InvertedIndexReader};
use index::inverted_index::format::writer::{InvertedIndexBlobWriter, InvertedIndexWriter};
- use index::inverted_index::Bytes;
+ use index::Bytes;
use prometheus::register_int_counter_vec;
use rand::{Rng, RngCore};
diff --git a/src/mito2/src/sst/index/bloom_filter/applier.rs b/src/mito2/src/sst/index/bloom_filter/applier.rs
index 887832ce4722..2ae85e059476 100644
--- a/src/mito2/src/sst/index/bloom_filter/applier.rs
+++ b/src/mito2/src/sst/index/bloom_filter/applier.rs
@@ -441,8 +441,9 @@ mod tests {
// - column_id: 3
let region_metadata = mock_region_metadata();
let prefix = "test_bloom_filter_applier_";
+ let (d, factory) = PuffinManagerFactory::new_for_test_async(prefix).await;
let object_store = mock_object_store();
- let intm_mgr = new_intm_mgr(prefix).await;
+ let intm_mgr = new_intm_mgr(d.path().to_string_lossy()).await;
let memory_usage_threshold = Some(1024);
let file_id = FileId::random();
let region_dir = "region_dir".to_string();
@@ -459,7 +460,6 @@ mod tests {
let batch = new_batch("tag2", 10..20);
indexer.update(&batch).await.unwrap();
- let (_d, factory) = PuffinManagerFactory::new_for_test_async(prefix).await;
let puffin_manager = factory.build(object_store.clone());
let mut puffin_writer = puffin_manager.writer(&path).await.unwrap();
diff --git a/src/mito2/src/sst/index/bloom_filter/applier/builder.rs b/src/mito2/src/sst/index/bloom_filter/applier/builder.rs
index 14b55cb047e6..956c5ce38edb 100644
--- a/src/mito2/src/sst/index/bloom_filter/applier/builder.rs
+++ b/src/mito2/src/sst/index/bloom_filter/applier/builder.rs
@@ -20,7 +20,7 @@ use datafusion_expr::expr::InList;
use datafusion_expr::{BinaryExpr, Expr, Operator};
use datatypes::data_type::ConcreteDataType;
use datatypes::value::Value;
-use index::bloom_filter::Bytes;
+use index::Bytes;
use object_store::ObjectStore;
use puffin::puffin_manager::cache::PuffinMetadataCacheRef;
use snafu::{OptionExt, ResultExt};
diff --git a/src/mito2/src/sst/index/bloom_filter/creator.rs b/src/mito2/src/sst/index/bloom_filter/creator.rs
index 6676375cb610..0f97ea102711 100644
--- a/src/mito2/src/sst/index/bloom_filter/creator.rs
+++ b/src/mito2/src/sst/index/bloom_filter/creator.rs
@@ -441,8 +441,9 @@ pub(crate) mod tests {
#[tokio::test]
async fn test_bloom_filter_indexer() {
let prefix = "test_bloom_filter_indexer_";
+ let tempdir = common_test_util::temp_dir::create_temp_dir(prefix);
let object_store = mock_object_store();
- let intm_mgr = new_intm_mgr(prefix).await;
+ let intm_mgr = new_intm_mgr(tempdir.path().to_string_lossy()).await;
let region_metadata = mock_region_metadata();
let memory_usage_threshold = Some(1024);
diff --git a/src/mito2/src/sst/index/fulltext_index.rs b/src/mito2/src/sst/index/fulltext_index.rs
index 04c2e6daba9f..86d8a35b9d29 100644
--- a/src/mito2/src/sst/index/fulltext_index.rs
+++ b/src/mito2/src/sst/index/fulltext_index.rs
@@ -15,4 +15,5 @@
pub(crate) mod applier;
pub(crate) mod creator;
-const INDEX_BLOB_TYPE: &str = "greptime-fulltext-index-v1";
+const INDEX_BLOB_TYPE_TANTIVY: &str = "greptime-fulltext-index-v1";
+const INDEX_BLOB_TYPE_BLOOM: &str = "greptime-fulltext-index-bloom";
diff --git a/src/mito2/src/sst/index/fulltext_index/applier.rs b/src/mito2/src/sst/index/fulltext_index/applier.rs
index 8eda10160843..7d3230781edc 100644
--- a/src/mito2/src/sst/index/fulltext_index/applier.rs
+++ b/src/mito2/src/sst/index/fulltext_index/applier.rs
@@ -24,7 +24,7 @@ use store_api::storage::ColumnId;
use crate::error::{ApplyFulltextIndexSnafu, PuffinBuildReaderSnafu, PuffinReadBlobSnafu, Result};
use crate::metrics::INDEX_APPLY_ELAPSED;
use crate::sst::file::FileId;
-use crate::sst::index::fulltext_index::INDEX_BLOB_TYPE;
+use crate::sst::index::fulltext_index::INDEX_BLOB_TYPE_TANTIVY;
use crate::sst::index::puffin_manager::{PuffinManagerFactory, SstPuffinDir};
use crate::sst::index::TYPE_FULLTEXT_INDEX;
use crate::sst::location;
@@ -118,7 +118,7 @@ impl FulltextIndexApplier {
.reader(&file_path)
.await
.context(PuffinBuildReaderSnafu)?
- .dir(&format!("{INDEX_BLOB_TYPE}-{column_id}"))
+ .dir(&format!("{INDEX_BLOB_TYPE_TANTIVY}-{column_id}"))
.await
{
Ok(dir) => Ok(Some(dir)),
diff --git a/src/mito2/src/sst/index/fulltext_index/creator.rs b/src/mito2/src/sst/index/fulltext_index/creator.rs
index 41fa15bd7c72..3275f00a14ab 100644
--- a/src/mito2/src/sst/index/fulltext_index/creator.rs
+++ b/src/mito2/src/sst/index/fulltext_index/creator.rs
@@ -13,25 +13,26 @@
// limitations under the License.
use std::collections::HashMap;
-use std::path::PathBuf;
use common_telemetry::warn;
use datatypes::schema::FulltextAnalyzer;
-use index::fulltext_index::create::{FulltextIndexCreator, TantivyFulltextIndexCreator};
+use index::fulltext_index::create::{
+ BloomFilterFulltextIndexCreator, FulltextIndexCreator, TantivyFulltextIndexCreator,
+};
use index::fulltext_index::{Analyzer, Config};
use puffin::blob_metadata::CompressionCodec;
-use puffin::puffin_manager::{PuffinWriter, PutOptions};
+use puffin::puffin_manager::PutOptions;
use snafu::{ensure, ResultExt};
use store_api::metadata::RegionMetadataRef;
use store_api::storage::{ColumnId, ConcreteDataType, RegionId};
use crate::error::{
CastVectorSnafu, CreateFulltextCreatorSnafu, FieldTypeMismatchSnafu, FulltextFinishSnafu,
- FulltextPushTextSnafu, IndexOptionsSnafu, OperateAbortedIndexSnafu, PuffinAddBlobSnafu, Result,
+ FulltextPushTextSnafu, IndexOptionsSnafu, OperateAbortedIndexSnafu, Result,
};
use crate::read::Batch;
use crate::sst::file::FileId;
-use crate::sst::index::fulltext_index::INDEX_BLOB_TYPE;
+use crate::sst::index::fulltext_index::{INDEX_BLOB_TYPE_BLOOM, INDEX_BLOB_TYPE_TANTIVY};
use crate::sst::index::intermediate::IntermediateManager;
use crate::sst::index::puffin_manager::SstPuffinWriter;
use crate::sst::index::statistics::{ByteCount, RowCount, Statistics};
@@ -85,16 +86,17 @@ impl FulltextIndexer {
case_sensitive: options.case_sensitive,
};
+ // TODO(zhongzc): choose between the Tantivy flavor and the Bloom Filter flavor according to the fulltext options.
let creator = TantivyFulltextIndexCreator::new(&intm_path, config, mem_limit)
.await
.context(CreateFulltextCreatorSnafu)?;
+ let inner = AltFulltextCreator::Tantivy(creator);
creators.insert(
column_id,
SingleCreator {
column_id,
- inner: Box::new(creator),
- intm_path,
+ inner,
compress,
},
);
@@ -209,9 +211,7 @@ struct SingleCreator {
/// Column ID.
column_id: ColumnId,
/// Inner creator.
- inner: Box<dyn FulltextIndexCreator>,
- /// Intermediate path where the index is written to.
- intm_path: PathBuf,
+ inner: AltFulltextCreator,
/// Whether the index should be compressed.
compress: bool,
}
@@ -238,10 +238,7 @@ impl SingleCreator {
.as_string()
.context(FieldTypeMismatchSnafu)?
.unwrap_or_default();
- self.inner
- .push_text(text)
- .await
- .context(FulltextPushTextSnafu)?;
+ self.inner.push_text(text).await?;
}
}
_ => {
@@ -249,10 +246,7 @@ impl SingleCreator {
// Ensure that the number of texts pushed is the same as the number of rows in the SST,
// so that the texts are aligned with the row ids.
for _ in 0..batch.num_rows() {
- self.inner
- .push_text("")
- .await
- .context(FulltextPushTextSnafu)?;
+ self.inner.push_text("").await?;
}
}
}
@@ -261,27 +255,79 @@ impl SingleCreator {
}
async fn finish(&mut self, puffin_writer: &mut SstPuffinWriter) -> Result<ByteCount> {
- self.inner.finish().await.context(FulltextFinishSnafu)?;
-
let options = PutOptions {
compression: self.compress.then_some(CompressionCodec::Zstd),
};
-
- let key = format!("{INDEX_BLOB_TYPE}-{}", self.column_id);
- puffin_writer
- .put_dir(&key, self.intm_path.clone(), options)
+ self.inner
+ .finish(puffin_writer, &self.column_id, options)
.await
- .context(PuffinAddBlobSnafu)
}
async fn abort(&mut self) -> Result<()> {
- if let Err(err) = self.inner.finish().await {
- warn!(err; "Failed to finish fulltext index creator, col_id: {:?}, dir_path: {:?}", self.column_id, self.intm_path);
+ self.inner.abort(&self.column_id).await;
+ Ok(())
+ }
+}
+
+#[allow(dead_code, clippy::large_enum_variant)]
+/// `AltFulltextCreator` is an alternative fulltext index creator that can be either Tantivy or BloomFilter.
+enum AltFulltextCreator {
+ Tantivy(TantivyFulltextIndexCreator),
+ Bloom(BloomFilterFulltextIndexCreator),
+}
+
+impl AltFulltextCreator {
+ async fn push_text(&mut self, text: &str) -> Result<()> {
+ match self {
+ Self::Tantivy(creator) => creator.push_text(text).await.context(FulltextPushTextSnafu),
+ Self::Bloom(creator) => creator.push_text(text).await.context(FulltextPushTextSnafu),
}
- if let Err(err) = tokio::fs::remove_dir_all(&self.intm_path).await {
- warn!(err; "Failed to remove fulltext index directory, col_id: {:?}, dir_path: {:?}", self.column_id, self.intm_path);
+ }
+
+ fn memory_usage(&self) -> usize {
+ match self {
+ Self::Tantivy(creator) => creator.memory_usage(),
+ Self::Bloom(creator) => creator.memory_usage(),
+ }
+ }
+
+ async fn finish(
+ &mut self,
+ puffin_writer: &mut SstPuffinWriter,
+ column_id: &ColumnId,
+ put_options: PutOptions,
+ ) -> Result<ByteCount> {
+ match self {
+ Self::Tantivy(creator) => {
+ let key = format!("{INDEX_BLOB_TYPE_TANTIVY}-{}", column_id);
+ creator
+ .finish(puffin_writer, &key, put_options)
+ .await
+ .context(FulltextFinishSnafu)
+ }
+ Self::Bloom(creator) => {
+ let key = format!("{INDEX_BLOB_TYPE_BLOOM}-{}", column_id);
+ creator
+ .finish(puffin_writer, &key, put_options)
+ .await
+ .context(FulltextFinishSnafu)
+ }
+ }
+ }
+
+ async fn abort(&mut self, column_id: &ColumnId) {
+ match self {
+ Self::Tantivy(creator) => {
+ if let Err(err) = creator.abort().await {
+ warn!(err; "Failed to abort the fulltext index creator in the Tantivy flavor, col_id: {:?}", column_id);
+ }
+ }
+ Self::Bloom(creator) => {
+ if let Err(err) = creator.abort().await {
+ warn!(err; "Failed to abort the fulltext index creator in the Bloom Filter flavor, col_id: {:?}", column_id);
+ }
+ }
}
- Ok(())
}
}
@@ -299,7 +345,7 @@ mod tests {
use index::fulltext_index::search::RowId;
use object_store::services::Memory;
use object_store::ObjectStore;
- use puffin::puffin_manager::PuffinManager;
+ use puffin::puffin_manager::{PuffinManager, PuffinWriter};
use store_api::metadata::{ColumnMetadata, RegionMetadataBuilder, RegionMetadataRef};
use store_api::storage::{ConcreteDataType, RegionId};
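// --- Editor's illustrative sketch (not part of the commit above) ---
// The shape of the AltFulltextCreator enum dispatch: instead of holding a
// `Box<dyn FulltextIndexCreator>`, each method matches on the variant and
// forwards to the concrete creator, keeping dispatch static. The types below
// are hypothetical stand-ins for the Tantivy and Bloom Filter creators.
struct TantivyLike { mem: usize }
struct BloomLike { mem: usize }

enum AltCreator {
    Tantivy(TantivyLike),
    Bloom(BloomLike),
}

impl AltCreator {
    // The match is resolved per variant at compile time; no vtable is involved.
    fn memory_usage(&self) -> usize {
        match self {
            AltCreator::Tantivy(c) => c.mem,
            AltCreator::Bloom(c) => c.mem,
        }
    }
}

fn main() {
    let creators = [
        AltCreator::Tantivy(TantivyLike { mem: 64 }),
        AltCreator::Bloom(BloomLike { mem: 8 }),
    ];
    let total: usize = creators.iter().map(AltCreator::memory_usage).sum();
    assert_eq!(total, 72);
}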
diff --git a/src/mito2/src/sst/index/inverted_index/applier/builder/comparison.rs b/src/mito2/src/sst/index/inverted_index/applier/builder/comparison.rs
index 138b15b82eb9..b09ac93f71f1 100644
--- a/src/mito2/src/sst/index/inverted_index/applier/builder/comparison.rs
+++ b/src/mito2/src/sst/index/inverted_index/applier/builder/comparison.rs
@@ -14,7 +14,7 @@
use datafusion_expr::{Expr as DfExpr, Operator};
use index::inverted_index::search::predicate::{Bound, Predicate, Range, RangePredicate};
-use index::inverted_index::Bytes;
+use index::Bytes;
use crate::error::Result;
use crate::sst::index::inverted_index::applier::builder::InvertedIndexApplierBuilder;
diff --git a/src/mito2/src/sst/index/inverted_index/applier/builder/eq_list.rs b/src/mito2/src/sst/index/inverted_index/applier/builder/eq_list.rs
index 35a5caad56a6..765d87250019 100644
--- a/src/mito2/src/sst/index/inverted_index/applier/builder/eq_list.rs
+++ b/src/mito2/src/sst/index/inverted_index/applier/builder/eq_list.rs
@@ -17,7 +17,7 @@ use std::collections::HashSet;
use datafusion_expr::{BinaryExpr, Expr as DfExpr, Operator};
use datatypes::data_type::ConcreteDataType;
use index::inverted_index::search::predicate::{InListPredicate, Predicate};
-use index::inverted_index::Bytes;
+use index::Bytes;
use crate::error::Result;
use crate::sst::index::inverted_index::applier::builder::InvertedIndexApplierBuilder;
diff --git a/src/puffin/Cargo.toml b/src/puffin/Cargo.toml
index 7116bbef52b9..078e3aebeb61 100644
--- a/src/puffin/Cargo.toml
+++ b/src/puffin/Cargo.toml
@@ -20,6 +20,7 @@ common-error.workspace = true
common-macro.workspace = true
common-runtime.workspace = true
common-telemetry.workspace = true
+common-test-util.workspace = true
derive_builder.workspace = true
futures.workspace = true
lz4_flex = "0.11"
@@ -36,4 +37,3 @@ uuid.workspace = true
[dev-dependencies]
common-base = { workspace = true, features = ["testing"] }
-common-test-util.workspace = true
diff --git a/src/puffin/src/puffin_manager/file_accessor.rs b/src/puffin/src/puffin_manager/file_accessor.rs
index 351423b054e7..193aa037f530 100644
--- a/src/puffin/src/puffin_manager/file_accessor.rs
+++ b/src/puffin/src/puffin_manager/file_accessor.rs
@@ -13,8 +13,11 @@
// limitations under the License.
use async_trait::async_trait;
-use common_base::range_read::SizeAwareRangeReader;
+use common_base::range_read::{FileReader, SizeAwareRangeReader};
+use common_test_util::temp_dir::{create_temp_dir, TempDir};
use futures::AsyncWrite;
+use tokio::fs::File;
+use tokio_util::compat::{Compat, TokioAsyncReadCompatExt};
use crate::error::Result;
@@ -31,3 +34,37 @@ pub trait PuffinFileAccessor: Send + Sync + 'static {
/// Creates a writer for the given puffin file.
async fn writer(&self, puffin_file_name: &str) -> Result<Self::Writer>;
}
+
+pub struct MockFileAccessor {
+ tempdir: TempDir,
+}
+
+impl MockFileAccessor {
+ pub fn new(prefix: &str) -> Self {
+ let tempdir = create_temp_dir(prefix);
+ Self { tempdir }
+ }
+}
+
+#[async_trait]
+impl PuffinFileAccessor for MockFileAccessor {
+ type Reader = FileReader;
+ type Writer = Compat<File>;
+
+ async fn reader(&self, puffin_file_name: &str) -> Result<Self::Reader> {
+ Ok(FileReader::new(self.tempdir.path().join(puffin_file_name))
+ .await
+ .unwrap())
+ }
+
+ async fn writer(&self, puffin_file_name: &str) -> Result<Self::Writer> {
+ let p = self.tempdir.path().join(puffin_file_name);
+ if let Some(p) = p.parent() {
+ if !tokio::fs::try_exists(p).await.unwrap() {
+ tokio::fs::create_dir_all(p).await.unwrap();
+ }
+ }
+ let f = tokio::fs::File::create(p).await.unwrap();
+ Ok(f.compat())
+ }
+}
diff --git a/src/puffin/src/puffin_manager/tests.rs b/src/puffin/src/puffin_manager/tests.rs
index c4057a5f5bcb..23756aec646c 100644
--- a/src/puffin/src/puffin_manager/tests.rs
+++ b/src/puffin/src/puffin_manager/tests.rs
@@ -15,16 +15,12 @@
use std::collections::HashMap;
use std::sync::Arc;
-use async_trait::async_trait;
-use common_base::range_read::{FileReader, RangeReader};
+use common_base::range_read::RangeReader;
use common_test_util::temp_dir::{create_temp_dir, TempDir};
-use tokio::fs::File;
use tokio::io::AsyncReadExt as _;
-use tokio_util::compat::{Compat, TokioAsyncReadCompatExt};
use crate::blob_metadata::CompressionCodec;
-use crate::error::Result;
-use crate::puffin_manager::file_accessor::PuffinFileAccessor;
+use crate::puffin_manager::file_accessor::MockFileAccessor;
use crate::puffin_manager::fs_puffin_manager::FsPuffinManager;
use crate::puffin_manager::stager::BoundedStager;
use crate::puffin_manager::{
@@ -371,37 +367,3 @@ async fn check_dir(
assert_eq!(buf, *raw_data);
}
}
-
-pub struct MockFileAccessor {
- tempdir: TempDir,
-}
-
-impl MockFileAccessor {
- pub fn new(prefix: &str) -> Self {
- let tempdir = create_temp_dir(prefix);
- Self { tempdir }
- }
-}
-
-#[async_trait]
-impl PuffinFileAccessor for MockFileAccessor {
- type Reader = FileReader;
- type Writer = Compat<File>;
-
- async fn reader(&self, puffin_file_name: &str) -> Result<Self::Reader> {
- Ok(FileReader::new(self.tempdir.path().join(puffin_file_name))
- .await
- .unwrap())
- }
-
- async fn writer(&self, puffin_file_name: &str) -> Result<Self::Writer> {
- let p = self.tempdir.path().join(puffin_file_name);
- if let Some(p) = p.parent() {
- if !tokio::fs::try_exists(p).await.unwrap() {
- tokio::fs::create_dir_all(p).await.unwrap();
- }
- }
- let f = tokio::fs::File::create(p).await.unwrap();
- Ok(f.compat())
- }
-}
|
feat
|
bloom filter as fulltext index v2 (Part 1) (#5406)
|
803780030d4d3ef5524f88580f2750d98fdeabcf
|
2024-08-05 12:35:14
|
LFC
|
fix: too large shadow-rs consts (#4506)
| false
|
diff --git a/src/common/version/build.rs b/src/common/version/build.rs
index 6bf44d026ca5..9a57b42afa9d 100644
--- a/src/common/version/build.rs
+++ b/src/common/version/build.rs
@@ -12,9 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::collections::BTreeSet;
use std::env;
use build_data::{format_timestamp, get_source_time};
+use shadow_rs::{CARGO_METADATA, CARGO_TREE};
fn main() -> shadow_rs::SdResult<()> {
println!("cargo:rerun-if-changed=.git/refs/heads");
@@ -33,6 +35,10 @@ fn main() -> shadow_rs::SdResult<()> {
// made as a submodule in another repo.
let src_path = env::var("CARGO_WORKSPACE_DIR").or_else(|_| env::var("CARGO_MANIFEST_DIR"))?;
let out_path = env::var("OUT_DIR")?;
- let _ = shadow_rs::Shadow::build_with(src_path, out_path, Default::default())?;
+ let _ = shadow_rs::Shadow::build_with(
+ src_path,
+ out_path,
+ BTreeSet::from([CARGO_METADATA, CARGO_TREE]),
+ )?;
Ok(())
}
|
fix
|
too large shadow-rs consts (#4506)
|
1ea43da9ea90469d8a34d18995a373b8e16a279c
|
2024-07-31 09:09:39
|
Jeremyhi
|
feat: default export catalog name (#4464)
| false
|
diff --git a/src/cmd/src/cli/export.rs b/src/cmd/src/cli/export.rs
index 1328dd756014..171a0f2fa7fe 100644
--- a/src/cmd/src/cli/export.rs
+++ b/src/cmd/src/cli/export.rs
@@ -21,11 +21,12 @@ use base64::engine::general_purpose;
use base64::Engine;
use clap::{Parser, ValueEnum};
use client::DEFAULT_SCHEMA_NAME;
+use common_catalog::consts::DEFAULT_CATALOG_NAME;
use common_telemetry::{debug, error, info, warn};
use serde_json::Value;
use servers::http::greptime_result_v1::GreptimedbV1Response;
use servers::http::GreptimeQueryOutput;
-use snafu::{OptionExt, ResultExt};
+use snafu::ResultExt;
use tokio::fs::File;
use tokio::io::{AsyncWriteExt, BufWriter};
use tokio::sync::Semaphore;
@@ -34,8 +35,7 @@ use tracing_appender::non_blocking::WorkerGuard;
use crate::cli::{Instance, Tool};
use crate::error::{
- EmptyResultSnafu, Error, FileIoSnafu, HttpQuerySqlSnafu, InvalidDatabaseNameSnafu, Result,
- SerdeJsonSnafu,
+ EmptyResultSnafu, Error, FileIoSnafu, HttpQuerySqlSnafu, Result, SerdeJsonSnafu,
};
type TableReference = (String, String, String);
@@ -539,11 +539,11 @@ impl Tool for Export {
/// Split at `-`.
fn split_database(database: &str) -> Result<(String, Option<String>)> {
- let (catalog, schema) = database
- .split_once('-')
- .with_context(|| InvalidDatabaseNameSnafu {
- database: database.to_string(),
- })?;
+ let (catalog, schema) = match database.split_once('-') {
+ Some((catalog, schema)) => (catalog, schema),
+ None => (DEFAULT_CATALOG_NAME, database),
+ };
+
if schema == "*" {
Ok((catalog.to_string(), None))
} else {
@@ -558,10 +558,26 @@ mod tests {
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_telemetry::logging::LoggingOptions;
+ use crate::cli::export::split_database;
use crate::error::Result as CmdResult;
use crate::options::GlobalOptions;
use crate::{cli, standalone, App};
+ #[test]
+ fn test_split_database() {
+ let result = split_database("catalog-schema").unwrap();
+ assert_eq!(result, ("catalog".to_string(), Some("schema".to_string())));
+
+ let result = split_database("schema").unwrap();
+ assert_eq!(result, ("greptime".to_string(), Some("schema".to_string())));
+
+ let result = split_database("catalog-*").unwrap();
+ assert_eq!(result, ("catalog".to_string(), None));
+
+ let result = split_database("*").unwrap();
+ assert_eq!(result, ("greptime".to_string(), None));
+ }
+
#[tokio::test(flavor = "multi_thread")]
async fn test_export_create_table_with_quoted_names() -> CmdResult<()> {
let output_dir = tempfile::tempdir().unwrap();
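// --- Editor's illustrative sketch (not part of the commit above) ---
// The fallback-splitting behaviour introduced for `split_database`, reduced to
// plain std Rust: when the input has no '-' the default catalog is assumed,
// and "*" selects every schema. The constant name below is a stand-in for
// DEFAULT_CATALOG_NAME.
const DEFAULT_CATALOG: &str = "greptime";

fn split_database(database: &str) -> (String, Option<String>) {
    let (catalog, schema) = match database.split_once('-') {
        Some((catalog, schema)) => (catalog, schema),
        None => (DEFAULT_CATALOG, database),
    };
    if schema == "*" {
        (catalog.to_string(), None)
    } else {
        (catalog.to_string(), Some(schema.to_string()))
    }
}

fn main() {
    assert_eq!(split_database("catalog-schema"), ("catalog".to_string(), Some("schema".to_string())));
    assert_eq!(split_database("schema"), ("greptime".to_string(), Some("schema".to_string())));
    assert_eq!(split_database("catalog-*"), ("catalog".to_string(), None));
    assert_eq!(split_database("*"), ("greptime".to_string(), None));
}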
diff --git a/src/cmd/src/error.rs b/src/cmd/src/error.rs
index 2e4e252cc4d8..db347df9e5e6 100644
--- a/src/cmd/src/error.rs
+++ b/src/cmd/src/error.rs
@@ -298,13 +298,6 @@ pub enum Error {
error: std::io::Error,
},
- #[snafu(display("Invalid database name: {}", database))]
- InvalidDatabaseName {
- #[snafu(implicit)]
- location: Location,
- database: String,
- },
-
#[snafu(display("Failed to create directory {}", dir))]
CreateDir {
dir: String,
@@ -384,8 +377,7 @@ impl ErrorExt for Error {
| Error::ConnectEtcd { .. }
| Error::NotDataFromOutput { .. }
| Error::CreateDir { .. }
- | Error::EmptyResult { .. }
- | Error::InvalidDatabaseName { .. } => StatusCode::InvalidArguments,
+ | Error::EmptyResult { .. } => StatusCode::InvalidArguments,
Error::StartProcedureManager { source, .. }
| Error::StopProcedureManager { source, .. } => source.status_code(),
|
feat
|
default export catalog name (#4464)
|
d275cdd570a3579e695c16b60f2c13cff9f94747
|
2024-10-30 10:09:48
|
Lei, HUANG
|
feat: Support altering table TTL (#4848)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 0d8058f6a9ab..264ceb2de22b 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2149,6 +2149,7 @@ dependencies = [
"paste",
"prost 0.12.6",
"snafu 0.8.5",
+ "store-api",
"table",
]
@@ -4531,7 +4532,7 @@ dependencies = [
[[package]]
name = "greptime-proto"
version = "0.1.0"
-source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=9faf45e8bd83cba106ddfb09bba85784bf9ade2a#9faf45e8bd83cba106ddfb09bba85784bf9ade2a"
+source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=255f87a3318ace3f88a67f76995a0e14910983f4#255f87a3318ace3f88a67f76995a0e14910983f4"
dependencies = [
"prost 0.12.6",
"serde",
@@ -11497,6 +11498,7 @@ dependencies = [
"datatypes",
"derive_builder 0.12.0",
"futures",
+ "humantime",
"serde",
"serde_json",
"snafu 0.8.5",
diff --git a/Cargo.toml b/Cargo.toml
index f06c83a458ba..d89289b30470 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -121,7 +121,7 @@ etcd-client = { version = "0.13" }
fst = "0.4.7"
futures = "0.3"
futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "9faf45e8bd83cba106ddfb09bba85784bf9ade2a" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "255f87a3318ace3f88a67f76995a0e14910983f4" }
humantime = "2.1"
humantime-serde = "1.1"
itertools = "0.10"
diff --git a/src/common/grpc-expr/Cargo.toml b/src/common/grpc-expr/Cargo.toml
index 246a999a8499..650b7263258d 100644
--- a/src/common/grpc-expr/Cargo.toml
+++ b/src/common/grpc-expr/Cargo.toml
@@ -18,6 +18,7 @@ common-time.workspace = true
datatypes.workspace = true
prost.workspace = true
snafu.workspace = true
+store-api.workspace = true
table.workspace = true
[dev-dependencies]
diff --git a/src/common/grpc-expr/src/alter.rs b/src/common/grpc-expr/src/alter.rs
index ac49069412cd..5dcc861e9d8a 100644
--- a/src/common/grpc-expr/src/alter.rs
+++ b/src/common/grpc-expr/src/alter.rs
@@ -22,12 +22,13 @@ use api::v1::{
use common_query::AddColumnLocation;
use datatypes::schema::{ColumnSchema, RawSchema};
use snafu::{ensure, OptionExt, ResultExt};
+use store_api::region_request::ChangeOption;
use table::metadata::TableId;
use table::requests::{AddColumnRequest, AlterKind, AlterTableRequest, ChangeColumnTypeRequest};
use crate::error::{
- InvalidColumnDefSnafu, MissingFieldSnafu, MissingTimestampColumnSnafu, Result,
- UnknownLocationTypeSnafu,
+ InvalidChangeTableOptionRequestSnafu, InvalidColumnDefSnafu, MissingFieldSnafu,
+ MissingTimestampColumnSnafu, Result, UnknownLocationTypeSnafu,
};
const LOCATION_TYPE_FIRST: i32 = LocationType::First as i32;
@@ -92,6 +93,15 @@ pub fn alter_expr_to_request(table_id: TableId, expr: AlterExpr) -> Result<Alter
Kind::RenameTable(RenameTable { new_table_name }) => {
AlterKind::RenameTable { new_table_name }
}
+ Kind::ChangeTableOptions(api::v1::ChangeTableOptions {
+ change_table_options,
+ }) => AlterKind::ChangeTableOptions {
+ options: change_table_options
+ .iter()
+ .map(ChangeOption::try_from)
+ .collect::<std::result::Result<Vec<_>, _>>()
+ .context(InvalidChangeTableOptionRequestSnafu)?,
+ },
};
let request = AlterTableRequest {
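// --- Editor's illustrative sketch (not part of the commit above) ---
// The collect-into-Result pattern used above to convert every protobuf
// ChangeTableOption in one pass, failing on the first invalid entry. The
// option type and the "ttl" parsing below are simplified stand-ins; the real
// code accepts humantime strings such as "1d".
use std::time::Duration;

#[derive(Debug, PartialEq)]
enum ChangeOption {
    Ttl(Duration),
}

fn parse_option(key: &str, value: &str) -> Result<ChangeOption, String> {
    match key {
        // Plain seconds keep the sketch dependency-free.
        "ttl" => value
            .parse::<u64>()
            .map(|secs| ChangeOption::Ttl(Duration::from_secs(secs)))
            .map_err(|e| format!("invalid ttl: {e}")),
        other => Err(format!("unknown table option: {other}")),
    }
}

fn main() {
    let requested = [("ttl", "86400")];
    // `collect` over an iterator of Results yields Ok(Vec<_>) only if every item is Ok.
    let parsed: Result<Vec<_>, _> = requested.iter().map(|&(k, v)| parse_option(k, v)).collect();
    assert_eq!(parsed, Ok(vec![ChangeOption::Ttl(Duration::from_secs(86_400))]));
}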
diff --git a/src/common/grpc-expr/src/error.rs b/src/common/grpc-expr/src/error.rs
index f025c4d5a5b7..5d59c469831a 100644
--- a/src/common/grpc-expr/src/error.rs
+++ b/src/common/grpc-expr/src/error.rs
@@ -19,6 +19,7 @@ use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use snafu::{Location, Snafu};
+use store_api::metadata::MetadataError;
#[derive(Snafu)]
#[snafu(visibility(pub))]
@@ -118,6 +119,12 @@ pub enum Error {
#[snafu(implicit)]
location: Location,
},
+
+ #[snafu(display("Invalid change table option request"))]
+ InvalidChangeTableOptionRequest {
+ #[snafu(source)]
+ error: MetadataError,
+ },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -141,6 +148,7 @@ impl ErrorExt for Error {
Error::UnknownColumnDataType { .. } | Error::InvalidFulltextColumnType { .. } => {
StatusCode::InvalidArguments
}
+ Error::InvalidChangeTableOptionRequest { .. } => StatusCode::InvalidArguments,
}
}
diff --git a/src/common/meta/src/ddl/alter_logical_tables/update_metadata.rs b/src/common/meta/src/ddl/alter_logical_tables/update_metadata.rs
index 33be226dda59..7338968153c9 100644
--- a/src/common/meta/src/ddl/alter_logical_tables/update_metadata.rs
+++ b/src/common/meta/src/ddl/alter_logical_tables/update_metadata.rs
@@ -43,10 +43,10 @@ impl AlterLogicalTablesProcedure {
&self.data.physical_columns,
);
- // Updates physical table's metadata
+ // Updates physical table's metadata, and we don't need to touch per-region settings.
self.context
.table_metadata_manager
- .update_table_info(physical_table_info, new_raw_table_info)
+ .update_table_info(physical_table_info, None, new_raw_table_info)
.await?;
Ok(())
diff --git a/src/common/meta/src/ddl/alter_table.rs b/src/common/meta/src/ddl/alter_table.rs
index 8bece74e7859..ae0d6dc6bd4f 100644
--- a/src/common/meta/src/ddl/alter_table.rs
+++ b/src/common/meta/src/ddl/alter_table.rs
@@ -43,10 +43,10 @@ use crate::ddl::DdlContext;
use crate::error::{Error, Result};
use crate::instruction::CacheIdent;
use crate::key::table_info::TableInfoValue;
-use crate::key::DeserializedValueWithBytes;
+use crate::key::{DeserializedValueWithBytes, RegionDistribution};
use crate::lock_key::{CatalogLock, SchemaLock, TableLock, TableNameLock};
use crate::rpc::ddl::AlterTableTask;
-use crate::rpc::router::{find_leader_regions, find_leaders};
+use crate::rpc::router::{find_leader_regions, find_leaders, region_distribution};
use crate::{metrics, ClusterId};
/// The alter table procedure
@@ -101,6 +101,9 @@ impl AlterTableProcedure {
.get_physical_table_route(table_id)
.await?;
+ self.data.region_distribution =
+ Some(region_distribution(&physical_table_route.region_routes));
+
let leaders = find_leaders(&physical_table_route.region_routes);
let mut alter_region_tasks = Vec::with_capacity(leaders.len());
@@ -161,8 +164,14 @@ impl AlterTableProcedure {
self.on_update_metadata_for_rename(new_table_name.to_string(), table_info_value)
.await?;
} else {
- self.on_update_metadata_for_alter(new_info.into(), table_info_value)
- .await?;
+ // region distribution is set in submit_alter_region_requests
+ let region_distribution = self.data.region_distribution.as_ref().unwrap().clone();
+ self.on_update_metadata_for_alter(
+ new_info.into(),
+ region_distribution,
+ table_info_value,
+ )
+ .await?;
}
info!("Updated table metadata for table {table_ref}, table_id: {table_id}");
@@ -271,6 +280,8 @@ pub struct AlterTableData {
table_id: TableId,
/// Table info value before alteration.
table_info_value: Option<DeserializedValueWithBytes<TableInfoValue>>,
+ /// Region distribution for table in case we need to update region options.
+ region_distribution: Option<RegionDistribution>,
}
impl AlterTableData {
@@ -281,6 +292,7 @@ impl AlterTableData {
table_id,
cluster_id,
table_info_value: None,
+ region_distribution: None,
}
}
diff --git a/src/common/meta/src/ddl/alter_table/region_request.rs b/src/common/meta/src/ddl/alter_table/region_request.rs
index 07563603954f..41bd1481c9bc 100644
--- a/src/common/meta/src/ddl/alter_table/region_request.rs
+++ b/src/common/meta/src/ddl/alter_table/region_request.rs
@@ -106,6 +106,7 @@ fn create_proto_alter_kind(
})))
}
Kind::RenameTable(_) => Ok(None),
+ Kind::ChangeTableOptions(v) => Ok(Some(alter_request::Kind::ChangeTableOptions(v.clone()))),
}
}
diff --git a/src/common/meta/src/ddl/alter_table/update_metadata.rs b/src/common/meta/src/ddl/alter_table/update_metadata.rs
index 6ef6e2e7fc88..6d27dabe2718 100644
--- a/src/common/meta/src/ddl/alter_table/update_metadata.rs
+++ b/src/common/meta/src/ddl/alter_table/update_metadata.rs
@@ -20,7 +20,7 @@ use table::requests::AlterKind;
use crate::ddl::alter_table::AlterTableProcedure;
use crate::error::{self, Result};
use crate::key::table_info::TableInfoValue;
-use crate::key::DeserializedValueWithBytes;
+use crate::key::{DeserializedValueWithBytes, RegionDistribution};
impl AlterTableProcedure {
/// Builds new_meta
@@ -51,7 +51,9 @@ impl AlterTableProcedure {
AlterKind::RenameTable { new_table_name } => {
new_info.name = new_table_name.to_string();
}
- AlterKind::DropColumns { .. } | AlterKind::ChangeColumnTypes { .. } => {}
+ AlterKind::DropColumns { .. }
+ | AlterKind::ChangeColumnTypes { .. }
+ | AlterKind::ChangeTableOptions { .. } => {}
}
Ok(new_info)
@@ -75,11 +77,16 @@ impl AlterTableProcedure {
pub(crate) async fn on_update_metadata_for_alter(
&self,
new_table_info: RawTableInfo,
+ region_distribution: RegionDistribution,
current_table_info_value: &DeserializedValueWithBytes<TableInfoValue>,
) -> Result<()> {
let table_metadata_manager = &self.context.table_metadata_manager;
table_metadata_manager
- .update_table_info(current_table_info_value, new_table_info)
+ .update_table_info(
+ current_table_info_value,
+ Some(region_distribution),
+ new_table_info,
+ )
.await?;
Ok(())
diff --git a/src/common/meta/src/ddl/create_logical_tables/update_metadata.rs b/src/common/meta/src/ddl/create_logical_tables/update_metadata.rs
index 0309a046138f..75d235812f83 100644
--- a/src/common/meta/src/ddl/create_logical_tables/update_metadata.rs
+++ b/src/common/meta/src/ddl/create_logical_tables/update_metadata.rs
@@ -58,10 +58,10 @@ impl CreateLogicalTablesProcedure {
&new_table_info.name,
);
- // Update physical table's metadata
+ // Update the physical table's metadata; we don't need to touch per-region settings.
self.context
.table_metadata_manager
- .update_table_info(&physical_table_info, new_table_info)
+ .update_table_info(&physical_table_info, None, new_table_info)
.await?;
// Invalid physical table cache
diff --git a/src/common/meta/src/ddl/test_util/datanode_handler.rs b/src/common/meta/src/ddl/test_util/datanode_handler.rs
index 5cb7d4a0f20e..b82609b9859b 100644
--- a/src/common/meta/src/ddl/test_util/datanode_handler.rs
+++ b/src/common/meta/src/ddl/test_util/datanode_handler.rs
@@ -29,7 +29,10 @@ use crate::test_util::MockDatanodeHandler;
#[async_trait::async_trait]
impl MockDatanodeHandler for () {
async fn handle(&self, _peer: &Peer, _request: RegionRequest) -> Result<RegionResponse> {
- unreachable!()
+ Ok(RegionResponse {
+ affected_rows: 0,
+ extensions: Default::default(),
+ })
}
async fn handle_query(
diff --git a/src/common/meta/src/ddl/tests/alter_table.rs b/src/common/meta/src/ddl/tests/alter_table.rs
index 36a1ff0ecece..7ede16b125fe 100644
--- a/src/common/meta/src/ddl/tests/alter_table.rs
+++ b/src/common/meta/src/ddl/tests/alter_table.rs
@@ -19,13 +19,14 @@ use std::sync::Arc;
use api::v1::alter_expr::Kind;
use api::v1::region::{region_request, RegionRequest};
use api::v1::{
- AddColumn, AddColumns, AlterExpr, ColumnDataType, ColumnDef as PbColumnDef, DropColumn,
- DropColumns, SemanticType,
+ AddColumn, AddColumns, AlterExpr, ChangeTableOption, ChangeTableOptions, ColumnDataType,
+ ColumnDef as PbColumnDef, DropColumn, DropColumns, SemanticType,
};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use store_api::storage::RegionId;
+use table::requests::TTL_KEY;
use tokio::sync::mpsc::{self};
use crate::ddl::alter_table::AlterTableProcedure;
@@ -34,6 +35,7 @@ use crate::ddl::test_util::create_table::test_create_table_task;
use crate::ddl::test_util::datanode_handler::{
DatanodeWatcher, RequestOutdatedErrorDatanodeHandler,
};
+use crate::key::datanode_table::DatanodeTableKey;
use crate::key::table_name::TableNameKey;
use crate::key::table_route::TableRouteValue;
use crate::peer::Peer;
@@ -293,12 +295,21 @@ async fn test_on_update_metadata_add_columns() {
let table_name = "foo";
let table_id = 1024;
let task = test_create_table_task(table_name, table_id);
+
+ let region_id = RegionId::new(table_id, 0);
+ let mock_table_routes = vec![RegionRoute {
+ region: Region::new_test(region_id),
+ leader_peer: Some(Peer::default()),
+ follower_peers: vec![],
+ leader_state: None,
+ leader_down_since: None,
+ }];
// Puts a value to table name key.
ddl_context
.table_metadata_manager
.create_table_metadata(
task.table_info.clone(),
- TableRouteValue::physical(vec![]),
+ TableRouteValue::physical(mock_table_routes),
HashMap::new(),
)
.await
@@ -326,6 +337,7 @@ async fn test_on_update_metadata_add_columns() {
let mut procedure =
AlterTableProcedure::new(cluster_id, table_id, task, ddl_context.clone()).unwrap();
procedure.on_prepare().await.unwrap();
+ procedure.submit_alter_region_requests().await.unwrap();
procedure.on_update_metadata().await.unwrap();
let table_info = ddl_context
@@ -343,3 +355,76 @@ async fn test_on_update_metadata_add_columns() {
table_info.meta.next_column_id
);
}
+
+#[tokio::test]
+async fn test_on_update_table_options() {
+ let node_manager = Arc::new(MockDatanodeManager::new(()));
+ let ddl_context = new_ddl_context(node_manager);
+ let cluster_id = 1;
+ let table_name = "foo";
+ let table_id = 1024;
+ let task = test_create_table_task(table_name, table_id);
+
+ let region_id = RegionId::new(table_id, 0);
+ let mock_table_routes = vec![RegionRoute {
+ region: Region::new_test(region_id),
+ leader_peer: Some(Peer::default()),
+ follower_peers: vec![],
+ leader_state: None,
+ leader_down_since: None,
+ }];
+ // Puts a value to table name key.
+ ddl_context
+ .table_metadata_manager
+ .create_table_metadata(
+ task.table_info.clone(),
+ TableRouteValue::physical(mock_table_routes),
+ HashMap::new(),
+ )
+ .await
+ .unwrap();
+
+ let task = AlterTableTask {
+ alter_table: AlterExpr {
+ catalog_name: DEFAULT_CATALOG_NAME.to_string(),
+ schema_name: DEFAULT_SCHEMA_NAME.to_string(),
+ table_name: table_name.to_string(),
+ kind: Some(Kind::ChangeTableOptions(ChangeTableOptions {
+ change_table_options: vec![ChangeTableOption {
+ key: TTL_KEY.to_string(),
+ value: "1d".to_string(),
+ }],
+ })),
+ },
+ };
+ let mut procedure =
+ AlterTableProcedure::new(cluster_id, table_id, task, ddl_context.clone()).unwrap();
+ procedure.on_prepare().await.unwrap();
+ procedure.submit_alter_region_requests().await.unwrap();
+ procedure.on_update_metadata().await.unwrap();
+
+ let table_info = ddl_context
+ .table_metadata_manager
+ .table_info_manager()
+ .get(table_id)
+ .await
+ .unwrap()
+ .unwrap()
+ .into_inner()
+ .table_info;
+
+ let datanode_key = DatanodeTableKey::new(0, table_id);
+ let region_info = ddl_context
+ .table_metadata_manager
+ .datanode_table_manager()
+ .get(&datanode_key)
+ .await
+ .unwrap()
+ .unwrap()
+ .region_info;
+
+ assert_eq!(
+ region_info.region_options,
+ HashMap::from(&table_info.meta.options)
+ );
+}
diff --git a/src/common/meta/src/error.rs b/src/common/meta/src/error.rs
index 0e7709df0b68..51dc74e64614 100644
--- a/src/common/meta/src/error.rs
+++ b/src/common/meta/src/error.rs
@@ -652,6 +652,18 @@ pub enum Error {
#[snafu(implicit)]
location: Location,
},
+
+ #[snafu(display(
+ "Datanode table info not found, table id: {}, datanode id: {}",
+ table_id,
+ datanode_id
+ ))]
+ DatanodeTableInfoNotFound {
+ datanode_id: DatanodeId,
+ table_id: TableId,
+ #[snafu(implicit)]
+ location: Location,
+ },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -752,6 +764,7 @@ impl ErrorExt for Error {
PostgresExecution { .. } => StatusCode::Internal,
#[cfg(feature = "pg_kvbackend")]
ConnectPostgres { .. } => StatusCode::Internal,
+ Error::DatanodeTableInfoNotFound { .. } => StatusCode::Internal,
}
}
diff --git a/src/common/meta/src/key.rs b/src/common/meta/src/key.rs
index 0f703b9430a3..39ca065393fc 100644
--- a/src/common/meta/src/key.rs
+++ b/src/common/meta/src/key.rs
@@ -133,7 +133,6 @@ use self::flow::flow_name::FlowNameValue;
use self::schema_name::{SchemaManager, SchemaNameKey, SchemaNameValue};
use self::table_route::{TableRouteManager, TableRouteValue};
use self::tombstone::TombstoneManager;
-use crate::ddl::utils::region_storage_path;
use crate::error::{self, Result, SerdeJsonSnafu};
use crate::key::node_address::NodeAddressValue;
use crate::key::table_route::TableRouteKey;
@@ -593,8 +592,6 @@ impl TableMetadataManager {
table_info.meta.region_numbers = region_numbers;
let table_id = table_info.ident.table_id;
let engine = table_info.meta.engine.clone();
- let region_storage_path =
- region_storage_path(&table_info.catalog_name, &table_info.schema_name);
// Creates table name.
let table_name = TableNameKey::new(
@@ -606,7 +603,7 @@ impl TableMetadataManager {
.table_name_manager()
.build_create_txn(&table_name, table_id)?;
- let region_options = (&table_info.meta.options).into();
+ let region_options = table_info.to_region_options();
// Creates table info.
let table_info_value = TableInfoValue::new(table_info);
let (create_table_info_txn, on_create_table_info_failure) = self
@@ -625,6 +622,7 @@ impl TableMetadataManager {
]);
if let TableRouteValue::Physical(x) = &table_route_value {
+ let region_storage_path = table_info_value.region_storage_path();
let create_datanode_table_txn = self.datanode_table_manager().build_create_txn(
table_id,
&engine,
@@ -926,13 +924,15 @@ impl TableMetadataManager {
}
/// Updates table info and returns an error if different metadata exists.
+ /// It also cascades the update to the redundant table options of every region
+ /// if region_distribution is present.
pub async fn update_table_info(
&self,
current_table_info_value: &DeserializedValueWithBytes<TableInfoValue>,
+ region_distribution: Option<RegionDistribution>,
new_table_info: RawTableInfo,
) -> Result<()> {
let table_id = current_table_info_value.table_info.ident.table_id;
-
let new_table_info_value = current_table_info_value.update(new_table_info);
// Updates table info.
@@ -940,8 +940,19 @@ impl TableMetadataManager {
.table_info_manager()
.build_update_txn(table_id, current_table_info_value, &new_table_info_value)?;
- let mut r = self.kv_backend.txn(update_table_info_txn).await?;
+ let txn = if let Some(region_distribution) = region_distribution {
+ // region options induced from table info.
+ let new_region_options = new_table_info_value.table_info.to_region_options();
+ let update_datanode_table_options_txn = self
+ .datanode_table_manager
+ .build_update_table_options_txn(table_id, region_distribution, new_region_options)
+ .await?;
+ Txn::merge_all([update_table_info_txn, update_datanode_table_options_txn])
+ } else {
+ update_table_info_txn
+ };
+ let mut r = self.kv_backend.txn(txn).await?;
// Checks whether metadata was already updated.
if !r.succeeded {
let mut set = TxnOpGetResponseSet::from(&mut r.responses);
@@ -1669,12 +1680,12 @@ mod tests {
DeserializedValueWithBytes::from_inner(TableInfoValue::new(table_info.clone()));
// should be ok.
table_metadata_manager
- .update_table_info(&current_table_info_value, new_table_info.clone())
+ .update_table_info(&current_table_info_value, None, new_table_info.clone())
.await
.unwrap();
// if table info was updated, it should be ok.
table_metadata_manager
- .update_table_info(&current_table_info_value, new_table_info.clone())
+ .update_table_info(&current_table_info_value, None, new_table_info.clone())
.await
.unwrap();
@@ -1696,7 +1707,7 @@ mod tests {
// if the current_table_info_value is wrong, it should return an error.
// The ABA problem.
assert!(table_metadata_manager
- .update_table_info(&wrong_table_info_value, new_table_info)
+ .update_table_info(&wrong_table_info_value, None, new_table_info)
.await
.is_err())
}
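The extended `update_table_info` above is the entry point for cascading option changes. Below is a minimal sketch of a caller, assuming the surrounding crate's types (`TableMetadataManager`, `DeserializedValueWithBytes<TableInfoValue>`, `RegionDistribution`, `RawTableInfo`, `Result`) are in scope; the function name and variables are illustrative and not part of this change:

// Hypothetical caller: passing Some(distribution) makes the txn also rewrite
// every DatanodeTableValue's region options for this table.
async fn apply_table_info_update(
    manager: &TableMetadataManager,
    current: &DeserializedValueWithBytes<TableInfoValue>,
    distribution: RegionDistribution,
    new_table_info: RawTableInfo,
) -> Result<()> {
    manager
        .update_table_info(current, Some(distribution), new_table_info)
        .await
}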
diff --git a/src/common/meta/src/key/datanode_table.rs b/src/common/meta/src/key/datanode_table.rs
index e904983cc8d6..a0f0e9e511b8 100644
--- a/src/common/meta/src/key/datanode_table.rs
+++ b/src/common/meta/src/key/datanode_table.rs
@@ -23,7 +23,7 @@ use store_api::storage::RegionNumber;
use table::metadata::TableId;
use super::MetadataKey;
-use crate::error::{InvalidMetadataSnafu, Result};
+use crate::error::{DatanodeTableInfoNotFoundSnafu, InvalidMetadataSnafu, Result};
use crate::key::{
MetadataValue, RegionDistribution, DATANODE_TABLE_KEY_PATTERN, DATANODE_TABLE_KEY_PREFIX,
};
@@ -209,6 +209,49 @@ impl DatanodeTableManager {
Ok(txn)
}
+ /// Builds a transaction that updates the redundant table options (including WAL options)
+ /// for the given table id across the provided region distribution.
+ ///
+ /// Note that the provided `new_region_options` must be the
+ /// complete set of options rather than incremental changes.
+ pub(crate) async fn build_update_table_options_txn(
+ &self,
+ table_id: TableId,
+ region_distribution: RegionDistribution,
+ new_region_options: HashMap<String, String>,
+ ) -> Result<Txn> {
+ assert!(!region_distribution.is_empty());
+ // safety: region_distribution must not be empty
+ let (any_datanode, _) = region_distribution.first_key_value().unwrap();
+
+ let mut region_info = self
+ .kv_backend
+ .get(&DatanodeTableKey::new(*any_datanode, table_id).to_bytes())
+ .await
+ .transpose()
+ .context(DatanodeTableInfoNotFoundSnafu {
+ datanode_id: *any_datanode,
+ table_id,
+ })?
+ .and_then(|r| DatanodeTableValue::try_from_raw_value(&r.value))?
+ .region_info;
+ // substitute region options only.
+ region_info.region_options = new_region_options;
+
+ let mut txns = Vec::with_capacity(region_distribution.len());
+
+ for (datanode, regions) in region_distribution.into_iter() {
+ let key = DatanodeTableKey::new(datanode, table_id);
+ let key_bytes = key.to_bytes();
+ let value_bytes = DatanodeTableValue::new(table_id, regions, region_info.clone())
+ .try_as_raw_value()?;
+ txns.push(TxnOp::Put(key_bytes, value_bytes));
+ }
+
+ let txn = Txn::new().and_then(txns);
+ Ok(txn)
+ }
+
/// Builds the update datanode table transactions. It only executes while the primary keys comparing successes.
pub(crate) fn build_update_txn(
&self,
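As a usage note, the complete option map that `build_update_table_options_txn` expects is typically regenerated from the updated table info rather than patched incrementally. A hedged sketch mirroring the call site in `TableMetadataManager::update_table_info`, assuming the same variables are in scope:

// Sketch only: regenerate the full option map, then build one Put per datanode.
let new_region_options = new_table_info_value.table_info.to_region_options();
let txn = datanode_table_manager
    .build_update_table_options_txn(table_id, region_distribution, new_region_options)
    .await?;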
diff --git a/src/common/meta/src/key/table_info.rs b/src/common/meta/src/key/table_info.rs
index b3ca42b6ff8b..615043f85326 100644
--- a/src/common/meta/src/key/table_info.rs
+++ b/src/common/meta/src/key/table_info.rs
@@ -23,6 +23,7 @@ use table::table_name::TableName;
use table::table_reference::TableReference;
use super::TABLE_INFO_KEY_PATTERN;
+use crate::ddl::utils::region_storage_path;
use crate::error::{InvalidMetadataSnafu, Result};
use crate::key::txn_helper::TxnOpGetResponseSet;
use crate::key::{DeserializedValueWithBytes, MetadataKey, MetadataValue, TABLE_INFO_KEY_PREFIX};
@@ -125,6 +126,11 @@ impl TableInfoValue {
table_name: self.table_info.name.to_string(),
}
}
+
+ /// Builds the storage path shared by all regions of the table.
+ pub fn region_storage_path(&self) -> String {
+ region_storage_path(&self.table_info.catalog_name, &self.table_info.schema_name)
+ }
}
pub type TableInfoManagerRef = Arc<TableInfoManager>;
diff --git a/src/mito2/src/region/version.rs b/src/mito2/src/region/version.rs
index 15928e1e22c2..8e78fa4ab351 100644
--- a/src/mito2/src/region/version.rs
+++ b/src/mito2/src/region/version.rs
@@ -98,6 +98,18 @@ impl VersionControl {
Ok(())
}
+ /// Applies region option changes and generates a new version.
+ pub(crate) fn alter_options(&self, options: RegionOptions) {
+ let version = self.current().version;
+ let new_version = Arc::new(
+ VersionBuilder::from_version(version)
+ .options(options)
+ .build(),
+ );
+ let mut version_data = self.data.write().unwrap();
+ version_data.version = new_version;
+ }
+
/// Apply edit to current version.
pub(crate) fn apply_edit(
&self,
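A short sketch of how `alter_options` is meant to be used, assuming a `VersionControl` value named `version_control` and the mito `RegionOptions` type; only the in-memory version is swapped here, nothing is persisted:

// Sketch: clone the current options, adjust them, and install a new version.
let mut new_options = version_control.current().version.options.clone();
new_options.ttl = Some(std::time::Duration::from_secs(24 * 60 * 60));
version_control.alter_options(new_options);
assert!(version_control.current().version.options.ttl.is_some());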
diff --git a/src/mito2/src/worker/handle_alter.rs b/src/mito2/src/worker/handle_alter.rs
index 27aadbfc0d02..d73b24c46aa3 100644
--- a/src/mito2/src/worker/handle_alter.rs
+++ b/src/mito2/src/worker/handle_alter.rs
@@ -19,7 +19,7 @@ use std::sync::Arc;
use common_telemetry::{debug, info};
use snafu::ResultExt;
use store_api::metadata::{RegionMetadata, RegionMetadataBuilder, RegionMetadataRef};
-use store_api::region_request::RegionAlterRequest;
+use store_api::region_request::{AlterKind, ChangeOption, RegionAlterRequest};
use store_api::storage::RegionId;
use crate::error::{
@@ -27,6 +27,8 @@ use crate::error::{
};
use crate::flush::FlushReason;
use crate::manifest::action::RegionChange;
+use crate::region::version::VersionRef;
+use crate::region::MitoRegionRef;
use crate::request::{DdlRequest, OptionOutputTx, SenderDdlRequest};
use crate::worker::RegionWorkerLoop;
@@ -45,6 +47,13 @@ impl<S> RegionWorkerLoop<S> {
// Get the version before alter.
let version = region.version();
+
+ // Fast path for in-memory state changes such as region options.
+ if let AlterKind::ChangeRegionOptions { options } = request.kind {
+ self.handle_alter_region_options(region, version, options, sender);
+ return;
+ }
+
if version.metadata.schema_version != request.schema_version {
// This is possible if we retry the request.
debug!(
@@ -67,6 +76,7 @@ impl<S> RegionWorkerLoop<S> {
sender.send(Err(e).context(InvalidRegionRequestSnafu));
return;
}
+
// Checks whether we need to alter the region.
if !request.need_alter(&version.metadata) {
debug!(
@@ -111,7 +121,17 @@ impl<S> RegionWorkerLoop<S> {
version.metadata.schema_version,
region.metadata().schema_version
);
+ self.handle_alter_region_metadata(region, version, request, sender);
+ }
+ /// Handles region metadata changes.
+ fn handle_alter_region_metadata(
+ &mut self,
+ region: MitoRegionRef,
+ version: VersionRef,
+ request: RegionAlterRequest,
+ sender: OptionOutputTx,
+ ) {
let new_meta = match metadata_after_alteration(&version.metadata, request) {
Ok(new_meta) => new_meta,
Err(e) => {
@@ -120,11 +140,38 @@ impl<S> RegionWorkerLoop<S> {
}
};
// Persist the metadata to region's manifest.
- let change = RegionChange {
- metadata: new_meta.clone(),
- };
+ let change = RegionChange { metadata: new_meta };
self.handle_manifest_region_change(region, change, sender)
}
+
+ /// Handles requests that change region options, such as TTL. It only affects in-memory state,
+ /// since the changes are persisted in the `DatanodeTableValue` in metasrv.
+ fn handle_alter_region_options(
+ &mut self,
+ region: MitoRegionRef,
+ version: VersionRef,
+ options: Vec<ChangeOption>,
+ sender: OptionOutputTx,
+ ) {
+ let mut current_options = version.options.clone();
+ for option in options {
+ match option {
+ ChangeOption::TTL(new_ttl) => {
+ info!(
+ "Update region ttl: {}, previous: {:?} new: {:?}",
+ region.region_id, current_options.ttl, new_ttl
+ );
+ if new_ttl.is_zero() {
+ current_options.ttl = None;
+ } else {
+ current_options.ttl = Some(new_ttl);
+ }
+ }
+ }
+ }
+ region.version_control.alter_options(current_options);
+ sender.send(Ok(0));
+ }
}
/// Creates a metadata after applying the alter `request` to the old `metadata`.
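For reference, a request that takes the new options-only fast path could look like the sketch below. It uses the store-api types extended later in this change (`RegionAlterRequest`, `AlterKind`, `ChangeOption`) and is illustrative rather than code from the patch:

// Sketch: an alter request that only changes region options.
let request = RegionAlterRequest {
    schema_version: 0,
    kind: AlterKind::ChangeRegionOptions {
        // A zero duration unsets the TTL; any non-zero value becomes the new TTL.
        options: vec![ChangeOption::TTL(std::time::Duration::from_secs(0))],
    },
};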
diff --git a/src/mito2/src/worker/handle_open.rs b/src/mito2/src/worker/handle_open.rs
index 925e2dc8f51c..d4a13a134597 100644
--- a/src/mito2/src/worker/handle_open.rs
+++ b/src/mito2/src/worker/handle_open.rs
@@ -22,6 +22,7 @@ use snafu::{OptionExt, ResultExt};
use store_api::logstore::LogStore;
use store_api::region_request::RegionOpenRequest;
use store_api::storage::RegionId;
+use table::requests::STORAGE_KEY;
use crate::error::{
ObjectStoreNotFoundSnafu, OpenDalSnafu, OpenRegionSnafu, RegionNotFoundSnafu, Result,
@@ -38,7 +39,7 @@ impl<S: LogStore> RegionWorkerLoop<S> {
region_id: RegionId,
request: &RegionOpenRequest,
) -> Result<()> {
- let object_store = if let Some(storage_name) = request.options.get("storage") {
+ let object_store = if let Some(storage_name) = request.options.get(STORAGE_KEY) {
self.object_store_manager
.find(storage_name)
.context(ObjectStoreNotFoundSnafu {
diff --git a/src/operator/src/expr_factory.rs b/src/operator/src/expr_factory.rs
index e9be2e712fab..499441603f1b 100644
--- a/src/operator/src/expr_factory.rs
+++ b/src/operator/src/expr_factory.rs
@@ -18,9 +18,9 @@ use api::helper::ColumnDataTypeWrapper;
use api::v1::alter_expr::Kind;
use api::v1::column_def::options_from_column_schema;
use api::v1::{
- AddColumn, AddColumns, AlterExpr, ChangeColumnType, ChangeColumnTypes, ColumnDataType,
- ColumnDataTypeExtension, CreateFlowExpr, CreateTableExpr, CreateViewExpr, DropColumn,
- DropColumns, ExpireAfter, RenameTable, SemanticType, TableName,
+ AddColumn, AddColumns, AlterExpr, ChangeColumnType, ChangeColumnTypes, ChangeTableOptions,
+ ColumnDataType, ColumnDataTypeExtension, CreateFlowExpr, CreateTableExpr, CreateViewExpr,
+ DropColumn, DropColumns, ExpireAfter, RenameTable, SemanticType, TableName,
};
use common_error::ext::BoxedError;
use common_grpc_expr::util::ColumnExpr;
@@ -438,7 +438,7 @@ pub(crate) fn to_alter_expr(
.map_err(BoxedError::new)
.context(ExternalSnafu)?;
- let kind = match alter_table.alter_operation() {
+ let kind = match alter_table.alter_operation {
AlterTableOperation::AddConstraint(_) => {
return NotSupportedSnafu {
feat: "ADD CONSTRAINT",
@@ -451,7 +451,7 @@ pub(crate) fn to_alter_expr(
} => Kind::AddColumns(AddColumns {
add_columns: vec![AddColumn {
column_def: Some(
- sql_column_def_to_grpc_column_def(column_def, Some(&query_ctx.timezone()))
+ sql_column_def_to_grpc_column_def(&column_def, Some(&query_ctx.timezone()))
.map_err(BoxedError::new)
.context(ExternalSnafu)?,
),
@@ -463,13 +463,13 @@ pub(crate) fn to_alter_expr(
target_type,
} => {
let target_type =
- sql_data_type_to_concrete_data_type(target_type).context(ParseSqlSnafu)?;
+ sql_data_type_to_concrete_data_type(&target_type).context(ParseSqlSnafu)?;
let (target_type, target_type_extension) = ColumnDataTypeWrapper::try_from(target_type)
.map(|w| w.to_parts())
.context(ColumnDataTypeSnafu)?;
Kind::ChangeColumnTypes(ChangeColumnTypes {
change_column_types: vec![ChangeColumnType {
- column_name: column_name.value.to_string(),
+ column_name: column_name.value,
target_type: target_type as i32,
target_type_extension,
}],
@@ -483,6 +483,11 @@ pub(crate) fn to_alter_expr(
AlterTableOperation::RenameTable { new_table_name } => Kind::RenameTable(RenameTable {
new_table_name: new_table_name.to_string(),
}),
+ AlterTableOperation::ChangeTableOptions { options } => {
+ Kind::ChangeTableOptions(ChangeTableOptions {
+ change_table_options: options.into_iter().map(Into::into).collect(),
+ })
+ }
};
Ok(AlterExpr {
@@ -744,7 +749,7 @@ mod tests {
#[test]
fn test_to_alter_change_column_type_expr() {
- let sql = "ALTER TABLE monitor MODIFY mem_usage STRING;";
+ let sql = "ALTER TABLE monitor MODIFY COLUMN mem_usage STRING;";
let stmt =
ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default())
.unwrap()
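To make the mapping concrete, here is a hedged sketch of the gRPC kind produced by the new `SET` branch, using the `api::v1` types referenced above (`ChangeTableOption` is assumed to be imported; the key and value are illustrative):

// Sketch: ALTER TABLE t SET 'ttl'='1d' becomes a ChangeTableOptions kind.
let kind = Kind::ChangeTableOptions(ChangeTableOptions {
    change_table_options: vec![ChangeTableOption {
        key: "ttl".to_string(),
        value: "1d".to_string(),
    }],
});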
diff --git a/src/sql/src/parsers/alter_parser.rs b/src/sql/src/parsers/alter_parser.rs
index b6db219626b3..eb231e6df40b 100644
--- a/src/sql/src/parsers/alter_parser.rs
+++ b/src/sql/src/parsers/alter_parser.rs
@@ -15,12 +15,12 @@
use common_query::AddColumnLocation;
use snafu::ResultExt;
use sqlparser::keywords::Keyword;
-use sqlparser::parser::ParserError;
+use sqlparser::parser::{Parser, ParserError};
use sqlparser::tokenizer::Token;
use crate::error::{self, Result};
use crate::parser::ParserContext;
-use crate::statements::alter::{AlterTable, AlterTableOperation};
+use crate::statements::alter::{AlterTable, AlterTableOperation, ChangeTableOption};
use crate::statements::statement::Statement;
impl ParserContext<'_> {
@@ -94,6 +94,14 @@ impl ParserContext<'_> {
}
};
AlterTableOperation::RenameTable { new_table_name }
+ } else if self.parser.parse_keyword(Keyword::SET) {
+ let options = self
+ .parser
+ .parse_comma_separated(parse_string_options)?
+ .into_iter()
+ .map(|(key, value)| ChangeTableOption { key, value })
+ .collect();
+ AlterTableOperation::ChangeTableOptions { options }
} else {
return Err(ParserError::ParserError(format!(
"expect keyword ADD or DROP or MODIFY or RENAME after ALTER TABLE, found {}",
@@ -104,6 +112,22 @@ impl ParserContext<'_> {
}
}
+fn parse_string_options(parser: &mut Parser) -> std::result::Result<(String, String), ParserError> {
+ let name = parser.parse_literal_string()?;
+ parser.expect_token(&Token::Eq)?;
+ let value = if parser.parse_keyword(Keyword::NULL) {
+ "".to_string()
+ } else if let Ok(v) = parser.parse_literal_string() {
+ v
+ } else {
+ return Err(ParserError::ParserError(format!(
+ "Unexpected option value for alter table statements, expect string literal or NULL, got: `{}`",
+ parser.next_token()
+ )));
+ };
+ Ok((name, value))
+}
+
#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
@@ -272,7 +296,7 @@ mod tests {
)
.unwrap();
- let sql_2 = "ALTER TABLE my_metric_1 MODIFY a STRING";
+ let sql_2 = "ALTER TABLE my_metric_1 MODIFY COLUMN a STRING";
let mut result_2 = ParserContext::create_with_dialect(
sql_2,
&GreptimeDbDialect {},
@@ -406,4 +430,44 @@ mod tests {
_ => unreachable!(),
}
}
+
+ fn check_parse_alter_table(sql: &str, expected: &[(&str, &str)]) {
+ let result =
+ ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default())
+ .unwrap();
+ assert_eq!(1, result.len());
+ let Statement::Alter(alter) = &result[0] else {
+ unreachable!()
+ };
+ assert_eq!("test_table", alter.table_name.0[0].value);
+ let AlterTableOperation::ChangeTableOptions { options } = &alter.alter_operation else {
+ unreachable!()
+ };
+ let res = options
+ .iter()
+ .map(|o| (o.key.as_str(), o.value.as_str()))
+ .collect::<Vec<_>>();
+ assert_eq!(expected, &res);
+ }
+
+ #[test]
+ fn test_parse_alter_column() {
+ check_parse_alter_table("ALTER TABLE test_table SET 'a'='A';", &[("a", "A")]);
+ check_parse_alter_table(
+ "ALTER TABLE test_table SET 'a'='A','b'='B'",
+ &[("a", "A"), ("b", "B")],
+ );
+ check_parse_alter_table(
+ "ALTER TABLE test_table SET 'a'='A','b'='B','c'='C';",
+ &[("a", "A"), ("b", "B"), ("c", "C")],
+ );
+ check_parse_alter_table("ALTER TABLE test_table SET 'a'=NULL;", &[("a", "")]);
+
+ ParserContext::create_with_dialect(
+ "ALTER TABLE test_table SET a INTEGER",
+ &GreptimeDbDialect {},
+ ParseOptions::default(),
+ )
+ .unwrap_err();
+ }
}
diff --git a/src/sql/src/statements/alter.rs b/src/sql/src/statements/alter.rs
index 5d679549a187..3270d002c8a6 100644
--- a/src/sql/src/statements/alter.rs
+++ b/src/sql/src/statements/alter.rs
@@ -14,14 +14,15 @@
use std::fmt::{Debug, Display};
+use api::v1;
use common_query::AddColumnLocation;
use sqlparser::ast::{ColumnDef, DataType, Ident, ObjectName, TableConstraint};
use sqlparser_derive::{Visit, VisitMut};
#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
pub struct AlterTable {
- table_name: ObjectName,
- alter_operation: AlterTableOperation,
+ pub table_name: ObjectName,
+ pub alter_operation: AlterTableOperation,
}
impl AlterTable {
@@ -67,6 +68,8 @@ pub enum AlterTableOperation {
column_name: Ident,
target_type: DataType,
},
+ /// `SET <table attr key> = <table attr value>`
+ ChangeTableOptions { options: Vec<ChangeTableOption> },
/// `DROP COLUMN <name>`
DropColumn { name: Ident },
/// `RENAME <new_table_name>`
@@ -97,6 +100,27 @@ impl Display for AlterTableOperation {
} => {
write!(f, r#"MODIFY COLUMN {column_name} {target_type}"#)
}
+ AlterTableOperation::ChangeTableOptions { options } => {
+ for ChangeTableOption { key, value } in options {
+ write!(f, r#"MODIFY '{key}'='{value}', "#)?;
+ }
+ Ok(())
+ }
+ }
+ }
+}
+
+#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
+pub struct ChangeTableOption {
+ pub key: String,
+ pub value: String,
+}
+
+impl From<ChangeTableOption> for v1::ChangeTableOption {
+ fn from(c: ChangeTableOption) -> Self {
+ v1::ChangeTableOption {
+ key: c.key,
+ value: c.value,
}
}
}
diff --git a/src/store-api/Cargo.toml b/src/store-api/Cargo.toml
index c3ee5c393039..2d11b31ebe45 100644
--- a/src/store-api/Cargo.toml
+++ b/src/store-api/Cargo.toml
@@ -22,6 +22,7 @@ datafusion-physical-plan.workspace = true
datatypes.workspace = true
derive_builder.workspace = true
futures.workspace = true
+humantime.workspace = true
serde.workspace = true
serde_json.workspace = true
snafu.workspace = true
diff --git a/src/store-api/src/metadata.rs b/src/store-api/src/metadata.rs
index a94879675e5b..3a08523b900d 100644
--- a/src/store-api/src/metadata.rs
+++ b/src/store-api/src/metadata.rs
@@ -523,6 +523,9 @@ impl RegionMetadataBuilder {
AlterKind::AddColumns { columns } => self.add_columns(columns)?,
AlterKind::DropColumns { names } => self.drop_columns(&names),
AlterKind::ChangeColumnTypes { columns } => self.change_column_types(columns),
+ AlterKind::ChangeRegionOptions { options: _ } => {
+ // nothing to be done with RegionMetadata
+ }
}
Ok(self)
}
@@ -738,6 +741,14 @@ pub enum MetadataError {
#[snafu(implicit)]
location: Location,
},
+
+ #[snafu(display("Invalid region option change request, key: {}, value: {}", key, value))]
+ InvalidRegionOptionChangeRequest {
+ key: String,
+ value: String,
+ #[snafu(implicit)]
+ location: Location,
+ },
}
impl ErrorExt for MetadataError {
diff --git a/src/store-api/src/region_request.rs b/src/store-api/src/region_request.rs
index 2453b833402c..867f23175445 100644
--- a/src/store-api/src/region_request.rs
+++ b/src/store-api/src/region_request.rs
@@ -14,25 +14,29 @@
use std::collections::HashMap;
use std::fmt;
+use std::time::Duration;
use api::helper::ColumnDataTypeWrapper;
use api::v1::add_column_location::LocationType;
+use api::v1::region::alter_request::Kind;
use api::v1::region::{
alter_request, compact_request, region_request, AlterRequest, AlterRequests, CloseRequest,
CompactRequest, CreateRequest, CreateRequests, DeleteRequests, DropRequest, DropRequests,
FlushRequest, InsertRequests, OpenRequest, TruncateRequest,
};
-use api::v1::{self, Rows, SemanticType};
+use api::v1::{self, ChangeTableOption, Rows, SemanticType};
pub use common_base::AffectedRows;
use datatypes::data_type::ConcreteDataType;
+use serde::{Deserialize, Serialize};
use snafu::{ensure, OptionExt};
use strum::IntoStaticStr;
use crate::logstore::entry;
use crate::metadata::{
- ColumnMetadata, InvalidRawRegionRequestSnafu, InvalidRegionRequestSnafu, MetadataError,
- RegionMetadata, Result,
+ ColumnMetadata, InvalidRawRegionRequestSnafu, InvalidRegionOptionChangeRequestSnafu,
+ InvalidRegionRequestSnafu, MetadataError, RegionMetadata, Result,
};
+use crate::mito_engine_options::TTL_KEY;
use crate::path_utils::region_dir;
use crate::storage::{ColumnId, RegionId, ScanRequest};
@@ -389,6 +393,8 @@ pub enum AlterKind {
/// Columns to change.
columns: Vec<ChangeColumnType>,
},
+ /// Change region options.
+ ChangeRegionOptions { options: Vec<ChangeOption> },
}
impl AlterKind {
@@ -412,6 +418,7 @@ impl AlterKind {
col_to_change.validate(metadata)?;
}
}
+ AlterKind::ChangeRegionOptions { .. } => {}
}
Ok(())
}
@@ -429,6 +436,11 @@ impl AlterKind {
AlterKind::ChangeColumnTypes { columns } => columns
.iter()
.any(|col_to_change| col_to_change.need_alter(metadata)),
+ AlterKind::ChangeRegionOptions { .. } => {
+ // We need to update region options for `ChangeTableOptions`.
+ // TODO: check whether the TTL has actually changed.
+ true
+ }
}
}
@@ -473,6 +485,13 @@ impl TryFrom<alter_request::Kind> for AlterKind {
let names = x.drop_columns.into_iter().map(|x| x.name).collect();
AlterKind::DropColumns { names }
}
+ Kind::ChangeTableOptions(change_options) => AlterKind::ChangeRegionOptions {
+ options: change_options
+ .change_table_options
+ .iter()
+ .map(TryFrom::try_from)
+ .collect::<Result<Vec<_>>>()?,
+ },
};
Ok(alter_kind)
@@ -639,6 +658,30 @@ impl From<v1::ChangeColumnType> for ChangeColumnType {
}
}
+#[derive(Debug, Eq, PartialEq, Clone, Serialize, Deserialize)]
+pub enum ChangeOption {
+ TTL(Duration),
+}
+
+impl TryFrom<&ChangeTableOption> for ChangeOption {
+ type Error = MetadataError;
+
+ fn try_from(value: &ChangeTableOption) -> std::result::Result<Self, Self::Error> {
+ let ChangeTableOption { key, value } = value;
+ if key == TTL_KEY {
+ let ttl = if value.is_empty() {
+ Duration::from_secs(0)
+ } else {
+ humantime::parse_duration(value)
+ .map_err(|_| InvalidRegionOptionChangeRequestSnafu { key, value }.build())?
+ };
+ Ok(Self::TTL(ttl))
+ } else {
+ InvalidRegionOptionChangeRequestSnafu { key, value }.fail()
+ }
+ }
+}
+
#[derive(Debug, Clone, Default)]
pub struct RegionFlushRequest {
pub row_group_size: Option<usize>,
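Two hedged examples of the `ChangeOption` conversion defined above; the duration string follows `humantime` semantics, and an empty value maps to a zero duration, which downstream code treats as unsetting the TTL:

// Sketch: "1d" parses to one day.
let opt = ChangeOption::try_from(&ChangeTableOption {
    key: "ttl".to_string(),
    value: "1d".to_string(),
})?;
assert_eq!(opt, ChangeOption::TTL(Duration::from_secs(86_400)));

// Sketch: an empty value becomes a zero duration ("no TTL" for callers).
let unset = ChangeOption::try_from(&ChangeTableOption {
    key: "ttl".to_string(),
    value: String::new(),
})?;
assert_eq!(unset, ChangeOption::TTL(Duration::ZERO));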
diff --git a/src/table/src/error.rs b/src/table/src/error.rs
index 9b633bb5bb98..e6ea1e19325f 100644
--- a/src/table/src/error.rs
+++ b/src/table/src/error.rs
@@ -137,6 +137,9 @@ pub enum Error {
#[snafu(implicit)]
location: Location,
},
+
+ #[snafu(display("Table options value is not valid, key: `{}`, value: `{}`", key, value))]
+ InvalidTableOptionValue { key: String, value: String },
}
impl ErrorExt for Error {
@@ -157,6 +160,7 @@ impl ErrorExt for Error {
Error::Unsupported { .. } => StatusCode::Unsupported,
Error::ParseTableOption { .. } => StatusCode::InvalidArguments,
Error::MissingTimeIndexColumn { .. } => StatusCode::IllegalState,
+ Error::InvalidTableOptionValue { .. } => StatusCode::InvalidArguments,
}
}
diff --git a/src/table/src/metadata.rs b/src/table/src/metadata.rs
index ec9404a8b1f1..3e7521750866 100644
--- a/src/table/src/metadata.rs
+++ b/src/table/src/metadata.rs
@@ -24,6 +24,7 @@ use datatypes::schema::{ColumnSchema, RawSchema, Schema, SchemaBuilder, SchemaRe
use derive_builder::Builder;
use serde::{Deserialize, Serialize};
use snafu::{ensure, OptionExt, ResultExt};
+use store_api::region_request::ChangeOption;
use store_api::storage::{ColumnDescriptor, ColumnDescriptorBuilder, ColumnId, RegionId};
use crate::error::{self, Result};
@@ -209,9 +210,35 @@ impl TableMeta {
.next_column_id(self.next_column_id);
Ok(meta_builder)
}
+ AlterKind::ChangeTableOptions { options } => self.change_table_options(options),
}
}
+ /// Creates a [TableMetaBuilder] with modified table options.
+ fn change_table_options(&self, requests: &[ChangeOption]) -> Result<TableMetaBuilder> {
+ let mut new_options = self.options.clone();
+
+ for request in requests {
+ match request {
+ ChangeOption::TTL(new_ttl) => {
+ if new_ttl.is_zero() {
+ new_options.ttl = None;
+ } else {
+ new_options.ttl = Some(*new_ttl);
+ }
+ }
+ }
+ }
+ let mut builder = TableMetaBuilder::default();
+ builder
+ .options(new_options)
+ .schema(self.schema.clone())
+ .primary_key_indices(self.primary_key_indices.clone())
+ .engine(self.engine.clone())
+ .next_column_id(self.next_column_id);
+ Ok(builder)
+ }
+
/// Allocate a new column for the table.
///
/// This method would bump the `next_column_id` of the meta.
@@ -823,6 +850,13 @@ impl RawTableInfo {
self.meta.primary_key_indices = primary_key_indices;
self.meta.value_indices = value_indices;
}
+
+ /// Extracts region options from table info.
+ ///
+ /// All "region options" are actually a copy of table options for redundancy.
+ pub fn to_region_options(&self) -> HashMap<String, String> {
+ HashMap::from(&self.meta.options)
+ }
}
impl From<TableInfo> for RawTableInfo {
@@ -857,7 +891,6 @@ impl TryFrom<RawTableInfo> for TableInfo {
#[cfg(test)]
mod tests {
-
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use datatypes::data_type::ConcreteDataType;
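As a usage note for `to_region_options`, the per-region option map is just a copy of the table options; a hedged sketch assuming a populated `RawTableInfo` named `raw`:

// Sketch: the table options are copied verbatim into the per-region option map
// that ends up in every DatanodeTableValue.
let region_options: std::collections::HashMap<String, String> = raw.to_region_options();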
diff --git a/src/table/src/requests.rs b/src/table/src/requests.rs
index 2aebc47055f0..a4fcce7bcdcf 100644
--- a/src/table/src/requests.rs
+++ b/src/table/src/requests.rs
@@ -30,6 +30,7 @@ use greptime_proto::v1::region::compact_request;
use serde::{Deserialize, Serialize};
use store_api::metric_engine_consts::{LOGICAL_TABLE_METADATA_KEY, PHYSICAL_TABLE_METADATA_KEY};
use store_api::mito_engine_options::is_mito_engine_option_key;
+use store_api::region_request::ChangeOption;
use crate::error::{ParseTableOptionSnafu, Result};
use crate::metadata::{TableId, TableVersion};
@@ -80,7 +81,7 @@ pub struct TableOptions {
}
pub const WRITE_BUFFER_SIZE_KEY: &str = "write_buffer_size";
-pub const TTL_KEY: &str = "ttl";
+pub const TTL_KEY: &str = store_api::mito_engine_options::TTL_KEY;
pub const STORAGE_KEY: &str = "storage";
pub const COMMENT_KEY: &str = "comment";
pub const AUTO_CREATE_TABLE_KEY: &str = "auto_create_table";
@@ -212,8 +213,35 @@ pub enum AlterKind {
RenameTable {
new_table_name: String,
},
+ ChangeTableOptions {
+ options: Vec<ChangeOption>,
+ },
}
+// #[derive(Debug, Clone, Serialize, Deserialize)]
+// pub enum ChangeTableOptionRequest {
+// TTL(Duration),
+// }
+
+// impl TryFrom<&ChangeTableOption> for ChangeTableOptionRequest {
+// type Error = Error;
+//
+// fn try_from(value: &ChangeTableOption) -> std::result::Result<Self, Self::Error> {
+// let ChangeTableOption { key, value } = value;
+// if key == TTL_KEY {
+// let ttl = if value.is_empty() {
+// Duration::from_secs(0)
+// } else {
+// humantime::parse_duration(value)
+// .map_err(|_| error::InvalidTableOptionValueSnafu { key, value }.build())?
+// };
+// Ok(Self::TTL(ttl))
+// } else {
+// UnsupportedTableOptionChangeSnafu { key }.fail()
+// }
+// }
+// }
+
#[derive(Debug)]
pub struct InsertRequest {
pub catalog_name: String,
diff --git a/tests/cases/standalone/common/alter/alter_table_options.result b/tests/cases/standalone/common/alter/alter_table_options.result
new file mode 100644
index 000000000000..13daf0f89b23
--- /dev/null
+++ b/tests/cases/standalone/common/alter/alter_table_options.result
@@ -0,0 +1,88 @@
+CREATE TABLE ato(i INTEGER, j TIMESTAMP TIME INDEX);
+
+Affected Rows: 0
+
+ALTER TABLE ato SET 'ttl'='1d';
+
+Affected Rows: 0
+
+SHOW CREATE TABLE ato;
+
++-------+------------------------------------+
+| Table | Create Table |
++-------+------------------------------------+
+| ato | CREATE TABLE IF NOT EXISTS "ato" ( |
+| | "i" INT NULL, |
+| | "j" TIMESTAMP(3) NOT NULL, |
+| | TIME INDEX ("j") |
+| | ) |
+| | |
+| | ENGINE=mito |
+| | WITH( |
+| | ttl = '1day' |
+| | ) |
++-------+------------------------------------+
+
+ALTER TABLE ato SET 'ttl'='2d';
+
+Affected Rows: 0
+
+SHOW CREATE TABLE ato;
+
++-------+------------------------------------+
+| Table | Create Table |
++-------+------------------------------------+
+| ato | CREATE TABLE IF NOT EXISTS "ato" ( |
+| | "i" INT NULL, |
+| | "j" TIMESTAMP(3) NOT NULL, |
+| | TIME INDEX ("j") |
+| | ) |
+| | |
+| | ENGINE=mito |
+| | WITH( |
+| | ttl = '2days' |
+| | ) |
++-------+------------------------------------+
+
+ALTER TABLE ato SET 'ttl'=NULL;
+
+Affected Rows: 0
+
+SHOW CREATE TABLE ato;
+
++-------+------------------------------------+
+| Table | Create Table |
++-------+------------------------------------+
+| ato | CREATE TABLE IF NOT EXISTS "ato" ( |
+| | "i" INT NULL, |
+| | "j" TIMESTAMP(3) NOT NULL, |
+| | TIME INDEX ("j") |
+| | ) |
+| | |
+| | ENGINE=mito |
+| | |
++-------+------------------------------------+
+
+ALTER TABLE ato SET 'ttl'='0d';
+
+Affected Rows: 0
+
+SHOW CREATE TABLE ato;
+
++-------+------------------------------------+
+| Table | Create Table |
++-------+------------------------------------+
+| ato | CREATE TABLE IF NOT EXISTS "ato" ( |
+| | "i" INT NULL, |
+| | "j" TIMESTAMP(3) NOT NULL, |
+| | TIME INDEX ("j") |
+| | ) |
+| | |
+| | ENGINE=mito |
+| | |
++-------+------------------------------------+
+
+DROP TABLE ato;
+
+Affected Rows: 0
+
diff --git a/tests/cases/standalone/common/alter/alter_table_options.sql b/tests/cases/standalone/common/alter/alter_table_options.sql
new file mode 100644
index 000000000000..4b30a8c28f1d
--- /dev/null
+++ b/tests/cases/standalone/common/alter/alter_table_options.sql
@@ -0,0 +1,19 @@
+CREATE TABLE ato(i INTEGER, j TIMESTAMP TIME INDEX);
+
+ALTER TABLE ato SET 'ttl'='1d';
+
+SHOW CREATE TABLE ato;
+
+ALTER TABLE ato SET 'ttl'='2d';
+
+SHOW CREATE TABLE ato;
+
+ALTER TABLE ato SET 'ttl'=NULL;
+
+SHOW CREATE TABLE ato;
+
+ALTER TABLE ato SET 'ttl'='0d';
+
+SHOW CREATE TABLE ato;
+
+DROP TABLE ato;
\ No newline at end of file
diff --git a/tests/cases/standalone/common/alter/change_col_type.result b/tests/cases/standalone/common/alter/change_col_type.result
index f0a641d28bc2..aa992a292c66 100644
--- a/tests/cases/standalone/common/alter/change_col_type.result
+++ b/tests/cases/standalone/common/alter/change_col_type.result
@@ -6,23 +6,23 @@ INSERT INTO test VALUES (1, 1, 1, false), (2, 2, 2, true);
Affected Rows: 2
-ALTER TABLE test MODIFY "I" STRING;
+ALTER TABLE test MODIFY COLUMN "I" STRING;
Error: 4002(TableColumnNotFound), Column I not exists in table test
-ALTER TABLE test MODIFY k DATE;
+ALTER TABLE test MODIFY COLUMN k DATE;
Error: 1004(InvalidArguments), Invalid alter table(test) request: column 'k' cannot be cast automatically to type 'Date'
-ALTER TABLE test MODIFY id STRING;
+ALTER TABLE test MODIFY COLUMN id STRING;
Error: 1004(InvalidArguments), Invalid alter table(test) request: Not allowed to change primary key index column 'id'
-ALTER TABLE test MODIFY j STRING;
+ALTER TABLE test MODIFY COLUMN j STRING;
Error: 1004(InvalidArguments), Invalid alter table(test) request: Not allowed to change timestamp index column 'j' datatype
-ALTER TABLE test MODIFY I STRING;
+ALTER TABLE test MODIFY COLUMN I STRING;
Affected Rows: 0
@@ -61,7 +61,7 @@ DESCRIBE test;
| k | Boolean | | YES | | FIELD |
+--------+----------------------+-----+------+---------+---------------+
-ALTER TABLE test MODIFY I INTEGER;
+ALTER TABLE test MODIFY COLUMN I INTEGER;
Affected Rows: 0
diff --git a/tests/cases/standalone/common/alter/change_col_type.sql b/tests/cases/standalone/common/alter/change_col_type.sql
index 0fe8c28e9b90..3282385c875a 100644
--- a/tests/cases/standalone/common/alter/change_col_type.sql
+++ b/tests/cases/standalone/common/alter/change_col_type.sql
@@ -2,15 +2,15 @@ CREATE TABLE test(id INTEGER PRIMARY KEY, i INTEGER NULL, j TIMESTAMP TIME INDEX
INSERT INTO test VALUES (1, 1, 1, false), (2, 2, 2, true);
-ALTER TABLE test MODIFY "I" STRING;
+ALTER TABLE test MODIFY COLUMN "I" STRING;
-ALTER TABLE test MODIFY k DATE;
+ALTER TABLE test MODIFY COLUMN k DATE;
-ALTER TABLE test MODIFY id STRING;
+ALTER TABLE test MODIFY COLUMN id STRING;
-ALTER TABLE test MODIFY j STRING;
+ALTER TABLE test MODIFY COLUMN j STRING;
-ALTER TABLE test MODIFY I STRING;
+ALTER TABLE test MODIFY COLUMN I STRING;
SELECT * FROM test;
@@ -21,7 +21,7 @@ SELECT * FROM test;
DESCRIBE test;
-ALTER TABLE test MODIFY I INTEGER;
+ALTER TABLE test MODIFY COLUMN I INTEGER;
-- SQLNESS SORT_RESULT 3 1
SELECT * FROM test;
diff --git a/tests/cases/standalone/common/alter/change_col_type_not_null.result b/tests/cases/standalone/common/alter/change_col_type_not_null.result
index 79f03c9cb023..497fb5dca606 100644
--- a/tests/cases/standalone/common/alter/change_col_type_not_null.result
+++ b/tests/cases/standalone/common/alter/change_col_type_not_null.result
@@ -15,7 +15,7 @@ SELECT * FROM test;
| 1970-01-01T00:00:00.002 | 2 |
+-------------------------+---+
-ALTER TABLE test MODIFY j STRING;
+ALTER TABLE test MODIFY COLUMN j STRING;
Error: 1004(InvalidArguments), Invalid alter table(test) request: column 'j' must be nullable to ensure safe conversion.
diff --git a/tests/cases/standalone/common/alter/change_col_type_not_null.sql b/tests/cases/standalone/common/alter/change_col_type_not_null.sql
index c91ae44a2c14..5d2e6f954bc1 100644
--- a/tests/cases/standalone/common/alter/change_col_type_not_null.sql
+++ b/tests/cases/standalone/common/alter/change_col_type_not_null.sql
@@ -4,7 +4,7 @@ INSERT INTO test VALUES (1, 1), (2, 2);
SELECT * FROM test;
-ALTER TABLE test MODIFY j STRING;
+ALTER TABLE test MODIFY COLUMN j STRING;
SELECT * FROM test;
|
feat
|
Support altering table TTL (#4848)
|
ef5d1a6a653c259ec3cd7703a30b339477d32073
|
2024-07-02 17:26:21
|
zyy17
|
ci: update centos yum source and specify cargo-binstall version (#4248)
| false
|
diff --git a/docker/ci/centos/Dockerfile b/docker/ci/centos/Dockerfile
index 1ca6f32acb3b..8cddcfc53963 100644
--- a/docker/ci/centos/Dockerfile
+++ b/docker/ci/centos/Dockerfile
@@ -1,5 +1,9 @@
FROM centos:7
+# Note: CentOS 7 reached EOL on 2024-07-01, so `mirror.centos.org` is no longer available and we need to use `vault.centos.org` instead.
+RUN sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo
+RUN sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo
+
RUN yum install -y epel-release \
openssl \
openssl-devel \
diff --git a/docker/dev-builder/centos/Dockerfile b/docker/dev-builder/centos/Dockerfile
index ded906fbede5..af7e778e03d4 100644
--- a/docker/dev-builder/centos/Dockerfile
+++ b/docker/dev-builder/centos/Dockerfile
@@ -2,6 +2,10 @@ FROM centos:7 as builder
ENV LANG en_US.utf8
+# Note: CentOS 7 reached EOL on 2024-07-01, so `mirror.centos.org` is no longer available and we need to use `vault.centos.org` instead.
+RUN sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo
+RUN sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo
+
# Install dependencies
RUN ulimit -n 1024000 && yum groupinstall -y 'Development Tools'
RUN yum install -y epel-release \
@@ -25,6 +29,10 @@ ENV PATH /opt/rh/rh-python38/root/usr/bin:/usr/local/bin:/root/.cargo/bin/:$PATH
ARG RUST_TOOLCHAIN
RUN rustup toolchain install ${RUST_TOOLCHAIN}
+
+# Install a specific cargo-binstall version that is compatible with the current Rust toolchain.
+# Note: installing the latest version may fail with a `use of unstable library feature 'io_error_downcast'` error.
+RUN cargo install cargo-binstall --version 1.6.6 --locked
+
# Install nextest.
-RUN cargo install cargo-binstall --locked
RUN cargo binstall cargo-nextest --no-confirm
diff --git a/docker/dev-builder/ubuntu/Dockerfile b/docker/dev-builder/ubuntu/Dockerfile
index 1e0a902eea47..3f76d80eff2c 100644
--- a/docker/dev-builder/ubuntu/Dockerfile
+++ b/docker/dev-builder/ubuntu/Dockerfile
@@ -55,6 +55,9 @@ ENV PATH /root/.cargo/bin/:$PATH
ARG RUST_TOOLCHAIN
RUN rustup toolchain install ${RUST_TOOLCHAIN}
+# Install a specific cargo-binstall version that is compatible with the current Rust toolchain.
+# Note: installing the latest version may fail with a `use of unstable library feature 'io_error_downcast'` error.
+RUN cargo install cargo-binstall --version 1.6.6 --locked
+
# Install nextest.
-RUN cargo install cargo-binstall --locked
RUN cargo binstall cargo-nextest --no-confirm
diff --git a/docker/dev-builder/ubuntu/Dockerfile-18.10 b/docker/dev-builder/ubuntu/Dockerfile-18.10
index 73d99415ed35..1e3357be810b 100644
--- a/docker/dev-builder/ubuntu/Dockerfile-18.10
+++ b/docker/dev-builder/ubuntu/Dockerfile-18.10
@@ -43,6 +43,9 @@ ENV PATH /root/.cargo/bin/:$PATH
ARG RUST_TOOLCHAIN
RUN rustup toolchain install ${RUST_TOOLCHAIN}
+# Install a specific cargo-binstall version that is compatible with the current Rust toolchain.
+# Note: installing the latest version may fail with a `use of unstable library feature 'io_error_downcast'` error.
+RUN cargo install cargo-binstall --version 1.6.6 --locked
+
# Install nextest.
-RUN cargo install cargo-binstall --locked
RUN cargo binstall cargo-nextest --no-confirm
|
ci
|
update centos yum source and specify cargo-binstall version (#4248)
|
c09775d17f7449e5ef105cb09f46a247ba4d8f02
|
2022-11-23 09:10:03
|
dennis zhuang
|
feat: adds metrics, tracing and retry layer to object-store (#621)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 21c57b60edc7..7b2f738a9732 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1789,6 +1789,7 @@ dependencies = [
"axum 0.6.0-rc.2",
"axum-macros",
"axum-test-helper",
+ "backon",
"catalog",
"client",
"common-base",
@@ -3665,6 +3666,7 @@ dependencies = [
"http",
"log",
"md-5",
+ "metrics",
"once_cell",
"parking_lot",
"percent-encoding",
@@ -3677,6 +3679,7 @@ dependencies = [
"thiserror",
"time 0.3.14",
"tokio",
+ "tracing",
"ureq",
]
diff --git a/src/datanode/Cargo.toml b/src/datanode/Cargo.toml
index c673947a47a3..f96ec5597759 100644
--- a/src/datanode/Cargo.toml
+++ b/src/datanode/Cargo.toml
@@ -13,6 +13,7 @@ api = { path = "../api" }
async-trait = "0.1"
axum = "0.6.0-rc.2"
axum-macros = "0.3.0-rc.1"
+backon = "0.2"
catalog = { path = "../catalog" }
common-base = { path = "../common/base" }
common-catalog = { path = "../common/catalog" }
diff --git a/src/datanode/src/instance.rs b/src/datanode/src/instance.rs
index 4646b525e5d2..dd94c9afc1ac 100644
--- a/src/datanode/src/instance.rs
+++ b/src/datanode/src/instance.rs
@@ -16,6 +16,7 @@ use std::sync::Arc;
use std::time::Duration;
use std::{fs, path};
+use backon::ExponentialBackoff;
use catalog::remote::MetaKvBackend;
use catalog::CatalogManagerRef;
use common_grpc::channel_manager::{ChannelConfig, ChannelManager};
@@ -26,7 +27,7 @@ use meta_client::client::{MetaClient, MetaClientBuilder};
use meta_client::MetaClientOpts;
use mito::config::EngineConfig as TableEngineConfig;
use mito::engine::MitoEngine;
-use object_store::layers::LoggingLayer;
+use object_store::layers::{LoggingLayer, MetricsLayer, RetryLayer, TracingLayer};
use object_store::services::fs::Builder;
use object_store::{util, ObjectStore};
use query::query_engine::{QueryEngineFactory, QueryEngineRef};
@@ -189,7 +190,15 @@ pub(crate) async fn new_object_store(store_config: &ObjectStoreConfig) -> Result
.build()
.context(error::InitBackendSnafu { dir: &data_dir })?;
- let object_store = ObjectStore::new(accessor).layer(LoggingLayer); // Add logging
+ let object_store = ObjectStore::new(accessor)
+ // Add retry
+ .layer(RetryLayer::new(ExponentialBackoff::default().with_jitter()))
+ // Add metrics
+ .layer(MetricsLayer)
+ // Add logging
+ .layer(LoggingLayer)
+ // Add tracing
+ .layer(TracingLayer);
Ok(object_store)
}
diff --git a/src/object-store/Cargo.toml b/src/object-store/Cargo.toml
index e7e63109e132..c85ae8c1729d 100644
--- a/src/object-store/Cargo.toml
+++ b/src/object-store/Cargo.toml
@@ -6,7 +6,7 @@ license = "Apache-2.0"
[dependencies]
futures = { version = "0.3" }
-opendal = "0.20"
+opendal = { version = "0.20", features = ["layers-tracing", "layers-metrics"]}
tokio = { version = "1.0", features = ["full"] }
[dev-dependencies]
|
feat
|
adds metrics, tracing and retry layer to object-store (#621)
|
e9f7579091bee9e5b4fdf264219d2e58bc06149b
|
2023-11-06 16:48:47
|
Niwaka
|
feat: support region ddl for custom_storage (#2679)
| false
|
diff --git a/src/mito2/src/engine/create_test.rs b/src/mito2/src/engine/create_test.rs
index bfe9af8cbbdd..eb1cb7169013 100644
--- a/src/mito2/src/engine/create_test.rs
+++ b/src/mito2/src/engine/create_test.rs
@@ -166,3 +166,35 @@ async fn test_engine_create_with_options() {
region.version().options.ttl.unwrap()
);
}
+
+#[tokio::test]
+async fn test_engine_create_with_custom_store() {
+ let mut env = TestEnv::new();
+ let engine = env
+ .create_engine_with_multiple_object_stores(MitoConfig::default(), None, None, &["Gcs"])
+ .await;
+ let region_id = RegionId::new(1, 1);
+ let request = CreateRequestBuilder::new()
+ .insert_option("storage", "Gcs")
+ .build();
+ engine
+ .handle_request(region_id, RegionRequest::Create(request))
+ .await
+ .unwrap();
+ assert!(engine.is_region_exists(region_id));
+ let region = engine.get_region(region_id).unwrap();
+ let region_dir = region.access_layer.region_dir();
+
+ let object_store_manager = env.get_object_store_manager().unwrap();
+ assert!(object_store_manager
+ .find("Gcs")
+ .unwrap()
+ .is_exist(region_dir)
+ .await
+ .unwrap());
+ assert!(!object_store_manager
+ .default_object_store()
+ .is_exist(region_dir)
+ .await
+ .unwrap());
+}
diff --git a/src/mito2/src/engine/drop_test.rs b/src/mito2/src/engine/drop_test.rs
index 0ffbddca65a7..c4a4790cb62f 100644
--- a/src/mito2/src/engine/drop_test.rs
+++ b/src/mito2/src/engine/drop_test.rs
@@ -23,6 +23,7 @@ use store_api::storage::RegionId;
use crate::config::MitoConfig;
use crate::engine::listener::DropListener;
+use crate::engine::MitoEngine;
use crate::test_util::{
build_rows_for_key, flush_region, put_rows, rows_schema, CreateRequestBuilder, TestEnv,
};
@@ -82,3 +83,84 @@ async fn test_engine_drop_region() {
let object_store = env.get_object_store().unwrap();
assert!(!object_store.is_exist(&region_dir).await.unwrap());
}
+
+#[tokio::test]
+async fn test_engine_drop_region_for_custom_store() {
+ common_telemetry::init_default_ut_logging();
+ async fn setup(engine: &MitoEngine, region_id: RegionId, storage_name: &str) {
+ let request = CreateRequestBuilder::new()
+ .insert_option("storage", storage_name)
+ .region_dir(storage_name)
+ .build();
+ let column_schema = rows_schema(&request);
+ engine
+ .handle_request(region_id, RegionRequest::Create(request))
+ .await
+ .unwrap();
+ let rows = Rows {
+ schema: column_schema.clone(),
+ rows: build_rows_for_key("a", 0, 2, 0),
+ };
+ put_rows(engine, region_id, rows).await;
+ flush_region(engine, region_id, None).await;
+ }
+ let mut env = TestEnv::with_prefix("drop");
+ let listener = Arc::new(DropListener::new(Duration::from_millis(100)));
+ let engine = env
+ .create_engine_with_multiple_object_stores(
+ MitoConfig::default(),
+ None,
+ Some(listener.clone()),
+ &["Gcs"],
+ )
+ .await;
+ let object_store_manager = env.get_object_store_manager().unwrap();
+
+ let global_region_id = RegionId::new(1, 1);
+ setup(&engine, global_region_id, "default").await;
+ let custom_region_id = RegionId::new(2, 1);
+ setup(&engine, custom_region_id, "Gcs").await;
+
+ let global_region = engine.get_region(global_region_id).unwrap();
+ let global_region_dir = global_region.access_layer.region_dir().to_string();
+
+ let custom_region = engine.get_region(custom_region_id).unwrap();
+ let custom_region_dir = custom_region.access_layer.region_dir().to_string();
+
+ // Both these regions should exist before dropping the custom region.
+ assert!(object_store_manager
+ .find("Gcs")
+ .unwrap()
+ .is_exist(&custom_region_dir)
+ .await
+ .unwrap());
+ assert!(object_store_manager
+ .find("default")
+ .unwrap()
+ .is_exist(&global_region_dir)
+ .await
+ .unwrap());
+
+ // Drop the custom region.
+ engine
+ .handle_request(custom_region_id, RegionRequest::Drop(RegionDropRequest {}))
+ .await
+ .unwrap();
+ assert!(!engine.is_region_exists(custom_region_id));
+
+ // Wait for drop task.
+ listener.wait().await;
+
+ assert!(!object_store_manager
+ .find("Gcs")
+ .unwrap()
+ .is_exist(&custom_region_dir)
+ .await
+ .unwrap());
+ assert!(object_store_manager
+ .find("default")
+ .unwrap()
+ .is_exist(&global_region_dir)
+ .await
+ .unwrap());
+}
diff --git a/src/mito2/src/engine/open_test.rs b/src/mito2/src/engine/open_test.rs
index 74cc1e0df810..39c703c5c7a5 100644
--- a/src/mito2/src/engine/open_test.rs
+++ b/src/mito2/src/engine/open_test.rs
@@ -172,3 +172,56 @@ async fn test_engine_region_open_with_options() {
region.version().options.ttl.unwrap()
);
}
+
+#[tokio::test]
+async fn test_engine_region_open_with_custom_store() {
+ let mut env = TestEnv::new();
+ let engine = env
+ .create_engine_with_multiple_object_stores(MitoConfig::default(), None, None, &["Gcs"])
+ .await;
+ let region_id = RegionId::new(1, 1);
+ let request = CreateRequestBuilder::new()
+ .insert_option("storage", "Gcs")
+ .build();
+ let region_dir = request.region_dir.clone();
+
+ // Create a custom region.
+ engine
+ .handle_request(region_id, RegionRequest::Create(request.clone()))
+ .await
+ .unwrap();
+
+ // Close the custom region.
+ engine
+ .handle_request(region_id, RegionRequest::Close(RegionCloseRequest {}))
+ .await
+ .unwrap();
+
+ // Open the custom region.
+ engine
+ .handle_request(
+ region_id,
+ RegionRequest::Open(RegionOpenRequest {
+ engine: String::new(),
+ region_dir,
+ options: HashMap::from([("storage".to_string(), "Gcs".to_string())]),
+ }),
+ )
+ .await
+ .unwrap();
+
+ // The region should not be opened with the default object store.
+ let region = engine.get_region(region_id).unwrap();
+ let object_store_manager = env.get_object_store_manager().unwrap();
+ assert!(!object_store_manager
+ .default_object_store()
+ .is_exist(region.access_layer.region_dir())
+ .await
+ .unwrap());
+ assert!(object_store_manager
+ .find("Gcs")
+ .unwrap()
+ .is_exist(region.access_layer.region_dir())
+ .await
+ .unwrap());
+}
diff --git a/src/mito2/src/error.rs b/src/mito2/src/error.rs
index e6772244a045..5d76901a36c6 100644
--- a/src/mito2/src/error.rs
+++ b/src/mito2/src/error.rs
@@ -142,6 +142,12 @@ pub enum Error {
location: Location,
},
+ #[snafu(display("Object store not found: {}", object_store))]
+ ObjectStoreNotFound {
+ object_store: String,
+ location: Location,
+ },
+
#[snafu(display("Region {} is corrupted, reason: {}", region_id, reason))]
RegionCorrupted {
region_id: RegionId,
@@ -427,7 +433,8 @@ impl ErrorExt for Error {
| CreateDefault { .. }
| InvalidParquet { .. } => StatusCode::Unexpected,
RegionNotFound { .. } => StatusCode::RegionNotFound,
- InvalidScanIndex { .. }
+ ObjectStoreNotFound { .. }
+ | InvalidScanIndex { .. }
| InvalidMeta { .. }
| InvalidRequest { .. }
| FillDefault { .. }
diff --git a/src/mito2/src/region/opener.rs b/src/mito2/src/region/opener.rs
index a224ee1e195b..0baa8dac50b1 100644
--- a/src/mito2/src/region/opener.rs
+++ b/src/mito2/src/region/opener.rs
@@ -21,8 +21,8 @@ use std::sync::Arc;
use common_telemetry::{debug, error, info, warn};
use common_time::util::current_time_millis;
use futures::StreamExt;
+use object_store::manager::ObjectStoreManagerRef;
use object_store::util::{join_dir, normalize_dir};
-use object_store::ObjectStore;
use snafu::{ensure, OptionExt};
use store_api::logstore::LogStore;
use store_api::metadata::{ColumnMetadata, RegionMetadata};
@@ -31,7 +31,7 @@ use store_api::storage::{ColumnId, RegionId};
use crate::access_layer::AccessLayer;
use crate::cache::CacheManagerRef;
use crate::config::MitoConfig;
-use crate::error::{EmptyRegionDirSnafu, RegionCorruptedSnafu, Result};
+use crate::error::{EmptyRegionDirSnafu, ObjectStoreNotFoundSnafu, RegionCorruptedSnafu, Result};
use crate::manifest::manager::{RegionManifestManager, RegionManifestOptions};
use crate::memtable::MemtableBuilderRef;
use crate::region::options::RegionOptions;
@@ -48,7 +48,7 @@ pub(crate) struct RegionOpener {
region_id: RegionId,
metadata: Option<RegionMetadata>,
memtable_builder: MemtableBuilderRef,
- object_store: ObjectStore,
+ object_store_manager: ObjectStoreManagerRef,
region_dir: String,
scheduler: SchedulerRef,
options: HashMap<String, String>,
@@ -61,14 +61,14 @@ impl RegionOpener {
region_id: RegionId,
region_dir: &str,
memtable_builder: MemtableBuilderRef,
- object_store: ObjectStore,
+ object_store_manager: ObjectStoreManagerRef,
scheduler: SchedulerRef,
) -> RegionOpener {
RegionOpener {
region_id,
metadata: None,
memtable_builder,
- object_store,
+ object_store_manager,
region_dir: normalize_dir(region_dir),
scheduler,
options: HashMap::new(),
@@ -105,7 +105,6 @@ impl RegionOpener {
wal: &Wal<S>,
) -> Result<MitoRegion> {
let region_id = self.region_id;
- let options = self.manifest_options(config);
// Tries to open the region.
match self.maybe_open(config, wal).await {
@@ -136,19 +135,22 @@ impl RegionOpener {
);
}
}
+ let options = RegionOptions::try_from(&self.options)?;
+ let object_store = self.object_store(&options.storage)?.clone();
- let metadata = Arc::new(self.metadata.unwrap());
// Create a manifest manager for this region and writes regions to the manifest file.
- let manifest_manager = RegionManifestManager::new(metadata.clone(), options).await?;
+ let region_manifest_options = self.manifest_options(config, &options)?;
+ let metadata = Arc::new(self.metadata.unwrap());
+ let manifest_manager =
+ RegionManifestManager::new(metadata.clone(), region_manifest_options).await?;
let mutable = self.memtable_builder.build(&metadata);
- let options = RegionOptions::try_from(&self.options)?;
let version = VersionBuilder::new(metadata, mutable)
.options(options)
.build();
let version_control = Arc::new(VersionControl::new(version));
- let access_layer = Arc::new(AccessLayer::new(self.region_dir, self.object_store.clone()));
+ let access_layer = Arc::new(AccessLayer::new(self.region_dir, object_store));
Ok(MitoRegion {
region_id,
@@ -203,8 +205,10 @@ impl RegionOpener {
config: &MitoConfig,
wal: &Wal<S>,
) -> Result<Option<MitoRegion>> {
- let options = self.manifest_options(config);
- let Some(manifest_manager) = RegionManifestManager::open(options).await? else {
+ let region_options = RegionOptions::try_from(&self.options)?;
+ let region_manifest_options = self.manifest_options(config, &region_options)?;
+ let Some(manifest_manager) = RegionManifestManager::open(region_manifest_options).await?
+ else {
return Ok(None);
};
@@ -212,24 +216,21 @@ impl RegionOpener {
let metadata = manifest.metadata.clone();
let region_id = self.region_id;
- let access_layer = Arc::new(AccessLayer::new(
- self.region_dir.clone(),
- self.object_store.clone(),
- ));
+ let object_store = self.object_store(&region_options.storage)?.clone();
+ let access_layer = Arc::new(AccessLayer::new(self.region_dir.clone(), object_store));
let file_purger = Arc::new(LocalFilePurger::new(
self.scheduler.clone(),
access_layer.clone(),
self.cache_manager.clone(),
));
let mutable = self.memtable_builder.build(&metadata);
- let options = RegionOptions::try_from(&self.options)?;
let version = VersionBuilder::new(metadata, mutable)
.add_files(file_purger.clone(), manifest.files.values().cloned())
.flushed_entry_id(manifest.flushed_entry_id)
.flushed_sequence(manifest.flushed_sequence)
.truncated_entry_id(manifest.truncated_entry_id)
.compaction_time_window(manifest.compaction_time_window)
- .options(options)
+ .options(region_options)
.build();
let flushed_entry_id = version.flushed_entry_id;
let version_control = Arc::new(VersionControl::new(version));
@@ -249,12 +250,31 @@ impl RegionOpener {
}
/// Returns a new manifest options.
- fn manifest_options(&self, config: &MitoConfig) -> RegionManifestOptions {
- RegionManifestOptions {
+ fn manifest_options(
+ &self,
+ config: &MitoConfig,
+ options: &RegionOptions,
+ ) -> Result<RegionManifestOptions> {
+ let object_store = self.object_store(&options.storage)?.clone();
+ Ok(RegionManifestOptions {
manifest_dir: new_manifest_dir(&self.region_dir),
- object_store: self.object_store.clone(),
+ object_store,
compress_type: config.manifest_compress_type,
checkpoint_distance: config.manifest_checkpoint_distance,
+ })
+ }
+
+ /// Returns an object store corresponding to `name`. If `name` is `None`, this method returns the default object store.
+ fn object_store(&self, name: &Option<String>) -> Result<&object_store::ObjectStore> {
+ if let Some(name) = name {
+ Ok(self
+ .object_store_manager
+ .find(name)
+ .context(ObjectStoreNotFoundSnafu {
+ object_store: name.to_string(),
+ })?)
+ } else {
+ Ok(self.object_store_manager.default_object_store())
}
}
}
diff --git a/src/mito2/src/region/options.rs b/src/mito2/src/region/options.rs
index c8ef80ddf513..98f863168b08 100644
--- a/src/mito2/src/region/options.rs
+++ b/src/mito2/src/region/options.rs
@@ -35,6 +35,8 @@ pub struct RegionOptions {
pub ttl: Option<Duration>,
/// Compaction options.
pub compaction: CompactionOptions,
+ /// Custom storage.
+ pub storage: Option<String>,
}
impl TryFrom<&HashMap<String, String>> for RegionOptions {
@@ -54,6 +56,7 @@ impl TryFrom<&HashMap<String, String>> for RegionOptions {
Ok(RegionOptions {
ttl: options.ttl,
compaction,
+ storage: options.storage,
})
}
}
@@ -124,12 +127,16 @@ struct RegionOptionsWithoutEnum {
/// Region SST files TTL.
#[serde(with = "humantime_serde")]
ttl: Option<Duration>,
+ storage: Option<String>,
}
impl Default for RegionOptionsWithoutEnum {
fn default() -> Self {
let options = RegionOptions::default();
- RegionOptionsWithoutEnum { ttl: options.ttl }
+ RegionOptionsWithoutEnum {
+ ttl: options.ttl,
+ storage: options.storage,
+ }
}
}
@@ -181,6 +188,17 @@ mod tests {
assert_eq!(expect, options);
}
+ #[test]
+ fn test_with_storage() {
+ let map = make_map(&[("storage", "S3")]);
+ let options = RegionOptions::try_from(&map).unwrap();
+ let expect = RegionOptions {
+ storage: Some("s3".to_string()),
+ ..Default::default()
+ };
+ assert_eq!(expect, options);
+ }
+
#[test]
fn test_without_compaction_type() {
// If `compaction.type` is not provided, we ignore all compaction
@@ -222,6 +240,7 @@ mod tests {
("compaction.twcs.max_inactive_window_files", "2"),
("compaction.twcs.time_window", "2h"),
("compaction.type", "twcs"),
+ ("storage", "S3"),
]);
let options = RegionOptions::try_from(&map).unwrap();
let expect = RegionOptions {
@@ -231,6 +250,7 @@ mod tests {
max_inactive_window_files: 2,
time_window: Some(Duration::from_secs(3600 * 2)),
}),
+ storage: Some("s3".to_string()),
};
assert_eq!(expect, options);
}
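A hedged sketch of how the new `storage` option surfaces through `RegionOptions`, mirroring the test above; note the value is normalized to lowercase during parsing:

// Sketch: the storage name survives option parsing in lowercase form.
let map = HashMap::from([("storage".to_string(), "Gcs".to_string())]);
let options = RegionOptions::try_from(&map).unwrap();
assert_eq!(options.storage.as_deref(), Some("gcs"));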
diff --git a/src/mito2/src/test_util.rs b/src/mito2/src/test_util.rs
index 7d49bb2348ad..69bd22d26e1b 100644
--- a/src/mito2/src/test_util.rs
+++ b/src/mito2/src/test_util.rs
@@ -124,6 +124,10 @@ impl TestEnv {
self.data_home.path()
}
+ pub fn get_object_store_manager(&self) -> Option<Arc<ObjectStoreManager>> {
+ self.object_store_manager.clone()
+ }
+
/// Creates a new engine with specific config under this env.
pub async fn create_engine(&mut self, config: MitoConfig) -> MitoEngine {
let (log_store, object_store_manager) = self.create_log_and_object_store_manager().await;
@@ -151,6 +155,35 @@ impl TestEnv {
MitoEngine::new_for_test(config, logstore, object_store_manager, manager, listener)
}
+ pub async fn create_engine_with_multiple_object_stores(
+ &mut self,
+ config: MitoConfig,
+ manager: Option<WriteBufferManagerRef>,
+ listener: Option<EventListenerRef>,
+ custom_storage_names: &[&str],
+ ) -> MitoEngine {
+ let (logstore, mut object_store_manager) = self.create_log_and_object_store_manager().await;
+ for storage_name in custom_storage_names {
+ let data_path = self
+ .data_home
+ .path()
+ .join("data")
+ .join(storage_name)
+ .as_path()
+ .display()
+ .to_string();
+ let mut builder = Fs::default();
+ builder.root(&data_path);
+ let object_store = ObjectStore::new(builder).unwrap().finish();
+ object_store_manager.add(storage_name, object_store);
+ }
+ let logstore = Arc::new(logstore);
+ let object_store_manager = Arc::new(object_store_manager);
+ self.logstore = Some(logstore.clone());
+ self.object_store_manager = Some(object_store_manager.clone());
+ MitoEngine::new_for_test(config, logstore, object_store_manager, manager, listener)
+ }
+
/// Reopen the engine.
pub async fn reopen_engine(&mut self, engine: MitoEngine, config: MitoConfig) -> MitoEngine {
engine.stop().await.unwrap();
diff --git a/src/mito2/src/worker/handle_create.rs b/src/mito2/src/worker/handle_create.rs
index a44c82153f00..7647866494f2 100644
--- a/src/mito2/src/worker/handle_create.rs
+++ b/src/mito2/src/worker/handle_create.rs
@@ -55,13 +55,12 @@ impl<S: LogStore> RegionWorkerLoop<S> {
}
builder.primary_key(request.primary_key);
let metadata = builder.build().context(InvalidMetadataSnafu)?;
-
// Create a MitoRegion from the RegionMetadata.
let region = RegionOpener::new(
region_id,
&request.region_dir,
self.memtable_builder.clone(),
- self.object_store_manager.default_object_store().clone(),
+ self.object_store_manager.clone(),
self.scheduler.clone(),
)
.metadata(metadata)
diff --git a/src/mito2/src/worker/handle_open.rs b/src/mito2/src/worker/handle_open.rs
index e902c7896812..a90a64e395b0 100644
--- a/src/mito2/src/worker/handle_open.rs
+++ b/src/mito2/src/worker/handle_open.rs
@@ -19,12 +19,12 @@ use std::sync::Arc;
use common_query::Output;
use common_telemetry::info;
use object_store::util::join_path;
-use snafu::ResultExt;
+use snafu::{OptionExt, ResultExt};
use store_api::logstore::LogStore;
use store_api::region_request::RegionOpenRequest;
use store_api::storage::RegionId;
-use crate::error::{OpenDalSnafu, RegionNotFoundSnafu, Result};
+use crate::error::{ObjectStoreNotFoundSnafu, OpenDalSnafu, RegionNotFoundSnafu, Result};
use crate::metrics::REGION_COUNT;
use crate::region::opener::RegionOpener;
use crate::worker::handle_drop::remove_region_dir_once;
@@ -39,21 +39,23 @@ impl<S: LogStore> RegionWorkerLoop<S> {
if self.regions.is_region_exists(region_id) {
return Ok(Output::AffectedRows(0));
}
-
+ let object_store = if let Some(storage_name) = request.options.get("storage") {
+ self.object_store_manager
+ .find(storage_name)
+ .context(ObjectStoreNotFoundSnafu {
+ object_store: storage_name.to_string(),
+ })?
+ } else {
+ self.object_store_manager.default_object_store()
+ };
// Check if this region is pending drop. And clean the entire dir if so.
if !self.dropping_regions.is_region_exists(region_id)
- && self
- .object_store_manager
- .default_object_store()
+ && object_store
.is_exist(&join_path(&request.region_dir, DROPPING_MARKER_FILE))
.await
.context(OpenDalSnafu)?
{
- let result = remove_region_dir_once(
- &request.region_dir,
- self.object_store_manager.default_object_store(),
- )
- .await;
+ let result = remove_region_dir_once(&request.region_dir, object_store).await;
info!("Region {} is dropped, result: {:?}", region_id, result);
return RegionNotFoundSnafu { region_id }.fail();
}
@@ -65,7 +67,7 @@ impl<S: LogStore> RegionWorkerLoop<S> {
region_id,
&request.region_dir,
self.memtable_builder.clone(),
- self.object_store_manager.default_object_store().clone(),
+ self.object_store_manager.clone(),
self.scheduler.clone(),
)
.options(request.options)
diff --git a/src/object-store/src/manager.rs b/src/object-store/src/manager.rs
index 138e56b1d5d1..fb6d73321967 100644
--- a/src/object-store/src/manager.rs
+++ b/src/object-store/src/manager.rs
@@ -21,6 +21,7 @@ pub type ObjectStoreManagerRef = Arc<ObjectStoreManager>;
/// Manages multiple object stores so that users can configure a storage for each table.
/// This struct always has one default object store, and can have zero or more custom object stores.
+#[derive(Debug)]
pub struct ObjectStoreManager {
stores: HashMap<String, ObjectStore>,
default_object_store: ObjectStore,
@@ -30,19 +31,19 @@ impl ObjectStoreManager {
/// Creates a new manager from the object store used as a default one.
pub fn new(name: &str, object_store: ObjectStore) -> Self {
ObjectStoreManager {
- stores: [(name.to_string(), object_store.clone())].into(),
+ stores: [(name.to_lowercase(), object_store.clone())].into(),
default_object_store: object_store,
}
}
/// Adds an object store to the manager.
pub fn add(&mut self, name: &str, object_store: ObjectStore) {
- self.stores.insert(name.to_string(), object_store);
+ self.stores.insert(name.to_lowercase(), object_store);
}
/// Finds an object store corresponding to the name.
pub fn find(&self, name: &str) -> Option<&ObjectStore> {
- self.stores.get(name)
+ self.stores.get(&name.to_lowercase())
}
pub fn default_object_store(&self) -> &ObjectStore {
@@ -68,10 +69,12 @@ mod tests {
#[test]
fn test_manager_behavior() {
let dir = create_temp_dir("default");
- let mut manager = ObjectStoreManager::new("default", new_object_store(&dir));
+ let mut manager = ObjectStoreManager::new("Default", new_object_store(&dir));
assert!(manager.find("default").is_some());
+ assert!(manager.find("Default").is_some());
assert!(manager.find("Gcs").is_none());
+ assert!(manager.find("gcs").is_none());
let dir = create_temp_dir("default");
manager.add("Gcs", new_object_store(&dir));
@@ -79,5 +82,6 @@ mod tests {
// Should not overwrite the default object store with the new one.
assert!(manager.find("default").is_some());
assert!(manager.find("Gcs").is_some());
+ assert!(manager.find("gcs").is_some());
}
}
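
The manager change above keys stores by their lowercased name on both insert and lookup. A minimal std-only sketch of that behavior (plain `String`s stand in for the real `ObjectStore` type; `StoreRegistry` is an illustrative name, not the crate's API):

use std::collections::HashMap;

struct StoreRegistry {
    default_store: String,
    stores: HashMap<String, String>,
}

impl StoreRegistry {
    fn new(name: &str, store: String) -> Self {
        StoreRegistry {
            stores: HashMap::from([(name.to_lowercase(), store.clone())]),
            default_store: store,
        }
    }

    // Keys are lowercased on insert and on lookup, so "Gcs", "gcs" and "GCS"
    // all resolve to the same entry, matching the test above.
    fn add(&mut self, name: &str, store: String) {
        self.stores.insert(name.to_lowercase(), store);
    }

    fn find(&self, name: &str) -> Option<&String> {
        self.stores.get(&name.to_lowercase())
    }

    fn default_store(&self) -> &String {
        &self.default_store
    }
}

fn main() {
    let mut registry = StoreRegistry::new("Default", "fs://data".to_string());
    registry.add("Gcs", "gcs://bucket".to_string());
    assert!(registry.find("default").is_some());
    assert!(registry.find("gcs").is_some());
    assert!(registry.find("s3").is_none());
    assert_eq!(registry.default_store(), "fs://data");
}
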
|
feat
|
support region ddl for custom_storage (#2679)
|
8a119aa0b291bff2446e5da45c54e0a7060e3784
|
2024-07-05 12:24:23
|
Weny Xu
|
feat: add naive region failover test for metric table (#4269)
| false
|
diff --git a/.github/workflows/develop.yml b/.github/workflows/develop.yml
index 53f089bf5172..875b59c768ce 100644
--- a/.github/workflows/develop.yml
+++ b/.github/workflows/develop.yml
@@ -419,7 +419,7 @@ jobs:
timeout-minutes: 60
strategy:
matrix:
- target: ["fuzz_migrate_mito_regions", "fuzz_failover_mito_regions"]
+ target: ["fuzz_migrate_mito_regions", "fuzz_failover_mito_regions", "fuzz_failover_metric_regions"]
mode:
- name: "Remote WAL"
minio: true
diff --git a/Cargo.lock b/Cargo.lock
index 3a8f62aeb665..3ff9e27bbf1c 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -11490,6 +11490,7 @@ dependencies = [
"libfuzzer-sys",
"nix 0.28.0",
"partition",
+ "paste",
"rand",
"rand_chacha",
"reqwest",
diff --git a/tests-fuzz/Cargo.toml b/tests-fuzz/Cargo.toml
index 1cb647b0221c..b0c33a1e28ff 100644
--- a/tests-fuzz/Cargo.toml
+++ b/tests-fuzz/Cargo.toml
@@ -40,6 +40,7 @@ lazy_static = { workspace = true }
libfuzzer-sys = "0.4"
nix = { version = "0.28", features = ["process", "signal"], optional = true }
partition = { workspace = true }
+paste.workspace = true
rand = { workspace = true }
rand_chacha = "0.3.1"
reqwest = { workspace = true }
@@ -128,6 +129,13 @@ test = false
bench = false
doc = false
+[[bin]]
+name = "fuzz_failover_metric_regions"
+path = "targets/failover/fuzz_failover_metric_regions.rs"
+test = false
+bench = false
+doc = false
+
[[bin]]
name = "fuzz_migrate_mito_regions"
path = "targets/migration/fuzz_migrate_mito_regions.rs"
diff --git a/tests-fuzz/src/utils.rs b/tests-fuzz/src/utils.rs
index d1b75e51d3c8..90ffe0c40e22 100644
--- a/tests-fuzz/src/utils.rs
+++ b/tests-fuzz/src/utils.rs
@@ -28,6 +28,7 @@ use std::env;
use common_telemetry::info;
use common_telemetry::tracing::log::LevelFilter;
+use paste::paste;
use snafu::ResultExt;
use sqlx::mysql::{MySqlConnectOptions, MySqlPoolOptions};
use sqlx::{ConnectOptions, MySql, Pool};
@@ -126,9 +127,28 @@ pub const GT_FUZZ_INPUT_MAX_COLUMNS: &str = "GT_FUZZ_INPUT_MAX_COLUMNS";
pub const GT_FUZZ_INPUT_MAX_ALTER_ACTIONS: &str = "GT_FUZZ_INPUT_MAX_ALTER_ACTIONS";
pub const GT_FUZZ_INPUT_MAX_INSERT_ACTIONS: &str = "GT_FUZZ_INPUT_MAX_INSERT_ACTIONS";
+macro_rules! make_get_from_env_helper {
+ ($key:expr, $default: expr) => {
+ paste! {
+ #[doc = "Retrieves `" $key "` environment variable \
+ or returns a default value (`" $default "`) if the environment variable is not set.
+ "]
+ pub fn [<get_ $key:lower>]() -> usize {
+ get_from_env_or_default_value($key, $default)
+ }
+ }
+ };
+}
+
+make_get_from_env_helper!(GT_FUZZ_INPUT_MAX_ALTER_ACTIONS, 256);
+make_get_from_env_helper!(GT_FUZZ_INPUT_MAX_INSERT_ACTIONS, 8);
+make_get_from_env_helper!(GT_FUZZ_INPUT_MAX_ROWS, 2048);
+make_get_from_env_helper!(GT_FUZZ_INPUT_MAX_TABLES, 64);
+make_get_from_env_helper!(GT_FUZZ_INPUT_MAX_COLUMNS, 32);
+
/// Retrieves a value from the environment variables
/// or returns a default value if the environment variable is not set.
-pub fn get_from_env_or_default_value(key: &str, default_value: usize) -> usize {
+fn get_from_env_or_default_value(key: &str, default_value: usize) -> usize {
let _ = dotenv::dotenv();
if let Ok(value) = env::var(key) {
value.parse().unwrap()
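
The `make_get_from_env_helper!` macro above uses `paste` to splice the variable name into a lowercase getter identifier. A stripped-down sketch of the same pattern, assuming only the `paste` crate added in the diff's Cargo.toml (`make_env_getter` and its key are illustrative):

use std::env;

use paste::paste;

// Generates `get_<key lowercased>()` reading the env var named after the key,
// falling back to the default when the variable is unset or unparsable.
macro_rules! make_env_getter {
    ($key:ident, $default:expr) => {
        paste! {
            pub fn [<get_ $key:lower>]() -> usize {
                env::var(stringify!($key))
                    .ok()
                    .and_then(|v| v.parse().ok())
                    .unwrap_or($default)
            }
        }
    };
}

make_env_getter!(GT_FUZZ_INPUT_MAX_ROWS, 2048);

fn main() {
    // Reads `GT_FUZZ_INPUT_MAX_ROWS` from the environment, or defaults to 2048.
    println!("max rows: {}", get_gt_fuzz_input_max_rows());
}
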
diff --git a/tests-fuzz/targets/failover/fuzz_failover_metric_regions.rs b/tests-fuzz/targets/failover/fuzz_failover_metric_regions.rs
new file mode 100644
index 000000000000..d9dc4ba6fa0f
--- /dev/null
+++ b/tests-fuzz/targets/failover/fuzz_failover_metric_regions.rs
@@ -0,0 +1,303 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#![no_main]
+use std::collections::HashMap;
+use std::env;
+use std::sync::Arc;
+use std::time::Duration;
+
+use arbitrary::{Arbitrary, Unstructured};
+use common_telemetry::info;
+use libfuzzer_sys::fuzz_target;
+use rand::{Rng, SeedableRng};
+use rand_chacha::{ChaCha20Rng, ChaChaRng};
+use snafu::{ensure, ResultExt};
+use sqlx::{Executor, MySql, Pool};
+use tests_fuzz::context::{TableContext, TableContextRef};
+use tests_fuzz::error::{self, Result};
+use tests_fuzz::fake::{
+ merge_two_word_map_fn, random_capitalize_map, uppercase_and_keyword_backtick_map,
+ MappedGenerator, WordGenerator,
+};
+use tests_fuzz::generator::create_expr::{
+ CreateLogicalTableExprGeneratorBuilder, CreatePhysicalTableExprGeneratorBuilder,
+};
+use tests_fuzz::generator::insert_expr::InsertExprGeneratorBuilder;
+use tests_fuzz::generator::Generator;
+use tests_fuzz::ir::{
+ generate_random_timestamp_for_mysql, generate_random_value, CreateTableExpr, InsertIntoExpr,
+};
+use tests_fuzz::translator::mysql::create_expr::CreateTableExprTranslator;
+use tests_fuzz::translator::mysql::insert_expr::InsertIntoExprTranslator;
+use tests_fuzz::translator::DslTranslator;
+use tests_fuzz::utils::cluster_info::wait_for_all_datanode_online;
+use tests_fuzz::utils::partition::{
+ fetch_partitions, region_distribution, wait_for_all_regions_evicted,
+};
+use tests_fuzz::utils::pod_failure::{inject_datanode_pod_failure, recover_pod_failure};
+use tests_fuzz::utils::{
+ compact_table, flush_memtable, get_gt_fuzz_input_max_rows, get_gt_fuzz_input_max_tables,
+ init_greptime_connections_via_env, Connections, GT_FUZZ_CLUSTER_NAME,
+ GT_FUZZ_CLUSTER_NAMESPACE,
+};
+use tests_fuzz::validator::row::count_values;
+
+struct FuzzContext {
+ greptime: Pool<MySql>,
+ kube: kube::client::Client,
+ namespace: String,
+ cluster_name: String,
+}
+
+impl FuzzContext {
+ async fn close(self) {
+ self.greptime.close().await;
+ }
+}
+
+#[derive(Copy, Clone, Debug)]
+struct FuzzInput {
+ seed: u64,
+ rows: usize,
+ tables: usize,
+}
+
+impl Arbitrary<'_> for FuzzInput {
+ fn arbitrary(u: &mut Unstructured<'_>) -> arbitrary::Result<Self> {
+ let seed = u.int_in_range(u64::MIN..=u64::MAX)?;
+ let mut rng = ChaChaRng::seed_from_u64(seed);
+ let max_rows = get_gt_fuzz_input_max_rows();
+ let rows = rng.gen_range(2..max_rows);
+ let max_tables = get_gt_fuzz_input_max_tables();
+ let tables = rng.gen_range(1..max_tables);
+ Ok(FuzzInput { rows, seed, tables })
+ }
+}
+
+fn generate_create_physical_table_expr<R: Rng + 'static>(rng: &mut R) -> Result<CreateTableExpr> {
+ let physical_table_if_not_exists = rng.gen_bool(0.5);
+ let create_physical_table_expr = CreatePhysicalTableExprGeneratorBuilder::default()
+ .name_generator(Box::new(MappedGenerator::new(
+ WordGenerator,
+ merge_two_word_map_fn(random_capitalize_map, uppercase_and_keyword_backtick_map),
+ )))
+ .if_not_exists(physical_table_if_not_exists)
+ .build()
+ .unwrap();
+ create_physical_table_expr.generate(rng)
+}
+
+async fn create_physical_table<R: Rng + 'static>(
+ ctx: &FuzzContext,
+ rng: &mut R,
+) -> Result<TableContextRef> {
+ // Create a physical table and a logical table on top of it
+ let create_physical_table_expr = generate_create_physical_table_expr(rng).unwrap();
+ let translator = CreateTableExprTranslator;
+ let sql = translator.translate(&create_physical_table_expr)?;
+ let result = sqlx::query(&sql)
+ .execute(&ctx.greptime)
+ .await
+ .context(error::ExecuteQuerySnafu { sql: &sql })?;
+ info!("Create physical table: {sql}, result: {result:?}");
+
+ Ok(Arc::new(TableContext::from(&create_physical_table_expr)))
+}
+
+fn generate_create_logical_table_expr<R: Rng + 'static>(
+ physical_table_ctx: TableContextRef,
+ rng: &mut R,
+) -> Result<CreateTableExpr> {
+ let labels = rng.gen_range(1..=5);
+ let logical_table_if_not_exists = rng.gen_bool(0.5);
+
+ let create_logical_table_expr = CreateLogicalTableExprGeneratorBuilder::default()
+ .name_generator(Box::new(MappedGenerator::new(
+ WordGenerator,
+ merge_two_word_map_fn(random_capitalize_map, uppercase_and_keyword_backtick_map),
+ )))
+ .physical_table_ctx(physical_table_ctx)
+ .labels(labels)
+ .if_not_exists(logical_table_if_not_exists)
+ .build()
+ .unwrap();
+ create_logical_table_expr.generate(rng)
+}
+
+fn generate_insert_expr<R: Rng + 'static>(
+ rows: usize,
+ rng: &mut R,
+ table_ctx: TableContextRef,
+) -> Result<InsertIntoExpr> {
+ let insert_generator = InsertExprGeneratorBuilder::default()
+ .omit_column_list(false)
+ .table_ctx(table_ctx)
+ .rows(rows)
+ .value_generator(Box::new(generate_random_value))
+ .ts_value_generator(Box::new(generate_random_timestamp_for_mysql))
+ .build()
+ .unwrap();
+ insert_generator.generate(rng)
+}
+
+async fn insert_values<R: Rng + 'static>(
+ rows: usize,
+ ctx: &FuzzContext,
+ rng: &mut R,
+ logical_table_ctx: TableContextRef,
+) -> Result<InsertIntoExpr> {
+ let insert_expr = generate_insert_expr(rows, rng, logical_table_ctx.clone())?;
+ let translator = InsertIntoExprTranslator;
+ let sql = translator.translate(&insert_expr)?;
+ let result = ctx
+ .greptime
+ // unprepared query, see <https://github.com/GreptimeTeam/greptimedb/issues/3500>
+ .execute(sql.as_str())
+ .await
+ .context(error::ExecuteQuerySnafu { sql: &sql })?;
+
+ ensure!(
+ result.rows_affected() == rows as u64,
+ error::AssertSnafu {
+ reason: format!(
+ "expected rows affected: {}, actual: {}",
+ rows,
+ result.rows_affected(),
+ )
+ }
+ );
+
+ Ok(insert_expr)
+}
+
+async fn execute_failover(ctx: FuzzContext, input: FuzzInput) -> Result<()> {
+ let mut rng = ChaCha20Rng::seed_from_u64(input.seed);
+ // Creates a physical table.
+ let physical_table_ctx = create_physical_table(&ctx, &mut rng).await?;
+
+ let mut tables = HashMap::with_capacity(input.tables);
+
+ for _ in 0..input.tables {
+ let translator = CreateTableExprTranslator;
+ let create_logical_table_expr =
+ generate_create_logical_table_expr(physical_table_ctx.clone(), &mut rng).unwrap();
+ if tables.contains_key(&create_logical_table_expr.table_name) {
+ // Ignores same name logical table.
+ continue;
+ }
+ let sql = translator.translate(&create_logical_table_expr)?;
+ let result = sqlx::query(&sql)
+ .execute(&ctx.greptime)
+ .await
+ .context(error::ExecuteQuerySnafu { sql: &sql })?;
+ info!("Create logical table: {sql}, result: {result:?}");
+ let logical_table_ctx = Arc::new(TableContext::from(&create_logical_table_expr));
+
+ let insert_expr =
+ insert_values(input.rows, &ctx, &mut rng, logical_table_ctx.clone()).await?;
+ if rng.gen_bool(0.1) {
+ flush_memtable(&ctx.greptime, &physical_table_ctx.name).await?;
+ }
+ if rng.gen_bool(0.1) {
+ compact_table(&ctx.greptime, &physical_table_ctx.name).await?;
+ }
+
+ tables.insert(
+ logical_table_ctx.name.clone(),
+ (logical_table_ctx.clone(), insert_expr),
+ );
+ }
+
+ let partitions = fetch_partitions(&ctx.greptime, physical_table_ctx.name.clone()).await?;
+ let region_distribution = region_distribution(partitions);
+ let selected_datanode = *region_distribution.keys().next().unwrap();
+ let selected_regions = region_distribution
+ .get(&selected_datanode)
+ .cloned()
+ .unwrap();
+
+ // Injects pod failures
+ info!("Injects pod failures to datanode: {selected_datanode}, regions: {selected_regions:?}");
+ let chaos_name = inject_datanode_pod_failure(
+ ctx.kube.clone(),
+ &ctx.namespace,
+ &ctx.cluster_name,
+ selected_datanode,
+ 360,
+ )
+ .await?;
+
+    // Waits for the number of regions on `selected_datanode` to become 0.
+ wait_for_all_regions_evicted(
+ ctx.greptime.clone(),
+ selected_datanode,
+ Duration::from_secs(300),
+ )
+ .await;
+
+ // Recovers pod failures
+ recover_pod_failure(ctx.kube.clone(), &ctx.namespace, &chaos_name).await?;
+ wait_for_all_datanode_online(ctx.greptime.clone(), Duration::from_secs(60)).await;
+
+ // Validates value rows
+ info!("Validates num of rows");
+
+ for (table_ctx, insert_expr) in tables.values() {
+ let sql = format!("select count(1) as count from {}", table_ctx.name);
+ let values = count_values(&ctx.greptime, &sql).await?;
+ assert_eq!(values.count as usize, insert_expr.values_list.len());
+ }
+
+ // Clean up
+ for (table_ctx, _) in tables.values() {
+ let sql = format!("DROP TABLE {}", table_ctx.name);
+ let result = sqlx::query(&sql)
+ .execute(&ctx.greptime)
+ .await
+ .context(error::ExecuteQuerySnafu { sql })?;
+ info!("Drop table: {}\n\nResult: {result:?}\n\n", table_ctx.name);
+ }
+
+ let sql = format!("DROP TABLE {}", physical_table_ctx.name);
+ let result = sqlx::query(&sql)
+ .execute(&ctx.greptime)
+ .await
+ .context(error::ExecuteQuerySnafu { sql })?;
+ info!(
+ "Drop table: {}\n\nResult: {result:?}\n\n",
+ physical_table_ctx.name
+ );
+
+ ctx.close().await;
+ Ok(())
+}
+
+fuzz_target!(|input: FuzzInput| {
+ common_telemetry::init_default_ut_logging();
+ common_runtime::block_on_write(async {
+ let Connections { mysql } = init_greptime_connections_via_env().await;
+ let ctx = FuzzContext {
+ greptime: mysql.expect("mysql connection init must be succeed"),
+ kube: kube::client::Client::try_default()
+ .await
+ .expect("init kube client"),
+ namespace: env::var(GT_FUZZ_CLUSTER_NAMESPACE).unwrap_or("my-greptimedb".to_string()),
+ cluster_name: env::var(GT_FUZZ_CLUSTER_NAME).unwrap_or("my-greptimedb".to_string()),
+ };
+ execute_failover(ctx, input)
+ .await
+ .unwrap_or_else(|err| panic!("fuzz test must be succeed: {err:?}"));
+ })
+});
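
The `Arbitrary` impl in this new target derives every input parameter from one seed, so a failing case replays deterministically. A condensed sketch of that idea, assuming the `rand` and `rand_chacha` crates the fuzz targets already depend on (`derive_input` is an illustrative helper, not part of the target):

use rand::{Rng, SeedableRng};
use rand_chacha::ChaCha20Rng;

// Derive all fuzz parameters from one seed so any failing input can be replayed.
// The bounds mirror the defaults used by the env helpers above.
fn derive_input(seed: u64, max_rows: usize, max_tables: usize) -> (usize, usize) {
    let mut rng = ChaCha20Rng::seed_from_u64(seed);
    let rows = rng.gen_range(2..max_rows);
    let tables = rng.gen_range(1..max_tables);
    (rows, tables)
}

fn main() {
    // The same seed always produces the same parameters.
    assert_eq!(derive_input(42, 2048, 64), derive_input(42, 2048, 64));
}
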
diff --git a/tests-fuzz/targets/failover/fuzz_failover_mito_regions.rs b/tests-fuzz/targets/failover/fuzz_failover_mito_regions.rs
index 2b6ebd06bc41..3034ce5c095d 100644
--- a/tests-fuzz/targets/failover/fuzz_failover_mito_regions.rs
+++ b/tests-fuzz/targets/failover/fuzz_failover_mito_regions.rs
@@ -51,10 +51,10 @@ use tests_fuzz::utils::partition::{
};
use tests_fuzz::utils::pod_failure::{inject_datanode_pod_failure, recover_pod_failure};
use tests_fuzz::utils::{
- compact_table, flush_memtable, get_from_env_or_default_value,
+ compact_table, flush_memtable, get_gt_fuzz_input_max_columns,
+ get_gt_fuzz_input_max_insert_actions, get_gt_fuzz_input_max_rows, get_gt_fuzz_input_max_tables,
init_greptime_connections_via_env, Connections, GT_FUZZ_CLUSTER_NAME,
- GT_FUZZ_CLUSTER_NAMESPACE, GT_FUZZ_INPUT_MAX_COLUMNS, GT_FUZZ_INPUT_MAX_INSERT_ACTIONS,
- GT_FUZZ_INPUT_MAX_ROWS, GT_FUZZ_INPUT_MAX_TABLES,
+ GT_FUZZ_CLUSTER_NAMESPACE,
};
use tests_fuzz::validator::row::count_values;
use tokio::sync::Semaphore;
@@ -85,13 +85,13 @@ impl Arbitrary<'_> for FuzzInput {
fn arbitrary(u: &mut Unstructured<'_>) -> arbitrary::Result<Self> {
let seed = u.int_in_range(u64::MIN..=u64::MAX)?;
let mut rng = ChaChaRng::seed_from_u64(seed);
- let max_columns = get_from_env_or_default_value(GT_FUZZ_INPUT_MAX_COLUMNS, 64);
+ let max_columns = get_gt_fuzz_input_max_columns();
let columns = rng.gen_range(2..max_columns);
- let max_rows = get_from_env_or_default_value(GT_FUZZ_INPUT_MAX_ROWS, 2048);
+ let max_rows = get_gt_fuzz_input_max_rows();
let rows = rng.gen_range(2..max_rows);
- let max_tables = get_from_env_or_default_value(GT_FUZZ_INPUT_MAX_TABLES, 64);
- let tables = rng.gen_range(1..max_tables);
- let max_inserts = get_from_env_or_default_value(GT_FUZZ_INPUT_MAX_INSERT_ACTIONS, 8);
+ let max_tables = get_gt_fuzz_input_max_tables();
+ let tables = rng.gen_range(2..max_tables);
+ let max_inserts = get_gt_fuzz_input_max_insert_actions();
let inserts = rng.gen_range(2..max_inserts);
Ok(FuzzInput {
columns,
@@ -283,8 +283,6 @@ async fn execute_failover(ctx: FuzzContext, input: FuzzInput) -> Result<()> {
let partitions = collect_table_partitions(&ctx, &table_ctxs).await?;
let region_distribution = region_distribution(partitions);
- // Ensures num of datanode > 1.
- assert!(region_distribution.len() > 1);
pretty_print_region_distribution(&region_distribution);
let nodes = region_distribution.keys().cloned().collect::<Vec<_>>();
let selected_datanode = nodes
@@ -321,7 +319,7 @@ async fn execute_failover(ctx: FuzzContext, input: FuzzInput) -> Result<()> {
wait_for_all_datanode_online(ctx.greptime.clone(), Duration::from_secs(60)).await;
// Validates value rows
- info!("Validates num of values");
+ info!("Validates num of rows");
for (table_ctx, expected_rows) in table_ctxs.iter().zip(affected_rows) {
let sql = format!("select count(1) as count from {}", table_ctx.name);
let values = count_values(&ctx.greptime, &sql).await?;
diff --git a/tests-fuzz/targets/fuzz_alter_logical_table.rs b/tests-fuzz/targets/fuzz_alter_logical_table.rs
index 40f1a699b89b..39e671ca6efb 100644
--- a/tests-fuzz/targets/fuzz_alter_logical_table.rs
+++ b/tests-fuzz/targets/fuzz_alter_logical_table.rs
@@ -43,8 +43,7 @@ use tests_fuzz::translator::mysql::alter_expr::AlterTableExprTranslator;
use tests_fuzz::translator::mysql::create_expr::CreateTableExprTranslator;
use tests_fuzz::translator::DslTranslator;
use tests_fuzz::utils::{
- get_from_env_or_default_value, init_greptime_connections_via_env, Connections,
- GT_FUZZ_INPUT_MAX_ALTER_ACTIONS,
+ get_gt_fuzz_input_max_alter_actions, init_greptime_connections_via_env, Connections,
};
use tests_fuzz::validator;
@@ -68,7 +67,7 @@ impl Arbitrary<'_> for FuzzInput {
fn arbitrary(u: &mut Unstructured<'_>) -> arbitrary::Result<Self> {
let seed = u.int_in_range(u64::MIN..=u64::MAX)?;
let mut rng = ChaChaRng::seed_from_u64(seed);
- let max_actions = get_from_env_or_default_value(GT_FUZZ_INPUT_MAX_ALTER_ACTIONS, 256);
+ let max_actions = get_gt_fuzz_input_max_alter_actions();
let actions = rng.gen_range(1..max_actions);
Ok(FuzzInput { seed, actions })
diff --git a/tests-fuzz/targets/fuzz_alter_table.rs b/tests-fuzz/targets/fuzz_alter_table.rs
index edb8ce92ef01..7fd8f7d7ac83 100644
--- a/tests-fuzz/targets/fuzz_alter_table.rs
+++ b/tests-fuzz/targets/fuzz_alter_table.rs
@@ -41,8 +41,7 @@ use tests_fuzz::translator::mysql::alter_expr::AlterTableExprTranslator;
use tests_fuzz::translator::mysql::create_expr::CreateTableExprTranslator;
use tests_fuzz::translator::DslTranslator;
use tests_fuzz::utils::{
- get_from_env_or_default_value, init_greptime_connections_via_env, Connections,
- GT_FUZZ_INPUT_MAX_COLUMNS,
+ get_gt_fuzz_input_max_columns, init_greptime_connections_via_env, Connections,
};
use tests_fuzz::validator;
struct FuzzContext {
@@ -70,7 +69,7 @@ enum AlterTableOption {
}
fn generate_create_table_expr<R: Rng + 'static>(rng: &mut R) -> Result<CreateTableExpr> {
- let max_columns = get_from_env_or_default_value(GT_FUZZ_INPUT_MAX_COLUMNS, 30);
+ let max_columns = get_gt_fuzz_input_max_columns();
let columns = rng.gen_range(2..max_columns);
let create_table_generator = CreateTableExprGeneratorBuilder::default()
.name_generator(Box::new(MappedGenerator::new(
diff --git a/tests-fuzz/targets/fuzz_create_table.rs b/tests-fuzz/targets/fuzz_create_table.rs
index eaaf2356fe1c..9411afa1a3a7 100644
--- a/tests-fuzz/targets/fuzz_create_table.rs
+++ b/tests-fuzz/targets/fuzz_create_table.rs
@@ -32,8 +32,7 @@ use tests_fuzz::ir::CreateTableExpr;
use tests_fuzz::translator::mysql::create_expr::CreateTableExprTranslator;
use tests_fuzz::translator::DslTranslator;
use tests_fuzz::utils::{
- get_from_env_or_default_value, init_greptime_connections_via_env, Connections,
- GT_FUZZ_INPUT_MAX_COLUMNS,
+ get_gt_fuzz_input_max_columns, init_greptime_connections_via_env, Connections,
};
use tests_fuzz::validator;
@@ -57,7 +56,7 @@ impl Arbitrary<'_> for FuzzInput {
fn arbitrary(u: &mut Unstructured<'_>) -> arbitrary::Result<Self> {
let seed = u.int_in_range(u64::MIN..=u64::MAX)?;
let mut rng = ChaChaRng::seed_from_u64(seed);
- let max_columns = get_from_env_or_default_value(GT_FUZZ_INPUT_MAX_COLUMNS, 30);
+ let max_columns = get_gt_fuzz_input_max_columns();
let columns = rng.gen_range(2..max_columns);
Ok(FuzzInput { columns, seed })
}
diff --git a/tests-fuzz/targets/fuzz_insert.rs b/tests-fuzz/targets/fuzz_insert.rs
index 1e75ef3b37ab..a712701b8533 100644
--- a/tests-fuzz/targets/fuzz_insert.rs
+++ b/tests-fuzz/targets/fuzz_insert.rs
@@ -40,8 +40,8 @@ use tests_fuzz::translator::mysql::create_expr::CreateTableExprTranslator;
use tests_fuzz::translator::mysql::insert_expr::InsertIntoExprTranslator;
use tests_fuzz::translator::DslTranslator;
use tests_fuzz::utils::{
- flush_memtable, get_from_env_or_default_value, init_greptime_connections_via_env, Connections,
- GT_FUZZ_INPUT_MAX_COLUMNS, GT_FUZZ_INPUT_MAX_ROWS,
+ flush_memtable, get_gt_fuzz_input_max_columns, get_gt_fuzz_input_max_rows,
+ init_greptime_connections_via_env, Connections,
};
use tests_fuzz::validator;
@@ -66,9 +66,9 @@ impl Arbitrary<'_> for FuzzInput {
fn arbitrary(u: &mut Unstructured<'_>) -> arbitrary::Result<Self> {
let seed = u.int_in_range(u64::MIN..=u64::MAX)?;
let mut rng = ChaChaRng::seed_from_u64(seed);
- let max_columns = get_from_env_or_default_value(GT_FUZZ_INPUT_MAX_COLUMNS, 30);
+ let max_columns = get_gt_fuzz_input_max_columns();
let columns = rng.gen_range(2..max_columns);
- let max_row = get_from_env_or_default_value(GT_FUZZ_INPUT_MAX_ROWS, 2048);
+ let max_row = get_gt_fuzz_input_max_rows();
let rows = rng.gen_range(1..max_row);
Ok(FuzzInput {
columns,
diff --git a/tests-fuzz/targets/fuzz_insert_logical_table.rs b/tests-fuzz/targets/fuzz_insert_logical_table.rs
index a9ca79bc2c30..22226dbb9de1 100644
--- a/tests-fuzz/targets/fuzz_insert_logical_table.rs
+++ b/tests-fuzz/targets/fuzz_insert_logical_table.rs
@@ -43,9 +43,8 @@ use tests_fuzz::translator::mysql::create_expr::CreateTableExprTranslator;
use tests_fuzz::translator::mysql::insert_expr::InsertIntoExprTranslator;
use tests_fuzz::translator::DslTranslator;
use tests_fuzz::utils::{
- compact_table, flush_memtable, get_from_env_or_default_value,
- init_greptime_connections_via_env, Connections, GT_FUZZ_INPUT_MAX_ROWS,
- GT_FUZZ_INPUT_MAX_TABLES,
+ compact_table, flush_memtable, get_gt_fuzz_input_max_rows, get_gt_fuzz_input_max_tables,
+ init_greptime_connections_via_env, Connections,
};
use tests_fuzz::validator;
struct FuzzContext {
@@ -69,9 +68,9 @@ impl Arbitrary<'_> for FuzzInput {
fn arbitrary(u: &mut Unstructured<'_>) -> arbitrary::Result<Self> {
let seed = u.int_in_range(u64::MIN..=u64::MAX)?;
let mut rng = ChaChaRng::seed_from_u64(seed);
- let max_tables = get_from_env_or_default_value(GT_FUZZ_INPUT_MAX_TABLES, 32);
+ let max_tables = get_gt_fuzz_input_max_tables();
let tables = rng.gen_range(1..max_tables);
- let max_row = get_from_env_or_default_value(GT_FUZZ_INPUT_MAX_ROWS, 2048);
+ let max_row = get_gt_fuzz_input_max_rows();
let rows = rng.gen_range(1..max_row);
Ok(FuzzInput { tables, seed, rows })
}
diff --git a/tests-fuzz/targets/unstable/fuzz_create_table_standalone.rs b/tests-fuzz/targets/unstable/fuzz_create_table_standalone.rs
index 5ef880666bfc..662ca0c67256 100644
--- a/tests-fuzz/targets/unstable/fuzz_create_table_standalone.rs
+++ b/tests-fuzz/targets/unstable/fuzz_create_table_standalone.rs
@@ -44,9 +44,7 @@ use tests_fuzz::translator::DslTranslator;
use tests_fuzz::utils::config::{get_conf_path, write_config_file};
use tests_fuzz::utils::health::HttpHealthChecker;
use tests_fuzz::utils::process::{ProcessManager, ProcessState, UnstableProcessController};
-use tests_fuzz::utils::{
- get_from_env_or_default_value, load_unstable_test_env_variables, GT_FUZZ_INPUT_MAX_TABLES,
-};
+use tests_fuzz::utils::{get_gt_fuzz_input_max_tables, load_unstable_test_env_variables};
use tests_fuzz::{error, validator};
use tokio::sync::watch;
@@ -70,7 +68,7 @@ impl Arbitrary<'_> for FuzzInput {
fn arbitrary(u: &mut Unstructured<'_>) -> arbitrary::Result<Self> {
let seed = u.int_in_range(u64::MIN..=u64::MAX)?;
let mut rng = ChaChaRng::seed_from_u64(seed);
- let max_tables = get_from_env_or_default_value(GT_FUZZ_INPUT_MAX_TABLES, 256);
+ let max_tables = get_gt_fuzz_input_max_tables();
let tables = rng.gen_range(1..max_tables);
Ok(FuzzInput { seed, tables })
}
|
feat
|
add naive region failover test for metric table (#4269)
|
89d58538c704505b4eedc72ce779a93185ca4f5e
|
2024-04-16 20:10:16
|
Eugene Tolbakov
|
chore(mito): set null value data size to i64 (#3722)
| false
|
diff --git a/src/datatypes/src/value.rs b/src/datatypes/src/value.rs
index 64635eeae2d0..1483b82adccd 100644
--- a/src/datatypes/src/value.rs
+++ b/src/datatypes/src/value.rs
@@ -1230,7 +1230,9 @@ impl<'a> ValueRef<'a> {
/// The size is estimated and only considers the data size.
pub fn data_size(&self) -> usize {
match *self {
- ValueRef::Null => 0,
+ // Since the `Null` type is also considered to occupy space, we have opted to use the
+ // size of `i64` as an initial approximation.
+ ValueRef::Null => 8,
ValueRef::Boolean(_) => 1,
ValueRef::UInt8(_) => 1,
ValueRef::UInt16(_) => 2,
@@ -2339,6 +2341,7 @@ mod tests {
#[test]
fn test_value_ref_estimated_size() {
+ check_value_ref_size_eq(&ValueRef::Null, 8);
check_value_ref_size_eq(&ValueRef::Boolean(true), 1);
check_value_ref_size_eq(&ValueRef::UInt8(1), 1);
check_value_ref_size_eq(&ValueRef::UInt16(1), 2);
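
Per the comment above, a null is still assumed to occupy space, so it is charged the size of an `i64` as a first approximation. A tiny std-only sketch of that kind of size estimator (`SimpleValue` is a stand-in enum, not the crate's `ValueRef`):

// A stand-in for the crate's ValueRef; variants and sizes are illustrative.
enum SimpleValue {
    Null,
    Boolean(bool),
    Int64(i64),
    String(String),
}

impl SimpleValue {
    // Estimated payload size in bytes. Null is counted as an i64 (8 bytes)
    // rather than 0, mirroring the change above.
    fn data_size(&self) -> usize {
        match self {
            SimpleValue::Null => 8,
            SimpleValue::Boolean(_) => 1,
            SimpleValue::Int64(_) => 8,
            SimpleValue::String(s) => s.len(),
        }
    }
}

fn main() {
    let row = [
        SimpleValue::Null,
        SimpleValue::Boolean(true),
        SimpleValue::String("host1".to_string()),
    ];
    let estimated: usize = row.iter().map(SimpleValue::data_size).sum();
    assert_eq!(estimated, 8 + 1 + 5);
}
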
|
chore
|
set null value data size to i64 (#3722)
|
840f81e0fd83579ea0e41a8c9aa4fe9abb335a81
|
2024-06-25 09:31:46
|
Eugene Tolbakov
|
fix(sql): improve compound signed number processing (#4200)
| false
|
diff --git a/src/sql/src/statements/transform/expand_interval.rs b/src/sql/src/statements/transform/expand_interval.rs
index b9e73648e3f6..f22792f9bf71 100644
--- a/src/sql/src/statements/transform/expand_interval.rs
+++ b/src/sql/src/statements/transform/expand_interval.rs
@@ -23,9 +23,8 @@ use sqlparser::ast::{Expr, Interval, Value};
use crate::statements::transform::TransformRule;
lazy_static! {
- /// Matches either one or more digits `(\d+)` or one or more non-digits `(\D+)` characters
- /// Negative sign before digits is matched optionally
- static ref INTERVAL_SHORT_NAME_PATTERN: Regex = Regex::new(r"(-?\d+|\D+)").unwrap();
+ /// Matches either one or more digits `(\d+)` or one or more ASCII characters `[a-zA-Z]` or plus/minus signs
+ static ref INTERVAL_SHORT_NAME_PATTERN: Regex = Regex::new(r"([+-]?\d+|[a-zA-Z]+|\+|-)").unwrap();
static ref INTERVAL_SHORT_NAME_MAPPING: HashMap<&'static str, &'static str> = HashMap::from([
("y","years"),
@@ -144,7 +143,7 @@ mod tests {
use crate::statements::transform::TransformRule;
#[test]
- fn test_transform_interval_conversions() {
+ fn test_transform_interval_basic_conversions() {
let test_cases = vec![
("1y", "1 years"),
("4mon", "4 months"),
@@ -158,7 +157,6 @@ mod tests {
("200ms", "200 microseconds"),
("350us", "350 microseconds"),
("400ns", "400 nanoseconds"),
- ("2y4w1h", "2 years 4 weeks 1 hours"),
];
for (input, expected) in test_cases {
let result = expand_interval_name(input).unwrap();
@@ -171,6 +169,37 @@ mod tests {
}
}
+ #[test]
+ fn test_transform_interval_compound_conversions() {
+ let test_cases = vec![
+ ("2y4mon6w", "2 years 4 months 6 weeks"),
+ ("5d3h1m", "5 days 3 hours 1 minutes"),
+ (
+ "10s312millis789ms",
+ "10 seconds 312 milliseconds 789 microseconds",
+ ),
+ (
+ "23mils987us754ns",
+ "23 milliseconds 987 microseconds 754 nanoseconds",
+ ),
+ ("-1d-5h", "-1 days -5 hours"),
+ ("-2y-4mon-6w", "-2 years -4 months -6 weeks"),
+ ("-5d-3h-1m", "-5 days -3 hours -1 minutes"),
+ (
+ "-10s-312millis-789ms",
+ "-10 seconds -312 milliseconds -789 microseconds",
+ ),
+ (
+ "-23mils-987us-754ns",
+ "-23 milliseconds -987 microseconds -754 nanoseconds",
+ ),
+ ];
+ for (input, expected) in test_cases {
+ let result = expand_interval_name(input).unwrap();
+ assert_eq!(result, expected);
+ }
+ }
+
#[test]
fn test_visit_expr_when_interval_is_single_quoted_string_expr() {
let interval_transformation_rule = ExpandIntervalTransformRule {};
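
The new pattern splits a compound short-form interval into signed numbers and unit tokens. A self-contained sketch of that tokenization, assuming the `regex` crate this transform already uses:

use regex::Regex;

fn main() {
    // Same shape as the pattern in the diff: a signed number, a run of ASCII
    // letters (the unit), or a bare sign.
    let pattern = Regex::new(r"([+-]?\d+|[a-zA-Z]+|\+|-)").unwrap();

    let tokens: Vec<&str> = pattern
        .find_iter("-1h-5m")
        .map(|m| m.as_str())
        .collect();

    // Numbers keep their sign, so "-1h-5m" expands to "-1 hours -5 minutes".
    assert_eq!(tokens, vec!["-1", "h", "-5", "m"]);
}
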
diff --git a/tests/cases/standalone/common/types/interval/interval.result b/tests/cases/standalone/common/types/interval/interval.result
index aaa6657d6077..c4c4ad8e60d2 100644
--- a/tests/cases/standalone/common/types/interval/interval.result
+++ b/tests/cases/standalone/common/types/interval/interval.result
@@ -281,6 +281,22 @@ SELECT INTERVAL '-2mon';
| 0 years -2 mons 0 days 0 hours 0 mins 0.000000000 secs |
+---------------------------------------------------------+
+SELECT INTERVAL '-1h5m';
+
++---------------------------------------------------------+
+| IntervalMonthDayNano("18446740773709551616") |
++---------------------------------------------------------+
+| 0 years 0 mons 0 days 0 hours -55 mins 0.000000000 secs |
++---------------------------------------------------------+
+
+SELECT INTERVAL '-1h-5m';
+
++---------------------------------------------------------+
+| IntervalMonthDayNano("18446740173709551616") |
++---------------------------------------------------------+
+| 0 years 0 mons 0 days -1 hours -5 mins 0.000000000 secs |
++---------------------------------------------------------+
+
SELECT INTERVAL '1y2w3d4h';
+---------------------------------------------------------+
diff --git a/tests/cases/standalone/common/types/interval/interval.sql b/tests/cases/standalone/common/types/interval/interval.sql
index 1402c26134e8..9988d27644bd 100644
--- a/tests/cases/standalone/common/types/interval/interval.sql
+++ b/tests/cases/standalone/common/types/interval/interval.sql
@@ -71,6 +71,10 @@ SELECT INTERVAL '55h';
SELECT INTERVAL '-2mon';
+SELECT INTERVAL '-1h5m';
+
+SELECT INTERVAL '-1h-5m';
+
SELECT INTERVAL '1y2w3d4h';
SELECT INTERVAL '7 days' - INTERVAL '1d';
|
fix
|
improve compound signed number processing (#4200)
|
1138f32af90f5f299d3cf249807c387b68509fe3
|
2024-07-29 22:25:19
|
taobo
|
feat: support setting time range in Copy From statement (#4405)
| false
|
diff --git a/src/common/query/src/logical_plan.rs b/src/common/query/src/logical_plan.rs
index 6cfee747c239..7fd081c219f3 100644
--- a/src/common/query/src/logical_plan.rs
+++ b/src/common/query/src/logical_plan.rs
@@ -25,7 +25,7 @@ use datafusion::logical_expr::{LogicalPlan, LogicalPlanBuilder};
use datafusion_common::Column;
use datafusion_expr::col;
use datatypes::prelude::ConcreteDataType;
-pub use expr::build_filter_from_timestamp;
+pub use expr::{build_filter_from_timestamp, build_same_type_ts_filter};
pub use self::accumulator::{Accumulator, AggregateFunctionCreator, AggregateFunctionCreatorRef};
pub use self::udaf::AggregateFunction;
diff --git a/src/common/query/src/logical_plan/expr.rs b/src/common/query/src/logical_plan/expr.rs
index 2d30bee2afb2..3b9f6cd120b8 100644
--- a/src/common/query/src/logical_plan/expr.rs
+++ b/src/common/query/src/logical_plan/expr.rs
@@ -18,6 +18,35 @@ use common_time::Timestamp;
use datafusion_common::{Column, ScalarValue};
use datafusion_expr::expr::Expr;
use datafusion_expr::{and, binary_expr, Operator};
+use datatypes::data_type::DataType;
+use datatypes::schema::ColumnSchema;
+use datatypes::value::Value;
+
+/// Builds a filter for a timestamp column with the same type as the timestamp column.
+/// Returns [None] if time range is [None] or full time range.
+pub fn build_same_type_ts_filter(
+ ts_schema: &ColumnSchema,
+ time_range: Option<TimestampRange>,
+) -> Option<Expr> {
+ let ts_type = ts_schema.data_type.clone();
+ let time_range = time_range?;
+ let start = time_range
+ .start()
+ .and_then(|start| ts_type.try_cast(Value::Timestamp(start)));
+ let end = time_range
+ .end()
+ .and_then(|end| ts_type.try_cast(Value::Timestamp(end)));
+
+ let time_range = match (start, end) {
+ (Some(Value::Timestamp(start)), Some(Value::Timestamp(end))) => {
+ TimestampRange::new(start, end)
+ }
+ (Some(Value::Timestamp(start)), None) => Some(TimestampRange::from_start(start)),
+ (None, Some(Value::Timestamp(end))) => Some(TimestampRange::until_end(end, false)),
+ _ => return None,
+ };
+ build_filter_from_timestamp(&ts_schema.name, time_range.as_ref())
+}
/// Builds an `Expr` that filters timestamp column from given timestamp range.
/// Returns [None] if time range is [None] or full time range.
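
`build_same_type_ts_filter` casts the range bounds to the timestamp column's own type before building a range predicate. A hedged sketch of the predicate's likely shape, assuming the `datafusion_expr` crate and a half-open `[start, end)` range; `ts_range_filter` and the plain `i64` bounds are illustrative simplifications:

use datafusion_expr::{col, lit, Expr};

// Shape of the pushed-down predicate: `ts >= start AND ts < end`. The real
// helper above first casts the bounds to the column's own timestamp type;
// here plain i64 literals keep the sketch self-contained.
fn ts_range_filter(column: &str, start: i64, end: i64) -> Expr {
    col(column).gt_eq(lit(start)).and(col(column).lt(lit(end)))
}

fn main() {
    let filter = ts_range_filter("ts", 1_655_276_557_000, 1_655_276_559_000);
    println!("{filter:?}");
}
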
diff --git a/src/common/recordbatch/src/adapter.rs b/src/common/recordbatch/src/adapter.rs
index 0af2c8ac33dd..85236381b2ce 100644
--- a/src/common/recordbatch/src/adapter.rs
+++ b/src/common/recordbatch/src/adapter.rs
@@ -22,19 +22,25 @@ use std::task::{Context, Poll};
use datafusion::arrow::compute::cast;
use datafusion::arrow::datatypes::SchemaRef as DfSchemaRef;
use datafusion::error::Result as DfResult;
+use datafusion::execution::context::ExecutionProps;
+use datafusion::logical_expr::utils::conjunction;
+use datafusion::logical_expr::Expr;
+use datafusion::physical_expr::create_physical_expr;
use datafusion::physical_plan::metrics::{BaselineMetrics, MetricValue};
use datafusion::physical_plan::{
- accept, displayable, ExecutionPlan, ExecutionPlanVisitor,
+ accept, displayable, ExecutionPlan, ExecutionPlanVisitor, PhysicalExpr,
RecordBatchStream as DfRecordBatchStream,
};
use datafusion_common::arrow::error::ArrowError;
-use datafusion_common::DataFusionError;
+use datafusion_common::{DataFusionError, ToDFSchema};
+use datatypes::arrow::array::Array;
use datatypes::schema::{Schema, SchemaRef};
use futures::ready;
use pin_project::pin_project;
use snafu::ResultExt;
use crate::error::{self, Result};
+use crate::filter::batch_filter;
use crate::{
DfRecordBatch, DfSendableRecordBatchStream, OrderOption, RecordBatch, RecordBatchStream,
SendableRecordBatchStream, Stream,
@@ -50,6 +56,7 @@ pub struct RecordBatchStreamTypeAdapter<T, E> {
stream: T,
projected_schema: DfSchemaRef,
projection: Vec<usize>,
+ predicate: Option<Arc<dyn PhysicalExpr>>,
phantom: PhantomData<E>,
}
@@ -69,9 +76,28 @@ where
stream,
projected_schema,
projection,
+ predicate: None,
phantom: Default::default(),
}
}
+
+ pub fn with_filter(mut self, filters: Vec<Expr>) -> Result<Self> {
+ let filters = if let Some(expr) = conjunction(filters) {
+ let df_schema = self
+ .projected_schema
+ .clone()
+ .to_dfschema_ref()
+ .context(error::PhysicalExprSnafu)?;
+
+ let filters = create_physical_expr(&expr, &df_schema, &ExecutionProps::new())
+ .context(error::PhysicalExprSnafu)?;
+ Some(filters)
+ } else {
+ None
+ };
+ self.predicate = filters;
+ Ok(self)
+ }
}
impl<T, E> DfRecordBatchStream for RecordBatchStreamTypeAdapter<T, E>
@@ -99,6 +125,8 @@ where
let projected_schema = this.projected_schema.clone();
let projection = this.projection.clone();
+ let predicate = this.predicate.clone();
+
let batch = batch.map(|b| {
b.and_then(|b| {
let projected_column = b.project(&projection)?;
@@ -121,6 +149,11 @@ where
}
}
let record_batch = DfRecordBatch::try_new(projected_schema, columns)?;
+ let record_batch = if let Some(predicate) = predicate {
+ batch_filter(&record_batch, &predicate)?
+ } else {
+ record_batch
+ };
Ok(record_batch)
})
});
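
With `with_filter` installed, each decoded batch is projected, cast, and then passed through `batch_filter`, which ultimately applies a boolean mask via arrow's `filter_record_batch`. A minimal sketch of that final step, assuming the `arrow` crate (re-exported in this repo as `datatypes::arrow`); the hard-coded mask stands in for an evaluated predicate:

use std::sync::Arc;

use arrow::array::{BooleanArray, Int64Array, StringArray};
use arrow::compute::filter_record_batch;
use arrow::datatypes::{DataType, Field, Schema};
use arrow::error::ArrowError;
use arrow::record_batch::RecordBatch;

fn main() -> Result<(), ArrowError> {
    // A tiny batch standing in for one decoded chunk of a COPY FROM file.
    let schema = Arc::new(Schema::new(vec![
        Field::new("host", DataType::Utf8, false),
        Field::new("ts", DataType::Int64, false),
    ]));
    let batch = RecordBatch::try_new(
        schema,
        vec![
            Arc::new(StringArray::from(vec!["host1", "host2", "host3"])),
            Arc::new(Int64Array::from(vec![1_i64, 2, 3])),
        ],
    )?;

    // In the adapter, this mask comes from evaluating the physical predicate
    // built by `with_filter`; it is hard-coded here to keep the sketch small.
    let mask = BooleanArray::from(vec![false, true, true]);
    let filtered = filter_record_batch(&batch, &mask)?;
    assert_eq!(filtered.num_rows(), 2);
    Ok(())
}
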
diff --git a/src/common/recordbatch/src/error.rs b/src/common/recordbatch/src/error.rs
index f5424d410a14..f2114f645fdc 100644
--- a/src/common/recordbatch/src/error.rs
+++ b/src/common/recordbatch/src/error.rs
@@ -73,6 +73,14 @@ pub enum Error {
location: Location,
},
+ #[snafu(display("Create physical expr error"))]
+ PhysicalExpr {
+ #[snafu(source)]
+ error: datafusion::error::DataFusionError,
+ #[snafu(implicit)]
+ location: Location,
+ },
+
#[snafu(display("Fail to format record batch"))]
Format {
#[snafu(source)]
@@ -167,7 +175,8 @@ impl ErrorExt for Error {
| Error::PollStream { .. }
| Error::Format { .. }
| Error::ToArrowScalar { .. }
- | Error::ProjectArrowRecordBatch { .. } => StatusCode::Internal,
+ | Error::ProjectArrowRecordBatch { .. }
+ | Error::PhysicalExpr { .. } => StatusCode::Internal,
Error::ArrowCompute { .. } => StatusCode::IllegalState,
diff --git a/src/common/recordbatch/src/filter.rs b/src/common/recordbatch/src/filter.rs
index 195abb118135..8c1ebe7d530e 100644
--- a/src/common/recordbatch/src/filter.rs
+++ b/src/common/recordbatch/src/filter.rs
@@ -14,11 +14,18 @@
//! Util record batch stream wrapper that can perform precise filter.
+use std::sync::Arc;
+
+use datafusion::error::Result as DfResult;
use datafusion::logical_expr::{Expr, Literal, Operator};
+use datafusion::physical_plan::PhysicalExpr;
use datafusion_common::arrow::array::{ArrayRef, Datum, Scalar};
use datafusion_common::arrow::buffer::BooleanBuffer;
use datafusion_common::arrow::compute::kernels::cmp;
-use datafusion_common::ScalarValue;
+use datafusion_common::cast::{as_boolean_array, as_null_array};
+use datafusion_common::{internal_err, DataFusionError, ScalarValue};
+use datatypes::arrow::array::{Array, BooleanArray, RecordBatch};
+use datatypes::arrow::compute::filter_record_batch;
use datatypes::vectors::VectorRef;
use snafu::ResultExt;
@@ -144,13 +151,43 @@ impl SimpleFilterEvaluator {
}
}
+/// Evaluate the predicate on the input [RecordBatch], and return a new [RecordBatch].
+/// Copied from DataFusion's `physical_plan` filter implementation (`filter.rs`).
+pub fn batch_filter(
+ batch: &RecordBatch,
+ predicate: &Arc<dyn PhysicalExpr>,
+) -> DfResult<RecordBatch> {
+ predicate
+ .evaluate(batch)
+ .and_then(|v| v.into_array(batch.num_rows()))
+ .and_then(|array| {
+ let filter_array = match as_boolean_array(&array) {
+ Ok(boolean_array) => Ok(boolean_array.clone()),
+ Err(_) => {
+ let Ok(null_array) = as_null_array(&array) else {
+ return internal_err!(
+ "Cannot create filter_array from non-boolean predicates"
+ );
+ };
+
+ // if the predicate is null, then the result is also null
+ Ok::<BooleanArray, DataFusionError>(BooleanArray::new_null(null_array.len()))
+ }
+ }?;
+ Ok(filter_record_batch(batch, &filter_array)?)
+ })
+}
+
#[cfg(test)]
mod test {
use std::sync::Arc;
- use datafusion::logical_expr::BinaryExpr;
- use datafusion_common::Column;
+ use datafusion::execution::context::ExecutionProps;
+ use datafusion::logical_expr::{col, lit, BinaryExpr};
+ use datafusion::physical_expr::create_physical_expr;
+ use datafusion_common::{Column, DFSchema};
+ use datatypes::arrow::datatypes::{DataType, Field, Schema};
use super::*;
@@ -281,4 +318,35 @@ mod test {
let result = evaluator.evaluate_scalar(&input_3).unwrap();
assert!(!result);
}
+
+ #[test]
+ fn batch_filter_test() {
+ let expr = col("ts").gt(lit(123456u64));
+ let schema = Schema::new(vec![
+ Field::new("a", DataType::Int32, true),
+ Field::new("ts", DataType::UInt64, false),
+ ]);
+ let df_schema = DFSchema::try_from(schema.clone()).unwrap();
+ let props = ExecutionProps::new();
+ let physical_expr = create_physical_expr(&expr, &df_schema, &props).unwrap();
+ let batch = RecordBatch::try_new(
+ Arc::new(schema),
+ vec![
+ Arc::new(datatypes::arrow::array::Int32Array::from(vec![4, 5, 6])),
+ Arc::new(datatypes::arrow::array::UInt64Array::from(vec![
+ 123456, 123457, 123458,
+ ])),
+ ],
+ )
+ .unwrap();
+ let new_batch = batch_filter(&batch, &physical_expr).unwrap();
+ assert_eq!(new_batch.num_rows(), 2);
+ let first_column_values = new_batch
+ .column(0)
+ .as_any()
+ .downcast_ref::<datatypes::arrow::array::Int32Array>()
+ .unwrap();
+ let expected = datatypes::arrow::array::Int32Array::from(vec![5, 6]);
+ assert_eq!(first_column_values, &expected);
+ }
}
diff --git a/src/operator/src/error.rs b/src/operator/src/error.rs
index 4c657ceeb0eb..8e7991fd06b1 100644
--- a/src/operator/src/error.rs
+++ b/src/operator/src/error.rs
@@ -754,6 +754,13 @@ pub enum Error {
#[snafu(implicit)]
location: Location,
},
+ #[snafu(display("Create physical expr error"))]
+ PhysicalExpr {
+ #[snafu(source)]
+ error: common_recordbatch::error::Error,
+ #[snafu(implicit)]
+ location: Location,
+ },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -788,7 +795,8 @@ impl ErrorExt for Error {
| Error::ViewColumnsMismatch { .. }
| Error::InvalidViewStmt { .. }
| Error::ConvertIdentifier { .. }
- | Error::InvalidPartition { .. } => StatusCode::InvalidArguments,
+ | Error::InvalidPartition { .. }
+ | Error::PhysicalExpr { .. } => StatusCode::InvalidArguments,
Error::TableAlreadyExists { .. } | Error::ViewAlreadyExists { .. } => {
StatusCode::TableAlreadyExists
diff --git a/src/operator/src/statement/copy_table_from.rs b/src/operator/src/statement/copy_table_from.rs
index a0818d6ea35e..b3a151b58166 100644
--- a/src/operator/src/statement/copy_table_from.rs
+++ b/src/operator/src/statement/copy_table_from.rs
@@ -36,6 +36,7 @@ use datafusion::parquet::arrow::arrow_reader::ArrowReaderMetadata;
use datafusion::parquet::arrow::ParquetRecordBatchStreamBuilder;
use datafusion::physical_plan::metrics::ExecutionPlanMetricsSet;
use datafusion_common::Statistics;
+use datafusion_expr::Expr;
use datatypes::arrow::compute::can_cast_types;
use datatypes::arrow::datatypes::{Schema, SchemaRef};
use datatypes::vectors::Helper;
@@ -225,6 +226,7 @@ impl StatementExecutor {
object_store: &ObjectStore,
file_metadata: &FileMetadata,
projection: Vec<usize>,
+ filters: Vec<Expr>,
) -> Result<DfSendableRecordBatchStream> {
match file_metadata {
FileMetadata::Csv {
@@ -252,11 +254,11 @@ impl StatementExecutor {
)
.await?;
- Ok(Box::pin(RecordBatchStreamTypeAdapter::new(
- projected_schema,
- stream,
- Some(projection),
- )))
+ Ok(Box::pin(
+ RecordBatchStreamTypeAdapter::new(projected_schema, stream, Some(projection))
+ .with_filter(filters)
+ .context(error::PhysicalExprSnafu)?,
+ ))
}
FileMetadata::Json {
format,
@@ -286,11 +288,11 @@ impl StatementExecutor {
)
.await?;
- Ok(Box::pin(RecordBatchStreamTypeAdapter::new(
- projected_schema,
- stream,
- Some(projection),
- )))
+ Ok(Box::pin(
+ RecordBatchStreamTypeAdapter::new(projected_schema, stream, Some(projection))
+ .with_filter(filters)
+ .context(error::PhysicalExprSnafu)?,
+ ))
}
FileMetadata::Parquet { metadata, path, .. } => {
let meta = object_store
@@ -317,11 +319,11 @@ impl StatementExecutor {
.project(&projection)
.context(error::ProjectSchemaSnafu)?,
);
- Ok(Box::pin(RecordBatchStreamTypeAdapter::new(
- projected_schema,
- stream,
- Some(projection),
- )))
+ Ok(Box::pin(
+ RecordBatchStreamTypeAdapter::new(projected_schema, stream, Some(projection))
+ .with_filter(filters)
+ .context(error::PhysicalExprSnafu)?,
+ ))
}
FileMetadata::Orc { path, .. } => {
let meta = object_store
@@ -345,11 +347,11 @@ impl StatementExecutor {
.context(error::ProjectSchemaSnafu)?,
);
- Ok(Box::pin(RecordBatchStreamTypeAdapter::new(
- projected_schema,
- stream,
- Some(projection),
- )))
+ Ok(Box::pin(
+ RecordBatchStreamTypeAdapter::new(projected_schema, stream, Some(projection))
+ .with_filter(filters)
+ .context(error::PhysicalExprSnafu)?,
+ ))
}
}
}
@@ -370,6 +372,14 @@ impl StatementExecutor {
let (object_store, entries) = self.list_copy_from_entries(&req).await?;
let mut files = Vec::with_capacity(entries.len());
let table_schema = table.schema().arrow_schema().clone();
+ let filters = table
+ .schema()
+ .timestamp_column()
+ .and_then(|c| {
+ common_query::logical_plan::build_same_type_ts_filter(c, req.timestamp_range)
+ })
+ .into_iter()
+ .collect::<Vec<_>>();
for entry in entries.iter() {
if entry.metadata().mode() != EntryMode::FILE {
@@ -414,6 +424,7 @@ impl StatementExecutor {
&object_store,
&file_metadata,
file_schema_projection,
+ filters.clone(),
)
.await?;
diff --git a/tests/cases/standalone/common/copy/copy_from_fs_csv.result b/tests/cases/standalone/common/copy/copy_from_fs_csv.result
index d46b5b75fb72..19b3fda3f9f4 100644
--- a/tests/cases/standalone/common/copy/copy_from_fs_csv.result
+++ b/tests/cases/standalone/common/copy/copy_from_fs_csv.result
@@ -2,19 +2,24 @@ CREATE TABLE demo(host string, cpu double, memory double, ts TIMESTAMP time inde
Affected Rows: 0
-insert into demo(host, cpu, memory, ts) values ('host1', 66.6, 1024, 1655276557000), ('host2', 88.8, 333.3, 1655276558000);
+insert into
+ demo(host, cpu, memory, ts)
+values
+ ('host1', 66.6, 1024, 1655276557000),
+ ('host2', 88.8, 333.3, 1655276558000),
+ ('host3', 99.9, 444.4, 1722077263000);
-Affected Rows: 2
+Affected Rows: 3
Copy demo TO '/tmp/demo/export/csv/demo.csv' with (format='csv');
-Affected Rows: 2
+Affected Rows: 3
CREATE TABLE with_filename(host string, cpu double, memory double, ts timestamp time index);
Affected Rows: 0
-Copy with_filename FROM '/tmp/demo/export/csv/demo.csv' with (format='csv');
+Copy with_filename FROM '/tmp/demo/export/csv/demo.csv' with (format='csv', start_time='2022-06-15 07:02:37', end_time='2022-06-15 07:02:39');
Affected Rows: 2
@@ -31,26 +36,25 @@ CREATE TABLE with_path(host string, cpu double, memory double, ts timestamp time
Affected Rows: 0
-Copy with_path FROM '/tmp/demo/export/csv/' with (format='csv');
+Copy with_path FROM '/tmp/demo/export/csv/' with (format='csv', start_time='2023-06-15 07:02:37');
-Affected Rows: 2
+Affected Rows: 1
select * from with_path order by ts;
+-------+------+--------+---------------------+
| host | cpu | memory | ts |
+-------+------+--------+---------------------+
-| host1 | 66.6 | 1024.0 | 2022-06-15T07:02:37 |
-| host2 | 88.8 | 333.3 | 2022-06-15T07:02:38 |
+| host3 | 99.9 | 444.4 | 2024-07-27T10:47:43 |
+-------+------+--------+---------------------+
CREATE TABLE with_pattern(host string, cpu double, memory double, ts timestamp time index);
Affected Rows: 0
-Copy with_pattern FROM '/tmp/demo/export/csv/' WITH (pattern = 'demo.*',format='csv');
+Copy with_pattern FROM '/tmp/demo/export/csv/' WITH (pattern = 'demo.*', format='csv', end_time='2025-06-15 07:02:39');
-Affected Rows: 2
+Affected Rows: 3
select * from with_pattern order by ts;
@@ -59,6 +63,7 @@ select * from with_pattern order by ts;
+-------+------+--------+---------------------+
| host1 | 66.6 | 1024.0 | 2022-06-15T07:02:37 |
| host2 | 88.8 | 333.3 | 2022-06-15T07:02:38 |
+| host3 | 99.9 | 444.4 | 2024-07-27T10:47:43 |
+-------+------+--------+---------------------+
drop table demo;
diff --git a/tests/cases/standalone/common/copy/copy_from_fs_csv.sql b/tests/cases/standalone/common/copy/copy_from_fs_csv.sql
index b38c566e2a24..cd6b91f4f816 100644
--- a/tests/cases/standalone/common/copy/copy_from_fs_csv.sql
+++ b/tests/cases/standalone/common/copy/copy_from_fs_csv.sql
@@ -1,24 +1,29 @@
CREATE TABLE demo(host string, cpu double, memory double, ts TIMESTAMP time index);
-insert into demo(host, cpu, memory, ts) values ('host1', 66.6, 1024, 1655276557000), ('host2', 88.8, 333.3, 1655276558000);
+insert into
+ demo(host, cpu, memory, ts)
+values
+ ('host1', 66.6, 1024, 1655276557000),
+ ('host2', 88.8, 333.3, 1655276558000),
+ ('host3', 99.9, 444.4, 1722077263000);
Copy demo TO '/tmp/demo/export/csv/demo.csv' with (format='csv');
CREATE TABLE with_filename(host string, cpu double, memory double, ts timestamp time index);
-Copy with_filename FROM '/tmp/demo/export/csv/demo.csv' with (format='csv');
+Copy with_filename FROM '/tmp/demo/export/csv/demo.csv' with (format='csv', start_time='2022-06-15 07:02:37', end_time='2022-06-15 07:02:39');
select * from with_filename order by ts;
CREATE TABLE with_path(host string, cpu double, memory double, ts timestamp time index);
-Copy with_path FROM '/tmp/demo/export/csv/' with (format='csv');
+Copy with_path FROM '/tmp/demo/export/csv/' with (format='csv', start_time='2023-06-15 07:02:37');
select * from with_path order by ts;
CREATE TABLE with_pattern(host string, cpu double, memory double, ts timestamp time index);
-Copy with_pattern FROM '/tmp/demo/export/csv/' WITH (pattern = 'demo.*',format='csv');
+Copy with_pattern FROM '/tmp/demo/export/csv/' WITH (pattern = 'demo.*', format='csv', end_time='2025-06-15 07:02:39');
select * from with_pattern order by ts;
diff --git a/tests/cases/standalone/common/copy/copy_from_fs_json.result b/tests/cases/standalone/common/copy/copy_from_fs_json.result
index 3f8826e251f4..bd71b5d624f1 100644
--- a/tests/cases/standalone/common/copy/copy_from_fs_json.result
+++ b/tests/cases/standalone/common/copy/copy_from_fs_json.result
@@ -2,19 +2,24 @@ CREATE TABLE demo(host string, cpu double, memory double, ts TIMESTAMP time inde
Affected Rows: 0
-insert into demo(host, cpu, memory, ts) values ('host1', 66.6, 1024, 1655276557000), ('host2', 88.8, 333.3, 1655276558000);
+insert into
+ demo(host, cpu, memory, ts)
+values
+ ('host1', 66.6, 1024, 1655276557000),
+ ('host2', 88.8, 333.3, 1655276558000),
+ ('host3', 99.9, 444.4, 1722077263000);
-Affected Rows: 2
+Affected Rows: 3
Copy demo TO '/tmp/demo/export/json/demo.json' with (format='json');
-Affected Rows: 2
+Affected Rows: 3
CREATE TABLE with_filename(host string, cpu double, memory double, ts timestamp time index);
Affected Rows: 0
-Copy with_filename FROM '/tmp/demo/export/json/demo.json' with (format='json');
+Copy with_filename FROM '/tmp/demo/export/json/demo.json' with (format='json', start_time='2022-06-15 07:02:37', end_time='2022-06-15 07:02:39');
Affected Rows: 2
@@ -31,7 +36,7 @@ CREATE TABLE with_path(host string, cpu double, memory double, ts timestamp time
Affected Rows: 0
-Copy with_path FROM '/tmp/demo/export/json/' with (format='json');
+Copy with_path FROM '/tmp/demo/export/json/' with (format='json', start_time='2022-06-15 07:02:37', end_time='2022-06-15 07:02:39');
Affected Rows: 2
@@ -48,7 +53,7 @@ CREATE TABLE with_pattern(host string, cpu double, memory double, ts timestamp t
Affected Rows: 0
-Copy with_pattern FROM '/tmp/demo/export/json/' WITH (pattern = 'demo.*',format='json');
+Copy with_pattern FROM '/tmp/demo/export/json/' WITH (pattern = 'demo.*',format='json', end_time='2022-06-15 07:02:39');
Affected Rows: 2
diff --git a/tests/cases/standalone/common/copy/copy_from_fs_json.sql b/tests/cases/standalone/common/copy/copy_from_fs_json.sql
index e1f751bd836e..c182bb82dcb8 100644
--- a/tests/cases/standalone/common/copy/copy_from_fs_json.sql
+++ b/tests/cases/standalone/common/copy/copy_from_fs_json.sql
@@ -1,24 +1,29 @@
CREATE TABLE demo(host string, cpu double, memory double, ts TIMESTAMP time index);
-insert into demo(host, cpu, memory, ts) values ('host1', 66.6, 1024, 1655276557000), ('host2', 88.8, 333.3, 1655276558000);
+insert into
+ demo(host, cpu, memory, ts)
+values
+ ('host1', 66.6, 1024, 1655276557000),
+ ('host2', 88.8, 333.3, 1655276558000),
+ ('host3', 99.9, 444.4, 1722077263000);
Copy demo TO '/tmp/demo/export/json/demo.json' with (format='json');
CREATE TABLE with_filename(host string, cpu double, memory double, ts timestamp time index);
-Copy with_filename FROM '/tmp/demo/export/json/demo.json' with (format='json');
+Copy with_filename FROM '/tmp/demo/export/json/demo.json' with (format='json', start_time='2022-06-15 07:02:37', end_time='2022-06-15 07:02:39');
select * from with_filename order by ts;
CREATE TABLE with_path(host string, cpu double, memory double, ts timestamp time index);
-Copy with_path FROM '/tmp/demo/export/json/' with (format='json');
+Copy with_path FROM '/tmp/demo/export/json/' with (format='json', start_time='2022-06-15 07:02:37', end_time='2022-06-15 07:02:39');
select * from with_path order by ts;
CREATE TABLE with_pattern(host string, cpu double, memory double, ts timestamp time index);
-Copy with_pattern FROM '/tmp/demo/export/json/' WITH (pattern = 'demo.*',format='json');
+Copy with_pattern FROM '/tmp/demo/export/json/' WITH (pattern = 'demo.*',format='json', end_time='2022-06-15 07:02:39');
select * from with_pattern order by ts;
diff --git a/tests/cases/standalone/common/copy/copy_from_fs_parquet.result b/tests/cases/standalone/common/copy/copy_from_fs_parquet.result
index 54ec2f1af308..474475663226 100644
--- a/tests/cases/standalone/common/copy/copy_from_fs_parquet.result
+++ b/tests/cases/standalone/common/copy/copy_from_fs_parquet.result
@@ -2,31 +2,41 @@ CREATE TABLE demo(host string, cpu double, memory double, ts TIMESTAMP time inde
Affected Rows: 0
-insert into demo(host, cpu, memory, ts) values ('host1', 66.6, 1024, 1655276557000), ('host2', 88.8, 333.3, 1655276558000);
+CREATE TABLE demo_2(host string, cpu double, memory double, ts TIMESTAMP time index);
-Affected Rows: 2
+Affected Rows: 0
-Copy demo TO '/tmp/demo/export/parquet_files/demo.parquet';
+insert into
+ demo(host, cpu, memory, ts)
+values
+ ('host1', 66.6, 1024, 1655276557000),
+ ('host2', 88.8, 333.3, 1655276558000),
+ ('host3', 111.1, 444.4, 1722077263000);
-Affected Rows: 2
+Affected Rows: 3
-CREATE TABLE demo_2(host string, cpu double, memory double, ts TIMESTAMP time index);
+insert into
+ demo_2(host, cpu, memory, ts)
+values
+ ('host4', 77.7, 1111, 1655276555000),
+ ('host5', 99.9, 444.4, 1655276556000),
+ ('host6', 222.2, 555.5, 1722077264000);
-Affected Rows: 0
+Affected Rows: 3
-insert into demo_2(host, cpu, memory, ts) values ('host3', 77.7, 1111, 1655276555000), ('host4', 99.9, 444.4, 1655276556000);
+Copy demo TO '/tmp/demo/export/parquet_files/demo.parquet';
-Affected Rows: 2
+Affected Rows: 3
Copy demo_2 TO '/tmp/demo/export/parquet_files/demo_2.parquet';
-Affected Rows: 2
+Affected Rows: 3
CREATE TABLE with_filename(host string, cpu double, memory double, ts timestamp time index);
Affected Rows: 0
-Copy with_filename FROM '/tmp/demo/export/parquet_files/demo.parquet';
+Copy with_filename FROM '/tmp/demo/export/parquet_files/demo.parquet' with (start_time='2022-06-15 07:02:37', end_time='2022-06-15 07:02:39');
Affected Rows: 2
@@ -45,37 +55,37 @@ Affected Rows: 0
Copy with_path FROM '/tmp/demo/export/parquet_files/';
-Affected Rows: 4
+Affected Rows: 6
select * from with_path order by ts;
-+-------+------+--------+---------------------+
-| host | cpu | memory | ts |
-+-------+------+--------+---------------------+
-| host3 | 77.7 | 1111.0 | 2022-06-15T07:02:35 |
-| host4 | 99.9 | 444.4 | 2022-06-15T07:02:36 |
-| host1 | 66.6 | 1024.0 | 2022-06-15T07:02:37 |
-| host2 | 88.8 | 333.3 | 2022-06-15T07:02:38 |
-+-------+------+--------+---------------------+
++-------+-------+--------+---------------------+
+| host | cpu | memory | ts |
++-------+-------+--------+---------------------+
+| host4 | 77.7 | 1111.0 | 2022-06-15T07:02:35 |
+| host5 | 99.9 | 444.4 | 2022-06-15T07:02:36 |
+| host1 | 66.6 | 1024.0 | 2022-06-15T07:02:37 |
+| host2 | 88.8 | 333.3 | 2022-06-15T07:02:38 |
+| host3 | 111.1 | 444.4 | 2024-07-27T10:47:43 |
+| host6 | 222.2 | 555.5 | 2024-07-27T10:47:44 |
++-------+-------+--------+---------------------+
CREATE TABLE with_pattern(host string, cpu double, memory double, ts timestamp time index);
Affected Rows: 0
-Copy with_pattern FROM '/tmp/demo/export/parquet_files/' WITH (PATTERN = 'demo.*');
+Copy with_pattern FROM '/tmp/demo/export/parquet_files/' WITH (PATTERN = 'demo.*', start_time='2022-06-15 07:02:39');
-Affected Rows: 4
+Affected Rows: 2
select * from with_pattern order by ts;
-+-------+------+--------+---------------------+
-| host | cpu | memory | ts |
-+-------+------+--------+---------------------+
-| host3 | 77.7 | 1111.0 | 2022-06-15T07:02:35 |
-| host4 | 99.9 | 444.4 | 2022-06-15T07:02:36 |
-| host1 | 66.6 | 1024.0 | 2022-06-15T07:02:37 |
-| host2 | 88.8 | 333.3 | 2022-06-15T07:02:38 |
-+-------+------+--------+---------------------+
++-------+-------+--------+---------------------+
+| host | cpu | memory | ts |
++-------+-------+--------+---------------------+
+| host3 | 111.1 | 444.4 | 2024-07-27T10:47:43 |
+| host6 | 222.2 | 555.5 | 2024-07-27T10:47:44 |
++-------+-------+--------+---------------------+
CREATE TABLE without_limit_rows(host string, cpu double, memory double, ts timestamp time index);
@@ -83,30 +93,30 @@ Affected Rows: 0
Copy without_limit_rows FROM '/tmp/demo/export/parquet_files/';
-Affected Rows: 4
+Affected Rows: 6
select count(*) from without_limit_rows;
+----------+
| COUNT(*) |
+----------+
-| 4 |
+| 6 |
+----------+
CREATE TABLE with_limit_rows_segment(host string, cpu double, memory double, ts timestamp time index);
Affected Rows: 0
-Copy with_limit_rows_segment FROM '/tmp/demo/export/parquet_files/' LIMIT 2;
+Copy with_limit_rows_segment FROM '/tmp/demo/export/parquet_files/' LIMIT 3;
-Affected Rows: 2
+Affected Rows: 3
select count(*) from with_limit_rows_segment;
+----------+
| COUNT(*) |
+----------+
-| 2 |
+| 3 |
+----------+
Copy with_limit_rows_segment FROM '/tmp/demo/export/parquet_files/' LIMIT hello;
diff --git a/tests/cases/standalone/common/copy/copy_from_fs_parquet.sql b/tests/cases/standalone/common/copy/copy_from_fs_parquet.sql
index 83cdc4f74ceb..10319e128149 100644
--- a/tests/cases/standalone/common/copy/copy_from_fs_parquet.sql
+++ b/tests/cases/standalone/common/copy/copy_from_fs_parquet.sql
@@ -1,18 +1,28 @@
CREATE TABLE demo(host string, cpu double, memory double, ts TIMESTAMP time index);
-insert into demo(host, cpu, memory, ts) values ('host1', 66.6, 1024, 1655276557000), ('host2', 88.8, 333.3, 1655276558000);
+CREATE TABLE demo_2(host string, cpu double, memory double, ts TIMESTAMP time index);
-Copy demo TO '/tmp/demo/export/parquet_files/demo.parquet';
+insert into
+ demo(host, cpu, memory, ts)
+values
+ ('host1', 66.6, 1024, 1655276557000),
+ ('host2', 88.8, 333.3, 1655276558000),
+ ('host3', 111.1, 444.4, 1722077263000);
-CREATE TABLE demo_2(host string, cpu double, memory double, ts TIMESTAMP time index);
+insert into
+ demo_2(host, cpu, memory, ts)
+values
+ ('host4', 77.7, 1111, 1655276555000),
+ ('host5', 99.9, 444.4, 1655276556000),
+ ('host6', 222.2, 555.5, 1722077264000);
-insert into demo_2(host, cpu, memory, ts) values ('host3', 77.7, 1111, 1655276555000), ('host4', 99.9, 444.4, 1655276556000);
+Copy demo TO '/tmp/demo/export/parquet_files/demo.parquet';
Copy demo_2 TO '/tmp/demo/export/parquet_files/demo_2.parquet';
CREATE TABLE with_filename(host string, cpu double, memory double, ts timestamp time index);
-Copy with_filename FROM '/tmp/demo/export/parquet_files/demo.parquet';
+Copy with_filename FROM '/tmp/demo/export/parquet_files/demo.parquet' with (start_time='2022-06-15 07:02:37', end_time='2022-06-15 07:02:39');
select * from with_filename order by ts;
@@ -24,7 +34,7 @@ select * from with_path order by ts;
CREATE TABLE with_pattern(host string, cpu double, memory double, ts timestamp time index);
-Copy with_pattern FROM '/tmp/demo/export/parquet_files/' WITH (PATTERN = 'demo.*');
+Copy with_pattern FROM '/tmp/demo/export/parquet_files/' WITH (PATTERN = 'demo.*', start_time='2022-06-15 07:02:39');
select * from with_pattern order by ts;
@@ -36,7 +46,7 @@ select count(*) from without_limit_rows;
CREATE TABLE with_limit_rows_segment(host string, cpu double, memory double, ts timestamp time index);
-Copy with_limit_rows_segment FROM '/tmp/demo/export/parquet_files/' LIMIT 2;
+Copy with_limit_rows_segment FROM '/tmp/demo/export/parquet_files/' LIMIT 3;
select count(*) from with_limit_rows_segment;
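Judging only from the expected `Affected Rows` counts above, the new `start_time`/`end_time` copy options appear to select rows whose timestamps fall in a half-open `[start_time, end_time)` range; bound inclusivity is an assumption here, since no test row sits exactly on a bound. A minimal Rust sketch of such a filter reproduces the `Affected Rows: 2` result for the `with_filename` copy:

```rust
/// Minimal sketch of the assumed `[start, end)` time-range filter behind the
/// COPY FROM options above. Timestamps are epoch milliseconds; `None` means
/// the bound is unset.
fn in_copy_range(ts: i64, start: Option<i64>, end: Option<i64>) -> bool {
    start.map_or(true, |s| ts >= s) && end.map_or(true, |e| ts < e)
}

fn main() {
    // Rows written to `demo` in the test: 2022-06-15T07:02:37, 07:02:38 and
    // 2024-07-27T10:47:43 (all UTC, as epoch milliseconds).
    let demo = [1655276557000_i64, 1655276558000, 1722077263000];
    // start_time='2022-06-15 07:02:37', end_time='2022-06-15 07:02:39'.
    let start = Some(1655276557000_i64);
    let end = Some(1655276559000_i64);
    let copied = demo.iter().filter(|ts| in_copy_range(**ts, start, end)).count();
    assert_eq!(copied, 2); // matches `Affected Rows: 2` for `with_filename`
}
```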
|
feat
|
support setting time range in Copy From statement (#4405)
|
9f2d53c3df4575f361e666dc3b9858beb6718e38
|
2024-07-12 13:47:18
|
irenjj
|
refactor: Remove the StandaloneKafkaConfig struct (#4253)
| false
|
diff --git a/src/cmd/src/standalone.rs b/src/cmd/src/standalone.rs
index cf025ccf4e2e..47e503c1874c 100644
--- a/src/cmd/src/standalone.rs
+++ b/src/cmd/src/standalone.rs
@@ -40,7 +40,7 @@ use common_telemetry::info;
use common_telemetry::logging::{LoggingOptions, TracingOptions};
use common_time::timezone::set_default_timezone;
use common_version::{short_version, version};
-use common_wal::config::StandaloneWalConfig;
+use common_wal::config::DatanodeWalConfig;
use datanode::config::{DatanodeOptions, ProcedureConfig, RegionEngineConfig, StorageConfig};
use datanode::datanode::{Datanode, DatanodeBuilder};
use file_engine::config::EngineConfig as FileEngineConfig;
@@ -130,7 +130,7 @@ pub struct StandaloneOptions {
pub opentsdb: OpentsdbOptions,
pub influxdb: InfluxdbOptions,
pub prom_store: PromStoreOptions,
- pub wal: StandaloneWalConfig,
+ pub wal: DatanodeWalConfig,
pub storage: StorageConfig,
pub metadata_store: KvBackendConfig,
pub procedure: ProcedureConfig,
@@ -155,7 +155,7 @@ impl Default for StandaloneOptions {
opentsdb: OpentsdbOptions::default(),
influxdb: InfluxdbOptions::default(),
prom_store: PromStoreOptions::default(),
- wal: StandaloneWalConfig::default(),
+ wal: DatanodeWalConfig::default(),
storage: StorageConfig::default(),
metadata_store: KvBackendConfig::default(),
procedure: ProcedureConfig::default(),
@@ -204,7 +204,7 @@ impl StandaloneOptions {
DatanodeOptions {
node_id: Some(0),
enable_telemetry: cloned_opts.enable_telemetry,
- wal: cloned_opts.wal.into(),
+ wal: cloned_opts.wal,
storage: cloned_opts.storage,
region_engine: cloned_opts.region_engine,
grpc: cloned_opts.grpc,
diff --git a/src/cmd/tests/load_config_test.rs b/src/cmd/tests/load_config_test.rs
index cf1cf6bc5048..fff3f3e8c9a6 100644
--- a/src/cmd/tests/load_config_test.rs
+++ b/src/cmd/tests/load_config_test.rs
@@ -24,7 +24,7 @@ use common_grpc::channel_manager::{
use common_runtime::global::RuntimeOptions;
use common_telemetry::logging::LoggingOptions;
use common_wal::config::raft_engine::RaftEngineConfig;
-use common_wal::config::{DatanodeWalConfig, StandaloneWalConfig};
+use common_wal::config::DatanodeWalConfig;
use datanode::config::{DatanodeOptions, RegionEngineConfig, StorageConfig};
use file_engine::config::EngineConfig;
use frontend::frontend::FrontendOptions;
@@ -206,7 +206,7 @@ fn test_load_standalone_example_config() {
},
component: StandaloneOptions {
default_timezone: Some("UTC".to_string()),
- wal: StandaloneWalConfig::RaftEngine(RaftEngineConfig {
+ wal: DatanodeWalConfig::RaftEngine(RaftEngineConfig {
dir: Some("/tmp/greptimedb/wal".to_string()),
sync_period: Some(Duration::from_secs(10)),
..Default::default()
diff --git a/src/common/meta/src/wal_options_allocator.rs b/src/common/meta/src/wal_options_allocator.rs
index 09b03c5b7dca..5fb3db6e20eb 100644
--- a/src/common/meta/src/wal_options_allocator.rs
+++ b/src/common/meta/src/wal_options_allocator.rs
@@ -123,6 +123,7 @@ pub fn prepare_wal_options(
#[cfg(test)]
mod tests {
+ use common_wal::config::kafka::common::KafkaTopicConfig;
use common_wal::config::kafka::MetasrvKafkaConfig;
use common_wal::test_util::run_test_with_kafka_wal;
@@ -160,9 +161,13 @@ mod tests {
.collect::<Vec<_>>();
// Creates a topic manager.
- let config = MetasrvKafkaConfig {
+ let kafka_topic = KafkaTopicConfig {
replication_factor: broker_endpoints.len() as i16,
+ ..Default::default()
+ };
+ let config = MetasrvKafkaConfig {
broker_endpoints,
+ kafka_topic,
..Default::default()
};
let kv_backend = Arc::new(MemoryKvBackend::new()) as KvBackendRef;
diff --git a/src/common/meta/src/wal_options_allocator/kafka/topic_manager.rs b/src/common/meta/src/wal_options_allocator/kafka/topic_manager.rs
index fb0130d0dfc7..ec88e37cd14d 100644
--- a/src/common/meta/src/wal_options_allocator/kafka/topic_manager.rs
+++ b/src/common/meta/src/wal_options_allocator/kafka/topic_manager.rs
@@ -56,11 +56,11 @@ impl TopicManager {
/// Creates a new topic manager.
pub fn new(config: MetasrvKafkaConfig, kv_backend: KvBackendRef) -> Self {
// Topics should be created.
- let topics = (0..config.num_topics)
- .map(|topic_id| format!("{}_{topic_id}", config.topic_name_prefix))
+ let topics = (0..config.kafka_topic.num_topics)
+ .map(|topic_id| format!("{}_{topic_id}", config.kafka_topic.topic_name_prefix))
.collect::<Vec<_>>();
- let selector = match config.selector_type {
+ let selector = match config.kafka_topic.selector_type {
TopicSelectorType::RoundRobin => RoundRobinTopicSelector::with_shuffle(),
};
@@ -76,7 +76,7 @@ impl TopicManager {
/// The initializer first tries to restore persisted topics from the kv backend.
/// If not enough topics retrieved, the initializer will try to contact the Kafka cluster and request creating more topics.
pub async fn start(&self) -> Result<()> {
- let num_topics = self.config.num_topics;
+ let num_topics = self.config.kafka_topic.num_topics;
ensure!(num_topics > 0, InvalidNumTopicsSnafu { num_topics });
// Topics should be created.
@@ -185,9 +185,9 @@ impl TopicManager {
match client
.create_topic(
topic.clone(),
- self.config.num_partitions,
- self.config.replication_factor,
- self.config.create_topic_timeout.as_millis() as i32,
+ self.config.kafka_topic.num_partitions,
+ self.config.kafka_topic.replication_factor,
+ self.config.kafka_topic.create_topic_timeout.as_millis() as i32,
)
.await
{
@@ -242,6 +242,7 @@ impl TopicManager {
#[cfg(test)]
mod tests {
+ use common_wal::config::kafka::common::KafkaTopicConfig;
use common_wal::test_util::run_test_with_kafka_wal;
use super::*;
@@ -283,9 +284,13 @@ mod tests {
.collect::<Vec<_>>();
// Creates a topic manager.
- let config = MetasrvKafkaConfig {
+ let kafka_topic = KafkaTopicConfig {
replication_factor: broker_endpoints.len() as i16,
+ ..Default::default()
+ };
+ let config = MetasrvKafkaConfig {
broker_endpoints,
+ kafka_topic,
..Default::default()
};
let kv_backend = Arc::new(MemoryKvBackend::new()) as KvBackendRef;
diff --git a/src/common/wal/src/config.rs b/src/common/wal/src/config.rs
index 0b47c32ee21d..6edee1703c81 100644
--- a/src/common/wal/src/config.rs
+++ b/src/common/wal/src/config.rs
@@ -17,7 +17,7 @@ pub mod raft_engine;
use serde::{Deserialize, Serialize};
-use crate::config::kafka::{DatanodeKafkaConfig, MetasrvKafkaConfig, StandaloneKafkaConfig};
+use crate::config::kafka::{DatanodeKafkaConfig, MetasrvKafkaConfig};
use crate::config::raft_engine::RaftEngineConfig;
/// Wal configurations for metasrv.
@@ -43,80 +43,43 @@ impl Default for DatanodeWalConfig {
}
}
-/// Wal configurations for standalone.
-#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
-#[serde(tag = "provider", rename_all = "snake_case")]
-pub enum StandaloneWalConfig {
- RaftEngine(RaftEngineConfig),
- Kafka(StandaloneKafkaConfig),
-}
-
-impl Default for StandaloneWalConfig {
- fn default() -> Self {
- Self::RaftEngine(RaftEngineConfig::default())
- }
-}
-
-impl From<StandaloneWalConfig> for MetasrvWalConfig {
- fn from(config: StandaloneWalConfig) -> Self {
+impl From<DatanodeWalConfig> for MetasrvWalConfig {
+ fn from(config: DatanodeWalConfig) -> Self {
match config {
- StandaloneWalConfig::RaftEngine(_) => Self::RaftEngine,
- StandaloneWalConfig::Kafka(config) => Self::Kafka(MetasrvKafkaConfig {
+ DatanodeWalConfig::RaftEngine(_) => Self::RaftEngine,
+ DatanodeWalConfig::Kafka(config) => Self::Kafka(MetasrvKafkaConfig {
broker_endpoints: config.broker_endpoints,
- num_topics: config.num_topics,
- selector_type: config.selector_type,
- topic_name_prefix: config.topic_name_prefix,
- num_partitions: config.num_partitions,
- replication_factor: config.replication_factor,
- create_topic_timeout: config.create_topic_timeout,
backoff: config.backoff,
+ kafka_topic: config.kafka_topic,
}),
}
}
}
-impl From<MetasrvWalConfig> for StandaloneWalConfig {
+impl From<MetasrvWalConfig> for DatanodeWalConfig {
fn from(config: MetasrvWalConfig) -> Self {
match config {
MetasrvWalConfig::RaftEngine => Self::RaftEngine(RaftEngineConfig::default()),
- MetasrvWalConfig::Kafka(config) => Self::Kafka(StandaloneKafkaConfig {
+ MetasrvWalConfig::Kafka(config) => Self::Kafka(DatanodeKafkaConfig {
broker_endpoints: config.broker_endpoints,
- num_topics: config.num_topics,
- selector_type: config.selector_type,
- topic_name_prefix: config.topic_name_prefix,
- num_partitions: config.num_partitions,
- replication_factor: config.replication_factor,
- create_topic_timeout: config.create_topic_timeout,
backoff: config.backoff,
+ kafka_topic: config.kafka_topic,
..Default::default()
}),
}
}
}
-impl From<StandaloneWalConfig> for DatanodeWalConfig {
- fn from(config: StandaloneWalConfig) -> Self {
- match config {
- StandaloneWalConfig::RaftEngine(config) => Self::RaftEngine(config),
- StandaloneWalConfig::Kafka(config) => Self::Kafka(DatanodeKafkaConfig {
- broker_endpoints: config.broker_endpoints,
- max_batch_bytes: config.max_batch_bytes,
- consumer_wait_timeout: config.consumer_wait_timeout,
- backoff: config.backoff,
- }),
- }
- }
-}
-
#[cfg(test)]
mod tests {
use std::time::Duration;
use common_base::readable_size::ReadableSize;
+ use tests::kafka::common::KafkaTopicConfig;
use super::*;
use crate::config::kafka::common::BackoffConfig;
- use crate::config::{DatanodeKafkaConfig, MetasrvKafkaConfig, StandaloneKafkaConfig};
+ use crate::config::{DatanodeKafkaConfig, MetasrvKafkaConfig};
use crate::TopicSelectorType;
#[test]
@@ -168,11 +131,6 @@ mod tests {
let toml_str = r#"
provider = "kafka"
broker_endpoints = ["127.0.0.1:9092"]
- num_topics = 32
- selector_type = "round_robin"
- topic_name_prefix = "greptimedb_wal_topic"
- replication_factor = 1
- create_topic_timeout = "30s"
max_batch_bytes = "1MB"
linger = "200ms"
consumer_wait_timeout = "100ms"
@@ -180,24 +138,32 @@ mod tests {
backoff_max = "10s"
backoff_base = 2
backoff_deadline = "5mins"
+ num_topics = 32
+ num_partitions = 1
+ selector_type = "round_robin"
+ replication_factor = 1
+ create_topic_timeout = "30s"
+ topic_name_prefix = "greptimedb_wal_topic"
"#;
// Deserialized to MetasrvWalConfig.
let metasrv_wal_config: MetasrvWalConfig = toml::from_str(toml_str).unwrap();
let expected = MetasrvKafkaConfig {
broker_endpoints: vec!["127.0.0.1:9092".to_string()],
- num_topics: 32,
- selector_type: TopicSelectorType::RoundRobin,
- topic_name_prefix: "greptimedb_wal_topic".to_string(),
- num_partitions: 1,
- replication_factor: 1,
- create_topic_timeout: Duration::from_secs(30),
backoff: BackoffConfig {
init: Duration::from_millis(500),
max: Duration::from_secs(10),
base: 2,
deadline: Some(Duration::from_secs(60 * 5)),
},
+ kafka_topic: KafkaTopicConfig {
+ num_topics: 32,
+ selector_type: TopicSelectorType::RoundRobin,
+ topic_name_prefix: "greptimedb_wal_topic".to_string(),
+ num_partitions: 1,
+ replication_factor: 1,
+ create_topic_timeout: Duration::from_secs(30),
+ },
};
assert_eq!(metasrv_wal_config, MetasrvWalConfig::Kafka(expected));
@@ -213,28 +179,15 @@ mod tests {
base: 2,
deadline: Some(Duration::from_secs(60 * 5)),
},
- };
- assert_eq!(datanode_wal_config, DatanodeWalConfig::Kafka(expected));
-
- // Deserialized to StandaloneWalConfig.
- let standalone_wal_config: StandaloneWalConfig = toml::from_str(toml_str).unwrap();
- let expected = StandaloneKafkaConfig {
- broker_endpoints: vec!["127.0.0.1:9092".to_string()],
- num_topics: 32,
- selector_type: TopicSelectorType::RoundRobin,
- topic_name_prefix: "greptimedb_wal_topic".to_string(),
- num_partitions: 1,
- replication_factor: 1,
- create_topic_timeout: Duration::from_secs(30),
- max_batch_bytes: ReadableSize::mb(1),
- consumer_wait_timeout: Duration::from_millis(100),
- backoff: BackoffConfig {
- init: Duration::from_millis(500),
- max: Duration::from_secs(10),
- base: 2,
- deadline: Some(Duration::from_secs(60 * 5)),
+ kafka_topic: KafkaTopicConfig {
+ num_topics: 32,
+ selector_type: TopicSelectorType::RoundRobin,
+ topic_name_prefix: "greptimedb_wal_topic".to_string(),
+ num_partitions: 1,
+ replication_factor: 1,
+ create_topic_timeout: Duration::from_secs(30),
},
};
- assert_eq!(standalone_wal_config, StandaloneWalConfig::Kafka(expected));
+ assert_eq!(datanode_wal_config, DatanodeWalConfig::Kafka(expected));
}
}
diff --git a/src/common/wal/src/config/kafka.rs b/src/common/wal/src/config/kafka.rs
index f47e444521f2..27265d00987e 100644
--- a/src/common/wal/src/config/kafka.rs
+++ b/src/common/wal/src/config/kafka.rs
@@ -15,8 +15,6 @@
pub mod common;
pub mod datanode;
pub mod metasrv;
-pub mod standalone;
pub use datanode::DatanodeKafkaConfig;
pub use metasrv::MetasrvKafkaConfig;
-pub use standalone::StandaloneKafkaConfig;
diff --git a/src/common/wal/src/config/kafka/common.rs b/src/common/wal/src/config/kafka/common.rs
index ea708d96159c..e61823938546 100644
--- a/src/common/wal/src/config/kafka/common.rs
+++ b/src/common/wal/src/config/kafka/common.rs
@@ -17,6 +17,8 @@ use std::time::Duration;
use serde::{Deserialize, Serialize};
use serde_with::with_prefix;
+use crate::{TopicSelectorType, TOPIC_NAME_PREFIX};
+
with_prefix!(pub backoff_prefix "backoff_");
/// Backoff configurations for kafka clients.
@@ -46,3 +48,35 @@ impl Default for BackoffConfig {
}
}
}
+
+/// Topic configurations for kafka clients.
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
+#[serde(default)]
+pub struct KafkaTopicConfig {
+ /// Number of topics to be created upon start.
+ pub num_topics: usize,
+ /// Number of partitions per topic.
+ pub num_partitions: i32,
+ /// The type of the topic selector with which to select a topic for a region.
+ pub selector_type: TopicSelectorType,
+ /// The replication factor of each topic.
+ pub replication_factor: i16,
+ /// The timeout of topic creation.
+ #[serde(with = "humantime_serde")]
+ pub create_topic_timeout: Duration,
+ /// Topic name prefix.
+ pub topic_name_prefix: String,
+}
+
+impl Default for KafkaTopicConfig {
+ fn default() -> Self {
+ Self {
+ num_topics: 64,
+ num_partitions: 1,
+ selector_type: TopicSelectorType::RoundRobin,
+ replication_factor: 1,
+ create_topic_timeout: Duration::from_secs(30),
+ topic_name_prefix: TOPIC_NAME_PREFIX.to_string(),
+ }
+ }
+}
diff --git a/src/common/wal/src/config/kafka/datanode.rs b/src/common/wal/src/config/kafka/datanode.rs
index ae97c1017cf5..b01e0635f637 100644
--- a/src/common/wal/src/config/kafka/datanode.rs
+++ b/src/common/wal/src/config/kafka/datanode.rs
@@ -17,7 +17,7 @@ use std::time::Duration;
use common_base::readable_size::ReadableSize;
use serde::{Deserialize, Serialize};
-use crate::config::kafka::common::{backoff_prefix, BackoffConfig};
+use crate::config::kafka::common::{backoff_prefix, BackoffConfig, KafkaTopicConfig};
use crate::BROKER_ENDPOINT;
/// Kafka wal configurations for datanode.
@@ -36,6 +36,9 @@ pub struct DatanodeKafkaConfig {
/// The backoff config.
#[serde(flatten, with = "backoff_prefix")]
pub backoff: BackoffConfig,
+ /// The kafka topic config.
+ #[serde(flatten)]
+ pub kafka_topic: KafkaTopicConfig,
}
impl Default for DatanodeKafkaConfig {
@@ -46,6 +49,7 @@ impl Default for DatanodeKafkaConfig {
max_batch_bytes: ReadableSize::mb(1),
consumer_wait_timeout: Duration::from_millis(100),
backoff: BackoffConfig::default(),
+ kafka_topic: KafkaTopicConfig::default(),
}
}
}
diff --git a/src/common/wal/src/config/kafka/metasrv.rs b/src/common/wal/src/config/kafka/metasrv.rs
index 99efe762fbc0..519992e17579 100644
--- a/src/common/wal/src/config/kafka/metasrv.rs
+++ b/src/common/wal/src/config/kafka/metasrv.rs
@@ -12,12 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::time::Duration;
-
use serde::{Deserialize, Serialize};
-use crate::config::kafka::common::{backoff_prefix, BackoffConfig};
-use crate::{TopicSelectorType, BROKER_ENDPOINT, TOPIC_NAME_PREFIX};
+use crate::config::kafka::common::{backoff_prefix, BackoffConfig, KafkaTopicConfig};
+use crate::BROKER_ENDPOINT;
/// Kafka wal configurations for metasrv.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
@@ -25,37 +23,21 @@ use crate::{TopicSelectorType, BROKER_ENDPOINT, TOPIC_NAME_PREFIX};
pub struct MetasrvKafkaConfig {
/// The broker endpoints of the Kafka cluster.
pub broker_endpoints: Vec<String>,
- /// The number of topics to be created upon start.
- pub num_topics: usize,
- /// The type of the topic selector with which to select a topic for a region.
- pub selector_type: TopicSelectorType,
- /// Topic name prefix.
- pub topic_name_prefix: String,
- /// The number of partitions per topic.
- pub num_partitions: i32,
- /// The replication factor of each topic.
- pub replication_factor: i16,
- /// The timeout of topic creation.
- #[serde(with = "humantime_serde")]
- pub create_topic_timeout: Duration,
/// The backoff config.
#[serde(flatten, with = "backoff_prefix")]
pub backoff: BackoffConfig,
+ /// The kafka topic config.
+ #[serde(flatten)]
+ pub kafka_topic: KafkaTopicConfig,
}
impl Default for MetasrvKafkaConfig {
fn default() -> Self {
let broker_endpoints = vec![BROKER_ENDPOINT.to_string()];
- let replication_factor = broker_endpoints.len() as i16;
Self {
broker_endpoints,
- num_topics: 64,
- selector_type: TopicSelectorType::RoundRobin,
- topic_name_prefix: TOPIC_NAME_PREFIX.to_string(),
- num_partitions: 1,
- replication_factor,
- create_topic_timeout: Duration::from_secs(30),
backoff: BackoffConfig::default(),
+ kafka_topic: KafkaTopicConfig::default(),
}
}
}
diff --git a/tests-integration/src/tests/test_util.rs b/tests-integration/src/tests/test_util.rs
index 7bb29ce3318e..491a93086953 100644
--- a/tests-integration/src/tests/test_util.rs
+++ b/tests-integration/src/tests/test_util.rs
@@ -21,6 +21,7 @@ use common_query::Output;
use common_recordbatch::util;
use common_telemetry::warn;
use common_test_util::find_workspace_path;
+use common_wal::config::kafka::common::KafkaTopicConfig;
use common_wal::config::kafka::{DatanodeKafkaConfig, MetasrvKafkaConfig};
use common_wal::config::{DatanodeWalConfig, MetasrvWalConfig};
use frontend::instance::Instance;
@@ -231,8 +232,11 @@ pub(crate) async fn standalone_with_kafka_wal() -> Option<Box<dyn RebuildableMoc
}))
.with_metasrv_wal_config(MetasrvWalConfig::Kafka(MetasrvKafkaConfig {
broker_endpoints: endpoints,
- topic_name_prefix: test_name.to_string(),
- num_topics: 3,
+ kafka_topic: KafkaTopicConfig {
+ topic_name_prefix: test_name.to_string(),
+ num_topics: 3,
+ ..Default::default()
+ },
..Default::default()
}));
let instance = TestContext::new(MockInstanceBuilder::Standalone(builder)).await;
@@ -261,8 +265,11 @@ pub(crate) async fn distributed_with_kafka_wal() -> Option<Box<dyn RebuildableMo
}))
.with_metasrv_wal_config(MetasrvWalConfig::Kafka(MetasrvKafkaConfig {
broker_endpoints: endpoints,
- topic_name_prefix: test_name.to_string(),
- num_topics: 3,
+ kafka_topic: KafkaTopicConfig {
+ topic_name_prefix: test_name.to_string(),
+ num_topics: 3,
+ ..Default::default()
+ },
..Default::default()
}));
let instance = TestContext::new(MockInstanceBuilder::Distributed(builder)).await;
diff --git a/tests-integration/tests/region_migration.rs b/tests-integration/tests/region_migration.rs
index 24d77a834592..73316685c60d 100644
--- a/tests-integration/tests/region_migration.rs
+++ b/tests-integration/tests/region_migration.rs
@@ -23,6 +23,7 @@ use common_recordbatch::RecordBatches;
use common_telemetry::info;
use common_test_util::recordbatch::check_output_stream;
use common_test_util::temp_dir::create_temp_dir;
+use common_wal::config::kafka::common::KafkaTopicConfig;
use common_wal::config::kafka::{DatanodeKafkaConfig, MetasrvKafkaConfig};
use common_wal::config::{DatanodeWalConfig, MetasrvWalConfig};
use datatypes::prelude::ScalarVector;
@@ -118,8 +119,11 @@ pub async fn test_region_migration(store_type: StorageType, endpoints: Vec<Strin
}))
.with_metasrv_wal_config(MetasrvWalConfig::Kafka(MetasrvKafkaConfig {
broker_endpoints: endpoints,
- num_topics: 3,
- topic_name_prefix: Uuid::new_v4().to_string(),
+ kafka_topic: KafkaTopicConfig {
+ num_topics: 3,
+ topic_name_prefix: Uuid::new_v4().to_string(),
+ ..Default::default()
+ },
..Default::default()
}))
.with_shared_home_dir(Arc::new(home_dir))
@@ -247,8 +251,11 @@ pub async fn test_metric_table_region_migration_by_sql(
}))
.with_metasrv_wal_config(MetasrvWalConfig::Kafka(MetasrvKafkaConfig {
broker_endpoints: endpoints,
- num_topics: 3,
- topic_name_prefix: Uuid::new_v4().to_string(),
+ kafka_topic: KafkaTopicConfig {
+ num_topics: 3,
+ topic_name_prefix: Uuid::new_v4().to_string(),
+ ..Default::default()
+ },
..Default::default()
}))
.with_shared_home_dir(Arc::new(home_dir))
@@ -369,8 +376,11 @@ pub async fn test_region_migration_by_sql(store_type: StorageType, endpoints: Ve
}))
.with_metasrv_wal_config(MetasrvWalConfig::Kafka(MetasrvKafkaConfig {
broker_endpoints: endpoints,
- num_topics: 3,
- topic_name_prefix: Uuid::new_v4().to_string(),
+ kafka_topic: KafkaTopicConfig {
+ num_topics: 3,
+ topic_name_prefix: Uuid::new_v4().to_string(),
+ ..Default::default()
+ },
..Default::default()
}))
.with_shared_home_dir(Arc::new(home_dir))
@@ -490,8 +500,11 @@ pub async fn test_region_migration_multiple_regions(
}))
.with_metasrv_wal_config(MetasrvWalConfig::Kafka(MetasrvKafkaConfig {
broker_endpoints: endpoints,
- num_topics: 3,
- topic_name_prefix: Uuid::new_v4().to_string(),
+ kafka_topic: KafkaTopicConfig {
+ num_topics: 3,
+ topic_name_prefix: Uuid::new_v4().to_string(),
+ ..Default::default()
+ },
..Default::default()
}))
.with_shared_home_dir(Arc::new(home_dir))
@@ -626,8 +639,11 @@ pub async fn test_region_migration_all_regions(store_type: StorageType, endpoint
}))
.with_metasrv_wal_config(MetasrvWalConfig::Kafka(MetasrvKafkaConfig {
broker_endpoints: endpoints,
- num_topics: 3,
- topic_name_prefix: Uuid::new_v4().to_string(),
+ kafka_topic: KafkaTopicConfig {
+ num_topics: 3,
+ topic_name_prefix: Uuid::new_v4().to_string(),
+ ..Default::default()
+ },
..Default::default()
}))
.with_shared_home_dir(Arc::new(home_dir))
@@ -757,8 +773,11 @@ pub async fn test_region_migration_incorrect_from_peer(
}))
.with_metasrv_wal_config(MetasrvWalConfig::Kafka(MetasrvKafkaConfig {
broker_endpoints: endpoints,
- num_topics: 3,
- topic_name_prefix: Uuid::new_v4().to_string(),
+ kafka_topic: KafkaTopicConfig {
+ num_topics: 3,
+ topic_name_prefix: Uuid::new_v4().to_string(),
+ ..Default::default()
+ },
..Default::default()
}))
.with_shared_home_dir(Arc::new(home_dir))
@@ -831,8 +850,11 @@ pub async fn test_region_migration_incorrect_region_id(
}))
.with_metasrv_wal_config(MetasrvWalConfig::Kafka(MetasrvKafkaConfig {
broker_endpoints: endpoints,
- num_topics: 3,
- topic_name_prefix: Uuid::new_v4().to_string(),
+ kafka_topic: KafkaTopicConfig {
+ num_topics: 3,
+ topic_name_prefix: Uuid::new_v4().to_string(),
+ ..Default::default()
+ },
..Default::default()
}))
.with_shared_home_dir(Arc::new(home_dir))
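Taken together, the changes above are mechanical: the per-topic knobs move out of `MetasrvKafkaConfig` and `DatanodeKafkaConfig` into the shared `KafkaTopicConfig`, which is flattened back into serialization so existing TOML files keep their flat key layout while call sites switch to nested struct-update syntax. A self-contained sketch of that pattern, using heavily trimmed stand-in structs (only two topic fields kept) and assuming `serde` (with derive) and `toml` as dependencies, shows the flat TOML landing in the nested struct:

```rust
use serde::Deserialize;

// Trimmed stand-ins for the real config structs, just to illustrate the
// `#[serde(flatten)]` layout the refactor settles on.
#[derive(Debug, Deserialize, PartialEq)]
struct KafkaTopicConfig {
    num_topics: usize,
    topic_name_prefix: String,
}

#[derive(Debug, Deserialize)]
struct MetasrvKafkaConfig {
    broker_endpoints: Vec<String>,
    // Topic keys stay flat in TOML but nest under `kafka_topic` in Rust.
    #[serde(flatten)]
    kafka_topic: KafkaTopicConfig,
}

fn main() {
    let toml_str = r#"
        broker_endpoints = ["127.0.0.1:9092"]
        num_topics = 32
        topic_name_prefix = "greptimedb_wal_topic"
    "#;
    let config: MetasrvKafkaConfig = toml::from_str(toml_str).unwrap();
    assert_eq!(config.broker_endpoints, vec!["127.0.0.1:9092".to_string()]);
    assert_eq!(
        config.kafka_topic,
        KafkaTopicConfig {
            num_topics: 32,
            topic_name_prefix: "greptimedb_wal_topic".to_string(),
        }
    );
}
```

Call sites that only care about a couple of topic settings then override them with `..Default::default()`, exactly as the updated tests above do.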
|
refactor
|
Remove the StandaloneKafkaConfig struct (#4253)
|
934bc139677be572f816ee28e80708cf7ac85eab
|
2024-09-27 16:47:36
|
Lei, HUANG
|
feat(mito): limit compaction output file size (#4754)
| false
|
diff --git a/src/mito2/src/compaction.rs b/src/mito2/src/compaction.rs
index 0f33471b21ac..3cea492071c7 100644
--- a/src/mito2/src/compaction.rs
+++ b/src/mito2/src/compaction.rs
@@ -240,7 +240,11 @@ impl CompactionScheduler {
request: CompactionRequest,
options: compact_request::Options,
) -> Result<()> {
- let picker = new_picker(options.clone(), &request.current_version.options.compaction);
+ let picker = new_picker(
+ &options,
+ &request.current_version.options.compaction,
+ request.current_version.options.append_mode,
+ );
let region_id = request.region_id();
let CompactionRequest {
engine_config,
@@ -500,7 +504,7 @@ pub struct CompactionOutput {
pub inputs: Vec<FileHandle>,
/// Whether to remove deletion markers.
pub filter_deleted: bool,
- /// Compaction output time range.
+ /// Compaction output time range. Only windowed compaction specifies output time range.
pub output_time_range: Option<TimestampRange>,
}
diff --git a/src/mito2/src/compaction/compactor.rs b/src/mito2/src/compaction/compactor.rs
index 9c8c0e02bd1e..12b9dd5fefb6 100644
--- a/src/mito2/src/compaction/compactor.rs
+++ b/src/mito2/src/compaction/compactor.rs
@@ -38,7 +38,7 @@ use crate::memtable::MemtableBuilderProvider;
use crate::read::Source;
use crate::region::opener::new_manifest_dir;
use crate::region::options::RegionOptions;
-use crate::region::version::{VersionBuilder, VersionControl, VersionRef};
+use crate::region::version::{VersionBuilder, VersionRef};
use crate::region::ManifestContext;
use crate::region::RegionState::Writable;
use crate::schedule::scheduler::LocalScheduler;
@@ -164,8 +164,7 @@ pub async fn open_compaction_region(
.compaction_time_window(manifest.compaction_time_window)
.options(req.region_options.clone())
.build();
- let version_control = Arc::new(VersionControl::new(version));
- version_control.current().version
+ Arc::new(version)
};
Ok(CompactionRegion {
@@ -395,8 +394,9 @@ impl Compactor for DefaultCompactor {
) -> Result<()> {
let picker_output = {
let picker_output = new_picker(
- compact_request_options,
+ &compact_request_options,
&compaction_region.region_options.compaction,
+ compaction_region.region_options.append_mode,
)
.pick(compaction_region);
diff --git a/src/mito2/src/compaction/picker.rs b/src/mito2/src/compaction/picker.rs
index 30c8d2844638..9397c2bf6470 100644
--- a/src/mito2/src/compaction/picker.rs
+++ b/src/mito2/src/compaction/picker.rs
@@ -119,10 +119,11 @@ impl PickerOutput {
/// Create a new picker based on the compaction request options and compaction options.
pub fn new_picker(
- compact_request_options: compact_request::Options,
+ compact_request_options: &compact_request::Options,
compaction_options: &CompactionOptions,
+ append_mode: bool,
) -> Arc<dyn Picker> {
- if let compact_request::Options::StrictWindow(window) = &compact_request_options {
+ if let compact_request::Options::StrictWindow(window) = compact_request_options {
let window = if window.window_seconds == 0 {
None
} else {
@@ -131,13 +132,15 @@ pub fn new_picker(
Arc::new(WindowedCompactionPicker::new(window)) as Arc<_>
} else {
match compaction_options {
- CompactionOptions::Twcs(twcs_opts) => Arc::new(TwcsPicker::new(
- twcs_opts.max_active_window_runs,
- twcs_opts.max_active_window_files,
- twcs_opts.max_inactive_window_runs,
- twcs_opts.max_inactive_window_files,
- twcs_opts.time_window_seconds(),
- )) as Arc<_>,
+ CompactionOptions::Twcs(twcs_opts) => Arc::new(TwcsPicker {
+ max_active_window_runs: twcs_opts.max_active_window_runs,
+ max_active_window_files: twcs_opts.max_active_window_files,
+ max_inactive_window_runs: twcs_opts.max_inactive_window_runs,
+ max_inactive_window_files: twcs_opts.max_inactive_window_files,
+ time_window_seconds: twcs_opts.time_window_seconds(),
+ max_output_file_size: twcs_opts.max_output_file_size.map(|r| r.as_bytes()),
+ append_mode,
+ }) as Arc<_>,
}
}
}
diff --git a/src/mito2/src/compaction/twcs.rs b/src/mito2/src/compaction/twcs.rs
index 4bbad692f05f..c6d2a112aad4 100644
--- a/src/mito2/src/compaction/twcs.rs
+++ b/src/mito2/src/compaction/twcs.rs
@@ -35,30 +35,23 @@ const LEVEL_COMPACTED: Level = 1;
/// candidates.
#[derive(Debug)]
pub struct TwcsPicker {
- max_active_window_runs: usize,
- max_active_window_files: usize,
- max_inactive_window_runs: usize,
- max_inactive_window_files: usize,
- time_window_seconds: Option<i64>,
+ /// Max allowed sorted runs in active window.
+ pub max_active_window_runs: usize,
+ /// Max allowed files in active window.
+ pub max_active_window_files: usize,
+ /// Max allowed sorted runs in inactive windows.
+ pub max_inactive_window_runs: usize,
+ /// Max allowed files in inactive windows.
+ pub max_inactive_window_files: usize,
+ /// Compaction time window in seconds.
+ pub time_window_seconds: Option<i64>,
+ /// Max allowed compaction output file size.
+ pub max_output_file_size: Option<u64>,
+ /// Whether the target region is in append mode.
+ pub append_mode: bool,
}
impl TwcsPicker {
- pub fn new(
- max_active_window_runs: usize,
- max_active_window_files: usize,
- max_inactive_window_runs: usize,
- max_inactive_window_files: usize,
- time_window_seconds: Option<i64>,
- ) -> Self {
- Self {
- max_inactive_window_runs,
- max_active_window_runs,
- time_window_seconds,
- max_active_window_files,
- max_inactive_window_files,
- }
- }
-
/// Builds compaction output from files.
/// For active writing window, we allow for at most `max_active_window_runs` files to alleviate
/// fragmentation. For other windows, we allow at most 1 file at each window.
@@ -82,47 +75,114 @@ impl TwcsPicker {
)
};
- // we only remove deletion markers once no file in current window overlaps with any other window.
let found_runs = sorted_runs.len();
- let filter_deleted = !files.overlapping && (found_runs == 1 || max_runs == 1);
+ // We only remove deletion markers once no file in current window overlaps with any other window
+ // and region is not in append mode.
+ let filter_deleted =
+ !files.overlapping && (found_runs == 1 || max_runs == 1) && !self.append_mode;
- if found_runs > max_runs {
+ let inputs = if found_runs > max_runs {
let files_to_compact = reduce_runs(sorted_runs, max_runs);
- info!("Building compaction output, active window: {:?}, current window: {}, max runs: {}, found runs: {}, output size: {}, remove deletion markers: {}", active_window, *window,max_runs, found_runs, files_to_compact.len(), filter_deleted);
- for inputs in files_to_compact {
- output.push(CompactionOutput {
- output_file_id: FileId::random(),
- output_level: LEVEL_COMPACTED, // always compact to l1
- inputs,
- filter_deleted,
- output_time_range: None, // we do not enforce output time range in twcs compactions.
- });
- }
+ let files_to_compact_len = files_to_compact.len();
+ info!(
+ "Building compaction output, active window: {:?}, \
+ current window: {}, \
+ max runs: {}, \
+ found runs: {}, \
+ output size: {}, \
+ max output size: {:?}, \
+ remove deletion markers: {}",
+ active_window,
+ *window,
+ max_runs,
+ found_runs,
+ files_to_compact_len,
+ self.max_output_file_size,
+ filter_deleted
+ );
+ files_to_compact
} else if files.files.len() > max_files {
- debug!(
- "Enforcing max file num in window: {}, active: {:?}, max: {}, current: {}",
+ info!(
+ "Enforcing max file num in window: {}, active: {:?}, max: {}, current: {}, max output size: {:?}, filter delete: {}",
*window,
active_window,
max_files,
- files.files.len()
+ files.files.len(),
+ self.max_output_file_size,
+ filter_deleted,
);
// Files in window exceeds file num limit
- let to_merge = enforce_file_num(&files.files, max_files);
+ vec![enforce_file_num(&files.files, max_files)]
+ } else {
+ debug!("Skip building compaction output, active window: {:?}, current window: {}, max runs: {}, found runs: {}, ", active_window, *window, max_runs, found_runs);
+ continue;
+ };
+
+ let split_inputs = if !filter_deleted
+ && let Some(max_output_file_size) = self.max_output_file_size
+ {
+ let len_before_split = inputs.len();
+ let maybe_split = enforce_max_output_size(inputs, max_output_file_size);
+ if maybe_split.len() != len_before_split {
+ info!("Compaction output file size exceeds threshold {}, split compaction inputs to: {:?}", max_output_file_size, maybe_split);
+ }
+ maybe_split
+ } else {
+ inputs
+ };
+
+ for input in split_inputs {
+ debug_assert!(input.len() > 1);
output.push(CompactionOutput {
output_file_id: FileId::random(),
output_level: LEVEL_COMPACTED, // always compact to l1
- inputs: to_merge,
+ inputs: input,
filter_deleted,
- output_time_range: None,
+ output_time_range: None, // we do not enforce output time range in twcs compactions.
});
- } else {
- debug!("Skip building compaction output, active window: {:?}, current window: {}, max runs: {}, found runs: {}, ", active_window, *window, max_runs, found_runs);
}
}
output
}
}
+/// Limits the size of compaction output in a naive manner.
+/// todo(hl): we can find the output file size more precisely by checking the time range
+/// of each row group and adding the sizes of those non-overlapping row groups. But for now
+/// we'd better not expose the SST details at this level.
+fn enforce_max_output_size(
+ inputs: Vec<Vec<FileHandle>>,
+ max_output_file_size: u64,
+) -> Vec<Vec<FileHandle>> {
+ inputs
+ .into_iter()
+ .flat_map(|input| {
+ debug_assert!(input.len() > 1);
+ let estimated_output_size = input.iter().map(|f| f.size()).sum::<u64>();
+ if estimated_output_size < max_output_file_size {
+ // total file size does not exceed the threshold, just return the original input.
+ return vec![input];
+ }
+ let mut splits = vec![];
+ let mut new_input = vec![];
+ let mut new_input_size = 0;
+ for f in input {
+ if new_input_size + f.size() > max_output_file_size {
+ splits.push(std::mem::take(&mut new_input));
+ new_input_size = 0;
+ }
+ new_input_size += f.size();
+ new_input.push(f);
+ }
+ if !new_input.is_empty() {
+ splits.push(new_input);
+ }
+ splits
+ })
+ .filter(|p| p.len() > 1)
+ .collect()
+}
+
/// Merges consecutive files so that file num does not exceed `max_file_num`, and chooses
/// the solution with minimum overhead according to files sizes to be merged.
/// `enforce_file_num` only merges consecutive files so that it won't create overlapping outputs.
@@ -305,10 +365,12 @@ fn find_latest_window_in_seconds<'a>(
#[cfg(test)]
mod tests {
use std::collections::HashSet;
+ use std::sync::Arc;
use super::*;
use crate::compaction::test_util::{new_file_handle, new_file_handles};
- use crate::sst::file::Level;
+ use crate::sst::file::{FileMeta, Level};
+ use crate::test_util::NoopFilePurger;
#[test]
fn test_get_latest_window_in_seconds() {
@@ -525,8 +587,16 @@ mod tests {
let mut windows = assign_to_windows(self.input_files.iter(), self.window_size);
let active_window =
find_latest_window_in_seconds(self.input_files.iter(), self.window_size);
- let output = TwcsPicker::new(4, usize::MAX, 1, usize::MAX, None)
- .build_output(&mut windows, active_window);
+ let output = TwcsPicker {
+ max_active_window_runs: 4,
+ max_active_window_files: usize::MAX,
+ max_inactive_window_runs: 1,
+ max_inactive_window_files: usize::MAX,
+ time_window_seconds: None,
+ max_output_file_size: None,
+ append_mode: false,
+ }
+ .build_output(&mut windows, active_window);
let output = output
.iter()
@@ -641,5 +711,43 @@ mod tests {
.check();
}
+ fn make_file_handles(inputs: &[(i64, i64, u64)]) -> Vec<FileHandle> {
+ inputs
+ .iter()
+ .map(|(start, end, size)| {
+ FileHandle::new(
+ FileMeta {
+ region_id: Default::default(),
+ file_id: Default::default(),
+ time_range: (
+ Timestamp::new_millisecond(*start),
+ Timestamp::new_millisecond(*end),
+ ),
+ level: 0,
+ file_size: *size,
+ available_indexes: Default::default(),
+ index_file_size: 0,
+ num_rows: 0,
+ num_row_groups: 0,
+ },
+ Arc::new(NoopFilePurger),
+ )
+ })
+ .collect()
+ }
+
+ #[test]
+ fn test_limit_output_size() {
+ let mut files = make_file_handles(&[(1, 1, 1)].repeat(6));
+ let runs = find_sorted_runs(&mut files);
+ assert_eq!(6, runs.len());
+ let files_to_merge = reduce_runs(runs, 2);
+
+ let enforced = enforce_max_output_size(files_to_merge, 2);
+ assert_eq!(2, enforced.len());
+ assert_eq!(2, enforced[0].len());
+ assert_eq!(2, enforced[1].len());
+ }
+
// TODO(hl): TTL tester that checks if get_expired_ssts function works as expected.
}
diff --git a/src/mito2/src/region/options.rs b/src/mito2/src/region/options.rs
index cc866e550210..4abc5925b705 100644
--- a/src/mito2/src/region/options.rs
+++ b/src/mito2/src/region/options.rs
@@ -204,6 +204,8 @@ pub struct TwcsOptions {
/// Compaction time window defined when creating tables.
#[serde(with = "humantime_serde")]
pub time_window: Option<Duration>,
+ /// Max allowed compaction output file size.
+ pub max_output_file_size: Option<ReadableSize>,
/// Whether to use remote compaction.
#[serde_as(as = "DisplayFromStr")]
pub remote_compaction: bool,
@@ -236,6 +238,7 @@ impl Default for TwcsOptions {
max_inactive_window_runs: 1,
max_inactive_window_files: 1,
time_window: None,
+ max_output_file_size: None,
remote_compaction: false,
fallback_to_local: true,
}
@@ -597,6 +600,7 @@ mod tests {
("compaction.twcs.max_active_window_files", "11"),
("compaction.twcs.max_inactive_window_runs", "2"),
("compaction.twcs.max_inactive_window_files", "3"),
+ ("compaction.twcs.max_output_file_size", "1GB"),
("compaction.twcs.time_window", "2h"),
("compaction.type", "twcs"),
("compaction.twcs.remote_compaction", "false"),
@@ -624,6 +628,7 @@ mod tests {
max_inactive_window_runs: 2,
max_inactive_window_files: 3,
time_window: Some(Duration::from_secs(3600 * 2)),
+ max_output_file_size: Some(ReadableSize::gb(1)),
remote_compaction: false,
fallback_to_local: true,
}),
@@ -656,6 +661,7 @@ mod tests {
max_inactive_window_runs: 2,
max_inactive_window_files: usize::MAX,
time_window: Some(Duration::from_secs(3600 * 2)),
+ max_output_file_size: None,
remote_compaction: false,
fallback_to_local: true,
}),
@@ -693,6 +699,7 @@ mod tests {
"compaction.twcs.max_active_window_files": "11",
"compaction.twcs.max_inactive_window_runs": "2",
"compaction.twcs.max_inactive_window_files": "7",
+ "compaction.twcs.max_output_file_size": "7MB",
"compaction.twcs.time_window": "2h"
},
"storage": "S3",
@@ -722,6 +729,7 @@ mod tests {
max_inactive_window_runs: 2,
max_inactive_window_files: 7,
time_window: Some(Duration::from_secs(3600 * 2)),
+ max_output_file_size: Some(ReadableSize::mb(7)),
remote_compaction: false,
fallback_to_local: true,
}),
diff --git a/src/sql/src/parsers/create_parser.rs b/src/sql/src/parsers/create_parser.rs
index de81bd1a2144..8b7eb420d2d0 100644
--- a/src/sql/src/parsers/create_parser.rs
+++ b/src/sql/src/parsers/create_parser.rs
@@ -370,12 +370,7 @@ impl<'a> ParserContext<'a> {
.map(parse_option_string)
.collect::<Result<HashMap<String, String>>>()?;
for key in options.keys() {
- ensure!(
- validate_table_option(key),
- InvalidTableOptionSnafu {
- key: key.to_string()
- }
- );
+ ensure!(validate_table_option(key), InvalidTableOptionSnafu { key });
}
Ok(options.into())
}
diff --git a/src/store-api/src/mito_engine_options.rs b/src/store-api/src/mito_engine_options.rs
index e641a1d2fcda..0e0f3fdac790 100644
--- a/src/store-api/src/mito_engine_options.rs
+++ b/src/store-api/src/mito_engine_options.rs
@@ -33,6 +33,7 @@ pub fn is_mito_engine_option_key(key: &str) -> bool {
"compaction.twcs.max_active_window_files",
"compaction.twcs.max_inactive_window_runs",
"compaction.twcs.max_inactive_window_files",
+ "compaction.twcs.max_output_file_size",
"compaction.twcs.time_window",
"compaction.twcs.remote_compaction",
"compaction.twcs.fallback_to_local",
|
feat
|
limit compaction output file size (#4754)
|
fae331d2ba165c17e1e7f30db7a99a516a3a11c2
|
2022-11-14 08:24:35
|
Lei, Huang
|
feat: Move create table logic to frontend (#455)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index be2e1b092621..d4e548bfd180 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2099,6 +2099,7 @@ dependencies = [
"common-catalog",
"common-error",
"common-grpc",
+ "common-insert",
"common-query",
"common-recordbatch",
"common-runtime",
@@ -2110,6 +2111,7 @@ dependencies = [
"datanode",
"datatypes",
"futures",
+ "futures-util",
"itertools",
"meta-client",
"meta-srv",
diff --git a/README.md b/README.md
index 1c538ec76258..51df8540102f 100644
--- a/README.md
+++ b/README.md
@@ -30,30 +30,35 @@ docker build --network host -f docker/Dockerfile -t greptimedb .
## Usage
-### Start Datanode
+### Start in standalone mode
```
-// Start datanode with default options.
-cargo run -- datanode start
+// Start datanode and frontend with default options.
+cargo run -- --log-level=debug standalone start
OR
-// Start datanode with `http-addr` option.
-cargo run -- datanode start --http-addr=0.0.0.0:9999
+// Start with `http-addr` option.
+cargo run -- --log-level=debug standalone start --http-addr=0.0.0.0:9999
OR
+// Start with `mysql-addr` option.
+cargo run -- --log-level=debug standalone start --mysql-addr=0.0.0.0:9999
+
+OR
// Start datanode with `log-dir` and `log-level` options.
-cargo run -- --log-dir=logs --log-level=debug datanode start
+cargo run -- --log-dir=logs --log-level=debug standalone start --mysql-addr=0.0.0.0:4102
+
```
-Start datanode with config file:
+Start with config file:
```
-cargo run -- --log-dir=logs --log-level=debug datanode start -c ./config/datanode.example.toml
+cargo run -- --log-dir=logs --log-level=debug standalone start -c ./config/standalone.example.toml
```
-Start datanode by runing docker container:
+Start datanode by running docker container:
```
docker run -p 3000:3000 \
@@ -62,46 +67,24 @@ docker run -p 3000:3000 \
greptimedb
```
-### Start Frontend
-
-Frontend should connect to Datanode, so **Datanode must have been started** at first!
-
-```
-// Connects to local Datanode at its default GRPC port: 3001
-
-// Start Frontend with default options.
-cargo run -- frontend start
-
-OR
-
-// Start Frontend with `mysql-addr` option.
-cargo run -- frontend start --mysql-addr=0.0.0.0:9999
-
-OR
-
-// Start datanode with `log-dir` and `log-level` options.
-cargo run -- --log-dir=logs --log-level=debug frontend start
-```
-
-Start datanode with config file:
-
-```
-cargo run -- --log-dir=logs --log-level=debug frontend start -c ./config/frontend.example.toml
-```
-
### SQL Operations
1. Connecting DB by [mysql client](https://dev.mysql.com/downloads/mysql/):
```
- # The datanode listen on port 3306 by default.
- mysql -h 127.0.0.1 -P 3306
+ # The standalone instance listens on port 4002 by default.
+ mysql -h 127.0.0.1 -P 4002
```
+2. Create a database:
+```SQL
+CREATE DATABASE hello_greptime;
+```
+
2. Create table:
```SQL
- CREATE TABLE monitor (
+ CREATE TABLE hello_greptime.monitor (
host STRING,
ts TIMESTAMP,
cpu DOUBLE DEFAULT 0,
@@ -113,22 +96,22 @@ cargo run -- --log-dir=logs --log-level=debug frontend start -c ./config/fronten
3. Insert data:
```SQL
- INSERT INTO monitor(host, cpu, memory, ts) VALUES ('host1', 66.6, 1024, 1660897955000);
- INSERT INTO monitor(host, cpu, memory, ts) VALUES ('host2', 77.7, 2048, 1660897956000);
- INSERT INTO monitor(host, cpu, memory, ts) VALUES ('host3', 88.8, 4096, 1660897957000);
+ INSERT INTO hello_greptime.monitor(host, cpu, memory, ts) VALUES ('host1', 66.6, 1024, 1660897955000);
+ INSERT INTO hello_greptime.monitor(host, cpu, memory, ts) VALUES ('host2', 77.7, 2048, 1660897956000);
+ INSERT INTO hello_greptime.monitor(host, cpu, memory, ts) VALUES ('host3', 88.8, 4096, 1660897957000);
```
4. Query data:
```SQL
- mysql> SELECT * FROM monitor;
- +-------+---------------+------+--------+
- | host | ts | cpu | memory |
- +-------+---------------+------+--------+
- | host1 | 1660897955000 | 66.6 | 1024 |
- | host2 | 1660897956000 | 77.7 | 2048 |
- | host3 | 1660897957000 | 88.8 | 4096 |
- +-------+---------------+------+--------+
+ mysql> SELECT * FROM hello_greptime.monitor;
+ +-------+---------------------+------+--------+
+ | host | ts | cpu | memory |
+ +-------+---------------------+------+--------+
+ | host1 | 2022-08-19 08:32:35 | 66.6 | 1024 |
+ | host2 | 2022-08-19 08:32:36 | 77.7 | 2048 |
+ | host3 | 2022-08-19 08:32:37 | 88.8 | 4096 |
+ +-------+---------------------+------+--------+
3 rows in set (0.01 sec)
```
You can delete your data by removing `/tmp/greptimedb`.
diff --git a/benchmarks/src/bin/nyc-taxi.rs b/benchmarks/src/bin/nyc-taxi.rs
index 02b5a5a8e1ee..bde094183789 100644
--- a/benchmarks/src/bin/nyc-taxi.rs
+++ b/benchmarks/src/bin/nyc-taxi.rs
@@ -97,11 +97,13 @@ async fn write_data(
let row_count = record_batch.num_rows();
let insert_batch = convert_record_batch(record_batch).into();
let insert_expr = InsertExpr {
+ schema_name: "public".to_string(),
table_name: TABLE_NAME.to_string(),
expr: Some(insert_expr::Expr::Values(insert_expr::Values {
values: vec![insert_batch],
})),
options: HashMap::default(),
+ region_number: 0,
};
let now = Instant::now();
db.insert(insert_expr).await.unwrap();
@@ -342,6 +344,8 @@ fn create_table_expr() -> CreateExpr {
primary_keys: vec!["VendorID".to_string()],
create_if_not_exists: false,
table_options: Default::default(),
+ region_ids: vec![0],
+ table_id: Some(0),
}
}
diff --git a/config/standalone.example.toml b/config/standalone.example.toml
new file mode 100644
index 000000000000..eec26b1f59f7
--- /dev/null
+++ b/config/standalone.example.toml
@@ -0,0 +1,14 @@
+mode = 'standalone'
+datanode_rpc_addr = '127.0.0.1:3001'
+http_addr = '0.0.0.0:4000'
+
+[grpc_options]
+addr = '0.0.0.0:4001'
+runtime_size = 4
+
+[mysql_options]
+addr = '0.0.0.0:4003'
+runtime_size = 4
+
+[influxdb_options]
+enable = true
diff --git a/src/api/greptime/v1/admin.proto b/src/api/greptime/v1/admin.proto
index 5dcd021d2f98..03e058523e91 100644
--- a/src/api/greptime/v1/admin.proto
+++ b/src/api/greptime/v1/admin.proto
@@ -41,6 +41,8 @@ message CreateExpr {
repeated string primary_keys = 7;
bool create_if_not_exists = 8;
map<string, string> table_options = 9;
+ optional uint32 table_id = 10;
+ repeated uint32 region_ids = 11;
}
message AlterExpr {
@@ -48,12 +50,17 @@ message AlterExpr {
optional string schema_name = 2;
string table_name = 3;
oneof kind {
- AddColumn add_column = 4;
+ AddColumns add_columns = 4;
}
}
+message AddColumns {
+ repeated AddColumn add_columns = 1;
+}
+
message AddColumn {
ColumnDef column_def = 1;
+ bool is_key = 2;
}
message CreateDatabaseExpr {
diff --git a/src/api/greptime/v1/database.proto b/src/api/greptime/v1/database.proto
index 117571b333a3..cbefdf19da33 100644
--- a/src/api/greptime/v1/database.proto
+++ b/src/api/greptime/v1/database.proto
@@ -38,14 +38,15 @@ message PhysicalPlan {
}
message InsertExpr {
- string table_name = 1;
+ string schema_name = 1;
+ string table_name = 2;
message Values {
- repeated bytes values = 1;
+ repeated bytes values = 3;
}
oneof expr {
- Values values = 2;
+ Values values = 4;
// TODO(LFC): Remove field "sql" in InsertExpr.
// When Frontend instance received an insertion SQL (`insert into ...`), it's anticipated to parse the SQL and
@@ -54,10 +55,12 @@ message InsertExpr {
// Then why the "sql" field exists here? It's because the Frontend needs table schema to create the values to insert,
// which is currently not able to find anywhere. (Maybe the table schema is suppose to be fetched from Meta?)
// The "sql" field is meant to be removed in the future.
- string sql = 3;
+ string sql = 5;
}
- map<string, bytes> options = 4;
+ /// The region number of current insert request.
+ uint32 region_number = 6;
+ map<string, bytes> options = 7;
}
// TODO(jiachun)
diff --git a/src/api/src/lib.rs b/src/api/src/lib.rs
index d5e3d6188e98..ae2c2912fb8c 100644
--- a/src/api/src/lib.rs
+++ b/src/api/src/lib.rs
@@ -1,6 +1,7 @@
pub mod error;
pub mod helper;
pub mod prometheus;
+pub mod result;
pub mod serde;
pub mod v1;
diff --git a/src/datanode/src/server/grpc/handler.rs b/src/api/src/result.rs
similarity index 90%
rename from src/datanode/src/server/grpc/handler.rs
rename to src/api/src/result.rs
index c90c5f8b10fe..34b65470d702 100644
--- a/src/datanode/src/server/grpc/handler.rs
+++ b/src/api/src/result.rs
@@ -1,8 +1,9 @@
-use api::v1::{
+use common_error::prelude::ErrorExt;
+
+use crate::v1::{
admin_result, codec::SelectResult, object_result, AdminResult, MutateResult, ObjectResult,
ResultHeader, SelectResult as SelectResultRaw,
};
-use common_error::prelude::ErrorExt;
pub const PROTOCOL_VERSION: u32 = 1;
@@ -10,14 +11,14 @@ pub type Success = u32;
pub type Failure = u32;
#[derive(Default)]
-pub(crate) struct ObjectResultBuilder {
+pub struct ObjectResultBuilder {
version: u32,
code: u32,
err_msg: Option<String>,
result: Option<Body>,
}
-pub(crate) enum Body {
+pub enum Body {
Mutate((Success, Failure)),
Select(SelectResult),
}
@@ -80,7 +81,7 @@ impl ObjectResultBuilder {
}
}
-pub(crate) fn build_err_result(err: &impl ErrorExt) -> ObjectResult {
+pub fn build_err_result(err: &impl ErrorExt) -> ObjectResult {
ObjectResultBuilder::new()
.status_code(err.status_code() as u32)
.err_msg(err.to_string())
@@ -88,7 +89,7 @@ pub(crate) fn build_err_result(err: &impl ErrorExt) -> ObjectResult {
}
#[derive(Debug)]
-pub(crate) struct AdminResultBuilder {
+pub struct AdminResultBuilder {
version: u32,
code: u32,
err_msg: Option<String>,
@@ -144,11 +145,11 @@ impl Default for AdminResultBuilder {
#[cfg(test)]
mod tests {
- use api::v1::{object_result, MutateResult};
use common_error::status_code::StatusCode;
use super::*;
- use crate::error::UnsupportedExprSnafu;
+ use crate::error::UnknownColumnDataTypeSnafu;
+ use crate::v1::{object_result, MutateResult};
#[test]
fn test_object_result_builder() {
@@ -175,14 +176,13 @@ mod tests {
#[test]
fn test_build_err_result() {
- let err = UnsupportedExprSnafu { name: "select" }.build();
+ let err = UnknownColumnDataTypeSnafu { datatype: 1 }.build();
let err_result = build_err_result(&err);
let header = err_result.header.unwrap();
let result = err_result.result;
assert_eq!(PROTOCOL_VERSION, header.version);
- assert_eq!(StatusCode::Internal as u32, header.code);
- assert_eq!("Unsupported expr type: select", header.err_msg);
+ assert_eq!(StatusCode::InvalidArguments as u32, header.code);
assert!(result.is_none());
}
}
diff --git a/src/catalog/src/error.rs b/src/catalog/src/error.rs
index 072a59f41d66..ed2a8018a2bd 100644
--- a/src/catalog/src/error.rs
+++ b/src/catalog/src/error.rs
@@ -171,12 +171,6 @@ pub enum Error {
source: meta_client::error::Error,
},
- #[snafu(display("Failed to bump table id"))]
- BumpTableId { msg: String, backtrace: Backtrace },
-
- #[snafu(display("Failed to parse table id from metasrv, data: {:?}", data))]
- ParseTableId { data: String, backtrace: Backtrace },
-
#[snafu(display("Failed to deserialize partition rule from string: {:?}", data))]
DeserializePartitionRule {
data: String,
@@ -232,9 +226,6 @@ impl ErrorExt for Error {
Error::SystemCatalogTableScan { source } => source.status_code(),
Error::SystemCatalogTableScanExec { source } => source.status_code(),
Error::InvalidTableSchema { source, .. } => source.status_code(),
- Error::BumpTableId { .. } | Error::ParseTableId { .. } => {
- StatusCode::StorageUnavailable
- }
Error::DeserializePartitionRule { .. } => StatusCode::Unexpected,
Error::InvalidSchemaInCatalog { .. } => StatusCode::Unexpected,
Error::Internal { source, .. } => source.status_code(),
diff --git a/src/catalog/src/lib.rs b/src/catalog/src/lib.rs
index 57b68a91825e..466a4d4533b5 100644
--- a/src/catalog/src/lib.rs
+++ b/src/catalog/src/lib.rs
@@ -69,9 +69,6 @@ pub trait CatalogManager: CatalogList {
/// Starts a catalog manager.
async fn start(&self) -> Result<()>;
- /// Returns next available table id.
- async fn next_table_id(&self) -> Result<TableId>;
-
/// Registers a table given given catalog/schema to catalog manager,
/// returns table registered.
async fn register_table(&self, request: RegisterTableRequest) -> Result<usize>;
diff --git a/src/catalog/src/local/manager.rs b/src/catalog/src/local/manager.rs
index 8b057ee495d5..7d9887bedb01 100644
--- a/src/catalog/src/local/manager.rs
+++ b/src/catalog/src/local/manager.rs
@@ -16,6 +16,7 @@ use table::engine::{EngineContext, TableEngineRef};
use table::metadata::TableId;
use table::requests::OpenTableRequest;
use table::table::numbers::NumbersTable;
+use table::table::TableIdProvider;
use table::TableRef;
use crate::error::{
@@ -278,6 +279,13 @@ impl CatalogList for LocalCatalogManager {
}
}
+#[async_trait::async_trait]
+impl TableIdProvider for LocalCatalogManager {
+ async fn next_table_id(&self) -> table::Result<TableId> {
+ Ok(self.next_table_id.fetch_add(1, Ordering::Relaxed))
+ }
+}
+
#[async_trait::async_trait]
impl CatalogManager for LocalCatalogManager {
/// Start [LocalCatalogManager] to load all information from system catalog table.
@@ -286,11 +294,6 @@ impl CatalogManager for LocalCatalogManager {
self.init().await
}
- #[inline]
- async fn next_table_id(&self) -> Result<TableId> {
- Ok(self.next_table_id.fetch_add(1, Ordering::Relaxed))
- }
-
async fn register_table(&self, request: RegisterTableRequest) -> Result<usize> {
let started = self.init_lock.lock().await;
diff --git a/src/catalog/src/local/memory.rs b/src/catalog/src/local/memory.rs
index cfa6434cf39c..7e515a914789 100644
--- a/src/catalog/src/local/memory.rs
+++ b/src/catalog/src/local/memory.rs
@@ -8,6 +8,7 @@ use std::sync::RwLock;
use common_catalog::consts::MIN_USER_TABLE_ID;
use snafu::OptionExt;
use table::metadata::TableId;
+use table::table::TableIdProvider;
use table::TableRef;
use crate::error::{CatalogNotFoundSnafu, Result, SchemaNotFoundSnafu, TableExistsSnafu};
@@ -41,6 +42,13 @@ impl Default for MemoryCatalogManager {
}
}
+#[async_trait::async_trait]
+impl TableIdProvider for MemoryCatalogManager {
+ async fn next_table_id(&self) -> table::error::Result<TableId> {
+ Ok(self.table_id.fetch_add(1, Ordering::Relaxed))
+ }
+}
+
#[async_trait::async_trait]
impl CatalogManager for MemoryCatalogManager {
async fn start(&self) -> Result<()> {
@@ -48,10 +56,6 @@ impl CatalogManager for MemoryCatalogManager {
Ok(())
}
- async fn next_table_id(&self) -> Result<TableId> {
- Ok(self.table_id.fetch_add(1, Ordering::Relaxed))
- }
-
async fn register_table(&self, request: RegisterTableRequest) -> Result<usize> {
let catalogs = self.catalogs.write().unwrap();
let catalog = catalogs
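Both catalog managers above now implement a TableIdProvider trait from the `table` crate instead of exposing `next_table_id` on `CatalogManager`. The trait definition itself is not part of this patch; the sketch below reconstructs its presumed shape from the two impls and the `TableIdProviderRef` alias used later in the diff, so the exact bounds and module paths are assumptions.

use std::sync::Arc;

use table::metadata::TableId;

// Presumed shape of the trait added in the `table` crate (not shown in this patch).
#[async_trait::async_trait]
pub trait TableIdProvider: Send + Sync {
    /// Returns the next free table id, bumping an internal counter.
    async fn next_table_id(&self) -> table::error::Result<TableId>;
}

pub type TableIdProviderRef = Arc<dyn TableIdProvider>;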
diff --git a/src/catalog/src/remote/manager.rs b/src/catalog/src/remote/manager.rs
index 41f5993921dd..5537d0f66e88 100644
--- a/src/catalog/src/remote/manager.rs
+++ b/src/catalog/src/remote/manager.rs
@@ -2,18 +2,15 @@ use std::any::Any;
use std::collections::HashMap;
use std::pin::Pin;
use std::sync::Arc;
-use std::time::Duration;
use arc_swap::ArcSwap;
use async_stream::stream;
-use backoff::exponential::ExponentialBackoffBuilder;
-use backoff::ExponentialBackoff;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MIN_USER_TABLE_ID};
use common_catalog::{
build_catalog_prefix, build_schema_prefix, build_table_global_prefix, CatalogKey, CatalogValue,
SchemaKey, SchemaValue, TableGlobalKey, TableGlobalValue, TableRegionalKey, TableRegionalValue,
};
-use common_telemetry::{debug, error, info};
+use common_telemetry::{debug, info};
use futures::Stream;
use futures_util::StreamExt;
use snafu::{OptionExt, ResultExt};
@@ -25,8 +22,8 @@ use table::TableRef;
use tokio::sync::Mutex;
use crate::error::{
- BumpTableIdSnafu, CatalogNotFoundSnafu, CreateTableSnafu, InvalidCatalogValueSnafu,
- OpenTableSnafu, ParseTableIdSnafu, SchemaNotFoundSnafu, TableExistsSnafu,
+ CatalogNotFoundSnafu, CreateTableSnafu, InvalidCatalogValueSnafu, OpenTableSnafu,
+ SchemaNotFoundSnafu, TableExistsSnafu,
};
use crate::error::{InvalidTableSchemaSnafu, Result};
use crate::remote::{Kv, KvBackendRef};
@@ -378,63 +375,6 @@ impl CatalogManager for RemoteCatalogManager {
Ok(())
}
- /// Bump table id in a CAS manner with backoff.
- async fn next_table_id(&self) -> Result<TableId> {
- let key = common_catalog::consts::TABLE_ID_KEY_PREFIX.as_bytes();
- let op = || async {
- // TODO(hl): optimize this get
- let (prev, prev_bytes) = match self.backend.get(key).await? {
- None => (MIN_USER_TABLE_ID, vec![]),
- Some(kv) => (parse_table_id(&kv.1)?, kv.1),
- };
-
- match self
- .backend
- .compare_and_set(key, &prev_bytes, &(prev + 1).to_le_bytes())
- .await
- {
- Ok(cas_res) => match cas_res {
- Ok(_) => Ok(prev),
- Err(e) => {
- info!("Table id {:?} already occupied", e);
- Err(backoff::Error::transient(
- BumpTableIdSnafu {
- msg: "Table id occupied",
- }
- .build(),
- ))
- }
- },
- Err(e) => {
- error!(e;"Failed to CAS table id");
- Err(backoff::Error::permanent(
- BumpTableIdSnafu {
- msg: format!("Failed to perform CAS operation: {:?}", e),
- }
- .build(),
- ))
- }
- }
- };
-
- let retry_policy: ExponentialBackoff = ExponentialBackoffBuilder::new()
- .with_initial_interval(Duration::from_millis(4))
- .with_multiplier(2.0)
- .with_max_interval(Duration::from_millis(1000))
- .with_max_elapsed_time(Some(Duration::from_millis(3000)))
- .build();
-
- backoff::future::retry(retry_policy, op).await.map_err(|e| {
- BumpTableIdSnafu {
- msg: format!(
- "Bump table id exceeds max fail times, last error msg: {:?}",
- e
- ),
- }
- .build()
- })
- }
-
async fn register_table(&self, request: RegisterTableRequest) -> Result<usize> {
let catalog_name = request.catalog;
let schema_name = request.schema;
@@ -614,16 +554,6 @@ impl CatalogProvider for RemoteCatalogProvider {
}
}
-/// Parse u8 slice to `TableId`
-fn parse_table_id(val: &[u8]) -> Result<TableId> {
- Ok(TableId::from_le_bytes(val.try_into().map_err(|_| {
- ParseTableIdSnafu {
- data: format!("{:?}", val),
- }
- .build()
- })?))
-}
-
pub struct RemoteSchemaProvider {
catalog_name: String,
schema_name: String,
@@ -745,17 +675,3 @@ impl SchemaProvider for RemoteSchemaProvider {
Ok(self.tables.load().contains_key(name))
}
}
-
-#[cfg(test)]
-mod tests {
- use super::*;
-
- #[test]
- fn test_parse_table_id() {
- assert_eq!(12, parse_table_id(&12_i32.to_le_bytes()).unwrap());
- let mut data = vec![];
- data.extend_from_slice(&12_i32.to_le_bytes());
- data.push(0);
- assert!(parse_table_id(&data).is_err());
- }
-}
diff --git a/src/catalog/tests/remote_catalog_tests.rs b/src/catalog/tests/remote_catalog_tests.rs
index f29740ef284c..5cbc5b37b14c 100644
--- a/src/catalog/tests/remote_catalog_tests.rs
+++ b/src/catalog/tests/remote_catalog_tests.rs
@@ -11,8 +11,8 @@ mod tests {
use catalog::remote::{
KvBackend, KvBackendRef, RemoteCatalogManager, RemoteCatalogProvider, RemoteSchemaProvider,
};
- use catalog::{CatalogManager, CatalogManagerRef, RegisterTableRequest};
- use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MIN_USER_TABLE_ID};
+ use catalog::{CatalogList, CatalogManager, RegisterTableRequest};
+ use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_catalog::{CatalogKey, CatalogValue, SchemaKey, SchemaValue};
use datatypes::schema::Schema;
use futures_util::StreamExt;
@@ -61,7 +61,9 @@ mod tests {
);
}
- async fn prepare_components(node_id: u64) -> (KvBackendRef, TableEngineRef, CatalogManagerRef) {
+ async fn prepare_components(
+ node_id: u64,
+ ) -> (KvBackendRef, TableEngineRef, Arc<RemoteCatalogManager>) {
let backend = Arc::new(MockKvBackend::default()) as KvBackendRef;
let table_engine = Arc::new(MockTableEngine::default());
let catalog_manager =
@@ -277,19 +279,4 @@ mod tests {
new_catalog.schema_names().unwrap().into_iter().collect()
)
}
-
- #[tokio::test]
- async fn test_next_table_id() {
- let node_id = 42;
- let (_, _, catalog_manager) = prepare_components(node_id).await;
- assert_eq!(
- MIN_USER_TABLE_ID,
- catalog_manager.next_table_id().await.unwrap()
- );
-
- assert_eq!(
- MIN_USER_TABLE_ID + 1,
- catalog_manager.next_table_id().await.unwrap()
- );
- }
}
diff --git a/src/client/examples/insert.rs b/src/client/examples/insert.rs
index 43f625adfbed..13850ebc11ac 100644
--- a/src/client/examples/insert.rs
+++ b/src/client/examples/insert.rs
@@ -15,11 +15,13 @@ async fn run() {
let db = Database::new("greptime", client);
let expr = InsertExpr {
+ schema_name: "public".to_string(),
table_name: "demo".to_string(),
expr: Some(insert_expr::Expr::Values(insert_expr::Values {
values: insert_batches(),
})),
options: HashMap::default(),
+ region_number: 0,
};
db.insert(expr).await.unwrap();
}
diff --git a/src/client/examples/logical.rs b/src/client/examples/logical.rs
index 44c967c08fb7..843cb0f8a8a3 100644
--- a/src/client/examples/logical.rs
+++ b/src/client/examples/logical.rs
@@ -49,6 +49,8 @@ async fn run() {
primary_keys: vec!["key".to_string()],
create_if_not_exists: false,
table_options: Default::default(),
+ table_id: Some(1024),
+ region_ids: vec![0],
};
let admin = Admin::new("create table", client.clone());
diff --git a/src/client/src/database.rs b/src/client/src/database.rs
index bff78ebda511..17e0908a483e 100644
--- a/src/client/src/database.rs
+++ b/src/client/src/database.rs
@@ -124,8 +124,6 @@ impl Database {
obj_result.try_into()
}
- // TODO(jiachun) update/delete
-
pub async fn object(&self, expr: ObjectExpr) -> Result<GrpcObjectResult> {
let res = self.objects(vec![expr]).await?.pop().unwrap();
Ok(res)
diff --git a/src/cmd/src/bin/greptime.rs b/src/cmd/src/bin/greptime.rs
index 75243a645f88..8615bcd6e45a 100644
--- a/src/cmd/src/bin/greptime.rs
+++ b/src/cmd/src/bin/greptime.rs
@@ -1,10 +1,10 @@
use std::fmt;
use clap::Parser;
-use cmd::datanode;
use cmd::error::Result;
use cmd::frontend;
use cmd::metasrv;
+use cmd::{datanode, standalone};
use common_telemetry::logging::error;
use common_telemetry::logging::info;
@@ -33,6 +33,8 @@ enum SubCommand {
Frontend(frontend::Command),
#[clap(name = "metasrv")]
Metasrv(metasrv::Command),
+ #[clap(name = "standalone")]
+ Standalone(standalone::Command),
}
impl SubCommand {
@@ -41,6 +43,7 @@ impl SubCommand {
SubCommand::Datanode(cmd) => cmd.run().await,
SubCommand::Frontend(cmd) => cmd.run().await,
SubCommand::Metasrv(cmd) => cmd.run().await,
+ SubCommand::Standalone(cmd) => cmd.run().await,
}
}
}
@@ -51,6 +54,7 @@ impl fmt::Display for SubCommand {
SubCommand::Datanode(..) => write!(f, "greptime-datanode"),
SubCommand::Frontend(..) => write!(f, "greptime-frontend"),
SubCommand::Metasrv(..) => write!(f, "greptime-metasrv"),
+ SubCommand::Standalone(..) => write!(f, "greptime-standalone"),
}
}
}
diff --git a/src/cmd/src/error.rs b/src/cmd/src/error.rs
index 9d65a565a95f..866783a8f568 100644
--- a/src/cmd/src/error.rs
+++ b/src/cmd/src/error.rs
@@ -11,6 +11,12 @@ pub enum Error {
source: datanode::error::Error,
},
+ #[snafu(display("Failed to build frontend, source: {}", source))]
+ BuildFrontend {
+ #[snafu(backtrace)]
+ source: frontend::error::Error,
+ },
+
#[snafu(display("Failed to start frontend, source: {}", source))]
StartFrontend {
#[snafu(backtrace)]
@@ -38,6 +44,9 @@ pub enum Error {
#[snafu(display("Missing config, msg: {}", msg))]
MissingConfig { msg: String, backtrace: Backtrace },
+
+ #[snafu(display("Illegal config: {}", msg))]
+ IllegalConfig { msg: String, backtrace: Backtrace },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -51,6 +60,8 @@ impl ErrorExt for Error {
Error::ReadConfig { .. } | Error::ParseConfig { .. } | Error::MissingConfig { .. } => {
StatusCode::InvalidArguments
}
+ Error::IllegalConfig { .. } => StatusCode::InvalidArguments,
+ Error::BuildFrontend { source, .. } => source.status_code(),
}
}
diff --git a/src/cmd/src/lib.rs b/src/cmd/src/lib.rs
index 09ac5bbd3552..56e81f9cdd6a 100644
--- a/src/cmd/src/lib.rs
+++ b/src/cmd/src/lib.rs
@@ -2,4 +2,5 @@ pub mod datanode;
pub mod error;
pub mod frontend;
pub mod metasrv;
+pub mod standalone;
mod toml_loader;
diff --git a/src/cmd/src/standalone.rs b/src/cmd/src/standalone.rs
new file mode 100644
index 000000000000..02823dfaded9
--- /dev/null
+++ b/src/cmd/src/standalone.rs
@@ -0,0 +1,192 @@
+use clap::Parser;
+use common_telemetry::info;
+use datanode::datanode::{Datanode, DatanodeOptions};
+use datanode::instance::InstanceRef;
+use frontend::frontend::{Frontend, FrontendOptions, Mode};
+use frontend::grpc::GrpcOptions;
+use frontend::influxdb::InfluxdbOptions;
+use frontend::instance::Instance as FeInstance;
+use frontend::mysql::MysqlOptions;
+use frontend::opentsdb::OpentsdbOptions;
+use frontend::postgres::PostgresOptions;
+use snafu::ResultExt;
+use tokio::try_join;
+
+use crate::error::{
+ BuildFrontendSnafu, Error, IllegalConfigSnafu, Result, StartDatanodeSnafu, StartFrontendSnafu,
+};
+use crate::toml_loader;
+
+#[derive(Parser)]
+pub struct Command {
+ #[clap(subcommand)]
+ subcmd: SubCommand,
+}
+
+impl Command {
+ pub async fn run(self) -> Result<()> {
+ self.subcmd.run().await
+ }
+}
+
+#[derive(Parser)]
+enum SubCommand {
+ Start(StartCommand),
+}
+
+impl SubCommand {
+ async fn run(self) -> Result<()> {
+ match self {
+ SubCommand::Start(cmd) => cmd.run().await,
+ }
+ }
+}
+
+#[derive(Debug, Parser)]
+struct StartCommand {
+ #[clap(long)]
+ http_addr: Option<String>,
+ #[clap(long)]
+ rpc_addr: Option<String>,
+ #[clap(long)]
+ mysql_addr: Option<String>,
+ #[clap(long)]
+ postgres_addr: Option<String>,
+ #[clap(long)]
+ opentsdb_addr: Option<String>,
+ #[clap(short, long)]
+ influxdb_enable: bool,
+ #[clap(short, long)]
+ config_file: Option<String>,
+}
+
+impl StartCommand {
+ async fn run(self) -> Result<()> {
+ let fe_opts = FrontendOptions::try_from(self)?;
+ let dn_opts = DatanodeOptions::default();
+
+ let mut datanode = Datanode::new(dn_opts.clone())
+ .await
+ .context(StartDatanodeSnafu)?;
+ let mut frontend = build_frontend(fe_opts, &dn_opts, datanode.get_instance()).await?;
+
+ try_join!(
+ async { datanode.start().await.context(StartDatanodeSnafu) },
+ async { frontend.start().await.context(StartFrontendSnafu) }
+ )?;
+
+ Ok(())
+ }
+}
+
+/// Build frontend instance in standalone mode
+async fn build_frontend(
+ fe_opts: FrontendOptions,
+ dn_opts: &DatanodeOptions,
+ datanode_instance: InstanceRef,
+) -> Result<Frontend<FeInstance>> {
+ let grpc_server_addr = &dn_opts.rpc_addr;
+ info!(
+ "Build frontend with datanode gRPC addr: {}",
+ grpc_server_addr
+ );
+ let mut frontend_instance = FeInstance::try_new(&fe_opts)
+ .await
+ .context(BuildFrontendSnafu)?;
+ frontend_instance.set_catalog_manager(datanode_instance.catalog_manager().clone());
+ Ok(Frontend::new(fe_opts, frontend_instance))
+}
+
+impl TryFrom<StartCommand> for FrontendOptions {
+ type Error = Error;
+
+ fn try_from(cmd: StartCommand) -> std::result::Result<Self, Self::Error> {
+ let mut opts: FrontendOptions = if let Some(path) = cmd.config_file {
+ toml_loader::from_file!(&path)?
+ } else {
+ FrontendOptions::default()
+ };
+
+ opts.mode = Mode::Standalone;
+
+ if let Some(addr) = cmd.http_addr {
+ opts.http_addr = Some(addr);
+ }
+ if let Some(addr) = cmd.rpc_addr {
+            // the frontend gRPC addr must not conflict with the datanode's default gRPC addr
+ let datanode_grpc_addr = DatanodeOptions::default().rpc_addr;
+ if addr == datanode_grpc_addr {
+ return IllegalConfigSnafu {
+ msg: format!(
+ "gRPC listen address conflicts with datanode reserved gRPC addr: {}",
+ datanode_grpc_addr
+ ),
+ }
+ .fail();
+ }
+ opts.grpc_options = Some(GrpcOptions {
+ addr,
+ ..Default::default()
+ });
+ }
+
+ if let Some(addr) = cmd.mysql_addr {
+ opts.mysql_options = Some(MysqlOptions {
+ addr,
+ ..Default::default()
+ })
+ }
+ if let Some(addr) = cmd.postgres_addr {
+ opts.postgres_options = Some(PostgresOptions {
+ addr,
+ ..Default::default()
+ })
+ }
+
+ if let Some(addr) = cmd.opentsdb_addr {
+ opts.opentsdb_options = Some(OpentsdbOptions {
+ addr,
+ ..Default::default()
+ });
+ }
+
+ if cmd.influxdb_enable {
+ opts.influxdb_options = Some(InfluxdbOptions { enable: true });
+ }
+
+ Ok(opts)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_read_config_file() {
+ let cmd = StartCommand {
+ http_addr: None,
+ rpc_addr: None,
+ mysql_addr: None,
+ postgres_addr: None,
+ opentsdb_addr: None,
+ config_file: Some(format!(
+ "{}/../../config/standalone.example.toml",
+ std::env::current_dir().unwrap().as_path().to_str().unwrap()
+ )),
+ influxdb_enable: false,
+ };
+
+ let fe_opts = FrontendOptions::try_from(cmd).unwrap();
+ assert_eq!(Mode::Standalone, fe_opts.mode);
+ assert_eq!("127.0.0.1:3001".to_string(), fe_opts.datanode_rpc_addr);
+ assert_eq!(Some("0.0.0.0:4000".to_string()), fe_opts.http_addr);
+ assert_eq!(
+ "0.0.0.0:4001".to_string(),
+ fe_opts.grpc_options.unwrap().addr
+ );
+ assert_eq!("0.0.0.0:4003", fe_opts.mysql_options.as_ref().unwrap().addr);
+ assert_eq!(4, fe_opts.mysql_options.as_ref().unwrap().runtime_size);
+ assert!(fe_opts.influxdb_options.as_ref().unwrap().enable);
+ }
+}
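The rpc_addr conflict check in `TryFrom<StartCommand>` can be exercised without starting any server. Below is a sketch of an additional unit test for the module above; the test name is illustrative, and the colliding address is taken from `DatanodeOptions::default()` rather than hard-coded.

    #[test]
    fn test_frontend_rpc_addr_conflicts_with_datanode() {
        let cmd = StartCommand {
            http_addr: None,
            // reuse the datanode's reserved gRPC address to hit the IllegalConfig path
            rpc_addr: Some(DatanodeOptions::default().rpc_addr),
            mysql_addr: None,
            postgres_addr: None,
            opentsdb_addr: None,
            influxdb_enable: false,
            config_file: None,
        };
        assert!(matches!(
            FrontendOptions::try_from(cmd),
            Err(Error::IllegalConfig { .. })
        ));
    }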
diff --git a/src/common/insert/src/insert.rs b/src/common/insert/src/insert.rs
index 132d95a000c0..18d0520f2a39 100644
--- a/src/common/insert/src/insert.rs
+++ b/src/common/insert/src/insert.rs
@@ -5,49 +5,46 @@ use std::{
sync::Arc,
};
-use api::{
- helper::ColumnDataTypeWrapper,
- v1::{
- codec::InsertBatch,
- column::{SemanticType, Values},
- Column,
- },
+use api::v1::{
+ codec::InsertBatch,
+ column::{SemanticType, Values},
+ AddColumns, Column,
};
+use api::v1::{AddColumn, ColumnDef, CreateExpr};
use common_base::BitVec;
use common_time::timestamp::Timestamp;
-use datatypes::schema::{ColumnSchema, SchemaBuilder, SchemaRef};
+use datatypes::schema::SchemaRef;
use datatypes::{data_type::ConcreteDataType, value::Value, vectors::VectorBuilder};
use snafu::{ensure, OptionExt, ResultExt};
use table::metadata::TableId;
use table::{
- requests::{AddColumnRequest, AlterKind, AlterTableRequest, CreateTableRequest, InsertRequest},
+ requests::{AddColumnRequest, AlterKind, AlterTableRequest, InsertRequest},
Table,
};
use crate::error::{
- ColumnDataTypeSnafu, ColumnNotFoundSnafu, CreateSchemaSnafu, DecodeInsertSnafu,
- DuplicatedTimestampColumnSnafu, IllegalInsertDataSnafu, MissingTimestampColumnSnafu, Result,
+ ColumnNotFoundSnafu, DecodeInsertSnafu, DuplicatedTimestampColumnSnafu, IllegalInsertDataSnafu,
+ MissingTimestampColumnSnafu, Result,
};
const TAG_SEMANTIC_TYPE: i32 = SemanticType::Tag as i32;
const TIMESTAMP_SEMANTIC_TYPE: i32 = SemanticType::Timestamp as i32;
#[inline]
-fn build_column_schema(column_name: &str, datatype: i32, nullable: bool) -> Result<ColumnSchema> {
- let datatype_wrapper = ColumnDataTypeWrapper::try_new(datatype).context(ColumnDataTypeSnafu)?;
-
- Ok(ColumnSchema::new(
- column_name,
- datatype_wrapper.into(),
- nullable,
- ))
+fn build_column_def(column_name: &str, datatype: i32, nullable: bool) -> ColumnDef {
+ ColumnDef {
+ name: column_name.to_string(),
+ datatype,
+ is_nullable: nullable,
+ default_constraint: None,
+ }
}
pub fn find_new_columns(
schema: &SchemaRef,
insert_batches: &[InsertBatch],
-) -> Result<Option<Vec<AddColumnRequest>>> {
- let mut requests = Vec::default();
+) -> Result<Option<AddColumns>> {
+ let mut columns_to_add = Vec::default();
let mut new_columns: HashSet<String> = HashSet::default();
for InsertBatch { columns, row_count } in insert_batches {
@@ -65,10 +62,9 @@ pub fn find_new_columns(
if schema.column_schema_by_name(column_name).is_none()
&& !new_columns.contains(column_name)
{
- let column_schema = build_column_schema(column_name, *datatype, true)?;
-
- requests.push(AddColumnRequest {
- column_schema,
+ let column_def = Some(build_column_def(column_name, *datatype, true));
+ columns_to_add.push(AddColumn {
+ column_def,
is_key: *semantic_type == TAG_SEMANTIC_TYPE,
});
new_columns.insert(column_name.to_string());
@@ -76,10 +72,12 @@ pub fn find_new_columns(
}
}
- if requests.is_empty() {
+ if columns_to_add.is_empty() {
Ok(None)
} else {
- Ok(Some(requests))
+ Ok(Some(AddColumns {
+ add_columns: columns_to_add,
+ }))
}
}
@@ -98,15 +96,15 @@ pub fn build_alter_table_request(
}
/// Try to build a CreateExpr (create table expression) from insert data.
-pub fn build_create_table_request(
+pub fn build_create_expr_from_insertion(
catalog_name: &str,
schema_name: &str,
- table_id: TableId,
+ table_id: Option<TableId>,
table_name: &str,
insert_batches: &[InsertBatch],
-) -> Result<CreateTableRequest> {
+) -> Result<CreateExpr> {
let mut new_columns: HashSet<String> = HashSet::default();
- let mut column_schemas = Vec::default();
+ let mut column_defs = Vec::default();
let mut primary_key_indices = Vec::default();
let mut timestamp_index = usize::MAX;
@@ -124,9 +122,8 @@ pub fn build_create_table_request(
{
if !new_columns.contains(column_name) {
let mut is_nullable = true;
- let mut is_time_index = false;
match *semantic_type {
- TAG_SEMANTIC_TYPE => primary_key_indices.push(column_schemas.len()),
+ TAG_SEMANTIC_TYPE => primary_key_indices.push(column_defs.len()),
TIMESTAMP_SEMANTIC_TYPE => {
ensure!(
timestamp_index == usize::MAX,
@@ -135,42 +132,42 @@ pub fn build_create_table_request(
duplicated: column_name,
}
);
- timestamp_index = column_schemas.len();
- is_time_index = true;
+ timestamp_index = column_defs.len();
// Timestamp column must not be null.
is_nullable = false;
}
_ => {}
}
- let column_schema = build_column_schema(column_name, *datatype, is_nullable)?
- .with_time_index(is_time_index);
- column_schemas.push(column_schema);
+ let column_def = build_column_def(column_name, *datatype, is_nullable);
+ column_defs.push(column_def);
new_columns.insert(column_name.to_string());
}
}
ensure!(timestamp_index != usize::MAX, MissingTimestampColumnSnafu);
+ let timestamp_field_name = columns[timestamp_index].column_name.clone();
- let schema = Arc::new(
- SchemaBuilder::try_from(column_schemas)
- .unwrap()
- .build()
- .context(CreateSchemaSnafu)?,
- );
+ let primary_keys = primary_key_indices
+ .iter()
+ .map(|idx| columns[*idx].column_name.clone())
+ .collect::<Vec<_>>();
- return Ok(CreateTableRequest {
- id: table_id,
- catalog_name: catalog_name.to_string(),
- schema_name: schema_name.to_string(),
+ let expr = CreateExpr {
+ catalog_name: Some(catalog_name.to_string()),
+ schema_name: Some(schema_name.to_string()),
table_name: table_name.to_string(),
- desc: None,
- schema,
+ desc: Some("Created on insertion".to_string()),
+ column_defs,
+ time_index: timestamp_field_name,
+ primary_keys,
create_if_not_exists: true,
- primary_key_indices,
- table_options: HashMap::new(),
- region_numbers: vec![0],
- });
+ table_options: Default::default(),
+ table_id,
+            region_ids: vec![0], // TODO(hl): region id should be allocated by the frontend
+ };
+
+ return Ok(expr);
}
IllegalInsertDataSnafu.fail()
@@ -233,7 +230,7 @@ pub fn insertion_expr_to_request(
}
#[inline]
-pub fn insert_batches(bytes_vec: Vec<Vec<u8>>) -> Result<Vec<InsertBatch>> {
+pub fn insert_batches(bytes_vec: &[Vec<u8>]) -> Result<Vec<InsertBatch>> {
bytes_vec
.iter()
.map(|bytes| bytes.deref().try_into().context(DecodeInsertSnafu))
@@ -365,6 +362,7 @@ mod tests {
use std::any::Any;
use std::sync::Arc;
+ use api::helper::ColumnDataTypeWrapper;
use api::v1::{
codec::InsertBatch,
column::{self, SemanticType, Values},
@@ -379,50 +377,114 @@ mod tests {
schema::{ColumnSchema, SchemaBuilder, SchemaRef},
value::Value,
};
+ use snafu::ResultExt;
use table::error::Result as TableResult;
use table::metadata::TableInfoRef;
use table::Table;
use super::{
- build_column_schema, build_create_table_request, convert_values, find_new_columns,
- insert_batches, insertion_expr_to_request, is_null, TAG_SEMANTIC_TYPE,
- TIMESTAMP_SEMANTIC_TYPE,
+ build_create_expr_from_insertion, convert_values, find_new_columns, insert_batches,
+ insertion_expr_to_request, is_null, TAG_SEMANTIC_TYPE, TIMESTAMP_SEMANTIC_TYPE,
};
+ use crate::error;
+ use crate::error::ColumnDataTypeSnafu;
+
+ #[inline]
+ fn build_column_schema(
+ column_name: &str,
+ datatype: i32,
+ nullable: bool,
+ ) -> error::Result<ColumnSchema> {
+ let datatype_wrapper =
+ ColumnDataTypeWrapper::try_new(datatype).context(ColumnDataTypeSnafu)?;
+
+ Ok(ColumnSchema::new(
+ column_name,
+ datatype_wrapper.into(),
+ nullable,
+ ))
+ }
#[test]
fn test_build_create_table_request() {
- let table_id = 10;
+ let table_id = Some(10);
let table_name = "test_metric";
- assert!(build_create_table_request("", "", table_id, table_name, &[]).is_err());
+ assert!(build_create_expr_from_insertion("", "", table_id, table_name, &[]).is_err());
+
+ let mock_batch_bytes = mock_insert_batches();
+ let insert_batches = insert_batches(&mock_batch_bytes).unwrap();
- let insert_batches = insert_batches(mock_insert_batches()).unwrap();
+ let create_expr =
+ build_create_expr_from_insertion("", "", table_id, table_name, &insert_batches)
+ .unwrap();
+
+ assert_eq!(table_id, create_expr.table_id);
+ assert_eq!(table_name, create_expr.table_name);
+ assert_eq!(Some("Created on insertion".to_string()), create_expr.desc);
+ assert_eq!(
+ vec![create_expr.column_defs[0].name.clone()],
+ create_expr.primary_keys
+ );
- let req =
- build_create_table_request("", "", table_id, table_name, &insert_batches).unwrap();
- assert_eq!(table_id, req.id);
- assert_eq!(table_name, req.table_name);
- assert!(req.desc.is_none());
- assert_eq!(vec![0], req.primary_key_indices);
+ let column_defs = create_expr.column_defs;
+ assert_eq!(column_defs[3].name, create_expr.time_index);
+ assert_eq!(4, column_defs.len());
- let schema = req.schema;
- assert_eq!(Some(3), schema.timestamp_index());
- assert_eq!(4, schema.num_columns());
assert_eq!(
ConcreteDataType::string_datatype(),
- schema.column_schema_by_name("host").unwrap().data_type
+ ConcreteDataType::from(
+ ColumnDataTypeWrapper::try_new(
+ column_defs
+ .iter()
+ .find(|c| c.name == "host")
+ .unwrap()
+ .datatype
+ )
+ .unwrap()
+ )
);
+
assert_eq!(
ConcreteDataType::float64_datatype(),
- schema.column_schema_by_name("cpu").unwrap().data_type
+ ConcreteDataType::from(
+ ColumnDataTypeWrapper::try_new(
+ column_defs
+ .iter()
+ .find(|c| c.name == "cpu")
+ .unwrap()
+ .datatype
+ )
+ .unwrap()
+ )
);
+
assert_eq!(
ConcreteDataType::float64_datatype(),
- schema.column_schema_by_name("memory").unwrap().data_type
+ ConcreteDataType::from(
+ ColumnDataTypeWrapper::try_new(
+ column_defs
+ .iter()
+ .find(|c| c.name == "memory")
+ .unwrap()
+ .datatype
+ )
+ .unwrap()
+ )
);
+
assert_eq!(
ConcreteDataType::timestamp_millis_datatype(),
- schema.column_schema_by_name("ts").unwrap().data_type
+ ConcreteDataType::from(
+ ColumnDataTypeWrapper::try_new(
+ column_defs
+ .iter()
+ .find(|c| c.name == "ts")
+ .unwrap()
+ .datatype
+ )
+ .unwrap()
+ )
);
}
@@ -440,22 +502,32 @@ mod tests {
assert!(find_new_columns(&schema, &[]).unwrap().is_none());
- let insert_batches = insert_batches(mock_insert_batches()).unwrap();
- let new_columns = find_new_columns(&schema, &insert_batches).unwrap().unwrap();
+ let mock_insert_bytes = mock_insert_batches();
+ let insert_batches = insert_batches(&mock_insert_bytes).unwrap();
+ let add_columns = find_new_columns(&schema, &insert_batches).unwrap().unwrap();
- assert_eq!(2, new_columns.len());
- let host_column = &new_columns[0];
+ assert_eq!(2, add_columns.add_columns.len());
+ let host_column = &add_columns.add_columns[0];
assert!(host_column.is_key);
+
assert_eq!(
ConcreteDataType::string_datatype(),
- host_column.column_schema.data_type
+ ConcreteDataType::from(
+ ColumnDataTypeWrapper::try_new(host_column.column_def.as_ref().unwrap().datatype)
+ .unwrap()
+ )
);
- let memory_column = &new_columns[1];
+
+ let memory_column = &add_columns.add_columns[1];
assert!(!memory_column.is_key);
+
assert_eq!(
ConcreteDataType::float64_datatype(),
- memory_column.column_schema.data_type
- )
+ ConcreteDataType::from(
+ ColumnDataTypeWrapper::try_new(memory_column.column_def.as_ref().unwrap().datatype)
+ .unwrap()
+ )
+ );
}
#[test]
@@ -465,7 +537,7 @@ mod tests {
let values = insert_expr::Values {
values: mock_insert_batches(),
};
- let insert_batches = insert_batches(values.values).unwrap();
+ let insert_batches = insert_batches(&values.values).unwrap();
let insert_req =
insertion_expr_to_request("greptime", "public", "demo", insert_batches, table).unwrap();
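`find_new_columns` now hands back the gRPC `AddColumns` payload directly, so a caller can forward it as an alter kind without rebuilding column schemas. A minimal sketch of that glue, assuming the caller already holds the table's `SchemaRef` and the raw insert bytes; the helper name is hypothetical and not part of this patch.

use api::v1::alter_expr::Kind;
use common_insert::{find_new_columns, insert_batches};
use datatypes::schema::SchemaRef;

/// Returns the alter kind needed before this insert can be applied, if any.
fn alter_kind_for_insert(
    schema: &SchemaRef,
    raw_batches: &[Vec<u8>],
) -> common_insert::error::Result<Option<Kind>> {
    let batches = insert_batches(raw_batches)?;
    Ok(find_new_columns(schema, &batches)?.map(Kind::AddColumns))
}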
diff --git a/src/common/insert/src/lib.rs b/src/common/insert/src/lib.rs
index 2d36dfa33c5f..7ad7542e5402 100644
--- a/src/common/insert/src/lib.rs
+++ b/src/common/insert/src/lib.rs
@@ -1,6 +1,6 @@
pub mod error;
mod insert;
pub use insert::{
- build_alter_table_request, build_create_table_request, find_new_columns, insert_batches,
+ build_alter_table_request, build_create_expr_from_insertion, find_new_columns, insert_batches,
insertion_expr_to_request,
};
diff --git a/src/datanode/src/datanode.rs b/src/datanode/src/datanode.rs
index 466a0102f1cd..83f4fbcbd20b 100644
--- a/src/datanode/src/datanode.rs
+++ b/src/datanode/src/datanode.rs
@@ -82,4 +82,8 @@ impl Datanode {
self.services.start(&self.opts).await?;
Ok(())
}
+
+ pub fn get_instance(&self) -> InstanceRef {
+ self.instance.clone()
+ }
}
diff --git a/src/datanode/src/error.rs b/src/datanode/src/error.rs
index 2f7e7c40a946..97f5727720e7 100644
--- a/src/datanode/src/error.rs
+++ b/src/datanode/src/error.rs
@@ -284,16 +284,15 @@ pub enum Error {
#[snafu(display("Insert batch is empty"))]
EmptyInsertBatch,
- #[snafu(display("Failed to build frontend instance, source: {}", source))]
- BuildFrontend {
- #[snafu(backtrace)]
- source: frontend::error::Error,
- },
+ #[snafu(display(
+ "Table id provider not found, cannot execute SQL directly on datanode in distributed mode"
+ ))]
+ TableIdProviderNotFound { backtrace: Backtrace },
- #[snafu(display("Failed to start frontend instance, source: {}", source))]
- StartFrontend {
+ #[snafu(display("Failed to bump table id, source: {}", source))]
+ BumpTableId {
#[snafu(backtrace)]
- source: frontend::error::Error,
+ source: table::error::Error,
},
}
@@ -363,9 +362,8 @@ impl ErrorExt for Error {
Error::MetaClientInit { source, .. } => source.status_code(),
Error::InsertData { source, .. } => source.status_code(),
Error::EmptyInsertBatch => StatusCode::InvalidArguments,
- Error::BuildFrontend { source, .. } | Error::StartFrontend { source, .. } => {
- source.status_code()
- }
+ Error::TableIdProviderNotFound { .. } => StatusCode::Unsupported,
+ Error::BumpTableId { source, .. } => source.status_code(),
}
}
diff --git a/src/datanode/src/instance.rs b/src/datanode/src/instance.rs
index 5cbe41404d59..bc5fda55f62b 100644
--- a/src/datanode/src/instance.rs
+++ b/src/datanode/src/instance.rs
@@ -13,6 +13,7 @@ use object_store::{services::fs::Builder, util, ObjectStore};
use query::query_engine::{QueryEngineFactory, QueryEngineRef};
use snafu::prelude::*;
use storage::{config::EngineConfig as StorageEngineConfig, EngineImpl};
+use table::table::TableIdProviderRef;
use table_engine::config::EngineConfig as TableEngineConfig;
use table_engine::engine::MitoEngine;
@@ -35,6 +36,7 @@ pub struct Instance {
pub(crate) catalog_manager: CatalogManagerRef,
pub(crate) physical_planner: PhysicalPlanner,
pub(crate) script_executor: ScriptExecutor,
+ pub(crate) table_id_provider: Option<TableIdProviderRef>,
#[allow(unused)]
pub(crate) meta_client: Option<Arc<MetaClient>>,
pub(crate) heartbeat_task: Option<HeartbeatTask>,
@@ -66,7 +68,7 @@ impl Instance {
));
// create remote catalog manager
- let (catalog_manager, factory) = match opts.mode {
+ let (catalog_manager, factory, table_id_provider) = match opts.mode {
Mode::Standalone => {
let catalog = Arc::new(
catalog::local::LocalCatalogManager::try_new(table_engine.clone())
@@ -74,7 +76,11 @@ impl Instance {
.context(CatalogSnafu)?,
);
let factory = QueryEngineFactory::new(catalog.clone());
- (catalog as CatalogManagerRef, factory)
+ (
+ catalog.clone() as CatalogManagerRef,
+ factory,
+ Some(catalog as TableIdProviderRef),
+ )
}
Mode::Distributed => {
@@ -86,7 +92,7 @@ impl Instance {
}),
));
let factory = QueryEngineFactory::new(catalog.clone());
- (catalog as CatalogManagerRef, factory)
+ (catalog as CatalogManagerRef, factory, None)
}
};
@@ -110,6 +116,7 @@ impl Instance {
script_executor,
meta_client,
heartbeat_task,
+ table_id_provider,
})
}
diff --git a/src/datanode/src/instance/grpc.rs b/src/datanode/src/instance/grpc.rs
index e44345aa9ec3..1dee0a0e27c3 100644
--- a/src/datanode/src/instance/grpc.rs
+++ b/src/datanode/src/instance/grpc.rs
@@ -1,9 +1,7 @@
-use std::ops::Deref;
-
-use api::v1::codec::RegionNumber;
+use api::result::{build_err_result, AdminResultBuilder, ObjectResultBuilder};
use api::v1::{
- admin_expr, codec::InsertBatch, insert_expr, object_expr, select_expr, AdminExpr, AdminResult,
- CreateDatabaseExpr, ObjectExpr, ObjectResult, SelectExpr,
+ admin_expr, insert_expr, object_expr, select_expr, AdminExpr, AdminResult, CreateDatabaseExpr,
+ ObjectExpr, ObjectResult, SelectExpr,
};
use async_trait::async_trait;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
@@ -11,12 +9,11 @@ use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_insert::insertion_expr_to_request;
use common_query::Output;
-use common_telemetry::logging::{debug, info};
use query::plan::LogicalPlan;
use servers::query_handler::{GrpcAdminHandler, GrpcQueryHandler};
use snafu::prelude::*;
use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};
-use table::requests::{AddColumnRequest, CreateDatabaseRequest};
+use table::requests::CreateDatabaseRequest;
use crate::error::{
CatalogNotFoundSnafu, CatalogSnafu, DecodeLogicalPlanSnafu, EmptyInsertBatchSnafu,
@@ -24,78 +21,10 @@ use crate::error::{
UnsupportedExprSnafu,
};
use crate::instance::Instance;
-use crate::server::grpc::handler::{build_err_result, AdminResultBuilder, ObjectResultBuilder};
use crate::server::grpc::plan::PhysicalPlanner;
use crate::server::grpc::select::to_object_result;
-use crate::sql::SqlRequest;
impl Instance {
- async fn add_new_columns_to_table(
- &self,
- table_name: &str,
- add_columns: Vec<AddColumnRequest>,
- ) -> Result<()> {
- let column_names = add_columns
- .iter()
- .map(|req| req.column_schema.name.clone())
- .collect::<Vec<_>>();
-
- let alter_request = common_insert::build_alter_table_request(table_name, add_columns);
-
- debug!(
- "Adding new columns: {:?} to table: {}",
- column_names, table_name
- );
-
- let _result = self
- .sql_handler()
- .execute(SqlRequest::Alter(alter_request))
- .await?;
-
- info!(
- "Added new columns: {:?} to table: {}",
- column_names, table_name
- );
- Ok(())
- }
-
- async fn create_table_by_insert_batches(
- &self,
- catalog_name: &str,
- schema_name: &str,
- table_name: &str,
- insert_batches: &[InsertBatch],
- ) -> Result<()> {
- // Create table automatically, build schema from data.
- let table_id = self
- .catalog_manager
- .next_table_id()
- .await
- .context(CatalogSnafu)?;
- let create_table_request = common_insert::build_create_table_request(
- catalog_name,
- schema_name,
- table_id,
- table_name,
- insert_batches,
- )
- .context(InsertDataSnafu)?;
-
- info!(
- "Try to create table: {} automatically with request: {:?}",
- table_name, create_table_request,
- );
-
- let _result = self
- .sql_handler()
- .execute(SqlRequest::CreateTable(create_table_request))
- .await?;
-
- info!("Success to create table: {} automatically", table_name);
-
- Ok(())
- }
-
pub async fn execute_grpc_insert(
&self,
catalog_name: &str,
@@ -113,34 +42,14 @@ impl Instance {
.context(SchemaNotFoundSnafu { name: schema_name })?;
let insert_batches =
- common_insert::insert_batches(values.values).context(InsertDataSnafu)?;
+ common_insert::insert_batches(&values.values).context(InsertDataSnafu)?;
ensure!(!insert_batches.is_empty(), EmptyInsertBatchSnafu);
- let table = if let Some(table) = schema_provider.table(table_name).context(CatalogSnafu)? {
- let schema = table.schema();
- if let Some(add_columns) = common_insert::find_new_columns(&schema, &insert_batches)
- .context(InsertDataSnafu)?
- {
- self.add_new_columns_to_table(table_name, add_columns)
- .await?;
- }
-
- table
- } else {
- self.create_table_by_insert_batches(
- catalog_name,
- schema_name,
- table_name,
- &insert_batches,
- )
- .await?;
-
- schema_provider
- .table(table_name)
- .context(CatalogSnafu)?
- .context(TableNotFoundSnafu { table_name })?
- };
+ let table = schema_provider
+ .table(table_name)
+ .context(CatalogSnafu)?
+ .context(TableNotFoundSnafu { table_name })?;
let insert = insertion_expr_to_request(
catalog_name,
@@ -253,16 +162,8 @@ impl GrpcQueryHandler for Instance {
reason: "missing `expr` in `InsertExpr`",
})?;
- // TODO(fys): _region_id is for later use.
- let _region_id: Option<RegionNumber> = insert_expr
- .options
- .get("region_id")
- .map(|id| {
- id.deref()
- .try_into()
- .context(servers::error::DecodeRegionNumberSnafu)
- })
- .transpose()?;
+ // TODO(fys): _region_number is for later use.
+ let _region_number: u32 = insert_expr.region_number;
match expr {
insert_expr::Expr::Values(values) => {
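After this change the datanode's gRPC insert path assumes the target table already exists; automatic table creation and column addition are expected to move to the caller. Below is a hedged sketch of how that caller could derive the DDL from the same insert payload with the `common-insert` helpers touched earlier in this patch; the function, the literal catalog/schema/table names, and leaving `table_id` as `None` (so the datanode's TableIdProvider allocates it) are illustrative assumptions, not code from this commit.

use api::v1::CreateExpr;
use common_insert::{build_create_expr_from_insertion, insert_batches};

/// Builds the CreateExpr for an auto-created table from raw insert bytes.
fn create_expr_for_insert(raw_batches: &[Vec<u8>]) -> common_insert::error::Result<CreateExpr> {
    let batches = insert_batches(raw_batches)?;
    build_create_expr_from_insertion("greptime", "public", None, "demo", &batches)
}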
diff --git a/src/datanode/src/instance/sql.rs b/src/datanode/src/instance/sql.rs
index 0e770bf37130..470a8134bcbb 100644
--- a/src/datanode/src/instance/sql.rs
+++ b/src/datanode/src/instance/sql.rs
@@ -11,7 +11,8 @@ use sql::statements::statement::Statement;
use table::requests::CreateDatabaseRequest;
use crate::error::{
- CatalogNotFoundSnafu, CatalogSnafu, ExecuteSqlSnafu, ParseSqlSnafu, Result, SchemaNotFoundSnafu,
+ BumpTableIdSnafu, CatalogNotFoundSnafu, CatalogSnafu, ExecuteSqlSnafu, ParseSqlSnafu, Result,
+ SchemaNotFoundSnafu, TableIdProviderNotFoundSnafu,
};
use crate::instance::Instance;
use crate::metric;
@@ -67,10 +68,12 @@ impl Instance {
Statement::CreateTable(c) => {
let table_id = self
- .catalog_manager
+ .table_id_provider
+ .as_ref()
+ .context(TableIdProviderNotFoundSnafu)?
.next_table_id()
.await
- .context(CatalogSnafu)?;
+ .context(BumpTableIdSnafu)?;
let _engine_name = c.engine.clone();
// TODO(hl): Select table engine by engine_name
diff --git a/src/datanode/src/mock.rs b/src/datanode/src/mock.rs
index 7b2eb6ee369f..edef85601acf 100644
--- a/src/datanode/src/mock.rs
+++ b/src/datanode/src/mock.rs
@@ -1,11 +1,15 @@
+use std::sync::atomic::{AtomicU32, Ordering};
use std::sync::Arc;
use catalog::remote::MetaKvBackend;
+use common_catalog::consts::MIN_USER_TABLE_ID;
use meta_client::client::{MetaClient, MetaClientBuilder};
use meta_srv::mocks::MockInfo;
use query::QueryEngineFactory;
use storage::config::EngineConfig as StorageEngineConfig;
use storage::EngineImpl;
+use table::metadata::TableId;
+use table::table::{TableIdProvider, TableIdProviderRef};
use table_engine::config::EngineConfig as TableEngineConfig;
use crate::datanode::DatanodeOptions;
@@ -53,6 +57,8 @@ impl Instance {
"127.0.0.1:3302".to_string(),
meta_client.as_ref().unwrap().clone(),
));
+
+ let table_id_provider = Some(catalog_manager.clone() as TableIdProviderRef);
Ok(Self {
query_engine,
sql_handler,
@@ -61,6 +67,7 @@ impl Instance {
script_executor,
meta_client,
heartbeat_task,
+ table_id_provider,
})
}
@@ -105,12 +112,32 @@ impl Instance {
catalog_manager,
physical_planner: PhysicalPlanner::new(query_engine),
script_executor,
+ table_id_provider: Some(Arc::new(LocalTableIdProvider::default())),
meta_client: Some(meta_client),
heartbeat_task: Some(heartbeat_task),
})
}
}
+struct LocalTableIdProvider {
+ inner: Arc<AtomicU32>,
+}
+
+impl Default for LocalTableIdProvider {
+ fn default() -> Self {
+ Self {
+ inner: Arc::new(AtomicU32::new(MIN_USER_TABLE_ID)),
+ }
+ }
+}
+
+#[async_trait::async_trait]
+impl TableIdProvider for LocalTableIdProvider {
+ async fn next_table_id(&self) -> table::Result<TableId> {
+ Ok(self.inner.fetch_add(1, Ordering::Relaxed))
+ }
+}
+
async fn mock_meta_client(mock_info: MockInfo, node_id: u64) -> MetaClient {
let MockInfo {
server_addr,
diff --git a/src/datanode/src/server.rs b/src/datanode/src/server.rs
index 4ab6a7ff32d5..e15811c72871 100644
--- a/src/datanode/src/server.rs
+++ b/src/datanode/src/server.rs
@@ -1,28 +1,21 @@
-pub mod grpc;
-
+use std::default::Default;
use std::net::SocketAddr;
use std::sync::Arc;
-use common_error::prelude::BoxedError;
use common_runtime::Builder as RuntimeBuilder;
-use common_telemetry::info;
-use frontend::frontend::{Frontend, FrontendOptions, Mode};
-use frontend::instance::Instance as FrontendInstanceImpl;
use servers::grpc::GrpcServer;
use servers::server::Server;
use snafu::ResultExt;
-use tokio::try_join;
use crate::datanode::DatanodeOptions;
-use crate::error::{
- BuildFrontendSnafu, ParseAddrSnafu, Result, RuntimeResourceSnafu, StartServerSnafu,
-};
+use crate::error::{ParseAddrSnafu, Result, RuntimeResourceSnafu, StartServerSnafu};
use crate::instance::InstanceRef;
+pub mod grpc;
+
/// All rpc services.
pub struct Services {
grpc_server: GrpcServer,
- frontend: Option<Frontend<FrontendInstanceImpl>>,
}
impl Services {
@@ -35,54 +28,19 @@ impl Services {
.context(RuntimeResourceSnafu)?,
);
- let frontend = match opts.mode {
- Mode::Standalone => Some(Self::build_frontend(opts).await?),
- Mode::Distributed => {
- info!("Starting datanode in distributed mode, only gRPC server will be started.");
- None
- }
- };
Ok(Self {
- grpc_server: GrpcServer::new(instance.clone(), instance.clone(), grpc_runtime),
- frontend,
+ grpc_server: GrpcServer::new(instance.clone(), instance, grpc_runtime),
})
}
- /// Build frontend instance in standalone mode
- async fn build_frontend(opts: &DatanodeOptions) -> Result<Frontend<FrontendInstanceImpl>> {
- let grpc_server_addr = &opts.rpc_addr;
- info!(
- "Build frontend with datanode gRPC addr: {}",
- grpc_server_addr
- );
- let options = FrontendOptions {
- mode: Mode::Standalone,
- datanode_rpc_addr: grpc_server_addr.clone(),
- ..Default::default()
- };
- let frontend_instance = FrontendInstanceImpl::try_new(&options)
- .await
- .context(BuildFrontendSnafu)?;
- Ok(Frontend::new(options, frontend_instance))
- }
-
pub async fn start(&mut self, opts: &DatanodeOptions) -> Result<()> {
let grpc_addr: SocketAddr = opts.rpc_addr.parse().context(ParseAddrSnafu {
addr: &opts.rpc_addr,
})?;
-
- try_join!(self.grpc_server.start(grpc_addr), async {
- if let Some(ref mut frontend_instance) = self.frontend {
- info!("Starting frontend instance");
- frontend_instance
- .start()
- .await
- .map_err(BoxedError::new)
- .context(servers::error::StartFrontendSnafu)?;
- }
- Ok(())
- })
- .context(StartServerSnafu)?;
+ self.grpc_server
+ .start(grpc_addr)
+ .await
+ .context(StartServerSnafu)?;
Ok(())
}
}
diff --git a/src/datanode/src/server/grpc.rs b/src/datanode/src/server/grpc.rs
index 7805b0093fb2..13988d2c7d8a 100644
--- a/src/datanode/src/server/grpc.rs
+++ b/src/datanode/src/server/grpc.rs
@@ -1,4 +1,3 @@
mod ddl;
-pub(crate) mod handler;
pub(crate) mod plan;
pub mod select;
diff --git a/src/datanode/src/server/grpc/ddl.rs b/src/datanode/src/server/grpc/ddl.rs
index b67542498966..735874651e08 100644
--- a/src/datanode/src/server/grpc/ddl.rs
+++ b/src/datanode/src/server/grpc/ddl.rs
@@ -1,24 +1,70 @@
use std::sync::Arc;
use api::helper::ColumnDataTypeWrapper;
+use api::result::AdminResultBuilder;
use api::v1::{alter_expr::Kind, AdminResult, AlterExpr, ColumnDef, CreateExpr};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_error::prelude::{ErrorExt, StatusCode};
use common_query::Output;
+use common_telemetry::{error, info};
use datatypes::schema::ColumnDefaultConstraint;
use datatypes::schema::{ColumnSchema, SchemaBuilder, SchemaRef};
use futures::TryFutureExt;
use snafu::prelude::*;
+use table::metadata::TableId;
use table::requests::{AddColumnRequest, AlterKind, AlterTableRequest, CreateTableRequest};
-use crate::error::{self, CatalogSnafu, ColumnDefaultConstraintSnafu, MissingFieldSnafu, Result};
+use crate::error::{
+ self, BumpTableIdSnafu, ColumnDefaultConstraintSnafu, MissingFieldSnafu, Result,
+};
use crate::instance::Instance;
-use crate::server::grpc::handler::AdminResultBuilder;
use crate::sql::SqlRequest;
impl Instance {
+ /// Handle gRPC create table requests.
pub(crate) async fn handle_create(&self, expr: CreateExpr) -> AdminResult {
- let request = self.create_expr_to_request(expr).await;
+        // Respect the table id and region ids in CreateExpr if present; otherwise allocate a
+        // table id from the local table id provider and default the region id to 0.
+ let table_id = if let Some(table_id) = expr.table_id {
+ info!(
+ "Creating table {:?}.{:?}.{:?} with table id from frontend: {}",
+ expr.catalog_name, expr.schema_name, expr.table_name, table_id
+ );
+ table_id
+ } else {
+ match self.table_id_provider.as_ref() {
+ None => {
+ return AdminResultBuilder::default()
+ .status_code(StatusCode::Internal as u32)
+                        .err_msg("Table id not provided by frontend and no local table id provider is configured".to_string())
+ .build();
+ }
+ Some(table_id_provider) => {
+ match table_id_provider
+ .next_table_id()
+ .await
+ .context(BumpTableIdSnafu)
+ {
+ Ok(table_id) => {
+ info!(
+                            "Creating table {:?}.{:?}.{:?} with table id from table id provider: {}",
+ &expr.catalog_name, &expr.schema_name, expr.table_name, table_id
+ );
+ table_id
+ }
+ Err(e) => {
+                        error!(e; "Failed to allocate table id when creating table: {:?}.{:?}.{:?}", &expr.catalog_name, &expr.schema_name, expr.table_name);
+ return AdminResultBuilder::default()
+ .status_code(e.status_code() as u32)
+ .err_msg(e.to_string())
+ .build();
+ }
+ }
+ }
+ }
+ };
+
+ let request = create_expr_to_request(table_id, expr).await;
let result = futures::future::ready(request)
.and_then(|request| self.sql_handler().execute(SqlRequest::CreateTable(request)))
.await;
@@ -37,7 +83,7 @@ impl Instance {
}
pub(crate) async fn handle_alter(&self, expr: AlterExpr) -> AdminResult {
- let request = match self.alter_expr_to_request(expr).transpose() {
+ let request = match alter_expr_to_request(expr).transpose() {
Some(req) => req,
None => {
return AdminResultBuilder::default()
@@ -62,77 +108,76 @@ impl Instance {
.build(),
}
}
+}
- async fn create_expr_to_request(&self, expr: CreateExpr) -> Result<CreateTableRequest> {
- let schema = create_table_schema(&expr)?;
-
- let primary_key_indices = expr
- .primary_keys
- .iter()
- .map(|key| {
- schema
- .column_index_by_name(key)
- .context(error::KeyColumnNotFoundSnafu { name: key })
- })
- .collect::<Result<Vec<usize>>>()?;
-
- let catalog_name = expr
- .catalog_name
- .unwrap_or_else(|| DEFAULT_CATALOG_NAME.to_string());
- let schema_name = expr
- .schema_name
- .unwrap_or_else(|| DEFAULT_SCHEMA_NAME.to_string());
+async fn create_expr_to_request(table_id: TableId, expr: CreateExpr) -> Result<CreateTableRequest> {
+ let schema = create_table_schema(&expr)?;
+ let primary_key_indices = expr
+ .primary_keys
+ .iter()
+ .map(|key| {
+ schema
+ .column_index_by_name(key)
+ .context(error::KeyColumnNotFoundSnafu { name: key })
+ })
+ .collect::<Result<Vec<usize>>>()?;
- let table_id = self
- .catalog_manager()
- .next_table_id()
- .await
- .context(CatalogSnafu)?;
+ let catalog_name = expr
+ .catalog_name
+ .unwrap_or_else(|| DEFAULT_CATALOG_NAME.to_string());
+ let schema_name = expr
+ .schema_name
+ .unwrap_or_else(|| DEFAULT_SCHEMA_NAME.to_string());
- let region_id = expr
- .table_options
- .get(&"region_id".to_string())
- .unwrap()
- .parse::<u32>()
- .unwrap();
+ let region_ids = if expr.region_ids.is_empty() {
+ vec![0]
+ } else {
+ expr.region_ids
+ };
- Ok(CreateTableRequest {
- id: table_id,
- catalog_name,
- schema_name,
- table_name: expr.table_name,
- desc: expr.desc,
- schema,
- region_numbers: vec![region_id],
- primary_key_indices,
- create_if_not_exists: expr.create_if_not_exists,
- table_options: expr.table_options,
- })
- }
+ Ok(CreateTableRequest {
+ id: table_id,
+ catalog_name,
+ schema_name,
+ table_name: expr.table_name,
+ desc: expr.desc,
+ schema,
+ region_numbers: region_ids,
+ primary_key_indices,
+ create_if_not_exists: expr.create_if_not_exists,
+ table_options: expr.table_options,
+ })
+}
- fn alter_expr_to_request(&self, expr: AlterExpr) -> Result<Option<AlterTableRequest>> {
- match expr.kind {
- Some(Kind::AddColumn(add_column)) => {
- let column_def = add_column.column_def.context(MissingFieldSnafu {
+fn alter_expr_to_request(expr: AlterExpr) -> Result<Option<AlterTableRequest>> {
+ match expr.kind {
+ Some(Kind::AddColumns(add_columns)) => {
+ let mut add_column_requests = vec![];
+ for add_column_expr in add_columns.add_columns {
+ let column_def = add_column_expr.column_def.context(MissingFieldSnafu {
field: "column_def",
})?;
- let alter_kind = AlterKind::AddColumns {
- columns: vec![AddColumnRequest {
- column_schema: create_column_schema(&column_def)?,
- // FIXME(dennis): supports adding key column
- is_key: false,
- }],
- };
- let request = AlterTableRequest {
- catalog_name: expr.catalog_name,
- schema_name: expr.schema_name,
- table_name: expr.table_name,
- alter_kind,
- };
- Ok(Some(request))
+
+ let schema = create_column_schema(&column_def)?;
+ add_column_requests.push(AddColumnRequest {
+ column_schema: schema,
+ is_key: add_column_expr.is_key,
+ })
}
- None => Ok(None),
+
+ let alter_kind = AlterKind::AddColumns {
+ columns: add_column_requests,
+ };
+
+ let request = AlterTableRequest {
+ catalog_name: expr.catalog_name,
+ schema_name: expr.schema_name,
+ table_name: expr.table_name,
+ alter_kind,
+ };
+ Ok(Some(request))
}
+ None => Ok(None),
}
}
@@ -191,8 +236,7 @@ fn create_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
#[cfg(test)]
mod tests {
- use std::collections::HashMap;
-
+ use common_catalog::consts::MIN_USER_TABLE_ID;
use datatypes::prelude::ConcreteDataType;
use datatypes::value::Value;
@@ -207,7 +251,7 @@ mod tests {
instance.start().await.unwrap();
let expr = testing_create_expr();
- let request = instance.create_expr_to_request(expr).await.unwrap();
+ let request = create_expr_to_request(1024, expr).await.unwrap();
assert_eq!(request.id, common_catalog::consts::MIN_USER_TABLE_ID);
assert_eq!(request.catalog_name, "greptime".to_string());
assert_eq!(request.schema_name, "public".to_string());
@@ -219,7 +263,7 @@ mod tests {
let mut expr = testing_create_expr();
expr.primary_keys = vec!["host".to_string(), "not-exist-column".to_string()];
- let result = instance.create_expr_to_request(expr).await;
+ let result = create_expr_to_request(1025, expr).await;
assert!(result.is_err());
assert!(result
.unwrap_err()
@@ -312,9 +356,6 @@ mod tests {
default_constraint: None,
},
];
- let table_options = [("region_id".to_string(), "0".to_string())]
- .into_iter()
- .collect::<HashMap<_, _>>();
CreateExpr {
catalog_name: None,
schema_name: None,
@@ -324,7 +365,9 @@ mod tests {
time_index: "ts".to_string(),
primary_keys: vec!["ts".to_string(), "host".to_string()],
create_if_not_exists: true,
- table_options,
+ table_options: Default::default(),
+ table_id: Some(MIN_USER_TABLE_ID),
+ region_ids: vec![0],
}
}
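Region ids now ride on `CreateExpr` itself (falling back to region 0 when the list is empty) instead of the old `region_id` entry in `table_options`. A sketch of an extra test for the module above that pins down the empty-list fallback; the table id and test name are arbitrary.

    #[tokio::test]
    async fn test_create_expr_to_request_defaults_region_ids() {
        let mut expr = testing_create_expr();
        expr.region_ids = vec![];
        let request = create_expr_to_request(1026, expr).await.unwrap();
        assert_eq!(vec![0], request.region_numbers);
    }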
diff --git a/src/datanode/src/server/grpc/select.rs b/src/datanode/src/server/grpc/select.rs
index 5edf646adef6..5aa5f412e20c 100644
--- a/src/datanode/src/server/grpc/select.rs
+++ b/src/datanode/src/server/grpc/select.rs
@@ -1,6 +1,7 @@
use std::sync::Arc;
use api::helper::ColumnDataTypeWrapper;
+use api::result::{build_err_result, ObjectResultBuilder};
use api::v1::{codec::SelectResult, column::SemanticType, column::Values, Column, ObjectResult};
use arrow::array::{Array, BooleanArray, PrimitiveArray};
use common_base::BitVec;
@@ -12,7 +13,6 @@ use datatypes::schema::SchemaRef;
use snafu::{OptionExt, ResultExt};
use crate::error::{self, ConversionSnafu, Result};
-use crate::server::grpc::handler::{build_err_result, ObjectResultBuilder};
pub async fn to_object_result(output: Result<Output>) -> ObjectResult {
let result = match output {
diff --git a/src/datanode/src/sql/create.rs b/src/datanode/src/sql/create.rs
index 99cb61951476..98a1bf16213d 100644
--- a/src/datanode/src/sql/create.rs
+++ b/src/datanode/src/sql/create.rs
@@ -5,6 +5,7 @@ use catalog::{RegisterSchemaRequest, RegisterTableRequest};
use common_catalog::consts::DEFAULT_CATALOG_NAME;
use common_query::Output;
use common_telemetry::tracing::info;
+use common_telemetry::tracing::log::error;
use datatypes::schema::SchemaBuilder;
use snafu::{ensure, OptionExt, ResultExt};
use sql::ast::TableConstraint;
@@ -16,9 +17,9 @@ use table::metadata::TableId;
use table::requests::*;
use crate::error::{
- self, ConstraintNotSupportedSnafu, CreateSchemaSnafu, CreateTableSnafu,
- InsertSystemCatalogSnafu, InvalidPrimaryKeySnafu, KeyColumnNotFoundSnafu, RegisterSchemaSnafu,
- Result,
+ self, CatalogNotFoundSnafu, CatalogSnafu, ConstraintNotSupportedSnafu, CreateSchemaSnafu,
+ CreateTableSnafu, InsertSystemCatalogSnafu, InvalidPrimaryKeySnafu, KeyColumnNotFoundSnafu,
+ RegisterSchemaSnafu, Result, SchemaNotFoundSnafu,
};
use crate::sql::SqlHandler;
@@ -40,10 +41,36 @@ impl SqlHandler {
pub(crate) async fn create_table(&self, req: CreateTableRequest) -> Result<Output> {
let ctx = EngineContext {};
+ // first check if catalog and schema exist
+ let catalog = self
+ .catalog_manager
+ .catalog(&req.catalog_name)
+ .context(CatalogSnafu)?
+ .with_context(|| {
+ error!(
+ "Failed to create table {}.{}.{}, catalog not found",
+ &req.catalog_name, &req.schema_name, &req.table_name
+ );
+ CatalogNotFoundSnafu {
+ name: &req.catalog_name,
+ }
+ })?;
+ catalog
+ .schema(&req.schema_name)
+ .context(CatalogSnafu)?
+ .with_context(|| {
+ error!(
+ "Failed to create table {}.{}.{}, schema not found",
+ &req.catalog_name, &req.schema_name, &req.table_name
+ );
+ SchemaNotFoundSnafu {
+ name: &req.schema_name,
+ }
+ })?;
+
// determine catalog and schema from the very beginning
let table_name = req.table_name.clone();
let table_id = req.id;
-
let table = self
.table_engine
.create_table(&ctx, req)
diff --git a/src/datanode/src/tests/grpc_test.rs b/src/datanode/src/tests/grpc_test.rs
index 3f54f782359b..14e05050bdee 100644
--- a/src/datanode/src/tests/grpc_test.rs
+++ b/src/datanode/src/tests/grpc_test.rs
@@ -4,30 +4,37 @@ use std::net::SocketAddr;
use std::sync::Arc;
use std::time::Duration;
-use api::v1::ColumnDataType;
use api::v1::{
admin_result, alter_expr::Kind, codec::InsertBatch, column, column::SemanticType, insert_expr,
AddColumn, AlterExpr, Column, ColumnDef, CreateExpr, InsertExpr, MutateResult,
};
+use api::v1::{AddColumns, ColumnDataType};
use client::admin::Admin;
use client::{Client, Database, ObjectResult};
+use common_catalog::consts::MIN_USER_TABLE_ID;
use common_runtime::Builder as RuntimeBuilder;
+use frontend::frontend::FrontendOptions;
+use frontend::frontend::Mode::Standalone;
+use frontend::grpc::GrpcOptions;
use servers::grpc::GrpcServer;
use servers::server::Server;
use crate::instance::Instance;
use crate::tests::test_util::{self, TestGuard};
-async fn setup_grpc_server(name: &str, port: usize) -> (String, TestGuard, Arc<GrpcServer>) {
+async fn setup_grpc_server(
+ name: &str,
+ port: usize,
+) -> (String, TestGuard, Arc<GrpcServer>, Arc<GrpcServer>) {
common_telemetry::init_default_ut_logging();
let (mut opts, guard) = test_util::create_tmp_dir_and_datanode_opts(name);
- let addr = format!("127.0.0.1:{}", port);
- opts.rpc_addr = addr.clone();
+ let datanode_grpc_addr = format!("127.0.0.1:{}", port);
+ opts.rpc_addr = datanode_grpc_addr.clone();
let instance = Arc::new(Instance::with_mock_meta_client(&opts).await.unwrap());
instance.start().await.unwrap();
- let addr_cloned = addr.clone();
+ let datanode_grpc_addr = datanode_grpc_addr.clone();
let runtime = Arc::new(
RuntimeBuilder::default()
.worker_threads(2)
@@ -36,30 +43,65 @@ async fn setup_grpc_server(name: &str, port: usize) -> (String, TestGuard, Arc<G
.unwrap(),
);
- let grpc_server = Arc::new(GrpcServer::new(instance.clone(), instance, runtime));
- let grpc_server_clone = grpc_server.clone();
+ let fe_grpc_addr = format!("127.0.0.1:{}", port + 1);
+ let fe_opts = FrontendOptions {
+ mode: Standalone,
+ datanode_rpc_addr: datanode_grpc_addr.clone(),
+ grpc_options: Some(GrpcOptions {
+ addr: fe_grpc_addr.clone(),
+ runtime_size: 8,
+ }),
+ ..Default::default()
+ };
+
+ let datanode_grpc_server = Arc::new(GrpcServer::new(
+ instance.clone(),
+ instance.clone(),
+ runtime.clone(),
+ ));
+
+ let mut fe_instance = frontend::instance::Instance::try_new(&fe_opts)
+ .await
+ .unwrap();
+ fe_instance.set_catalog_manager(instance.catalog_manager.clone());
+ let fe_instance_ref = Arc::new(fe_instance);
+ let fe_grpc_server = Arc::new(GrpcServer::new(
+ fe_instance_ref.clone(),
+ fe_instance_ref,
+ runtime,
+ ));
+ let grpc_server_clone = fe_grpc_server.clone();
+
+ let fe_grpc_addr_clone = fe_grpc_addr.clone();
tokio::spawn(async move {
- let addr = addr_cloned.parse::<SocketAddr>().unwrap();
+ let addr = fe_grpc_addr_clone.parse::<SocketAddr>().unwrap();
grpc_server_clone.start(addr).await.unwrap()
});
+ let dn_grpc_addr_clone = datanode_grpc_addr.clone();
+ let dn_grpc_server_clone = datanode_grpc_server.clone();
+ tokio::spawn(async move {
+ let addr = dn_grpc_addr_clone.parse::<SocketAddr>().unwrap();
+ dn_grpc_server_clone.start(addr).await.unwrap()
+ });
+
// wait for GRPC server to start
tokio::time::sleep(Duration::from_secs(1)).await;
- (addr, guard, grpc_server)
+ (fe_grpc_addr, guard, fe_grpc_server, datanode_grpc_server)
}
#[tokio::test(flavor = "multi_thread")]
async fn test_auto_create_table() {
- let (addr, _guard, grpc_server) = setup_grpc_server("auto_create_table", 3991).await;
+ let (addr, _guard, fe_grpc_server, dn_grpc_server) =
+ setup_grpc_server("auto_create_table", 3991).await;
let grpc_client = Client::with_urls(vec![addr]);
let db = Database::new("greptime", grpc_client);
-
insert_and_assert(&db).await;
-
- grpc_server.shutdown().await.unwrap();
+ let _ = fe_grpc_server.shutdown().await;
+ let _ = dn_grpc_server.shutdown().await;
}
fn expect_data() -> (Column, Column, Column, Column) {
@@ -120,7 +162,8 @@ fn expect_data() -> (Column, Column, Column, Column) {
#[ignore]
async fn test_insert_and_select() {
common_telemetry::init_default_ut_logging();
- let (addr, _guard, grpc_server) = setup_grpc_server("insert_and_select", 3990).await;
+ let (addr, _guard, fe_grpc_server, dn_grpc_server) =
+ setup_grpc_server("insert_and_select", 3990).await;
let grpc_client = Client::with_urls(vec![addr]);
@@ -145,8 +188,11 @@ async fn test_insert_and_select() {
is_nullable: true,
default_constraint: None,
};
- let kind = Kind::AddColumn(AddColumn {
- column_def: Some(add_column),
+ let kind = Kind::AddColumns(AddColumns {
+ add_columns: vec![AddColumn {
+ column_def: Some(add_column),
+ is_key: false,
+ }],
});
let expr = AlterExpr {
table_name: "test_table".to_string(),
@@ -160,7 +206,8 @@ async fn test_insert_and_select() {
// insert
insert_and_assert(&db).await;
- grpc_server.shutdown().await.unwrap();
+ let _ = fe_grpc_server.shutdown().await;
+ let _ = dn_grpc_server.shutdown().await;
}
async fn insert_and_assert(db: &Database) {
@@ -178,12 +225,14 @@ async fn insert_and_assert(db: &Database) {
}
.into()];
let expr = InsertExpr {
+ schema_name: "public".to_string(),
table_name: "demo".to_string(),
expr: Some(insert_expr::Expr::Values(insert_expr::Values { values })),
options: HashMap::default(),
+ region_number: 0,
};
let result = db.insert(expr).await;
- assert!(result.is_ok());
+ result.unwrap();
// select
let result = db
@@ -249,6 +298,8 @@ fn testing_create_expr() -> CreateExpr {
time_index: "ts".to_string(),
primary_keys: vec!["ts".to_string(), "host".to_string()],
create_if_not_exists: true,
- table_options: HashMap::from([("region_id".to_string(), "0".to_string())]),
+ table_options: Default::default(),
+ table_id: Some(MIN_USER_TABLE_ID),
+ region_ids: vec![0],
}
}
diff --git a/src/datanode/src/tests/test_util.rs b/src/datanode/src/tests/test_util.rs
index 268a686de41b..934d1fe0271a 100644
--- a/src/datanode/src/tests/test_util.rs
+++ b/src/datanode/src/tests/test_util.rs
@@ -4,6 +4,7 @@ use std::sync::Arc;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MIN_USER_TABLE_ID};
use datatypes::data_type::ConcreteDataType;
use datatypes::schema::{ColumnSchema, SchemaBuilder};
+use frontend::frontend::Mode;
use snafu::ResultExt;
use table::engine::EngineContext;
use table::engine::TableEngineRef;
@@ -32,6 +33,7 @@ pub fn create_tmp_dir_and_datanode_opts(name: &str) -> (DatanodeOptions, TestGua
storage: ObjectStoreConfig::File {
data_dir: data_tmp_dir.path().to_str().unwrap().to_string(),
},
+ mode: Mode::Standalone,
..Default::default()
};
(
diff --git a/src/frontend/Cargo.toml b/src/frontend/Cargo.toml
index 57eaf38cbc39..998e9adea64d 100644
--- a/src/frontend/Cargo.toml
+++ b/src/frontend/Cargo.toml
@@ -18,12 +18,14 @@ common-catalog = { path = "../common/catalog" }
common-runtime = { path = "../common/runtime" }
common-telemetry = { path = "../common/telemetry" }
common-time = { path = "../common/time" }
+common-insert = { path = "../common/insert" }
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", branch = "arrow2", features = ["simd"] }
datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", branch = "arrow2" }
datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", branch = "arrow2" }
datatypes = { path = "../datatypes" }
futures = "0.3"
+futures-util = "0.3"
itertools = "0.10"
meta-client = { path = "../meta-client" }
moka = { version = "0.9", features = ["future"] }
diff --git a/src/frontend/src/catalog.rs b/src/frontend/src/catalog.rs
index 4a8874428d4b..0a22365b951f 100644
--- a/src/frontend/src/catalog.rs
+++ b/src/frontend/src/catalog.rs
@@ -7,7 +7,8 @@ use catalog::error::{
};
use catalog::remote::{Kv, KvBackendRef};
use catalog::{
- CatalogList, CatalogProvider, CatalogProviderRef, SchemaProvider, SchemaProviderRef,
+ CatalogList, CatalogManager, CatalogProvider, CatalogProviderRef, RegisterSchemaRequest,
+ RegisterSystemTableRequest, RegisterTableRequest, SchemaProvider, SchemaProviderRef,
};
use common_catalog::{CatalogKey, SchemaKey, TableGlobalKey, TableGlobalValue};
use futures::StreamExt;
@@ -41,6 +42,45 @@ impl FrontendCatalogManager {
}
}
+// FIXME(hl): Frontend only needs a CatalogList, should replace with trait upcasting
+// as soon as it's stable: https://github.com/rust-lang/rust/issues/65991
+#[async_trait::async_trait]
+impl CatalogManager for FrontendCatalogManager {
+ async fn start(&self) -> catalog::error::Result<()> {
+ Ok(())
+ }
+
+ async fn register_table(
+ &self,
+ _request: RegisterTableRequest,
+ ) -> catalog::error::Result<usize> {
+ unimplemented!()
+ }
+
+ async fn register_schema(
+ &self,
+ _request: RegisterSchemaRequest,
+ ) -> catalog::error::Result<usize> {
+ unimplemented!()
+ }
+
+ async fn register_system_table(
+ &self,
+ _request: RegisterSystemTableRequest,
+ ) -> catalog::error::Result<()> {
+ unimplemented!()
+ }
+
+ fn table(
+ &self,
+ _catalog: &str,
+ _schema: &str,
+ _table_name: &str,
+ ) -> catalog::error::Result<Option<TableRef>> {
+ unimplemented!()
+ }
+}
+
impl CatalogList for FrontendCatalogManager {
fn as_any(&self) -> &dyn Any {
self
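The FIXME in the catalog.rs hunk above notes that the frontend only needs a `CatalogList` and that the stubbed `CatalogManager` impl could go away once trait upcasting (rust-lang/rust#65991) is stable. A minimal, hedged sketch of that language feature follows, using simplified stand-in traits rather than the real greptimedb catalog types; on toolchains without the feature this does not compile, which is exactly why the stub exists.

```rust
// Illustrative only: simplified stand-ins, not the real CatalogList/CatalogManager.
// With trait upcasting available, a `&dyn CatalogManager` coerces directly to a
// `&dyn CatalogList`, so no delegating stub impl is needed.
trait CatalogList {
    fn catalog_names(&self) -> Vec<String>;
}

// Supertrait relationship: every manager is also a list.
trait CatalogManager: CatalogList {
    fn start(&self);
}

struct Manager;

impl CatalogList for Manager {
    fn catalog_names(&self) -> Vec<String> {
        vec!["greptime".to_string()]
    }
}

impl CatalogManager for Manager {
    fn start(&self) {}
}

fn list_catalogs(list: &dyn CatalogList) -> Vec<String> {
    list.catalog_names()
}

fn main() {
    let manager: &dyn CatalogManager = &Manager;
    // With trait upcasting this coercion is implicit; before stabilization,
    // callers needed an explicit `as_catalog_list()`-style accessor instead.
    assert_eq!(list_catalogs(manager), vec!["greptime".to_string()]);
}
```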
diff --git a/src/frontend/src/error.rs b/src/frontend/src/error.rs
index 63b9a2055f31..e0faeb480687 100644
--- a/src/frontend/src/error.rs
+++ b/src/frontend/src/error.rs
@@ -121,7 +121,7 @@ pub enum Error {
backtrace: Backtrace,
},
- #[snafu(display("Invaild InsertRequest, reason: {}", reason))]
+ #[snafu(display("Invalid InsertRequest, reason: {}", reason))]
InvalidInsertRequest {
reason: String,
backtrace: Backtrace,
@@ -194,6 +194,66 @@ pub enum Error {
backtrace: Backtrace,
},
+ #[snafu(display("Failed to bump table id when creating table, source: {}", source))]
+ BumpTableId {
+ #[snafu(backtrace)]
+ source: table::error::Error,
+ },
+
+ #[snafu(display("Failed to create table, source: {}", source))]
+ CreateTable {
+ #[snafu(backtrace)]
+ source: client::Error,
+ },
+
+ #[snafu(display("Failed to alter table, source: {}", source))]
+ AlterTable {
+ #[snafu(backtrace)]
+ source: client::Error,
+ },
+
+ #[snafu(display("Failed to insert values to table, source: {}", source))]
+ Insert {
+ #[snafu(backtrace)]
+ source: client::Error,
+ },
+
+ #[snafu(display("Failed to select from table, source: {}", source))]
+ Select {
+ #[snafu(backtrace)]
+ source: client::Error,
+ },
+
+ #[snafu(display("Failed to create table on insertion, source: {}", source))]
+ CreateTableOnInsertion {
+ #[snafu(backtrace)]
+ source: client::Error,
+ },
+
+ #[snafu(display("Failed to alter table on insertion, source: {}", source))]
+ AlterTableOnInsertion {
+ #[snafu(backtrace)]
+ source: client::Error,
+ },
+
+ #[snafu(display("Failed to build CreateExpr on insertion: {}", source))]
+ BuildCreateExprOnInsertion {
+ #[snafu(backtrace)]
+ source: common_insert::error::Error,
+ },
+
+ #[snafu(display("Failed to find new columns on insertion: {}", source))]
+ FindNewColumnsOnInsertion {
+ #[snafu(backtrace)]
+ source: common_insert::error::Error,
+ },
+
+ #[snafu(display("Failed to deserialize insert batching: {}", source))]
+ DeserializeInsertBatch {
+ #[snafu(backtrace)]
+ source: common_insert::error::Error,
+ },
+
#[snafu(display("Failed to find catalog by name: {}", catalog_name))]
CatalogNotFound {
catalog_name: String,
@@ -266,13 +326,23 @@ impl ErrorExt for Error {
Error::TableNotFound { .. } => StatusCode::TableNotFound,
Error::ColumnNotFound { .. } => StatusCode::TableColumnNotFound,
- Error::JoinTask { .. }
- | Error::SchemaNotFound { .. }
- | Error::CatalogNotFound { .. } => StatusCode::Unexpected,
+ Error::JoinTask { .. } => StatusCode::Unexpected,
Error::Catalog { source, .. } => source.status_code(),
Error::ParseCatalogEntry { source, .. } => source.status_code(),
Error::RequestMeta { source } => source.status_code(),
+ Error::BumpTableId { source, .. } => source.status_code(),
+ Error::SchemaNotFound { .. } => StatusCode::InvalidArguments,
+ Error::CatalogNotFound { .. } => StatusCode::InvalidArguments,
+ Error::CreateTable { source, .. } => source.status_code(),
+ Error::AlterTable { source, .. } => source.status_code(),
+ Error::Insert { source, .. } => source.status_code(),
+ Error::BuildCreateExprOnInsertion { source, .. } => source.status_code(),
+ Error::CreateTableOnInsertion { source, .. } => source.status_code(),
+ Error::AlterTableOnInsertion { source, .. } => source.status_code(),
+ Error::Select { source, .. } => source.status_code(),
+ Error::FindNewColumnsOnInsertion { source, .. } => source.status_code(),
+ Error::DeserializeInsertBatch { source, .. } => source.status_code(),
}
}
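The new error variants above all follow the same snafu pattern: wrap a `source` error via a context selector and delegate `status_code` to it. A minimal standalone sketch of that pattern, using `std::io::Error` as a placeholder source instead of the real `client::Error`, and omitting the `#[snafu(backtrace)]` attribute the real variants carry:

```rust
// Sketch only: placeholder source type and operation, not the real frontend code.
use snafu::{ResultExt, Snafu};

#[derive(Debug, Snafu)]
enum Error {
    #[snafu(display("Failed to create table, source: {}", source))]
    CreateTable { source: std::io::Error },
}

fn create_table_dir(path: &str) -> Result<(), Error> {
    // `.context(...)` converts the io::Error into Error::CreateTable,
    // attaching it as the `source` field.
    std::fs::create_dir_all(path).context(CreateTableSnafu)
}

fn main() {
    if let Err(e) = create_table_dir("/definitely/not/writable/demo") {
        println!("{e}");
    }
}
```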
diff --git a/src/frontend/src/instance.rs b/src/frontend/src/instance.rs
index d69b69f2c2c4..8b66af9072a0 100644
--- a/src/frontend/src/instance.rs
+++ b/src/frontend/src/instance.rs
@@ -7,18 +7,24 @@ use std::sync::Arc;
use std::time::Duration;
use api::helper::ColumnDataTypeWrapper;
+use api::result::ObjectResultBuilder;
+use api::v1::alter_expr::Kind;
+use api::v1::codec::InsertBatch;
+use api::v1::object_expr::Expr;
use api::v1::{
- insert_expr, AdminExpr, AdminResult, AlterExpr, ColumnDataType, ColumnDef as GrpcColumnDef,
- CreateDatabaseExpr, CreateExpr, InsertExpr, ObjectExpr, ObjectResult as GrpcObjectResult,
+ insert_expr, AddColumns, AdminExpr, AdminResult, AlterExpr, ColumnDataType,
+ ColumnDef as GrpcColumnDef, CreateDatabaseExpr, CreateExpr, InsertExpr, ObjectExpr,
+ ObjectResult as GrpcObjectResult,
};
use async_trait::async_trait;
use catalog::remote::MetaKvBackend;
-use catalog::{CatalogList, CatalogProviderRef, SchemaProviderRef};
+use catalog::{CatalogManagerRef, CatalogProviderRef, SchemaProviderRef};
use client::admin::{admin_result_to_output, Admin};
use client::{Client, Database, Select};
-use common_error::prelude::BoxedError;
+use common_error::prelude::{BoxedError, StatusCode};
use common_grpc::channel_manager::{ChannelConfig, ChannelManager};
use common_query::Output;
+use common_telemetry::{debug, error, info};
use datatypes::schema::ColumnSchema;
use meta_client::client::MetaClientBuilder;
use meta_client::MetaClientOpts;
@@ -34,10 +40,16 @@ use sql::statements::insert::Insert;
use sql::statements::statement::Statement;
use sql::statements::{column_def_to_schema, table_idents_to_full_name};
use sql::{dialect::GenericDialect, parser::ParserContext};
+use table::table::TableIdProviderRef;
use crate::catalog::FrontendCatalogManager;
use crate::datanode::DatanodeClients;
-use crate::error::{self, ConvertColumnDefaultConstraintSnafu, Result};
+use crate::error::{
+ self, AlterTableOnInsertionSnafu, AlterTableSnafu, BuildCreateExprOnInsertionSnafu,
+ BumpTableIdSnafu, CatalogNotFoundSnafu, CatalogSnafu, ConvertColumnDefaultConstraintSnafu,
+ CreateTableOnInsertionSnafu, CreateTableSnafu, DeserializeInsertBatchSnafu,
+ FindNewColumnsOnInsertionSnafu, InsertSnafu, Result, SchemaNotFoundSnafu, SelectSnafu,
+};
use crate::frontend::{FrontendOptions, Mode};
use crate::sql::insert_to_request;
use crate::table::route::TableRoutes;
@@ -65,7 +77,10 @@ pub struct Instance {
// But in distributed mode, the frontend should fetch datanodes' addresses from metasrv.
client: Client,
/// The catalog manager is None in standalone mode; the datanode keeps its own.
- catalog_manager: Option<FrontendCatalogManager>,
+ catalog_manager: Option<CatalogManagerRef>,
+ /// Table id provider. In standalone mode it is left as None; in distributed mode,
+ /// table ids should be generated by metasrv.
+ table_id_provider: Option<TableIdProviderRef>,
// TODO(fys): it should be a trait that corresponds to two implementations:
// Standalone and Distributed, then the code behind it doesn't need to use so
// many match statements.
@@ -77,6 +92,7 @@ impl Default for Instance {
Self {
client: Client::default(),
catalog_manager: None,
+ table_id_provider: None,
mode: Mode::Standalone,
}
}
@@ -116,7 +132,7 @@ impl Instance {
let datanode_clients = Arc::new(DatanodeClients::new());
let catalog_manager =
FrontendCatalogManager::new(meta_backend, table_routes, datanode_clients);
- Some(catalog_manager)
+ Some(Arc::new(catalog_manager))
} else {
None
};
@@ -133,6 +149,276 @@ impl Instance {
Admin::new("greptime", self.client.clone())
}
+ pub fn set_catalog_manager(&mut self, catalog_manager: CatalogManagerRef) {
+ self.catalog_manager = Some(catalog_manager);
+ }
+
+ pub async fn handle_select(&self, expr: Select) -> Result<Output> {
+ self.database()
+ .select(expr)
+ .await
+ .and_then(Output::try_from)
+ .context(SelectSnafu)
+ }
+
+ /// Convert `CreateTable` statement to `CreateExpr` gRPC request.
+ async fn create_to_expr(&self, create: CreateTable) -> Result<CreateExpr> {
+ let (catalog_name, schema_name, table_name) =
+ table_idents_to_full_name(&create.name).context(error::ParseSqlSnafu)?;
+
+ let table_id = match &self.table_id_provider {
+ Some(provider) => Some(provider.next_table_id().await.context(BumpTableIdSnafu)?),
+ None => None,
+ };
+ // FIXME(hl): Region id should be generated from metasrv
+ let region_ids = vec![0];
+
+ let time_index = find_time_index(&create.constraints)?;
+ let expr = CreateExpr {
+ catalog_name: Some(catalog_name),
+ schema_name: Some(schema_name),
+ table_name,
+ desc: None,
+ column_defs: columns_to_expr(&create.columns, &time_index)?,
+ time_index,
+ primary_keys: find_primary_keys(&create.constraints)?,
+ create_if_not_exists: create.if_not_exists,
+ // TODO(LFC): Fill in other table options.
+ table_options: HashMap::from([("engine".to_string(), create.engine)]),
+ table_id,
+ region_ids,
+ };
+ Ok(expr)
+ }
+
+ /// Handle create expr.
+ pub async fn handle_create_table(&self, expr: CreateExpr) -> Result<Output> {
+ let result = self.admin().create(expr.clone()).await;
+ if let Err(e) = &result {
+ error!(e; "Failed to create table by expr: {:?}", expr);
+ }
+ result
+ .and_then(admin_result_to_output)
+ .context(CreateTableSnafu)
+ }
+
+ /// Handle create database expr.
+ pub async fn handle_create_database(&self, expr: CreateDatabaseExpr) -> Result<Output> {
+ self.admin()
+ .create_database(expr)
+ .await
+ .and_then(admin_result_to_output)
+ .context(CreateTableSnafu)
+ }
+
+ /// Handle alter expr
+ pub async fn handle_alter(&self, expr: AlterExpr) -> Result<Output> {
+ self.admin()
+ .alter(expr)
+ .await
+ .and_then(admin_result_to_output)
+ .context(AlterTableSnafu)
+ }
+
+ /// Handle batch inserts
+ pub async fn handle_inserts(&self, insert_expr: &[InsertExpr]) -> Result<Output> {
+ let mut success = 0;
+ for expr in insert_expr {
+ match self.handle_insert(expr).await? {
+ Output::AffectedRows(rows) => success += rows,
+ _ => unreachable!("Insert should not yield output other than AffectedRows"),
+ }
+ }
+ Ok(Output::AffectedRows(success))
+ }
+
+ /// Handle insert. For 'values' insertions, create/alter the destination table on demand.
+ pub async fn handle_insert(&self, insert_expr: &InsertExpr) -> Result<Output> {
+ let table_name = &insert_expr.table_name;
+ let catalog_name = "greptime";
+ let schema_name = "public";
+
+ if let Some(expr) = &insert_expr.expr {
+ match expr {
+ api::v1::insert_expr::Expr::Values(values) => {
+ // TODO(hl): gRPC should also support partitioning.
+ let region_number = 0;
+ self.handle_insert_values(
+ catalog_name,
+ schema_name,
+ table_name,
+ region_number,
+ values,
+ )
+ .await
+ }
+ api::v1::insert_expr::Expr::Sql(_) => {
+ // The frontend does not comprehend insert requests that are raw SQL strings
+ self.database()
+ .insert(insert_expr.clone())
+ .await
+ .and_then(Output::try_from)
+ .context(InsertSnafu)
+ }
+ }
+ } else {
+ // expr is empty
+ Ok(Output::AffectedRows(0))
+ }
+ }
+
+ /// Handle insert requests in the frontend.
+ /// If the insert is a raw SQL string, just forward it to the datanode.
+ /// If the insert is a parsed InsertExpr, the frontend should comprehend the schema and create/alter the table on demand.
+ pub async fn handle_insert_values(
+ &self,
+ catalog_name: &str,
+ schema_name: &str,
+ table_name: &str,
+ region_number: u32,
+ values: &insert_expr::Values,
+ ) -> Result<Output> {
+ let insert_batches =
+ common_insert::insert_batches(&values.values).context(DeserializeInsertBatchSnafu)?;
+ self.create_or_alter_table_on_demand(
+ catalog_name,
+ schema_name,
+ table_name,
+ &insert_batches,
+ )
+ .await?;
+ self.database()
+ .insert(InsertExpr {
+ schema_name: schema_name.to_string(),
+ table_name: table_name.to_string(),
+ region_number,
+ options: Default::default(),
+ expr: Some(insert_expr::Expr::Values(values.clone())),
+ })
+ .await
+ .and_then(Output::try_from)
+ .context(InsertSnafu)
+ }
+
+ // Check whether the table already exists:
+ // - if the table does not exist, create it from the inferred CreateExpr
+ // - if the table exists, check whether the schema matches. If any new columns are found, alter the table with the inferred `AlterExpr`
+ async fn create_or_alter_table_on_demand(
+ &self,
+ catalog_name: &str,
+ schema_name: &str,
+ table_name: &str,
+ insert_batches: &[InsertBatch],
+ ) -> Result<()> {
+ match self
+ .catalog_manager
+ .as_ref()
+ .expect("catalog manager cannot be None")
+ .catalog(catalog_name)
+ .context(CatalogSnafu)?
+ .context(CatalogNotFoundSnafu { catalog_name })?
+ .schema(schema_name)
+ .context(CatalogSnafu)?
+ .context(SchemaNotFoundSnafu {
+ schema_info: schema_name,
+ })?
+ .table(table_name)
+ .context(CatalogSnafu)?
+ {
+ None => {
+ info!(
+ "Table {}.{}.{} does not exist, try create table",
+ catalog_name, schema_name, table_name,
+ );
+ self.create_table_by_insert_batches(
+ catalog_name,
+ schema_name,
+ table_name,
+ insert_batches,
+ )
+ .await?;
+ info!(
+ "Successfully created table on insertion: {}.{}.{}",
+ catalog_name, schema_name, table_name
+ );
+ }
+ Some(table) => {
+ let schema = table.schema();
+ if let Some(add_columns) = common_insert::find_new_columns(&schema, insert_batches)
+ .context(FindNewColumnsOnInsertionSnafu)?
+ {
+ info!(
+ "Find new columns {:?} on insertion, try to alter table: {}.{}.{}",
+ add_columns, catalog_name, schema_name, table_name
+ );
+ self.add_new_columns_to_table(table_name, add_columns)
+ .await?;
+ info!(
+ "Successfully altered table on insertion: {}.{}.{}",
+ catalog_name, schema_name, table_name
+ );
+ }
+ }
+ };
+ Ok(())
+ }
+
+ /// Infer a create table expr from the insert batches and create the table.
+ async fn create_table_by_insert_batches(
+ &self,
+ catalog_name: &str,
+ schema_name: &str,
+ table_name: &str,
+ insert_batches: &[InsertBatch],
+ ) -> Result<Output> {
+ // Create table automatically, build schema from data.
+ let table_id = match &self.table_id_provider {
+ Some(provider) => Some(provider.next_table_id().await.context(BumpTableIdSnafu)?),
+ None => None,
+ };
+
+ let create_expr = common_insert::build_create_expr_from_insertion(
+ catalog_name,
+ schema_name,
+ table_id,
+ table_name,
+ insert_batches,
+ )
+ .context(BuildCreateExprOnInsertionSnafu)?;
+
+ info!(
+ "Try to create table: {} automatically with request: {:?}",
+ table_name, create_expr,
+ );
+ self.admin()
+ .create(create_expr)
+ .await
+ .and_then(admin_result_to_output)
+ .context(CreateTableOnInsertionSnafu)
+ }
+
+ async fn add_new_columns_to_table(
+ &self,
+ table_name: &str,
+ add_columns: AddColumns,
+ ) -> Result<Output> {
+ debug!(
+ "Adding new columns: {:?} to table: {}",
+ add_columns, table_name
+ );
+ let expr = AlterExpr {
+ table_name: table_name.to_string(),
+ schema_name: None,
+ catalog_name: None,
+ kind: Some(Kind::AddColumns(add_columns)),
+ };
+ self.admin()
+ .alter(expr)
+ .await
+ .and_then(admin_result_to_output)
+ .context(AlterTableOnInsertionSnafu)
+ }
+
fn get_catalog(&self, catalog_name: &str) -> Result<CatalogProviderRef> {
self.catalog_manager
.as_ref()
@@ -159,6 +445,11 @@ impl Instance {
let insert_request = insert_to_request(&schema_provider, *insert)?;
+ let batch = crate::table::insert::insert_request_to_insert_batch(&insert_request)?;
+
+ self.create_or_alter_table_on_demand(&catalog, &schema, &table, &[batch])
+ .await?;
+
let table = schema_provider
.table(&table)
.context(error::CatalogSnafu)?
@@ -181,10 +472,11 @@ impl FrontendInstance for Instance {
#[cfg(test)]
impl Instance {
- pub fn with_client(client: Client) -> Self {
+ pub fn with_client_and_catalog_manager(client: Client, catalog: CatalogManagerRef) -> Self {
Self {
client,
- catalog_manager: None,
+ catalog_manager: Some(catalog),
+ table_id_provider: None,
mode: Mode::Standalone,
}
}
@@ -208,65 +500,58 @@ impl SqlQueryHandler for Instance {
match stmt {
Statement::Query(_) => self
- .database()
- .select(Select::Sql(query.to_string()))
+ .handle_select(Select::Sql(query.to_string()))
.await
- .and_then(|object_result| object_result.try_into())
.map_err(BoxedError::new)
.context(server_error::ExecuteQuerySnafu { query }),
- Statement::Insert(insert) => {
- match self.mode {
- Mode::Standalone => {
- // TODO(dennis): respect schema_name when inserting data
- let (_catalog_name, _schema_name, table_name) = insert
- .full_table_name()
- .context(error::ParseSqlSnafu)
- .map_err(BoxedError::new)
- .context(server_error::ExecuteInsertSnafu {
- msg: "Failed to get table name",
- })?;
-
- let expr = InsertExpr {
- table_name,
- expr: Some(insert_expr::Expr::Sql(query.to_string())),
- options: HashMap::default(),
- };
- self.database()
- .insert(expr)
- .await
- .and_then(|object_result| object_result.try_into())
- .map_err(BoxedError::new)
- .context(server_error::ExecuteQuerySnafu { query })
- }
- Mode::Distributed => {
- let affected = self
- .sql_dist_insert(insert)
- .await
- .map_err(BoxedError::new)
- .context(server_error::ExecuteInsertSnafu {
- msg: "execute insert failed",
- })?;
- Ok(Output::AffectedRows(affected))
- }
+ Statement::Insert(insert) => match self.mode {
+ Mode::Standalone => {
+ let (_, schema_name, table_name) = insert
+ .full_table_name()
+ .context(error::ParseSqlSnafu)
+ .map_err(BoxedError::new)
+ .context(server_error::ExecuteInsertSnafu {
+ msg: "Failed to get table name",
+ })?;
+
+ let expr = InsertExpr {
+ schema_name,
+ table_name,
+ expr: Some(insert_expr::Expr::Sql(query.to_string())),
+ region_number: 0,
+ options: HashMap::default(),
+ };
+ self.handle_insert(&expr)
+ .await
+ .map_err(BoxedError::new)
+ .context(server_error::ExecuteQuerySnafu { query })
}
- }
+ Mode::Distributed => {
+ let affected = self
+ .sql_dist_insert(insert)
+ .await
+ .map_err(BoxedError::new)
+ .context(server_error::ExecuteInsertSnafu {
+ msg: "execute insert failed",
+ })?;
+ Ok(Output::AffectedRows(affected))
+ }
+ },
Statement::CreateTable(create) => {
- let expr = create_to_expr(create)
+ let expr = self
+ .create_to_expr(create)
+ .await
.map_err(BoxedError::new)
.context(server_error::ExecuteQuerySnafu { query })?;
- self.admin()
- .create(expr)
+ self.handle_create_table(expr)
.await
- .and_then(admin_result_to_output)
.map_err(BoxedError::new)
.context(server_error::ExecuteQuerySnafu { query })
}
Statement::ShowDatabases(_) | Statement::ShowTables(_) => self
- .database()
- .select(Select::Sql(query.to_string()))
+ .handle_select(Select::Sql(query.to_string()))
.await
- .and_then(|object_result| object_result.try_into())
.map_err(BoxedError::new)
.context(server_error::ExecuteQuerySnafu { query }),
@@ -274,28 +559,26 @@ impl SqlQueryHandler for Instance {
let expr = CreateDatabaseExpr {
database_name: c.name.to_string(),
};
- self.admin()
- .create_database(expr)
+ self.handle_create_database(expr)
.await
- .and_then(admin_result_to_output)
.map_err(BoxedError::new)
.context(server_error::ExecuteQuerySnafu { query })
}
Statement::Alter(alter_stmt) => self
- .admin()
- .alter(
+ .handle_alter(
AlterExpr::try_from(alter_stmt)
.map_err(BoxedError::new)
.context(server_error::ExecuteAlterSnafu { query })?,
)
.await
- .and_then(admin_result_to_output)
.map_err(BoxedError::new)
.context(server_error::ExecuteQuerySnafu { query }),
Statement::ShowCreateTable(_) => {
return server_error::NotSupportedSnafu { feat: query }.fail()
}
}
+ .map_err(BoxedError::new)
+ .context(server_error::ExecuteQuerySnafu { query })
}
async fn insert_script(&self, _name: &str, _script: &str) -> server_error::Result<()> {
@@ -313,29 +596,6 @@ impl SqlQueryHandler for Instance {
}
}
-fn create_to_expr(create: CreateTable) -> Result<CreateExpr> {
- let (catalog_name, schema_name, table_name) =
- table_idents_to_full_name(&create.name).context(error::ParseSqlSnafu)?;
-
- let time_index = find_time_index(&create.constraints)?;
- let expr = CreateExpr {
- catalog_name: Some(catalog_name),
- schema_name: Some(schema_name),
- table_name,
- column_defs: columns_to_expr(&create.columns, &time_index)?,
- time_index,
- primary_keys: find_primary_keys(&create.constraints)?,
- create_if_not_exists: create.if_not_exists,
- // TODO(LFC): Fill in other table options.
- table_options: HashMap::from([
- ("engine".to_string(), create.engine),
- ("region_id".to_string(), "0".to_string()),
- ]),
- ..Default::default()
- };
- Ok(expr)
-}
-
fn find_primary_keys(constraints: &[TableConstraint]) -> Result<Vec<String>> {
let primary_keys = constraints
.iter()
@@ -421,13 +681,44 @@ fn columns_to_expr(column_defs: &[ColumnDef], time_index: &str) -> Result<Vec<Gr
#[async_trait]
impl GrpcQueryHandler for Instance {
async fn do_query(&self, query: ObjectExpr) -> server_error::Result<GrpcObjectResult> {
- self.database()
- .object(query.clone())
- .await
- .map_err(BoxedError::new)
- .with_context(|_| server_error::ExecuteQuerySnafu {
- query: format!("{:?}", query),
- })
+ if let Some(expr) = &query.expr {
+ match expr {
+ Expr::Insert(insert) => {
+ let result = self.handle_insert(insert).await;
+ result
+ .map(|o| match o {
+ Output::AffectedRows(rows) => ObjectResultBuilder::new()
+ .status_code(StatusCode::Success as u32)
+ .mutate_result(rows as u32, 0u32)
+ .build(),
+ _ => {
+ unreachable!()
+ }
+ })
+ .map_err(BoxedError::new)
+ .with_context(|_| server_error::ExecuteQuerySnafu {
+ query: format!("{:?}", query),
+ })
+ }
+ _ => self
+ .database()
+ .object(query.clone())
+ .await
+ .map_err(BoxedError::new)
+ .with_context(|_| server_error::ExecuteQuerySnafu {
+ query: format!("{:?}", query),
+ }),
+ }
+ } else {
+ // Expr is empty; forward the raw object request to the datanode as-is.
+ self.database()
+ .object(query.clone())
+ .await
+ .map_err(BoxedError::new)
+ .with_context(|_| server_error::ExecuteQuerySnafu {
+ query: format!("{:?}", query),
+ })
+ }
}
}
@@ -617,9 +908,11 @@ mod tests {
}
.into()];
let insert_expr = InsertExpr {
+ schema_name: "public".to_string(),
table_name: "demo".to_string(),
expr: Some(insert_expr::Expr::Values(insert_expr::Values { values })),
options: HashMap::default(),
+ region_number: 0,
};
let object_expr = ObjectExpr {
header: Some(ExprHeader::default()),
@@ -708,16 +1001,18 @@ mod tests {
default_constraint: None,
},
];
- let mut table_options = HashMap::with_capacity(1);
- table_options.insert("region_id".to_string(), "0".to_string());
CreateExpr {
+ catalog_name: None,
+ schema_name: None,
table_name: "demo".to_string(),
+ desc: None,
column_defs,
time_index: "ts".to_string(),
primary_keys: vec!["ts".to_string(), "host".to_string()],
create_if_not_exists: true,
- table_options,
- ..Default::default()
+ table_options: Default::default(),
+ table_id: None,
+ region_ids: vec![0],
}
}
}
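The on-demand alter path above relies on `common_insert::find_new_columns` to diff the incoming insert batch against the table schema; that function is not part of this diff. A simplified, name-only sketch of what it conceptually does (the real version also infers datatypes and key-ness from the batch):

```rust
use std::collections::HashSet;

/// Hypothetical, simplified view of `common_insert::find_new_columns`:
/// report the batch columns the table schema does not know yet.
fn find_new_columns(schema_columns: &[&str], batch_columns: &[&str]) -> Option<Vec<String>> {
    let known: HashSet<&str> = schema_columns.iter().copied().collect();
    let new: Vec<String> = batch_columns
        .iter()
        .filter(|c| !known.contains(**c))
        .map(|c| c.to_string())
        .collect();
    if new.is_empty() {
        None
    } else {
        Some(new)
    }
}

fn main() {
    let schema = ["host", "ts", "cpu"];
    let batch = ["host", "ts", "cpu", "memory"];
    // "memory" is new, so the caller would issue an ALTER TABLE ... ADD COLUMN.
    assert_eq!(
        find_new_columns(&schema, &batch),
        Some(vec!["memory".to_string()])
    );
}
```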
diff --git a/src/frontend/src/instance/influxdb.rs b/src/frontend/src/instance/influxdb.rs
index da6de4e7159b..98bf71d69972 100644
--- a/src/frontend/src/instance/influxdb.rs
+++ b/src/frontend/src/instance/influxdb.rs
@@ -17,8 +17,7 @@ impl InfluxdbLineProtocolHandler for Instance {
match self.mode {
Mode::Standalone => {
let exprs: Vec<InsertExpr> = request.try_into()?;
- self.database()
- .batch_insert(exprs)
+ self.handle_inserts(&exprs)
.await
.map_err(BoxedError::new)
.context(server_error::ExecuteQuerySnafu {
@@ -45,7 +44,14 @@ impl Instance {
for insert in inserts {
let self_clone = self.clone();
-
+ let insert_batch = crate::table::insert::insert_request_to_insert_batch(&insert)?;
+ self.create_or_alter_table_on_demand(
+ &insert.catalog_name,
+ &insert.schema_name,
+ &insert.table_name,
+ &[insert_batch],
+ )
+ .await?;
// TODO(fys): need a separate runtime here
let join = tokio::spawn(async move {
let catalog = self_clone.get_catalog(&insert.catalog_name)?;
diff --git a/src/frontend/src/instance/opentsdb.rs b/src/frontend/src/instance/opentsdb.rs
index 1645bc977849..f4e5543e4c1b 100644
--- a/src/frontend/src/instance/opentsdb.rs
+++ b/src/frontend/src/instance/opentsdb.rs
@@ -1,12 +1,11 @@
use async_trait::async_trait;
-use client::ObjectResult;
use common_error::prelude::BoxedError;
use servers::error as server_error;
use servers::opentsdb::codec::DataPoint;
use servers::query_handler::OpentsdbProtocolHandler;
use snafu::prelude::*;
-use crate::error::{self, Result};
+use crate::error::Result;
use crate::frontend::Mode;
use crate::instance::Instance;
@@ -41,27 +40,7 @@ impl OpentsdbProtocolHandler for Instance {
impl Instance {
async fn insert_opentsdb_metric(&self, data_point: &DataPoint) -> Result<()> {
let expr = data_point.as_grpc_insert();
-
- let result = self.database().insert(expr.clone()).await;
-
- let object_result = match result {
- Ok(result) => result,
- Err(_) => {
- return Err(result.context(error::RequestDatanodeSnafu).unwrap_err());
- }
- };
-
- match object_result {
- ObjectResult::Mutate(mutate) => {
- if mutate.success != 1 || mutate.failure != 0 {
- return error::ExecOpentsdbPutSnafu {
- reason: format!("illegal result: {:?}", mutate),
- }
- .fail();
- }
- }
- ObjectResult::Select(_) => unreachable!(),
- }
+ self.handle_insert(&expr).await?;
Ok(())
}
}
diff --git a/src/frontend/src/instance/prometheus.rs b/src/frontend/src/instance/prometheus.rs
index f48c9e463f2b..2f02387b063e 100644
--- a/src/frontend/src/instance/prometheus.rs
+++ b/src/frontend/src/instance/prometheus.rs
@@ -94,11 +94,15 @@ impl PrometheusProtocolHandler for Instance {
match self.mode {
Mode::Standalone => {
let exprs = prometheus::write_request_to_insert_exprs(request)?;
-
- self.database()
- .batch_insert(exprs)
+ let futures = exprs
+ .iter()
+ .map(|e| self.handle_insert(e))
+ .collect::<Vec<_>>();
+ let res = futures_util::future::join_all(futures)
.await
- .map_err(BoxedError::new)
+ .into_iter()
+ .collect::<Result<Vec<_>, crate::error::Error>>();
+ res.map_err(BoxedError::new)
.context(error::ExecuteInsertSnafu {
msg: "failed to write prometheus remote request",
})?;
@@ -167,6 +171,7 @@ mod tests {
#[tokio::test]
async fn test_prometheus_remote_write_and_read() {
+ common_telemetry::init_default_ut_logging();
let instance = tests::create_frontend_instance().await;
let write_request = WriteRequest {
@@ -174,7 +179,7 @@ mod tests {
..Default::default()
};
- assert!(instance.write(write_request).await.is_ok());
+ instance.write(write_request).await.unwrap();
let read_request = ReadRequest {
queries: vec![
diff --git a/src/frontend/src/server.rs b/src/frontend/src/server.rs
index 29c382bc15e5..0efd828b8d57 100644
--- a/src/frontend/src/server.rs
+++ b/src/frontend/src/server.rs
@@ -145,9 +145,8 @@ async fn start_server(
server_and_addr: Option<(Box<dyn Server>, SocketAddr)>,
) -> servers::error::Result<Option<SocketAddr>> {
if let Some((server, addr)) = server_and_addr {
- let res = server.start(addr).await.map(Some)?;
info!("Starting server at {}", addr);
- Ok(res)
+ server.start(addr).await.map(Some)
} else {
Ok(None)
}
diff --git a/src/frontend/src/table.rs b/src/frontend/src/table.rs
index 65a2a83b84d8..6c8a8d253df6 100644
--- a/src/frontend/src/table.rs
+++ b/src/frontend/src/table.rs
@@ -1,4 +1,3 @@
-mod insert;
pub(crate) mod route;
use std::any::Any;
@@ -30,6 +29,7 @@ use crate::mock::{DatanodeInstance, TableScanPlan};
use crate::partitioning::{Operator, PartitionExpr, PartitionRuleRef};
use crate::spliter::WriteSpliter;
use crate::table::route::TableRoutes;
+pub mod insert;
#[derive(Clone)]
pub struct DistTable {
diff --git a/src/frontend/src/table/insert.rs b/src/frontend/src/table/insert.rs
index 4bde389066d7..795237331cc1 100644
--- a/src/frontend/src/table/insert.rs
+++ b/src/frontend/src/table/insert.rs
@@ -3,6 +3,7 @@ use std::sync::Arc;
use api::helper::ColumnDataTypeWrapper;
use api::v1::codec;
+use api::v1::codec::InsertBatch;
use api::v1::insert_expr;
use api::v1::insert_expr::Expr;
use api::v1::Column;
@@ -73,12 +74,12 @@ impl DistTable {
}
}
-fn to_insert_expr(region_id: RegionNumber, insert: InsertRequest) -> Result<InsertExpr> {
+pub fn insert_request_to_insert_batch(insert: &InsertRequest) -> Result<InsertBatch> {
let mut row_count = None;
let columns = insert
.columns_values
- .into_iter()
+ .iter()
.map(|(column_name, vector)| {
match row_count {
Some(rows) => ensure!(
@@ -97,12 +98,12 @@ fn to_insert_expr(region_id: RegionNumber, insert: InsertRequest) -> Result<Inse
.context(error::ColumnDataTypeSnafu)?;
let mut column = Column {
- column_name,
+ column_name: column_name.clone(),
datatype: datatype.datatype() as i32,
..Default::default()
};
- column.push_vals(0, vector);
+ column.push_vals(0, vector.clone());
Ok(column)
})
.collect::<Result<Vec<_>>>()?;
@@ -111,32 +112,28 @@ fn to_insert_expr(region_id: RegionNumber, insert: InsertRequest) -> Result<Inse
columns,
row_count: row_count.map(|rows| rows as u32).unwrap_or(0),
};
+ Ok(insert_batch)
+}
- let mut options = HashMap::with_capacity(1);
- options.insert(
- // TODO(fys): Temporarily hard code here
- "region_id".to_string(),
- codec::RegionNumber { id: region_id }.into(),
- );
-
+fn to_insert_expr(region_number: RegionNumber, insert: InsertRequest) -> Result<InsertExpr> {
+ let table_name = insert.table_name.clone();
+ let insert_batch = insert_request_to_insert_batch(&insert)?;
Ok(InsertExpr {
- table_name: insert.table_name,
- options,
+ schema_name: insert.schema_name,
+ table_name,
expr: Some(Expr::Values(insert_expr::Values {
values: vec![insert_batch.into()],
})),
+ region_number,
+ options: Default::default(),
})
}
#[cfg(test)]
mod tests {
- use std::{collections::HashMap, ops::Deref};
+ use std::collections::HashMap;
- use api::v1::{
- codec::{self, InsertBatch},
- insert_expr::Expr,
- ColumnDataType, InsertExpr,
- };
+ use api::v1::{codec::InsertBatch, insert_expr::Expr, ColumnDataType, InsertExpr};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use datatypes::{prelude::ConcreteDataType, types::StringType, vectors::VectorBuilder};
use table::requests::InsertRequest;
@@ -205,8 +202,7 @@ mod tests {
}
}
- let bytes = insert_expr.options.get("region_id").unwrap();
- let region_id: codec::RegionNumber = bytes.deref().try_into().unwrap();
- assert_eq!(12, region_id.id);
+ let region_number = insert_expr.region_number;
+ assert_eq!(12, region_number);
}
}
diff --git a/src/frontend/src/tests.rs b/src/frontend/src/tests.rs
index 76a823fb2247..5ea03cbb6ae6 100644
--- a/src/frontend/src/tests.rs
+++ b/src/frontend/src/tests.rs
@@ -19,9 +19,13 @@ async fn create_datanode_instance() -> Arc<DatanodeInstance> {
}
pub(crate) async fn create_frontend_instance() -> Arc<Instance> {
- let datanode_instance = create_datanode_instance().await;
+ let datanode_instance: Arc<DatanodeInstance> = create_datanode_instance().await;
+ let dn_catalog_manager = datanode_instance.catalog_manager().clone();
let (_, client) = create_datanode_client(datanode_instance).await;
- Arc::new(Instance::with_client(client))
+ Arc::new(Instance::with_client_and_catalog_manager(
+ client,
+ dn_catalog_manager,
+ ))
}
pub(crate) async fn create_datanode_client(
diff --git a/src/servers/src/influxdb.rs b/src/servers/src/influxdb.rs
index 784a4bc72ec3..47ced111224c 100644
--- a/src/servers/src/influxdb.rs
+++ b/src/servers/src/influxdb.rs
@@ -4,6 +4,7 @@ use api::v1::{
insert_expr::{self, Expr},
InsertExpr,
};
+use common_catalog::consts::DEFAULT_SCHEMA_NAME;
use common_grpc::writer::{LinesWriter, Precision};
use influxdb_line_protocol::{parse_lines, FieldValue};
use snafu::ResultExt;
@@ -80,6 +81,9 @@ impl TryFrom<&InfluxdbRequest> for Vec<InsertExpr> {
type Error = Error;
fn try_from(value: &InfluxdbRequest) -> Result<Self, Self::Error> {
+ // InfluxDB uses default catalog name and schema name
+ let schema_name = DEFAULT_SCHEMA_NAME.to_string();
+
let mut writers: HashMap<TableName, LinesWriter> = HashMap::new();
let lines = parse_lines(&value.lines)
.collect::<influxdb_line_protocol::Result<Vec<_>>>()
@@ -150,11 +154,13 @@ impl TryFrom<&InfluxdbRequest> for Vec<InsertExpr> {
Ok(writers
.into_iter()
.map(|(table_name, writer)| InsertExpr {
+ schema_name: schema_name.clone(),
table_name,
expr: Some(Expr::Values(insert_expr::Values {
values: vec![writer.finish().into()],
})),
options: HashMap::default(),
+ region_number: 0,
})
.collect())
}
diff --git a/src/servers/src/opentsdb/codec.rs b/src/servers/src/opentsdb/codec.rs
index 7edbf63e6029..aa743324c6c6 100644
--- a/src/servers/src/opentsdb/codec.rs
+++ b/src/servers/src/opentsdb/codec.rs
@@ -2,6 +2,7 @@ use std::collections::HashMap;
use api::v1::codec::InsertBatch;
use api::v1::{column, column::SemanticType, insert_expr, Column, ColumnDataType, InsertExpr};
+use common_catalog::consts::DEFAULT_SCHEMA_NAME;
use common_grpc::writer::Precision;
use table::requests::InsertRequest;
@@ -133,6 +134,7 @@ impl DataPoint {
// TODO(fys): will be removed in the future.
pub fn as_grpc_insert(&self) -> InsertExpr {
+ let schema_name = DEFAULT_SCHEMA_NAME.to_string();
let mut columns = Vec::with_capacity(2 + self.tags.len());
let ts_column = Column {
@@ -177,11 +179,13 @@ impl DataPoint {
row_count: 1,
};
InsertExpr {
+ schema_name,
table_name: self.metric.clone(),
expr: Some(insert_expr::Expr::Values(insert_expr::Values {
values: vec![batch.into()],
})),
options: HashMap::default(),
+ region_number: 0,
}
}
diff --git a/src/servers/src/prometheus.rs b/src/servers/src/prometheus.rs
index 8a908a962cf9..1070aba65eba 100644
--- a/src/servers/src/prometheus.rs
+++ b/src/servers/src/prometheus.rs
@@ -11,6 +11,7 @@ use api::v1::{
codec::SelectResult, column, column::SemanticType, insert_expr, Column, ColumnDataType,
InsertExpr,
};
+use common_catalog::consts::DEFAULT_SCHEMA_NAME;
use common_grpc::writer::Precision::MILLISECOND;
use openmetrics_parser::{MetricsExposition, PrometheusType, PrometheusValue};
use snafu::{OptionExt, ResultExt};
@@ -339,6 +340,8 @@ pub fn write_request_to_insert_exprs(mut request: WriteRequest) -> Result<Vec<In
// TODO(fys): it will be removed in the future.
fn timeseries_to_insert_expr(mut timeseries: TimeSeries) -> Result<InsertExpr> {
+ let schema_name = DEFAULT_SCHEMA_NAME.to_string();
+
// TODO(dennis): save exemplars into a column
let labels = std::mem::take(&mut timeseries.labels);
let samples = std::mem::take(&mut timeseries.samples);
@@ -399,6 +402,7 @@ fn timeseries_to_insert_expr(mut timeseries: TimeSeries) -> Result<InsertExpr> {
row_count: row_count as u32,
};
Ok(InsertExpr {
+ schema_name,
table_name: table_name.context(error::InvalidPromRemoteRequestSnafu {
msg: "missing '__name__' label in timeseries",
})?,
@@ -407,6 +411,7 @@ fn timeseries_to_insert_expr(mut timeseries: TimeSeries) -> Result<InsertExpr> {
values: vec![batch.into()],
})),
options: HashMap::default(),
+ region_number: 0,
})
}
diff --git a/src/sql/src/statements/alter.rs b/src/sql/src/statements/alter.rs
index ad2a12478019..8861fc02988c 100644
--- a/src/sql/src/statements/alter.rs
+++ b/src/sql/src/statements/alter.rs
@@ -1,4 +1,4 @@
-use api::v1::{alter_expr, AlterExpr};
+use api::v1::{alter_expr, AddColumn, AlterExpr};
use sqlparser::ast::{ColumnDef, ObjectName, TableConstraint};
use crate::error::UnsupportedAlterTableStatementSnafu;
@@ -51,8 +51,11 @@ impl TryFrom<AlterTable> for AlterExpr {
.fail();
}
AlterTableOperation::AddColumn { column_def } => {
- alter_expr::Kind::AddColumn(api::v1::AddColumn {
- column_def: Some(sql_column_def_to_grpc_column_def(column_def)?),
+ alter_expr::Kind::AddColumns(api::v1::AddColumns {
+ add_columns: vec![AddColumn {
+ column_def: Some(sql_column_def_to_grpc_column_def(column_def)?),
+ is_key: false,
+ }],
})
}
};
diff --git a/src/table/src/table.rs b/src/table/src/table.rs
index 7f3fbe854abd..55e74051ebc2 100644
--- a/src/table/src/table.rs
+++ b/src/table/src/table.rs
@@ -11,7 +11,7 @@ use common_query::physical_plan::PhysicalPlanRef;
use datatypes::schema::SchemaRef;
use crate::error::Result;
-use crate::metadata::{FilterPushDownType, TableInfoRef, TableType};
+use crate::metadata::{FilterPushDownType, TableId, TableInfoRef, TableType};
use crate::requests::{AlterTableRequest, InsertRequest};
/// Table abstraction.
@@ -61,3 +61,10 @@ pub trait Table: Send + Sync {
}
pub type TableRef = Arc<dyn Table>;
+
+#[async_trait::async_trait]
+pub trait TableIdProvider {
+ async fn next_table_id(&self) -> Result<TableId>;
+}
+
+pub type TableIdProviderRef = Arc<dyn TableIdProvider + Send + Sync>;
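The `TableIdProvider` trait added here is what `create_to_expr` calls through `next_table_id()` when allocating table ids. A minimal in-memory implementation sketch, with a local copy of the trait and a plain `String` error so it stays self-contained (the real trait lives in `table::table` and returns `table::error::Result`; the starting id below is arbitrary):

```rust
use std::sync::atomic::{AtomicU32, Ordering};

use async_trait::async_trait;

type TableId = u32;

// Local copy of the trait for the sake of a runnable sketch.
#[async_trait]
trait TableIdProvider: Send + Sync {
    async fn next_table_id(&self) -> Result<TableId, String>;
}

/// A naive provider backed by an atomic counter; in distributed mode the ids
/// would instead be handed out by metasrv.
struct LocalTableIdProvider {
    next: AtomicU32,
}

#[async_trait]
impl TableIdProvider for LocalTableIdProvider {
    async fn next_table_id(&self) -> Result<TableId, String> {
        // fetch_add returns the previous value, so ids are handed out in order.
        Ok(self.next.fetch_add(1, Ordering::Relaxed))
    }
}

#[tokio::main]
async fn main() {
    let provider = LocalTableIdProvider {
        next: AtomicU32::new(1024),
    };
    assert_eq!(provider.next_table_id().await.unwrap(), 1024);
    assert_eq!(provider.next_table_id().await.unwrap(), 1025);
}
```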
| feat | Move create table logic to frontend (#455) |
3d651522c28aef56ef78ad67c99468e7e25b40a4 | 2023-12-13 14:32:00 | Yue Deng | feat: add build() function to return the database build info (#2919) | false |
diff --git a/Cargo.lock b/Cargo.lock
index 888f34f05245..c0bbae37fd71 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1664,6 +1664,7 @@ name = "common-function"
version = "0.4.4"
dependencies = [
"arc-swap",
+ "build-data",
"chrono-tz 0.6.3",
"common-error",
"common-macro",
diff --git a/src/common/function/Cargo.toml b/src/common/function/Cargo.toml
index 31a212d21035..3db195668e60 100644
--- a/src/common/function/Cargo.toml
+++ b/src/common/function/Cargo.toml
@@ -6,6 +6,7 @@ license.workspace = true
[dependencies]
arc-swap = "1.0"
+build-data = "0.1"
chrono-tz = "0.6"
common-error.workspace = true
common-macro.workspace = true
diff --git a/src/common/function/src/scalars/function.rs b/src/common/function/src/function.rs
similarity index 100%
rename from src/common/function/src/scalars/function.rs
rename to src/common/function/src/function.rs
diff --git a/src/common/function/src/scalars/function_registry.rs b/src/common/function/src/function_registry.rs
similarity index 96%
rename from src/common/function/src/scalars/function_registry.rs
rename to src/common/function/src/function_registry.rs
index 5d6751df7503..a1274779c3a0 100644
--- a/src/common/function/src/scalars/function_registry.rs
+++ b/src/common/function/src/function_registry.rs
@@ -18,12 +18,13 @@ use std::sync::{Arc, RwLock};
use once_cell::sync::Lazy;
+use crate::function::FunctionRef;
use crate::scalars::aggregate::{AggregateFunctionMetaRef, AggregateFunctions};
use crate::scalars::date::DateFunction;
-use crate::scalars::function::FunctionRef;
use crate::scalars::math::MathFunction;
use crate::scalars::numpy::NumpyFunction;
use crate::scalars::timestamp::TimestampFunction;
+use crate::system::SystemFunction;
#[derive(Default)]
pub struct FunctionRegistry {
@@ -79,7 +80,7 @@ pub static FUNCTION_REGISTRY: Lazy<Arc<FunctionRegistry>> = Lazy::new(|| {
DateFunction::register(&function_registry);
AggregateFunctions::register(&function_registry);
-
+ SystemFunction::register(&function_registry);
Arc::new(function_registry)
});
diff --git a/src/common/function/src/lib.rs b/src/common/function/src/lib.rs
index 5d3ab6d42069..e8bf35bc19dc 100644
--- a/src/common/function/src/lib.rs
+++ b/src/common/function/src/lib.rs
@@ -13,5 +13,8 @@
// limitations under the License.
pub mod scalars;
+pub mod system;
+pub mod function;
+pub mod function_registry;
pub mod helper;
diff --git a/src/common/function/src/scalars.rs b/src/common/function/src/scalars.rs
index 3aa19632e7df..143d3f9cbbcd 100644
--- a/src/common/function/src/scalars.rs
+++ b/src/common/function/src/scalars.rs
@@ -13,16 +13,11 @@
// limitations under the License.
pub mod aggregate;
-mod date;
+pub(crate) mod date;
pub mod expression;
-pub mod function;
-pub mod function_registry;
pub mod math;
pub mod numpy;
#[cfg(test)]
pub(crate) mod test;
-mod timestamp;
+pub(crate) mod timestamp;
pub mod udf;
-
-pub use function::{Function, FunctionRef};
-pub use function_registry::{FunctionRegistry, FUNCTION_REGISTRY};
diff --git a/src/common/function/src/scalars/aggregate.rs b/src/common/function/src/scalars/aggregate.rs
index 829296498078..7ed453098318 100644
--- a/src/common/function/src/scalars/aggregate.rs
+++ b/src/common/function/src/scalars/aggregate.rs
@@ -33,7 +33,7 @@ pub use polyval::PolyvalAccumulatorCreator;
pub use scipy_stats_norm_cdf::ScipyStatsNormCdfAccumulatorCreator;
pub use scipy_stats_norm_pdf::ScipyStatsNormPdfAccumulatorCreator;
-use crate::scalars::FunctionRegistry;
+use crate::function_registry::FunctionRegistry;
/// A function creates `AggregateFunctionCreator`.
/// "Aggregator" *is* AggregatorFunction. Since the later one is long, we named an short alias for it.
diff --git a/src/common/function/src/scalars/date.rs b/src/common/function/src/scalars/date.rs
index 0e16019d527b..86b0c7db6202 100644
--- a/src/common/function/src/scalars/date.rs
+++ b/src/common/function/src/scalars/date.rs
@@ -19,7 +19,7 @@ mod date_sub;
use date_add::DateAddFunction;
use date_sub::DateSubFunction;
-use crate::scalars::function_registry::FunctionRegistry;
+use crate::function_registry::FunctionRegistry;
pub(crate) struct DateFunction;
diff --git a/src/common/function/src/scalars/date/date_add.rs b/src/common/function/src/scalars/date/date_add.rs
index e299f7947297..1052acb86863 100644
--- a/src/common/function/src/scalars/date/date_add.rs
+++ b/src/common/function/src/scalars/date/date_add.rs
@@ -22,8 +22,8 @@ use datatypes::value::ValueRef;
use datatypes::vectors::VectorRef;
use snafu::ensure;
+use crate::function::{Function, FunctionContext};
use crate::helper;
-use crate::scalars::function::{Function, FunctionContext};
/// A function adds an interval value to Timestamp, Date or DateTime, and return the result.
#[derive(Clone, Debug, Default)]
@@ -149,7 +149,6 @@ mod tests {
};
use super::{DateAddFunction, *};
- use crate::scalars::Function;
#[test]
fn test_date_add_misc() {
diff --git a/src/common/function/src/scalars/date/date_sub.rs b/src/common/function/src/scalars/date/date_sub.rs
index 15660850f558..b1f87e880ab7 100644
--- a/src/common/function/src/scalars/date/date_sub.rs
+++ b/src/common/function/src/scalars/date/date_sub.rs
@@ -22,8 +22,8 @@ use datatypes::value::ValueRef;
use datatypes::vectors::VectorRef;
use snafu::ensure;
+use crate::function::{Function, FunctionContext};
use crate::helper;
-use crate::scalars::function::{Function, FunctionContext};
/// A function subtracts an interval value to Timestamp, Date or DateTime, and return the result.
#[derive(Clone, Debug, Default)]
@@ -150,7 +150,6 @@ mod tests {
};
use super::{DateSubFunction, *};
- use crate::scalars::Function;
#[test]
fn test_date_sub_misc() {
diff --git a/src/common/function/src/scalars/math.rs b/src/common/function/src/scalars/math.rs
index b38bf553b804..1e9609c5171d 100644
--- a/src/common/function/src/scalars/math.rs
+++ b/src/common/function/src/scalars/math.rs
@@ -28,9 +28,8 @@ pub use pow::PowFunction;
pub use rate::RateFunction;
use snafu::ResultExt;
-use super::function::FunctionContext;
-use super::Function;
-use crate::scalars::function_registry::FunctionRegistry;
+use crate::function::{Function, FunctionContext};
+use crate::function_registry::FunctionRegistry;
pub(crate) struct MathFunction;
diff --git a/src/common/function/src/scalars/math/pow.rs b/src/common/function/src/scalars/math/pow.rs
index 5a1922a4fc70..5e6cc0f089e5 100644
--- a/src/common/function/src/scalars/math/pow.rs
+++ b/src/common/function/src/scalars/math/pow.rs
@@ -25,8 +25,8 @@ use datatypes::with_match_primitive_type_id;
use num::traits::Pow;
use num_traits::AsPrimitive;
+use crate::function::{Function, FunctionContext};
use crate::scalars::expression::{scalar_binary_op, EvalContext};
-use crate::scalars::function::{Function, FunctionContext};
#[derive(Clone, Debug, Default)]
pub struct PowFunction;
@@ -83,6 +83,7 @@ mod tests {
use datatypes::vectors::{Float32Vector, Int8Vector};
use super::*;
+ use crate::function::FunctionContext;
#[test]
fn test_pow_function() {
let pow = PowFunction;
diff --git a/src/common/function/src/scalars/math/rate.rs b/src/common/function/src/scalars/math/rate.rs
index 4b6e160916df..7afc07177d1d 100644
--- a/src/common/function/src/scalars/math/rate.rs
+++ b/src/common/function/src/scalars/math/rate.rs
@@ -23,7 +23,7 @@ use datatypes::prelude::*;
use datatypes::vectors::{Helper, VectorRef};
use snafu::ResultExt;
-use crate::scalars::function::{Function, FunctionContext};
+use crate::function::{Function, FunctionContext};
/// generates rates from a sequence of adjacent data points.
#[derive(Clone, Debug, Default)]
diff --git a/src/common/function/src/scalars/numpy.rs b/src/common/function/src/scalars/numpy.rs
index 7e1c1145bd83..33c82d44e3d6 100644
--- a/src/common/function/src/scalars/numpy.rs
+++ b/src/common/function/src/scalars/numpy.rs
@@ -19,7 +19,7 @@ use std::sync::Arc;
use clip::ClipFunction;
-use crate::scalars::function_registry::FunctionRegistry;
+use crate::function_registry::FunctionRegistry;
pub(crate) struct NumpyFunction;
diff --git a/src/common/function/src/scalars/numpy/clip.rs b/src/common/function/src/scalars/numpy/clip.rs
index cdcc4d562220..02e1256207c8 100644
--- a/src/common/function/src/scalars/numpy/clip.rs
+++ b/src/common/function/src/scalars/numpy/clip.rs
@@ -24,8 +24,8 @@ use datatypes::prelude::*;
use datatypes::vectors::PrimitiveVector;
use paste::paste;
+use crate::function::{Function, FunctionContext};
use crate::scalars::expression::{scalar_binary_op, EvalContext};
-use crate::scalars::function::{Function, FunctionContext};
/// numpy.clip function, <https://numpy.org/doc/stable/reference/generated/numpy.clip.html>
#[derive(Clone, Debug, Default)]
diff --git a/src/common/function/src/scalars/test.rs b/src/common/function/src/scalars/test.rs
index 1ffef23be545..573c2e715b1b 100644
--- a/src/common/function/src/scalars/test.rs
+++ b/src/common/function/src/scalars/test.rs
@@ -20,8 +20,8 @@ use common_query::prelude::{Signature, Volatility};
use datatypes::data_type::ConcreteDataType;
use datatypes::prelude::VectorRef;
+use crate::function::{Function, FunctionContext};
use crate::scalars::expression::{scalar_binary_op, EvalContext};
-use crate::scalars::function::{Function, FunctionContext};
#[derive(Clone, Default)]
pub(crate) struct TestAndFunction;
diff --git a/src/common/function/src/scalars/timestamp.rs b/src/common/function/src/scalars/timestamp.rs
index 395b8b244379..fecf884ce02b 100644
--- a/src/common/function/src/scalars/timestamp.rs
+++ b/src/common/function/src/scalars/timestamp.rs
@@ -19,7 +19,7 @@ mod to_unixtime;
use greatest::GreatestFunction;
use to_unixtime::ToUnixtimeFunction;
-use crate::scalars::function_registry::FunctionRegistry;
+use crate::function_registry::FunctionRegistry;
pub(crate) struct TimestampFunction;
diff --git a/src/common/function/src/scalars/timestamp/greatest.rs b/src/common/function/src/scalars/timestamp/greatest.rs
index db116dc04598..fd3fe0a16897 100644
--- a/src/common/function/src/scalars/timestamp/greatest.rs
+++ b/src/common/function/src/scalars/timestamp/greatest.rs
@@ -27,7 +27,7 @@ use datatypes::prelude::ConcreteDataType;
use datatypes::vectors::{Helper, VectorRef};
use snafu::{ensure, ResultExt};
-use crate::scalars::function::{Function, FunctionContext};
+use crate::function::{Function, FunctionContext};
#[derive(Clone, Debug, Default)]
pub struct GreatestFunction;
@@ -113,10 +113,7 @@ mod tests {
use datatypes::value::Value;
use datatypes::vectors::{DateVector, StringVector, Vector};
- use super::GreatestFunction;
- use crate::scalars::function::FunctionContext;
- use crate::scalars::Function;
-
+ use super::*;
#[test]
fn test_greatest_takes_string_vector() {
let function = GreatestFunction;
diff --git a/src/common/function/src/scalars/timestamp/to_unixtime.rs b/src/common/function/src/scalars/timestamp/to_unixtime.rs
index 0bd8c2255e5e..4d914ecba919 100644
--- a/src/common/function/src/scalars/timestamp/to_unixtime.rs
+++ b/src/common/function/src/scalars/timestamp/to_unixtime.rs
@@ -23,7 +23,7 @@ use datatypes::prelude::ConcreteDataType;
use datatypes::vectors::{Int64Vector, VectorRef};
use snafu::ensure;
-use crate::scalars::function::{Function, FunctionContext};
+use crate::function::{Function, FunctionContext};
/// A function to convert the column into the unix timestamp in seconds.
#[derive(Clone, Debug, Default)]
@@ -152,7 +152,6 @@ mod tests {
};
use super::{ToUnixtimeFunction, *};
- use crate::scalars::Function;
#[test]
fn test_string_to_unixtime() {
diff --git a/src/common/function/src/scalars/udf.rs b/src/common/function/src/scalars/udf.rs
index bf332720a2e3..da67f321bf8c 100644
--- a/src/common/function/src/scalars/udf.rs
+++ b/src/common/function/src/scalars/udf.rs
@@ -23,7 +23,7 @@ use datatypes::prelude::*;
use datatypes::vectors::Helper;
use snafu::ResultExt;
-use crate::scalars::function::{FunctionContext, FunctionRef};
+use crate::function::{FunctionContext, FunctionRef};
/// Create a ScalarUdf from function.
pub fn create_udf(func: FunctionRef) -> ScalarUdf {
@@ -72,7 +72,7 @@ mod tests {
use datatypes::vectors::{BooleanVector, ConstantVector};
use super::*;
- use crate::scalars::function::Function;
+ use crate::function::Function;
use crate::scalars::test::TestAndFunction;
#[test]
diff --git a/src/common/function/src/system.rs b/src/common/function/src/system.rs
new file mode 100644
index 000000000000..2f34c99d1600
--- /dev/null
+++ b/src/common/function/src/system.rs
@@ -0,0 +1,29 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+pub mod build;
+
+use std::sync::Arc;
+
+use build::BuildFunction;
+
+use crate::function_registry::FunctionRegistry;
+
+pub(crate) struct SystemFunction;
+
+impl SystemFunction {
+ pub fn register(registry: &FunctionRegistry) {
+ registry.register(Arc::new(BuildFunction));
+ }
+}
diff --git a/src/common/function/src/system/build.rs b/src/common/function/src/system/build.rs
new file mode 100644
index 000000000000..43433ce425ce
--- /dev/null
+++ b/src/common/function/src/system/build.rs
@@ -0,0 +1,102 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::fmt;
+use std::sync::Arc;
+
+use common_query::error::Result;
+use common_query::prelude::{Signature, Volatility};
+use datatypes::prelude::*;
+use datatypes::vectors::{StringVector, VectorRef};
+
+use crate::function::{Function, FunctionContext};
+
+const DEFAULT_VALUE: &str = "unknown";
+
+/// Generates build information
+#[derive(Clone, Debug, Default)]
+pub struct BuildFunction;
+
+impl fmt::Display for BuildFunction {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(f, "BUILD")
+ }
+}
+
+impl Function for BuildFunction {
+ fn name(&self) -> &str {
+ "build"
+ }
+
+ fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
+ Ok(ConcreteDataType::string_datatype())
+ }
+
+ fn signature(&self) -> Signature {
+ Signature::uniform(
+ 0,
+ vec![ConcreteDataType::string_datatype()],
+ Volatility::Immutable,
+ )
+ }
+
+ fn eval(&self, _func_ctx: FunctionContext, _columns: &[VectorRef]) -> Result<VectorRef> {
+ let build_info = format!(
+ "branch: {}\ncommit: {}\ncommit short: {}\ndirty: {}\nversion: {}",
+ build_data::get_git_branch().unwrap_or_else(|_| DEFAULT_VALUE.to_string()),
+ build_data::get_git_commit().unwrap_or_else(|_| DEFAULT_VALUE.to_string()),
+ build_data::get_git_commit_short().unwrap_or_else(|_| DEFAULT_VALUE.to_string()),
+ build_data::get_git_dirty().map_or(DEFAULT_VALUE.to_string(), |v| v.to_string()),
+ env!("CARGO_PKG_VERSION")
+ );
+
+ let v = Arc::new(StringVector::from(vec![build_info]));
+ Ok(v)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::sync::Arc;
+
+ use common_query::prelude::TypeSignature;
+
+ use super::*;
+ #[test]
+ fn test_build_function() {
+ let build = BuildFunction;
+ assert_eq!("build", build.name());
+ assert_eq!(
+ ConcreteDataType::string_datatype(),
+ build.return_type(&[]).unwrap()
+ );
+ assert!(matches!(build.signature(),
+ Signature {
+ type_signature: TypeSignature::Uniform(0, valid_types),
+ volatility: Volatility::Immutable
+ } if valid_types == vec![ConcreteDataType::string_datatype()]
+ ));
+ let build_info = format!(
+ "branch: {}\ncommit: {}\ncommit short: {}\ndirty: {}\nversion: {}",
+ build_data::get_git_branch().unwrap_or_else(|_| DEFAULT_VALUE.to_string()),
+ build_data::get_git_commit().unwrap_or_else(|_| DEFAULT_VALUE.to_string()),
+ build_data::get_git_commit_short().unwrap_or_else(|_| DEFAULT_VALUE.to_string()),
+ build_data::get_git_dirty().map_or(DEFAULT_VALUE.to_string(), |v| v.to_string()),
+ env!("CARGO_PKG_VERSION")
+ );
+ let vector = build.eval(FunctionContext::default(), &[]).unwrap();
+ let expect: VectorRef = Arc::new(StringVector::from(vec![build_info]));
+ assert_eq!(expect, vector);
+ }
+}
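`BuildFunction` reaches queries through `SystemFunction::register`, which plugs it into the global `FUNCTION_REGISTRY` touched in function_registry.rs above. A much-reduced sketch of that registry pattern follows; the trait shape and method names are simplified assumptions, not the real common-function types.

```rust
// Simplified sketch of a process-wide Lazy registry behind a RwLock, into which
// SystemFunction-style helpers register their functions by name.
use std::collections::HashMap;
use std::sync::{Arc, RwLock};

use once_cell::sync::Lazy;

trait Function: Send + Sync {
    fn name(&self) -> &str;
    fn eval(&self) -> String;
}

#[derive(Default)]
struct FunctionRegistry {
    functions: RwLock<HashMap<String, Arc<dyn Function>>>,
}

impl FunctionRegistry {
    fn register(&self, func: Arc<dyn Function>) {
        self.functions
            .write()
            .unwrap()
            .insert(func.name().to_string(), func);
    }

    fn get(&self, name: &str) -> Option<Arc<dyn Function>> {
        self.functions.read().unwrap().get(name).cloned()
    }
}

static FUNCTION_REGISTRY: Lazy<Arc<FunctionRegistry>> = Lazy::new(|| {
    let registry = FunctionRegistry::default();
    registry.register(Arc::new(BuildFunction));
    Arc::new(registry)
});

struct BuildFunction;

impl Function for BuildFunction {
    fn name(&self) -> &str {
        "build"
    }
    fn eval(&self) -> String {
        // The real function assembles branch/commit/dirty info via build-data;
        // this sketch only reports the crate version.
        format!("version: {}", env!("CARGO_PKG_VERSION"))
    }
}

fn main() {
    let build = FUNCTION_REGISTRY.get("build").expect("registered");
    println!("{}", build.eval());
}
```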
diff --git a/src/datanode/src/tests.rs b/src/datanode/src/tests.rs
index b5926c0c2230..abf1064de826 100644
--- a/src/datanode/src/tests.rs
+++ b/src/datanode/src/tests.rs
@@ -17,8 +17,8 @@ use std::sync::Arc;
use async_trait::async_trait;
use common_error::ext::BoxedError;
+use common_function::function::FunctionRef;
use common_function::scalars::aggregate::AggregateFunctionMetaRef;
-use common_function::scalars::FunctionRef;
use common_query::prelude::ScalarUdf;
use common_query::Output;
use common_recordbatch::SendableRecordBatchStream;
diff --git a/src/query/src/datafusion.rs b/src/query/src/datafusion.rs
index b0b7c55ebfb1..a05093e32978 100644
--- a/src/query/src/datafusion.rs
+++ b/src/query/src/datafusion.rs
@@ -24,9 +24,9 @@ use std::sync::Arc;
use async_trait::async_trait;
use common_base::Plugins;
use common_error::ext::BoxedError;
+use common_function::function::FunctionRef;
use common_function::scalars::aggregate::AggregateFunctionMetaRef;
use common_function::scalars::udf::create_udf;
-use common_function::scalars::FunctionRef;
use common_query::physical_plan::{DfPhysicalPlanAdapter, PhysicalPlan, PhysicalPlanAdapter};
use common_query::prelude::ScalarUdf;
use common_query::Output;
diff --git a/src/query/src/query_engine.rs b/src/query/src/query_engine.rs
index 9e94037ba3aa..ec271a818fc9 100644
--- a/src/query/src/query_engine.rs
+++ b/src/query/src/query_engine.rs
@@ -22,8 +22,9 @@ use std::sync::Arc;
use async_trait::async_trait;
use catalog::CatalogManagerRef;
use common_base::Plugins;
+use common_function::function::FunctionRef;
+use common_function::function_registry::FUNCTION_REGISTRY;
use common_function::scalars::aggregate::AggregateFunctionMetaRef;
-use common_function::scalars::{FunctionRef, FUNCTION_REGISTRY};
use common_query::prelude::ScalarUdf;
use common_query::Output;
use datatypes::schema::Schema;
diff --git a/src/script/src/python/engine.rs b/src/script/src/python/engine.rs
index 61f603232a74..30c7fbca193f 100644
--- a/src/script/src/python/engine.rs
+++ b/src/script/src/python/engine.rs
@@ -21,7 +21,8 @@ use std::task::{Context, Poll};
use async_trait::async_trait;
use common_error::ext::BoxedError;
-use common_function::scalars::{Function, FUNCTION_REGISTRY};
+use common_function::function::Function;
+use common_function::function_registry::FUNCTION_REGISTRY;
use common_query::error::{PyUdfSnafu, UdfTempRecordBatchSnafu};
use common_query::prelude::Signature;
use common_query::Output;
@@ -150,7 +151,7 @@ impl Function for PyUDF {
fn eval(
&self,
- _func_ctx: common_function::scalars::function::FunctionContext,
+ _func_ctx: common_function::function::FunctionContext,
columns: &[datatypes::vectors::VectorRef],
) -> common_query::error::Result<datatypes::vectors::VectorRef> {
// FIXME(discord9): exec_parsed require a RecordBatch(basically a Vector+Schema), where schema can't pop out from nowhere, right?
diff --git a/src/script/src/python/pyo3/builtins.rs b/src/script/src/python/pyo3/builtins.rs
index 6051a5307b99..c4c10cc0e499 100644
--- a/src/script/src/python/pyo3/builtins.rs
+++ b/src/script/src/python/pyo3/builtins.rs
@@ -14,7 +14,8 @@
use std::sync::Arc;
-use common_function::scalars::{FunctionRef, FUNCTION_REGISTRY};
+use common_function::function::FunctionRef;
+use common_function::function_registry::FUNCTION_REGISTRY;
use datafusion::arrow::array::{ArrayRef, NullArray};
use datafusion::physical_plan::expressions;
use datafusion_expr::ColumnarValue;
diff --git a/src/script/src/python/rspython/builtins.rs b/src/script/src/python/rspython/builtins.rs
index 8521a4dc9858..da8c3d6a51a8 100644
--- a/src/script/src/python/rspython/builtins.rs
+++ b/src/script/src/python/rspython/builtins.rs
@@ -288,9 +288,9 @@ pub(crate) mod greptime_builtin {
use std::sync::Arc;
use arrow::compute::kernels::{aggregate, boolean};
- use common_function::scalars::function::FunctionContext;
+ use common_function::function::{Function, FunctionContext, FunctionRef};
+ use common_function::function_registry::FUNCTION_REGISTRY;
use common_function::scalars::math::PowFunction;
- use common_function::scalars::{Function, FunctionRef, FUNCTION_REGISTRY};
use datafusion::arrow::datatypes::DataType as ArrowDataType;
use datafusion::dataframe::DataFrame as DfDataFrame;
use datafusion::physical_plan::expressions;
diff --git a/tests/cases/standalone/common/function/system.result b/tests/cases/standalone/common/function/system.result
new file mode 100644
index 000000000000..0e054d1fecd5
--- /dev/null
+++ b/tests/cases/standalone/common/function/system.result
@@ -0,0 +1,10 @@
+-- SQLNESS REPLACE branch:\s+.+ branch: BRANCH
+-- SQLNESS REPLACE commit:\s+.+ commit: COMMIT
+-- SQLNESS REPLACE commit+\s+short:\s+.+ commit short: COMMIT SHORT
+-- SQLNESS REPLACE dirty:\s+.+ dirty: DIRTY
+-- SQLNESS REPLACE version:\s+.+ version: VERSION
+-- SQLNESS REPLACE [\s\-]+
+SELECT build();
+
+++|build()|++|branch:BRANCH|commit:COMMIT|commitshort:COMMITSHORT|dirty:DIRTY|version:VERSION++
+
diff --git a/tests/cases/standalone/common/function/system.sql b/tests/cases/standalone/common/function/system.sql
new file mode 100644
index 000000000000..af5393721470
--- /dev/null
+++ b/tests/cases/standalone/common/function/system.sql
@@ -0,0 +1,7 @@
+-- SQLNESS REPLACE branch:\s+.+ branch: BRANCH
+-- SQLNESS REPLACE commit:\s+.+ commit: COMMIT
+-- SQLNESS REPLACE commit+\s+short:\s+.+ commit short: COMMIT SHORT
+-- SQLNESS REPLACE dirty:\s+.+ dirty: DIRTY
+-- SQLNESS REPLACE version:\s+.+ version: VERSION
+-- SQLNESS REPLACE [\s\-]+
+SELECT build();
|
feat
|
add build() function to return the database build info (#2919)
|
e22aa819be107eed9069231444a5743e79941526
|
2025-02-12 00:52:10
|
Ruihang Xia
|
feat: support server-side keep-alive for mysql and pg protocols (#5496)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 45d242094d5c..2920b50303a0 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -10561,6 +10561,7 @@ dependencies = [
"session",
"snafu 0.8.5",
"snap",
+ "socket2",
"sql",
"store-api",
"strum 0.25.0",
diff --git a/config/config.md b/config/config.md
index 0ad4ba95c1ae..5f348e0a6d28 100644
--- a/config/config.md
+++ b/config/config.md
@@ -40,6 +40,7 @@
| `mysql.enable` | Bool | `true` | Whether to enable. |
| `mysql.addr` | String | `127.0.0.1:4002` | The addr to bind the MySQL server. |
| `mysql.runtime_size` | Integer | `2` | The number of server worker threads. |
+| `mysql.keep_alive` | String | `0s` | Server-side keep-alive time.<br/>Set to 0 (default) to disable. |
| `mysql.tls` | -- | -- | -- |
| `mysql.tls.mode` | String | `disable` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- `disable` (default value)<br/>- `prefer`<br/>- `require`<br/>- `verify-ca`<br/>- `verify-full` |
| `mysql.tls.cert_path` | String | Unset | Certificate file path. |
@@ -49,6 +50,7 @@
| `postgres.enable` | Bool | `true` | Whether to enable |
| `postgres.addr` | String | `127.0.0.1:4003` | The addr to bind the PostgresSQL server. |
| `postgres.runtime_size` | Integer | `2` | The number of server worker threads. |
+| `postgres.keep_alive` | String | `0s` | Server-side keep-alive time.<br/>Set to 0 (default) to disable. |
| `postgres.tls` | -- | -- | PostgresSQL server TLS options, see `mysql.tls` section. |
| `postgres.tls.mode` | String | `disable` | TLS mode. |
| `postgres.tls.cert_path` | String | Unset | Certificate file path. |
@@ -234,6 +236,7 @@
| `mysql.enable` | Bool | `true` | Whether to enable. |
| `mysql.addr` | String | `127.0.0.1:4002` | The addr to bind the MySQL server. |
| `mysql.runtime_size` | Integer | `2` | The number of server worker threads. |
+| `mysql.keep_alive` | String | `0s` | Server-side keep-alive time.<br/>Set to 0 (default) to disable. |
| `mysql.tls` | -- | -- | -- |
| `mysql.tls.mode` | String | `disable` | TLS mode, refer to https://www.postgresql.org/docs/current/libpq-ssl.html<br/>- `disable` (default value)<br/>- `prefer`<br/>- `require`<br/>- `verify-ca`<br/>- `verify-full` |
| `mysql.tls.cert_path` | String | Unset | Certificate file path. |
@@ -243,6 +246,7 @@
| `postgres.enable` | Bool | `true` | Whether to enable |
| `postgres.addr` | String | `127.0.0.1:4003` | The addr to bind the PostgresSQL server. |
| `postgres.runtime_size` | Integer | `2` | The number of server worker threads. |
+| `postgres.keep_alive` | String | `0s` | Server-side keep-alive time.<br/>Set to 0 (default) to disable. |
| `postgres.tls` | -- | -- | PostgresSQL server TLS options, see `mysql.tls` section. |
| `postgres.tls.mode` | String | `disable` | TLS mode. |
| `postgres.tls.cert_path` | String | Unset | Certificate file path. |
diff --git a/config/frontend.example.toml b/config/frontend.example.toml
index 741eaec500e4..addea0454a05 100644
--- a/config/frontend.example.toml
+++ b/config/frontend.example.toml
@@ -74,6 +74,9 @@ enable = true
addr = "127.0.0.1:4002"
## The number of server worker threads.
runtime_size = 2
+## Server-side keep-alive time.
+## Set to 0 (default) to disable.
+keep_alive = "0s"
# MySQL server TLS options.
[mysql.tls]
@@ -105,6 +108,9 @@ enable = true
addr = "127.0.0.1:4003"
## The number of server worker threads.
runtime_size = 2
+## Server-side keep-alive time.
+## Set to 0 (default) to disable.
+keep_alive = "0s"
## PostgresSQL server TLS options, see `mysql.tls` section.
[postgres.tls]
diff --git a/config/standalone.example.toml b/config/standalone.example.toml
index d751f23a7104..005ff282f6e5 100644
--- a/config/standalone.example.toml
+++ b/config/standalone.example.toml
@@ -78,6 +78,9 @@ enable = true
addr = "127.0.0.1:4002"
## The number of server worker threads.
runtime_size = 2
+## Server-side keep-alive time.
+## Set to 0 (default) to disable.
+keep_alive = "0s"
# MySQL server TLS options.
[mysql.tls]
@@ -109,6 +112,9 @@ enable = true
addr = "127.0.0.1:4003"
## The number of server worker threads.
runtime_size = 2
+## Server-side keep-alive time.
+## Set to 0 (default) to disable.
+keep_alive = "0s"
## PostgresSQL server TLS options, see `mysql.tls` section.
[postgres.tls]
diff --git a/src/frontend/src/server.rs b/src/frontend/src/server.rs
index 298c48c73c84..cb3284c9f860 100644
--- a/src/frontend/src/server.rs
+++ b/src/frontend/src/server.rs
@@ -227,6 +227,7 @@ where
Arc::new(MysqlSpawnConfig::new(
opts.tls.should_force_tls(),
tls_server_config,
+ opts.keep_alive.as_secs(),
opts.reject_no_database.unwrap_or(false),
)),
);
@@ -248,6 +249,7 @@ where
ServerSqlQueryHandlerAdapter::arc(instance.clone()),
opts.tls.should_force_tls(),
tls_server_config,
+ opts.keep_alive.as_secs(),
common_runtime::global_runtime(),
user_provider.clone(),
)) as Box<dyn Server>;
diff --git a/src/frontend/src/service_config/mysql.rs b/src/frontend/src/service_config/mysql.rs
index 623cedd149b4..20753554fff2 100644
--- a/src/frontend/src/service_config/mysql.rs
+++ b/src/frontend/src/service_config/mysql.rs
@@ -23,6 +23,12 @@ pub struct MysqlOptions {
#[serde(default = "Default::default")]
pub tls: TlsOption,
pub reject_no_database: Option<bool>,
+ /// Server-side keep-alive time.
+ ///
+ /// Set to 0 (default) to disable.
+ #[serde(default = "Default::default")]
+ #[serde(with = "humantime_serde")]
+ pub keep_alive: std::time::Duration,
}
impl Default for MysqlOptions {
@@ -33,6 +39,7 @@ impl Default for MysqlOptions {
runtime_size: 2,
tls: TlsOption::default(),
reject_no_database: None,
+ keep_alive: std::time::Duration::from_secs(0),
}
}
}
diff --git a/src/frontend/src/service_config/postgres.rs b/src/frontend/src/service_config/postgres.rs
index e27f2b7bcb91..015c968386c5 100644
--- a/src/frontend/src/service_config/postgres.rs
+++ b/src/frontend/src/service_config/postgres.rs
@@ -22,6 +22,12 @@ pub struct PostgresOptions {
pub runtime_size: usize,
#[serde(default = "Default::default")]
pub tls: TlsOption,
+ /// Server-side keep-alive time.
+ ///
+ /// Set to 0 (default) to disable.
+ #[serde(default = "Default::default")]
+ #[serde(with = "humantime_serde")]
+ pub keep_alive: std::time::Duration,
}
impl Default for PostgresOptions {
@@ -31,6 +37,7 @@ impl Default for PostgresOptions {
addr: "127.0.0.1:4003".to_string(),
runtime_size: 2,
tls: Default::default(),
+ keep_alive: std::time::Duration::from_secs(0),
}
}
}
diff --git a/src/servers/Cargo.toml b/src/servers/Cargo.toml
index 450a8529a08d..514c99f80ebf 100644
--- a/src/servers/Cargo.toml
+++ b/src/servers/Cargo.toml
@@ -76,6 +76,7 @@ notify.workspace = true
object-pool = "0.5"
once_cell.workspace = true
openmetrics-parser = "0.4"
+socket2 = "0.5"
# use crates.io version after current revision is merged in next release
# opensrv-mysql = "0.7.0"
opensrv-mysql = { git = "https://github.com/datafuselabs/opensrv", rev = "6bbc3b65e6b19212c4f7fc4f40c20daf6f452deb" }
diff --git a/src/servers/src/mysql/server.rs b/src/servers/src/mysql/server.rs
index dae01b3f1a41..12b9c689a1d2 100644
--- a/src/servers/src/mysql/server.rs
+++ b/src/servers/src/mysql/server.rs
@@ -72,6 +72,8 @@ pub struct MysqlSpawnConfig {
// tls config
force_tls: bool,
tls: Arc<ReloadableTlsServerConfig>,
+ // keep-alive config
+ keep_alive_secs: u64,
// other shim config
reject_no_database: bool,
}
@@ -80,11 +82,13 @@ impl MysqlSpawnConfig {
pub fn new(
force_tls: bool,
tls: Arc<ReloadableTlsServerConfig>,
+ keep_alive_secs: u64,
reject_no_database: bool,
) -> MysqlSpawnConfig {
MysqlSpawnConfig {
force_tls,
tls,
+ keep_alive_secs,
reject_no_database,
}
}
@@ -218,7 +222,10 @@ impl Server for MysqlServer {
}
async fn start(&self, listening: SocketAddr) -> Result<SocketAddr> {
- let (stream, addr) = self.base_server.bind(listening).await?;
+ let (stream, addr) = self
+ .base_server
+ .bind(listening, self.spawn_config.keep_alive_secs)
+ .await?;
let io_runtime = self.base_server.io_runtime();
let join_handle = common_runtime::spawn_global(self.accept(io_runtime, stream));
diff --git a/src/servers/src/postgres/server.rs b/src/servers/src/postgres/server.rs
index 70f74a32ece3..4ffb3ce2c01c 100644
--- a/src/servers/src/postgres/server.rs
+++ b/src/servers/src/postgres/server.rs
@@ -35,6 +35,7 @@ pub struct PostgresServer {
base_server: BaseTcpServer,
make_handler: Arc<MakePostgresServerHandler>,
tls_server_config: Arc<ReloadableTlsServerConfig>,
+ keep_alive_secs: u64,
}
impl PostgresServer {
@@ -43,6 +44,7 @@ impl PostgresServer {
query_handler: ServerSqlQueryHandlerRef,
force_tls: bool,
tls_server_config: Arc<ReloadableTlsServerConfig>,
+ keep_alive_secs: u64,
io_runtime: Runtime,
user_provider: Option<UserProviderRef>,
) -> PostgresServer {
@@ -58,6 +60,7 @@ impl PostgresServer {
base_server: BaseTcpServer::create_server("Postgres", io_runtime),
make_handler,
tls_server_config,
+ keep_alive_secs,
}
}
@@ -116,7 +119,10 @@ impl Server for PostgresServer {
}
async fn start(&self, listening: SocketAddr) -> Result<SocketAddr> {
- let (stream, addr) = self.base_server.bind(listening).await?;
+ let (stream, addr) = self
+ .base_server
+ .bind(listening, self.keep_alive_secs)
+ .await?;
let io_runtime = self.base_server.io_runtime();
let join_handle = common_runtime::spawn_global(self.accept(io_runtime, stream));
diff --git a/src/servers/src/server.rs b/src/servers/src/server.rs
index 5d93c7e8af91..35a5c618594a 100644
--- a/src/servers/src/server.rs
+++ b/src/servers/src/server.rs
@@ -144,6 +144,7 @@ impl AcceptTask {
&mut self,
addr: SocketAddr,
name: &str,
+ keep_alive_secs: u64,
) -> Result<(Abortable<TcpListenerStream>, SocketAddr)> {
match self.abort_registration.take() {
Some(registration) => {
@@ -157,6 +158,15 @@ impl AcceptTask {
let addr = listener.local_addr()?;
info!("{name} server started at {addr}");
+ // set keep-alive
+ if keep_alive_secs > 0 {
+ let socket_ref = socket2::SockRef::from(&listener);
+ let keep_alive = socket2::TcpKeepalive::new()
+ .with_time(std::time::Duration::from_secs(keep_alive_secs))
+ .with_interval(std::time::Duration::from_secs(keep_alive_secs));
+ socket_ref.set_tcp_keepalive(&keep_alive)?;
+ }
+
let stream = TcpListenerStream::new(listener);
let stream = Abortable::new(stream, registration);
Ok((stream, addr))
@@ -205,12 +215,16 @@ impl BaseTcpServer {
task.shutdown(&self.name).await
}
+ /// Bind the server to the given address and set the keep-alive time.
+ ///
+ /// If `keep_alive_secs` is 0, the keep-alive will not be set.
pub(crate) async fn bind(
&self,
addr: SocketAddr,
+ keep_alive_secs: u64,
) -> Result<(Abortable<TcpListenerStream>, SocketAddr)> {
let mut task = self.accept_task.lock().await;
- task.bind(addr, &self.name).await
+ task.bind(addr, &self.name, keep_alive_secs).await
}
pub(crate) async fn start_with(&self, join_handle: JoinHandle<()>) -> Result<()> {
diff --git a/src/servers/tests/mysql/mysql_server_test.rs b/src/servers/tests/mysql/mysql_server_test.rs
index a9f7f8309aa8..cb800e4e4d01 100644
--- a/src/servers/tests/mysql/mysql_server_test.rs
+++ b/src/servers/tests/mysql/mysql_server_test.rs
@@ -71,6 +71,7 @@ fn create_mysql_server(table: TableRef, opts: MysqlOpts<'_>) -> Result<Box<dyn S
Arc::new(MysqlSpawnConfig::new(
opts.tls.should_force_tls(),
tls_server_config,
+ 0,
opts.reject_no_database,
)),
))
diff --git a/src/servers/tests/postgres/mod.rs b/src/servers/tests/postgres/mod.rs
index 6ff659fec9c6..9e2b930ab39d 100644
--- a/src/servers/tests/postgres/mod.rs
+++ b/src/servers/tests/postgres/mod.rs
@@ -69,6 +69,7 @@ fn create_postgres_server(
instance,
tls.should_force_tls(),
tls_server_config,
+ 0,
io_runtime,
user_provider,
)))
diff --git a/tests-integration/src/test_util.rs b/tests-integration/src/test_util.rs
index 1e848e596444..2118bb6a285a 100644
--- a/tests-integration/src/test_util.rs
+++ b/tests-integration/src/test_util.rs
@@ -589,6 +589,7 @@ pub async fn setup_mysql_server_with_user_provider(
ReloadableTlsServerConfig::try_new(opts.tls.clone())
.expect("Failed to load certificates and keys"),
),
+ 0,
opts.reject_no_database.unwrap_or(false),
)),
));
@@ -641,6 +642,7 @@ pub async fn setup_pg_server_with_user_provider(
ServerSqlQueryHandlerAdapter::arc(fe_instance_ref),
opts.tls.should_force_tls(),
tls_server_config,
+ 0,
runtime,
user_provider,
)) as Box<dyn Server>);
diff --git a/tests-integration/tests/http.rs b/tests-integration/tests/http.rs
index 75ea27b0cd0a..41867295198f 100644
--- a/tests-integration/tests/http.rs
+++ b/tests-integration/tests/http.rs
@@ -923,6 +923,7 @@ watch = false
enable = true
addr = "127.0.0.1:4002"
runtime_size = 2
+keep_alive = "0s"
[mysql.tls]
mode = "disable"
@@ -934,6 +935,7 @@ watch = false
enable = true
addr = "127.0.0.1:4003"
runtime_size = 2
+keep_alive = "0s"
[postgres.tls]
mode = "disable"
|
feat
|
support server-side keep-alive for mysql and pg protocols (#5496)
|
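For reference, the keep-alive plumbing in the diff above reduces to a single socket2 call on the bound listener. Below is a minimal standalone sketch of that call sequence against a plain `std::net::TcpListener` (the project applies it to its tokio listener inside `AcceptTask::bind`); the `set_keep_alive` helper name is illustrative only, and the `socket2` crate is assumed as a dependency.

```rust
use std::time::Duration;

// Illustrative helper: apply the same TCP keep-alive settings the diff sets
// in AcceptTask::bind. A value of 0 means "leave the OS defaults untouched".
fn set_keep_alive(listener: &std::net::TcpListener, keep_alive_secs: u64) -> std::io::Result<()> {
    if keep_alive_secs == 0 {
        return Ok(());
    }
    let sock_ref = socket2::SockRef::from(listener);
    let keep_alive = socket2::TcpKeepalive::new()
        .with_time(Duration::from_secs(keep_alive_secs))
        .with_interval(Duration::from_secs(keep_alive_secs));
    sock_ref.set_tcp_keepalive(&keep_alive)
}
```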
6599bb5a4610412992657991b7915c0b6cc32854
|
2023-11-13 14:34:44
|
Ruihang Xia
|
feat: do not require worker's number to be power of 2 (#2732)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 3cb68234a259..f66dc5de72c9 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4880,6 +4880,7 @@ dependencies = [
"log-store",
"memcomparable",
"moka",
+ "num_cpus",
"object-store",
"parquet",
"paste",
@@ -8250,7 +8251,6 @@ dependencies = [
"lazy_static",
"mime_guess",
"mysql_async",
- "num_cpus",
"once_cell",
"openmetrics-parser",
"opensrv-mysql",
@@ -9282,6 +9282,7 @@ dependencies = [
"itertools 0.10.5",
"meta-client",
"meta-srv",
+ "num_cpus",
"object-store",
"once_cell",
"opentelemetry-proto",
diff --git a/src/mito2/Cargo.toml b/src/mito2/Cargo.toml
index 46c6ce796730..58477ee30f01 100644
--- a/src/mito2/Cargo.toml
+++ b/src/mito2/Cargo.toml
@@ -42,6 +42,7 @@ lazy_static = "1.4"
log-store = { workspace = true, optional = true }
memcomparable = "0.2"
moka = { workspace = true, features = ["sync"] }
+num_cpus = "1.13"
object-store.workspace = true
parquet = { workspace = true, features = ["async"] }
paste.workspace = true
diff --git a/src/mito2/src/config.rs b/src/mito2/src/config.rs
index 5350c2c8e1a0..66d688a39f53 100644
--- a/src/mito2/src/config.rs
+++ b/src/mito2/src/config.rs
@@ -21,8 +21,6 @@ use common_datasource::compression::CompressionType;
use common_telemetry::warn;
use serde::{Deserialize, Serialize};
-/// Default region worker num.
-const DEFAULT_NUM_WORKERS: usize = 1;
/// Default max running background job.
const DEFAULT_MAX_BG_JOB: usize = 4;
@@ -72,7 +70,7 @@ pub struct MitoConfig {
impl Default for MitoConfig {
fn default() -> Self {
MitoConfig {
- num_workers: DEFAULT_NUM_WORKERS,
+ num_workers: num_cpus::get() / 2,
worker_channel_size: 128,
worker_request_batch_size: 64,
manifest_checkpoint_distance: 10,
@@ -94,9 +92,8 @@ impl MitoConfig {
// Sanitize worker num.
let num_workers_before = self.num_workers;
if self.num_workers == 0 {
- self.num_workers = DEFAULT_NUM_WORKERS;
+ self.num_workers = num_cpus::get() / 2;
}
- self.num_workers = self.num_workers.next_power_of_two();
if num_workers_before != self.num_workers {
warn!(
"Sanitize worker num {} to {}",
diff --git a/src/mito2/src/worker.rs b/src/mito2/src/worker.rs
index bc2463f224fe..ebff32be789d 100644
--- a/src/mito2/src/worker.rs
+++ b/src/mito2/src/worker.rs
@@ -114,7 +114,6 @@ impl WorkerGroup {
log_store: Arc<S>,
object_store_manager: ObjectStoreManagerRef,
) -> WorkerGroup {
- assert!(config.num_workers.is_power_of_two());
let config = Arc::new(config);
let write_buffer_manager = Arc::new(WriteBufferManagerImpl::new(
config.global_write_buffer_size.as_bytes() as usize,
@@ -210,7 +209,6 @@ impl WorkerGroup {
write_buffer_manager: Option<WriteBufferManagerRef>,
listener: Option<crate::engine::listener::EventListenerRef>,
) -> WorkerGroup {
- assert!(config.num_workers.is_power_of_two());
let config = Arc::new(config);
let write_buffer_manager = write_buffer_manager.unwrap_or_else(|| {
Arc::new(WriteBufferManagerImpl::new(
@@ -248,7 +246,7 @@ impl WorkerGroup {
}
fn value_to_index(value: usize, num_workers: usize) -> usize {
- value & (num_workers - 1)
+ value % num_workers
}
/// Worker start config.
diff --git a/src/servers/Cargo.toml b/src/servers/Cargo.toml
index 8141a278ea69..c3de33629640 100644
--- a/src/servers/Cargo.toml
+++ b/src/servers/Cargo.toml
@@ -52,7 +52,6 @@ influxdb_line_protocol = { git = "https://github.com/evenyag/influxdb_iox", bran
itertools.workspace = true
lazy_static.workspace = true
mime_guess = "2.0"
-num_cpus = "1.13"
once_cell.workspace = true
openmetrics-parser = "0.4"
opensrv-mysql = "0.4"
diff --git a/tests-integration/Cargo.toml b/tests-integration/Cargo.toml
index 170328800e70..4c09640de424 100644
--- a/tests-integration/Cargo.toml
+++ b/tests-integration/Cargo.toml
@@ -68,6 +68,7 @@ uuid.workspace = true
datafusion-expr.workspace = true
datafusion.workspace = true
itertools.workspace = true
+num_cpus = "1.13"
opentelemetry-proto.workspace = true
partition.workspace = true
paste.workspace = true
diff --git a/tests-integration/tests/http.rs b/tests-integration/tests/http.rs
index 6a54707a1ad3..574334de00d7 100644
--- a/tests-integration/tests/http.rs
+++ b/tests-integration/tests/http.rs
@@ -732,7 +732,7 @@ auto_flush_interval = "1h"
[[datanode.region_engine]]
[datanode.region_engine.mito]
-num_workers = 1
+num_workers = {}
worker_channel_size = 128
worker_request_batch_size = 64
manifest_checkpoint_distance = 10
@@ -754,7 +754,8 @@ enable_jaeger_tracing = false
[logging]
enable_jaeger_tracing = false"#,
- store_type
+ store_type,
+ num_cpus::get() / 2
);
let body_text = drop_lines_with_inconsistent_results(res_get.text().await);
assert_eq!(body_text, expected_toml_str);
|
feat
|
do not require worker's number to be power of 2 (#2732)
|
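One detail worth spelling out from the worker changes above: `value & (num_workers - 1)` equals `value % num_workers` only when the worker count is a power of two, which is why dropping the power-of-two assertion goes hand in hand with switching `value_to_index` to plain modulo. A tiny standalone check, illustrative only:

```rust
fn main() {
    for num_workers in [3usize, 4, 6, 8] {
        for value in 0..32usize {
            let modulo = value % num_workers;
            let bitmask = value & (num_workers - 1);
            if num_workers.is_power_of_two() {
                // Only in this case is the old bitmask routing equivalent.
                assert_eq!(modulo, bitmask);
            }
        }
    }
    // Counterexample with 6 workers: request 9 maps to worker 3 with modulo,
    // but the old bitmask would have sent it to worker 9 & 5 = 1.
    assert_eq!(9 % 6, 3);
    assert_eq!(9 & 5, 1);
}
```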
45e68603a11ec1f6ea0c8c1cc6d7fe394ab0c81c
|
2025-01-23 17:13:10
|
zyy17
|
ci: update dev-builder version (#5435)
| false
|
diff --git a/Makefile b/Makefile
index f029f78d858e..2ce92fc318ae 100644
--- a/Makefile
+++ b/Makefile
@@ -8,7 +8,7 @@ CARGO_BUILD_OPTS := --locked
IMAGE_REGISTRY ?= docker.io
IMAGE_NAMESPACE ?= greptime
IMAGE_TAG ?= latest
-DEV_BUILDER_IMAGE_TAG ?= 2024-10-19-a5c00e85-20241024184445
+DEV_BUILDER_IMAGE_TAG ?= 2024-10-19-d9a5518c-20250123102018
BUILDX_MULTI_PLATFORM_BUILD ?= false
BUILDX_BUILDER_NAME ?= gtbuilder
BASE_IMAGE ?= ubuntu
|
ci
|
update dev-builder version (#5435)
|
7e68ecc498a7b4428a3639f069312f5f21ff9ef1
|
2023-12-01 12:22:43
|
Yingwen
|
feat: do not concat batches in MergeReader (#2833)
| false
|
diff --git a/src/mito2/src/read/merge.rs b/src/mito2/src/read/merge.rs
index ad1f51cf1dc3..e152956714f8 100644
--- a/src/mito2/src/read/merge.rs
+++ b/src/mito2/src/read/merge.rs
@@ -28,9 +28,6 @@ use crate::memtable::BoxedBatchIterator;
use crate::metrics::{MERGE_FILTER_ROWS_TOTAL, READ_STAGE_ELAPSED};
use crate::read::{Batch, BatchReader, BoxedBatchReader, Source};
-/// Minimum batch size to output.
-const MIN_BATCH_SIZE: usize = 64;
-
/// Reader to merge sorted batches.
///
/// The merge reader merges [Batch]es from multiple sources that yield sorted batches.
@@ -49,11 +46,8 @@ pub struct MergeReader {
///
/// `Node` in this heap **must** not be empty.
cold: BinaryHeap<Node>,
- /// Batches to output.
- batch_merger: BatchMerger,
- /// Suggested size of each batch. The batch returned by the reader can have more rows than the
- /// batch size.
- batch_size: usize,
+ /// Batch to output.
+ output_batch: Option<Batch>,
/// Local metrics.
metrics: Metrics,
}
@@ -62,15 +56,7 @@ pub struct MergeReader {
impl BatchReader for MergeReader {
async fn next_batch(&mut self) -> Result<Option<Batch>> {
let start = Instant::now();
- while !self.hot.is_empty() && self.batch_merger.num_rows() < self.batch_size {
- if let Some(current_key) = self.batch_merger.primary_key() {
- // If the hottest node has a different key, we have finish collecting current key.
- // Safety: hot is not empty.
- if self.hot.peek().unwrap().primary_key() != current_key {
- break;
- }
- }
-
+ while !self.hot.is_empty() && self.output_batch.is_none() {
if self.hot.len() == 1 {
// No need to do merge sort if only one batch in the hot heap.
self.fetch_batch_from_hottest().await?;
@@ -82,17 +68,14 @@ impl BatchReader for MergeReader {
}
}
- if self.batch_merger.is_empty() {
- // Nothing fetched.
+ if let Some(batch) = self.output_batch.take() {
self.metrics.scan_cost += start.elapsed();
- // Update deleted rows num.
- self.metrics.num_deleted_rows = self.batch_merger.num_deleted_rows();
- Ok(None)
+ self.metrics.num_output_rows += batch.num_rows();
+ Ok(Some(batch))
} else {
- let batch = self.batch_merger.merge_batches()?;
+ // Nothing fetched.
self.metrics.scan_cost += start.elapsed();
- self.metrics.num_output_rows += batch.as_ref().map(|b| b.num_rows()).unwrap_or(0);
- Ok(batch)
+ Ok(None)
}
}
}
@@ -115,7 +98,7 @@ impl Drop for MergeReader {
impl MergeReader {
/// Creates and initializes a new [MergeReader].
- pub async fn new(sources: Vec<Source>, batch_size: usize) -> Result<MergeReader> {
+ pub async fn new(sources: Vec<Source>) -> Result<MergeReader> {
let start = Instant::now();
let mut metrics = Metrics::default();
@@ -132,8 +115,7 @@ impl MergeReader {
let mut reader = MergeReader {
hot,
cold,
- batch_merger: BatchMerger::new(),
- batch_size,
+ output_batch: None,
metrics,
};
// Initializes the reader.
@@ -168,7 +150,7 @@ impl MergeReader {
let mut hottest = self.hot.pop().unwrap();
let batch = hottest.fetch_batch(&mut self.metrics).await?;
- self.batch_merger.push(batch)?;
+ Self::maybe_output_batch(batch, &mut self.output_batch, &mut self.metrics)?;
self.reheap(hottest)
}
@@ -199,7 +181,11 @@ impl MergeReader {
// They have duplicate timestamps. Outputs timestamps before the duplicated timestamp.
// Batch itself doesn't contain duplicate timestamps so timestamps before `pos`
// must be less than `next_min_ts`.
- self.batch_merger.push(top.slice(0, pos))?;
+ Self::maybe_output_batch(
+ top.slice(0, pos),
+ &mut self.output_batch,
+ &mut self.metrics,
+ )?;
// This keep the duplicate timestamp in the node.
top_node.skip_rows(pos, &mut self.metrics).await?;
// The merge window should contain this timestamp so only nodes in the hot heap
@@ -209,7 +195,11 @@ impl MergeReader {
}
Err(pos) => {
// No duplicate timestamp. Outputs timestamp before `pos`.
- self.batch_merger.push(top.slice(0, pos))?;
+ Self::maybe_output_batch(
+ top.slice(0, pos),
+ &mut self.output_batch,
+ &mut self.metrics,
+ )?;
top_node.skip_rows(pos, &mut self.metrics).await?;
self.reheap(top_node)?;
}
@@ -300,16 +290,37 @@ impl MergeReader {
Ok(())
}
+
+    /// Removes deleted entries and sets the `batch` to the `output_batch`.
+ ///
+ /// Ignores the `batch` if it is empty.
+ fn maybe_output_batch(
+ mut batch: Batch,
+ output_batch: &mut Option<Batch>,
+ metrics: &mut Metrics,
+ ) -> Result<()> {
+ debug_assert!(output_batch.is_none());
+
+ let num_rows = batch.num_rows();
+ batch.filter_deleted()?;
+ // Update deleted rows metrics.
+ metrics.num_deleted_rows += num_rows - batch.num_rows();
+ if batch.is_empty() {
+ return Ok(());
+ }
+ *output_batch = Some(batch);
+
+ Ok(())
+ }
}
/// Builder to build and initialize a [MergeReader].
+#[derive(Default)]
pub struct MergeReaderBuilder {
/// Input sources.
///
/// All source must yield batches with the same schema.
sources: Vec<Source>,
- /// Batch size of the reader.
- batch_size: usize,
}
impl MergeReaderBuilder {
@@ -330,25 +341,10 @@ impl MergeReaderBuilder {
self
}
- /// Sets the batch size of the reader.
- pub fn batch_size(&mut self, size: usize) -> &mut Self {
- self.batch_size = if size == 0 { MIN_BATCH_SIZE } else { size };
- self
- }
-
/// Builds and initializes the reader, then resets the builder.
pub async fn build(&mut self) -> Result<MergeReader> {
let sources = mem::take(&mut self.sources);
- MergeReader::new(sources, self.batch_size).await
- }
-}
-
-impl Default for MergeReaderBuilder {
- fn default() -> Self {
- MergeReaderBuilder {
- sources: Vec::new(),
- batch_size: MIN_BATCH_SIZE,
- }
+ MergeReader::new(sources).await
}
}
@@ -371,89 +367,6 @@ struct Metrics {
num_deleted_rows: usize,
}
-/// Helper to collect and merge small batches for same primary key.
-struct BatchMerger {
- /// Buffered non-empty batches to merge.
- batches: Vec<Batch>,
- /// Number of rows in the batch.
- num_rows: usize,
- /// Number of rows deleted.
- num_deleted_rows: usize,
-}
-
-impl BatchMerger {
- /// Returns a empty merger.
- fn new() -> BatchMerger {
- BatchMerger {
- batches: Vec::new(),
- num_rows: 0,
- num_deleted_rows: 0,
- }
- }
-
- /// Returns the number of rows.
- fn num_rows(&self) -> usize {
- self.num_rows
- }
-
- /// Returns the number of rows deleted.
- fn num_deleted_rows(&self) -> usize {
- self.num_deleted_rows
- }
-
- /// Returns true if the merger is empty.
- fn is_empty(&self) -> bool {
- self.num_rows() == 0
- }
-
- /// Returns the primary key of current merger and `None` if the merger is empty.
- fn primary_key(&self) -> Option<&[u8]> {
- self.batches.first().map(|batch| batch.primary_key())
- }
-
- /// Removeds deleted entries and pushes a `batch` into the merger.
- ///
- /// Ignores the `batch` if it is empty.
- ///
- /// # Panics
- /// Panics if the `batch` has another primary key.
- fn push(&mut self, mut batch: Batch) -> Result<()> {
- debug_assert!(self
- .batches
- .last()
- .map(|b| b.primary_key() == batch.primary_key())
- .unwrap_or(true));
-
- let num_rows = batch.num_rows();
- batch.filter_deleted()?;
- self.num_deleted_rows += num_rows - batch.num_rows();
- if batch.is_empty() {
- return Ok(());
- }
-
- self.num_rows += batch.num_rows();
- self.batches.push(batch);
-
- Ok(())
- }
-
- /// Merge all buffered batches and returns the merged batch. Then
- /// reset the buffer.
- fn merge_batches(&mut self) -> Result<Option<Batch>> {
- if self.batches.is_empty() {
- return Ok(None);
- }
-
- // Reset number of rows.
- self.num_rows = 0;
- if self.batches.len() == 1 {
- return Ok(self.batches.pop());
- }
- let batches = mem::take(&mut self.batches);
- Batch::concat(batches).map(Some)
- }
-}
-
/// A `Node` represent an individual input data source to be merged.
struct Node {
/// Data source of this `Node`.
@@ -669,17 +582,19 @@ mod tests {
&[
new_batch(
b"k1",
- &[1, 2, 4, 5, 7],
- &[11, 12, 14, 15, 17],
- &[
- OpType::Put,
- OpType::Put,
- OpType::Put,
- OpType::Put,
- OpType::Put,
- ],
- &[21, 22, 24, 25, 27],
+ &[1, 2],
+ &[11, 12],
+ &[OpType::Put, OpType::Put],
+ &[21, 22],
),
+ new_batch(
+ b"k1",
+ &[4, 5],
+ &[14, 15],
+ &[OpType::Put, OpType::Put],
+ &[24, 25],
+ ),
+ new_batch(b"k1", &[7], &[17], &[OpType::Put], &[27]),
new_batch(b"k2", &[3], &[13], &[OpType::Put], &[23]),
],
)
@@ -718,13 +633,10 @@ mod tests {
check_reader_result(
&mut reader,
&[
- new_batch(
- b"k1",
- &[1, 2, 3, 4],
- &[10, 11, 10, 11],
- &[OpType::Put, OpType::Put, OpType::Put, OpType::Put],
- &[21, 32, 23, 34],
- ),
+ new_batch(b"k1", &[1], &[10], &[OpType::Put], &[21]),
+ new_batch(b"k1", &[2], &[11], &[OpType::Put], &[32]),
+ new_batch(b"k1", &[3], &[10], &[OpType::Put], &[23]),
+ new_batch(b"k1", &[4], &[11], &[OpType::Put], &[34]),
new_batch(b"k2", &[3], &[10], &[OpType::Put], &[23]),
],
)
@@ -785,18 +697,16 @@ mod tests {
&[
new_batch(
b"k1",
- &[1, 2, 3, 4],
- &[11, 12, 10, 14],
- &[OpType::Put, OpType::Put, OpType::Put, OpType::Put],
- &[21, 22, 33, 24],
- ),
- new_batch(
- b"k2",
- &[1, 3, 10],
- &[11, 13, 20],
- &[OpType::Put, OpType::Put, OpType::Put],
- &[21, 23, 30],
+ &[1, 2],
+ &[11, 12],
+ &[OpType::Put, OpType::Put],
+ &[21, 22],
),
+ new_batch(b"k1", &[3], &[10], &[OpType::Put], &[33]),
+ new_batch(b"k1", &[4], &[14], &[OpType::Put], &[24]),
+ new_batch(b"k2", &[1], &[11], &[OpType::Put], &[21]),
+ new_batch(b"k2", &[3], &[13], &[OpType::Put], &[23]),
+ new_batch(b"k2", &[10], &[20], &[OpType::Put], &[30]),
],
)
.await;
@@ -900,13 +810,16 @@ mod tests {
.unwrap();
check_reader_result(
&mut reader,
- &[new_batch(
- b"k1",
- &[1, 2, 3],
- &[10, 11, 11],
- &[OpType::Put, OpType::Put, OpType::Put],
- &[21, 32, 33],
- )],
+ &[
+ new_batch(b"k1", &[1], &[10], &[OpType::Put], &[21]),
+ new_batch(
+ b"k1",
+ &[2, 3],
+ &[11, 11],
+ &[OpType::Put, OpType::Put],
+ &[32, 33],
+ ),
+ ],
)
.await;
}
@@ -945,19 +858,18 @@ mod tests {
.unwrap();
check_reader_result(
&mut reader,
- &[new_batch(
- b"k1",
- &[1, 6, 8, 10, 20],
- &[11, 11, 11, 10, 11],
- &[
- OpType::Put,
- OpType::Put,
- OpType::Put,
- OpType::Put,
- OpType::Put,
- ],
- &[31, 36, 38, 30, 40],
- )],
+ &[
+ new_batch(b"k1", &[1], &[11], &[OpType::Put], &[31]),
+ new_batch(
+ b"k1",
+ &[6, 8],
+ &[11, 11],
+ &[OpType::Put, OpType::Put],
+ &[36, 38],
+ ),
+ new_batch(b"k1", &[10], &[10], &[OpType::Put], &[30]),
+ new_batch(b"k1", &[20], &[11], &[OpType::Put], &[40]),
+ ],
)
.await;
}
@@ -965,7 +877,6 @@ mod tests {
#[tokio::test]
async fn test_merge_many_duplicates() {
let mut builder = MergeReaderBuilder::new();
- builder.batch_size(3);
for i in 0..10 {
let batches: Vec<_> = (0..8)
.map(|ts| new_batch(b"k1", &[ts], &[i], &[OpType::Put], &[100]))
@@ -974,184 +885,9 @@ mod tests {
builder.push_batch_reader(Box::new(reader));
}
let mut reader = builder.build().await.unwrap();
- check_reader_result(
- &mut reader,
- &[
- new_batch(
- b"k1",
- &[0, 1, 2],
- &[9, 9, 9],
- &[OpType::Put, OpType::Put, OpType::Put],
- &[100, 100, 100],
- ),
- new_batch(
- b"k1",
- &[3, 4, 5],
- &[9, 9, 9],
- &[OpType::Put, OpType::Put, OpType::Put],
- &[100, 100, 100],
- ),
- new_batch(
- b"k1",
- &[6, 7],
- &[9, 9],
- &[OpType::Put, OpType::Put],
- &[100, 100],
- ),
- ],
- )
- .await;
- }
-
- #[tokio::test]
- async fn test_merge_more_than_batch_size() {
- let batches: Vec<_> = (0..MIN_BATCH_SIZE as i64 * 2)
- .map(|ts| new_batch(b"k1", &[ts], &[10], &[OpType::Put], &[100]))
+ let expect: Vec<_> = (0..8)
+ .map(|ts| new_batch(b"k1", &[ts], &[9], &[OpType::Put], &[100]))
.collect();
- let reader = VecBatchReader::new(&batches);
- let mut reader = MergeReaderBuilder::new()
- .push_batch_reader(Box::new(reader))
- // Still use the default batch size.
- .batch_size(0)
- .build()
- .await
- .unwrap();
- let ts1: Vec<_> = (0..MIN_BATCH_SIZE as i64).collect();
- let ts2: Vec<_> = (MIN_BATCH_SIZE as i64..MIN_BATCH_SIZE as i64 * 2).collect();
- let seqs = vec![10; MIN_BATCH_SIZE];
- let op_types = vec![OpType::Put; MIN_BATCH_SIZE];
- let fields = vec![100; MIN_BATCH_SIZE];
- check_reader_result(
- &mut reader,
- &[
- new_batch(b"k1", &ts1, &seqs, &op_types, &fields),
- new_batch(b"k1", &ts2, &seqs, &op_types, &fields),
- ],
- )
- .await;
- }
-
- #[tokio::test]
- async fn test_merge_more_than_batch_size_overlapping() {
- let reader1 = VecBatchReader::new(&[new_batch(
- b"k1",
- &[1, 2, 3, 4, 5, 6, 7, 8, 9],
- &[11, 10, 11, 10, 11, 10, 11, 10, 11],
- &[
- OpType::Put,
- OpType::Put,
- OpType::Put,
- OpType::Put,
- OpType::Put,
- OpType::Put,
- OpType::Put,
- OpType::Put,
- OpType::Put,
- ],
- &[21, 22, 23, 24, 25, 26, 27, 28, 29],
- )]);
- let reader2 = VecBatchReader::new(&[new_batch(
- b"k1",
- &[1, 2, 3, 4, 5, 6, 7, 8, 9],
- &[10, 11, 10, 11, 10, 11, 10, 11, 10],
- &[
- OpType::Put,
- OpType::Put,
- OpType::Put,
- OpType::Put,
- OpType::Put,
- OpType::Put,
- OpType::Put,
- OpType::Put,
- OpType::Put,
- ],
- &[31, 32, 33, 34, 35, 36, 37, 38, 39],
- )]);
- let mut reader = MergeReaderBuilder::new()
- .push_batch_iter(Box::new(reader1))
- .push_batch_reader(Box::new(reader2))
- .batch_size(3)
- .build()
- .await
- .unwrap();
- check_reader_result(
- &mut reader,
- &[
- new_batch(
- b"k1",
- &[1, 2, 3],
- &[11, 11, 11],
- &[OpType::Put, OpType::Put, OpType::Put],
- &[21, 32, 23],
- ),
- new_batch(
- b"k1",
- &[4, 5, 6],
- &[11, 11, 11],
- &[OpType::Put, OpType::Put, OpType::Put],
- &[34, 25, 36],
- ),
- new_batch(
- b"k1",
- &[7, 8, 9],
- &[11, 11, 11],
- &[OpType::Put, OpType::Put, OpType::Put],
- &[27, 38, 29],
- ),
- ],
- )
- .await;
- }
-
- #[test]
- fn test_batch_merger_empty() {
- let mut merger = BatchMerger::new();
- assert!(merger.is_empty());
- assert!(merger.merge_batches().unwrap().is_none());
- assert!(merger.primary_key().is_none());
- }
-
- #[test]
- fn test_merge_one_batch() {
- let mut merger = BatchMerger::new();
- let expect = new_batch(b"k1", &[1], &[10], &[OpType::Put], &[21]);
- merger.push(expect.clone()).unwrap();
- let batch = merger.merge_batches().unwrap().unwrap();
- assert_eq!(1, batch.num_rows());
- assert_eq!(expect, batch,);
- assert!(merger.is_empty());
- }
-
- #[test]
- fn test_merge_batches() {
- let mut merger = BatchMerger::new();
- merger
- .push(new_batch(b"k1", &[1], &[10], &[OpType::Put], &[21]))
- .unwrap();
- assert_eq!(1, merger.num_rows());
- assert!(!merger.is_empty());
- merger
- .push(new_batch(b"k1", &[2], &[10], &[OpType::Put], &[22]))
- .unwrap();
- assert_eq!(2, merger.num_rows());
- merger
- .push(new_batch(b"k1", &[3], &[10], &[OpType::Delete], &[23]))
- .unwrap();
- assert_eq!(2, merger.num_rows());
-
- let batch = merger.merge_batches().unwrap().unwrap();
- assert_eq!(2, batch.num_rows());
- assert_eq!(
- batch,
- new_batch(
- b"k1",
- &[1, 2],
- &[10, 10],
- &[OpType::Put, OpType::Put,],
- &[21, 22]
- )
- );
- assert!(merger.is_empty());
- assert_eq!(1, merger.num_deleted_rows());
+ check_reader_result(&mut reader, &expect).await;
}
}
|
feat
|
do not concat batches in MergeReader (#2833)
|
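The commit above changes what MergeReader hands back (already-filtered batches, output one at a time, instead of batches concatenated up to a suggested size), but the reader is still organized around a heap-driven k-way merge of sorted sources. As a rough illustration of that underlying pattern only — plain integer sources instead of `Batch`es, and none of the hot/cold heap split or deletion filtering — a minimal sketch using the standard library:

```rust
use std::cmp::Reverse;
use std::collections::BinaryHeap;

// Merge already-sorted sources by keeping each source's next value in a
// min-heap (Reverse turns BinaryHeap's max-heap into a min-heap) and
// repeatedly popping the smallest head, then refilling from that source.
fn merge_sorted(sources: Vec<Vec<i64>>) -> Vec<i64> {
    let mut heap = BinaryHeap::new();
    let mut iters: Vec<_> = sources.into_iter().map(|v| v.into_iter()).collect();
    for (idx, it) in iters.iter_mut().enumerate() {
        if let Some(v) = it.next() {
            heap.push(Reverse((v, idx)));
        }
    }
    let mut out = Vec::new();
    while let Some(Reverse((v, idx))) = heap.pop() {
        out.push(v);
        if let Some(next) = iters[idx].next() {
            heap.push(Reverse((next, idx)));
        }
    }
    out
}

fn main() {
    let merged = merge_sorted(vec![vec![1, 4, 7], vec![2, 5], vec![3, 6, 8]]);
    assert_eq!(merged, vec![1, 2, 3, 4, 5, 6, 7, 8]);
}
```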
7310ec0bb31aa5de9af70e57800a9ec1334addc4
|
2023-09-24 07:42:33
|
JeremyHi
|
chore: refactor options (#2476)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 349938a47461..1eaf5b4eaf36 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1612,12 +1612,14 @@ dependencies = [
"datatypes",
"either",
"etcd-client",
+ "file-engine",
"frontend",
"futures",
"lazy_static",
"meta-client",
"meta-srv",
"metrics",
+ "mito2",
"nu-ansi-term",
"partition",
"prost",
diff --git a/config/frontend.example.toml b/config/frontend.example.toml
index 42e6f377a061..c87dc8292c1d 100644
--- a/config/frontend.example.toml
+++ b/config/frontend.example.toml
@@ -8,7 +8,7 @@ interval_millis = 5000
retry_interval_millis = 5000
# HTTP server options, see `standalone.example.toml`.
-[http_options]
+[http]
addr = "127.0.0.1:4000"
timeout = "30s"
body_limit = "64MB"
diff --git a/src/cmd/Cargo.toml b/src/cmd/Cargo.toml
index b07ca011b1e5..c83f65f7488e 100644
--- a/src/cmd/Cargo.toml
+++ b/src/cmd/Cargo.toml
@@ -38,12 +38,14 @@ datanode = { workspace = true }
datatypes = { workspace = true }
either = "1.8"
etcd-client.workspace = true
+file-engine = { workspace = true }
frontend = { workspace = true }
futures.workspace = true
lazy_static.workspace = true
meta-client = { workspace = true }
meta-srv = { workspace = true }
metrics.workspace = true
+mito2 = { workspace = true }
nu-ansi-term = "0.46"
partition = { workspace = true }
prost.workspace = true
diff --git a/src/cmd/src/frontend.rs b/src/cmd/src/frontend.rs
index 365f4ea9447d..61f0bab6a3d8 100644
--- a/src/cmd/src/frontend.rs
+++ b/src/cmd/src/frontend.rs
@@ -383,7 +383,7 @@ mod tests {
Some("11"),
),
(
- // http_options.addr = 127.0.0.1:24000
+ // http.addr = 127.0.0.1:24000
[
env_prefix.to_string(),
"http".to_uppercase(),
diff --git a/src/cmd/src/options.rs b/src/cmd/src/options.rs
index ffb65eb77fc3..caeef4960144 100644
--- a/src/cmd/src/options.rs
+++ b/src/cmd/src/options.rs
@@ -29,11 +29,11 @@ pub const ENV_LIST_SEP: &str = ",";
/// Options mixed up from datanode, frontend and metasrv.
pub struct MixOptions {
pub data_home: String,
- pub procedure_cfg: ProcedureConfig,
- pub kv_store_cfg: KvStoreConfig,
- pub fe_opts: FrontendOptions,
- pub dn_opts: DatanodeOptions,
- pub logging_opts: LoggingOptions,
+ pub procedure: ProcedureConfig,
+ pub kv_store: KvStoreConfig,
+ pub frontend: FrontendOptions,
+ pub datanode: DatanodeOptions,
+ pub logging: LoggingOptions,
}
pub enum Options {
@@ -56,7 +56,7 @@ impl Options {
Options::Datanode(opts) => &opts.logging,
Options::Frontend(opts) => &opts.logging,
Options::Metasrv(opts) => &opts.logging,
- Options::Standalone(opts) => &opts.logging_opts,
+ Options::Standalone(opts) => &opts.logging,
Options::Cli(opts) => opts,
}
}
diff --git a/src/cmd/src/standalone.rs b/src/cmd/src/standalone.rs
index b47e50c847c1..73c298b4b41e 100644
--- a/src/cmd/src/standalone.rs
+++ b/src/cmd/src/standalone.rs
@@ -24,14 +24,16 @@ use common_meta::kv_backend::KvBackendRef;
use common_procedure::ProcedureManagerRef;
use common_telemetry::info;
use common_telemetry::logging::LoggingOptions;
-use datanode::config::{DatanodeOptions, ProcedureConfig, StorageConfig};
+use datanode::config::{DatanodeOptions, ProcedureConfig, RegionEngineConfig, StorageConfig};
use datanode::datanode::{Datanode, DatanodeBuilder};
use datanode::region_server::RegionServer;
+use file_engine::config::EngineConfig as FileEngineConfig;
use frontend::frontend::FrontendOptions;
use frontend::instance::{FrontendInstance, Instance as FeInstance, StandaloneDatanodeManager};
use frontend::service_config::{
GrpcOptions, InfluxdbOptions, MysqlOptions, OpentsdbOptions, PostgresOptions, PromStoreOptions,
};
+use mito2::config::MitoConfig;
use serde::{Deserialize, Serialize};
use servers::http::HttpOptions;
use servers::tls::{TlsMode, TlsOption};
@@ -85,18 +87,20 @@ impl SubCommand {
pub struct StandaloneOptions {
pub mode: Mode,
pub enable_telemetry: bool,
- pub http_options: HttpOptions,
- pub grpc_options: GrpcOptions,
- pub mysql_options: MysqlOptions,
- pub postgres_options: PostgresOptions,
- pub opentsdb_options: OpentsdbOptions,
- pub influxdb_options: InfluxdbOptions,
- pub prom_store_options: PromStoreOptions,
+ pub http: HttpOptions,
+ pub grpc: GrpcOptions,
+ pub mysql: MysqlOptions,
+ pub postgres: PostgresOptions,
+ pub opentsdb: OpentsdbOptions,
+ pub influxdb: InfluxdbOptions,
+ pub prom_store: PromStoreOptions,
pub wal: WalConfig,
pub storage: StorageConfig,
pub kv_store: KvStoreConfig,
pub procedure: ProcedureConfig,
pub logging: LoggingOptions,
+ /// Options for different store engines.
+ pub region_engine: Vec<RegionEngineConfig>,
}
impl Default for StandaloneOptions {
@@ -104,18 +108,22 @@ impl Default for StandaloneOptions {
Self {
mode: Mode::Standalone,
enable_telemetry: true,
- http_options: HttpOptions::default(),
- grpc_options: GrpcOptions::default(),
- mysql_options: MysqlOptions::default(),
- postgres_options: PostgresOptions::default(),
- opentsdb_options: OpentsdbOptions::default(),
- influxdb_options: InfluxdbOptions::default(),
- prom_store_options: PromStoreOptions::default(),
+ http: HttpOptions::default(),
+ grpc: GrpcOptions::default(),
+ mysql: MysqlOptions::default(),
+ postgres: PostgresOptions::default(),
+ opentsdb: OpentsdbOptions::default(),
+ influxdb: InfluxdbOptions::default(),
+ prom_store: PromStoreOptions::default(),
wal: WalConfig::default(),
storage: StorageConfig::default(),
kv_store: KvStoreConfig::default(),
procedure: ProcedureConfig::default(),
logging: LoggingOptions::default(),
+ region_engine: vec![
+ RegionEngineConfig::Mito(MitoConfig::default()),
+ RegionEngineConfig::File(FileEngineConfig::default()),
+ ],
}
}
}
@@ -124,13 +132,13 @@ impl StandaloneOptions {
fn frontend_options(self) -> FrontendOptions {
FrontendOptions {
mode: self.mode,
- http: self.http_options,
- grpc: self.grpc_options,
- mysql: self.mysql_options,
- postgres: self.postgres_options,
- opentsdb: self.opentsdb_options,
- influxdb: self.influxdb_options,
- prom_store: self.prom_store_options,
+ http: self.http,
+ grpc: self.grpc,
+ mysql: self.mysql,
+ postgres: self.postgres,
+ opentsdb: self.opentsdb,
+ influxdb: self.influxdb,
+ prom_store: self.prom_store,
meta_client: None,
logging: self.logging,
..Default::default()
@@ -143,6 +151,7 @@ impl StandaloneOptions {
enable_telemetry: self.enable_telemetry,
wal: self.wal,
storage: self.storage,
+ region_engine: self.region_engine,
..Default::default()
}
}
@@ -232,7 +241,7 @@ impl StartCommand {
);
if let Some(addr) = &self.http_addr {
- opts.http_options.addr = addr.clone()
+ opts.http.addr = addr.clone()
}
if let Some(addr) = &self.rpc_addr {
@@ -246,42 +255,42 @@ impl StartCommand {
}
.fail();
}
- opts.grpc_options.addr = addr.clone()
+ opts.grpc.addr = addr.clone()
}
if let Some(addr) = &self.mysql_addr {
- opts.mysql_options.enable = true;
- opts.mysql_options.addr = addr.clone();
- opts.mysql_options.tls = tls_opts.clone();
+ opts.mysql.enable = true;
+ opts.mysql.addr = addr.clone();
+ opts.mysql.tls = tls_opts.clone();
}
if let Some(addr) = &self.postgres_addr {
- opts.postgres_options.enable = true;
- opts.postgres_options.addr = addr.clone();
- opts.postgres_options.tls = tls_opts;
+ opts.postgres.enable = true;
+ opts.postgres.addr = addr.clone();
+ opts.postgres.tls = tls_opts;
}
if let Some(addr) = &self.opentsdb_addr {
- opts.opentsdb_options.enable = true;
- opts.opentsdb_options.addr = addr.clone();
+ opts.opentsdb.enable = true;
+ opts.opentsdb.addr = addr.clone();
}
if self.influxdb_enable {
- opts.influxdb_options.enable = self.influxdb_enable;
+ opts.influxdb.enable = self.influxdb_enable;
}
- let kv_store_cfg = opts.kv_store.clone();
- let procedure_cfg = opts.procedure.clone();
- let fe_opts = opts.clone().frontend_options();
- let logging_opts = opts.logging.clone();
- let dn_opts = opts.datanode_options();
+ let kv_store = opts.kv_store.clone();
+ let procedure = opts.procedure.clone();
+ let frontend = opts.clone().frontend_options();
+ let logging = opts.logging.clone();
+ let datanode = opts.datanode_options();
Ok(Options::Standalone(Box::new(MixOptions {
- procedure_cfg,
- kv_store_cfg,
- data_home: dn_opts.storage.data_home.to_string(),
- fe_opts,
- dn_opts,
- logging_opts,
+ procedure,
+ kv_store,
+ data_home: datanode.storage.data_home.to_string(),
+ frontend,
+ datanode,
+ logging,
})))
}
@@ -290,8 +299,8 @@ impl StartCommand {
#[allow(clippy::diverging_sub_expression)]
async fn build(self, opts: MixOptions) -> Result<Instance> {
let plugins = Arc::new(load_frontend_plugins(&self.user_provider)?);
- let fe_opts = opts.fe_opts;
- let dn_opts = opts.dn_opts;
+ let fe_opts = opts.frontend;
+ let dn_opts = opts.datanode;
info!("Standalone start command: {:#?}", self);
info!(
@@ -300,13 +309,10 @@ impl StartCommand {
);
let kv_dir = kv_store_dir(&opts.data_home);
- let (kv_store, procedure_manager) = FeInstance::try_build_standalone_components(
- kv_dir,
- opts.kv_store_cfg,
- opts.procedure_cfg,
- )
- .await
- .context(StartFrontendSnafu)?;
+ let (kv_store, procedure_manager) =
+ FeInstance::try_build_standalone_components(kv_dir, opts.kv_store, opts.procedure)
+ .await
+ .context(StartFrontendSnafu)?;
let datanode =
DatanodeBuilder::new(dn_opts.clone(), Some(kv_store.clone()), plugins.clone())
@@ -436,9 +442,9 @@ mod tests {
checkpoint_margin = 9
gc_duration = '7s'
- [http_options]
+ [http]
addr = "127.0.0.1:4000"
- timeout = "30s"
+ timeout = "33s"
body_limit = "128MB"
[logging]
@@ -456,12 +462,12 @@ mod tests {
else {
unreachable!()
};
- let fe_opts = options.fe_opts;
- let dn_opts = options.dn_opts;
- let logging_opts = options.logging_opts;
+ let fe_opts = options.frontend;
+ let dn_opts = options.datanode;
+ let logging_opts = options.logging;
assert_eq!(Mode::Standalone, fe_opts.mode);
assert_eq!("127.0.0.1:4000".to_string(), fe_opts.http.addr);
- assert_eq!(Duration::from_secs(30), fe_opts.http.timeout);
+ assert_eq!(Duration::from_secs(33), fe_opts.http.timeout);
assert_eq!(ReadableSize::mb(128), fe_opts.http.body_limit);
assert_eq!("127.0.0.1:4001".to_string(), fe_opts.grpc.addr);
assert!(fe_opts.mysql.enable);
@@ -503,8 +509,8 @@ mod tests {
unreachable!()
};
- assert_eq!("/tmp/greptimedb/test/logs", opts.logging_opts.dir);
- assert_eq!("debug", opts.logging_opts.level.unwrap());
+ assert_eq!("/tmp/greptimedb/test/logs", opts.logging.dir);
+ assert_eq!("debug", opts.logging.level.unwrap());
}
#[test]
@@ -548,7 +554,7 @@ mod tests {
// http.addr = 127.0.0.1:24000
[
env_prefix.to_string(),
- "http_options".to_uppercase(),
+ "http".to_uppercase(),
"addr".to_uppercase(),
]
.join(ENV_VAR_SEP),
@@ -573,17 +579,17 @@ mod tests {
};
// Should be read from env, env > default values.
- assert_eq!(opts.logging_opts.dir, "/other/log/dir");
+ assert_eq!(opts.logging.dir, "/other/log/dir");
// Should be read from config file, config file > env > default values.
- assert_eq!(opts.logging_opts.level.as_ref().unwrap(), "debug");
+ assert_eq!(opts.logging.level.as_ref().unwrap(), "debug");
// Should be read from cli, cli > config file > env > default values.
- assert_eq!(opts.fe_opts.http.addr, "127.0.0.1:14000");
- assert_eq!(ReadableSize::mb(64), opts.fe_opts.http.body_limit);
+ assert_eq!(opts.frontend.http.addr, "127.0.0.1:14000");
+ assert_eq!(ReadableSize::mb(64), opts.frontend.http.body_limit);
// Should be default value.
- assert_eq!(opts.fe_opts.grpc.addr, GrpcOptions::default().addr);
+ assert_eq!(opts.frontend.grpc.addr, GrpcOptions::default().addr);
},
);
}
diff --git a/src/servers/tests/http/http_handler_test.rs b/src/servers/tests/http/http_handler_test.rs
index f8d5a567f05b..995e911acaad 100644
--- a/src/servers/tests/http/http_handler_test.rs
+++ b/src/servers/tests/http/http_handler_test.rs
@@ -401,7 +401,7 @@ async fn test_config() {
let toml_str = r#"
mode = "distributed"
- [http_options]
+ [http]
addr = "127.0.0.1:4000"
timeout = "30s"
body_limit = "2GB"
|
chore
|
refactor options (#2476)
|
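Most of the churn above follows from the fact that the TOML section names are derived from the struct field names by serde, so renaming `http_options` to `http` on the options structs is what turns the `[http_options]` section into `[http]`. A small self-contained sketch of that mapping (serde and toml crates assumed; the structs are cut-down stand-ins, not the real options types):

```rust
use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct HttpOptions {
    addr: String,
}

#[derive(Debug, Deserialize)]
struct StandaloneOptions {
    // The field name `http` is what makes the `[http]` TOML section match.
    http: HttpOptions,
}

fn main() {
    let opts: StandaloneOptions = toml::from_str(
        r#"
        [http]
        addr = "127.0.0.1:4000"
        "#,
    )
    .unwrap();
    assert_eq!(opts.http.addr, "127.0.0.1:4000");
}
```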
8f9676aad2ba11f7e48252363c734b5aa69b2148
|
2024-03-26 15:01:01
|
Ruihang Xia
|
fix: incorrect version info in (#3586)
| false
|
diff --git a/src/cmd/build.rs b/src/cmd/build.rs
index 5b7f1458843c..87615c2c990f 100644
--- a/src/cmd/build.rs
+++ b/src/cmd/build.rs
@@ -13,5 +13,8 @@
// limitations under the License.
fn main() {
+ // Trigger this script if the git branch/commit changes
+ println!("cargo:rerun-if-changed=.git/refs/heads");
+
common_version::setup_build_info();
}
|
fix
|
incorrect version info in (#3586)
|
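The fix above relies on standard Cargo build-script behavior: `cargo:rerun-if-changed` makes Cargo re-run the script whenever the named path changes (here, the git refs, so switching branch or commit invalidates stale build info), and a script can then hand values to the compiled crate, for example via `cargo:rustc-env`. A minimal illustrative `build.rs` along those lines — not the actual `common_version::setup_build_info` internals, and `BUILD_INFO_EXAMPLE` is a made-up variable name:

```rust
fn main() {
    // Re-run this script when the current branch or its head commit changes.
    println!("cargo:rerun-if-changed=.git/refs/heads");
    println!("cargo:rerun-if-changed=.git/HEAD");

    // One way a build script can expose a value to the crate; the crate could
    // then read it with env!("BUILD_INFO_EXAMPLE"). Hypothetical value here.
    println!("cargo:rustc-env=BUILD_INFO_EXAMPLE=dev");
}
```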
c126b480fd924af1b4da6f42dd24413752241439
|
2022-07-20 11:33:58
|
Lei, Huang
|
doc: add openssl install instructions to README.md (#99)
| false
|
diff --git a/README.md b/README.md
index e740f721af76..3a1e3c01c3a9 100644
--- a/README.md
+++ b/README.md
@@ -10,6 +10,7 @@ To compile GreptimeDB from source, you'll need the following:
- Rust
- C++ toolchain
- cmake
+- OpenSSL
#### Rust
The easiest way to install Rust is to use [`rustup`](https://rustup.rs/), which will check our `rust-toolchain` file and install correct Rust version for you.
@@ -20,11 +21,28 @@ The [`prost-build`](https://github.com/tokio-rs/prost/tree/master/prost-build) d
#### cmake
Follow the instructions for your operating system on the [`cmake`](https://cmake.org/install/) site.
-For MacOS users, you can also use `homebrew` to install `cmake`.
+For macOS users, you can also use `homebrew` to install `cmake`.
```bash
brew install cmake
```
+#### OpenSSL
+
+For Ubuntu:
+```bash
+sudo apt install libssl-dev
+```
+
+For RedHat-based: Fedora, Oracle Linux, etc:
+```bash
+sudo dnf install openssl-devel
+```
+
+For macOS:
+```bash
+brew install openssl
+```
+
## Usage
```
|
doc
|
add openssl install instructions to README.md (#99)
|
5d8084a32fa7fa4d7ff18f6da04e8a12e7c981c3
|
2024-05-13 14:00:25
|
Jeremyhi
|
chore: store-addr to store-addrs (#3925)
| false
|
diff --git a/src/cmd/src/metasrv.rs b/src/cmd/src/metasrv.rs
index 5832e61d2cce..fbc605acaf58 100644
--- a/src/cmd/src/metasrv.rs
+++ b/src/cmd/src/metasrv.rs
@@ -99,8 +99,8 @@ struct StartCommand {
bind_addr: Option<String>,
#[clap(long)]
server_addr: Option<String>,
- #[clap(long)]
- store_addr: Option<String>,
+ #[clap(long, aliases = ["store-addr"], value_delimiter = ',', num_args = 1..)]
+ store_addrs: Option<Vec<String>>,
#[clap(short, long)]
config_file: Option<String>,
#[clap(short, long)]
@@ -155,8 +155,8 @@ impl StartCommand {
opts.server_addr.clone_from(addr);
}
- if let Some(addr) = &self.store_addr {
- opts.store_addr.clone_from(addr);
+ if let Some(addrs) = &self.store_addrs {
+ opts.store_addrs.clone_from(addrs);
}
if let Some(selector_type) = &self.selector {
@@ -236,7 +236,7 @@ mod tests {
let cmd = StartCommand {
bind_addr: Some("127.0.0.1:3002".to_string()),
server_addr: Some("127.0.0.1:3002".to_string()),
- store_addr: Some("127.0.0.1:2380".to_string()),
+ store_addrs: Some(vec!["127.0.0.1:2380".to_string()]),
selector: Some("LoadBased".to_string()),
..Default::default()
};
@@ -245,7 +245,7 @@ mod tests {
unreachable!()
};
assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);
- assert_eq!("127.0.0.1:2380".to_string(), options.store_addr);
+ assert_eq!(vec!["127.0.0.1:2380".to_string()], options.store_addrs);
assert_eq!(SelectorType::LoadBased, options.selector);
}
@@ -281,7 +281,7 @@ mod tests {
};
assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);
assert_eq!("127.0.0.1:3002".to_string(), options.server_addr);
- assert_eq!("127.0.0.1:2379".to_string(), options.store_addr);
+ assert_eq!(vec!["127.0.0.1:2379".to_string()], options.store_addrs);
assert_eq!(SelectorType::LeaseBased, options.selector);
assert_eq!("debug", options.logging.level.as_ref().unwrap());
assert_eq!("/tmp/greptimedb/test/logs".to_string(), options.logging.dir);
@@ -315,7 +315,7 @@ mod tests {
let cmd = StartCommand {
bind_addr: Some("127.0.0.1:3002".to_string()),
server_addr: Some("127.0.0.1:3002".to_string()),
- store_addr: Some("127.0.0.1:2380".to_string()),
+ store_addrs: Some(vec!["127.0.0.1:2380".to_string()]),
selector: Some("LoadBased".to_string()),
..Default::default()
};
@@ -401,7 +401,7 @@ mod tests {
assert_eq!(opts.http.addr, "127.0.0.1:14000");
// Should be default value.
- assert_eq!(opts.store_addr, "127.0.0.1:2379");
+ assert_eq!(opts.store_addrs, vec!["127.0.0.1:2379".to_string()]);
},
);
}
diff --git a/src/meta-srv/src/bootstrap.rs b/src/meta-srv/src/bootstrap.rs
index e6cf40bb4d92..02f89ca9b56a 100644
--- a/src/meta-srv/src/bootstrap.rs
+++ b/src/meta-srv/src/bootstrap.rs
@@ -241,8 +241,8 @@ pub async fn metasrv_builder(
async fn create_etcd_client(opts: &MetasrvOptions) -> Result<Client> {
let etcd_endpoints = opts
- .store_addr
- .split(',')
+ .store_addrs
+ .iter()
.map(|x| x.trim())
.filter(|x| !x.is_empty())
.collect::<Vec<_>>();
diff --git a/src/meta-srv/src/metasrv.rs b/src/meta-srv/src/metasrv.rs
index 323428adca01..68b18f2f27b8 100644
--- a/src/meta-srv/src/metasrv.rs
+++ b/src/meta-srv/src/metasrv.rs
@@ -70,7 +70,7 @@ pub struct MetasrvOptions {
/// The address the server advertises to the clients.
pub server_addr: String,
/// The address of the store, e.g., etcd.
- pub store_addr: String,
+ pub store_addrs: Vec<String>,
/// The type of selector.
pub selector: SelectorType,
/// Whether to use the memory store.
@@ -124,7 +124,7 @@ impl Default for MetasrvOptions {
Self {
bind_addr: "127.0.0.1:3002".to_string(),
server_addr: "127.0.0.1:3002".to_string(),
- store_addr: "127.0.0.1:2379".to_string(),
+ store_addrs: vec!["127.0.0.1:2379".to_string()],
selector: SelectorType::default(),
use_memory_store: false,
enable_region_failover: false,
|
chore
|
store-addr to store-addrs (#3925)
|
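The CLI change above leans on clap's derive attributes exactly as written in the diff: `value_delimiter = ','` splits a single argument into a `Vec<String>`, `num_args = 1..` still accepts space-separated repetition, and `aliases = ["store-addr"]` keeps the old flag spelling working. A cut-down sketch (clap v4 derive assumed; `Cli` is an illustrative struct, not the real `StartCommand`):

```rust
use clap::Parser;

#[derive(Parser)]
struct Cli {
    // Mirrors the attribute added in the diff above.
    #[clap(long, aliases = ["store-addr"], value_delimiter = ',', num_args = 1..)]
    store_addrs: Option<Vec<String>>,
}

fn main() {
    let cli = Cli::parse_from(["cli", "--store-addrs", "127.0.0.1:2379,127.0.0.1:2380"]);
    assert_eq!(
        cli.store_addrs,
        Some(vec!["127.0.0.1:2379".to_string(), "127.0.0.1:2380".to_string()])
    );
}
```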
f1d17a8ba51dcc34195394c0d4812c3d3376aa90
|
2024-07-09 09:00:14
|
dennis zhuang
|
fix: panic while reading information_schema. KEY_COLUMN_USAGE (#4318)
| false
|
diff --git a/src/catalog/src/information_schema/key_column_usage.rs b/src/catalog/src/information_schema/key_column_usage.rs
index 5cefc7449af2..bc4c04ccb8da 100644
--- a/src/catalog/src/information_schema/key_column_usage.rs
+++ b/src/catalog/src/information_schema/key_column_usage.rs
@@ -27,6 +27,7 @@ use datatypes::prelude::{ConcreteDataType, MutableVector, ScalarVectorBuilder, V
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
use datatypes::value::Value;
use datatypes::vectors::{ConstantVector, StringVector, StringVectorBuilder, UInt32VectorBuilder};
+use futures_util::TryStreamExt;
use snafu::{OptionExt, ResultExt};
use store_api::storage::{ScanRequest, TableId};
@@ -211,69 +212,56 @@ impl InformationSchemaKeyColumnUsageBuilder {
.context(UpgradeWeakCatalogManagerRefSnafu)?;
let predicates = Predicates::from_scan_request(&request);
- let mut primary_constraints = vec![];
-
for schema_name in catalog_manager.schema_names(&catalog_name).await? {
- if !catalog_manager
- .schema_exists(&catalog_name, &schema_name)
- .await?
- {
- continue;
- }
+ let mut stream = catalog_manager.tables(&catalog_name, &schema_name);
- for table_name in catalog_manager
- .table_names(&catalog_name, &schema_name)
- .await?
- {
- if let Some(table) = catalog_manager
- .table(&catalog_name, &schema_name, &table_name)
- .await?
- {
- let keys = &table.table_info().meta.primary_key_indices;
- let schema = table.schema();
+ while let Some(table) = stream.try_next().await? {
+ let mut primary_constraints = vec![];
- for (idx, column) in schema.column_schemas().iter().enumerate() {
- if column.is_time_index() {
- self.add_key_column_usage(
- &predicates,
- &schema_name,
- TIME_INDEX_CONSTRAINT_NAME,
- &catalog_name,
- &schema_name,
- &table_name,
- &column.name,
- 1, //always 1 for time index
- );
- }
- if keys.contains(&idx) {
- primary_constraints.push((
- catalog_name.clone(),
- schema_name.clone(),
- table_name.clone(),
- column.name.clone(),
- ));
- }
- // TODO(dimbtp): foreign key constraint not supported yet
+ let table_info = table.table_info();
+ let table_name = &table_info.name;
+ let keys = &table_info.meta.primary_key_indices;
+ let schema = table.schema();
+
+ for (idx, column) in schema.column_schemas().iter().enumerate() {
+ if column.is_time_index() {
+ self.add_key_column_usage(
+ &predicates,
+ &schema_name,
+ TIME_INDEX_CONSTRAINT_NAME,
+ &catalog_name,
+ &schema_name,
+ table_name,
+ &column.name,
+ 1, //always 1 for time index
+ );
+ }
+ if keys.contains(&idx) {
+ primary_constraints.push((
+ catalog_name.clone(),
+ schema_name.clone(),
+ table_name.to_string(),
+ column.name.clone(),
+ ));
}
- } else {
- unreachable!();
+ // TODO(dimbtp): foreign key constraint not supported yet
}
- }
- }
- for (i, (catalog_name, schema_name, table_name, column_name)) in
- primary_constraints.into_iter().enumerate()
- {
- self.add_key_column_usage(
- &predicates,
- &schema_name,
- PRI_CONSTRAINT_NAME,
- &catalog_name,
- &schema_name,
- &table_name,
- &column_name,
- i as u32 + 1,
- );
+ for (i, (catalog_name, schema_name, table_name, column_name)) in
+ primary_constraints.into_iter().enumerate()
+ {
+ self.add_key_column_usage(
+ &predicates,
+ &schema_name,
+ PRI_CONSTRAINT_NAME,
+ &catalog_name,
+ &schema_name,
+ &table_name,
+ &column_name,
+ i as u32 + 1,
+ );
+ }
+ }
}
self.finish()
diff --git a/tests/cases/standalone/common/show/show_index.result b/tests/cases/standalone/common/show/show_index.result
index 79eec5a3f26f..4f1f7ea86bb3 100644
--- a/tests/cases/standalone/common/show/show_index.result
+++ b/tests/cases/standalone/common/show/show_index.result
@@ -11,10 +11,31 @@ CREATE TABLE IF NOT EXISTS system_metrics (
Affected Rows: 0
+CREATE TABLE IF NOT EXISTS test (
+ a STRING,
+ b STRING,
+ c DOUBLE,
+ ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+ PRIMARY KEY(a, b),
+ TIME INDEX(ts)
+);
+
+Affected Rows: 0
+
SHOW INDEX;
Error: 2000(InvalidSyntax), Unexpected token while parsing SQL statement: SHOW INDEX;, expected: '{FROM | IN} table', found: ;
+SHOW INDEX FROM test;
+
++-------+------------+------------+--------------+-------------+-----------+-------------+----------+--------+------+----------------------------+---------+---------------+---------+------------+
+| Table | Non_unique | Key_name | Seq_in_index | Column_name | Collation | Cardinality | Sub_part | Packed | Null | Index_type | Comment | Index_comment | Visible | Expression |
++-------+------------+------------+--------------+-------------+-----------+-------------+----------+--------+------+----------------------------+---------+---------------+---------+------------+
+| test | 1 | PRIMARY | 1 | a | A | | | | YES | greptime-inverted-index-v1 | | | YES | |
+| test | 1 | PRIMARY | 2 | b | A | | | | YES | greptime-inverted-index-v1 | | | YES | |
+| test | 1 | TIME INDEX | 1 | ts | A | | | | NO | greptime-inverted-index-v1 | | | YES | |
++-------+------------+------------+--------------+-------------+-----------+-------------+----------+--------+------+----------------------------+---------+---------------+---------+------------+
+
SHOW INDEX FROM system_metrics;
+----------------+------------+------------+--------------+-------------+-----------+-------------+----------+--------+------+----------------------------+---------+---------------+---------+------------+
@@ -51,3 +72,7 @@ DROP TABLE system_metrics;
Affected Rows: 0
+DROP TABLE test;
+
+Affected Rows: 0
+
diff --git a/tests/cases/standalone/common/show/show_index.sql b/tests/cases/standalone/common/show/show_index.sql
index 3405e0494e93..3f804db3845f 100644
--- a/tests/cases/standalone/common/show/show_index.sql
+++ b/tests/cases/standalone/common/show/show_index.sql
@@ -9,8 +9,19 @@ CREATE TABLE IF NOT EXISTS system_metrics (
TIME INDEX(ts)
);
+CREATE TABLE IF NOT EXISTS test (
+ a STRING,
+ b STRING,
+ c DOUBLE,
+ ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+ PRIMARY KEY(a, b),
+ TIME INDEX(ts)
+);
+
SHOW INDEX;
+SHOW INDEX FROM test;
+
SHOW INDEX FROM system_metrics;
SHOW INDEX FROM system_metrics in public;
@@ -20,3 +31,5 @@ SHOW INDEX FROM system_metrics like '%util%';
SHOW INDEX FROM system_metrics WHERE Key_name = 'TIME INDEX';
DROP TABLE system_metrics;
+
+DROP TABLE test;
|
fix
|
panic while reading information_schema.KEY_COLUMN_USAGE (#4318)
|
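The fix above replaces per-table lookups that could reach the `unreachable!()` branch with a fallible table stream drained via `TryStreamExt::try_next`. A minimal sketch of that pattern (assumes only the futures-util crate; `TableInfo` and `demo` are illustrative stand-ins for the catalog types, and the async function needs any runtime to drive it):

use futures_util::{stream, TryStreamExt};

#[derive(Debug)]
struct TableInfo {
    name: String,
    primary_key_indices: Vec<usize>,
}

async fn demo() -> Result<(), String> {
    // A stand-in for `catalog_manager.tables(..)`: a stream of fallible table lookups.
    let mut tables = stream::iter(vec![
        Ok::<_, String>(TableInfo {
            name: "system_metrics".into(),
            primary_key_indices: vec![0, 1],
        }),
        Ok(TableInfo {
            name: "test".into(),
            primary_key_indices: vec![0],
        }),
    ]);

    let mut primary_constraints = Vec::new();
    // `try_next` surfaces a missing or broken table as an error instead of
    // panicking, which is the point of the fix above.
    while let Some(table) = tables.try_next().await? {
        for idx in &table.primary_key_indices {
            primary_constraints.push((table.name.clone(), *idx));
        }
    }
    assert_eq!(primary_constraints.len(), 3);
    Ok(())
}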
6a4e2e597598aa6103647f1798fdea1bbec98b73
|
2022-12-13 14:38:22
|
dennis zhuang
|
feat: promql create and skeleton (#720)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 7f1389eadc69..ad26abc63d9b 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -46,7 +46,7 @@ version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bf6ccdb167abbf410dcb915cabd428929d7f6a04980b54a11f26a39f1c7f7107"
dependencies = [
- "cfg-if",
+ "cfg-if 1.0.0",
"const-random",
"getrandom 0.2.7",
"once_cell",
@@ -70,7 +70,7 @@ checksum = "befdff0b4683a0824fc8719ce639a252d9d62cd89c8d0004c39e2417128c1eb8"
dependencies = [
"axum 0.6.1",
"bytes",
- "cfg-if",
+ "cfg-if 1.0.0",
"http",
"indexmap",
"schemars",
@@ -650,7 +650,7 @@ checksum = "cab84319d616cfb654d03394f38ab7e6f0919e181b1b57e1fd15e7fb4077d9a7"
dependencies = [
"addr2line",
"cc",
- "cfg-if",
+ "cfg-if 1.0.0",
"libc",
"miniz_oxide",
"object",
@@ -805,7 +805,7 @@ dependencies = [
"arrayref",
"arrayvec 0.7.2",
"cc",
- "cfg-if",
+ "cfg-if 1.0.0",
"constant_time_eq",
"digest",
]
@@ -916,6 +916,12 @@ version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c1db59621ec70f09c5e9b597b220c7a2b43611f4710dc03ceb8748637775692c"
+[[package]]
+name = "cactus"
+version = "1.0.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cf034765b7d19a011c6d619e880582bf95e8186b580e6fab56589872dd87dcf5"
+
[[package]]
name = "camino"
version = "1.1.1"
@@ -942,7 +948,7 @@ checksum = "4acbb09d9ee8e23699b9634375c72795d095bf268439da88562cf9b501f181fa"
dependencies = [
"camino",
"cargo-platform",
- "semver",
+ "semver 1.0.14",
"serde",
"serde_json",
]
@@ -1018,12 +1024,32 @@ dependencies = [
"nom",
]
+[[package]]
+name = "cfg-if"
+version = "0.1.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
+
[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+[[package]]
+name = "cfgrammar"
+version = "0.12.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bf74ea341ae8905eac9a234b6a5a845e118c25bbbdecf85ec77431a8b3bfa0be"
+dependencies = [
+ "indexmap",
+ "lazy_static",
+ "num-traits",
+ "regex",
+ "serde",
+ "vob",
+]
+
[[package]]
name = "chrono"
version = "0.4.22"
@@ -1616,7 +1642,7 @@ version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d"
dependencies = [
- "cfg-if",
+ "cfg-if 1.0.0",
]
[[package]]
@@ -1697,7 +1723,7 @@ version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2801af0d36612ae591caa9568261fddce32ce6e08a7275ea334a06a4ad021a2c"
dependencies = [
- "cfg-if",
+ "cfg-if 1.0.0",
"crossbeam-channel",
"crossbeam-deque",
"crossbeam-epoch",
@@ -1711,7 +1737,7 @@ version = "0.5.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521"
dependencies = [
- "cfg-if",
+ "cfg-if 1.0.0",
"crossbeam-utils",
]
@@ -1721,7 +1747,7 @@ version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "715e8152b692bba2d374b53d4875445368fdf21a94751410af607a5ac677d1fc"
dependencies = [
- "cfg-if",
+ "cfg-if 1.0.0",
"crossbeam-epoch",
"crossbeam-utils",
]
@@ -1733,7 +1759,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "045ebe27666471bb549370b4b0b3e51b07f56325befa4284db65fc89c02511b1"
dependencies = [
"autocfg",
- "cfg-if",
+ "cfg-if 1.0.0",
"crossbeam-utils",
"memoffset",
"once_cell",
@@ -1746,7 +1772,7 @@ version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1cd42583b04998a5363558e5f9291ee5a5ff6b49944332103f251e7479a82aa7"
dependencies = [
- "cfg-if",
+ "cfg-if 1.0.0",
"crossbeam-utils",
]
@@ -1756,7 +1782,7 @@ version = "0.8.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "51887d4adc7b564537b15adcfb307936f8075dfcd5f00dde9a9f1d29383682bc"
dependencies = [
- "cfg-if",
+ "cfg-if 1.0.0",
"once_cell",
]
@@ -1864,7 +1890,7 @@ version = "5.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "907076dfda823b0b36d2a1bb5f90c96660a5bbcd7729e10727f07858f22c4edc"
dependencies = [
- "cfg-if",
+ "cfg-if 1.0.0",
"hashbrown",
"lock_api",
"once_cell",
@@ -2135,7 +2161,7 @@ version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b98cf8ebf19c3d1b223e151f99a4f9f0690dca41414773390fc824184ac833e1"
dependencies = [
- "cfg-if",
+ "cfg-if 1.0.0",
"dirs-sys-next",
]
@@ -2173,7 +2199,7 @@ version = "1.0.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "53ecafc952c4528d9b51a458d1a8904b81783feff9fde08ab6ed2545ff396872"
dependencies = [
- "cfg-if",
+ "cfg-if 1.0.0",
"libc",
"socket2",
"winapi",
@@ -2224,7 +2250,7 @@ version = "0.8.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9852635589dc9f9ea1b6fe9f05b50ef208c85c834a562f0c6abb1c475736ec2b"
dependencies = [
- "cfg-if",
+ "cfg-if 1.0.0",
]
[[package]]
@@ -2233,6 +2259,26 @@ version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c34f04666d835ff5d62e058c3995147c06f42fe86ff053337632bca83e42702d"
+[[package]]
+name = "enum-iterator"
+version = "1.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "45a0ac4aeb3a18f92eaf09c6bb9b3ac30ff61ca95514fc58cbead1c9a6bf5401"
+dependencies = [
+ "enum-iterator-derive",
+]
+
+[[package]]
+name = "enum-iterator-derive"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "828de45d0ca18782232dfb8f3ea9cc428e8ced380eb26a520baaacfc70de39ce"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
[[package]]
name = "enum_dispatch"
version = "0.3.8"
@@ -2340,9 +2386,21 @@ version = "3.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e11dcc7e4d79a8c89b9ab4c6f5c30b1fc4a83c420792da3542fd31179ed5f517"
dependencies = [
- "cfg-if",
+ "cfg-if 1.0.0",
"rustix",
- "windows-sys",
+ "windows-sys 0.36.1",
+]
+
+[[package]]
+name = "filetime"
+version = "0.2.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4e884668cd0c7480504233e951174ddc3b382f7c2666e3b7310b5c4e7b0c37f9"
+dependencies = [
+ "cfg-if 1.0.0",
+ "libc",
+ "redox_syscall 0.2.16",
+ "windows-sys 0.42.0",
]
[[package]]
@@ -2664,13 +2722,22 @@ dependencies = [
"winapi",
]
+[[package]]
+name = "getopts"
+version = "0.2.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "14dbbfd5c71d70241ecf9e6f13737f7b5ce823821063188d7e46c41d371eebd5"
+dependencies = [
+ "unicode-width",
+]
+
[[package]]
name = "getrandom"
version = "0.1.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce"
dependencies = [
- "cfg-if",
+ "cfg-if 1.0.0",
"libc",
"wasi 0.9.0+wasi-snapshot-preview1",
]
@@ -2681,7 +2748,7 @@ version = "0.2.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4eb1a864a501629691edf6c15a593b7a51eebaa1e8468e9ddc623de7c9b58ec6"
dependencies = [
- "cfg-if",
+ "cfg-if 1.0.0",
"js-sys",
"libc",
"wasi 0.11.0+wasi-snapshot-preview1",
@@ -2993,7 +3060,7 @@ version = "0.1.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c"
dependencies = [
- "cfg-if",
+ "cfg-if 1.0.0",
]
[[package]]
@@ -3237,7 +3304,7 @@ version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "efbc0f03f9a775e9f6aed295c6a1ba2253c5757a9e03d55c6caa46a681abcddd"
dependencies = [
- "cfg-if",
+ "cfg-if 1.0.0",
"winapi",
]
@@ -3280,7 +3347,7 @@ version = "0.4.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e"
dependencies = [
- "cfg-if",
+ "cfg-if 1.0.0",
]
[[package]]
@@ -3308,6 +3375,59 @@ dependencies = [
"tokio",
]
+[[package]]
+name = "lrlex"
+version = "0.12.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "22b832738fbfa58ad036580929e973b3b6bd31c6d6c7f18f6b5ea7b626675c85"
+dependencies = [
+ "getopts",
+ "lazy_static",
+ "lrpar",
+ "num-traits",
+ "regex",
+ "serde",
+ "try_from",
+ "vergen",
+]
+
+[[package]]
+name = "lrpar"
+version = "0.12.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2f270b952b07995fe874b10a5ed7dd28c80aa2130e37a7de7ed667d034e0a521"
+dependencies = [
+ "bincode 1.3.3",
+ "cactus",
+ "cfgrammar",
+ "filetime",
+ "indexmap",
+ "lazy_static",
+ "lrtable",
+ "num-traits",
+ "packedvec",
+ "regex",
+ "serde",
+ "static_assertions",
+ "vergen",
+ "vob",
+]
+
+[[package]]
+name = "lrtable"
+version = "0.12.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a854115c6a10772ac154261592b082436abc869c812575cadcf9d7ceda8eff0b"
+dependencies = [
+ "cfgrammar",
+ "fnv",
+ "num-traits",
+ "serde",
+ "sparsevec",
+ "static_assertions",
+ "vob",
+]
+
[[package]]
name = "lru"
version = "0.8.1"
@@ -3597,7 +3717,7 @@ dependencies = [
"libc",
"log",
"wasi 0.11.0+wasi-snapshot-preview1",
- "windows-sys",
+ "windows-sys 0.36.1",
]
[[package]]
@@ -3646,7 +3766,7 @@ dependencies = [
"once_cell",
"parking_lot",
"quanta",
- "rustc_version",
+ "rustc_version 0.4.0",
"scheduled-thread-pool",
"skeptic",
"smallvec",
@@ -3816,7 +3936,7 @@ checksum = "9f866317acbd3a240710c63f065ffb1e4fd466259045ccb504130b7f668f35c6"
dependencies = [
"bitflags",
"cc",
- "cfg-if",
+ "cfg-if 1.0.0",
"libc",
"memoffset",
]
@@ -3828,7 +3948,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "195cdbc1741b8134346d515b3a56a1c94b0912758009cfd53f99ea0f57b065fc"
dependencies = [
"bitflags",
- "cfg-if",
+ "cfg-if 1.0.0",
"libc",
"memoffset",
]
@@ -4165,6 +4285,16 @@ version = "6.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9ff7415e9ae3fff1225851df9e0d9e4e5479f947619774677a63572e55e80eff"
+[[package]]
+name = "packedvec"
+version = "1.2.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bde3c690ec20e4a2b4fb46f0289a451181eb50011a1e2acc8d85e2fde9062a45"
+dependencies = [
+ "num-traits",
+ "serde",
+]
+
[[package]]
name = "page_size"
version = "0.4.2"
@@ -4198,13 +4328,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09a279cbf25cb0757810394fbc1e359949b59e348145c643a939a525692e6929"
dependencies = [
"backtrace",
- "cfg-if",
+ "cfg-if 1.0.0",
"libc",
"petgraph",
"redox_syscall 0.2.16",
"smallvec",
"thread-id",
- "windows-sys",
+ "windows-sys 0.36.1",
]
[[package]]
@@ -4558,7 +4688,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ab4609a838d88b73d8238967b60dd115cc08d38e2bbaf51ee1e4b695f89122e2"
dependencies = [
"autocfg",
- "cfg-if",
+ "cfg-if 1.0.0",
"libc",
"log",
"wepoll-ffi",
@@ -4697,6 +4827,27 @@ dependencies = [
"unicode-ident",
]
+[[package]]
+name = "promql"
+version = "0.1.0"
+dependencies = [
+ "common-error",
+ "promql-parser",
+ "snafu",
+]
+
+[[package]]
+name = "promql-parser"
+version = "0.0.1"
+source = "git+https://github.com/GreptimeTeam/promql-parser.git?rev=71d8a90#71d8a90979304a7f128b3125f37a209384a81051"
+dependencies = [
+ "cfgrammar",
+ "lazy_static",
+ "lrlex",
+ "lrpar",
+ "regex",
+]
+
[[package]]
name = "prost"
version = "0.9.0"
@@ -5237,7 +5388,7 @@ version = "0.18.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f6d5f2436026b4f6e79dc829837d467cc7e9a55ee40e750d716713540715a2df"
dependencies = [
- "cfg-if",
+ "cfg-if 1.0.0",
"ordered-multimap",
]
@@ -5264,13 +5415,22 @@ version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2"
+[[package]]
+name = "rustc_version"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f0dfe2087c51c460008730de8b57e6a320782fbfb312e1f4d520e6c6fae155ee"
+dependencies = [
+ "semver 0.11.0",
+]
+
[[package]]
name = "rustc_version"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366"
dependencies = [
- "semver",
+ "semver 1.0.14",
]
[[package]]
@@ -5284,7 +5444,7 @@ dependencies = [
"io-lifetimes",
"libc",
"linux-raw-sys",
- "windows-sys",
+ "windows-sys 0.36.1",
]
[[package]]
@@ -5352,7 +5512,7 @@ version = "0.0.0"
source = "git+https://github.com/discord9/RustPython?rev=183e8dab#183e8dabe0027e31630368e36c6be83b5f9cb3f8"
dependencies = [
"ascii",
- "cfg-if",
+ "cfg-if 1.0.0",
"hexf-parse",
"lexical-parse-float",
"libc",
@@ -5469,7 +5629,7 @@ dependencies = [
"ascii",
"base64",
"blake2",
- "cfg-if",
+ "cfg-if 1.0.0",
"crc32fast",
"crossbeam-utils",
"csv-core",
@@ -5535,7 +5695,7 @@ dependencies = [
"bitflags",
"bstr",
"caseless",
- "cfg-if",
+ "cfg-if 1.0.0",
"chrono",
"crossbeam-utils",
"exitcode",
@@ -5566,7 +5726,7 @@ dependencies = [
"paste",
"rand 0.8.5",
"result-like",
- "rustc_version",
+ "rustc_version 0.4.0",
"rustpython-ast",
"rustpython-codegen",
"rustpython-common",
@@ -5611,7 +5771,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1d1cd5ae51d3f7bf65d7969d579d502168ef578f289452bd8ccc91de28fda20e"
dependencies = [
"bitflags",
- "cfg-if",
+ "cfg-if 1.0.0",
"clipboard-win",
"dirs-next",
"fd-lock",
@@ -5708,7 +5868,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "88d6731146462ea25d9244b2ed5fd1d716d25c52e4d54aa4fb0f3c4e9854dbe2"
dependencies = [
"lazy_static",
- "windows-sys",
+ "windows-sys 0.36.1",
]
[[package]]
@@ -5829,6 +5989,15 @@ dependencies = [
"libc",
]
+[[package]]
+name = "semver"
+version = "0.11.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6"
+dependencies = [
+ "semver-parser",
+]
+
[[package]]
name = "semver"
version = "1.0.14"
@@ -5838,6 +6007,15 @@ dependencies = [
"serde",
]
+[[package]]
+name = "semver-parser"
+version = "0.10.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "00b0bef5b7f9e0df16536d3961cfb6e84331c065b4066afb39768d0e319411f7"
+dependencies = [
+ "pest",
+]
+
[[package]]
name = "serde"
version = "1.0.145"
@@ -5989,7 +6167,7 @@ version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "028f48d513f9678cda28f6e4064755b3fbb2af6acd672f2c209b62323f7aea0f"
dependencies = [
- "cfg-if",
+ "cfg-if 1.0.0",
"cpufeatures",
"digest",
]
@@ -6000,7 +6178,7 @@ version = "0.10.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3"
dependencies = [
- "cfg-if",
+ "cfg-if 1.0.0",
"cpufeatures",
"digest",
]
@@ -6011,7 +6189,7 @@ version = "0.10.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0"
dependencies = [
- "cfg-if",
+ "cfg-if 1.0.0",
"cpufeatures",
"digest",
]
@@ -6193,6 +6371,18 @@ dependencies = [
"winapi",
]
+[[package]]
+name = "sparsevec"
+version = "0.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "928d1ef5df00aec8c5643c2ac37db4dd282763013c0fcc81efbb8e13db8dd8ec"
+dependencies = [
+ "num-traits",
+ "packedvec",
+ "serde",
+ "vob",
+]
+
[[package]]
name = "spin"
version = "0.5.2"
@@ -6647,7 +6837,7 @@ version = "3.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4"
dependencies = [
- "cfg-if",
+ "cfg-if 1.0.0",
"fastrand",
"libc",
"redox_syscall 0.2.16",
@@ -7165,7 +7355,7 @@ version = "0.1.36"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2fce9567bd60a67d08a16488756721ba392f24f29006402881e43b19aac64307"
dependencies = [
- "cfg-if",
+ "cfg-if 1.0.0",
"log",
"pin-project-lite",
"tracing-attributes",
@@ -7288,13 +7478,22 @@ version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642"
+[[package]]
+name = "try_from"
+version = "0.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "283d3b89e1368717881a9d51dad843cc435380d8109c9e47d38780a324698d8b"
+dependencies = [
+ "cfg-if 0.1.10",
+]
+
[[package]]
name = "twox-hash"
version = "1.6.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675"
dependencies = [
- "cfg-if",
+ "cfg-if 1.0.0",
"rand 0.8.5",
"static_assertions",
]
@@ -7587,6 +7786,21 @@ version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191"
+[[package]]
+name = "vergen"
+version = "7.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "447f9238a4553957277b3ee09d80babeae0811f1b3baefb093de1c0448437a37"
+dependencies = [
+ "anyhow",
+ "cfg-if 1.0.0",
+ "enum-iterator",
+ "getset",
+ "rustversion",
+ "thiserror",
+ "time 0.3.14",
+]
+
[[package]]
name = "version_check"
version = "0.9.4"
@@ -7599,6 +7813,17 @@ version = "0.0.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7b60dcd6a64dd45abf9bd426970c9843726da7fc08f44cd6fcebf68c21220a63"
+[[package]]
+name = "vob"
+version = "3.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cbdb3eee5dd38a27129832bca4a3171888e699a6ac36de86547975466997986f"
+dependencies = [
+ "num-traits",
+ "rustc_version 0.3.3",
+ "serde",
+]
+
[[package]]
name = "volatile"
version = "0.3.0"
@@ -7656,7 +7881,7 @@ version = "0.2.83"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eaf9f5aceeec8be17c128b2e93e031fb8a4d469bb9c4ae2d7dc1888b26887268"
dependencies = [
- "cfg-if",
+ "cfg-if 1.0.0",
"wasm-bindgen-macro",
]
@@ -7681,7 +7906,7 @@ version = "0.4.33"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "23639446165ca5a5de86ae1d8896b737ae80319560fbaa4c2887b7da6e7ebd7d"
dependencies = [
- "cfg-if",
+ "cfg-if 1.0.0",
"js-sys",
"wasm-bindgen",
"web-sys",
@@ -7828,6 +8053,27 @@ dependencies = [
"windows_x86_64_msvc 0.36.1",
]
+[[package]]
+name = "windows-sys"
+version = "0.42.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7"
+dependencies = [
+ "windows_aarch64_gnullvm",
+ "windows_aarch64_msvc 0.42.0",
+ "windows_i686_gnu 0.42.0",
+ "windows_i686_msvc 0.42.0",
+ "windows_x86_64_gnu 0.42.0",
+ "windows_x86_64_gnullvm",
+ "windows_x86_64_msvc 0.42.0",
+]
+
+[[package]]
+name = "windows_aarch64_gnullvm"
+version = "0.42.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "41d2aa71f6f0cbe00ae5167d90ef3cfe66527d6f613ca78ac8024c3ccab9a19e"
+
[[package]]
name = "windows_aarch64_msvc"
version = "0.36.1"
@@ -7840,6 +8086,12 @@ version = "0.39.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ec7711666096bd4096ffa835238905bb33fb87267910e154b18b44eaabb340f2"
+[[package]]
+name = "windows_aarch64_msvc"
+version = "0.42.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dd0f252f5a35cac83d6311b2e795981f5ee6e67eb1f9a7f64eb4500fbc4dcdb4"
+
[[package]]
name = "windows_i686_gnu"
version = "0.36.1"
@@ -7852,6 +8104,12 @@ version = "0.39.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "763fc57100a5f7042e3057e7e8d9bdd7860d330070251a73d003563a3bb49e1b"
+[[package]]
+name = "windows_i686_gnu"
+version = "0.42.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fbeae19f6716841636c28d695375df17562ca208b2b7d0dc47635a50ae6c5de7"
+
[[package]]
name = "windows_i686_msvc"
version = "0.36.1"
@@ -7864,6 +8122,12 @@ version = "0.39.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7bc7cbfe58828921e10a9f446fcaaf649204dcfe6c1ddd712c5eebae6bda1106"
+[[package]]
+name = "windows_i686_msvc"
+version = "0.42.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "84c12f65daa39dd2babe6e442988fc329d6243fdce47d7d2d155b8d874862246"
+
[[package]]
name = "windows_x86_64_gnu"
version = "0.36.1"
@@ -7876,6 +8140,18 @@ version = "0.39.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6868c165637d653ae1e8dc4d82c25d4f97dd6605eaa8d784b5c6e0ab2a252b65"
+[[package]]
+name = "windows_x86_64_gnu"
+version = "0.42.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bf7b1b21b5362cbc318f686150e5bcea75ecedc74dd157d874d754a2ca44b0ed"
+
+[[package]]
+name = "windows_x86_64_gnullvm"
+version = "0.42.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "09d525d2ba30eeb3297665bd434a54297e4170c7f1a44cad4ef58095b4cd2028"
+
[[package]]
name = "windows_x86_64_msvc"
version = "0.36.1"
@@ -7888,6 +8164,12 @@ version = "0.39.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5e4d40883ae9cae962787ca76ba76390ffa29214667a111db9e0a1ad8377e809"
+[[package]]
+name = "windows_x86_64_msvc"
+version = "0.42.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f40009d85759725a34da6d89a94e63d7bdc50a862acf0dbc7c8e488f1edcb6f5"
+
[[package]]
name = "winreg"
version = "0.10.1"
diff --git a/Cargo.toml b/Cargo.toml
index 77d94f0f3721..a960138d4b7f 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -27,6 +27,7 @@ members = [
"src/meta-srv",
"src/mito",
"src/object-store",
+ "src/promql",
"src/query",
"src/script",
"src/servers",
diff --git a/src/promql/Cargo.toml b/src/promql/Cargo.toml
new file mode 100644
index 000000000000..16f7d78b18f9
--- /dev/null
+++ b/src/promql/Cargo.toml
@@ -0,0 +1,9 @@
+[package]
+name = "promql"
+version = "0.1.0"
+edition = "2021"
+
+[dependencies]
+common-error = { path = "../common/error" }
+promql-parser = { git = "https://github.com/GreptimeTeam/promql-parser.git", rev = "71d8a90" }
+snafu = { version = "0.7", features = ["backtraces"] }
diff --git a/src/promql/src/engine.rs b/src/promql/src/engine.rs
new file mode 100644
index 000000000000..910d75d747da
--- /dev/null
+++ b/src/promql/src/engine.rs
@@ -0,0 +1,36 @@
+// Copyright 2022 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::sync::Arc;
+
+use promql_parser::parser::Value;
+
+use crate::error::Result;
+
+mod evaluator;
+mod functions;
+
+pub use evaluator::*;
+
+pub struct Context {}
+
+pub struct Query {}
+
+pub struct Engine {}
+
+impl Engine {
+ pub fn exec(_ctx: &Context, _q: Query) -> Result<Arc<dyn Value>> {
+ unimplemented!();
+ }
+}
diff --git a/src/promql/src/engine/evaluator.rs b/src/promql/src/engine/evaluator.rs
new file mode 100644
index 000000000000..cb8d3c9f385e
--- /dev/null
+++ b/src/promql/src/engine/evaluator.rs
@@ -0,0 +1,29 @@
+// Copyright 2022 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::sync::Arc;
+
+use promql_parser::parser::{Expr, Value};
+
+use crate::engine::Context;
+use crate::error::Result;
+
+/// An evaluator evaluates given expressions over given fixed timestamps.
+pub struct Evaluator {}
+
+impl Evaluator {
+ pub fn eval(_ctx: &Context, _expr: &Expr) -> Result<Arc<dyn Value>> {
+ unimplemented!();
+ }
+}
diff --git a/src/promql/src/engine/functions.rs b/src/promql/src/engine/functions.rs
new file mode 100644
index 000000000000..9e0fcc186d8e
--- /dev/null
+++ b/src/promql/src/engine/functions.rs
@@ -0,0 +1,15 @@
+// Copyright 2022 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! PromQL functions
diff --git a/src/promql/src/error.rs b/src/promql/src/error.rs
new file mode 100644
index 000000000000..14eefab56a15
--- /dev/null
+++ b/src/promql/src/error.rs
@@ -0,0 +1,50 @@
+// Copyright 2022 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::any::Any;
+
+use common_error::prelude::*;
+
+common_error::define_opaque_error!(Error);
+
+#[derive(Debug, Snafu)]
+#[snafu(visibility(pub))]
+pub enum InnerError {
+ #[snafu(display("Unsupported expr type: {}", name))]
+ UnsupportedExpr { name: String, backtrace: Backtrace },
+}
+
+impl ErrorExt for InnerError {
+ fn status_code(&self) -> StatusCode {
+ use InnerError::*;
+ match self {
+ UnsupportedExpr { .. } => StatusCode::InvalidArguments,
+ }
+ }
+ fn backtrace_opt(&self) -> Option<&Backtrace> {
+ ErrorCompat::backtrace(self)
+ }
+
+ fn as_any(&self) -> &dyn Any {
+ self
+ }
+}
+
+impl From<InnerError> for Error {
+ fn from(e: InnerError) -> Error {
+ Error::new(e)
+ }
+}
+
+pub type Result<T> = std::result::Result<T, Error>;
diff --git a/src/promql/src/lib.rs b/src/promql/src/lib.rs
new file mode 100644
index 000000000000..11415d1838a2
--- /dev/null
+++ b/src/promql/src/lib.rs
@@ -0,0 +1,16 @@
+// Copyright 2022 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+pub mod engine;
+pub mod error;
diff --git a/src/servers/Cargo.toml b/src/servers/Cargo.toml
index 3f7d09272139..1912346761c6 100644
--- a/src/servers/Cargo.toml
+++ b/src/servers/Cargo.toml
@@ -40,12 +40,12 @@ regex = "1.6"
rustls = "0.20"
rustls-pemfile = "1.0"
schemars = "0.8"
-strum = { version = "0.24", features = ["derive"] }
serde = "1.0"
serde_json = "1.0"
session = { path = "../session" }
snafu = { version = "0.7", features = ["backtraces"] }
snap = "1"
+strum = { version = "0.24", features = ["derive"] }
table = { path = "../table" }
tokio = { version = "1.20", features = ["full"] }
tokio-rustls = "0.23"
|
feat
|
promql create and skeleton (#720)
|
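The new promql crate above is mostly scaffolding: stub `Engine`/`Evaluator` types plus an error module built on snafu. A stripped-down sketch of that error pattern (assumes only snafu 0.7; it omits the project's `define_opaque_error!` wrapper and backtrace plumbing, and the `vector_selector` check is purely illustrative):

use snafu::prelude::*;

#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
pub enum Error {
    #[snafu(display("Unsupported expr type: {}", name))]
    UnsupportedExpr { name: String },
}

pub type Result<T> = std::result::Result<T, Error>;

// The engine skeleton only defines entry points for now; rejecting unsupported
// expressions is the one error path it needs from day one.
fn check_supported(expr_name: &str) -> Result<()> {
    ensure!(
        expr_name == "vector_selector",
        UnsupportedExprSnafu { name: expr_name }
    );
    Ok(())
}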
8b60c27c2e96008e78c30234d764cd7fcb609aaf
|
2024-10-31 11:45:45
|
Ruihang Xia
|
feat: enhance windowed-sort optimizer rule (#4910)
| false
|
diff --git a/src/file-engine/src/engine.rs b/src/file-engine/src/engine.rs
index e6313f4322cc..a29a3add23d6 100644
--- a/src/file-engine/src/engine.rs
+++ b/src/file-engine/src/engine.rs
@@ -91,8 +91,9 @@ impl RegionEngine for FileRegionEngine {
request: ScanRequest,
) -> Result<RegionScannerRef, BoxedError> {
let stream = self.handle_query(region_id, request).await?;
+ let metadata = self.get_metadata(region_id).await?;
// We don't support enabling append mode for file engine.
- let scanner = Box::new(SinglePartitionScanner::new(stream, false));
+ let scanner = Box::new(SinglePartitionScanner::new(stream, false, metadata));
Ok(scanner)
}
diff --git a/src/mito2/src/read/seq_scan.rs b/src/mito2/src/read/seq_scan.rs
index 1a789c8d5f21..9b7a71a36c51 100644
--- a/src/mito2/src/read/seq_scan.rs
+++ b/src/mito2/src/read/seq_scan.rs
@@ -27,6 +27,7 @@ use common_telemetry::tracing;
use datafusion::physical_plan::{DisplayAs, DisplayFormatType};
use datatypes::schema::SchemaRef;
use snafu::ResultExt;
+use store_api::metadata::RegionMetadataRef;
use store_api::region_engine::{PartitionRange, RegionScanner, ScannerProperties};
use store_api::storage::TimeSeriesRowSelector;
use tokio::sync::Semaphore;
@@ -321,6 +322,10 @@ impl RegionScanner for SeqScan {
let predicate = self.stream_ctx.input.predicate();
predicate.map(|p| !p.exprs().is_empty()).unwrap_or(false)
}
+
+ fn metadata(&self) -> RegionMetadataRef {
+ self.stream_ctx.input.mapper.metadata().clone()
+ }
}
impl DisplayAs for SeqScan {
diff --git a/src/mito2/src/read/unordered_scan.rs b/src/mito2/src/read/unordered_scan.rs
index 2401504f7915..707b7d4ba65c 100644
--- a/src/mito2/src/read/unordered_scan.rs
+++ b/src/mito2/src/read/unordered_scan.rs
@@ -26,6 +26,7 @@ use datafusion::physical_plan::{DisplayAs, DisplayFormatType};
use datatypes::schema::SchemaRef;
use futures::{Stream, StreamExt};
use snafu::ResultExt;
+use store_api::metadata::RegionMetadataRef;
use store_api::region_engine::{PartitionRange, RegionScanner, ScannerProperties};
use crate::error::{PartitionOutOfRangeSnafu, Result};
@@ -229,6 +230,10 @@ impl RegionScanner for UnorderedScan {
let predicate = self.stream_ctx.input.predicate();
predicate.map(|p| !p.exprs().is_empty()).unwrap_or(false)
}
+
+ fn metadata(&self) -> RegionMetadataRef {
+ self.stream_ctx.input.mapper.metadata().clone()
+ }
}
impl DisplayAs for UnorderedScan {
diff --git a/src/query/src/optimizer/parallelize_scan.rs b/src/query/src/optimizer/parallelize_scan.rs
index 19f5db39d333..02cd04df87b6 100644
--- a/src/query/src/optimizer/parallelize_scan.rs
+++ b/src/query/src/optimizer/parallelize_scan.rs
@@ -17,6 +17,7 @@ use std::sync::Arc;
use common_telemetry::debug;
use datafusion::config::ConfigOptions;
use datafusion::physical_optimizer::PhysicalOptimizerRule;
+use datafusion::physical_plan::sorts::sort::SortExec;
use datafusion::physical_plan::ExecutionPlan;
use datafusion_common::tree_node::{Transformed, TreeNode};
use datafusion_common::{DataFusionError, Result};
@@ -48,9 +49,16 @@ impl ParallelizeScan {
plan: Arc<dyn ExecutionPlan>,
config: &ConfigOptions,
) -> Result<Arc<dyn ExecutionPlan>> {
+ let mut first_order_expr = None;
+
let result = plan
.transform_down(|plan| {
- if let Some(region_scan_exec) = plan.as_any().downcast_ref::<RegionScanExec>() {
+ if let Some(sort_exec) = plan.as_any().downcast_ref::<SortExec>() {
+ // save the first order expr
+ first_order_expr = sort_exec.expr().first().cloned();
+ } else if let Some(region_scan_exec) =
+ plan.as_any().downcast_ref::<RegionScanExec>()
+ {
if region_scan_exec.is_partition_set() {
return Ok(Transformed::no(plan));
}
@@ -66,10 +74,21 @@ impl ParallelizeScan {
"Assign {total_range_num} ranges to {expected_partition_num} partitions"
);
- // sort the ranges in each partition
- // TODO(ruihang): smart sort!
- for ranges in partition_ranges.iter_mut() {
- ranges.sort_by(|a, b| a.start.cmp(&b.start));
+ // Sort the ranges in each partition based on the order expr
+ //
+ // This optimistically assumes that the first order expr is on the time index column
+ // to skip the validation of the order expr. As it's not harmful if this condition
+ // is not met.
+ if let Some(order_expr) = &first_order_expr
+ && order_expr.options.descending
+ {
+ for ranges in partition_ranges.iter_mut() {
+ ranges.sort_by(|a, b| b.end.cmp(&a.end));
+ }
+ } else {
+ for ranges in partition_ranges.iter_mut() {
+ ranges.sort_by(|a, b| a.start.cmp(&b.start));
+ }
}
// update the partition ranges
diff --git a/src/query/src/optimizer/windowed_sort.rs b/src/query/src/optimizer/windowed_sort.rs
index 62d4495cf335..63150fc1f896 100644
--- a/src/query/src/optimizer/windowed_sort.rs
+++ b/src/query/src/optimizer/windowed_sort.rs
@@ -77,7 +77,6 @@ impl WindowedSortPhysicalRule {
};
if let Some(first_sort_expr) = sort_exec.expr().first()
- && !first_sort_expr.options.descending
&& let Some(column_expr) = first_sort_expr
.expr
.as_any()
@@ -87,18 +86,28 @@ impl WindowedSortPhysicalRule {
} else {
return Ok(Transformed::no(plan));
}
-
let first_sort_expr = sort_exec.expr().first().unwrap().clone();
- let part_sort_exec = Arc::new(PartSortExec::new(
- first_sort_expr.clone(),
- scanner_info.partition_ranges.clone(),
- sort_exec.input().clone(),
- ));
+
+ // PartSortExec is unnecessary if:
+ // - there is no tag column, and
+ // - the sort is ascending on the time index column
+ let new_input = if scanner_info.tag_columns.is_empty()
+ && !first_sort_expr.options.descending
+ {
+ sort_exec.input().clone()
+ } else {
+ Arc::new(PartSortExec::new(
+ first_sort_expr.clone(),
+ scanner_info.partition_ranges.clone(),
+ sort_exec.input().clone(),
+ ))
+ };
+
let windowed_sort_exec = WindowedSortExec::try_new(
first_sort_expr,
sort_exec.fetch(),
scanner_info.partition_ranges,
- part_sort_exec,
+ new_input,
)?;
return Ok(Transformed {
@@ -119,11 +128,13 @@ impl WindowedSortPhysicalRule {
struct ScannerInfo {
partition_ranges: Vec<Vec<PartitionRange>>,
time_index: String,
+ tag_columns: Vec<String>,
}
fn fetch_partition_range(input: Arc<dyn ExecutionPlan>) -> DataFusionResult<Option<ScannerInfo>> {
let mut partition_ranges = None;
let mut time_index = None;
+ let mut tag_columns = None;
input.transform_up(|plan| {
// Unappliable case, reset the state.
@@ -139,6 +150,7 @@ fn fetch_partition_range(input: Arc<dyn ExecutionPlan>) -> DataFusionResult<Opti
if let Some(region_scan_exec) = plan.as_any().downcast_ref::<RegionScanExec>() {
partition_ranges = Some(region_scan_exec.get_uncollapsed_partition_ranges());
time_index = region_scan_exec.time_index();
+ tag_columns = Some(region_scan_exec.tag_columns());
// set distinguish_partition_ranges to true, this is an incorrect workaround
region_scan_exec.with_distinguish_partition_range(true);
@@ -151,6 +163,7 @@ fn fetch_partition_range(input: Arc<dyn ExecutionPlan>) -> DataFusionResult<Opti
ScannerInfo {
partition_ranges: partition_ranges?,
time_index: time_index?,
+ tag_columns: tag_columns?,
}
};
diff --git a/src/query/src/part_sort.rs b/src/query/src/part_sort.rs
index c4b9d35b20f9..2b258187b5b6 100644
--- a/src/query/src/part_sort.rs
+++ b/src/query/src/part_sort.rs
@@ -175,6 +175,7 @@ struct PartSortStream {
input_complete: bool,
schema: SchemaRef,
partition_ranges: Vec<PartitionRange>,
+ #[allow(dead_code)] // this is used under #[debug_assertions]
partition: usize,
cur_part_idx: usize,
metrics: BaselineMetrics,
diff --git a/src/query/src/window_sort.rs b/src/query/src/window_sort.rs
index 305585b2679d..38b64e29aaa0 100644
--- a/src/query/src/window_sort.rs
+++ b/src/query/src/window_sort.rs
@@ -270,6 +270,7 @@ pub struct WindowedSortStream {
/// working ranges promise once input stream get a value out of current range, future values will never be in this range
all_avail_working_range: Vec<(TimeRange, BTreeSet<usize>)>,
/// The input partition ranges
+ #[allow(dead_code)] // this is used under #[debug_assertions]
ranges: Vec<PartitionRange>,
/// Execution metrics
metrics: BaselineMetrics,
diff --git a/src/store-api/src/region_engine.rs b/src/store-api/src/region_engine.rs
index 0832385c930b..8dd706395d1d 100644
--- a/src/store-api/src/region_engine.rs
+++ b/src/store-api/src/region_engine.rs
@@ -265,6 +265,9 @@ pub trait RegionScanner: Debug + DisplayAs + Send {
/// Returns the schema of the record batches.
fn schema(&self) -> SchemaRef;
+ /// Returns the metadata of the region.
+ fn metadata(&self) -> RegionMetadataRef;
+
/// Prepares the scanner with the given partition ranges.
///
/// This method is for the planner to adjust the scanner's behavior based on the partition ranges.
@@ -414,11 +417,16 @@ pub struct SinglePartitionScanner {
stream: Mutex<Option<SendableRecordBatchStream>>,
schema: SchemaRef,
properties: ScannerProperties,
+ metadata: RegionMetadataRef,
}
impl SinglePartitionScanner {
- /// Creates a new [SinglePartitionScanner] with the given stream.
- pub fn new(stream: SendableRecordBatchStream, append_mode: bool) -> Self {
+ /// Creates a new [SinglePartitionScanner] with the given stream and metadata.
+ pub fn new(
+ stream: SendableRecordBatchStream,
+ append_mode: bool,
+ metadata: RegionMetadataRef,
+ ) -> Self {
let schema = stream.schema();
Self {
stream: Mutex::new(Some(stream)),
@@ -426,6 +434,7 @@ impl SinglePartitionScanner {
properties: ScannerProperties::default()
.with_parallelism(1)
.with_append_mode(append_mode),
+ metadata,
}
}
}
@@ -468,6 +477,10 @@ impl RegionScanner for SinglePartitionScanner {
fn has_predicate(&self) -> bool {
false
}
+
+ fn metadata(&self) -> RegionMetadataRef {
+ self.metadata.clone()
+ }
}
impl DisplayAs for SinglePartitionScanner {
diff --git a/src/table/src/table/scan.rs b/src/table/src/table/scan.rs
index 9207c1e86d6e..cc94a054de84 100644
--- a/src/table/src/table/scan.rs
+++ b/src/table/src/table/scan.rs
@@ -154,6 +154,16 @@ impl RegionScanExec {
.timestamp_column()
.map(|x| x.name.clone())
}
+
+ pub fn tag_columns(&self) -> Vec<String> {
+ self.scanner
+ .lock()
+ .unwrap()
+ .metadata()
+ .primary_key_columns()
+ .map(|col| col.column_schema.name.clone())
+ .collect()
+ }
}
impl ExecutionPlan for RegionScanExec {
@@ -301,33 +311,45 @@ impl DfRecordBatchStream for StreamWithMetricWrapper {
mod test {
use std::sync::Arc;
+ use api::v1::SemanticType;
use common_recordbatch::{RecordBatch, RecordBatches};
use datafusion::prelude::SessionContext;
use datatypes::data_type::ConcreteDataType;
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
- use datatypes::vectors::Int32Vector;
+ use datatypes::vectors::{Int32Vector, TimestampMillisecondVector};
use futures::TryStreamExt;
+ use store_api::metadata::{ColumnMetadata, RegionMetadataBuilder};
use store_api::region_engine::SinglePartitionScanner;
+ use store_api::storage::RegionId;
use super::*;
#[tokio::test]
async fn test_simple_table_scan() {
let ctx = SessionContext::new();
- let schema = Arc::new(Schema::new(vec![ColumnSchema::new(
- "a",
- ConcreteDataType::int32_datatype(),
- false,
- )]));
+ let schema = Arc::new(Schema::new(vec![
+ ColumnSchema::new("a", ConcreteDataType::int32_datatype(), false),
+ ColumnSchema::new(
+ "b",
+ ConcreteDataType::timestamp_millisecond_datatype(),
+ false,
+ ),
+ ]));
let batch1 = RecordBatch::new(
schema.clone(),
- vec![Arc::new(Int32Vector::from_slice([1, 2])) as _],
+ vec![
+ Arc::new(Int32Vector::from_slice([1, 2])) as _,
+ Arc::new(TimestampMillisecondVector::from_slice([1000, 2000])) as _,
+ ],
)
.unwrap();
let batch2 = RecordBatch::new(
schema.clone(),
- vec![Arc::new(Int32Vector::from_slice([3, 4, 5])) as _],
+ vec![
+ Arc::new(Int32Vector::from_slice([3, 4, 5])) as _,
+ Arc::new(TimestampMillisecondVector::from_slice([3000, 4000, 5000])) as _,
+ ],
)
.unwrap();
@@ -335,7 +357,26 @@ mod test {
RecordBatches::try_new(schema.clone(), vec![batch1.clone(), batch2.clone()]).unwrap();
let stream = recordbatches.as_stream();
- let scanner = Box::new(SinglePartitionScanner::new(stream, false));
+ let mut builder = RegionMetadataBuilder::new(RegionId::new(1234, 5678));
+ builder
+ .push_column_metadata(ColumnMetadata {
+ column_schema: ColumnSchema::new("a", ConcreteDataType::int32_datatype(), false),
+ semantic_type: SemanticType::Tag,
+ column_id: 1,
+ })
+ .push_column_metadata(ColumnMetadata {
+ column_schema: ColumnSchema::new(
+ "b",
+ ConcreteDataType::timestamp_millisecond_datatype(),
+ false,
+ ),
+ semantic_type: SemanticType::Timestamp,
+ column_id: 2,
+ })
+ .primary_key(vec![1]);
+ let region_metadata = Arc::new(builder.build().unwrap());
+
+ let scanner = Box::new(SinglePartitionScanner::new(stream, false, region_metadata));
let plan = RegionScanExec::new(scanner);
let actual: SchemaRef = Arc::new(
plan.properties
diff --git a/tests/cases/standalone/common/order/windowed_sort.result b/tests/cases/standalone/common/order/windowed_sort.result
index 1cc0ab7720e3..9ecec83d2053 100644
--- a/tests/cases/standalone/common/order/windowed_sort.result
+++ b/tests/cases/standalone/common/order/windowed_sort.result
@@ -1,3 +1,4 @@
+-- Test without PK, with a windowed sort query.
CREATE TABLE test(i INTEGER, t TIMESTAMP TIME INDEX);
Affected Rows: 0
@@ -69,7 +70,39 @@ EXPLAIN ANALYZE SELECT * FROM test ORDER BY t LIMIT 5;
| 1_| 0_|_GlobalLimitExec: skip=0, fetch=5 REDACTED
|_|_|_SortPreservingMergeExec: [t@1 ASC NULLS LAST] REDACTED
|_|_|_WindowedSortExec REDACTED
-|_|_|_PartSortExec t@1 ASC NULLS LAST REDACTED
+|_|_|_SeqScan: region=REDACTED, partition_count=2 (1 memtable ranges, 1 file 1 ranges) REDACTED
+|_|_|_|
+|_|_| Total rows: 5_|
++-+-+-+
+
+SELECT * FROM test ORDER BY t DESC LIMIT 5;
+
++---+-------------------------+
+| i | t |
++---+-------------------------+
+| 4 | 1970-01-01T00:00:00.012 |
+| 4 | 1970-01-01T00:00:00.011 |
+| 4 | 1970-01-01T00:00:00.010 |
+| 3 | 1970-01-01T00:00:00.009 |
+| 3 | 1970-01-01T00:00:00.008 |
++---+-------------------------+
+
+-- SQLNESS REPLACE (-+) -
+-- SQLNESS REPLACE (\s\s+) _
+-- SQLNESS REPLACE (peers.*) REDACTED
+-- SQLNESS REPLACE (metrics.*) REDACTED
+-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED
+EXPLAIN ANALYZE SELECT * FROM test ORDER BY t DESC LIMIT 5;
+
++-+-+-+
+| stage | node | plan_|
++-+-+-+
+| 0_| 0_|_MergeScanExec: REDACTED
+|_|_|_|
+| 1_| 0_|_GlobalLimitExec: skip=0, fetch=5 REDACTED
+|_|_|_SortPreservingMergeExec: [t@1 DESC] REDACTED
+|_|_|_WindowedSortExec REDACTED
+|_|_|_PartSortExec t@1 DESC REDACTED
|_|_|_SeqScan: region=REDACTED, partition_count=2 (1 memtable ranges, 1 file 1 ranges) REDACTED
|_|_|_|
|_|_| Total rows: 5_|
@@ -79,3 +112,118 @@ DROP TABLE test;
Affected Rows: 0
+-- Test with PK, with a windowed sort query.
+CREATE TABLE test_pk(pk INTEGER PRIMARY KEY, i INTEGER, t TIMESTAMP TIME INDEX);
+
+Affected Rows: 0
+
+INSERT INTO test_pk VALUES (1, 1, 1), (2, NULL, 2), (3, 1, 3);
+
+Affected Rows: 3
+
+ADMIN FLUSH_TABLE('test_pk');
+
++------------------------------+
+| ADMIN FLUSH_TABLE('test_pk') |
++------------------------------+
+| 0 |
++------------------------------+
+
+INSERT INTO test_pk VALUES (4, 2, 4), (5, 2, 5), (6, NULL, 6);
+
+Affected Rows: 3
+
+ADMIN FLUSH_TABLE('test_pk');
+
++------------------------------+
+| ADMIN FLUSH_TABLE('test_pk') |
++------------------------------+
+| 0 |
++------------------------------+
+
+INSERT INTO test_pk VALUES (7, 3, 7), (8, 3, 8), (9, 3, 9);
+
+Affected Rows: 3
+
+ADMIN FLUSH_TABLE('test_pk');
+
++------------------------------+
+| ADMIN FLUSH_TABLE('test_pk') |
++------------------------------+
+| 0 |
++------------------------------+
+
+INSERT INTO test_pk VALUES (10, 4, 10), (11, 4, 11), (12, 4, 12);
+
+Affected Rows: 3
+
+SELECT * FROM test_pk ORDER BY t LIMIT 5;
+
++----+---+-------------------------+
+| pk | i | t |
++----+---+-------------------------+
+| 1 | 1 | 1970-01-01T00:00:00.001 |
+| 2 | | 1970-01-01T00:00:00.002 |
+| 3 | 1 | 1970-01-01T00:00:00.003 |
+| 4 | 2 | 1970-01-01T00:00:00.004 |
+| 5 | 2 | 1970-01-01T00:00:00.005 |
++----+---+-------------------------+
+
+-- SQLNESS REPLACE (-+) -
+-- SQLNESS REPLACE (\s\s+) _
+-- SQLNESS REPLACE (peers.*) REDACTED
+-- SQLNESS REPLACE (metrics.*) REDACTED
+-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED
+EXPLAIN ANALYZE SELECT * FROM test_pk ORDER BY t LIMIT 5;
+
++-+-+-+
+| stage | node | plan_|
++-+-+-+
+| 0_| 0_|_MergeScanExec: REDACTED
+|_|_|_|
+| 1_| 0_|_GlobalLimitExec: skip=0, fetch=5 REDACTED
+|_|_|_SortPreservingMergeExec: [t@2 ASC NULLS LAST] REDACTED
+|_|_|_WindowedSortExec REDACTED
+|_|_|_PartSortExec t@2 ASC NULLS LAST REDACTED
+|_|_|_SeqScan: region=REDACTED, partition_count=2 (1 memtable ranges, 1 file 1 ranges) REDACTED
+|_|_|_|
+|_|_| Total rows: 5_|
++-+-+-+
+
+SELECT * FROM test_pk ORDER BY t DESC LIMIT 5;
+
++----+---+-------------------------+
+| pk | i | t |
++----+---+-------------------------+
+| 12 | 4 | 1970-01-01T00:00:00.012 |
+| 11 | 4 | 1970-01-01T00:00:00.011 |
+| 10 | 4 | 1970-01-01T00:00:00.010 |
+| 9 | 3 | 1970-01-01T00:00:00.009 |
+| 8 | 3 | 1970-01-01T00:00:00.008 |
++----+---+-------------------------+
+
+-- SQLNESS REPLACE (-+) -
+-- SQLNESS REPLACE (\s\s+) _
+-- SQLNESS REPLACE (peers.*) REDACTED
+-- SQLNESS REPLACE (metrics.*) REDACTED
+-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED
+EXPLAIN ANALYZE SELECT * FROM test_pk ORDER BY t DESC LIMIT 5;
+
++-+-+-+
+| stage | node | plan_|
++-+-+-+
+| 0_| 0_|_MergeScanExec: REDACTED
+|_|_|_|
+| 1_| 0_|_GlobalLimitExec: skip=0, fetch=5 REDACTED
+|_|_|_SortPreservingMergeExec: [t@2 DESC] REDACTED
+|_|_|_WindowedSortExec REDACTED
+|_|_|_PartSortExec t@2 DESC REDACTED
+|_|_|_SeqScan: region=REDACTED, partition_count=2 (1 memtable ranges, 1 file 1 ranges) REDACTED
+|_|_|_|
+|_|_| Total rows: 5_|
++-+-+-+
+
+DROP TABLE test_pk;
+
+Affected Rows: 0
+
diff --git a/tests/cases/standalone/common/order/windowed_sort.sql b/tests/cases/standalone/common/order/windowed_sort.sql
index 7767825e3d0c..e8006f74ce17 100644
--- a/tests/cases/standalone/common/order/windowed_sort.sql
+++ b/tests/cases/standalone/common/order/windowed_sort.sql
@@ -1,3 +1,4 @@
+-- Test without PK, with a windowed sort query.
CREATE TABLE test(i INTEGER, t TIMESTAMP TIME INDEX);
INSERT INTO test VALUES (1, 1), (NULL, 2), (1, 3);
@@ -23,4 +24,50 @@ SELECT * FROM test ORDER BY t LIMIT 5;
-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED
EXPLAIN ANALYZE SELECT * FROM test ORDER BY t LIMIT 5;
+SELECT * FROM test ORDER BY t DESC LIMIT 5;
+
+-- SQLNESS REPLACE (-+) -
+-- SQLNESS REPLACE (\s\s+) _
+-- SQLNESS REPLACE (peers.*) REDACTED
+-- SQLNESS REPLACE (metrics.*) REDACTED
+-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED
+EXPLAIN ANALYZE SELECT * FROM test ORDER BY t DESC LIMIT 5;
+
DROP TABLE test;
+
+-- Test with PK, with a windowed sort query.
+CREATE TABLE test_pk(pk INTEGER PRIMARY KEY, i INTEGER, t TIMESTAMP TIME INDEX);
+
+INSERT INTO test_pk VALUES (1, 1, 1), (2, NULL, 2), (3, 1, 3);
+
+ADMIN FLUSH_TABLE('test_pk');
+
+INSERT INTO test_pk VALUES (4, 2, 4), (5, 2, 5), (6, NULL, 6);
+
+ADMIN FLUSH_TABLE('test_pk');
+
+INSERT INTO test_pk VALUES (7, 3, 7), (8, 3, 8), (9, 3, 9);
+
+ADMIN FLUSH_TABLE('test_pk');
+
+INSERT INTO test_pk VALUES (10, 4, 10), (11, 4, 11), (12, 4, 12);
+
+SELECT * FROM test_pk ORDER BY t LIMIT 5;
+
+-- SQLNESS REPLACE (-+) -
+-- SQLNESS REPLACE (\s\s+) _
+-- SQLNESS REPLACE (peers.*) REDACTED
+-- SQLNESS REPLACE (metrics.*) REDACTED
+-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED
+EXPLAIN ANALYZE SELECT * FROM test_pk ORDER BY t LIMIT 5;
+
+SELECT * FROM test_pk ORDER BY t DESC LIMIT 5;
+
+-- SQLNESS REPLACE (-+) -
+-- SQLNESS REPLACE (\s\s+) _
+-- SQLNESS REPLACE (peers.*) REDACTED
+-- SQLNESS REPLACE (metrics.*) REDACTED
+-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED
+EXPLAIN ANALYZE SELECT * FROM test_pk ORDER BY t DESC LIMIT 5;
+
+DROP TABLE test_pk;
|
feat
|
enhance windowed-sort optimizer rule (#4910)
|
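The `ParallelizeScan` change above picks a range order per partition from the first sort expression's direction: ascending sorts order ranges by `start`, descending sorts order them by `end` in reverse so the latest data is scanned first. A std-only sketch of that decision (`Range` and `sort_ranges` are hypothetical stand-ins for `PartitionRange` and the in-place sort inside the rule):

#[derive(Debug, Clone, Copy, PartialEq)]
struct Range {
    start: i64,
    end: i64,
}

fn sort_ranges(ranges: &mut [Range], descending: bool) {
    if descending {
        // Latest-ending range first, matching `ORDER BY t DESC`.
        ranges.sort_by(|a, b| b.end.cmp(&a.end));
    } else {
        // Earliest-starting range first, matching `ORDER BY t`.
        ranges.sort_by(|a, b| a.start.cmp(&b.start));
    }
}

fn main() {
    let mut ranges = vec![
        Range { start: 10, end: 20 },
        Range { start: 0, end: 5 },
        Range { start: 5, end: 12 },
    ];
    sort_ranges(&mut ranges, true);
    // For a descending time-index sort the range covering the latest timestamps comes first.
    assert_eq!(ranges[0], Range { start: 10, end: 20 });
}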
53906038559da758332cca1433aba4d991242fc2
|
2024-06-17 08:33:47
|
zyy17
|
refactor: add `Compactor` trait to abstract the compaction (#4097)
| false
|
diff --git a/src/mito2/src/compaction.rs b/src/mito2/src/compaction.rs
index dd08865581f1..2eb0cf3d8504 100644
--- a/src/mito2/src/compaction.rs
+++ b/src/mito2/src/compaction.rs
@@ -13,7 +13,8 @@
// limitations under the License.
mod buckets;
-mod picker;
+pub mod compactor;
+pub mod picker;
mod task;
#[cfg(test)]
mod test_util;
@@ -31,7 +32,6 @@ use common_time::timestamp::TimeUnit;
use common_time::Timestamp;
use datafusion_common::ScalarValue;
use datafusion_expr::Expr;
-pub use picker::CompactionPickerRef;
use snafu::{OptionExt, ResultExt};
use store_api::metadata::RegionMetadataRef;
use store_api::storage::RegionId;
@@ -40,8 +40,9 @@ use tokio::sync::mpsc::{self, Sender};
use crate::access_layer::AccessLayerRef;
use crate::cache::CacheManagerRef;
-use crate::compaction::twcs::TwcsPicker;
-use crate::compaction::window::WindowedCompactionPicker;
+use crate::compaction::compactor::{CompactionRegion, DefaultCompactor};
+use crate::compaction::picker::{new_picker, CompactionTask};
+use crate::compaction::task::CompactionTaskImpl;
use crate::config::MitoConfig;
use crate::error::{
CompactRegionSnafu, Error, RegionClosedSnafu, RegionDroppedSnafu, RegionTruncatedSnafu, Result,
@@ -52,7 +53,6 @@ use crate::read::projection::ProjectionMapper;
use crate::read::scan_region::ScanInput;
use crate::read::seq_scan::SeqScan;
use crate::read::BoxedBatchReader;
-use crate::region::options::CompactionOptions;
use crate::region::version::{VersionControlRef, VersionRef};
use crate::region::ManifestContextRef;
use crate::request::{OptionOutputTx, OutputTx, WorkerRequest};
@@ -90,17 +90,6 @@ impl CompactionRequest {
}
}
-/// Builds compaction picker according to [CompactionOptions].
-pub fn compaction_options_to_picker(strategy: &CompactionOptions) -> CompactionPickerRef {
- match strategy {
- CompactionOptions::Twcs(twcs_opts) => Arc::new(TwcsPicker::new(
- twcs_opts.max_active_window_files,
- twcs_opts.max_inactive_window_files,
- twcs_opts.time_window_seconds(),
- )) as Arc<_>,
- }
-}
-
/// Compaction scheduler tracks and manages compaction tasks.
pub(crate) struct CompactionScheduler {
scheduler: SchedulerRef,
@@ -232,34 +221,13 @@ impl CompactionScheduler {
request: CompactionRequest,
options: compact_request::Options,
) -> Result<()> {
- let picker = if let compact_request::Options::StrictWindow(window) = &options {
- let window = if window.window_seconds == 0 {
- None
- } else {
- Some(window.window_seconds)
- };
- Arc::new(WindowedCompactionPicker::new(window)) as Arc<_>
- } else {
- compaction_options_to_picker(&request.current_version.options.compaction)
- };
-
let region_id = request.region_id();
- debug!(
- "Pick compaction strategy {:?} for region: {}",
- picker, region_id
- );
-
- let pick_timer = COMPACTION_STAGE_ELAPSED
- .with_label_values(&["pick"])
- .start_timer();
- let Some(mut task) = picker.pick(request) else {
+ let Some(mut task) = self.build_compaction_task(request, options) else {
// Nothing to compact, remove it from the region status map.
self.region_status.remove(®ion_id);
return Ok(());
};
- drop(pick_timer);
-
// Submit the compaction task.
self.scheduler
.schedule(Box::pin(async move {
@@ -282,6 +250,70 @@ impl CompactionScheduler {
// Notifies all pending tasks.
status.on_failure(err);
}
+
+ fn build_compaction_task(
+ &self,
+ req: CompactionRequest,
+ options: compact_request::Options,
+ ) -> Option<Box<dyn CompactionTask>> {
+ let picker = new_picker(options, &req.current_version.options.compaction);
+ let region_id = req.region_id();
+ let CompactionRequest {
+ engine_config,
+ current_version,
+ access_layer,
+ request_sender,
+ waiters,
+ start_time,
+ cache_manager,
+ manifest_ctx,
+ listener,
+ } = req;
+ debug!(
+ "Pick compaction strategy {:?} for region: {}",
+ picker, region_id
+ );
+
+ let compaction_region = CompactionRegion {
+ region_id,
+ current_version: current_version.clone(),
+ region_options: current_version.options.clone(),
+ engine_config: engine_config.clone(),
+ region_metadata: current_version.metadata.clone(),
+ cache_manager: cache_manager.clone(),
+ access_layer: access_layer.clone(),
+ manifest_ctx: manifest_ctx.clone(),
+ };
+
+ let picker_output = {
+ let _pick_timer = COMPACTION_STAGE_ELAPSED
+ .with_label_values(&["pick"])
+ .start_timer();
+ picker.pick(&compaction_region)
+ };
+
+ let picker_output = if let Some(picker_output) = picker_output {
+ picker_output
+ } else {
+ // Nothing to compact, we are done. Notifies all waiters as we consume the compaction request.
+ for waiter in waiters {
+ waiter.send(Ok(0));
+ }
+ return None;
+ };
+
+ let task = CompactionTaskImpl {
+ request_sender,
+ waiters,
+ start_time,
+ listener,
+ picker_output,
+ compaction_region,
+ compactor: Arc::new(DefaultCompactor {}),
+ };
+
+ Some(Box::new(task))
+ }
}
impl Drop for CompactionScheduler {
@@ -395,8 +427,8 @@ impl CompactionStatus {
}
}
-#[derive(Debug)]
-pub(crate) struct CompactionOutput {
+#[derive(Debug, Clone)]
+pub struct CompactionOutput {
pub output_file_id: FileId,
/// Compaction output file level.
pub output_level: Level,
diff --git a/src/mito2/src/compaction/compactor.rs b/src/mito2/src/compaction/compactor.rs
new file mode 100644
index 000000000000..a6694c8ef7bd
--- /dev/null
+++ b/src/mito2/src/compaction/compactor.rs
@@ -0,0 +1,416 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::collections::HashMap;
+use std::sync::Arc;
+use std::time::Duration;
+
+use api::v1::region::compact_request;
+use common_telemetry::info;
+use object_store::manager::ObjectStoreManager;
+use smallvec::SmallVec;
+use snafu::{OptionExt, ResultExt};
+use store_api::metadata::RegionMetadataRef;
+use store_api::storage::RegionId;
+
+use crate::access_layer::{AccessLayer, AccessLayerRef, SstWriteRequest};
+use crate::cache::{CacheManager, CacheManagerRef};
+use crate::compaction::build_sst_reader;
+use crate::compaction::picker::{new_picker, PickerOutput};
+use crate::config::MitoConfig;
+use crate::error::{EmptyRegionDirSnafu, JoinSnafu, ObjectStoreNotFoundSnafu, Result};
+use crate::manifest::action::{RegionEdit, RegionMetaAction, RegionMetaActionList};
+use crate::manifest::manager::{RegionManifestManager, RegionManifestOptions};
+use crate::manifest::storage::manifest_compress_type;
+use crate::memtable::time_partition::TimePartitions;
+use crate::memtable::MemtableBuilderProvider;
+use crate::read::Source;
+use crate::region::opener::new_manifest_dir;
+use crate::region::options::RegionOptions;
+use crate::region::version::{VersionBuilder, VersionControl, VersionRef};
+use crate::region::ManifestContext;
+use crate::region::RegionState::Writable;
+use crate::schedule::scheduler::LocalScheduler;
+use crate::sst::file::{FileMeta, IndexType};
+use crate::sst::file_purger::LocalFilePurger;
+use crate::sst::index::intermediate::IntermediateManager;
+use crate::sst::parquet::WriteOptions;
+
+/// CompactionRegion represents a region that needs to be compacted.
+/// It's a subset of MitoRegion.
+#[derive(Clone)]
+pub struct CompactionRegion {
+ pub region_id: RegionId,
+ pub region_options: RegionOptions,
+
+ pub(crate) engine_config: Arc<MitoConfig>,
+ pub(crate) region_metadata: RegionMetadataRef,
+ pub(crate) cache_manager: CacheManagerRef,
+ pub(crate) access_layer: AccessLayerRef,
+ pub(crate) manifest_ctx: Arc<ManifestContext>,
+ pub(crate) current_version: VersionRef,
+}
+
+/// CompactorRequest represents the request to compact a region.
+#[derive(Debug, Clone)]
+pub struct CompactorRequest {
+ pub region_id: RegionId,
+ pub region_dir: String,
+ pub region_options: HashMap<String, String>,
+ pub compaction_options: compact_request::Options,
+ pub picker_output: PickerOutput,
+}
+
+/// Open a compaction region from a compaction request.
+/// It's a simplified version of RegionOpener::open().
+pub async fn open_compaction_region(
+ req: &CompactorRequest,
+ mito_config: &MitoConfig,
+ object_store_manager: ObjectStoreManager,
+) -> Result<CompactionRegion> {
+ let region_options = RegionOptions::try_from(&req.region_options)?;
+ let object_store = {
+ let name = ®ion_options.storage;
+ if let Some(name) = name {
+ object_store_manager
+ .find(name)
+ .context(ObjectStoreNotFoundSnafu {
+ object_store: name.to_string(),
+ })?
+ } else {
+ object_store_manager.default_object_store()
+ }
+ };
+
+ let access_layer = {
+ let intermediate_manager =
+ IntermediateManager::init_fs(mito_config.inverted_index.intermediate_path.clone())
+ .await?;
+
+ Arc::new(AccessLayer::new(
+ req.region_dir.as_str(),
+ object_store.clone(),
+ intermediate_manager,
+ ))
+ };
+
+ let manifest_manager = {
+ let region_manifest_options = RegionManifestOptions {
+ manifest_dir: new_manifest_dir(req.region_dir.as_str()),
+ object_store: object_store.clone(),
+ compress_type: manifest_compress_type(mito_config.compress_manifest),
+ checkpoint_distance: mito_config.manifest_checkpoint_distance,
+ };
+
+ RegionManifestManager::open(region_manifest_options, Default::default())
+ .await?
+ .context(EmptyRegionDirSnafu {
+ region_id: req.region_id,
+ region_dir: req.region_dir.as_str(),
+ })?
+ };
+
+ let manifest = manifest_manager.manifest();
+ let region_metadata = manifest.metadata.clone();
+ let manifest_ctx = Arc::new(ManifestContext::new(manifest_manager, Writable));
+
+ let file_purger = {
+ let purge_scheduler = Arc::new(LocalScheduler::new(mito_config.max_background_jobs));
+ Arc::new(LocalFilePurger::new(
+ purge_scheduler.clone(),
+ access_layer.clone(),
+ None,
+ ))
+ };
+
+ let current_version = {
+ let memtable_builder = MemtableBuilderProvider::new(None, Arc::new(mito_config.clone()))
+ .builder_for_options(
+ region_options.memtable.as_ref(),
+ !region_options.append_mode,
+ );
+
+ // Initial memtable id is 0.
+ let mutable = Arc::new(TimePartitions::new(
+ region_metadata.clone(),
+ memtable_builder.clone(),
+ 0,
+ region_options.compaction.time_window(),
+ ));
+
+ let version = VersionBuilder::new(region_metadata.clone(), mutable)
+ .add_files(file_purger.clone(), manifest.files.values().cloned())
+ .flushed_entry_id(manifest.flushed_entry_id)
+ .flushed_sequence(manifest.flushed_sequence)
+ .truncated_entry_id(manifest.truncated_entry_id)
+ .compaction_time_window(manifest.compaction_time_window)
+ .options(region_options.clone())
+ .build();
+ let version_control = Arc::new(VersionControl::new(version));
+ version_control.current().version
+ };
+
+ Ok(CompactionRegion {
+ region_options: region_options.clone(),
+ manifest_ctx,
+ access_layer,
+ current_version,
+ region_id: req.region_id,
+ cache_manager: Arc::new(CacheManager::default()),
+ engine_config: Arc::new(mito_config.clone()),
+ region_metadata: region_metadata.clone(),
+ })
+}
+
+/// [`MergeOutput`] represents the output of merging SST files.
+#[derive(Default, Clone, Debug)]
+pub struct MergeOutput {
+ pub files_to_add: Vec<FileMeta>,
+ pub files_to_remove: Vec<FileMeta>,
+ pub compaction_time_window: Option<i64>,
+}
+
+impl MergeOutput {
+ pub fn is_empty(&self) -> bool {
+ self.files_to_add.is_empty() && self.files_to_remove.is_empty()
+ }
+}
+
+/// Compactor is the trait that defines the compaction logic.
+#[async_trait::async_trait]
+pub trait Compactor: Send + Sync + 'static {
+ /// Merge SST files for a region.
+ async fn merge_ssts(
+ &self,
+ compaction_region: &CompactionRegion,
+ picker_output: PickerOutput,
+ ) -> Result<MergeOutput>;
+
+ /// Update the manifest after merging SST files.
+ async fn update_manifest(
+ &self,
+ compaction_region: &CompactionRegion,
+ merge_output: MergeOutput,
+ ) -> Result<RegionEdit>;
+
+ /// Execute compaction for a region.
+ async fn compact(
+ &self,
+ compaction_region: &CompactionRegion,
+ compact_request_options: compact_request::Options,
+ ) -> Result<()>;
+}
+
+/// DefaultCompactor is the default implementation of Compactor.
+pub struct DefaultCompactor;
+
+#[async_trait::async_trait]
+impl Compactor for DefaultCompactor {
+ async fn merge_ssts(
+ &self,
+ compaction_region: &CompactionRegion,
+ mut picker_output: PickerOutput,
+ ) -> Result<MergeOutput> {
+ let mut futs = Vec::with_capacity(picker_output.outputs.len());
+ let mut compacted_inputs =
+ Vec::with_capacity(picker_output.outputs.iter().map(|o| o.inputs.len()).sum());
+
+ for output in picker_output.outputs.drain(..) {
+ compacted_inputs.extend(output.inputs.iter().map(|f| f.meta_ref().clone()));
+
+ info!(
+ "Compaction region {} output [{}]-> {}",
+ compaction_region.region_id,
+ output
+ .inputs
+ .iter()
+ .map(|f| f.file_id().to_string())
+ .collect::<Vec<_>>()
+ .join(","),
+ output.output_file_id
+ );
+
+ let write_opts = WriteOptions {
+ write_buffer_size: compaction_region.engine_config.sst_write_buffer_size,
+ ..Default::default()
+ };
+ let create_inverted_index = compaction_region
+ .engine_config
+ .inverted_index
+ .create_on_compaction
+ .auto();
+ let mem_threshold_index_create = compaction_region
+ .engine_config
+ .inverted_index
+ .mem_threshold_on_create
+ .map(|m| m.as_bytes() as _);
+ let index_write_buffer_size = Some(
+ compaction_region
+ .engine_config
+ .inverted_index
+ .write_buffer_size
+ .as_bytes() as usize,
+ );
+
+ let region_metadata = compaction_region.region_metadata.clone();
+ let sst_layer = compaction_region.access_layer.clone();
+ let region_id = compaction_region.region_id;
+ let file_id = output.output_file_id;
+ let cache_manager = compaction_region.cache_manager.clone();
+ let storage = compaction_region.region_options.storage.clone();
+ let index_options = compaction_region
+ .current_version
+ .options
+ .index_options
+ .clone();
+ let append_mode = compaction_region.current_version.options.append_mode;
+ futs.push(async move {
+ let reader = build_sst_reader(
+ region_metadata.clone(),
+ sst_layer.clone(),
+ Some(cache_manager.clone()),
+ &output.inputs,
+ append_mode,
+ output.filter_deleted,
+ output.output_time_range,
+ )
+ .await?;
+ let file_meta_opt = sst_layer
+ .write_sst(
+ SstWriteRequest {
+ file_id,
+ metadata: region_metadata,
+ source: Source::Reader(reader),
+ cache_manager,
+ storage,
+ create_inverted_index,
+ mem_threshold_index_create,
+ index_write_buffer_size,
+ index_options,
+ },
+ &write_opts,
+ )
+ .await?
+ .map(|sst_info| FileMeta {
+ region_id,
+ file_id,
+ time_range: sst_info.time_range,
+ level: output.output_level,
+ file_size: sst_info.file_size,
+ available_indexes: sst_info
+ .inverted_index_available
+ .then(|| SmallVec::from_iter([IndexType::InvertedIndex]))
+ .unwrap_or_default(),
+ index_file_size: sst_info.index_file_size,
+ });
+ Ok(file_meta_opt)
+ });
+ }
+ let mut output_files = Vec::with_capacity(futs.len());
+ while !futs.is_empty() {
+ let mut task_chunk =
+ Vec::with_capacity(crate::compaction::task::MAX_PARALLEL_COMPACTION);
+ for _ in 0..crate::compaction::task::MAX_PARALLEL_COMPACTION {
+ if let Some(task) = futs.pop() {
+ task_chunk.push(common_runtime::spawn_bg(task));
+ }
+ }
+ let metas = futures::future::try_join_all(task_chunk)
+ .await
+ .context(JoinSnafu)?
+ .into_iter()
+ .collect::<Result<Vec<_>>>()?;
+ output_files.extend(metas.into_iter().flatten());
+ }
+
+ let mut inputs: Vec<_> = compacted_inputs.into_iter().collect();
+ inputs.extend(
+ picker_output
+ .expired_ssts
+ .iter()
+ .map(|f| f.meta_ref().clone()),
+ );
+
+ Ok(MergeOutput {
+ files_to_add: output_files,
+ files_to_remove: inputs,
+ compaction_time_window: Some(picker_output.time_window_size),
+ })
+ }
+
+ async fn update_manifest(
+ &self,
+ compaction_region: &CompactionRegion,
+ merge_output: MergeOutput,
+ ) -> Result<RegionEdit> {
+ // Write region edit to manifest.
+ let edit = RegionEdit {
+ files_to_add: merge_output.files_to_add,
+ files_to_remove: merge_output.files_to_remove,
+ compaction_time_window: merge_output
+ .compaction_time_window
+ .map(|seconds| Duration::from_secs(seconds as u64)),
+ flushed_entry_id: None,
+ flushed_sequence: None,
+ };
+
+ let action_list = RegionMetaActionList::with_action(RegionMetaAction::Edit(edit.clone()));
+ // TODO: We might leak files if we fail to update manifest. We can add a cleanup task to remove them later.
+ compaction_region
+ .manifest_ctx
+ .update_manifest(Writable, action_list)
+ .await?;
+
+ Ok(edit)
+ }
+
+ // The default implementation of compact combines the merge_ssts and update_manifest functions.
+ // Note: It's local compaction and only used for testing purposes.
+ async fn compact(
+ &self,
+ compaction_region: &CompactionRegion,
+ compact_request_options: compact_request::Options,
+ ) -> Result<()> {
+ let picker_output = {
+ let picker_output = new_picker(
+ compact_request_options,
+ &compaction_region.region_options.compaction,
+ )
+ .pick(compaction_region);
+
+ if let Some(picker_output) = picker_output {
+ picker_output
+ } else {
+ info!(
+ "No files to compact for region_id: {}",
+ compaction_region.region_id
+ );
+ return Ok(());
+ }
+ };
+
+ let merge_output = self.merge_ssts(compaction_region, picker_output).await?;
+ if merge_output.is_empty() {
+ info!(
+ "No files to compact for region_id: {}",
+ compaction_region.region_id
+ );
+ return Ok(());
+ }
+ self.update_manifest(compaction_region, merge_output)
+ .await?;
+
+ Ok(())
+ }
+}
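
The new compactor.rs above exposes everything needed to drive a compaction outside the region worker: open_compaction_region rebuilds a lightweight CompactionRegion from the manifest, and DefaultCompactor::compact runs the pick/merge/update-manifest loop locally. A minimal sketch of wiring them together, assuming paths that follow the `pub mod compaction` / `pub mod compactor` declarations in this commit and that the caller already holds a CompactorRequest, a MitoConfig and an ObjectStoreManager; compact_once is a hypothetical helper, not part of the change:

use mito2::compaction::compactor::{
    open_compaction_region, Compactor, CompactorRequest, DefaultCompactor,
};
use mito2::config::MitoConfig;
use object_store::manager::ObjectStoreManager;

async fn compact_once(
    req: CompactorRequest,
    config: MitoConfig,
    object_store_manager: ObjectStoreManager,
) -> mito2::error::Result<()> {
    // Rebuild the region state (manifest, SST handles, options) without
    // opening a full MitoRegion.
    let compaction_region = open_compaction_region(&req, &config, object_store_manager).await?;

    // DefaultCompactor picks files, merges SSTs and writes the manifest edit.
    // As the comment in the diff notes, this local path is mainly for testing.
    DefaultCompactor {}
        .compact(&compaction_region, req.compaction_options)
        .await
}
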
diff --git a/src/mito2/src/compaction/picker.rs b/src/mito2/src/compaction/picker.rs
index d97229e6ac21..715e8effecdc 100644
--- a/src/mito2/src/compaction/picker.rs
+++ b/src/mito2/src/compaction/picker.rs
@@ -15,17 +15,55 @@
use std::fmt::Debug;
use std::sync::Arc;
-use crate::compaction::CompactionRequest;
+use api::v1::region::compact_request;
-pub type CompactionPickerRef = Arc<dyn Picker + Send + Sync>;
+use crate::compaction::compactor::CompactionRegion;
+use crate::compaction::twcs::TwcsPicker;
+use crate::compaction::window::WindowedCompactionPicker;
+use crate::compaction::CompactionOutput;
+use crate::region::options::CompactionOptions;
+use crate::sst::file::FileHandle;
#[async_trait::async_trait]
-pub trait CompactionTask: Debug + Send + Sync + 'static {
+pub(crate) trait CompactionTask: Debug + Send + Sync + 'static {
async fn run(&mut self);
}
-/// Picker picks input SST files and builds the compaction task.
+/// Picker picks input SST files for compaction.
/// Different compaction strategy may implement different pickers.
-pub trait Picker: Debug + Send + 'static {
- fn pick(&self, req: CompactionRequest) -> Option<Box<dyn CompactionTask>>;
+pub trait Picker: Debug + Send + Sync + 'static {
+ /// Picks input SST files for compaction.
+ fn pick(&self, compaction_region: &CompactionRegion) -> Option<PickerOutput>;
+}
+
+/// PickerOutput is the output of a [`Picker`].
+/// It contains the outputs of the compaction and the expired SST files.
+#[derive(Default, Clone, Debug)]
+pub struct PickerOutput {
+ pub outputs: Vec<CompactionOutput>,
+ pub expired_ssts: Vec<FileHandle>,
+ pub time_window_size: i64,
+}
+
+/// Create a new picker based on the compaction request options and compaction options.
+pub fn new_picker(
+ compact_request_options: compact_request::Options,
+ compaction_options: &CompactionOptions,
+) -> Arc<dyn Picker> {
+ if let compact_request::Options::StrictWindow(window) = &compact_request_options {
+ let window = if window.window_seconds == 0 {
+ None
+ } else {
+ Some(window.window_seconds)
+ };
+ Arc::new(WindowedCompactionPicker::new(window)) as Arc<_>
+ } else {
+ match compaction_options {
+ CompactionOptions::Twcs(twcs_opts) => Arc::new(TwcsPicker::new(
+ twcs_opts.max_active_window_files,
+ twcs_opts.max_inactive_window_files,
+ twcs_opts.time_window_seconds(),
+ )) as Arc<_>,
+ }
+ }
}
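
Because Picker now just returns data (a PickerOutput) instead of building a task, an alternative strategy takes only a handful of lines. A sketch of the smallest possible implementation, a picker that never selects anything and could serve as a stand-in for tests; this is illustrative only and assumes the trait and types exactly as defined in the diff above:

use mito2::compaction::compactor::CompactionRegion;
use mito2::compaction::picker::{Picker, PickerOutput};

/// A picker that never selects any input files.
#[derive(Debug)]
struct NoopPicker;

impl Picker for NoopPicker {
    fn pick(&self, _compaction_region: &CompactionRegion) -> Option<PickerOutput> {
        // Returning None tells the scheduler there is nothing to compact.
        None
    }
}
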
diff --git a/src/mito2/src/compaction/task.rs b/src/mito2/src/compaction/task.rs
index fe7e637a9ea6..c76595097753 100644
--- a/src/mito2/src/compaction/task.rs
+++ b/src/mito2/src/compaction/task.rs
@@ -14,71 +14,51 @@
use std::fmt::{Debug, Formatter};
use std::sync::Arc;
-use std::time::{Duration, Instant};
+use std::time::Instant;
use common_telemetry::{error, info};
-use smallvec::SmallVec;
use snafu::ResultExt;
-use store_api::metadata::RegionMetadataRef;
-use store_api::storage::RegionId;
use tokio::sync::mpsc;
-use crate::access_layer::{AccessLayerRef, SstWriteRequest};
-use crate::cache::CacheManagerRef;
-use crate::compaction::picker::CompactionTask;
-use crate::compaction::{build_sst_reader, CompactionOutput};
-use crate::config::MitoConfig;
+use crate::compaction::compactor::{CompactionRegion, Compactor};
+use crate::compaction::picker::{CompactionTask, PickerOutput};
use crate::error;
use crate::error::CompactRegionSnafu;
-use crate::manifest::action::{RegionEdit, RegionMetaAction, RegionMetaActionList};
+use crate::manifest::action::RegionEdit;
use crate::metrics::{COMPACTION_FAILURE_COUNT, COMPACTION_STAGE_ELAPSED};
-use crate::read::Source;
-use crate::region::options::IndexOptions;
-use crate::region::{ManifestContextRef, RegionState};
use crate::request::{
BackgroundNotify, CompactionFailed, CompactionFinished, OutputTx, WorkerRequest,
};
-use crate::sst::file::{FileHandle, FileMeta, IndexType};
-use crate::sst::parquet::WriteOptions;
use crate::worker::WorkerListener;
-const MAX_PARALLEL_COMPACTION: usize = 8;
+/// Maximum number of compaction tasks in parallel.
+pub const MAX_PARALLEL_COMPACTION: usize = 8;
pub(crate) struct CompactionTaskImpl {
- pub engine_config: Arc<MitoConfig>,
- pub region_id: RegionId,
- pub metadata: RegionMetadataRef,
- pub sst_layer: AccessLayerRef,
- pub outputs: Vec<CompactionOutput>,
- pub expired_ssts: Vec<FileHandle>,
- pub compaction_time_window: Option<i64>,
+ pub compaction_region: CompactionRegion,
/// Request sender to notify the worker.
pub(crate) request_sender: mpsc::Sender<WorkerRequest>,
/// Senders that are used to notify waiters waiting for pending compaction tasks.
pub waiters: Vec<OutputTx>,
/// Start time of compaction task
pub start_time: Instant,
- pub(crate) cache_manager: CacheManagerRef,
- /// Target storage of the region.
- pub(crate) storage: Option<String>,
- /// Index options of the region.
- pub(crate) index_options: IndexOptions,
- /// The region is using append mode.
- pub(crate) append_mode: bool,
- /// Manifest context.
- pub(crate) manifest_ctx: ManifestContextRef,
/// Event listener.
pub(crate) listener: WorkerListener,
+ /// Compactor to handle compaction.
+ pub(crate) compactor: Arc<dyn Compactor>,
+ /// Output of the picker.
+ pub(crate) picker_output: PickerOutput,
}
impl Debug for CompactionTaskImpl {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.debug_struct("TwcsCompactionTask")
- .field("region_id", &self.region_id)
- .field("outputs", &self.outputs)
- .field("expired_ssts", &self.expired_ssts)
- .field("compaction_time_window", &self.compaction_time_window)
- .field("append_mode", &self.append_mode)
+ .field("region_id", &self.compaction_region.region_id)
+ .field("picker_output", &self.picker_output)
+ .field(
+ "append_mode",
+ &self.compaction_region.region_options.append_mode,
+ )
.finish()
}
}
@@ -91,174 +71,54 @@ impl Drop for CompactionTaskImpl {
impl CompactionTaskImpl {
fn mark_files_compacting(&self, compacting: bool) {
- self.outputs
+ self.picker_output
+ .outputs
.iter()
- .flat_map(|o| o.inputs.iter())
- .for_each(|f| f.set_compacting(compacting))
- }
-
- /// Merges all SST files.
- /// Returns `(output files, input files)`.
- async fn merge_ssts(&mut self) -> error::Result<(Vec<FileMeta>, Vec<FileMeta>)> {
- let mut futs = Vec::with_capacity(self.outputs.len());
- let mut compacted_inputs =
- Vec::with_capacity(self.outputs.iter().map(|o| o.inputs.len()).sum());
-
- for output in self.outputs.drain(..) {
- compacted_inputs.extend(output.inputs.iter().map(|f| f.meta_ref().clone()));
-
- info!(
- "Compaction region {} output [{}]-> {}",
- self.region_id,
- output
- .inputs
- .iter()
- .map(|f| f.file_id().to_string())
- .collect::<Vec<_>>()
- .join(","),
- output.output_file_id
- );
-
- let write_opts = WriteOptions {
- write_buffer_size: self.engine_config.sst_write_buffer_size,
- ..Default::default()
- };
- let create_inverted_index = self
- .engine_config
- .inverted_index
- .create_on_compaction
- .auto();
- let mem_threshold_index_create = self
- .engine_config
- .inverted_index
- .mem_threshold_on_create
- .map(|m| m.as_bytes() as _);
- let index_write_buffer_size = Some(
- self.engine_config
- .inverted_index
- .write_buffer_size
- .as_bytes() as usize,
- );
-
- let metadata = self.metadata.clone();
- let sst_layer = self.sst_layer.clone();
- let region_id = self.region_id;
- let file_id = output.output_file_id;
- let cache_manager = self.cache_manager.clone();
- let storage = self.storage.clone();
- let index_options = self.index_options.clone();
- let append_mode = self.append_mode;
- futs.push(async move {
- let reader = build_sst_reader(
- metadata.clone(),
- sst_layer.clone(),
- Some(cache_manager.clone()),
- &output.inputs,
- append_mode,
- output.filter_deleted,
- output.output_time_range,
- )
- .await?;
- let file_meta_opt = sst_layer
- .write_sst(
- SstWriteRequest {
- file_id,
- metadata,
- source: Source::Reader(reader),
- cache_manager,
- storage,
- create_inverted_index,
- mem_threshold_index_create,
- index_write_buffer_size,
- index_options,
- },
- &write_opts,
- )
- .await?
- .map(|sst_info| FileMeta {
- region_id,
- file_id,
- time_range: sst_info.time_range,
- level: output.output_level,
- file_size: sst_info.file_size,
- available_indexes: sst_info
- .inverted_index_available
- .then(|| SmallVec::from_iter([IndexType::InvertedIndex]))
- .unwrap_or_default(),
- index_file_size: sst_info.index_file_size,
- });
- Ok(file_meta_opt)
- });
- }
-
- let mut output_files = Vec::with_capacity(futs.len());
- while !futs.is_empty() {
- let mut task_chunk = Vec::with_capacity(MAX_PARALLEL_COMPACTION);
- for _ in 0..MAX_PARALLEL_COMPACTION {
- if let Some(task) = futs.pop() {
- task_chunk.push(common_runtime::spawn_bg(task));
- }
- }
- let metas = futures::future::try_join_all(task_chunk)
- .await
- .context(error::JoinSnafu)?
- .into_iter()
- .collect::<error::Result<Vec<_>>>()?;
- output_files.extend(metas.into_iter().flatten());
- }
-
- let inputs = compacted_inputs.into_iter().collect();
- Ok((output_files, inputs))
+ .for_each(|o| o.inputs.iter().for_each(|f| f.set_compacting(compacting)));
}
async fn handle_compaction(&mut self) -> error::Result<RegionEdit> {
self.mark_files_compacting(true);
+
let merge_timer = COMPACTION_STAGE_ELAPSED
.with_label_values(&["merge"])
.start_timer();
- let (added, mut deleted) = match self.merge_ssts().await {
+
+ let compaction_result = match self
+ .compactor
+ .merge_ssts(&self.compaction_region, self.picker_output.clone())
+ .await
+ {
Ok(v) => v,
Err(e) => {
- error!(e; "Failed to compact region: {}", self.region_id);
+ error!(e; "Failed to compact region: {}", self.compaction_region.region_id);
merge_timer.stop_and_discard();
return Err(e);
}
};
- deleted.extend(self.expired_ssts.iter().map(|f| f.meta_ref().clone()));
let merge_time = merge_timer.stop_and_record();
+
info!(
"Compacted SST files, region_id: {}, input: {:?}, output: {:?}, window: {:?}, waiter_num: {}, merge_time: {}s",
- self.region_id,
- deleted,
- added,
- self.compaction_time_window,
+ self.compaction_region.region_id,
+ compaction_result.files_to_remove,
+ compaction_result.files_to_add,
+ compaction_result.compaction_time_window,
self.waiters.len(),
merge_time,
);
- self.listener.on_merge_ssts_finished(self.region_id).await;
+ self.listener
+ .on_merge_ssts_finished(self.compaction_region.region_id)
+ .await;
let _manifest_timer = COMPACTION_STAGE_ELAPSED
.with_label_values(&["write_manifest"])
.start_timer();
- // Write region edit to manifest.
- let edit = RegionEdit {
- files_to_add: added,
- files_to_remove: deleted,
- compaction_time_window: self
- .compaction_time_window
- .map(|seconds| Duration::from_secs(seconds as u64)),
- flushed_entry_id: None,
- flushed_sequence: None,
- };
- let action_list = RegionMetaActionList::with_action(RegionMetaAction::Edit(edit.clone()));
- // We might leak files if we fail to update manifest. We can add a cleanup task to
- // remove them later.
- self.manifest_ctx
- .update_manifest(RegionState::Writable, action_list)
- .await?;
- Ok(edit)
+ self.compactor
+ .update_manifest(&self.compaction_region, compaction_result)
+ .await
}
/// Handles compaction failure, notifies all waiters.
@@ -266,7 +126,7 @@ impl CompactionTaskImpl {
COMPACTION_FAILURE_COUNT.inc();
for waiter in self.waiters.drain(..) {
waiter.send(Err(err.clone()).context(CompactRegionSnafu {
- region_id: self.region_id,
+ region_id: self.compaction_region.region_id,
}));
}
}
@@ -276,7 +136,7 @@ impl CompactionTaskImpl {
if let Err(e) = self.request_sender.send(request).await {
error!(
"Failed to notify compaction job status for region {}, request: {:?}",
- self.region_id, e.0
+ self.compaction_region.region_id, e.0
);
}
}
@@ -287,25 +147,25 @@ impl CompactionTask for CompactionTaskImpl {
async fn run(&mut self) {
let notify = match self.handle_compaction().await {
Ok(edit) => BackgroundNotify::CompactionFinished(CompactionFinished {
- region_id: self.region_id,
+ region_id: self.compaction_region.region_id,
senders: std::mem::take(&mut self.waiters),
start_time: self.start_time,
edit,
}),
Err(e) => {
- error!(e; "Failed to compact region, region id: {}", self.region_id);
+ error!(e; "Failed to compact region, region id: {}", self.compaction_region.region_id);
let err = Arc::new(e);
// notify compaction waiters
self.on_failure(err.clone());
BackgroundNotify::CompactionFailed(CompactionFailed {
- region_id: self.region_id,
+ region_id: self.compaction_region.region_id,
err,
})
}
};
self.send_to_worker(WorkerRequest::Background {
- region_id: self.region_id,
+ region_id: self.compaction_region.region_id,
notify,
})
.await;
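
After this change CompactionTaskImpl is mostly glue: it marks files compacting, calls Compactor::merge_ssts, then Compactor::update_manifest, and reports the result back to the worker, which makes the compactor the natural extension point. A rough sketch of a wrapper that adds logging and otherwise defers to DefaultCompactor, assuming the trait items from compactor.rs above and that RegionEdit is reachable as mito2::manifest::action::RegionEdit (that module path is an assumption here):

use api::v1::region::compact_request;
use common_telemetry::info;
use mito2::compaction::compactor::{CompactionRegion, Compactor, DefaultCompactor, MergeOutput};
use mito2::compaction::picker::PickerOutput;
use mito2::error::Result;
use mito2::manifest::action::RegionEdit;

/// Logs each stage and delegates the real work to DefaultCompactor.
struct LoggingCompactor {
    inner: DefaultCompactor,
}

#[async_trait::async_trait]
impl Compactor for LoggingCompactor {
    async fn merge_ssts(
        &self,
        compaction_region: &CompactionRegion,
        picker_output: PickerOutput,
    ) -> Result<MergeOutput> {
        info!("Merging SSTs for region {}", compaction_region.region_id);
        self.inner.merge_ssts(compaction_region, picker_output).await
    }

    async fn update_manifest(
        &self,
        compaction_region: &CompactionRegion,
        merge_output: MergeOutput,
    ) -> Result<RegionEdit> {
        info!("Updating manifest for region {}", compaction_region.region_id);
        self.inner.update_manifest(compaction_region, merge_output).await
    }

    async fn compact(
        &self,
        compaction_region: &CompactionRegion,
        compact_request_options: compact_request::Options,
    ) -> Result<()> {
        self.inner.compact(compaction_region, compact_request_options).await
    }
}
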
diff --git a/src/mito2/src/compaction/twcs.rs b/src/mito2/src/compaction/twcs.rs
index f8b79cab523a..bcb5e49c2dd9 100644
--- a/src/mito2/src/compaction/twcs.rs
+++ b/src/mito2/src/compaction/twcs.rs
@@ -22,9 +22,9 @@ use common_time::timestamp_millis::BucketAligned;
use common_time::Timestamp;
use crate::compaction::buckets::infer_time_bucket;
-use crate::compaction::picker::{CompactionTask, Picker};
-use crate::compaction::task::CompactionTaskImpl;
-use crate::compaction::{get_expired_ssts, CompactionOutput, CompactionRequest};
+use crate::compaction::compactor::CompactionRegion;
+use crate::compaction::picker::{Picker, PickerOutput};
+use crate::compaction::{get_expired_ssts, CompactionOutput};
use crate::sst::file::{overlaps, FileHandle, FileId};
use crate::sst::version::LevelMeta;
@@ -110,25 +110,10 @@ impl TwcsPicker {
}
impl Picker for TwcsPicker {
- fn pick(&self, req: CompactionRequest) -> Option<Box<dyn CompactionTask>> {
- let CompactionRequest {
- engine_config,
- current_version,
- access_layer,
- request_sender,
- waiters,
- start_time,
- cache_manager,
- manifest_ctx,
- listener,
- ..
- } = req;
-
- let region_metadata = current_version.metadata.clone();
- let region_id = region_metadata.region_id;
-
- let levels = current_version.ssts.levels();
- let ttl = current_version.options.ttl;
+ fn pick(&self, compaction_region: &CompactionRegion) -> Option<PickerOutput> {
+ let region_id = compaction_region.region_id;
+ let levels = compaction_region.current_version.ssts.levels();
+ let ttl = compaction_region.current_version.options.ttl;
let expired_ssts = get_expired_ssts(levels, ttl, Timestamp::current_millis());
if !expired_ssts.is_empty() {
info!("Expired SSTs in region {}: {:?}", region_id, expired_ssts);
@@ -136,7 +121,8 @@ impl Picker for TwcsPicker {
expired_ssts.iter().for_each(|f| f.set_compacting(true));
}
- let compaction_time_window = current_version
+ let compaction_time_window = compaction_region
+ .current_version
.compaction_time_window
.map(|window| window.as_secs() as i64);
let time_window_size = compaction_time_window
@@ -157,31 +143,14 @@ impl Picker for TwcsPicker {
let outputs = self.build_output(&windows, active_window);
if outputs.is_empty() && expired_ssts.is_empty() {
- // Nothing to compact, we are done. Notifies all waiters as we consume the compaction request.
- for waiter in waiters {
- waiter.send(Ok(0));
- }
return None;
}
- let task = CompactionTaskImpl {
- engine_config,
- region_id,
- metadata: region_metadata,
- sst_layer: access_layer,
+
+ Some(PickerOutput {
outputs,
expired_ssts,
- compaction_time_window: Some(time_window_size),
- request_sender,
- waiters,
- start_time,
- cache_manager,
- storage: current_version.options.storage.clone(),
- index_options: current_version.options.index_options.clone(),
- append_mode: current_version.options.append_mode,
- manifest_ctx,
- listener,
- };
- Some(Box::new(task))
+ time_window_size,
+ })
}
}
diff --git a/src/mito2/src/compaction/window.rs b/src/mito2/src/compaction/window.rs
index a2b3f066efc2..1683d28f9a9c 100644
--- a/src/mito2/src/compaction/window.rs
+++ b/src/mito2/src/compaction/window.rs
@@ -23,9 +23,9 @@ use common_time::Timestamp;
use store_api::storage::RegionId;
use crate::compaction::buckets::infer_time_bucket;
-use crate::compaction::picker::{CompactionTask, Picker};
-use crate::compaction::task::CompactionTaskImpl;
-use crate::compaction::{get_expired_ssts, CompactionOutput, CompactionRequest};
+use crate::compaction::compactor::CompactionRegion;
+use crate::compaction::picker::{Picker, PickerOutput};
+use crate::compaction::{get_expired_ssts, CompactionOutput};
use crate::region::version::VersionRef;
use crate::sst::file::{FileHandle, FileId};
@@ -101,42 +101,18 @@ impl WindowedCompactionPicker {
}
impl Picker for WindowedCompactionPicker {
- fn pick(&self, req: CompactionRequest) -> Option<Box<dyn CompactionTask>> {
- let region_id = req.region_id();
- let CompactionRequest {
- engine_config,
- current_version,
- access_layer,
- request_sender,
- waiters,
- start_time,
- cache_manager,
- manifest_ctx,
- listener,
- } = req;
-
- let (outputs, expired_ssts, time_window) =
- self.pick_inner(region_id, ¤t_version, Timestamp::current_millis());
-
- let task = CompactionTaskImpl {
- engine_config: engine_config.clone(),
- region_id,
- metadata: current_version.metadata.clone().clone(),
- sst_layer: access_layer.clone(),
+ fn pick(&self, compaction_region: &CompactionRegion) -> Option<PickerOutput> {
+ let (outputs, expired_ssts, time_window) = self.pick_inner(
+ compaction_region.current_version.metadata.region_id,
+ &compaction_region.current_version,
+ Timestamp::current_millis(),
+ );
+
+ Some(PickerOutput {
outputs,
expired_ssts,
- compaction_time_window: Some(time_window),
- request_sender,
- waiters,
- start_time,
- cache_manager,
- storage: current_version.options.storage.clone(),
- index_options: current_version.options.index_options.clone(),
- append_mode: current_version.options.append_mode,
- manifest_ctx,
- listener,
- };
- Some(Box::new(task))
+ time_window_size: time_window,
+ })
}
}
diff --git a/src/mito2/src/lib.rs b/src/mito2/src/lib.rs
index acf65608cef8..cdd2416940ce 100644
--- a/src/mito2/src/lib.rs
+++ b/src/mito2/src/lib.rs
@@ -25,7 +25,7 @@ pub mod test_util;
mod access_layer;
mod cache;
-mod compaction;
+pub mod compaction;
pub mod config;
pub mod engine;
pub mod error;
diff --git a/src/mito2/src/region/opener.rs b/src/mito2/src/region/opener.rs
index f046471cfc2b..e20a00d35ae1 100644
--- a/src/mito2/src/region/opener.rs
+++ b/src/mito2/src/region/opener.rs
@@ -530,6 +530,6 @@ where
}
/// Returns the directory to the manifest files.
-fn new_manifest_dir(region_dir: &str) -> String {
+pub(crate) fn new_manifest_dir(region_dir: &str) -> String {
join_dir(region_dir, "manifest")
}
|
refactor
|
add `Compactor` trait to abstract the compaction (#4097)
|
76236646ef3c7a5f1f5b397cf75c5781c8c85c22
|
2022-12-28 11:59:52
|
fys
|
chore: extract some functions from "bootstrap_meta_srv" function (#795)
| false
|
diff --git a/src/meta-srv/src/bootstrap.rs b/src/meta-srv/src/bootstrap.rs
index db2e269861c3..a92cbbb1c3e8 100644
--- a/src/meta-srv/src/bootstrap.rs
+++ b/src/meta-srv/src/bootstrap.rs
@@ -18,18 +18,21 @@ use api::v1::meta::store_server::StoreServer;
use snafu::ResultExt;
use tokio::net::TcpListener;
use tokio_stream::wrappers::TcpListenerStream;
+use tonic::transport::server::Router;
use crate::election::etcd::EtcdElection;
-use crate::error;
use crate::metasrv::{MetaSrv, MetaSrvOptions};
use crate::service::admin;
use crate::service::store::etcd::EtcdStore;
+use crate::{error, Result};
// Bootstrap the rpc server to serve incoming request
-pub async fn bootstrap_meta_srv(opts: MetaSrvOptions) -> crate::Result<()> {
- let kv_store = EtcdStore::with_endpoints([&opts.store_addr]).await?;
- let election = EtcdElection::with_endpoints(&opts.server_addr, [&opts.store_addr]).await?;
+pub async fn bootstrap_meta_srv(opts: MetaSrvOptions) -> Result<()> {
+ let meta_srv = make_meta_srv(opts.clone()).await?;
+ bootstrap_meta_srv_with_router(opts, router(meta_srv)).await
+}
+pub async fn bootstrap_meta_srv_with_router(opts: MetaSrvOptions, router: Router) -> Result<()> {
let listener = TcpListener::bind(&opts.bind_addr)
.await
.context(error::TcpBindSnafu {
@@ -37,18 +40,27 @@ pub async fn bootstrap_meta_srv(opts: MetaSrvOptions) -> crate::Result<()> {
})?;
let listener = TcpListenerStream::new(listener);
- let meta_srv = MetaSrv::new(opts, kv_store, None, Some(election)).await;
- meta_srv.start().await;
+ router
+ .serve_with_incoming(listener)
+ .await
+ .context(error::StartGrpcSnafu)?;
+
+ Ok(())
+}
+pub fn router(meta_srv: MetaSrv) -> Router {
tonic::transport::Server::builder()
.accept_http1(true) // for admin services
.add_service(HeartbeatServer::new(meta_srv.clone()))
.add_service(RouterServer::new(meta_srv.clone()))
.add_service(StoreServer::new(meta_srv.clone()))
- .add_service(admin::make_admin_service(meta_srv.clone()))
- .serve_with_incoming(listener)
- .await
- .context(error::StartGrpcSnafu)?;
+ .add_service(admin::make_admin_service(meta_srv))
+}
- Ok(())
+pub async fn make_meta_srv(opts: MetaSrvOptions) -> Result<MetaSrv> {
+ let kv_store = EtcdStore::with_endpoints([&opts.store_addr]).await?;
+ let election = EtcdElection::with_endpoints(&opts.server_addr, [&opts.store_addr]).await?;
+ let meta_srv = MetaSrv::new(opts, kv_store, None, Some(election)).await;
+ meta_srv.start().await;
+ Ok(meta_srv)
}
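
The split makes the gRPC assembly reusable: make_meta_srv owns the etcd store and election setup, router attaches all services to a tonic Router, and bootstrap_meta_srv_with_router only binds and serves. A sketch of reusing the pieces separately (for example when the caller wants to control how the listener is created), assuming the functions are exported from the meta_srv crate's bootstrap module as shown in the diff:

use meta_srv::bootstrap::{bootstrap_meta_srv_with_router, make_meta_srv, router};
use meta_srv::metasrv::MetaSrvOptions;
use meta_srv::Result;

async fn start(opts: MetaSrvOptions) -> Result<()> {
    // Build the MetaSrv (etcd store + election) and start its background tasks.
    let meta_srv = make_meta_srv(opts.clone()).await?;
    // Attach the heartbeat/router/store/admin services to a tonic Router ...
    let router = router(meta_srv);
    // ... then bind the configured address and serve incoming connections.
    bootstrap_meta_srv_with_router(opts, router).await
}
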
|
chore
|
extract some functions from "bootstrap_meta_srv" function (#795)
|
cc7eb3d3171d840b073a308ad0cb5a41a262e952
|
2023-09-13 20:00:56
|
Ruihang Xia
|
fix: querying temporary table (#2387)
| false
|
diff --git a/src/query/src/dist_plan/planner.rs b/src/query/src/dist_plan/planner.rs
index 2886dd9c9d7d..1a71e0434955 100644
--- a/src/query/src/dist_plan/planner.rs
+++ b/src/query/src/dist_plan/planner.rs
@@ -181,8 +181,8 @@ impl TreeNodeVisitor for TableNameExtractor {
info.schema_name.clone(),
info.name.clone(),
));
- return Ok(VisitRecursion::Stop);
}
+ return Ok(VisitRecursion::Stop);
}
}
match &scan.table_name {
|
fix
|
querying temporary table (#2387)
|
4e9c25104141c0b2508ed91c34309f2edfc99eaf
|
2024-10-25 08:43:34
|
Kaifeng Zheng
|
feat: add json_path_match udf (#4864)
| false
|
diff --git a/src/common/function/src/scalars/json.rs b/src/common/function/src/scalars/json.rs
index 133eb145a715..2c420c1661e1 100644
--- a/src/common/function/src/scalars/json.rs
+++ b/src/common/function/src/scalars/json.rs
@@ -16,6 +16,7 @@ use std::sync::Arc;
mod json_get;
mod json_is;
mod json_path_exists;
+mod json_path_match;
mod json_to_string;
mod parse_json;
@@ -49,5 +50,6 @@ impl JsonFunction {
registry.register(Arc::new(JsonIsObject));
registry.register(Arc::new(json_path_exists::JsonPathExistsFunction));
+ registry.register(Arc::new(json_path_match::JsonPathMatchFunction));
}
}
diff --git a/src/common/function/src/scalars/json/json_path_match.rs b/src/common/function/src/scalars/json/json_path_match.rs
new file mode 100644
index 000000000000..8ea1bf082b7f
--- /dev/null
+++ b/src/common/function/src/scalars/json/json_path_match.rs
@@ -0,0 +1,202 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::fmt::{self, Display};
+
+use common_query::error::{InvalidFuncArgsSnafu, Result, UnsupportedInputDataTypeSnafu};
+use common_query::prelude::Signature;
+use datafusion::logical_expr::Volatility;
+use datatypes::data_type::ConcreteDataType;
+use datatypes::prelude::VectorRef;
+use datatypes::scalars::ScalarVectorBuilder;
+use datatypes::vectors::{BooleanVectorBuilder, MutableVector};
+use snafu::ensure;
+
+use crate::function::{Function, FunctionContext};
+
+/// Check if the given JSON data matches the given JSON path's predicate.
+#[derive(Clone, Debug, Default)]
+pub struct JsonPathMatchFunction;
+
+const NAME: &str = "json_path_match";
+
+impl Function for JsonPathMatchFunction {
+ fn name(&self) -> &str {
+ NAME
+ }
+
+ fn return_type(&self, _input_types: &[ConcreteDataType]) -> Result<ConcreteDataType> {
+ Ok(ConcreteDataType::boolean_datatype())
+ }
+
+ fn signature(&self) -> Signature {
+ Signature::exact(
+ vec![
+ ConcreteDataType::json_datatype(),
+ ConcreteDataType::string_datatype(),
+ ],
+ Volatility::Immutable,
+ )
+ }
+
+ fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
+ ensure!(
+ columns.len() == 2,
+ InvalidFuncArgsSnafu {
+ err_msg: format!(
+ "The length of the args is not correct, expect exactly two, have: {}",
+ columns.len()
+ ),
+ }
+ );
+ let jsons = &columns[0];
+ let paths = &columns[1];
+
+ let size = jsons.len();
+ let mut results = BooleanVectorBuilder::with_capacity(size);
+
+ for i in 0..size {
+ let json = jsons.get_ref(i);
+ let path = paths.get_ref(i);
+
+ match json.data_type() {
+ // JSON data type uses binary vector
+ ConcreteDataType::Binary(_) => {
+ let json = json.as_binary();
+ let path = path.as_string();
+ let result = match (json, path) {
+ (Ok(Some(json)), Ok(Some(path))) => {
+ if !jsonb::is_null(json) {
+ let json_path = jsonb::jsonpath::parse_json_path(path.as_bytes());
+ match json_path {
+ Ok(json_path) => jsonb::path_match(json, json_path).ok(),
+ Err(_) => None,
+ }
+ } else {
+ None
+ }
+ }
+ _ => None,
+ };
+
+ results.push(result);
+ }
+
+ _ => {
+ return UnsupportedInputDataTypeSnafu {
+ function: NAME,
+ datatypes: columns.iter().map(|c| c.data_type()).collect::<Vec<_>>(),
+ }
+ .fail();
+ }
+ }
+ }
+
+ Ok(results.to_vector())
+ }
+}
+
+impl Display for JsonPathMatchFunction {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(f, "JSON_PATH_MATCH")
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::sync::Arc;
+
+ use common_query::prelude::TypeSignature;
+ use datatypes::vectors::{BinaryVector, StringVector};
+
+ use super::*;
+
+ #[test]
+ fn test_json_path_match_function() {
+ let json_path_match = JsonPathMatchFunction;
+
+ assert_eq!("json_path_match", json_path_match.name());
+ assert_eq!(
+ ConcreteDataType::boolean_datatype(),
+ json_path_match
+ .return_type(&[ConcreteDataType::json_datatype()])
+ .unwrap()
+ );
+
+ assert!(matches!(json_path_match.signature(),
+ Signature {
+ type_signature: TypeSignature::Exact(valid_types),
+ volatility: Volatility::Immutable
+ } if valid_types == vec![ConcreteDataType::json_datatype(), ConcreteDataType::string_datatype()],
+ ));
+
+ let json_strings = [
+ Some(r#"{"a": {"b": 2}, "b": 2, "c": 3}"#.to_string()),
+ Some(r#"{"a": 1, "b": [1,2,3]}"#.to_string()),
+ Some(r#"{"a": 1 ,"b": [1,2,3]}"#.to_string()),
+ Some(r#"[1,2,3]"#.to_string()),
+ Some(r#"{"a":1,"b":[1,2,3]}"#.to_string()),
+ Some(r#"null"#.to_string()),
+ Some(r#"null"#.to_string()),
+ ];
+
+ let paths = vec![
+ Some("$.a.b == 2".to_string()),
+ Some("$.b[1 to last] >= 2".to_string()),
+ Some("$.c > 0".to_string()),
+ Some("$[0 to last] > 0".to_string()),
+ Some(r#"null"#.to_string()),
+ Some("$.c > 0".to_string()),
+ Some(r#"null"#.to_string()),
+ ];
+
+ let results = [
+ Some(true),
+ Some(true),
+ Some(false),
+ Some(true),
+ None,
+ None,
+ None,
+ ];
+
+ let jsonbs = json_strings
+ .into_iter()
+ .map(|s| s.map(|json| jsonb::parse_value(json.as_bytes()).unwrap().to_vec()))
+ .collect::<Vec<_>>();
+
+ let json_vector = BinaryVector::from(jsonbs);
+ let path_vector = StringVector::from(paths);
+ let args: Vec<VectorRef> = vec![Arc::new(json_vector), Arc::new(path_vector)];
+ let vector = json_path_match
+ .eval(FunctionContext::default(), &args)
+ .unwrap();
+
+ assert_eq!(7, vector.len());
+ for (i, expected) in results.iter().enumerate() {
+ let result = vector.get_ref(i);
+
+ match expected {
+ Some(expected_value) => {
+ assert!(!result.is_null());
+ let result_value = result.as_boolean().unwrap().unwrap();
+ assert_eq!(*expected_value, result_value);
+ }
+ None => {
+ assert!(result.is_null());
+ }
+ }
+ }
+ }
+}
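
Under the hood json_path_match leans entirely on the jsonb crate: the path text is parsed with jsonb::jsonpath::parse_json_path and evaluated with jsonb::path_match, exactly as in eval above. A small standalone sketch of that call sequence, mirroring the unit-test inputs; error handling is collapsed into unwrap for brevity and path_matches is a hypothetical helper:

/// Evaluates a jsonpath predicate against a JSON document encoded as JSONB.
fn path_matches(json_text: &str, path: &str) -> bool {
    // Encode the textual JSON into the jsonb binary form, as parse_json does.
    let json = jsonb::parse_value(json_text.as_bytes()).unwrap().to_vec();
    // Parse the predicate path, e.g. "$.a.b == 2".
    let json_path = jsonb::jsonpath::parse_json_path(path.as_bytes()).unwrap();
    // Run the predicate against the document.
    jsonb::path_match(&json, json_path).unwrap()
}

// path_matches(r#"{"a": {"b": 2}, "b": 2, "c": 3}"#, "$.a.b == 2") evaluates to true.
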
diff --git a/tests/cases/standalone/common/function/json/json.result b/tests/cases/standalone/common/function/json/json.result
index bae33135488b..42db0b263e30 100644
--- a/tests/cases/standalone/common/function/json/json.result
+++ b/tests/cases/standalone/common/function/json/json.result
@@ -31,3 +31,60 @@ SELECT json_path_exists(parse_json('[1, 2]'), '[2]');
| false |
+----------------------------------------------------------+
+SELECT json_path_exists(parse_json('[1, 2]'), 'null');
+
++-----------------------------------------------------------+
+| json_path_exists(parse_json(Utf8("[1, 2]")),Utf8("null")) |
++-----------------------------------------------------------+
+| false |
++-----------------------------------------------------------+
+
+SELECT json_path_exists(parse_json('null'), '$.a');
+
++--------------------------------------------------------+
+| json_path_exists(parse_json(Utf8("null")),Utf8("$.a")) |
++--------------------------------------------------------+
+| false |
++--------------------------------------------------------+
+
+--- json_path_match ---
+SELECT json_path_match(parse_json('{"a": 1, "b": 2}'), '$.a == 1');
+
++------------------------------------------------------------------------+
+| json_path_match(parse_json(Utf8("{"a": 1, "b": 2}")),Utf8("$.a == 1")) |
++------------------------------------------------------------------------+
+| true |
++------------------------------------------------------------------------+
+
+SELECT json_path_match(parse_json('{"a":1,"b":[1,2,3]}'), '$.b[0] > 1');
+
++-----------------------------------------------------------------------------+
+| json_path_match(parse_json(Utf8("{"a":1,"b":[1,2,3]}")),Utf8("$.b[0] > 1")) |
++-----------------------------------------------------------------------------+
+| false |
++-----------------------------------------------------------------------------+
+
+SELECT json_path_match(parse_json('{"a":1,"b":[1,2,3]}'), '$.b[1 to last] >= 2');
+
++--------------------------------------------------------------------------------------+
+| json_path_match(parse_json(Utf8("{"a":1,"b":[1,2,3]}")),Utf8("$.b[1 to last] >= 2")) |
++--------------------------------------------------------------------------------------+
+| true |
++--------------------------------------------------------------------------------------+
+
+SELECT json_path_match(parse_json('{"a":1,"b":[1,2,3]}'), 'null');
+
++-----------------------------------------------------------------------+
+| json_path_match(parse_json(Utf8("{"a":1,"b":[1,2,3]}")),Utf8("null")) |
++-----------------------------------------------------------------------+
+| |
++-----------------------------------------------------------------------+
+
+SELECT json_path_match(parse_json('null'), '$.a == 1');
+
++------------------------------------------------------------+
+| json_path_match(parse_json(Utf8("null")),Utf8("$.a == 1")) |
++------------------------------------------------------------+
+| |
++------------------------------------------------------------+
+
diff --git a/tests/cases/standalone/common/function/json/json.sql b/tests/cases/standalone/common/function/json/json.sql
index 76914e257571..8980be33e310 100644
--- a/tests/cases/standalone/common/function/json/json.sql
+++ b/tests/cases/standalone/common/function/json/json.sql
@@ -6,3 +6,19 @@ SELECT json_path_exists(parse_json('{"a": 1, "b": 2}'), '$.c');
SELECT json_path_exists(parse_json('[1, 2]'), '[0]');
SELECT json_path_exists(parse_json('[1, 2]'), '[2]');
+
+SELECT json_path_exists(parse_json('[1, 2]'), 'null');
+
+SELECT json_path_exists(parse_json('null'), '$.a');
+
+--- json_path_match ---
+
+SELECT json_path_match(parse_json('{"a": 1, "b": 2}'), '$.a == 1');
+
+SELECT json_path_match(parse_json('{"a":1,"b":[1,2,3]}'), '$.b[0] > 1');
+
+SELECT json_path_match(parse_json('{"a":1,"b":[1,2,3]}'), '$.b[1 to last] >= 2');
+
+SELECT json_path_match(parse_json('{"a":1,"b":[1,2,3]}'), 'null');
+
+SELECT json_path_match(parse_json('null'), '$.a == 1');
|
feat
|
add json_path_match udf (#4864)
|
d8b51cfaba5118ce06e239e97d16e4b68d736dbf
|
2024-06-08 05:04:03
|
cjw
|
refactor: remove double checks of memtable size (#4117)
| false
|
diff --git a/src/mito2/src/region/version.rs b/src/mito2/src/region/version.rs
index fa95255c1a5c..15928e1e22c2 100644
--- a/src/mito2/src/region/version.rs
+++ b/src/mito2/src/region/version.rs
@@ -80,14 +80,11 @@ impl VersionControl {
/// Freezes the mutable memtable if it is not empty.
pub(crate) fn freeze_mutable(&self) -> Result<()> {
let version = self.current().version;
- if version.memtables.mutable.is_empty() {
+
+ let Some(new_memtables) = version.memtables.freeze_mutable(&version.metadata)? else {
return Ok(());
- }
- // Safety: Immutable memtable is None.
- let new_memtables = version
- .memtables
- .freeze_mutable(&version.metadata)?
- .unwrap();
+ };
+
// Create a new version with memtable switched.
let new_version = Arc::new(
VersionBuilder::from_version(version)
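
The rewrite relies on Rust's let-else form: freeze_mutable already returns None when the mutable memtable is empty, so the separate is_empty check and the "Safety" unwrap both disappear, and the early return moves into the else branch. The pattern in isolation, as a generic illustration unrelated to any GreptimeDB type:

fn first_upper(s: &str) -> String {
    // Bind the happy-path value, or return early in the else branch.
    let Some(first) = s.chars().next() else {
        return String::new();
    };
    first.to_uppercase().collect()
}
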
|
refactor
|
remove double checks of memtable size (#4117)
|
ba15c14103e3f484e3331669fac8c86d52680b60
|
2023-10-19 09:29:37
|
Wei
|
feat: get internal value size of ValueRef (#2613)
| false
|
diff --git a/src/datatypes/src/value.rs b/src/datatypes/src/value.rs
index 45e8f1ce02b5..287a27cb2837 100644
--- a/src/datatypes/src/value.rs
+++ b/src/datatypes/src/value.rs
@@ -654,6 +654,17 @@ impl ListValue {
Arc::new(new_item_field(output_type.item_type().as_arrow_type())),
))
}
+
+ /// Uses 'the first item size' * 'length of items' to estimate the size.
+ /// It could be inaccurate.
+ fn estimated_size(&self) -> usize {
+ if let Some(items) = &self.items {
+ if let Some(item) = items.first() {
+ return item.as_value_ref().data_size() * items.len();
+ }
+ }
+ 0
+ }
}
impl Default for ListValue {
@@ -1090,12 +1101,46 @@ impl<'a> PartialOrd for ListValueRef<'a> {
}
}
+impl<'a> ValueRef<'a> {
+ /// Returns the size of the underlying data in bytes.
+ /// The size is estimated and only considers the data size.
+ pub fn data_size(&self) -> usize {
+ match *self {
+ ValueRef::Null => 0,
+ ValueRef::Boolean(_) => 1,
+ ValueRef::UInt8(_) => 1,
+ ValueRef::UInt16(_) => 2,
+ ValueRef::UInt32(_) => 4,
+ ValueRef::UInt64(_) => 8,
+ ValueRef::Int8(_) => 1,
+ ValueRef::Int16(_) => 2,
+ ValueRef::Int32(_) => 4,
+ ValueRef::Int64(_) => 8,
+ ValueRef::Float32(_) => 4,
+ ValueRef::Float64(_) => 8,
+ ValueRef::String(v) => std::mem::size_of_val(v),
+ ValueRef::Binary(v) => std::mem::size_of_val(v),
+ ValueRef::Date(_) => 4,
+ ValueRef::DateTime(_) => 8,
+ ValueRef::Timestamp(_) => 16,
+ ValueRef::Time(_) => 16,
+ ValueRef::Duration(_) => 16,
+ ValueRef::Interval(_) => 24,
+ ValueRef::List(v) => match v {
+ ListValueRef::Indexed { vector, .. } => vector.memory_size() / vector.len(),
+ ListValueRef::Ref { val } => val.estimated_size(),
+ },
+ }
+ }
+}
+
#[cfg(test)]
mod tests {
use arrow::datatypes::DataType as ArrowDataType;
use num_traits::Float;
use super::*;
+ use crate::vectors::ListVectorBuilder;
#[test]
fn test_try_from_scalar_value() {
@@ -2158,4 +2203,90 @@ mod tests {
duration_to_scalar_value(TimeUnit::Nanosecond, Some(1))
);
}
+
+ fn check_value_ref_size_eq(value_ref: &ValueRef, size: usize) {
+ assert_eq!(value_ref.data_size(), size);
+ }
+
+ #[test]
+ fn test_value_ref_estimated_size() {
+ assert_eq!(std::mem::size_of::<ValueRef>(), 24);
+
+ check_value_ref_size_eq(&ValueRef::Boolean(true), 1);
+ check_value_ref_size_eq(&ValueRef::UInt8(1), 1);
+ check_value_ref_size_eq(&ValueRef::UInt16(1), 2);
+ check_value_ref_size_eq(&ValueRef::UInt32(1), 4);
+ check_value_ref_size_eq(&ValueRef::UInt64(1), 8);
+ check_value_ref_size_eq(&ValueRef::Int8(1), 1);
+ check_value_ref_size_eq(&ValueRef::Int16(1), 2);
+ check_value_ref_size_eq(&ValueRef::Int32(1), 4);
+ check_value_ref_size_eq(&ValueRef::Int64(1), 8);
+ check_value_ref_size_eq(&ValueRef::Float32(1.0.into()), 4);
+ check_value_ref_size_eq(&ValueRef::Float64(1.0.into()), 8);
+ check_value_ref_size_eq(&ValueRef::String("greptimedb"), 10);
+ check_value_ref_size_eq(&ValueRef::Binary(b"greptimedb"), 10);
+ check_value_ref_size_eq(&ValueRef::Date(Date::new(1)), 4);
+ check_value_ref_size_eq(&ValueRef::DateTime(DateTime::new(1)), 8);
+ check_value_ref_size_eq(&ValueRef::Timestamp(Timestamp::new_millisecond(1)), 16);
+ check_value_ref_size_eq(&ValueRef::Time(Time::new_millisecond(1)), 16);
+ check_value_ref_size_eq(
+ &ValueRef::Interval(Interval::from_month_day_nano(1, 2, 3)),
+ 24,
+ );
+ check_value_ref_size_eq(&ValueRef::Duration(Duration::new_millisecond(1)), 16);
+ check_value_ref_size_eq(
+ &ValueRef::List(ListValueRef::Ref {
+ val: &ListValue {
+ items: Some(Box::new(vec![
+ Value::String("hello world".into()),
+ Value::String("greptimedb".into()),
+ ])),
+ datatype: ConcreteDataType::string_datatype(),
+ },
+ }),
+ 22,
+ );
+
+ let data = vec![
+ Some(vec![Some(1), Some(2), Some(3)]),
+ None,
+ Some(vec![Some(4), None, Some(6)]),
+ ];
+ let mut builder =
+ ListVectorBuilder::with_type_capacity(ConcreteDataType::int32_datatype(), 8);
+ for vec_opt in &data {
+ if let Some(vec) = vec_opt {
+ let values = vec.iter().map(|v| Value::from(*v)).collect();
+ let values = Some(Box::new(values));
+ let list_value = ListValue::new(values, ConcreteDataType::int32_datatype());
+
+ builder.push(Some(ListValueRef::Ref { val: &list_value }));
+ } else {
+ builder.push(None);
+ }
+ }
+ let vector = builder.finish();
+
+ check_value_ref_size_eq(
+ &ValueRef::List(ListValueRef::Indexed {
+ vector: &vector,
+ idx: 0,
+ }),
+ 85,
+ );
+ check_value_ref_size_eq(
+ &ValueRef::List(ListValueRef::Indexed {
+ vector: &vector,
+ idx: 1,
+ }),
+ 85,
+ );
+ check_value_ref_size_eq(
+ &ValueRef::List(ListValueRef::Indexed {
+ vector: &vector,
+ idx: 2,
+ }),
+ 85,
+ )
+ }
}
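
data_size estimates only the payload bytes of each variant (not the enum tag or pointer overhead), which keeps it cheap enough to call per value. A short sketch of the kind of caller it enables, summing the estimate over a row; estimated_row_size is a hypothetical helper and the datatypes::value path is taken from the file being changed:

use datatypes::value::ValueRef;

/// Rough payload size of one row, summing the per-value estimates.
fn estimated_row_size(row: &[ValueRef<'_>]) -> usize {
    row.iter().map(|v| v.data_size()).sum()
}

// estimated_row_size(&[ValueRef::Int64(1), ValueRef::String("greptimedb")]) == 8 + 10.
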
|
feat
|
get internal value size of ValueRef (#2613)
|
cf25cf984bc60b58b4fc2c49e096dcc125d1876f
|
2024-03-18 18:54:13
|
JeremyHi
|
chore: avoid unnecessary cloning (#3537)
| false
|
diff --git a/src/promql/src/extension_plan/series_divide.rs b/src/promql/src/extension_plan/series_divide.rs
index 772ee079eb77..73fffbdeeb10 100644
--- a/src/promql/src/extension_plan/series_divide.rs
+++ b/src/promql/src/extension_plan/series_divide.rs
@@ -255,20 +255,18 @@ impl Stream for SeriesDivideStream {
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
loop {
- if let Some(batch) = self.buffer.clone() {
+ if let Some(batch) = self.buffer.take() {
let same_length = self.find_first_diff_row(&batch) + 1;
if same_length >= batch.num_rows() {
let next_batch = match ready!(self.as_mut().fetch_next_batch(cx)) {
- Some(Ok(batch)) => batch,
+ Some(Ok(next_batch)) => next_batch,
None => {
- self.buffer = None;
self.num_series += 1;
return Poll::Ready(Some(Ok(batch)));
}
error => return Poll::Ready(error),
};
- let new_batch =
- compute::concat_batches(&batch.schema(), &[batch.clone(), next_batch])?;
+ let new_batch = compute::concat_batches(&batch.schema(), &[batch, next_batch])?;
self.buffer = Some(new_batch);
continue;
} else {
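
The change swaps self.buffer.clone() for self.buffer.take(): the buffered batch is moved out instead of cloned, and since take() already leaves None behind, the explicit self.buffer = None on the end-of-stream path can go as well. The idiom in isolation, as a generic illustration:

struct Buffered {
    pending: Option<Vec<u8>>,
}

impl Buffered {
    /// Moves the buffered bytes out, leaving None behind; no copy is made.
    fn drain(&mut self) -> Vec<u8> {
        self.pending.take().unwrap_or_default()
    }
}
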
|
chore
|
avoid unnecessary cloning (#3537)
|
19543f98198ec4d559a9e0dbb0ad3b41b983d659
|
2024-05-20 16:58:00
|
shuiyisong
|
feat: support compression on gRPC server (#3961)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 34317baeb0bc..a4f63ea2a72b 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -11196,6 +11196,7 @@ dependencies = [
"axum",
"base64 0.21.7",
"bytes",
+ "flate2",
"h2",
"http",
"http-body",
@@ -11213,6 +11214,7 @@ dependencies = [
"tower-layer",
"tower-service",
"tracing",
+ "zstd 0.12.4",
]
[[package]]
diff --git a/Cargo.toml b/Cargo.toml
index 8390a4f0f710..13a62f6682c3 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -169,7 +169,7 @@ tokio = { version = "1.36", features = ["full"] }
tokio-stream = { version = "0.1" }
tokio-util = { version = "0.7", features = ["io-util", "compat"] }
toml = "0.8.8"
-tonic = { version = "0.11", features = ["tls"] }
+tonic = { version = "0.11", features = ["tls", "gzip", "zstd"] }
uuid = { version = "1.7", features = ["serde", "v4", "fast-rng"] }
zstd = "0.13"
diff --git a/Makefile b/Makefile
index cb8e0ce273bb..6fa1f02f87dc 100644
--- a/Makefile
+++ b/Makefile
@@ -199,7 +199,7 @@ config-docs: ## Generate configuration documentation from toml files.
docker run --rm \
-v ${PWD}:/greptimedb \
-w /greptimedb/config \
- toml2docs/toml2docs:latest \
+ toml2docs/toml2docs:v0.1.1 \
-p '##' \
-t ./config-docs-template.md \
-o ./config.md
diff --git a/src/client/src/client.rs b/src/client/src/client.rs
index 5e82295c16f6..ea072a822d39 100644
--- a/src/client/src/client.rs
+++ b/src/client/src/client.rs
@@ -22,6 +22,7 @@ use arrow_flight::flight_service_client::FlightServiceClient;
use common_grpc::channel_manager::ChannelManager;
use parking_lot::RwLock;
use snafu::{OptionExt, ResultExt};
+use tonic::codec::CompressionEncoding;
use tonic::transport::Channel;
use crate::load_balance::{LoadBalance, Loadbalancer};
@@ -151,24 +152,34 @@ impl Client {
pub fn make_flight_client(&self) -> Result<FlightClient> {
let (addr, channel) = self.find_channel()?;
- Ok(FlightClient {
- addr,
- client: FlightServiceClient::new(channel)
- .max_decoding_message_size(self.max_grpc_recv_message_size())
- .max_encoding_message_size(self.max_grpc_send_message_size()),
- })
+
+ let client = FlightServiceClient::new(channel)
+ .max_decoding_message_size(self.max_grpc_recv_message_size())
+ .max_encoding_message_size(self.max_grpc_send_message_size())
+ .accept_compressed(CompressionEncoding::Zstd)
+ .send_compressed(CompressionEncoding::Zstd);
+
+ Ok(FlightClient { addr, client })
}
pub(crate) fn raw_region_client(&self) -> Result<PbRegionClient<Channel>> {
let (_, channel) = self.find_channel()?;
- Ok(PbRegionClient::new(channel)
+ let client = PbRegionClient::new(channel)
.max_decoding_message_size(self.max_grpc_recv_message_size())
- .max_encoding_message_size(self.max_grpc_send_message_size()))
+ .max_encoding_message_size(self.max_grpc_send_message_size())
+ .accept_compressed(CompressionEncoding::Zstd)
+ .send_compressed(CompressionEncoding::Zstd);
+ Ok(client)
}
pub fn make_prometheus_gateway_client(&self) -> Result<PrometheusGatewayClient<Channel>> {
let (_, channel) = self.find_channel()?;
- Ok(PrometheusGatewayClient::new(channel))
+ let client = PrometheusGatewayClient::new(channel)
+ .accept_compressed(CompressionEncoding::Gzip)
+ .accept_compressed(CompressionEncoding::Zstd)
+ .send_compressed(CompressionEncoding::Gzip)
+ .send_compressed(CompressionEncoding::Zstd);
+ Ok(client)
}
pub async fn health_check(&self) -> Result<()> {
diff --git a/src/meta-srv/src/procedure/utils.rs b/src/meta-srv/src/procedure/utils.rs
index 042c31a78741..8ef02b411d04 100644
--- a/src/meta-srv/src/procedure/utils.rs
+++ b/src/meta-srv/src/procedure/utils.rs
@@ -27,6 +27,7 @@ pub mod mock {
use common_runtime::{Builder as RuntimeBuilder, Runtime};
use servers::grpc::region_server::{RegionServerHandler, RegionServerRequestHandler};
use tokio::sync::mpsc;
+ use tonic::codec::CompressionEncoding;
use tonic::transport::Server;
use tower::service_fn;
@@ -57,7 +58,13 @@ pub mod mock {
tokio::spawn(async move {
Server::builder()
- .add_service(RegionServer::new(handler))
+ .add_service(
+ RegionServer::new(handler)
+ .accept_compressed(CompressionEncoding::Gzip)
+ .accept_compressed(CompressionEncoding::Zstd)
+ .send_compressed(CompressionEncoding::Gzip)
+ .send_compressed(CompressionEncoding::Zstd),
+ )
.serve_with_incoming(futures::stream::iter(vec![Ok::<_, Error>(server)]))
.await
});
diff --git a/src/servers/src/grpc/builder.rs b/src/servers/src/grpc/builder.rs
index e0e8e813748f..2155b36462a2 100644
--- a/src/servers/src/grpc/builder.rs
+++ b/src/servers/src/grpc/builder.rs
@@ -23,6 +23,7 @@ use common_runtime::Runtime;
use opentelemetry_proto::tonic::collector::metrics::v1::metrics_service_server::MetricsServiceServer;
use opentelemetry_proto::tonic::collector::trace::v1::trace_service_server::TraceServiceServer;
use tokio::sync::Mutex;
+use tonic::codec::CompressionEncoding;
use tonic::transport::server::RoutesBuilder;
use tower::ServiceBuilder;
@@ -45,11 +46,15 @@ macro_rules! add_service {
let max_recv_message_size = $builder.config().max_recv_message_size;
let max_send_message_size = $builder.config().max_send_message_size;
- $builder.routes_builder_mut().add_service(
- $service
- .max_decoding_message_size(max_recv_message_size)
- .max_encoding_message_size(max_send_message_size),
- )
+ let service_builder = $service
+ .max_decoding_message_size(max_recv_message_size)
+ .max_encoding_message_size(max_send_message_size)
+ .accept_compressed(CompressionEncoding::Gzip)
+ .accept_compressed(CompressionEncoding::Zstd)
+ .send_compressed(CompressionEncoding::Gzip)
+ .send_compressed(CompressionEncoding::Zstd);
+
+ $builder.routes_builder_mut().add_service(service_builder);
};
}
@@ -123,16 +128,26 @@ impl GrpcServerBuilder {
otlp_handler: OpenTelemetryProtocolHandlerRef,
user_provider: Option<UserProviderRef>,
) -> Self {
+ let tracing_service = TraceServiceServer::new(OtlpService::new(otlp_handler.clone()))
+ .accept_compressed(CompressionEncoding::Gzip)
+ .accept_compressed(CompressionEncoding::Zstd)
+ .send_compressed(CompressionEncoding::Gzip)
+ .send_compressed(CompressionEncoding::Zstd);
+
let trace_server = ServiceBuilder::new()
.layer(AuthMiddlewareLayer::with(user_provider.clone()))
- .service(TraceServiceServer::new(OtlpService::new(
- otlp_handler.clone(),
- )));
+ .service(tracing_service);
self.routes_builder.add_service(trace_server);
+ let metrics_service = MetricsServiceServer::new(OtlpService::new(otlp_handler))
+ .accept_compressed(CompressionEncoding::Gzip)
+ .accept_compressed(CompressionEncoding::Zstd)
+ .send_compressed(CompressionEncoding::Gzip)
+ .send_compressed(CompressionEncoding::Zstd);
+
let metrics_server = ServiceBuilder::new()
.layer(AuthMiddlewareLayer::with(user_provider))
- .service(MetricsServiceServer::new(OtlpService::new(otlp_handler)));
+ .service(metrics_service);
self.routes_builder.add_service(metrics_server);
self
diff --git a/src/servers/tests/grpc/mod.rs b/src/servers/tests/grpc/mod.rs
index 183fabb5d44e..4155f5eac738 100644
--- a/src/servers/tests/grpc/mod.rs
+++ b/src/servers/tests/grpc/mod.rs
@@ -34,6 +34,7 @@ use table::TableRef;
use tests_integration::database::Database;
use tokio::net::TcpListener;
use tokio_stream::wrappers::TcpListenerStream;
+use tonic::codec::CompressionEncoding;
use crate::{create_testing_grpc_query_handler, LOCALHOST_WITH_0};
@@ -64,6 +65,10 @@ impl MockGrpcServer {
)
.into();
FlightServiceServer::new(service)
+ .accept_compressed(CompressionEncoding::Gzip)
+ .accept_compressed(CompressionEncoding::Zstd)
+ .send_compressed(CompressionEncoding::Gzip)
+ .send_compressed(CompressionEncoding::Zstd)
}
}
diff --git a/tests-integration/src/cluster.rs b/tests-integration/src/cluster.rs
index 4131d36b30d0..bfa59966ae8e 100644
--- a/tests-integration/src/cluster.rs
+++ b/tests-integration/src/cluster.rs
@@ -53,6 +53,7 @@ use servers::grpc::region_server::RegionServerRequestHandler;
use servers::heartbeat_options::HeartbeatOptions;
use servers::Mode;
use tempfile::TempDir;
+use tonic::codec::CompressionEncoding;
use tonic::transport::Server;
use tower::service_fn;
use uuid::Uuid;
@@ -436,8 +437,20 @@ async fn create_datanode_client(datanode: &Datanode) -> (String, Client) {
let _handle = tokio::spawn(async move {
Server::builder()
- .add_service(FlightServiceServer::new(flight_handler))
- .add_service(RegionServer::new(region_server_handler))
+ .add_service(
+ FlightServiceServer::new(flight_handler)
+ .accept_compressed(CompressionEncoding::Gzip)
+ .accept_compressed(CompressionEncoding::Zstd)
+ .send_compressed(CompressionEncoding::Gzip)
+ .send_compressed(CompressionEncoding::Zstd),
+ )
+ .add_service(
+ RegionServer::new(region_server_handler)
+ .accept_compressed(CompressionEncoding::Gzip)
+ .accept_compressed(CompressionEncoding::Zstd)
+ .send_compressed(CompressionEncoding::Gzip)
+ .send_compressed(CompressionEncoding::Zstd),
+ )
.serve_with_incoming(futures::stream::iter(vec![Ok::<_, std::io::Error>(server)]))
.await
});
diff --git a/tests-integration/tests/grpc.rs b/tests-integration/tests/grpc.rs
index 5a14751bbc45..b6a56aace7dd 100644
--- a/tests-integration/tests/grpc.rs
+++ b/tests-integration/tests/grpc.rs
@@ -70,6 +70,7 @@ macro_rules! grpc_tests {
test_insert_and_select,
test_dbname,
test_grpc_message_size_ok,
+ test_grpc_zstd_compression,
test_grpc_message_size_limit_recv,
test_grpc_message_size_limit_send,
test_grpc_auth,
@@ -142,6 +143,25 @@ pub async fn test_grpc_message_size_ok(store_type: StorageType) {
guard.remove_all().await;
}
+pub async fn test_grpc_zstd_compression(store_type: StorageType) {
+ // server and client both support zstd
+ let config = GrpcServerConfig {
+ max_recv_message_size: 1024,
+ max_send_message_size: 1024,
+ };
+ let (addr, mut guard, fe_grpc_server) =
+ setup_grpc_server_with(store_type, "auto_create_table", None, Some(config)).await;
+
+ let grpc_client = Client::with_urls(vec![addr]);
+ let db = Database::new_with_dbname(
+ format!("{}-{}", DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME),
+ grpc_client,
+ );
+ db.sql("show tables;").await.unwrap();
+ let _ = fe_grpc_server.shutdown().await;
+ guard.remove_all().await;
+}
+
pub async fn test_grpc_message_size_limit_send(store_type: StorageType) {
let config = GrpcServerConfig {
max_recv_message_size: 1024,
|
feat
|
support compression on gRPC server (#3961)
|
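A hedged sketch of the client-side half of the commit above: a tonic 0.11 generated client opting into zstd compression. It assumes the `gzip`/`zstd` features are enabled on the `tonic` dependency and uses the arrow-flight generated `FlightServiceClient`; it illustrates the API, not GreptimeDB's own `Client` wrapper.

use arrow_flight::flight_service_client::FlightServiceClient;
use tonic::codec::CompressionEncoding;
use tonic::transport::{Channel, Error};

// Connect a Flight client that compresses its requests with zstd and accepts
// zstd-compressed responses; the server must opt in as well (see the
// `add_service!` macro change in the diff).
async fn connect_flight(addr: &'static str) -> Result<FlightServiceClient<Channel>, Error> {
    let channel = Channel::from_static(addr).connect().await?;
    let client = FlightServiceClient::new(channel)
        .accept_compressed(CompressionEncoding::Zstd)
        .send_compressed(CompressionEncoding::Zstd);
    Ok(client)
}

Compression is negotiated per call: the client only advertises what it accepts, and the server replies compressed only if it was built with the matching encoding enabled.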
610a895b6619c93f986e76c10aa1417388e6c0ee
|
2023-03-09 17:38:07
|
discord9
|
feat: `abi3` & `abi37` (#1151)
| false
|
diff --git a/src/script/Cargo.toml b/src/script/Cargo.toml
index e7f6f1efe551..114ac8736440 100644
--- a/src/script/Cargo.toml
+++ b/src/script/Cargo.toml
@@ -60,7 +60,7 @@ rustpython-vm = { git = "https://github.com/discord9/RustPython", optional = tru
"default",
"codegen",
] }
-pyo3 = { version = "0.18", optional = true }
+pyo3 = { version = "0.18", optional = true, features = ["abi3", "abi3-py37"] }
session = { path = "../session" }
snafu = { version = "0.7", features = ["backtraces"] }
sql = { path = "../sql" }
|
feat
|
`abi3` & `abi37` (#1151)
|
62fcb54258b1db36592487347a44e9c2b3192a4b
|
2022-11-21 12:45:14
|
LFC
|
fix: correctly open table when distributed datanode restart (#576)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index a26a0a365f34..b6bf3838e918 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3616,9 +3616,9 @@ dependencies = [
[[package]]
name = "opensrv-mysql"
-version = "0.1.0"
+version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8bcb5fc2fda7e5e5f8478cd637285bbdd6196a9601e32293d0897e469a7dd020"
+checksum = "e4c24c12fd688cb5aa5b1a54c6ccb2e30fb9b5132debb0e89fcb432b3f73db8f"
dependencies = [
"async-trait",
"byteorder",
diff --git a/src/api/src/column_def.rs b/src/api/src/column_def.rs
new file mode 100644
index 000000000000..131ad75764cb
--- /dev/null
+++ b/src/api/src/column_def.rs
@@ -0,0 +1,38 @@
+// Copyright 2022 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use datatypes::schema::{ColumnDefaultConstraint, ColumnSchema};
+use snafu::ResultExt;
+
+use crate::error::{self, Result};
+use crate::helper::ColumnDataTypeWrapper;
+use crate::v1::ColumnDef;
+
+impl ColumnDef {
+ pub fn try_as_column_schema(&self) -> Result<ColumnSchema> {
+ let data_type = ColumnDataTypeWrapper::try_new(self.datatype)?;
+
+ let constraint = match &self.default_constraint {
+ None => None,
+ Some(v) => Some(
+ ColumnDefaultConstraint::try_from(&v[..])
+ .context(error::ConvertColumnDefaultConstraintSnafu { column: &self.name })?,
+ ),
+ };
+
+ ColumnSchema::new(&self.name, data_type.into(), self.is_nullable)
+ .with_default_constraint(constraint)
+ .context(error::InvalidColumnDefaultConstraintSnafu { column: &self.name })
+ }
+}
diff --git a/src/api/src/error.rs b/src/api/src/error.rs
index 2320e199d70f..562ea5a81841 100644
--- a/src/api/src/error.rs
+++ b/src/api/src/error.rs
@@ -33,6 +33,28 @@ pub enum Error {
from: ConcreteDataType,
backtrace: Backtrace,
},
+
+ #[snafu(display(
+ "Failed to convert column default constraint, column: {}, source: {}",
+ column,
+ source
+ ))]
+ ConvertColumnDefaultConstraint {
+ column: String,
+ #[snafu(backtrace)]
+ source: datatypes::error::Error,
+ },
+
+ #[snafu(display(
+ "Invalid column default constraint, column: {}, source: {}",
+ column,
+ source
+ ))]
+ InvalidColumnDefaultConstraint {
+ column: String,
+ #[snafu(backtrace)]
+ source: datatypes::error::Error,
+ },
}
impl ErrorExt for Error {
@@ -40,6 +62,8 @@ impl ErrorExt for Error {
match self {
Error::UnknownColumnDataType { .. } => StatusCode::InvalidArguments,
Error::IntoColumnDataType { .. } => StatusCode::Unexpected,
+ Error::ConvertColumnDefaultConstraint { source, .. }
+ | Error::InvalidColumnDefaultConstraint { source, .. } => source.status_code(),
}
}
fn backtrace_opt(&self) -> Option<&Backtrace> {
diff --git a/src/api/src/lib.rs b/src/api/src/lib.rs
index d6c415d8cf9f..73aa6c436306 100644
--- a/src/api/src/lib.rs
+++ b/src/api/src/lib.rs
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+mod column_def;
pub mod error;
pub mod helper;
pub mod prometheus;
diff --git a/src/catalog/src/local/manager.rs b/src/catalog/src/local/manager.rs
index 89fc62b6b90e..ed6783c68f28 100644
--- a/src/catalog/src/local/manager.rs
+++ b/src/catalog/src/local/manager.rs
@@ -241,6 +241,7 @@ impl LocalCatalogManager {
schema_name: t.schema_name.clone(),
table_name: t.table_name.clone(),
table_id: t.table_id,
+ region_numbers: vec![0],
};
let option = self
diff --git a/src/catalog/src/remote/manager.rs b/src/catalog/src/remote/manager.rs
index 5c4ddd680eba..5369f6ce0d07 100644
--- a/src/catalog/src/remote/manager.rs
+++ b/src/catalog/src/remote/manager.rs
@@ -316,11 +316,15 @@ impl RemoteCatalogManager {
..
} = table_value;
+ // unwrap safety: checked in yielding this table when `iter_remote_tables`
+ let region_numbers = regions_id_map.get(&self.node_id).unwrap();
+
let request = OpenTableRequest {
catalog_name: catalog_name.clone(),
schema_name: schema_name.clone(),
table_name: table_name.clone(),
table_id,
+ region_numbers: region_numbers.clone(),
};
match self
.engine
@@ -361,7 +365,7 @@ impl RemoteCatalogManager {
table_name: table_name.clone(),
desc: None,
schema: Arc::new(schema),
- region_numbers: regions_id_map.get(&self.node_id).unwrap().clone(), // this unwrap is safe because region_id_map is checked in `iter_remote_tables`
+ region_numbers: region_numbers.clone(),
primary_key_indices: meta.primary_key_indices.clone(),
create_if_not_exists: true,
table_options: meta.options.clone(),
diff --git a/src/catalog/src/system.rs b/src/catalog/src/system.rs
index 07084248f2d6..564acc7ba53b 100644
--- a/src/catalog/src/system.rs
+++ b/src/catalog/src/system.rs
@@ -87,6 +87,7 @@ impl SystemCatalogTable {
schema_name: INFORMATION_SCHEMA_NAME.to_string(),
table_name: SYSTEM_CATALOG_TABLE_NAME.to_string(),
table_id: SYSTEM_CATALOG_TABLE_ID,
+ region_numbers: vec![0],
};
let schema = Arc::new(build_system_catalog_schema());
let ctx = EngineContext::default();
diff --git a/src/cmd/src/datanode.rs b/src/cmd/src/datanode.rs
index 44d4d4c4f595..d386bfa64ee5 100644
--- a/src/cmd/src/datanode.rs
+++ b/src/cmd/src/datanode.rs
@@ -14,7 +14,7 @@
use clap::Parser;
use common_telemetry::logging;
-use datanode::datanode::{Datanode, DatanodeOptions};
+use datanode::datanode::{Datanode, DatanodeOptions, ObjectStoreConfig};
use meta_client::MetaClientOpts;
use servers::Mode;
use snafu::ResultExt;
@@ -47,7 +47,7 @@ impl SubCommand {
}
}
-#[derive(Debug, Parser)]
+#[derive(Debug, Parser, Default)]
struct StartCommand {
#[clap(long)]
node_id: Option<u64>,
@@ -59,6 +59,10 @@ struct StartCommand {
metasrv_addr: Option<String>,
#[clap(short, long)]
config_file: Option<String>,
+ #[clap(long)]
+ data_dir: Option<String>,
+ #[clap(long)]
+ wal_dir: Option<String>,
}
impl StartCommand {
@@ -115,6 +119,14 @@ impl TryFrom<StartCommand> for DatanodeOptions {
}
.fail();
}
+
+ if let Some(data_dir) = cmd.data_dir {
+ opts.storage = ObjectStoreConfig::File { data_dir };
+ }
+
+ if let Some(wal_dir) = cmd.wal_dir {
+ opts.wal_dir = wal_dir;
+ }
Ok(opts)
}
}
@@ -131,14 +143,11 @@ mod tests {
#[test]
fn test_read_from_config_file() {
let cmd = StartCommand {
- node_id: None,
- rpc_addr: None,
- mysql_addr: None,
- metasrv_addr: None,
config_file: Some(format!(
"{}/../../config/datanode.example.toml",
std::env::current_dir().unwrap().as_path().to_str().unwrap()
)),
+ ..Default::default()
};
let options: DatanodeOptions = cmd.try_into().unwrap();
assert_eq!("127.0.0.1:3001".to_string(), options.rpc_addr);
@@ -168,44 +177,30 @@ mod tests {
fn test_try_from_cmd() {
assert_eq!(
Mode::Standalone,
- DatanodeOptions::try_from(StartCommand {
- node_id: None,
- rpc_addr: None,
- mysql_addr: None,
- metasrv_addr: None,
- config_file: None
- })
- .unwrap()
- .mode
+ DatanodeOptions::try_from(StartCommand::default())
+ .unwrap()
+ .mode
);
let mode = DatanodeOptions::try_from(StartCommand {
node_id: Some(42),
- rpc_addr: None,
- mysql_addr: None,
metasrv_addr: Some("127.0.0.1:3002".to_string()),
- config_file: None,
+ ..Default::default()
})
.unwrap()
.mode;
assert_matches!(mode, Mode::Distributed);
assert!(DatanodeOptions::try_from(StartCommand {
- node_id: None,
- rpc_addr: None,
- mysql_addr: None,
metasrv_addr: Some("127.0.0.1:3002".to_string()),
- config_file: None,
+ ..Default::default()
})
.is_err());
// Providing node_id but leave metasrv_addr absent is ok since metasrv_addr has default value
DatanodeOptions::try_from(StartCommand {
node_id: Some(42),
- rpc_addr: None,
- mysql_addr: None,
- metasrv_addr: None,
- config_file: None,
+ ..Default::default()
})
.unwrap();
}
@@ -213,14 +208,11 @@ mod tests {
#[test]
fn test_merge_config() {
let dn_opts = DatanodeOptions::try_from(StartCommand {
- node_id: None,
- rpc_addr: None,
- mysql_addr: None,
- metasrv_addr: None,
config_file: Some(format!(
"{}/../../config/datanode.example.toml",
std::env::current_dir().unwrap().as_path().to_str().unwrap()
)),
+ ..Default::default()
})
.unwrap();
assert_eq!(Some(42), dn_opts.node_id);
diff --git a/src/datanode/src/error.rs b/src/datanode/src/error.rs
index 5acb41590082..a6ecd963a47b 100644
--- a/src/datanode/src/error.rs
+++ b/src/datanode/src/error.rs
@@ -208,10 +208,15 @@ pub enum Error {
source: api::error::Error,
},
- #[snafu(display("Invalid column default constraint, source: {}", source))]
- ColumnDefaultConstraint {
+ #[snafu(display(
+ "Invalid column proto definition, column: {}, source: {}",
+ column,
+ source
+ ))]
+ InvalidColumnDef {
+ column: String,
#[snafu(backtrace)]
- source: datatypes::error::Error,
+ source: api::error::Error,
},
#[snafu(display("Failed to parse SQL, source: {}", source))]
@@ -311,8 +316,7 @@ impl ErrorExt for Error {
source.status_code()
}
- Error::ColumnDefaultConstraint { source, .. }
- | Error::CreateSchema { source, .. }
+ Error::CreateSchema { source, .. }
| Error::ConvertSchema { source, .. }
| Error::VectorComputation { source } => source.status_code(),
@@ -337,9 +341,12 @@ impl ErrorExt for Error {
| Error::RegisterSchema { .. }
| Error::IntoPhysicalPlan { .. }
| Error::UnsupportedExpr { .. }
- | Error::ColumnDataType { .. }
| Error::Catalog { .. } => StatusCode::Internal,
+ Error::ColumnDataType { source } | Error::InvalidColumnDef { source, .. } => {
+ source.status_code()
+ }
+
Error::InitBackend { .. } => StatusCode::StorageUnavailable,
Error::OpenLogStore { source } => source.status_code(),
Error::StartScriptManager { source } => source.status_code(),
diff --git a/src/datanode/src/server/grpc/ddl.rs b/src/datanode/src/server/grpc/ddl.rs
index 1bb2c5d7f968..7a3980c6f6fb 100644
--- a/src/datanode/src/server/grpc/ddl.rs
+++ b/src/datanode/src/server/grpc/ddl.rs
@@ -14,23 +14,20 @@
use std::sync::Arc;
-use api::helper::ColumnDataTypeWrapper;
use api::result::AdminResultBuilder;
use api::v1::alter_expr::Kind;
-use api::v1::{AdminResult, AlterExpr, ColumnDef, CreateExpr, DropColumns};
+use api::v1::{AdminResult, AlterExpr, CreateExpr, DropColumns};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_error::prelude::{ErrorExt, StatusCode};
use common_query::Output;
use common_telemetry::{error, info};
-use datatypes::schema::{ColumnDefaultConstraint, ColumnSchema, SchemaBuilder, SchemaRef};
+use datatypes::schema::{ColumnSchema, SchemaBuilder, SchemaRef};
use futures::TryFutureExt;
use snafu::prelude::*;
use table::metadata::TableId;
use table::requests::{AddColumnRequest, AlterKind, AlterTableRequest, CreateTableRequest};
-use crate::error::{
- self, BumpTableIdSnafu, ColumnDefaultConstraintSnafu, MissingFieldSnafu, Result,
-};
+use crate::error::{self, BumpTableIdSnafu, MissingFieldSnafu, Result};
use crate::instance::Instance;
use crate::sql::SqlRequest;
@@ -172,7 +169,12 @@ fn alter_expr_to_request(expr: AlterExpr) -> Result<Option<AlterTableRequest>> {
field: "column_def",
})?;
- let schema = create_column_schema(&column_def)?;
+ let schema =
+ column_def
+ .try_as_column_schema()
+ .context(error::InvalidColumnDefSnafu {
+ column: &column_def.name,
+ })?;
add_column_requests.push(AddColumnRequest {
column_schema: schema,
is_key: add_column_expr.is_key,
@@ -212,7 +214,10 @@ fn create_table_schema(expr: &CreateExpr) -> Result<SchemaRef> {
let column_schemas = expr
.column_defs
.iter()
- .map(create_column_schema)
+ .map(|x| {
+ x.try_as_column_schema()
+ .context(error::InvalidColumnDefSnafu { column: &x.name })
+ })
.collect::<Result<Vec<ColumnSchema>>>()?;
ensure!(
@@ -243,28 +248,12 @@ fn create_table_schema(expr: &CreateExpr) -> Result<SchemaRef> {
))
}
-fn create_column_schema(column_def: &ColumnDef) -> Result<ColumnSchema> {
- let data_type =
- ColumnDataTypeWrapper::try_new(column_def.datatype).context(error::ColumnDataTypeSnafu)?;
- let default_constraint = match &column_def.default_constraint {
- None => None,
- Some(v) => {
- Some(ColumnDefaultConstraint::try_from(&v[..]).context(ColumnDefaultConstraintSnafu)?)
- }
- };
- ColumnSchema::new(
- column_def.name.clone(),
- data_type.into(),
- column_def.is_nullable,
- )
- .with_default_constraint(default_constraint)
- .context(ColumnDefaultConstraintSnafu)
-}
-
#[cfg(test)]
mod tests {
+ use api::v1::ColumnDef;
use common_catalog::consts::MIN_USER_TABLE_ID;
use datatypes::prelude::ConcreteDataType;
+ use datatypes::schema::ColumnDefaultConstraint;
use datatypes::value::Value;
use super::*;
@@ -321,12 +310,11 @@ mod tests {
is_nullable: true,
default_constraint: None,
};
- let result = create_column_schema(&column_def);
- assert!(result.is_err());
- assert_eq!(
- result.unwrap_err().to_string(),
- "Column datatype error, source: Unknown proto column datatype: 1024"
- );
+ let result = column_def.try_as_column_schema();
+ assert!(matches!(
+ result.unwrap_err(),
+ api::error::Error::UnknownColumnDataType { .. }
+ ));
let column_def = ColumnDef {
name: "a".to_string(),
@@ -334,7 +322,7 @@ mod tests {
is_nullable: true,
default_constraint: None,
};
- let column_schema = create_column_schema(&column_def).unwrap();
+ let column_schema = column_def.try_as_column_schema().unwrap();
assert_eq!(column_schema.name, "a");
assert_eq!(column_schema.data_type, ConcreteDataType::string_datatype());
assert!(column_schema.is_nullable());
@@ -346,7 +334,7 @@ mod tests {
is_nullable: true,
default_constraint: Some(default_constraint.clone().try_into().unwrap()),
};
- let column_schema = create_column_schema(&column_def).unwrap();
+ let column_schema = column_def.try_as_column_schema().unwrap();
assert_eq!(column_schema.name, "a");
assert_eq!(column_schema.data_type, ConcreteDataType::string_datatype());
assert!(column_schema.is_nullable());
diff --git a/src/frontend/src/error.rs b/src/frontend/src/error.rs
index e3a268a8de27..9a23a2320a7b 100644
--- a/src/frontend/src/error.rs
+++ b/src/frontend/src/error.rs
@@ -65,6 +65,17 @@ pub enum Error {
source: api::error::Error,
},
+ #[snafu(display(
+ "Invalid column proto definition, column: {}, source: {}",
+ column,
+ source
+ ))]
+ InvalidColumnDef {
+ column: String,
+ #[snafu(backtrace)]
+ source: api::error::Error,
+ },
+
#[snafu(display(
"Failed to convert column default constraint, column: {}, source: {}",
column_name,
@@ -452,8 +463,11 @@ impl ErrorExt for Error {
| Error::RequestDatanode { source }
| Error::InvalidAdminResult { source } => source.status_code(),
- Error::ColumnDataType { .. }
- | Error::FindDatanode { .. }
+ Error::ColumnDataType { source } | Error::InvalidColumnDef { source, .. } => {
+ source.status_code()
+ }
+
+ Error::FindDatanode { .. }
| Error::GetCache { .. }
| Error::FindTableRoutes { .. }
| Error::SerializeJson { .. }
diff --git a/src/frontend/src/instance/distributed.rs b/src/frontend/src/instance/distributed.rs
index 689b957c96e8..f3b133096fd0 100644
--- a/src/frontend/src/instance/distributed.rs
+++ b/src/frontend/src/instance/distributed.rs
@@ -24,7 +24,7 @@ use common_catalog::{SchemaKey, SchemaValue, TableGlobalKey, TableGlobalValue};
use common_query::Output;
use common_telemetry::{debug, info};
use datatypes::prelude::ConcreteDataType;
-use datatypes::schema::{ColumnDefaultConstraint, ColumnSchema, RawSchema};
+use datatypes::schema::RawSchema;
use meta_client::client::MetaClient;
use meta_client::rpc::{
CreateRequest as MetaCreateRequest, Partition as MetaPartition, PutRequest, RouteResponse,
@@ -42,8 +42,8 @@ use table::metadata::{RawTableInfo, RawTableMeta, TableIdent, TableType};
use crate::catalog::FrontendCatalogManager;
use crate::datanode::DatanodeClients;
use crate::error::{
- self, CatalogEntrySerdeSnafu, ColumnDataTypeSnafu, ConvertColumnDefaultConstraintSnafu,
- PrimaryKeyNotFoundSnafu, RequestMetaSnafu, Result, StartMetaClientSnafu,
+ self, CatalogEntrySerdeSnafu, ColumnDataTypeSnafu, PrimaryKeyNotFoundSnafu, RequestMetaSnafu,
+ Result, StartMetaClientSnafu,
};
use crate::partitioning::{PartitionBound, PartitionDef};
@@ -229,17 +229,40 @@ fn create_table_global_value(
let node_id = region_routes[0]
.leader_peer
.as_ref()
- .context(error::FindLeaderPeerSnafu {
+ .with_context(|| error::FindLeaderPeerSnafu {
region: region_routes[0].region.id,
table_name: table_name.to_string(),
})?
.id;
+ let mut regions_id_map = HashMap::new();
+ for route in region_routes.iter() {
+ let node_id = route
+ .leader_peer
+ .as_ref()
+ .with_context(|| error::FindLeaderPeerSnafu {
+ region: route.region.id,
+ table_name: table_name.to_string(),
+ })?
+ .id;
+ regions_id_map
+ .entry(node_id)
+ .or_insert_with(Vec::new)
+ .push(route.region.id as u32);
+ }
+
let mut column_schemas = Vec::with_capacity(create_table.column_defs.len());
let mut column_name_to_index_map = HashMap::new();
for (idx, column) in create_table.column_defs.iter().enumerate() {
- column_schemas.push(create_column_schema(column)?);
+ let schema = column
+ .try_as_column_schema()
+ .context(error::InvalidColumnDefSnafu {
+ column: &column.name,
+ })?;
+ let schema = schema.with_time_index(column.name == create_table.time_index);
+
+ column_schemas.push(schema);
column_name_to_index_map.insert(column.name.clone(), idx);
}
@@ -291,34 +314,11 @@ fn create_table_global_value(
Ok(TableGlobalValue {
node_id,
- regions_id_map: HashMap::new(),
+ regions_id_map,
table_info,
})
}
-// Remove this duplication in the future
-fn create_column_schema(column_def: &api::v1::ColumnDef) -> Result<ColumnSchema> {
- let data_type =
- ColumnDataTypeWrapper::try_new(column_def.datatype).context(error::ColumnDataTypeSnafu)?;
- let default_constraint = match &column_def.default_constraint {
- None => None,
- Some(v) => Some(ColumnDefaultConstraint::try_from(&v[..]).context(
- ConvertColumnDefaultConstraintSnafu {
- column_name: &column_def.name,
- },
- )?),
- };
- ColumnSchema::new(
- column_def.name.clone(),
- data_type.into(),
- column_def.is_nullable,
- )
- .with_default_constraint(default_constraint)
- .context(ConvertColumnDefaultConstraintSnafu {
- column_name: &column_def.name,
- })
-}
-
fn parse_partitions(
create_table: &CreateExpr,
partitions: Option<Partitions>,
diff --git a/src/mito/src/engine.rs b/src/mito/src/engine.rs
index 7fbc6479fa69..627ef5bdf49e 100644
--- a/src/mito/src/engine.rs
+++ b/src/mito/src/engine.rs
@@ -404,7 +404,8 @@ impl<S: StorageEngine> MitoEngineInner<S> {
let table_id = request.table_id;
// TODO(dennis): supports multi regions;
- let region_number = 0;
+ assert_eq!(request.region_numbers.len(), 1);
+ let region_number = request.region_numbers[0];
let region_name = region_name(table_id, region_number);
let region = match self
@@ -804,6 +805,7 @@ mod tests {
table_name: test_util::TABLE_NAME.to_string(),
// the test table id is 1
table_id: 1,
+ region_numbers: vec![0],
};
let (engine, table, object_store, _dir) = {
diff --git a/src/query/src/optimizer.rs b/src/query/src/optimizer.rs
index e8841f95f15b..cee0c2727aef 100644
--- a/src/query/src/optimizer.rs
+++ b/src/query/src/optimizer.rs
@@ -15,7 +15,6 @@
use std::str::FromStr;
use std::sync::Arc;
-use common_telemetry::debug;
use common_time::timestamp::{TimeUnit, Timestamp};
use datafusion::execution::context::ExecutionProps;
use datafusion::logical_plan::plan::Filter;
@@ -169,10 +168,6 @@ impl<'a> TypeConverter<'a> {
match (left, right) {
(Expr::Column(col), Expr::Literal(value)) => {
let casted_right = Self::cast_scalar_value(value, left_type)?;
- debug!(
- "Converting type, origin_left:{:?}, type:{:?}, right:{:?}, casted_right:{:?}",
- col, left_type, value, casted_right
- );
if casted_right.is_null() {
return Err(DataFusionError::Plan(format!(
"column:{:?} value:{:?} is invalid",
diff --git a/src/servers/Cargo.toml b/src/servers/Cargo.toml
index baf8845bd714..5a74c223fad2 100644
--- a/src/servers/Cargo.toml
+++ b/src/servers/Cargo.toml
@@ -29,7 +29,7 @@ metrics = "0.20"
num_cpus = "1.13"
once_cell = "1.16"
openmetrics-parser = "0.4"
-opensrv-mysql = "0.1"
+opensrv-mysql = "0.2"
pgwire = "0.5"
prost = "0.11"
regex = "1.6"
diff --git a/src/servers/src/mysql/handler.rs b/src/servers/src/mysql/handler.rs
index ce77d952cc29..c1614377a79a 100644
--- a/src/servers/src/mysql/handler.rs
+++ b/src/servers/src/mysql/handler.rs
@@ -12,15 +12,16 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::io;
use std::sync::Arc;
+use std::time::Instant;
use async_trait::async_trait;
-use common_telemetry::error;
+use common_telemetry::{debug, error};
use opensrv_mysql::{
AsyncMysqlShim, ErrorKind, ParamParser, QueryResultWriter, StatementMetaWriter,
};
use rand::RngCore;
+use tokio::io::AsyncWrite;
use tokio::sync::RwLock;
use crate::context::AuthHashMethod::DoubleSha1;
@@ -63,7 +64,7 @@ impl MysqlInstanceShim {
}
#[async_trait]
-impl<W: io::Write + Send + Sync> AsyncMysqlShim<W> for MysqlInstanceShim {
+impl<W: AsyncWrite + Send + Sync + Unpin> AsyncMysqlShim<W> for MysqlInstanceShim {
type Error = error::Error;
fn salt(&self) -> [u8; 20] {
@@ -108,15 +109,12 @@ impl<W: io::Write + Send + Sync> AsyncMysqlShim<W> for MysqlInstanceShim {
};
}
- async fn on_prepare<'a>(
- &'a mut self,
- _: &'a str,
- writer: StatementMetaWriter<'a, W>,
- ) -> Result<()> {
- writer.error(
+ async fn on_prepare<'a>(&'a mut self, _: &'a str, w: StatementMetaWriter<'a, W>) -> Result<()> {
+ w.error(
ErrorKind::ER_UNKNOWN_ERROR,
- "prepare statement is not supported yet".as_bytes(),
- )?;
+ b"prepare statement is not supported yet",
+ )
+ .await?;
Ok(())
}
@@ -124,12 +122,13 @@ impl<W: io::Write + Send + Sync> AsyncMysqlShim<W> for MysqlInstanceShim {
&'a mut self,
_: u32,
_: ParamParser<'a>,
- writer: QueryResultWriter<'a, W>,
+ w: QueryResultWriter<'a, W>,
) -> Result<()> {
- writer.error(
+ w.error(
ErrorKind::ER_UNKNOWN_ERROR,
- "prepare statement is not supported yet".as_bytes(),
- )?;
+ b"prepare statement is not supported yet",
+ )
+ .await?;
Ok(())
}
@@ -145,6 +144,9 @@ impl<W: io::Write + Send + Sync> AsyncMysqlShim<W> for MysqlInstanceShim {
query: &'a str,
writer: QueryResultWriter<'a, W>,
) -> Result<()> {
+ debug!("Start executing query: '{}'", query);
+ let start = Instant::now();
+
// TODO(LFC): Find a better way:
// `check` uses regex to filter out unsupported statements emitted by MySQL's federated
// components, this is quick and dirty, there must be a better way to do it.
@@ -154,7 +156,13 @@ impl<W: io::Write + Send + Sync> AsyncMysqlShim<W> for MysqlInstanceShim {
self.query_handler.do_query(query).await
};
+ debug!(
+ "Finished executing query: '{}', total time costs in microseconds: {}",
+ query,
+ start.elapsed().as_micros()
+ );
+
let mut writer = MysqlResultWriter::new(writer);
- writer.write(output).await
+ writer.write(query, output).await
}
}
diff --git a/src/servers/src/mysql/server.rs b/src/servers/src/mysql/server.rs
index e8064d20f1fc..f66669303c32 100644
--- a/src/servers/src/mysql/server.rs
+++ b/src/servers/src/mysql/server.rs
@@ -22,6 +22,7 @@ use common_telemetry::logging::{error, info};
use futures::StreamExt;
use opensrv_mysql::AsyncMysqlIntermediary;
use tokio;
+use tokio::io::BufWriter;
use tokio::net::TcpStream;
use crate::error::Result;
@@ -29,6 +30,9 @@ use crate::mysql::handler::MysqlInstanceShim;
use crate::query_handler::SqlQueryHandlerRef;
use crate::server::{AbortableStream, BaseTcpServer, Server};
+// Default size of ResultSet write buffer: 100KB
+const DEFAULT_RESULT_SET_WRITE_BUFFER_SIZE: usize = 100 * 1024;
+
pub struct MysqlServer {
base_server: BaseTcpServer,
query_handler: SqlQueryHandlerRef,
@@ -58,7 +62,8 @@ impl MysqlServer {
match tcp_stream {
Err(error) => error!("Broken pipe: {}", error), // IoError doesn't impl ErrorExt.
Ok(io_stream) => {
- if let Err(error) = Self::handle(io_stream, io_runtime, query_handler) {
+ if let Err(error) = Self::handle(io_stream, io_runtime, query_handler).await
+ {
error!(error; "Unexpected error when handling TcpStream");
};
}
@@ -67,15 +72,30 @@ impl MysqlServer {
})
}
- pub fn handle(
+ async fn handle(
stream: TcpStream,
io_runtime: Arc<Runtime>,
query_handler: SqlQueryHandlerRef,
) -> Result<()> {
info!("MySQL connection coming from: {}", stream.peer_addr()?);
let shim = MysqlInstanceShim::create(query_handler, stream.peer_addr()?.to_string());
- // TODO(LFC): Relate "handler" with MySQL session; also deal with panics there.
- let _handler = io_runtime.spawn(AsyncMysqlIntermediary::run_on(shim, stream));
+
+ let (r, w) = stream.into_split();
+ let w = BufWriter::with_capacity(DEFAULT_RESULT_SET_WRITE_BUFFER_SIZE, w);
+ // TODO(LFC): Use `output_stream` to write large MySQL ResultSet to client.
+ let spawn_result = io_runtime
+ .spawn(AsyncMysqlIntermediary::run_on(shim, r, w))
+ .await;
+ match spawn_result {
+ Ok(run_result) => {
+ if let Err(e) = run_result {
+ // TODO(LFC): Write this error and the below one to client as well, in MySQL text protocol.
+ // Looks like we have to expose opensrv-mysql's `PacketWriter`?
+ error!(e; "Internal error occurred during query exec, server actively close the channel to let client try next time.")
+ }
+ }
+ Err(e) => error!("IO runtime cannot execute task, error: {}", e),
+ }
Ok(())
}
}
diff --git a/src/servers/src/mysql/writer.rs b/src/servers/src/mysql/writer.rs
index 66e4278687d6..5a0373fbc390 100644
--- a/src/servers/src/mysql/writer.rs
+++ b/src/servers/src/mysql/writer.rs
@@ -12,11 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::io;
use std::ops::Deref;
use common_query::Output;
use common_recordbatch::{util, RecordBatch};
+use common_telemetry::error;
use common_time::datetime::DateTime;
use common_time::timestamp::TimeUnit;
use datatypes::prelude::{ConcreteDataType, Value};
@@ -25,6 +25,7 @@ use opensrv_mysql::{
Column, ColumnFlags, ColumnType, ErrorKind, OkResponse, QueryResultWriter, RowWriter,
};
use snafu::prelude::*;
+use tokio::io::AsyncWrite;
use crate::error::{self, Error, Result};
@@ -33,18 +34,18 @@ struct QueryResult {
schema: SchemaRef,
}
-pub struct MysqlResultWriter<'a, W: io::Write> {
+pub struct MysqlResultWriter<'a, W: AsyncWrite + Unpin> {
// `QueryResultWriter` will be consumed when the write completed (see
// QueryResultWriter::completed), thus we use an option to wrap it.
inner: Option<QueryResultWriter<'a, W>>,
}
-impl<'a, W: io::Write> MysqlResultWriter<'a, W> {
+impl<'a, W: AsyncWrite + Unpin> MysqlResultWriter<'a, W> {
pub fn new(inner: QueryResultWriter<'a, W>) -> MysqlResultWriter<'a, W> {
MysqlResultWriter::<'a, W> { inner: Some(inner) }
}
- pub async fn write(&mut self, output: Result<Output>) -> Result<()> {
+ pub async fn write(&mut self, query: &str, output: Result<Output>) -> Result<()> {
let writer = self.inner.take().context(error::InternalSnafu {
err_msg: "inner MySQL writer is consumed",
})?;
@@ -59,48 +60,53 @@ impl<'a, W: io::Write> MysqlResultWriter<'a, W> {
recordbatches,
schema,
};
- Self::write_query_result(query_result, writer)?
+ Self::write_query_result(query, query_result, writer).await?
}
Output::RecordBatches(recordbatches) => {
let query_result = QueryResult {
schema: recordbatches.schema(),
recordbatches: recordbatches.take(),
};
- Self::write_query_result(query_result, writer)?
+ Self::write_query_result(query, query_result, writer).await?
}
- Output::AffectedRows(rows) => Self::write_affected_rows(writer, rows)?,
+ Output::AffectedRows(rows) => Self::write_affected_rows(writer, rows).await?,
},
- Err(error) => Self::write_query_error(error, writer)?,
+ Err(error) => Self::write_query_error(query, error, writer).await?,
}
Ok(())
}
- fn write_affected_rows(writer: QueryResultWriter<W>, rows: usize) -> Result<()> {
- writer.completed(OkResponse {
+ async fn write_affected_rows(w: QueryResultWriter<'a, W>, rows: usize) -> Result<()> {
+ w.completed(OkResponse {
affected_rows: rows as u64,
..Default::default()
- })?;
+ })
+ .await?;
Ok(())
}
- fn write_query_result(
+ async fn write_query_result(
+ query: &str,
query_result: QueryResult,
writer: QueryResultWriter<'a, W>,
) -> Result<()> {
match create_mysql_column_def(&query_result.schema) {
Ok(column_def) => {
- let mut row_writer = writer.start(&column_def)?;
+ let mut row_writer = writer.start(&column_def).await?;
for recordbatch in &query_result.recordbatches {
- Self::write_recordbatch(&mut row_writer, recordbatch)?;
+ Self::write_recordbatch(&mut row_writer, recordbatch).await?;
}
- row_writer.finish()?;
+ row_writer.finish().await?;
Ok(())
}
- Err(error) => Self::write_query_error(error, writer),
+ Err(error) => Self::write_query_error(query, error, writer).await,
}
}
- fn write_recordbatch(row_writer: &mut RowWriter<W>, recordbatch: &RecordBatch) -> Result<()> {
+ async fn write_recordbatch(
+ row_writer: &mut RowWriter<'_, W>,
+ recordbatch: &RecordBatch,
+ ) -> Result<()> {
for row in recordbatch.rows() {
let row = row.context(error::CollectRecordbatchSnafu)?;
for value in row.into_iter() {
@@ -133,13 +139,20 @@ impl<'a, W: io::Write> MysqlResultWriter<'a, W> {
}
}
}
- row_writer.end_row()?;
+ row_writer.end_row().await?;
}
Ok(())
}
- fn write_query_error(error: Error, writer: QueryResultWriter<'a, W>) -> Result<()> {
- writer.error(ErrorKind::ER_INTERNAL_ERROR, error.to_string().as_bytes())?;
+ async fn write_query_error(
+ query: &str,
+ error: Error,
+ w: QueryResultWriter<'a, W>,
+ ) -> Result<()> {
+ error!(error; "Failed to execute query '{}'", query);
+
+ let kind = ErrorKind::ER_INTERNAL_ERROR;
+ w.error(kind, error.to_string().as_bytes()).await?;
Ok(())
}
}
diff --git a/src/servers/tests/mysql/mysql_server_test.rs b/src/servers/tests/mysql/mysql_server_test.rs
index ce351fce6b2b..ba82e8d68f8a 100644
--- a/src/servers/tests/mysql/mysql_server_test.rs
+++ b/src/servers/tests/mysql/mysql_server_test.rs
@@ -184,6 +184,7 @@ async fn test_query_concurrently() -> Result<()> {
let should_recreate_conn = expected == 1;
if should_recreate_conn {
+ connection.disconnect().await.unwrap();
connection = create_connection(server_port, index % 2 == 0)
.await
.unwrap();
diff --git a/src/table/src/requests.rs b/src/table/src/requests.rs
index a921cf3bd2e7..bc0b1a8e348c 100644
--- a/src/table/src/requests.rs
+++ b/src/table/src/requests.rs
@@ -17,6 +17,7 @@ use std::collections::HashMap;
use datatypes::prelude::VectorRef;
use datatypes::schema::{ColumnSchema, SchemaRef};
+use store_api::storage::RegionNumber;
use crate::metadata::TableId;
@@ -56,6 +57,7 @@ pub struct OpenTableRequest {
pub schema_name: String,
pub table_name: String,
pub table_id: TableId,
+ pub region_numbers: Vec<RegionNumber>,
}
/// Alter table request
|
fix
|
correctly open table when distributed datanode restart (#576)
|
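A standalone sketch of the grouping pattern the commit above introduces when building `regions_id_map`: region numbers are bucketed by the node id of their leader peer using the `entry(..).or_insert_with(Vec::new).push(..)` idiom. Plain `(node_id, region_id)` tuples stand in for the real route types.

use std::collections::HashMap;

fn group_regions_by_node(routes: &[(u64, u32)]) -> HashMap<u64, Vec<u32>> {
    let mut regions_id_map: HashMap<u64, Vec<u32>> = HashMap::new();
    for (node_id, region_id) in routes {
        // Every node accumulates the regions it leads.
        regions_id_map
            .entry(*node_id)
            .or_insert_with(Vec::new)
            .push(*region_id);
    }
    regions_id_map
}

fn main() {
    let map = group_regions_by_node(&[(1, 0), (1, 1), (2, 2)]);
    assert_eq!(map[&1], vec![0, 1]);
    assert_eq!(map[&2], vec![2]);
}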
cf561df8545931b699de920c1f9a630f39f3f1a1
|
2023-12-25 18:17:22
|
SSebo
|
feat: export runtime metric to prometheus (#2985)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index e343909ce95c..7369da7ba47d 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1936,6 +1936,8 @@ dependencies = [
"prometheus",
"snafu",
"tokio",
+ "tokio-metrics",
+ "tokio-metrics-collector",
"tokio-test",
"tokio-util",
]
@@ -9696,6 +9698,31 @@ dependencies = [
"syn 2.0.42",
]
+[[package]]
+name = "tokio-metrics"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "eace09241d62c98b7eeb1107d4c5c64ca3bd7da92e8c218c153ab3a78f9be112"
+dependencies = [
+ "futures-util",
+ "pin-project-lite",
+ "tokio",
+ "tokio-stream",
+]
+
+[[package]]
+name = "tokio-metrics-collector"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d767da47381602cc481653456823b3ebb600e83d5dd4e0293da9b5566c6c00f0"
+dependencies = [
+ "lazy_static",
+ "parking_lot 0.12.1",
+ "prometheus",
+ "tokio",
+ "tokio-metrics",
+]
+
[[package]]
name = "tokio-postgres"
version = "0.7.10"
diff --git a/src/common/runtime/Cargo.toml b/src/common/runtime/Cargo.toml
index a4d1460f349a..629da4f4e0d4 100644
--- a/src/common/runtime/Cargo.toml
+++ b/src/common/runtime/Cargo.toml
@@ -14,6 +14,8 @@ once_cell.workspace = true
paste.workspace = true
prometheus.workspace = true
snafu.workspace = true
+tokio-metrics = "0.3"
+tokio-metrics-collector = "0.2"
tokio-util.workspace = true
tokio.workspace = true
diff --git a/src/common/runtime/src/runtime.rs b/src/common/runtime/src/runtime.rs
index 6a776af25465..0ea041578e10 100644
--- a/src/common/runtime/src/runtime.rs
+++ b/src/common/runtime/src/runtime.rs
@@ -152,6 +152,7 @@ impl Builder {
.build()
.context(BuildRuntimeSnafu)?;
+ let name = self.runtime_name.clone();
let handle = runtime.handle().clone();
let (send_stop, recv_stop) = oneshot::channel();
// Block the runtime to shutdown.
@@ -159,8 +160,11 @@ impl Builder {
.name(format!("{}-blocker", self.thread_name))
.spawn(move || runtime.block_on(recv_stop));
+ #[cfg(tokio_unstable)]
+ register_collector(name.clone(), &handle);
+
Ok(Runtime {
- name: self.runtime_name.clone(),
+ name,
handle,
_dropper: Arc::new(Dropper {
close: Some(send_stop),
@@ -169,6 +173,14 @@ impl Builder {
}
}
+#[cfg(tokio_unstable)]
+pub fn register_collector(name: String, handle: &Handle) {
+ let name = name.replace("-", "_");
+ let monitor = tokio_metrics::RuntimeMonitor::new(handle);
+ let collector = tokio_metrics_collector::RuntimeCollector::new(monitor, name);
+ let _ = prometheus::register(Box::new(collector));
+}
+
fn on_thread_start(thread_name: String) -> impl Fn() + 'static {
move || {
METRIC_RUNTIME_THREADS_ALIVE
@@ -241,6 +253,13 @@ mod tests {
assert!(metric_text.contains("runtime_threads_idle{thread_name=\"test_runtime_metric\"}"));
assert!(metric_text.contains("runtime_threads_alive{thread_name=\"test_runtime_metric\"}"));
+
+ #[cfg(tokio_unstable)]
+ {
+ assert!(metric_text.contains("runtime_0_tokio_budget_forced_yield_count 0"));
+ assert!(metric_text.contains("runtime_0_tokio_injection_queue_depth 0"));
+ assert!(metric_text.contains("runtime_0_tokio_workers_count 5"));
+ }
}
#[test]
|
feat
|
export runtime metric to prometheus (#2985)
|
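A hedged sketch of the collector registration the commit above adds: exposing Tokio runtime metrics through the default prometheus registry. It assumes the `tokio-metrics` and `tokio-metrics-collector` crates from the diff and a build with `RUSTFLAGS="--cfg tokio_unstable"`; it is an illustration, not the crate's `Builder` integration.

// Register a runtime metrics collector, mirroring `register_collector` above.
#[cfg(tokio_unstable)]
fn register_runtime_metrics(name: &str, handle: &tokio::runtime::Handle) {
    // Prometheus metric names may not contain '-', so normalize the runtime name.
    let prefix = name.replace('-', "_");
    let monitor = tokio_metrics::RuntimeMonitor::new(handle);
    let collector = tokio_metrics_collector::RuntimeCollector::new(monitor, prefix);
    // Ignore the error if a collector with this prefix was already registered.
    let _ = prometheus::register(Box::new(collector));
}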
0dd11f53f55ff1b342fda40d20a5125f568b244a
|
2024-10-09 12:41:57
|
Kaifeng Zheng
|
feat: add json format output for http interface (#4797)
| false
|
diff --git a/src/servers/src/http.rs b/src/servers/src/http.rs
index a2b72b548b1e..953ff9e73ae7 100644
--- a/src/servers/src/http.rs
+++ b/src/servers/src/http.rs
@@ -59,6 +59,7 @@ use crate::http::error_result::ErrorResponse;
use crate::http::greptime_result_v1::GreptimedbV1Response;
use crate::http::influxdb::{influxdb_health, influxdb_ping, influxdb_write_v1, influxdb_write_v2};
use crate::http::influxdb_result_v1::InfluxdbV1Response;
+use crate::http::json_result::JsonResponse;
use crate::http::prometheus::{
build_info_query, format_query, instant_query, label_values_query, labels_query, range_query,
series_query,
@@ -97,6 +98,7 @@ pub mod error_result;
pub mod greptime_manage_resp;
pub mod greptime_result_v1;
pub mod influxdb_result_v1;
+pub mod json_result;
pub mod table_result;
#[cfg(any(test, feature = "testing"))]
@@ -279,6 +281,7 @@ pub enum ResponseFormat {
#[default]
GreptimedbV1,
InfluxdbV1,
+ Json,
}
impl ResponseFormat {
@@ -289,6 +292,7 @@ impl ResponseFormat {
"table" => Some(ResponseFormat::Table),
"greptimedb_v1" => Some(ResponseFormat::GreptimedbV1),
"influxdb_v1" => Some(ResponseFormat::InfluxdbV1),
+ "json" => Some(ResponseFormat::Json),
_ => None,
}
}
@@ -300,6 +304,7 @@ impl ResponseFormat {
ResponseFormat::Table => "table",
ResponseFormat::GreptimedbV1 => "greptimedb_v1",
ResponseFormat::InfluxdbV1 => "influxdb_v1",
+ ResponseFormat::Json => "json",
}
}
}
@@ -356,6 +361,7 @@ pub enum HttpResponse {
Error(ErrorResponse),
GreptimedbV1(GreptimedbV1Response),
InfluxdbV1(InfluxdbV1Response),
+ Json(JsonResponse),
}
impl HttpResponse {
@@ -366,6 +372,7 @@ impl HttpResponse {
HttpResponse::Table(resp) => resp.with_execution_time(execution_time).into(),
HttpResponse::GreptimedbV1(resp) => resp.with_execution_time(execution_time).into(),
HttpResponse::InfluxdbV1(resp) => resp.with_execution_time(execution_time).into(),
+ HttpResponse::Json(resp) => resp.with_execution_time(execution_time).into(),
HttpResponse::Error(resp) => resp.with_execution_time(execution_time).into(),
}
}
@@ -375,6 +382,7 @@ impl HttpResponse {
HttpResponse::Csv(resp) => resp.with_limit(limit).into(),
HttpResponse::Table(resp) => resp.with_limit(limit).into(),
HttpResponse::GreptimedbV1(resp) => resp.with_limit(limit).into(),
+ HttpResponse::Json(resp) => resp.with_limit(limit).into(),
_ => self,
}
}
@@ -407,6 +415,7 @@ impl IntoResponse for HttpResponse {
HttpResponse::Table(resp) => resp.into_response(),
HttpResponse::GreptimedbV1(resp) => resp.into_response(),
HttpResponse::InfluxdbV1(resp) => resp.into_response(),
+ HttpResponse::Json(resp) => resp.into_response(),
HttpResponse::Error(resp) => resp.into_response(),
}
}
@@ -452,6 +461,12 @@ impl From<InfluxdbV1Response> for HttpResponse {
}
}
+impl From<JsonResponse> for HttpResponse {
+ fn from(value: JsonResponse) -> Self {
+ HttpResponse::Json(value)
+ }
+}
+
async fn serve_api(Extension(api): Extension<OpenApi>) -> impl IntoApiResponse {
Json(api)
}
@@ -1131,6 +1146,7 @@ mod test {
ResponseFormat::Csv,
ResponseFormat::Table,
ResponseFormat::Arrow,
+ ResponseFormat::Json,
] {
let recordbatches =
RecordBatches::try_new(schema.clone(), vec![recordbatch.clone()]).unwrap();
@@ -1141,6 +1157,7 @@ mod test {
ResponseFormat::Table => TableResponse::from_output(outputs).await,
ResponseFormat::GreptimedbV1 => GreptimedbV1Response::from_output(outputs).await,
ResponseFormat::InfluxdbV1 => InfluxdbV1Response::from_output(outputs, None).await,
+ ResponseFormat::Json => JsonResponse::from_output(outputs).await,
};
match json_resp {
@@ -1210,6 +1227,21 @@ mod test {
assert_eq!(rb.num_columns(), 2);
assert_eq!(rb.num_rows(), 4);
}
+
+ HttpResponse::Json(resp) => {
+ let output = &resp.output()[0];
+ if let GreptimeQueryOutput::Records(r) = output {
+ assert_eq!(r.num_rows(), 4);
+ assert_eq!(r.num_cols(), 2);
+ assert_eq!(r.schema.column_schemas[0].name, "numbers");
+ assert_eq!(r.schema.column_schemas[0].data_type, "UInt32");
+ assert_eq!(r.rows[0][0], serde_json::Value::from(1));
+ assert_eq!(r.rows[0][1], serde_json::Value::Null);
+ } else {
+ panic!("invalid output type");
+ }
+ }
+
HttpResponse::Error(err) => unreachable!("{err:?}"),
}
}
diff --git a/src/servers/src/http/handler.rs b/src/servers/src/http/handler.rs
index 1befc2224014..4925c79639ce 100644
--- a/src/servers/src/http/handler.rs
+++ b/src/servers/src/http/handler.rs
@@ -39,6 +39,7 @@ use crate::http::csv_result::CsvResponse;
use crate::http::error_result::ErrorResponse;
use crate::http::greptime_result_v1::GreptimedbV1Response;
use crate::http::influxdb_result_v1::InfluxdbV1Response;
+use crate::http::json_result::JsonResponse;
use crate::http::table_result::TableResponse;
use crate::http::{
ApiState, Epoch, GreptimeOptionsConfigState, GreptimeQueryOutput, HttpRecordsOutput,
@@ -138,6 +139,7 @@ pub async fn sql(
ResponseFormat::Table => TableResponse::from_output(outputs).await,
ResponseFormat::GreptimedbV1 => GreptimedbV1Response::from_output(outputs).await,
ResponseFormat::InfluxdbV1 => InfluxdbV1Response::from_output(outputs, epoch).await,
+ ResponseFormat::Json => JsonResponse::from_output(outputs).await,
};
if let Some(limit) = query_params.limit {
diff --git a/src/servers/src/http/json_result.rs b/src/servers/src/http/json_result.rs
new file mode 100644
index 000000000000..bf4e4d77704c
--- /dev/null
+++ b/src/servers/src/http/json_result.rs
@@ -0,0 +1,137 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use axum::http::{header, HeaderValue};
+use axum::response::{IntoResponse, Response};
+use common_error::status_code::StatusCode;
+use common_query::Output;
+use mime_guess::mime;
+use schemars::JsonSchema;
+use serde::{Deserialize, Serialize};
+use serde_json::{json, Map, Value};
+
+use super::process_with_limit;
+use crate::http::error_result::ErrorResponse;
+use crate::http::header::{GREPTIME_DB_HEADER_EXECUTION_TIME, GREPTIME_DB_HEADER_FORMAT};
+use crate::http::{handler, GreptimeQueryOutput, HttpResponse, ResponseFormat};
+
+/// The json format here is different from the default json output of `GreptimedbV1` result.
+/// `JsonResponse` is intended to make it easier for user to consume data.
+#[derive(Serialize, Deserialize, Debug, JsonSchema)]
+pub struct JsonResponse {
+ output: Vec<GreptimeQueryOutput>,
+ execution_time_ms: u64,
+}
+
+impl JsonResponse {
+ pub async fn from_output(outputs: Vec<crate::error::Result<Output>>) -> HttpResponse {
+ match handler::from_output(outputs).await {
+ Err(err) => HttpResponse::Error(err),
+ Ok((output, _)) => {
+ if output.len() > 1 {
+ HttpResponse::Error(ErrorResponse::from_error_message(
+ StatusCode::InvalidArguments,
+ "cannot output multi-statements result in json format".to_string(),
+ ))
+ } else {
+ HttpResponse::Json(JsonResponse {
+ output,
+ execution_time_ms: 0,
+ })
+ }
+ }
+ }
+ }
+
+ pub fn output(&self) -> &[GreptimeQueryOutput] {
+ &self.output
+ }
+
+ pub fn with_execution_time(mut self, execution_time: u64) -> Self {
+ self.execution_time_ms = execution_time;
+ self
+ }
+
+ pub fn execution_time_ms(&self) -> u64 {
+ self.execution_time_ms
+ }
+
+ pub fn with_limit(mut self, limit: usize) -> Self {
+ self.output = process_with_limit(self.output, limit);
+ self
+ }
+}
+
+impl IntoResponse for JsonResponse {
+ fn into_response(mut self) -> Response {
+ debug_assert!(
+ self.output.len() <= 1,
+ "self.output has extra elements: {}",
+ self.output.len()
+ );
+
+ let execution_time = self.execution_time_ms;
+ let payload = match self.output.pop() {
+ None => String::default(),
+ Some(GreptimeQueryOutput::AffectedRows(n)) => json!({
+ "data": [],
+ "affected_rows": n,
+ "execution_time_ms": execution_time,
+ })
+ .to_string(),
+
+ Some(GreptimeQueryOutput::Records(records)) => {
+ let schema = records.schema();
+
+ let data: Vec<Map<String, Value>> = records
+ .rows
+ .iter()
+ .map(|row| {
+ schema
+ .column_schemas
+ .iter()
+ .enumerate()
+ .map(|(i, col)| (col.name.clone(), row[i].clone()))
+ .collect::<Map<String, Value>>()
+ })
+ .collect();
+
+ json!({
+ "data": data,
+ "execution_time_ms": execution_time,
+ })
+ .to_string()
+ }
+ };
+
+ (
+ [
+ (
+ header::CONTENT_TYPE,
+ HeaderValue::from_static(mime::APPLICATION_JSON.as_ref()),
+ ),
+ (
+ GREPTIME_DB_HEADER_FORMAT.clone(),
+ HeaderValue::from_static(ResponseFormat::Json.as_str()),
+ ),
+ (
+ GREPTIME_DB_HEADER_EXECUTION_TIME.clone(),
+ HeaderValue::from(execution_time),
+ ),
+ ],
+ payload,
+ )
+ .into_response()
+ }
+}
diff --git a/tests-integration/tests/http.rs b/tests-integration/tests/http.rs
index f70df8176a10..7a030aad5696 100644
--- a/tests-integration/tests/http.rs
+++ b/tests-integration/tests/http.rs
@@ -182,6 +182,22 @@ pub async fn test_sql_api(store_type: StorageType) {
})).unwrap()
);
+ // test json result format
+ let res = client
+ .get("/v1/sql?format=json&sql=select * from numbers limit 10")
+ .send()
+ .await;
+ assert_eq!(res.status(), StatusCode::OK);
+
+ let body = res.json::<Value>().await;
+ let data = body.get("data").expect("Missing 'data' field in response");
+
+ let expected = json!([
+ {"number": 0}, {"number": 1}, {"number": 2}, {"number": 3}, {"number": 4},
+ {"number": 5}, {"number": 6}, {"number": 7}, {"number": 8}, {"number": 9}
+ ]);
+ assert_eq!(data, &expected);
+
// test insert and select
let res = client
.get("/v1/sql?sql=insert into demo values('host', 66.6, 1024, 0)")
@@ -1307,7 +1323,7 @@ transform:
.send()
.await;
assert_eq!(res.status(), StatusCode::OK);
- let body: serde_json::Value = res.json().await;
+ let body: Value = res.json().await;
let schema = &body["schema"];
let rows = &body["rows"];
assert_eq!(
|
feat
|
add json format output for http interface (#4797)
|
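A standalone serde_json sketch of the row shaping the new `format=json` output performs: each row becomes an object keyed by column name, wrapped in a `data` array together with the execution time. The function below is an illustration, not the servers crate's `JsonResponse`.

use serde_json::{json, Map, Value};

fn rows_to_json(columns: &[&str], rows: &[Vec<Value>], execution_time_ms: u64) -> Value {
    let data: Vec<Map<String, Value>> = rows
        .iter()
        .map(|row| {
            columns
                .iter()
                .zip(row.iter())
                .map(|(col, value)| (col.to_string(), value.clone()))
                .collect::<Map<String, Value>>()
        })
        .collect();

    json!({
        "data": data,
        "execution_time_ms": execution_time_ms,
    })
}

fn main() {
    let rows = vec![vec![json!(0)], vec![json!(1)]];
    let body = rows_to_json(&["number"], &rows, 3);
    // Matches the `{"data": [{"number": 0}, {"number": 1}], ...}` shape asserted
    // in the integration test above.
    assert_eq!(body["data"][1]["number"], json!(1));
}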
a14ec94653afd234b0ca7eedad1df71be3813462
|
2022-12-27 14:20:12
|
LFC
|
fix: ease the restriction of the original "SelectExpr" (#794)
| false
|
diff --git a/src/api/greptime/v1/database.proto b/src/api/greptime/v1/database.proto
index 358b01923f37..bd24087264c4 100644
--- a/src/api/greptime/v1/database.proto
+++ b/src/api/greptime/v1/database.proto
@@ -18,15 +18,12 @@ message ObjectExpr {
ExprHeader header = 1;
oneof expr {
InsertExpr insert = 2;
- SelectExpr select = 3;
- UpdateExpr update = 4;
- DeleteExpr delete = 5;
+ QueryRequest query = 3;
}
}
-// TODO(fys): Only support sql now, and will support promql etc in the future
-message SelectExpr {
- oneof expr {
+message QueryRequest {
+ oneof query {
string sql = 1;
bytes logical_plan = 2;
}
@@ -48,11 +45,6 @@ message InsertExpr {
uint32 region_number = 5;
}
-// TODO(jiachun)
-message UpdateExpr {}
-// TODO(jiachun)
-message DeleteExpr {}
-
message ObjectResult {
ResultHeader header = 1;
oneof result {
diff --git a/src/client/src/database.rs b/src/client/src/database.rs
index d1608442d8eb..14c1d5dc28f0 100644
--- a/src/client/src/database.rs
+++ b/src/client/src/database.rs
@@ -17,8 +17,8 @@ use std::sync::Arc;
use api::v1::codec::SelectResult as GrpcSelectResult;
use api::v1::column::SemanticType;
use api::v1::{
- object_expr, object_result, select_expr, DatabaseRequest, ExprHeader, InsertExpr,
- MutateResult as GrpcMutateResult, ObjectExpr, ObjectResult as GrpcObjectResult, SelectExpr,
+ object_expr, object_result, query_request, DatabaseRequest, ExprHeader, InsertExpr,
+ MutateResult as GrpcMutateResult, ObjectExpr, ObjectResult as GrpcObjectResult, QueryRequest,
};
use common_error::status_code::StatusCode;
use common_grpc::flight::{raw_flight_data_to_message, FlightMessage};
@@ -83,28 +83,28 @@ impl Database {
pub async fn select(&self, expr: Select) -> Result<ObjectResult> {
let select_expr = match expr {
- Select::Sql(sql) => SelectExpr {
- expr: Some(select_expr::Expr::Sql(sql)),
+ Select::Sql(sql) => QueryRequest {
+ query: Some(query_request::Query::Sql(sql)),
},
};
self.do_select(select_expr).await
}
pub async fn logical_plan(&self, logical_plan: Vec<u8>) -> Result<ObjectResult> {
- let select_expr = SelectExpr {
- expr: Some(select_expr::Expr::LogicalPlan(logical_plan)),
+ let select_expr = QueryRequest {
+ query: Some(query_request::Query::LogicalPlan(logical_plan)),
};
self.do_select(select_expr).await
}
- async fn do_select(&self, select_expr: SelectExpr) -> Result<ObjectResult> {
+ async fn do_select(&self, select_expr: QueryRequest) -> Result<ObjectResult> {
let header = ExprHeader {
version: PROTOCOL_VERSION,
};
let expr = ObjectExpr {
header: Some(header),
- expr: Some(object_expr::Expr::Select(select_expr)),
+ expr: Some(object_expr::Expr::Query(select_expr)),
};
let obj_result = self.object(expr).await?;
diff --git a/src/datanode/src/instance/flight.rs b/src/datanode/src/instance/flight.rs
index 58e01a6d6d56..da22ffd2ac59 100644
--- a/src/datanode/src/instance/flight.rs
+++ b/src/datanode/src/instance/flight.rs
@@ -17,7 +17,7 @@ mod stream;
use std::pin::Pin;
use api::v1::object_expr::Expr;
-use api::v1::select_expr::Expr as SelectExpr;
+use api::v1::query_request::Query;
use api::v1::ObjectExpr;
use arrow_flight::flight_service_server::FlightService;
use arrow_flight::{
@@ -29,8 +29,7 @@ use common_query::Output;
use futures::Stream;
use prost::Message;
use session::context::QueryContext;
-use snafu::{ensure, OptionExt, ResultExt};
-use sql::statements::statement::Statement;
+use snafu::{OptionExt, ResultExt};
use tonic::{Request, Response, Streaming};
use crate::error::{self, Result};
@@ -81,18 +80,15 @@ impl FlightService for Instance {
.expr
.context(error::MissingRequiredFieldSnafu { name: "expr" })?;
match expr {
- Expr::Select(select_expr) => {
- let select_expr = select_expr
- .expr
+ Expr::Query(query_request) => {
+ let query = query_request
+ .query
.context(error::MissingRequiredFieldSnafu { name: "expr" })?;
- let stream = self.handle_select_expr(select_expr).await?;
+ let stream = self.handle_query(query).await?;
Ok(Response::new(Box::pin(stream) as TonicStream<FlightData>))
}
// TODO(LFC): Implement Insertion Flight interface.
Expr::Insert(_) => Err(tonic::Status::unimplemented("Not yet implemented")),
- Expr::Update(_) | Expr::Delete(_) => {
- Err(tonic::Status::unimplemented("Not yet implemented"))
- }
}
}
@@ -134,22 +130,16 @@ impl FlightService for Instance {
}
impl Instance {
- async fn handle_select_expr(&self, select_expr: SelectExpr) -> Result<GetStream> {
- let output = match select_expr {
- SelectExpr::Sql(sql) => {
+ async fn handle_query(&self, query: Query) -> Result<GetStream> {
+ let output = match query {
+ Query::Sql(sql) => {
let stmt = self
.query_engine
.sql_to_statement(&sql)
.context(error::ExecuteSqlSnafu)?;
- ensure!(
- matches!(stmt, Statement::Query(_)),
- error::InvalidSqlSnafu {
- msg: format!("expect SQL to be selection, actual: {sql}")
- }
- );
self.execute_stmt(stmt, QueryContext::arc()).await?
}
- SelectExpr::LogicalPlan(plan) => self.execute_logical(plan).await?,
+ Query::LogicalPlan(plan) => self.execute_logical(plan).await?,
};
let recordbatch_stream = match output {
diff --git a/src/datanode/src/instance/grpc.rs b/src/datanode/src/instance/grpc.rs
index 28534bc492d4..2a52a07e85e0 100644
--- a/src/datanode/src/instance/grpc.rs
+++ b/src/datanode/src/instance/grpc.rs
@@ -15,7 +15,7 @@
use api::result::{build_err_result, AdminResultBuilder, ObjectResultBuilder};
use api::v1::{
admin_expr, object_expr, AdminExpr, AdminResult, Column, CreateDatabaseExpr, ObjectExpr,
- ObjectResult, SelectExpr,
+ ObjectResult, QueryRequest,
};
use arrow_flight::flight_service_server::FlightService;
use arrow_flight::Ticket;
@@ -105,11 +105,11 @@ impl Instance {
}
}
- async fn handle_select(&self, select_expr: SelectExpr) -> Result<ObjectResult> {
+ async fn handle_query_request(&self, query_request: QueryRequest) -> Result<ObjectResult> {
let ticket = Request::new(Ticket {
ticket: ObjectExpr {
header: None,
- expr: Some(object_expr::Expr::Select(select_expr)),
+ expr: Some(object_expr::Expr::Query(query_request)),
}
.encode_to_vec(),
});
@@ -169,12 +169,12 @@ impl GrpcQueryHandler for Instance {
self.handle_insert(catalog_name, schema_name, table_name, insert_batches)
.await
}
- Some(object_expr::Expr::Select(select_expr)) => self
- .handle_select(select_expr.clone())
+ Some(object_expr::Expr::Query(query_request)) => self
+ .handle_query_request(query_request.clone())
.await
.map_err(BoxedError::new)
.context(servers::error::ExecuteQuerySnafu {
- query: format!("{select_expr:?}"),
+ query: format!("{query_request:?}"),
})?,
other => {
return servers::error::NotSupportedSnafu {
diff --git a/src/frontend/src/instance.rs b/src/frontend/src/instance.rs
index 5ea5c35aa8c5..7b01890b76de 100644
--- a/src/frontend/src/instance.rs
+++ b/src/frontend/src/instance.rs
@@ -721,9 +721,9 @@ mod tests {
use api::v1::column::SemanticType;
use api::v1::{
- admin_expr, admin_result, column, object_expr, object_result, select_expr, Column,
+ admin_expr, admin_result, column, object_expr, object_result, query_request, Column,
ColumnDataType, ColumnDef as GrpcColumnDef, ExprHeader, FlightDataRaw, MutateResult,
- SelectExpr,
+ QueryRequest,
};
use common_grpc::flight::{raw_flight_data_to_message, FlightMessage};
use common_recordbatch::RecordBatch;
@@ -930,8 +930,8 @@ mod tests {
// select
let object_expr = ObjectExpr {
header: Some(ExprHeader::default()),
- expr: Some(object_expr::Expr::Select(SelectExpr {
- expr: Some(select_expr::Expr::Sql("select * from demo".to_string())),
+ expr: Some(Expr::Query(QueryRequest {
+ query: Some(query_request::Query::Sql("select * from demo".to_string())),
})),
};
let result = GrpcQueryHandler::do_query(&*instance, object_expr)
|
fix
|
ease the restriction of the original "SelectExpr" (#794)
|
fa12392d2c1c06c6c2b03586f2c349d06f48f787
|
2023-07-12 07:43:07
|
Weny Xu
|
fix: fix frontend meta client option issue (#1939)
| false
|
diff --git a/src/frontend/src/instance.rs b/src/frontend/src/instance.rs
index 89f10d08d6df..7a674458320c 100644
--- a/src/frontend/src/instance.rs
+++ b/src/frontend/src/instance.rs
@@ -48,7 +48,6 @@ use datanode::instance::InstanceRef as DnInstanceRef;
use datatypes::schema::Schema;
use distributed::DistInstance;
use meta_client::client::{MetaClient, MetaClientBuilder};
-use meta_client::MetaClientOptions;
use partition::manager::PartitionRuleManager;
use partition::route::TableRoutes;
use query::parser::{PromQuery, QueryLanguageParser, QueryStatement};
@@ -216,21 +215,21 @@ impl Instance {
}
async fn create_meta_client(opts: &FrontendOptions) -> Result<Arc<MetaClient>> {
- let metasrv_addr = &opts
+ let meta_client_options = opts
.meta_client_options
.as_ref()
- .context(MissingMetasrvOptsSnafu)?
- .metasrv_addrs;
+ .context(MissingMetasrvOptsSnafu)?;
info!(
"Creating Frontend instance in distributed mode with Meta server addr {:?}",
- metasrv_addr
+ meta_client_options.metasrv_addrs
);
- let meta_config = MetaClientOptions::default();
let channel_config = ChannelConfig::new()
- .timeout(Duration::from_millis(meta_config.timeout_millis))
- .connect_timeout(Duration::from_millis(meta_config.connect_timeout_millis))
- .tcp_nodelay(meta_config.tcp_nodelay);
+ .timeout(Duration::from_millis(meta_client_options.timeout_millis))
+ .connect_timeout(Duration::from_millis(
+ meta_client_options.connect_timeout_millis,
+ ))
+ .tcp_nodelay(meta_client_options.tcp_nodelay);
let channel_manager = ChannelManager::with_config(channel_config);
channel_manager.start_channel_recycle();
@@ -243,7 +242,7 @@ impl Instance {
.channel_manager(channel_manager)
.build();
meta_client
- .start(metasrv_addr)
+ .start(&meta_client_options.metasrv_addrs)
.await
.context(error::StartMetaClientSnafu)?;
Ok(Arc::new(meta_client))
|
fix
|
fix frontend meta client option issue (#1939)
|
c9db093af742860975863ecb558b66200ab15b7f
|
2022-07-26 13:22:39
|
evenyag
|
feat: Cherry picks lost commits of flush (#111)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 23eb67428bc7..97aed491ecf0 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3526,6 +3526,7 @@ dependencies = [
"futures",
"object-store",
"serde",
+ "serde_json",
"snafu",
"tokio",
]
diff --git a/src/common/telemetry/src/logging.rs b/src/common/telemetry/src/logging.rs
index 9bf0603313f0..d1b3e10ac047 100644
--- a/src/common/telemetry/src/logging.rs
+++ b/src/common/telemetry/src/logging.rs
@@ -73,6 +73,7 @@ pub fn init_global_logging(
.with_target("tower", Level::WARN)
.with_target("datafusion", Level::WARN)
.with_target("reqwest", Level::WARN)
+ .with_target("sqlparser", Level::WARN)
.with_default(
directives
.parse::<filter::LevelFilter>()
diff --git a/src/storage/src/engine.rs b/src/storage/src/engine.rs
index 0356bcd322e2..df4f77ab7efd 100644
--- a/src/storage/src/engine.rs
+++ b/src/storage/src/engine.rs
@@ -5,20 +5,23 @@ use async_trait::async_trait;
use common_telemetry::logging::info;
use object_store::{backend::fs::Backend, util, ObjectStore};
use snafu::ResultExt;
+use store_api::manifest::action::ProtocolAction;
use store_api::{
logstore::LogStore,
manifest::Manifest,
storage::{EngineContext, RegionDescriptor, StorageEngine},
};
+use crate::background::JobPoolImpl;
use crate::config::{EngineConfig, ObjectStoreConfig};
use crate::error::{self, Error, Result};
+use crate::flush::{FlushSchedulerImpl, FlushSchedulerRef, FlushStrategyRef, SizeBasedStrategy};
use crate::manifest::action::*;
use crate::manifest::region::RegionManifest;
+use crate::memtable::{DefaultMemtableBuilder, MemtableBuilderRef};
use crate::metadata::RegionMetadata;
-use crate::region::RegionImpl;
+use crate::region::{RegionImpl, StoreConfig};
use crate::sst::FsAccessLayer;
-use crate::wal::Wal;
/// [StorageEngine] implementation.
pub struct EngineImpl<S: LogStore> {
@@ -99,16 +102,16 @@ impl SharedData {
object_store,
})
}
+}
- #[inline]
- fn region_sst_dir(&self, region_name: &str) -> String {
- format!("{}/", region_name)
- }
+#[inline]
+pub fn region_sst_dir(region_name: &str) -> String {
+ format!("{}/", region_name)
+}
- #[inline]
- fn region_manifest_dir(&self, region_name: &str) -> String {
- format!("{}/manifest/", region_name)
- }
+#[inline]
+pub fn region_manifest_dir(region_name: &str) -> String {
+ format!("{}/manifest/", region_name)
}
type RegionMap<S> = HashMap<String, RegionImpl<S>>;
@@ -117,14 +120,23 @@ struct EngineInner<S: LogStore> {
log_store: Arc<S>,
regions: RwLock<RegionMap<S>>,
shared: SharedData,
+ memtable_builder: MemtableBuilderRef,
+ flush_scheduler: FlushSchedulerRef,
+ flush_strategy: FlushStrategyRef,
}
impl<S: LogStore> EngineInner<S> {
pub async fn new(config: EngineConfig, log_store: Arc<S>) -> Result<Self> {
+ let job_pool = Arc::new(JobPoolImpl {});
+ let flush_scheduler = Arc::new(FlushSchedulerImpl::new(job_pool));
+
Ok(Self {
log_store,
regions: RwLock::new(Default::default()),
shared: SharedData::new(config).await?,
+ memtable_builder: Arc::new(DefaultMemtableBuilder {}),
+ flush_scheduler,
+ flush_strategy: Arc::new(SizeBasedStrategy::default()),
})
}
@@ -144,29 +156,38 @@ impl<S: LogStore> EngineInner<S> {
.context(error::InvalidRegionDescSnafu {
region: ®ion_name,
})?;
- let wal = Wal::new(region_id, region_name.clone(), self.log_store.clone());
- let sst_dir = &self.shared.region_sst_dir(®ion_name);
+ let sst_dir = ®ion_sst_dir(®ion_name);
let sst_layer = Arc::new(FsAccessLayer::new(
sst_dir,
self.shared.object_store.clone(),
));
- let manifest_dir = self.shared.region_manifest_dir(®ion_name);
+ let manifest_dir = region_manifest_dir(®ion_name);
let manifest =
RegionManifest::new(region_id, &manifest_dir, self.shared.object_store.clone());
+ let store_config = StoreConfig {
+ log_store: self.log_store.clone(),
+ sst_layer,
+ manifest: manifest.clone(),
+ memtable_builder: self.memtable_builder.clone(),
+ flush_scheduler: self.flush_scheduler.clone(),
+ flush_strategy: self.flush_strategy.clone(),
+ };
+
let region = RegionImpl::new(
region_id,
region_name.clone(),
metadata.clone(),
- wal,
- sst_layer,
- manifest.clone(),
+ store_config,
);
// Persist region metadata
manifest
- .update(RegionMetaAction::Change(RegionChange {
- metadata: Arc::new(metadata),
- }))
+ .update(RegionMetaActionList::new(vec![
+ RegionMetaAction::Protocol(ProtocolAction::new()),
+ RegionMetaAction::Change(RegionChange {
+ metadata: Arc::new(metadata),
+ }),
+ ]))
.await?;
{
diff --git a/src/storage/src/error.rs b/src/storage/src/error.rs
index 937a5455806e..207a70a7671c 100644
--- a/src/storage/src/error.rs
+++ b/src/storage/src/error.rs
@@ -5,6 +5,7 @@ use std::str::Utf8Error;
use common_error::prelude::*;
use datatypes::arrow;
use serde_json::error::Error as JsonError;
+use store_api::manifest::action::ProtocolVersion;
use store_api::manifest::ManifestVersion;
use crate::metadata::Error as MetadataError;
@@ -142,6 +143,34 @@ pub enum Error {
#[snafu(display("Task already cancelled"))]
Cancelled { backtrace: Backtrace },
+
+ #[snafu(display(
+ "Manifest protocol forbid to read, min_version: {}, supported_version: {}",
+ min_version,
+ supported_version
+ ))]
+ ManifestProtocolForbidRead {
+ min_version: ProtocolVersion,
+ supported_version: ProtocolVersion,
+ backtrace: Backtrace,
+ },
+
+ #[snafu(display(
+ "Manifest protocol forbid to write, min_version: {}, supported_version: {}",
+ min_version,
+ supported_version
+ ))]
+ ManifestProtocolForbidWrite {
+ min_version: ProtocolVersion,
+ supported_version: ProtocolVersion,
+ backtrace: Backtrace,
+ },
+
+ #[snafu(display("Failed to decode region action list, {}", msg))]
+ DecodeRegionMetaActionList { msg: String, backtrace: Backtrace },
+
+ #[snafu(display("Failed to read line, err: {}", source))]
+ Readline { source: IoError },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -162,7 +191,9 @@ impl ErrorExt for Error {
| EncodeJson { .. }
| DecodeJson { .. }
| JoinTask { .. }
- | Cancelled { .. } => StatusCode::Unexpected,
+ | Cancelled { .. }
+ | DecodeRegionMetaActionList { .. }
+ | Readline { .. } => StatusCode::Unexpected,
FlushIo { .. }
| InitBackend { .. }
@@ -173,7 +204,9 @@ impl ErrorExt for Error {
| DeleteObject { .. }
| WriteWal { .. }
| DecodeWalHeader { .. }
- | EncodeWalHeader { .. } => StatusCode::StorageUnavailable,
+ | EncodeWalHeader { .. }
+ | ManifestProtocolForbidRead { .. }
+ | ManifestProtocolForbidWrite { .. } => StatusCode::StorageUnavailable,
}
}
diff --git a/src/storage/src/flush.rs b/src/storage/src/flush.rs
index 4d0bb5575d00..0cc51fa2bf65 100644
--- a/src/storage/src/flush.rs
+++ b/src/storage/src/flush.rs
@@ -205,7 +205,11 @@ impl<S: LogStore> FlushJob<S> {
files_to_remove: Vec::default(),
};
logging::debug!("Write region edit: {:?} to manifest.", edit);
- self.manifest.update(RegionMetaAction::Edit(edit)).await
+ self.manifest
+ .update(RegionMetaActionList::with_action(RegionMetaAction::Edit(
+ edit,
+ )))
+ .await
}
/// Generates random SST file name in format: `^[a-f\d]{8}(-[a-f\d]{4}){3}-[a-f\d]{12}.parquet$`
diff --git a/src/storage/src/lib.rs b/src/storage/src/lib.rs
index e3a7fb8ef3e4..27ba91a6c1cf 100644
--- a/src/storage/src/lib.rs
+++ b/src/storage/src/lib.rs
@@ -7,14 +7,14 @@ pub mod config;
mod engine;
pub mod error;
mod flush;
-pub mod manifest;
+mod manifest;
pub mod memtable;
pub mod metadata;
mod proto;
mod region;
mod snapshot;
mod sst;
-pub mod sync;
+mod sync;
#[cfg(test)]
mod test_util;
mod version;
diff --git a/src/storage/src/manifest/action.rs b/src/storage/src/manifest/action.rs
index 2826231c22b7..9a237002a2f1 100644
--- a/src/storage/src/manifest/action.rs
+++ b/src/storage/src/manifest/action.rs
@@ -1,26 +1,35 @@
+use std::io::{BufRead, BufReader, Write};
+
use serde::{Deserialize, Serialize};
use serde_json as json;
-use snafu::ResultExt;
+use serde_json::ser::to_writer;
+use snafu::{ensure, OptionExt, ResultExt};
+use store_api::manifest::action::ProtocolAction;
+use store_api::manifest::action::ProtocolVersion;
+use store_api::manifest::ManifestVersion;
use store_api::manifest::MetaAction;
use store_api::manifest::Metadata;
use store_api::storage::RegionId;
use store_api::storage::SequenceNumber;
-use crate::error::{DecodeJsonSnafu, EncodeJsonSnafu, Result, Utf8Snafu};
+use crate::error::{
+ DecodeJsonSnafu, DecodeRegionMetaActionListSnafu, EncodeJsonSnafu,
+ ManifestProtocolForbidReadSnafu, ReadlineSnafu, Result,
+};
use crate::metadata::{RegionMetadataRef, VersionNumber};
use crate::sst::FileMeta;
-#[derive(Serialize, Deserialize, Clone, Debug)]
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct RegionChange {
pub metadata: RegionMetadataRef,
}
-#[derive(Serialize, Deserialize, Clone, Debug)]
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct RegionRemove {
pub region_id: RegionId,
}
-#[derive(Serialize, Deserialize, Clone, Debug)]
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct RegionEdit {
pub region_id: RegionId,
pub region_version: VersionNumber,
@@ -29,39 +38,186 @@ pub struct RegionEdit {
pub files_to_remove: Vec<FileMeta>,
}
-#[derive(Serialize, Deserialize, Clone, Debug)]
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct RegionManifestData {
pub region_meta: RegionMetadataRef,
// TODO(dennis): version metadata
}
-#[derive(Serialize, Deserialize, Clone, Debug)]
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
+pub struct RegionMetaActionList {
+ pub actions: Vec<RegionMetaAction>,
+ pub prev_version: ManifestVersion,
+}
+
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub enum RegionMetaAction {
+ Protocol(ProtocolAction),
Change(RegionChange),
Remove(RegionRemove),
Edit(RegionEdit),
}
-impl RegionMetaAction {
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
+struct VersionHeader {
+ prev_version: ManifestVersion,
+}
+
+const NEWLINE: &[u8] = b"\n";
+
+impl RegionMetaActionList {
+ pub fn with_action(action: RegionMetaAction) -> Self {
+ Self {
+ actions: vec![action],
+ prev_version: 0,
+ }
+ }
+
+ pub fn new(actions: Vec<RegionMetaAction>) -> Self {
+ Self {
+ actions,
+ prev_version: 0,
+ }
+ }
+
+ /// Encode self into JSON as newline-separated strings: the prev_version header first, then one action per line.
pub(crate) fn encode(&self) -> Result<Vec<u8>> {
- Ok(json::to_string(self).context(EncodeJsonSnafu)?.into_bytes())
+ let mut bytes = Vec::default();
+
+ {
+ // Encode prev_version
+ let v = VersionHeader {
+ prev_version: self.prev_version,
+ };
+
+ to_writer(&mut bytes, &v).context(EncodeJsonSnafu)?;
+ // unwrap is fine here, because we write into a buffer.
+ bytes.write_all(NEWLINE).unwrap();
+ }
+
+ for action in &self.actions {
+ to_writer(&mut bytes, action).context(EncodeJsonSnafu)?;
+ bytes.write_all(NEWLINE).unwrap();
+ }
+
+ Ok(bytes)
}
- pub(crate) fn decode(bs: &[u8]) -> Result<Self> {
- json::from_str(std::str::from_utf8(bs).context(Utf8Snafu)?).context(DecodeJsonSnafu)
+ pub(crate) fn decode(
+ bs: &[u8],
+ reader_version: ProtocolVersion,
+ ) -> Result<(Self, Option<ProtocolAction>)> {
+ let mut lines = BufReader::new(bs).lines();
+
+ let mut action_list = RegionMetaActionList {
+ actions: Vec::default(),
+ prev_version: 0,
+ };
+
+ {
+ let first_line = lines
+ .next()
+ .with_context(|| DecodeRegionMetaActionListSnafu {
+ msg: format!(
+ "Invalid content in manifest: {}",
+ std::str::from_utf8(bs).unwrap_or("**invalid bytes**")
+ ),
+ })?
+ .context(ReadlineSnafu)?;
+
+ // Decode prev_version
+ let v: VersionHeader = json::from_str(&first_line).context(DecodeJsonSnafu)?;
+ action_list.prev_version = v.prev_version;
+ }
+
+ // Decode actions
+ let mut protocol_action = None;
+ let mut actions = Vec::default();
+ for line in lines {
+ let line = &line.context(ReadlineSnafu)?;
+ let action: RegionMetaAction = json::from_str(line).context(DecodeJsonSnafu)?;
+
+ if let RegionMetaAction::Protocol(p) = &action {
+ ensure!(
+ p.is_readable(reader_version),
+ ManifestProtocolForbidReadSnafu {
+ min_version: p.min_reader_version,
+ supported_version: reader_version,
+ }
+ );
+ protocol_action = Some(p.clone());
+ }
+
+ actions.push(action);
+ }
+ action_list.actions = actions;
+
+ Ok((action_list, protocol_action))
}
}
impl Metadata for RegionManifestData {}
-impl MetaAction for RegionMetaAction {
- type MetadataId = RegionId;
+impl MetaAction for RegionMetaActionList {
+ fn set_prev_version(&mut self, version: ManifestVersion) {
+ self.prev_version = version;
+ }
+}
- fn metadata_id(&self) -> RegionId {
- match self {
- RegionMetaAction::Change(c) => c.metadata.id,
- RegionMetaAction::Remove(r) => r.region_id,
- RegionMetaAction::Edit(e) => e.region_id,
- }
+#[cfg(test)]
+mod tests {
+ use common_telemetry::logging;
+
+ use super::*;
+
+ #[test]
+ fn test_encode_decode_action_list() {
+ common_telemetry::init_default_ut_logging();
+ let mut protocol = ProtocolAction::new();
+ protocol.min_reader_version = 1;
+ let mut action_list = RegionMetaActionList::new(vec![
+ RegionMetaAction::Protocol(protocol.clone()),
+ RegionMetaAction::Edit(RegionEdit {
+ region_id: 1,
+ region_version: 10,
+ flush_sequence: 99,
+ files_to_add: vec![
+ FileMeta {
+ file_path: "test1".to_string(),
+ level: 1,
+ },
+ FileMeta {
+ file_path: "test2".to_string(),
+ level: 2,
+ },
+ ],
+ files_to_remove: vec![FileMeta {
+ file_path: "test0".to_string(),
+ level: 0,
+ }],
+ }),
+ ]);
+ action_list.set_prev_version(3);
+
+ let bs = action_list.encode().unwrap();
+ // {"prev_version":3}
+ // {"Protocol":{"min_reader_version":1,"min_writer_version":0}}
+ // {"Edit":{"region_id":1,"region_version":10,"flush_sequence":99,"files_to_add":[{"file_path":"test1","level":1},{"file_path":"test2","level":2}],"files_to_remove":[{"file_path":"test0","level":0}]}}
+
+ logging::debug!(
+ "Encoded action list: \r\n{}",
+ String::from_utf8(bs.clone()).unwrap()
+ );
+
+ let e = RegionMetaActionList::decode(&bs, 0);
+ assert!(e.is_err());
+ assert_eq!(
+ "Manifest protocol forbid to read, min_version: 1, supported_version: 0",
+ format!("{}", e.err().unwrap())
+ );
+
+ let (decode_list, p) = RegionMetaActionList::decode(&bs, 1).unwrap();
+ assert_eq!(decode_list, action_list);
+ assert_eq!(p.unwrap(), protocol);
}
}
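As a standalone sketch of the newline-delimited JSON layout that RegionMetaActionList::encode writes above (one VersionHeader line, then one JSON object per action); the simplified types below are stand-ins for the real ones and are not repository code:

use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize)]
struct VersionHeader {
    prev_version: u64,
}

#[derive(Serialize, Deserialize, Debug)]
enum Action {
    Protocol { min_reader_version: u16, min_writer_version: u16 },
    Edit { region_id: u64 },
}

// Encode a header line followed by one action per line, as the manifest does.
fn encode(prev_version: u64, actions: &[Action]) -> serde_json::Result<Vec<u8>> {
    let mut out = Vec::new();
    serde_json::to_writer(&mut out, &VersionHeader { prev_version })?;
    out.push(b'\n');
    for action in actions {
        serde_json::to_writer(&mut out, action)?;
        out.push(b'\n');
    }
    Ok(out)
}

// Decode by reading the header from the first line and one action per remaining line.
fn decode(bytes: &[u8]) -> serde_json::Result<(u64, Vec<Action>)> {
    let text = std::str::from_utf8(bytes).expect("manifest content must be valid UTF-8");
    let mut lines = text.lines();
    let header: VersionHeader =
        serde_json::from_str(lines.next().unwrap_or("{\"prev_version\":0}"))?;
    let actions = lines
        .map(serde_json::from_str::<Action>)
        .collect::<serde_json::Result<Vec<_>>>()?;
    Ok((header.prev_version, actions))
}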
diff --git a/src/storage/src/manifest/region.rs b/src/storage/src/manifest/region.rs
index 1d266326ce5f..3011a2cb9d9c 100644
--- a/src/storage/src/manifest/region.rs
+++ b/src/storage/src/manifest/region.rs
@@ -4,13 +4,16 @@ use std::sync::{
Arc,
};
+use arc_swap::ArcSwap;
use async_trait::async_trait;
use common_telemetry::logging;
use object_store::ObjectStore;
+use snafu::ensure;
+use store_api::manifest::action::{self, ProtocolAction, ProtocolVersion};
use store_api::manifest::*;
use store_api::storage::RegionId;
-use crate::error::{Error, Result};
+use crate::error::{Error, ManifestProtocolForbidWriteSnafu, Result};
use crate::manifest::action::*;
use crate::manifest::storage::ManifestObjectStore;
use crate::manifest::storage::ObjectStoreLogIterator;
@@ -23,7 +26,7 @@ pub struct RegionManifest {
#[async_trait]
impl Manifest for RegionManifest {
type Error = Error;
- type MetaAction = RegionMetaAction;
+ type MetaAction = RegionMetaActionList;
type MetadataId = RegionId;
type Metadata = RegionManifestData;
@@ -33,8 +36,8 @@ impl Manifest for RegionManifest {
}
}
- async fn update(&self, action: RegionMetaAction) -> Result<ManifestVersion> {
- self.inner.save(&action).await
+ async fn update(&self, action_list: RegionMetaActionList) -> Result<ManifestVersion> {
+ self.inner.save(action_list).await
}
async fn load(&self) -> Result<Option<RegionManifestData>> {
@@ -49,13 +52,17 @@ impl Manifest for RegionManifest {
let mut iter = self.inner.scan(start_bound, MAX_VERSION).await?;
- match iter.next_action().await? {
- Some((_v, RegionMetaAction::Change(c))) => Ok(Some(RegionManifestData {
- region_meta: c.metadata,
- })),
- Some(_) => todo!(),
- None => Ok(None),
+ while let Some((_v, action_list)) = iter.next_action().await? {
+ for action in action_list.actions {
+ if let RegionMetaAction::Change(c) = action {
+ return Ok(Some(RegionManifestData {
+ region_meta: c.metadata,
+ }));
+ }
+ }
}
+
+ Ok(None)
}
async fn checkpoint(&self) -> Result<ManifestVersion> {
@@ -71,18 +78,26 @@ struct RegionManifestInner {
region_id: RegionId,
store: Arc<ManifestObjectStore>,
version: AtomicU64,
+ /// Currently used protocol
+ protocol: ArcSwap<ProtocolAction>,
+ /// Protocol versions supported by the current node (reader_version, writer_version)
+ supported_reader_version: ProtocolVersion,
+ supported_writer_version: ProtocolVersion,
}
-struct RegionMetaActionIterator {
+struct RegionMetaActionListIterator {
log_iter: ObjectStoreLogIterator,
+ reader_version: ProtocolVersion,
}
-impl RegionMetaActionIterator {
- async fn next_action(&mut self) -> Result<Option<(ManifestVersion, RegionMetaAction)>> {
+impl RegionMetaActionListIterator {
+ async fn next_action(&mut self) -> Result<Option<(ManifestVersion, RegionMetaActionList)>> {
match self.log_iter.next_log().await? {
Some((v, bytes)) => {
- let action: RegionMetaAction = RegionMetaAction::decode(&bytes)?;
- Ok(Some((v, action)))
+ // TODO(dennis): save the decoded protocol into the inner protocol when recovering
+ let (action_list, _protocol) =
+ RegionMetaActionList::decode(&bytes, self.reader_version)?;
+ Ok(Some((v, action_list)))
}
None => Ok(None),
}
@@ -91,11 +106,16 @@ impl RegionMetaActionIterator {
impl RegionManifestInner {
fn new(region_id: RegionId, manifest_dir: &str, object_store: ObjectStore) -> Self {
+ let (reader_version, writer_version) = action::supported_protocol_version();
+
Self {
region_id,
store: Arc::new(ManifestObjectStore::new(manifest_dir, object_store)),
// TODO(dennis): recover the last version from history
version: AtomicU64::new(0),
+ protocol: ArcSwap::new(Arc::new(ProtocolAction::new())),
+ supported_reader_version: reader_version,
+ supported_writer_version: writer_version,
}
}
@@ -109,16 +129,26 @@ impl RegionManifestInner {
self.version.load(Ordering::Relaxed)
}
- async fn save(&self, action: &RegionMetaAction) -> Result<ManifestVersion> {
+ async fn save(&self, action_list: RegionMetaActionList) -> Result<ManifestVersion> {
+ let protocol = self.protocol.load();
+
+ ensure!(
+ protocol.is_writable(self.supported_writer_version),
+ ManifestProtocolForbidWriteSnafu {
+ min_version: protocol.min_writer_version,
+ supported_version: self.supported_writer_version,
+ }
+ );
+
let version = self.inc_version();
logging::debug!(
"Save region metadata action: {:?}, version: {}",
- action,
+ action_list,
version
);
- self.store.save(version, &action.encode()?).await?;
+ self.store.save(version, &action_list.encode()?).await?;
Ok(version)
}
@@ -127,9 +157,10 @@ impl RegionManifestInner {
&self,
start: ManifestVersion,
end: ManifestVersion,
- ) -> Result<RegionMetaActionIterator> {
- Ok(RegionMetaActionIterator {
+ ) -> Result<RegionMetaActionListIterator> {
+ Ok(RegionMetaActionListIterator {
log_iter: self.store.scan(start, end).await?,
+ reader_version: self.supported_reader_version,
})
}
}
@@ -172,9 +203,11 @@ mod tests {
assert!(manifest.load().await.unwrap().is_none());
manifest
- .update(RegionMetaAction::Change(RegionChange {
- metadata: region_meta.clone(),
- }))
+ .update(RegionMetaActionList::with_action(RegionMetaAction::Change(
+ RegionChange {
+ metadata: region_meta.clone(),
+ },
+ )))
.await
.unwrap();
@@ -193,9 +226,11 @@ mod tests {
let metadata: RegionMetadata = desc.try_into().unwrap();
let region_meta = Arc::new(metadata);
manifest
- .update(RegionMetaAction::Change(RegionChange {
- metadata: region_meta.clone(),
- }))
+ .update(RegionMetaActionList::with_action(RegionMetaAction::Change(
+ RegionChange {
+ metadata: region_meta.clone(),
+ },
+ )))
.await
.unwrap();
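The protocol gate used in decode (reader side) and in RegionManifestInner::save (writer side) reduces to a pair of version comparisons; a tiny standalone sketch, not repository code:

fn is_readable(min_reader_version: u16, supported_reader_version: u16) -> bool {
    supported_reader_version >= min_reader_version
}

fn is_writable(min_writer_version: u16, supported_writer_version: u16) -> bool {
    supported_writer_version >= min_writer_version
}

fn main() {
    // A manifest that requires reader version 1 rejects a version-0 node but
    // accepts a version-1 node; the same rule guards writes in save().
    assert!(!is_readable(1, 0));
    assert!(is_readable(1, 1));
    assert!(is_writable(0, 0));
}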
diff --git a/src/storage/src/region.rs b/src/storage/src/region.rs
index 55c1ff5da29b..4303b273a0d6 100644
--- a/src/storage/src/region.rs
+++ b/src/storage/src/region.rs
@@ -9,11 +9,10 @@ use snafu::ensure;
use store_api::logstore::LogStore;
use store_api::storage::{ReadContext, Region, RegionId, RegionMeta, WriteContext, WriteResponse};
-use crate::background::JobPoolImpl;
use crate::error::{self, Error, Result};
-use crate::flush::{FlushSchedulerImpl, FlushSchedulerRef, FlushStrategyRef, SizeBasedStrategy};
+use crate::flush::{FlushSchedulerRef, FlushStrategyRef};
use crate::manifest::region::RegionManifest;
-use crate::memtable::{DefaultMemtableBuilder, MemtableVersion};
+use crate::memtable::{MemtableBuilderRef, MemtableVersion};
use crate::metadata::{RegionMetaImpl, RegionMetadata};
pub use crate::region::writer::{RegionWriter, RegionWriterRef, WriterContext};
use crate::snapshot::SnapshotImpl;
@@ -59,34 +58,42 @@ impl<S: LogStore> Region for RegionImpl<S> {
}
}
+/// Storage-related config for a region.
+///
+/// Contains all the storage-related components the region needs, such as the log store,
+/// manifest, and memtable builder.
+pub struct StoreConfig<S> {
+ pub log_store: Arc<S>,
+ pub sst_layer: AccessLayerRef,
+ pub manifest: RegionManifest,
+ pub memtable_builder: MemtableBuilderRef,
+ pub flush_scheduler: FlushSchedulerRef,
+ pub flush_strategy: FlushStrategyRef,
+}
+
impl<S: LogStore> RegionImpl<S> {
pub fn new(
id: RegionId,
name: String,
metadata: RegionMetadata,
- wal: Wal<S>,
- sst_layer: AccessLayerRef,
- manifest: RegionManifest,
+ store_config: StoreConfig<S>,
) -> RegionImpl<S> {
- let memtable_builder = Arc::new(DefaultMemtableBuilder {});
let memtable_version = MemtableVersion::new();
- // TODO(yingwen): Pass flush scheduler to `RegionImpl::new`.
- let job_pool = Arc::new(JobPoolImpl {});
- let flush_scheduler = Arc::new(FlushSchedulerImpl::new(job_pool));
-
let version_control = VersionControl::new(metadata, memtable_version);
+ let wal = Wal::new(id, name.clone(), store_config.log_store);
+
let inner = Arc::new(RegionInner {
shared: Arc::new(SharedData {
id,
name,
version_control: Arc::new(version_control),
}),
- writer: Arc::new(RegionWriter::new(memtable_builder)),
+ writer: Arc::new(RegionWriter::new(store_config.memtable_builder)),
wal,
- flush_strategy: Arc::new(SizeBasedStrategy::default()),
- flush_scheduler,
- sst_layer,
- manifest,
+ flush_strategy: store_config.flush_strategy,
+ flush_scheduler: store_config.flush_scheduler,
+ sst_layer: store_config.sst_layer,
+ manifest: store_config.manifest,
});
RegionImpl { inner }
diff --git a/src/storage/src/region/tests.rs b/src/storage/src/region/tests.rs
index 9f91ce1bfd99..a072176a897f 100644
--- a/src/storage/src/region/tests.rs
+++ b/src/storage/src/region/tests.rs
@@ -1,18 +1,14 @@
//! Region tests.
+mod flush;
mod read_write;
use datatypes::type_id::LogicalTypeId;
-use log_store::fs::noop::NoopLogStore;
-use object_store::{backend::fs::Backend, ObjectStore};
-use store_api::manifest::Manifest;
use store_api::storage::consts;
use tempdir::TempDir;
use super::*;
-use crate::manifest::region::RegionManifest;
-use crate::sst::FsAccessLayer;
-use crate::test_util::{self, descriptor_util::RegionDescBuilder, schema_util};
+use crate::test_util::{self, config_util, descriptor_util::RegionDescBuilder, schema_util};
#[tokio::test]
async fn test_new_region() {
@@ -25,26 +21,15 @@ async fn test_new_region() {
.build();
let metadata = desc.try_into().unwrap();
- let wal = Wal::new(region_id, region_name, Arc::new(NoopLogStore::default()));
let store_dir = TempDir::new("test_new_region")
.unwrap()
.path()
.to_string_lossy()
.to_string();
- let accessor = Backend::build().root(&store_dir).finish().await.unwrap();
- let object_store = ObjectStore::new(accessor);
- let sst_layer = Arc::new(FsAccessLayer::new("/", object_store.clone()));
- let manifest = RegionManifest::new(region_id, "/manifest/", object_store);
-
- let region = RegionImpl::new(
- region_id,
- region_name.to_string(),
- metadata,
- wal,
- sst_layer,
- manifest,
- );
+ let store_config = config_util::new_store_config(&store_dir, region_id, region_name).await;
+
+ let region = RegionImpl::new(region_id, region_name.to_string(), metadata, store_config);
let expect_schema = schema_util::new_schema_ref(
&[
diff --git a/src/storage/src/region/tests/flush.rs b/src/storage/src/region/tests/flush.rs
new file mode 100644
index 000000000000..d2623f69e8de
--- /dev/null
+++ b/src/storage/src/region/tests/flush.rs
@@ -0,0 +1,108 @@
+use std::sync::atomic::{AtomicBool, Ordering};
+use std::sync::Arc;
+
+use log_store::fs::noop::NoopLogStore;
+use store_api::storage::WriteResponse;
+use tempdir::TempDir;
+
+use crate::engine;
+use crate::flush::{FlushStrategy, FlushStrategyRef};
+use crate::region::tests::read_write::{self, Tester};
+use crate::region::{RegionImpl, SharedDataRef};
+use crate::test_util::config_util;
+
+const REGION_NAME: &str = "region-flush-0";
+
+/// Create a new region for flush test
+async fn new_region_for_flush(
+ store_dir: &str,
+ enable_version_column: bool,
+ flush_strategy: FlushStrategyRef,
+) -> RegionImpl<NoopLogStore> {
+ let region_id = 0;
+
+ let metadata = read_write::new_metadata(REGION_NAME, enable_version_column);
+
+ let mut store_config = config_util::new_store_config(store_dir, region_id, REGION_NAME).await;
+ store_config.flush_strategy = flush_strategy;
+
+ RegionImpl::new(region_id, REGION_NAME.to_string(), metadata, store_config)
+}
+
+struct FlushTester {
+ tester: Tester,
+}
+
+impl FlushTester {
+ async fn new(store_dir: &str, flush_strategy: FlushStrategyRef) -> FlushTester {
+ let region = new_region_for_flush(store_dir, false, flush_strategy).await;
+
+ FlushTester {
+ tester: Tester::with_region(region),
+ }
+ }
+
+ async fn put(&self, data: &[(i64, Option<i64>)]) -> WriteResponse {
+ self.tester.put(data).await
+ }
+}
+
+#[derive(Default)]
+struct FlushSwitch {
+ should_flush: AtomicBool,
+}
+
+impl FlushSwitch {
+ fn set_should_flush(&self, should_flush: bool) {
+ self.should_flush.store(should_flush, Ordering::Relaxed);
+ }
+}
+
+impl FlushStrategy for FlushSwitch {
+ fn should_flush(
+ &self,
+ _shared: &SharedDataRef,
+ _bytes_mutable: usize,
+ _bytes_total: usize,
+ ) -> bool {
+ self.should_flush.load(Ordering::Relaxed)
+ }
+}
+
+#[tokio::test]
+async fn test_flush() {
+ common_telemetry::init_default_ut_logging();
+
+ let dir = TempDir::new("flush").unwrap();
+ let store_dir = dir.path().to_str().unwrap();
+
+ let flush_switch = Arc::new(FlushSwitch::default());
+ // Always trigger flush before write.
+ let tester = FlushTester::new(store_dir, flush_switch.clone()).await;
+
+ let data = [(1000, Some(100))];
+ // Put one element so we have content to flush.
+ tester.put(&data).await;
+
+ // Now set should_flush to true to trigger a flush.
+ flush_switch.set_should_flush(true);
+ // Put an element to trigger the flush.
+ tester.put(&data).await;
+
+ // Now put more data to trigger a write stall and wait until the last flush is done to
+ // ensure at least one parquet file is generated.
+ tester.put(&data).await;
+
+ // Check parquet files.
+ let sst_dir = format!("{}/{}", store_dir, engine::region_sst_dir(REGION_NAME));
+ let mut has_parquet_file = false;
+ for entry in std::fs::read_dir(sst_dir).unwrap() {
+ let entry = entry.unwrap();
+ let path = entry.path();
+ if !path.is_dir() {
+ assert_eq!("parquet", path.extension().unwrap());
+ has_parquet_file = true;
+ }
+ }
+ assert!(has_parquet_file);
+}
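The FlushSwitch above relies on the region taking its flush strategy as a trait object, so a test can inject a switch it controls; a self-contained sketch of that dependency-injection pattern with a simplified stand-in trait (not the real FlushStrategy signature):

use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;

// Simplified stand-in for the real FlushStrategy trait.
trait FlushStrategy: Send + Sync {
    fn should_flush(&self, bytes_mutable: usize) -> bool;
}

#[derive(Default)]
struct FlushSwitch {
    should_flush: AtomicBool,
}

impl FlushSwitch {
    fn set_should_flush(&self, v: bool) {
        self.should_flush.store(v, Ordering::Relaxed);
    }
}

impl FlushStrategy for FlushSwitch {
    fn should_flush(&self, _bytes_mutable: usize) -> bool {
        self.should_flush.load(Ordering::Relaxed)
    }
}

fn main() {
    let switch = Arc::new(FlushSwitch::default());
    // The region only sees the trait object; the test keeps the concrete handle.
    let strategy: Arc<dyn FlushStrategy> = switch.clone();
    assert!(!strategy.should_flush(0));
    switch.set_should_flush(true);
    assert!(strategy.should_flush(0));
}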
diff --git a/src/storage/src/region/tests/read_write.rs b/src/storage/src/region/tests/read_write.rs
index 7a06c3c5206a..7628b2ee4f3e 100644
--- a/src/storage/src/region/tests/read_write.rs
+++ b/src/storage/src/region/tests/read_write.rs
@@ -6,21 +6,24 @@ use datatypes::prelude::*;
use datatypes::type_id::LogicalTypeId;
use datatypes::vectors::Int64Vector;
use log_store::fs::noop::NoopLogStore;
-use object_store::{backend::fs::Backend, ObjectStore};
-use store_api::manifest::Manifest;
use store_api::storage::{
consts, Chunk, ChunkReader, PutOperation, ReadContext, Region, RegionMeta, ScanRequest,
SequenceNumber, Snapshot, WriteContext, WriteRequest, WriteResponse,
};
use tempdir::TempDir;
-use crate::manifest::region::RegionManifest;
-use crate::region::RegionImpl;
-use crate::sst::FsAccessLayer;
-use crate::test_util::{self, descriptor_util::RegionDescBuilder, write_batch_util};
-use crate::wal::Wal;
+use crate::region::{RegionImpl, RegionMetadata};
+use crate::test_util::{self, config_util, descriptor_util::RegionDescBuilder, write_batch_util};
use crate::write_batch::{PutData, WriteBatch};
+pub fn new_metadata(region_name: &str, enable_version_column: bool) -> RegionMetadata {
+ let desc = RegionDescBuilder::new(region_name)
+ .enable_version_column(enable_version_column)
+ .push_value_column(("v1", LogicalTypeId::Int64, true))
+ .build();
+ desc.try_into().unwrap()
+}
+
/// Create a new region for read/write test
async fn new_region_for_rw(
store_dir: &str,
@@ -28,28 +31,12 @@ async fn new_region_for_rw(
) -> RegionImpl<NoopLogStore> {
let region_id = 0;
let region_name = "region-rw-0";
- let sst_dir = format!("{}/{}/", store_dir, region_name);
- let manifest_dir = format!("{}/{}/maniffest/", store_dir, region_name);
- let desc = RegionDescBuilder::new(region_name)
- .enable_version_column(enable_version_column)
- .push_value_column(("v1", LogicalTypeId::Int64, true))
- .build();
- let metadata = desc.try_into().unwrap();
- let wal = Wal::new(region_id, region_name, Arc::new(NoopLogStore::default()));
- let accessor = Backend::build().root(store_dir).finish().await.unwrap();
- let object_store = ObjectStore::new(accessor);
- let sst_layer = Arc::new(FsAccessLayer::new(&sst_dir, object_store.clone()));
- let manifest = RegionManifest::new(region_id, &manifest_dir, object_store);
-
- RegionImpl::new(
- region_id,
- region_name.to_string(),
- metadata,
- wal,
- sst_layer,
- manifest,
- )
+ let metadata = new_metadata(region_name, enable_version_column);
+
+ let store_config = config_util::new_store_config(store_dir, region_id, region_name).await;
+
+ RegionImpl::new(region_id, region_name.to_string(), metadata, store_config)
}
fn new_write_batch_for_test(enable_version_column: bool) -> WriteBatch {
@@ -104,7 +91,7 @@ fn append_chunk_to(chunk: &Chunk, dst: &mut Vec<(i64, Option<i64>)>) {
}
/// Test region without considering version column.
-struct Tester {
+pub struct Tester {
region: RegionImpl<NoopLogStore>,
write_ctx: WriteContext,
read_ctx: ReadContext,
@@ -114,6 +101,10 @@ impl Tester {
async fn new(store_dir: &str) -> Tester {
let region = new_region_for_rw(store_dir, false).await;
+ Tester::with_region(region)
+ }
+
+ pub fn with_region(region: RegionImpl<NoopLogStore>) -> Tester {
Tester {
region,
write_ctx: WriteContext::default(),
@@ -124,7 +115,7 @@ impl Tester {
/// Put without version specified.
///
/// Format of data: (timestamp, v1), timestamp is key, v1 is value.
- async fn put(&self, data: &[(i64, Option<i64>)]) -> WriteResponse {
+ pub async fn put(&self, data: &[(i64, Option<i64>)]) -> WriteResponse {
// Build a batch without version.
let mut batch = new_write_batch_for_test(false);
let put_data = new_put_data(data);
diff --git a/src/storage/src/region/writer.rs b/src/storage/src/region/writer.rs
index a8f579478fe3..30d20c288ff3 100644
--- a/src/storage/src/region/writer.rs
+++ b/src/storage/src/region/writer.rs
@@ -21,17 +21,28 @@ use crate::write_batch::WriteBatch;
pub type RegionWriterRef = Arc<RegionWriter>;
+// TODO(yingwen): Add benches for write and support group commit to improve write throughput.
+
+/// Region writer manages all write operations to the region.
pub struct RegionWriter {
+ /// Inner writer guarded by the write lock, which ensures that
+ /// all write operations are serialized.
inner: Mutex<WriterInner>,
+ /// Version lock; protects read-modify-write updates to the region `Version`.
+ ///
+ /// Increasing the committed sequence must be guarded by this lock.
+ version_mutex: Mutex<()>,
}
impl RegionWriter {
pub fn new(memtable_builder: MemtableBuilderRef) -> RegionWriter {
RegionWriter {
inner: Mutex::new(WriterInner::new(memtable_builder)),
+ version_mutex: Mutex::new(()),
}
}
+ /// Write to the region while holding the write lock.
pub async fn write<S: LogStore>(
&self,
ctx: &WriteContext,
@@ -39,17 +50,48 @@ impl RegionWriter {
writer_ctx: WriterContext<'_, S>,
) -> Result<WriteResponse> {
let mut inner = self.inner.lock().await;
- inner.write(ctx, request, writer_ctx).await
+ inner
+ .write(&self.version_mutex, ctx, request, writer_ctx)
+ .await
}
+ /// Apply version edit.
pub async fn apply_version_edit<S: LogStore>(
&self,
wal: &Wal<S>,
edit: VersionEdit,
shared: &SharedDataRef,
) -> Result<()> {
- let mut inner = self.inner.lock().await;
- inner.apply_version_edit(wal, edit, shared).await
+ // HACK: We don't acquire the write lock here because a write stall would hold the
+ // write lock, leaving us no chance to take it and apply the version edit.
+ // So we add a version lock to ensure modifications to `VersionControl` are
+ // serialized.
+ let version_control = &shared.version_control;
+
+ let _lock = self.version_mutex.lock().await;
+ let next_sequence = version_control.committed_sequence() + 1;
+
+ self.persist_manifest_version(wal, next_sequence, &edit)
+ .await?;
+
+ version_control.apply_edit(edit);
+
+ version_control.set_committed_sequence(next_sequence);
+
+ Ok(())
+ }
+
+ async fn persist_manifest_version<S: LogStore>(
+ &self,
+ wal: &Wal<S>,
+ seq: SequenceNumber,
+ edit: &VersionEdit,
+ ) -> Result<()> {
+ let header = WalHeader::with_last_manifest_version(edit.manifest_version);
+
+ wal.write_to_wal(seq, header, Payload::None).await?;
+
+ Ok(())
}
}
@@ -85,13 +127,13 @@ impl WriterInner {
}
}
- // TODO(yingwen): Support group commit so we can avoid taking mutable reference.
/// Write `WriteBatch` to region, now the schema of batch needs to be validated outside.
///
/// Mutable reference of writer ensure no other reference of this writer can modify the
/// version control (write is exclusive).
async fn write<S: LogStore>(
&mut self,
+ version_mutex: &Mutex<()>,
_ctx: &WriteContext,
request: WriteBatch,
writer_ctx: WriterContext<'_, S>,
@@ -102,6 +144,7 @@ impl WriterInner {
let version_control = writer_ctx.version_control();
let version = version_control.current();
+ let _lock = version_mutex.lock().await;
let committed_sequence = version_control.committed_sequence();
// Sequence for current write batch.
let next_sequence = committed_sequence + 1;
@@ -214,6 +257,10 @@ impl WriterInner {
// However the last flush job may fail, in which case, we just return error
// and abort current write request. The flush handle is left empty, so the next
// time we still have chance to trigger a new flush.
+ logging::info!("Write stall, region: {}", shared.name);
+
+ // TODO(yingwen): We should release the write lock while waiting for the flush to finish,
+ // which needs something like an async condvar.
flush_handle.join().await.map_err(|e| {
logging::error!(
"Previous flush job failed, region: {}, err: {}",
@@ -250,39 +297,6 @@ impl WriterInner {
Ok(())
}
- async fn apply_version_edit<S: LogStore>(
- &mut self,
- wal: &Wal<S>,
- edit: VersionEdit,
- shared: &SharedDataRef,
- ) -> Result<()> {
- let version_control = &shared.version_control;
-
- let next_sequence = version_control.committed_sequence() + 1;
-
- self.persist_manifest_version(wal, next_sequence, &edit)
- .await?;
-
- version_control.apply_edit(edit);
-
- version_control.set_committed_sequence(next_sequence);
-
- Ok(())
- }
-
- async fn persist_manifest_version<S: LogStore>(
- &self,
- wal: &Wal<S>,
- seq: SequenceNumber,
- edit: &VersionEdit,
- ) -> Result<()> {
- let header = WalHeader::with_last_manifest_version(edit.manifest_version);
-
- wal.write_to_wal(seq, header, Payload::None).await?;
-
- Ok(())
- }
-
#[inline]
fn alloc_memtable_id(&mut self) -> MemtableId {
self.last_memtable_id += 1;
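A minimal sketch (tokio mutexes, unit payloads; simplified relative to the real RegionWriter) of the two-lock layout this diff introduces: whole writes serialize on the inner lock, while version updates only need the version lock, so a flush can apply its edit even while a stalled writer holds the write lock:

use tokio::sync::Mutex;

// `inner` stands in for WriterInner and serializes whole write operations;
// `version_mutex` guards only committed-sequence / version updates.
struct Writer {
    inner: Mutex<()>,
    version_mutex: Mutex<()>,
}

impl Writer {
    async fn write(&self) {
        let _write_guard = self.inner.lock().await;
        // Bump the committed sequence under the version lock, as WriterInner::write does.
        let _version_guard = self.version_mutex.lock().await;
        // ... insert into memtables, write WAL, advance the sequence ...
    }

    async fn apply_version_edit(&self) {
        // Deliberately skips `inner`: a flush can apply its edit even while a
        // stalled writer is blocked holding the write lock.
        let _version_guard = self.version_mutex.lock().await;
        // ... persist the manifest version, apply the edit, advance the sequence ...
    }
}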
diff --git a/src/storage/src/sst.rs b/src/storage/src/sst.rs
index 35fb190e992c..c8e6ea05b171 100644
--- a/src/storage/src/sst.rs
+++ b/src/storage/src/sst.rs
@@ -107,7 +107,7 @@ impl FileHandleInner {
}
/// Immutable metadata of a sst file.
-#[derive(Serialize, Deserialize, Clone, Debug)]
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct FileMeta {
pub file_path: String,
/// SST level of the file.
diff --git a/src/storage/src/test_util.rs b/src/storage/src/test_util.rs
index 92828d8d5934..c18e61b19e50 100644
--- a/src/storage/src/test_util.rs
+++ b/src/storage/src/test_util.rs
@@ -1,3 +1,4 @@
+pub mod config_util;
pub mod descriptor_util;
pub mod schema_util;
pub mod write_batch_util;
diff --git a/src/storage/src/test_util/config_util.rs b/src/storage/src/test_util/config_util.rs
new file mode 100644
index 000000000000..392d05533803
--- /dev/null
+++ b/src/storage/src/test_util/config_util.rs
@@ -0,0 +1,40 @@
+use std::sync::Arc;
+
+use log_store::fs::noop::NoopLogStore;
+use object_store::{backend::fs::Backend, ObjectStore};
+use store_api::manifest::Manifest;
+use store_api::storage::RegionId;
+
+use crate::background::JobPoolImpl;
+use crate::engine;
+use crate::flush::{FlushSchedulerImpl, SizeBasedStrategy};
+use crate::manifest::region::RegionManifest;
+use crate::memtable::DefaultMemtableBuilder;
+use crate::region::StoreConfig;
+use crate::sst::FsAccessLayer;
+
+/// Create a new StoreConfig for test.
+pub async fn new_store_config(
+ store_dir: &str,
+ region_id: RegionId,
+ region_name: &str,
+) -> StoreConfig<NoopLogStore> {
+ let sst_dir = engine::region_sst_dir(region_name);
+ let manifest_dir = engine::region_manifest_dir(region_name);
+
+ let accessor = Backend::build().root(store_dir).finish().await.unwrap();
+ let object_store = ObjectStore::new(accessor);
+ let sst_layer = Arc::new(FsAccessLayer::new(&sst_dir, object_store.clone()));
+ let manifest = RegionManifest::new(region_id, &manifest_dir, object_store);
+ let job_pool = Arc::new(JobPoolImpl {});
+ let flush_scheduler = Arc::new(FlushSchedulerImpl::new(job_pool));
+
+ StoreConfig {
+ log_store: Arc::new(NoopLogStore::default()),
+ sst_layer,
+ manifest,
+ memtable_builder: Arc::new(DefaultMemtableBuilder {}),
+ flush_scheduler,
+ flush_strategy: Arc::new(SizeBasedStrategy::default()),
+ }
+}
diff --git a/src/storage/src/version.rs b/src/storage/src/version.rs
index 6b5abb4f5e77..40bb398b8bea 100644
--- a/src/storage/src/version.rs
+++ b/src/storage/src/version.rs
@@ -97,16 +97,7 @@ impl VersionControl {
pub fn apply_edit(&self, edit: VersionEdit) {
let mut version_to_update = self.version.lock();
-
- if let Some(max_memtable_id) = edit.max_memtable_id {
- // Remove flushed memtables
- let memtable_version = version_to_update.memtables();
- let removed = memtable_version.remove_immutables(max_memtable_id);
- version_to_update.memtables = Arc::new(removed);
- }
-
version_to_update.apply_edit(edit);
-
version_to_update.commit();
}
}
@@ -189,6 +180,14 @@ impl Version {
if self.manifest_version < edit.manifest_version {
self.manifest_version = edit.manifest_version;
}
+
+ if let Some(max_memtable_id) = edit.max_memtable_id {
+ // Remove flushed memtables
+ let memtable_version = self.memtables();
+ let removed = memtable_version.remove_immutables(max_memtable_id);
+ self.memtables = Arc::new(removed);
+ }
+
let handles_to_add = edit.files_to_add.into_iter().map(FileHandle::new);
let merged_ssts = self.ssts.merge(handles_to_add);
diff --git a/src/storage/src/wal.rs b/src/storage/src/wal.rs
index 4994b4e1dd04..d3d9de446216 100644
--- a/src/storage/src/wal.rs
+++ b/src/storage/src/wal.rs
@@ -129,6 +129,7 @@ impl<S: LogStore> Wal<S> {
pub enum Payload<'a> {
None, // only header
WriteBatchArrow(&'a WriteBatch),
+ #[allow(dead_code)]
WriteBatchProto(&'a WriteBatch),
}
diff --git a/src/store-api/Cargo.toml b/src/store-api/Cargo.toml
index e2bb64282af3..6b6493686ada 100644
--- a/src/store-api/Cargo.toml
+++ b/src/store-api/Cargo.toml
@@ -19,4 +19,5 @@ snafu = { version = "0.7", features = ["backtraces"] }
[dev-dependencies]
async-stream = "0.3"
+serde_json = "1.0"
tokio = { version = "1.0", features = ["full"] }
diff --git a/src/store-api/src/manifest.rs b/src/store-api/src/manifest.rs
index b154f38cee8d..95eecccc336a 100644
--- a/src/store-api/src/manifest.rs
+++ b/src/store-api/src/manifest.rs
@@ -1,4 +1,5 @@
//! metadata service
+pub mod action;
mod storage;
use async_trait::async_trait;
@@ -15,12 +16,8 @@ pub trait Metadata: Clone {}
pub trait MetadataId: Clone + Copy {}
-/// The action to apply on metadata
pub trait MetaAction: Serialize + DeserializeOwned {
- type MetadataId: MetadataId;
-
- /// Returns the metadata id of the action
- fn metadata_id(&self) -> Self::MetadataId;
+ fn set_prev_version(&mut self, version: ManifestVersion);
}
/// Manifest service
diff --git a/src/store-api/src/manifest/action.rs b/src/store-api/src/manifest/action.rs
new file mode 100644
index 000000000000..89070d02ae70
--- /dev/null
+++ b/src/store-api/src/manifest/action.rs
@@ -0,0 +1,82 @@
+///! Common actions for manifest
+use serde::{Deserialize, Serialize};
+
+pub type ProtocolVersion = u16;
+
+/// Current reader and writer versions
+/// TODO(dennis): configurable
+const READER_VERSION: ProtocolVersion = 0;
+const WRITER_VERSION: ProtocolVersion = 0;
+
+/// The maximum protocol versions we are currently allowed to use.
+/// TODO(dennis): read these from configuration.
+pub fn supported_protocol_version() -> (ProtocolVersion, ProtocolVersion) {
+ (READER_VERSION, WRITER_VERSION)
+}
+
+/// Protocol action used to block older clients from reading or writing the log when
+/// backwards-incompatible changes are made to the protocol. Clients should be tolerant of
+/// messages and fields that they do not understand.
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
+pub struct ProtocolAction {
+ pub min_reader_version: ProtocolVersion,
+ pub min_writer_version: ProtocolVersion,
+}
+
+impl Default for ProtocolAction {
+ fn default() -> Self {
+ let (min_reader_version, min_writer_version) = supported_protocol_version();
+ Self {
+ min_reader_version,
+ min_writer_version,
+ }
+ }
+}
+
+impl ProtocolAction {
+ pub fn new() -> Self {
+ Self::default()
+ }
+
+ pub fn is_readable(&self, reader_version: ProtocolVersion) -> bool {
+ reader_version >= self.min_reader_version
+ }
+
+ pub fn is_writable(&self, writer_version: ProtocolVersion) -> bool {
+ writer_version >= self.min_writer_version
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use serde_json as json;
+
+ use super::*;
+
+ #[test]
+ fn test_protocol_action() {
+ let mut action = ProtocolAction::new();
+
+ assert!(action.is_readable(0));
+ assert!(action.is_writable(0));
+ action.min_reader_version = 2;
+ action.min_writer_version = 3;
+ assert!(!action.is_readable(0));
+ assert!(!action.is_writable(0));
+ assert!(action.is_readable(2));
+ assert!(action.is_writable(3));
+ assert!(action.is_readable(3));
+ assert!(action.is_writable(4));
+
+ let s = json::to_string(&action).unwrap();
+ assert_eq!("{\"min_reader_version\":2,\"min_writer_version\":3}", s);
+
+ let action_decoded: ProtocolAction = json::from_str(&s).unwrap();
+ assert!(!action_decoded.is_readable(0));
+ assert!(!action_decoded.is_writable(0));
+ assert!(action_decoded.is_readable(2));
+ assert!(action_decoded.is_writable(3));
+ assert!(action_decoded.is_readable(3));
+ assert!(action_decoded.is_writable(4));
+ }
+}
|
feat
|
Cherry picks lost commits of flush (#111)
|
1a21a6ea417810d0cc6ef0992ba61dc20856e68a
|
2023-04-11 08:51:29
|
localhost
|
chore: set metasrv and datanode heartbeat log level to trace (#1357)
| false
|
diff --git a/src/datanode/src/heartbeat.rs b/src/datanode/src/heartbeat.rs
index 9cd083969226..c49b9dbc4ae3 100644
--- a/src/datanode/src/heartbeat.rs
+++ b/src/datanode/src/heartbeat.rs
@@ -18,7 +18,7 @@ use std::time::Duration;
use api::v1::meta::{HeartbeatRequest, HeartbeatResponse, NodeStat, Peer};
use catalog::{datanode_stat, CatalogManagerRef};
-use common_telemetry::{error, info, warn};
+use common_telemetry::{error, info, trace, warn};
use meta_client::client::{HeartbeatSender, MetaClient};
use snafu::ResultExt;
@@ -84,7 +84,7 @@ impl HeartbeatTask {
}
async fn handle_response(resp: HeartbeatResponse) {
- info!("heartbeat response: {:?}", resp);
+ trace!("heartbeat response: {:?}", resp);
}
/// Start heartbeat task, spawn background task.
diff --git a/src/meta-client/examples/meta_client.rs b/src/meta-client/examples/meta_client.rs
index dd2fe618e6bc..ecfc1a0c120c 100644
--- a/src/meta-client/examples/meta_client.rs
+++ b/src/meta-client/examples/meta_client.rs
@@ -72,7 +72,7 @@ async fn run() {
tokio::spawn(async move {
while let Some(res) = receiver.message().await.unwrap() {
- event!(Level::INFO, "heartbeat response: {:#?}", res);
+ event!(Level::TRACE, "heartbeat response: {:#?}", res);
}
});
|
chore
|
set metasrv and datanode heartbeat log level to trace (#1357)
|
f0a519b71b40f8e0cd0b2925ee36af931373f898
|
2023-05-26 14:55:18
|
fys
|
chore: reduce the number of requests for meta (#1647)
| false
|
diff --git a/src/frontend/src/catalog.rs b/src/frontend/src/catalog.rs
index 58a58d62a1c9..8abeb3f96e0d 100644
--- a/src/frontend/src/catalog.rs
+++ b/src/frontend/src/catalog.rs
@@ -94,6 +94,18 @@ impl FrontendCatalogManager {
self.datanode_clients.clone()
}
+ pub async fn invalidate_schema(&self, catalog: &str, schema: &str) {
+ let schema_key = SchemaKey {
+ catalog_name: catalog.into(),
+ schema_name: schema.into(),
+ }
+ .to_string();
+
+ let key = schema_key.as_bytes();
+
+ self.backend_cache_invalidtor.invalidate_key(key).await;
+ }
+
pub async fn invalidate_table(&self, catalog: &str, schema: &str, table: &str) {
let tg_key = TableGlobalKey {
catalog_name: catalog.into(),
@@ -263,6 +275,7 @@ impl CatalogManager for FrontendCatalogManager {
catalog_name: catalog.to_string(),
}
.to_string();
+
Ok(self.backend.get(key.as_bytes()).await?.map(|_| {
Arc::new(FrontendCatalogProvider {
catalog_name: catalog.to_string(),
@@ -340,18 +353,27 @@ impl CatalogProvider for FrontendCatalogProvider {
}
async fn schema(&self, name: &str) -> catalog::error::Result<Option<SchemaProviderRef>> {
- let all_schemas = self.schema_names().await?;
- if all_schemas.contains(&name.to_string()) {
- Ok(Some(Arc::new(FrontendSchemaProvider {
- catalog_name: self.catalog_name.clone(),
+ let catalog = &self.catalog_name;
+
+ let schema_key = SchemaKey {
+ catalog_name: catalog.clone(),
+ schema_name: name.to_string(),
+ }
+ .to_string();
+
+ let val = self.backend.get(schema_key.as_bytes()).await?;
+
+ let provider = val.map(|_| {
+ Arc::new(FrontendSchemaProvider {
+ catalog_name: catalog.clone(),
schema_name: name.to_string(),
backend: self.backend.clone(),
partition_manager: self.partition_manager.clone(),
datanode_clients: self.datanode_clients.clone(),
- })))
- } else {
- Ok(None)
- }
+ }) as Arc<dyn SchemaProvider>
+ });
+
+ Ok(provider)
}
}
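The schema lookup above switches from listing every schema to a single point lookup of the schema key; a hedged sketch of that idea using a hypothetical key-value trait (the real code goes through SchemaKey::to_string() and the catalog's KvBackend, both elided here):

// A hypothetical key-value backend trait standing in for the real KvBackend.
#[async_trait::async_trait]
trait KvBackend: Send + Sync {
    async fn get(&self, key: &[u8]) -> Option<Vec<u8>>;
}

// Probe the backend for the schema key directly instead of listing every schema
// and checking membership. The key layout below is illustrative only.
async fn schema_exists(backend: &dyn KvBackend, catalog: &str, schema: &str) -> bool {
    let key = format!("__schema-{catalog}-{schema}");
    backend.get(key.as_bytes()).await.is_some()
}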
diff --git a/src/frontend/src/instance/distributed.rs b/src/frontend/src/instance/distributed.rs
index fbeecdb5b467..8d5cb9289dcf 100644
--- a/src/frontend/src/instance/distributed.rs
+++ b/src/frontend/src/instance/distributed.rs
@@ -463,8 +463,8 @@ impl DistInstance {
}
let key = SchemaKey {
- catalog_name: catalog,
- schema_name: expr.database_name,
+ catalog_name: catalog.clone(),
+ schema_name: expr.database_name.clone(),
};
let value = SchemaValue {};
let client = self
@@ -475,10 +475,12 @@ impl DistInstance {
let request = CompareAndPutRequest::new()
.with_key(key.to_string())
.with_value(value.as_bytes().context(CatalogEntrySerdeSnafu)?);
+
let response = client
.compare_and_put(request.into())
.await
.context(RequestMetaSnafu)?;
+
ensure!(
response.success,
SchemaExistsSnafu {
@@ -486,6 +488,14 @@ impl DistInstance {
}
);
+ // Since the database created on meta does not go through the KvBackend, we manually
+ // invalidate the cache here.
+ //
+ // TODO(fys): when the meta invalidation cache mechanism is established, remove it.
+ self.catalog_manager()
+ .invalidate_schema(&catalog, &expr.database_name)
+ .await;
+
Ok(Output::AffectedRows(1))
}
|
chore
|
reduce the number of requests for meta (#1647)
|
3973d6b01f593794f68b6a0a43d0b6ce252fbe20
|
2024-08-23 18:06:28
|
fys
|
chore: optimize common_version build (#4611)
| false
|
diff --git a/src/common/version/build.rs b/src/common/version/build.rs
index 73089d68cc57..7b784d50859c 100644
--- a/src/common/version/build.rs
+++ b/src/common/version/build.rs
@@ -19,7 +19,11 @@ use build_data::{format_timestamp, get_source_time};
use shadow_rs::{CARGO_METADATA, CARGO_TREE};
fn main() -> shadow_rs::SdResult<()> {
- println!("cargo:rerun-if-changed=../../../.git/refs/heads");
+ println!(
+ "cargo:rerun-if-changed={}/.git/refs/heads",
+ env!("CARGO_RUSTC_CURRENT_DIR")
+ );
+
println!(
"cargo:rustc-env=SOURCE_TIMESTAMP={}",
if let Ok(t) = get_source_time() {
|
chore
|
optimize common_version build (#4611)
|
e3c0b5482f379f71433eb2fbd0d8ea63c38018f1
|
2024-09-24 14:15:55
|
Ning Sun
|
feat: returning warning instead of error on unsupported `SET` statement (#4761)
| false
|
diff --git a/src/operator/src/statement.rs b/src/operator/src/statement.rs
index 4dc43e0d92e9..7c76d0dcfffc 100644
--- a/src/operator/src/statement.rs
+++ b/src/operator/src/statement.rs
@@ -46,7 +46,7 @@ use datafusion_expr::LogicalPlan;
use partition::manager::{PartitionRuleManager, PartitionRuleManagerRef};
use query::parser::QueryStatement;
use query::QueryEngineRef;
-use session::context::QueryContextRef;
+use session::context::{Channel, QueryContextRef};
use session::table_name::table_idents_to_full_name;
use snafu::{ensure, OptionExt, ResultExt};
use sql::statements::copy::{CopyDatabase, CopyDatabaseArgument, CopyTable, CopyTableArgument};
@@ -338,10 +338,18 @@ impl StatementExecutor {
"CLIENT_ENCODING" => validate_client_encoding(set_var)?,
_ => {
- return NotSupportedSnafu {
- feat: format!("Unsupported set variable {}", var_name),
+ // For Postgres, we answer unknown SET statements with a warning plus
+ // success; this prevents the SET call from becoming a blocker
+ // of connection establishment.
+ //
+ if query_ctx.channel() == Channel::Postgres {
+ query_ctx.set_warning(format!("Unsupported set variable {}", var_name));
+ } else {
+ return NotSupportedSnafu {
+ feat: format!("Unsupported set variable {}", var_name),
+ }
+ .fail();
}
- .fail()
}
}
Ok(Output::new_with_affected_rows(0))
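A compact sketch of the channel-dependent fallback this hunk adds, with simplified stand-ins for the session Channel and warning slot (not the real session API): unknown SET variables become a session warning for Postgres clients so the statement still succeeds, and stay a hard error for every other channel.

#[derive(PartialEq)]
enum Channel {
    Postgres,
    Mysql,
}

struct SessionState {
    warning: Option<String>,
}

fn handle_unknown_set(
    channel: Channel,
    var_name: &str,
    session: &mut SessionState,
) -> Result<(), String> {
    if channel == Channel::Postgres {
        // Degrade to a warning so connection setup is not blocked by the SET call.
        session.warning = Some(format!("Unsupported set variable {var_name}"));
        Ok(())
    } else {
        Err(format!("Unsupported set variable {var_name}"))
    }
}

fn main() {
    let mut session = SessionState { warning: None };
    assert!(handle_unknown_set(Channel::Postgres, "statement_timeout", &mut session).is_ok());
    assert!(session.warning.is_some());
    assert!(handle_unknown_set(Channel::Mysql, "statement_timeout", &mut session).is_err());
}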
diff --git a/src/servers/src/postgres/handler.rs b/src/servers/src/postgres/handler.rs
index 158e2cab4da9..522c558cdc71 100644
--- a/src/servers/src/postgres/handler.rs
+++ b/src/servers/src/postgres/handler.rs
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::fmt::Debug;
use std::sync::Arc;
use async_trait::async_trait;
@@ -23,7 +24,7 @@ use common_telemetry::{debug, error, tracing};
use datafusion_common::ParamValues;
use datatypes::prelude::ConcreteDataType;
use datatypes::schema::SchemaRef;
-use futures::{future, stream, Stream, StreamExt};
+use futures::{future, stream, Sink, SinkExt, Stream, StreamExt};
use pgwire::api::portal::{Format, Portal};
use pgwire::api::query::{ExtendedQueryHandler, SimpleQueryHandler};
use pgwire::api::results::{
@@ -32,6 +33,7 @@ use pgwire::api::results::{
use pgwire::api::stmt::{QueryParser, StoredStatement};
use pgwire::api::{ClientInfo, Type};
use pgwire::error::{ErrorInfo, PgWireError, PgWireResult};
+use pgwire::messages::PgWireBackendMessage;
use query::query_engine::DescribeResult;
use session::context::QueryContextRef;
use session::Session;
@@ -49,11 +51,13 @@ impl SimpleQueryHandler for PostgresServerHandlerInner {
#[tracing::instrument(skip_all, fields(protocol = "postgres"))]
async fn do_query<'a, C>(
&self,
- _client: &mut C,
+ client: &mut C,
query: &'a str,
) -> PgWireResult<Vec<Response<'a>>>
where
- C: ClientInfo + Unpin + Send + Sync,
+ C: ClientInfo + Sink<PgWireBackendMessage> + Unpin + Send + Sync,
+ C::Error: Debug,
+ PgWireError: From<<C as Sink<PgWireBackendMessage>>::Error>,
{
let query_ctx = self.session.new_query_context();
let db = query_ctx.get_db_string();
@@ -67,6 +71,7 @@ impl SimpleQueryHandler for PostgresServerHandlerInner {
}
if let Some(resps) = fixtures::process(query, query_ctx.clone()) {
+ send_warning_opt(client, query_ctx).await?;
Ok(resps)
} else {
let outputs = self.query_handler.do_query(query, query_ctx.clone()).await;
@@ -79,11 +84,34 @@ impl SimpleQueryHandler for PostgresServerHandlerInner {
results.push(resp);
}
+ send_warning_opt(client, query_ctx).await?;
Ok(results)
}
}
}
+async fn send_warning_opt<C>(client: &mut C, query_context: QueryContextRef) -> PgWireResult<()>
+where
+ C: Sink<PgWireBackendMessage> + Unpin + Send + Sync,
+ C::Error: Debug,
+ PgWireError: From<<C as Sink<PgWireBackendMessage>>::Error>,
+{
+ if let Some(warning) = query_context.warning() {
+ client
+ .feed(PgWireBackendMessage::NoticeResponse(
+ ErrorInfo::new(
+ PgErrorSeverity::Warning.to_string(),
+ PgErrorCode::Ec01000.code(),
+ warning.to_string(),
+ )
+ .into(),
+ ))
+ .await?;
+ }
+
+ Ok(())
+}
+
pub(crate) fn output_to_query_response<'a>(
query_ctx: QueryContextRef,
output: Result<Output>,
@@ -247,12 +275,14 @@ impl ExtendedQueryHandler for PostgresServerHandlerInner {
async fn do_query<'a, C>(
&self,
- _client: &mut C,
+ client: &mut C,
portal: &'a Portal<Self::Statement>,
_max_rows: usize,
) -> PgWireResult<Response<'a>>
where
- C: ClientInfo + Unpin + Send + Sync,
+ C: ClientInfo + Sink<PgWireBackendMessage> + Unpin + Send + Sync,
+ C::Error: Debug,
+ PgWireError: From<<C as Sink<PgWireBackendMessage>>::Error>,
{
let query_ctx = self.session.new_query_context();
let db = query_ctx.get_db_string();
@@ -268,6 +298,7 @@ impl ExtendedQueryHandler for PostgresServerHandlerInner {
}
if let Some(mut resps) = fixtures::process(&sql_plan.query, query_ctx.clone()) {
+ send_warning_opt(client, query_ctx).await?;
// if the statement matches our predefined rules, return it early
return Ok(resps.remove(0));
}
@@ -297,6 +328,7 @@ impl ExtendedQueryHandler for PostgresServerHandlerInner {
.remove(0)
};
+ send_warning_opt(client, query_ctx.clone()).await?;
output_to_query_response(query_ctx, output, &portal.result_column_format)
}
diff --git a/src/servers/src/postgres/types.rs b/src/servers/src/postgres/types.rs
index 2e4a805ef0bc..a5d1d392ac3b 100644
--- a/src/servers/src/postgres/types.rs
+++ b/src/servers/src/postgres/types.rs
@@ -37,7 +37,7 @@ use session::session_config::PGByteaOutputValue;
use self::bytea::{EscapeOutputBytea, HexOutputBytea};
use self::datetime::{StylingDate, StylingDateTime};
-pub use self::error::PgErrorCode;
+pub use self::error::{PgErrorCode, PgErrorSeverity};
use self::interval::PgInterval;
use crate::error::{self as server_error, Error, Result};
use crate::SqlPlan;
diff --git a/src/servers/src/postgres/types/error.rs b/src/servers/src/postgres/types/error.rs
index 928c5454ce27..9e6f570f2610 100644
--- a/src/servers/src/postgres/types/error.rs
+++ b/src/servers/src/postgres/types/error.rs
@@ -19,7 +19,7 @@ use strum::{AsRefStr, Display, EnumIter, EnumMessage};
#[derive(Display, Debug, PartialEq)]
#[allow(dead_code)]
-enum ErrorSeverity {
+pub enum PgErrorSeverity {
#[strum(serialize = "INFO")]
Info,
#[strum(serialize = "DEBUG")]
@@ -335,23 +335,23 @@ pub enum PgErrorCode {
}
impl PgErrorCode {
- fn severity(&self) -> ErrorSeverity {
+ fn severity(&self) -> PgErrorSeverity {
match self {
- PgErrorCode::Ec00000 => ErrorSeverity::Info,
- PgErrorCode::Ec01000 => ErrorSeverity::Warning,
+ PgErrorCode::Ec00000 => PgErrorSeverity::Info,
+ PgErrorCode::Ec01000 => PgErrorSeverity::Warning,
PgErrorCode::EcXX000 | PgErrorCode::Ec42P14 | PgErrorCode::Ec22023 => {
- ErrorSeverity::Error
+ PgErrorSeverity::Error
}
PgErrorCode::Ec28000 | PgErrorCode::Ec28P01 | PgErrorCode::Ec3D000 => {
- ErrorSeverity::Fatal
+ PgErrorSeverity::Fatal
}
- _ => ErrorSeverity::Error,
+ _ => PgErrorSeverity::Error,
}
}
- fn code(&self) -> String {
+ pub(crate) fn code(&self) -> String {
self.as_ref()[2..].to_string()
}
@@ -428,19 +428,19 @@ mod tests {
use common_error::status_code::StatusCode;
use strum::{EnumMessage, IntoEnumIterator};
- use super::{ErrorInfo, ErrorSeverity, PgErrorCode};
+ use super::{ErrorInfo, PgErrorCode, PgErrorSeverity};
#[test]
fn test_error_severity() {
// test for ErrorSeverity enum
- assert_eq!("INFO", ErrorSeverity::Info.to_string());
- assert_eq!("DEBUG", ErrorSeverity::Debug.to_string());
- assert_eq!("NOTICE", ErrorSeverity::Notice.to_string());
- assert_eq!("WARNING", ErrorSeverity::Warning.to_string());
+ assert_eq!("INFO", PgErrorSeverity::Info.to_string());
+ assert_eq!("DEBUG", PgErrorSeverity::Debug.to_string());
+ assert_eq!("NOTICE", PgErrorSeverity::Notice.to_string());
+ assert_eq!("WARNING", PgErrorSeverity::Warning.to_string());
- assert_eq!("ERROR", ErrorSeverity::Error.to_string());
- assert_eq!("FATAL", ErrorSeverity::Fatal.to_string());
- assert_eq!("PANIC", ErrorSeverity::Panic.to_string());
+ assert_eq!("ERROR", PgErrorSeverity::Error.to_string());
+ assert_eq!("FATAL", PgErrorSeverity::Fatal.to_string());
+ assert_eq!("PANIC", PgErrorSeverity::Panic.to_string());
// test for severity method
for code in PgErrorCode::iter() {
@@ -448,13 +448,13 @@ mod tests {
assert_eq!("Ec", &name[0..2]);
if name.starts_with("Ec00") {
- assert_eq!(ErrorSeverity::Info, code.severity());
+ assert_eq!(PgErrorSeverity::Info, code.severity());
} else if name.starts_with("Ec01") {
- assert_eq!(ErrorSeverity::Warning, code.severity());
+ assert_eq!(PgErrorSeverity::Warning, code.severity());
} else if name.starts_with("Ec28") || name.starts_with("Ec3D") {
- assert_eq!(ErrorSeverity::Fatal, code.severity());
+ assert_eq!(PgErrorSeverity::Fatal, code.severity());
} else {
- assert_eq!(ErrorSeverity::Error, code.severity());
+ assert_eq!(PgErrorSeverity::Error, code.severity());
}
}
}
diff --git a/src/session/src/context.rs b/src/session/src/context.rs
index 70168d9498eb..f85a8ceea313 100644
--- a/src/session/src/context.rs
+++ b/src/session/src/context.rs
@@ -40,7 +40,9 @@ pub struct QueryContext {
current_catalog: String,
// we use Arc<RwLock>> for modifiable fields
#[builder(default)]
- mutable_inner: Arc<RwLock<MutableInner>>,
+ mutable_session_data: Arc<RwLock<MutableInner>>,
+ #[builder(default)]
+ mutable_query_context_data: Arc<RwLock<QueryContextMutableFields>>,
sql_dialect: Arc<dyn Dialect + Send + Sync>,
#[builder(default)]
extensions: HashMap<String, String>,
@@ -52,6 +54,12 @@ pub struct QueryContext {
channel: Channel,
}
+/// These fields hold data that is only valid for the current query context
+#[derive(Debug, Builder, Clone, Default)]
+pub struct QueryContextMutableFields {
+ warning: Option<String>,
+}
+
impl Display for QueryContext {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(
@@ -65,21 +73,26 @@ impl Display for QueryContext {
impl QueryContextBuilder {
pub fn current_schema(mut self, schema: String) -> Self {
- if self.mutable_inner.is_none() {
- self.mutable_inner = Some(Arc::new(RwLock::new(MutableInner::default())));
+ if self.mutable_session_data.is_none() {
+ self.mutable_session_data = Some(Arc::new(RwLock::new(MutableInner::default())));
}
// safe for unwrap because previous none check
- self.mutable_inner.as_mut().unwrap().write().unwrap().schema = schema;
+ self.mutable_session_data
+ .as_mut()
+ .unwrap()
+ .write()
+ .unwrap()
+ .schema = schema;
self
}
pub fn timezone(mut self, timezone: Timezone) -> Self {
- if self.mutable_inner.is_none() {
- self.mutable_inner = Some(Arc::new(RwLock::new(MutableInner::default())));
+ if self.mutable_session_data.is_none() {
+ self.mutable_session_data = Some(Arc::new(RwLock::new(MutableInner::default())));
}
- self.mutable_inner
+ self.mutable_session_data
.as_mut()
.unwrap()
.write()
@@ -120,7 +133,7 @@ impl From<QueryContext> for api::v1::QueryContext {
fn from(
QueryContext {
current_catalog,
- mutable_inner,
+ mutable_session_data: mutable_inner,
extensions,
channel,
..
@@ -182,11 +195,11 @@ impl QueryContext {
}
pub fn current_schema(&self) -> String {
- self.mutable_inner.read().unwrap().schema.clone()
+ self.mutable_session_data.read().unwrap().schema.clone()
}
pub fn set_current_schema(&self, new_schema: &str) {
- self.mutable_inner.write().unwrap().schema = new_schema.to_string();
+ self.mutable_session_data.write().unwrap().schema = new_schema.to_string();
}
pub fn current_catalog(&self) -> &str {
@@ -208,19 +221,19 @@ impl QueryContext {
}
pub fn timezone(&self) -> Timezone {
- self.mutable_inner.read().unwrap().timezone.clone()
+ self.mutable_session_data.read().unwrap().timezone.clone()
}
pub fn set_timezone(&self, timezone: Timezone) {
- self.mutable_inner.write().unwrap().timezone = timezone;
+ self.mutable_session_data.write().unwrap().timezone = timezone;
}
pub fn current_user(&self) -> UserInfoRef {
- self.mutable_inner.read().unwrap().user_info.clone()
+ self.mutable_session_data.read().unwrap().user_info.clone()
}
pub fn set_current_user(&self, user: UserInfoRef) {
- self.mutable_inner.write().unwrap().user_info = user;
+ self.mutable_session_data.write().unwrap().user_info = user;
}
pub fn set_extension<S1: Into<String>, S2: Into<String>>(&mut self, key: S1, value: S2) {
@@ -257,6 +270,18 @@ impl QueryContext {
pub fn set_channel(&mut self, channel: Channel) {
self.channel = channel;
}
+
+ pub fn warning(&self) -> Option<String> {
+ self.mutable_query_context_data
+ .read()
+ .unwrap()
+ .warning
+ .clone()
+ }
+
+ pub fn set_warning(&self, msg: String) {
+ self.mutable_query_context_data.write().unwrap().warning = Some(msg);
+ }
}
impl QueryContextBuilder {
@@ -266,7 +291,8 @@ impl QueryContextBuilder {
current_catalog: self
.current_catalog
.unwrap_or_else(|| DEFAULT_CATALOG_NAME.to_string()),
- mutable_inner: self.mutable_inner.unwrap_or_default(),
+ mutable_session_data: self.mutable_session_data.unwrap_or_default(),
+ mutable_query_context_data: self.mutable_query_context_data.unwrap_or_default(),
sql_dialect: self
.sql_dialect
.unwrap_or_else(|| Arc::new(GreptimeDbDialect {})),
diff --git a/src/session/src/lib.rs b/src/session/src/lib.rs
index ecfc02f23001..33bd140c7057 100644
--- a/src/session/src/lib.rs
+++ b/src/session/src/lib.rs
@@ -76,7 +76,7 @@ impl Session {
// catalog is not allowed for update in query context so we use
// string here
.current_catalog(self.catalog.read().unwrap().clone())
- .mutable_inner(self.mutable_inner.clone())
+ .mutable_session_data(self.mutable_inner.clone())
.sql_dialect(self.conn_info.channel.dialect())
.configuration_parameter(self.configuration_variables.clone())
.channel(self.conn_info.channel)
|
feat
|
returning warning instead of error on unsupported `SET` statement (#4761)
|
18b77408aee3a75020688cf37887262b71968715
|
2025-01-16 08:40:43
|
Weny Xu
|
feat: introduce `SparsePrimaryKeyCodec` and `SparsePrimaryKeyFilter` (#5365)
| false
|
diff --git a/src/mito2/src/memtable/partition_tree.rs b/src/mito2/src/memtable/partition_tree.rs
index 458d6a6d69c5..df81c8dd010f 100644
--- a/src/mito2/src/memtable/partition_tree.rs
+++ b/src/mito2/src/memtable/partition_tree.rs
@@ -19,6 +19,9 @@ mod dedup;
mod dict;
mod merger;
mod partition;
+// TODO(weny): remove this
+#[allow(unused)]
+mod primary_key_filter;
mod shard;
mod shard_builder;
mod tree;
@@ -28,7 +31,7 @@ use std::sync::atomic::{AtomicI64, AtomicU64, AtomicUsize, Ordering};
use std::sync::Arc;
use common_base::readable_size::ReadableSize;
-pub(crate) use partition::DensePrimaryKeyFilter;
+pub(crate) use primary_key_filter::DensePrimaryKeyFilter;
use serde::{Deserialize, Serialize};
use store_api::codec::PrimaryKeyEncoding;
use store_api::metadata::RegionMetadataRef;
diff --git a/src/mito2/src/memtable/partition_tree/partition.rs b/src/mito2/src/memtable/partition_tree/partition.rs
index d527f581f9fe..75af79f14520 100644
--- a/src/mito2/src/memtable/partition_tree/partition.rs
+++ b/src/mito2/src/memtable/partition_tree/partition.rs
@@ -39,7 +39,7 @@ use crate::memtable::partition_tree::{PartitionTreeConfig, PkId};
use crate::memtable::stats::WriteMetrics;
use crate::metrics::PARTITION_TREE_READ_STAGE_ELAPSED;
use crate::read::{Batch, BatchBuilder};
-use crate::row_converter::{DensePrimaryKeyCodec, PrimaryKeyCodec, PrimaryKeyFilter};
+use crate::row_converter::{PrimaryKeyCodec, PrimaryKeyFilter};
/// Key of a partition.
pub type PartitionKey = u32;
@@ -381,80 +381,6 @@ impl PartitionReader {
}
}
-/// Dense primary key filter.
-#[derive(Clone)]
-pub struct DensePrimaryKeyFilter {
- metadata: RegionMetadataRef,
- filters: Arc<Vec<SimpleFilterEvaluator>>,
- codec: DensePrimaryKeyCodec,
- offsets_buf: Vec<usize>,
-}
-
-impl DensePrimaryKeyFilter {
- pub(crate) fn new(
- metadata: RegionMetadataRef,
- filters: Arc<Vec<SimpleFilterEvaluator>>,
- codec: DensePrimaryKeyCodec,
- ) -> Self {
- Self {
- metadata,
- filters,
- codec,
- offsets_buf: Vec::new(),
- }
- }
-}
-
-impl PrimaryKeyFilter for DensePrimaryKeyFilter {
- fn prune_primary_key(&mut self, pk: &[u8]) -> bool {
- if self.filters.is_empty() {
- return true;
- }
-
- // no primary key, we simply return true.
- if self.metadata.primary_key.is_empty() {
- return true;
- }
-
- // evaluate filters against primary key values
- let mut result = true;
- self.offsets_buf.clear();
- for filter in &*self.filters {
- if Partition::is_partition_column(filter.column_name()) {
- continue;
- }
- let Some(column) = self.metadata.column_by_name(filter.column_name()) else {
- continue;
- };
- // ignore filters that are not referencing primary key columns
- if column.semantic_type != SemanticType::Tag {
- continue;
- }
- // index of the column in primary keys.
- // Safety: A tag column is always in primary key.
- let index = self.metadata.primary_key_index(column.column_id).unwrap();
- let value = match self.codec.decode_value_at(pk, index, &mut self.offsets_buf) {
- Ok(v) => v,
- Err(e) => {
- common_telemetry::error!(e; "Failed to decode primary key");
- return true;
- }
- };
-
- // TODO(yingwen): `evaluate_scalar()` creates temporary arrays to compare scalars. We
- // can compare the bytes directly without allocation and matching types as we use
- // comparable encoding.
- // Safety: arrow schema and datatypes are constructed from the same source.
- let scalar_value = value
- .try_to_scalar_value(&column.column_schema.data_type)
- .unwrap();
- result &= filter.evaluate_scalar(&scalar_value).unwrap_or(true);
- }
-
- result
- }
-}
-
/// Structs to reuse across readers to avoid allocating for each reader.
pub(crate) struct ReadPartitionContext {
metadata: RegionMetadataRef,
diff --git a/src/mito2/src/memtable/partition_tree/primary_key_filter.rs b/src/mito2/src/memtable/partition_tree/primary_key_filter.rs
new file mode 100644
index 000000000000..ce4873ab48bd
--- /dev/null
+++ b/src/mito2/src/memtable/partition_tree/primary_key_filter.rs
@@ -0,0 +1,342 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::collections::HashMap;
+use std::sync::Arc;
+
+use api::v1::SemanticType;
+use common_recordbatch::filter::SimpleFilterEvaluator;
+use datatypes::value::Value;
+use store_api::metadata::RegionMetadataRef;
+use store_api::storage::ColumnId;
+
+use crate::error::Result;
+use crate::memtable::partition_tree::partition::Partition;
+use crate::row_converter::{DensePrimaryKeyCodec, PrimaryKeyFilter, SparsePrimaryKeyCodec};
+
+#[derive(Clone)]
+struct PrimaryKeyFilterInner {
+ metadata: RegionMetadataRef,
+ filters: Arc<Vec<SimpleFilterEvaluator>>,
+}
+
+impl PrimaryKeyFilterInner {
+ fn evaluate_filters(
+ &self,
+ pk: &[u8],
+ mut decode_fn: impl FnMut(ColumnId, &RegionMetadataRef) -> Result<Value>,
+ ) -> bool {
+ if self.filters.is_empty() || self.metadata.primary_key.is_empty() {
+ return true;
+ }
+
+ let mut result = true;
+ for filter in self.filters.iter() {
+ if Partition::is_partition_column(filter.column_name()) {
+ continue;
+ }
+ let Some(column) = self.metadata.column_by_name(filter.column_name()) else {
+ continue;
+ };
+ // ignore filters that are not referencing primary key columns
+ if column.semantic_type != SemanticType::Tag {
+ continue;
+ }
+
+ let value = match decode_fn(column.column_id, &self.metadata) {
+ Ok(v) => v,
+ Err(e) => {
+ common_telemetry::error!(e; "Failed to decode primary key");
+ return true;
+ }
+ };
+
+ // TODO(yingwen): `evaluate_scalar()` creates temporary arrays to compare scalars. We
+ // can compare the bytes directly without allocation and matching types as we use
+ // comparable encoding.
+ // Safety: arrow schema and datatypes are constructed from the same source.
+ let scalar_value = value
+ .try_to_scalar_value(&column.column_schema.data_type)
+ .unwrap();
+ result &= filter.evaluate_scalar(&scalar_value).unwrap_or(true);
+ }
+
+ result
+ }
+}
+
+/// Dense primary key filter.
+#[derive(Clone)]
+pub struct DensePrimaryKeyFilter {
+ inner: PrimaryKeyFilterInner,
+ codec: DensePrimaryKeyCodec,
+ offsets_buf: Vec<usize>,
+}
+
+impl DensePrimaryKeyFilter {
+ pub(crate) fn new(
+ metadata: RegionMetadataRef,
+ filters: Arc<Vec<SimpleFilterEvaluator>>,
+ codec: DensePrimaryKeyCodec,
+ ) -> Self {
+ Self {
+ inner: PrimaryKeyFilterInner { metadata, filters },
+ codec,
+ offsets_buf: Vec::new(),
+ }
+ }
+}
+
+impl PrimaryKeyFilter for DensePrimaryKeyFilter {
+ fn matches(&mut self, pk: &[u8]) -> bool {
+ self.offsets_buf.clear();
+ self.inner.evaluate_filters(pk, |column_id, metadata| {
+ // index of tag column in primary key
+ // Safety: A tag column is always in primary key.
+ let index = metadata.primary_key_index(column_id).unwrap();
+ self.codec.decode_value_at(pk, index, &mut self.offsets_buf)
+ })
+ }
+}
+
+/// Sparse primary key filter.
+#[derive(Clone)]
+pub struct SparsePrimaryKeyFilter {
+ inner: PrimaryKeyFilterInner,
+ codec: SparsePrimaryKeyCodec,
+ offsets_map: HashMap<ColumnId, usize>,
+}
+
+impl SparsePrimaryKeyFilter {
+ pub(crate) fn new(
+ metadata: RegionMetadataRef,
+ filters: Arc<Vec<SimpleFilterEvaluator>>,
+ codec: SparsePrimaryKeyCodec,
+ ) -> Self {
+ Self {
+ inner: PrimaryKeyFilterInner { metadata, filters },
+ codec,
+ offsets_map: HashMap::new(),
+ }
+ }
+}
+
+impl PrimaryKeyFilter for SparsePrimaryKeyFilter {
+ fn matches(&mut self, pk: &[u8]) -> bool {
+ self.offsets_map.clear();
+ self.inner.evaluate_filters(pk, |column_id, _| {
+ if let Some(offset) = self.codec.has_column(pk, &mut self.offsets_map, column_id) {
+ self.codec.decode_value_at(pk, offset, column_id)
+ } else {
+ Ok(Value::Null)
+ }
+ })
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::sync::Arc;
+
+ use api::v1::SemanticType;
+ use common_time::timestamp::TimeUnit;
+ use common_time::Timestamp;
+ use datafusion::execution::context::ExecutionProps;
+ use datafusion::logical_expr::{col, lit, BinaryExpr};
+ use datafusion::physical_expr::create_physical_expr;
+ use datafusion_common::{Column, DFSchema, ScalarValue};
+ use datafusion_expr::{Expr, Operator};
+ use datatypes::arrow::datatypes::{DataType, Field, Schema};
+ use datatypes::prelude::ConcreteDataType;
+ use datatypes::schema::ColumnSchema;
+ use datatypes::value::{OrderedFloat, Value, ValueRef};
+ use store_api::metadata::{ColumnMetadata, RegionMetadataBuilder};
+ use store_api::metric_engine_consts::{
+ DATA_SCHEMA_TABLE_ID_COLUMN_NAME, DATA_SCHEMA_TSID_COLUMN_NAME,
+ };
+ use store_api::storage::consts::ReservedColumnId;
+ use store_api::storage::{ColumnId, RegionId};
+
+ use super::*;
+ use crate::row_converter::PrimaryKeyCodecExt;
+
+ fn setup_metadata() -> RegionMetadataRef {
+ let mut builder = RegionMetadataBuilder::new(RegionId::new(1, 1));
+ builder
+ .push_column_metadata(ColumnMetadata {
+ column_schema: ColumnSchema::new("pod", ConcreteDataType::string_datatype(), true),
+ semantic_type: SemanticType::Tag,
+ column_id: 1,
+ })
+ .push_column_metadata(ColumnMetadata {
+ column_schema: ColumnSchema::new(
+ "namespace",
+ ConcreteDataType::string_datatype(),
+ true,
+ ),
+ semantic_type: SemanticType::Tag,
+ column_id: 2,
+ })
+ .push_column_metadata(ColumnMetadata {
+ column_schema: ColumnSchema::new(
+ "container",
+ ConcreteDataType::string_datatype(),
+ true,
+ ),
+ semantic_type: SemanticType::Tag,
+ column_id: 3,
+ })
+ .push_column_metadata(ColumnMetadata {
+ column_schema: ColumnSchema::new(
+ "greptime_value",
+ ConcreteDataType::float64_datatype(),
+ false,
+ ),
+ semantic_type: SemanticType::Field,
+ column_id: 4,
+ })
+ .push_column_metadata(ColumnMetadata {
+ column_schema: ColumnSchema::new(
+ "greptime_timestamp",
+ ConcreteDataType::timestamp_nanosecond_datatype(),
+ false,
+ ),
+ semantic_type: SemanticType::Timestamp,
+ column_id: 5,
+ })
+ .primary_key(vec![1, 2, 3]);
+ let metadata = builder.build().unwrap();
+ Arc::new(metadata)
+ }
+
+ fn create_test_row() -> Vec<(ColumnId, ValueRef<'static>)> {
+ vec![
+ (1, ValueRef::String("greptime-frontend-6989d9899-22222")),
+ (2, ValueRef::String("greptime-cluster")),
+ (3, ValueRef::String("greptime-frontend-6989d9899-22222")),
+ ]
+ }
+
+ fn create_filter(column_name: &str, value: &str) -> SimpleFilterEvaluator {
+ let expr = Expr::BinaryExpr(BinaryExpr {
+ left: Box::new(Expr::Column(Column {
+ relation: None,
+ name: column_name.to_string(),
+ })),
+ op: Operator::Eq,
+ right: Box::new(Expr::Literal(ScalarValue::Utf8(Some(value.to_string())))),
+ });
+ SimpleFilterEvaluator::try_new(&expr).unwrap()
+ }
+
+ fn encode_sparse_pk(
+ metadata: &RegionMetadataRef,
+ row: Vec<(ColumnId, ValueRef<'static>)>,
+ ) -> Vec<u8> {
+ let codec = SparsePrimaryKeyCodec::new(metadata);
+ let mut pk = Vec::new();
+ codec.encode_to_vec(row.into_iter(), &mut pk).unwrap();
+ pk
+ }
+
+ fn encode_dense_pk(
+ metadata: &RegionMetadataRef,
+ row: Vec<(ColumnId, ValueRef<'static>)>,
+ ) -> Vec<u8> {
+ let codec = DensePrimaryKeyCodec::new(metadata);
+ let mut pk = Vec::new();
+ codec
+ .encode_to_vec(row.into_iter().map(|(_, v)| v), &mut pk)
+ .unwrap();
+ pk
+ }
+
+ #[test]
+ fn test_sparse_primary_key_filter_matches() {
+ let metadata = setup_metadata();
+ let filters = Arc::new(vec![create_filter(
+ "pod",
+ "greptime-frontend-6989d9899-22222",
+ )]);
+ let pk = encode_sparse_pk(&metadata, create_test_row());
+ let codec = SparsePrimaryKeyCodec::new(&metadata);
+ let mut filter = SparsePrimaryKeyFilter::new(metadata, filters, codec);
+ assert!(filter.matches(&pk));
+ }
+
+ #[test]
+ fn test_sparse_primary_key_filter_not_matches() {
+ let metadata = setup_metadata();
+ let filters = Arc::new(vec![create_filter(
+ "pod",
+ "greptime-frontend-6989d9899-22223",
+ )]);
+ let pk = encode_sparse_pk(&metadata, create_test_row());
+ let codec = SparsePrimaryKeyCodec::new(&metadata);
+ let mut filter = SparsePrimaryKeyFilter::new(metadata, filters, codec);
+ assert!(!filter.matches(&pk));
+ }
+
+ #[test]
+ fn test_sparse_primary_key_filter_matches_with_null() {
+ let metadata = setup_metadata();
+ let filters = Arc::new(vec![create_filter(
+ "non-exist-label",
+ "greptime-frontend-6989d9899-22222",
+ )]);
+ let pk = encode_sparse_pk(&metadata, create_test_row());
+ let codec = SparsePrimaryKeyCodec::new(&metadata);
+ let mut filter = SparsePrimaryKeyFilter::new(metadata, filters, codec);
+ assert!(filter.matches(&pk));
+ }
+
+ #[test]
+ fn test_dense_primary_key_filter_matches() {
+ let metadata = setup_metadata();
+ let filters = Arc::new(vec![create_filter(
+ "pod",
+ "greptime-frontend-6989d9899-22222",
+ )]);
+ let pk = encode_dense_pk(&metadata, create_test_row());
+ let codec = DensePrimaryKeyCodec::new(&metadata);
+ let mut filter = DensePrimaryKeyFilter::new(metadata, filters, codec);
+ assert!(filter.matches(&pk));
+ }
+
+ #[test]
+ fn test_dense_primary_key_filter_not_matches() {
+ let metadata = setup_metadata();
+ let filters = Arc::new(vec![create_filter(
+ "pod",
+ "greptime-frontend-6989d9899-22223",
+ )]);
+ let pk = encode_dense_pk(&metadata, create_test_row());
+ let codec = DensePrimaryKeyCodec::new(&metadata);
+ let mut filter = DensePrimaryKeyFilter::new(metadata, filters, codec);
+ assert!(!filter.matches(&pk));
+ }
+
+ #[test]
+ fn test_dense_primary_key_filter_matches_with_null() {
+ let metadata = setup_metadata();
+ let filters = Arc::new(vec![create_filter(
+ "non-exist-label",
+ "greptime-frontend-6989d9899-22222",
+ )]);
+ let pk = encode_dense_pk(&metadata, create_test_row());
+ let codec = DensePrimaryKeyCodec::new(&metadata);
+ let mut filter = DensePrimaryKeyFilter::new(metadata, filters, codec);
+ assert!(filter.matches(&pk));
+ }
+}
diff --git a/src/mito2/src/memtable/partition_tree/shard.rs b/src/mito2/src/memtable/partition_tree/shard.rs
index 5154bf93516c..d7350d7d12cb 100644
--- a/src/mito2/src/memtable/partition_tree/shard.rs
+++ b/src/mito2/src/memtable/partition_tree/shard.rs
@@ -243,7 +243,7 @@ impl ShardReader {
// Safety: `key_filter` is some so the shard has primary keys.
let key = self.key_dict.as_ref().unwrap().key_by_pk_index(pk_index);
let now = Instant::now();
- if key_filter.prune_primary_key(key) {
+ if key_filter.matches(key) {
self.prune_pk_cost += now.elapsed();
self.last_yield_pk_index = Some(pk_index);
self.keys_after_pruning += 1;
diff --git a/src/mito2/src/memtable/partition_tree/shard_builder.rs b/src/mito2/src/memtable/partition_tree/shard_builder.rs
index f3a00f746977..e1720f849f21 100644
--- a/src/mito2/src/memtable/partition_tree/shard_builder.rs
+++ b/src/mito2/src/memtable/partition_tree/shard_builder.rs
@@ -281,7 +281,7 @@ impl ShardBuilderReader {
self.keys_before_pruning += 1;
let key = self.dict_reader.key_by_pk_index(pk_index);
let now = Instant::now();
- if key_filter.prune_primary_key(key) {
+ if key_filter.matches(key) {
self.prune_pk_cost += now.elapsed();
self.last_yield_pk_index = Some(pk_index);
self.keys_after_pruning += 1;
diff --git a/src/mito2/src/row_converter.rs b/src/mito2/src/row_converter.rs
index f05c1ce0bc34..75f015d4494f 100644
--- a/src/mito2/src/row_converter.rs
+++ b/src/mito2/src/row_converter.rs
@@ -13,12 +13,16 @@
// limitations under the License.
mod dense;
+// TODO(weny): remove it.
+#[allow(unused)]
+mod sparse;
use std::sync::Arc;
use common_recordbatch::filter::SimpleFilterEvaluator;
use datatypes::value::{Value, ValueRef};
pub use dense::{DensePrimaryKeyCodec, SortField};
+pub use sparse::{SparsePrimaryKeyCodec, SparseValues};
use store_api::codec::PrimaryKeyEncoding;
use store_api::metadata::RegionMetadataRef;
@@ -51,8 +55,8 @@ pub trait PrimaryKeyCodecExt {
}
pub trait PrimaryKeyFilter: Send + Sync {
- /// Returns true if need to prune the primary key.
- fn prune_primary_key(&mut self, pk: &[u8]) -> bool;
+ /// Returns true if the primary key matches the filter.
+ fn matches(&mut self, pk: &[u8]) -> bool;
}
pub trait PrimaryKeyCodec: Send + Sync {
diff --git a/src/mito2/src/row_converter/dense.rs b/src/mito2/src/row_converter/dense.rs
index f84b5905e855..5c21428523f8 100644
--- a/src/mito2/src/row_converter/dense.rs
+++ b/src/mito2/src/row_converter/dense.rs
@@ -163,7 +163,7 @@ impl SortField {
Ok(())
}
- fn deserialize<B: Buf>(&self, deserializer: &mut Deserializer<B>) -> Result<Value> {
+ pub(crate) fn deserialize<B: Buf>(&self, deserializer: &mut Deserializer<B>) -> Result<Value> {
use common_time::DateTime;
macro_rules! deserialize_and_build_value {
(
@@ -246,7 +246,7 @@ impl SortField {
}
/// Skip deserializing this field, returns the length of it.
- fn skip_deserialize(
+ pub(crate) fn skip_deserialize(
&self,
bytes: &[u8],
deserializer: &mut Deserializer<&[u8]>,
diff --git a/src/mito2/src/row_converter/sparse.rs b/src/mito2/src/row_converter/sparse.rs
new file mode 100644
index 000000000000..d1eaa7a15d25
--- /dev/null
+++ b/src/mito2/src/row_converter/sparse.rs
@@ -0,0 +1,464 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::collections::{HashMap, HashSet};
+use std::sync::Arc;
+
+use datatypes::prelude::ConcreteDataType;
+use datatypes::value::{Value, ValueRef};
+use memcomparable::{Deserializer, Serializer};
+use serde::{Deserialize, Serialize};
+use snafu::ResultExt;
+use store_api::metadata::RegionMetadataRef;
+use store_api::storage::consts::ReservedColumnId;
+use store_api::storage::ColumnId;
+
+use crate::error::{DeserializeFieldSnafu, Result, SerializeFieldSnafu};
+use crate::row_converter::dense::SortField;
+use crate::row_converter::PrimaryKeyCodec;
+
+/// A codec for sparse key of metrics.
+#[derive(Clone)]
+pub struct SparsePrimaryKeyCodec {
+ inner: Arc<SparsePrimaryKeyCodecInner>,
+}
+
+struct SparsePrimaryKeyCodecInner {
+ // Internal fields
+ table_id_field: SortField,
+ // Internal fields
+ tsid_field: SortField,
+ // User defined label field
+ label_field: SortField,
+ // Columns in primary key
+ columns: HashSet<ColumnId>,
+}
+
+/// Sparse values representation.
+///
+/// A map of [`ColumnId`] to [`Value`].
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct SparseValues {
+ values: HashMap<ColumnId, Value>,
+}
+
+impl SparseValues {
+ /// Creates a new [`SparseValues`] instance.
+ pub fn new(values: HashMap<ColumnId, Value>) -> Self {
+ Self { values }
+ }
+
+ /// Returns the value of the given column, or [`Value::Null`] if the column is not present.
+ pub fn get_or_null(&self, column_id: ColumnId) -> &Value {
+ self.values.get(&column_id).unwrap_or(&Value::Null)
+ }
+
+ /// Inserts a new value into the [`SparseValues`].
+ pub fn insert(&mut self, column_id: ColumnId, value: Value) {
+ self.values.insert(column_id, value);
+ }
+}
+
+/// The column id of the tsid.
+const RESERVED_COLUMN_ID_TSID: ColumnId = ReservedColumnId::tsid();
+/// The column id of the table id.
+const RESERVED_COLUMN_ID_TABLE_ID: ColumnId = ReservedColumnId::table_id();
+/// The size of the column id in the encoded sparse row.
+const COLUMN_ID_ENCODE_SIZE: usize = 4;
+
+impl SparsePrimaryKeyCodec {
+ /// Creates a new [`SparsePrimaryKeyCodec`] instance.
+ pub fn new(region_metadata: &RegionMetadataRef) -> Self {
+ Self {
+ inner: Arc::new(SparsePrimaryKeyCodecInner {
+ table_id_field: SortField::new(ConcreteDataType::uint32_datatype()),
+ tsid_field: SortField::new(ConcreteDataType::uint64_datatype()),
+ label_field: SortField::new(ConcreteDataType::string_datatype()),
+ columns: region_metadata
+ .primary_key_columns()
+ .map(|c| c.column_id)
+ .collect(),
+ }),
+ }
+ }
+
+ /// Returns the field of the given column id.
+ fn get_field(&self, column_id: ColumnId) -> Option<&SortField> {
+ if !self.inner.columns.contains(&column_id) {
+ return None;
+ }
+
+ match column_id {
+ RESERVED_COLUMN_ID_TABLE_ID => Some(&self.inner.table_id_field),
+ RESERVED_COLUMN_ID_TSID => Some(&self.inner.tsid_field),
+ _ => Some(&self.inner.label_field),
+ }
+ }
+
+    /// Encodes the given row into the given buffer.
+ pub(crate) fn encode_to_vec<'a, I>(&self, row: I, buffer: &mut Vec<u8>) -> Result<()>
+ where
+ I: Iterator<Item = (ColumnId, ValueRef<'a>)>,
+ {
+ let mut serializer = Serializer::new(buffer);
+ for (column_id, value) in row {
+ if value.is_null() {
+ continue;
+ }
+
+ if let Some(field) = self.get_field(column_id) {
+ column_id
+ .serialize(&mut serializer)
+ .context(SerializeFieldSnafu)?;
+ field.serialize(&mut serializer, &value)?;
+ } else {
+ // TODO(weny): handle the error.
+ common_telemetry::warn!("Column {} is not in primary key, skipping", column_id);
+ }
+ }
+ Ok(())
+ }
+
+ /// Decodes the given bytes into a [`SparseValues`].
+ fn decode_sparse(&self, bytes: &[u8]) -> Result<SparseValues> {
+ let mut deserializer = Deserializer::new(bytes);
+ let mut values = SparseValues::new(HashMap::new());
+
+ let column_id = u32::deserialize(&mut deserializer).context(DeserializeFieldSnafu)?;
+ let value = self.inner.table_id_field.deserialize(&mut deserializer)?;
+ values.insert(column_id, value);
+
+ let column_id = u32::deserialize(&mut deserializer).context(DeserializeFieldSnafu)?;
+ let value = self.inner.tsid_field.deserialize(&mut deserializer)?;
+ values.insert(column_id, value);
+ while deserializer.has_remaining() {
+ let column_id = u32::deserialize(&mut deserializer).context(DeserializeFieldSnafu)?;
+ let value = self.inner.label_field.deserialize(&mut deserializer)?;
+ values.insert(column_id, value);
+ }
+
+ Ok(values)
+ }
+
+ /// Decodes the given bytes into a [`Value`].
+ fn decode_leftmost(&self, bytes: &[u8]) -> Result<Option<Value>> {
+ let mut deserializer = Deserializer::new(bytes);
+ // Skip the column id.
+ deserializer.advance(COLUMN_ID_ENCODE_SIZE);
+ let value = self.inner.table_id_field.deserialize(&mut deserializer)?;
+ Ok(Some(value))
+ }
+
+ /// Returns the offset of the given column id in the given primary key.
+ pub(crate) fn has_column(
+ &self,
+ pk: &[u8],
+ offsets_map: &mut HashMap<u32, usize>,
+ column_id: ColumnId,
+ ) -> Option<usize> {
+ if offsets_map.is_empty() {
+ let mut deserializer = Deserializer::new(pk);
+ let mut offset = 0;
+ while deserializer.has_remaining() {
+ let column_id = u32::deserialize(&mut deserializer).unwrap();
+ offset += 4;
+ offsets_map.insert(column_id, offset);
+ let Some(field) = self.get_field(column_id) else {
+ break;
+ };
+
+ let skip = field.skip_deserialize(pk, &mut deserializer).unwrap();
+ offset += skip;
+ }
+
+ offsets_map.get(&column_id).copied()
+ } else {
+ offsets_map.get(&column_id).copied()
+ }
+ }
+
+ /// Decode value at `offset` in `pk`.
+ pub(crate) fn decode_value_at(
+ &self,
+ pk: &[u8],
+ offset: usize,
+ column_id: ColumnId,
+ ) -> Result<Value> {
+ let mut deserializer = Deserializer::new(pk);
+ deserializer.advance(offset);
+ // Safety: checked by `has_column`
+ let field = self.get_field(column_id).unwrap();
+ field.deserialize(&mut deserializer)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::sync::Arc;
+
+ use api::v1::SemanticType;
+ use common_time::timestamp::TimeUnit;
+ use common_time::Timestamp;
+ use datatypes::schema::ColumnSchema;
+ use datatypes::value::{OrderedFloat, Value};
+ use store_api::metadata::{ColumnMetadata, RegionMetadataBuilder};
+ use store_api::metric_engine_consts::{
+ DATA_SCHEMA_TABLE_ID_COLUMN_NAME, DATA_SCHEMA_TSID_COLUMN_NAME,
+ };
+ use store_api::storage::{ColumnId, RegionId};
+
+ use super::*;
+
+ fn test_region_metadata() -> RegionMetadataRef {
+ let mut builder = RegionMetadataBuilder::new(RegionId::new(1, 1));
+ builder
+ .push_column_metadata(ColumnMetadata {
+ column_schema: ColumnSchema::new(
+ DATA_SCHEMA_TABLE_ID_COLUMN_NAME,
+ ConcreteDataType::uint32_datatype(),
+ false,
+ ),
+ semantic_type: SemanticType::Tag,
+ column_id: ReservedColumnId::table_id(),
+ })
+ .push_column_metadata(ColumnMetadata {
+ column_schema: ColumnSchema::new(
+ DATA_SCHEMA_TSID_COLUMN_NAME,
+ ConcreteDataType::uint64_datatype(),
+ false,
+ ),
+ semantic_type: SemanticType::Tag,
+ column_id: ReservedColumnId::tsid(),
+ })
+ .push_column_metadata(ColumnMetadata {
+ column_schema: ColumnSchema::new("pod", ConcreteDataType::string_datatype(), true),
+ semantic_type: SemanticType::Tag,
+ column_id: 1,
+ })
+ .push_column_metadata(ColumnMetadata {
+ column_schema: ColumnSchema::new(
+ "namespace",
+ ConcreteDataType::string_datatype(),
+ true,
+ ),
+ semantic_type: SemanticType::Tag,
+ column_id: 2,
+ })
+ .push_column_metadata(ColumnMetadata {
+ column_schema: ColumnSchema::new(
+ "container",
+ ConcreteDataType::string_datatype(),
+ true,
+ ),
+ semantic_type: SemanticType::Tag,
+ column_id: 3,
+ })
+ .push_column_metadata(ColumnMetadata {
+ column_schema: ColumnSchema::new(
+ "pod_name",
+ ConcreteDataType::string_datatype(),
+ true,
+ ),
+ semantic_type: SemanticType::Tag,
+ column_id: 4,
+ })
+ .push_column_metadata(ColumnMetadata {
+ column_schema: ColumnSchema::new(
+ "pod_ip",
+ ConcreteDataType::string_datatype(),
+ true,
+ ),
+ semantic_type: SemanticType::Tag,
+ column_id: 5,
+ })
+ .push_column_metadata(ColumnMetadata {
+ column_schema: ColumnSchema::new(
+ "greptime_value",
+ ConcreteDataType::float64_datatype(),
+ false,
+ ),
+ semantic_type: SemanticType::Field,
+ column_id: 6,
+ })
+ .push_column_metadata(ColumnMetadata {
+ column_schema: ColumnSchema::new(
+ "greptime_timestamp",
+ ConcreteDataType::timestamp_nanosecond_datatype(),
+ false,
+ ),
+ semantic_type: SemanticType::Timestamp,
+ column_id: 7,
+ })
+ .primary_key(vec![
+ ReservedColumnId::table_id(),
+ ReservedColumnId::tsid(),
+ 1,
+ 2,
+ 3,
+ 4,
+ 5,
+ ]);
+ let metadata = builder.build().unwrap();
+ Arc::new(metadata)
+ }
+
+ #[test]
+ fn test_sparse_value_new_and_get_or_null() {
+ let mut values = HashMap::new();
+ values.insert(1, Value::Int32(42));
+ let sparse_value = SparseValues::new(values);
+
+ assert_eq!(sparse_value.get_or_null(1), &Value::Int32(42));
+ assert_eq!(sparse_value.get_or_null(2), &Value::Null);
+ }
+
+ #[test]
+ fn test_sparse_value_insert() {
+ let mut sparse_value = SparseValues::new(HashMap::new());
+ sparse_value.insert(1, Value::Int32(42));
+
+ assert_eq!(sparse_value.get_or_null(1), &Value::Int32(42));
+ }
+
+ fn test_row() -> Vec<(ColumnId, ValueRef<'static>)> {
+ vec![
+ (RESERVED_COLUMN_ID_TABLE_ID, ValueRef::UInt32(42)),
+ (
+ RESERVED_COLUMN_ID_TSID,
+ ValueRef::UInt64(123843349035232323),
+ ),
+ // label: pod
+ (1, ValueRef::String("greptime-frontend-6989d9899-22222")),
+ // label: namespace
+ (2, ValueRef::String("greptime-cluster")),
+ // label: container
+ (3, ValueRef::String("greptime-frontend-6989d9899-22222")),
+ // label: pod_name
+ (4, ValueRef::String("greptime-frontend-6989d9899-22222")),
+ // label: pod_ip
+ (5, ValueRef::String("10.10.10.10")),
+ // field: greptime_value
+ (6, ValueRef::Float64(OrderedFloat(1.0))),
+ // field: greptime_timestamp
+ (
+ 7,
+ ValueRef::Timestamp(Timestamp::new(1618876800000000000, TimeUnit::Nanosecond)),
+ ),
+ ]
+ }
+
+ #[test]
+ fn test_encode_to_vec() {
+ let region_metadata = test_region_metadata();
+        let codec = SparsePrimaryKeyCodec::new(&region_metadata);
+ let mut buffer = Vec::new();
+
+ let row = test_row();
+ codec.encode_to_vec(row.into_iter(), &mut buffer).unwrap();
+ assert!(!buffer.is_empty());
+ let sparse_value = codec.decode_sparse(&buffer).unwrap();
+ assert_eq!(
+ sparse_value.get_or_null(RESERVED_COLUMN_ID_TABLE_ID),
+ &Value::UInt32(42)
+ );
+ assert_eq!(
+ sparse_value.get_or_null(1),
+ &Value::String("greptime-frontend-6989d9899-22222".into())
+ );
+ assert_eq!(
+ sparse_value.get_or_null(2),
+ &Value::String("greptime-cluster".into())
+ );
+ assert_eq!(
+ sparse_value.get_or_null(3),
+ &Value::String("greptime-frontend-6989d9899-22222".into())
+ );
+ assert_eq!(
+ sparse_value.get_or_null(4),
+ &Value::String("greptime-frontend-6989d9899-22222".into())
+ );
+ assert_eq!(
+ sparse_value.get_or_null(5),
+ &Value::String("10.10.10.10".into())
+ );
+ }
+
+ #[test]
+ fn test_decode_leftmost() {
+ let region_metadata = test_region_metadata();
+        let codec = SparsePrimaryKeyCodec::new(&region_metadata);
+ let mut buffer = Vec::new();
+ let row = test_row();
+ codec.encode_to_vec(row.into_iter(), &mut buffer).unwrap();
+ assert!(!buffer.is_empty());
+ let result = codec.decode_leftmost(&buffer).unwrap().unwrap();
+ assert_eq!(result, Value::UInt32(42));
+ }
+
+ #[test]
+ fn test_has_column() {
+ let region_metadata = test_region_metadata();
+        let codec = SparsePrimaryKeyCodec::new(&region_metadata);
+ let mut buffer = Vec::new();
+ let row = test_row();
+ codec.encode_to_vec(row.into_iter(), &mut buffer).unwrap();
+ assert!(!buffer.is_empty());
+
+ let mut offsets_map = HashMap::new();
+ for column_id in [
+ RESERVED_COLUMN_ID_TABLE_ID,
+ RESERVED_COLUMN_ID_TSID,
+ 1,
+ 2,
+ 3,
+ 4,
+ 5,
+ ] {
+ let offset = codec.has_column(&buffer, &mut offsets_map, column_id);
+ assert!(offset.is_some());
+ }
+
+ let offset = codec.has_column(&buffer, &mut offsets_map, 6);
+ assert!(offset.is_none());
+ }
+
+ #[test]
+ fn test_decode_value_at() {
+ let region_metadata = test_region_metadata();
+        let codec = SparsePrimaryKeyCodec::new(&region_metadata);
+ let mut buffer = Vec::new();
+ let row = test_row();
+ codec.encode_to_vec(row.into_iter(), &mut buffer).unwrap();
+ assert!(!buffer.is_empty());
+
+ let row = test_row();
+ let mut offsets_map = HashMap::new();
+ for column_id in [
+ RESERVED_COLUMN_ID_TABLE_ID,
+ RESERVED_COLUMN_ID_TSID,
+ 1,
+ 2,
+ 3,
+ 4,
+ 5,
+ ] {
+ let offset = codec
+ .has_column(&buffer, &mut offsets_map, column_id)
+ .unwrap();
+ let value = codec.decode_value_at(&buffer, offset, column_id).unwrap();
+ let expected_value = row.iter().find(|(id, _)| *id == column_id).unwrap().1;
+ assert_eq!(value.as_value_ref(), expected_value);
+ }
+ }
+}
diff --git a/src/mito2/src/test_util/memtable_util.rs b/src/mito2/src/test_util/memtable_util.rs
index 373218e91f4c..cfa8cd885378 100644
--- a/src/mito2/src/test_util/memtable_util.rs
+++ b/src/mito2/src/test_util/memtable_util.rs
@@ -36,7 +36,7 @@ use crate::memtable::{
BoxedBatchIterator, BulkPart, KeyValues, Memtable, MemtableBuilder, MemtableId, MemtableRanges,
MemtableRef, MemtableStats,
};
-use crate::row_converter::{DensePrimaryKeyCodec, PrimaryKeyCodec, PrimaryKeyCodecExt, SortField};
+use crate::row_converter::{DensePrimaryKeyCodec, PrimaryKeyCodecExt, SortField};
/// Empty memtable for test.
#[derive(Debug, Default)]
diff --git a/src/mito2/src/test_util/sst_util.rs b/src/mito2/src/test_util/sst_util.rs
index cb0b3d12fb84..ce8cd4412f63 100644
--- a/src/mito2/src/test_util/sst_util.rs
+++ b/src/mito2/src/test_util/sst_util.rs
@@ -29,7 +29,7 @@ use store_api::metadata::{
use store_api::storage::RegionId;
use crate::read::{Batch, BatchBuilder, Source};
-use crate::row_converter::{DensePrimaryKeyCodec, PrimaryKeyCodec, PrimaryKeyCodecExt, SortField};
+use crate::row_converter::{DensePrimaryKeyCodec, PrimaryKeyCodecExt, SortField};
use crate::sst::file::{FileHandle, FileId, FileMeta};
use crate::test_util::{new_batch_builder, new_noop_file_purger, VecBatchReader};
|
feat
|
introduce `SparsePrimaryKeyCodec` and `SparsePrimaryKeyFilter` (#5365)
|
7fe39e918784086a38757f60554dd5b2ec7f9ce2
|
2022-10-25 13:35:53
|
Ruihang Xia
|
feat: support querying with logical plan in gRPC layer (#344)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 2af5ac466eed..a1ca0da6a127 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -805,7 +805,10 @@ dependencies = [
"datafusion",
"datanode",
"datatypes",
+ "prost 0.9.0",
"snafu",
+ "substrait 0.1.0",
+ "substrait 0.2.0",
"tokio",
"tonic",
"tracing",
@@ -1418,6 +1421,7 @@ dependencies = [
"sql",
"storage",
"store-api",
+ "substrait 0.1.0",
"table",
"table-engine",
"tempdir",
diff --git a/src/api/greptime/v1/database.proto b/src/api/greptime/v1/database.proto
index 417a1b205ebb..163b872a6325 100644
--- a/src/api/greptime/v1/database.proto
+++ b/src/api/greptime/v1/database.proto
@@ -27,6 +27,7 @@ message ObjectExpr {
message SelectExpr {
oneof expr {
string sql = 1;
+ bytes logical_plan = 2;
PhysicalPlan physical_plan = 15;
}
}
diff --git a/src/client/Cargo.toml b/src/client/Cargo.toml
index 82bb407f9c67..620037e4a8ad 100644
--- a/src/client/Cargo.toml
+++ b/src/client/Cargo.toml
@@ -13,13 +13,25 @@ common-grpc = { path = "../common/grpc" }
common-query = { path = "../common/query" }
common-recordbatch = { path = "../common/recordbatch" }
common-time = { path = "../common/time" }
-datafusion = { git = "https://github.com/apache/arrow-datafusion.git", branch = "arrow2", features = ["simd"] }
+datafusion = { git = "https://github.com/apache/arrow-datafusion.git", branch = "arrow2", features = [
+ "simd",
+] }
datatypes = { path = "../datatypes" }
snafu = { version = "0.7", features = ["backtraces"] }
tonic = "0.8"
[dev-dependencies]
datanode = { path = "../datanode" }
+substrait = { path = "../common/substrait" }
tokio = { version = "1.0", features = ["full"] }
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
+
+[dev-dependencies.substrait_proto]
+package = "substrait"
+version = "0.2"
+
+# TODO(ruihang): upgrade to 0.11 once substrait-rs supports it.
+[dev-dependencies.prost_09]
+package = "prost"
+version = "0.9"
diff --git a/src/client/examples/logical.rs b/src/client/examples/logical.rs
new file mode 100644
index 000000000000..6b0f8233ccb4
--- /dev/null
+++ b/src/client/examples/logical.rs
@@ -0,0 +1,96 @@
+use api::v1::{ColumnDataType, ColumnDef, CreateExpr};
+use client::{admin::Admin, Client, Database};
+use prost_09::Message;
+use substrait_proto::protobuf::{
+ plan_rel::RelType as PlanRelType,
+ read_rel::{NamedTable, ReadType},
+ rel::RelType,
+ PlanRel, ReadRel, Rel,
+};
+use tracing::{event, Level};
+
+fn main() {
+ tracing::subscriber::set_global_default(tracing_subscriber::FmtSubscriber::builder().finish())
+ .unwrap();
+
+ run();
+}
+
+#[tokio::main]
+async fn run() {
+ let client = Client::connect("http://127.0.0.1:3001").await.unwrap();
+
+ let create_table_expr = CreateExpr {
+ catalog_name: Some("greptime".to_string()),
+ schema_name: Some("public".to_string()),
+ table_name: "test_logical_dist_exec".to_string(),
+ desc: None,
+ column_defs: vec![
+ ColumnDef {
+ name: "timestamp".to_string(),
+ datatype: ColumnDataType::Timestamp as i32,
+ is_nullable: false,
+ default_constraint: None,
+ },
+ ColumnDef {
+ name: "key".to_string(),
+ datatype: ColumnDataType::Uint64 as i32,
+ is_nullable: false,
+ default_constraint: None,
+ },
+ ColumnDef {
+ name: "value".to_string(),
+ datatype: ColumnDataType::Uint64 as i32,
+ is_nullable: false,
+ default_constraint: None,
+ },
+ ],
+ time_index: "timestamp".to_string(),
+ primary_keys: vec!["key".to_string()],
+ create_if_not_exists: false,
+ table_options: Default::default(),
+ };
+
+ let admin = Admin::new("create table", client.clone());
+ let result = admin.create(create_table_expr).await.unwrap();
+ event!(Level::INFO, "create table result: {:#?}", result);
+
+ let logical = mock_logical_plan();
+ event!(Level::INFO, "plan size: {:#?}", logical.len());
+ let db = Database::new("greptime", client);
+ let result = db.logical_plan(logical).await.unwrap();
+
+ event!(Level::INFO, "result: {:#?}", result);
+}
+
+fn mock_logical_plan() -> Vec<u8> {
+ let catalog_name = "greptime".to_string();
+ let schema_name = "public".to_string();
+ let table_name = "test_logical_dist_exec".to_string();
+
+ let named_table = NamedTable {
+ names: vec![catalog_name, schema_name, table_name],
+ advanced_extension: None,
+ };
+ let read_type = ReadType::NamedTable(named_table);
+
+ let read_rel = ReadRel {
+ common: None,
+ base_schema: None,
+ filter: None,
+ projection: None,
+ advanced_extension: None,
+ read_type: Some(read_type),
+ };
+
+ let mut buf = vec![];
+ let rel = Rel {
+ rel_type: Some(RelType::Read(Box::new(read_rel))),
+ };
+ let plan_rel = PlanRel {
+ rel_type: Some(PlanRelType::Rel(rel)),
+ };
+ plan_rel.encode(&mut buf).unwrap();
+
+ buf
+}
diff --git a/src/client/src/database.rs b/src/client/src/database.rs
index 6a26131da90b..2a1d8dc76e26 100644
--- a/src/client/src/database.rs
+++ b/src/client/src/database.rs
@@ -107,6 +107,13 @@ impl Database {
self.do_select(select_expr).await
}
+ pub async fn logical_plan(&self, logical_plan: Vec<u8>) -> Result<ObjectResult> {
+ let select_expr = SelectExpr {
+ expr: Some(select_expr::Expr::LogicalPlan(logical_plan)),
+ };
+ self.do_select(select_expr).await
+ }
+
async fn do_select(&self, select_expr: SelectExpr) -> Result<ObjectResult> {
let header = ExprHeader {
version: PROTOCOL_VERSION,
diff --git a/src/common/substrait/src/df_logical.rs b/src/common/substrait/src/df_logical.rs
index d9881603d444..92ba98a923e4 100644
--- a/src/common/substrait/src/df_logical.rs
+++ b/src/common/substrait/src/df_logical.rs
@@ -47,9 +47,12 @@ impl SubstraitPlan for DFLogicalSubstraitConvertor {
fn encode(&self, plan: Self::Plan) -> Result<Bytes, Self::Error> {
let rel = self.convert_plan(plan)?;
+ let plan_rel = PlanRel {
+ rel_type: Some(PlanRelType::Rel(rel)),
+ };
let mut buf = BytesMut::new();
- rel.encode(&mut buf).context(EncodeRelSnafu)?;
+ plan_rel.encode(&mut buf).context(EncodeRelSnafu)?;
Ok(buf.freeze())
}
@@ -182,35 +185,35 @@ impl DFLogicalSubstraitConvertor {
}
.fail()?,
LogicalPlan::Filter(_) => UnsupportedPlanSnafu {
- name: "DataFusion Logical Projection",
+ name: "DataFusion Logical Filter",
}
.fail()?,
LogicalPlan::Window(_) => UnsupportedPlanSnafu {
- name: "DataFusion Logical Projection",
+ name: "DataFusion Logical Window",
}
.fail()?,
LogicalPlan::Aggregate(_) => UnsupportedPlanSnafu {
- name: "DataFusion Logical Projection",
+ name: "DataFusion Logical Aggregate",
}
.fail()?,
LogicalPlan::Sort(_) => UnsupportedPlanSnafu {
- name: "DataFusion Logical Projection",
+ name: "DataFusion Logical Sort",
}
.fail()?,
LogicalPlan::Join(_) => UnsupportedPlanSnafu {
- name: "DataFusion Logical Projection",
+ name: "DataFusion Logical Join",
}
.fail()?,
LogicalPlan::CrossJoin(_) => UnsupportedPlanSnafu {
- name: "DataFusion Logical Projection",
+ name: "DataFusion Logical CrossJoin",
}
.fail()?,
LogicalPlan::Repartition(_) => UnsupportedPlanSnafu {
- name: "DataFusion Logical Projection",
+ name: "DataFusion Logical Repartition",
}
.fail()?,
LogicalPlan::Union(_) => UnsupportedPlanSnafu {
- name: "DataFusion Logical Projection",
+ name: "DataFusion Logical Union",
}
.fail()?,
LogicalPlan::TableScan(table_scan) => {
@@ -220,11 +223,11 @@ impl DFLogicalSubstraitConvertor {
})
}
LogicalPlan::EmptyRelation(_) => UnsupportedPlanSnafu {
- name: "DataFusion Logical Projection",
+ name: "DataFusion Logical EmptyRelation",
}
.fail()?,
LogicalPlan::Limit(_) => UnsupportedPlanSnafu {
- name: "DataFusion Logical Projection",
+ name: "DataFusion Logical Limit",
}
.fail()?,
LogicalPlan::CreateExternalTable(_)
@@ -321,8 +324,8 @@ mod test {
async fn logical_plan_round_trip(plan: LogicalPlan, catalog: CatalogManagerRef) {
let convertor = DFLogicalSubstraitConvertor::new(catalog);
- let rel = convertor.convert_plan(plan.clone()).unwrap();
- let tripped_plan = convertor.convert_rel(rel).unwrap();
+ let proto = convertor.encode(plan.clone()).unwrap();
+ let tripped_plan = convertor.decode(proto).unwrap();
assert_eq!(format!("{:?}", plan), format!("{:?}", tripped_plan));
}
diff --git a/src/common/substrait/src/lib.rs b/src/common/substrait/src/lib.rs
index d5576daaa94a..3808ce9ec6a1 100644
--- a/src/common/substrait/src/lib.rs
+++ b/src/common/substrait/src/lib.rs
@@ -1,5 +1,5 @@
mod df_logical;
-mod error;
+pub mod error;
use bytes::{Buf, Bytes};
diff --git a/src/datanode/Cargo.toml b/src/datanode/Cargo.toml
index d9112adef1d8..e6ae94049a9a 100644
--- a/src/datanode/Cargo.toml
+++ b/src/datanode/Cargo.toml
@@ -5,9 +5,7 @@ edition = "2021"
[features]
default = ["python"]
-python = [
- "dep:script"
-]
+python = ["dep:script"]
[dependencies]
api = { path = "../api" }
@@ -23,7 +21,9 @@ common-recordbatch = { path = "../common/recordbatch" }
common-runtime = { path = "../common/runtime" }
common-telemetry = { path = "../common/telemetry" }
common-time = { path = "../common/time" }
-datafusion = { git = "https://github.com/apache/arrow-datafusion.git", branch = "arrow2", features = ["simd"] }
+datafusion = { git = "https://github.com/apache/arrow-datafusion.git", branch = "arrow2", features = [
+ "simd",
+] }
datatypes = { path = "../datatypes" }
futures = "0.3"
hyper = { version = "0.14", features = ["full"] }
@@ -39,6 +39,7 @@ snafu = { version = "0.7", features = ["backtraces"] }
sql = { path = "../sql" }
storage = { path = "../storage" }
store-api = { path = "../store-api" }
+substrait = { path = "../common/substrait" }
table = { path = "../table" }
table-engine = { path = "../table-engine", features = ["test"] }
tokio = { version = "1.18", features = ["full"] }
@@ -46,22 +47,41 @@ tokio-stream = { version = "0.1", features = ["net"] }
tonic = "0.8"
tower = { version = "0.4", features = ["full"] }
tower-http = { version = "0.3", features = ["full"] }
-# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies.arrow]
package = "arrow2"
version = "0.10"
-features = ["io_csv", "io_json", "io_parquet", "io_parquet_compression", "io_ipc", "ahash", "compute", "serde_types"]
+features = [
+ "io_csv",
+ "io_json",
+ "io_parquet",
+ "io_parquet_compression",
+ "io_ipc",
+ "ahash",
+ "compute",
+ "serde_types",
+]
[dev-dependencies]
axum-test-helper = { git = "https://github.com/sunng87/axum-test-helper.git", branch = "patch-1" }
client = { path = "../client" }
common-query = { path = "../common/query" }
-datafusion = { git = "https://github.com/apache/arrow-datafusion.git", branch = "arrow2", features = ["simd"] }
+datafusion = { git = "https://github.com/apache/arrow-datafusion.git", branch = "arrow2", features = [
+ "simd",
+] }
datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", branch = "arrow2" }
tempdir = "0.3"
[dev-dependencies.arrow]
package = "arrow2"
version = "0.10"
-features = ["io_csv", "io_json", "io_parquet", "io_parquet_compression", "io_ipc", "ahash", "compute", "serde_types"]
+features = [
+ "io_csv",
+ "io_json",
+ "io_parquet",
+ "io_parquet_compression",
+ "io_ipc",
+ "ahash",
+ "compute",
+ "serde_types",
+]
diff --git a/src/datanode/src/error.rs b/src/datanode/src/error.rs
index 01c5cd1e39a9..a0b7be3784da 100644
--- a/src/datanode/src/error.rs
+++ b/src/datanode/src/error.rs
@@ -16,6 +16,12 @@ pub enum Error {
source: query::error::Error,
},
+ #[snafu(display("Failed to decode logical plan, source: {}", source))]
+ DecodeLogicalPlan {
+ #[snafu(backtrace)]
+ source: substrait::error::Error,
+ },
+
#[snafu(display("Failed to execute physical plan, source: {}", source))]
ExecutePhysicalPlan {
#[snafu(backtrace)]
@@ -269,6 +275,7 @@ impl ErrorExt for Error {
fn status_code(&self) -> StatusCode {
match self {
Error::ExecuteSql { source } => source.status_code(),
+ Error::DecodeLogicalPlan { source } => source.status_code(),
Error::ExecutePhysicalPlan { source } => source.status_code(),
Error::NewCatalog { source } => source.status_code(),
diff --git a/src/datanode/src/instance/grpc.rs b/src/datanode/src/instance/grpc.rs
index 250df8504fd0..5b736f734531 100644
--- a/src/datanode/src/instance/grpc.rs
+++ b/src/datanode/src/instance/grpc.rs
@@ -7,11 +7,16 @@ use catalog::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_error::status_code::StatusCode;
use common_query::Output;
use common_telemetry::logging::{debug, info};
+use query::plan::LogicalPlan;
use servers::query_handler::{GrpcAdminHandler, GrpcQueryHandler};
use snafu::prelude::*;
+use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};
use table::requests::AddColumnRequest;
-use crate::error::{self, InsertSnafu, Result, TableNotFoundSnafu, UnsupportedExprSnafu};
+use crate::error::{
+ self, DecodeLogicalPlanSnafu, ExecuteSqlSnafu, InsertSnafu, Result, TableNotFoundSnafu,
+ UnsupportedExprSnafu,
+};
use crate::instance::Instance;
use crate::server::grpc::handler::{build_err_result, ObjectResultBuilder};
use crate::server::grpc::insert::{self, insertion_expr_to_request};
@@ -155,6 +160,7 @@ impl Instance {
let expr = select_expr.expr;
match expr {
Some(select_expr::Expr::Sql(sql)) => self.execute_sql(&sql).await,
+ Some(select_expr::Expr::LogicalPlan(plan)) => self.execute_logical(plan).await,
Some(select_expr::Expr::PhysicalPlan(api::v1::PhysicalPlan { original_ql, plan })) => {
self.physical_planner
.execute(PhysicalPlanner::parse(plan)?, original_ql)
@@ -166,6 +172,18 @@ impl Instance {
.fail(),
}
}
+
+ async fn execute_logical(&self, plan_bytes: Vec<u8>) -> Result<Output> {
+ let logical_plan_converter = DFLogicalSubstraitConvertor::new(self.catalog_manager.clone());
+ let logical_plan = logical_plan_converter
+ .decode(plan_bytes.as_slice())
+ .context(DecodeLogicalPlanSnafu)?;
+
+ self.query_engine
+ .execute(&LogicalPlan::DfPlan(logical_plan))
+ .await
+ .context(ExecuteSqlSnafu)
+ }
}
#[async_trait]
|
feat
|
support querying with logical plan in gRPC layer (#344)
|
081c6d9e74a540541e32b323c72eee0213efdac9
|
2024-10-18 12:51:35
|
Weny Xu
|
fix: flush metric metadata region (#4852)
| false
|
diff --git a/src/metric-engine/src/engine.rs b/src/metric-engine/src/engine.rs
index ab789db661b2..a136ed3c76c6 100644
--- a/src/metric-engine/src/engine.rs
+++ b/src/metric-engine/src/engine.rs
@@ -17,6 +17,7 @@ mod catchup;
mod close;
mod create;
mod drop;
+mod flush;
mod open;
mod options;
mod put;
@@ -145,7 +146,7 @@ impl RegionEngine for MetricEngine {
.alter_region(region_id, alter, &mut extension_return_value)
.await
}
- RegionRequest::Flush(_) | RegionRequest::Compact(_) => {
+ RegionRequest::Compact(_) => {
if self.inner.is_physical_region(region_id) {
self.inner
.mito
@@ -157,10 +158,11 @@ impl RegionEngine for MetricEngine {
UnsupportedRegionRequestSnafu { request }.fail()
}
}
+ RegionRequest::Flush(req) => self.inner.flush_region(region_id, req).await,
RegionRequest::Delete(_) | RegionRequest::Truncate(_) => {
UnsupportedRegionRequestSnafu { request }.fail()
}
- RegionRequest::Catchup(ref req) => self.inner.catchup_region(region_id, *req).await,
+ RegionRequest::Catchup(req) => self.inner.catchup_region(region_id, req).await,
};
result.map_err(BoxedError::new).map(|rows| RegionResponse {
diff --git a/src/metric-engine/src/engine/catchup.rs b/src/metric-engine/src/engine/catchup.rs
index 576ca54cfa25..4b1268c049b5 100644
--- a/src/metric-engine/src/engine/catchup.rs
+++ b/src/metric-engine/src/engine/catchup.rs
@@ -47,9 +47,10 @@ impl MetricEngineInner {
.await
.context(MitoCatchupOperationSnafu)?;
+ let data_region_id = utils::to_data_region_id(region_id);
self.mito
.handle_request(
- region_id,
+ data_region_id,
RegionRequest::Catchup(RegionCatchupRequest {
set_writable: req.set_writable,
entry_id: req.entry_id,
diff --git a/src/metric-engine/src/engine/flush.rs b/src/metric-engine/src/engine/flush.rs
new file mode 100644
index 000000000000..5afadd54b6e0
--- /dev/null
+++ b/src/metric-engine/src/engine/flush.rs
@@ -0,0 +1,52 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use snafu::ResultExt;
+use store_api::region_engine::RegionEngine;
+use store_api::region_request::{AffectedRows, RegionFlushRequest, RegionRequest};
+use store_api::storage::RegionId;
+
+use crate::engine::MetricEngineInner;
+use crate::error::{MitoFlushOperationSnafu, Result, UnsupportedRegionRequestSnafu};
+use crate::utils;
+
+impl MetricEngineInner {
+ pub async fn flush_region(
+ &self,
+ region_id: RegionId,
+ req: RegionFlushRequest,
+ ) -> Result<AffectedRows> {
+ if !self.is_physical_region(region_id) {
+ return UnsupportedRegionRequestSnafu {
+ request: RegionRequest::Flush(req),
+ }
+ .fail();
+ }
+
+ let metadata_region_id = utils::to_metadata_region_id(region_id);
+ // Flushes the metadata region as well
+ self.mito
+ .handle_request(metadata_region_id, RegionRequest::Flush(req.clone()))
+ .await
+ .context(MitoFlushOperationSnafu)
+ .map(|response| response.affected_rows)?;
+
+ let data_region_id = utils::to_data_region_id(region_id);
+ self.mito
+ .handle_request(data_region_id, RegionRequest::Flush(req.clone()))
+ .await
+ .context(MitoFlushOperationSnafu)
+ .map(|response| response.affected_rows)
+ }
+}
diff --git a/src/store-api/src/region_request.rs b/src/store-api/src/region_request.rs
index cad251988fe5..2453b833402c 100644
--- a/src/store-api/src/region_request.rs
+++ b/src/store-api/src/region_request.rs
@@ -639,7 +639,7 @@ impl From<v1::ChangeColumnType> for ChangeColumnType {
}
}
-#[derive(Debug, Default)]
+#[derive(Debug, Clone, Default)]
pub struct RegionFlushRequest {
pub row_group_size: Option<usize>,
}
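The ordering this fix establishes (metadata region first, then data region) can be summarized in a short sketch. The names below are hypothetical and the id mapping is invented; the only point is that one logical flush fans out to two sub-region flushes in a fixed order.

#[derive(Clone, Copy, Debug)]
struct RegionId(u64);

// Hypothetical mapping helpers; the real engine derives the metadata and data
// sub-region ids differently. This sketch only needs two distinct ids.
fn to_metadata_region_id(id: RegionId) -> RegionId { RegionId(id.0 | (1u64 << 63)) }
fn to_data_region_id(id: RegionId) -> RegionId { RegionId(id.0 & !(1u64 << 63)) }

fn flush(id: RegionId) -> Result<usize, String> {
    println!("flushing region {:?}", id);
    Ok(0)
}

// Flush the metadata region first, then the data region; a failure on the
// metadata flush aborts before the data region is touched, as in the fix above.
fn flush_metric_region(physical: RegionId) -> Result<usize, String> {
    flush(to_metadata_region_id(physical))?;
    flush(to_data_region_id(physical))
}

fn main() {
    flush_metric_region(RegionId(1024)).unwrap();
}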
|
fix
|
flush metric metadata region (#4852)
|
db6ceda5f0855ce7fbba16812abff0a2475d919c
|
2023-09-27 08:28:17
|
Yingwen
|
fix(mito): fix region drop task runs multiple times but never clean the dir (#2504)
| false
|
diff --git a/src/mito2/src/engine.rs b/src/mito2/src/engine.rs
index 987ecbf7e593..437e8e3da30d 100644
--- a/src/mito2/src/engine.rs
+++ b/src/mito2/src/engine.rs
@@ -230,7 +230,7 @@ impl MitoEngine {
mut config: MitoConfig,
log_store: Arc<S>,
object_store: ObjectStore,
- write_buffer_manager: crate::flush::WriteBufferManagerRef,
+ write_buffer_manager: Option<crate::flush::WriteBufferManagerRef>,
listener: Option<crate::engine::listener::EventListenerRef>,
) -> MitoEngine {
config.sanitize();
diff --git a/src/mito2/src/engine/drop_test.rs b/src/mito2/src/engine/drop_test.rs
index d42c37d05b7e..35c86c184371 100644
--- a/src/mito2/src/engine/drop_test.rs
+++ b/src/mito2/src/engine/drop_test.rs
@@ -12,19 +12,31 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::sync::Arc;
+use std::time::Duration;
+
+use api::v1::Rows;
use object_store::util::join_path;
use store_api::region_engine::RegionEngine;
use store_api::region_request::{RegionDropRequest, RegionRequest};
use store_api::storage::RegionId;
use crate::config::MitoConfig;
-use crate::test_util::{CreateRequestBuilder, TestEnv};
+use crate::engine::listener::DropListener;
+use crate::test_util::{
+ build_rows_for_key, flush_region, put_rows, rows_schema, CreateRequestBuilder, TestEnv,
+};
use crate::worker::DROPPING_MARKER_FILE;
#[tokio::test]
async fn test_engine_drop_region() {
+ common_telemetry::init_default_ut_logging();
+
let mut env = TestEnv::with_prefix("drop");
- let engine = env.create_engine(MitoConfig::default()).await;
+ let listener = Arc::new(DropListener::new(Duration::from_millis(100)));
+ let engine = env
+ .create_engine_with(MitoConfig::default(), None, Some(listener.clone()))
+ .await;
let region_id = RegionId::new(1, 1);
// It's okay to drop a region doesn't exist.
@@ -34,13 +46,14 @@ async fn test_engine_drop_region() {
.unwrap_err();
let request = CreateRequestBuilder::new().build();
+ let column_schemas = rows_schema(&request);
engine
.handle_request(region_id, RegionRequest::Create(request))
.await
.unwrap();
let region = engine.get_region(region_id).unwrap();
- let region_dir = region.access_layer.region_dir().to_owned();
+ let region_dir = region.access_layer.region_dir().to_string();
// no dropping marker file
assert!(!env
.get_object_store()
@@ -49,12 +62,12 @@ async fn test_engine_drop_region() {
.await
.unwrap());
- // create a parquet file
- env.get_object_store()
- .unwrap()
- .write(&join_path(&region_dir, "blabla.parquet"), vec![])
- .await
- .unwrap();
+ let rows = Rows {
+ schema: column_schemas.clone(),
+ rows: build_rows_for_key("a", 0, 2, 0),
+ };
+ put_rows(&engine, region_id, rows).await;
+ flush_region(&engine, region_id).await;
// drop the created region.
engine
@@ -62,11 +75,10 @@ async fn test_engine_drop_region() {
.await
.unwrap();
assert!(!engine.is_region_exists(region_id));
- // the drop marker is not removed yet
- assert!(env
- .get_object_store()
- .unwrap()
- .is_exist(&join_path(&region_dir, DROPPING_MARKER_FILE))
- .await
- .unwrap());
+
+ // Wait for drop task.
+ listener.wait().await;
+
+ let object_store = env.get_object_store().unwrap();
+ assert!(!object_store.is_exist(&region_dir).await.unwrap());
}
diff --git a/src/mito2/src/engine/flush_test.rs b/src/mito2/src/engine/flush_test.rs
index 636ce7df2ddd..84eb8b0eec44 100644
--- a/src/mito2/src/engine/flush_test.rs
+++ b/src/mito2/src/engine/flush_test.rs
@@ -76,7 +76,7 @@ async fn test_flush_engine() {
let engine = env
.create_engine_with(
MitoConfig::default(),
- write_buffer_manager.clone(),
+ Some(write_buffer_manager.clone()),
Some(listener.clone()),
)
.await;
@@ -135,7 +135,7 @@ async fn test_write_stall() {
let engine = env
.create_engine_with(
MitoConfig::default(),
- write_buffer_manager.clone(),
+ Some(write_buffer_manager.clone()),
Some(listener.clone()),
)
.await;
@@ -197,7 +197,11 @@ async fn test_flush_empty() {
let mut env = TestEnv::new();
let write_buffer_manager = Arc::new(MockWriteBufferManager::default());
let engine = env
- .create_engine_with(MitoConfig::default(), write_buffer_manager.clone(), None)
+ .create_engine_with(
+ MitoConfig::default(),
+ Some(write_buffer_manager.clone()),
+ None,
+ )
.await;
let region_id = RegionId::new(1, 1);
diff --git a/src/mito2/src/engine/listener.rs b/src/mito2/src/engine/listener.rs
index e591d2b5a576..f0b5def366e0 100644
--- a/src/mito2/src/engine/listener.rs
+++ b/src/mito2/src/engine/listener.rs
@@ -15,6 +15,7 @@
//! Engine event listener for tests.
use std::sync::Arc;
+use std::time::Duration;
use async_trait::async_trait;
use common_telemetry::info;
@@ -32,6 +33,19 @@ pub trait EventListener: Send + Sync {
/// Notifies the listener that the region starts to do flush.
async fn on_flush_begin(&self, region_id: RegionId);
+
+ /// Notifies the listener that the later drop task starts running.
+ /// Returns the gc interval if we want to override the default one.
+ fn on_later_drop_begin(&self, region_id: RegionId) -> Option<Duration> {
+ let _ = region_id;
+ None
+ }
+
+ /// Notifies the listener that the later drop task of the region is finished.
+ fn on_later_drop_end(&self, region_id: RegionId, removed: bool) {
+ let _ = region_id;
+ let _ = removed;
+ }
}
pub type EventListenerRef = Arc<dyn EventListener>;
@@ -102,7 +116,7 @@ impl EventListener for StallListener {
/// Listener to watch begin flush events.
///
-/// Crate a background thread to execute flush region, and the main thread calls `wait_truncate()`
+/// Creates a background thread to execute flush region, and the main thread calls `wait_truncate()`
/// to block and wait for `on_flush_region()`.
/// When the background thread calls `on_flush_begin()`, the main thread is notified to truncate
/// region, and background thread thread blocks and waits for `notify_flush()` to continue flushing.
@@ -150,3 +164,43 @@ impl EventListener for FlushTruncateListener {
self.notify_flush.notified().await;
}
}
+
+/// Listener on dropping.
+pub struct DropListener {
+ gc_duration: Duration,
+ notify: Notify,
+}
+
+impl DropListener {
+ /// Creates a new listener with specific `gc_duration`.
+ pub fn new(gc_duration: Duration) -> Self {
+ DropListener {
+ gc_duration,
+ notify: Notify::new(),
+ }
+ }
+
+ /// Waits until later drop task is done.
+ pub async fn wait(&self) {
+ self.notify.notified().await;
+ }
+}
+
+#[async_trait]
+impl EventListener for DropListener {
+ fn on_flush_success(&self, _region_id: RegionId) {}
+
+ fn on_write_stall(&self) {}
+
+ async fn on_flush_begin(&self, _region_id: RegionId) {}
+
+ fn on_later_drop_begin(&self, _region_id: RegionId) -> Option<Duration> {
+ Some(self.gc_duration)
+ }
+
+ fn on_later_drop_end(&self, _region_id: RegionId, removed: bool) {
+ // Asserts result.
+ assert!(removed);
+ self.notify.notify_one();
+ }
+}
diff --git a/src/mito2/src/engine/truncate_test.rs b/src/mito2/src/engine/truncate_test.rs
index c39f3a0e374b..03386369bd2d 100644
--- a/src/mito2/src/engine/truncate_test.rs
+++ b/src/mito2/src/engine/truncate_test.rs
@@ -270,7 +270,7 @@ async fn test_engine_truncate_during_flush() {
let engine = env
.create_engine_with(
MitoConfig::default(),
- write_buffer_manager.clone(),
+ Some(write_buffer_manager),
Some(listener.clone()),
)
.await;
diff --git a/src/mito2/src/region/version.rs b/src/mito2/src/region/version.rs
index c7d84fd913df..471a85c1e38f 100644
--- a/src/mito2/src/region/version.rs
+++ b/src/mito2/src/region/version.rs
@@ -115,10 +115,17 @@ impl VersionControl {
}
/// Mark all opened files as deleted and set the delete marker in [VersionControlData]
- pub(crate) fn mark_dropped(&self) {
+ pub(crate) fn mark_dropped(&self, memtable_builder: &MemtableBuilderRef) {
+ let version = self.current().version;
+ let new_mutable = memtable_builder.build(&version.metadata);
+
let mut data = self.data.write().unwrap();
data.is_dropped = true;
data.version.ssts.mark_all_deleted();
+ // Reset version so we can release the reference to memtables and SSTs.
+ let new_version =
+ Arc::new(VersionBuilder::new(version.metadata.clone(), new_mutable).build());
+ data.version = new_version;
}
/// Alter schema of the region.
diff --git a/src/mito2/src/sst/version.rs b/src/mito2/src/sst/version.rs
index b066af7ba3b6..1f56c8b940d8 100644
--- a/src/mito2/src/sst/version.rs
+++ b/src/mito2/src/sst/version.rs
@@ -75,9 +75,9 @@ impl SstVersion {
}
}
- /// Mark all SSTs in this version as deleted.
+ /// Marks all SSTs in this version as deleted.
pub(crate) fn mark_all_deleted(&self) {
- for level_meta in self.levels.iter() {
+ for level_meta in &self.levels {
for file_handle in level_meta.files.values() {
file_handle.mark_deleted();
}
diff --git a/src/mito2/src/test_util.rs b/src/mito2/src/test_util.rs
index 2ad545825de2..25994fa0e601 100644
--- a/src/mito2/src/test_util.rs
+++ b/src/mito2/src/test_util.rs
@@ -120,7 +120,7 @@ impl TestEnv {
pub async fn create_engine_with(
&mut self,
config: MitoConfig,
- manager: WriteBufferManagerRef,
+ manager: Option<WriteBufferManagerRef>,
listener: Option<EventListenerRef>,
) -> MitoEngine {
let (log_store, object_store) = self.create_log_and_object_store().await;
diff --git a/src/mito2/src/worker.rs b/src/mito2/src/worker.rs
index 4b119c69959f..6b3ea02c1896 100644
--- a/src/mito2/src/worker.rs
+++ b/src/mito2/src/worker.rs
@@ -28,6 +28,7 @@ use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
+use std::time::Duration;
use common_runtime::JoinHandle;
use common_telemetry::{error, info, warn};
@@ -203,11 +204,16 @@ impl WorkerGroup {
config: MitoConfig,
log_store: Arc<S>,
object_store: ObjectStore,
- write_buffer_manager: WriteBufferManagerRef,
+ write_buffer_manager: Option<WriteBufferManagerRef>,
listener: Option<crate::engine::listener::EventListenerRef>,
) -> WorkerGroup {
assert!(config.num_workers.is_power_of_two());
let config = Arc::new(config);
+ let write_buffer_manager = write_buffer_manager.unwrap_or_else(|| {
+ Arc::new(WriteBufferManagerImpl::new(
+ config.global_write_buffer_size.as_bytes() as usize,
+ ))
+ });
let scheduler = Arc::new(LocalScheduler::new(config.max_background_jobs));
let cache_manager = Arc::new(CacheManager::new(config.sst_meta_cache_size.as_bytes()));
@@ -608,6 +614,27 @@ impl WorkerListener {
// Avoid compiler warning.
let _ = region_id;
}
+
+ pub(crate) fn on_later_drop_begin(&self, region_id: RegionId) -> Option<Duration> {
+ #[cfg(test)]
+ if let Some(listener) = &self.listener {
+ return listener.on_later_drop_begin(region_id);
+ }
+ // Avoid compiler warning.
+ let _ = region_id;
+ None
+ }
+
+ /// On later drop task is finished.
+ pub(crate) fn on_later_drop_end(&self, region_id: RegionId, removed: bool) {
+ #[cfg(test)]
+ if let Some(listener) = &self.listener {
+ listener.on_later_drop_end(region_id, removed);
+ }
+ // Avoid compiler warning.
+ let _ = region_id;
+ let _ = removed;
+ }
}
#[cfg(test)]
diff --git a/src/mito2/src/worker/handle_drop.rs b/src/mito2/src/worker/handle_drop.rs
index f7aa5d15dc6a..0b0431180fd0 100644
--- a/src/mito2/src/worker/handle_drop.rs
+++ b/src/mito2/src/worker/handle_drop.rs
@@ -56,7 +56,7 @@ impl<S> RegionWorkerLoop<S> {
self.compaction_scheduler.on_region_dropped(region_id);
// mark region version as dropped
- region.version_control.mark_dropped();
+ region.version_control.mark_dropped(&self.memtable_builder);
info!(
"Region {} is dropped logically, but some files are not deleted yet",
region_id
@@ -66,8 +66,20 @@ impl<S> RegionWorkerLoop<S> {
let region_dir = region.access_layer.region_dir().to_owned();
let object_store = self.object_store.clone();
let dropping_regions = self.dropping_regions.clone();
+ let listener = self.listener.clone();
common_runtime::spawn_bg(async move {
- later_drop_task(region_id, region_dir, object_store, dropping_regions).await;
+ let gc_duration = listener
+ .on_later_drop_begin(region_id)
+ .unwrap_or(Duration::from_secs(GC_TASK_INTERVAL_SEC));
+ let removed = later_drop_task(
+ region_id,
+ region_dir,
+ object_store,
+ dropping_regions,
+ gc_duration,
+ )
+ .await;
+ listener.on_later_drop_end(region_id, removed);
});
Ok(Output::AffectedRows(0))
@@ -75,7 +87,7 @@ impl<S> RegionWorkerLoop<S> {
}
/// Background GC task to remove the entire region path once it find there is no
-/// parquet file left.
+/// parquet file left. Returns whether the path is removed.
///
/// This task will keep running until finished. Any resource captured by it will
/// not be released before then. Be sure to only pass weak reference if something
@@ -85,18 +97,24 @@ async fn later_drop_task(
region_path: String,
object_store: ObjectStore,
dropping_regions: RegionMapRef,
-) {
+ gc_duration: Duration,
+) -> bool {
for _ in 0..MAX_RETRY_TIMES {
- sleep(Duration::from_secs(GC_TASK_INTERVAL_SEC)).await;
+ sleep(gc_duration).await;
let result = remove_region_dir_once(&region_path, &object_store).await;
- if let Err(err) = result {
- warn!(
- "Error occurs during trying to GC region dir {}: {}",
- region_path, err
- );
- } else {
- dropping_regions.remove_region(region_id);
- info!("Region {} is dropped", region_path);
+ match result {
+ Err(err) => {
+ warn!(
+ "Error occurs during trying to GC region dir {}: {}",
+ region_path, err
+ );
+ }
+ Ok(true) => {
+ dropping_regions.remove_region(region_id);
+ info!("Region {} is dropped", region_path);
+ return true;
+ }
+ Ok(false) => (),
}
}
@@ -104,13 +122,16 @@ async fn later_drop_task(
"Failed to GC region dir {} after {} retries, giving up",
region_path, MAX_RETRY_TIMES
);
+
+ false
}
// TODO(ruihang): place the marker in a separate dir
+/// Removes region dir if there is no parquet files, returns whether the directory is removed.
pub(crate) async fn remove_region_dir_once(
region_path: &str,
object_store: &ObjectStore,
-) -> Result<()> {
+) -> Result<bool> {
// list all files under the given region path to check if there are un-deleted parquet files
let mut has_parquet_file = false;
// record all paths that neither ends with .parquet nor the marker file
@@ -143,6 +164,8 @@ pub(crate) async fn remove_region_dir_once(
.remove_all(region_path)
.await
.context(OpenDalSnafu)?;
+ Ok(true)
+ } else {
+ Ok(false)
}
- Ok(())
}
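The reworked background GC task above is essentially a bounded retry loop that now reports whether the directory was actually removed. A simplified synchronous sketch, not the mito2 code, of that structure:

use std::thread::sleep;
use std::time::Duration;

const MAX_RETRY_TIMES: usize = 3;

// Pretend removal: Ok(true) once nothing blocks deletion, Ok(false) while files
// are still present, Err on I/O trouble. Here it succeeds on the second attempt.
fn remove_dir_once(attempt: usize) -> Result<bool, String> {
    Ok(attempt >= 1)
}

fn later_drop_task(gc_interval: Duration) -> bool {
    for attempt in 0..MAX_RETRY_TIMES {
        sleep(gc_interval);
        match remove_dir_once(attempt) {
            Err(err) => println!("gc attempt {attempt} failed: {err}"),
            Ok(true) => return true,   // directory gone, report success to the caller
            Ok(false) => (),           // files still present, retry after the next interval
        }
    }
    false // give up after MAX_RETRY_TIMES; the caller (or a test listener) can observe this
}

fn main() {
    assert!(later_drop_task(Duration::from_millis(1)));
}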
|
fix
|
fix region drop task runs multiple times but never clean the dir (#2504)
|
196c06db145f36c4a3d750768aba4883e1499db6
|
2023-12-26 13:07:50
|
LFC
|
feat: make logging to stdout configurable (#3003)
| false
|
diff --git a/config/standalone.example.toml b/config/standalone.example.toml
index a7a7ea0aedf6..77f4b4c3cdb5 100644
--- a/config/standalone.example.toml
+++ b/config/standalone.example.toml
@@ -218,6 +218,8 @@ parallel_scan_channel_size = 32
# otlp_endpoint = "localhost:4317"
# The percentage of tracing will be sampled and exported. Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1. ratio > 1 are treated as 1. Fractions < 0 are treated as 0
# tracing_sample_ratio = 1.0
+# Whether to append logs to stdout. Defaults to true.
+# append_stdout = true
# Standalone export the metrics generated by itself
# encoded to Prometheus remote-write format
diff --git a/src/common/telemetry/src/logging.rs b/src/common/telemetry/src/logging.rs
index f825d8f3835b..0b7c3ba1b818 100644
--- a/src/common/telemetry/src/logging.rs
+++ b/src/common/telemetry/src/logging.rs
@@ -43,6 +43,7 @@ pub struct LoggingOptions {
pub enable_otlp_tracing: bool,
pub otlp_endpoint: Option<String>,
pub tracing_sample_ratio: Option<f64>,
+ pub append_stdout: bool,
}
impl PartialEq for LoggingOptions {
@@ -52,6 +53,7 @@ impl PartialEq for LoggingOptions {
&& self.enable_otlp_tracing == other.enable_otlp_tracing
&& self.otlp_endpoint == other.otlp_endpoint
&& self.tracing_sample_ratio == other.tracing_sample_ratio
+ && self.append_stdout == other.append_stdout
}
}
@@ -65,6 +67,7 @@ impl Default for LoggingOptions {
enable_otlp_tracing: false,
otlp_endpoint: None,
tracing_sample_ratio: None,
+ append_stdout: true,
}
}
}
@@ -129,10 +132,14 @@ pub fn init_global_logging(
// Enable log compatible layer to convert log record to tracing span.
LogTracer::init().expect("log tracer must be valid");
- // Stdout layer.
- let (stdout_writer, stdout_guard) = tracing_appender::non_blocking(std::io::stdout());
- let stdout_logging_layer = Layer::new().with_writer(stdout_writer);
- guards.push(stdout_guard);
+ let stdout_logging_layer = if opts.append_stdout {
+ let (stdout_writer, stdout_guard) = tracing_appender::non_blocking(std::io::stdout());
+ guards.push(stdout_guard);
+
+ Some(Layer::new().with_writer(stdout_writer))
+ } else {
+ None
+ };
// JSON log layer.
let rolling_appender = RollingFileAppender::new(Rotation::HOURLY, dir, app_name);
@@ -184,7 +191,7 @@ pub fn init_global_logging(
None
};
- let stdout_logging_layer = stdout_logging_layer.with_filter(filter.clone());
+ let stdout_logging_layer = stdout_logging_layer.map(|x| x.with_filter(filter.clone()));
let file_logging_layer = file_logging_layer.with_filter(filter);
diff --git a/src/operator/src/error.rs b/src/operator/src/error.rs
index e96f1aaa21fe..52956e8055f9 100644
--- a/src/operator/src/error.rs
+++ b/src/operator/src/error.rs
@@ -483,6 +483,12 @@ pub enum Error {
location: Location,
source: query::error::Error,
},
+
+ #[snafu(display("Invalid table name: {}", table_name))]
+ InvalidTableName {
+ table_name: String,
+ location: Location,
+ },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -507,7 +513,8 @@ impl ErrorExt for Error {
| Error::InvalidPartitionColumns { .. }
| Error::PrepareFileTable { .. }
| Error::InferFileTableSchema { .. }
- | Error::SchemaIncompatible { .. } => StatusCode::InvalidArguments,
+ | Error::SchemaIncompatible { .. }
+ | Error::InvalidTableName { .. } => StatusCode::InvalidArguments,
Error::TableAlreadyExists { .. } => StatusCode::TableAlreadyExists,
diff --git a/src/operator/src/statement/ddl.rs b/src/operator/src/statement/ddl.rs
index 620e3de6445d..43fdf23a4f5b 100644
--- a/src/operator/src/statement/ddl.rs
+++ b/src/operator/src/statement/ddl.rs
@@ -50,8 +50,8 @@ use table::TableRef;
use super::StatementExecutor;
use crate::error::{
self, AlterExprToRequestSnafu, CatalogSnafu, ColumnDataTypeSnafu, ColumnNotFoundSnafu,
- DeserializePartitionSnafu, InvalidPartitionColumnsSnafu, ParseSqlSnafu, Result,
- SchemaNotFoundSnafu, TableMetadataManagerSnafu, TableNotFoundSnafu,
+ DeserializePartitionSnafu, InvalidPartitionColumnsSnafu, InvalidTableNameSnafu, ParseSqlSnafu,
+ Result, SchemaNotFoundSnafu, TableMetadataManagerSnafu, TableNotFoundSnafu,
UnrecognizedTableOptionSnafu,
};
use crate::expr_factory;
@@ -131,8 +131,8 @@ impl StatementExecutor {
ensure!(
NAME_PATTERN_REG.is_match(&create_table.table_name),
- error::UnexpectedSnafu {
- violated: format!("Invalid table name: {}", create_table.table_name)
+ InvalidTableNameSnafu {
+ table_name: create_table.table_name.clone(),
}
);
diff --git a/src/servers/src/error.rs b/src/servers/src/error.rs
index d73fcbe91397..70b4401c9a73 100644
--- a/src/servers/src/error.rs
+++ b/src/servers/src/error.rs
@@ -24,7 +24,7 @@ use catalog;
use common_error::ext::{BoxedError, ErrorExt};
use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
-use common_telemetry::logging;
+use common_telemetry::{debug, error};
use datatypes::prelude::ConcreteDataType;
use query::parser::PromQuery;
use serde_json::json;
@@ -620,7 +620,11 @@ impl IntoResponse for Error {
| Error::InvalidQuery { .. }
| Error::TimePrecision { .. } => HttpStatusCode::BAD_REQUEST,
_ => {
- logging::error!(self; "Failed to handle HTTP request");
+ if self.status_code().should_log_error() {
+ error!(self; "Failed to handle HTTP request: ");
+ } else {
+ debug!("Failed to handle HTTP request: {self}");
+ }
HttpStatusCode::INTERNAL_SERVER_ERROR
}
diff --git a/tests-integration/tests/http.rs b/tests-integration/tests/http.rs
index 0decb3821951..9341ba5f09ce 100644
--- a/tests-integration/tests/http.rs
+++ b/tests-integration/tests/http.rs
@@ -747,6 +747,7 @@ enable = true
[frontend.logging]
enable_otlp_tracing = false
+append_stdout = true
[frontend.datanode.client]
timeout = "10s"
@@ -815,6 +816,7 @@ parallel_scan_channel_size = 32
[datanode.logging]
enable_otlp_tracing = false
+append_stdout = true
[datanode.export_metrics]
enable = false
@@ -825,6 +827,7 @@ write_interval = "30s"
[logging]
enable_otlp_tracing = false
+append_stdout = true
[wal_meta]
provider = "raft_engine""#,
diff --git a/tests/cases/standalone/common/create/create.result b/tests/cases/standalone/common/create/create.result
index 08e4b658de2b..436cbfb393db 100644
--- a/tests/cases/standalone/common/create/create.result
+++ b/tests/cases/standalone/common/create/create.result
@@ -52,7 +52,7 @@ Error: 4000(TableAlreadyExists), Table already exists: `greptime.public.test2`
CREATE TABLE 'N.~' (i TIMESTAMP TIME INDEX);
-Error: 1002(Unexpected), Unexpected, violated: Invalid table name: N.~
+Error: 1004(InvalidArguments), Invalid table name: N.~
DESC TABLE integers;
diff --git a/tests/runner/src/env.rs b/tests/runner/src/env.rs
index b5218979821a..1bd7ad36496a 100644
--- a/tests/runner/src/env.rs
+++ b/tests/runner/src/env.rs
@@ -190,7 +190,7 @@ impl Env {
"start".to_string(),
"-c".to_string(),
self.generate_config_file(subcommand, db_ctx),
- "--http-addr=127.0.0.1:5001".to_string(),
+ "--http-addr=127.0.0.1:5002".to_string(),
];
(args, SERVER_ADDR.to_string())
}
@@ -213,7 +213,7 @@ impl Env {
"true".to_string(),
"--enable-region-failover".to_string(),
"false".to_string(),
- "--http-addr=127.0.0.1:5001".to_string(),
+ "--http-addr=127.0.0.1:5002".to_string(),
];
(args, METASRV_ADDR.to_string())
}
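The core pattern in the logging change is turning an always-built layer into an Option and mapping the filter over it. A tiny sketch with stand-in types (not the tracing-subscriber API):

struct Layer { name: String }

impl Layer {
    fn with_filter(self, filter: &str) -> Layer {
        Layer { name: format!("{} [{}]", self.name, filter) }
    }
}

struct Options { append_stdout: bool }

// Only build the stdout layer (and, in the real code, keep its writer guard)
// when the option is enabled; otherwise there is simply no layer to register.
fn build_stdout_layer(opts: &Options) -> Option<Layer> {
    if opts.append_stdout {
        Some(Layer { name: "stdout".to_string() })
    } else {
        None
    }
}

fn main() {
    let disabled = build_stdout_layer(&Options { append_stdout: false })
        .map(|l| l.with_filter("info"));
    assert!(disabled.is_none());

    // Mirrors `stdout_logging_layer.map(|x| x.with_filter(filter.clone()))`.
    let enabled = build_stdout_layer(&Options { append_stdout: true })
        .map(|l| l.with_filter("info"));
    println!("{}", enabled.unwrap().name);
}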
|
feat
|
make logging to stdout configurable (#3003)
|
58f19b2a33ca16e537af9688c5601c210e4f142f
|
2022-04-25 14:33:05
|
evenyag
|
refactor(datatypes): pub use some types
| false
|
diff --git a/src/datatypes/src/prelude.rs b/src/datatypes/src/prelude.rs
index 1aa4775354be..de9b056a3915 100644
--- a/src/datatypes/src/prelude.rs
+++ b/src/datatypes/src/prelude.rs
@@ -1,3 +1,5 @@
pub use crate::data_type::{DataType, DataTypeRef};
+pub use crate::scalars::{ScalarVector, ScalarVectorBuilder};
pub use crate::type_id::LogicalTypeId;
pub use crate::value::Value;
+pub use crate::vectors::{Vector, VectorRef};
diff --git a/src/datatypes/src/types.rs b/src/datatypes/src/types.rs
index 02fa6bc718f6..61982e99f970 100644
--- a/src/datatypes/src/types.rs
+++ b/src/datatypes/src/types.rs
@@ -1,3 +1,7 @@
-pub mod binary_type;
-pub mod primitive_traits;
-pub mod primitive_type;
+mod binary_type;
+mod primitive_traits;
+mod primitive_type;
+
+pub use binary_type::BinaryType;
+pub use primitive_traits::Primitive;
+pub use primitive_type::{DataTypeBuilder, PrimitiveType};
diff --git a/src/datatypes/src/vectors.rs b/src/datatypes/src/vectors.rs
index c90106c2ea4d..1ea36c32a222 100644
--- a/src/datatypes/src/vectors.rs
+++ b/src/datatypes/src/vectors.rs
@@ -5,6 +5,8 @@ use std::any::Any;
use std::sync::Arc;
use arrow2::array::ArrayRef;
+pub use binary::*;
+pub use primitive::*;
use crate::data_type::DataTypeRef;
diff --git a/src/datatypes/src/vectors/binary.rs b/src/datatypes/src/vectors/binary.rs
index bb6d5dc77550..60b49dd827b1 100644
--- a/src/datatypes/src/vectors/binary.rs
+++ b/src/datatypes/src/vectors/binary.rs
@@ -7,7 +7,7 @@ use arrow2::bitmap::utils::ZipValidity;
use crate::data_type::DataTypeRef;
use crate::scalars::{ScalarVector, ScalarVectorBuilder};
-use crate::types::binary_type::BinaryType;
+use crate::types::BinaryType;
use crate::vectors::Vector;
use crate::{LargeBinaryArray, MutableLargeBinaryArray};
diff --git a/src/datatypes/src/vectors/primitive.rs b/src/datatypes/src/vectors/primitive.rs
index ac1e99447ddf..95d126684353 100644
--- a/src/datatypes/src/vectors/primitive.rs
+++ b/src/datatypes/src/vectors/primitive.rs
@@ -2,14 +2,12 @@ use std::any::Any;
use std::slice::Iter;
use std::sync::Arc;
-use arrow2::array::ArrayRef;
-use arrow2::array::{MutablePrimitiveArray, PrimitiveArray};
+use arrow2::array::{ArrayRef, MutablePrimitiveArray, PrimitiveArray};
use arrow2::bitmap::utils::ZipValidity;
use crate::data_type::DataTypeRef;
use crate::scalars::{ScalarVector, ScalarVectorBuilder};
-use crate::types::primitive_traits::Primitive;
-use crate::types::primitive_type::DataTypeBuilder;
+use crate::types::{DataTypeBuilder, Primitive};
use crate::vectors::Vector;
/// Vector for primitive data types.
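For context, the point of these re-exports is that downstream code can pull common traits from one short path instead of reaching into private submodules. A toy module sketch, with illustrative names rather than the datatypes crate:

mod types {
    pub trait Primitive { fn describe() -> &'static str; }
    impl Primitive for i32 { fn describe() -> &'static str { "i32" } }
}

mod prelude {
    // Re-export so callers write `use crate::prelude::Primitive` while the
    // `types` module itself stays private.
    pub use super::types::Primitive;
}

use crate::prelude::Primitive;

fn main() {
    println!("{}", <i32 as Primitive>::describe());
}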
|
refactor
|
pub use some types
|
40cf63d3c407cf4daae9c6ac0599594484d09972
|
2025-03-04 09:24:07
|
Ruihang Xia
|
refactor: rename table function to admin function (#5636)
| false
|
diff --git a/src/common/function/src/table.rs b/src/common/function/src/admin.rs
similarity index 96%
rename from src/common/function/src/table.rs
rename to src/common/function/src/admin.rs
index 91ee6dd178fd..b2f916d87676 100644
--- a/src/common/function/src/table.rs
+++ b/src/common/function/src/admin.rs
@@ -26,9 +26,9 @@ use crate::flush_flow::FlushFlowFunction;
use crate::function_registry::FunctionRegistry;
/// Table functions
-pub(crate) struct TableFunction;
+pub(crate) struct AdminFunction;
-impl TableFunction {
+impl AdminFunction {
/// Register all table functions to [`FunctionRegistry`].
pub fn register(registry: &FunctionRegistry) {
registry.register_async(Arc::new(MigrateRegionFunction));
diff --git a/src/common/function/src/table/flush_compact_region.rs b/src/common/function/src/admin/flush_compact_region.rs
similarity index 100%
rename from src/common/function/src/table/flush_compact_region.rs
rename to src/common/function/src/admin/flush_compact_region.rs
diff --git a/src/common/function/src/table/flush_compact_table.rs b/src/common/function/src/admin/flush_compact_table.rs
similarity index 100%
rename from src/common/function/src/table/flush_compact_table.rs
rename to src/common/function/src/admin/flush_compact_table.rs
diff --git a/src/common/function/src/table/migrate_region.rs b/src/common/function/src/admin/migrate_region.rs
similarity index 100%
rename from src/common/function/src/table/migrate_region.rs
rename to src/common/function/src/admin/migrate_region.rs
diff --git a/src/common/function/src/function_registry.rs b/src/common/function/src/function_registry.rs
index 1761f6ef5045..f786623ac05b 100644
--- a/src/common/function/src/function_registry.rs
+++ b/src/common/function/src/function_registry.rs
@@ -18,6 +18,7 @@ use std::sync::{Arc, RwLock};
use once_cell::sync::Lazy;
+use crate::admin::AdminFunction;
use crate::function::{AsyncFunctionRef, FunctionRef};
use crate::scalars::aggregate::{AggregateFunctionMetaRef, AggregateFunctions};
use crate::scalars::date::DateFunction;
@@ -30,7 +31,6 @@ use crate::scalars::timestamp::TimestampFunction;
use crate::scalars::uddsketch_calc::UddSketchCalcFunction;
use crate::scalars::vector::VectorFunction;
use crate::system::SystemFunction;
-use crate::table::TableFunction;
#[derive(Default)]
pub struct FunctionRegistry {
@@ -118,7 +118,7 @@ pub static FUNCTION_REGISTRY: Lazy<Arc<FunctionRegistry>> = Lazy::new(|| {
// System and administration functions
SystemFunction::register(&function_registry);
- TableFunction::register(&function_registry);
+ AdminFunction::register(&function_registry);
// Json related functions
JsonFunction::register(&function_registry);
diff --git a/src/common/function/src/lib.rs b/src/common/function/src/lib.rs
index a553e8924b34..ea5e20ee3c8e 100644
--- a/src/common/function/src/lib.rs
+++ b/src/common/function/src/lib.rs
@@ -15,11 +15,11 @@
#![feature(let_chains)]
#![feature(try_blocks)]
+mod admin;
mod flush_flow;
mod macros;
pub mod scalars;
mod system;
-mod table;
pub mod aggr;
pub mod function;
|
refactor
|
rename table function to admin function (#5636)
|
64941d848e53cbd9b931a26458b029b120d48614
|
2024-04-16 13:48:38
|
Weny Xu
|
fix(alter_table): ignore request outdated error (#3715)
| false
|
diff --git a/src/common/meta/src/ddl/alter_table.rs b/src/common/meta/src/ddl/alter_table.rs
index 5ec4fb17a558..f286db47cd7a 100644
--- a/src/common/meta/src/ddl/alter_table.rs
+++ b/src/common/meta/src/ddl/alter_table.rs
@@ -22,6 +22,8 @@ use std::vec;
use api::v1::alter_expr::Kind;
use api::v1::RenameTable;
use async_trait::async_trait;
+use common_error::ext::ErrorExt;
+use common_error::status_code::StatusCode;
use common_procedure::error::{FromJsonSnafu, Result as ProcedureResult, ToJsonSnafu};
use common_procedure::{
Context as ProcedureContext, Error as ProcedureError, LockKey, Procedure, Status, StringKey,
@@ -115,10 +117,16 @@ impl AlterTableProcedure {
let requester = requester.clone();
alter_region_tasks.push(async move {
- requester
- .handle(request)
- .await
- .map_err(add_peer_context_if_needed(datanode))
+ if let Err(err) = requester.handle(request).await {
+ if err.status_code() != StatusCode::RequestOutdated {
+ // Treat request outdated as success.
+ // The engine will throw this code when the schema version not match.
+ // As this procedure has locked the table, the only reason for this error
+ // is procedure is succeeded before and is retrying.
+ return Err(add_peer_context_if_needed(datanode)(err));
+ }
+ }
+ Ok(())
});
}
}
diff --git a/src/common/meta/src/ddl/test_util/datanode_handler.rs b/src/common/meta/src/ddl/test_util/datanode_handler.rs
index 530705f7d31b..69d233dd9844 100644
--- a/src/common/meta/src/ddl/test_util/datanode_handler.rs
+++ b/src/common/meta/src/ddl/test_util/datanode_handler.rs
@@ -13,9 +13,11 @@
// limitations under the License.
use api::v1::region::{QueryRequest, RegionRequest};
-use common_error::ext::BoxedError;
+use common_error::ext::{BoxedError, ErrorExt, StackError};
+use common_error::status_code::StatusCode;
use common_recordbatch::SendableRecordBatchStream;
use common_telemetry::debug;
+use snafu::{ResultExt, Snafu};
use tokio::sync::mpsc;
use crate::datanode_manager::HandleResponse;
@@ -106,6 +108,47 @@ impl MockDatanodeHandler for UnexpectedErrorDatanodeHandler {
}
}
+#[derive(Clone)]
+pub struct RequestOutdatedErrorDatanodeHandler;
+
+#[derive(Debug, Snafu)]
+#[snafu(display("A mock RequestOutdated error"))]
+struct MockRequestOutdatedError;
+
+impl StackError for MockRequestOutdatedError {
+ fn debug_fmt(&self, _: usize, _: &mut Vec<String>) {}
+
+ fn next(&self) -> Option<&dyn StackError> {
+ None
+ }
+}
+
+impl ErrorExt for MockRequestOutdatedError {
+ fn as_any(&self) -> &dyn std::any::Any {
+ self
+ }
+
+ fn status_code(&self) -> StatusCode {
+ StatusCode::RequestOutdated
+ }
+}
+
+#[async_trait::async_trait]
+impl MockDatanodeHandler for RequestOutdatedErrorDatanodeHandler {
+ async fn handle(&self, peer: &Peer, request: RegionRequest) -> Result<HandleResponse> {
+ debug!("Returning mock error for request: {request:?}, peer: {peer:?}");
+ Err(BoxedError::new(MockRequestOutdatedError)).context(error::ExternalSnafu)
+ }
+
+ async fn handle_query(
+ &self,
+ _peer: &Peer,
+ _request: QueryRequest,
+ ) -> Result<SendableRecordBatchStream> {
+ unreachable!()
+ }
+}
+
#[derive(Clone)]
pub struct NaiveDatanodeHandler;
diff --git a/src/common/meta/src/ddl/tests/alter_table.rs b/src/common/meta/src/ddl/tests/alter_table.rs
index 1a85ec576640..2342aaf44e0d 100644
--- a/src/common/meta/src/ddl/tests/alter_table.rs
+++ b/src/common/meta/src/ddl/tests/alter_table.rs
@@ -31,7 +31,9 @@ use tokio::sync::mpsc::{self};
use crate::ddl::alter_table::AlterTableProcedure;
use crate::ddl::test_util::alter_table::TestAlterTableExprBuilder;
use crate::ddl::test_util::create_table::test_create_table_task;
-use crate::ddl::test_util::datanode_handler::DatanodeWatcher;
+use crate::ddl::test_util::datanode_handler::{
+ DatanodeWatcher, RequestOutdatedErrorDatanodeHandler,
+};
use crate::key::table_name::TableNameKey;
use crate::key::table_route::TableRouteValue;
use crate::peer::Peer;
@@ -171,6 +173,67 @@ async fn test_on_submit_alter_request() {
check(peer, request, 3, RegionId::new(table_id, 3));
}
+#[tokio::test]
+async fn test_on_submit_alter_request_with_outdated_request() {
+ let datanode_manager = Arc::new(MockDatanodeManager::new(
+ RequestOutdatedErrorDatanodeHandler,
+ ));
+ let ddl_context = new_ddl_context(datanode_manager);
+ let cluster_id = 1;
+ let table_id = 1024;
+ let table_name = "foo";
+ let task = test_create_table_task(table_name, table_id);
+ // Puts a value to table name key.
+ ddl_context
+ .table_metadata_manager
+ .create_table_metadata(
+ task.table_info.clone(),
+ TableRouteValue::physical(vec![
+ RegionRoute {
+ region: Region::new_test(RegionId::new(table_id, 1)),
+ leader_peer: Some(Peer::empty(1)),
+ follower_peers: vec![Peer::empty(5)],
+ leader_status: None,
+ leader_down_since: None,
+ },
+ RegionRoute {
+ region: Region::new_test(RegionId::new(table_id, 2)),
+ leader_peer: Some(Peer::empty(2)),
+ follower_peers: vec![Peer::empty(4)],
+ leader_status: None,
+ leader_down_since: None,
+ },
+ RegionRoute {
+ region: Region::new_test(RegionId::new(table_id, 3)),
+ leader_peer: Some(Peer::empty(3)),
+ follower_peers: vec![],
+ leader_status: None,
+ leader_down_since: None,
+ },
+ ]),
+ HashMap::new(),
+ )
+ .await
+ .unwrap();
+
+ let alter_table_task = AlterTableTask {
+ alter_table: AlterExpr {
+ catalog_name: DEFAULT_CATALOG_NAME.to_string(),
+ schema_name: DEFAULT_SCHEMA_NAME.to_string(),
+ table_name: table_name.to_string(),
+ kind: Some(Kind::DropColumns(DropColumns {
+ drop_columns: vec![DropColumn {
+ name: "my_field_column".to_string(),
+ }],
+ })),
+ },
+ };
+ let mut procedure =
+ AlterTableProcedure::new(cluster_id, table_id, alter_table_task, ddl_context).unwrap();
+ procedure.on_prepare().await.unwrap();
+ procedure.submit_alter_region_requests().await.unwrap();
+}
+
#[tokio::test]
async fn test_on_update_metadata_rename() {
let datanode_manager = Arc::new(MockDatanodeManager::new(()));
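The fix above is an instance of a common pattern: swallow one well-understood error code that can only come from a retry of already-applied work, and propagate everything else. A stripped-down sketch with a hypothetical error type, not common-meta's:

#[derive(Debug, Clone, Copy, PartialEq)]
enum StatusCode { Ok, RequestOutdated, Internal }

#[derive(Debug)]
struct Error { code: StatusCode }

fn send_alter_request(simulate: StatusCode) -> Result<(), Error> {
    match simulate {
        StatusCode::Ok => Ok(()),
        code => Err(Error { code }),
    }
}

// Treat RequestOutdated as success: under the table lock the only way to see it
// is a retry of a request that already succeeded. Everything else still fails.
fn submit_alter(simulate: StatusCode) -> Result<(), Error> {
    if let Err(err) = send_alter_request(simulate) {
        if err.code != StatusCode::RequestOutdated {
            return Err(err);
        }
    }
    Ok(())
}

fn main() {
    assert!(submit_alter(StatusCode::Ok).is_ok());
    assert!(submit_alter(StatusCode::RequestOutdated).is_ok());
    assert!(submit_alter(StatusCode::Internal).is_err());
}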
|
fix
|
ignore request outdated error (#3715)
|
0acc6b0354cb05f574f48ca20f9d16422fd7ac2b
|
2025-03-10 09:55:38
|
Yingwen
|
fix: correct stalled count (#5678)
| false
|
diff --git a/src/mito2/src/worker.rs b/src/mito2/src/worker.rs
index 208c4c647998..c03fa5231f21 100644
--- a/src/mito2/src/worker.rs
+++ b/src/mito2/src/worker.rs
@@ -583,6 +583,8 @@ type RequestBuffer = Vec<WorkerRequest>;
#[derive(Default)]
pub(crate) struct StalledRequests {
/// Stalled requests.
+ /// Remember to use `StalledRequests::stalled_count()` to get the total number of stalled requests
+ /// instead of `StalledRequests::requests.len()`.
///
/// Key: RegionId
/// Value: (estimated size, stalled requests)
@@ -617,6 +619,11 @@ impl StalledRequests {
vec![]
}
}
+
+ /// Returns the total number of all stalled requests.
+ pub(crate) fn stalled_count(&self) -> usize {
+ self.requests.values().map(|reqs| reqs.1.len()).sum()
+ }
}
/// Background worker loop to handle requests.
diff --git a/src/mito2/src/worker/handle_write.rs b/src/mito2/src/worker/handle_write.rs
index efc81df57bf7..b3e1aa949ac5 100644
--- a/src/mito2/src/worker/handle_write.rs
+++ b/src/mito2/src/worker/handle_write.rs
@@ -147,7 +147,7 @@ impl<S: LogStore> RegionWorkerLoop<S> {
pub(crate) async fn handle_stalled_requests(&mut self) {
// Handle stalled requests.
let stalled = std::mem::take(&mut self.stalled_requests);
- self.stalled_count.sub(stalled.requests.len() as i64);
+ self.stalled_count.sub(stalled.stalled_count() as i64);
// We already stalled these requests, don't stall them again.
for (_, (_, mut requests)) in stalled.requests {
self.handle_write_requests(&mut requests, false).await;
@@ -157,7 +157,7 @@ impl<S: LogStore> RegionWorkerLoop<S> {
/// Rejects all stalled requests.
pub(crate) fn reject_stalled_requests(&mut self) {
let stalled = std::mem::take(&mut self.stalled_requests);
- self.stalled_count.sub(stalled.requests.len() as i64);
+ self.stalled_count.sub(stalled.stalled_count() as i64);
for (_, (_, mut requests)) in stalled.requests {
reject_write_requests(&mut requests);
}
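The bug and fix above come down to `map.len()` counting regions rather than queued requests. A tiny self-contained illustration, mirroring the (estimated size, requests) map shape but otherwise hypothetical:

use std::collections::HashMap;

// Key: region id, value: (estimated size, queued requests), as in StalledRequests.
type Stalled = HashMap<u32, (usize, Vec<&'static str>)>;

fn stalled_count(stalled: &Stalled) -> usize {
    stalled.values().map(|(_, reqs)| reqs.len()).sum()
}

fn main() {
    let mut stalled: Stalled = HashMap::new();
    stalled.insert(1, (64, vec!["put", "put", "delete"]));
    stalled.insert(2, (32, vec!["put"]));

    // `len()` of the map says 2 (regions), but 4 requests are actually stalled,
    // which is the amount the gauge should be decremented by.
    assert_eq!(stalled.len(), 2);
    assert_eq!(stalled_count(&stalled), 4);
}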
|
fix
|
correct stalled count (#5678)
|
922b1a9b66a8f09ed25111e42e876917acc4cffb
|
2024-03-27 08:51:22
|
Yingwen
|
feat: Implement append mode for a region (#3558)
| false
|
diff --git a/src/mito2/src/compaction/twcs.rs b/src/mito2/src/compaction/twcs.rs
index fbb07b71e1c7..82bceabd56a8 100644
--- a/src/mito2/src/compaction/twcs.rs
+++ b/src/mito2/src/compaction/twcs.rs
@@ -188,6 +188,7 @@ impl Picker for TwcsPicker {
cache_manager,
storage: current_version.options.storage.clone(),
index_options: current_version.options.index_options.clone(),
+ append_mode: current_version.options.append_mode,
};
Some(Box::new(task))
}
@@ -255,6 +256,8 @@ pub(crate) struct TwcsCompactionTask {
pub(crate) storage: Option<String>,
/// Index options of the region.
pub(crate) index_options: IndexOptions,
+ /// The region is using append mode.
+ pub(crate) append_mode: bool,
}
impl Debug for TwcsCompactionTask {
@@ -264,6 +267,7 @@ impl Debug for TwcsCompactionTask {
.field("outputs", &self.outputs)
.field("expired_ssts", &self.expired_ssts)
.field("compaction_time_window", &self.compaction_time_window)
+ .field("append_mode", &self.append_mode)
.finish()
}
}
@@ -332,9 +336,15 @@ impl TwcsCompactionTask {
let cache_manager = self.cache_manager.clone();
let storage = self.storage.clone();
let index_options = self.index_options.clone();
+ let append_mode = self.append_mode;
futs.push(async move {
- let reader =
- build_sst_reader(metadata.clone(), sst_layer.clone(), &output.inputs).await?;
+ let reader = build_sst_reader(
+ metadata.clone(),
+ sst_layer.clone(),
+ &output.inputs,
+ append_mode,
+ )
+ .await?;
let file_meta_opt = sst_layer
.write_sst(
SstWriteRequest {
@@ -565,9 +575,11 @@ async fn build_sst_reader(
metadata: RegionMetadataRef,
sst_layer: AccessLayerRef,
inputs: &[FileHandle],
+ append_mode: bool,
) -> error::Result<BoxedBatchReader> {
SeqScan::new(sst_layer, ProjectionMapper::all(&metadata)?)
.with_files(inputs.to_vec())
+ .with_append_mode(append_mode)
// We ignore file not found error during compaction.
.with_ignore_file_not_found(true)
.build_reader()
diff --git a/src/mito2/src/engine.rs b/src/mito2/src/engine.rs
index 449f87dcf44a..f244a1edc9e6 100644
--- a/src/mito2/src/engine.rs
+++ b/src/mito2/src/engine.rs
@@ -17,6 +17,8 @@
#[cfg(test)]
mod alter_test;
#[cfg(test)]
+mod append_mode_test;
+#[cfg(test)]
mod basic_test;
#[cfg(test)]
mod catchup_test;
diff --git a/src/mito2/src/engine/append_mode_test.rs b/src/mito2/src/engine/append_mode_test.rs
new file mode 100644
index 000000000000..bb2a4e017fa5
--- /dev/null
+++ b/src/mito2/src/engine/append_mode_test.rs
@@ -0,0 +1,153 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Tests for append mode.
+
+use api::v1::Rows;
+use common_recordbatch::RecordBatches;
+use store_api::region_engine::RegionEngine;
+use store_api::region_request::{RegionCompactRequest, RegionRequest};
+use store_api::storage::{RegionId, ScanRequest};
+
+use crate::config::MitoConfig;
+use crate::test_util::{
+ build_rows, build_rows_for_key, flush_region, put_rows, rows_schema, CreateRequestBuilder,
+ TestEnv,
+};
+
+#[tokio::test]
+async fn test_append_mode_write_query() {
+ common_telemetry::init_default_ut_logging();
+
+ let mut env = TestEnv::new();
+ let engine = env.create_engine(MitoConfig::default()).await;
+
+ let region_id = RegionId::new(1, 1);
+ let request = CreateRequestBuilder::new()
+ .insert_option("append_mode", "true")
+ .build();
+
+ let column_schemas = rows_schema(&request);
+ engine
+ .handle_request(region_id, RegionRequest::Create(request))
+ .await
+ .unwrap();
+
+ // rows 1, 2
+ let rows = build_rows(1, 3);
+ let rows = Rows {
+ schema: column_schemas.clone(),
+ rows,
+ };
+ put_rows(&engine, region_id, rows).await;
+
+ let mut rows = build_rows(0, 2);
+ rows.append(&mut build_rows(1, 2));
+ // rows 0, 1, 1
+ let rows = Rows {
+ schema: column_schemas,
+ rows,
+ };
+ put_rows(&engine, region_id, rows).await;
+
+ let request = ScanRequest::default();
+ let stream = engine.handle_query(region_id, request).await.unwrap();
+ let batches = RecordBatches::try_collect(stream).await.unwrap();
+ let expected = "\
++-------+---------+---------------------+
+| tag_0 | field_0 | ts |
++-------+---------+---------------------+
+| 0 | 0.0 | 1970-01-01T00:00:00 |
+| 1 | 1.0 | 1970-01-01T00:00:01 |
+| 1 | 1.0 | 1970-01-01T00:00:01 |
+| 1 | 1.0 | 1970-01-01T00:00:01 |
+| 2 | 2.0 | 1970-01-01T00:00:02 |
++-------+---------+---------------------+";
+ assert_eq!(expected, batches.pretty_print().unwrap());
+}
+
+#[tokio::test]
+async fn test_append_mode_compaction() {
+ let mut env = TestEnv::new();
+ let engine = env.create_engine(MitoConfig::default()).await;
+
+ let region_id = RegionId::new(1, 1);
+ let request = CreateRequestBuilder::new()
+ .insert_option("compaction.type", "twcs")
+ .insert_option("compaction.twcs.max_active_window_files", "2")
+ .insert_option("compaction.twcs.max_inactive_window_files", "2")
+ .insert_option("append_mode", "true")
+ .build();
+
+ let column_schemas = rows_schema(&request);
+ engine
+ .handle_request(region_id, RegionRequest::Create(request))
+ .await
+ .unwrap();
+
+ // Flush 2 SSTs for compaction.
+ // a, field 1, 2
+ let rows = Rows {
+ schema: column_schemas.clone(),
+ rows: build_rows_for_key("a", 1, 3, 1),
+ };
+ put_rows(&engine, region_id, rows).await;
+ flush_region(&engine, region_id, None).await;
+ // a, field 0, 1
+ let rows = Rows {
+ schema: column_schemas.clone(),
+ rows: build_rows_for_key("a", 0, 2, 0),
+ };
+ put_rows(&engine, region_id, rows).await;
+ flush_region(&engine, region_id, None).await;
+ // b, field 0, 1
+ let rows = Rows {
+ schema: column_schemas.clone(),
+ rows: build_rows_for_key("b", 0, 2, 0),
+ };
+ put_rows(&engine, region_id, rows).await;
+ flush_region(&engine, region_id, None).await;
+
+ let output = engine
+ .handle_request(region_id, RegionRequest::Compact(RegionCompactRequest {}))
+ .await
+ .unwrap();
+ assert_eq!(output.affected_rows, 0);
+
+ // a, field 2, 3
+ let rows = Rows {
+ schema: column_schemas,
+ rows: build_rows_for_key("a", 2, 4, 2),
+ };
+ put_rows(&engine, region_id, rows).await;
+
+ let scanner = engine.scanner(region_id, ScanRequest::default()).unwrap();
+ assert_eq!(1, scanner.num_files());
+ let stream = scanner.scan().await.unwrap();
+ let batches = RecordBatches::try_collect(stream).await.unwrap();
+ let expected = "\
++-------+---------+---------------------+
+| tag_0 | field_0 | ts |
++-------+---------+---------------------+
+| a | 0.0 | 1970-01-01T00:00:00 |
+| a | 1.0 | 1970-01-01T00:00:01 |
+| a | 1.0 | 1970-01-01T00:00:01 |
+| a | 2.0 | 1970-01-01T00:00:02 |
+| a | 2.0 | 1970-01-01T00:00:02 |
+| a | 3.0 | 1970-01-01T00:00:03 |
+| b | 0.0 | 1970-01-01T00:00:00 |
+| b | 1.0 | 1970-01-01T00:00:01 |
++-------+---------+---------------------+";
+ assert_eq!(expected, batches.pretty_print().unwrap());
+}
diff --git a/src/mito2/src/memtable.rs b/src/mito2/src/memtable.rs
index d92857dc97ff..6df63f0e9973 100644
--- a/src/mito2/src/memtable.rs
+++ b/src/mito2/src/memtable.rs
@@ -24,6 +24,7 @@ use store_api::metadata::RegionMetadataRef;
use store_api::storage::ColumnId;
use table::predicate::Predicate;
+use crate::config::MitoConfig;
use crate::error::Result;
use crate::flush::WriteBufferManagerRef;
use crate::memtable::key_values::KeyValue;
@@ -212,27 +213,29 @@ impl Drop for AllocTracker {
#[derive(Clone)]
pub(crate) struct MemtableBuilderProvider {
write_buffer_manager: Option<WriteBufferManagerRef>,
- default_memtable_builder: MemtableBuilderRef,
+ config: Arc<MitoConfig>,
}
impl MemtableBuilderProvider {
pub(crate) fn new(
write_buffer_manager: Option<WriteBufferManagerRef>,
- default_memtable_builder: MemtableBuilderRef,
+ config: Arc<MitoConfig>,
) -> Self {
Self {
write_buffer_manager,
- default_memtable_builder,
+ config,
}
}
pub(crate) fn builder_for_options(
&self,
options: Option<&MemtableOptions>,
+ dedup: bool,
) -> MemtableBuilderRef {
match options {
Some(MemtableOptions::TimeSeries) => Arc::new(TimeSeriesMemtableBuilder::new(
self.write_buffer_manager.clone(),
+ dedup,
)),
Some(MemtableOptions::PartitionTree(opts)) => {
Arc::new(PartitionTreeMemtableBuilder::new(
@@ -240,12 +243,29 @@ impl MemtableBuilderProvider {
index_max_keys_per_shard: opts.index_max_keys_per_shard,
data_freeze_threshold: opts.data_freeze_threshold,
fork_dictionary_bytes: opts.fork_dictionary_bytes,
- ..Default::default()
+ dedup,
},
self.write_buffer_manager.clone(),
))
}
- None => self.default_memtable_builder.clone(),
+ None => self.default_memtable_builder(dedup),
+ }
+ }
+
+ fn default_memtable_builder(&self, dedup: bool) -> MemtableBuilderRef {
+ match &self.config.memtable {
+ MemtableConfig::PartitionTree(config) => {
+ let mut config = config.clone();
+ config.dedup = dedup;
+ Arc::new(PartitionTreeMemtableBuilder::new(
+ config,
+ self.write_buffer_manager.clone(),
+ ))
+ }
+ MemtableConfig::TimeSeries => Arc::new(TimeSeriesMemtableBuilder::new(
+ self.write_buffer_manager.clone(),
+ dedup,
+ )),
}
}
}
diff --git a/src/mito2/src/memtable/time_series.rs b/src/mito2/src/memtable/time_series.rs
index 5403a8fea225..3991504e5182 100644
--- a/src/mito2/src/memtable/time_series.rs
+++ b/src/mito2/src/memtable/time_series.rs
@@ -54,13 +54,15 @@ const INITIAL_BUILDER_CAPACITY: usize = 0;
#[derive(Debug, Default)]
pub struct TimeSeriesMemtableBuilder {
write_buffer_manager: Option<WriteBufferManagerRef>,
+ dedup: bool,
}
impl TimeSeriesMemtableBuilder {
/// Creates a new builder with specific `write_buffer_manager`.
- pub fn new(write_buffer_manager: Option<WriteBufferManagerRef>) -> Self {
+ pub fn new(write_buffer_manager: Option<WriteBufferManagerRef>, dedup: bool) -> Self {
Self {
write_buffer_manager,
+ dedup,
}
}
}
@@ -71,7 +73,7 @@ impl MemtableBuilder for TimeSeriesMemtableBuilder {
metadata.clone(),
id,
self.write_buffer_manager.clone(),
- true, // todo(hl): set according to region option
+ self.dedup,
))
}
}
diff --git a/src/mito2/src/read/merge.rs b/src/mito2/src/read/merge.rs
index ae9dca224c91..ca758d28253b 100644
--- a/src/mito2/src/read/merge.rs
+++ b/src/mito2/src/read/merge.rs
@@ -33,7 +33,8 @@ use crate::read::{Batch, BatchReader, BoxedBatchReader, Source};
/// The merge reader merges [Batch]es from multiple sources that yield sorted batches.
/// 1. Batch is ordered by primary key, time index, sequence desc, op type desc (we can
/// ignore op type as sequence is already unique).
-/// 2. Batch doesn't have duplicate elements (elements with the same primary key and time index).
+/// 2. Batch doesn't have duplicate elements (elements with the same primary key and time index) if
+/// dedup is true.
/// 3. Batches from sources **must** not be empty.
pub struct MergeReader {
/// Holds [Node]s whose key range of current batch **is** overlapped with the merge window.
@@ -48,6 +49,8 @@ pub struct MergeReader {
cold: BinaryHeap<Node>,
/// Batch to output.
output_batch: Option<Batch>,
+ /// Remove duplicate timestamps.
+ dedup: bool,
/// Local metrics.
metrics: Metrics,
}
@@ -98,7 +101,7 @@ impl Drop for MergeReader {
impl MergeReader {
/// Creates and initializes a new [MergeReader].
- pub async fn new(sources: Vec<Source>) -> Result<MergeReader> {
+ pub async fn new(sources: Vec<Source>, dedup: bool) -> Result<MergeReader> {
let start = Instant::now();
let mut metrics = Metrics::default();
@@ -116,6 +119,7 @@ impl MergeReader {
hot,
cold,
output_batch: None,
+ dedup,
metrics,
};
// Initializes the reader.
@@ -150,12 +154,13 @@ impl MergeReader {
let mut hottest = self.hot.pop().unwrap();
let batch = hottest.fetch_batch(&mut self.metrics).await?;
- Self::maybe_output_batch(batch, &mut self.output_batch, &mut self.metrics)?;
+ Self::maybe_output_batch(batch, &mut self.output_batch, self.dedup, &mut self.metrics)?;
self.reheap(hottest)
}
- /// Fetches non-duplicated rows from the hottest node and skips the timestamp duplicated
- /// with the first timestamp in the next node.
+ /// Fetches non-duplicated rows from the hottest node.
+ ///
+ /// If `dedup` is true, it skips the timestamp duplicated with the first timestamp in the next node.
async fn fetch_rows_from_hottest(&mut self) -> Result<()> {
// Safety: `fetch_batches_to_output()` ensures the hot heap has more than 1 element.
// Pop hottest node.
@@ -176,36 +181,58 @@ impl MergeReader {
// Binary searches the timestamp in the top batch.
// Safety: Batches should have the same timestamp resolution so we can compare the native
// value directly.
- match timestamps.binary_search(&next_min_ts.value()) {
- Ok(pos) => {
- // They have duplicate timestamps. Outputs timestamps before the duplicated timestamp.
- // Batch itself doesn't contain duplicate timestamps so timestamps before `pos`
- // must be less than `next_min_ts`.
- Self::maybe_output_batch(
- top.slice(0, pos),
- &mut self.output_batch,
- &mut self.metrics,
- )?;
- // This keep the duplicate timestamp in the node.
- top_node.skip_rows(pos, &mut self.metrics).await?;
- // The merge window should contain this timestamp so only nodes in the hot heap
- // have this timestamp.
- self.filter_first_duplicate_timestamp_in_hot(top_node, next_min_ts)
- .await?;
- }
+ let duplicate_pos = match timestamps.binary_search(&next_min_ts.value()) {
+ Ok(pos) => pos,
Err(pos) => {
// No duplicate timestamp. Outputs timestamp before `pos`.
Self::maybe_output_batch(
top.slice(0, pos),
&mut self.output_batch,
+ self.dedup,
&mut self.metrics,
)?;
top_node.skip_rows(pos, &mut self.metrics).await?;
- self.reheap(top_node)?;
+ return self.reheap(top_node);
}
+ };
+
+ if self.dedup {
+ // They have duplicate timestamps. Outputs timestamps before the duplicated timestamp.
+ // Batch itself doesn't contain duplicate timestamps so timestamps before `duplicate_pos`
+ // must be less than `next_min_ts`.
+ Self::maybe_output_batch(
+ top.slice(0, duplicate_pos),
+ &mut self.output_batch,
+ self.dedup,
+ &mut self.metrics,
+ )?;
+ // This keep the duplicate timestamp in the node.
+ top_node.skip_rows(duplicate_pos, &mut self.metrics).await?;
+ // The merge window should contain this timestamp so only nodes in the hot heap
+ // have this timestamp.
+ return self
+ .filter_first_duplicate_timestamp_in_hot(top_node, next_min_ts)
+ .await;
}
- Ok(())
+ // No need to remove duplicate timestamps.
+ let output_end = if duplicate_pos == 0 {
+ // If the first timestamp of the top node is duplicate. We can simply return the first row
+ // as the heap ensure it is the one with largest sequence.
+ 1
+ } else {
+ // We don't know which one has the larger sequence so we use the range before
+ // the duplicate pos.
+ duplicate_pos
+ };
+ Self::maybe_output_batch(
+ top.slice(0, output_end),
+ &mut self.output_batch,
+ self.dedup,
+ &mut self.metrics,
+ )?;
+ top_node.skip_rows(output_end, &mut self.metrics).await?;
+ self.reheap(top_node)
}
/// Filters the first duplicate `timestamp` in `top_node` and `hot` heap. Only keeps the timestamp
@@ -297,12 +324,17 @@ impl MergeReader {
fn maybe_output_batch(
mut batch: Batch,
output_batch: &mut Option<Batch>,
+ dedup: bool,
metrics: &mut Metrics,
) -> Result<()> {
debug_assert!(output_batch.is_none());
let num_rows = batch.num_rows();
- batch.filter_deleted()?;
+ // If dedup is false, we don't expect delete happens and we skip checking whether there
+ // is any deleted entry.
+ if dedup {
+ batch.filter_deleted()?;
+ }
// Update deleted rows metrics.
metrics.num_deleted_rows += num_rows - batch.num_rows();
if batch.is_empty() {
@@ -315,12 +347,13 @@ impl MergeReader {
}
/// Builder to build and initialize a [MergeReader].
-#[derive(Default)]
pub struct MergeReaderBuilder {
/// Input sources.
///
/// All source must yield batches with the same schema.
sources: Vec<Source>,
+ /// Remove duplicate timestamps. Default is true.
+ dedup: bool,
}
impl MergeReaderBuilder {
@@ -330,8 +363,8 @@ impl MergeReaderBuilder {
}
/// Creates a builder from sources.
- pub fn from_sources(sources: Vec<Source>) -> MergeReaderBuilder {
- MergeReaderBuilder { sources }
+ pub fn from_sources(sources: Vec<Source>, dedup: bool) -> MergeReaderBuilder {
+ MergeReaderBuilder { sources, dedup }
}
/// Pushes a batch reader to sources.
@@ -349,7 +382,16 @@ impl MergeReaderBuilder {
/// Builds and initializes the reader, then resets the builder.
pub async fn build(&mut self) -> Result<MergeReader> {
let sources = mem::take(&mut self.sources);
- MergeReader::new(sources).await
+ MergeReader::new(sources, self.dedup).await
+ }
+}
+
+impl Default for MergeReaderBuilder {
+ fn default() -> Self {
+ MergeReaderBuilder {
+ sources: Vec::new(),
+ dedup: true,
+ }
}
}
@@ -901,4 +943,40 @@ mod tests {
.collect();
check_reader_result(&mut reader, &expect).await;
}
+
+ #[tokio::test]
+ async fn test_merge_keep_duplicate() {
+ let reader1 = VecBatchReader::new(&[new_batch(
+ b"k1",
+ &[1, 2],
+ &[10, 10],
+ &[OpType::Put, OpType::Put],
+ &[21, 22],
+ )]);
+ let reader2 = VecBatchReader::new(&[new_batch(
+ b"k1",
+ &[2, 3],
+ &[11, 11],
+ &[OpType::Put, OpType::Put],
+ &[32, 33],
+ )]);
+ let sources = vec![
+ Source::Reader(Box::new(reader1)),
+ Source::Iter(Box::new(reader2)),
+ ];
+ let mut reader = MergeReaderBuilder::from_sources(sources, false)
+ .build()
+ .await
+ .unwrap();
+ check_reader_result(
+ &mut reader,
+ &[
+ new_batch(b"k1", &[1], &[10], &[OpType::Put], &[21]),
+ new_batch(b"k1", &[2], &[11], &[OpType::Put], &[32]),
+ new_batch(b"k1", &[2], &[10], &[OpType::Put], &[22]),
+ new_batch(b"k1", &[3], &[11], &[OpType::Put], &[33]),
+ ],
+ )
+ .await;
+ }
}
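
The dedup path above pivots on `binary_search`: `Ok(pos)` means `next_min_ts` already exists in the current batch and only rows before `pos` are safe to output, while `Err(pos)` means there is no duplicate and everything before the insertion point can be emitted. A minimal, self-contained sketch of that split over a plain timestamp slice (the real code operates on `Batch`, so the types here are illustrative only):

/// Returns (rows safe to output, whether `next_min_ts` already exists in `timestamps`).
/// Assumes `timestamps` is sorted ascending, as the binary search in the diff requires.
fn split_at_duplicate(timestamps: &[i64], next_min_ts: i64) -> (usize, bool) {
    match timestamps.binary_search(&next_min_ts) {
        // Duplicate found: rows before `pos` are strictly less than `next_min_ts`.
        Ok(pos) => (pos, true),
        // No duplicate: every row before the insertion point can be emitted.
        Err(pos) => (pos, false),
    }
}

fn main() {
    assert_eq!(split_at_duplicate(&[1, 2, 4], 2), (1, true));
    assert_eq!(split_at_duplicate(&[1, 2, 4], 3), (2, false));
}
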
diff --git a/src/mito2/src/read/scan_region.rs b/src/mito2/src/read/scan_region.rs
index 882903672564..07de897d3132 100644
--- a/src/mito2/src/read/scan_region.rs
+++ b/src/mito2/src/read/scan_region.rs
@@ -210,12 +210,13 @@ impl ScanRegion {
.collect();
debug!(
- "Seq scan region {}, request: {:?}, memtables: {}, ssts_to_read: {}, total_ssts: {}",
+ "Seq scan region {}, request: {:?}, memtables: {}, ssts_to_read: {}, total_ssts: {}, append_mode: {}",
self.version.metadata.region_id,
self.request,
memtables.len(),
files.len(),
- total_ssts
+ total_ssts,
+ self.version.options.append_mode,
);
let index_applier = self.build_index_applier();
@@ -234,7 +235,8 @@ impl ScanRegion {
.with_cache(self.cache_manager)
.with_index_applier(index_applier)
.with_parallelism(self.parallelism)
- .with_start_time(self.start_time);
+ .with_start_time(self.start_time)
+ .with_append_mode(self.version.options.append_mode);
Ok(seq_scan)
}
diff --git a/src/mito2/src/read/seq_scan.rs b/src/mito2/src/read/seq_scan.rs
index 151210422baf..4f955adace4a 100644
--- a/src/mito2/src/read/seq_scan.rs
+++ b/src/mito2/src/read/seq_scan.rs
@@ -67,6 +67,8 @@ pub struct SeqScan {
index_applier: Option<SstIndexApplierRef>,
/// Start time of the query.
query_start: Option<Instant>,
+ /// The region is using append mode.
+ append_mode: bool,
}
impl SeqScan {
@@ -85,6 +87,7 @@ impl SeqScan {
parallelism: ScanParallism::default(),
index_applier: None,
query_start: None,
+ append_mode: false,
}
}
@@ -151,6 +154,12 @@ impl SeqScan {
self
}
+ #[must_use]
+ pub(crate) fn with_append_mode(mut self, is_append_mode: bool) -> Self {
+ self.append_mode = is_append_mode;
+ self
+ }
+
/// Builds a stream for the query.
pub async fn build_stream(&self) -> Result<SendableRecordBatchStream> {
let mut metrics = Metrics::default();
@@ -210,8 +219,10 @@ impl SeqScan {
pub async fn build_reader(&self) -> Result<BoxedBatchReader> {
// Scans all memtables and SSTs. Builds a merge reader to merge results.
let sources = self.build_sources().await?;
- let mut builder = MergeReaderBuilder::from_sources(sources);
- Ok(Box::new(builder.build().await?))
+ let dedup = !self.append_mode;
+ let mut builder = MergeReaderBuilder::from_sources(sources, dedup);
+ let reader = builder.build().await?;
+ Ok(Box::new(reader))
}
/// Builds a [BoxedBatchReader] that can scan memtables and SSTs in parallel.
@@ -228,8 +239,10 @@ impl SeqScan {
Source::Stream(stream)
})
.collect();
- let mut builder = MergeReaderBuilder::from_sources(sources);
- Ok(Box::new(builder.build().await?))
+ let dedup = !self.append_mode;
+ let mut builder = MergeReaderBuilder::from_sources(sources, dedup);
+ let reader = builder.build().await?;
+ Ok(Box::new(reader))
}
/// Builds and returns sources to read.
diff --git a/src/mito2/src/region/opener.rs b/src/mito2/src/region/opener.rs
index f99ab4e5d04c..d0fa4ea708ac 100644
--- a/src/mito2/src/region/opener.rs
+++ b/src/mito2/src/region/opener.rs
@@ -173,7 +173,7 @@ impl RegionOpener {
let memtable_builder = self
.memtable_builder_provider
- .builder_for_options(options.memtable.as_ref());
+ .builder_for_options(options.memtable.as_ref(), !options.append_mode);
// Initial memtable id is 0.
let part_duration = options.compaction.time_window();
let mutable = Arc::new(TimePartitions::new(
@@ -281,9 +281,10 @@ impl RegionOpener {
access_layer.clone(),
self.cache_manager.clone(),
));
- let memtable_builder = self
- .memtable_builder_provider
- .builder_for_options(region_options.memtable.as_ref());
+ let memtable_builder = self.memtable_builder_provider.builder_for_options(
+ region_options.memtable.as_ref(),
+ !region_options.append_mode,
+ );
// Initial memtable id is 0.
let part_duration = region_options.compaction.time_window();
let mutable = Arc::new(TimePartitions::new(
diff --git a/src/mito2/src/region/options.rs b/src/mito2/src/region/options.rs
index e890207e874b..a56976874bc0 100644
--- a/src/mito2/src/region/options.rs
+++ b/src/mito2/src/region/options.rs
@@ -46,6 +46,8 @@ pub struct RegionOptions {
pub compaction: CompactionOptions,
/// Custom storage. Uses default storage if it is `None`.
pub storage: Option<String>,
+ /// If append mode is enabled, the region keeps duplicate rows.
+ pub append_mode: bool,
/// Wal options.
pub wal_options: WalOptions,
/// Index options.
@@ -91,6 +93,7 @@ impl TryFrom<&HashMap<String, String>> for RegionOptions {
ttl: options.ttl,
compaction,
storage: options.storage,
+ append_mode: options.append_mode,
wal_options,
index_options,
memtable,
@@ -166,6 +169,7 @@ impl Default for TwcsOptions {
/// We need to define a new struct without enum fields as `#[serde(default)]` does not
/// support external tagging.
+#[serde_as]
#[derive(Debug, Deserialize)]
#[serde(default)]
struct RegionOptionsWithoutEnum {
@@ -173,6 +177,8 @@ struct RegionOptionsWithoutEnum {
#[serde(with = "humantime_serde")]
ttl: Option<Duration>,
storage: Option<String>,
+ #[serde_as(as = "DisplayFromStr")]
+ append_mode: bool,
}
impl Default for RegionOptionsWithoutEnum {
@@ -181,6 +187,7 @@ impl Default for RegionOptionsWithoutEnum {
RegionOptionsWithoutEnum {
ttl: options.ttl,
storage: options.storage,
+ append_mode: options.append_mode,
}
}
}
@@ -482,6 +489,7 @@ mod tests {
("compaction.twcs.time_window", "2h"),
("compaction.type", "twcs"),
("storage", "S3"),
+ ("append_mode", "true"),
("index.inverted_index.ignore_column_ids", "1,2,3"),
("index.inverted_index.segment_row_count", "512"),
(
@@ -502,6 +510,7 @@ mod tests {
time_window: Some(Duration::from_secs(3600 * 2)),
}),
storage: Some("S3".to_string()),
+ append_mode: true,
wal_options,
index_options: IndexOptions {
inverted_index: InvertedIndexOptions {
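
The `#[serde_as(as = "DisplayFromStr")]` attribute added above is what lets `append_mode` arrive as the string "true" (the region option map is string-valued) and still deserialize into a plain `bool`. A standalone sketch of the same mechanism, using serde_json as the carrier format purely for illustration; the real options go through their own map-based deserializer:

use serde::Deserialize;
use serde_with::{serde_as, DisplayFromStr};

#[serde_as]
#[derive(Debug, Deserialize)]
struct Opts {
    // Parses the string "true"/"false" via FromStr instead of expecting a JSON bool.
    #[serde_as(as = "DisplayFromStr")]
    append_mode: bool,
}

fn main() {
    let opts: Opts = serde_json::from_str(r#"{ "append_mode": "true" }"#).unwrap();
    assert!(opts.append_mode);
}
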
diff --git a/src/mito2/src/worker.rs b/src/mito2/src/worker.rs
index d007a0c0dc65..a9aa652d8673 100644
--- a/src/mito2/src/worker.rs
+++ b/src/mito2/src/worker.rs
@@ -48,9 +48,7 @@ use crate::config::MitoConfig;
use crate::error::{InvalidRequestSnafu, JoinSnafu, Result, WorkerStoppedSnafu};
use crate::flush::{FlushScheduler, WriteBufferManagerImpl, WriteBufferManagerRef};
use crate::manifest::action::RegionEdit;
-use crate::memtable::partition_tree::PartitionTreeMemtableBuilder;
-use crate::memtable::time_series::TimeSeriesMemtableBuilder;
-use crate::memtable::{MemtableBuilderProvider, MemtableConfig};
+use crate::memtable::MemtableBuilderProvider;
use crate::region::{MitoRegionRef, RegionMap, RegionMapRef};
use crate::request::{
BackgroundNotify, DdlRequest, SenderDdlRequest, SenderWriteRequest, WorkerRequest,
@@ -338,20 +336,10 @@ impl<S: LogStore> WorkerStarter<S> {
let (sender, receiver) = mpsc::channel(self.config.worker_channel_size);
let running = Arc::new(AtomicBool::new(true));
-
- let default_memtable_builder = match &self.config.memtable {
- MemtableConfig::PartitionTree(config) => Arc::new(PartitionTreeMemtableBuilder::new(
- config.clone(),
- Some(self.write_buffer_manager.clone()),
- )) as _,
- MemtableConfig::TimeSeries => Arc::new(TimeSeriesMemtableBuilder::new(Some(
- self.write_buffer_manager.clone(),
- ))) as _,
- };
let now = self.time_provider.current_time_millis();
let mut worker_thread = RegionWorkerLoop {
id: self.id,
- config: self.config,
+ config: self.config.clone(),
regions: regions.clone(),
dropping_regions: Arc::new(RegionMap::default()),
sender: sender.clone(),
@@ -361,7 +349,7 @@ impl<S: LogStore> WorkerStarter<S> {
running: running.clone(),
memtable_builder_provider: MemtableBuilderProvider::new(
Some(self.write_buffer_manager.clone()),
- default_memtable_builder,
+ self.config,
),
scheduler: self.scheduler.clone(),
write_buffer_manager: self.write_buffer_manager,
diff --git a/src/mito2/src/worker/handle_write.rs b/src/mito2/src/worker/handle_write.rs
index 495af6cb05b1..9b7693710b32 100644
--- a/src/mito2/src/worker/handle_write.rs
+++ b/src/mito2/src/worker/handle_write.rs
@@ -17,11 +17,13 @@
use std::collections::{hash_map, HashMap};
use std::sync::Arc;
+use api::v1::OpType;
+use snafu::ensure;
use store_api::logstore::LogStore;
use store_api::metadata::RegionMetadata;
use store_api::storage::RegionId;
-use crate::error::{RejectWriteSnafu, Result};
+use crate::error::{InvalidRequestSnafu, RejectWriteSnafu, Result};
use crate::metrics::{
WRITE_REJECT_TOTAL, WRITE_ROWS_TOTAL, WRITE_STAGE_ELAPSED, WRITE_STALL_TOTAL,
};
@@ -162,6 +164,16 @@ impl<S> RegionWorkerLoop<S> {
// Safety: Now we ensure the region exists.
let region_ctx = region_ctxs.get_mut(®ion_id).unwrap();
+ if let Err(e) = check_op_type(
+ region_ctx.version().options.append_mode,
+ &sender_req.request,
+ ) {
+ // Do not allow non-put op under append mode.
+ sender_req.sender.send(Err(e));
+
+ continue;
+ }
+
// Checks whether request schema is compatible with region schema.
if let Err(e) =
maybe_fill_missing_columns(&mut sender_req.request, ®ion_ctx.version().metadata)
@@ -219,3 +231,18 @@ fn maybe_fill_missing_columns(request: &mut WriteRequest, metadata: &RegionMetad
Ok(())
}
+
+/// Rejects delete requests under append mode.
+fn check_op_type(append_mode: bool, request: &WriteRequest) -> Result<()> {
+ if append_mode {
+ ensure!(
+ request.op_type == OpType::Put,
+ InvalidRequestSnafu {
+ region_id: request.region_id,
+ reason: "Only put is allowed under append mode",
+ }
+ );
+ }
+
+ Ok(())
+}
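
The guard above is the whole write-path contract of append mode: once duplicate rows are kept, a delete can no longer be resolved reliably, so anything other than `OpType::Put` is rejected up front. A standalone sketch of the same check with the snafu error replaced by a plain string (the enum here is a simplified stand-in):

#[derive(Debug, PartialEq)]
enum OpType {
    Put,
    Delete,
}

/// Rejects non-put operations when the region runs in append mode.
fn check_op_type(append_mode: bool, op_type: &OpType) -> Result<(), String> {
    if append_mode && *op_type != OpType::Put {
        return Err("Only put is allowed under append mode".to_string());
    }
    Ok(())
}

fn main() {
    assert!(check_op_type(true, &OpType::Put).is_ok());
    assert!(check_op_type(true, &OpType::Delete).is_err());
    assert!(check_op_type(false, &OpType::Delete).is_ok());
}
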
|
feat
|
Implement append mode for a region (#3558)
|
8e695437047d3b76c540bd1478360ee6d6971315
|
2024-07-08 17:39:30
|
Yohan Wal
|
feat: support inserting into binary value through string (#4197)
| false
|
diff --git a/src/datatypes/src/data_type.rs b/src/datatypes/src/data_type.rs
index f74a94c7a14f..262110dbf53b 100644
--- a/src/datatypes/src/data_type.rs
+++ b/src/datatypes/src/data_type.rs
@@ -161,6 +161,7 @@ impl ConcreteDataType {
| ConcreteDataType::Interval(_)
| ConcreteDataType::Duration(_)
| ConcreteDataType::Decimal128(_)
+ | ConcreteDataType::Binary(_)
)
}
@@ -717,6 +718,7 @@ mod tests {
assert!(!ConcreteDataType::int32_datatype().is_stringifiable());
assert!(!ConcreteDataType::float32_datatype().is_stringifiable());
assert!(ConcreteDataType::string_datatype().is_stringifiable());
+ assert!(ConcreteDataType::binary_datatype().is_stringifiable());
assert!(ConcreteDataType::date_datatype().is_stringifiable());
assert!(ConcreteDataType::datetime_datatype().is_stringifiable());
assert!(ConcreteDataType::timestamp_second_datatype().is_stringifiable());
diff --git a/src/sql/src/statements.rs b/src/sql/src/statements.rs
index de35b71a90a8..4d27453cefd1 100644
--- a/src/sql/src/statements.rs
+++ b/src/sql/src/statements.rs
@@ -120,6 +120,7 @@ fn parse_string_to_value(
.fail()
}
}
+ ConcreteDataType::Binary(_) => Ok(Value::Binary(s.as_bytes().into())),
_ => {
unreachable!()
}
@@ -710,6 +711,14 @@ mod tests {
sql_value_to_value("a", &ConcreteDataType::binary_datatype(), &sql_val, None).unwrap();
assert_eq!(Value::Binary(Bytes::from(b"Hello world!".as_slice())), v);
+ let sql_val = SqlValue::DoubleQuotedString("MorningMyFriends".to_string());
+ let v =
+ sql_value_to_value("a", &ConcreteDataType::binary_datatype(), &sql_val, None).unwrap();
+ assert_eq!(
+ Value::Binary(Bytes::from(b"MorningMyFriends".as_slice())),
+ v
+ );
+
let sql_val = SqlValue::HexStringLiteral("9AF".to_string());
let v = sql_value_to_value("a", &ConcreteDataType::binary_datatype(), &sql_val, None);
assert!(v.is_err());
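
The new `ConcreteDataType::Binary(_)` arm simply reuses the UTF-8 bytes of the quoted literal, which is why both single- and double-quoted strings land as `Value::Binary` in the tests above. A simplified standalone sketch of that conversion (the `Value` enum here is a stand-in, not the real datatypes type):

#[derive(Debug, PartialEq)]
enum Value {
    Binary(Vec<u8>),
}

/// A quoted string literal becomes the raw UTF-8 bytes of its content.
fn string_to_binary_value(s: &str) -> Value {
    Value::Binary(s.as_bytes().to_vec())
}

fn main() {
    assert_eq!(
        string_to_binary_value("MorningMyFriends"),
        Value::Binary(b"MorningMyFriends".to_vec())
    );
}
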
|
feat
|
support inserting into binary value through string (#4197)
|
291d9d55a4d554d45673822fe3601e2263588a87
|
2024-08-01 08:29:38
|
Jeremyhi
|
feat: hint options for gRPC insert (#4454)
| false
|
diff --git a/src/client/src/database.rs b/src/client/src/database.rs
index e310a73e584d..80dc51df2ef6 100644
--- a/src/client/src/database.rs
+++ b/src/client/src/database.rs
@@ -33,9 +33,12 @@ use common_telemetry::tracing_context::W3cTrace;
use futures_util::StreamExt;
use prost::Message;
use snafu::{ensure, ResultExt};
+use tonic::metadata::AsciiMetadataKey;
use tonic::transport::Channel;
-use crate::error::{ConvertFlightDataSnafu, Error, IllegalFlightMessagesSnafu, ServerSnafu};
+use crate::error::{
+ ConvertFlightDataSnafu, Error, IllegalFlightMessagesSnafu, InvalidAsciiSnafu, ServerSnafu,
+};
use crate::{from_grpc_response, Client, Result};
#[derive(Clone, Debug, Default)]
@@ -130,6 +133,36 @@ impl Database {
self.handle(Request::Inserts(requests)).await
}
+ pub async fn insert_with_hints(
+ &self,
+ requests: InsertRequests,
+ hints: &[(&str, &str)],
+ ) -> Result<u32> {
+ let mut client = make_database_client(&self.client)?.inner;
+ let request = self.to_rpc_request(Request::Inserts(requests));
+
+ let mut request = tonic::Request::new(request);
+ let metadata = request.metadata_mut();
+ for (key, value) in hints {
+ let key = AsciiMetadataKey::from_bytes(format!("x-greptime-hint-{}", key).as_bytes())
+ .map_err(|_| {
+ InvalidAsciiSnafu {
+ value: key.to_string(),
+ }
+ .build()
+ })?;
+ let value = value.parse().map_err(|_| {
+ InvalidAsciiSnafu {
+ value: value.to_string(),
+ }
+ .build()
+ })?;
+ metadata.insert(key, value);
+ }
+ let response = client.handle(request).await?.into_inner();
+ from_grpc_response(response)
+ }
+
async fn handle(&self, request: Request) -> Result<u32> {
let mut client = make_database_client(&self.client)?.inner;
let request = self.to_rpc_request(request);
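
For callers, `insert_with_hints` is a drop-in variant of `insert` that forwards hint pairs as gRPC metadata. A hedged usage sketch, assuming the client crate context of this diff with `db: Database` connected and `request: InsertRequest` built elsewhere:

// Sketch only; see the integration test later in this commit for the full setup.
let affected_rows = db
    .insert_with_hints(
        InsertRequests { inserts: vec![request] },
        &[("append_mode", "true")],
    )
    .await?;
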
diff --git a/src/client/src/error.rs b/src/client/src/error.rs
index f200b1c93dd3..b5aef255d4f7 100644
--- a/src/client/src/error.rs
+++ b/src/client/src/error.rs
@@ -122,6 +122,13 @@ pub enum Error {
#[snafu(implicit)]
location: Location,
},
+
+ #[snafu(display("Failed to parse ascii string: {}", value))]
+ InvalidAscii {
+ value: String,
+ #[snafu(implicit)]
+ location: Location,
+ },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -143,6 +150,8 @@ impl ErrorExt for Error {
| Error::ConvertFlightData { source, .. }
| Error::CreateTlsChannel { source, .. } => source.status_code(),
Error::IllegalGrpcClientState { .. } => StatusCode::Unexpected,
+
+ Error::InvalidAscii { .. } => StatusCode::InvalidArguments,
}
}
diff --git a/src/operator/src/insert.rs b/src/operator/src/insert.rs
index 556a58c49e08..90edee015705 100644
--- a/src/operator/src/insert.rs
+++ b/src/operator/src/insert.rs
@@ -649,9 +649,18 @@ impl Inserter {
statement_executor: &StatementExecutor,
create_type: AutoCreateTableType,
) -> Result<TableRef> {
+ let mut hint_options = vec![];
let options: &[(&str, &str)] = match create_type {
AutoCreateTableType::Logical(_) => unreachable!(),
- AutoCreateTableType::Physical => &[],
+ AutoCreateTableType::Physical => {
+ if let Some(append_mode) = ctx.extension(APPEND_MODE_KEY) {
+ hint_options.push((APPEND_MODE_KEY, append_mode));
+ }
+ if let Some(merge_mode) = ctx.extension(MERGE_MODE_KEY) {
+ hint_options.push((MERGE_MODE_KEY, merge_mode));
+ }
+ hint_options.as_slice()
+ }
// Set append_mode to true for log table.
// because log tables should keep rows with the same ts and tags.
AutoCreateTableType::Log => &[(APPEND_MODE_KEY, "true")],
diff --git a/src/servers/src/grpc/database.rs b/src/servers/src/grpc/database.rs
index f8c9e298d4b5..4c3cf2c72b0d 100644
--- a/src/servers/src/grpc/database.rs
+++ b/src/servers/src/grpc/database.rs
@@ -18,13 +18,16 @@ use api::v1::{AffectedRows, GreptimeRequest, GreptimeResponse, ResponseHeader};
use async_trait::async_trait;
use common_error::status_code::StatusCode;
use common_query::OutputData;
-use common_telemetry::warn;
+use common_telemetry::{debug, warn};
use futures::StreamExt;
+use tonic::metadata::{KeyAndValueRef, MetadataMap};
use tonic::{Request, Response, Status, Streaming};
use crate::grpc::greptime_handler::GreptimeRequestHandler;
use crate::grpc::{cancellation, TonicResult};
+pub const GREPTIME_DB_HEADER_HINT_PREFIX: &str = "x-greptime-hint-";
+
pub(crate) struct DatabaseService {
handler: GreptimeRequestHandler,
}
@@ -42,10 +45,15 @@ impl GreptimeDatabase for DatabaseService {
request: Request<GreptimeRequest>,
) -> TonicResult<Response<GreptimeResponse>> {
let remote_addr = request.remote_addr();
+ let hints = extract_hints(request.metadata());
+ debug!(
+ "GreptimeDatabase::Handle: request from {:?} with hints: {:?}",
+ remote_addr, hints
+ );
let handler = self.handler.clone();
let request_future = async move {
let request = request.into_inner();
- let output = handler.handle_request(request).await?;
+ let output = handler.handle_request(request, hints).await?;
let message = match output.data {
OutputData::AffectedRows(rows) => GreptimeResponse {
header: Some(ResponseHeader {
@@ -83,6 +91,11 @@ impl GreptimeDatabase for DatabaseService {
request: Request<Streaming<GreptimeRequest>>,
) -> Result<Response<GreptimeResponse>, Status> {
let remote_addr = request.remote_addr();
+ let hints = extract_hints(request.metadata());
+ debug!(
+ "GreptimeDatabase::HandleRequests: request from {:?} with hints: {:?}",
+ remote_addr, hints
+ );
let handler = self.handler.clone();
let request_future = async move {
let mut affected_rows = 0;
@@ -90,7 +103,7 @@ impl GreptimeDatabase for DatabaseService {
let mut stream = request.into_inner();
while let Some(request) = stream.next().await {
let request = request?;
- let output = handler.handle_request(request).await?;
+ let output = handler.handle_request(request, hints.clone()).await?;
match output.data {
OutputData::AffectedRows(rows) => affected_rows += rows,
OutputData::Stream(_) | OutputData::RecordBatches(_) => {
@@ -129,3 +142,58 @@ impl GreptimeDatabase for DatabaseService {
cancellation::with_cancellation_handler(request_future, cancellation_future).await
}
}
+
+fn extract_hints(metadata: &MetadataMap) -> Vec<(String, String)> {
+ metadata
+ .iter()
+ .filter_map(|kv| {
+ let KeyAndValueRef::Ascii(key, value) = kv else {
+ return None;
+ };
+ let key = key.as_str();
+ if !key.starts_with(GREPTIME_DB_HEADER_HINT_PREFIX) {
+ return None;
+ }
+ let Ok(value) = value.to_str() else {
+ // Simply return None for non-string values.
+ return None;
+ };
+ // Safety: we already checked the prefix.
+ let new_key = key
+ .strip_prefix(GREPTIME_DB_HEADER_HINT_PREFIX)
+ .unwrap()
+ .to_string();
+ Some((new_key, value.trim().to_string()))
+ })
+ .collect()
+}
+
+#[cfg(test)]
+mod tests {
+ use tonic::metadata::MetadataValue;
+
+ use super::*;
+
+ #[test]
+ fn test_extract_hints() {
+ let mut metadata = MetadataMap::new();
+ let prev = metadata.insert(
+ "x-greptime-hint-append_mode",
+ MetadataValue::from_static("true"),
+ );
+ assert!(prev.is_none());
+ let hints = extract_hints(&metadata);
+ assert_eq!(hints, vec![("append_mode".to_string(), "true".to_string())]);
+ }
+
+ #[test]
+ fn extract_hints_ignores_non_ascii_metadata() {
+ let mut metadata = MetadataMap::new();
+ metadata.insert_bin(
+ "x-greptime-hint-merge_mode-bin",
+ MetadataValue::from_bytes(b"last_non_null"),
+ );
+ let hints = extract_hints(&metadata);
+ assert!(hints.is_empty());
+ }
+}
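
The hint plumbing is just a naming convention on gRPC metadata: the client prefixes each hint name with `x-greptime-hint-` and `extract_hints` strips that prefix back off on the server. A standalone sketch of the round trip (the function names here are illustrative, only the prefix constant matches the code above):

const PREFIX: &str = "x-greptime-hint-";

fn to_metadata_key(hint: &str) -> String {
    // Client side: "append_mode" -> "x-greptime-hint-append_mode".
    format!("{PREFIX}{hint}")
}

fn from_metadata_key(key: &str) -> Option<&str> {
    // Server side: keep only keys carrying the hint prefix.
    key.strip_prefix(PREFIX)
}

fn main() {
    let key = to_metadata_key("append_mode");
    assert_eq!(from_metadata_key(&key), Some("append_mode"));
    assert_eq!(from_metadata_key("content-type"), None);
}
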
diff --git a/src/servers/src/grpc/flight.rs b/src/servers/src/grpc/flight.rs
index cd1d2a4bd019..76a6cc00cec1 100644
--- a/src/servers/src/grpc/flight.rs
+++ b/src/servers/src/grpc/flight.rs
@@ -167,7 +167,7 @@ impl FlightCraft for GreptimeRequestHandler {
request_type = get_request_type(&request)
);
async {
- let output = self.handle_request(request).await?;
+ let output = self.handle_request(request, Default::default()).await?;
let stream: Pin<Box<dyn Stream<Item = Result<FlightData, Status>> + Send + Sync>> =
to_flight_data_stream(output, TracingContext::from_current_span());
Ok(Response::new(stream))
diff --git a/src/servers/src/grpc/greptime_handler.rs b/src/servers/src/grpc/greptime_handler.rs
index 79ca1e09522c..2abf5efaf8a7 100644
--- a/src/servers/src/grpc/greptime_handler.rs
+++ b/src/servers/src/grpc/greptime_handler.rs
@@ -58,13 +58,17 @@ impl GreptimeRequestHandler {
}
#[tracing::instrument(skip_all, fields(protocol = "grpc", request_type = get_request_type(&request)))]
- pub(crate) async fn handle_request(&self, request: GreptimeRequest) -> Result<Output> {
+ pub(crate) async fn handle_request(
+ &self,
+ request: GreptimeRequest,
+ hints: Vec<(String, String)>,
+ ) -> Result<Output> {
let query = request.request.context(InvalidQuerySnafu {
reason: "Expecting non-empty GreptimeRequest.",
})?;
let header = request.header.as_ref();
- let query_ctx = create_query_context(header);
+ let query_ctx = create_query_context(header, hints);
let user_info = auth(self.user_provider.clone(), header, &query_ctx).await?;
query_ctx.set_current_user(user_info);
@@ -164,7 +168,10 @@ pub(crate) async fn auth(
})
}
-pub(crate) fn create_query_context(header: Option<&RequestHeader>) -> QueryContextRef {
+pub(crate) fn create_query_context(
+ header: Option<&RequestHeader>,
+ extensions: Vec<(String, String)>,
+) -> QueryContextRef {
let (catalog, schema) = header
.map(|header| {
// We provide dbname field in newer versions of protos/sdks
@@ -193,12 +200,14 @@ pub(crate) fn create_query_context(header: Option<&RequestHeader>) -> QueryConte
)
});
let timezone = parse_timezone(header.map(|h| h.timezone.as_str()));
- QueryContextBuilder::default()
+ let mut ctx_builder = QueryContextBuilder::default()
.current_catalog(catalog)
.current_schema(schema)
- .timezone(timezone)
- .build()
- .into()
+ .timezone(timezone);
+ for (key, value) in extensions {
+ ctx_builder = ctx_builder.set_extension(key, value);
+ }
+ ctx_builder.build().into()
}
/// Histogram timer for handling gRPC request.
diff --git a/src/servers/src/grpc/prom_query_gateway.rs b/src/servers/src/grpc/prom_query_gateway.rs
index 200f2fde9ca5..fd22831b12bb 100644
--- a/src/servers/src/grpc/prom_query_gateway.rs
+++ b/src/servers/src/grpc/prom_query_gateway.rs
@@ -78,7 +78,7 @@ impl PrometheusGateway for PrometheusGatewayService {
};
let header = inner.header.as_ref();
- let query_ctx = create_query_context(header);
+ let query_ctx = create_query_context(header, Default::default());
let user_info = auth(self.user_provider.clone(), header, &query_ctx).await?;
query_ctx.set_current_user(user_info);
diff --git a/tests-integration/tests/grpc.rs b/tests-integration/tests/grpc.rs
index 7f7517f792e7..55ccdb258d96 100644
--- a/tests-integration/tests/grpc.rs
+++ b/tests-integration/tests/grpc.rs
@@ -71,6 +71,7 @@ macro_rules! grpc_tests {
test_invalid_dbname,
test_auto_create_table,
+ test_auto_create_table_with_hints,
test_insert_and_select,
test_dbname,
test_grpc_message_size_ok,
@@ -277,6 +278,17 @@ pub async fn test_auto_create_table(store_type: StorageType) {
guard.remove_all().await;
}
+pub async fn test_auto_create_table_with_hints(store_type: StorageType) {
+ let (addr, mut guard, fe_grpc_server) =
+ setup_grpc_server(store_type, "auto_create_table_with_hints").await;
+
+ let grpc_client = Client::with_urls(vec![addr]);
+ let db = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, grpc_client);
+ insert_with_hints_and_assert(&db).await;
+ let _ = fe_grpc_server.shutdown().await;
+ guard.remove_all().await;
+}
+
fn expect_data() -> (Column, Column, Column, Column) {
// testing data:
let expected_host_col = Column {
@@ -377,6 +389,62 @@ pub async fn test_insert_and_select(store_type: StorageType) {
guard.remove_all().await;
}
+async fn insert_with_hints_and_assert(db: &Database) {
+ // testing data:
+ let (expected_host_col, expected_cpu_col, expected_mem_col, expected_ts_col) = expect_data();
+
+ let request = InsertRequest {
+ table_name: "demo".to_string(),
+ columns: vec![
+ expected_host_col.clone(),
+ expected_cpu_col.clone(),
+ expected_mem_col.clone(),
+ expected_ts_col.clone(),
+ ],
+ row_count: 4,
+ };
+ let result = db
+ .insert_with_hints(
+ InsertRequests {
+ inserts: vec![request],
+ },
+ &[("append_mode", "true")],
+ )
+ .await;
+ assert_eq!(result.unwrap(), 4);
+
+ // show table
+ let output = db.sql("SHOW CREATE TABLE demo;").await.unwrap();
+
+ let record_batches = match output.data {
+ OutputData::RecordBatches(record_batches) => record_batches,
+ OutputData::Stream(stream) => RecordBatches::try_collect(stream).await.unwrap(),
+ OutputData::AffectedRows(_) => unreachable!(),
+ };
+
+ let pretty = record_batches.pretty_print().unwrap();
+ let expected = "\
++-------+-------------------------------------+
+| Table | Create Table |
++-------+-------------------------------------+
+| demo | CREATE TABLE IF NOT EXISTS \"demo\" ( |
+| | \"host\" STRING NULL, |
+| | \"cpu\" DOUBLE NULL, |
+| | \"memory\" DOUBLE NULL, |
+| | \"ts\" TIMESTAMP(3) NOT NULL, |
+| | TIME INDEX (\"ts\"), |
+| | PRIMARY KEY (\"host\") |
+| | ) |
+| | |
+| | ENGINE=mito |
+| | WITH( |
+| | append_mode = 'true' |
+| | ) |
++-------+-------------------------------------+\
+";
+ assert_eq!(pretty, expected);
+}
+
async fn insert_and_assert(db: &Database) {
// testing data:
let (expected_host_col, expected_cpu_col, expected_mem_col, expected_ts_col) = expect_data();
|
feat
|
hint options for gRPC insert (#4454)
|
18250c48034235b332a602dfd98a6fa55b42b62e
|
2023-08-22 19:00:09
|
Ruihang Xia
|
feat: implement Flight and gRPC services for RegionServer (#2226)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index d30ce2bfc8c7..80c932f7e504 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2623,6 +2623,7 @@ name = "datanode"
version = "0.3.2"
dependencies = [
"api",
+ "arrow-flight",
"async-compat",
"async-stream",
"async-trait",
diff --git a/src/api/src/helper.rs b/src/api/src/helper.rs
index bae965c8e8df..bb049ecb26fd 100644
--- a/src/api/src/helper.rs
+++ b/src/api/src/helper.rs
@@ -37,6 +37,7 @@ use greptime_proto::v1;
use greptime_proto::v1::ddl_request::Expr;
use greptime_proto::v1::greptime_request::Request;
use greptime_proto::v1::query_request::Query;
+use greptime_proto::v1::region::region_request;
use greptime_proto::v1::value::ValueData;
use greptime_proto::v1::{DdlRequest, IntervalMonthDayNano, QueryRequest, SemanticType};
use snafu::prelude::*;
@@ -328,6 +329,21 @@ fn query_request_type(request: &QueryRequest) -> &'static str {
}
}
+/// Returns the type name of the [RegionRequest].
+pub fn region_request_type(request: ®ion_request::Request) -> &'static str {
+ match request {
+ region_request::Request::Inserts(_) => "region.inserts",
+ region_request::Request::Deletes(_) => "region.deletes",
+ region_request::Request::Create(_) => "region.create",
+ region_request::Request::Drop(_) => "region.drop ",
+ region_request::Request::Open(_) => "region.open",
+ region_request::Request::Close(_) => "region.close",
+ region_request::Request::Alter(_) => "region.alter",
+ region_request::Request::Flush(_) => "region.flush",
+ region_request::Request::Compact(_) => "region.compact",
+ }
+}
+
/// Returns the type name of the [DdlRequest].
fn ddl_request_type(request: &DdlRequest) -> &'static str {
match request.expr {
diff --git a/src/datanode/Cargo.toml b/src/datanode/Cargo.toml
index 1f7589a90133..caa71d44e643 100644
--- a/src/datanode/Cargo.toml
+++ b/src/datanode/Cargo.toml
@@ -9,6 +9,7 @@ testing = ["meta-srv/mock"]
[dependencies]
api = { workspace = true }
+arrow-flight.workspace = true
async-compat = "0.2"
async-stream.workspace = true
async-trait.workspace = true
diff --git a/src/datanode/src/region_server.rs b/src/datanode/src/region_server.rs
index 34c22642799c..ae41a00f80ae 100644
--- a/src/datanode/src/region_server.rs
+++ b/src/datanode/src/region_server.rs
@@ -14,9 +14,11 @@
use std::any::Any;
use std::collections::HashMap;
-use std::sync::{Arc, Mutex};
+use std::sync::{Arc, Mutex, RwLock};
-use api::v1::region::QueryRequest;
+use api::v1::region::region_request::Request as RequestBody;
+use api::v1::region::{QueryRequest, RegionResponse};
+use arrow_flight::{FlightData, Ticket};
use async_trait::async_trait;
use bytes::Bytes;
use common_query::logical_plan::Expr;
@@ -33,7 +35,12 @@ use datafusion::execution::context::SessionState;
use datafusion_common::DataFusionError;
use datafusion_expr::{Expr as DfExpr, TableType};
use datatypes::arrow::datatypes::SchemaRef;
+use prost::Message;
use query::QueryEngineRef;
+use servers::error as servers_error;
+use servers::error::Result as ServerResult;
+use servers::grpc::flight::{FlightCraft, FlightRecordBatchStream, TonicStream};
+use servers::grpc::region_server::RegionServerHandler;
use session::context::QueryContext;
use snafu::{OptionExt, ResultExt};
use store_api::metadata::RegionMetadataRef;
@@ -42,6 +49,7 @@ use store_api::region_request::RegionRequest;
use store_api::storage::{RegionId, ScanRequest};
use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};
use table::table::scan::StreamScanAdapter;
+use tonic::{Request, Response, Result as TonicResult};
use crate::error::{
DecodeLogicalPlanSnafu, ExecuteLogicalPlanSnafu, GetRegionMetadataSnafu,
@@ -49,24 +57,80 @@ use crate::error::{
UnsupportedOutputSnafu,
};
+#[derive(Clone)]
pub struct RegionServer {
- engines: HashMap<String, RegionEngineRef>,
+ inner: Arc<RegionServerInner>,
+}
+
+impl RegionServer {
+ pub fn new(query_engine: QueryEngineRef) -> Self {
+ Self {
+ inner: Arc::new(RegionServerInner::new(query_engine)),
+ }
+ }
+
+ pub fn register_engine(&mut self, engine: RegionEngineRef) {
+ self.inner.register_engine(engine);
+ }
+
+ pub async fn handle_request(
+ &self,
+ region_id: RegionId,
+ request: RegionRequest,
+ ) -> Result<Output> {
+ self.inner.handle_request(region_id, request).await
+ }
+
+ pub async fn handle_read(&self, request: QueryRequest) -> Result<SendableRecordBatchStream> {
+ self.inner.handle_read(request).await
+ }
+}
+
+#[async_trait]
+impl RegionServerHandler for RegionServer {
+ async fn handle(&self, _request: RequestBody) -> ServerResult<RegionResponse> {
+ todo!()
+ }
+}
+
+#[async_trait]
+impl FlightCraft for RegionServer {
+ async fn do_get(
+ &self,
+ request: Request<Ticket>,
+ ) -> TonicResult<Response<TonicStream<FlightData>>> {
+ let ticket = request.into_inner().ticket;
+ let request = QueryRequest::decode(ticket.as_ref())
+ .context(servers_error::InvalidFlightTicketSnafu)?;
+
+ let result = self.handle_read(request).await?;
+
+ let stream = Box::pin(FlightRecordBatchStream::new(result));
+ Ok(Response::new(stream))
+ }
+}
+
+struct RegionServerInner {
+ engines: RwLock<HashMap<String, RegionEngineRef>>,
region_map: DashMap<RegionId, RegionEngineRef>,
query_engine: QueryEngineRef,
}
-impl RegionServer {
+impl RegionServerInner {
pub fn new(query_engine: QueryEngineRef) -> Self {
Self {
- engines: HashMap::new(),
+ engines: RwLock::new(HashMap::new()),
region_map: DashMap::new(),
query_engine,
}
}
- pub fn register_engine(&mut self, engine: RegionEngineRef) {
+ pub fn register_engine(&self, engine: RegionEngineRef) {
let engine_name = engine.name();
- self.engines.insert(engine_name.to_string(), engine);
+ self.engines
+ .write()
+ .unwrap()
+ .insert(engine_name.to_string(), engine);
}
pub async fn handle_request(
@@ -90,6 +154,8 @@ impl RegionServer {
let engine = match ®ion_change {
RegionChange::Register(engine_type) => self
.engines
+ .read()
+ .unwrap()
.get(engine_type)
.with_context(|| RegionEngineNotFoundSnafu { name: engine_type })?
.clone(),
diff --git a/src/datanode/src/server.rs b/src/datanode/src/server.rs
index 0b037f4d59b5..dbc2ececc919 100644
--- a/src/datanode/src/server.rs
+++ b/src/datanode/src/server.rs
@@ -31,6 +31,7 @@ use crate::error::{
WaitForGrpcServingSnafu,
};
use crate::instance::InstanceRef;
+use crate::region_server::RegionServer;
pub mod grpc;
@@ -42,6 +43,9 @@ pub struct Services {
impl Services {
pub async fn try_new(instance: InstanceRef, opts: &DatanodeOptions) -> Result<Self> {
+ // TODO(ruihang): remove database service once region server is ready.
+ let enable_region_server = option_env!("ENABLE_REGION_SERVER").is_some();
+
let grpc_runtime = Arc::new(
RuntimeBuilder::default()
.worker_threads(opts.rpc_runtime_size)
@@ -50,10 +54,24 @@ impl Services {
.context(RuntimeResourceSnafu)?,
);
+ let region_server = RegionServer::new(instance.query_engine());
+ let flight_handler = if enable_region_server {
+ Some(Arc::new(region_server.clone()) as _)
+ } else {
+ None
+ };
+ let region_server_handler = if enable_region_server {
+ Some(Arc::new(region_server.clone()) as _)
+ } else {
+ None
+ };
+
Ok(Self {
grpc_server: GrpcServer::new(
ServerGrpcQueryHandlerAdaptor::arc(instance),
None,
+ flight_handler,
+ region_server_handler,
None,
grpc_runtime,
),
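
One subtlety in the `enable_region_server` toggle above: `option_env!` is expanded at compile time, so the flag is baked into the binary when it is built rather than read when the datanode starts. A tiny standalone sketch of the difference, using the same variable name:

fn main() {
    // Compile time: Some(..) only if ENABLE_REGION_SERVER was set while building.
    let at_build = option_env!("ENABLE_REGION_SERVER").is_some();
    // Run time: checks the environment of the running process instead.
    let at_run = std::env::var("ENABLE_REGION_SERVER").is_ok();
    println!("set at build: {at_build}, set at run: {at_run}");
}
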
diff --git a/src/frontend/src/server.rs b/src/frontend/src/server.rs
index 2810436468de..91d709b8d807 100644
--- a/src/frontend/src/server.rs
+++ b/src/frontend/src/server.rs
@@ -71,6 +71,8 @@ impl Services {
let grpc_server = GrpcServer::new(
ServerGrpcQueryHandlerAdaptor::arc(instance.clone()),
Some(instance.clone()),
+ None,
+ None,
user_provider.clone(),
grpc_runtime,
);
diff --git a/src/servers/Cargo.toml b/src/servers/Cargo.toml
index f13fe7ee33bc..7ad63ea38aa2 100644
--- a/src/servers/Cargo.toml
+++ b/src/servers/Cargo.toml
@@ -8,6 +8,7 @@ license.workspace = true
dashboard = []
mem-prof = ["dep:common-mem-prof"]
pprof = ["dep:pprof"]
+testing = []
[dependencies]
aide = { version = "0.9", features = ["axum"] }
diff --git a/src/servers/src/grpc.rs b/src/servers/src/grpc.rs
index 337add7b7696..8ead91f6b34c 100644
--- a/src/servers/src/grpc.rs
+++ b/src/servers/src/grpc.rs
@@ -14,8 +14,9 @@
mod database;
pub mod flight;
-pub mod handler;
+pub mod greptime_handler;
pub mod prom_query_gateway;
+pub mod region_server;
use std::net::SocketAddr;
use std::sync::Arc;
@@ -23,6 +24,7 @@ use std::sync::Arc;
use api::v1::greptime_database_server::{GreptimeDatabase, GreptimeDatabaseServer};
use api::v1::health_check_server::{HealthCheck, HealthCheckServer};
use api::v1::prometheus_gateway_server::{PrometheusGateway, PrometheusGatewayServer};
+use api::v1::region::region_server_server::RegionServerServer;
use api::v1::{HealthCheckRequest, HealthCheckResponse};
use arrow_flight::flight_service_server::{FlightService, FlightServiceServer};
use async_trait::async_trait;
@@ -37,15 +39,14 @@ use tokio::sync::oneshot::{self, Receiver, Sender};
use tokio::sync::Mutex;
use tokio_stream::wrappers::TcpListenerStream;
use tonic::{Request, Response, Status};
+use tonic_reflection::server::{ServerReflection, ServerReflectionServer};
+use self::flight::{FlightCraftRef, FlightCraftWrapper};
use self::prom_query_gateway::PrometheusGatewayService;
-use crate::error::{
- AlreadyStartedSnafu, GrpcReflectionServiceSnafu, InternalSnafu, Result, StartGrpcSnafu,
- TcpBindSnafu,
-};
+use self::region_server::{RegionServerHandlerRef, RegionServerRequestHandler};
+use crate::error::{AlreadyStartedSnafu, InternalSnafu, Result, StartGrpcSnafu, TcpBindSnafu};
use crate::grpc::database::DatabaseService;
-use crate::grpc::flight::FlightHandler;
-use crate::grpc::handler::GreptimeRequestHandler;
+use crate::grpc::greptime_handler::GreptimeRequestHandler;
use crate::prometheus::PrometheusHandlerRef;
use crate::query_handler::grpc::ServerGrpcQueryHandlerRef;
use crate::server::Server;
@@ -53,50 +54,74 @@ use crate::server::Server;
type TonicResult<T> = std::result::Result<T, Status>;
pub struct GrpcServer {
+ // states
shutdown_tx: Mutex<Option<Sender<()>>>,
- request_handler: Arc<GreptimeRequestHandler>,
user_provider: Option<UserProviderRef>,
- /// Handler for Prometheus-compatible PromQL queries. Only present for frontend server.
- prometheus_handler: Option<PrometheusHandlerRef>,
/// gRPC serving state receiver. Only present if the gRPC server is started.
/// Used to wait for the server to stop, performing the old blocking fashion.
serve_state: Mutex<Option<Receiver<Result<()>>>>,
+
+ // handlers
+ /// Handler for [GreptimeDatabase] service.
+ database_handler: Option<GreptimeRequestHandler>,
+ /// Handler for Prometheus-compatible PromQL queries ([PrometheusGateway]). Only present for frontend server.
+ prometheus_handler: Option<PrometheusHandlerRef>,
+ /// Handler for [FlightService].
+ flight_handler: Option<FlightCraftRef>,
+ /// Handler for [RegionServer].
+ region_server_handler: Option<RegionServerRequestHandler>,
}
impl GrpcServer {
pub fn new(
query_handler: ServerGrpcQueryHandlerRef,
prometheus_handler: Option<PrometheusHandlerRef>,
+ flight_handler: Option<FlightCraftRef>,
+ region_server_handler: Option<RegionServerHandlerRef>,
user_provider: Option<UserProviderRef>,
runtime: Arc<Runtime>,
) -> Self {
- let request_handler = Arc::new(GreptimeRequestHandler::new(
- query_handler,
- user_provider.clone(),
- runtime,
- ));
+ let database_handler =
+ GreptimeRequestHandler::new(query_handler, user_provider.clone(), runtime.clone());
+ let region_server_handler = region_server_handler.map(|handler| {
+ RegionServerRequestHandler::new(handler, user_provider.clone(), runtime.clone())
+ });
Self {
shutdown_tx: Mutex::new(None),
- request_handler,
user_provider,
- prometheus_handler,
serve_state: Mutex::new(None),
+ database_handler: Some(database_handler),
+ prometheus_handler,
+ flight_handler,
+ region_server_handler,
}
}
+ #[cfg(feature = "testing")]
pub fn create_flight_service(&self) -> FlightServiceServer<impl FlightService> {
- FlightServiceServer::new(FlightHandler::new(self.request_handler.clone()))
+ FlightServiceServer::new(FlightCraftWrapper(self.database_handler.clone().unwrap()))
}
+ #[cfg(feature = "testing")]
pub fn create_database_service(&self) -> GreptimeDatabaseServer<impl GreptimeDatabase> {
- GreptimeDatabaseServer::new(DatabaseService::new(self.request_handler.clone()))
+ GreptimeDatabaseServer::new(DatabaseService::new(self.database_handler.clone().unwrap()))
}
pub fn create_healthcheck_service(&self) -> HealthCheckServer<impl HealthCheck> {
HealthCheckServer::new(HealthCheckHandler)
}
+ pub fn create_reflection_service(&self) -> ServerReflectionServer<impl ServerReflection> {
+ tonic_reflection::server::Builder::configure()
+ .register_encoded_file_descriptor_set(api::v1::GREPTIME_GRPC_DESC)
+ .with_service_name("greptime.v1.GreptimeDatabase")
+ .with_service_name("greptime.v1.HealthCheck")
+ .with_service_name("greptime.v1.RegionServer")
+ .build()
+ .unwrap()
+ }
+
pub fn create_prom_query_gateway_service(
&self,
handler: PrometheusHandlerRef,
@@ -172,22 +197,31 @@ impl Server for GrpcServer {
(listener, addr)
};
- let reflection_service = tonic_reflection::server::Builder::configure()
- .register_encoded_file_descriptor_set(api::v1::GREPTIME_GRPC_DESC)
- .with_service_name("greptime.v1.GreptimeDatabase")
- .with_service_name("greptime.v1.HealthCheck")
- .build()
- .context(GrpcReflectionServiceSnafu)?;
-
let mut builder = tonic::transport::Server::builder()
- .add_service(self.create_flight_service())
- .add_service(self.create_database_service())
- .add_service(self.create_healthcheck_service());
+ .add_service(self.create_healthcheck_service())
+ .add_service(self.create_reflection_service());
+ if let Some(database_handler) = &self.database_handler {
+ builder = builder.add_service(GreptimeDatabaseServer::new(DatabaseService::new(
+ database_handler.clone(),
+ )))
+ }
if let Some(prometheus_handler) = &self.prometheus_handler {
builder = builder
.add_service(self.create_prom_query_gateway_service(prometheus_handler.clone()))
}
- let builder = builder.add_service(reflection_service);
+ if let Some(flight_handler) = &self.flight_handler {
+ builder = builder.add_service(FlightServiceServer::new(FlightCraftWrapper(
+ flight_handler.clone(),
+ )))
+ } else {
+ // TODO(ruihang): this is a temporary workaround before region server is ready.
+ builder = builder.add_service(FlightServiceServer::new(FlightCraftWrapper(
+ self.database_handler.clone().unwrap(),
+ )))
+ }
+ if let Some(region_server_handler) = &self.region_server_handler {
+ builder = builder.add_service(RegionServerServer::new(region_server_handler.clone()))
+ }
let (serve_state_tx, serve_state_rx) = oneshot::channel();
let mut serve_state = self.serve_state.lock().await;
diff --git a/src/servers/src/grpc/database.rs b/src/servers/src/grpc/database.rs
index 1c832e6efeb0..5be5d762549f 100644
--- a/src/servers/src/grpc/database.rs
+++ b/src/servers/src/grpc/database.rs
@@ -12,8 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::sync::Arc;
-
use api::v1::greptime_database_server::GreptimeDatabase;
use api::v1::greptime_response::Response as RawResponse;
use api::v1::{AffectedRows, GreptimeRequest, GreptimeResponse, ResponseHeader};
@@ -23,15 +21,15 @@ use common_query::Output;
use futures::StreamExt;
use tonic::{Request, Response, Status, Streaming};
-use crate::grpc::handler::GreptimeRequestHandler;
+use crate::grpc::greptime_handler::GreptimeRequestHandler;
use crate::grpc::TonicResult;
pub(crate) struct DatabaseService {
- handler: Arc<GreptimeRequestHandler>,
+ handler: GreptimeRequestHandler,
}
impl DatabaseService {
- pub(crate) fn new(handler: Arc<GreptimeRequestHandler>) -> Self {
+ pub(crate) fn new(handler: GreptimeRequestHandler) -> Self {
Self { handler }
}
}
diff --git a/src/servers/src/grpc/flight.rs b/src/servers/src/grpc/flight.rs
index 0b793d98554d..6f4f7cbca721 100644
--- a/src/servers/src/grpc/flight.rs
+++ b/src/servers/src/grpc/flight.rs
@@ -32,24 +32,43 @@ use snafu::ResultExt;
use tonic::{Request, Response, Status, Streaming};
use crate::error;
-use crate::grpc::flight::stream::FlightRecordBatchStream;
-use crate::grpc::handler::GreptimeRequestHandler;
+pub use crate::grpc::flight::stream::FlightRecordBatchStream;
+use crate::grpc::greptime_handler::GreptimeRequestHandler;
use crate::grpc::TonicResult;
-type TonicStream<T> = Pin<Box<dyn Stream<Item = TonicResult<T>> + Send + Sync + 'static>>;
+pub type TonicStream<T> = Pin<Box<dyn Stream<Item = TonicResult<T>> + Send + Sync + 'static>>;
-pub struct FlightHandler {
- handler: Arc<GreptimeRequestHandler>,
+/// A subset of [FlightService]
+#[async_trait]
+pub trait FlightCraft: Send + Sync + 'static {
+ async fn do_get(
+ &self,
+ request: Request<Ticket>,
+ ) -> TonicResult<Response<TonicStream<FlightData>>>;
}
-impl FlightHandler {
- pub fn new(handler: Arc<GreptimeRequestHandler>) -> Self {
- Self { handler }
+pub type FlightCraftRef = Arc<dyn FlightCraft>;
+
+pub struct FlightCraftWrapper<T: FlightCraft>(pub T);
+
+impl<T: FlightCraft> From<T> for FlightCraftWrapper<T> {
+ fn from(t: T) -> Self {
+ Self(t)
}
}
#[async_trait]
-impl FlightService for FlightHandler {
+impl FlightCraft for FlightCraftRef {
+ async fn do_get(
+ &self,
+ request: Request<Ticket>,
+ ) -> TonicResult<Response<TonicStream<FlightData>>> {
+ (**self).do_get(request).await
+ }
+}
+
+#[async_trait]
+impl<T: FlightCraft> FlightService for FlightCraftWrapper<T> {
type HandshakeStream = TonicStream<HandshakeResponse>;
async fn handshake(
@@ -85,14 +104,7 @@ impl FlightService for FlightHandler {
type DoGetStream = TonicStream<FlightData>;
async fn do_get(&self, request: Request<Ticket>) -> TonicResult<Response<Self::DoGetStream>> {
- let ticket = request.into_inner().ticket;
- let request =
- GreptimeRequest::decode(ticket.as_ref()).context(error::InvalidFlightTicketSnafu)?;
-
- let output = self.handler.handle_request(request).await?;
-
- let stream = to_flight_data_stream(output);
- Ok(Response::new(stream))
+ self.0.do_get(request).await
}
type DoPutStream = TonicStream<PutResult>;
@@ -129,6 +141,24 @@ impl FlightService for FlightHandler {
}
}
+#[async_trait]
+impl FlightCraft for GreptimeRequestHandler {
+ async fn do_get(
+ &self,
+ request: Request<Ticket>,
+ ) -> TonicResult<Response<TonicStream<FlightData>>> {
+ let ticket = request.into_inner().ticket;
+ let request =
+ GreptimeRequest::decode(ticket.as_ref()).context(error::InvalidFlightTicketSnafu)?;
+
+ let output = self.handle_request(request).await?;
+
+ let stream: Pin<Box<dyn Stream<Item = Result<FlightData, Status>> + Send + Sync>> =
+ to_flight_data_stream(output);
+ Ok(Response::new(stream))
+ }
+}
+
fn to_flight_data_stream(output: Output) -> TonicStream<FlightData> {
match output {
Output::Stream(stream) => {
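
`FlightCraftWrapper` exists because `FlightService` is a foreign trait (generated in the arrow-flight crate), so a blanket `impl FlightService for T: FlightCraft` would run afoul of the orphan rules; routing it through a local newtype makes the blanket impl legal. A stripped-down, standalone sketch of the same pattern with illustrative trait names:

// Stands in for the foreign tonic-generated trait we cannot implement blanket-wise.
trait Foreign {
    fn call(&self) -> String;
}

// Stands in for FlightCraft, the crate-local abstraction.
trait Craft {
    fn do_get(&self) -> String;
}

// Local newtype: a blanket impl of the foreign trait is allowed on our own type.
struct Wrapper<T: Craft>(T);

impl<T: Craft> Foreign for Wrapper<T> {
    fn call(&self) -> String {
        self.0.do_get()
    }
}

struct MyCraft;
impl Craft for MyCraft {
    fn do_get(&self) -> String {
        "ok".to_string()
    }
}

fn main() {
    let svc = Wrapper(MyCraft);
    assert_eq!(svc.call(), "ok");
}
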
diff --git a/src/servers/src/grpc/flight/stream.rs b/src/servers/src/grpc/flight/stream.rs
index 0048da2ed8b4..5ff570608e33 100644
--- a/src/servers/src/grpc/flight/stream.rs
+++ b/src/servers/src/grpc/flight/stream.rs
@@ -30,7 +30,7 @@ use super::TonicResult;
use crate::error;
#[pin_project(PinnedDrop)]
-pub(super) struct FlightRecordBatchStream {
+pub struct FlightRecordBatchStream {
#[pin]
rx: mpsc::Receiver<Result<FlightMessage, tonic::Status>>,
join_handle: JoinHandle<()>,
@@ -39,7 +39,7 @@ pub(super) struct FlightRecordBatchStream {
}
impl FlightRecordBatchStream {
- pub(super) fn new(recordbatches: SendableRecordBatchStream) -> Self {
+ pub fn new(recordbatches: SendableRecordBatchStream) -> Self {
let (tx, rx) = mpsc::channel::<TonicResult<FlightMessage>>(1);
let join_handle =
common_runtime::spawn_read(
diff --git a/src/servers/src/grpc/handler.rs b/src/servers/src/grpc/greptime_handler.rs
similarity index 96%
rename from src/servers/src/grpc/handler.rs
rename to src/servers/src/grpc/greptime_handler.rs
index ed6e36ad2e89..873a6293fb05 100644
--- a/src/servers/src/grpc/handler.rs
+++ b/src/servers/src/grpc/greptime_handler.rs
@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//! Handler for Greptime Database service. It's implemented by frontend.
+
use std::sync::Arc;
use std::time::Instant;
@@ -38,6 +40,7 @@ use crate::metrics::{
};
use crate::query_handler::grpc::ServerGrpcQueryHandlerRef;
+#[derive(Clone)]
pub struct GreptimeRequestHandler {
handler: ServerGrpcQueryHandlerRef,
user_provider: Option<UserProviderRef>,
@@ -174,7 +177,7 @@ pub(crate) fn create_query_context(header: Option<&RequestHeader>) -> QueryConte
/// Histogram timer for handling gRPC request.
///
/// The timer records the elapsed time with [StatusCode::Success] on drop.
-struct RequestTimer {
+pub(crate) struct RequestTimer {
start: Instant,
db: String,
request_type: &'static str,
@@ -183,7 +186,7 @@ struct RequestTimer {
impl RequestTimer {
/// Returns a new timer.
- fn new(db: String, request_type: &'static str) -> RequestTimer {
+ pub fn new(db: String, request_type: &'static str) -> RequestTimer {
RequestTimer {
start: Instant::now(),
db,
@@ -193,7 +196,7 @@ impl RequestTimer {
}
/// Consumes the timer and record the elapsed time with specific `status_code`.
- fn record(mut self, status_code: StatusCode) {
+ pub fn record(mut self, status_code: StatusCode) {
self.status_code = status_code;
}
}
diff --git a/src/servers/src/grpc/prom_query_gateway.rs b/src/servers/src/grpc/prom_query_gateway.rs
index 1cae3b3a4575..02d74839c498 100644
--- a/src/servers/src/grpc/prom_query_gateway.rs
+++ b/src/servers/src/grpc/prom_query_gateway.rs
@@ -33,7 +33,7 @@ use snafu::OptionExt;
use tonic::{Request, Response};
use crate::error::InvalidQuerySnafu;
-use crate::grpc::handler::{auth, create_query_context};
+use crate::grpc::greptime_handler::{auth, create_query_context};
use crate::grpc::TonicResult;
use crate::prometheus::{
retrieve_metric_name_and_result_type, PrometheusHandlerRef, PrometheusJsonResponse,
diff --git a/src/servers/src/grpc/region_server.rs b/src/servers/src/grpc/region_server.rs
new file mode 100644
index 000000000000..e3a7c06673eb
--- /dev/null
+++ b/src/servers/src/grpc/region_server.rs
@@ -0,0 +1,195 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::sync::Arc;
+
+use api::helper::region_request_type;
+use api::v1::auth_header::AuthScheme;
+use api::v1::region::region_request::Request as RequestBody;
+use api::v1::region::region_server_server::RegionServer as RegionServerService;
+use api::v1::region::{RegionRequest, RegionResponse};
+use api::v1::{Basic, RequestHeader};
+use async_trait::async_trait;
+use auth::{Identity, Password, UserInfoRef, UserProviderRef};
+use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
+use common_catalog::parse_catalog_and_schema_from_db_string;
+use common_error::ext::ErrorExt;
+use common_runtime::Runtime;
+use common_telemetry::{debug, error};
+use metrics::increment_counter;
+use session::context::{QueryContextBuilder, QueryContextRef};
+use snafu::{OptionExt, ResultExt};
+use tonic::{Request, Response};
+
+use crate::error::{
+ AuthSnafu, InvalidQuerySnafu, JoinTaskSnafu, NotFoundAuthHeaderSnafu, Result,
+ UnsupportedAuthSchemeSnafu,
+};
+use crate::grpc::greptime_handler::RequestTimer;
+use crate::grpc::TonicResult;
+use crate::metrics::{METRIC_AUTH_FAILURE, METRIC_CODE_LABEL};
+
+#[async_trait]
+pub trait RegionServerHandler: Send + Sync {
+ async fn handle(&self, request: RequestBody) -> Result<RegionResponse>;
+}
+
+pub type RegionServerHandlerRef = Arc<dyn RegionServerHandler>;
+
+#[derive(Clone)]
+pub struct RegionServerRequestHandler {
+ handler: Arc<dyn RegionServerHandler>,
+ user_provider: Option<UserProviderRef>,
+ runtime: Arc<Runtime>,
+}
+
+impl RegionServerRequestHandler {
+ pub fn new(
+ handler: Arc<dyn RegionServerHandler>,
+ user_provider: Option<UserProviderRef>,
+ runtime: Arc<Runtime>,
+ ) -> Self {
+ Self {
+ handler,
+ user_provider,
+ runtime,
+ }
+ }
+
+ async fn handle(&self, request: RegionRequest) -> Result<RegionResponse> {
+ let query = request.request.context(InvalidQuerySnafu {
+ reason: "Expecting non-empty GreptimeRequest.",
+ })?;
+
+ let header = request.header.as_ref();
+ let query_ctx = create_query_context(header);
+ let user_info = self.auth(header, &query_ctx).await?;
+ query_ctx.set_current_user(user_info);
+
+ let handler = self.handler.clone();
+ let request_type = region_request_type(&query);
+ let db = query_ctx.get_db_string();
+ let timer = RequestTimer::new(db.clone(), request_type);
+
+ // Executes requests in another runtime to
+        // 1. prevent the execution from being cancelled unexpectedly by the Tonic runtime;
+        //    - Refer to our blog for the rationale behind it:
+        //      https://www.greptime.com/blogs/2023-01-12-hidden-control-flow.html
+        //    - Obtaining a `JoinHandle` to get the panic message (if there's any).
+        //      From its docs, `JoinHandle` is cancel safe. The task keeps running even if its handle has been dropped.
+        // 2. avoid the handler blocking the gRPC runtime incidentally.
+ let handle = self.runtime.spawn(async move {
+ handler.handle(query).await.map_err(|e| {
+ if e.status_code().should_log_error() {
+ error!(e; "Failed to handle request");
+ } else {
+ // Currently, we still print a debug log.
+ debug!("Failed to handle request, err: {}", e);
+ }
+ e
+ })
+ });
+
+ handle.await.context(JoinTaskSnafu).map_err(|e| {
+ timer.record(e.status_code());
+ e
+ })?
+ }
+
+ async fn auth(
+ &self,
+ header: Option<&RequestHeader>,
+ query_ctx: &QueryContextRef,
+ ) -> Result<Option<UserInfoRef>> {
+ let Some(user_provider) = self.user_provider.as_ref() else {
+ return Ok(None);
+ };
+
+ let auth_scheme = header
+ .and_then(|header| {
+ header
+ .authorization
+ .as_ref()
+ .and_then(|x| x.auth_scheme.clone())
+ })
+ .context(NotFoundAuthHeaderSnafu)?;
+
+ match auth_scheme {
+ AuthScheme::Basic(Basic { username, password }) => user_provider
+ .auth(
+ Identity::UserId(&username, None),
+ Password::PlainText(password.into()),
+ query_ctx.current_catalog(),
+ query_ctx.current_schema(),
+ )
+ .await
+ .context(AuthSnafu),
+ AuthScheme::Token(_) => UnsupportedAuthSchemeSnafu {
+ name: "Token AuthScheme".to_string(),
+ }
+ .fail(),
+ }
+ .map(Some)
+ .map_err(|e| {
+ increment_counter!(
+ METRIC_AUTH_FAILURE,
+ &[(METRIC_CODE_LABEL, format!("{}", e.status_code()))]
+ );
+ e
+ })
+ }
+}
+
+pub(crate) fn create_query_context(header: Option<&RequestHeader>) -> QueryContextRef {
+ let (catalog, schema) = header
+ .map(|header| {
+ // We provide dbname field in newer versions of protos/sdks
+ // parse dbname from header in priority
+ if !header.dbname.is_empty() {
+ parse_catalog_and_schema_from_db_string(&header.dbname)
+ } else {
+ (
+ if !header.catalog.is_empty() {
+ &header.catalog
+ } else {
+ DEFAULT_CATALOG_NAME
+ },
+ if !header.schema.is_empty() {
+ &header.schema
+ } else {
+ DEFAULT_SCHEMA_NAME
+ },
+ )
+ }
+ })
+ .unwrap_or((DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME));
+
+ QueryContextBuilder::default()
+ .current_catalog(catalog.to_string())
+ .current_schema(schema.to_string())
+ .try_trace_id(header.and_then(|h: &RequestHeader| h.trace_id))
+ .build()
+}
+
+#[async_trait]
+impl RegionServerService for RegionServerRequestHandler {
+ async fn handle(
+ &self,
+ request: Request<RegionRequest>,
+ ) -> TonicResult<Response<RegionResponse>> {
+ let request = request.into_inner();
+ let response = self.handle(request).await?;
+ Ok(Response::new(response))
+ }
+}
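
Wiring a datanode-side engine into this new gRPC plumbing means implementing `RegionServerHandler`; the `RegionServer` in the datanode diff above does so with `todo!()` for now. A hedged sketch of the minimal shape such an implementation takes, assuming the crate context of this diff (and that `RegionResponse`, being prost-generated, derives `Default`):

// Sketch only: relies on the servers/api crates imported in the file above.
struct NoopRegionHandler;

#[async_trait]
impl RegionServerHandler for NoopRegionHandler {
    async fn handle(&self, _request: RequestBody) -> Result<RegionResponse> {
        // Assumption: prost-generated messages implement Default.
        Ok(RegionResponse::default())
    }
}
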
diff --git a/src/servers/tests/grpc/mod.rs b/src/servers/tests/grpc/mod.rs
index 82a98f1938bd..96becf5f94d9 100644
--- a/src/servers/tests/grpc/mod.rs
+++ b/src/servers/tests/grpc/mod.rs
@@ -24,8 +24,8 @@ use auth::UserProviderRef;
use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_runtime::{Builder as RuntimeBuilder, Runtime};
use servers::error::{Result, StartGrpcSnafu, TcpBindSnafu};
-use servers::grpc::flight::FlightHandler;
-use servers::grpc::handler::GreptimeRequestHandler;
+use servers::grpc::flight::FlightCraftWrapper;
+use servers::grpc::greptime_handler::GreptimeRequestHandler;
use servers::query_handler::grpc::ServerGrpcQueryHandlerRef;
use servers::server::Server;
use snafu::ResultExt;
@@ -55,11 +55,12 @@ impl MockGrpcServer {
}
fn create_service(&self) -> FlightServiceServer<impl FlightService> {
- let service = FlightHandler::new(Arc::new(GreptimeRequestHandler::new(
+ let service: FlightCraftWrapper<_> = GreptimeRequestHandler::new(
self.query_handler.clone(),
self.user_provider.clone(),
self.runtime.clone(),
- )));
+ )
+ .into();
FlightServiceServer::new(service)
}
}
diff --git a/tests-integration/Cargo.toml b/tests-integration/Cargo.toml
index 4baed8119c57..ea69c1af25f7 100644
--- a/tests-integration/Cargo.toml
+++ b/tests-integration/Cargo.toml
@@ -43,7 +43,7 @@ rstest_reuse = "0.5"
secrecy = "0.8"
serde.workspace = true
serde_json = "1.0"
-servers = { workspace = true }
+servers = { workspace = true, features = ["testing"] }
session = { workspace = true }
snafu.workspace = true
sql = { workspace = true }
diff --git a/tests-integration/src/cluster.rs b/tests-integration/src/cluster.rs
index b792e6b59c77..350a6a27c197 100644
--- a/tests-integration/src/cluster.rs
+++ b/tests-integration/src/cluster.rs
@@ -38,6 +38,7 @@ use meta_srv::metasrv::{MetaSrv, MetaSrvOptions};
use meta_srv::mocks::MockInfo;
use meta_srv::service::store::kv::{KvBackendAdapter, KvStoreRef};
use meta_srv::service::store::memory::MemStore;
+use servers::grpc::greptime_handler::GreptimeRequestHandler;
use servers::grpc::GrpcServer;
use servers::query_handler::grpc::ServerGrpcQueryHandlerAdaptor;
use servers::Mode;
@@ -287,9 +288,16 @@ async fn create_datanode_client(datanode_instance: Arc<DatanodeInstance>) -> (St
// create a mock datanode grpc service, see example here:
// https://github.com/hyperium/tonic/blob/master/examples/src/mock/mock.rs
+ let query_handler = Arc::new(GreptimeRequestHandler::new(
+ ServerGrpcQueryHandlerAdaptor::arc(datanode_instance.clone()),
+ None,
+ runtime.clone(),
+ ));
let grpc_server = GrpcServer::new(
ServerGrpcQueryHandlerAdaptor::arc(datanode_instance),
None,
+ Some(query_handler),
+ None,
None,
runtime,
);
diff --git a/tests-integration/src/test_util.rs b/tests-integration/src/test_util.rs
index c963d06a4240..c152ec6acb30 100644
--- a/tests-integration/src/test_util.rs
+++ b/tests-integration/src/test_util.rs
@@ -49,6 +49,7 @@ use object_store::services::{Azblob, Gcs, Oss, S3};
use object_store::test_util::TempFolder;
use object_store::ObjectStore;
use secrecy::ExposeSecret;
+use servers::grpc::greptime_handler::GreptimeRequestHandler;
use servers::grpc::GrpcServer;
use servers::http::{HttpOptions, HttpServerBuilder};
use servers::metrics_handler::MetricsHandler;
@@ -583,9 +584,16 @@ pub async fn setup_grpc_server_with_user_provider(
heartbeat.start().await.unwrap();
}
let fe_instance_ref = Arc::new(fe_instance);
+ let flight_handler = Arc::new(GreptimeRequestHandler::new(
+ ServerGrpcQueryHandlerAdaptor::arc(fe_instance_ref.clone()),
+ user_provider.clone(),
+ runtime.clone(),
+ ));
let fe_grpc_server = Arc::new(GrpcServer::new(
ServerGrpcQueryHandlerAdaptor::arc(fe_instance_ref.clone()),
Some(fe_instance_ref.clone()),
+ Some(flight_handler),
+ None,
user_provider,
runtime,
));
|
feat
|
implement Flight and gRPC services for RegionServer (#2226)
|
38fe1a2f013208be520fe0cedd8d57d8ca0f225d
|
2023-03-09 11:54:29
|
Ruihang Xia
|
chore: update dependencies (#1148)
|
false
|
diff --git a/Cargo.lock b/Cargo.lock
index ec75358e6c67..259ffeb60f51 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -123,9 +123,9 @@ dependencies = [
[[package]]
name = "anyhow"
-version = "1.0.68"
+version = "1.0.69"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2cb2f989d18dd141ab8ae82f64d1a8cdd37e0840f73a406896cf5e99502fab61"
+checksum = "224afbd727c3d6e4b90103ece64b8d1b67fbb1973b1046c2281eed3f3803f800"
[[package]]
name = "anymap"
@@ -496,17 +496,16 @@ dependencies = [
"slab",
"socket2",
"waker-fn",
- "windows-sys",
+ "windows-sys 0.42.0",
]
[[package]]
name = "async-lock"
-version = "2.6.0"
+version = "2.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c8101efe8695a6c17e02911402145357e718ac92d3ff88ae8419e84b1707b685"
+checksum = "fa24f727524730b077666307f2734b4a1a1c57acb79193127dcc8914d5242dd7"
dependencies = [
"event-listener",
- "futures-lite",
]
[[package]]
@@ -522,19 +521,20 @@ dependencies = [
[[package]]
name = "async-stream"
-version = "0.3.3"
+version = "0.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dad5c83079eae9969be7fadefe640a1c566901f05ff91ab221de4b6f68d9507e"
+checksum = "ad445822218ce64be7a341abfb0b1ea43b5c23aa83902542a4542e78309d8e5e"
dependencies = [
"async-stream-impl",
"futures-core",
+ "pin-project-lite",
]
[[package]]
name = "async-stream-impl"
-version = "0.3.3"
+version = "0.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "10f203db73a71dfa2fb6dd22763990fa26f3d2625a6da2da900d23b87d26be27"
+checksum = "e4655ae1a7b0cdf149156f780c5bf3f1352bc53cbd9e0a361a7ef7b22947e965"
dependencies = [
"proc-macro2",
"quote",
@@ -543,9 +543,9 @@ dependencies = [
[[package]]
name = "async-trait"
-version = "0.1.63"
+version = "0.1.66"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "eff18d764974428cf3a9328e23fc5c986f5fbed46e6cd4cdf42544df5d297ec1"
+checksum = "b84f9ebcc6c1f5b8cb160f6990096a5c127f423fcb6e1ccc46c370cbdfb75dfc"
dependencies = [
"proc-macro2",
"quote",
@@ -592,9 +592,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
[[package]]
name = "axum"
-version = "0.6.4"
+version = "0.6.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e5694b64066a2459918d8074c2ce0d5a88f409431994c2356617c8ae0c4721fc"
+checksum = "2fb79c228270dcf2426e74864cabc94babb5dbab01a4314e702d2f16540e1591"
dependencies = [
"async-trait",
"axum-core",
@@ -604,7 +604,7 @@ dependencies = [
"http",
"http-body",
"hyper",
- "itoa 1.0.5",
+ "itoa",
"matchit",
"memchr",
"mime",
@@ -625,9 +625,9 @@ dependencies = [
[[package]]
name = "axum-core"
-version = "0.3.2"
+version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1cae3e661676ffbacb30f1a824089a8c9150e71017f7e1e38f2aa32009188d34"
+checksum = "b2f958c80c248b34b9a877a643811be8dbca03ca5ba827f2b63baf3a81e5fc4e"
dependencies = [
"async-trait",
"bytes",
@@ -642,11 +642,11 @@ dependencies = [
[[package]]
name = "axum-macros"
-version = "0.3.2"
+version = "0.3.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9dbcf61bed07d554bd5c225cd07bc41b793eab63e79c6f0ceac7e1aed2f1c670"
+checksum = "404e816a138c27c29f7428ae9b1816ab880ba6923fa76a9f15296af79444a8dc"
dependencies = [
- "heck 0.4.0",
+ "heck 0.4.1",
"proc-macro2",
"quote",
"syn",
@@ -736,9 +736,9 @@ checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a"
[[package]]
name = "base64ct"
-version = "1.5.3"
+version = "1.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b645a089122eccb6111b4f81cbc1a49f5900ac4666bb93ac027feaecf15607bf"
+checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b"
[[package]]
name = "bcder"
@@ -755,7 +755,7 @@ name = "benchmarks"
version = "0.1.0"
dependencies = [
"arrow",
- "clap 4.1.4",
+ "clap 4.1.8",
"client",
"indicatif",
"itertools",
@@ -869,19 +869,19 @@ dependencies = [
[[package]]
name = "borsh"
-version = "0.9.3"
+version = "0.10.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "15bf3650200d8bffa99015595e10f1fbd17de07abbc25bb067da79e769939bfa"
+checksum = "40f9ca3698b2e4cb7c15571db0abc5551dca417a21ae8140460b50309bb2cc62"
dependencies = [
"borsh-derive",
- "hashbrown 0.11.2",
+ "hashbrown 0.13.2",
]
[[package]]
name = "borsh-derive"
-version = "0.9.3"
+version = "0.10.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6441c552f230375d18e3cc377677914d2ca2b0d36e52129fe15450a2dce46775"
+checksum = "598b3eacc6db9c3ee57b22707ad8f6a8d2f6d442bfe24ffeb8cbb70ca59e6a35"
dependencies = [
"borsh-derive-internal",
"borsh-schema-derive-internal",
@@ -892,9 +892,9 @@ dependencies = [
[[package]]
name = "borsh-derive-internal"
-version = "0.9.3"
+version = "0.10.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5449c28a7b352f2d1e592a8a28bf139bc71afb0764a14f3c02500935d8c44065"
+checksum = "186b734fa1c9f6743e90c95d7233c9faab6360d1a96d4ffa19d9cfd1e9350f8a"
dependencies = [
"proc-macro2",
"quote",
@@ -903,9 +903,9 @@ dependencies = [
[[package]]
name = "borsh-schema-derive-internal"
-version = "0.9.3"
+version = "0.10.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cdbd5696d8bfa21d53d9fe39a714a18538bad11492a42d066dbbc395fb1951c0"
+checksum = "99b7ff1008316626f485991b960ade129253d4034014616b94f309a15366cc49"
dependencies = [
"proc-macro2",
"quote",
@@ -942,7 +942,6 @@ dependencies = [
"lazy_static",
"memchr",
"regex-automata",
- "serde",
]
[[package]]
@@ -964,19 +963,20 @@ checksum = "0d261e256854913907f67ed06efbc3338dfe6179796deefc1ff763fc1aee5535"
[[package]]
name = "bytecheck"
-version = "0.6.9"
+version = "0.6.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d11cac2c12b5adc6570dad2ee1b87eff4955dac476fe12d81e5fdd352e52406f"
+checksum = "13fe11640a23eb24562225322cd3e452b93a3d4091d62fab69c70542fcd17d1f"
dependencies = [
"bytecheck_derive",
"ptr_meta",
+ "simdutf8",
]
[[package]]
name = "bytecheck_derive"
-version = "0.6.9"
+version = "0.6.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "13e576ebe98e605500b3c8041bb888e966653577172df6dd97398714eb30b9bf"
+checksum = "e31225543cb46f81a7e224762764f4a6a0f097b1db0b175f69e8065efaa42de5"
dependencies = [
"proc-macro2",
"quote",
@@ -991,9 +991,9 @@ checksum = "2c676a478f63e9fa2dd5368a42f28bba0d6c560b775f38583c8bbaa7fcd67c9c"
[[package]]
name = "bytemuck"
-version = "1.13.0"
+version = "1.13.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c041d3eab048880cb0b86b256447da3f18859a163c3b8d8893f4e6368abe6393"
+checksum = "17febce684fd15d89027105661fec94afb475cb995fbc59d2865198446ba2eea"
[[package]]
name = "byteorder"
@@ -1003,9 +1003,9 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610"
[[package]]
name = "bytes"
-version = "1.3.0"
+version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dfb24e866b15a1af2a1b663f10c6b6b8f397a84aadb828f12e5b289ec23a3a3c"
+checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be"
dependencies = [
"serde",
]
@@ -1039,9 +1039,9 @@ checksum = "cf034765b7d19a011c6d619e880582bf95e8186b580e6fab56589872dd87dcf5"
[[package]]
name = "camino"
-version = "1.1.2"
+version = "1.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c77df041dc383319cc661b428b6961a005db4d6808d5e12536931b1ca9556055"
+checksum = "6031a462f977dd38968b6f23378356512feeace69cef817e1a4475108093cec3"
dependencies = [
"serde",
]
@@ -1235,9 +1235,9 @@ dependencies = [
[[package]]
name = "clang-sys"
-version = "1.4.0"
+version = "1.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fa2e27ae6ab525c3d369ded447057bca5438d86dc3a68f6faafb8269ba82ebf3"
+checksum = "77ed9a53e5d4d9c573ae844bfac6872b159cb1d1585a83b29e7a64b7eef7332a"
dependencies = [
"glob",
"libc",
@@ -1278,13 +1278,13 @@ dependencies = [
[[package]]
name = "clap"
-version = "4.1.4"
+version = "4.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f13b9c79b5d1dd500d20ef541215a6423c75829ef43117e1b4d17fd8af0b5d76"
+checksum = "c3d7ae14b20b94cb02149ed21a86c423859cbe18dc7ed69845cace50e52b40a5"
dependencies = [
"bitflags",
- "clap_derive 4.1.0",
- "clap_lex 0.3.1",
+ "clap_derive 4.1.8",
+ "clap_lex 0.3.2",
"is-terminal",
"once_cell",
"strsim 0.10.0",
@@ -1297,7 +1297,7 @@ version = "3.2.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ea0c8bce528c4be4da13ea6fead8965e95b6073585a2f05204bd8f4119f82a65"
dependencies = [
- "heck 0.4.0",
+ "heck 0.4.1",
"proc-macro-error",
"proc-macro2",
"quote",
@@ -1306,11 +1306,11 @@ dependencies = [
[[package]]
name = "clap_derive"
-version = "4.1.0"
+version = "4.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "684a277d672e91966334af371f1a7b5833f9aa00b07c84e92fbce95e00208ce8"
+checksum = "44bec8e5c9d09e439c4335b1af0abaab56dcf3b94999a936e1bb47b9134288f0"
dependencies = [
- "heck 0.4.0",
+ "heck 0.4.1",
"proc-macro-error",
"proc-macro2",
"quote",
@@ -1328,9 +1328,9 @@ dependencies = [
[[package]]
name = "clap_lex"
-version = "0.3.1"
+version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "783fe232adfca04f90f56201b26d79682d4cd2625e0bc7290b95123afe558ade"
+checksum = "350b9cf31731f9957399229e9b2adc51eeabdfbe9d71d9a0552275fd12710d09"
dependencies = [
"os_str_bytes",
]
@@ -1360,7 +1360,7 @@ dependencies = [
"rand",
"snafu",
"substrait 0.1.0",
- "substrait 0.4.0",
+ "substrait 0.4.1",
"tokio",
"tonic",
"tracing",
@@ -1712,7 +1712,7 @@ dependencies = [
"lazy_static",
"libc",
"unicode-width",
- "windows-sys",
+ "windows-sys 0.42.0",
]
[[package]]
@@ -1753,9 +1753,9 @@ dependencies = [
[[package]]
name = "const-oid"
-version = "0.9.1"
+version = "0.9.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cec318a675afcb6a1ea1d4340e2d377e56e47c266f28043ceccbf4412ddfdd3b"
+checksum = "520fbf3c07483f94e3e3ca9d0cfd913d7718ef2483d2cfd91c0d9e91474ab913"
[[package]]
name = "const-random"
@@ -1922,9 +1922,9 @@ dependencies = [
[[package]]
name = "crossbeam-channel"
-version = "0.5.6"
+version = "0.5.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521"
+checksum = "cf2b3e8478797446514c91ef04bafcb59faba183e621ad488df88983cc14128c"
dependencies = [
"cfg-if 1.0.0",
"crossbeam-utils",
@@ -1932,9 +1932,9 @@ dependencies = [
[[package]]
name = "crossbeam-deque"
-version = "0.8.2"
+version = "0.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "715e8152b692bba2d374b53d4875445368fdf21a94751410af607a5ac677d1fc"
+checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef"
dependencies = [
"cfg-if 1.0.0",
"crossbeam-epoch",
@@ -1943,14 +1943,14 @@ dependencies = [
[[package]]
name = "crossbeam-epoch"
-version = "0.9.13"
+version = "0.9.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "01a9af1f4c2ef74bb8aa1f7e19706bc72d03598c8a570bb5de72243c7a9d9d5a"
+checksum = "46bd5f3f85273295a9d14aedfb86f6aadbff6d8f5295c4a9edb08e819dcf5695"
dependencies = [
"autocfg",
"cfg-if 1.0.0",
"crossbeam-utils",
- "memoffset 0.7.1",
+ "memoffset 0.8.0",
"scopeguard",
]
@@ -1966,9 +1966,9 @@ dependencies = [
[[package]]
name = "crossbeam-utils"
-version = "0.8.14"
+version = "0.8.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4fb766fa798726286dbbb842f174001dab8abc7b627a1dd86e0b7222a95d929f"
+checksum = "3c063cd8cc95f5c377ed0d4b49a4b21f632396ff690e8470c29b3359b346984b"
dependencies = [
"cfg-if 1.0.0",
]
@@ -1991,13 +1991,12 @@ dependencies = [
[[package]]
name = "csv"
-version = "1.1.6"
+version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "22813a6dc45b335f9bade10bf7271dc477e81113e89eb251a0bc2a8a81c536e1"
+checksum = "0b015497079b9a9d69c02ad25de6c0a6edef051ea6360a327d0bd05802ef64ad"
dependencies = [
- "bstr",
"csv-core",
- "itoa 0.4.8",
+ "itoa",
"ryu",
"serde",
]
@@ -2013,9 +2012,9 @@ dependencies = [
[[package]]
name = "cxx"
-version = "1.0.88"
+version = "1.0.92"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "322296e2f2e5af4270b54df9e85a02ff037e271af20ba3e7fe1575515dc840b8"
+checksum = "9a140f260e6f3f79013b8bfc65e7ce630c9ab4388c6a89c71e07226f49487b72"
dependencies = [
"cc",
"cxxbridge-flags",
@@ -2025,9 +2024,9 @@ dependencies = [
[[package]]
name = "cxx-build"
-version = "1.0.88"
+version = "1.0.92"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "017a1385b05d631e7875b1f151c9f012d37b53491e2a87f65bff5c262b2111d8"
+checksum = "da6383f459341ea689374bf0a42979739dc421874f112ff26f829b8040b8e613"
dependencies = [
"cc",
"codespan-reporting",
@@ -2040,15 +2039,15 @@ dependencies = [
[[package]]
name = "cxxbridge-flags"
-version = "1.0.88"
+version = "1.0.92"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c26bbb078acf09bc1ecda02d4223f03bdd28bd4874edcb0379138efc499ce971"
+checksum = "90201c1a650e95ccff1c8c0bb5a343213bdd317c6e600a93075bca2eff54ec97"
[[package]]
name = "cxxbridge-macro"
-version = "1.0.88"
+version = "1.0.92"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "357f40d1f06a24b60ae1fe122542c1fb05d28d32acb2aed064e84bc2ad1e252e"
+checksum = "0b75aed41bb2e6367cae39e6326ef817a851db13c13e4f3263714ca3cfb8de56"
dependencies = [
"proc-macro2",
"quote",
@@ -2057,9 +2056,9 @@ dependencies = [
[[package]]
name = "darling"
-version = "0.14.2"
+version = "0.14.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b0dd3cd20dc6b5a876612a6e5accfe7f3dd883db6d07acfbf14c128f61550dfa"
+checksum = "c0808e1bd8671fb44a113a14e13497557533369847788fa2ae912b6ebfce9fa8"
dependencies = [
"darling_core",
"darling_macro",
@@ -2067,9 +2066,9 @@ dependencies = [
[[package]]
name = "darling_core"
-version = "0.14.2"
+version = "0.14.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a784d2ccaf7c98501746bf0be29b2022ba41fd62a2e622af997a03e9f972859f"
+checksum = "001d80444f28e193f30c2f293455da62dcf9a6b29918a4253152ae2b1de592cb"
dependencies = [
"fnv",
"ident_case",
@@ -2081,9 +2080,9 @@ dependencies = [
[[package]]
name = "darling_macro"
-version = "0.14.2"
+version = "0.14.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7618812407e9402654622dd402b0a89dff9ba93badd6540781526117b92aab7e"
+checksum = "b36230598a2d5de7ec1c6f51f72d8a99a9208daff41de2084d06e3fd3ea56685"
dependencies = [
"darling_core",
"quote",
@@ -2505,9 +2504,9 @@ checksum = "77c90badedccf4105eca100756a0b1289e191f6fcbdadd3cee1d2f614f97da8f"
[[package]]
name = "dyn-clone"
-version = "1.0.10"
+version = "1.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c9b0705efd4599c15a38151f4721f7bc388306f61084d3bfd50bd07fbca5cb60"
+checksum = "68b0cf012f1230e43cd00ebb729c6bb58707ecfa8ad08b52ef3a4ccd2697fc30"
[[package]]
name = "either"
@@ -2517,9 +2516,9 @@ checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91"
[[package]]
name = "ena"
-version = "0.14.0"
+version = "0.14.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d7402b94a93c24e742487327a7cd839dc9d36fec9de9fb25b09f2dae459f36c3"
+checksum = "b2e5d13ca2353ab7d0230988629def93914a8c4015f621f9b13ed2955614731d"
dependencies = [
"log",
]
@@ -2553,18 +2552,18 @@ checksum = "c34f04666d835ff5d62e058c3995147c06f42fe86ff053337632bca83e42702d"
[[package]]
name = "enum-iterator"
-version = "1.2.0"
+version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "91a4ec26efacf4aeff80887a175a419493cb6f8b5480d26387eb0bd038976187"
+checksum = "706d9e7cf1c7664859d79cd524e4e53ea2b67ea03c98cc2870c5e539695d597e"
dependencies = [
"enum-iterator-derive",
]
[[package]]
name = "enum-iterator-derive"
-version = "1.1.0"
+version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "828de45d0ca18782232dfb8f3ea9cc428e8ced380eb26a520baaacfc70de39ce"
+checksum = "355f93763ef7b0ae1c43c4d8eccc9d5848d84ad1a1d8ce61c421d1ac85a19d05"
dependencies = [
"proc-macro2",
"quote",
@@ -2625,9 +2624,9 @@ dependencies = [
[[package]]
name = "etcd-client"
-version = "0.10.2"
+version = "0.10.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b1259da3b15ec7e54bd7203adb2c4335adb9ca1d47b56220d650e52c247e824a"
+checksum = "9f7a02ed1498d55034fcf41f80e81131d80bf90fff432dc7332cb29a7b53680f"
dependencies = [
"http",
"prost",
@@ -2670,34 +2669,34 @@ checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7"
[[package]]
name = "fastrand"
-version = "1.8.0"
+version = "1.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a7a407cfaa3385c4ae6b23e84623d48c2798d06e3e6a1878f7f59f17b3f86499"
+checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be"
dependencies = [
"instant",
]
[[package]]
name = "fd-lock"
-version = "3.0.9"
+version = "3.0.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "28c0190ff0bd3b28bfdd4d0cf9f92faa12880fb0b8ae2054723dd6c76a4efd42"
+checksum = "8ef1a30ae415c3a691a4f41afddc2dbcd6d70baf338368d85ebc1e8ed92cedb9"
dependencies = [
"cfg-if 1.0.0",
"rustix",
- "windows-sys",
+ "windows-sys 0.45.0",
]
[[package]]
name = "filetime"
-version = "0.2.19"
+version = "0.2.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4e884668cd0c7480504233e951174ddc3b382f7c2666e3b7310b5c4e7b0c37f9"
+checksum = "8a3de6e8d11b22ff9edc6d916f890800597d60f8b2da1caf2955c274638d6412"
dependencies = [
"cfg-if 1.0.0",
"libc",
"redox_syscall",
- "windows-sys",
+ "windows-sys 0.45.0",
]
[[package]]
@@ -2891,9 +2890,9 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c"
[[package]]
name = "futures"
-version = "0.3.25"
+version = "0.3.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "38390104763dc37a5145a53c29c63c1290b5d316d6086ec32c293f6736051bb0"
+checksum = "13e2792b0ff0340399d58445b88fd9770e3489eff258a4cbc1523418f12abf84"
dependencies = [
"futures-channel",
"futures-core",
@@ -2906,9 +2905,9 @@ dependencies = [
[[package]]
name = "futures-channel"
-version = "0.3.25"
+version = "0.3.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "52ba265a92256105f45b719605a571ffe2d1f0fea3807304b522c1d778f79eed"
+checksum = "2e5317663a9089767a1ec00a487df42e0ca174b61b4483213ac24448e4664df5"
dependencies = [
"futures-core",
"futures-sink",
@@ -2916,15 +2915,15 @@ dependencies = [
[[package]]
name = "futures-core"
-version = "0.3.25"
+version = "0.3.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "04909a7a7e4633ae6c4a9ab280aeb86da1236243a77b694a49eacd659a4bd3ac"
+checksum = "ec90ff4d0fe1f57d600049061dc6bb68ed03c7d2fbd697274c41805dcb3f8608"
[[package]]
name = "futures-executor"
-version = "0.3.25"
+version = "0.3.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7acc85df6714c176ab5edf386123fafe217be88c0840ec11f199441134a074e2"
+checksum = "e8de0a35a6ab97ec8869e32a2473f4b1324459e14c29275d14b10cb1fd19b50e"
dependencies = [
"futures-core",
"futures-task",
@@ -2933,9 +2932,9 @@ dependencies = [
[[package]]
name = "futures-io"
-version = "0.3.25"
+version = "0.3.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "00f5fb52a06bdcadeb54e8d3671f8888a39697dcb0b81b23b55174030427f4eb"
+checksum = "bfb8371b6fb2aeb2d280374607aeabfc99d95c72edfe51692e42d3d7f0d08531"
[[package]]
name = "futures-lite"
@@ -2954,9 +2953,9 @@ dependencies = [
[[package]]
name = "futures-macro"
-version = "0.3.25"
+version = "0.3.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bdfb8ce053d86b91919aad980c220b1fb8401a9394410e1c289ed7e66b61835d"
+checksum = "95a73af87da33b5acf53acfebdc339fe592ecf5357ac7c0a7734ab9d8c876a70"
dependencies = [
"proc-macro2",
"quote",
@@ -2965,21 +2964,21 @@ dependencies = [
[[package]]
name = "futures-sink"
-version = "0.3.25"
+version = "0.3.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "39c15cf1a4aa79df40f1bb462fb39676d0ad9e366c2a33b590d7c66f4f81fcf9"
+checksum = "f310820bb3e8cfd46c80db4d7fb8353e15dfff853a127158425f31e0be6c8364"
[[package]]
name = "futures-task"
-version = "0.3.25"
+version = "0.3.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2ffb393ac5d9a6eaa9d3fdf37ae2776656b706e200c8e16b1bdb227f5198e6ea"
+checksum = "dcf79a1bf610b10f42aea489289c5a2c478a786509693b80cd39c44ccd936366"
[[package]]
name = "futures-util"
-version = "0.3.25"
+version = "0.3.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "197676987abd2f9cadff84926f410af1c183608d36641465df73ae8211dc65d6"
+checksum = "9c1d6de3acfef38d2be4b1f543f553131788603495be83da675e180c8d6b7bd1"
dependencies = [
"futures-channel",
"futures-core",
@@ -3049,9 +3048,9 @@ dependencies = [
[[package]]
name = "gimli"
-version = "0.27.1"
+version = "0.27.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "221996f774192f0f718773def8201c4ae31f02616a54ccfc2d358bb0e5cefdec"
+checksum = "ad0a93d233ebf96623465aad4046a8d3aa4da22d4f4beba5388838c8a434bbb4"
[[package]]
name = "glob"
@@ -3071,9 +3070,9 @@ dependencies = [
[[package]]
name = "h2"
-version = "0.3.15"
+version = "0.3.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5f9f29bc9dda355256b2916cf526ab02ce0aeaaaf2bad60d65ef3f12f11dd0f4"
+checksum = "5be7b54589b581f624f566bf5d8eb2bab1db736c51528720b6bd36b96b55924d"
dependencies = [
"bytes",
"fnv",
@@ -3104,15 +3103,6 @@ dependencies = [
"num-traits",
]
-[[package]]
-name = "hashbrown"
-version = "0.11.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e"
-dependencies = [
- "ahash 0.7.6",
-]
-
[[package]]
name = "hashbrown"
version = "0.12.3"
@@ -3155,9 +3145,9 @@ dependencies = [
[[package]]
name = "heck"
-version = "0.4.0"
+version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2540771e65fc8cb83cd6e8a237f70c319bd5c29f78ed1084ba5d50eeac86f7f9"
+checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8"
[[package]]
name = "hermit-abi"
@@ -3177,6 +3167,12 @@ dependencies = [
"libc",
]
+[[package]]
+name = "hermit-abi"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fed44880c466736ef9a5c5b5facefb5ed0785676d0c02d612db14e54f0d84286"
+
[[package]]
name = "hex"
version = "0.4.3"
@@ -3215,7 +3211,7 @@ checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482"
dependencies = [
"bytes",
"fnv",
- "itoa 1.0.5",
+ "itoa",
]
[[package]]
@@ -3265,9 +3261,9 @@ dependencies = [
[[package]]
name = "hyper"
-version = "0.14.23"
+version = "0.14.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "034711faac9d2166cb1baf1a2fb0b60b1f277f8492fd72176c17f3515e1abd3c"
+checksum = "5e011372fa0b68db8350aa7a248930ecc7839bf46d8485577d69f117a75f164c"
dependencies = [
"bytes",
"futures-channel",
@@ -3278,7 +3274,7 @@ dependencies = [
"http-body",
"httparse",
"httpdate",
- "itoa 1.0.5",
+ "itoa",
"pin-project-lite",
"socket2",
"tokio",
@@ -3419,12 +3415,12 @@ dependencies = [
[[package]]
name = "io-lifetimes"
-version = "1.0.4"
+version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e7d6c6f8c91b4b9ed43484ad1a938e393caf35960fce7f82a040497207bd8e9e"
+checksum = "cfa919a82ea574332e2de6e74b4c36e74d41982b335080fa59d4ef31be20fdf3"
dependencies = [
"libc",
- "windows-sys",
+ "windows-sys 0.45.0",
]
[[package]]
@@ -3444,9 +3440,9 @@ dependencies = [
[[package]]
name = "is-macro"
-version = "0.2.1"
+version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1c068d4c6b922cd6284c609cfa6dec0e41615c9c5a1a4ba729a970d8daba05fb"
+checksum = "8a7d079e129b77477a49c5c4f1cfe9ce6c2c909ef52520693e8e811a714c7b20"
dependencies = [
"Inflector",
"pmutil",
@@ -3457,14 +3453,14 @@ dependencies = [
[[package]]
name = "is-terminal"
-version = "0.4.2"
+version = "0.4.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "28dfb6c8100ccc63462345b67d1bbc3679177c75ee4bf59bf29c8b1d110b8189"
+checksum = "21b6b32576413a8e69b90e952e4a026476040d81017b80445deda5f2d3921857"
dependencies = [
- "hermit-abi 0.2.6",
+ "hermit-abi 0.3.1",
"io-lifetimes",
"rustix",
- "windows-sys",
+ "windows-sys 0.45.0",
]
[[package]]
@@ -3478,30 +3474,24 @@ dependencies = [
[[package]]
name = "itoa"
-version = "0.4.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4"
-
-[[package]]
-name = "itoa"
-version = "1.0.5"
+version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fad582f4b9e86b6caa621cabeb0963332d92eea04729ab12892c2533951e6440"
+checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6"
[[package]]
name = "jobserver"
-version = "0.1.25"
+version = "0.1.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "068b1ee6743e4d11fb9c6a1e6064b3693a1b600e7f5f5988047d98b3dc9fb90b"
+checksum = "936cfd212a0155903bcbc060e316fb6cc7cbf2e1907329391ebadc1fe0ce77c2"
dependencies = [
"libc",
]
[[package]]
name = "js-sys"
-version = "0.3.60"
+version = "0.3.61"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "49409df3e3bf0856b916e2ceaca09ee28e6871cf7d9ce97a692cacfdb2a25a47"
+checksum = "445dde2150c55e483f3d8416706b97ec8e8237c307e5b7b4b8dd15e6af2a0730"
dependencies = [
"wasm-bindgen",
]
@@ -3934,9 +3924,9 @@ checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d"
[[package]]
name = "memmap2"
-version = "0.5.8"
+version = "0.5.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4b182332558b18d807c4ce1ca8ca983b34c3ee32765e47b3f0f69b90355cc1dc"
+checksum = "83faa42c0a078c393f6b29d5db232d8be22776a891f8f56e5284faee4a20b327"
dependencies = [
"libc",
]
@@ -3950,15 +3940,6 @@ dependencies = [
"autocfg",
]
-[[package]]
-name = "memoffset"
-version = "0.7.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5de893c32cde5f383baa4c04c5d6dbdd735cfd4a794b0debdb2bb1b421da5ff4"
-dependencies = [
- "autocfg",
-]
-
[[package]]
name = "memoffset"
version = "0.8.0"
@@ -4120,14 +4101,14 @@ dependencies = [
[[package]]
name = "mio"
-version = "0.8.5"
+version = "0.8.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e5d732bc30207a6423068df043e3d02e0735b155ad7ce1a6f76fe2baa5b158de"
+checksum = "5b9d9a46eff5b4ff64b45a9e316a6d1e0bc719ef429cbec4dc630684212bfdf9"
dependencies = [
"libc",
"log",
"wasi 0.11.0+wasi-snapshot-preview1",
- "windows-sys",
+ "windows-sys 0.45.0",
]
[[package]]
@@ -4164,9 +4145,9 @@ dependencies = [
[[package]]
name = "moka"
-version = "0.9.6"
+version = "0.9.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7b49a05f67020456541f4f29cbaa812016a266a86ec76f96d3873d459c68fe5e"
+checksum = "19b9268097a2cf211ac9955b1cc95e80fa84fff5c2d13ba292916445dc8a311f"
dependencies = [
"async-io",
"async-lock",
@@ -4205,9 +4186,9 @@ checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a"
[[package]]
name = "mysql_async"
-version = "0.31.2"
+version = "0.31.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c7f9a46598da19a35a5637ee5510da39b3f07a8c53b621645e83a8959490a067"
+checksum = "2975442c70450b8f3a0400216321f6ab7b8bda177579f533d312ac511f913655"
dependencies = [
"bytes",
"crossbeam",
@@ -4370,15 +4351,6 @@ dependencies = [
"minimal-lexical",
]
-[[package]]
-name = "nom8"
-version = "0.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ae01545c9c7fc4486ab7debaf2aad7003ac19431791868fb2e8066df97fad2f8"
-dependencies = [
- "memchr",
-]
-
[[package]]
name = "nu-ansi-term"
version = "0.46.0"
@@ -4508,20 +4480,20 @@ dependencies = [
[[package]]
name = "num_enum"
-version = "0.5.9"
+version = "0.5.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8d829733185c1ca374f17e52b762f24f535ec625d2cc1f070e34c8a9068f341b"
+checksum = "1f646caf906c20226733ed5b1374287eb97e3c2a5c227ce668c1f2ce20ae57c9"
dependencies = [
"num_enum_derive",
]
[[package]]
name = "num_enum_derive"
-version = "0.5.9"
+version = "0.5.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2be1598bf1c313dcdd12092e3f1920f463462525a21b7b4e11b4168353d0123e"
+checksum = "dcbff9bc912032c62bf65ef1d5aea88983b420f4f839db1e9b0c281a25c9c799"
dependencies = [
- "proc-macro-crate 1.3.0",
+ "proc-macro-crate 1.3.1",
"proc-macro2",
"quote",
"syn",
@@ -4559,9 +4531,9 @@ dependencies = [
[[package]]
name = "object_store"
-version = "0.5.4"
+version = "0.5.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1f344e51ec9584d2f51199c0c29c6f73dddd04ade986497875bf8fa2f178caf0"
+checksum = "e1ea8f683b4f89a64181393742c041520a1a87e9775e6b4c0dd5a3281af05fc6"
dependencies = [
"async-trait",
"bytes",
@@ -4579,9 +4551,9 @@ dependencies = [
[[package]]
name = "once_cell"
-version = "1.17.0"
+version = "1.17.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6f61fba1741ea2b3d6a1e3178721804bb716a68a6aeba1149b5d52e3d464ea66"
+checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3"
[[package]]
name = "oorandom"
@@ -4795,9 +4767,9 @@ dependencies = [
[[package]]
name = "parking_lot_core"
-version = "0.9.6"
+version = "0.9.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ba1ef8814b5c993410bb3adfad7a5ed269563e4a2f90c41f5d85be7fb47133bf"
+checksum = "9069cbb9f99e3a5083476ccb29ceb1de18b9118cafa53e90c9551235de2b9521"
dependencies = [
"backtrace",
"cfg-if 1.0.0",
@@ -4806,7 +4778,7 @@ dependencies = [
"redox_syscall",
"smallvec",
"thread-id",
- "windows-sys",
+ "windows-sys 0.45.0",
]
[[package]]
@@ -4886,9 +4858,9 @@ dependencies = [
[[package]]
name = "paste"
-version = "1.0.11"
+version = "1.0.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d01a5bd0424d00070b0098dd17ebca6f961a959dead1dbcbbbc1d1cd8d3deeba"
+checksum = "9f746c4065a8fa3fe23974dd82f15431cc8d40779821001404d10d2e79ca7d79"
[[package]]
name = "peeking_take_while"
@@ -4922,9 +4894,9 @@ checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e"
[[package]]
name = "pest"
-version = "2.5.4"
+version = "2.5.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4ab62d2fa33726dbe6321cc97ef96d8cde531e3eeaf858a058de53a8a6d40d8f"
+checksum = "8cbd939b234e95d72bc393d51788aec68aeeb5d51e748ca08ff3aad58cb722f7"
dependencies = [
"thiserror",
"ucd-trie",
@@ -4932,9 +4904,9 @@ dependencies = [
[[package]]
name = "pest_derive"
-version = "2.5.4"
+version = "2.5.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8bf026e2d0581559db66d837fe5242320f525d85c76283c61f4d51a1238d65ea"
+checksum = "a81186863f3d0a27340815be8f2078dd8050b14cd71913db9fbda795e5f707d7"
dependencies = [
"pest",
"pest_generator",
@@ -4942,9 +4914,9 @@ dependencies = [
[[package]]
name = "pest_generator"
-version = "2.5.4"
+version = "2.5.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2b27bd18aa01d91c8ed2b61ea23406a676b42d82609c6e2581fba42f0c15f17f"
+checksum = "75a1ef20bf3193c15ac345acb32e26b3dc3223aff4d77ae4fc5359567683796b"
dependencies = [
"pest",
"pest_meta",
@@ -4955,9 +4927,9 @@ dependencies = [
[[package]]
name = "pest_meta"
-version = "2.5.4"
+version = "2.5.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9f02b677c1859756359fc9983c2e56a0237f18624a3789528804406b7e915e5d"
+checksum = "5e3b284b1f13a20dc5ebc90aff59a51b8d7137c221131b52a7260c08cbc1cc80"
dependencies = [
"once_cell",
"pest",
@@ -4966,9 +4938,9 @@ dependencies = [
[[package]]
name = "petgraph"
-version = "0.6.2"
+version = "0.6.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e6d5014253a1331579ce62aa67443b4a658c5e7dd03d4bc6d302b94474888143"
+checksum = "4dd7d28ee937e54fe3080c91faa1c3a46c06de6252988a7f4592ba2310ef22a4"
dependencies = [
"fixedbitset",
"indexmap",
@@ -5202,7 +5174,7 @@ dependencies = [
"libc",
"log",
"wepoll-ffi",
- "windows-sys",
+ "windows-sys 0.42.0",
]
[[package]]
@@ -5266,9 +5238,9 @@ dependencies = [
[[package]]
name = "prettyplease"
-version = "0.1.23"
+version = "0.1.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e97e3215779627f01ee256d2fad52f3d95e8e1c11e9fc6fd08f7cd455d5d5c78"
+checksum = "4ebcd279d20a4a0a2404a33056388e950504d891c855c7975b9a8fef75f3bf04"
dependencies = [
"proc-macro2",
"syn",
@@ -5290,9 +5262,9 @@ dependencies = [
[[package]]
name = "priority-queue"
-version = "1.3.0"
+version = "1.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d7685ca4cc0b3ad748c22ce6803e23b55b9206ef7715b965ebeaf41639238fdc"
+checksum = "5ca9c6be70d989d21a136eb86c2d83e4b328447fac4a88dace2143c179c86267"
dependencies = [
"autocfg",
"indexmap",
@@ -5309,9 +5281,9 @@ dependencies = [
[[package]]
name = "proc-macro-crate"
-version = "1.3.0"
+version = "1.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "66618389e4ec1c7afe67d51a9bf34ff9236480f8d51e7489b7d5ab0303c13f34"
+checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919"
dependencies = [
"once_cell",
"toml_edit",
@@ -5420,9 +5392,9 @@ dependencies = [
[[package]]
name = "prost"
-version = "0.11.6"
+version = "0.11.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "21dc42e00223fc37204bd4aa177e69420c604ca4a183209a8f9de30c6d934698"
+checksum = "e48e50df39172a3e7eb17e14642445da64996989bc212b583015435d39a58537"
dependencies = [
"bytes",
"prost-derive",
@@ -5435,7 +5407,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a3f8ad728fb08fe212df3c05169e940fbb6d9d16a877ddde14644a983ba2012e"
dependencies = [
"bytes",
- "heck 0.4.0",
+ "heck 0.4.1",
"itertools",
"lazy_static",
"log",
@@ -5452,9 +5424,9 @@ dependencies = [
[[package]]
name = "prost-derive"
-version = "0.11.6"
+version = "0.11.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8bda8c0881ea9f722eb9629376db3d0b903b462477c1aafcb0566610ac28ac5d"
+checksum = "4ea9b0f8cbe5e15a8a042d030bd96668db28ecb567ec37d691971ff5731d2b1b"
dependencies = [
"anyhow",
"itertools",
@@ -5465,11 +5437,10 @@ dependencies = [
[[package]]
name = "prost-types"
-version = "0.11.6"
+version = "0.11.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a5e0526209433e96d83d750dd81a99118edbc55739e7e61a46764fd2ad537788"
+checksum = "379119666929a1afd7a043aa6cf96fa67a6dce9af60c88095a4686dbce4c9c88"
dependencies = [
- "bytes",
"prost",
]
@@ -5484,9 +5455,9 @@ dependencies = [
[[package]]
name = "protobuf-build"
-version = "0.14.0"
+version = "0.14.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6fb3c02f54ecaf12572c1a60dbdb36b1f8f713a16105881143f2be84cca5bbe3"
+checksum = "2df9942df2981178a930a72d442de47e2f0df18ad68e50a30f816f1848215ad0"
dependencies = [
"bitflags",
"protobuf",
@@ -5776,9 +5747,9 @@ dependencies = [
[[package]]
name = "raw-cpuid"
-version = "10.6.0"
+version = "10.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a6823ea29436221176fe662da99998ad3b4db2c7f31e7b6f5fe43adccd6320bb"
+checksum = "6c297679cb867470fa8c9f67dbba74a78d78e3e98d7cf2b08d6d71540f797332"
dependencies = [
"bitflags",
]
@@ -5791,9 +5762,9 @@ checksum = "60a357793950651c4ed0f3f52338f53b2f809f32d83a07f72909fa13e4c6c1e3"
[[package]]
name = "rayon"
-version = "1.6.1"
+version = "1.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6db3a213adf02b3bcfd2d3846bb41cb22857d131789e01df434fb7e7bc0759b7"
+checksum = "1d2df5196e37bcc87abebc0053e20787d73847bb33134a69841207dd0a47f03b"
dependencies = [
"either",
"rayon-core",
@@ -5801,9 +5772,9 @@ dependencies = [
[[package]]
name = "rayon-core"
-version = "1.10.2"
+version = "1.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "356a0625f1954f730c0201cdab48611198dc6ce21f4acff55089b5a78e6e835b"
+checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d"
dependencies = [
"crossbeam-channel",
"crossbeam-deque",
@@ -5868,9 +5839,9 @@ dependencies = [
[[package]]
name = "rend"
-version = "0.3.6"
+version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "79af64b4b6362ffba04eef3a4e10829718a4896dac19daa741851c86781edf95"
+checksum = "581008d2099240d37fb08d77ad713bcaec2c4d89d50b5b21a8bb1996bbab68ab"
dependencies = [
"bytecheck",
]
@@ -5950,18 +5921,18 @@ dependencies = [
[[package]]
name = "result-like"
-version = "0.4.5"
+version = "0.4.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7b80fe0296795a96913be20558326b797a187bb3986ce84ed82dee0fb7414428"
+checksum = "ccc7ce6435c33898517a30e85578cd204cbb696875efb93dec19a2d31294f810"
dependencies = [
"result-like-derive",
]
[[package]]
name = "result-like-derive"
-version = "0.4.5"
+version = "0.4.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2a29c8a4ac7839f1dcb8b899263b501e0d6932f210300c8a0d271323727b35c1"
+checksum = "1fabf0a2e54f711c68c50d49f648a1a8a37adcb57353f518ac4df374f0788f42"
dependencies = [
"pmutil",
"proc-macro2",
@@ -6000,9 +5971,9 @@ dependencies = [
[[package]]
name = "rkyv"
-version = "0.7.39"
+version = "0.7.40"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cec2b3485b07d96ddfd3134767b8a447b45ea4eb91448d0a35180ec0ffd5ed15"
+checksum = "c30f1d45d9aa61cbc8cd1eb87705470892289bb2d01943e7803b873a57404dc3"
dependencies = [
"bytecheck",
"hashbrown 0.12.3",
@@ -6014,9 +5985,9 @@ dependencies = [
[[package]]
name = "rkyv_derive"
-version = "0.7.39"
+version = "0.7.40"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6eaedadc88b53e36dd32d940ed21ae4d850d5916f2581526921f553a72ac34c4"
+checksum = "ff26ed6c7c4dfc2aa9480b86a60e3c7233543a270a680e10758a507c5a4ce476"
dependencies = [
"proc-macro2",
"quote",
@@ -6036,9 +6007,9 @@ dependencies = [
[[package]]
name = "rsa"
-version = "0.8.1"
+version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "89b3896c9b7790b70a9aa314a30e4ae114200992a19c96cbe0ca6070edd32ab8"
+checksum = "55a77d189da1fee555ad95b7e50e7457d91c0e089ec68ca69ad2989413bbdab4"
dependencies = [
"byteorder",
"digest",
@@ -6067,9 +6038,9 @@ dependencies = [
[[package]]
name = "rust_decimal"
-version = "1.28.0"
+version = "1.28.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7fe32e8c89834541077a5c5bbe5691aa69324361e27e6aeb3552a737db4a70c8"
+checksum = "e13cf35f7140155d02ba4ec3294373d513a3c7baa8364c162b030e33c61520a8"
dependencies = [
"arrayvec",
"borsh",
@@ -6128,16 +6099,16 @@ dependencies = [
[[package]]
name = "rustix"
-version = "0.36.7"
+version = "0.36.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d4fdebc4b395b7fbb9ab11e462e20ed9051e7b16e42d24042c776eca0ac81b03"
+checksum = "fd5c6ff11fecd55b40746d1995a02f2eb375bf8c00d192d521ee09f42bef37bc"
dependencies = [
"bitflags",
"errno",
"io-lifetimes",
"libc",
"linux-raw-sys",
- "windows-sys",
+ "windows-sys 0.45.0",
]
[[package]]
@@ -6464,9 +6435,9 @@ dependencies = [
[[package]]
name = "rustversion"
-version = "1.0.11"
+version = "1.0.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5583e89e108996506031660fe09baa5011b9dd0341b89029313006d1fb508d70"
+checksum = "4f3208ce4d8448b3f3e7d168a73f5e0c43a61e32930de3bceeccedb388b6bf06"
[[package]]
name = "rustyline"
@@ -6493,9 +6464,9 @@ dependencies = [
[[package]]
name = "ryu"
-version = "1.0.12"
+version = "1.0.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7b4b9743ed687d4b4bcedf9ff5eaa7398495ae14e61cba0a295704edbc7decde"
+checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041"
[[package]]
name = "safe-lock"
@@ -6580,23 +6551,23 @@ version = "0.1.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "713cfb06c7059f3588fb8044c0fad1d09e3c01d225e25b9220dbfdcf16dbb1b3"
dependencies = [
- "windows-sys",
+ "windows-sys 0.42.0",
]
[[package]]
name = "scheduled-thread-pool"
-version = "0.2.6"
+version = "0.2.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "977a7519bff143a44f842fd07e80ad1329295bd71686457f18e496736f4bf9bf"
+checksum = "3cbc66816425a074528352f5789333ecff06ca41b36b0b0efdfbb29edc391a19"
dependencies = [
"parking_lot",
]
[[package]]
name = "schemars"
-version = "0.8.11"
+version = "0.8.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2a5fb6c61f29e723026dc8e923d94c694313212abbecbbe5f55a7748eec5b307"
+checksum = "02c613288622e5f0c3fdc5dbd4db1c5fbe752746b1d1a56a0630b78fd00de44f"
dependencies = [
"dyn-clone",
"indexmap",
@@ -6607,9 +6578,9 @@ dependencies = [
[[package]]
name = "schemars_derive"
-version = "0.8.11"
+version = "0.8.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f188d036977451159430f3b8dc82ec76364a42b7e289c2b18a9a18f4470058e9"
+checksum = "109da1e6b197438deb6db99952990c7f959572794b80ff93707d55a232545e7c"
dependencies = [
"proc-macro2",
"quote",
@@ -6625,9 +6596,9 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
[[package]]
name = "scratch"
-version = "1.0.3"
+version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ddccb15bcce173023b3fedd9436f882a0739b8dfb45e4f6b6002bee5929f61b2"
+checksum = "1792db035ce95be60c3f8853017b3999209281c24e2ba5bc8e59bf97a0c590c1"
[[package]]
name = "script"
@@ -6747,15 +6718,15 @@ dependencies = [
[[package]]
name = "seq-macro"
-version = "0.3.2"
+version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1685deded9b272198423bdbdb907d8519def2f26cf3699040e54e8c4fbd5c5ce"
+checksum = "e6b44e8fc93a14e66336d230954dda83d18b4605ccace8fe09bc7514a71ad0bc"
[[package]]
name = "serde"
-version = "1.0.152"
+version = "1.0.153"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bb7d1f0d3021d347a83e556fc4683dea2ea09d87bccdf88ff5c12545d89d5efb"
+checksum = "3a382c72b4ba118526e187430bb4963cd6d55051ebf13d9b25574d379cc98d20"
dependencies = [
"serde_derive",
]
@@ -6772,9 +6743,9 @@ dependencies = [
[[package]]
name = "serde_derive"
-version = "1.0.152"
+version = "1.0.153"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "af487d118eecd09402d70a5d72551860e788df87b464af30e5ea6a38c75c541e"
+checksum = "1ef476a5790f0f6decbc66726b6e5d63680ed518283e64c7df415989d880954f"
dependencies = [
"proc-macro2",
"quote",
@@ -6794,29 +6765,29 @@ dependencies = [
[[package]]
name = "serde_json"
-version = "1.0.91"
+version = "1.0.94"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "877c235533714907a8c2464236f5c4b2a17262ef1bd71f38f35ea592c8da6883"
+checksum = "1c533a59c9d8a93a09c6ab31f0fd5e5f4dd1b8fc9434804029839884765d04ea"
dependencies = [
- "itoa 1.0.5",
+ "itoa",
"ryu",
"serde",
]
[[package]]
name = "serde_path_to_error"
-version = "0.1.9"
+version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "26b04f22b563c91331a10074bda3dd5492e3cc39d56bd557e91c0af42b6c7341"
+checksum = "db0969fff533976baadd92e08b1d102c5a3d8a8049eadfd69d4d1e3c5b2ed189"
dependencies = [
"serde",
]
[[package]]
name = "serde_repr"
-version = "0.1.10"
+version = "0.1.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9a5ec9fa74a20ebbe5d9ac23dac1fc96ba0ecfe9f50f2843b52e537b10fbcb4e"
+checksum = "395627de918015623b32e7669714206363a7fc00382bf477e72c1f7533e8eafc"
dependencies = [
"proc-macro2",
"quote",
@@ -6825,9 +6796,9 @@ dependencies = [
[[package]]
name = "serde_tokenstream"
-version = "0.1.6"
+version = "0.1.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "274f512d6748a01e67cbcde5b4307ab2c9d52a98a2b870a980ef0793a351deff"
+checksum = "797ba1d80299b264f3aac68ab5d12e5825a561749db4df7cd7c8083900c5d4e9"
dependencies = [
"proc-macro2",
"serde",
@@ -6841,19 +6812,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd"
dependencies = [
"form_urlencoded",
- "itoa 1.0.5",
+ "itoa",
"ryu",
"serde",
]
[[package]]
name = "serde_yaml"
-version = "0.9.17"
+version = "0.9.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8fb06d4b6cdaef0e0c51fa881acb721bed3c924cfaa71d9c94a3b771dfdf6567"
+checksum = "f82e6c8c047aa50a7328632d067bcae6ef38772a79e28daf32f735e0e4f3dd10"
dependencies = [
"indexmap",
- "itoa 1.0.5",
+ "itoa",
"ryu",
"serde",
"unsafe-libyaml",
@@ -7004,9 +6975,9 @@ checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3"
[[package]]
name = "signal-hook-registry"
-version = "1.4.0"
+version = "1.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0"
+checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1"
dependencies = [
"libc",
]
@@ -7034,6 +7005,12 @@ dependencies = [
"wide",
]
+[[package]]
+name = "simdutf8"
+version = "0.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f27f6278552951f1f2b8cf9da965d10969b2efdea95a6ec47987ab46edfe263a"
+
[[package]]
name = "simple_asn1"
version = "0.6.2"
@@ -7075,9 +7052,9 @@ checksum = "ceb945e54128e09c43d8e4f1277851bd5044c6fc540bbaa2ad888f60b3da9ae7"
[[package]]
name = "slab"
-version = "0.4.7"
+version = "0.4.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4614a76b2a8be0058caa9dbbaf66d988527d86d003c11a94fbd335d7661edcef"
+checksum = "6528351c9bc8ab22353f9d776db39a20288e8d6c37ef8cfe3317cf875eecfc2d"
dependencies = [
"autocfg",
]
@@ -7116,7 +7093,7 @@ version = "0.7.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "475b3bbe5245c26f2d8a6f62d67c1f30eb9fffeccee721c45d162c3ebbdf81b2"
dependencies = [
- "heck 0.4.0",
+ "heck 0.4.1",
"proc-macro2",
"quote",
"syn",
@@ -7130,9 +7107,9 @@ checksum = "5e9f0ab6ef7eb7353d9119c170a436d1bf248eea575ac42d19d12f4e34130831"
[[package]]
name = "socket2"
-version = "0.4.7"
+version = "0.4.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "02e2d2db9033d13a1567121ddd7a095ee144db4e1ca1b1bda3419bc0da294ebd"
+checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662"
dependencies = [
"libc",
"winapi",
@@ -7188,16 +7165,15 @@ dependencies = [
[[package]]
name = "sqlness"
-version = "0.4.0"
+version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bc90b1d2e46d618148d3118ec25c3cb4c18563fef8589095fc21389952c54864"
+checksum = "a7b1588220ac77b9b450b56505566626e3cf215077e44fddf7d1bba6ca5dbd7b"
dependencies = [
"async-trait",
"derive_builder 0.11.2",
"prettydiff",
"serde",
"thiserror",
- "tokio",
"toml",
"walkdir",
]
@@ -7365,15 +7341,15 @@ dependencies = [
[[package]]
name = "strfmt"
-version = "0.2.3"
+version = "0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b73de159e5e71c4c7579ea3041d3e765f46790555790c24489195554210f1fb4"
+checksum = "7a8348af2d9fc3258c8733b8d9d8db2e56f54b2363a4b5b81585c7875ed65e65"
[[package]]
name = "string_cache"
-version = "0.8.4"
+version = "0.8.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "213494b7a2b503146286049378ce02b482200519accc31872ee8be91fa820a08"
+checksum = "f91138e76242f575eb1d3b38b4f1362f10d3a43f47d182a5b359af488a02293b"
dependencies = [
"new_debug_unreachable",
"once_cell",
@@ -7443,7 +7419,7 @@ version = "0.24.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59"
dependencies = [
- "heck 0.4.0",
+ "heck 0.4.1",
"proc-macro2",
"quote",
"rustversion",
@@ -7478,18 +7454,18 @@ dependencies = [
"prost",
"session",
"snafu",
- "substrait 0.4.0",
+ "substrait 0.4.1",
"table",
"tokio",
]
[[package]]
name = "substrait"
-version = "0.4.0"
+version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e2feb96a6a106e21161551af32dc4e0fdab3aceb926b940d7e92a086b640fc7c"
+checksum = "3108bf99c703e39728847cce9becff451f8a94cbc72fb5918b4e7f0543d7b06a"
dependencies = [
- "heck 0.4.0",
+ "heck 0.4.1",
"prost",
"prost-build",
"prost-types",
@@ -7509,9 +7485,9 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601"
[[package]]
name = "syn"
-version = "1.0.107"
+version = "1.0.109"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1f4064b5b16e03ae50984a5a8ed5d4f8803e6bc1fd170a3cda91a1be4b18e3f5"
+checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
dependencies = [
"proc-macro2",
"quote",
@@ -7529,9 +7505,9 @@ dependencies = [
[[package]]
name = "sync_wrapper"
-version = "0.1.1"
+version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "20518fe4a4c9acf048008599e464deb21beeae3d3578418951a189c235a7a9a8"
+checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160"
[[package]]
name = "system-configuration"
@@ -7639,7 +7615,7 @@ dependencies = [
"fastrand",
"redox_syscall",
"rustix",
- "windows-sys",
+ "windows-sys 0.42.0",
]
[[package]]
@@ -7730,18 +7706,18 @@ checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d"
[[package]]
name = "thiserror"
-version = "1.0.38"
+version = "1.0.39"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6a9cd18aa97d5c45c6603caea1da6628790b37f7a34b6ca89522331c5180fed0"
+checksum = "a5ab016db510546d856297882807df8da66a16fb8c4101cb8b30054b0d5b2d9c"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
-version = "1.0.38"
+version = "1.0.39"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1fb327af4685e4d03fa8cbcf1716380da910eeb2bb8be417e7f9fd3fb164f36f"
+checksum = "5420d42e90af0c38c3290abcca25b9b3bdf379fc9f55c528f53a269d9c9a267e"
dependencies = [
"proc-macro2",
"quote",
@@ -7761,10 +7737,11 @@ dependencies = [
[[package]]
name = "thread_local"
-version = "1.1.4"
+version = "1.1.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180"
+checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152"
dependencies = [
+ "cfg-if 1.0.0",
"once_cell",
]
@@ -7849,7 +7826,7 @@ version = "0.3.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a561bf4617eebd33bca6434b988f39ed798e527f51a1e797d0ee4f61c0a38376"
dependencies = [
- "itoa 1.0.5",
+ "itoa",
"serde",
"time-core",
"time-macros",
@@ -7906,15 +7883,15 @@ dependencies = [
[[package]]
name = "tinyvec_macros"
-version = "0.1.0"
+version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c"
+checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
[[package]]
name = "tokio"
-version = "1.25.0"
+version = "1.26.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c8e00990ebabbe4c14c08aca901caed183ecd5c09562a12c824bb53d3c3fd3af"
+checksum = "03201d01c3c27a29c8a5cee5b55a93ddae1ccf6f08f65365c2c918f8c1b76f64"
dependencies = [
"autocfg",
"bytes",
@@ -7928,7 +7905,7 @@ dependencies = [
"socket2",
"tokio-macros",
"tracing",
- "windows-sys",
+ "windows-sys 0.45.0",
]
[[package]]
@@ -8003,9 +7980,9 @@ dependencies = [
[[package]]
name = "tokio-stream"
-version = "0.1.11"
+version = "0.1.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d660770404473ccd7bc9f8b28494a811bc18542b915c0855c51e8f419d5223ce"
+checksum = "8fb52b74f05dbf495a8fba459fdc331812b96aa086d9eb78101fa0d4569c3313"
dependencies = [
"futures-core",
"pin-project-lite",
@@ -8027,9 +8004,9 @@ dependencies = [
[[package]]
name = "tokio-util"
-version = "0.7.4"
+version = "0.7.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0bb2e075f03b3d66d8d8785356224ba688d2906a371015e225beeb65ca92c740"
+checksum = "5427d89453009325de0d8f342c9490009f76e999cb7672d77e46267448f7e6b2"
dependencies = [
"bytes",
"futures-core",
@@ -8051,19 +8028,19 @@ dependencies = [
[[package]]
name = "toml_datetime"
-version = "0.5.1"
+version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4553f467ac8e3d374bc9a177a26801e5d0f9b211aa1673fb137a403afd1c9cf5"
+checksum = "3ab8ed2edee10b50132aed5f331333428b011c99402b5a534154ed15746f9622"
[[package]]
name = "toml_edit"
-version = "0.18.1"
+version = "0.19.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "56c59d8dd7d0dcbc6428bf7aa2f0e823e26e43b3c9aca15bbc9475d23e5fa12b"
+checksum = "9a1eb0622d28f4b9c90adc4ea4b2b46b47663fde9ac5fafcb14a1369d5508825"
dependencies = [
"indexmap",
- "nom8",
"toml_datetime",
+ "winnow",
]
[[package]]
@@ -8361,7 +8338,7 @@ version = "0.0.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a7624d0b911df6e2bbf34a236f76281f93b294cdde1d4df1dbdb748e5a7fefa5"
dependencies = [
- "heck 0.4.0",
+ "heck 0.4.1",
"log",
"proc-macro2",
"quote",
@@ -8553,9 +8530,9 @@ checksum = "623f59e6af2a98bdafeb93fa277ac8e1e40440973001ca15cf4ae1541cd16d56"
[[package]]
name = "unicode-ident"
-version = "1.0.6"
+version = "1.0.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "84a22b9f218b40614adcb3f4ff08b703773ad44fa9423e4e0d346d5db86e4ebc"
+checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4"
[[package]]
name = "unicode-normalization"
@@ -8568,9 +8545,9 @@ dependencies = [
[[package]]
name = "unicode-segmentation"
-version = "1.10.0"
+version = "1.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0fdbf052a0783de01e944a6ce7a8cb939e295b1e7be835a1112c3b9a7f047a5a"
+checksum = "1dd624098567895118886609431a7c3b8f516e41d30e0643f03d94592a147e36"
[[package]]
name = "unicode-width"
@@ -8598,9 +8575,9 @@ checksum = "e1766d682d402817b5ac4490b3c3002d91dfa0d22812f341609f97b08757359c"
[[package]]
name = "unsafe-libyaml"
-version = "0.2.5"
+version = "0.2.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bc7ed8ba44ca06be78ea1ad2c3682a43349126c8818054231ee6f4748012aed2"
+checksum = "ad2024452afd3874bf539695e04af6732ba06517424dbf958fdb16a01f3bef6c"
[[package]]
name = "untrusted"
@@ -8643,9 +8620,9 @@ checksum = "936e4b492acfd135421d8dca4b1aa80a7bfc26e702ef3af710e0752684df5372"
[[package]]
name = "uuid"
-version = "1.2.2"
+version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "422ee0de9031b5b948b97a8fc04e3aa35230001a722ddd27943e0be31564ce4c"
+checksum = "1674845326ee10d37ca60470760d4288a6f80f304007d92e5c53bab78c9cfd79"
dependencies = [
"atomic",
"getrandom",
@@ -8656,9 +8633,9 @@ dependencies = [
[[package]]
name = "uuid-macro-internal"
-version = "1.2.2"
+version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "73bc89f2894593e665241e0052c3791999e6787b7c4831daa0a5c2e637e276d8"
+checksum = "c1b300a878652a387d2a0de915bdae8f1a548f0c6d45e072fe2688794b656cc9"
dependencies = [
"proc-macro2",
"quote",
@@ -8685,9 +8662,9 @@ checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191"
[[package]]
name = "vergen"
-version = "7.5.0"
+version = "7.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "571b69f690c855821462709b6f41d42ceccc316fbd17b60bd06d06928cfe6a99"
+checksum = "f21b881cd6636ece9735721cf03c1fe1e774fe258683d084bb2812ab67435749"
dependencies = [
"anyhow",
"cfg-if 1.0.0",
@@ -8762,9 +8739,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
[[package]]
name = "wasm-bindgen"
-version = "0.2.83"
+version = "0.2.84"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "eaf9f5aceeec8be17c128b2e93e031fb8a4d469bb9c4ae2d7dc1888b26887268"
+checksum = "31f8dcbc21f30d9b8f2ea926ecb58f6b91192c17e9d33594b3df58b2007ca53b"
dependencies = [
"cfg-if 1.0.0",
"wasm-bindgen-macro",
@@ -8772,9 +8749,9 @@ dependencies = [
[[package]]
name = "wasm-bindgen-backend"
-version = "0.2.83"
+version = "0.2.84"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4c8ffb332579b0557b52d268b91feab8df3615f265d5270fec2a8c95b17c1142"
+checksum = "95ce90fd5bcc06af55a641a86428ee4229e44e07033963a2290a8e241607ccb9"
dependencies = [
"bumpalo",
"log",
@@ -8787,9 +8764,9 @@ dependencies = [
[[package]]
name = "wasm-bindgen-futures"
-version = "0.4.33"
+version = "0.4.34"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "23639446165ca5a5de86ae1d8896b737ae80319560fbaa4c2887b7da6e7ebd7d"
+checksum = "f219e0d211ba40266969f6dbdd90636da12f75bee4fc9d6c23d1260dadb51454"
dependencies = [
"cfg-if 1.0.0",
"js-sys",
@@ -8799,9 +8776,9 @@ dependencies = [
[[package]]
name = "wasm-bindgen-macro"
-version = "0.2.83"
+version = "0.2.84"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "052be0f94026e6cbc75cdefc9bae13fd6052cdcaf532fa6c45e7ae33a1e6c810"
+checksum = "4c21f77c0bedc37fd5dc21f897894a5ca01e7bb159884559461862ae90c0b4c5"
dependencies = [
"quote",
"wasm-bindgen-macro-support",
@@ -8809,9 +8786,9 @@ dependencies = [
[[package]]
name = "wasm-bindgen-macro-support"
-version = "0.2.83"
+version = "0.2.84"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "07bc0c051dc5f23e307b13285f9d75df86bfdf816c5721e573dec1f9b8aa193c"
+checksum = "2aff81306fcac3c7515ad4e177f521b5c9a15f2b08f4e32d823066102f35a5f6"
dependencies = [
"proc-macro2",
"quote",
@@ -8822,9 +8799,9 @@ dependencies = [
[[package]]
name = "wasm-bindgen-shared"
-version = "0.2.83"
+version = "0.2.84"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1c38c045535d93ec4f0b4defec448e4291638ee608530863b1e2ba115d4fff7f"
+checksum = "0046fef7e28c3804e5e38bfa31ea2a0f73905319b677e57ebe37e49358989b5d"
[[package]]
name = "wasm-streams"
@@ -8841,9 +8818,9 @@ dependencies = [
[[package]]
name = "web-sys"
-version = "0.3.60"
+version = "0.3.61"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bcda906d8be16e728fd5adc5b729afad4e444e106ab28cd1c7256e54fa61510f"
+checksum = "e33b99f4b23ba3eec1a53ac264e35a755f00e966e0065077d6027c0f575b0b97"
dependencies = [
"js-sys",
"wasm-bindgen",
@@ -8890,9 +8867,9 @@ dependencies = [
[[package]]
name = "wide"
-version = "0.7.6"
+version = "0.7.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "feff0a412894d67223777b6cc8d68c0dab06d52d95e9890d5f2d47f10dd9366c"
+checksum = "b689b6c49d6549434bf944e6b0f39238cf63693cb7a147e9d887507fffa3b223"
dependencies = [
"bytemuck",
"safe_arch",
@@ -8963,6 +8940,30 @@ dependencies = [
"windows_x86_64_msvc 0.42.1",
]
+[[package]]
+name = "windows-sys"
+version = "0.45.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0"
+dependencies = [
+ "windows-targets",
+]
+
+[[package]]
+name = "windows-targets"
+version = "0.42.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8e2522491fbfcd58cc84d47aeb2958948c4b8982e9a2d8a2a35bbaed431390e7"
+dependencies = [
+ "windows_aarch64_gnullvm",
+ "windows_aarch64_msvc 0.42.1",
+ "windows_i686_gnu 0.42.1",
+ "windows_i686_msvc 0.42.1",
+ "windows_x86_64_gnu 0.42.1",
+ "windows_x86_64_gnullvm",
+ "windows_x86_64_msvc 0.42.1",
+]
+
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.42.1"
@@ -9035,6 +9036,15 @@ version = "0.42.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "447660ad36a13288b1db4d4248e857b510e8c3a225c822ba4fb748c0aafecffd"
+[[package]]
+name = "winnow"
+version = "0.3.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ee7b2c67f962bf5042bfd8b6a916178df33a26eec343ae064cb8e069f638fa6f"
+dependencies = [
+ "memchr",
+]
+
[[package]]
name = "winreg"
version = "0.10.1"
@@ -9094,18 +9104,18 @@ checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f"
[[package]]
name = "zstd"
-version = "0.12.2+zstd.1.5.2"
+version = "0.12.3+zstd.1.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e9262a83dc741c0b0ffec209881b45dbc232c21b02a2b9cb1adb93266e41303d"
+checksum = "76eea132fb024e0e13fd9c2f5d5d595d8a967aa72382ac2f9d39fcc95afd0806"
dependencies = [
"zstd-safe",
]
[[package]]
name = "zstd-safe"
-version = "6.0.2+zstd.1.5.2"
+version = "6.0.4+zstd.1.5.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a6cf39f730b440bab43da8fb5faf5f254574462f73f260f85f7987f32154ff17"
+checksum = "7afb4b54b8910cf5447638cb54bf4e8a65cbedd783af98b98c62ffe91f185543"
dependencies = [
"libc",
"zstd-sys",
@@ -9113,9 +9123,9 @@ dependencies = [
[[package]]
name = "zstd-sys"
-version = "2.0.5+zstd.1.5.2"
+version = "2.0.7+zstd.1.5.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "edc50ffce891ad571e9f9afe5039c4837bede781ac4bb13052ed7ae695518596"
+checksum = "94509c3ba2fe55294d752b79842c530ccfab760192521df74a081a78d2b3c7f5"
dependencies = [
"cc",
"libc",
|
chore
|
update dependencies (#1148)
|
185953e58624c05523c8e751a90c8eeb27bcb29c
|
2024-07-09 14:29:06
|
Ruihang Xia
|
fix: support unary operator in default value, partition rule and prepare statement (#4301)
| false
|
diff --git a/src/common/decimal/src/decimal128.rs b/src/common/decimal/src/decimal128.rs
index d742be5876f4..fc4331ddfcc0 100644
--- a/src/common/decimal/src/decimal128.rs
+++ b/src/common/decimal/src/decimal128.rs
@@ -121,6 +121,11 @@ impl Decimal128 {
let value = (hi | lo) as i128;
Self::new(value, precision, scale)
}
+
+ pub fn negative(mut self) -> Self {
+ self.value = -self.value;
+ self
+ }
}
/// The default value of Decimal128 is 0, and its precision is 1 and scale is 0.
diff --git a/src/common/time/src/date.rs b/src/common/time/src/date.rs
index a04b529fd448..86759d737d41 100644
--- a/src/common/time/src/date.rs
+++ b/src/common/time/src/date.rs
@@ -159,6 +159,10 @@ impl Date {
.checked_sub_days(Days::new(days as u64))
.map(Into::into)
}
+
+ pub fn negative(&self) -> Self {
+ Self(-self.0)
+ }
}
#[cfg(test)]
diff --git a/src/common/time/src/datetime.rs b/src/common/time/src/datetime.rs
index f1980a38d1af..4a60470aebce 100644
--- a/src/common/time/src/datetime.rs
+++ b/src/common/time/src/datetime.rs
@@ -192,6 +192,10 @@ impl DateTime {
pub fn to_date(&self) -> Option<Date> {
self.to_chrono_datetime().map(|d| Date::from(d.date()))
}
+
+ pub fn negative(&self) -> Self {
+ Self(-self.0)
+ }
}
#[cfg(test)]
diff --git a/src/common/time/src/duration.rs b/src/common/time/src/duration.rs
index 06742ba294b5..d39596b47297 100644
--- a/src/common/time/src/duration.rs
+++ b/src/common/time/src/duration.rs
@@ -92,6 +92,11 @@ impl Duration {
pub fn to_std_duration(self) -> std::time::Duration {
self.into()
}
+
+ pub fn negative(mut self) -> Self {
+ self.value = -self.value;
+ self
+ }
}
/// Convert i64 to Duration Type.
diff --git a/src/common/time/src/interval.rs b/src/common/time/src/interval.rs
index 95e8982f071e..cd57028d29f6 100644
--- a/src/common/time/src/interval.rs
+++ b/src/common/time/src/interval.rs
@@ -281,6 +281,15 @@ impl Interval {
pub fn to_i32(&self) -> i32 {
self.months
}
+
+ pub fn negative(&self) -> Self {
+ Self {
+ months: -self.months,
+ days: -self.days,
+ nsecs: -self.nsecs,
+ unit: self.unit,
+ }
+ }
}
impl From<i128> for Interval {
diff --git a/src/common/time/src/time.rs b/src/common/time/src/time.rs
index 8490195ff601..f8ffbf3608b1 100644
--- a/src/common/time/src/time.rs
+++ b/src/common/time/src/time.rs
@@ -145,6 +145,11 @@ impl Time {
None
}
}
+
+ pub fn negative(mut self) -> Self {
+ self.value = -self.value;
+ self
+ }
}
impl From<i64> for Time {
diff --git a/src/common/time/src/timestamp.rs b/src/common/time/src/timestamp.rs
index a4aac7fd54ba..503c44cf9901 100644
--- a/src/common/time/src/timestamp.rs
+++ b/src/common/time/src/timestamp.rs
@@ -441,6 +441,11 @@ impl Timestamp {
ParseTimestampSnafu { raw: s }.fail()
}
+
+ pub fn negative(mut self) -> Self {
+ self.value = -self.value;
+ self
+ }
}
impl Timestamp {
diff --git a/src/datatypes/src/value.rs b/src/datatypes/src/value.rs
index 4e9db7e2e52d..fdb6b38bb698 100644
--- a/src/datatypes/src/value.rs
+++ b/src/datatypes/src/value.rs
@@ -367,6 +367,56 @@ impl Value {
Ok(scalar_value)
}
+
+ /// Apply `-` unary op if possible
+ pub fn try_negative(&self) -> Option<Self> {
+ match self {
+ Value::Null => Some(Value::Null),
+ Value::UInt8(x) => {
+ if *x == 0 {
+ Some(Value::UInt8(*x))
+ } else {
+ None
+ }
+ }
+ Value::UInt16(x) => {
+ if *x == 0 {
+ Some(Value::UInt16(*x))
+ } else {
+ None
+ }
+ }
+ Value::UInt32(x) => {
+ if *x == 0 {
+ Some(Value::UInt32(*x))
+ } else {
+ None
+ }
+ }
+ Value::UInt64(x) => {
+ if *x == 0 {
+ Some(Value::UInt64(*x))
+ } else {
+ None
+ }
+ }
+ Value::Int8(x) => Some(Value::Int8(-*x)),
+ Value::Int16(x) => Some(Value::Int16(-*x)),
+ Value::Int32(x) => Some(Value::Int32(-*x)),
+ Value::Int64(x) => Some(Value::Int64(-*x)),
+ Value::Float32(x) => Some(Value::Float32(-*x)),
+ Value::Float64(x) => Some(Value::Float64(-*x)),
+ Value::Decimal128(x) => Some(Value::Decimal128(x.negative())),
+ Value::Date(x) => Some(Value::Date(x.negative())),
+ Value::DateTime(x) => Some(Value::DateTime(x.negative())),
+ Value::Timestamp(x) => Some(Value::Timestamp(x.negative())),
+ Value::Time(x) => Some(Value::Time(x.negative())),
+ Value::Duration(x) => Some(Value::Duration(x.negative())),
+ Value::Interval(x) => Some(Value::Interval(x.negative())),
+
+ Value::Binary(_) | Value::String(_) | Value::Boolean(_) | Value::List(_) => None,
+ }
+ }
}
pub trait TryAsPrimitive<T: LogicalPrimitiveType> {
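
As an aside to the patch above, the semantics of `try_negative` can be summarised with a small standalone sketch. The `Val` enum below is a made-up miniature of the real `Value` type, kept tiny on purpose: unsigned values only tolerate `-` when they are zero, signed and floating values flip sign, and string-like values reject the operator.

// Illustrative only; `Val` is a hypothetical stand-in for the Value enum in the patch.
#[derive(Debug, PartialEq)]
enum Val {
    Null,
    UInt64(u64),
    Int64(i64),
    Float64(f64),
    String(String),
}

impl Val {
    /// Mirrors try_negative: Some(negated) when `-` is meaningful, None otherwise.
    fn try_negative(&self) -> Option<Val> {
        match self {
            Val::Null => Some(Val::Null),
            // An unsigned value can only be "negated" when it is zero.
            Val::UInt64(x) => (*x == 0).then(|| Val::UInt64(*x)),
            Val::Int64(x) => Some(Val::Int64(-*x)),
            Val::Float64(x) => Some(Val::Float64(-*x)),
            // Strings (and binaries, lists, booleans in the real enum) reject `-`.
            Val::String(_) => None,
        }
    }
}

fn main() {
    assert_eq!(Val::UInt64(0).try_negative(), Some(Val::UInt64(0)));
    assert_eq!(Val::UInt64(3).try_negative(), None);
    assert_eq!(Val::Int64(3).try_negative(), Some(Val::Int64(-3)));
    assert_eq!(Val::String("x".into()).try_negative(), None);
}
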
diff --git a/src/operator/src/lib.rs b/src/operator/src/lib.rs
index efced830f22b..b3e7e3afb9d8 100644
--- a/src/operator/src/lib.rs
+++ b/src/operator/src/lib.rs
@@ -13,6 +13,7 @@
// limitations under the License.
#![feature(assert_matches)]
+#![feature(if_let_guard)]
pub mod delete;
pub mod error;
diff --git a/src/operator/src/req_convert/insert/stmt_to_region.rs b/src/operator/src/req_convert/insert/stmt_to_region.rs
index 71f107020d62..37d55e6c9e90 100644
--- a/src/operator/src/req_convert/insert/stmt_to_region.rs
+++ b/src/operator/src/req_convert/insert/stmt_to_region.rs
@@ -202,7 +202,7 @@ fn sql_value_to_grpc_value(
column: column.clone(),
})?
} else {
- statements::sql_value_to_value(column, &column_schema.data_type, sql_val, timezone)
+ statements::sql_value_to_value(column, &column_schema.data_type, sql_val, timezone, None)
.context(ParseSqlSnafu)?
};
diff --git a/src/operator/src/statement/ddl.rs b/src/operator/src/statement/ddl.rs
index daef8b74579b..0f1775a0924b 100644
--- a/src/operator/src/statement/ddl.rs
+++ b/src/operator/src/statement/ddl.rs
@@ -56,7 +56,7 @@ use sql::statements::create::{
};
use sql::statements::sql_value_to_value;
use sql::statements::statement::Statement;
-use sqlparser::ast::{Expr, Ident, Value as ParserValue};
+use sqlparser::ast::{Expr, Ident, UnaryOperator, Value as ParserValue};
use store_api::metric_engine_consts::{LOGICAL_TABLE_METADATA_KEY, METRIC_ENGINE_NAME};
use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};
use table::dist_table::DistTable;
@@ -1329,14 +1329,30 @@ fn convert_one_expr(
// convert leaf node.
let (lhs, op, rhs) = match (left.as_ref(), right.as_ref()) {
+ // col, val
(Expr::Identifier(ident), Expr::Value(value)) => {
let (column_name, data_type) = convert_identifier(ident, column_name_and_type)?;
- let value = convert_value(value, data_type, timezone)?;
+ let value = convert_value(value, data_type, timezone, None)?;
(Operand::Column(column_name), op, Operand::Value(value))
}
+ (Expr::Identifier(ident), Expr::UnaryOp { op: unary_op, expr })
+ if let Expr::Value(v) = &**expr =>
+ {
+ let (column_name, data_type) = convert_identifier(ident, column_name_and_type)?;
+ let value = convert_value(v, data_type, timezone, Some(*unary_op))?;
+ (Operand::Column(column_name), op, Operand::Value(value))
+ }
+ // val, col
(Expr::Value(value), Expr::Identifier(ident)) => {
let (column_name, data_type) = convert_identifier(ident, column_name_and_type)?;
- let value = convert_value(value, data_type, timezone)?;
+ let value = convert_value(value, data_type, timezone, None)?;
+ (Operand::Value(value), op, Operand::Column(column_name))
+ }
+ (Expr::UnaryOp { op: unary_op, expr }, Expr::Identifier(ident))
+ if let Expr::Value(v) = &**expr =>
+ {
+ let (column_name, data_type) = convert_identifier(ident, column_name_and_type)?;
+ let value = convert_value(v, data_type, timezone, Some(*unary_op))?;
(Operand::Value(value), op, Operand::Column(column_name))
}
(Expr::BinaryOp { .. }, Expr::BinaryOp { .. }) => {
@@ -1372,8 +1388,10 @@ fn convert_value(
value: &ParserValue,
data_type: ConcreteDataType,
timezone: &Timezone,
+ unary_op: Option<UnaryOperator>,
) -> Result<Value> {
- sql_value_to_value("<NONAME>", &data_type, value, Some(timezone)).context(ParseSqlValueSnafu)
+ sql_value_to_value("<NONAME>", &data_type, value, Some(timezone), unary_op)
+ .context(ParseSqlValueSnafu)
}
/// Merge table level table options with schema level table options.
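
The new match arms above lean on the nightly if_let_guard feature that this change enables; on stable Rust the same "column compared against an optionally negated literal" shape can be written with a nested match. A minimal sketch follows, assuming only the sqlparser crate already used by this workspace; the helper name is invented for illustration.

use sqlparser::ast::{Expr, UnaryOperator, Value as SqlValue};

/// Splits an expression such as `-1` or `10` from a partition rule into an
/// optional unary operator plus the underlying literal; anything else is None.
fn as_signed_literal(expr: &Expr) -> Option<(Option<UnaryOperator>, &SqlValue)> {
    match expr {
        Expr::Value(v) => Some((None, v)),
        Expr::UnaryOp { op, expr } => match expr.as_ref() {
            Expr::Value(v) => Some((Some(*op), v)),
            _ => None,
        },
        _ => None,
    }
}
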
diff --git a/src/servers/src/lib.rs b/src/servers/src/lib.rs
index 85ebea4ca8a3..a8f97877bdda 100644
--- a/src/servers/src/lib.rs
+++ b/src/servers/src/lib.rs
@@ -16,6 +16,7 @@
#![feature(try_blocks)]
#![feature(exclusive_wrapper)]
#![feature(let_chains)]
+#![feature(if_let_guard)]
use datatypes::schema::Schema;
use query::plan::LogicalPlan;
diff --git a/src/servers/src/mysql/helper.rs b/src/servers/src/mysql/helper.rs
index df174b38400c..e18053a9b793 100644
--- a/src/servers/src/mysql/helper.rs
+++ b/src/servers/src/mysql/helper.rs
@@ -205,7 +205,19 @@ pub fn convert_value(param: &ParamValue, t: &ConcreteDataType) -> Result<ScalarV
pub fn convert_expr_to_scalar_value(param: &Expr, t: &ConcreteDataType) -> Result<ScalarValue> {
match param {
Expr::Value(v) => {
- let v = sql_value_to_value("", t, v, None);
+ let v = sql_value_to_value("", t, v, None, None);
+ match v {
+ Ok(v) => v
+ .try_to_scalar_value(t)
+ .context(error::ConvertScalarValueSnafu),
+ Err(e) => error::InvalidParameterSnafu {
+ reason: e.to_string(),
+ }
+ .fail(),
+ }
+ }
+ Expr::UnaryOp { op, expr } if let Expr::Value(v) = &**expr => {
+ let v = sql_value_to_value("", t, v, None, Some(*op));
match v {
Ok(v) => v
.try_to_scalar_value(t)
diff --git a/src/sql/src/error.rs b/src/sql/src/error.rs
index 18453f8b3099..19ff7f47be7c 100644
--- a/src/sql/src/error.rs
+++ b/src/sql/src/error.rs
@@ -20,6 +20,7 @@ use common_macro::stack_trace_debug;
use common_time::timestamp::TimeUnit;
use common_time::Timestamp;
use datafusion_common::DataFusionError;
+use datafusion_sql::sqlparser::ast::UnaryOperator;
use datatypes::prelude::{ConcreteDataType, Value};
use snafu::{Location, Snafu};
use sqlparser::ast::Ident;
@@ -161,6 +162,21 @@ pub enum Error {
source: datatypes::error::Error,
},
+ #[snafu(display("Invalid unary operator {} for value {}", unary_op, value))]
+ InvalidUnaryOp {
+ unary_op: UnaryOperator,
+ value: Value,
+ #[snafu(implicit)]
+ location: Location,
+ },
+
+ #[snafu(display("Unsupported unary operator {}", unary_op))]
+ UnsupportedUnaryOp {
+ unary_op: UnaryOperator,
+ #[snafu(implicit)]
+ location: Location,
+ },
+
#[snafu(display("Unrecognized table option key: {}", key))]
InvalidTableOption {
key: String,
@@ -299,7 +315,8 @@ impl ErrorExt for Error {
| ConvertToLogicalExpression { .. }
| Simplification { .. }
| InvalidInterval { .. }
- | PermissionDenied { .. }
+ | InvalidUnaryOp { .. }
+ | UnsupportedUnaryOp { .. }
| FulltextInvalidOption { .. } => StatusCode::InvalidArguments,
SerializeColumnDefaultConstraint { source, .. } => source.status_code(),
@@ -307,6 +324,7 @@ impl ErrorExt for Error {
ConvertToDfStatement { .. } => StatusCode::Internal,
ConvertSqlValue { .. } | ConvertValue { .. } => StatusCode::Unsupported,
+ PermissionDenied { .. } => StatusCode::PermissionDenied,
SetFulltextOption { .. } => StatusCode::Unexpected,
}
}
diff --git a/src/sql/src/parsers/create_parser.rs b/src/sql/src/parsers/create_parser.rs
index da315123a26a..565b621a649b 100644
--- a/src/sql/src/parsers/create_parser.rs
+++ b/src/sql/src/parsers/create_parser.rs
@@ -940,6 +940,10 @@ fn ensure_one_expr(expr: &Expr, columns: &[&Column]) -> Result<()> {
Ok(())
}
Expr::Value(_) => Ok(()),
+ Expr::UnaryOp { expr, .. } => {
+ ensure_one_expr(expr, columns)?;
+ Ok(())
+ }
_ => error::InvalidSqlSnafu {
msg: format!("Partition rule expr {:?} is not a binary expr!", expr),
}
diff --git a/src/sql/src/statements.rs b/src/sql/src/statements.rs
index ccf93861d809..196e3b9c9863 100644
--- a/src/sql/src/statements.rs
+++ b/src/sql/src/statements.rs
@@ -52,9 +52,9 @@ use crate::ast::{
};
use crate::error::{
self, ColumnTypeMismatchSnafu, ConvertSqlValueSnafu, ConvertToGrpcDataTypeSnafu,
- ConvertValueSnafu, InvalidCastSnafu, InvalidSqlValueSnafu, ParseSqlValueSnafu, Result,
- SerializeColumnDefaultConstraintSnafu, SetFulltextOptionSnafu, TimestampOverflowSnafu,
- UnsupportedDefaultValueSnafu,
+ ConvertValueSnafu, InvalidCastSnafu, InvalidSqlValueSnafu, InvalidUnaryOpSnafu,
+ ParseSqlValueSnafu, Result, SerializeColumnDefaultConstraintSnafu, SetFulltextOptionSnafu,
+ TimestampOverflowSnafu, UnsupportedDefaultValueSnafu, UnsupportedUnaryOpSnafu,
};
use crate::statements::create::Column;
pub use crate::statements::option_map::OptionMap;
@@ -229,8 +229,9 @@ pub fn sql_value_to_value(
data_type: &ConcreteDataType,
sql_val: &SqlValue,
timezone: Option<&Timezone>,
+ unary_op: Option<UnaryOperator>,
) -> Result<Value> {
- let value = match sql_val {
+ let mut value = match sql_val {
SqlValue::Number(n, _) => sql_number_to_value(data_type, n)?,
SqlValue::Null => Value::Null,
SqlValue::Boolean(b) => {
@@ -260,6 +261,60 @@ pub fn sql_value_to_value(
.fail()
}
};
+
+ if let Some(unary_op) = unary_op {
+ match unary_op {
+ UnaryOperator::Plus | UnaryOperator::Minus | UnaryOperator::Not => {}
+ UnaryOperator::PGBitwiseNot
+ | UnaryOperator::PGSquareRoot
+ | UnaryOperator::PGCubeRoot
+ | UnaryOperator::PGPostfixFactorial
+ | UnaryOperator::PGPrefixFactorial
+ | UnaryOperator::PGAbs => {
+ return UnsupportedUnaryOpSnafu { unary_op }.fail();
+ }
+ }
+
+ match value {
+ Value::Null => {}
+ Value::Boolean(bool) => match unary_op {
+ UnaryOperator::Not => value = Value::Boolean(!bool),
+ _ => {
+ return InvalidUnaryOpSnafu { unary_op, value }.fail();
+ }
+ },
+ Value::UInt8(_)
+ | Value::UInt16(_)
+ | Value::UInt32(_)
+ | Value::UInt64(_)
+ | Value::Int8(_)
+ | Value::Int16(_)
+ | Value::Int32(_)
+ | Value::Int64(_)
+ | Value::Float32(_)
+ | Value::Float64(_)
+ | Value::Decimal128(_)
+ | Value::Date(_)
+ | Value::DateTime(_)
+ | Value::Timestamp(_)
+ | Value::Time(_)
+ | Value::Duration(_)
+ | Value::Interval(_) => match unary_op {
+ UnaryOperator::Plus => {}
+ UnaryOperator::Minus => {
+ value = value
+ .try_negative()
+ .with_context(|| InvalidUnaryOpSnafu { unary_op, value })?;
+ }
+ _ => return InvalidUnaryOpSnafu { unary_op, value }.fail(),
+ },
+
+ Value::String(_) | Value::Binary(_) | Value::List(_) => {
+ return InvalidUnaryOpSnafu { unary_op, value }.fail()
+ }
+ }
+ }
+
if value.data_type() != *data_type {
cast(value, data_type).with_context(|_| InvalidCastSnafu {
sql_value: sql_val.clone(),
@@ -305,7 +360,7 @@ fn parse_column_default_constraint(
{
let default_constraint = match &opt.option {
ColumnOption::Default(Expr::Value(v)) => ColumnDefaultConstraint::Value(
- sql_value_to_value(column_name, data_type, v, timezone)?,
+ sql_value_to_value(column_name, data_type, v, timezone, None)?,
),
ColumnOption::Default(Expr::Function(func)) => {
let mut func = format!("{func}").to_lowercase();
@@ -316,20 +371,22 @@ fn parse_column_default_constraint(
// Always use lowercase for function expression
ColumnDefaultConstraint::Function(func.to_lowercase())
}
- ColumnOption::Default(expr) => {
- if let Expr::UnaryOp { op, expr } = expr {
- if let (UnaryOperator::Minus, Expr::Value(SqlValue::Number(n, _))) =
- (op, expr.as_ref())
- {
- return Ok(Some(ColumnDefaultConstraint::Value(sql_number_to_value(
- data_type,
- &format!("-{n}"),
- )?)));
+ ColumnOption::Default(Expr::UnaryOp { op, expr }) => {
+ if let Expr::Value(v) = &**expr {
+ let value = sql_value_to_value(column_name, data_type, v, timezone, Some(*op))?;
+ ColumnDefaultConstraint::Value(value)
+ } else {
+ return UnsupportedDefaultValueSnafu {
+ column_name,
+ expr: *expr.clone(),
}
+ .fail();
}
+ }
+ ColumnOption::Default(others) => {
return UnsupportedDefaultValueSnafu {
column_name,
- expr: expr.clone(),
+ expr: others.clone(),
}
.fail();
}
@@ -689,28 +746,61 @@ mod tests {
let sql_val = SqlValue::Null;
assert_eq!(
Value::Null,
- sql_value_to_value("a", &ConcreteDataType::float64_datatype(), &sql_val, None).unwrap()
+ sql_value_to_value(
+ "a",
+ &ConcreteDataType::float64_datatype(),
+ &sql_val,
+ None,
+ None
+ )
+ .unwrap()
);
let sql_val = SqlValue::Boolean(true);
assert_eq!(
Value::Boolean(true),
- sql_value_to_value("a", &ConcreteDataType::boolean_datatype(), &sql_val, None).unwrap()
+ sql_value_to_value(
+ "a",
+ &ConcreteDataType::boolean_datatype(),
+ &sql_val,
+ None,
+ None
+ )
+ .unwrap()
);
let sql_val = SqlValue::Number("3.0".to_string(), false);
assert_eq!(
Value::Float64(OrderedFloat(3.0)),
- sql_value_to_value("a", &ConcreteDataType::float64_datatype(), &sql_val, None).unwrap()
+ sql_value_to_value(
+ "a",
+ &ConcreteDataType::float64_datatype(),
+ &sql_val,
+ None,
+ None
+ )
+ .unwrap()
);
let sql_val = SqlValue::Number("3.0".to_string(), false);
- let v = sql_value_to_value("a", &ConcreteDataType::boolean_datatype(), &sql_val, None);
+ let v = sql_value_to_value(
+ "a",
+ &ConcreteDataType::boolean_datatype(),
+ &sql_val,
+ None,
+ None,
+ );
assert!(v.is_err());
assert!(format!("{v:?}").contains("Failed to parse number '3.0' to boolean column type"));
let sql_val = SqlValue::Boolean(true);
- let v = sql_value_to_value("a", &ConcreteDataType::float64_datatype(), &sql_val, None);
+ let v = sql_value_to_value(
+ "a",
+ &ConcreteDataType::float64_datatype(),
+ &sql_val,
+ None,
+ None,
+ );
assert!(v.is_err());
assert!(
format!("{v:?}").contains(
@@ -720,20 +810,38 @@ mod tests {
);
let sql_val = SqlValue::HexStringLiteral("48656c6c6f20776f726c6421".to_string());
- let v =
- sql_value_to_value("a", &ConcreteDataType::binary_datatype(), &sql_val, None).unwrap();
+ let v = sql_value_to_value(
+ "a",
+ &ConcreteDataType::binary_datatype(),
+ &sql_val,
+ None,
+ None,
+ )
+ .unwrap();
assert_eq!(Value::Binary(Bytes::from(b"Hello world!".as_slice())), v);
let sql_val = SqlValue::DoubleQuotedString("MorningMyFriends".to_string());
- let v =
- sql_value_to_value("a", &ConcreteDataType::binary_datatype(), &sql_val, None).unwrap();
+ let v = sql_value_to_value(
+ "a",
+ &ConcreteDataType::binary_datatype(),
+ &sql_val,
+ None,
+ None,
+ )
+ .unwrap();
assert_eq!(
Value::Binary(Bytes::from(b"MorningMyFriends".as_slice())),
v
);
let sql_val = SqlValue::HexStringLiteral("9AF".to_string());
- let v = sql_value_to_value("a", &ConcreteDataType::binary_datatype(), &sql_val, None);
+ let v = sql_value_to_value(
+ "a",
+ &ConcreteDataType::binary_datatype(),
+ &sql_val,
+ None,
+ None,
+ );
assert!(v.is_err());
assert!(
format!("{v:?}").contains("odd number of digits"),
@@ -741,7 +849,13 @@ mod tests {
);
let sql_val = SqlValue::HexStringLiteral("AG".to_string());
- let v = sql_value_to_value("a", &ConcreteDataType::binary_datatype(), &sql_val, None);
+ let v = sql_value_to_value(
+ "a",
+ &ConcreteDataType::binary_datatype(),
+ &sql_val,
+ None,
+ None,
+ );
assert!(v.is_err());
assert!(format!("{v:?}").contains("invalid character"), "v is {v:?}",);
}
@@ -753,6 +867,7 @@ mod tests {
&ConcreteDataType::date_datatype(),
&SqlValue::DoubleQuotedString("2022-02-22".to_string()),
None,
+ None,
)
.unwrap();
assert_eq!(ConcreteDataType::date_datatype(), value.data_type());
@@ -768,6 +883,7 @@ mod tests {
&ConcreteDataType::date_datatype(),
&SqlValue::DoubleQuotedString("2022-02-22".to_string()),
Some(&Timezone::from_tz_string("+07:00").unwrap()),
+ None,
)
.unwrap();
assert_eq!(ConcreteDataType::date_datatype(), value.data_type());
@@ -786,6 +902,7 @@ mod tests {
&ConcreteDataType::datetime_datatype(),
&SqlValue::DoubleQuotedString("2022-02-22 00:01:03+0800".to_string()),
None,
+ None,
)
.unwrap();
assert_eq!(ConcreteDataType::datetime_datatype(), value.data_type());
@@ -803,6 +920,7 @@ mod tests {
&ConcreteDataType::datetime_datatype(),
&SqlValue::DoubleQuotedString("2022-02-22 00:01:61".to_string()),
None,
+ None
)
.is_err());
}
@@ -1247,7 +1365,32 @@ mod tests {
&ConcreteDataType::string_datatype(),
&SqlValue::Placeholder("default".into()),
None,
+ None
)
.is_err());
+ assert!(sql_value_to_value(
+ "test",
+ &ConcreteDataType::string_datatype(),
+ &SqlValue::Placeholder("default".into()),
+ None,
+ Some(UnaryOperator::Minus),
+ )
+ .is_err());
+ assert!(sql_value_to_value(
+ "test",
+ &ConcreteDataType::uint16_datatype(),
+ &SqlValue::Number("3".into(), false),
+ None,
+ Some(UnaryOperator::Minus),
+ )
+ .is_err());
+ assert!(sql_value_to_value(
+ "test",
+ &ConcreteDataType::uint16_datatype(),
+ &SqlValue::Number("3".into(), false),
+ None,
+ None
+ )
+ .is_ok());
}
}
diff --git a/tests-integration/tests/sql.rs b/tests-integration/tests/sql.rs
index ba3d847b644d..29835f904ad1 100644
--- a/tests-integration/tests/sql.rs
+++ b/tests-integration/tests/sql.rs
@@ -240,18 +240,23 @@ pub async fn test_mysql_crud(store_type: StorageType) {
.execute(&pool)
.await
.unwrap();
+ sqlx::query("insert into demo(i) values(?)")
+ .bind(-99)
+ .execute(&pool)
+ .await
+ .unwrap();
let rows = sqlx::query("select * from demo")
.fetch_all(&pool)
.await
.unwrap();
- assert_eq!(rows.len(), 1);
+ assert_eq!(rows.len(), 2);
for row in rows {
let i: i64 = row.get("i");
let ts: DateTime<Utc> = row.get("ts");
let now = common_time::util::current_time_millis();
assert!(now - ts.timestamp_millis() < 1000);
- assert_eq!(i, 99);
+ assert_eq!(i.abs(), 99);
}
let _ = fe_mysql_server.shutdown().await;
diff --git a/tests/cases/standalone/common/insert/insert_default.result b/tests/cases/standalone/common/insert/insert_default.result
index da5ac155e1a9..64d6175993e4 100644
--- a/tests/cases/standalone/common/insert/insert_default.result
+++ b/tests/cases/standalone/common/insert/insert_default.result
@@ -64,3 +64,28 @@ DROP TABLE test2;
Affected Rows: 0
+CREATE TABLE test3 (
+ i INTEGER DEFAULT -1,
+ j DOUBLE DEFAULT -2,
+ k TIMESTAMP DEFAULT -3,
+ ts TIMESTAMP TIME INDEX,
+);
+
+Affected Rows: 0
+
+INSERT INTO test3 (ts) VALUES (1);
+
+Affected Rows: 1
+
+SELECT * FROM test3;
+
++----+------+-------------------------+-------------------------+
+| i | j | k | ts |
++----+------+-------------------------+-------------------------+
+| -1 | -2.0 | 1969-12-31T23:59:59.997 | 1970-01-01T00:00:00.001 |
++----+------+-------------------------+-------------------------+
+
+DROP TABLE test3;
+
+Affected Rows: 0
+
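
Worth spelling out: the `k` column above is declared `TIMESTAMP DEFAULT -3`, and a millisecond timestamp of -3 is 3 ms before the Unix epoch, which is exactly the `1969-12-31T23:59:59.997` in the result set. A quick check, using chrono purely for illustration (assuming a recent 0.4.x release that provides `timestamp_millis_opt`):

use chrono::{TimeZone, Utc};

fn main() {
    // DEFAULT -3 on a millisecond timestamp column means 3 ms before the epoch.
    let ts = Utc.timestamp_millis_opt(-3).unwrap();
    println!("{ts}"); // 1969-12-31 23:59:59.997 UTC
}
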
diff --git a/tests/cases/standalone/common/insert/insert_default.sql b/tests/cases/standalone/common/insert/insert_default.sql
index 4e50fc59c978..5d41b77e5ccf 100644
--- a/tests/cases/standalone/common/insert/insert_default.sql
+++ b/tests/cases/standalone/common/insert/insert_default.sql
@@ -20,3 +20,16 @@ SELECT * FROM test2;
DROP TABLE test1;
DROP TABLE test2;
+
+CREATE TABLE test3 (
+ i INTEGER DEFAULT -1,
+ j DOUBLE DEFAULT -2,
+ k TIMESTAMP DEFAULT -3,
+ ts TIMESTAMP TIME INDEX,
+);
+
+INSERT INTO test3 (ts) VALUES (1);
+
+SELECT * FROM test3;
+
+DROP TABLE test3;
diff --git a/tests/cases/standalone/common/partition.result b/tests/cases/standalone/common/partition.result
index 3fb5f50ddb39..cdf0f51be57c 100644
--- a/tests/cases/standalone/common/partition.result
+++ b/tests/cases/standalone/common/partition.result
@@ -185,3 +185,40 @@ PARTITION ON COLUMNS (a) (
Error: 1004(InvalidArguments), Unclosed value Int32(10) on column a
+-- Issue https://github.com/GreptimeTeam/greptimedb/issues/4247
+-- Partition rule with unary operator
+CREATE TABLE `molestiAe` (
+ `sImiLiQUE` FLOAT NOT NULL,
+ `amEt` TIMESTAMP(6) TIME INDEX,
+ `EXpLICaBo` DOUBLE,
+ PRIMARY KEY (`sImiLiQUE`)
+) PARTITION ON COLUMNS (`sImiLiQUE`) (
+ `sImiLiQUE` < -1,
+ `sImiLiQUE` >= -1 AND `sImiLiQUE` < -0,
+ `sImiLiQUE` >= 0
+);
+
+Affected Rows: 0
+
+INSERT INTO `molestiAe` VALUES
+ (-2, 0, 0),
+ (-0.9, 0, 0),
+ (1, 0, 0);
+
+Affected Rows: 3
+
+-- SQLNESS SORT_RESULT 3 1
+SELECT * FROM `molestiAe`;
+
++-----------+---------------------+-----------+
+| sImiLiQUE | amEt | EXpLICaBo |
++-----------+---------------------+-----------+
+| -0.9 | 1970-01-01T00:00:00 | 0.0 |
+| -2.0 | 1970-01-01T00:00:00 | 0.0 |
+| 1.0 | 1970-01-01T00:00:00 | 0.0 |
++-----------+---------------------+-----------+
+
+DROP TABLE `molestiAe`;
+
+Affected Rows: 0
+
diff --git a/tests/cases/standalone/common/partition.sql b/tests/cases/standalone/common/partition.sql
index 55f3d87cf9ab..c65f2c9e97b6 100644
--- a/tests/cases/standalone/common/partition.sql
+++ b/tests/cases/standalone/common/partition.sql
@@ -85,3 +85,26 @@ PARTITION ON COLUMNS (a) (
a > 10 AND a < 20,
a >= 20
);
+
+-- Issue https://github.com/GreptimeTeam/greptimedb/issues/4247
+-- Partition rule with unary operator
+CREATE TABLE `molestiAe` (
+ `sImiLiQUE` FLOAT NOT NULL,
+ `amEt` TIMESTAMP(6) TIME INDEX,
+ `EXpLICaBo` DOUBLE,
+ PRIMARY KEY (`sImiLiQUE`)
+) PARTITION ON COLUMNS (`sImiLiQUE`) (
+ `sImiLiQUE` < -1,
+ `sImiLiQUE` >= -1 AND `sImiLiQUE` < -0,
+ `sImiLiQUE` >= 0
+);
+
+INSERT INTO `molestiAe` VALUES
+ (-2, 0, 0),
+ (-0.9, 0, 0),
+ (1, 0, 0);
+
+-- SQLNESS SORT_RESULT 3 1
+SELECT * FROM `molestiAe`;
+
+DROP TABLE `molestiAe`;
|
fix
|
support unary operator in default value, partition rule and prepare statement (#4301)
|
d9eeeee06e27cbac7c9a1761e8fc7937789d48c0
|
2023-11-20 09:59:41
|
Zhenchi
|
feat(puffin): add file reader (#2751)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index afb51230247d..e08c49877238 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -6585,12 +6585,18 @@ dependencies = [
name = "puffin"
version = "0.4.3"
dependencies = [
+ "async-trait",
+ "bitflags 2.4.1",
+ "common-error",
+ "common-macro",
"derive_builder 0.12.0",
"futures",
"pin-project",
"serde",
"serde_json",
+ "snafu",
"tokio",
+ "tokio-util",
]
[[package]]
diff --git a/Cargo.toml b/Cargo.toml
index 9c9db1117653..dbd5d578692a 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -73,6 +73,7 @@ async-stream = "0.3"
async-trait = "0.1"
base64 = "0.21"
bigdecimal = "0.4.2"
+bitflags = "2.4.1"
chrono = { version = "0.4", features = ["serde"] }
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "26e43acac3a96cec8dd4c8365f22dfb1a84306e9" }
diff --git a/src/puffin/Cargo.toml b/src/puffin/Cargo.toml
index a0c56c0f8136..55001b6b07de 100644
--- a/src/puffin/Cargo.toml
+++ b/src/puffin/Cargo.toml
@@ -5,11 +5,17 @@ edition.workspace = true
license.workspace = true
[dependencies]
+async-trait.workspace = true
+bitflags.workspace = true
+common-error.workspace = true
+common-macro.workspace = true
derive_builder.workspace = true
futures.workspace = true
pin-project.workspace = true
serde.workspace = true
serde_json.workspace = true
+snafu.workspace = true
[dev-dependencies]
+tokio-util.workspace = true
tokio.workspace = true
diff --git a/src/puffin/src/error.rs b/src/puffin/src/error.rs
new file mode 100644
index 000000000000..9de5f9a17c92
--- /dev/null
+++ b/src/puffin/src/error.rs
@@ -0,0 +1,132 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::any::Any;
+use std::io::Error as IoError;
+
+use common_error::ext::ErrorExt;
+use common_error::status_code::StatusCode;
+use common_macro::stack_trace_debug;
+use snafu::{Location, Snafu};
+
+#[derive(Snafu)]
+#[snafu(visibility(pub))]
+#[stack_trace_debug]
+pub enum Error {
+ #[snafu(display("Failed to seek"))]
+ Seek {
+ #[snafu(source)]
+ error: IoError,
+ location: Location,
+ },
+
+ #[snafu(display("Failed to read"))]
+ Read {
+ #[snafu(source)]
+ error: IoError,
+ location: Location,
+ },
+
+ #[snafu(display("Failed to write"))]
+ Write {
+ #[snafu(source)]
+ error: IoError,
+ location: Location,
+ },
+
+ #[snafu(display("Magic not matched"))]
+ MagicNotMatched { location: Location },
+
+ #[snafu(display("Failed to convert bytes to integer"))]
+ BytesToInteger {
+ #[snafu(source)]
+ error: std::array::TryFromSliceError,
+ location: Location,
+ },
+
+ #[snafu(display("Unsupported decompression: {}", decompression))]
+ UnsupportedDecompression {
+ decompression: String,
+ location: Location,
+ },
+
+ #[snafu(display("Failed to serialize json"))]
+ SerializeJson {
+ #[snafu(source)]
+ error: serde_json::Error,
+ location: Location,
+ },
+
+ #[snafu(display("Failed to deserialize json"))]
+ DeserializeJson {
+ #[snafu(source)]
+ error: serde_json::Error,
+ location: Location,
+ },
+
+ #[snafu(display("Parse stage not match, expected: {}, actual: {}", expected, actual))]
+ ParseStageNotMatch {
+ expected: String,
+ actual: String,
+ location: Location,
+ },
+
+ #[snafu(display("Unexpected footer payload size: {}", size))]
+ UnexpectedFooterPayloadSize { size: i32, location: Location },
+
+ #[snafu(display(
+ "Unexpected puffin file size, min: {}, actual: {}",
+ min_file_size,
+ actual_file_size
+ ))]
+ UnexpectedPuffinFileSize {
+ min_file_size: u64,
+ actual_file_size: u64,
+ location: Location,
+ },
+
+ #[snafu(display("Invalid blob offset: {}, location: {:?}", offset, location))]
+ InvalidBlobOffset { offset: i64, location: Location },
+
+ #[snafu(display("Invalid blob area end: {}, location: {:?}", offset, location))]
+ InvalidBlobAreaEnd { offset: u64, location: Location },
+}
+
+impl ErrorExt for Error {
+ fn status_code(&self) -> StatusCode {
+ use Error::*;
+ match self {
+ Seek { .. }
+ | Read { .. }
+ | MagicNotMatched { .. }
+ | DeserializeJson { .. }
+ | Write { .. }
+ | SerializeJson { .. }
+ | BytesToInteger { .. }
+ | ParseStageNotMatch { .. }
+ | UnexpectedFooterPayloadSize { .. }
+ | UnexpectedPuffinFileSize { .. }
+ | InvalidBlobOffset { .. }
+ | InvalidBlobAreaEnd { .. } => StatusCode::Unexpected,
+
+ UnsupportedDecompression { .. } => StatusCode::Unsupported,
+ }
+ }
+
+ fn as_any(&self) -> &dyn Any {
+ self
+ }
+}
+
+pub type Result<T> = std::result::Result<T, Error>;
diff --git a/src/puffin/src/file_format.rs b/src/puffin/src/file_format.rs
new file mode 100644
index 000000000000..0802c977e87e
--- /dev/null
+++ b/src/puffin/src/file_format.rs
@@ -0,0 +1,55 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! # Format specification for Puffin files
+//!
+//! ## File structure
+//!
+//! Magic Blob₁ Blob₂ ... Blobₙ Footer
+//!
+//! - `Magic` is four bytes 0x50, 0x46, 0x41, 0x31 (short for: Puffin Fratercula arctica, version 1),
+//! - `Blobᵢ` is i-th blob contained in the file, to be interpreted by application according to the footer,
+//! - `Footer` is defined below.
+//!
+//! ## Footer structure
+//!
+//! Magic FooterPayload FooterPayloadSize Flags Magic
+//!
+//! - `Magic`: four bytes, same as at the beginning of the file
+//! - `FooterPayload`: optionally compressed, UTF-8 encoded JSON payload describing the blobs in the file, with the structure described below
+//! - `FooterPayloadSize`: a length in bytes of the `FooterPayload` (after compression, if compressed), stored as 4 byte integer
+//! - `Flags`: 4 bytes for boolean flags
+//! * byte 0 (first)
+//! - bit 0 (lowest bit): whether `FooterPayload` is compressed
+//! - all other bits are reserved for future use and should be set to 0 on write
+//! * all other bytes are reserved for future use and should be set to 0 on write
+//! A 4 byte integer is always signed, in a two’s complement representation, stored little-endian.
+//!
+//! ## Footer Payload
+//!
+//! Footer payload bytes is either uncompressed or LZ4-compressed (as a single LZ4 compression frame with content size present),
+//! UTF-8 encoded JSON payload representing a single [`FileMetadata`] object.
+
+pub mod reader;
+
+use bitflags::bitflags;
+
+pub const MAGIC: [u8; 4] = [0x50, 0x46, 0x41, 0x31];
+
+bitflags! {
+ #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
+ pub struct Flags: u32 {
+ const FOOTER_PAYLOAD_COMPRESSED_LZ4 = 0b00000001;
+ }
+}
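
Because the footer is located by walking backwards from the end of the file, the field offsets described above reduce to simple arithmetic. The standalone sketch below mirrors the constants defined in reader/footer.rs later in this patch; the function itself is illustrative only.

const MAGIC_SIZE: u64 = 4;
const FLAGS_SIZE: u64 = 4;
const PAYLOAD_SIZE_SIZE: u64 = 4;

/// Offsets of the footer fields, measured from the start of a `file_size`-byte
/// file whose footer payload occupies `payload_size` bytes.
fn footer_offsets(file_size: u64, payload_size: u64) -> (u64, u64, u64, u64) {
    let foot_magic = file_size - MAGIC_SIZE;
    let flags = foot_magic - FLAGS_SIZE;
    let payload_size_field = flags - PAYLOAD_SIZE_SIZE;
    let head_magic = payload_size_field - payload_size - MAGIC_SIZE;
    (head_magic, payload_size_field, flags, foot_magic)
}

fn main() {
    // A 34-byte file with an empty footer payload: head magic at 18, payload
    // size field at 22, flags at 26, foot magic at 30.
    assert_eq!(footer_offsets(34, 0), (18, 22, 26, 30));
}
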
diff --git a/src/puffin/src/file_format/reader.rs b/src/puffin/src/file_format/reader.rs
new file mode 100644
index 000000000000..8d51c8cb6962
--- /dev/null
+++ b/src/puffin/src/file_format/reader.rs
@@ -0,0 +1,46 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+mod file;
+mod footer;
+
+use async_trait::async_trait;
+
+use crate::blob_metadata::BlobMetadata;
+use crate::error::Result;
+pub use crate::file_format::reader::file::PuffinFileReader;
+use crate::file_metadata::FileMetadata;
+
+/// `PuffinSyncReader` defines a synchronous reader for puffin data.
+pub trait PuffinSyncReader<'a> {
+ type Reader: std::io::Read + std::io::Seek;
+
+ /// fetch the FileMetadata
+ fn metadata(&'a mut self) -> Result<FileMetadata>;
+
+ /// read particular blob data based on given metadata
+ fn blob_reader(&'a mut self, blob_metadata: &BlobMetadata) -> Result<Self::Reader>;
+}
+
+/// `PuffinAsyncReader` defines an asynchronous reader for puffin data.
+#[async_trait]
+pub trait PuffinAsyncReader<'a> {
+ type Reader: futures::AsyncRead + futures::AsyncSeek;
+
+ /// fetch the FileMetadata
+ async fn metadata(&'a mut self) -> Result<FileMetadata>;
+
+ /// read particular blob data based on given metadata
+ fn blob_reader(&'a mut self, blob_metadata: &BlobMetadata) -> Result<Self::Reader>;
+}
diff --git a/src/puffin/src/file_format/reader/file.rs b/src/puffin/src/file_format/reader/file.rs
new file mode 100644
index 000000000000..a7ca115b6cb2
--- /dev/null
+++ b/src/puffin/src/file_format/reader/file.rs
@@ -0,0 +1,173 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::io::{self, SeekFrom};
+
+use async_trait::async_trait;
+use futures::{AsyncRead, AsyncReadExt, AsyncSeek, AsyncSeekExt};
+use snafu::{ensure, ResultExt};
+
+use crate::blob_metadata::BlobMetadata;
+use crate::error::{
+ MagicNotMatchedSnafu, ReadSnafu, Result, SeekSnafu, UnexpectedPuffinFileSizeSnafu,
+ UnsupportedDecompressionSnafu,
+};
+use crate::file_format::reader::footer::{FooterParser, MIN_FOOTER_SIZE};
+use crate::file_format::reader::{PuffinAsyncReader, PuffinSyncReader};
+use crate::file_format::MAGIC;
+use crate::file_metadata::FileMetadata;
+use crate::partial_reader::PartialReader;
+
+/// Puffin file reader, implementing [`PuffinSyncReader`] and [`PuffinAsyncReader`]
+///
+/// ```text
+/// File layout: Magic Blob₁ Blob₂ ... Blobₙ Footer
+/// [4] [?] [?] [?] [?]
+/// ```
+pub struct PuffinFileReader<R> {
+ /// The source of the puffin file
+ source: R,
+
+ /// The metadata of the puffin file, which is parsed from the footer
+ metadata: Option<FileMetadata>,
+}
+
+pub const MAGIC_SIZE: u64 = MAGIC.len() as u64;
+pub const MIN_FILE_SIZE: u64 = MAGIC_SIZE + MIN_FOOTER_SIZE;
+
+impl<R> PuffinFileReader<R> {
+ pub fn new(source: R) -> Self {
+ Self {
+ source,
+ metadata: None,
+ }
+ }
+
+ fn validate_file_size(file_size: u64) -> Result<()> {
+ ensure!(
+ file_size >= MIN_FILE_SIZE,
+ UnexpectedPuffinFileSizeSnafu {
+ min_file_size: MIN_FILE_SIZE,
+ actual_file_size: file_size
+ }
+ );
+ Ok(())
+ }
+}
+
+impl<'a, R: io::Read + io::Seek + 'a> PuffinSyncReader<'a> for PuffinFileReader<R> {
+ type Reader = PartialReader<&'a mut R>;
+
+ fn metadata(&mut self) -> Result<FileMetadata> {
+ if let Some(metadata) = &self.metadata {
+ return Ok(metadata.clone());
+ }
+
+ // check the magic
+ let mut magic = [0; MAGIC_SIZE as usize];
+ self.source.read_exact(&mut magic).context(ReadSnafu)?;
+ ensure!(magic == MAGIC, MagicNotMatchedSnafu);
+
+ let file_size = self.get_file_size_sync()?;
+
+ // parse the footer
+ let metadata = FooterParser::new(&mut self.source, file_size).parse_sync()?;
+ self.metadata = Some(metadata.clone());
+ Ok(metadata)
+ }
+
+ fn blob_reader(&'a mut self, blob_metadata: &BlobMetadata) -> Result<Self::Reader> {
+ // TODO(zhongzc): support decompression
+ let compression = blob_metadata.compression_codec.as_ref();
+ ensure!(
+ compression.is_none(),
+ UnsupportedDecompressionSnafu {
+ decompression: compression.unwrap().to_string()
+ }
+ );
+
+ Ok(PartialReader::new(
+ &mut self.source,
+ blob_metadata.offset as _,
+ blob_metadata.length as _,
+ ))
+ }
+}
+
+#[async_trait]
+impl<'a, R: AsyncRead + AsyncSeek + Unpin + Send + 'a> PuffinAsyncReader<'a>
+ for PuffinFileReader<R>
+{
+ type Reader = PartialReader<&'a mut R>;
+
+ async fn metadata(&'a mut self) -> Result<FileMetadata> {
+ if let Some(metadata) = &self.metadata {
+ return Ok(metadata.clone());
+ }
+
+ // check the magic
+ let mut magic = [0; MAGIC_SIZE as usize];
+ self.source
+ .read_exact(&mut magic)
+ .await
+ .context(ReadSnafu)?;
+ ensure!(magic == MAGIC, MagicNotMatchedSnafu);
+
+ let file_size = self.get_file_size_async().await?;
+
+ // parse the footer
+ let metadata = FooterParser::new(&mut self.source, file_size)
+ .parse_async()
+ .await?;
+ self.metadata = Some(metadata.clone());
+ Ok(metadata)
+ }
+
+ fn blob_reader(&'a mut self, blob_metadata: &BlobMetadata) -> Result<Self::Reader> {
+ // TODO(zhongzc): support decompression
+ let compression = blob_metadata.compression_codec.as_ref();
+ ensure!(
+ compression.is_none(),
+ UnsupportedDecompressionSnafu {
+ decompression: compression.unwrap().to_string()
+ }
+ );
+
+ Ok(PartialReader::new(
+ &mut self.source,
+ blob_metadata.offset as _,
+ blob_metadata.length as _,
+ ))
+ }
+}
+
+impl<R: io::Read + io::Seek> PuffinFileReader<R> {
+ fn get_file_size_sync(&mut self) -> Result<u64> {
+ let file_size = self.source.seek(SeekFrom::End(0)).context(SeekSnafu)?;
+ Self::validate_file_size(file_size)?;
+ Ok(file_size)
+ }
+}
+
+impl<R: AsyncRead + AsyncSeek + Send + Unpin> PuffinFileReader<R> {
+ async fn get_file_size_async(&mut self) -> Result<u64> {
+ let file_size = self
+ .source
+ .seek(SeekFrom::End(0))
+ .await
+ .context(SeekSnafu)?;
+ Self::validate_file_size(file_size)?;
+ Ok(file_size)
+ }
+}
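
For orientation, here is a hedged usage sketch of the async half of this reader, assuming a complete puffin file has already been loaded into memory; futures::io::Cursor supplies the AsyncRead + AsyncSeek bounds required above. This is illustrative only and is not test code from the change.

use futures::io::Cursor;
use puffin::error::Result;
use puffin::file_format::reader::{PuffinAsyncReader, PuffinFileReader};

async fn dump_blob_layout(file_bytes: Vec<u8>) -> Result<()> {
    let mut reader = PuffinFileReader::new(Cursor::new(file_bytes));
    // Parsing the footer yields the FileMetadata listing every blob's offset and length.
    let metadata = reader.metadata().await?;
    for blob in &metadata.blobs {
        println!("blob at offset {} with length {}", blob.offset, blob.length);
    }
    Ok(())
}
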
diff --git a/src/puffin/src/file_format/reader/footer.rs b/src/puffin/src/file_format/reader/footer.rs
new file mode 100644
index 000000000000..987c70a7d7ef
--- /dev/null
+++ b/src/puffin/src/file_format/reader/footer.rs
@@ -0,0 +1,318 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::io::{self, SeekFrom};
+
+use futures::{AsyncRead, AsyncReadExt, AsyncSeek, AsyncSeekExt};
+use snafu::{ensure, ResultExt};
+
+use crate::error::{
+ BytesToIntegerSnafu, DeserializeJsonSnafu, InvalidBlobAreaEndSnafu, InvalidBlobOffsetSnafu,
+ MagicNotMatchedSnafu, ParseStageNotMatchSnafu, ReadSnafu, Result, SeekSnafu,
+ UnexpectedFooterPayloadSizeSnafu, UnsupportedDecompressionSnafu,
+};
+use crate::file_format::reader::file::{MAGIC_SIZE, MIN_FILE_SIZE};
+use crate::file_format::{Flags, MAGIC};
+use crate::file_metadata::FileMetadata;
+
+/// Parser for the footer of a Puffin data file
+///
+/// The footer has a specific layout that needs to be read and parsed to
+/// extract metadata about the file, which is encapsulated in the [`FileMetadata`] type.
+///
+/// ```text
+/// Footer layout: HeadMagic Payload PayloadSize Flags FootMagic
+/// [4] [?] [4] [4] [4]
+/// ```
+pub struct FooterParser<R> {
+ // The underlying IO source
+ source: R,
+
+ // The size of the file, used for calculating offsets to read from
+ file_size: u64,
+}
+
+pub const FLAGS_SIZE: u64 = 4;
+pub const PAYLOAD_SIZE_SIZE: u64 = 4;
+pub const MIN_FOOTER_SIZE: u64 = MAGIC_SIZE * 2 + FLAGS_SIZE + PAYLOAD_SIZE_SIZE;
+
+impl<R> FooterParser<R> {
+ pub fn new(source: R, file_size: u64) -> Self {
+ Self { source, file_size }
+ }
+}
+
+impl<R: io::Read + io::Seek> FooterParser<R> {
+ /// Parses the footer from the IO source in a synchronous manner.
+ pub fn parse_sync(&mut self) -> Result<FileMetadata> {
+ let mut parser = StageParser::new(self.file_size);
+
+ let mut buf = vec![];
+ while let Some(byte_to_read) = parser.next_to_read() {
+ self.source
+ .seek(SeekFrom::Start(byte_to_read.offset))
+ .context(SeekSnafu)?;
+ let size = byte_to_read.size as usize;
+
+ buf.resize(size, 0);
+ let buf = &mut buf[..size];
+
+ self.source.read_exact(buf).context(ReadSnafu)?;
+
+ parser.consume_bytes(buf)?;
+ }
+
+ parser.finish()
+ }
+}
+
+impl<R: AsyncRead + AsyncSeek + Unpin> FooterParser<R> {
+    /// Parses the footer from the IO source in an asynchronous manner.
+ pub async fn parse_async(&mut self) -> Result<FileMetadata> {
+ let mut parser = StageParser::new(self.file_size);
+
+ let mut buf = vec![];
+ while let Some(byte_to_read) = parser.next_to_read() {
+ self.source
+ .seek(SeekFrom::Start(byte_to_read.offset))
+ .await
+ .context(SeekSnafu)?;
+ let size = byte_to_read.size as usize;
+
+ buf.resize(size, 0);
+ let buf = &mut buf[..size];
+
+ self.source.read_exact(buf).await.context(ReadSnafu)?;
+ parser.consume_bytes(buf)?;
+ }
+
+ parser.finish()
+ }
+}
+
+/// The internal stages of parsing the footer.
+/// This enum allows the StageParser to keep track of which part
+/// of the footer needs to be parsed next.
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+enum ParseStage {
+ FootMagic,
+ Flags,
+ PayloadSize,
+ Payload,
+ HeadMagic,
+ Done,
+}
+
+/// Manages the parsing process of the file's footer.
+struct StageParser {
+ /// Current stage in the parsing sequence of the footer.
+ stage: ParseStage,
+
+ /// Total file size; used for calculating offsets to read from.
+ file_size: u64,
+
+ /// Flags from the footer, set when the `Flags` field is parsed.
+ flags: Flags,
+
+ /// Size of the footer's payload, set when the `PayloadSize` is parsed.
+ payload_size: u64,
+
+ /// Metadata from the footer's payload, set when the `Payload` is parsed.
+ metadata: Option<FileMetadata>,
+}
+
+/// Represents a read operation that needs to be performed, including the
+/// offset from the start of the file and the number of bytes to read.
+struct BytesToRead {
+ offset: u64,
+ size: u64,
+}
+
+impl StageParser {
+ fn new(file_size: u64) -> Self {
+ Self {
+ stage: ParseStage::FootMagic,
+ file_size,
+ payload_size: 0,
+ flags: Flags::empty(),
+ metadata: None,
+ }
+ }
+
+ /// Determines the next segment of bytes to read based on the current parsing stage.
+ /// This method returns information like the offset and size of the next read,
+ /// or None if parsing is complete.
+ fn next_to_read(&self) -> Option<BytesToRead> {
+ if self.stage == ParseStage::Done {
+ return None;
+ }
+
+ let btr = match self.stage {
+ ParseStage::FootMagic => BytesToRead {
+ offset: self.foot_magic_offset(),
+ size: MAGIC_SIZE,
+ },
+ ParseStage::Flags => BytesToRead {
+ offset: self.flags_offset(),
+ size: FLAGS_SIZE,
+ },
+ ParseStage::PayloadSize => BytesToRead {
+ offset: self.payload_size_offset(),
+ size: PAYLOAD_SIZE_SIZE,
+ },
+ ParseStage::Payload => BytesToRead {
+ offset: self.payload_offset(),
+ size: self.payload_size,
+ },
+ ParseStage::HeadMagic => BytesToRead {
+ offset: self.head_magic_offset(),
+ size: MAGIC_SIZE,
+ },
+ ParseStage::Done => unreachable!(),
+ };
+
+ Some(btr)
+ }
+
+ /// Processes the bytes that have been read according to the current parsing stage
+ /// and advances the parsing stage. It ensures the correct sequence of bytes is
+ /// encountered and stores the necessary information in the `StageParser`.
+ fn consume_bytes(&mut self, bytes: &[u8]) -> Result<()> {
+ match self.stage {
+ ParseStage::FootMagic => {
+ ensure!(bytes == MAGIC, MagicNotMatchedSnafu);
+ self.stage = ParseStage::Flags;
+ }
+ ParseStage::Flags => {
+ self.flags = Self::parse_flags(bytes)?;
+ self.stage = ParseStage::PayloadSize;
+ }
+ ParseStage::PayloadSize => {
+ self.payload_size = Self::parse_payload_size(bytes)?;
+ self.validate_payload_size()?;
+ self.stage = ParseStage::Payload;
+ }
+ ParseStage::Payload => {
+ self.metadata = Some(self.parse_payload(bytes)?);
+ self.validate_metadata()?;
+ self.stage = ParseStage::HeadMagic;
+ }
+ ParseStage::HeadMagic => {
+ ensure!(bytes == MAGIC, MagicNotMatchedSnafu);
+ self.stage = ParseStage::Done;
+ }
+ ParseStage::Done => unreachable!(),
+ }
+
+ Ok(())
+ }
+
+ /// Finalizes the parsing process, ensuring all stages are complete, and returns
+ /// the parsed `FileMetadata`. It converts the raw footer payload into structured data.
+ fn finish(self) -> Result<FileMetadata> {
+ ensure!(
+ self.stage == ParseStage::Done,
+ ParseStageNotMatchSnafu {
+ expected: format!("{:?}", ParseStage::Done),
+ actual: format!("{:?}", self.stage),
+ }
+ );
+
+ Ok(self.metadata.unwrap())
+ }
+
+ fn parse_flags(bytes: &[u8]) -> Result<Flags> {
+ let n = u32::from_le_bytes(bytes.try_into().context(BytesToIntegerSnafu)?);
+ Ok(Flags::from_bits_truncate(n))
+ }
+
+ fn parse_payload_size(bytes: &[u8]) -> Result<u64> {
+ let n = i32::from_le_bytes(bytes.try_into().context(BytesToIntegerSnafu)?);
+ ensure!(n >= 0, UnexpectedFooterPayloadSizeSnafu { size: n });
+ Ok(n as u64)
+ }
+
+ fn validate_payload_size(&self) -> Result<()> {
+ ensure!(
+ self.payload_size <= self.file_size - MIN_FILE_SIZE,
+ UnexpectedFooterPayloadSizeSnafu {
+ size: self.payload_size as i32
+ }
+ );
+ Ok(())
+ }
+
+ fn parse_payload(&self, bytes: &[u8]) -> Result<FileMetadata> {
+ // TODO(zhongzc): support lz4
+ ensure!(
+ !self.flags.contains(Flags::FOOTER_PAYLOAD_COMPRESSED_LZ4),
+ UnsupportedDecompressionSnafu {
+ decompression: "lz4"
+ }
+ );
+
+ serde_json::from_slice(bytes).context(DeserializeJsonSnafu)
+ }
+
+ fn validate_metadata(&self) -> Result<()> {
+ let metadata = self.metadata.as_ref().expect("metadata is not set");
+
+ let mut next_blob_offset = MAGIC_SIZE;
+ // check blob offsets
+ for blob in &metadata.blobs {
+ ensure!(
+ blob.offset as u64 == next_blob_offset,
+ InvalidBlobOffsetSnafu {
+ offset: blob.offset
+ }
+ );
+ next_blob_offset += blob.length as u64;
+ }
+
+ let blob_area_end = metadata
+ .blobs
+ .last()
+ .map_or(MAGIC_SIZE, |b| (b.offset + b.length) as u64);
+ ensure!(
+ blob_area_end == self.head_magic_offset(),
+ InvalidBlobAreaEndSnafu {
+ offset: blob_area_end
+ }
+ );
+
+ Ok(())
+ }
+
+ fn foot_magic_offset(&self) -> u64 {
+ self.file_size - MAGIC_SIZE
+ }
+
+ fn flags_offset(&self) -> u64 {
+ self.file_size - MAGIC_SIZE - FLAGS_SIZE
+ }
+
+ fn payload_size_offset(&self) -> u64 {
+ self.file_size - MAGIC_SIZE - FLAGS_SIZE - PAYLOAD_SIZE_SIZE
+ }
+
+ fn payload_offset(&self) -> u64 {
+ // `validate_payload_size` ensures that this subtraction will not overflow
+ self.file_size - MAGIC_SIZE - FLAGS_SIZE - PAYLOAD_SIZE_SIZE - self.payload_size
+ }
+
+ fn head_magic_offset(&self) -> u64 {
+ // `validate_payload_size` ensures that this subtraction will not overflow
+ self.file_size - MAGIC_SIZE * 2 - FLAGS_SIZE - PAYLOAD_SIZE_SIZE - self.payload_size
+ }
+}
diff --git a/src/puffin/src/lib.rs b/src/puffin/src/lib.rs
index 88a1d0254019..40c35057c275 100644
--- a/src/puffin/src/lib.rs
+++ b/src/puffin/src/lib.rs
@@ -13,5 +13,10 @@
// limitations under the License.
pub mod blob_metadata;
+pub mod error;
+pub mod file_format;
pub mod file_metadata;
pub mod partial_reader;
+
+#[cfg(test)]
+mod tests;
diff --git a/src/puffin/src/tests.rs b/src/puffin/src/tests.rs
new file mode 100644
index 000000000000..1d48ecd5656e
Binary files /dev/null and b/src/puffin/src/tests.rs differ
diff --git a/src/puffin/src/tests/resources/empty-puffin-uncompressed.puffin b/src/puffin/src/tests/resources/empty-puffin-uncompressed.puffin
new file mode 100644
index 000000000000..142b45bd4ebe
Binary files /dev/null and b/src/puffin/src/tests/resources/empty-puffin-uncompressed.puffin differ
diff --git a/src/puffin/src/tests/resources/sample-metric-data-uncompressed.puffin b/src/puffin/src/tests/resources/sample-metric-data-uncompressed.puffin
new file mode 100644
index 000000000000..ab8da13822c5
Binary files /dev/null and b/src/puffin/src/tests/resources/sample-metric-data-uncompressed.puffin differ
|
feat
|
add file reader (#2751)
|
8959dbcef83507ccd76aaaffd2b44cab6426e68f
|
2022-12-06 16:51:57
|
LFC
|
feat: Substrait logical plan (#704)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index abe4e0918ca3..33df1779b8e8 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1316,7 +1316,6 @@ dependencies = [
"serde",
"serde_json",
"snafu",
- "table",
"tempdir",
"tokio",
]
@@ -2458,6 +2457,7 @@ dependencies = [
"sql",
"sqlparser 0.15.0",
"store-api",
+ "substrait 0.1.0",
"table",
"tempdir",
"tokio",
@@ -3470,6 +3470,7 @@ version = "0.1.0"
dependencies = [
"api",
"async-trait",
+ "catalog",
"common-base",
"common-catalog",
"common-error",
@@ -6583,6 +6584,7 @@ version = "0.1.0"
dependencies = [
"async-trait",
"chrono",
+ "common-catalog",
"common-error",
"common-query",
"common-recordbatch",
diff --git a/src/common/catalog/src/helper.rs b/src/catalog/src/helper.rs
similarity index 98%
rename from src/common/catalog/src/helper.rs
rename to src/catalog/src/helper.rs
index dcfa08e8a7a0..2caf098865b8 100644
--- a/src/common/catalog/src/helper.rs
+++ b/src/catalog/src/helper.rs
@@ -15,18 +15,19 @@
use std::collections::HashMap;
use std::fmt::{Display, Formatter};
+use common_catalog::error::{
+ DeserializeCatalogEntryValueSnafu, Error, InvalidCatalogSnafu, SerializeCatalogEntryValueSnafu,
+};
use lazy_static::lazy_static;
use regex::Regex;
use serde::{Deserialize, Serialize, Serializer};
use snafu::{ensure, OptionExt, ResultExt};
use table::metadata::{RawTableInfo, TableId, TableVersion};
-use crate::consts::{
- CATALOG_KEY_PREFIX, SCHEMA_KEY_PREFIX, TABLE_GLOBAL_KEY_PREFIX, TABLE_REGIONAL_KEY_PREFIX,
-};
-use crate::error::{
- DeserializeCatalogEntryValueSnafu, Error, InvalidCatalogSnafu, SerializeCatalogEntryValueSnafu,
-};
+const CATALOG_KEY_PREFIX: &str = "__c";
+const SCHEMA_KEY_PREFIX: &str = "__s";
+const TABLE_GLOBAL_KEY_PREFIX: &str = "__tg";
+const TABLE_REGIONAL_KEY_PREFIX: &str = "__tr";
const ALPHANUMERICS_NAME_PATTERN: &str = "[a-zA-Z_][a-zA-Z0-9_]*";
diff --git a/src/catalog/src/lib.rs b/src/catalog/src/lib.rs
index fc7bb42b030a..d71a0c6d5b72 100644
--- a/src/catalog/src/lib.rs
+++ b/src/catalog/src/lib.rs
@@ -29,6 +29,7 @@ use crate::error::{CreateTableSnafu, Result};
pub use crate::schema::{SchemaProvider, SchemaProviderRef};
pub mod error;
+pub mod helper;
pub mod local;
pub mod remote;
pub mod schema;
diff --git a/src/catalog/src/remote/manager.rs b/src/catalog/src/remote/manager.rs
index ba7c09f6c004..c37acdc303be 100644
--- a/src/catalog/src/remote/manager.rs
+++ b/src/catalog/src/remote/manager.rs
@@ -20,10 +20,6 @@ use std::sync::Arc;
use arc_swap::ArcSwap;
use async_stream::stream;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MIN_USER_TABLE_ID};
-use common_catalog::{
- build_catalog_prefix, build_schema_prefix, build_table_global_prefix, CatalogKey, CatalogValue,
- SchemaKey, SchemaValue, TableGlobalKey, TableGlobalValue, TableRegionalKey, TableRegionalValue,
-};
use common_telemetry::{debug, info};
use futures::Stream;
use futures_util::StreamExt;
@@ -39,6 +35,10 @@ use crate::error::{
CatalogNotFoundSnafu, CreateTableSnafu, InvalidCatalogValueSnafu, InvalidTableSchemaSnafu,
OpenTableSnafu, Result, SchemaNotFoundSnafu, TableExistsSnafu, UnimplementedSnafu,
};
+use crate::helper::{
+ build_catalog_prefix, build_schema_prefix, build_table_global_prefix, CatalogKey, CatalogValue,
+ SchemaKey, SchemaValue, TableGlobalKey, TableGlobalValue, TableRegionalKey, TableRegionalValue,
+};
use crate::remote::{Kv, KvBackendRef};
use crate::{
handle_system_table_request, CatalogList, CatalogManager, CatalogProvider, CatalogProviderRef,
diff --git a/src/catalog/tests/remote_catalog_tests.rs b/src/catalog/tests/remote_catalog_tests.rs
index e5d8811e71d9..9903b8ff8570 100644
--- a/src/catalog/tests/remote_catalog_tests.rs
+++ b/src/catalog/tests/remote_catalog_tests.rs
@@ -22,12 +22,12 @@ mod tests {
use std::collections::HashSet;
use std::sync::Arc;
+ use catalog::helper::{CatalogKey, CatalogValue, SchemaKey, SchemaValue};
use catalog::remote::{
KvBackend, KvBackendRef, RemoteCatalogManager, RemoteCatalogProvider, RemoteSchemaProvider,
};
use catalog::{CatalogList, CatalogManager, RegisterTableRequest};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
- use common_catalog::{CatalogKey, CatalogValue, SchemaKey, SchemaValue};
use datatypes::schema::Schema;
use futures_util::StreamExt;
use table::engine::{EngineContext, TableEngineRef};
diff --git a/src/common/catalog/Cargo.toml b/src/common/catalog/Cargo.toml
index 5df337479cb9..b18c561caa3f 100644
--- a/src/common/catalog/Cargo.toml
+++ b/src/common/catalog/Cargo.toml
@@ -14,7 +14,6 @@ regex = "1.6"
serde = "1.0"
serde_json = "1.0"
snafu = { version = "0.7", features = ["backtraces"] }
-table = { path = "../../table" }
[dev-dependencies]
chrono = "0.4"
diff --git a/src/common/catalog/src/consts.rs b/src/common/catalog/src/consts.rs
index 775cddcb42a5..118c53930b3f 100644
--- a/src/common/catalog/src/consts.rs
+++ b/src/common/catalog/src/consts.rs
@@ -25,9 +25,3 @@ pub const MIN_USER_TABLE_ID: u32 = 1024;
pub const SYSTEM_CATALOG_TABLE_ID: u32 = 0;
/// scripts table id
pub const SCRIPTS_TABLE_ID: u32 = 1;
-
-pub(crate) const CATALOG_KEY_PREFIX: &str = "__c";
-pub(crate) const SCHEMA_KEY_PREFIX: &str = "__s";
-pub(crate) const TABLE_GLOBAL_KEY_PREFIX: &str = "__tg";
-pub(crate) const TABLE_REGIONAL_KEY_PREFIX: &str = "__tr";
-pub const TABLE_ID_KEY_PREFIX: &str = "__tid";
diff --git a/src/common/catalog/src/lib.rs b/src/common/catalog/src/lib.rs
index 30e01900b3f4..841420c21992 100644
--- a/src/common/catalog/src/lib.rs
+++ b/src/common/catalog/src/lib.rs
@@ -14,10 +14,3 @@
pub mod consts;
pub mod error;
-mod helper;
-
-pub use helper::{
- build_catalog_prefix, build_schema_prefix, build_table_global_prefix,
- build_table_regional_prefix, CatalogKey, CatalogValue, SchemaKey, SchemaValue, TableGlobalKey,
- TableGlobalValue, TableRegionalKey, TableRegionalValue,
-};
diff --git a/src/common/substrait/src/context.rs b/src/common/substrait/src/context.rs
index 893546ea4815..b017e9cc9aa9 100644
--- a/src/common/substrait/src/context.rs
+++ b/src/common/substrait/src/context.rs
@@ -14,6 +14,7 @@
use std::collections::HashMap;
+use datafusion::logical_plan::DFSchemaRef;
use substrait_proto::protobuf::extensions::simple_extension_declaration::{
ExtensionFunction, MappingType,
};
@@ -23,6 +24,7 @@ use substrait_proto::protobuf::extensions::SimpleExtensionDeclaration;
pub struct ConvertorContext {
scalar_fn_names: HashMap<String, u32>,
scalar_fn_map: HashMap<u32, String>,
+ df_schema: Option<DFSchemaRef>,
}
impl ConvertorContext {
@@ -63,4 +65,13 @@ impl ConvertorContext {
}
result
}
+
+ pub(crate) fn set_df_schema(&mut self, schema: DFSchemaRef) {
+ debug_assert!(self.df_schema.is_none());
+ self.df_schema.get_or_insert(schema);
+ }
+
+ pub(crate) fn df_schema(&self) -> Option<&DFSchemaRef> {
+ self.df_schema.as_ref()
+ }
}
diff --git a/src/common/substrait/src/df_expr.rs b/src/common/substrait/src/df_expr.rs
index 8267fa9cc10f..d924e7b08508 100644
--- a/src/common/substrait/src/df_expr.rs
+++ b/src/common/substrait/src/df_expr.rs
@@ -16,7 +16,7 @@ use std::collections::VecDeque;
use std::str::FromStr;
use datafusion::logical_plan::{Column, Expr};
-use datafusion_expr::{expr_fn, BuiltinScalarFunction, Operator};
+use datafusion_expr::{expr_fn, lit, BuiltinScalarFunction, Operator};
use datatypes::schema::Schema;
use snafu::{ensure, OptionExt};
use substrait_proto::protobuf::expression::field_reference::ReferenceType as FieldReferenceType;
@@ -24,7 +24,7 @@ use substrait_proto::protobuf::expression::reference_segment::{
ReferenceType as SegReferenceType, StructField,
};
use substrait_proto::protobuf::expression::{
- FieldReference, ReferenceSegment, RexType, ScalarFunction,
+ FieldReference, Literal, ReferenceSegment, RexType, ScalarFunction,
};
use substrait_proto::protobuf::function_argument::ArgType;
use substrait_proto::protobuf::Expression;
@@ -33,15 +33,24 @@ use crate::context::ConvertorContext;
use crate::error::{
EmptyExprSnafu, InvalidParametersSnafu, MissingFieldSnafu, Result, UnsupportedExprSnafu,
};
+use crate::types::{literal_type_to_scalar_value, scalar_value_as_literal_type};
/// Convert substrait's `Expression` to DataFusion's `Expr`.
-pub fn to_df_expr(ctx: &ConvertorContext, expression: Expression, schema: &Schema) -> Result<Expr> {
+pub(crate) fn to_df_expr(
+ ctx: &ConvertorContext,
+ expression: Expression,
+ schema: &Schema,
+) -> Result<Expr> {
let expr_rex_type = expression.rex_type.context(EmptyExprSnafu)?;
match expr_rex_type {
- RexType::Literal(_) => UnsupportedExprSnafu {
- name: "substrait Literal expression",
+ RexType::Literal(l) => {
+ let t = l.literal_type.context(MissingFieldSnafu {
+ field: "LiteralType",
+ plan: "Literal",
+ })?;
+ let v = literal_type_to_scalar_value(t)?;
+ Ok(lit(v))
}
- .fail()?,
RexType::Selection(selection) => convert_selection_rex(*selection, schema),
RexType::ScalarFunction(scalar_fn) => convert_scalar_function(ctx, scalar_fn, schema),
RexType::WindowFunction(_)
@@ -453,10 +462,21 @@ pub fn expression_from_df_expr(
}
}
// Don't merge them with other unsupported expr arms to preserve the ordering.
- Expr::ScalarVariable(..) | Expr::Literal(..) => UnsupportedExprSnafu {
+ Expr::ScalarVariable(..) => UnsupportedExprSnafu {
name: expr.to_string(),
}
.fail()?,
+ Expr::Literal(v) => {
+ let t = scalar_value_as_literal_type(v)?;
+ let l = Literal {
+ nullable: true,
+ type_variation_reference: 0,
+ literal_type: Some(t),
+ };
+ Expression {
+ rex_type: Some(RexType::Literal(l)),
+ }
+ }
Expr::BinaryExpr { left, op, right } => {
let left = expression_from_df_expr(ctx, left, schema)?;
let right = expression_from_df_expr(ctx, right, schema)?;
diff --git a/src/common/substrait/src/df_logical.rs b/src/common/substrait/src/df_logical.rs
index 8d53ef1b089b..81909cf38d26 100644
--- a/src/common/substrait/src/df_logical.rs
+++ b/src/common/substrait/src/df_logical.rs
@@ -18,7 +18,9 @@ use bytes::{Buf, Bytes, BytesMut};
use catalog::CatalogManagerRef;
use common_error::prelude::BoxedError;
use common_telemetry::debug;
+use datafusion::arrow::datatypes::SchemaRef as ArrowSchemaRef;
use datafusion::datasource::TableProvider;
+use datafusion::logical_plan::plan::Filter;
use datafusion::logical_plan::{LogicalPlan, TableScan, ToDFSchema};
use datafusion::physical_plan::project_schema;
use prost::Message;
@@ -29,31 +31,33 @@ use substrait_proto::protobuf::extensions::simple_extension_declaration::Mapping
use substrait_proto::protobuf::plan_rel::RelType as PlanRelType;
use substrait_proto::protobuf::read_rel::{NamedTable, ReadType};
use substrait_proto::protobuf::rel::RelType;
-use substrait_proto::protobuf::{Plan, PlanRel, ReadRel, Rel};
+use substrait_proto::protobuf::{FilterRel, Plan, PlanRel, ReadRel, Rel};
use table::table::adapter::DfTableProviderAdapter;
use crate::context::ConvertorContext;
use crate::df_expr::{expression_from_df_expr, to_df_expr};
use crate::error::{
- DFInternalSnafu, DecodeRelSnafu, EmptyPlanSnafu, EncodeRelSnafu, Error, InternalSnafu,
+ self, DFInternalSnafu, DecodeRelSnafu, EmptyPlanSnafu, EncodeRelSnafu, Error, InternalSnafu,
InvalidParametersSnafu, MissingFieldSnafu, SchemaNotMatchSnafu, TableNotFoundSnafu,
UnknownPlanSnafu, UnsupportedExprSnafu, UnsupportedPlanSnafu,
};
use crate::schema::{from_schema, to_schema};
use crate::SubstraitPlan;
-pub struct DFLogicalSubstraitConvertor {
- catalog_manager: CatalogManagerRef,
-}
+pub struct DFLogicalSubstraitConvertor;
impl SubstraitPlan for DFLogicalSubstraitConvertor {
type Error = Error;
type Plan = LogicalPlan;
- fn decode<B: Buf + Send>(&self, message: B) -> Result<Self::Plan, Self::Error> {
+ fn decode<B: Buf + Send>(
+ &self,
+ message: B,
+ catalog_manager: CatalogManagerRef,
+ ) -> Result<Self::Plan, Self::Error> {
let plan = Plan::decode(message).context(DecodeRelSnafu)?;
- self.convert_plan(plan)
+ self.convert_plan(plan, catalog_manager)
}
fn encode(&self, plan: Self::Plan) -> Result<Bytes, Self::Error> {
@@ -67,13 +71,11 @@ impl SubstraitPlan for DFLogicalSubstraitConvertor {
}
impl DFLogicalSubstraitConvertor {
- pub fn new(catalog_manager: CatalogManagerRef) -> Self {
- Self { catalog_manager }
- }
-}
-
-impl DFLogicalSubstraitConvertor {
- pub fn convert_plan(&self, mut plan: Plan) -> Result<LogicalPlan, Error> {
+ fn convert_plan(
+ &self,
+ mut plan: Plan,
+ catalog_manager: CatalogManagerRef,
+ ) -> Result<LogicalPlan, Error> {
// prepare convertor context
let mut ctx = ConvertorContext::default();
for simple_ext in plan.extensions {
@@ -99,15 +101,51 @@ impl DFLogicalSubstraitConvertor {
}
.fail()?
};
+
+ self.rel_to_logical_plan(&mut ctx, Box::new(rel), catalog_manager)
+ }
+
+ fn rel_to_logical_plan(
+ &self,
+ ctx: &mut ConvertorContext,
+ rel: Box<Rel>,
+ catalog_manager: CatalogManagerRef,
+ ) -> Result<LogicalPlan, Error> {
let rel_type = rel.rel_type.context(EmptyPlanSnafu)?;
// build logical plan
let logical_plan = match rel_type {
- RelType::Read(read_rel) => self.convert_read_rel(&mut ctx, read_rel),
- RelType::Filter(_filter_rel) => UnsupportedPlanSnafu {
- name: "Filter Relation",
+ RelType::Read(read_rel) => self.convert_read_rel(ctx, read_rel, catalog_manager)?,
+ RelType::Filter(filter) => {
+ let FilterRel {
+ common: _,
+ input,
+ condition,
+ advanced_extension: _,
+ } = *filter;
+
+ let input = input.context(MissingFieldSnafu {
+ field: "input",
+ plan: "Filter",
+ })?;
+ let input = Arc::new(self.rel_to_logical_plan(ctx, input, catalog_manager)?);
+
+ let condition = condition.context(MissingFieldSnafu {
+ field: "condition",
+ plan: "Filter",
+ })?;
+
+ let schema = ctx.df_schema().context(InvalidParametersSnafu {
+ reason: "the underlying TableScan plan should have included a table schema",
+ })?;
+ let schema = schema
+ .clone()
+ .try_into()
+ .context(error::ConvertDfSchemaSnafu)?;
+ let predicate = to_df_expr(ctx, *condition, &schema)?;
+
+ LogicalPlan::Filter(Filter { predicate, input })
}
- .fail()?,
RelType::Fetch(_fetch_rel) => UnsupportedPlanSnafu {
name: "Fetch Relation",
}
@@ -148,7 +186,7 @@ impl DFLogicalSubstraitConvertor {
name: "Cross Relation",
}
.fail()?,
- }?;
+ };
Ok(logical_plan)
}
@@ -157,6 +195,7 @@ impl DFLogicalSubstraitConvertor {
&self,
ctx: &mut ConvertorContext,
read_rel: Box<ReadRel>,
+ catalog_manager: CatalogManagerRef,
) -> Result<LogicalPlan, Error> {
// Extract the catalog, schema and table name from NamedTable. Assume the first three are those names.
let read_type = read_rel.read_type.context(MissingFieldSnafu {
@@ -192,8 +231,7 @@ impl DFLogicalSubstraitConvertor {
.map(|mask_expr| self.convert_mask_expression(mask_expr));
// Get table handle from catalog manager
- let table_ref = self
- .catalog_manager
+ let table_ref = catalog_manager
.table(&catalog_name, &schema_name, &table_name)
.map_err(BoxedError::new)
.context(InternalSnafu)?
@@ -207,7 +245,7 @@ impl DFLogicalSubstraitConvertor {
let retrieved_schema = to_schema(read_rel.base_schema.unwrap_or_default())?;
let retrieved_arrow_schema = retrieved_schema.arrow_schema();
ensure!(
- stored_schema.fields == retrieved_arrow_schema.fields,
+ same_schema_without_metadata(&stored_schema, retrieved_arrow_schema),
SchemaNotMatchSnafu {
substrait_schema: retrieved_arrow_schema.clone(),
storage_schema: stored_schema
@@ -227,9 +265,11 @@ impl DFLogicalSubstraitConvertor {
.to_dfschema_ref()
.context(DFInternalSnafu)?;
- // TODO(ruihang): Support filters and limit
+ ctx.set_df_schema(projected_schema.clone());
+
+ // TODO(ruihang): Support limit
Ok(LogicalPlan::TableScan(TableScan {
- table_name,
+ table_name: format!("{}.{}.{}", catalog_name, schema_name, table_name),
source: adapter,
projection,
projected_schema,
@@ -250,20 +290,42 @@ impl DFLogicalSubstraitConvertor {
}
impl DFLogicalSubstraitConvertor {
- pub fn convert_df_plan(&self, plan: LogicalPlan) -> Result<Plan, Error> {
- let mut ctx = ConvertorContext::default();
-
- // TODO(ruihang): extract this translation logic into a separated function
- // convert PlanRel
- let rel = match plan {
+ fn logical_plan_to_rel(
+ &self,
+ ctx: &mut ConvertorContext,
+ plan: Arc<LogicalPlan>,
+ ) -> Result<Rel, Error> {
+ Ok(match &*plan {
LogicalPlan::Projection(_) => UnsupportedPlanSnafu {
name: "DataFusion Logical Projection",
}
.fail()?,
- LogicalPlan::Filter(_) => UnsupportedPlanSnafu {
- name: "DataFusion Logical Filter",
+ LogicalPlan::Filter(filter) => {
+ let input = Some(Box::new(
+ self.logical_plan_to_rel(ctx, filter.input.clone())?,
+ ));
+
+ let schema = plan
+ .schema()
+ .clone()
+ .try_into()
+ .context(error::ConvertDfSchemaSnafu)?;
+ let condition = Some(Box::new(expression_from_df_expr(
+ ctx,
+ &filter.predicate,
+ &schema,
+ )?));
+
+ let rel = FilterRel {
+ common: None,
+ input,
+ condition,
+ advanced_extension: None,
+ };
+ Rel {
+ rel_type: Some(RelType::Filter(Box::new(rel))),
+ }
}
- .fail()?,
LogicalPlan::Window(_) => UnsupportedPlanSnafu {
name: "DataFusion Logical Window",
}
@@ -293,7 +355,7 @@ impl DFLogicalSubstraitConvertor {
}
.fail()?,
LogicalPlan::TableScan(table_scan) => {
- let read_rel = self.convert_table_scan_plan(&mut ctx, table_scan)?;
+ let read_rel = self.convert_table_scan_plan(ctx, table_scan)?;
Rel {
rel_type: Some(RelType::Read(Box::new(read_rel))),
}
@@ -319,7 +381,13 @@ impl DFLogicalSubstraitConvertor {
),
}
.fail()?,
- };
+ })
+ }
+
+ fn convert_df_plan(&self, plan: LogicalPlan) -> Result<Plan, Error> {
+ let mut ctx = ConvertorContext::default();
+
+ let rel = self.logical_plan_to_rel(&mut ctx, Arc::new(plan))?;
// convert extension
let extensions = ctx.generate_function_extension();
@@ -341,7 +409,7 @@ impl DFLogicalSubstraitConvertor {
pub fn convert_table_scan_plan(
&self,
ctx: &mut ConvertorContext,
- table_scan: TableScan,
+ table_scan: &TableScan,
) -> Result<ReadRel, Error> {
let provider = table_scan
.source
@@ -363,7 +431,8 @@ impl DFLogicalSubstraitConvertor {
// assemble projection
let projection = table_scan
.projection
- .map(|proj| self.convert_schema_projection(&proj));
+ .as_ref()
+ .map(|x| self.convert_schema_projection(x));
// assemble base (unprojected) schema using Table's schema.
let base_schema = from_schema(&provider.table().schema())?;
@@ -371,7 +440,8 @@ impl DFLogicalSubstraitConvertor {
// make conjunction over a list of filters and convert the result to substrait
let filter = if let Some(conjunction) = table_scan
.filters
- .into_iter()
+ .iter()
+ .cloned()
.reduce(|accum, expr| accum.and(expr))
{
Some(Box::new(expression_from_df_expr(
@@ -412,6 +482,13 @@ impl DFLogicalSubstraitConvertor {
}
}
+fn same_schema_without_metadata(lhs: &ArrowSchemaRef, rhs: &ArrowSchemaRef) -> bool {
+ lhs.fields.len() == rhs.fields.len()
+ && lhs.fields.iter().zip(rhs.fields.iter()).all(|(x, y)| {
+ x.name == y.name && x.data_type == y.data_type && x.is_nullable == y.is_nullable
+ })
+}
+
#[cfg(test)]
mod test {
use catalog::local::{LocalCatalogManager, MemoryCatalogProvider, MemorySchemaProvider};
@@ -463,10 +540,10 @@ mod test {
}
async fn logical_plan_round_trip(plan: LogicalPlan, catalog: CatalogManagerRef) {
- let convertor = DFLogicalSubstraitConvertor::new(catalog);
+ let convertor = DFLogicalSubstraitConvertor;
let proto = convertor.encode(plan.clone()).unwrap();
- let tripped_plan = convertor.decode(proto).unwrap();
+ let tripped_plan = convertor.decode(proto, catalog).unwrap();
assert_eq!(format!("{:?}", plan), format!("{:?}", tripped_plan));
}
@@ -488,6 +565,7 @@ mod test {
.await
.unwrap();
let adapter = Arc::new(DfTableProviderAdapter::new(table_ref));
+
let projection = vec![1, 3, 5];
let df_schema = adapter.schema().to_dfschema().unwrap();
let projected_fields = projection
@@ -498,7 +576,10 @@ mod test {
Arc::new(DFSchema::new_with_metadata(projected_fields, Default::default()).unwrap());
let table_scan_plan = LogicalPlan::TableScan(TableScan {
- table_name: DEFAULT_TABLE_NAME.to_string(),
+ table_name: format!(
+ "{}.{}.{}",
+ DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, DEFAULT_TABLE_NAME
+ ),
source: adapter,
projection: Some(projection),
projected_schema,
diff --git a/src/common/substrait/src/error.rs b/src/common/substrait/src/error.rs
index c33b3679fbac..4455e9231c17 100644
--- a/src/common/substrait/src/error.rs
+++ b/src/common/substrait/src/error.rs
@@ -99,6 +99,12 @@ pub enum Error {
storage_schema: datafusion::arrow::datatypes::SchemaRef,
backtrace: Backtrace,
},
+
+ #[snafu(display("Failed to convert DataFusion schema, source: {}", source))]
+ ConvertDfSchema {
+ #[snafu(backtrace)]
+ source: datatypes::error::Error,
+ },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -120,6 +126,7 @@ impl ErrorExt for Error {
| Error::TableNotFound { .. }
| Error::SchemaNotMatch { .. } => StatusCode::InvalidArguments,
Error::DFInternal { .. } | Error::Internal { .. } => StatusCode::Internal,
+ Error::ConvertDfSchema { source } => source.status_code(),
}
}
diff --git a/src/common/substrait/src/lib.rs b/src/common/substrait/src/lib.rs
index c318799a3b02..04c5e8277199 100644
--- a/src/common/substrait/src/lib.rs
+++ b/src/common/substrait/src/lib.rs
@@ -22,6 +22,7 @@ mod schema;
mod types;
use bytes::{Buf, Bytes};
+use catalog::CatalogManagerRef;
pub use crate::df_logical::DFLogicalSubstraitConvertor;
@@ -30,7 +31,11 @@ pub trait SubstraitPlan {
type Plan;
- fn decode<B: Buf + Send>(&self, message: B) -> Result<Self::Plan, Self::Error>;
+ fn decode<B: Buf + Send>(
+ &self,
+ message: B,
+ catalog_manager: CatalogManagerRef,
+ ) -> Result<Self::Plan, Self::Error>;
fn encode(&self, plan: Self::Plan) -> Result<Bytes, Self::Error>;
}
diff --git a/src/common/substrait/src/types.rs b/src/common/substrait/src/types.rs
index fd4cc34fbec5..d1033c7a3e6b 100644
--- a/src/common/substrait/src/types.rs
+++ b/src/common/substrait/src/types.rs
@@ -18,11 +18,13 @@
//! Currently we only have variations on integer types. Variation 0 (system preferred) is the same as the base types, which
//! are signed integers (i.e. I8 -> [i8]), and Variation 1 stands for unsigned integers (i.e. I8 -> [u8]).
+use datafusion::scalar::ScalarValue;
use datatypes::prelude::ConcreteDataType;
+use substrait_proto::protobuf::expression::literal::LiteralType;
use substrait_proto::protobuf::r#type::{self as s_type, Kind, Nullability};
-use substrait_proto::protobuf::Type as SType;
+use substrait_proto::protobuf::{Type as SType, Type};
-use crate::error::{Result, UnsupportedConcreteTypeSnafu, UnsupportedSubstraitTypeSnafu};
+use crate::error::{self, Result, UnsupportedConcreteTypeSnafu, UnsupportedSubstraitTypeSnafu};
macro_rules! substrait_kind {
($desc:ident, $concrete_ty:ident) => {{
@@ -134,3 +136,67 @@ pub fn from_concrete_type(ty: ConcreteDataType, nullability: Option<bool>) -> Re
Ok(SType { kind })
}
+
+pub(crate) fn scalar_value_as_literal_type(v: &ScalarValue) -> Result<LiteralType> {
+ Ok(if v.is_null() {
+ LiteralType::Null(Type { kind: None })
+ } else {
+ match v {
+ ScalarValue::Boolean(Some(v)) => LiteralType::Boolean(*v),
+ ScalarValue::Float32(Some(v)) => LiteralType::Fp32(*v),
+ ScalarValue::Float64(Some(v)) => LiteralType::Fp64(*v),
+ ScalarValue::Int8(Some(v)) => LiteralType::I8(*v as i32),
+ ScalarValue::Int16(Some(v)) => LiteralType::I16(*v as i32),
+ ScalarValue::Int32(Some(v)) => LiteralType::I32(*v),
+ ScalarValue::Int64(Some(v)) => LiteralType::I64(*v),
+ ScalarValue::LargeUtf8(Some(v)) => LiteralType::String(v.clone()),
+ ScalarValue::LargeBinary(Some(v)) => LiteralType::Binary(v.clone()),
+ // TODO(LFC): Implement other conversions: ScalarValue => LiteralType
+ _ => {
+ return error::UnsupportedExprSnafu {
+ name: format!("{:?}", v),
+ }
+ .fail()
+ }
+ }
+ })
+}
+
+pub(crate) fn literal_type_to_scalar_value(t: LiteralType) -> Result<ScalarValue> {
+ Ok(match t {
+ LiteralType::Null(Type { kind: Some(kind) }) => match kind {
+ Kind::Bool(_) => ScalarValue::Boolean(None),
+ Kind::I8(_) => ScalarValue::Int8(None),
+ Kind::I16(_) => ScalarValue::Int16(None),
+ Kind::I32(_) => ScalarValue::Int32(None),
+ Kind::I64(_) => ScalarValue::Int64(None),
+ Kind::Fp32(_) => ScalarValue::Float32(None),
+ Kind::Fp64(_) => ScalarValue::Float64(None),
+ Kind::String(_) => ScalarValue::LargeUtf8(None),
+ Kind::Binary(_) => ScalarValue::LargeBinary(None),
+ // TODO(LFC): Implement other conversions: Kind => ScalarValue
+ _ => {
+ return error::UnsupportedSubstraitTypeSnafu {
+ ty: format!("{:?}", kind),
+ }
+ .fail()
+ }
+ },
+ LiteralType::Boolean(v) => ScalarValue::Boolean(Some(v)),
+ LiteralType::I8(v) => ScalarValue::Int8(Some(v as i8)),
+ LiteralType::I16(v) => ScalarValue::Int16(Some(v as i16)),
+ LiteralType::I32(v) => ScalarValue::Int32(Some(v)),
+ LiteralType::I64(v) => ScalarValue::Int64(Some(v)),
+ LiteralType::Fp32(v) => ScalarValue::Float32(Some(v)),
+ LiteralType::Fp64(v) => ScalarValue::Float64(Some(v)),
+ LiteralType::String(v) => ScalarValue::LargeUtf8(Some(v)),
+ LiteralType::Binary(v) => ScalarValue::LargeBinary(Some(v)),
+ // TODO(LFC): Implement other conversions: LiteralType => ScalarValue
+ _ => {
+ return error::UnsupportedSubstraitTypeSnafu {
+ ty: format!("{:?}", t),
+ }
+ .fail()
+ }
+ })
+}
diff --git a/src/datanode/src/instance/grpc.rs b/src/datanode/src/instance/grpc.rs
index ddc03a64369a..41998c15901d 100644
--- a/src/datanode/src/instance/grpc.rs
+++ b/src/datanode/src/instance/grpc.rs
@@ -151,9 +151,8 @@ impl Instance {
}
async fn execute_logical(&self, plan_bytes: Vec<u8>) -> Result<Output> {
- let logical_plan_converter = DFLogicalSubstraitConvertor::new(self.catalog_manager.clone());
- let logical_plan = logical_plan_converter
- .decode(plan_bytes.as_slice())
+ let logical_plan = DFLogicalSubstraitConvertor
+ .decode(plan_bytes.as_slice(), self.catalog_manager.clone())
.context(DecodeLogicalPlanSnafu)?;
self.query_engine
diff --git a/src/datatypes/src/schema.rs b/src/datatypes/src/schema.rs
index a1792fd665c5..e3a5661dfd38 100644
--- a/src/datatypes/src/schema.rs
+++ b/src/datatypes/src/schema.rs
@@ -20,6 +20,7 @@ use std::sync::Arc;
pub use arrow::datatypes::Metadata;
use arrow::datatypes::{Field, Schema as ArrowSchema};
+use datafusion_common::DFSchemaRef;
use serde::{Deserialize, Serialize};
use snafu::{ensure, ResultExt};
@@ -465,6 +466,15 @@ impl TryFrom<ArrowSchema> for Schema {
}
}
+impl TryFrom<DFSchemaRef> for Schema {
+ type Error = Error;
+
+ fn try_from(value: DFSchemaRef) -> Result<Self> {
+ let s: ArrowSchema = value.as_ref().into();
+ s.try_into()
+ }
+}
+
fn try_parse_version(metadata: &Metadata, key: &str) -> Result<u32> {
if let Some(value) = metadata.get(key) {
let version = value
diff --git a/src/frontend/Cargo.toml b/src/frontend/Cargo.toml
index a269983db88e..5e3eee4b946c 100644
--- a/src/frontend/Cargo.toml
+++ b/src/frontend/Cargo.toml
@@ -45,6 +45,7 @@ snafu = { version = "0.7", features = ["backtraces"] }
sql = { path = "../sql" }
sqlparser = "0.15"
store-api = { path = "../store-api" }
+substrait = { path = "../common/substrait" }
table = { path = "../table" }
tokio = { version = "1.18", features = ["full"] }
diff --git a/src/frontend/src/catalog.rs b/src/frontend/src/catalog.rs
index 86356db08cbf..aea667367f6b 100644
--- a/src/frontend/src/catalog.rs
+++ b/src/frontend/src/catalog.rs
@@ -17,13 +17,16 @@ use std::collections::HashSet;
use std::sync::Arc;
use catalog::error::{self as catalog_err, InvalidCatalogValueSnafu};
+use catalog::helper::{
+ build_catalog_prefix, build_schema_prefix, build_table_global_prefix, CatalogKey, SchemaKey,
+ TableGlobalKey, TableGlobalValue,
+};
use catalog::remote::{Kv, KvBackendRef};
use catalog::{
CatalogList, CatalogManager, CatalogProvider, CatalogProviderRef, DeregisterTableRequest,
RegisterSchemaRequest, RegisterSystemTableRequest, RegisterTableRequest, SchemaProvider,
SchemaProviderRef,
};
-use common_catalog::{CatalogKey, SchemaKey, TableGlobalKey, TableGlobalValue};
use futures::StreamExt;
use meta_client::rpc::TableName;
use snafu::prelude::*;
@@ -130,7 +133,7 @@ impl CatalogList for FrontendCatalogManager {
let backend = self.backend.clone();
let res = std::thread::spawn(|| {
common_runtime::block_on_read(async move {
- let key = common_catalog::build_catalog_prefix();
+ let key = build_catalog_prefix();
let mut iter = backend.range(key.as_bytes());
let mut res = HashSet::new();
@@ -180,7 +183,7 @@ impl CatalogProvider for FrontendCatalogProvider {
let catalog_name = self.catalog_name.clone();
let res = std::thread::spawn(|| {
common_runtime::block_on_read(async move {
- let key = common_catalog::build_schema_prefix(&catalog_name);
+ let key = build_schema_prefix(&catalog_name);
let mut iter = backend.range(key.as_bytes());
let mut res = HashSet::new();
@@ -242,7 +245,7 @@ impl SchemaProvider for FrontendSchemaProvider {
std::thread::spawn(|| {
common_runtime::block_on_read(async move {
- let key = common_catalog::build_table_global_prefix(catalog_name, schema_name);
+ let key = build_table_global_prefix(catalog_name, schema_name);
let mut iter = backend.range(key.as_bytes());
let mut res = HashSet::new();
diff --git a/src/frontend/src/error.rs b/src/frontend/src/error.rs
index 9b6275c7bff0..823ce693ceed 100644
--- a/src/frontend/src/error.rs
+++ b/src/frontend/src/error.rs
@@ -445,6 +445,12 @@ pub enum Error {
#[snafu(display("Table already exists: `{}`", table))]
TableAlreadyExist { table: String, backtrace: Backtrace },
+
+ #[snafu(display("Failed to encode Substrait logical plan, source: {}", source))]
+ EncodeSubstraitLogicalPlan {
+ #[snafu(backtrace)]
+ source: substrait::error::Error,
+ },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -536,6 +542,7 @@ impl ErrorExt for Error {
Error::AlterExprToRequest { source, .. } => source.status_code(),
Error::LeaderNotFound { .. } => StatusCode::StorageUnavailable,
Error::TableAlreadyExist { .. } => StatusCode::TableAlreadyExists,
+ Error::EncodeSubstraitLogicalPlan { source } => source.status_code(),
}
}
diff --git a/src/frontend/src/instance/distributed.rs b/src/frontend/src/instance/distributed.rs
index a96f8170350e..d32e12ee24d4 100644
--- a/src/frontend/src/instance/distributed.rs
+++ b/src/frontend/src/instance/distributed.rs
@@ -17,11 +17,11 @@ use std::sync::Arc;
use api::helper::ColumnDataTypeWrapper;
use api::v1::{AlterExpr, CreateDatabaseExpr, CreateExpr};
+use catalog::helper::{SchemaKey, SchemaValue, TableGlobalKey, TableGlobalValue};
use catalog::CatalogList;
use chrono::DateTime;
use client::admin::{admin_result_to_output, Admin};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
-use common_catalog::{SchemaKey, SchemaValue, TableGlobalKey, TableGlobalValue};
use common_query::Output;
use common_telemetry::{debug, error, info};
use datatypes::prelude::ConcreteDataType;
diff --git a/src/frontend/src/table.rs b/src/frontend/src/table.rs
index 8f97ba12f70f..36d229a24512 100644
--- a/src/frontend/src/table.rs
+++ b/src/frontend/src/table.rs
@@ -818,9 +818,9 @@ mod test {
async fn new_dist_table() -> DistTable {
let column_schemas = vec![
- ColumnSchema::new("ts", ConcreteDataType::uint64_datatype(), false),
- ColumnSchema::new("a", ConcreteDataType::int32_datatype(), true),
- ColumnSchema::new("row_id", ConcreteDataType::uint32_datatype(), true),
+ ColumnSchema::new("ts", ConcreteDataType::int64_datatype(), false),
+ ColumnSchema::new("a", ConcreteDataType::int32_datatype(), false),
+ ColumnSchema::new("row_id", ConcreteDataType::int32_datatype(), false),
];
let schema = Arc::new(Schema::new(column_schemas.clone()));
diff --git a/src/frontend/src/table/scan.rs b/src/frontend/src/table/scan.rs
index 1919dc0fb6c7..14ea9a6a93df 100644
--- a/src/frontend/src/table/scan.rs
+++ b/src/frontend/src/table/scan.rs
@@ -16,17 +16,14 @@ use std::fmt::Formatter;
use std::sync::Arc;
use api::v1::InsertExpr;
-use client::{Database, ObjectResult, Select};
+use client::{Database, ObjectResult};
use common_query::prelude::Expr;
use common_query::Output;
use common_recordbatch::{util, RecordBatches};
-use datafusion::logical_plan::{LogicalPlan as DfLogicPlan, LogicalPlanBuilder};
-use datafusion_expr::Expr as DfExpr;
-use datatypes::prelude::*;
-use datatypes::schema::SchemaRef;
+use datafusion::logical_plan::{LogicalPlan, LogicalPlanBuilder};
use meta_client::rpc::TableName;
-use query::plan::LogicalPlan;
use snafu::ResultExt;
+use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};
use table::table::adapter::DfTableProviderAdapter;
use table::TableRef;
@@ -56,12 +53,13 @@ impl DatanodeInstance {
pub(crate) async fn grpc_table_scan(&self, plan: TableScanPlan) -> Result<RecordBatches> {
let logical_plan = self.build_logical_plan(&plan)?;
- // TODO(LFC): Directly pass in logical plan to GRPC interface when our substrait codec supports filter.
- let sql = to_sql(logical_plan)?;
+ let substrait_plan = DFLogicalSubstraitConvertor
+ .encode(logical_plan)
+ .context(error::EncodeSubstraitLogicalPlanSnafu)?;
let output = self
.db
- .select(Select::Sql(sql))
+ .logical_plan(substrait_plan.to_vec())
.await
.and_then(Output::try_from)
.context(error::SelectSnafu)?;
@@ -94,14 +92,25 @@ impl DatanodeInstance {
)
.context(error::BuildDfLogicalPlanSnafu)?;
+ if let Some(filter) = table_scan
+ .filters
+ .iter()
+ .map(|x| x.df_expr())
+ .cloned()
+ .reduce(|accum, expr| accum.and(expr))
+ {
+ builder = builder
+ .filter(filter)
+ .context(error::BuildDfLogicalPlanSnafu)?;
+ }
+
if let Some(limit) = table_scan.limit {
builder = builder
.limit(limit)
.context(error::BuildDfLogicalPlanSnafu)?;
}
- let plan = builder.build().context(error::BuildDfLogicalPlanSnafu)?;
- Ok(LogicalPlan::DfPlan(plan))
+ builder.build().context(error::BuildDfLogicalPlanSnafu)
}
}
@@ -112,79 +121,3 @@ pub(crate) struct TableScanPlan {
pub filters: Vec<Expr>,
pub limit: Option<usize>,
}
-
-fn to_sql(plan: LogicalPlan) -> Result<String> {
- let LogicalPlan::DfPlan(plan) = plan;
- let table_scan = match plan {
- DfLogicPlan::TableScan(table_scan) => table_scan,
- _ => unreachable!("unknown plan: {:?}", plan),
- };
-
- let schema: SchemaRef = Arc::new(
- table_scan
- .source
- .schema()
- .try_into()
- .context(error::ConvertArrowSchemaSnafu)?,
- );
- let projection = table_scan
- .projection
- .map(|x| {
- x.iter()
- .map(|i| schema.column_name_by_index(*i).to_string())
- .collect::<Vec<String>>()
- })
- .unwrap_or_else(|| {
- schema
- .column_schemas()
- .iter()
- .map(|x| x.name.clone())
- .collect::<Vec<String>>()
- })
- .join(", ");
-
- let mut sql = format!("select {} from {}", projection, &table_scan.table_name);
-
- let filters = table_scan
- .filters
- .iter()
- .map(expr_to_sql)
- .collect::<Result<Vec<String>>>()?
- .join(" AND ");
- if !filters.is_empty() {
- sql.push_str(" where ");
- sql.push_str(&filters);
- }
-
- if let Some(limit) = table_scan.limit {
- sql.push_str(" limit ");
- sql.push_str(&limit.to_string());
- }
- Ok(sql)
-}
-
-fn expr_to_sql(expr: &DfExpr) -> Result<String> {
- Ok(match expr {
- DfExpr::BinaryExpr {
- ref left,
- ref right,
- ref op,
- } => format!(
- "{} {} {}",
- expr_to_sql(left.as_ref())?,
- op,
- expr_to_sql(right.as_ref())?
- ),
- DfExpr::Column(c) => c.name.clone(),
- DfExpr::Literal(sv) => {
- let v: Value = Value::try_from(sv.clone())
- .with_context(|_| error::ConvertScalarValueSnafu { value: sv.clone() })?;
- if matches!(v.data_type(), ConcreteDataType::String(_)) {
- format!("'{}'", sv)
- } else {
- format!("{}", sv)
- }
- }
- _ => unimplemented!("not implemented for {:?}", expr),
- })
-}
diff --git a/src/meta-srv/Cargo.toml b/src/meta-srv/Cargo.toml
index 738ca359faa6..333cbac4d98c 100644
--- a/src/meta-srv/Cargo.toml
+++ b/src/meta-srv/Cargo.toml
@@ -10,6 +10,7 @@ mock = []
[dependencies]
api = { path = "../api" }
async-trait = "0.1"
+catalog = { path = "../catalog" }
common-base = { path = "../common/base" }
common-catalog = { path = "../common/catalog" }
common-error = { path = "../common/error" }
diff --git a/src/meta-srv/src/keys.rs b/src/meta-srv/src/keys.rs
index 71a24acbd681..b7e215fec997 100644
--- a/src/meta-srv/src/keys.rs
+++ b/src/meta-srv/src/keys.rs
@@ -15,7 +15,7 @@
use std::str::FromStr;
use api::v1::meta::TableName;
-use common_catalog::TableGlobalKey;
+use catalog::helper::TableGlobalKey;
use lazy_static::lazy_static;
use regex::Regex;
use serde::{Deserialize, Serialize};
diff --git a/src/meta-srv/src/service/router.rs b/src/meta-srv/src/service/router.rs
index ba924e61d233..0c502be09486 100644
--- a/src/meta-srv/src/service/router.rs
+++ b/src/meta-srv/src/service/router.rs
@@ -16,7 +16,7 @@ use api::v1::meta::{
router_server, CreateRequest, Error, PeerDict, PutRequest, RangeRequest, Region, RegionRoute,
ResponseHeader, RouteRequest, RouteResponse, Table, TableRoute, TableRouteValue,
};
-use common_catalog::{TableGlobalKey, TableGlobalValue};
+use catalog::helper::{TableGlobalKey, TableGlobalValue};
use common_telemetry::warn;
use snafu::{OptionExt, ResultExt};
use tonic::{Request, Response};
diff --git a/src/table/Cargo.toml b/src/table/Cargo.toml
index 0d0d352cd7ff..8e7cebb40d68 100644
--- a/src/table/Cargo.toml
+++ b/src/table/Cargo.toml
@@ -7,6 +7,7 @@ license = "Apache-2.0"
[dependencies]
async-trait = "0.1"
chrono = { version = "0.4", features = ["serde"] }
+common-catalog = { path = "../common/catalog" }
common-error = { path = "../common/error" }
common-query = { path = "../common/query" }
common-recordbatch = { path = "../common/recordbatch" }
diff --git a/src/table/src/metadata.rs b/src/table/src/metadata.rs
index 3fb367589ebe..2e0f72235250 100644
--- a/src/table/src/metadata.rs
+++ b/src/table/src/metadata.rs
@@ -16,6 +16,7 @@ use std::collections::{HashMap, HashSet};
use std::sync::Arc;
use chrono::{DateTime, Utc};
+use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
pub use datatypes::error::{Error as ConvertError, Result as ConvertResult};
use datatypes::schema::{ColumnSchema, RawSchema, Schema, SchemaBuilder, SchemaRef};
use derive_builder::Builder;
@@ -333,9 +334,9 @@ pub struct TableInfo {
/// Comment of the table.
#[builder(default, setter(into))]
pub desc: Option<String>,
- #[builder(default, setter(into))]
+ #[builder(default = "DEFAULT_CATALOG_NAME.to_string()", setter(into))]
pub catalog_name: String,
- #[builder(default, setter(into))]
+ #[builder(default = "DEFAULT_SCHEMA_NAME.to_string()", setter(into))]
pub schema_name: String,
pub meta: TableMeta,
#[builder(default = "TableType::Base")]
|
feat
|
Substrait logical plan (#704)
|
61e0f1a11c1225711f7a10f8ce0e94e2ca4fea97
|
2022-12-12 07:32:17
|
zyy17
|
refactor: add tls option in frontend cli options (#735)
| false
|
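The Substrait commit above adds two-way conversion between DataFusion scalar values and Substrait literals so that filter predicates survive the encode/decode round trip. The sketch below only illustrates the round-trip idea; `Scalar` and `Literal` are stand-in enums, not the real `datafusion` or `substrait_proto` types, and the typed-null handling is simplified compared to the code in `src/common/substrait/src/types.rs`.

```rust
// Stand-in for DataFusion's ScalarValue.
#[derive(Debug, Clone, PartialEq)]
enum Scalar {
    Boolean(Option<bool>),
    Int64(Option<i64>),
    Utf8(Option<String>),
}

// Stand-in for substrait's LiteralType.
#[derive(Debug, Clone, PartialEq)]
enum Literal {
    // A null literal remembers its type name so decoding can restore a typed null.
    Null(&'static str),
    Boolean(bool),
    I64(i64),
    String(String),
}

fn scalar_to_literal(v: &Scalar) -> Literal {
    match v {
        Scalar::Boolean(Some(b)) => Literal::Boolean(*b),
        Scalar::Int64(Some(i)) => Literal::I64(*i),
        Scalar::Utf8(Some(s)) => Literal::String(s.clone()),
        Scalar::Boolean(None) => Literal::Null("bool"),
        Scalar::Int64(None) => Literal::Null("i64"),
        Scalar::Utf8(None) => Literal::Null("string"),
    }
}

fn literal_to_scalar(l: Literal) -> Scalar {
    match l {
        Literal::Boolean(b) => Scalar::Boolean(Some(b)),
        Literal::I64(i) => Scalar::Int64(Some(i)),
        Literal::String(s) => Scalar::Utf8(Some(s)),
        Literal::Null("bool") => Scalar::Boolean(None),
        Literal::Null("i64") => Scalar::Int64(None),
        Literal::Null(_) => Scalar::Utf8(None),
    }
}

fn main() {
    let values = [
        Scalar::Boolean(Some(true)),
        Scalar::Int64(None),
        Scalar::Utf8(Some("a".to_string())),
    ];
    for v in values {
        // Encoding then decoding must give back the same scalar.
        assert_eq!(literal_to_scalar(scalar_to_literal(&v)), v);
    }
}
```

In the actual diff the decoder similarly inspects the `Kind` carried inside a null literal to pick the matching `ScalarValue::*(None)` variant, which is what keeps typed nulls intact across the wire.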
diff --git a/Cargo.lock b/Cargo.lock
index b77aaf75a2c7..ed41965ca4c5 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -5959,6 +5959,7 @@ dependencies = [
"session",
"snafu",
"snap",
+ "strum 0.24.1",
"table",
"tokio",
"tokio-postgres",
@@ -6464,6 +6465,9 @@ name = "strum"
version = "0.24.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "063e6045c0e62079840579a7e47a355ae92f60eb74daaf156fb1e84ba164e63f"
+dependencies = [
+ "strum_macros 0.24.3",
+]
[[package]]
name = "strum_macros"
diff --git a/src/cmd/src/frontend.rs b/src/cmd/src/frontend.rs
index e395d0912b0f..59695c3bbfb5 100644
--- a/src/cmd/src/frontend.rs
+++ b/src/cmd/src/frontend.rs
@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::sync::Arc;
+
use clap::Parser;
use frontend::frontend::{Frontend, FrontendOptions};
use frontend::grpc::GrpcOptions;
@@ -22,6 +24,7 @@ use frontend::opentsdb::OpentsdbOptions;
use frontend::postgres::PostgresOptions;
use meta_client::MetaClientOpts;
use servers::http::HttpOptions;
+use servers::tls::{TlsMode, TlsOption};
use servers::Mode;
use snafu::ResultExt;
@@ -71,6 +74,12 @@ pub struct StartCommand {
influxdb_enable: Option<bool>,
#[clap(long)]
metasrv_addr: Option<String>,
+ #[clap(long)]
+ tls_mode: Option<TlsMode>,
+ #[clap(long)]
+ tls_cert_path: Option<String>,
+ #[clap(long)]
+ tls_key_path: Option<String>,
}
impl StartCommand {
@@ -96,6 +105,8 @@ impl TryFrom<StartCommand> for FrontendOptions {
FrontendOptions::default()
};
+ let tls_option = TlsOption::new(cmd.tls_mode, cmd.tls_cert_path, cmd.tls_key_path);
+
if let Some(addr) = cmd.http_addr {
opts.http_options = Some(HttpOptions {
addr,
@@ -111,12 +122,14 @@ impl TryFrom<StartCommand> for FrontendOptions {
if let Some(addr) = cmd.mysql_addr {
opts.mysql_options = Some(MysqlOptions {
addr,
+ tls: Arc::new(tls_option.clone()),
..Default::default()
});
}
if let Some(addr) = cmd.postgres_addr {
opts.postgres_options = Some(PostgresOptions {
addr,
+ tls: Arc::new(tls_option),
..Default::default()
});
}
@@ -160,6 +173,9 @@ mod tests {
influxdb_enable: Some(false),
config_file: None,
metasrv_addr: None,
+ tls_mode: None,
+ tls_cert_path: None,
+ tls_key_path: None,
};
let opts: FrontendOptions = command.try_into().unwrap();
@@ -209,6 +225,9 @@ mod tests {
std::env::current_dir().unwrap().as_path().to_str().unwrap()
)),
metasrv_addr: None,
+ tls_mode: None,
+ tls_cert_path: None,
+ tls_key_path: None,
};
let fe_opts = FrontendOptions::try_from(command).unwrap();
diff --git a/src/cmd/src/standalone.rs b/src/cmd/src/standalone.rs
index e72166b30359..e6d109b6a209 100644
--- a/src/cmd/src/standalone.rs
+++ b/src/cmd/src/standalone.rs
@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::sync::Arc;
+
use clap::Parser;
use common_telemetry::info;
use datanode::datanode::{Datanode, DatanodeOptions, ObjectStoreConfig};
@@ -26,6 +28,7 @@ use frontend::postgres::PostgresOptions;
use frontend::prometheus::PrometheusOptions;
use serde::{Deserialize, Serialize};
use servers::http::HttpOptions;
+use servers::tls::{TlsMode, TlsOption};
use servers::Mode;
use snafu::ResultExt;
@@ -133,6 +136,12 @@ struct StartCommand {
config_file: Option<String>,
#[clap(short = 'm', long = "memory-catalog")]
enable_memory_catalog: bool,
+ #[clap(long)]
+ tls_mode: Option<TlsMode>,
+ #[clap(long)]
+ tls_cert_path: Option<String>,
+ #[clap(long)]
+ tls_key_path: Option<String>,
}
impl StartCommand {
@@ -245,6 +254,18 @@ impl TryFrom<StartCommand> for FrontendOptions {
opts.influxdb_options = Some(InfluxdbOptions { enable: true });
}
+ let tls_option = TlsOption::new(cmd.tls_mode, cmd.tls_cert_path, cmd.tls_key_path);
+
+ if let Some(mut mysql_options) = opts.mysql_options {
+ mysql_options.tls = Arc::new(tls_option.clone());
+ opts.mysql_options = Some(mysql_options);
+ }
+
+ if let Some(mut postgres_options) = opts.postgres_options {
+ postgres_options.tls = Arc::new(tls_option);
+ opts.postgres_options = Some(postgres_options);
+ }
+
Ok(opts)
}
}
@@ -269,6 +290,9 @@ mod tests {
)),
influxdb_enable: false,
enable_memory_catalog: false,
+ tls_mode: None,
+ tls_cert_path: None,
+ tls_key_path: None,
};
let fe_opts = FrontendOptions::try_from(cmd).unwrap();
diff --git a/src/servers/Cargo.toml b/src/servers/Cargo.toml
index c1d3cab2b5b6..74b2369d69da 100644
--- a/src/servers/Cargo.toml
+++ b/src/servers/Cargo.toml
@@ -38,6 +38,7 @@ regex = "1.6"
rustls = "0.20"
rustls-pemfile = "1.0"
schemars = "0.8"
+strum = { version = "0.24", features = ["derive"] }
serde = "1.0"
serde_json = "1.0"
session = { path = "../session" }
diff --git a/src/servers/src/tls.rs b/src/servers/src/tls.rs
index 57a3c7862171..cee4b1fa9c22 100644
--- a/src/servers/src/tls.rs
+++ b/src/servers/src/tls.rs
@@ -18,22 +18,32 @@ use std::io::{BufReader, Error, ErrorKind};
use rustls::{Certificate, PrivateKey, ServerConfig};
use rustls_pemfile::{certs, pkcs8_private_keys};
use serde::{Deserialize, Serialize};
+use strum::EnumString;
/// TlsMode is used for Mysql and Postgres server start up.
-#[derive(Debug, Default, Serialize, Deserialize, Clone)]
+#[derive(Debug, Default, Serialize, Deserialize, Clone, PartialEq, Eq, EnumString)]
#[serde(rename_all = "snake_case")]
pub enum TlsMode {
#[default]
+ #[strum(to_string = "disable")]
Disable,
+
+ #[strum(to_string = "prefer")]
Prefer,
+
+ #[strum(to_string = "require")]
Require,
+
// TODO(SSebo): Implement the following 2 TLS modes described in

// ["34.19.3. Protection Provided in Different Modes"](https://www.postgresql.org/docs/current/libpq-ssl.html)
+ #[strum(to_string = "verify-ca")]
VerifyCa,
+
+ #[strum(to_string = "verify-full")]
VerifyFull,
}
-#[derive(Debug, Default, Serialize, Deserialize, Clone)]
+#[derive(Debug, Default, Serialize, Deserialize, Clone, PartialEq, Eq)]
#[serde(rename_all = "snake_case")]
pub struct TlsOption {
pub mode: TlsMode,
@@ -44,6 +54,24 @@ pub struct TlsOption {
}
impl TlsOption {
+ pub fn new(mode: Option<TlsMode>, cert_path: Option<String>, key_path: Option<String>) -> Self {
+ let mut tls_option = TlsOption::default();
+
+ if let Some(mode) = mode {
+ tls_option.mode = mode
+ };
+
+ if let Some(cert_path) = cert_path {
+ tls_option.cert_path = cert_path
+ };
+
+ if let Some(key_path) = key_path {
+ tls_option.key_path = key_path
+ };
+
+ tls_option
+ }
+
pub fn setup(&self) -> Result<Option<ServerConfig>, Error> {
if let TlsMode::Disable = self.mode {
return Ok(None);
@@ -76,6 +104,31 @@ impl TlsOption {
#[cfg(test)]
mod tests {
use super::*;
+ use crate::tls::TlsMode::Disable;
+
+ #[test]
+ fn test_new_tls_option() {
+ assert_eq!(TlsOption::default(), TlsOption::new(None, None, None));
+ assert_eq!(
+ TlsOption {
+ mode: Disable,
+ ..Default::default()
+ },
+ TlsOption::new(Some(Disable), None, None)
+ );
+ assert_eq!(
+ TlsOption {
+ mode: Disable,
+ cert_path: "/path/to/cert_path".to_string(),
+ key_path: "/path/to/key_path".to_string(),
+ },
+ TlsOption::new(
+ Some(Disable),
+ Some("/path/to/cert_path".to_string()),
+ Some("/path/to/key_path".to_string())
+ )
+ );
+ }
#[test]
fn test_tls_option_disable() {
|
refactor
|
add tls option in frontend cli options (#735)
|
d6392acd65d018bb7857715ff8674714aa6e0704
|
2024-06-15 16:43:01
|
Weny Xu
|
fix(sqlness): catch different format timestamp (#4149)
| false
|
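For the TLS flags added above, the interesting part is how `TlsOption::new` folds three optional CLI values onto the defaults before the option is shared with the MySQL and Postgres servers. A small stand-in sketch follows; the real `TlsMode`/`TlsOption` live in `servers::tls` and additionally carry serde/strum derives and more modes.

```rust
// Stand-in types only, mirroring the shape of servers::tls.
#[derive(Debug, Default, Clone, PartialEq, Eq)]
enum TlsMode {
    #[default]
    Disable,
    Prefer,
    Require,
}

#[derive(Debug, Default, Clone, PartialEq, Eq)]
struct TlsOption {
    mode: TlsMode,
    cert_path: String,
    key_path: String,
}

impl TlsOption {
    /// Fold optional CLI values onto the defaults, as the new constructor does.
    fn new(mode: Option<TlsMode>, cert_path: Option<String>, key_path: Option<String>) -> Self {
        let mut opt = TlsOption::default();
        if let Some(mode) = mode {
            opt.mode = mode;
        }
        if let Some(cert_path) = cert_path {
            opt.cert_path = cert_path;
        }
        if let Some(key_path) = key_path {
            opt.key_path = key_path;
        }
        opt
    }
}

fn main() {
    // No flags given: everything stays at its default.
    assert_eq!(TlsOption::new(None, None, None), TlsOption::default());

    // Mode and cert path given; the key path stays empty.
    let opt = TlsOption::new(Some(TlsMode::Require), Some("/etc/cert.pem".into()), None);
    assert_eq!(opt.mode, TlsMode::Require);
    assert_eq!(opt.cert_path, "/etc/cert.pem");
    assert!(opt.key_path.is_empty());
}
```

Any flag left off the command line keeps its default, so passing `--tls-mode` alone flips the mode while the certificate and key paths remain empty.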
diff --git a/tests/cases/distributed/information_schema/cluster_info.result b/tests/cases/distributed/information_schema/cluster_info.result
index e0817e60f3e7..0cb23706194b 100644
--- a/tests/cases/distributed/information_schema/cluster_info.result
+++ b/tests/cases/distributed/information_schema/cluster_info.result
@@ -20,7 +20,7 @@ DESC TABLE CLUSTER_INFO;
-- SQLNESS REPLACE version node_version
-- SQLNESS REPLACE (\s\d\.\d\.\d\s) Version
-- SQLNESS REPLACE (\s[a-z0-9]{7}\s) Hash
--- SQLNESS REPLACE (\s[\-0-9T:\.]{23}) Start_time
+-- SQLNESS REPLACE (\s[0-9T:\.]{17}|\s[\-0-9T:\.]{23}) Start_time
-- SQLNESS REPLACE ((\d+(s|ms|m)\s)+) Duration
-- SQLNESS REPLACE [\s\-]+
SELECT * FROM CLUSTER_INFO ORDER BY peer_type;
@@ -30,7 +30,7 @@ SELECT * FROM CLUSTER_INFO ORDER BY peer_type;
-- SQLNESS REPLACE version node_version
-- SQLNESS REPLACE (\s\d\.\d\.\d\s) Version
-- SQLNESS REPLACE (\s[a-z0-9]{7}\s) Hash
--- SQLNESS REPLACE (\s[\-0-9T:\.]{23}) Start_time
+-- SQLNESS REPLACE (\s[0-9T:\.]{17}|\s[\-0-9T:\.]{23}) Start_time
-- SQLNESS REPLACE ((\d+(s|ms|m)\s)+) Duration
-- SQLNESS REPLACE [\s\-]+
SELECT * FROM CLUSTER_INFO WHERE PEER_TYPE = 'METASRV' ORDER BY peer_type;
@@ -40,7 +40,7 @@ SELECT * FROM CLUSTER_INFO WHERE PEER_TYPE = 'METASRV' ORDER BY peer_type;
-- SQLNESS REPLACE version node_version
-- SQLNESS REPLACE (\s\d\.\d\.\d\s) Version
-- SQLNESS REPLACE (\s[a-z0-9]{7}\s) Hash
--- SQLNESS REPLACE (\s[\-0-9T:\.]{23}) Start_time
+-- SQLNESS REPLACE (\s[0-9T:\.]{17}|\s[\-0-9T:\.]{23}) Start_time
-- SQLNESS REPLACE ((\d+(s|ms|m)\s)+) Duration
-- SQLNESS REPLACE [\s\-]+
SELECT * FROM CLUSTER_INFO WHERE PEER_TYPE = 'FRONTEND' ORDER BY peer_type;
@@ -50,7 +50,7 @@ SELECT * FROM CLUSTER_INFO WHERE PEER_TYPE = 'FRONTEND' ORDER BY peer_type;
-- SQLNESS REPLACE version node_version
-- SQLNESS REPLACE (\s\d\.\d\.\d\s) Version
-- SQLNESS REPLACE (\s[a-z0-9]{7}\s) Hash
--- SQLNESS REPLACE (\s[\-0-9T:\.]{23}) Start_time
+-- SQLNESS REPLACE (\s[0-9T:\.]{17}|\s[\-0-9T:\.]{23}) Start_time
-- SQLNESS REPLACE ((\d+(s|ms|m)\s)+) Duration
-- SQLNESS REPLACE [\s\-]+
SELECT * FROM CLUSTER_INFO WHERE PEER_TYPE != 'FRONTEND' ORDER BY peer_type;
@@ -60,7 +60,7 @@ SELECT * FROM CLUSTER_INFO WHERE PEER_TYPE != 'FRONTEND' ORDER BY peer_type;
-- SQLNESS REPLACE version node_version
-- SQLNESS REPLACE (\s\d\.\d\.\d\s) Version
-- SQLNESS REPLACE (\s[a-z0-9]{7}\s) Hash
--- SQLNESS REPLACE (\s[\-0-9T:\.]{23}) Start_time
+-- SQLNESS REPLACE (\s[0-9T:\.]{17}|\s[\-0-9T:\.]{23}) Start_time
-- SQLNESS REPLACE ((\d+(s|ms|m)\s)+) Duration
-- SQLNESS REPLACE [\s\-]+
SELECT * FROM CLUSTER_INFO WHERE PEER_ID > 1 ORDER BY peer_type;
diff --git a/tests/cases/distributed/information_schema/cluster_info.sql b/tests/cases/distributed/information_schema/cluster_info.sql
index c26e4854bcc2..7ac47e3d6309 100644
--- a/tests/cases/distributed/information_schema/cluster_info.sql
+++ b/tests/cases/distributed/information_schema/cluster_info.sql
@@ -5,7 +5,7 @@ DESC TABLE CLUSTER_INFO;
-- SQLNESS REPLACE version node_version
-- SQLNESS REPLACE (\s\d\.\d\.\d\s) Version
-- SQLNESS REPLACE (\s[a-z0-9]{7}\s) Hash
--- SQLNESS REPLACE (\s[\-0-9T:\.]{23}) Start_time
+-- SQLNESS REPLACE (\s[0-9T:\.]{17}|\s[\-0-9T:\.]{23}) Start_time
-- SQLNESS REPLACE ((\d+(s|ms|m)\s)+) Duration
-- SQLNESS REPLACE [\s\-]+
SELECT * FROM CLUSTER_INFO ORDER BY peer_type;
@@ -13,7 +13,7 @@ SELECT * FROM CLUSTER_INFO ORDER BY peer_type;
-- SQLNESS REPLACE version node_version
-- SQLNESS REPLACE (\s\d\.\d\.\d\s) Version
-- SQLNESS REPLACE (\s[a-z0-9]{7}\s) Hash
--- SQLNESS REPLACE (\s[\-0-9T:\.]{23}) Start_time
+-- SQLNESS REPLACE (\s[0-9T:\.]{17}|\s[\-0-9T:\.]{23}) Start_time
-- SQLNESS REPLACE ((\d+(s|ms|m)\s)+) Duration
-- SQLNESS REPLACE [\s\-]+
SELECT * FROM CLUSTER_INFO WHERE PEER_TYPE = 'METASRV' ORDER BY peer_type;
@@ -21,7 +21,7 @@ SELECT * FROM CLUSTER_INFO WHERE PEER_TYPE = 'METASRV' ORDER BY peer_type;
-- SQLNESS REPLACE version node_version
-- SQLNESS REPLACE (\s\d\.\d\.\d\s) Version
-- SQLNESS REPLACE (\s[a-z0-9]{7}\s) Hash
--- SQLNESS REPLACE (\s[\-0-9T:\.]{23}) Start_time
+-- SQLNESS REPLACE (\s[0-9T:\.]{17}|\s[\-0-9T:\.]{23}) Start_time
-- SQLNESS REPLACE ((\d+(s|ms|m)\s)+) Duration
-- SQLNESS REPLACE [\s\-]+
SELECT * FROM CLUSTER_INFO WHERE PEER_TYPE = 'FRONTEND' ORDER BY peer_type;
@@ -29,7 +29,7 @@ SELECT * FROM CLUSTER_INFO WHERE PEER_TYPE = 'FRONTEND' ORDER BY peer_type;
-- SQLNESS REPLACE version node_version
-- SQLNESS REPLACE (\s\d\.\d\.\d\s) Version
-- SQLNESS REPLACE (\s[a-z0-9]{7}\s) Hash
--- SQLNESS REPLACE (\s[\-0-9T:\.]{23}) Start_time
+-- SQLNESS REPLACE (\s[0-9T:\.]{17}|\s[\-0-9T:\.]{23}) Start_time
-- SQLNESS REPLACE ((\d+(s|ms|m)\s)+) Duration
-- SQLNESS REPLACE [\s\-]+
SELECT * FROM CLUSTER_INFO WHERE PEER_TYPE != 'FRONTEND' ORDER BY peer_type;
@@ -37,7 +37,7 @@ SELECT * FROM CLUSTER_INFO WHERE PEER_TYPE != 'FRONTEND' ORDER BY peer_type;
-- SQLNESS REPLACE version node_version
-- SQLNESS REPLACE (\s\d\.\d\.\d\s) Version
-- SQLNESS REPLACE (\s[a-z0-9]{7}\s) Hash
--- SQLNESS REPLACE (\s[\-0-9T:\.]{23}) Start_time
+-- SQLNESS REPLACE (\s[0-9T:\.]{17}|\s[\-0-9T:\.]{23}) Start_time
-- SQLNESS REPLACE ((\d+(s|ms|m)\s)+) Duration
-- SQLNESS REPLACE [\s\-]+
SELECT * FROM CLUSTER_INFO WHERE PEER_ID > 1 ORDER BY peer_type;
|
fix
|
catch different format timestamp (#4149)
|
9c1704d4cbbfab8af07a77da598a1cfe2a5e7b22
|
2024-08-21 15:01:05
|
Yingwen
|
docs: move v0.9.1 benchmark report to tsbs dir (#4598)
| false
|
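The sqlness fix above widens the `Start_time` replacement so it matches a second timestamp shape: a 17-character undashed form in addition to the original 23-character dashed form. A quick check of the pattern with the `regex` crate (the sample timestamps are made up, not taken from an actual sqlness run):

```rust
use regex::Regex;

fn main() {
    let re = Regex::new(r"(\s[0-9T:\.]{17}|\s[\-0-9T:\.]{23})").unwrap();

    // 23-character dashed form, already covered before the fix.
    assert!(re.is_match(" 2024-06-15T16:43:01.123"));

    // 17-character undashed form, the case the widened alternation now catches.
    assert!(re.is_match(" 20240615T16:43:01"));

    // A non-timestamp value is left alone.
    assert!(!re.is_match(" FRONTEND"));
}
```

Both alternations require a leading whitespace character, so the replacement only fires on the start-time column and leaves the rest of the `CLUSTER_INFO` output untouched.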
diff --git a/docs/benchmarks/log/README.md b/docs/benchmarks/log/README.md
index 5fc8d4053a43..c3984b3ee65b 100644
--- a/docs/benchmarks/log/README.md
+++ b/docs/benchmarks/log/README.md
@@ -29,7 +29,7 @@ INSERT INTO test_table (message, timestamp) VALUES ()
The goal is to test fuzzy search performance for each database. In real scenarios it means the log is produced by some kind of middleware and inserted directly into the database.
## Creating tables
-See [here](./create_table.sql) for GreptimeDB and Clickhouse's create table clause.
+See [here](./create_table.sql) for GreptimeDB and Clickhouse's create table clause.
The mapping of Elastic search is created automatically.
## Vector Configuration
@@ -41,11 +41,11 @@ Please refer to [SQL query](./query.sql) for GreptimeDB and Clickhouse, and [que
## Steps to reproduce
0. Decide whether to run structured model test or unstructured mode test.
-1. Build vector binary(see vector's config file for specific branch) and databases binaries accordingly.
+1. Build vector binary(see vector's config file for specific branch) and databases binaries accordingly.
2. Create table in GreptimeDB and Clickhouse in advance.
3. Run vector to insert data.
4. When data insertion is finished, run queries against each database. Note: you'll need to update timerange value after data insertion.
## Addition
- You can tune GreptimeDB's configuration to get better performance.
-- You can setup GreptimeDB to use S3 as storage, see [here](https://docs.greptime.com/user-guide/operations/configuration/#storage-options).
\ No newline at end of file
+- You can setup GreptimeDB to use S3 as storage, see [here](https://docs.greptime.com/user-guide/operations/configuration/#storage-options).
diff --git a/docs/benchmarks/log/create_table.sql b/docs/benchmarks/log/create_table.sql
index 212f5b86eb26..81e86479cb87 100644
--- a/docs/benchmarks/log/create_table.sql
+++ b/docs/benchmarks/log/create_table.sql
@@ -51,6 +51,6 @@ CREATE TABLE IF NOT EXISTS test_table
message String,
timestamp String,
INDEX inv_idx(message) TYPE full_text(0) GRANULARITY 1
-)
+)
ENGINE = MergeTree()
-ORDER BY tuple();
\ No newline at end of file
+ORDER BY tuple();
diff --git a/docs/benchmarks/log/query.md b/docs/benchmarks/log/query.md
index 0b30712ac716..6a9f97fc2581 100644
--- a/docs/benchmarks/log/query.md
+++ b/docs/benchmarks/log/query.md
@@ -196,4 +196,4 @@ URL: `http://127.0.0.1:9200/_search`
}
}
}
-```
\ No newline at end of file
+```
diff --git a/docs/benchmarks/log/structured_vector.toml b/docs/benchmarks/log/structured_vector.toml
index 6b06e5a1a379..f08fc5c3868f 100644
--- a/docs/benchmarks/log/structured_vector.toml
+++ b/docs/benchmarks/log/structured_vector.toml
@@ -54,4 +54,4 @@ compression = "none"
doc_type = "_doc"
endpoints = [ "http://127.0.0.1:9200" ]
id_key = "id"
-mode = "bulk"
\ No newline at end of file
+mode = "bulk"
diff --git a/docs/benchmarks/log/unstructured_vector.toml b/docs/benchmarks/log/unstructured_vector.toml
index 172b104c3944..2594ac44284d 100644
--- a/docs/benchmarks/log/unstructured_vector.toml
+++ b/docs/benchmarks/log/unstructured_vector.toml
@@ -40,4 +40,4 @@ compression = "none"
doc_type = "_doc"
endpoints = [ "http://127.0.0.1:9200" ]
id_key = "id"
-mode = "bulk"
\ No newline at end of file
+mode = "bulk"
diff --git a/v0.9.1.md b/docs/benchmarks/tsbs/v0.9.1.md
similarity index 100%
rename from v0.9.1.md
rename to docs/benchmarks/tsbs/v0.9.1.md
|
docs
|
move v0.9.1 benchmark report to tsbs dir (#4598)
|
3329da5b72dc57f85ad97b0f0d3a70264c48f134
|
2023-11-15 11:58:20
|
Zhenchi
|
feat(puffin): add partial reader (#2741)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 622a3fafd4cc..698e3138d806 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -6549,8 +6549,11 @@ name = "puffin"
version = "0.4.2"
dependencies = [
"derive_builder 0.12.0",
+ "futures",
+ "pin-project",
"serde",
"serde_json",
+ "tokio",
]
[[package]]
diff --git a/Cargo.toml b/Cargo.toml
index e00d8afad6ea..b178fd9b97ea 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -98,6 +98,7 @@ opentelemetry-proto = { git = "https://github.com/waynexia/opentelemetry-rust.gi
] }
parquet = "47.0"
paste = "1.0"
+pin-project = "1.0"
prometheus = { version = "0.13.3", features = ["process"] }
prost = "0.12"
raft-engine = { git = "https://github.com/tikv/raft-engine.git", rev = "22dfb426cd994602b57725ef080287d3e53db479" }
diff --git a/src/puffin/Cargo.toml b/src/puffin/Cargo.toml
index 6c1520f3505d..a0c56c0f8136 100644
--- a/src/puffin/Cargo.toml
+++ b/src/puffin/Cargo.toml
@@ -6,5 +6,10 @@ license.workspace = true
[dependencies]
derive_builder.workspace = true
+futures.workspace = true
+pin-project.workspace = true
serde.workspace = true
serde_json.workspace = true
+
+[dev-dependencies]
+tokio.workspace = true
diff --git a/src/puffin/src/lib.rs b/src/puffin/src/lib.rs
index 53ef9295145b..88a1d0254019 100644
--- a/src/puffin/src/lib.rs
+++ b/src/puffin/src/lib.rs
@@ -14,3 +14,4 @@
pub mod blob_metadata;
pub mod file_metadata;
+pub mod partial_reader;
diff --git a/src/puffin/src/partial_reader.rs b/src/puffin/src/partial_reader.rs
new file mode 100644
index 000000000000..ef4815679440
--- /dev/null
+++ b/src/puffin/src/partial_reader.rs
@@ -0,0 +1,95 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+mod r#async;
+mod position;
+mod sync;
+
+use pin_project::pin_project;
+
+/// `PartialReader` to perform synchronous or asynchronous reads on a portion of a resource.
+#[pin_project]
+pub struct PartialReader<R> {
+ /// offset of the portion in the resource
+ offset: u64,
+
+ /// size of the portion in the resource
+ size: u64,
+
+ /// Resource for the portion.
+ /// The `offset` and `size` fields are used to determine the slice of `source` to read.
+ #[pin]
+ source: R,
+
+ /// The current position within the portion.
+ ///
+ /// A `None` value indicates that no read operations have been performed yet on this portion.
+ /// Before a read operation can be performed, the resource must be positioned at the correct offset in the portion.
+ /// After the first read operation, this field will be set to `Some(_)`, representing the current read position in the portion.
+ position_in_portion: Option<u64>,
+}
+
+impl<R> PartialReader<R> {
+ /// Creates a new `PartialReader` for the given resource.
+ pub fn new(source: R, offset: u64, size: u64) -> Self {
+ Self {
+ offset,
+ size,
+ source,
+ position_in_portion: None,
+ }
+ }
+
+ /// Returns the current position in the portion.
+ pub fn position(&self) -> u64 {
+ self.position_in_portion.unwrap_or_default()
+ }
+
+    /// Returns the size of the portion.
+ pub fn size(&self) -> u64 {
+ self.size
+ }
+
+ /// Returns whether the portion is empty.
+ pub fn is_empty(&self) -> bool {
+ self.size == 0
+ }
+
+ /// Returns whether the current position is at the end of the portion.
+ pub fn is_eof(&self) -> bool {
+ self.position() == self.size
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::io::Cursor;
+
+ use super::*;
+
+ #[test]
+ fn is_empty_returns_true_for_zero_length_blob() {
+ let data: Vec<u8> = (0..100).collect();
+ let reader = PartialReader::new(Cursor::new(data), 10, 0);
+ assert!(reader.is_empty());
+ assert!(reader.is_eof());
+ }
+
+ #[test]
+ fn is_empty_returns_false_for_non_zero_length_blob() {
+ let data: Vec<u8> = (0..100).collect();
+ let reader = PartialReader::new(Cursor::new(data), 10, 30);
+ assert!(!reader.is_empty());
+ }
+}
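As an editor's aside (not part of the commit): a minimal sketch of how the accessors above behave, assuming a binary that depends on the `puffin` crate so that `puffin::partial_reader::PartialReader` is importable.

```rust
use std::io::Cursor;

use puffin::partial_reader::PartialReader;

fn main() {
    let data: Vec<u8> = (0..100).collect();
    // A 30-byte window starting at byte 10 of the underlying source.
    let reader = PartialReader::new(Cursor::new(data), 10, 30);

    assert_eq!(reader.position(), 0); // no read has been performed yet
    assert_eq!(reader.size(), 30);    // portion size, not the source size
    assert!(!reader.is_empty());
    assert!(!reader.is_eof());
}
```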
diff --git a/src/puffin/src/partial_reader/async.rs b/src/puffin/src/partial_reader/async.rs
new file mode 100644
index 000000000000..2cc9fae5236a
--- /dev/null
+++ b/src/puffin/src/partial_reader/async.rs
@@ -0,0 +1,196 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::io;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+use futures::{ready, AsyncRead, AsyncSeek};
+
+use crate::partial_reader::position::position_after_seek;
+use crate::partial_reader::PartialReader;
+
+impl<R: AsyncRead + AsyncSeek + Unpin> AsyncRead for PartialReader<R> {
+ fn poll_read(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &mut [u8],
+ ) -> Poll<io::Result<usize>> {
+ // past end of portion
+ if self.position() > self.size() {
+ return Poll::Ready(Err(io::Error::new(
+ io::ErrorKind::InvalidInput,
+ "invalid read past the end of the portion",
+ )));
+ }
+
+ // end of portion
+ if self.is_eof() {
+ return Poll::Ready(Ok(0));
+ }
+
+ // first read, seek to the correct offset
+ if self.position_in_portion.is_none() {
+ // seek operation
+ let seek_from = io::SeekFrom::Start(self.offset);
+ ready!(self.as_mut().project().source.poll_seek(cx, seek_from))?;
+
+ self.position_in_portion = Some(0);
+ }
+
+ // prevent reading over the end
+ let max_len = (self.size() - self.position_in_portion.unwrap()) as usize;
+ let actual_len = max_len.min(buf.len());
+
+ // create a limited reader
+ let target_buf = &mut buf[..actual_len];
+
+ // read operation
+ let read_bytes = ready!(self.as_mut().project().source.poll_read(cx, target_buf))?;
+ self.position_in_portion = Some(self.position() + read_bytes as u64);
+
+ Poll::Ready(Ok(read_bytes))
+ }
+}
+
+impl<R: AsyncRead + AsyncSeek + Unpin> AsyncSeek for PartialReader<R> {
+ fn poll_seek(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ pos: io::SeekFrom,
+ ) -> Poll<io::Result<u64>> {
+ let new_position = position_after_seek(pos, self.position(), self.size())?;
+ let pos = io::SeekFrom::Start(self.offset + new_position);
+ ready!(self.as_mut().project().source.poll_seek(cx, pos))?;
+
+ self.position_in_portion = Some(new_position);
+ Poll::Ready(Ok(new_position))
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use futures::io::Cursor;
+ use futures::{AsyncReadExt as _, AsyncSeekExt as _};
+
+ use super::*;
+
+ #[tokio::test]
+ async fn read_all_data_in_portion() {
+ let data: Vec<u8> = (0..100).collect();
+ let mut reader = PartialReader::new(Cursor::new(data.clone()), 0, 100);
+ let mut buf = vec![0; 100];
+ assert_eq!(reader.read(&mut buf).await.unwrap(), 100);
+ assert_eq!(buf, data);
+ }
+
+ #[tokio::test]
+ async fn read_part_of_data_in_portion() {
+ let data: Vec<u8> = (0..100).collect();
+ let mut reader = PartialReader::new(Cursor::new(data), 10, 30);
+ let mut buf = vec![0; 30];
+ assert_eq!(reader.read(&mut buf).await.unwrap(), 30);
+ assert_eq!(buf, (10..40).collect::<Vec<u8>>());
+ }
+
+ #[tokio::test]
+ async fn seek_and_read_data_in_portion() {
+ let data: Vec<u8> = (0..100).collect();
+ let mut reader = PartialReader::new(Cursor::new(data), 10, 30);
+ assert_eq!(reader.seek(io::SeekFrom::Start(10)).await.unwrap(), 10);
+ let mut buf = vec![0; 10];
+ assert_eq!(reader.read(&mut buf).await.unwrap(), 10);
+ assert_eq!(buf, (20..30).collect::<Vec<u8>>());
+ }
+
+ #[tokio::test]
+ async fn read_past_end_of_portion_is_eof() {
+ let data: Vec<u8> = (0..100).collect();
+ let mut reader = PartialReader::new(Cursor::new(data), 10, 30);
+ let mut buf = vec![0; 50];
+ assert_eq!(reader.read(&mut buf).await.unwrap(), 30);
+ assert_eq!(reader.read(&mut buf).await.unwrap(), 0); // hit EOF
+ }
+
+ #[tokio::test]
+ async fn seek_past_end_of_portion_returns_error() {
+ let data: Vec<u8> = (0..100).collect();
+ let mut reader = PartialReader::new(Cursor::new(data), 10, 30);
+ // seeking past the portion returns an error
+ assert!(reader.seek(io::SeekFrom::Start(31)).await.is_err());
+ }
+
+ #[tokio::test]
+ async fn seek_to_negative_position_returns_error() {
+ let data: Vec<u8> = (0..100).collect();
+ let mut reader = PartialReader::new(Cursor::new(data), 10, 30);
+ assert_eq!(reader.seek(io::SeekFrom::Start(10)).await.unwrap(), 10);
+ // seeking back to the start of the portion
+ assert_eq!(reader.seek(io::SeekFrom::Current(-10)).await.unwrap(), 0);
+ // seeking to a negative position returns an error
+ assert!(reader.seek(io::SeekFrom::Current(-1)).await.is_err());
+ }
+
+ #[tokio::test]
+ async fn seek_from_end_of_portion() {
+ let data: Vec<u8> = (0..100).collect();
+ let mut reader = PartialReader::new(Cursor::new(data), 10, 30);
+ let mut buf = vec![0; 10];
+ // seek to 10 bytes before the end of the portion
+ assert_eq!(reader.seek(io::SeekFrom::End(-10)).await.unwrap(), 20);
+ assert_eq!(reader.read(&mut buf).await.unwrap(), 10);
+ // the final 10 bytes of the portion
+ assert_eq!(buf, (30..40).collect::<Vec<u8>>());
+ assert!(reader.is_eof());
+ }
+
+ #[tokio::test]
+ async fn seek_from_end_to_negative_position_returns_error() {
+ let data: Vec<u8> = (0..100).collect();
+ let mut reader = PartialReader::new(Cursor::new(data.clone()), 10, 30);
+ // seeking to a negative position returns an error
+ assert!(reader.seek(io::SeekFrom::End(-31)).await.is_err());
+ }
+
+ #[tokio::test]
+ async fn zero_length_portion_returns_zero_on_read() {
+ let data: Vec<u8> = (0..100).collect();
+ let mut reader = PartialReader::new(Cursor::new(data), 10, 0);
+ let mut buf = vec![0; 10];
+ // reading a portion with zero length returns 0 bytes
+ assert_eq!(reader.read(&mut buf).await.unwrap(), 0);
+ }
+
+ #[tokio::test]
+ async fn is_eof_returns_true_at_end_of_portion() {
+ let data: Vec<u8> = (0..100).collect();
+ let mut reader = PartialReader::new(Cursor::new(data), 10, 30);
+ // we are not at the end of the portion
+ assert!(!reader.is_eof());
+ let mut buf = vec![0; 30];
+ assert_eq!(reader.read(&mut buf).await.unwrap(), 30);
+ // we are at the end of the portion
+ assert!(reader.is_eof());
+ }
+
+ #[tokio::test]
+ async fn position_resets_after_seek_to_start() {
+ let data: Vec<u8> = (0..100).collect();
+ let mut reader = PartialReader::new(Cursor::new(data), 10, 30);
+ assert_eq!(reader.seek(io::SeekFrom::Start(10)).await.unwrap(), 10);
+ assert_eq!(reader.position(), 10);
+ assert_eq!(reader.seek(io::SeekFrom::Start(0)).await.unwrap(), 0);
+ assert_eq!(reader.position(), 0);
+ }
+}
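A minimal usage sketch for the async side (again an editor's illustration rather than code from the commit: it assumes a tokio runtime with the `macros`/`rt` features enabled and uses `futures::io::Cursor` as a stand-in for a real async source).

```rust
use std::io::SeekFrom;

use futures::io::Cursor;
use futures::{AsyncReadExt, AsyncSeekExt};
use puffin::partial_reader::PartialReader;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let data: Vec<u8> = (0..100).collect();
    // Portion covering bytes 10..40 of the source.
    let mut reader = PartialReader::new(Cursor::new(data), 10, 30);

    // Seeks are interpreted relative to the portion: End(-10) lands at position 20.
    reader.seek(SeekFrom::End(-10)).await?;

    let mut tail = vec![0u8; 10];
    reader.read_exact(&mut tail).await?;
    assert_eq!(tail, (30..40).collect::<Vec<u8>>()); // last 10 bytes of the portion
    Ok(())
}
```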
diff --git a/src/puffin/src/partial_reader/position.rs b/src/puffin/src/partial_reader/position.rs
new file mode 100644
index 000000000000..e57817c493af
--- /dev/null
+++ b/src/puffin/src/partial_reader/position.rs
@@ -0,0 +1,102 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::io;
+
+/// Calculates the new position after seeking. It checks if the new position
+/// is valid (within the portion bounds) before returning it.
+pub fn position_after_seek(
+ seek_from: io::SeekFrom,
+ position_in_portion: u64,
+ size_of_portion: u64,
+) -> io::Result<u64> {
+ let new_position = match seek_from {
+ io::SeekFrom::Start(offset) => offset,
+ io::SeekFrom::Current(offset) => {
+ let next = (position_in_portion as i64) + offset;
+ if next < 0 {
+ return Err(io::Error::new(
+ io::ErrorKind::InvalidInput,
+ "invalid seek to a negative or overflowing position",
+ ));
+ }
+ next as u64
+ }
+ io::SeekFrom::End(offset) => {
+ let end = size_of_portion as i64;
+ (end + offset) as u64
+ }
+ };
+
+ if new_position > size_of_portion {
+ return Err(io::Error::new(
+ io::ErrorKind::InvalidInput,
+ "invalid seek to a position beyond the end of the portion",
+ ));
+ }
+
+ Ok(new_position)
+}
+
+#[cfg(test)]
+mod tests {
+ use std::io::ErrorKind;
+
+ use super::*;
+
+ #[test]
+ fn test_position_after_seek_from_start() {
+ let result = position_after_seek(io::SeekFrom::Start(10), 0, 20).unwrap();
+ assert_eq!(result, 10);
+ }
+
+ #[test]
+ fn test_position_after_seek_from_start_out_of_bounds() {
+ let result = position_after_seek(io::SeekFrom::Start(30), 0, 20);
+ assert!(result.is_err());
+ assert_eq!(result.unwrap_err().kind(), ErrorKind::InvalidInput);
+ }
+
+ #[test]
+ fn test_position_after_seek_from_current() {
+ let result = position_after_seek(io::SeekFrom::Current(10), 10, 30).unwrap();
+ assert_eq!(result, 20);
+ }
+
+ #[test]
+ fn test_position_after_seek_from_current_negative_position_within_bounds() {
+ let result = position_after_seek(io::SeekFrom::Current(-10), 15, 20).unwrap();
+ assert_eq!(result, 5);
+ }
+
+ #[test]
+ fn test_position_after_seek_from_current_negative_position() {
+ let result = position_after_seek(io::SeekFrom::Current(-10), 5, 20);
+ assert!(result.is_err());
+ assert_eq!(result.unwrap_err().kind(), ErrorKind::InvalidInput);
+ }
+
+ #[test]
+ fn test_position_after_seek_from_end() {
+ let result = position_after_seek(io::SeekFrom::End(-10), 0, 30).unwrap();
+ assert_eq!(result, 20);
+ }
+
+ #[test]
+ fn test_position_after_seek_from_end_out_of_bounds() {
+ let result = position_after_seek(io::SeekFrom::End(10), 0, 20);
+ assert!(result.is_err());
+ assert_eq!(result.unwrap_err().kind(), ErrorKind::InvalidInput);
+ }
+}
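The helper above is the single place where portion-relative seek semantics are defined; a small worked sketch for a 30-byte portion, written as one hypothetical extra test in the style of the module above (the function is visible only within the crate, so the example has to live there).

```rust
// Hypothetical extra test for the `tests` module above (it already has `use super::*;`).
#[test]
fn seek_semantics_for_a_30_byte_portion() {
    // Current(-5) from in-portion position 25 lands at 20.
    assert_eq!(position_after_seek(io::SeekFrom::Current(-5), 25, 30).unwrap(), 20);
    // End(-10) is measured from the portion size, not the source size.
    assert_eq!(position_after_seek(io::SeekFrom::End(-10), 25, 30).unwrap(), 20);
    // Seeking past the portion end, or before its start, fails with InvalidInput.
    assert!(position_after_seek(io::SeekFrom::Start(31), 25, 30).is_err());
    assert!(position_after_seek(io::SeekFrom::Current(-26), 25, 30).is_err());
}
```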
diff --git a/src/puffin/src/partial_reader/sync.rs b/src/puffin/src/partial_reader/sync.rs
new file mode 100644
index 000000000000..1b7781543973
--- /dev/null
+++ b/src/puffin/src/partial_reader/sync.rs
@@ -0,0 +1,180 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::io;
+
+use crate::partial_reader::position::position_after_seek;
+use crate::partial_reader::PartialReader;
+
+impl<R: io::Read + io::Seek> io::Read for PartialReader<R> {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ // past end of portion
+ if self.position() > self.size() {
+ return Err(io::Error::new(
+ io::ErrorKind::InvalidInput,
+ "invalid read past the end of the portion",
+ ));
+ }
+
+ // end of portion
+ if self.is_eof() {
+ return Ok(0);
+ }
+
+ // haven't read from the portion yet, need to seek to the start of it.
+ if self.position_in_portion.is_none() {
+ self.source.seek(io::SeekFrom::Start(self.offset))?;
+ self.position_in_portion = Some(0);
+ }
+
+ // prevent reading over the end
+ let max_len = (self.size() - self.position_in_portion.unwrap()) as usize;
+ let actual_len = max_len.min(buf.len());
+
+ // create a limited reader
+ let target_buf = &mut buf[..actual_len];
+
+ // perform the actual read from the source and update the position.
+ let read_bytes = self.source.read(target_buf)?;
+ self.position_in_portion = Some(self.position_in_portion.unwrap() + read_bytes as u64);
+
+ Ok(read_bytes)
+ }
+}
+
+impl<R: io::Read + io::Seek> io::Seek for PartialReader<R> {
+ fn seek(&mut self, pos: io::SeekFrom) -> io::Result<u64> {
+ let new_position = position_after_seek(pos, self.position(), self.size())?;
+ let pos = io::SeekFrom::Start(self.offset + new_position);
+ self.source.seek(pos)?;
+
+ self.position_in_portion = Some(new_position);
+ Ok(new_position)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::io::{Cursor, Read, Seek, SeekFrom};
+
+ use super::*;
+
+ #[test]
+ fn read_all_data_in_portion() {
+ let data: Vec<u8> = (0..100).collect();
+ let mut reader = PartialReader::new(Cursor::new(data.clone()), 0, 100);
+ let mut buf = vec![0; 100];
+ assert_eq!(reader.read(&mut buf).unwrap(), 100);
+ assert_eq!(buf, data);
+ }
+
+ #[test]
+ fn read_part_of_data_in_portion() {
+ let data: Vec<u8> = (0..100).collect();
+ let mut reader = PartialReader::new(Cursor::new(data), 10, 30);
+ let mut buf = vec![0; 30];
+ assert_eq!(reader.read(&mut buf).unwrap(), 30);
+ assert_eq!(buf, (10..40).collect::<Vec<u8>>());
+ }
+
+ #[test]
+ fn seek_and_read_data_in_portion() {
+ let data: Vec<u8> = (0..100).collect();
+ let mut reader = PartialReader::new(Cursor::new(data), 10, 30);
+ assert_eq!(reader.seek(SeekFrom::Start(10)).unwrap(), 10);
+ let mut buf = vec![0; 10];
+ assert_eq!(reader.read(&mut buf).unwrap(), 10);
+ assert_eq!(buf, (20..30).collect::<Vec<u8>>());
+ }
+
+ #[test]
+ fn read_past_end_of_portion_is_eof() {
+ let data: Vec<u8> = (0..100).collect();
+ let mut reader = PartialReader::new(Cursor::new(data), 10, 30);
+ let mut buf = vec![0; 50];
+ assert_eq!(reader.read(&mut buf).unwrap(), 30);
+ assert_eq!(reader.read(&mut buf).unwrap(), 0); // hit EOF
+ }
+
+ #[test]
+ fn seek_past_end_of_portion_returns_error() {
+ let data: Vec<u8> = (0..100).collect();
+ let mut reader = PartialReader::new(Cursor::new(data), 10, 30);
+ // seeking past the portion returns an error
+ assert!(reader.seek(SeekFrom::Start(31)).is_err());
+ }
+
+ #[test]
+ fn seek_to_negative_position_returns_error() {
+ let data: Vec<u8> = (0..100).collect();
+ let mut reader = PartialReader::new(Cursor::new(data), 10, 30);
+ assert_eq!(reader.seek(SeekFrom::Start(10)).unwrap(), 10);
+ // seeking back to the start of the portion
+ assert_eq!(reader.seek(SeekFrom::Current(-10)).unwrap(), 0);
+ // seeking to a negative position returns an error
+ assert!(reader.seek(SeekFrom::Current(-1)).is_err());
+ }
+
+ #[test]
+ fn seek_from_end_of_portion() {
+ let data: Vec<u8> = (0..100).collect();
+ let mut reader = PartialReader::new(Cursor::new(data), 10, 30);
+ let mut buf = vec![0; 10];
+ // seek to 10 bytes before the end of the portion
+ assert_eq!(reader.seek(SeekFrom::End(-10)).unwrap(), 20);
+ assert_eq!(reader.read(&mut buf).unwrap(), 10);
+ // the final 10 bytes of the portion
+ assert_eq!(buf, (30..40).collect::<Vec<u8>>());
+ assert!(reader.is_eof());
+ }
+
+ #[test]
+ fn seek_from_end_to_negative_position_returns_error() {
+ let data: Vec<u8> = (0..100).collect();
+ let mut reader = PartialReader::new(Cursor::new(data.clone()), 10, 30);
+ // seeking to a negative position returns an error
+ assert!(reader.seek(SeekFrom::End(-31)).is_err());
+ }
+
+ #[test]
+ fn zero_length_portion_returns_zero_on_read() {
+ let data: Vec<u8> = (0..100).collect();
+ let mut reader = PartialReader::new(Cursor::new(data), 10, 0);
+ let mut buf = vec![0; 10];
+ // reading a portion with zero length returns 0 bytes
+ assert_eq!(reader.read(&mut buf).unwrap(), 0);
+ }
+
+ #[test]
+ fn is_eof_returns_true_at_end_of_portion() {
+ let data: Vec<u8> = (0..100).collect();
+ let mut reader = PartialReader::new(Cursor::new(data), 10, 30);
+ // we are not at the end of the portion
+ assert!(!reader.is_eof());
+ let mut buf = vec![0; 30];
+ assert_eq!(reader.read(&mut buf).unwrap(), 30);
+ // we are at the end of the portion
+ assert!(reader.is_eof());
+ }
+
+ #[test]
+ fn position_resets_after_seek_to_start() {
+ let data: Vec<u8> = (0..100).collect();
+ let mut reader = PartialReader::new(Cursor::new(data), 10, 30);
+ assert_eq!(reader.seek(SeekFrom::Start(10)).unwrap(), 10);
+ assert_eq!(reader.position(), 10);
+ assert_eq!(reader.seek(SeekFrom::Start(0)).unwrap(), 0);
+ assert_eq!(reader.position(), 0);
+ }
+}
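And, for completeness, a minimal sketch of the blocking side, mirroring the tests above (same assumption as earlier sketches: the `puffin` crate is available as a dependency).

```rust
use std::io::{Cursor, Read, Seek, SeekFrom};

use puffin::partial_reader::PartialReader;

fn main() -> std::io::Result<()> {
    let data: Vec<u8> = (0..100).collect();
    // Portion covering bytes 10..40 of the source.
    let mut reader = PartialReader::new(Cursor::new(data), 10, 30);

    // Skip the first 5 bytes of the portion, then drain the rest.
    reader.seek(SeekFrom::Start(5))?;
    let mut rest = Vec::new();
    reader.read_to_end(&mut rest)?;

    assert_eq!(rest, (15..40).collect::<Vec<u8>>()); // bytes 15..40 of the source
    assert!(reader.is_eof());
    Ok(())
}
```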
| feat | add partial reader (#2741) |
| ac3666b84110ae12dc26beeb918b0d1bb3b8bc9a | 2023-05-31 16:25:02 | Ruihang Xia | chore(deps): bump arrow/parquet to 40.0, datafuson to the latest HEAD (#1677) | false |
diff --git a/Cargo.lock b/Cargo.lock
index a2ea7730a194..9574a05311c5 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -106,6 +106,12 @@ dependencies = [
"alloc-no-stdlib",
]
+[[package]]
+name = "android-tzdata"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0"
+
[[package]]
name = "android_system_properties"
version = "0.1.5"
@@ -132,9 +138,9 @@ dependencies = [
[[package]]
name = "anstream"
-version = "0.3.0"
+version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9e579a7752471abc2a8268df8b20005e3eadd975f585398f17efcfd8d4927371"
+checksum = "0ca84f3628370c59db74ee214b3263d58f9aadd9b4fe7e711fd87dc452b7f163"
dependencies = [
"anstyle",
"anstyle-parse",
@@ -171,9 +177,9 @@ dependencies = [
[[package]]
name = "anstyle-wincon"
-version = "1.0.0"
+version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4bcd8291a340dd8ac70e18878bc4501dd7b4ff970cfa21c207d36ece51ea88fd"
+checksum = "180abfa45703aebe0093f79badacc01b8fd4ea2e35118747e5811127f926e188"
dependencies = [
"anstyle",
"windows-sys 0.48.0",
@@ -181,9 +187,9 @@ dependencies = [
[[package]]
name = "anyhow"
-version = "1.0.70"
+version = "1.0.71"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7de8ce5e0f9f8d88245311066a578d72b7af3e7088f32783804676302df237e4"
+checksum = "9c7d0618f0e0b7e8ff11427422b64564d5fb0be1940354bfe2e0529b18a9d9b8"
[[package]]
name = "anymap"
@@ -248,9 +254,9 @@ checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6"
[[package]]
name = "arrow"
-version = "37.0.0"
+version = "40.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1aea9fcb25bbb70f7f922f95b99ca29c1013dab47f6df61a6f24861842dd7f2e"
+checksum = "6619cab21a0cdd8c9b9f1d9e09bfaa9b1974e5ef809a6566aef0b998caf38ace"
dependencies = [
"ahash 0.8.3",
"arrow-arith",
@@ -271,9 +277,9 @@ dependencies = [
[[package]]
name = "arrow-arith"
-version = "37.0.0"
+version = "40.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8d967b42f7b12c91fd78acd396b20c2973b184c8866846674abbb00c963e93ab"
+checksum = "e0dc95485623a76e00929bda8caa40c1f838190952365c4f43a7b9ae86d03e94"
dependencies = [
"arrow-array",
"arrow-buffer",
@@ -286,9 +292,9 @@ dependencies = [
[[package]]
name = "arrow-array"
-version = "37.0.0"
+version = "40.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3190f208ee7aa0f3596fa0098d42911dec5e123ca88c002a08b24877ad14c71e"
+checksum = "3267847f53d3042473cfd2c769afd8d74a6d7d201fc3a34f5cb84c0282ef47a7"
dependencies = [
"ahash 0.8.3",
"arrow-buffer",
@@ -303,9 +309,9 @@ dependencies = [
[[package]]
name = "arrow-buffer"
-version = "37.0.0"
+version = "40.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5d33c733c5b6c44a0fc526f29c09546e04eb56772a7a21e48e602f368be381f6"
+checksum = "c5f66553e66e120ac4b21570368ee9ebf35ff3f5399f872b0667699e145678f5"
dependencies = [
"half 2.2.1",
"num",
@@ -313,9 +319,9 @@ dependencies = [
[[package]]
name = "arrow-cast"
-version = "37.0.0"
+version = "40.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "abd349520b6a1ed4924ae2afc9d23330a3044319e4ec3d5b124c09e4d440ae87"
+checksum = "65e6f3579dbf0d97c683d451b2550062b0f0e62a3169bf74238b5f59f44ad6d8"
dependencies = [
"arrow-array",
"arrow-buffer",
@@ -330,9 +336,9 @@ dependencies = [
[[package]]
name = "arrow-csv"
-version = "37.0.0"
+version = "40.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c80af3c3e290a2a7e1cc518f1471dff331878cb4af9a5b088bf030b89debf649"
+checksum = "373579c4c1a8f5307d3125b7a89c700fcf8caf85821c77eb4baab3855ae0aba5"
dependencies = [
"arrow-array",
"arrow-buffer",
@@ -349,9 +355,9 @@ dependencies = [
[[package]]
name = "arrow-data"
-version = "37.0.0"
+version = "40.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b1c8361947aaa96d331da9df3f7a08bdd8ab805a449994c97f5c4d24c4b7e2cf"
+checksum = "61bc8df9912cca6642665fdf989d6fa0de2570f18a7f709bcf59d29de96d2097"
dependencies = [
"arrow-buffer",
"arrow-schema",
@@ -361,16 +367,16 @@ dependencies = [
[[package]]
name = "arrow-flight"
-version = "37.0.0"
+version = "40.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bd1fc687f3e4ffe91ccb7f2ffb06143ff97029448d427a9641006242bcbd0c24"
+checksum = "6dff9cf247f68541be625fa32a52f9307beae701d1b3a126d1761c605adcd80c"
dependencies = [
"arrow-array",
"arrow-buffer",
"arrow-cast",
"arrow-ipc",
"arrow-schema",
- "base64 0.21.0",
+ "base64 0.21.2",
"bytes",
"futures",
"paste",
@@ -381,9 +387,9 @@ dependencies = [
[[package]]
name = "arrow-ipc"
-version = "37.0.0"
+version = "40.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9a46ee000b9fbd1e8db6e8b26acb8c760838512b39d8c9f9d73892cb55351d50"
+checksum = "0105dcf5f91daa7182d87b713ee0b32b3bfc88e0c48e7dc3e9d6f1277a07d1ae"
dependencies = [
"arrow-array",
"arrow-buffer",
@@ -395,9 +401,9 @@ dependencies = [
[[package]]
name = "arrow-json"
-version = "37.0.0"
+version = "40.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4bf2366607be867ced681ad7f272371a5cf1fc2941328eef7b4fee14565166fb"
+checksum = "e73134fb5b5ec8770f8cbb214c2c487b2d350081e403ca4eeeb6f8f5e19846ac"
dependencies = [
"arrow-array",
"arrow-buffer",
@@ -415,9 +421,9 @@ dependencies = [
[[package]]
name = "arrow-ord"
-version = "37.0.0"
+version = "40.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "304069901c867200e21ec868ae7521165875470ef2f1f6d58f979a443d63997e"
+checksum = "89f25bc66e18d4c2aa1fe2f9bb03e2269da60e636213210385ae41a107f9965a"
dependencies = [
"arrow-array",
"arrow-buffer",
@@ -430,9 +436,9 @@ dependencies = [
[[package]]
name = "arrow-row"
-version = "37.0.0"
+version = "40.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0d57fe8ceef3392fdd493269d8a2d589de17bafce151aacbffbddac7a57f441a"
+checksum = "1095ff85ea4f5ff02d17b30b089de31b51a50be01c6b674f0a0509ab771232f1"
dependencies = [
"ahash 0.8.3",
"arrow-array",
@@ -445,19 +451,19 @@ dependencies = [
[[package]]
name = "arrow-schema"
-version = "37.0.0"
+version = "40.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a16b88a93ac8350f0200b1cd336a1f887315925b8dd7aa145a37b8bdbd8497a4"
+checksum = "25187bbef474151a2e4ddec67b9e34bda5cbfba292dc571392fa3a1f71ff5a82"
dependencies = [
- "bitflags 2.1.0",
+ "bitflags 2.3.1",
"serde",
]
[[package]]
name = "arrow-select"
-version = "37.0.0"
+version = "40.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "98e8a4d6ca37d5212439b24caad4d80743fcbb706706200dd174bb98e68fe9d8"
+checksum = "fd0d4ee884aec3aa05e41478e3cd312bf609de9babb5d187a43fb45931da4da4"
dependencies = [
"arrow-array",
"arrow-buffer",
@@ -468,9 +474,9 @@ dependencies = [
[[package]]
name = "arrow-string"
-version = "37.0.0"
+version = "40.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cbb594efa397eb6a546f42b1f8df3d242ea84dbfda5232e06035dc2b2e2c8459"
+checksum = "d6d71c3ffe4c07e66ce8fdc6aed5b00e0e60c5144911879b10546f5b72d8fa1c"
dependencies = [
"arrow-array",
"arrow-buffer",
@@ -478,7 +484,7 @@ dependencies = [
"arrow-schema",
"arrow-select",
"regex",
- "regex-syntax 0.6.29",
+ "regex-syntax 0.7.2",
]
[[package]]
@@ -539,6 +545,24 @@ dependencies = [
"zstd-safe 5.0.2+zstd.1.5.2",
]
+[[package]]
+name = "async-compression"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5b0122885821398cc923ece939e24d1056a2384ee719432397fa9db87230ff11"
+dependencies = [
+ "bzip2",
+ "flate2",
+ "futures-core",
+ "futures-io",
+ "memchr",
+ "pin-project-lite",
+ "tokio",
+ "xz2",
+ "zstd 0.12.3+zstd.1.5.2",
+ "zstd-safe 6.0.5+zstd.1.5.4",
+]
+
[[package]]
name = "async-io"
version = "1.13.0"
@@ -553,7 +577,7 @@ dependencies = [
"log",
"parking",
"polling",
- "rustix 0.37.14",
+ "rustix 0.37.19",
"slab",
"socket2 0.4.9",
"waker-fn",
@@ -576,7 +600,7 @@ checksum = "0e97ce7de6cf12de5d7226c73f5ba9811622f4db3a5b91b55c53e987e5f91cba"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.15",
+ "syn 2.0.18",
]
[[package]]
@@ -598,7 +622,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.15",
+ "syn 2.0.18",
]
[[package]]
@@ -609,17 +633,14 @@ checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.15",
+ "syn 2.0.18",
]
[[package]]
name = "atomic"
-version = "0.5.1"
+version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b88d82667eca772c4aa12f0f1348b3ae643424c8876448f3f7bd5787032e234c"
-dependencies = [
- "autocfg",
-]
+checksum = "c59bdb34bc650a32731b31bd8f0829cc15d24a708ee31559e0bb34f2bc320cba"
[[package]]
name = "atomic_float"
@@ -652,9 +673,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
[[package]]
name = "axum"
-version = "0.6.16"
+version = "0.6.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "113713495a32dd0ab52baf5c10044725aa3aec00b31beda84218e469029b72a3"
+checksum = "f8175979259124331c1d7bf6586ee7e0da434155e4b2d48ec2c8386281d8df39"
dependencies = [
"async-trait",
"axum-core",
@@ -708,7 +729,7 @@ dependencies = [
"heck",
"proc-macro2",
"quote",
- "syn 2.0.15",
+ "syn 2.0.18",
]
[[package]]
@@ -756,13 +777,13 @@ dependencies = [
[[package]]
name = "backon"
-version = "0.4.0"
+version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f34fac4d7cdaefa2deded0eda2d5d59dbfd43370ff3f856209e72340ae84c294"
+checksum = "0c1a6197b2120bb2185a267f6515038558b019e92b832bb0320e96d66268dcf9"
dependencies = [
- "futures",
+ "fastrand",
+ "futures-core",
"pin-project",
- "rand",
"tokio",
]
@@ -776,7 +797,7 @@ dependencies = [
"cc",
"cfg-if 1.0.0",
"libc",
- "miniz_oxide",
+ "miniz_oxide 0.6.2",
"object",
"rustc-demangle",
]
@@ -789,9 +810,9 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8"
[[package]]
name = "base64"
-version = "0.21.0"
+version = "0.21.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a"
+checksum = "604178f6c5c21f02dc555784810edfb88d34ac2c73b2eae109655649ee73ce3d"
[[package]]
name = "base64ct"
@@ -814,7 +835,7 @@ name = "benchmarks"
version = "0.2.0"
dependencies = [
"arrow",
- "clap 4.2.4",
+ "clap 4.3.0",
"client",
"indicatif",
"itertools",
@@ -824,9 +845,9 @@ dependencies = [
[[package]]
name = "bigdecimal"
-version = "0.3.0"
+version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6aaf33151a6429fe9211d1b276eafdf70cdff28b071e76c0b0e1503221ea3744"
+checksum = "a6773ddc0eafc0e509fb60e48dff7f450f8e674a0686ae8605e8d9901bd5eefa"
dependencies = [
"num-bigint",
"num-integer",
@@ -874,13 +895,13 @@ dependencies = [
"lazy_static",
"lazycell",
"peeking_take_while",
- "prettyplease 0.2.4",
+ "prettyplease 0.2.6",
"proc-macro2",
"quote",
"regex",
"rustc-hash",
"shlex",
- "syn 2.0.15",
+ "syn 2.0.18",
]
[[package]]
@@ -906,9 +927,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]]
name = "bitflags"
-version = "2.1.0"
+version = "2.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c70beb79cbb5ce9c4f8e20849978f34225931f665bb49efa6982875a4d5facb3"
+checksum = "6776fc96284a0bb647b615056fc496d1fe1644a7ab01829818a6d91cae888b84"
[[package]]
name = "bitvec"
@@ -1033,9 +1054,9 @@ dependencies = [
[[package]]
name = "bstr"
-version = "1.4.0"
+version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c3d4260bcc2e8fc9df1eac4919a720effeb63a3f0952f5bf4944adfa18897f09"
+checksum = "a246e68bb43f6cd9db24bea052a53e40405417c5fb372e3d1a8a7f770a564ef5"
dependencies = [
"memchr",
"once_cell",
@@ -1065,15 +1086,15 @@ dependencies = [
[[package]]
name = "bumpalo"
-version = "3.12.1"
+version = "3.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9b1ce199063694f33ffb7dd4e0ee620741495c32833cde5aa08f02a0bf96f0c8"
+checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1"
[[package]]
name = "bytecheck"
-version = "0.6.10"
+version = "0.6.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "13fe11640a23eb24562225322cd3e452b93a3d4091d62fab69c70542fcd17d1f"
+checksum = "8b6372023ac861f6e6dc89c8344a8f398fb42aaba2b5dbc649ca0c0e9dbcb627"
dependencies = [
"bytecheck_derive",
"ptr_meta",
@@ -1082,9 +1103,9 @@ dependencies = [
[[package]]
name = "bytecheck_derive"
-version = "0.6.10"
+version = "0.6.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e31225543cb46f81a7e224762764f4a6a0f097b1db0b175f69e8065efaa42de5"
+checksum = "a7ec4c6f261935ad534c0c22dbef2201b45918860eb1c574b972bd213a76af61"
dependencies = [
"proc-macro2",
"quote",
@@ -1224,7 +1245,7 @@ dependencies = [
"meta-client",
"metrics",
"mito",
- "moka 0.11.0",
+ "moka 0.11.1",
"object-store",
"parking_lot",
"regex",
@@ -1284,16 +1305,16 @@ dependencies = [
[[package]]
name = "chrono"
-version = "0.4.24"
+version = "0.4.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4e3c5919066adf22df73762e50cffcde3a758f2a848b113b586d1f86728b673b"
+checksum = "ec837a71355b28f6556dbd569b37b3f363091c0bd4b2e735674521b4c5fd9bc5"
dependencies = [
+ "android-tzdata",
"iana-time-zone",
"js-sys",
- "num-integer",
"num-traits",
"serde",
- "time 0.1.45",
+ "time 0.1.43",
"wasm-bindgen",
"winapi",
]
@@ -1344,9 +1365,9 @@ dependencies = [
[[package]]
name = "ciborium"
-version = "0.2.0"
+version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b0c137568cc60b904a7724001b35ce2630fd00d5d84805fbb608ab89509d788f"
+checksum = "effd91f6c78e5a4ace8a5d3c0b6bfaec9e2baaef55f3efc00e45fb2e477ee926"
dependencies = [
"ciborium-io",
"ciborium-ll",
@@ -1355,15 +1376,15 @@ dependencies = [
[[package]]
name = "ciborium-io"
-version = "0.2.0"
+version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "346de753af073cc87b52b2083a506b38ac176a44cfb05497b622e27be899b369"
+checksum = "cdf919175532b369853f5d5e20b26b43112613fd6fe7aee757e35f7a44642656"
[[package]]
name = "ciborium-ll"
-version = "0.2.0"
+version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "213030a2b5a4e0c0892b6652260cf6ccac84827b83a85a534e178e3906c4cf1b"
+checksum = "defaa24ecc093c77630e6c15e17c51f5e187bf35ee514f4e2d67baaa96dae22b"
dependencies = [
"ciborium-io",
"half 1.8.2",
@@ -1397,13 +1418,13 @@ dependencies = [
[[package]]
name = "clap"
-version = "3.2.23"
+version = "3.2.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "71655c45cb9845d3270c9d6df84ebe72b4dad3c2ba3f7023ad47c144e4e473a5"
+checksum = "4ea181bf566f71cb9a5d17a59e1871af638180a18fb0035c92ae62b705207123"
dependencies = [
"atty",
"bitflags 1.3.2",
- "clap_derive 3.2.18",
+ "clap_derive 3.2.25",
"clap_lex 0.2.4",
"indexmap",
"once_cell",
@@ -1414,33 +1435,33 @@ dependencies = [
[[package]]
name = "clap"
-version = "4.2.4"
+version = "4.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "956ac1f6381d8d82ab4684768f89c0ea3afe66925ceadb4eeb3fc452ffc55d62"
+checksum = "93aae7a4192245f70fe75dd9157fc7b4a5bf53e88d30bd4396f7d8f9284d5acc"
dependencies = [
"clap_builder",
- "clap_derive 4.2.0",
+ "clap_derive 4.3.0",
"once_cell",
]
[[package]]
name = "clap_builder"
-version = "4.2.4"
+version = "4.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "84080e799e54cff944f4b4a4b0e71630b0e0443b25b985175c7dddc1a859b749"
+checksum = "4f423e341edefb78c9caba2d9c7f7687d0e72e89df3ce3394554754393ac3990"
dependencies = [
"anstream",
"anstyle",
"bitflags 1.3.2",
- "clap_lex 0.4.1",
+ "clap_lex 0.5.0",
"strsim 0.10.0",
]
[[package]]
name = "clap_derive"
-version = "3.2.18"
+version = "3.2.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ea0c8bce528c4be4da13ea6fead8965e95b6073585a2f05204bd8f4119f82a65"
+checksum = "ae6371b8bdc8b7d3959e9cf7b22d4435ef3e79e138688421ec654acf8c81b008"
dependencies = [
"heck",
"proc-macro-error",
@@ -1451,14 +1472,14 @@ dependencies = [
[[package]]
name = "clap_derive"
-version = "4.2.0"
+version = "4.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3f9644cd56d6b87dbe899ef8b053e331c0637664e9e21a33dfcdc36093f5c5c4"
+checksum = "191d9573962933b4027f932c600cd252ce27a8ad5979418fe78e43c07996f27b"
dependencies = [
"heck",
"proc-macro2",
"quote",
- "syn 2.0.15",
+ "syn 2.0.18",
]
[[package]]
@@ -1472,9 +1493,9 @@ dependencies = [
[[package]]
name = "clap_lex"
-version = "0.4.1"
+version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8a2dd5a6fe8c6e3502f568a6353e5273bbb15193ad9a89e457b9970798efbea1"
+checksum = "2da6da31387c7e4ef160ffab6d5e7f00c42626fe39aea70a7b0f1773f7dd6c1b"
[[package]]
name = "client"
@@ -1542,7 +1563,7 @@ dependencies = [
"anymap",
"build-data",
"catalog",
- "clap 3.2.23",
+ "clap 3.2.25",
"client",
"common-base",
"common-error",
@@ -1574,16 +1595,6 @@ dependencies = [
"toml",
]
-[[package]]
-name = "codespan-reporting"
-version = "0.11.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3538270d33cc669650c4b093848450d380def10c331d38c768e34cac80576e6e"
-dependencies = [
- "termcolor",
- "unicode-width",
-]
-
[[package]]
name = "colorchoice"
version = "1.0.0"
@@ -1592,9 +1603,9 @@ checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7"
[[package]]
name = "comfy-table"
-version = "6.1.4"
+version = "6.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6e7b787b0dc42e8111badfdbe4c3059158ccb2db8780352fa1b01e8ccf45cc4d"
+checksum = "7e959d788268e3bf9d35ace83e81b124190378e4c91c9067524675e33394b8ba"
dependencies = [
"strum",
"strum_macros",
@@ -1644,7 +1655,7 @@ version = "0.2.0"
dependencies = [
"arrow",
"arrow-schema",
- "async-compression",
+ "async-compression 0.3.15",
"async-trait",
"bytes",
"common-base",
@@ -1796,7 +1807,7 @@ version = "0.2.0"
dependencies = [
"async-stream",
"async-trait",
- "backon 0.4.0",
+ "backon 0.4.1",
"common-error",
"common-runtime",
"common-telemetry",
@@ -1947,34 +1958,34 @@ dependencies = [
[[package]]
name = "console"
-version = "0.15.5"
+version = "0.15.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c3d79fbe8970a77e3e34151cc13d3b3e248aa0faaecb9f6091fa07ebefe5ad60"
+checksum = "c926e00cc70edefdc64d3a5ff31cc65bb97a3460097762bd23afb4d8145fccf8"
dependencies = [
"encode_unicode",
"lazy_static",
"libc",
"unicode-width",
- "windows-sys 0.42.0",
+ "windows-sys 0.45.0",
]
[[package]]
name = "console-api"
-version = "0.4.0"
+version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e57ff02e8ad8e06ab9731d5dc72dc23bef9200778eae1a89d555d8c42e5d4a86"
+checksum = "c2895653b4d9f1538a83970077cb01dfc77a4810524e51a110944688e916b18e"
dependencies = [
"prost",
"prost-types",
- "tonic 0.8.3",
+ "tonic 0.9.2",
"tracing-core",
]
[[package]]
name = "console-subscriber"
-version = "0.1.8"
+version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "22a3a81dfaf6b66bce5d159eddae701e3a002f194d378cbf7be5f053c281d9be"
+checksum = "57ab2224a0311582eb03adba4caaf18644f7b1f10a760803a803b9b605187fc7"
dependencies = [
"console-api",
"crossbeam-channel",
@@ -1988,7 +1999,7 @@ dependencies = [
"thread_local",
"tokio",
"tokio-stream",
- "tonic 0.8.3",
+ "tonic 0.9.2",
"tracing",
"tracing-core",
"tracing-subscriber",
@@ -2113,7 +2124,7 @@ dependencies = [
"atty",
"cast",
"ciborium",
- "clap 3.2.23",
+ "clap 3.2.25",
"criterion-plot 0.5.0",
"futures",
"itertools",
@@ -2256,92 +2267,73 @@ dependencies = [
]
[[package]]
-name = "ctor"
-version = "0.2.0"
+name = "darling"
+version = "0.14.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dd4056f63fce3b82d852c3da92b08ea59959890813a7f4ce9c0ff85b10cf301b"
+checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850"
dependencies = [
- "quote",
- "syn 2.0.15",
+ "darling_core 0.14.4",
+ "darling_macro 0.14.4",
]
[[package]]
-name = "cxx"
-version = "1.0.94"
+name = "darling"
+version = "0.20.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f61f1b6389c3fe1c316bf8a4dccc90a38208354b330925bce1f74a6c4756eb93"
+checksum = "0558d22a7b463ed0241e993f76f09f30b126687447751a8638587b864e4b3944"
dependencies = [
- "cc",
- "cxxbridge-flags",
- "cxxbridge-macro",
- "link-cplusplus",
+ "darling_core 0.20.1",
+ "darling_macro 0.20.1",
]
[[package]]
-name = "cxx-build"
-version = "1.0.94"
+name = "darling_core"
+version = "0.14.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "12cee708e8962df2aeb38f594aae5d827c022b6460ac71a7a3e2c3c2aae5a07b"
+checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0"
dependencies = [
- "cc",
- "codespan-reporting",
- "once_cell",
+ "fnv",
+ "ident_case",
"proc-macro2",
"quote",
- "scratch",
- "syn 2.0.15",
+ "strsim 0.10.0",
+ "syn 1.0.109",
]
[[package]]
-name = "cxxbridge-flags"
-version = "1.0.94"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7944172ae7e4068c533afbb984114a56c46e9ccddda550499caa222902c7f7bb"
-
-[[package]]
-name = "cxxbridge-macro"
-version = "1.0.94"
+name = "darling_core"
+version = "0.20.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2345488264226bf682893e25de0769f3360aac9957980ec49361b083ddaa5bc5"
+checksum = "ab8bfa2e259f8ee1ce5e97824a3c55ec4404a0d772ca7fa96bf19f0752a046eb"
dependencies = [
+ "fnv",
+ "ident_case",
"proc-macro2",
"quote",
- "syn 2.0.15",
-]
-
-[[package]]
-name = "darling"
-version = "0.14.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850"
-dependencies = [
- "darling_core",
- "darling_macro",
+ "strsim 0.10.0",
+ "syn 2.0.18",
]
[[package]]
-name = "darling_core"
+name = "darling_macro"
version = "0.14.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0"
+checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e"
dependencies = [
- "fnv",
- "ident_case",
- "proc-macro2",
+ "darling_core 0.14.4",
"quote",
- "strsim 0.10.0",
"syn 1.0.109",
]
[[package]]
name = "darling_macro"
-version = "0.14.4"
+version = "0.20.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e"
+checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a"
dependencies = [
- "darling_core",
+ "darling_core 0.20.1",
"quote",
- "syn 1.0.109",
+ "syn 2.0.18",
]
[[package]]
@@ -2359,14 +2351,14 @@ dependencies = [
[[package]]
name = "datafusion"
-version = "22.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5337c86120de8193406b59be7612484796a46294#5337c86120de8193406b59be7612484796a46294"
+version = "25.0.0"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323#63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323"
dependencies = [
"ahash 0.8.3",
"arrow",
"arrow-array",
"arrow-schema",
- "async-compression",
+ "async-compression 0.4.0",
"async-trait",
"bytes",
"bzip2",
@@ -2408,8 +2400,8 @@ dependencies = [
[[package]]
name = "datafusion-common"
-version = "22.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5337c86120de8193406b59be7612484796a46294#5337c86120de8193406b59be7612484796a46294"
+version = "25.0.0"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323#63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323"
dependencies = [
"arrow",
"arrow-array",
@@ -2422,8 +2414,8 @@ dependencies = [
[[package]]
name = "datafusion-execution"
-version = "22.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5337c86120de8193406b59be7612484796a46294#5337c86120de8193406b59be7612484796a46294"
+version = "25.0.0"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323#63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323"
dependencies = [
"dashmap",
"datafusion-common",
@@ -2439,19 +2431,20 @@ dependencies = [
[[package]]
name = "datafusion-expr"
-version = "22.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5337c86120de8193406b59be7612484796a46294#5337c86120de8193406b59be7612484796a46294"
+version = "25.0.0"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323#63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323"
dependencies = [
"ahash 0.8.3",
"arrow",
"datafusion-common",
+ "lazy_static",
"sqlparser",
]
[[package]]
name = "datafusion-optimizer"
-version = "22.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5337c86120de8193406b59be7612484796a46294#5337c86120de8193406b59be7612484796a46294"
+version = "25.0.0"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323#63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323"
dependencies = [
"arrow",
"async-trait",
@@ -2462,13 +2455,13 @@ dependencies = [
"hashbrown 0.13.2",
"itertools",
"log",
- "regex-syntax 0.6.29",
+ "regex-syntax 0.7.2",
]
[[package]]
name = "datafusion-physical-expr"
-version = "22.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5337c86120de8193406b59be7612484796a46294#5337c86120de8193406b59be7612484796a46294"
+version = "25.0.0"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323#63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323"
dependencies = [
"ahash 0.8.3",
"arrow",
@@ -2486,6 +2479,7 @@ dependencies = [
"indexmap",
"itertools",
"lazy_static",
+ "libc",
"md-5",
"paste",
"petgraph",
@@ -2498,8 +2492,8 @@ dependencies = [
[[package]]
name = "datafusion-row"
-version = "22.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5337c86120de8193406b59be7612484796a46294#5337c86120de8193406b59be7612484796a46294"
+version = "25.0.0"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323#63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323"
dependencies = [
"arrow",
"datafusion-common",
@@ -2509,8 +2503,8 @@ dependencies = [
[[package]]
name = "datafusion-sql"
-version = "22.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5337c86120de8193406b59be7612484796a46294#5337c86120de8193406b59be7612484796a46294"
+version = "25.0.0"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323#63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323"
dependencies = [
"arrow",
"arrow-schema",
@@ -2522,8 +2516,8 @@ dependencies = [
[[package]]
name = "datafusion-substrait"
-version = "22.0.0"
-source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=5337c86120de8193406b59be7612484796a46294#5337c86120de8193406b59be7612484796a46294"
+version = "25.0.0"
+source = "git+https://github.com/waynexia/arrow-datafusion.git?rev=63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323#63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323"
dependencies = [
"async-recursion",
"chrono",
@@ -2531,7 +2525,8 @@ dependencies = [
"itertools",
"object_store",
"prost",
- "substrait 0.7.5",
+ "prost-types",
+ "substrait 0.10.0",
"tokio",
]
@@ -2629,9 +2624,9 @@ dependencies = [
[[package]]
name = "der"
-version = "0.7.4"
+version = "0.7.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "86b14af2045fa69ed2b7a48934bebb842d0f33e73e96e78766ecb14bb5347a11"
+checksum = "56acb310e15652100da43d130af8d97b509e95af61aab1c5a7939ef24337ee17"
dependencies = [
"const-oid",
"pem-rfc7468",
@@ -2673,7 +2668,7 @@ version = "0.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1f91d4cfa921f1c05904dc3c57b4a32c38aed3340cce209f3a6fd1478babafc4"
dependencies = [
- "darling",
+ "darling 0.14.4",
"proc-macro2",
"quote",
"syn 1.0.109",
@@ -2685,7 +2680,7 @@ version = "0.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c11bdc11a0c47bc7d37d582b5285da6849c96681023680b906673c5707af7b0f"
dependencies = [
- "darling",
+ "darling 0.14.4",
"proc-macro2",
"quote",
"syn 1.0.109",
@@ -2719,9 +2714,9 @@ checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8"
[[package]]
name = "digest"
-version = "0.10.6"
+version = "0.10.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f"
+checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292"
dependencies = [
"block-buffer",
"const-oid",
@@ -2859,22 +2854,22 @@ checksum = "c34f04666d835ff5d62e058c3995147c06f42fe86ff053337632bca83e42702d"
[[package]]
name = "enum-iterator"
-version = "1.4.0"
+version = "1.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "706d9e7cf1c7664859d79cd524e4e53ea2b67ea03c98cc2870c5e539695d597e"
+checksum = "7add3873b5dd076766ee79c8e406ad1a472c385476b9e38849f8eec24f1be689"
dependencies = [
"enum-iterator-derive",
]
[[package]]
name = "enum-iterator-derive"
-version = "1.2.0"
+version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "355f93763ef7b0ae1c43c4d8eccc9d5848d84ad1a1d8ce61c421d1ac85a19d05"
+checksum = "eecf8589574ce9b895052fa12d69af7a233f99e6107f5cb8dd1044f2a17bfdcb"
dependencies = [
"proc-macro2",
"quote",
- "syn 1.0.109",
+ "syn 2.0.18",
]
[[package]]
@@ -3010,7 +3005,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "39ae6b3d9530211fb3b12a95374b8b0823be812f53d09e18c5675c0146b09642"
dependencies = [
"cfg-if 1.0.0",
- "rustix 0.37.14",
+ "rustix 0.37.19",
"windows-sys 0.48.0",
]
@@ -3078,13 +3073,13 @@ dependencies = [
[[package]]
name = "flate2"
-version = "1.0.25"
+version = "1.0.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a8a2db397cb1c8772f31494cb8917e48cd1e64f0fa7efac59fbd741a0a8ce841"
+checksum = "3b9429470923de8e8cbd4d2dc513535400b4b3fef0319fb5c4e1f520a7bef743"
dependencies = [
"crc32fast",
"libz-sys",
- "miniz_oxide",
+ "miniz_oxide 0.7.1",
]
[[package]]
@@ -3327,7 +3322,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.15",
+ "syn 2.0.18",
]
[[package]]
@@ -3428,7 +3423,7 @@ checksum = "e77ac7b51b8e6313251737fcef4b1c01a2ea102bde68415b62c0ee9268fec357"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.15",
+ "syn 2.0.18",
]
[[package]]
@@ -3437,6 +3432,19 @@ version = "0.27.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ad0a93d233ebf96623465aad4046a8d3aa4da22d4f4beba5388838c8a434bbb4"
+[[package]]
+name = "git2"
+version = "0.17.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7b989d6a7ca95a362cf2cfc5ad688b3a467be1f87e480b8dad07fee8c79b0044"
+dependencies = [
+ "bitflags 1.3.2",
+ "libc",
+ "libgit2-sys",
+ "log",
+ "url",
+]
+
[[package]]
name = "gix"
version = "0.43.1"
@@ -3450,9 +3458,9 @@ dependencies = [
"gix-date",
"gix-diff",
"gix-discover",
- "gix-features",
+ "gix-features 0.28.1",
"gix-glob",
- "gix-hash",
+ "gix-hash 0.10.4",
"gix-hashtable",
"gix-index",
"gix-lock",
@@ -3485,7 +3493,7 @@ version = "0.19.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dc22b0cdc52237667c301dd7cdc6ead8f8f73c9f824e9942c8ebd6b764f6c0bf"
dependencies = [
- "bstr 1.4.0",
+ "bstr 1.5.0",
"btoi",
"gix-date",
"itoa",
@@ -3499,8 +3507,8 @@ version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2231a25934a240d0a4b6f4478401c73ee81d8be52de0293eedbc172334abf3e1"
dependencies = [
- "bstr 1.4.0",
- "gix-features",
+ "bstr 1.5.0",
+ "gix-features 0.28.1",
"gix-glob",
"gix-path",
"gix-quote",
@@ -3510,9 +3518,9 @@ dependencies = [
[[package]]
name = "gix-bitmap"
-version = "0.2.2"
+version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "024bca0c7187517bda5ea24ab148c9ca8208dd0c3e2bea88cdb2008f91791a6d"
+checksum = "55a95f4942360766c3880bdb2b4b57f1ef73b190fc424755e7fdf480430af618"
dependencies = [
"thiserror",
]
@@ -3532,7 +3540,7 @@ version = "0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b2c6f75c1e0f924de39e750880a6e21307194bb1ab773efe3c7d2d787277f8ab"
dependencies = [
- "bstr 1.4.0",
+ "bstr 1.5.0",
]
[[package]]
@@ -3541,9 +3549,9 @@ version = "0.20.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7fbad5ce54a8fc997acc50febd89ec80fa6e97cb7f8d0654cb229936407489d8"
dependencies = [
- "bstr 1.4.0",
+ "bstr 1.5.0",
"gix-config-value",
- "gix-features",
+ "gix-features 0.28.1",
"gix-glob",
"gix-path",
"gix-ref",
@@ -3564,7 +3572,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d09154c0c8677e4da0ec35e896f56ee3e338e741b9599fae06075edd83a4081c"
dependencies = [
"bitflags 1.3.2",
- "bstr 1.4.0",
+ "bstr 1.5.0",
"gix-path",
"libc",
"thiserror",
@@ -3576,7 +3584,7 @@ version = "0.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "750b684197374518ea057e0a0594713e07683faa0a3f43c0f93d97f64130ad8d"
dependencies = [
- "bstr 1.4.0",
+ "bstr 1.5.0",
"gix-command",
"gix-config-value",
"gix-path",
@@ -3592,10 +3600,10 @@ version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b96271912ce39822501616f177dea7218784e6c63be90d5f36322ff3a722aae2"
dependencies = [
- "bstr 1.4.0",
+ "bstr 1.5.0",
"itoa",
"thiserror",
- "time 0.3.20",
+ "time 0.3.21",
]
[[package]]
@@ -3604,7 +3612,7 @@ version = "0.28.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "103a0fa79b0d438f5ecb662502f052e530ace4fe1fe8e1c83c0c6da76d728e67"
dependencies = [
- "gix-hash",
+ "gix-hash 0.10.4",
"gix-object",
"imara-diff",
"thiserror",
@@ -3616,9 +3624,9 @@ version = "0.16.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6eba8ba458cb8f4a6c33409b0fe650b1258655175a7ffd1d24fafd3ed31d880b"
dependencies = [
- "bstr 1.4.0",
+ "bstr 1.5.0",
"dunce",
- "gix-hash",
+ "gix-hash 0.10.4",
"gix-path",
"gix-ref",
"gix-sec",
@@ -3633,7 +3641,7 @@ checksum = "0b76f9a80f6dd7be66442ae86e1f534effad9546676a392acc95e269d0c21c22"
dependencies = [
"crc32fast",
"flate2",
- "gix-hash",
+ "gix-hash 0.10.4",
"libc",
"once_cell",
"prodash",
@@ -3642,6 +3650,25 @@ dependencies = [
"walkdir",
]
+[[package]]
+name = "gix-features"
+version = "0.29.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cf69b0f5c701cc3ae22d3204b671907668f6437ca88862d355eaf9bc47a4f897"
+dependencies = [
+ "gix-hash 0.11.1",
+ "libc",
+]
+
+[[package]]
+name = "gix-fs"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9b37a1832f691fdc09910bd267f9a2e413737c1f9ec68c6e31f9e802616278a9"
+dependencies = [
+ "gix-features 0.29.0",
+]
+
[[package]]
name = "gix-glob"
version = "0.5.5"
@@ -3649,7 +3676,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "93e43efd776bc543f46f0fd0ca3d920c37af71a764a16f2aebd89765e9ff2993"
dependencies = [
"bitflags 1.3.2",
- "bstr 1.4.0",
+ "bstr 1.5.0",
]
[[package]]
@@ -3662,13 +3689,23 @@ dependencies = [
"thiserror",
]
+[[package]]
+name = "gix-hash"
+version = "0.11.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "078eec3ac2808cc03f0bddd2704cb661da5c5dc33b41a9d7947b141d499c7c42"
+dependencies = [
+ "hex",
+ "thiserror",
+]
+
[[package]]
name = "gix-hashtable"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e4e55e40dfd694884f0eb78796c5bddcf2f8b295dace47039099dd7e76534973"
dependencies = [
- "gix-hash",
+ "gix-hash 0.10.4",
"hashbrown 0.13.2",
"parking_lot",
]
@@ -3680,12 +3717,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "717ab601ece7921f59fe86849dbe27d44a46ebb883b5885732c4f30df4996177"
dependencies = [
"bitflags 1.3.2",
- "bstr 1.4.0",
+ "bstr 1.5.0",
"btoi",
"filetime",
"gix-bitmap",
- "gix-features",
- "gix-hash",
+ "gix-features 0.28.1",
+ "gix-hash 0.10.4",
"gix-lock",
"gix-object",
"gix-traverse",
@@ -3697,12 +3734,12 @@ dependencies = [
[[package]]
name = "gix-lock"
-version = "5.0.0"
+version = "5.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "41b80172055c5d8017a48ddac5cc7a95421c00211047db0165c97853c4f05194"
+checksum = "2c693d7f05730fa74a7c467150adc7cea393518410c65f0672f80226b8111555"
dependencies = [
- "fastrand",
"gix-tempfile",
+ "gix-utils",
"thiserror",
]
@@ -3712,7 +3749,7 @@ version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2b66aea5e52875cd4915f4957a6f4b75831a36981e2ec3f5fad9e370e444fe1a"
dependencies = [
- "bstr 1.4.0",
+ "bstr 1.5.0",
"gix-actor",
"thiserror",
]
@@ -3723,11 +3760,11 @@ version = "0.28.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8df068db9180ee935fbb70504848369e270bdcb576b05c0faa8b9fd3b86fc017"
dependencies = [
- "bstr 1.4.0",
+ "bstr 1.5.0",
"btoi",
"gix-actor",
- "gix-features",
- "gix-hash",
+ "gix-features 0.28.1",
+ "gix-hash 0.10.4",
"gix-validate",
"hex",
"itoa",
@@ -3743,8 +3780,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e83af2e3e36005bfe010927f0dff41fb5acc3e3d89c6f1174135b3a34086bda2"
dependencies = [
"arc-swap",
- "gix-features",
- "gix-hash",
+ "gix-features 0.28.1",
+ "gix-hash 0.10.4",
"gix-object",
"gix-pack",
"gix-path",
@@ -3763,8 +3800,8 @@ dependencies = [
"clru",
"gix-chunk",
"gix-diff",
- "gix-features",
- "gix-hash",
+ "gix-features 0.28.1",
+ "gix-hash 0.10.4",
"gix-hashtable",
"gix-object",
"gix-path",
@@ -3782,7 +3819,7 @@ version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "32370dce200bb951df013e03dff35b4233fc7a89458642b047629b91734a7e19"
dependencies = [
- "bstr 1.4.0",
+ "bstr 1.5.0",
"thiserror",
]
@@ -3805,7 +3842,7 @@ version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a282f5a8d9ee0b09ec47390ac727350c48f2f5c76d803cd8da6b3e7ad56e0bcb"
dependencies = [
- "bstr 1.4.0",
+ "bstr 1.5.0",
"btoi",
"thiserror",
]
@@ -3817,8 +3854,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e4e909396ed3b176823991ccc391c276ae2a015e54edaafa3566d35123cfac9d"
dependencies = [
"gix-actor",
- "gix-features",
- "gix-hash",
+ "gix-features 0.28.1",
+ "gix-hash 0.10.4",
"gix-lock",
"gix-object",
"gix-path",
@@ -3835,8 +3872,8 @@ version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "aba332462bda2e8efeae4302b39a6ed01ad56ef772fd5b7ef197cf2798294d65"
dependencies = [
- "bstr 1.4.0",
- "gix-hash",
+ "bstr 1.5.0",
+ "gix-hash 0.10.4",
"gix-revision",
"gix-validate",
"smallvec",
@@ -3849,9 +3886,9 @@ version = "0.12.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3c6f6ff53f888858afc24bf12628446a14279ceec148df6194481f306f553ad2"
dependencies = [
- "bstr 1.4.0",
+ "bstr 1.5.0",
"gix-date",
- "gix-hash",
+ "gix-hash 0.10.4",
"gix-hashtable",
"gix-object",
"thiserror",
@@ -3872,10 +3909,11 @@ dependencies = [
[[package]]
name = "gix-tempfile"
-version = "5.0.2"
+version = "5.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c2ceb30a610e3f5f2d5f9a5114689fde507ba9417705a8cf3429604275b2153c"
+checksum = "d71a0d32f34e71e86586124225caefd78dabc605d0486de580d717653addf182"
dependencies = [
+ "gix-fs",
"libc",
"once_cell",
"parking_lot",
@@ -3890,7 +3928,7 @@ version = "0.24.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dd9a4a07bb22168dc79c60e1a6a41919d198187ca83d8a5940ad8d7122a45df3"
dependencies = [
- "gix-hash",
+ "gix-hash 0.10.4",
"gix-hashtable",
"gix-object",
"thiserror",
@@ -3902,21 +3940,30 @@ version = "0.16.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b6a22b4b32ad14d68f7b7fb6458fa58d44b01797d94c1b8f4db2d9c7b3c366b5"
dependencies = [
- "bstr 1.4.0",
- "gix-features",
+ "bstr 1.5.0",
+ "gix-features 0.28.1",
"gix-path",
"home",
"thiserror",
"url",
]
+[[package]]
+name = "gix-utils"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c10b69beac219acb8df673187a1f07dde2d74092f974fb3f9eb385aeb667c909"
+dependencies = [
+ "fastrand",
+]
+
[[package]]
name = "gix-validate"
version = "0.7.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7bd629d3680773e1785e585d76fd4295b740b559cad9141517300d99a0c8c049"
dependencies = [
- "bstr 1.4.0",
+ "bstr 1.5.0",
"thiserror",
]
@@ -3926,11 +3973,11 @@ version = "0.15.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "54ec9a000b4f24af706c3cc680c7cda235656cbe3216336522f5692773b8a301"
dependencies = [
- "bstr 1.4.0",
+ "bstr 1.5.0",
"gix-attributes",
- "gix-features",
+ "gix-features 0.28.1",
"gix-glob",
- "gix-hash",
+ "gix-hash 0.10.4",
"gix-index",
"gix-object",
"gix-path",
@@ -3958,9 +4005,9 @@ dependencies = [
[[package]]
name = "h2"
-version = "0.3.18"
+version = "0.3.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "17f8a914c2987b688368b5138aa05321db91f4090cf26118185672ad588bce21"
+checksum = "d357c7ae988e7d2182f7d7871d0b963962420b0678b0997ce7de72001aeab782"
dependencies = [
"bytes",
"fnv",
@@ -4075,11 +4122,11 @@ dependencies = [
[[package]]
name = "home"
-version = "0.5.4"
+version = "0.5.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "747309b4b440c06d57b0b25f2aee03ee9b5e5397d288c60e21fc709bb98a7408"
+checksum = "5444c27eef6923071f7ebcc33e3444508466a76f7a2b93da00ed6e19f30c1ddb"
dependencies = [
- "winapi",
+ "windows-sys 0.48.0",
]
[[package]]
@@ -4164,15 +4211,15 @@ dependencies = [
[[package]]
name = "hyper-rustls"
-version = "0.23.2"
+version = "0.24.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1788965e61b367cd03a62950836d5cd41560c3577d90e40e0819373194d1661c"
+checksum = "0646026eb1b3eea4cd9ba47912ea5ce9cc07713d105b1a14698f4e6433d348b7"
dependencies = [
"http",
"hyper",
- "rustls 0.20.8",
+ "rustls",
"tokio",
- "tokio-rustls 0.23.4",
+ "tokio-rustls",
]
[[package]]
@@ -4203,12 +4250,11 @@ dependencies = [
[[package]]
name = "iana-time-zone-haiku"
-version = "0.1.1"
+version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0703ae284fc167426161c2e3f1da3ea71d94b21bedbcc9494e92b28e334e3dca"
+checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f"
dependencies = [
- "cxx",
- "cxx-build",
+ "cc",
]
[[package]]
@@ -4256,13 +4302,14 @@ dependencies = [
[[package]]
name = "indicatif"
-version = "0.17.3"
+version = "0.17.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cef509aa9bc73864d6756f0d34d35504af3cf0844373afe9b8669a5b8005a729"
+checksum = "db45317f37ef454e6519b6c3ed7d377e5f23346f0823f86e65ca36912d1d0ef8"
dependencies = [
"console",
+ "instant",
"number_prefix",
- "portable-atomic",
+ "portable-atomic 1.3.3",
"unicode-width",
]
@@ -4304,11 +4351,10 @@ dependencies = [
[[package]]
name = "inventory"
-version = "0.3.5"
+version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7741301a6d6a9b28ce77c0fb77a4eb116b6bc8f3bef09923f7743d059c4157d3"
+checksum = "e0539b5de9241582ce6bd6b0ba7399313560151e58c9aaf8b74b711b1bdce644"
dependencies = [
- "ctor",
"ghost",
]
@@ -4324,9 +4370,9 @@ dependencies = [
[[package]]
name = "io-lifetimes"
-version = "1.0.10"
+version = "1.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9c66c74d2ae7e79a5a8f7ac924adbe38ee42a859c6539ad869eb51f0b52dc220"
+checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2"
dependencies = [
"hermit-abi 0.3.1",
"libc",
@@ -4369,7 +4415,7 @@ checksum = "adcf93614601c8129ddf72e2d5633df827ba6551541c6d8c59520a371475be1f"
dependencies = [
"hermit-abi 0.3.1",
"io-lifetimes",
- "rustix 0.37.14",
+ "rustix 0.37.19",
"windows-sys 0.48.0",
]
@@ -4399,9 +4445,9 @@ dependencies = [
[[package]]
name = "js-sys"
-version = "0.3.61"
+version = "0.3.63"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "445dde2150c55e483f3d8416706b97ec8e8237c307e5b7b4b8dd15e6af2a0730"
+checksum = "2f37a4a5928311ac501dee68b3c7613a1037d0edb30c8e5427bd832d55d1b790"
dependencies = [
"wasm-bindgen",
]
@@ -4423,7 +4469,7 @@ version = "8.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378"
dependencies = [
- "base64 0.21.0",
+ "base64 0.21.2",
"pem 1.1.1",
"ring",
"serde",
@@ -4433,9 +4479,9 @@ dependencies = [
[[package]]
name = "keccak"
-version = "0.1.3"
+version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3afef3b6eff9ce9d8ff9b3601125eec7f0c8cbac7abd14f355d053fa56c98768"
+checksum = "8f6d5ed8676d904364de097082f4e7d240b571b67989ced0240f08b7f966f940"
dependencies = [
"cpufeatures",
]
@@ -4451,9 +4497,9 @@ dependencies = [
[[package]]
name = "lalrpop"
-version = "0.19.9"
+version = "0.19.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f34313ec00c2eb5c3c87ca6732ea02dcf3af99c3ff7a8fb622ffb99c9d860a87"
+checksum = "0a1cbf952127589f2851ab2046af368fd20645491bb4b376f04b7f94d7a9837b"
dependencies = [
"ascii-canvas",
"bit-set",
@@ -4463,7 +4509,6 @@ dependencies = [
"itertools",
"lalrpop-util",
"petgraph",
- "pico-args",
"regex",
"regex-syntax 0.6.29",
"string_cache",
@@ -4474,9 +4519,9 @@ dependencies = [
[[package]]
name = "lalrpop-util"
-version = "0.19.9"
+version = "0.19.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e5c1f7869c94d214466c5fd432dfed12c379fd87786768d36455892d46b18edd"
+checksum = "d3c48237b9604c5a4702de6b824e02006c3214327564636aef27c1028a8fa0ed"
dependencies = [
"regex",
]
@@ -4571,9 +4616,21 @@ dependencies = [
[[package]]
name = "libc"
-version = "0.2.142"
+version = "0.2.144"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2b00cc1c228a6782d0f076e7b232802e0c5689d41bb5df366f2a6b6621cfdfe1"
+
+[[package]]
+name = "libgit2-sys"
+version = "0.15.2+1.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6a987beff54b60ffa6d51982e1aa1146bc42f19bd26be28b0586f252fccf5317"
+checksum = "a80df2e11fb4a61f4ba2ab42dbe7f74468da143f1a75c74e11dee7c813f694fa"
+dependencies = [
+ "cc",
+ "libc",
+ "libz-sys",
+ "pkg-config",
+]
[[package]]
name = "libloading"
@@ -4587,9 +4644,9 @@ dependencies = [
[[package]]
name = "libm"
-version = "0.2.6"
+version = "0.2.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "348108ab3fba42ec82ff6e9564fc4ca0247bdccdc68dd8af9764bbc79c3c8ffb"
+checksum = "f7012b1bbb0719e1097c47611d3898568c546d597c2e74d66f6087edd5233ff4"
[[package]]
name = "libproc"
@@ -4615,24 +4672,16 @@ dependencies = [
[[package]]
name = "libz-sys"
-version = "1.1.8"
+version = "1.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9702761c3935f8cc2f101793272e202c72b99da8f4224a19ddcf1279a6450bbf"
+checksum = "56ee889ecc9568871456d42f603d6a0ce59ff328d291063a45cbdf0036baf6db"
dependencies = [
"cc",
+ "libc",
"pkg-config",
"vcpkg",
]
-[[package]]
-name = "link-cplusplus"
-version = "1.0.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ecd207c9c713c34f95a097a5b029ac2ce6010530c7b49d7fea24d977dede04f5"
-dependencies = [
- "cc",
-]
-
[[package]]
name = "linked-hash-map"
version = "0.5.6"
@@ -4647,9 +4696,9 @@ checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4"
[[package]]
name = "linux-raw-sys"
-version = "0.3.4"
+version = "0.3.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "36eb31c1778188ae1e64398743890d0877fef36d11521ac60406b42016e8c2cf"
+checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519"
[[package]]
name = "lock_api"
@@ -4663,12 +4712,9 @@ dependencies = [
[[package]]
name = "log"
-version = "0.4.17"
+version = "0.4.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e"
-dependencies = [
- "cfg-if 1.0.0",
-]
+checksum = "518ef76f2f87365916b142844c16d8fefd85039bc5699050210a7778ee1cd1de"
[[package]]
name = "log-store"
@@ -4812,9 +4858,9 @@ dependencies = [
[[package]]
name = "mac_address"
-version = "1.1.4"
+version = "1.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b238e3235c8382b7653c6408ed1b08dd379bdb9fdf990fb0bbae3db2cc0ae963"
+checksum = "4863ee94f19ed315bf3bc00299338d857d4b5bc856af375cc97d237382ad3856"
dependencies = [
"nix 0.23.2",
"winapi",
@@ -4867,10 +4913,11 @@ checksum = "b87248edafb776e59e6ee64a79086f65890d3510f2c656c000bf2a7e8a0aea40"
[[package]]
name = "matrixmultiply"
-version = "0.3.3"
+version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bb99c395ae250e1bf9133673f03ca9f97b7e71b705436bf8f089453445d1e9fe"
+checksum = "090126dc04f95dc0d1c1c91f61bdd474b3930ca064c1edc8a849da2c6cbe1e77"
dependencies = [
+ "autocfg",
"rawpointer",
]
@@ -5034,7 +5081,7 @@ checksum = "7b9b8653cec6897f73b519a43fba5ee3d50f62fe9af80b428accdcc093b4a849"
dependencies = [
"ahash 0.7.6",
"metrics-macros",
- "portable-atomic",
+ "portable-atomic 0.3.20",
]
[[package]]
@@ -5047,7 +5094,7 @@ dependencies = [
"metrics",
"metrics-util",
"parking_lot",
- "portable-atomic",
+ "portable-atomic 0.3.20",
"quanta 0.10.1",
"thiserror",
]
@@ -5093,7 +5140,7 @@ dependencies = [
"num_cpus",
"ordered-float 2.10.0",
"parking_lot",
- "portable-atomic",
+ "portable-atomic 0.3.20",
"quanta 0.10.1",
"radix_trie",
"sketches-ddsketch",
@@ -5130,16 +5177,25 @@ dependencies = [
"adler",
]
+[[package]]
+name = "miniz_oxide"
+version = "0.7.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7"
+dependencies = [
+ "adler",
+]
+
[[package]]
name = "mio"
-version = "0.8.6"
+version = "0.8.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5b9d9a46eff5b4ff64b45a9e316a6d1e0bc719ef429cbec4dc630684212bfdf9"
+checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2"
dependencies = [
"libc",
"log",
"wasi 0.11.0+wasi-snapshot-preview1",
- "windows-sys 0.45.0",
+ "windows-sys 0.48.0",
]
[[package]]
@@ -5206,9 +5262,9 @@ dependencies = [
[[package]]
name = "moka"
-version = "0.11.0"
+version = "0.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "934030d03f6191edbb4ba16835ccdb80d560788ac686570a8e2986a0fb59ded8"
+checksum = "36506f2f935238463605f3bb13b362f1949daafc3b347d05d60ae08836db2bd2"
dependencies = [
"async-io",
"async-lock",
@@ -5247,18 +5303,18 @@ checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a"
[[package]]
name = "mysql-common-derive"
-version = "0.30.0"
+version = "0.30.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8c3c1f30203977ce6134381bd895ba82892f967578442a0894484858594de992"
+checksum = "56b0d8a0db9bf6d2213e11f2c701cb91387b0614361625ab7b9743b41aa4938f"
dependencies = [
- "darling",
+ "darling 0.20.1",
"heck",
"num-bigint",
"proc-macro-crate 1.3.1",
"proc-macro-error",
"proc-macro2",
"quote",
- "syn 1.0.109",
+ "syn 2.0.18",
"termcolor",
"thiserror",
]
@@ -5283,14 +5339,14 @@ dependencies = [
"percent-encoding",
"pin-project",
"priority-queue",
- "rustls 0.21.0",
+ "rustls",
"rustls-pemfile",
"serde",
"serde_json",
- "socket2 0.5.2",
+ "socket2 0.5.3",
"thiserror",
"tokio",
- "tokio-rustls 0.24.0",
+ "tokio-rustls",
"tokio-util",
"twox-hash",
"url",
@@ -5300,14 +5356,14 @@ dependencies = [
[[package]]
name = "mysql_common"
-version = "0.30.3"
+version = "0.30.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e6b76684cc6825e9e5f3d9d41968faf04c6f9eb39815dc9827695b1eb5faa826"
+checksum = "73b8fb568c9537cf4f1ad39e2542aa74a66bf89883e550df2cb30a8f0c0f0355"
dependencies = [
- "base64 0.21.0",
+ "base64 0.21.2",
"bigdecimal",
"bindgen 0.65.1",
- "bitflags 2.1.0",
+ "bitflags 2.3.1",
"bitvec",
"byteorder",
"bytes",
@@ -5333,7 +5389,7 @@ dependencies = [
"smallvec",
"subprocess",
"thiserror",
- "time 0.3.20",
+ "time 0.3.21",
"uuid",
]
@@ -5643,9 +5699,9 @@ dependencies = [
[[package]]
name = "once_cell"
-version = "1.17.1"
+version = "1.17.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3"
+checksum = "9670a07f94779e00908f3e686eab508878ebb390ba6e604d3a284c00e8d0487b"
[[package]]
name = "oorandom"
@@ -5662,8 +5718,8 @@ dependencies = [
"anyhow",
"async-compat",
"async-trait",
- "backon 0.4.0",
- "base64 0.21.0",
+ "backon 0.4.1",
+ "base64 0.21.2",
"bytes",
"chrono",
"flagset",
@@ -5690,9 +5746,9 @@ dependencies = [
[[package]]
name = "openmetrics-parser"
-version = "0.4.0"
+version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5caf1ccaaf43651cc5abda77353a173869d8d8b0238f2faacb23d6b32931e860"
+checksum = "1fa1075b40b84d04375a1dcb39e5726c4ac3aee06cc036e60e5b4e636aaba86f"
dependencies = [
"auto_ops",
"pest",
@@ -5712,7 +5768,7 @@ dependencies = [
"nom",
"pin-project-lite",
"tokio",
- "tokio-rustls 0.24.0",
+ "tokio-rustls",
]
[[package]]
@@ -5896,9 +5952,9 @@ dependencies = [
[[package]]
name = "parquet"
-version = "37.0.0"
+version = "40.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b5022d98333271f4ca3e87bab760498e61726bf5a6ca919123c80517e20ded29"
+checksum = "d6a656fcc17e641657c955742c689732684e096f790ff30865d9f8dcc39f7c4a"
dependencies = [
"ahash 0.8.3",
"arrow-array",
@@ -5908,7 +5964,7 @@ dependencies = [
"arrow-ipc",
"arrow-schema",
"arrow-select",
- "base64 0.21.0",
+ "base64 0.21.2",
"brotli",
"bytes",
"chrono",
@@ -5918,6 +5974,7 @@ dependencies = [
"lz4",
"num",
"num-bigint",
+ "object_store",
"paste",
"seq-macro",
"snap",
@@ -6005,7 +6062,7 @@ version = "2.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6b13fe415cdf3c8e44518e18a7c95a13431d9bdf6d15367d82b23c377fdd441a"
dependencies = [
- "base64 0.21.0",
+ "base64 0.21.2",
"serde",
]
@@ -6026,9 +6083,9 @@ checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e"
[[package]]
name = "pest"
-version = "2.5.7"
+version = "2.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7b1403e8401ad5dedea73c626b99758535b342502f8d1e361f4a2dd952749122"
+checksum = "e68e84bfb01f0507134eac1e9b410a12ba379d064eab48c50ba4ce329a527b70"
dependencies = [
"thiserror",
"ucd-trie",
@@ -6036,9 +6093,9 @@ dependencies = [
[[package]]
name = "pest_derive"
-version = "2.5.7"
+version = "2.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "be99c4c1d2fc2769b1d00239431d711d08f6efedcecb8b6e30707160aee99c15"
+checksum = "6b79d4c71c865a25a4322296122e3924d30bc8ee0834c8bfc8b95f7f054afbfb"
dependencies = [
"pest",
"pest_generator",
@@ -6046,22 +6103,22 @@ dependencies = [
[[package]]
name = "pest_generator"
-version = "2.5.7"
+version = "2.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e56094789873daa36164de2e822b3888c6ae4b4f9da555a1103587658c805b1e"
+checksum = "6c435bf1076437b851ebc8edc3a18442796b30f1728ffea6262d59bbe28b077e"
dependencies = [
"pest",
"pest_meta",
"proc-macro2",
"quote",
- "syn 2.0.15",
+ "syn 2.0.18",
]
[[package]]
name = "pest_meta"
-version = "2.5.7"
+version = "2.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6733073c7cff3d8459fda0e42f13a047870242aed8b509fe98000928975f359e"
+checksum = "745a452f8eb71e39ffd8ee32b3c5f51d03845f99786fa9b68db6ff509c505411"
dependencies = [
"once_cell",
"pest",
@@ -6085,7 +6142,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bd66851a4b1d6631371c931810e453b0319eb260bbd5853ebe98e37b15105b80"
dependencies = [
"async-trait",
- "base64 0.21.0",
+ "base64 0.21.2",
"bytes",
"chrono",
"derive-new",
@@ -6099,9 +6156,9 @@ dependencies = [
"ring",
"stringprep",
"thiserror",
- "time 0.3.20",
+ "time 0.3.21",
"tokio",
- "tokio-rustls 0.24.0",
+ "tokio-rustls",
"tokio-util",
"x509-certificate",
]
@@ -6154,30 +6211,24 @@ dependencies = [
"uncased",
]
-[[package]]
-name = "pico-args"
-version = "0.4.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "db8bcd96cb740d03149cbad5518db9fd87126a10ab519c011893b1754134c468"
-
[[package]]
name = "pin-project"
-version = "1.0.12"
+version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc"
+checksum = "c95a7476719eab1e366eaf73d0260af3021184f18177925b07f54b30089ceead"
dependencies = [
"pin-project-internal",
]
[[package]]
name = "pin-project-internal"
-version = "1.0.12"
+version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55"
+checksum = "39407670928234ebc5e6e580247dd567ad73a3578460c5990f9503df207e8f07"
dependencies = [
"proc-macro2",
"quote",
- "syn 1.0.109",
+ "syn 2.0.18",
]
[[package]]
@@ -6215,9 +6266,9 @@ dependencies = [
[[package]]
name = "pkg-config"
-version = "0.3.26"
+version = "0.3.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160"
+checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964"
[[package]]
name = "planus"
@@ -6285,9 +6336,18 @@ dependencies = [
[[package]]
name = "portable-atomic"
-version = "0.3.19"
+version = "0.3.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "26f6a7b87c2e435a3241addceeeff740ff8b7e76b74c13bf9acb17fa454ea00b"
+checksum = "e30165d31df606f5726b090ec7592c308a0eaf61721ff64c9a3018e344a8753e"
+dependencies = [
+ "portable-atomic 1.3.3",
+]
+
+[[package]]
+name = "portable-atomic"
+version = "1.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "767eb9f07d4a5ebcb39bbf2d452058a93c011373abf6832e24194a1c3f004794"
[[package]]
name = "postgres-protocol"
@@ -6295,7 +6355,7 @@ version = "0.6.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "78b7fa9f396f51dffd61546fd8573ee20592287996568e6175ceb0f8699ad75d"
dependencies = [
- "base64 0.21.0",
+ "base64 0.21.2",
"byteorder",
"bytes",
"fallible-iterator",
@@ -6353,12 +6413,12 @@ dependencies = [
[[package]]
name = "prettyplease"
-version = "0.2.4"
+version = "0.2.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1ceca8aaf45b5c46ec7ed39fff75f57290368c1846d33d24a122ca81416ab058"
+checksum = "3b69d39aab54d069e7f2fe8cb970493e7834601ca2d8c65fd7bbd183578080d1"
dependencies = [
"proc-macro2",
- "syn 2.0.15",
+ "syn 2.0.18",
]
[[package]]
@@ -6422,9 +6482,9 @@ checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068"
[[package]]
name = "proc-macro2"
-version = "1.0.56"
+version = "1.0.59"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2b63bdb0cd06f1f4dedf69b254734f9b45af66e4a031e42a7480257d9898b435"
+checksum = "6aeca18b86b413c660b781aa319e4e2648a3e6f9eadc9b47e9038e6fe9f3451b"
dependencies = [
"unicode-ident",
]
@@ -6616,9 +6676,9 @@ dependencies = [
[[package]]
name = "pulldown-cmark"
-version = "0.9.2"
+version = "0.9.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2d9cc634bc78768157b5cbfe988ffcd1dcba95cd2b2f03a88316c08c6d00ed63"
+checksum = "77a1a2f1f0a7ecff9c31abbe177637be0e97a0aef46cf8738ece09327985d998"
dependencies = [
"bitflags 1.3.2",
"memchr",
@@ -6702,7 +6762,7 @@ dependencies = [
"mach",
"once_cell",
"raw-cpuid",
- "wasi 0.10.0+wasi-snapshot-preview1",
+ "wasi 0.10.2+wasi-snapshot-preview1",
"web-sys",
"winapi",
]
@@ -6801,9 +6861,9 @@ dependencies = [
[[package]]
name = "quote"
-version = "1.0.26"
+version = "1.0.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc"
+checksum = "1b9ab9c7eadfd8df19006f1cf1a4aed13540ed5cbc047010ece5826e10825488"
dependencies = [
"proc-macro2",
]
@@ -6967,13 +7027,13 @@ dependencies = [
[[package]]
name = "regex"
-version = "1.8.1"
+version = "1.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "af83e617f331cc6ae2da5443c602dfa5af81e517212d9d611a5b3ba1777b5370"
+checksum = "81ca098a9821bd52d6b24fd8b10bd081f47d39c22778cafaa75a2857a62c6390"
dependencies = [
"aho-corasick 1.0.1",
"memchr",
- "regex-syntax 0.7.1",
+ "regex-syntax 0.7.2",
]
[[package]]
@@ -6993,9 +7053,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1"
[[package]]
name = "regex-syntax"
-version = "0.7.1"
+version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a5996294f19bd3aae0453a862ad728f60e6600695733dd5df01da90c54363a3c"
+checksum = "436b050e76ed2903236f032a59761c1eb99e1b0aead2c257922771dab1fc8c78"
[[package]]
name = "regress"
@@ -7007,6 +7067,16 @@ dependencies = [
"memchr",
]
+[[package]]
+name = "regress"
+version = "0.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "82a9ecfa0cb04d0b04dddb99b8ccf4f66bc8dfd23df694b398570bd8ae3a50fb"
+dependencies = [
+ "hashbrown 0.13.2",
+ "memchr",
+]
+
[[package]]
name = "rend"
version = "0.4.0"
@@ -7024,7 +7094,7 @@ checksum = "b04f5fccb94d61c154f0d8520ec42e79afdc145f4b1a392faa269874995fda66"
dependencies = [
"anyhow",
"async-trait",
- "base64 0.21.0",
+ "base64 0.21.2",
"chrono",
"form_urlencoded",
"hex",
@@ -7049,11 +7119,11 @@ dependencies = [
[[package]]
name = "reqwest"
-version = "0.11.16"
+version = "0.11.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "27b71749df584b7f4cac2c426c127a7c785a5106cc98f7a8feb044115f0fa254"
+checksum = "cde824a14b7c14f85caff81225f411faacc04a2013f41670f41443742b1c1c55"
dependencies = [
- "base64 0.21.0",
+ "base64 0.21.2",
"bytes",
"encoding_rs",
"futures-core",
@@ -7071,14 +7141,14 @@ dependencies = [
"once_cell",
"percent-encoding",
"pin-project-lite",
- "rustls 0.20.8",
+ "rustls",
"rustls-native-certs",
"rustls-pemfile",
"serde",
"serde_json",
"serde_urlencoded",
"tokio",
- "tokio-rustls 0.23.4",
+ "tokio-rustls",
"tokio-util",
"tower-service",
"url",
@@ -7141,23 +7211,26 @@ dependencies = [
[[package]]
name = "rkyv"
-version = "0.7.41"
+version = "0.7.42"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "21499ed91807f07ae081880aabb2ccc0235e9d88011867d984525e9a4c3cfa3e"
+checksum = "0200c8230b013893c0b2d6213d6ec64ed2b9be2e0e016682b7224ff82cff5c58"
dependencies = [
+ "bitvec",
"bytecheck",
"hashbrown 0.12.3",
"ptr_meta",
"rend",
"rkyv_derive",
"seahash",
+ "tinyvec",
+ "uuid",
]
[[package]]
name = "rkyv_derive"
-version = "0.7.41"
+version = "0.7.42"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ac1c672430eb41556291981f45ca900a0239ad007242d1cb4b4167af842db666"
+checksum = "b2e06b915b5c230a17d7a736d1e2e63ee753c256a8614ef3f5147b13a4f5541d"
dependencies = [
"proc-macro2",
"quote",
@@ -7362,35 +7435,23 @@ dependencies = [
[[package]]
name = "rustix"
-version = "0.37.14"
+version = "0.37.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d9b864d3c18a5785a05953adeed93e2dca37ed30f18e69bba9f30079d51f363f"
+checksum = "acf8729d8542766f1b2cf77eb034d52f40d375bb8b615d0b147089946e16613d"
dependencies = [
"bitflags 1.3.2",
"errno 0.3.1",
"io-lifetimes",
"libc",
- "linux-raw-sys 0.3.4",
+ "linux-raw-sys 0.3.8",
"windows-sys 0.48.0",
]
[[package]]
name = "rustls"
-version = "0.20.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fff78fc74d175294f4e83b28343315ffcfb114b156f0185e9741cb5570f50e2f"
-dependencies = [
- "log",
- "ring",
- "sct",
- "webpki",
-]
-
-[[package]]
-name = "rustls"
-version = "0.21.0"
+version = "0.21.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "07180898a28ed6a7f7ba2311594308f595e3dd2e3c3812fa0a80a47b45f17e5d"
+checksum = "c911ba11bc8433e811ce56fde130ccf32f5127cab0e0194e9c68c5a5b671791e"
dependencies = [
"log",
"ring",
@@ -7416,7 +7477,7 @@ version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d194b56d58803a43635bdc398cd17e383d6f71f9182b9a192c127ca42494a59b"
dependencies = [
- "base64 0.21.0",
+ "base64 0.21.2",
]
[[package]]
@@ -7896,12 +7957,6 @@ version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
-[[package]]
-name = "scratch"
-version = "1.0.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1792db035ce95be60c3f8853017b3999209281c24e2ba5bc8e59bf97a0c590c1"
-
[[package]]
name = "script"
version = "0.2.0"
@@ -7983,9 +8038,9 @@ dependencies = [
[[package]]
name = "security-framework"
-version = "2.8.2"
+version = "2.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a332be01508d814fed64bf28f798a146d73792121129962fdf335bb3c49a4254"
+checksum = "1fc758eb7bffce5b308734e9b0c1468893cae9ff70ebf13e7090be8dcbcc83a8"
dependencies = [
"bitflags 1.3.2",
"core-foundation",
@@ -7996,9 +8051,9 @@ dependencies = [
[[package]]
name = "security-framework-sys"
-version = "2.8.0"
+version = "2.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "31c9bb296072e961fcbd8853511dd39c2d8be2deb1e17c6860b1d30732b323b4"
+checksum = "f51d0c0d83bec45f16480d0ce0058397a69e48fcdc52d1dc8855fb68acbd31a7"
dependencies = [
"core-foundation-sys",
"libc",
@@ -8039,9 +8094,9 @@ checksum = "e6b44e8fc93a14e66336d230954dda83d18b4605ccace8fe09bc7514a71ad0bc"
[[package]]
name = "serde"
-version = "1.0.160"
+version = "1.0.163"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bb2f3770c8bce3bcda7e149193a069a0f4365bda1fa5cd88e03bca26afc1216c"
+checksum = "2113ab51b87a539ae008b5c6c02dc020ffa39afd2d83cffcb3f4eb2722cebec2"
dependencies = [
"serde_derive",
]
@@ -8058,13 +8113,13 @@ dependencies = [
[[package]]
name = "serde_derive"
-version = "1.0.160"
+version = "1.0.163"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "291a097c63d8497e00160b166a967a4a79c64f3facdd01cbd7502231688d77df"
+checksum = "8c805777e3930c8883389c602315a24224bcc738b63905ef87cd1420353ea93e"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.15",
+ "syn 2.0.18",
]
[[package]]
@@ -8106,7 +8161,7 @@ checksum = "bcec881020c684085e55a25f7fd888954d56609ef363479dc5a1305eb0d40cab"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.15",
+ "syn 2.0.18",
]
[[package]]
@@ -8120,6 +8175,18 @@ dependencies = [
"syn 1.0.109",
]
+[[package]]
+name = "serde_tokenstream"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8a00ffd23fd882d096f09fcaae2a9de8329a328628e86027e049ee051dc1621f"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "serde",
+ "syn 2.0.18",
+]
+
[[package]]
name = "serde_urlencoded"
version = "0.7.1"
@@ -8200,7 +8267,7 @@ dependencies = [
"rand",
"regex",
"rust-embed",
- "rustls 0.21.0",
+ "rustls",
"rustls-pemfile",
"schemars",
"script",
@@ -8217,7 +8284,7 @@ dependencies = [
"tokio",
"tokio-postgres",
"tokio-postgres-rustls",
- "tokio-rustls 0.24.0",
+ "tokio-rustls",
"tokio-stream",
"tokio-test",
"tonic 0.9.2",
@@ -8278,9 +8345,9 @@ dependencies = [
[[package]]
name = "sha3"
-version = "0.10.7"
+version = "0.10.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "54c2bb1a323307527314a36bfb73f24febb08ce2b8a554bf4ffd6f51ad15198c"
+checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60"
dependencies = [
"digest",
"keccak",
@@ -8358,7 +8425,7 @@ dependencies = [
"num-bigint",
"num-traits",
"thiserror",
- "time 0.3.20",
+ "time 0.3.21",
]
[[package]]
@@ -8455,9 +8522,9 @@ dependencies = [
[[package]]
name = "socket2"
-version = "0.5.2"
+version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6d283f86695ae989d1e18440a943880967156325ba025f05049946bff47bcc2b"
+checksum = "2538b18701741680e0322a2302176d3253a35388e2e62f172f64f4f16605f877"
dependencies = [
"libc",
"windows-sys 0.48.0",
@@ -8544,9 +8611,9 @@ dependencies = [
[[package]]
name = "sqlparser"
-version = "0.33.0"
+version = "0.34.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "355dc4d4b6207ca8a3434fc587db0a8016130a574dbcdbfb93d7f7b5bc5b211a"
+checksum = "37d3706eefb17039056234df6b566b0014f303f867f2656108334a55b8096f59"
dependencies = [
"log",
"sqlparser_derive",
@@ -8783,7 +8850,7 @@ dependencies = [
"query",
"session",
"snafu",
- "substrait 0.7.5",
+ "substrait 0.10.0",
"table",
"tokio",
]
@@ -8796,7 +8863,7 @@ checksum = "e3ae64fb7ad0670c7d6d53d57b1b91beb2212afc30e164cc8edb02d6b2cff32a"
dependencies = [
"gix",
"heck",
- "prettyplease 0.2.4",
+ "prettyplease 0.2.6",
"prost",
"prost-build",
"prost-types",
@@ -8805,16 +8872,38 @@ dependencies = [
"serde",
"serde_json",
"serde_yaml",
- "syn 2.0.15",
- "typify",
+ "syn 2.0.18",
+ "typify 0.0.11",
+ "walkdir",
+]
+
+[[package]]
+name = "substrait"
+version = "0.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9df5d9e071804204172dc77e707c363f187e7f6566f9c78e5100c9a8f5ea434e"
+dependencies = [
+ "git2",
+ "heck",
+ "prettyplease 0.2.6",
+ "prost",
+ "prost-build",
+ "prost-types",
+ "schemars",
+ "semver 1.0.17",
+ "serde",
+ "serde_json",
+ "serde_yaml",
+ "syn 2.0.18",
+ "typify 0.0.12",
"walkdir",
]
[[package]]
name = "subtle"
-version = "2.4.1"
+version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601"
+checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc"
[[package]]
name = "syn"
@@ -8829,9 +8918,9 @@ dependencies = [
[[package]]
name = "syn"
-version = "2.0.15"
+version = "2.0.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a34fcf3e8b60f57e6a14301a2e916d323af98b0ea63c599441eec8558660c822"
+checksum = "32d41677bcbe24c20c52e7c70b0d8db04134c5d1066bf98662e2871ad200ea3e"
dependencies = [
"proc-macro2",
"quote",
@@ -8855,9 +8944,9 @@ checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160"
[[package]]
name = "system-configuration"
-version = "0.5.0"
+version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d75182f12f490e953596550b65ee31bda7c8e043d9386174b353bda50838c3fd"
+checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7"
dependencies = [
"bitflags 1.3.2",
"core-foundation",
@@ -8948,9 +9037,9 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369"
[[package]]
name = "target-lexicon"
-version = "0.12.6"
+version = "0.12.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8ae9980cab1db3fceee2f6c6f643d5d8de2997c58ee8d25fb0cc8a9e9e7348e5"
+checksum = "fd1ba337640d60c3e96bc6f0638a939b9c9a7f2c316a1598c279828b3d1dc8c5"
[[package]]
name = "temp-env"
@@ -8970,7 +9059,7 @@ dependencies = [
"cfg-if 1.0.0",
"fastrand",
"redox_syscall 0.3.5",
- "rustix 0.37.14",
+ "rustix 0.37.19",
"windows-sys 0.45.0",
]
@@ -9098,14 +9187,14 @@ checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.15",
+ "syn 2.0.18",
]
[[package]]
name = "thread-id"
-version = "4.0.0"
+version = "4.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5fdfe0627923f7411a43ec9ec9c39c3a9b4151be313e0922042581fb6c9b717f"
+checksum = "3ee93aa2b8331c0fec9091548843f2c90019571814057da3b783f9de09349d73"
dependencies = [
"libc",
"redox_syscall 0.2.16",
@@ -9188,20 +9277,19 @@ dependencies = [
[[package]]
name = "time"
-version = "0.1.45"
+version = "0.1.43"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1b797afad3f312d1c66a56d11d0316f916356d11bd158fbc6ca6389ff6bf805a"
+checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438"
dependencies = [
"libc",
- "wasi 0.10.0+wasi-snapshot-preview1",
"winapi",
]
[[package]]
name = "time"
-version = "0.3.20"
+version = "0.3.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cd0cbfecb4d19b5ea75bb31ad904eb5b9fa13f21079c3b92017ebdf4999a5890"
+checksum = "8f3403384eaacbca9923fa06940178ac13e4edb725486d70e8e15881d0c836cc"
dependencies = [
"itoa",
"libc",
@@ -9213,15 +9301,15 @@ dependencies = [
[[package]]
name = "time-core"
-version = "0.1.0"
+version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2e153e1f1acaef8acc537e68b44906d2db6436e2b35ac2c6b42640fff91f00fd"
+checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb"
[[package]]
name = "time-macros"
-version = "0.2.8"
+version = "0.2.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fd80a657e71da814b8e5d60d3374fc6d35045062245d80224748ae522dd76f36"
+checksum = "372950940a5f07bf38dbe211d7283c9e6d7327df53794992d293e534c733d09b"
dependencies = [
"time-core",
]
@@ -9268,9 +9356,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
[[package]]
name = "tokio"
-version = "1.28.1"
+version = "1.28.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0aa32867d44e6f2ce3385e89dceb990188b8bb0fb25b0cf576647a6f98ac5105"
+checksum = "94d7b1cfd2aa4011f2de74c2c4c63665e27a71006b0a192dcd2710272e73dfa2"
dependencies = [
"autocfg",
"bytes",
@@ -9304,7 +9392,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.15",
+ "syn 2.0.18",
]
[[package]]
@@ -9326,7 +9414,7 @@ dependencies = [
"pin-project-lite",
"postgres-protocol",
"postgres-types",
- "socket2 0.5.2",
+ "socket2 0.5.3",
"tokio",
"tokio-util",
]
@@ -9339,21 +9427,10 @@ checksum = "dd5831152cb0d3f79ef5523b357319ba154795d64c7078b2daa95a803b54057f"
dependencies = [
"futures",
"ring",
- "rustls 0.21.0",
+ "rustls",
"tokio",
"tokio-postgres",
- "tokio-rustls 0.24.0",
-]
-
-[[package]]
-name = "tokio-rustls"
-version = "0.23.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59"
-dependencies = [
- "rustls 0.20.8",
- "tokio",
- "webpki",
+ "tokio-rustls",
]
[[package]]
@@ -9362,15 +9439,15 @@ version = "0.24.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e0d409377ff5b1e3ca6437aa86c1eb7d40c134bfec254e44c830defa92669db5"
dependencies = [
- "rustls 0.21.0",
+ "rustls",
"tokio",
]
[[package]]
name = "tokio-stream"
-version = "0.1.12"
+version = "0.1.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8fb52b74f05dbf495a8fba459fdc331812b96aa086d9eb78101fa0d4569c3313"
+checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842"
dependencies = [
"futures-core",
"pin-project-lite",
@@ -9392,9 +9469,9 @@ dependencies = [
[[package]]
name = "tokio-util"
-version = "0.7.7"
+version = "0.7.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5427d89453009325de0d8f342c9490009f76e999cb7672d77e46267448f7e6b2"
+checksum = "806fe8c2c87eccc8b3267cbae29ed3ab2d0bd37fca70ab622e46aaa9375ddb7d"
dependencies = [
"bytes",
"futures-core",
@@ -9416,15 +9493,15 @@ dependencies = [
[[package]]
name = "toml_datetime"
-version = "0.6.1"
+version = "0.6.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3ab8ed2edee10b50132aed5f331333428b011c99402b5a534154ed15746f9622"
+checksum = "5a76a9312f5ba4c2dec6b9161fdf25d87ad8a09256ccea5a556fef03c706a10f"
[[package]]
name = "toml_edit"
-version = "0.19.8"
+version = "0.19.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "239410c8609e8125456927e6707163a3b1fdb40561e4b803bc041f466ccfdc13"
+checksum = "2380d56e8670370eee6566b0bfd4265f65b3f432e8c6d85623f728d4fa31f739"
dependencies = [
"indexmap",
"toml_datetime",
@@ -9472,7 +9549,7 @@ dependencies = [
"async-stream",
"async-trait",
"axum",
- "base64 0.21.0",
+ "base64 0.21.2",
"bytes",
"futures-core",
"futures-util",
@@ -9486,7 +9563,7 @@ dependencies = [
"prost",
"rustls-pemfile",
"tokio",
- "tokio-rustls 0.24.0",
+ "tokio-rustls",
"tokio-stream",
"tower",
"tower-layer",
@@ -9560,7 +9637,7 @@ version = "0.3.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f873044bf02dd1e8239e9c1293ea39dad76dc594ec16185d0a1bf31d8dc8d858"
dependencies = [
- "async-compression",
+ "async-compression 0.3.15",
"base64 0.13.1",
"bitflags 1.3.2",
"bytes",
@@ -9616,19 +9693,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09d48f71a791638519505cefafe162606f706c25592e4bde4d97600c0195312e"
dependencies = [
"crossbeam-channel",
- "time 0.3.20",
+ "time 0.3.21",
"tracing-subscriber",
]
[[package]]
name = "tracing-attributes"
-version = "0.1.23"
+version = "0.1.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a"
+checksum = "0f57e3ca2a01450b1a921183a9c9cbfda207fd822cef4ccb00a65402cbba7a74"
dependencies = [
"proc-macro2",
"quote",
- "syn 1.0.109",
+ "syn 2.0.18",
]
[[package]]
@@ -9642,7 +9719,7 @@ dependencies = [
"log",
"serde",
"serde_json",
- "time 0.3.20",
+ "time 0.3.21",
"tracing",
"tracing-core",
"tracing-log",
@@ -9651,9 +9728,9 @@ dependencies = [
[[package]]
name = "tracing-core"
-version = "0.1.30"
+version = "0.1.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "24eb03ba0eab1fd845050058ce5e616558e8f8d8fca633e6b163fe25c797213a"
+checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a"
dependencies = [
"once_cell",
"valuable",
@@ -9773,7 +9850,7 @@ checksum = "2c3e1c30cedd24fc597f7d37a721efdbdc2b1acae012c1ef1218f4c7c2c0f3e7"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.15",
+ "syn 2.0.18",
]
[[package]]
@@ -9782,8 +9859,18 @@ version = "0.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "30bfde96849e25d7feef1bbf652e9cfc51deb63203fdc07b115b8bc3bcfe20b9"
dependencies = [
- "typify-impl",
- "typify-macro",
+ "typify-impl 0.0.11",
+ "typify-macro 0.0.11",
+]
+
+[[package]]
+name = "typify"
+version = "0.0.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a6658d09e71bfe59e7987dc95ee7f71809fdb5793ab0cdc1503cc0073990484d"
+dependencies = [
+ "typify-impl 0.0.12",
+ "typify-macro 0.0.12",
]
[[package]]
@@ -9796,7 +9883,7 @@ dependencies = [
"log",
"proc-macro2",
"quote",
- "regress",
+ "regress 0.5.0",
"schemars",
"serde_json",
"syn 1.0.109",
@@ -9804,6 +9891,24 @@ dependencies = [
"unicode-ident",
]
+[[package]]
+name = "typify-impl"
+version = "0.0.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "34d3bb47587b13edf526d6ed02bf360ecefe083ab47a4ef29fc43112828b2bef"
+dependencies = [
+ "heck",
+ "log",
+ "proc-macro2",
+ "quote",
+ "regress 0.6.0",
+ "schemars",
+ "serde_json",
+ "syn 2.0.18",
+ "thiserror",
+ "unicode-ident",
+]
+
[[package]]
name = "typify-macro"
version = "0.0.11"
@@ -9815,9 +9920,25 @@ dependencies = [
"schemars",
"serde",
"serde_json",
- "serde_tokenstream",
+ "serde_tokenstream 0.1.7",
"syn 1.0.109",
- "typify-impl",
+ "typify-impl 0.0.11",
+]
+
+[[package]]
+name = "typify-macro"
+version = "0.0.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d3f7e627c18be12d53bc1f261830b9c2763437b6a86ac57293b9085af2d32ffe"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "schemars",
+ "serde",
+ "serde_json",
+ "serde_tokenstream 0.2.0",
+ "syn 2.0.18",
+ "typify-impl 0.0.12",
]
[[package]]
@@ -9843,9 +9964,9 @@ dependencies = [
[[package]]
name = "uncased"
-version = "0.9.7"
+version = "0.9.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "09b01702b0fd0b3fadcf98e098780badda8742d4f4a7676615cad90e8ac73622"
+checksum = "9b9bc53168a4be7402ab86c3aad243a84dd7381d09be0eddc81280c1da95ca68"
dependencies = [
"version_check",
]
@@ -9995,9 +10116,9 @@ checksum = "623f59e6af2a98bdafeb93fa277ac8e1e40440973001ca15cf4ae1541cd16d56"
[[package]]
name = "unicode-ident"
-version = "1.0.8"
+version = "1.0.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4"
+checksum = "b15811caf2415fb889178633e7724bad2509101cde276048e013b9def5e51fa0"
[[package]]
name = "unicode-normalization"
@@ -10071,9 +10192,9 @@ checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a"
[[package]]
name = "uuid"
-version = "1.3.1"
+version = "1.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5b55a3fef2a1e3b3a00ce878640918820d3c51081576ac657d23af9fc7928fdb"
+checksum = "345444e32442451b267fc254ae85a209c64be56d2890e601a0c37ff0c3c5ecd2"
dependencies = [
"atomic",
"getrandom",
@@ -10084,13 +10205,13 @@ dependencies = [
[[package]]
name = "uuid-macro-internal"
-version = "1.3.1"
+version = "1.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "20e8a505384e9309dc842520c6c9348f4b141dee06aaa845522727b1b99ca235"
+checksum = "3f67b459f42af2e6e1ee213cb9da4dbd022d3320788c3fb3e1b893093f1e45da"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.15",
+ "syn 2.0.18",
]
[[package]]
@@ -10123,7 +10244,7 @@ dependencies = [
"getset",
"rustversion",
"thiserror",
- "time 0.3.20",
+ "time 0.3.21",
]
[[package]]
@@ -10177,9 +10298,9 @@ dependencies = [
[[package]]
name = "wasi"
-version = "0.10.0+wasi-snapshot-preview1"
+version = "0.10.2+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f"
+checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6"
[[package]]
name = "wasi"
@@ -10189,9 +10310,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
[[package]]
name = "wasm-bindgen"
-version = "0.2.84"
+version = "0.2.86"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "31f8dcbc21f30d9b8f2ea926ecb58f6b91192c17e9d33594b3df58b2007ca53b"
+checksum = "5bba0e8cb82ba49ff4e229459ff22a191bbe9a1cb3a341610c9c33efc27ddf73"
dependencies = [
"cfg-if 1.0.0",
"wasm-bindgen-macro",
@@ -10199,24 +10320,24 @@ dependencies = [
[[package]]
name = "wasm-bindgen-backend"
-version = "0.2.84"
+version = "0.2.86"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "95ce90fd5bcc06af55a641a86428ee4229e44e07033963a2290a8e241607ccb9"
+checksum = "19b04bc93f9d6bdee709f6bd2118f57dd6679cf1176a1af464fca3ab0d66d8fb"
dependencies = [
"bumpalo",
"log",
"once_cell",
"proc-macro2",
"quote",
- "syn 1.0.109",
+ "syn 2.0.18",
"wasm-bindgen-shared",
]
[[package]]
name = "wasm-bindgen-futures"
-version = "0.4.34"
+version = "0.4.36"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f219e0d211ba40266969f6dbdd90636da12f75bee4fc9d6c23d1260dadb51454"
+checksum = "2d1985d03709c53167ce907ff394f5316aa22cb4e12761295c5dc57dacb6297e"
dependencies = [
"cfg-if 1.0.0",
"js-sys",
@@ -10226,9 +10347,9 @@ dependencies = [
[[package]]
name = "wasm-bindgen-macro"
-version = "0.2.84"
+version = "0.2.86"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4c21f77c0bedc37fd5dc21f897894a5ca01e7bb159884559461862ae90c0b4c5"
+checksum = "14d6b024f1a526bb0234f52840389927257beb670610081360e5a03c5df9c258"
dependencies = [
"quote",
"wasm-bindgen-macro-support",
@@ -10236,22 +10357,22 @@ dependencies = [
[[package]]
name = "wasm-bindgen-macro-support"
-version = "0.2.84"
+version = "0.2.86"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2aff81306fcac3c7515ad4e177f521b5c9a15f2b08f4e32d823066102f35a5f6"
+checksum = "e128beba882dd1eb6200e1dc92ae6c5dbaa4311aa7bb211ca035779e5efc39f8"
dependencies = [
"proc-macro2",
"quote",
- "syn 1.0.109",
+ "syn 2.0.18",
"wasm-bindgen-backend",
"wasm-bindgen-shared",
]
[[package]]
name = "wasm-bindgen-shared"
-version = "0.2.84"
+version = "0.2.86"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0046fef7e28c3804e5e38bfa31ea2a0f73905319b677e57ebe37e49358989b5d"
+checksum = "ed9d5b4305409d1fc9482fee2d7f9bcbf24b3972bf59817ef757e23982242a93"
[[package]]
name = "wasm-streams"
@@ -10268,9 +10389,9 @@ dependencies = [
[[package]]
name = "web-sys"
-version = "0.3.61"
+version = "0.3.63"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e33b99f4b23ba3eec1a53ac264e35a755f00e966e0065077d6027c0f575b0b97"
+checksum = "3bdd9ef4e984da1187bf8110c5cf5b845fbc87a23602cdf912386a76fcd3a7c2"
dependencies = [
"js-sys",
"wasm-bindgen",
@@ -10288,9 +10409,9 @@ dependencies = [
[[package]]
name = "webpki-roots"
-version = "0.23.0"
+version = "0.23.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "aa54963694b65584e170cf5dc46aeb4dcaa5584e652ff5f3952e56d66aff0125"
+checksum = "b03058f88386e5ff5310d9111d53f48b17d732b401aeb83a8d5190f2ac459338"
dependencies = [
"rustls-webpki",
]
@@ -10308,9 +10429,9 @@ dependencies = [
[[package]]
name = "wide"
-version = "0.7.8"
+version = "0.7.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b689b6c49d6549434bf944e6b0f39238cf63693cb7a147e9d887507fffa3b223"
+checksum = "5cd0496a71f3cc6bc4bf0ed91346426a5099e93d89807e663162dc5a1069ff65"
dependencies = [
"bytemuck",
"safe_arch",
@@ -10569,9 +10690,9 @@ checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a"
[[package]]
name = "winnow"
-version = "0.4.1"
+version = "0.4.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ae8970b36c66498d8ff1d66685dc86b91b29db0c7739899012f63a63814b4b28"
+checksum = "61de7bac303dc551fe038e2b3cef0f571087a47571ea6e79a87692ac99b99699"
dependencies = [
"memchr",
]
@@ -10614,9 +10735,9 @@ dependencies = [
[[package]]
name = "xml-rs"
-version = "0.8.4"
+version = "0.8.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d2d7d3948613f75c98fd9328cfdcc45acc4d360655289d0a7d4ec931392200a3"
+checksum = "2d8f380ae16a37b30e6a2cf67040608071384b1450c189e61bea3ff57cde922d"
[[package]]
name = "xz2"
diff --git a/Cargo.toml b/Cargo.toml
index 021e64138037..ac1654d77fc6 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -54,31 +54,31 @@ edition = "2021"
license = "Apache-2.0"
[workspace.dependencies]
-arrow = { version = "37.0" }
-arrow-array = "37.0"
-arrow-flight = "37.0"
-arrow-schema = { version = "37.0", features = ["serde"] }
+arrow = { version = "40.0" }
+arrow-array = "40.0"
+arrow-flight = "40.0"
+arrow-schema = { version = "40.0", features = ["serde"] }
async-stream = "0.3"
async-trait = "0.1"
chrono = { version = "0.4", features = ["serde"] }
# TODO(ruihang): use arrow-datafusion when it contains https://github.com/apache/arrow-datafusion/pull/6032
-datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5337c86120de8193406b59be7612484796a46294" }
-datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5337c86120de8193406b59be7612484796a46294" }
-datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5337c86120de8193406b59be7612484796a46294" }
-datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5337c86120de8193406b59be7612484796a46294" }
-datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5337c86120de8193406b59be7612484796a46294" }
-datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5337c86120de8193406b59be7612484796a46294" }
-datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "5337c86120de8193406b59be7612484796a46294" }
+datafusion = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323" }
+datafusion-common = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323" }
+datafusion-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323" }
+datafusion-optimizer = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323" }
+datafusion-physical-expr = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323" }
+datafusion-sql = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323" }
+datafusion-substrait = { git = "https://github.com/waynexia/arrow-datafusion.git", rev = "63e52dde9e44cac4b1f6c6e6b6bf6368ba3bd323" }
futures = "0.3"
futures-util = "0.3"
-parquet = "37.0"
+parquet = "40.0"
paste = "1.0"
prost = "0.11"
rand = "0.8"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
snafu = { version = "0.7", features = ["backtraces"] }
-sqlparser = "0.33"
+sqlparser = "0.34"
tempfile = "3"
tokio = { version = "1.28", features = ["full"] }
tokio-util = { version = "0.7", features = ["io-util", "compat"] }
diff --git a/src/common/datasource/src/file_format.rs b/src/common/datasource/src/file_format.rs
index 59870a67ed75..4987510ecc0d 100644
--- a/src/common/datasource/src/file_format.rs
+++ b/src/common/datasource/src/file_format.rs
@@ -110,6 +110,7 @@ impl ArrowDecoder for arrow::csv::reader::Decoder {
}
}
+#[allow(deprecated)]
impl ArrowDecoder for arrow::json::RawDecoder {
fn decode(&mut self, buf: &[u8]) -> result::Result<usize, ArrowError> {
self.decode(buf)
diff --git a/src/common/datasource/src/file_format/csv.rs b/src/common/datasource/src/file_format/csv.rs
index 70f5262046b3..b723ce9ddc1b 100644
--- a/src/common/datasource/src/file_format/csv.rs
+++ b/src/common/datasource/src/file_format/csv.rs
@@ -17,6 +17,7 @@ use std::str::FromStr;
use std::sync::Arc;
use arrow::csv;
+#[allow(deprecated)]
use arrow::csv::reader::infer_reader_schema as infer_csv_schema;
use arrow::record_batch::RecordBatch;
use arrow_schema::{Schema, SchemaRef};
@@ -113,8 +114,7 @@ pub struct CsvConfig {
impl CsvConfig {
fn builder(&self) -> csv::ReaderBuilder {
- let mut builder = csv::ReaderBuilder::new()
- .with_schema(self.file_schema.clone())
+ let mut builder = csv::ReaderBuilder::new(self.file_schema.clone())
.with_delimiter(self.delimiter)
.with_batch_size(self.batch_size)
.has_header(self.has_header);
@@ -160,6 +160,7 @@ impl FileOpener for CsvOpener {
}
}
+#[allow(deprecated)]
#[async_trait]
impl FileFormat for CsvFormat {
async fn infer_schema(&self, store: &ObjectStore, path: &str) -> Result<Schema> {
diff --git a/src/common/datasource/src/file_format/json.rs b/src/common/datasource/src/file_format/json.rs
index 6fab2ba5b728..b9cf6e31a93a 100644
--- a/src/common/datasource/src/file_format/json.rs
+++ b/src/common/datasource/src/file_format/json.rs
@@ -20,6 +20,7 @@ use std::sync::Arc;
use arrow::datatypes::SchemaRef;
use arrow::json::reader::{infer_json_schema_from_iterator, ValueIter};
use arrow::json::writer::LineDelimited;
+#[allow(deprecated)]
use arrow::json::{self, RawReaderBuilder};
use arrow::record_batch::RecordBatch;
use arrow_schema::Schema;
@@ -129,6 +130,7 @@ impl JsonOpener {
}
}
+#[allow(deprecated)]
impl FileOpener for JsonOpener {
fn open(&self, meta: FileMeta) -> DataFusionResult<FileOpenFuture> {
open_with_decoder(
@@ -159,8 +161,7 @@ pub async fn stream_to_json(
impl DfRecordBatchEncoder for json::Writer<SharedBuffer, LineDelimited> {
fn write(&mut self, batch: &RecordBatch) -> Result<()> {
- self.write(batch.clone())
- .context(error::WriteRecordBatchSnafu)
+ self.write(batch).context(error::WriteRecordBatchSnafu)
}
}
diff --git a/src/common/substrait/Cargo.toml b/src/common/substrait/Cargo.toml
index 60a3b49378dc..d4ed2cf36b61 100644
--- a/src/common/substrait/Cargo.toml
+++ b/src/common/substrait/Cargo.toml
@@ -25,7 +25,7 @@ query = { path = "../../query" }
[dependencies.substrait_proto]
package = "substrait"
-version = "0.7"
+version = "0.10"
[dev-dependencies]
datatypes = { path = "../../datatypes" }
diff --git a/src/common/substrait/src/context.rs b/src/common/substrait/src/context.rs
deleted file mode 100644
index 9258d4088eb4..000000000000
--- a/src/common/substrait/src/context.rs
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use std::collections::HashMap;
-
-use datafusion::common::DFSchemaRef;
-use substrait_proto::proto::extensions::simple_extension_declaration::{
- ExtensionFunction, MappingType,
-};
-use substrait_proto::proto::extensions::SimpleExtensionDeclaration;
-
-#[derive(Default)]
-pub struct ConvertorContext {
- scalar_fn_names: HashMap<String, u32>,
- scalar_fn_map: HashMap<u32, String>,
- df_schema: Option<DFSchemaRef>,
-}
-
-impl ConvertorContext {
- pub fn register_scalar_fn<S: AsRef<str>>(&mut self, name: S) -> u32 {
- if let Some(anchor) = self.scalar_fn_names.get(name.as_ref()) {
- return *anchor;
- }
-
- let next_anchor = self.scalar_fn_map.len() as _;
- self.scalar_fn_map
- .insert(next_anchor, name.as_ref().to_string());
- self.scalar_fn_names
- .insert(name.as_ref().to_string(), next_anchor);
- next_anchor
- }
-
- pub fn register_scalar_with_anchor<S: AsRef<str>>(&mut self, name: S, anchor: u32) {
- self.scalar_fn_map.insert(anchor, name.as_ref().to_string());
- self.scalar_fn_names
- .insert(name.as_ref().to_string(), anchor);
- }
-
- pub fn find_scalar_fn(&self, anchor: u32) -> Option<&str> {
- self.scalar_fn_map.get(&anchor).map(|s| s.as_str())
- }
-
- pub fn generate_function_extension(&self) -> Vec<SimpleExtensionDeclaration> {
- let mut result = Vec::with_capacity(self.scalar_fn_map.len());
- for (anchor, name) in &self.scalar_fn_map {
- let declaration = SimpleExtensionDeclaration {
- mapping_type: Some(MappingType::ExtensionFunction(ExtensionFunction {
- extension_uri_reference: 0,
- function_anchor: *anchor,
- name: name.clone(),
- })),
- };
- result.push(declaration);
- }
- result
- }
-
- pub(crate) fn set_df_schema(&mut self, schema: DFSchemaRef) {
- debug_assert!(self.df_schema.is_none());
- self.df_schema.get_or_insert(schema);
- }
-
- pub(crate) fn df_schema(&self) -> Option<&DFSchemaRef> {
- self.df_schema.as_ref()
- }
-}
diff --git a/src/common/substrait/src/df_expr.rs b/src/common/substrait/src/df_expr.rs
deleted file mode 100644
index 3b8beb1ca71f..000000000000
--- a/src/common/substrait/src/df_expr.rs
+++ /dev/null
@@ -1,799 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use std::collections::VecDeque;
-use std::str::FromStr;
-
-use datafusion::common::Column;
-use datafusion_expr::expr::Sort;
-use datafusion_expr::{expr_fn, lit, Between, BinaryExpr, BuiltinScalarFunction, Expr, Operator};
-use datatypes::schema::Schema;
-use snafu::{ensure, OptionExt};
-use substrait_proto::proto::expression::field_reference::ReferenceType as FieldReferenceType;
-use substrait_proto::proto::expression::reference_segment::{
- ReferenceType as SegReferenceType, StructField,
-};
-use substrait_proto::proto::expression::{
- FieldReference, Literal, ReferenceSegment, RexType, ScalarFunction,
-};
-use substrait_proto::proto::function_argument::ArgType;
-use substrait_proto::proto::Expression;
-
-use crate::context::ConvertorContext;
-use crate::error::{
- EmptyExprSnafu, InvalidParametersSnafu, MissingFieldSnafu, Result, UnsupportedExprSnafu,
-};
-use crate::types::{literal_type_to_scalar_value, scalar_value_as_literal_type};
-
-/// Convert substrait's `Expression` to DataFusion's `Expr`.
-pub(crate) fn to_df_expr(
- ctx: &ConvertorContext,
- expression: Expression,
- schema: &Schema,
-) -> Result<Expr> {
- let expr_rex_type = expression.rex_type.context(EmptyExprSnafu)?;
- match expr_rex_type {
- RexType::Literal(l) => {
- let t = l.literal_type.context(MissingFieldSnafu {
- field: "LiteralType",
- plan: "Literal",
- })?;
- let v = literal_type_to_scalar_value(t)?;
- Ok(lit(v))
- }
- RexType::Selection(selection) => convert_selection_rex(*selection, schema),
- RexType::ScalarFunction(scalar_fn) => convert_scalar_function(ctx, scalar_fn, schema),
- RexType::WindowFunction(_)
- | RexType::IfThen(_)
- | RexType::SwitchExpression(_)
- | RexType::SingularOrList(_)
- | RexType::MultiOrList(_)
- | RexType::Cast(_)
- | RexType::Subquery(_)
- | RexType::Nested(_)
- | RexType::Enum(_) => UnsupportedExprSnafu {
- name: format!("substrait expression {expr_rex_type:?}"),
- }
- .fail()?,
- }
-}
-
-/// Convert Substrait's `FieldReference` - `DirectReference` - `StructField` to Datafusion's
-/// `Column` expr.
-pub fn convert_selection_rex(selection: FieldReference, schema: &Schema) -> Result<Expr> {
- if let Some(FieldReferenceType::DirectReference(direct_ref)) = selection.reference_type
- && let Some(SegReferenceType::StructField(field)) = direct_ref.reference_type {
- let column_name = schema.column_name_by_index(field.field as _).to_string();
- Ok(Expr::Column(Column {
- relation: None,
- name: column_name,
- }))
- } else {
- InvalidParametersSnafu {
- reason: "Only support direct struct reference in Selection Rex",
- }
- .fail()
- }
-}
-
-pub fn convert_scalar_function(
- ctx: &ConvertorContext,
- scalar_fn: ScalarFunction,
- schema: &Schema,
-) -> Result<Expr> {
- // convert argument
- let mut inputs = VecDeque::with_capacity(scalar_fn.arguments.len());
- for arg in scalar_fn.arguments {
- if let Some(ArgType::Value(sub_expr)) = arg.arg_type {
- inputs.push_back(to_df_expr(ctx, sub_expr, schema)?);
- } else {
- InvalidParametersSnafu {
- reason: "Only value expression arg is supported to be function argument",
- }
- .fail()?;
- }
- }
-
- // convert this scalar function
- // map function name
- let anchor = scalar_fn.function_reference;
- let fn_name = ctx
- .find_scalar_fn(anchor)
- .with_context(|| InvalidParametersSnafu {
- reason: format!("Unregistered scalar function reference: {anchor}"),
- })?;
-
- // convenient util
- let ensure_arg_len = |expected: usize| -> Result<()> {
- ensure!(
- inputs.len() == expected,
- InvalidParametersSnafu {
- reason: format!(
- "Invalid number of scalar function {}, expected {} but found {}",
- fn_name,
- expected,
- inputs.len()
- )
- }
- );
- Ok(())
- };
-
- // construct DataFusion expr
- let expr = match fn_name {
- // begin binary exprs, with the same order of DF `Operator`'s definition.
- "eq" | "equal" => {
- ensure_arg_len(2)?;
- inputs.pop_front().unwrap().eq(inputs.pop_front().unwrap())
- }
- "not_eq" | "not_equal" => {
- ensure_arg_len(2)?;
- inputs
- .pop_front()
- .unwrap()
- .not_eq(inputs.pop_front().unwrap())
- }
- "lt" => {
- ensure_arg_len(2)?;
- inputs.pop_front().unwrap().lt(inputs.pop_front().unwrap())
- }
- "lt_eq" | "lte" => {
- ensure_arg_len(2)?;
- inputs
- .pop_front()
- .unwrap()
- .lt_eq(inputs.pop_front().unwrap())
- }
- "gt" => {
- ensure_arg_len(2)?;
- inputs.pop_front().unwrap().gt(inputs.pop_front().unwrap())
- }
- "gt_eq" | "gte" => {
- ensure_arg_len(2)?;
- inputs
- .pop_front()
- .unwrap()
- .gt_eq(inputs.pop_front().unwrap())
- }
- "plus" => {
- ensure_arg_len(2)?;
- expr_fn::binary_expr(
- inputs.pop_front().unwrap(),
- Operator::Plus,
- inputs.pop_front().unwrap(),
- )
- }
- "minus" => {
- ensure_arg_len(2)?;
- expr_fn::binary_expr(
- inputs.pop_front().unwrap(),
- Operator::Minus,
- inputs.pop_front().unwrap(),
- )
- }
- "multiply" => {
- ensure_arg_len(2)?;
- expr_fn::binary_expr(
- inputs.pop_front().unwrap(),
- Operator::Multiply,
- inputs.pop_front().unwrap(),
- )
- }
- "divide" => {
- ensure_arg_len(2)?;
- expr_fn::binary_expr(
- inputs.pop_front().unwrap(),
- Operator::Divide,
- inputs.pop_front().unwrap(),
- )
- }
- "modulo" => {
- ensure_arg_len(2)?;
- expr_fn::binary_expr(
- inputs.pop_front().unwrap(),
- Operator::Modulo,
- inputs.pop_front().unwrap(),
- )
- }
- "and" => {
- ensure_arg_len(2)?;
- expr_fn::and(inputs.pop_front().unwrap(), inputs.pop_front().unwrap())
- }
- "or" => {
- ensure_arg_len(2)?;
- expr_fn::or(inputs.pop_front().unwrap(), inputs.pop_front().unwrap())
- }
- "like" => {
- ensure_arg_len(2)?;
- inputs
- .pop_front()
- .unwrap()
- .like(inputs.pop_front().unwrap())
- }
- "not_like" => {
- ensure_arg_len(2)?;
- inputs
- .pop_front()
- .unwrap()
- .not_like(inputs.pop_front().unwrap())
- }
- "is_distinct_from" => {
- ensure_arg_len(2)?;
- expr_fn::binary_expr(
- inputs.pop_front().unwrap(),
- Operator::IsDistinctFrom,
- inputs.pop_front().unwrap(),
- )
- }
- "is_not_distinct_from" => {
- ensure_arg_len(2)?;
- expr_fn::binary_expr(
- inputs.pop_front().unwrap(),
- Operator::IsNotDistinctFrom,
- inputs.pop_front().unwrap(),
- )
- }
- "regex_match" => {
- ensure_arg_len(2)?;
- expr_fn::binary_expr(
- inputs.pop_front().unwrap(),
- Operator::RegexMatch,
- inputs.pop_front().unwrap(),
- )
- }
- "regex_i_match" => {
- ensure_arg_len(2)?;
- expr_fn::binary_expr(
- inputs.pop_front().unwrap(),
- Operator::RegexIMatch,
- inputs.pop_front().unwrap(),
- )
- }
- "regex_not_match" => {
- ensure_arg_len(2)?;
- expr_fn::binary_expr(
- inputs.pop_front().unwrap(),
- Operator::RegexNotMatch,
- inputs.pop_front().unwrap(),
- )
- }
- "regex_not_i_match" => {
- ensure_arg_len(2)?;
- expr_fn::binary_expr(
- inputs.pop_front().unwrap(),
- Operator::RegexNotIMatch,
- inputs.pop_front().unwrap(),
- )
- }
- "bitwise_and" => {
- ensure_arg_len(2)?;
- expr_fn::binary_expr(
- inputs.pop_front().unwrap(),
- Operator::BitwiseAnd,
- inputs.pop_front().unwrap(),
- )
- }
- "bitwise_or" => {
- ensure_arg_len(2)?;
- expr_fn::binary_expr(
- inputs.pop_front().unwrap(),
- Operator::BitwiseOr,
- inputs.pop_front().unwrap(),
- )
- }
- // end binary exprs
- // start other direct expr, with the same order of DF `Expr`'s definition.
- "not" => {
- ensure_arg_len(1)?;
- inputs.pop_front().unwrap().not()
- }
- "is_not_null" => {
- ensure_arg_len(1)?;
- inputs.pop_front().unwrap().is_not_null()
- }
- "is_null" => {
- ensure_arg_len(1)?;
- inputs.pop_front().unwrap().is_null()
- }
- "negative" => {
- ensure_arg_len(1)?;
- Expr::Negative(Box::new(inputs.pop_front().unwrap()))
- }
- // skip GetIndexedField, unimplemented.
- "between" => {
- ensure_arg_len(3)?;
- Expr::Between(Between {
- expr: Box::new(inputs.pop_front().unwrap()),
- negated: false,
- low: Box::new(inputs.pop_front().unwrap()),
- high: Box::new(inputs.pop_front().unwrap()),
- })
- }
- "not_between" => {
- ensure_arg_len(3)?;
- Expr::Between(Between {
- expr: Box::new(inputs.pop_front().unwrap()),
- negated: true,
- low: Box::new(inputs.pop_front().unwrap()),
- high: Box::new(inputs.pop_front().unwrap()),
- })
- }
- // skip Case, is covered in substrait::SwitchExpression.
- // skip Cast and TryCast, is covered in substrait::Cast.
- "sort" | "sort_des" => {
- ensure_arg_len(1)?;
- Expr::Sort(Sort {
- expr: Box::new(inputs.pop_front().unwrap()),
- asc: false,
- nulls_first: false,
- })
- }
- "sort_asc" => {
- ensure_arg_len(1)?;
- Expr::Sort(Sort {
- expr: Box::new(inputs.pop_front().unwrap()),
- asc: true,
- nulls_first: false,
- })
- }
- // those are datafusion built-in "scalar functions".
- "abs"
- | "acos"
- | "asin"
- | "atan"
- | "atan2"
- | "ceil"
- | "cos"
- | "exp"
- | "floor"
- | "ln"
- | "log"
- | "log10"
- | "log2"
- | "power"
- | "pow"
- | "round"
- | "signum"
- | "sin"
- | "sqrt"
- | "tan"
- | "trunc"
- | "coalesce"
- | "make_array"
- | "ascii"
- | "bit_length"
- | "btrim"
- | "char_length"
- | "character_length"
- | "concat"
- | "concat_ws"
- | "chr"
- | "current_date"
- | "current_time"
- | "date_part"
- | "datepart"
- | "date_trunc"
- | "datetrunc"
- | "date_bin"
- | "initcap"
- | "left"
- | "length"
- | "lower"
- | "lpad"
- | "ltrim"
- | "md5"
- | "nullif"
- | "octet_length"
- | "random"
- | "regexp_replace"
- | "repeat"
- | "replace"
- | "reverse"
- | "right"
- | "rpad"
- | "rtrim"
- | "sha224"
- | "sha256"
- | "sha384"
- | "sha512"
- | "digest"
- | "split_part"
- | "starts_with"
- | "strpos"
- | "substr"
- | "to_hex"
- | "to_timestamp"
- | "to_timestamp_millis"
- | "to_timestamp_micros"
- | "to_timestamp_seconds"
- | "now"
- | "translate"
- | "trim"
- | "upper"
- | "uuid"
- | "regexp_match"
- | "struct"
- | "from_unixtime"
- | "arrow_typeof" => Expr::ScalarFunction {
- fun: BuiltinScalarFunction::from_str(fn_name).unwrap(),
- args: inputs.into(),
- },
- // skip ScalarUDF, unimplemented.
- // skip AggregateFunction, is covered in substrait::AggregateRel
- // skip WindowFunction, is covered in substrait WindowFunction
- // skip AggregateUDF, unimplemented.
- // skip InList, unimplemented
- // skip Wildcard, unimplemented.
- // end other direct expr
- _ => UnsupportedExprSnafu {
- name: format!("scalar function {fn_name}"),
- }
- .fail()?,
- };
-
- Ok(expr)
-}
-
-/// Convert DataFusion's `Expr` to substrait's `Expression`
-pub fn expression_from_df_expr(
- ctx: &mut ConvertorContext,
- expr: &Expr,
- schema: &Schema,
-) -> Result<Expression> {
- let expression = match expr {
- // Don't merge them with other unsupported expr arms to preserve the ordering.
- Expr::Alias(..) => UnsupportedExprSnafu {
- name: expr.to_string(),
- }
- .fail()?,
- Expr::Column(column) => {
- let field_reference = convert_column(column, schema)?;
- Expression {
- rex_type: Some(RexType::Selection(Box::new(field_reference))),
- }
- }
- // Don't merge them with other unsupported expr arms to preserve the ordering.
- Expr::ScalarVariable(..) => UnsupportedExprSnafu {
- name: expr.to_string(),
- }
- .fail()?,
- Expr::Literal(v) => {
- let t = scalar_value_as_literal_type(v)?;
- let l = Literal {
- nullable: true,
- type_variation_reference: 0,
- literal_type: Some(t),
- };
- Expression {
- rex_type: Some(RexType::Literal(l)),
- }
- }
- Expr::BinaryExpr(BinaryExpr { left, op, right }) => {
- let left = expression_from_df_expr(ctx, left, schema)?;
- let right = expression_from_df_expr(ctx, right, schema)?;
- let arguments = utils::expression_to_argument(vec![left, right]);
- let op_name = utils::name_df_operator(op);
- let function_reference = ctx.register_scalar_fn(op_name);
- utils::build_scalar_function_expression(function_reference, arguments)
- }
- Expr::Not(e) => {
- let arg = expression_from_df_expr(ctx, e, schema)?;
- let arguments = utils::expression_to_argument(vec![arg]);
- let op_name = "not";
- let function_reference = ctx.register_scalar_fn(op_name);
- utils::build_scalar_function_expression(function_reference, arguments)
- }
- Expr::IsNotNull(e) => {
- let arg = expression_from_df_expr(ctx, e, schema)?;
- let arguments = utils::expression_to_argument(vec![arg]);
- let op_name = "is_not_null";
- let function_reference = ctx.register_scalar_fn(op_name);
- utils::build_scalar_function_expression(function_reference, arguments)
- }
- Expr::IsNull(e) => {
- let arg = expression_from_df_expr(ctx, e, schema)?;
- let arguments = utils::expression_to_argument(vec![arg]);
- let op_name = "is_null";
- let function_reference = ctx.register_scalar_fn(op_name);
- utils::build_scalar_function_expression(function_reference, arguments)
- }
- Expr::Negative(e) => {
- let arg = expression_from_df_expr(ctx, e, schema)?;
- let arguments = utils::expression_to_argument(vec![arg]);
- let op_name = "negative";
- let function_reference = ctx.register_scalar_fn(op_name);
- utils::build_scalar_function_expression(function_reference, arguments)
- }
- // Don't merge them with other unsupported expr arms to preserve the ordering.
- Expr::GetIndexedField { .. } => UnsupportedExprSnafu {
- name: expr.to_string(),
- }
- .fail()?,
- Expr::Between(Between {
- expr,
- negated,
- low,
- high,
- }) => {
- let expr = expression_from_df_expr(ctx, expr, schema)?;
- let low = expression_from_df_expr(ctx, low, schema)?;
- let high = expression_from_df_expr(ctx, high, schema)?;
- let arguments = utils::expression_to_argument(vec![expr, low, high]);
- let op_name = if *negated { "not_between" } else { "between" };
- let function_reference = ctx.register_scalar_fn(op_name);
- utils::build_scalar_function_expression(function_reference, arguments)
- }
- // Don't merge them with other unsupported expr arms to preserve the ordering.
- Expr::Case { .. } | Expr::Cast { .. } | Expr::TryCast { .. } => UnsupportedExprSnafu {
- name: expr.to_string(),
- }
- .fail()?,
- Expr::Sort(Sort {
- expr,
- asc,
- nulls_first: _,
- }) => {
- let expr = expression_from_df_expr(ctx, expr, schema)?;
- let arguments = utils::expression_to_argument(vec![expr]);
- let op_name = if *asc { "sort_asc" } else { "sort_des" };
- let function_reference = ctx.register_scalar_fn(op_name);
- utils::build_scalar_function_expression(function_reference, arguments)
- }
- Expr::ScalarFunction { fun, args } => {
- let arguments = utils::expression_to_argument(
- args.iter()
- .map(|e| expression_from_df_expr(ctx, e, schema))
- .collect::<Result<Vec<_>>>()?,
- );
- let op_name = utils::name_builtin_scalar_function(fun);
- let function_reference = ctx.register_scalar_fn(op_name);
- utils::build_scalar_function_expression(function_reference, arguments)
- }
- // Don't merge them with other unsupported expr arms to preserve the ordering.
- Expr::ScalarUDF { .. }
- | Expr::AggregateFunction { .. }
- | Expr::WindowFunction { .. }
- | Expr::AggregateUDF { .. }
- | Expr::InList { .. }
- | Expr::Wildcard
- | Expr::Like(_)
- | Expr::ILike(_)
- | Expr::SimilarTo(_)
- | Expr::IsTrue(_)
- | Expr::IsFalse(_)
- | Expr::IsUnknown(_)
- | Expr::IsNotTrue(_)
- | Expr::IsNotFalse(_)
- | Expr::IsNotUnknown(_)
- | Expr::Exists { .. }
- | Expr::InSubquery { .. }
- | Expr::ScalarSubquery(..)
- | Expr::Placeholder { .. }
- | Expr::QualifiedWildcard { .. } => todo!(),
- Expr::GroupingSet(_) | Expr::OuterReferenceColumn(_, _) => UnsupportedExprSnafu {
- name: expr.to_string(),
- }
- .fail()?,
- };
-
- Ok(expression)
-}
-
-/// Convert DataFusion's `Column` expr into substrait's `FieldReference` -
-/// `DirectReference` - `StructField`.
-pub fn convert_column(column: &Column, schema: &Schema) -> Result<FieldReference> {
- let column_name = &column.name;
- let field_index =
- schema
- .column_index_by_name(column_name)
- .with_context(|| MissingFieldSnafu {
- field: format!("{column:?}"),
- plan: format!("schema: {schema:?}"),
- })?;
-
- Ok(FieldReference {
- reference_type: Some(FieldReferenceType::DirectReference(ReferenceSegment {
- reference_type: Some(SegReferenceType::StructField(Box::new(StructField {
- field: field_index as _,
- child: None,
- }))),
- })),
- root_type: None,
- })
-}
-
-/// Some utils special for this `DataFusion::Expr` and `Substrait::Expression` conversion.
-mod utils {
- use datafusion_expr::{BuiltinScalarFunction, Operator};
- use substrait_proto::proto::expression::{RexType, ScalarFunction};
- use substrait_proto::proto::function_argument::ArgType;
- use substrait_proto::proto::{Expression, FunctionArgument};
-
- pub(crate) fn name_df_operator(op: &Operator) -> &str {
- match op {
- Operator::Eq => "equal",
- Operator::NotEq => "not_equal",
- Operator::Lt => "lt",
- Operator::LtEq => "lte",
- Operator::Gt => "gt",
- Operator::GtEq => "gte",
- Operator::Plus => "plus",
- Operator::Minus => "minus",
- Operator::Multiply => "multiply",
- Operator::Divide => "divide",
- Operator::Modulo => "modulo",
- Operator::And => "and",
- Operator::Or => "or",
- Operator::IsDistinctFrom => "is_distinct_from",
- Operator::IsNotDistinctFrom => "is_not_distinct_from",
- Operator::RegexMatch => "regex_match",
- Operator::RegexIMatch => "regex_i_match",
- Operator::RegexNotMatch => "regex_not_match",
- Operator::RegexNotIMatch => "regex_not_i_match",
- Operator::BitwiseAnd => "bitwise_and",
- Operator::BitwiseOr => "bitwise_or",
- Operator::BitwiseXor => "bitwise_xor",
- Operator::BitwiseShiftRight => "bitwise_shift_right",
- Operator::BitwiseShiftLeft => "bitwise_shift_left",
- Operator::StringConcat => "string_concat",
- }
- }
-
- /// Convert list of [Expression] to [FunctionArgument] vector.
- pub(crate) fn expression_to_argument<I: IntoIterator<Item = Expression>>(
- expressions: I,
- ) -> Vec<FunctionArgument> {
- expressions
- .into_iter()
- .map(|expr| FunctionArgument {
- arg_type: Some(ArgType::Value(expr)),
- })
- .collect()
- }
-
- /// Convenient builder for [Expression]
- pub(crate) fn build_scalar_function_expression(
- function_reference: u32,
- arguments: Vec<FunctionArgument>,
- ) -> Expression {
- Expression {
- rex_type: Some(RexType::ScalarFunction(ScalarFunction {
- function_reference,
- arguments,
- output_type: None,
- ..Default::default()
- })),
- }
- }
-
- pub(crate) fn name_builtin_scalar_function(fun: &BuiltinScalarFunction) -> &str {
- match fun {
- BuiltinScalarFunction::Abs => "abs",
- BuiltinScalarFunction::Acos => "acos",
- BuiltinScalarFunction::Asin => "asin",
- BuiltinScalarFunction::Atan => "atan",
- BuiltinScalarFunction::Ceil => "ceil",
- BuiltinScalarFunction::Cos => "cos",
- BuiltinScalarFunction::Digest => "digest",
- BuiltinScalarFunction::Exp => "exp",
- BuiltinScalarFunction::Floor => "floor",
- BuiltinScalarFunction::Ln => "ln",
- BuiltinScalarFunction::Log => "log",
- BuiltinScalarFunction::Log10 => "log10",
- BuiltinScalarFunction::Log2 => "log2",
- BuiltinScalarFunction::Round => "round",
- BuiltinScalarFunction::Signum => "signum",
- BuiltinScalarFunction::Sin => "sin",
- BuiltinScalarFunction::Sqrt => "sqrt",
- BuiltinScalarFunction::Tan => "tan",
- BuiltinScalarFunction::Trunc => "trunc",
- BuiltinScalarFunction::Ascii => "ascii",
- BuiltinScalarFunction::BitLength => "bit_length",
- BuiltinScalarFunction::Btrim => "btrim",
- BuiltinScalarFunction::CharacterLength => "character_length",
- BuiltinScalarFunction::Chr => "chr",
- BuiltinScalarFunction::Concat => "concat",
- BuiltinScalarFunction::ConcatWithSeparator => "concat_ws",
- BuiltinScalarFunction::DatePart => "date_part",
- BuiltinScalarFunction::DateTrunc => "date_trunc",
- BuiltinScalarFunction::InitCap => "initcap",
- BuiltinScalarFunction::Left => "left",
- BuiltinScalarFunction::Lpad => "lpad",
- BuiltinScalarFunction::Lower => "lower",
- BuiltinScalarFunction::Ltrim => "ltrim",
- BuiltinScalarFunction::MD5 => "md5",
- BuiltinScalarFunction::NullIf => "nullif",
- BuiltinScalarFunction::OctetLength => "octet_length",
- BuiltinScalarFunction::Random => "random",
- BuiltinScalarFunction::RegexpReplace => "regexp_replace",
- BuiltinScalarFunction::Repeat => "repeat",
- BuiltinScalarFunction::Replace => "replace",
- BuiltinScalarFunction::Reverse => "reverse",
- BuiltinScalarFunction::Right => "right",
- BuiltinScalarFunction::Rpad => "rpad",
- BuiltinScalarFunction::Rtrim => "rtrim",
- BuiltinScalarFunction::SHA224 => "sha224",
- BuiltinScalarFunction::SHA256 => "sha256",
- BuiltinScalarFunction::SHA384 => "sha384",
- BuiltinScalarFunction::SHA512 => "sha512",
- BuiltinScalarFunction::SplitPart => "split_part",
- BuiltinScalarFunction::StartsWith => "starts_with",
- BuiltinScalarFunction::Strpos => "strpos",
- BuiltinScalarFunction::Substr => "substr",
- BuiltinScalarFunction::ToHex => "to_hex",
- BuiltinScalarFunction::ToTimestamp => "to_timestamp",
- BuiltinScalarFunction::ToTimestampMillis => "to_timestamp_millis",
- BuiltinScalarFunction::ToTimestampMicros => "to_timestamp_macros",
- BuiltinScalarFunction::ToTimestampSeconds => "to_timestamp_seconds",
- BuiltinScalarFunction::Now => "now",
- BuiltinScalarFunction::Translate => "translate",
- BuiltinScalarFunction::Trim => "trim",
- BuiltinScalarFunction::Upper => "upper",
- BuiltinScalarFunction::RegexpMatch => "regexp_match",
- BuiltinScalarFunction::Atan2 => "atan2",
- BuiltinScalarFunction::Coalesce => "coalesce",
- BuiltinScalarFunction::Power => "power",
- BuiltinScalarFunction::MakeArray => "make_array",
- BuiltinScalarFunction::DateBin => "date_bin",
- BuiltinScalarFunction::FromUnixtime => "from_unixtime",
- BuiltinScalarFunction::CurrentDate => "current_date",
- BuiltinScalarFunction::CurrentTime => "current_time",
- BuiltinScalarFunction::Uuid => "uuid",
- BuiltinScalarFunction::Struct => "struct",
- BuiltinScalarFunction::ArrowTypeof => "arrow_type_of",
- BuiltinScalarFunction::Acosh => "acosh",
- BuiltinScalarFunction::Asinh => "asinh",
- BuiltinScalarFunction::Atanh => "atanh",
- BuiltinScalarFunction::Cbrt => "cbrt",
- BuiltinScalarFunction::Cosh => "cosh",
- BuiltinScalarFunction::Pi => "pi",
- BuiltinScalarFunction::Sinh => "sinh",
- BuiltinScalarFunction::Tanh => "tanh",
- }
- }
-}
-
-#[cfg(test)]
-mod test {
- use datatypes::schema::ColumnSchema;
-
- use super::*;
-
- #[test]
- fn expr_round_trip() {
- let expr = expr_fn::and(
- expr_fn::col("column_a").lt_eq(expr_fn::col("column_b")),
- expr_fn::col("column_a").gt(expr_fn::col("column_b")),
- );
-
- let schema = Schema::new(vec![
- ColumnSchema::new(
- "column_a",
- datatypes::data_type::ConcreteDataType::int64_datatype(),
- true,
- ),
- ColumnSchema::new(
- "column_b",
- datatypes::data_type::ConcreteDataType::float64_datatype(),
- true,
- ),
- ]);
-
- let mut ctx = ConvertorContext::default();
- let substrait_expr = expression_from_df_expr(&mut ctx, &expr, &schema).unwrap();
- let converted_expr = to_df_expr(&ctx, substrait_expr, &schema).unwrap();
-
- assert_eq!(expr, converted_expr);
- }
-}
diff --git a/src/common/substrait/src/df_logical.rs b/src/common/substrait/src/df_logical.rs
deleted file mode 100644
index bb8edf45edee..000000000000
--- a/src/common/substrait/src/df_logical.rs
+++ /dev/null
@@ -1,534 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use std::sync::Arc;
-
-use async_recursion::async_recursion;
-use async_trait::async_trait;
-use bytes::{Buf, Bytes};
-use catalog::table_source::DfTableSourceProvider;
-use catalog::CatalogManagerRef;
-use common_catalog::format_full_table_name;
-use common_telemetry::debug;
-use datafusion::arrow::datatypes::SchemaRef as ArrowSchemaRef;
-use datafusion::catalog::catalog::CatalogList;
-use datafusion::common::{DFField, DFSchema};
-use datafusion::datasource::DefaultTableSource;
-use datafusion::physical_plan::project_schema;
-use datafusion::sql::TableReference;
-use datafusion_expr::{Filter, LogicalPlan, TableScan};
-use session::context::QueryContext;
-use snafu::{ensure, OptionExt, ResultExt};
-use substrait_proto::proto::expression::mask_expression::{StructItem, StructSelect};
-use substrait_proto::proto::expression::MaskExpression;
-use substrait_proto::proto::extensions::simple_extension_declaration::MappingType;
-use substrait_proto::proto::plan_rel::RelType as PlanRelType;
-use substrait_proto::proto::read_rel::{NamedTable, ReadType};
-use substrait_proto::proto::rel::RelType;
-use substrait_proto::proto::{FilterRel, Plan, PlanRel, ReadRel, Rel};
-use table::table::adapter::DfTableProviderAdapter;
-
-use crate::context::ConvertorContext;
-use crate::df_expr::{expression_from_df_expr, to_df_expr};
-use crate::error::{
- self, DFInternalSnafu, EmptyPlanSnafu, Error, InvalidParametersSnafu, MissingFieldSnafu,
- ResolveTableSnafu, SchemaNotMatchSnafu, UnknownPlanSnafu, UnsupportedExprSnafu,
- UnsupportedPlanSnafu,
-};
-use crate::schema::{from_schema, to_schema};
-use crate::SubstraitPlan;
-
-pub struct DFLogicalSubstraitConvertorDeprecated;
-
-#[async_trait]
-impl SubstraitPlan for DFLogicalSubstraitConvertorDeprecated {
- type Error = Error;
-
- type Plan = LogicalPlan;
-
- async fn decode<B: Buf + Send>(
- &self,
- _message: B,
- _catalog_list: Arc<dyn CatalogList>,
- ) -> Result<Self::Plan, Self::Error> {
- unimplemented!()
- }
-
- fn encode(&self, plan: Self::Plan) -> Result<Bytes, Self::Error> {
- unimplemented!()
- }
-}
-
-impl DFLogicalSubstraitConvertorDeprecated {
- async fn convert_plan(
- &self,
- mut plan: Plan,
- catalog_manager: CatalogManagerRef,
- ) -> Result<LogicalPlan, Error> {
- // prepare convertor context
- let mut ctx = ConvertorContext::default();
- for simple_ext in plan.extensions {
- if let Some(MappingType::ExtensionFunction(function_extension)) =
- simple_ext.mapping_type
- {
- ctx.register_scalar_with_anchor(
- function_extension.name,
- function_extension.function_anchor,
- );
- } else {
- debug!("Encounter unsupported substrait extension {:?}", simple_ext);
- }
- }
-
- // extract rel
- let rel = if let Some(PlanRel { rel_type }) = plan.relations.pop()
- && let Some(PlanRelType::Rel(rel)) = rel_type {
- rel
- } else {
- UnsupportedPlanSnafu {
- name: "Emply or non-Rel relation",
- }
- .fail()?
- };
-
- // TODO(LFC): Create table provider from outside, respect "disallow_cross_schema_query" option in query engine state.
- let mut table_provider =
- DfTableSourceProvider::new(catalog_manager, false, &QueryContext::new());
- self.rel_to_logical_plan(&mut ctx, Box::new(rel), &mut table_provider)
- .await
- }
-
- #[async_recursion]
- async fn rel_to_logical_plan(
- &self,
- ctx: &mut ConvertorContext,
- rel: Box<Rel>,
- table_provider: &mut DfTableSourceProvider,
- ) -> Result<LogicalPlan, Error> {
- let rel_type = rel.rel_type.context(EmptyPlanSnafu)?;
-
- // build logical plan
- let logical_plan = match rel_type {
- RelType::Read(read_rel) => self.convert_read_rel(ctx, read_rel, table_provider).await?,
- RelType::Filter(filter) => {
- let FilterRel {
- common: _,
- input,
- condition,
- advanced_extension: _,
- } = *filter;
-
- let input = input.context(MissingFieldSnafu {
- field: "input",
- plan: "Filter",
- })?;
- let input = Arc::new(self.rel_to_logical_plan(ctx, input, table_provider).await?);
-
- let condition = condition.context(MissingFieldSnafu {
- field: "condition",
- plan: "Filter",
- })?;
-
- let schema = ctx.df_schema().context(InvalidParametersSnafu {
- reason: "the underlying TableScan plan should have included a table schema",
- })?;
- let schema = schema
- .clone()
- .try_into()
- .context(error::ConvertDfSchemaSnafu)?;
- let predicate = to_df_expr(ctx, *condition, &schema)?;
-
- LogicalPlan::Filter(Filter::try_new(predicate, input).context(DFInternalSnafu)?)
- }
- RelType::Fetch(_fetch_rel) => UnsupportedPlanSnafu {
- name: "Fetch Relation",
- }
- .fail()?,
- RelType::Aggregate(_aggr_rel) => UnsupportedPlanSnafu {
- name: "Fetch Relation",
- }
- .fail()?,
- RelType::Sort(_sort_rel) => UnsupportedPlanSnafu {
- name: "Sort Relation",
- }
- .fail()?,
- RelType::Join(_join_rel) => UnsupportedPlanSnafu {
- name: "Join Relation",
- }
- .fail()?,
- RelType::Project(_project_rel) => UnsupportedPlanSnafu {
- name: "Project Relation",
- }
- .fail()?,
- RelType::Set(_set_rel) => UnsupportedPlanSnafu {
- name: "Set Relation",
- }
- .fail()?,
- RelType::ExtensionSingle(_ext_single_rel) => UnsupportedPlanSnafu {
- name: "Extension Single Relation",
- }
- .fail()?,
- RelType::ExtensionMulti(_ext_multi_rel) => UnsupportedPlanSnafu {
- name: "Extension Multi Relation",
- }
- .fail()?,
- RelType::ExtensionLeaf(_ext_leaf_rel) => UnsupportedPlanSnafu {
- name: "Extension Leaf Relation",
- }
- .fail()?,
- RelType::Cross(_cross_rel) => UnsupportedPlanSnafu {
- name: "Cross Relation",
- }
- .fail()?,
- RelType::HashJoin(_) => UnsupportedPlanSnafu {
- name: "Cross Relation",
- }
- .fail()?,
- RelType::MergeJoin(_) => UnsupportedPlanSnafu {
- name: "Cross Relation",
- }
- .fail()?,
- };
-
- Ok(logical_plan)
- }
-
- async fn convert_read_rel(
- &self,
- ctx: &mut ConvertorContext,
- read_rel: Box<ReadRel>,
- table_provider: &mut DfTableSourceProvider,
- ) -> Result<LogicalPlan, Error> {
- // Extract the catalog, schema and table name from NamedTable. Assume the first three are those names.
- let read_type = read_rel.read_type.context(MissingFieldSnafu {
- field: "read_type",
- plan: "Read",
- })?;
- let (table_name, schema_name, catalog_name) = match read_type {
- ReadType::NamedTable(mut named_table) => {
- ensure!(
- named_table.names.len() == 3,
- InvalidParametersSnafu {
- reason:
- "NamedTable should contains three names for catalog, schema and table",
- }
- );
- (
- named_table.names.pop().unwrap(),
- named_table.names.pop().unwrap(),
- named_table.names.pop().unwrap(),
- )
- }
- ReadType::VirtualTable(_) | ReadType::LocalFiles(_) | ReadType::ExtensionTable(_) => {
- UnsupportedExprSnafu {
- name: "Non-NamedTable Read",
- }
- .fail()?
- }
- };
-
- // Get projection indices
- let projection = read_rel
- .projection
- .map(|mask_expr| self.convert_mask_expression(mask_expr));
-
- let table_ref = TableReference::full(
- catalog_name.clone(),
- schema_name.clone(),
- table_name.clone(),
- );
- let adapter = table_provider
- .resolve_table(table_ref.clone())
- .await
- .with_context(|_| ResolveTableSnafu {
- table_name: format_full_table_name(&catalog_name, &schema_name, &table_name),
- })?;
-
- // Get schema directly from the table, and compare it with the schema retrieved from substrait proto.
- let stored_schema = adapter.schema();
- let retrieved_schema = to_schema(read_rel.base_schema.unwrap_or_default())?;
- let retrieved_arrow_schema = retrieved_schema.arrow_schema();
- ensure!(
- same_schema_without_metadata(&stored_schema, retrieved_arrow_schema),
- SchemaNotMatchSnafu {
- substrait_schema: retrieved_arrow_schema.clone(),
- storage_schema: stored_schema
- }
- );
-
- // Convert filter
- let filters = if let Some(filter) = read_rel.filter {
- vec![to_df_expr(ctx, *filter, &retrieved_schema)?]
- } else {
- vec![]
- };
-
- // Calculate the projected schema
- let projected_schema = Arc::new(
- project_schema(&stored_schema, projection.as_ref())
- .and_then(|x| {
- DFSchema::new_with_metadata(
- x.fields()
- .iter()
- .map(|f| DFField::from_qualified(table_ref.clone(), f.clone()))
- .collect(),
- x.metadata().clone(),
- )
- })
- .context(DFInternalSnafu)?,
- );
-
- ctx.set_df_schema(projected_schema.clone());
-
- // TODO(ruihang): Support limit(fetch)
- Ok(LogicalPlan::TableScan(TableScan {
- table_name: table_ref,
- source: adapter,
- projection,
- projected_schema,
- filters,
- fetch: None,
- }))
- }
-
- fn convert_mask_expression(&self, mask_expression: MaskExpression) -> Vec<usize> {
- mask_expression
- .select
- .unwrap_or_default()
- .struct_items
- .into_iter()
- .map(|select| select.field as _)
- .collect()
- }
-}
-
-impl DFLogicalSubstraitConvertorDeprecated {
- fn logical_plan_to_rel(
- &self,
- ctx: &mut ConvertorContext,
- plan: Arc<LogicalPlan>,
- ) -> Result<Rel, Error> {
- Ok(match &*plan {
- LogicalPlan::Projection(_) => UnsupportedPlanSnafu {
- name: "DataFusion Logical Projection",
- }
- .fail()?,
- LogicalPlan::Filter(filter) => {
- let input = Some(Box::new(
- self.logical_plan_to_rel(ctx, filter.input.clone())?,
- ));
-
- let schema = plan
- .schema()
- .clone()
- .try_into()
- .context(error::ConvertDfSchemaSnafu)?;
- let condition = Some(Box::new(expression_from_df_expr(
- ctx,
- &filter.predicate,
- &schema,
- )?));
-
- let rel = FilterRel {
- common: None,
- input,
- condition,
- advanced_extension: None,
- };
- Rel {
- rel_type: Some(RelType::Filter(Box::new(rel))),
- }
- }
- LogicalPlan::Window(_) => UnsupportedPlanSnafu {
- name: "DataFusion Logical Window",
- }
- .fail()?,
- LogicalPlan::Aggregate(_) => UnsupportedPlanSnafu {
- name: "DataFusion Logical Aggregate",
- }
- .fail()?,
- LogicalPlan::Sort(_) => UnsupportedPlanSnafu {
- name: "DataFusion Logical Sort",
- }
- .fail()?,
- LogicalPlan::Join(_) => UnsupportedPlanSnafu {
- name: "DataFusion Logical Join",
- }
- .fail()?,
- LogicalPlan::CrossJoin(_) => UnsupportedPlanSnafu {
- name: "DataFusion Logical CrossJoin",
- }
- .fail()?,
- LogicalPlan::Repartition(_) => UnsupportedPlanSnafu {
- name: "DataFusion Logical Repartition",
- }
- .fail()?,
- LogicalPlan::Union(_) => UnsupportedPlanSnafu {
- name: "DataFusion Logical Union",
- }
- .fail()?,
- LogicalPlan::TableScan(table_scan) => {
- let read_rel = self.convert_table_scan_plan(ctx, table_scan)?;
- Rel {
- rel_type: Some(RelType::Read(Box::new(read_rel))),
- }
- }
- LogicalPlan::EmptyRelation(_) => UnsupportedPlanSnafu {
- name: "DataFusion Logical EmptyRelation",
- }
- .fail()?,
- LogicalPlan::Limit(_) => UnsupportedPlanSnafu {
- name: "DataFusion Logical Limit",
- }
- .fail()?,
-
- LogicalPlan::Subquery(_)
- | LogicalPlan::SubqueryAlias(_)
- | LogicalPlan::CreateView(_)
- | LogicalPlan::CreateCatalogSchema(_)
- | LogicalPlan::CreateCatalog(_)
- | LogicalPlan::DropView(_)
- | LogicalPlan::Distinct(_)
- | LogicalPlan::CreateExternalTable(_)
- | LogicalPlan::CreateMemoryTable(_)
- | LogicalPlan::DropTable(_)
- | LogicalPlan::Values(_)
- | LogicalPlan::Explain(_)
- | LogicalPlan::Analyze(_)
- | LogicalPlan::Extension(_)
- | LogicalPlan::Prepare(_)
- | LogicalPlan::Dml(_)
- | LogicalPlan::DescribeTable(_)
- | LogicalPlan::Unnest(_)
- | LogicalPlan::Statement(_) => InvalidParametersSnafu {
- reason: format!(
- "Trying to convert DDL/DML plan to substrait proto, plan: {plan:?}",
- ),
- }
- .fail()?,
- })
- }
-
- fn convert_df_plan(&self, plan: LogicalPlan) -> Result<Plan, Error> {
- let mut ctx = ConvertorContext::default();
-
- let rel = self.logical_plan_to_rel(&mut ctx, Arc::new(plan))?;
-
- // convert extension
- let extensions = ctx.generate_function_extension();
-
- // assemble PlanRel
- let plan_rel = PlanRel {
- rel_type: Some(PlanRelType::Rel(rel)),
- };
-
- Ok(Plan {
- extension_uris: vec![],
- extensions,
- relations: vec![plan_rel],
- advanced_extensions: None,
- expected_type_urls: vec![],
- ..Default::default()
- })
- }
-
- pub fn convert_table_scan_plan(
- &self,
- ctx: &mut ConvertorContext,
- table_scan: &TableScan,
- ) -> Result<ReadRel, Error> {
- let provider = table_scan
- .source
- .as_any()
- .downcast_ref::<DefaultTableSource>()
- .context(UnknownPlanSnafu)?
- .table_provider
- .as_any()
- .downcast_ref::<DfTableProviderAdapter>()
- .context(UnknownPlanSnafu)?;
- let table_info = provider.table().table_info();
-
- // assemble NamedTable and ReadType
- let catalog_name = table_info.catalog_name.clone();
- let schema_name = table_info.schema_name.clone();
- let table_name = table_info.name.clone();
- let named_table = NamedTable {
- names: vec![catalog_name, schema_name, table_name],
- advanced_extension: None,
- };
- let read_type = ReadType::NamedTable(named_table);
-
- // assemble projection
- let projection = table_scan
- .projection
- .as_ref()
- .map(|x| self.convert_schema_projection(x));
-
- // assemble base (unprojected) schema using Table's schema.
- let base_schema = from_schema(&provider.table().schema())?;
-
- // make conjunction over a list of filters and convert the result to substrait
- let filter = if let Some(conjunction) = table_scan
- .filters
- .iter()
- .cloned()
- .reduce(|accum, expr| accum.and(expr))
- {
- Some(Box::new(expression_from_df_expr(
- ctx,
- &conjunction,
- &provider.table().schema(),
- )?))
- } else {
- None
- };
-
- let read_rel = ReadRel {
- common: None,
- base_schema: Some(base_schema),
- filter,
- projection,
- advanced_extension: None,
- read_type: Some(read_type),
- ..Default::default()
- };
-
- Ok(read_rel)
- }
-
- /// Convert a index-based schema projection to substrait's [MaskExpression].
- fn convert_schema_projection(&self, projections: &[usize]) -> MaskExpression {
- let struct_items = projections
- .iter()
- .map(|index| StructItem {
- field: *index as i32,
- child: None,
- })
- .collect();
- MaskExpression {
- select: Some(StructSelect { struct_items }),
- // TODO(ruihang): this field is unspecified
- maintain_singular_struct: true,
- }
- }
-}
-
-fn same_schema_without_metadata(lhs: &ArrowSchemaRef, rhs: &ArrowSchemaRef) -> bool {
- lhs.fields.len() == rhs.fields.len()
- && lhs.fields.iter().zip(rhs.fields.iter()).all(|(x, y)| {
- x.name() == y.name()
- && x.data_type() == y.data_type()
- && x.is_nullable() == y.is_nullable()
- })
-}
diff --git a/src/common/substrait/src/df_substrait.rs b/src/common/substrait/src/df_substrait.rs
index ceb2760acfd8..4936bcb8b9cb 100644
--- a/src/common/substrait/src/df_substrait.rs
+++ b/src/common/substrait/src/df_substrait.rs
@@ -52,8 +52,9 @@ impl SubstraitPlan for DFLogicalSubstraitConvertor {
fn encode(&self, plan: Self::Plan) -> Result<Bytes, Self::Error> {
let mut buf = BytesMut::new();
- let substrait_plan = to_substrait_plan(&plan).context(EncodeDfPlanSnafu)?;
+ let context = SessionContext::new();
+ let substrait_plan = to_substrait_plan(&plan, &context).context(EncodeDfPlanSnafu)?;
substrait_plan.encode(&mut buf).context(EncodeRelSnafu)?;
Ok(buf.freeze())
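
The encode path above now builds a SessionContext because DataFusion's substrait producer takes one. A hedged sketch of the standalone call, with module paths and the prost Message import assumed rather than taken from this patch:

use bytes::{Bytes, BytesMut};
use datafusion::prelude::SessionContext;
use datafusion_expr::LogicalPlan;
use datafusion_substrait::logical_plan::producer::to_substrait_plan;
use prost::Message;

fn encode_plan(plan: &LogicalPlan) -> Result<Bytes, Box<dyn std::error::Error>> {
    // The newer producer API requires a session context alongside the plan.
    let context = SessionContext::new();
    let substrait_plan = to_substrait_plan(plan, &context)?;

    let mut buf = BytesMut::new();
    substrait_plan.encode(&mut buf)?;
    Ok(buf.freeze())
}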
diff --git a/src/common/substrait/src/lib.rs b/src/common/substrait/src/lib.rs
index 420ef6e39dac..07b2fd5c6bd9 100644
--- a/src/common/substrait/src/lib.rs
+++ b/src/common/substrait/src/lib.rs
@@ -15,14 +15,8 @@
#![feature(let_chains)]
#![feature(trait_upcasting)]
-mod context;
-mod df_expr;
-#[allow(unused)]
-mod df_logical;
mod df_substrait;
pub mod error;
-mod schema;
-mod types;
use std::sync::Arc;
diff --git a/src/common/substrait/src/schema.rs b/src/common/substrait/src/schema.rs
deleted file mode 100644
index a9611baec848..000000000000
--- a/src/common/substrait/src/schema.rs
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use datatypes::schema::{ColumnSchema, Schema};
-use substrait_proto::proto::r#type::{Nullability, Struct as SubstraitStruct};
-use substrait_proto::proto::NamedStruct;
-
-use crate::error::Result;
-use crate::types::{from_concrete_type, to_concrete_type};
-
-pub fn to_schema(named_struct: NamedStruct) -> Result<Schema> {
- if named_struct.r#struct.is_none() {
- return Ok(Schema::new(vec![]));
- }
-
- let column_schemas = named_struct
- .r#struct
- .unwrap()
- .types
- .into_iter()
- .zip(named_struct.names.into_iter())
- .map(|(ty, name)| {
- let (concrete_type, is_nullable) = to_concrete_type(&ty)?;
- let column_schema = ColumnSchema::new(name, concrete_type, is_nullable);
- Ok(column_schema)
- })
- .collect::<Result<_>>()?;
-
- Ok(Schema::new(column_schemas))
-}
-
-pub fn from_schema(schema: &Schema) -> Result<NamedStruct> {
- let mut names = Vec::with_capacity(schema.num_columns());
- let mut types = Vec::with_capacity(schema.num_columns());
-
- for column_schema in schema.column_schemas() {
- names.push(column_schema.name.clone());
- let substrait_type = from_concrete_type(
- column_schema.data_type.clone(),
- Some(column_schema.is_nullable()),
- )?;
- types.push(substrait_type);
- }
-
- // TODO(ruihang): `type_variation_reference` and `nullability` are unspecified.
- let substrait_struct = SubstraitStruct {
- types,
- type_variation_reference: 0,
- nullability: Nullability::Unspecified as _,
- };
-
- Ok(NamedStruct {
- names,
- r#struct: Some(substrait_struct),
- })
-}
-
-#[cfg(test)]
-pub(crate) mod test {
- use datatypes::prelude::{ConcreteDataType, DataType};
-
- use super::*;
-
- pub(crate) fn supported_types() -> Vec<ColumnSchema> {
- [
- ConcreteDataType::null_datatype(),
- ConcreteDataType::boolean_datatype(),
- ConcreteDataType::int8_datatype(),
- ConcreteDataType::int16_datatype(),
- ConcreteDataType::int32_datatype(),
- ConcreteDataType::int64_datatype(),
- ConcreteDataType::uint8_datatype(),
- ConcreteDataType::uint16_datatype(),
- ConcreteDataType::uint32_datatype(),
- ConcreteDataType::uint64_datatype(),
- ConcreteDataType::float32_datatype(),
- ConcreteDataType::float64_datatype(),
- ConcreteDataType::binary_datatype(),
- ConcreteDataType::string_datatype(),
- ConcreteDataType::date_datatype(),
- ConcreteDataType::timestamp_datatype(Default::default()),
- // TODO(ruihang): DateTime and List type are not supported now
- ]
- .into_iter()
- .enumerate()
- .map(|(ordinal, ty)| ColumnSchema::new(ty.name().to_string(), ty, ordinal % 2 == 0))
- .collect()
- }
-
- #[test]
- fn supported_types_round_trip() {
- let column_schemas = supported_types();
- let schema = Schema::new(column_schemas);
-
- let named_struct = from_schema(&schema).unwrap();
- let converted_schema = to_schema(named_struct).unwrap();
-
- assert_eq!(schema, converted_schema);
- }
-}
diff --git a/src/common/substrait/src/types.rs b/src/common/substrait/src/types.rs
deleted file mode 100644
index cc0bf6110e43..000000000000
--- a/src/common/substrait/src/types.rs
+++ /dev/null
@@ -1,225 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//! Methods that perform conversion between Substrait's type ([Type](SType)) and GreptimeDB's type ([ConcreteDataType]).
-//!
-//! Substrait use [type variation](https://substrait.io/types/type_variations/) to express different "logical types".
-//! Current we only have variations on integer types. Variation 0 (system preferred) are the same with base types, which
-//! are signed integer (i.e. I8 -> [i8]), and Variation 1 stands for unsigned integer (i.e. I8 -> [u8]).
-
-use datafusion::scalar::ScalarValue;
-use datatypes::prelude::ConcreteDataType;
-use datatypes::types::TimestampType;
-use substrait_proto::proto::expression::literal::LiteralType;
-use substrait_proto::proto::r#type::{self as s_type, Kind, Nullability};
-use substrait_proto::proto::{Type as SType, Type};
-
-use crate::error::{self, Result, UnsupportedConcreteTypeSnafu, UnsupportedSubstraitTypeSnafu};
-
-macro_rules! substrait_kind {
- ($desc:ident, $concrete_ty:ident) => {{
- let nullable = $desc.nullability() == Nullability::Nullable;
- let ty = ConcreteDataType::$concrete_ty();
- Ok((ty, nullable))
- }};
-
- ($desc:ident, $concrete_ty:expr) => {{
- let nullable = $desc.nullability() == Nullability::Nullable;
- Ok(($concrete_ty, nullable))
- }};
-
- ($desc:ident, $concrete_ty_0:ident, $concrete_ty_1:ident) => {{
- let nullable = $desc.nullability() == Nullability::Nullable;
- let ty = match $desc.type_variation_reference {
- 0 => ConcreteDataType::$concrete_ty_0(),
- 1 => ConcreteDataType::$concrete_ty_1(),
- _ => UnsupportedSubstraitTypeSnafu {
- ty: format!("{:?}", $desc),
- }
- .fail()?,
- };
- Ok((ty, nullable))
- }};
-}
-
-/// Convert Substrait [Type](SType) to GreptimeDB's [ConcreteDataType]. The bool in return
-/// tuple is the nullability identifier.
-pub fn to_concrete_type(ty: &SType) -> Result<(ConcreteDataType, bool)> {
- if ty.kind.is_none() {
- return Ok((ConcreteDataType::null_datatype(), true));
- }
- let kind = ty.kind.as_ref().unwrap();
- match kind {
- Kind::Bool(desc) => substrait_kind!(desc, boolean_datatype),
- Kind::I8(desc) => substrait_kind!(desc, int8_datatype, uint8_datatype),
- Kind::I16(desc) => substrait_kind!(desc, int16_datatype, uint16_datatype),
- Kind::I32(desc) => substrait_kind!(desc, int32_datatype, uint32_datatype),
- Kind::I64(desc) => substrait_kind!(desc, int64_datatype, uint64_datatype),
- Kind::Fp32(desc) => substrait_kind!(desc, float32_datatype),
- Kind::Fp64(desc) => substrait_kind!(desc, float64_datatype),
- Kind::String(desc) => substrait_kind!(desc, string_datatype),
- Kind::Binary(desc) => substrait_kind!(desc, binary_datatype),
- Kind::Timestamp(desc) => substrait_kind!(
- desc,
- ConcreteDataType::timestamp_datatype(
- TimestampType::try_from(desc.type_variation_reference as u64)
- .map_err(|_| UnsupportedSubstraitTypeSnafu {
- ty: format!("{kind:?}")
- }
- .build())?
- .unit()
- )
- ),
- Kind::Date(desc) => substrait_kind!(desc, date_datatype),
- Kind::Time(_)
- | Kind::IntervalYear(_)
- | Kind::IntervalDay(_)
- | Kind::TimestampTz(_)
- | Kind::Uuid(_)
- | Kind::FixedChar(_)
- | Kind::Varchar(_)
- | Kind::FixedBinary(_)
- | Kind::Decimal(_)
- | Kind::Struct(_)
- | Kind::List(_)
- | Kind::Map(_)
- | Kind::UserDefined(_)
- | Kind::UserDefinedTypeReference(_) => UnsupportedSubstraitTypeSnafu {
- ty: format!("{kind:?}"),
- }
- .fail(),
- }
-}
-
-macro_rules! build_substrait_kind {
- ($kind:ident,$s_type:ident,$nullable:ident,$variation:expr) => {{
- let nullability = match $nullable {
- Some(true) => Nullability::Nullable,
- Some(false) => Nullability::Required,
- None => Nullability::Unspecified,
- } as _;
- Some(Kind::$kind(s_type::$s_type {
- type_variation_reference: $variation,
- nullability,
- }))
- }};
-}
-
-/// Convert GreptimeDB's [ConcreteDataType] to Substrait [Type](SType).
-///
-/// Refer to [mod level documentation](super::types) for more information about type variation.
-pub fn from_concrete_type(ty: ConcreteDataType, nullability: Option<bool>) -> Result<SType> {
- let kind = match ty {
- ConcreteDataType::Null(_) => None,
- ConcreteDataType::Boolean(_) => build_substrait_kind!(Bool, Boolean, nullability, 0),
- ConcreteDataType::Int8(_) => build_substrait_kind!(I8, I8, nullability, 0),
- ConcreteDataType::Int16(_) => build_substrait_kind!(I16, I16, nullability, 0),
- ConcreteDataType::Int32(_) => build_substrait_kind!(I32, I32, nullability, 0),
- ConcreteDataType::Int64(_) => build_substrait_kind!(I64, I64, nullability, 0),
- ConcreteDataType::UInt8(_) => build_substrait_kind!(I8, I8, nullability, 1),
- ConcreteDataType::UInt16(_) => build_substrait_kind!(I16, I16, nullability, 1),
- ConcreteDataType::UInt32(_) => build_substrait_kind!(I32, I32, nullability, 1),
- ConcreteDataType::UInt64(_) => build_substrait_kind!(I64, I64, nullability, 1),
- ConcreteDataType::Float32(_) => build_substrait_kind!(Fp32, Fp32, nullability, 0),
- ConcreteDataType::Float64(_) => build_substrait_kind!(Fp64, Fp64, nullability, 0),
- ConcreteDataType::Binary(_) => build_substrait_kind!(Binary, Binary, nullability, 0),
- ConcreteDataType::String(_) => build_substrait_kind!(String, String, nullability, 0),
- ConcreteDataType::Date(_) => build_substrait_kind!(Date, Date, nullability, 0),
- ConcreteDataType::DateTime(_) => UnsupportedConcreteTypeSnafu { ty }.fail()?,
- ConcreteDataType::Timestamp(ty) => {
- build_substrait_kind!(Timestamp, Timestamp, nullability, ty.precision() as u32)
- }
- ConcreteDataType::List(_) | ConcreteDataType::Dictionary(_) => {
- UnsupportedConcreteTypeSnafu { ty }.fail()?
- }
- };
-
- Ok(SType { kind })
-}
-
-pub(crate) fn scalar_value_as_literal_type(v: &ScalarValue) -> Result<LiteralType> {
- Ok(if v.is_null() {
- LiteralType::Null(Type { kind: None })
- } else {
- match v {
- ScalarValue::Boolean(Some(v)) => LiteralType::Boolean(*v),
- ScalarValue::Float32(Some(v)) => LiteralType::Fp32(*v),
- ScalarValue::Float64(Some(v)) => LiteralType::Fp64(*v),
- ScalarValue::Int8(Some(v)) => LiteralType::I8(*v as i32),
- ScalarValue::Int16(Some(v)) => LiteralType::I16(*v as i32),
- ScalarValue::Int32(Some(v)) => LiteralType::I32(*v),
- ScalarValue::Int64(Some(v)) => LiteralType::I64(*v),
- ScalarValue::LargeUtf8(Some(v)) => LiteralType::String(v.clone()),
- ScalarValue::LargeBinary(Some(v)) => LiteralType::Binary(v.clone()),
- ScalarValue::TimestampSecond(Some(seconds), _) => {
- LiteralType::Timestamp(*seconds * 1_000_000)
- }
- ScalarValue::TimestampMillisecond(Some(millis), _) => {
- LiteralType::Timestamp(*millis * 1000)
- }
- ScalarValue::TimestampMicrosecond(Some(micros), _) => LiteralType::Timestamp(*micros),
- ScalarValue::TimestampNanosecond(Some(nanos), _) => {
- LiteralType::Timestamp(*nanos / 1000)
- }
- ScalarValue::Utf8(Some(s)) => LiteralType::String(s.clone()),
- // TODO(LFC): Implement other conversions: ScalarValue => LiteralType
- _ => {
- return error::UnsupportedExprSnafu {
- name: format!("ScalarValue: {v:?}"),
- }
- .fail()
- }
- }
- })
-}
-
-pub(crate) fn literal_type_to_scalar_value(t: LiteralType) -> Result<ScalarValue> {
- Ok(match t {
- LiteralType::Null(Type { kind: Some(kind) }) => match kind {
- Kind::Bool(_) => ScalarValue::Boolean(None),
- Kind::I8(_) => ScalarValue::Int8(None),
- Kind::I16(_) => ScalarValue::Int16(None),
- Kind::I32(_) => ScalarValue::Int32(None),
- Kind::I64(_) => ScalarValue::Int64(None),
- Kind::Fp32(_) => ScalarValue::Float32(None),
- Kind::Fp64(_) => ScalarValue::Float64(None),
- Kind::String(_) => ScalarValue::LargeUtf8(None),
- Kind::Binary(_) => ScalarValue::LargeBinary(None),
- // TODO(LFC): Implement other conversions: Kind => ScalarValue
- _ => {
- return error::UnsupportedSubstraitTypeSnafu {
- ty: format!("{kind:?}"),
- }
- .fail()
- }
- },
- LiteralType::Boolean(v) => ScalarValue::Boolean(Some(v)),
- LiteralType::I8(v) => ScalarValue::Int8(Some(v as i8)),
- LiteralType::I16(v) => ScalarValue::Int16(Some(v as i16)),
- LiteralType::I32(v) => ScalarValue::Int32(Some(v)),
- LiteralType::I64(v) => ScalarValue::Int64(Some(v)),
- LiteralType::Fp32(v) => ScalarValue::Float32(Some(v)),
- LiteralType::Fp64(v) => ScalarValue::Float64(Some(v)),
- LiteralType::String(v) => ScalarValue::LargeUtf8(Some(v)),
- LiteralType::Binary(v) => ScalarValue::LargeBinary(Some(v)),
- LiteralType::Timestamp(v) => ScalarValue::TimestampMicrosecond(Some(v), None),
- // TODO(LFC): Implement other conversions: LiteralType => ScalarValue
- _ => {
- return error::UnsupportedSubstraitTypeSnafu {
- ty: format!("{t:?}"),
- }
- .fail()
- }
- })
-}
diff --git a/src/datatypes/src/vectors/list.rs b/src/datatypes/src/vectors/list.rs
index 23d7ac663691..21a658f7986a 100644
--- a/src/datatypes/src/vectors/list.rs
+++ b/src/datatypes/src/vectors/list.rs
@@ -433,8 +433,7 @@ impl NullBufferBuilder {
/// Builds the null buffer and resets the builder.
/// Returns `None` if the builder only contains `true`s.
fn finish(&mut self) -> Option<Buffer> {
- let buf = self.bitmap_builder.as_mut().map(|b| b.finish());
- self.bitmap_builder = None;
+ let buf = self.bitmap_builder.take().map(Into::into);
self.len = 0;
buf
}
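
The rewrite above leans on Option::take, which moves the builder out and leaves None behind, so the explicit reset line goes away. A small stand-alone illustration of the same idiom with a hypothetical builder, not the arrow types used here:

struct BitmapBuilder {
    bitmap: Option<Vec<bool>>,
    len: usize,
}

impl BitmapBuilder {
    fn finish(&mut self) -> Option<Vec<u8>> {
        // take() moves the value out and stores None, replacing the old
        // "map, then assign None" two-step.
        let buf = self
            .bitmap
            .take()
            .map(|bits| bits.into_iter().map(|b| b as u8).collect());
        self.len = 0;
        buf
    }
}

fn main() {
    let mut builder = BitmapBuilder {
        bitmap: Some(vec![true, false, true]),
        len: 3,
    };
    assert_eq!(builder.finish(), Some(vec![1, 0, 1]));
    assert!(builder.bitmap.is_none()); // take() already reset it
    assert_eq!(builder.len, 0);
}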
diff --git a/src/promql/src/planner.rs b/src/promql/src/planner.rs
index a8c09b927068..3af4197bbdd7 100644
--- a/src/promql/src/planner.rs
+++ b/src/promql/src/planner.rs
@@ -21,11 +21,11 @@ use async_recursion::async_recursion;
use catalog::table_source::DfTableSourceProvider;
use datafusion::common::{DFSchemaRef, OwnedTableReference, Result as DfResult};
use datafusion::datasource::DefaultTableSource;
-use datafusion::logical_expr::expr::AggregateFunction;
+use datafusion::logical_expr::expr::{AggregateFunction, ScalarFunction, ScalarUDF};
use datafusion::logical_expr::expr_rewriter::normalize_cols;
use datafusion::logical_expr::{
AggregateFunction as AggregateFunctionEnum, BinaryExpr, BuiltinScalarFunction, Cast, Extension,
- LogicalPlan, LogicalPlanBuilder, Operator, ScalarUDF,
+ LogicalPlan, LogicalPlanBuilder, Operator, ScalarUDF as ScalarUdfDef,
};
use datafusion::optimizer::utils;
use datafusion::prelude as df_prelude;
@@ -927,10 +927,10 @@ impl PromPlanner {
match scalar_func.clone() {
ScalarFunc::DataFusionBuiltin(fun) => {
other_input_exprs.insert(field_column_pos, col_expr);
- let fn_expr = DfExpr::ScalarFunction {
+ let fn_expr = DfExpr::ScalarFunction(ScalarFunction {
fun,
args: other_input_exprs.clone(),
- };
+ });
exprs.push(fn_expr);
other_input_exprs.remove(field_column_pos);
}
@@ -942,10 +942,10 @@ impl PromPlanner {
));
other_input_exprs.insert(field_column_pos, ts_range_expr);
other_input_exprs.insert(field_column_pos + 1, col_expr);
- let fn_expr = DfExpr::ScalarUDF {
+ let fn_expr = DfExpr::ScalarUDF(ScalarUDF {
fun: Arc::new(fun),
args: other_input_exprs.clone(),
- };
+ });
exprs.push(fn_expr);
other_input_exprs.remove(field_column_pos + 1);
other_input_exprs.remove(field_column_pos);
@@ -960,10 +960,10 @@ impl PromPlanner {
other_input_exprs.insert(field_column_pos + 1, col_expr);
other_input_exprs
.insert(field_column_pos + 2, self.create_time_index_column_expr()?);
- let fn_expr = DfExpr::ScalarUDF {
+ let fn_expr = DfExpr::ScalarUDF(ScalarUDF {
fun: Arc::new(fun),
args: other_input_exprs.clone(),
- };
+ });
exprs.push(fn_expr);
other_input_exprs.remove(field_column_pos + 2);
other_input_exprs.remove(field_column_pos + 1);
@@ -1069,6 +1069,7 @@ impl PromPlanner {
args: vec![DfExpr::Column(Column::from_name(col))],
distinct: false,
filter: None,
+ order_by: None,
})
})
.collect();
@@ -1281,10 +1282,10 @@ struct FunctionArgs {
#[derive(Debug, Clone)]
enum ScalarFunc {
DataFusionBuiltin(BuiltinScalarFunction),
- Udf(ScalarUDF),
+ Udf(ScalarUdfDef),
// todo(ruihang): maybe merge with Udf later
/// UDF that require extra information like range length to be evaluated.
- ExtrapolateUdf(ScalarUDF),
+ ExtrapolateUdf(ScalarUdfDef),
}
#[cfg(test)]
@@ -1668,12 +1669,12 @@ mod test {
#[tokio::test]
async fn aggregate_stddev() {
- do_aggregate_expr_plan("stddev", "STDDEVPOP").await;
+ do_aggregate_expr_plan("stddev", "STDDEV_POP").await;
}
#[tokio::test]
async fn aggregate_stdvar() {
- do_aggregate_expr_plan("stdvar", "VARIANCEPOP").await;
+ do_aggregate_expr_plan("stdvar", "VARIANCE_POP").await;
}
#[tokio::test]
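
The planner hunks above track DataFusion's move from anonymous struct variants to dedicated wrapper structs for scalar calls. A minimal construction sketch using the same imports the patch adds; the Abs example is illustrative only and not taken from this code:

use datafusion::logical_expr::expr::ScalarFunction;
use datafusion::logical_expr::{BuiltinScalarFunction, Expr};

fn abs_of(arg: Expr) -> Expr {
    // Old shape: Expr::ScalarFunction { fun, args }
    // New shape: tuple variant wrapping a ScalarFunction struct.
    Expr::ScalarFunction(ScalarFunction {
        fun: BuiltinScalarFunction::Abs,
        args: vec![arg],
    })
}

fn main() {
    use datafusion::logical_expr::col;
    let expr = abs_of(col("v"));
    println!("{expr:?}");
}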
diff --git a/src/promql/src/range_array.rs b/src/promql/src/range_array.rs
index e3a50868356d..92d0f55c1b71 100644
--- a/src/promql/src/range_array.rs
+++ b/src/promql/src/range_array.rs
@@ -219,11 +219,6 @@ impl Array for RangeArray {
self
}
- #[allow(deprecated)]
- fn data(&self) -> &ArrayData {
- self.array.data()
- }
-
fn into_data(self) -> ArrayData {
self.array.into_data()
}
@@ -239,6 +234,30 @@ impl Array for RangeArray {
fn nulls(&self) -> Option<&NullBuffer> {
self.array.nulls()
}
+
+ fn data_type(&self) -> &DataType {
+ self.array.data_type()
+ }
+
+ fn len(&self) -> usize {
+ self.len()
+ }
+
+ fn is_empty(&self) -> bool {
+ self.is_empty()
+ }
+
+ fn offset(&self) -> usize {
+ self.array.offset()
+ }
+
+ fn get_buffer_memory_size(&self) -> usize {
+ self.array.get_buffer_memory_size()
+ }
+
+ fn get_array_memory_size(&self) -> usize {
+ self.array.get_array_memory_size()
+ }
}
impl std::fmt::Debug for RangeArray {
diff --git a/src/query/src/dist_plan/commutativity.rs b/src/query/src/dist_plan/commutativity.rs
index 28723f3350bf..acec186910f0 100644
--- a/src/query/src/dist_plan/commutativity.rs
+++ b/src/query/src/dist_plan/commutativity.rs
@@ -59,19 +59,13 @@ impl Categorizer {
LogicalPlan::Distinct(_) => Commutativity::PartialCommutative,
LogicalPlan::Unnest(_) => Commutativity::Commutative,
LogicalPlan::Statement(_) => Commutativity::Unsupported,
- LogicalPlan::CreateExternalTable(_) => Commutativity::Unsupported,
- LogicalPlan::CreateMemoryTable(_) => Commutativity::Unsupported,
- LogicalPlan::CreateView(_) => Commutativity::Unsupported,
- LogicalPlan::CreateCatalogSchema(_) => Commutativity::Unsupported,
- LogicalPlan::CreateCatalog(_) => Commutativity::Unsupported,
- LogicalPlan::DropTable(_) => Commutativity::Unsupported,
- LogicalPlan::DropView(_) => Commutativity::Unsupported,
LogicalPlan::Values(_) => Commutativity::Unsupported,
LogicalPlan::Explain(_) => Commutativity::Unsupported,
LogicalPlan::Analyze(_) => Commutativity::Unsupported,
LogicalPlan::Prepare(_) => Commutativity::Unsupported,
LogicalPlan::DescribeTable(_) => Commutativity::Unsupported,
LogicalPlan::Dml(_) => Commutativity::Unsupported,
+ LogicalPlan::Ddl(_) => Commutativity::Unsupported,
}
}
diff --git a/src/query/src/optimizer.rs b/src/query/src/optimizer.rs
index e4ba4a65035e..011d72d8b195 100644
--- a/src/query/src/optimizer.rs
+++ b/src/query/src/optimizer.rs
@@ -18,6 +18,7 @@ use common_time::timestamp::{TimeUnit, Timestamp};
use datafusion::config::ConfigOptions;
use datafusion_common::tree_node::{Transformed, TreeNode, TreeNodeRewriter};
use datafusion_common::{DFSchemaRef, DataFusionError, Result, ScalarValue};
+use datafusion_expr::expr::InList;
use datafusion_expr::{
Between, BinaryExpr, Expr, ExprSchemable, Filter, LogicalPlan, Operator, TableScan,
};
@@ -76,7 +77,6 @@ impl AnalyzerRule for TypeConversionRule {
| LogicalPlan::Window { .. }
| LogicalPlan::Aggregate { .. }
| LogicalPlan::Repartition { .. }
- | LogicalPlan::CreateExternalTable { .. }
| LogicalPlan::Extension { .. }
| LogicalPlan::Sort { .. }
| LogicalPlan::Explain { .. }
@@ -84,9 +84,6 @@ impl AnalyzerRule for TypeConversionRule {
| LogicalPlan::Union { .. }
| LogicalPlan::Join { .. }
| LogicalPlan::CrossJoin { .. }
- | LogicalPlan::CreateMemoryTable { .. }
- | LogicalPlan::DropTable { .. }
- | LogicalPlan::DropView { .. }
| LogicalPlan::Distinct { .. }
| LogicalPlan::Values { .. }
| LogicalPlan::Analyze { .. } => {
@@ -105,15 +102,13 @@ impl AnalyzerRule for TypeConversionRule {
LogicalPlan::Subquery { .. }
| LogicalPlan::SubqueryAlias { .. }
- | LogicalPlan::CreateView { .. }
- | LogicalPlan::CreateCatalogSchema { .. }
- | LogicalPlan::CreateCatalog { .. }
| LogicalPlan::EmptyRelation(_)
| LogicalPlan::Prepare(_)
| LogicalPlan::Dml(_)
| LogicalPlan::DescribeTable(_)
| LogicalPlan::Unnest(_)
- | LogicalPlan::Statement(_) => Ok(Transformed::No(plan)),
+ | LogicalPlan::Statement(_)
+ | LogicalPlan::Ddl(_) => Ok(Transformed::No(plan)),
})
}
@@ -229,21 +224,21 @@ impl TreeNodeRewriter for TypeConverter {
high: Box::new(high),
})
}
- Expr::InList {
+ Expr::InList(InList {
expr,
list,
negated,
- } => {
+ }) => {
let mut list_expr = Vec::with_capacity(list.len());
for e in list {
let (_, expr_conversion) = self.convert_type(&expr, &e)?;
list_expr.push(expr_conversion);
}
- Expr::InList {
+ Expr::InList(InList {
expr,
list: list_expr,
negated,
- }
+ })
}
Expr::Literal(value) => match value {
ScalarValue::TimestampSecond(Some(i), _) => {
diff --git a/src/query/src/parser.rs b/src/query/src/parser.rs
index 14de3e026ead..df962efc72cb 100644
--- a/src/query/src/parser.rs
+++ b/src/query/src/parser.rs
@@ -255,7 +255,7 @@ mod test {
let expected = String::from("Sql(Query(Query { \
inner: Query { \
with: None, body: Select(Select { \
- distinct: false, \
+ distinct: None, \
top: None, \
projection: \
[Wildcard(WildcardAdditionalOptions { opt_exclude: None, opt_except: None, opt_rename: None, opt_replace: None })], \
@@ -274,6 +274,7 @@ mod test {
distribute_by: [], \
sort_by: [], \
having: None, \
+ named_window: [], \
qualify: None \
}), order_by: [], limit: None, offset: None, fetch: None, locks: [] }, param_types: [] }))");
diff --git a/src/sql/src/parser.rs b/src/sql/src/parser.rs
index a6c350144074..e709cddbf8c9 100644
--- a/src/sql/src/parser.rs
+++ b/src/sql/src/parser.rs
@@ -539,7 +539,7 @@ mod tests {
assert_eq!(1, stmts.len());
let select = sqlparser::ast::Select {
- distinct: false,
+ distinct: None,
top: None,
projection: vec![sqlparser::ast::SelectItem::Wildcard(
WildcardAdditionalOptions::default(),
@@ -562,6 +562,7 @@ mod tests {
sort_by: vec![],
having: None,
qualify: None,
+ named_window: vec![],
};
let sp_statement = SpStatement::Query(Box::new(SpQuery {
diff --git a/src/table/src/predicate.rs b/src/table/src/predicate.rs
index e5153bee1104..4068e5189e85 100644
--- a/src/table/src/predicate.rs
+++ b/src/table/src/predicate.rs
@@ -19,6 +19,7 @@ use common_time::Timestamp;
use datafusion::parquet::file::metadata::RowGroupMetaData;
use datafusion::physical_optimizer::pruning::PruningPredicate;
use datafusion_common::ToDFSchema;
+use datafusion_expr::expr::InList;
use datafusion_expr::{Between, BinaryExpr, Operator};
use datafusion_physical_expr::create_physical_expr;
use datafusion_physical_expr::execution_props::ExecutionProps;
@@ -130,11 +131,11 @@ impl<'a> TimeRangePredicateBuilder<'a> {
low,
high,
}) => self.extract_from_between_expr(expr, negated, low, high),
- DfExpr::InList {
+ DfExpr::InList(InList {
expr,
list,
negated,
- } => self.extract_from_in_list_expr(expr, *negated, list),
+ }) => self.extract_from_in_list_expr(expr, *negated, list),
_ => None,
}
}
diff --git a/tests-integration/src/opentsdb.rs b/tests-integration/src/opentsdb.rs
index 6114e19926e6..5c5942c8613a 100644
--- a/tests-integration/src/opentsdb.rs
+++ b/tests-integration/src/opentsdb.rs
@@ -35,6 +35,7 @@ mod tests {
test_exec(instance).await;
}
+ #[ignore = "https://github.com/GreptimeTeam/greptimedb/issues/1681"]
#[tokio::test(flavor = "multi_thread")]
async fn test_distributed_exec() {
let distributed = tests::create_distributed_instance("test_distributed_exec").await;
diff --git a/tests-integration/src/tests/instance_test.rs b/tests-integration/src/tests/instance_test.rs
index dc46e0446856..67c0db3eeae2 100644
--- a/tests-integration/src/tests/instance_test.rs
+++ b/tests-integration/src/tests/instance_test.rs
@@ -869,6 +869,7 @@ async fn test_create_table_after_rename_table(instance: Arc<dyn MockInstance>) {
check_output_stream(output, expect).await;
}
+#[ignore = "https://github.com/GreptimeTeam/greptimedb/issues/1681"]
#[apply(both_instances_cases)]
async fn test_alter_table(instance: Arc<dyn MockInstance>) {
let instance = instance.frontend();
diff --git a/tests/cases/standalone/common/aggregate/distinct_order_by.result b/tests/cases/standalone/common/aggregate/distinct_order_by.result
index 2638a3a9543b..bacfd3badb12 100644
--- a/tests/cases/standalone/common/aggregate/distinct_order_by.result
+++ b/tests/cases/standalone/common/aggregate/distinct_order_by.result
@@ -21,7 +21,7 @@ Error: 3000(PlanQuery), Error during planning: For SELECT DISTINCT, ORDER BY exp
SELECT DISTINCT ON (1) i % 2, i FROM integers WHERE i<3 ORDER BY i;
-Error: 1001(Unsupported), SQL statement is not supported: SELECT DISTINCT ON (1) i % 2, i FROM integers WHERE i<3 ORDER BY i;, keyword: %
+Error: 3000(PlanQuery), This feature is not implemented: DISTINCT ON Exprs not supported
SELECT DISTINCT integers.i FROM integers ORDER BY i DESC;
diff --git a/tests/cases/standalone/common/optimizer/filter_push_down.result b/tests/cases/standalone/common/optimizer/filter_push_down.result
index 118680dd3b02..e48471107f44 100644
--- a/tests/cases/standalone/common/optimizer/filter_push_down.result
+++ b/tests/cases/standalone/common/optimizer/filter_push_down.result
@@ -149,7 +149,7 @@ SELECT * FROM integers i1 WHERE NOT EXISTS(SELECT i FROM integers WHERE i=i1.i)
SELECT i1.i,i2.i FROM integers i1, integers i2 WHERE i1.i=(SELECT i FROM integers WHERE i1.i=i) AND i1.i=i2.i ORDER BY i1.i;
-Error: 3001(EngineExecuteQuery), This feature is not implemented: Physical plan does not support logical expression (<subquery>)
+Error: 3001(EngineExecuteQuery), Error during planning: Correlated scalar subquery must be aggregated to return at most one row
SELECT * FROM (SELECT i1.i AS a, i2.i AS b FROM integers i1, integers i2) a1 WHERE a=b ORDER BY 1;
@@ -194,8 +194,7 @@ SELECT * FROM (SELECT 0=1 AS cond FROM integers i1, integers i2) a1 WHERE cond O
SELECT * FROM (SELECT 0=1 AS cond FROM integers i1, integers i2 GROUP BY 1) a1 WHERE cond ORDER BY 1;
-++
-++
+Error: 3001(EngineExecuteQuery), Error during planning: Attempted to create Filter predicate with expression `Boolean(false)` aliased as 'Int64(0) = Int64(1)'. Filter predicates should not be aliased.
DROP TABLE integers;
diff --git a/tests/cases/standalone/order/order_variable_size_payload.result b/tests/cases/standalone/order/order_variable_size_payload.result
index c23e6fe832a9..2c84e4c3bec7 100644
--- a/tests/cases/standalone/order/order_variable_size_payload.result
+++ b/tests/cases/standalone/order/order_variable_size_payload.result
@@ -337,14 +337,14 @@ Affected Rows: 4
select i, split_part(s, 'b', 1) from test8 order by i;
-+---+---------------------------------------+
-| i | splitpart(test8.s,Utf8("b"),Int64(1)) |
-+---+---------------------------------------+
-| 1 | cc |
-| 2 | |
-| 3 | a |
-| | d |
-+---+---------------------------------------+
++---+----------------------------------------+
+| i | split_part(test8.s,Utf8("b"),Int64(1)) |
++---+----------------------------------------+
+| 1 | cc |
+| 2 | |
+| 3 | a |
+| | d |
++---+----------------------------------------+
CREATE TABLE DirectReports
(
|
chore
|
bump arrow/parquet to 40.0, datafusion to the latest HEAD (#1677)
|
fc6ebf58b4e8187726f17d34b68763f7682e2fde
|
2023-07-31 08:21:06
|
parkma99
|
refactor: create_current_timestamp_vector by using VectorOp::cast (#2042)
| false
|
diff --git a/src/datatypes/src/schema/constraint.rs b/src/datatypes/src/schema/constraint.rs
index e63d8748fceb..2d6d0d9e5891 100644
--- a/src/datatypes/src/schema/constraint.rs
+++ b/src/datatypes/src/schema/constraint.rs
@@ -13,7 +13,6 @@
// limitations under the License.
use std::fmt::{Display, Formatter};
-use std::sync::Arc;
use common_time::util;
use serde::{Deserialize, Serialize};
@@ -22,7 +21,8 @@ use snafu::{ensure, ResultExt};
use crate::data_type::{ConcreteDataType, DataType};
use crate::error::{self, Result};
use crate::value::Value;
-use crate::vectors::{Int64Vector, TimestampMillisecondVector, VectorRef};
+use crate::vectors::operations::VectorOp;
+use crate::vectors::{TimestampMillisecondVector, VectorRef};
const CURRENT_TIMESTAMP: &str = "current_timestamp()";
@@ -162,24 +162,23 @@ fn create_current_timestamp_vector(
data_type: &ConcreteDataType,
num_rows: usize,
) -> Result<VectorRef> {
- // FIXME(yingwen): We should implements cast in VectorOp so we could cast the millisecond vector
- // to other data type and avoid this match.
- match data_type {
- ConcreteDataType::Timestamp(_) => Ok(Arc::new(TimestampMillisecondVector::from_values(
- std::iter::repeat(util::current_time_millis()).take(num_rows),
- ))),
- ConcreteDataType::Int64(_) => Ok(Arc::new(Int64Vector::from_values(
- std::iter::repeat(util::current_time_millis()).take(num_rows),
- ))),
- _ => error::DefaultValueTypeSnafu {
+ let current_timestamp_vector = TimestampMillisecondVector::from_values(
+ std::iter::repeat(util::current_time_millis()).take(num_rows),
+ );
+ if data_type.is_timestamp_compatible() {
+ current_timestamp_vector.cast(data_type)
+ } else {
+ error::DefaultValueTypeSnafu {
reason: format!("Not support to assign current timestamp to {data_type:?} type",),
}
- .fail(),
+ .fail()
}
}
#[cfg(test)]
mod tests {
+ use std::sync::Arc;
+
use super::*;
use crate::error::Error;
use crate::vectors::Int32Vector;
@@ -272,6 +271,39 @@ mod tests {
v.get(0)
);
+ let data_type = ConcreteDataType::timestamp_second_datatype();
+ let v = constraint
+ .create_default_vector(&data_type, false, 4)
+ .unwrap();
+ assert_eq!(4, v.len());
+ assert!(
+ matches!(v.get(0), Value::Timestamp(_)),
+ "v {:?} is not timestamp",
+ v.get(0)
+ );
+
+ let data_type = ConcreteDataType::timestamp_microsecond_datatype();
+ let v = constraint
+ .create_default_vector(&data_type, false, 4)
+ .unwrap();
+ assert_eq!(4, v.len());
+ assert!(
+ matches!(v.get(0), Value::Timestamp(_)),
+ "v {:?} is not timestamp",
+ v.get(0)
+ );
+
+ let data_type = ConcreteDataType::timestamp_nanosecond_datatype();
+ let v = constraint
+ .create_default_vector(&data_type, false, 4)
+ .unwrap();
+ assert_eq!(4, v.len());
+ assert!(
+ matches!(v.get(0), Value::Timestamp(_)),
+ "v {:?} is not timestamp",
+ v.get(0)
+ );
+
// Int64 type.
let data_type = ConcreteDataType::int64_datatype();
let v = constraint
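A condensed, crate-internal sketch of the cast-based default shown in the constraint.rs hunk above; imports mirror those in the diff, the helper name default_now_vector is hypothetical, and the is_timestamp_compatible guard is omitted for brevity.

use common_time::util;
use crate::data_type::ConcreteDataType;
use crate::error::Result;
use crate::vectors::operations::VectorOp;
use crate::vectors::{TimestampMillisecondVector, VectorRef};

// Build one millisecond "now" vector, then cast it once to whatever
// timestamp type the column declares, instead of matching on every type.
fn default_now_vector(data_type: &ConcreteDataType, num_rows: usize) -> Result<VectorRef> {
    let now = TimestampMillisecondVector::from_values(
        std::iter::repeat(util::current_time_millis()).take(num_rows),
    );
    now.cast(data_type)
}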
diff --git a/src/datatypes/src/vectors.rs b/src/datatypes/src/vectors.rs
index 3d4c04f70537..5c873a070a35 100644
--- a/src/datatypes/src/vectors.rs
+++ b/src/datatypes/src/vectors.rs
@@ -34,7 +34,7 @@ mod eq;
mod helper;
mod list;
mod null;
-mod operations;
+pub(crate) mod operations;
mod primitive;
mod string;
mod time;
|
refactor
|
create_current_timestamp_vector by using VectorOp::cast (#2042)
|
a1587595d935d02b550876f45852e2bfae09a537
|
2023-05-10 08:25:00
|
Ning Sun
|
feat: add information_schema as exception of cross schema check (#1551)
| false
|
diff --git a/src/query/src/lib.rs b/src/query/src/lib.rs
index 878b54dec35a..ca150b5c3926 100644
--- a/src/query/src/lib.rs
+++ b/src/query/src/lib.rs
@@ -25,10 +25,11 @@ pub mod plan;
pub mod planner;
pub mod query_engine;
pub mod sql;
-#[cfg(test)]
-mod tests;
pub use crate::datafusion::DfContextProviderAdapter;
pub use crate::query_engine::{
QueryEngine, QueryEngineContext, QueryEngineFactory, QueryEngineRef,
};
+
+#[cfg(test)]
+mod tests;
diff --git a/src/query/src/query_engine/options.rs b/src/query/src/query_engine/options.rs
index e76774d36b57..8c60b9465cf1 100644
--- a/src/query/src/query_engine/options.rs
+++ b/src/query/src/query_engine/options.rs
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use common_catalog::consts::INFORMATION_SCHEMA_NAME;
use session::context::QueryContextRef;
use snafu::ensure;
@@ -28,6 +29,11 @@ pub fn validate_catalog_and_schema(
schema: &str,
query_ctx: &QueryContextRef,
) -> Result<()> {
+ // information_schema is an exception
+ if schema.eq_ignore_ascii_case(INFORMATION_SCHEMA_NAME) {
+ return Ok(());
+ }
+
ensure!(
catalog == query_ctx.current_catalog() && schema == query_ctx.current_schema(),
QueryAccessDeniedSnafu {
@@ -59,5 +65,7 @@ mod tests {
assert!(re.is_err());
let re = validate_catalog_and_schema("wrong_catalog", "wrong_schema", &context);
assert!(re.is_err());
+
+ assert!(validate_catalog_and_schema("greptime", "information_schema", &context).is_ok());
}
}
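A hedged usage sketch of the new exception; the module path query::query_engine::options and the helper can_read_information_schema are assumptions for illustration, only the function signature and behavior come from the diff above.

use query::query_engine::options::validate_catalog_and_schema;
use session::context::QueryContextRef;

// After this change, information_schema passes validation regardless of the
// session's current catalog/schema, while other cross-schema access is still checked.
fn can_read_information_schema(ctx: &QueryContextRef) -> bool {
    validate_catalog_and_schema("greptime", "information_schema", ctx).is_ok()
}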
diff --git a/src/table-procedure/src/lib.rs b/src/table-procedure/src/lib.rs
index 6e637a1798cb..0778a3fac23d 100644
--- a/src/table-procedure/src/lib.rs
+++ b/src/table-procedure/src/lib.rs
@@ -18,8 +18,6 @@ mod alter;
mod create;
mod drop;
pub mod error;
-#[cfg(test)]
-mod test_util;
pub use alter::AlterTableProcedure;
use catalog::CatalogManagerRef;
@@ -52,3 +50,6 @@ pub fn register_procedure_loaders(
);
DropTableProcedure::register_loader(catalog_manager, engine_procedure, procedure_manager);
}
+
+#[cfg(test)]
+mod test_util;
|
feat
|
add information_schema as exception of cross schema check (#1551)
|
e3b37ee2c94ba41bc5c1796b22c82660d9e0153f
|
2024-03-28 12:10:15
|
dennis zhuang
|
fix: canonicalize catalog and schema names (#3600)
| false
|
diff --git a/src/common/catalog/src/lib.rs b/src/common/catalog/src/lib.rs
index 1a2596371709..e1cf4c201d48 100644
--- a/src/common/catalog/src/lib.rs
+++ b/src/common/catalog/src/lib.rs
@@ -55,10 +55,10 @@ pub fn build_db_string(catalog: &str, schema: &str) -> String {
/// schema name
/// - if `[<catalog>-]` is provided, we split database name with `-` and use
/// `<catalog>` and `<schema>`.
-pub fn parse_catalog_and_schema_from_db_string(db: &str) -> (&str, &str) {
+pub fn parse_catalog_and_schema_from_db_string(db: &str) -> (String, String) {
match parse_optional_catalog_and_schema_from_db_string(db) {
(Some(catalog), schema) => (catalog, schema),
- (None, schema) => (DEFAULT_CATALOG_NAME, schema),
+ (None, schema) => (DEFAULT_CATALOG_NAME.to_string(), schema),
}
}
@@ -66,12 +66,12 @@ pub fn parse_catalog_and_schema_from_db_string(db: &str) -> (&str, &str) {
///
/// Similar to [`parse_catalog_and_schema_from_db_string`] but returns an optional
/// catalog if it's not provided in the database name.
-pub fn parse_optional_catalog_and_schema_from_db_string(db: &str) -> (Option<&str>, &str) {
+pub fn parse_optional_catalog_and_schema_from_db_string(db: &str) -> (Option<String>, String) {
let parts = db.splitn(2, '-').collect::<Vec<&str>>();
if parts.len() == 2 {
- (Some(parts[0]), parts[1])
+ (Some(parts[0].to_lowercase()), parts[1].to_lowercase())
} else {
- (None, db)
+ (None, db.to_lowercase())
}
}
@@ -88,32 +88,37 @@ mod tests {
#[test]
fn test_parse_catalog_and_schema() {
assert_eq!(
- (DEFAULT_CATALOG_NAME, "fullschema"),
+ (DEFAULT_CATALOG_NAME.to_string(), "fullschema".to_string()),
parse_catalog_and_schema_from_db_string("fullschema")
);
assert_eq!(
- ("catalog", "schema"),
+ ("catalog".to_string(), "schema".to_string()),
parse_catalog_and_schema_from_db_string("catalog-schema")
);
assert_eq!(
- ("catalog", "schema1-schema2"),
+ ("catalog".to_string(), "schema1-schema2".to_string()),
parse_catalog_and_schema_from_db_string("catalog-schema1-schema2")
);
assert_eq!(
- (None, "fullschema"),
+ (None, "fullschema".to_string()),
parse_optional_catalog_and_schema_from_db_string("fullschema")
);
assert_eq!(
- (Some("catalog"), "schema"),
+ (Some("catalog".to_string()), "schema".to_string()),
parse_optional_catalog_and_schema_from_db_string("catalog-schema")
);
assert_eq!(
- (Some("catalog"), "schema1-schema2"),
+ (Some("catalog".to_string()), "schema".to_string()),
+ parse_optional_catalog_and_schema_from_db_string("CATALOG-SCHEMA")
+ );
+
+ assert_eq!(
+ (Some("catalog".to_string()), "schema1-schema2".to_string()),
parse_optional_catalog_and_schema_from_db_string("catalog-schema1-schema2")
);
}
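A small usage sketch of the canonicalizing parser above; the example database strings are hypothetical, the lowercasing and default-catalog behavior are taken from the diff.

use common_catalog::parse_catalog_and_schema_from_db_string;

fn main() {
    // Mixed-case input is lowercased and split on the first '-'.
    assert_eq!(
        ("greptime".to_string(), "public".to_string()),
        parse_catalog_and_schema_from_db_string("Greptime-PUBLIC")
    );
    // Without a '-', the default catalog is filled in.
    assert_eq!(
        ("greptime".to_string(), "my_db".to_string()),
        parse_catalog_and_schema_from_db_string("MY_DB")
    );
}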
diff --git a/src/servers/src/grpc/authorize.rs b/src/servers/src/grpc/authorize.rs
index 84e203d3730e..ae003640ea4b 100644
--- a/src/servers/src/grpc/authorize.rs
+++ b/src/servers/src/grpc/authorize.rs
@@ -104,7 +104,7 @@ async fn do_auth<T>(
) -> Result<(), tonic::Status> {
let (catalog, schema) = extract_catalog_and_schema(req);
- let query_ctx = QueryContext::with(catalog, schema);
+ let query_ctx = QueryContext::with(&catalog, &schema);
let Some(user_provider) = user_provider else {
query_ctx.set_current_user(Some(auth::userinfo_by_name(None)));
@@ -119,7 +119,7 @@ async fn do_auth<T>(
let pwd = auth::Password::PlainText(password);
let user_info = user_provider
- .auth(id, pwd, catalog, schema)
+ .auth(id, pwd, &catalog, &schema)
.await
.map_err(|e| tonic::Status::unauthenticated(e.to_string()))?;
diff --git a/src/servers/src/grpc/greptime_handler.rs b/src/servers/src/grpc/greptime_handler.rs
index 19a4e1d373e0..a79217e6ee09 100644
--- a/src/servers/src/grpc/greptime_handler.rs
+++ b/src/servers/src/grpc/greptime_handler.rs
@@ -166,23 +166,28 @@ pub(crate) fn create_query_context(header: Option<&RequestHeader>) -> QueryConte
} else {
(
if !header.catalog.is_empty() {
- &header.catalog
+ header.catalog.to_lowercase()
} else {
- DEFAULT_CATALOG_NAME
+ DEFAULT_CATALOG_NAME.to_string()
},
if !header.schema.is_empty() {
- &header.schema
+ header.schema.to_lowercase()
} else {
- DEFAULT_SCHEMA_NAME
+ DEFAULT_SCHEMA_NAME.to_string()
},
)
}
})
- .unwrap_or((DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME));
+ .unwrap_or_else(|| {
+ (
+ DEFAULT_CATALOG_NAME.to_string(),
+ DEFAULT_SCHEMA_NAME.to_string(),
+ )
+ });
let timezone = parse_timezone(header.map(|h| h.timezone.as_str()));
QueryContextBuilder::default()
- .current_catalog(catalog.to_string())
- .current_schema(schema.to_string())
+ .current_catalog(catalog)
+ .current_schema(schema)
.timezone(Arc::new(timezone))
.build()
}
diff --git a/src/servers/src/http/authorize.rs b/src/servers/src/http/authorize.rs
index de99828fb33e..12c270c43cda 100644
--- a/src/servers/src/http/authorize.rs
+++ b/src/servers/src/http/authorize.rs
@@ -64,8 +64,8 @@ pub async fn inner_auth<B>(
// TODO(ruihang): move this out of auth module
let timezone = Arc::new(extract_timezone(&req));
let query_ctx_builder = QueryContextBuilder::default()
- .current_catalog(catalog.to_string())
- .current_schema(schema.to_string())
+ .current_catalog(catalog.clone())
+ .current_schema(schema.clone())
.timezone(timezone);
let query_ctx = query_ctx_builder.build();
@@ -97,8 +97,8 @@ pub async fn inner_auth<B>(
.auth(
auth::Identity::UserId(&username, None),
auth::Password::PlainText(password),
- catalog,
- schema,
+ &catalog,
+ &schema,
)
.await
{
@@ -132,7 +132,7 @@ fn err_response(err: impl ErrorExt) -> Response {
(StatusCode::UNAUTHORIZED, ErrorResponse::from_error(err)).into_response()
}
-pub fn extract_catalog_and_schema<B>(request: &Request<B>) -> (&str, &str) {
+pub fn extract_catalog_and_schema<B>(request: &Request<B>) -> (String, String) {
// parse database from header
let dbname = request
.headers()
@@ -414,7 +414,7 @@ mod tests {
.unwrap();
let db = extract_catalog_and_schema(&req);
- assert_eq!(db, ("greptime", "tomcat"));
+ assert_eq!(db, ("greptime".to_string(), "tomcat".to_string()));
}
#[test]
diff --git a/src/servers/src/http/prometheus.rs b/src/servers/src/http/prometheus.rs
index af5567993fac..21e5b4c2ccd0 100644
--- a/src/servers/src/http/prometheus.rs
+++ b/src/servers/src/http/prometheus.rs
@@ -255,7 +255,7 @@ pub async fn labels_query(
queries = form_params.matches.0;
}
if queries.is_empty() {
- match get_all_column_names(catalog, schema, &handler.catalog_manager()).await {
+ match get_all_column_names(&catalog, &schema, &handler.catalog_manager()).await {
Ok(labels) => {
return PrometheusJsonResponse::success(PrometheusResponse::Labels(labels))
}
@@ -530,7 +530,11 @@ pub async fn label_values_query(
let (catalog, schema) = parse_catalog_and_schema_from_db_string(db);
if label_name == METRIC_NAME_LABEL {
- let mut table_names = match handler.catalog_manager().table_names(catalog, schema).await {
+ let mut table_names = match handler
+ .catalog_manager()
+ .table_names(&catalog, &schema)
+ .await
+ {
Ok(table_names) => table_names,
Err(e) => {
return PrometheusJsonResponse::error(e.status_code().to_string(), e.output_msg());
diff --git a/src/servers/src/mysql/handler.rs b/src/servers/src/mysql/handler.rs
index 9fe088cb6604..9e43aea7b42b 100644
--- a/src/servers/src/mysql/handler.rs
+++ b/src/servers/src/mysql/handler.rs
@@ -371,13 +371,17 @@ impl<W: AsyncWrite + Send + Sync + Unpin> AsyncMysqlShim<W> for MysqlInstanceShi
async fn on_init<'a>(&'a mut self, database: &'a str, w: InitWriter<'a, W>) -> Result<()> {
let (catalog_from_db, schema) = parse_optional_catalog_and_schema_from_db_string(database);
- let catalog = if let Some(catalog) = catalog_from_db {
- catalog.to_owned()
+ let catalog = if let Some(catalog) = &catalog_from_db {
+ catalog.to_string()
} else {
self.session.get_catalog()
};
- if !self.query_handler.is_valid_schema(&catalog, schema).await? {
+ if !self
+ .query_handler
+ .is_valid_schema(&catalog, &schema)
+ .await?
+ {
return w
.error(
ErrorKind::ER_WRONG_DB_NAME,
@@ -391,7 +395,7 @@ impl<W: AsyncWrite + Send + Sync + Unpin> AsyncMysqlShim<W> for MysqlInstanceShi
if let Some(schema_validator) = &self.user_provider {
if let Err(e) = schema_validator
- .authorize(&catalog, schema, user_info)
+ .authorize(&catalog, &schema, user_info)
.await
{
METRIC_AUTH_FAILURE
@@ -410,7 +414,7 @@ impl<W: AsyncWrite + Send + Sync + Unpin> AsyncMysqlShim<W> for MysqlInstanceShi
if catalog_from_db.is_some() {
self.session.set_catalog(catalog)
}
- self.session.set_schema(schema.into());
+ self.session.set_schema(schema);
w.ok().await.map_err(|e| e.into())
}
diff --git a/src/servers/src/postgres/auth_handler.rs b/src/servers/src/postgres/auth_handler.rs
index 3708f6f57a53..da316d04cf42 100644
--- a/src/servers/src/postgres/auth_handler.rs
+++ b/src/servers/src/postgres/auth_handler.rs
@@ -237,14 +237,11 @@ where
if let Some(db) = db_ref {
let (catalog, schema) = parse_catalog_and_schema_from_db_string(db);
if query_handler
- .is_valid_schema(catalog, schema)
+ .is_valid_schema(&catalog, &schema)
.await
.map_err(|e| PgWireError::ApiError(Box::new(e)))?
{
- Ok(DbResolution::Resolved(
- catalog.to_owned(),
- schema.to_owned(),
- ))
+ Ok(DbResolution::Resolved(catalog, schema))
} else {
Ok(DbResolution::NotFound(format!("Database not found: {db}")))
}
diff --git a/src/session/src/context.rs b/src/session/src/context.rs
index d401b0331637..ab1e468dc6e3 100644
--- a/src/session/src/context.rs
+++ b/src/session/src/context.rs
@@ -114,7 +114,7 @@ impl QueryContext {
let (catalog, schema) = db_name
.map(|db| {
let (catalog, schema) = parse_catalog_and_schema_from_db_string(db);
- (catalog.to_string(), schema.to_string())
+ (catalog, schema)
})
.unwrap_or_else(|| {
(
diff --git a/tests/cases/standalone/common/system/information_schema.result b/tests/cases/standalone/common/system/information_schema.result
index 330900ae7995..aca9a78a1080 100644
--- a/tests/cases/standalone/common/system/information_schema.result
+++ b/tests/cases/standalone/common/system/information_schema.result
@@ -674,6 +674,27 @@ DESC TABLE GREPTIME_REGION_PEERS;
| down_seconds | Int64 | | YES | | FIELD |
+--------------+--------+-----+------+---------+---------------+
+USE INFORMATION_SCHEMA;
+
+Affected Rows: 0
+
+DESC COLUMNS;
+
++----------------+--------+-----+------+---------+---------------+
+| Column | Type | Key | Null | Default | Semantic Type |
++----------------+--------+-----+------+---------+---------------+
+| table_catalog | String | | NO | | FIELD |
+| table_schema | String | | NO | | FIELD |
+| table_name | String | | NO | | FIELD |
+| column_name | String | | NO | | FIELD |
+| data_type | String | | NO | | FIELD |
+| semantic_type | String | | NO | | FIELD |
+| column_default | String | | YES | | FIELD |
+| is_nullable | String | | NO | | FIELD |
+| column_type | String | | NO | | FIELD |
+| column_comment | String | | YES | | FIELD |
++----------------+--------+-----+------+---------+---------------+
+
drop table my_db.foo;
Error: 4001(TableNotFound), Table not found: greptime.my_db.foo
diff --git a/tests/cases/standalone/common/system/information_schema.sql b/tests/cases/standalone/common/system/information_schema.sql
index 76261d1c665b..d54c2c0ebd51 100644
--- a/tests/cases/standalone/common/system/information_schema.sql
+++ b/tests/cases/standalone/common/system/information_schema.sql
@@ -119,6 +119,10 @@ DESC TABLE RUNTIME_METRICS;
DESC TABLE GREPTIME_REGION_PEERS;
+USE INFORMATION_SCHEMA;
+
+DESC COLUMNS;
+
drop table my_db.foo;
use public;
|
fix
|
canonicalize catalog and schema names (#3600)
|
0c5f4801b73141bc5d3af22e281872e9e71681d5
|
2024-04-19 11:12:34
|
Ruihang Xia
|
build: update toolchain to nightly-2024-04-18 (#3740)
| false
|
diff --git a/.github/workflows/apidoc.yml b/.github/workflows/apidoc.yml
index ca1befa52cff..75979fbce8b7 100644
--- a/.github/workflows/apidoc.yml
+++ b/.github/workflows/apidoc.yml
@@ -13,7 +13,7 @@ on:
name: Build API docs
env:
- RUST_TOOLCHAIN: nightly-2023-12-19
+ RUST_TOOLCHAIN: nightly-2024-04-18
jobs:
apidoc:
diff --git a/.github/workflows/develop.yml b/.github/workflows/develop.yml
index 4dc909ae5128..626decd81a80 100644
--- a/.github/workflows/develop.yml
+++ b/.github/workflows/develop.yml
@@ -30,7 +30,7 @@ concurrency:
cancel-in-progress: true
env:
- RUST_TOOLCHAIN: nightly-2023-12-19
+ RUST_TOOLCHAIN: nightly-2024-04-18
jobs:
check-typos-and-docs:
diff --git a/.github/workflows/nightly-ci.yml b/.github/workflows/nightly-ci.yml
index 3ea18123142c..b635ab16d6b7 100644
--- a/.github/workflows/nightly-ci.yml
+++ b/.github/workflows/nightly-ci.yml
@@ -12,7 +12,7 @@ concurrency:
cancel-in-progress: true
env:
- RUST_TOOLCHAIN: nightly-2023-12-19
+ RUST_TOOLCHAIN: nightly-2024-04-18
jobs:
sqlness:
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 77705c21a6c4..37dd3c70a8a5 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -82,7 +82,7 @@ on:
# Use env variables to control all the release process.
env:
# The arguments of building greptime.
- RUST_TOOLCHAIN: nightly-2023-12-19
+ RUST_TOOLCHAIN: nightly-2024-04-18
CARGO_PROFILE: nightly
# Controls whether to run tests, include unit-test, integration-test and sqlness.
diff --git a/Cargo.lock b/Cargo.lock
index 596712e6e9af..341ae2dbca6a 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -42,9 +42,9 @@ dependencies = [
[[package]]
name = "ahash"
-version = "0.7.7"
+version = "0.7.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5a824f2aa7e75a0c98c5a504fceb80649e9c35265d44525b5f94de4771a395cd"
+checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9"
dependencies = [
"getrandom",
"once_cell",
@@ -53,9 +53,9 @@ dependencies = [
[[package]]
name = "ahash"
-version = "0.8.6"
+version = "0.8.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "91429305e9f0a25f6205c5b8e0d2db09e0708a7a6df0f42212bb56c32c8ac97a"
+checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011"
dependencies = [
"cfg-if 1.0.0",
"const-random",
@@ -67,9 +67,9 @@ dependencies = [
[[package]]
name = "aho-corasick"
-version = "1.1.2"
+version = "1.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0"
+checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916"
dependencies = [
"memchr",
]
@@ -111,9 +111,9 @@ dependencies = [
[[package]]
name = "allocator-api2"
-version = "0.2.16"
+version = "0.2.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5"
+checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f"
[[package]]
name = "android-tzdata"
@@ -147,9 +147,9 @@ dependencies = [
[[package]]
name = "anstream"
-version = "0.6.5"
+version = "0.6.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d664a92ecae85fd0a7392615844904654d1d5f5514837f471ddef4a057aba1b6"
+checksum = "d96bd03f33fe50a863e394ee9718a706f988b9079b20c3784fb726e7678b62fb"
dependencies = [
"anstyle",
"anstyle-parse",
@@ -161,9 +161,9 @@ dependencies = [
[[package]]
name = "anstyle"
-version = "1.0.4"
+version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7079075b41f533b8c61d2a4d073c4676e1f8b249ff94a393b0595db304e0dd87"
+checksum = "8901269c6307e8d93993578286ac0edf7f195079ffff5ebdeea6a59ffb7e36bc"
[[package]]
name = "anstyle-parse"
@@ -195,9 +195,9 @@ dependencies = [
[[package]]
name = "anyhow"
-version = "1.0.76"
+version = "1.0.82"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "59d2a3357dde987206219e78ecfbbb6e8dad06cbb65292758d3270e6254f7355"
+checksum = "f538837af36e6f6a9be0faa67f9a314f8119e4e4b5867c6ab40ed60360142519"
[[package]]
name = "anymap"
@@ -217,7 +217,7 @@ dependencies = [
"datatypes",
"greptime-proto",
"paste",
- "prost 0.12.3",
+ "prost 0.12.4",
"snafu",
"tonic-build 0.9.2",
]
@@ -262,9 +262,9 @@ dependencies = [
[[package]]
name = "arc-swap"
-version = "1.6.0"
+version = "1.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6"
+checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457"
[[package]]
name = "array-init"
@@ -317,7 +317,7 @@ dependencies = [
"arrow-data",
"arrow-schema",
"chrono",
- "half 2.3.1",
+ "half 2.4.1",
"num",
]
@@ -327,13 +327,13 @@ version = "51.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8010572cf8c745e242d1b632bd97bd6d4f40fefed5ed1290a8f433abaa686fea"
dependencies = [
- "ahash 0.8.6",
+ "ahash 0.8.11",
"arrow-buffer",
"arrow-data",
"arrow-schema",
"chrono",
"chrono-tz",
- "half 2.3.1",
+ "half 2.4.1",
"hashbrown 0.14.3",
"num",
]
@@ -345,7 +345,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0d0a2432f0cba5692bf4cb757469c66791394bac9ec7ce63c1afe74744c37b27"
dependencies = [
"bytes",
- "half 2.3.1",
+ "half 2.4.1",
"num",
]
@@ -364,7 +364,7 @@ dependencies = [
"base64 0.22.0",
"chrono",
"comfy-table",
- "half 2.3.1",
+ "half 2.4.1",
"lexical-core",
"num",
"ryu",
@@ -397,7 +397,7 @@ checksum = "2742ac1f6650696ab08c88f6dd3f0eb68ce10f8c253958a18c943a68cd04aec5"
dependencies = [
"arrow-buffer",
"arrow-schema",
- "half 2.3.1",
+ "half 2.4.1",
"num",
]
@@ -416,8 +416,8 @@ dependencies = [
"bytes",
"futures",
"paste",
- "prost 0.12.3",
- "prost-types 0.12.3",
+ "prost 0.12.4",
+ "prost-types 0.12.4",
"tokio",
"tonic 0.11.0",
]
@@ -434,7 +434,7 @@ dependencies = [
"arrow-data",
"arrow-schema",
"flatbuffers",
- "lz4_flex 0.11.2",
+ "lz4_flex 0.11.3",
]
[[package]]
@@ -449,7 +449,7 @@ dependencies = [
"arrow-data",
"arrow-schema",
"chrono",
- "half 2.3.1",
+ "half 2.4.1",
"indexmap 2.2.6",
"lexical-core",
"num",
@@ -468,7 +468,7 @@ dependencies = [
"arrow-data",
"arrow-schema",
"arrow-select",
- "half 2.3.1",
+ "half 2.4.1",
"num",
]
@@ -478,12 +478,12 @@ version = "51.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "848ee52bb92eb459b811fb471175ea3afcf620157674c8794f539838920f9228"
dependencies = [
- "ahash 0.8.6",
+ "ahash 0.8.11",
"arrow-array",
"arrow-buffer",
"arrow-data",
"arrow-schema",
- "half 2.3.1",
+ "half 2.4.1",
"hashbrown 0.14.3",
]
@@ -493,7 +493,7 @@ version = "51.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "02d9483aaabe910c4781153ae1b6ae0393f72d9ef757d38d09d450070cf2e528"
dependencies = [
- "bitflags 2.4.1",
+ "bitflags 2.5.0",
"serde",
]
@@ -503,7 +503,7 @@ version = "51.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "849524fa70e0e3c5ab58394c770cb8f514d0122d20de08475f7b472ed8075830"
dependencies = [
- "ahash 0.8.6",
+ "ahash 0.8.11",
"arrow-array",
"arrow-buffer",
"arrow-data",
@@ -525,7 +525,7 @@ dependencies = [
"memchr",
"num",
"regex",
- "regex-syntax 0.8.2",
+ "regex-syntax 0.8.3",
]
[[package]]
@@ -550,7 +550,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35"
dependencies = [
"concurrent-queue",
- "event-listener",
+ "event-listener 2.5.3",
"futures-core",
]
@@ -574,11 +574,11 @@ dependencies = [
[[package]]
name = "async-compression"
-version = "0.4.5"
+version = "0.4.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bc2d0cfb2a7388d34f590e76686704c494ed7aaceed62ee1ba35cbf363abc2a5"
+checksum = "07dbbf24db18d609b1462965249abdf49129ccad073ec257da372adc83259c60"
dependencies = [
- "brotli",
+ "brotli 4.0.0",
"bzip2",
"flate2",
"futures-core",
@@ -587,28 +587,30 @@ dependencies = [
"pin-project-lite",
"tokio",
"xz2",
- "zstd 0.13.0",
- "zstd-safe 7.0.0",
+ "zstd 0.13.1",
+ "zstd-safe 7.1.0",
]
[[package]]
name = "async-lock"
-version = "2.8.0"
+version = "3.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "287272293e9d8c41773cec55e365490fe034813a2f172f502d6ddcf75b2f582b"
+checksum = "d034b430882f8381900d3fe6f0aaa3ad94f2cb4ac519b429692a1bc2dda4ae7b"
dependencies = [
- "event-listener",
+ "event-listener 4.0.3",
+ "event-listener-strategy",
+ "pin-project-lite",
]
[[package]]
name = "async-recursion"
-version = "1.0.5"
+version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5fd55a5ba1179988837d24ab4c7cc8ed6efdeff578ede0416b4225a5fca35bd0"
+checksum = "30c5ef0ede93efbf733c1a727f3b6b5a1060bbedd5600183e66f6e4be4af0ec5"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.55",
+ "syn 2.0.60",
]
[[package]]
@@ -630,18 +632,18 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.55",
+ "syn 2.0.60",
]
[[package]]
name = "async-trait"
-version = "0.1.75"
+version = "0.1.80"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fdf6721fb0140e4f897002dd086c06f6c27775df19cfe1fccb21181a48fd2c98"
+checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.55",
+ "syn 2.0.60",
]
[[package]]
@@ -713,14 +715,13 @@ dependencies = [
[[package]]
name = "auto_impl"
-version = "1.1.0"
+version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fee3da8ef1276b0bee5dd1c7258010d8fffd31801447323115a25560e1327b89"
+checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42"
dependencies = [
- "proc-macro-error",
"proc-macro2",
"quote",
- "syn 1.0.109",
+ "syn 2.0.60",
]
[[package]]
@@ -731,15 +732,15 @@ checksum = "7460f7dd8e100147b82a63afca1a20eb6c231ee36b90ba7272e14951cb58af59"
[[package]]
name = "autocfg"
-version = "1.1.0"
+version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
+checksum = "f1fdabc7756949593fe60f30ec81974b613357de856987752631dea1e3394c80"
[[package]]
name = "autotools"
-version = "0.2.6"
+version = "0.2.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "aef8da1805e028a172334c3b680f93e71126f2327622faef2ec3d893c0a4ad77"
+checksum = "ef941527c41b0fc0dd48511a8154cd5fc7e29200a0ff8b7203c5d777dbc795cf"
dependencies = [
"cc",
]
@@ -803,16 +804,16 @@ dependencies = [
"heck 0.4.1",
"proc-macro2",
"quote",
- "syn 2.0.55",
+ "syn 2.0.60",
]
[[package]]
name = "backon"
-version = "0.4.1"
+version = "0.4.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0c1a6197b2120bb2185a267f6515038558b019e92b832bb0320e96d66268dcf9"
+checksum = "d67782c3f868daa71d3533538e98a8e13713231969def7536e8039606fc46bf0"
dependencies = [
- "fastrand 1.9.0",
+ "fastrand",
"futures-core",
"pin-project",
"tokio",
@@ -820,9 +821,9 @@ dependencies = [
[[package]]
name = "backtrace"
-version = "0.3.69"
+version = "0.3.71"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837"
+checksum = "26b05800d2e817c8b3b4b54abd461726265fa9789ae34330622f2db9ee696f9d"
dependencies = [
"addr2line",
"cc",
@@ -841,9 +842,9 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8"
[[package]]
name = "base64"
-version = "0.21.5"
+version = "0.21.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "35636a1494ede3b646cc98f74f8e62c773a38a659ebc777a2cf26b9b74171df9"
+checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567"
[[package]]
name = "base64"
@@ -859,9 +860,9 @@ checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b"
[[package]]
name = "bcder"
-version = "0.7.3"
+version = "0.7.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bf16bec990f8ea25cab661199904ef452fcf11f565c404ce6cffbdf3f8cbbc47"
+checksum = "c627747a6774aab38beb35990d88309481378558875a41da1a4b2e373c906ef0"
dependencies = [
"bytes",
"smallvec",
@@ -874,7 +875,7 @@ dependencies = [
"api",
"arrow",
"chrono",
- "clap 4.4.11",
+ "clap 4.5.4",
"client",
"common-base",
"common-telemetry",
@@ -897,15 +898,15 @@ dependencies = [
"serde",
"store-api",
"tokio",
- "toml 0.8.8",
+ "toml 0.8.12",
"uuid",
]
[[package]]
name = "bigdecimal"
-version = "0.4.2"
+version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c06619be423ea5bb86c95f087d5707942791a08a85530df0db2209a3ecfb8bc9"
+checksum = "9324c8014cd04590682b34f1e9448d38f0674d0f7b2dc553331016ef0e4e9ebc"
dependencies = [
"autocfg",
"libm",
@@ -925,22 +926,22 @@ dependencies = [
[[package]]
name = "bindgen"
-version = "0.69.1"
+version = "0.69.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9ffcebc3849946a7170a05992aac39da343a90676ab392c51a4280981d6379c2"
+checksum = "a00dc851838a2120612785d195287475a3ac45514741da670b735818822129a0"
dependencies = [
- "bitflags 2.4.1",
+ "bitflags 2.5.0",
"cexpr",
"clang-sys",
+ "itertools 0.12.1",
"lazy_static",
"lazycell",
- "peeking_take_while",
"proc-macro2",
"quote",
"regex",
"rustc-hash",
"shlex",
- "syn 2.0.55",
+ "syn 2.0.60",
]
[[package]]
@@ -966,9 +967,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]]
name = "bitflags"
-version = "2.4.1"
+version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07"
+checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1"
[[package]]
name = "bitvec"
@@ -993,9 +994,9 @@ dependencies = [
[[package]]
name = "blake3"
-version = "1.5.0"
+version = "1.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0231f06152bf547e9c2b5194f247cd97aacf6dcd8b15d8e5ec0663f64580da87"
+checksum = "30cca6d3674597c30ddf2c587bf8d9d65c9a84d2326d941cc79c9842dfe0ef52"
dependencies = [
"arrayref",
"arrayvec",
@@ -1024,9 +1025,9 @@ dependencies = [
[[package]]
name = "borsh"
-version = "1.3.0"
+version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "26d4d6dafc1a3bb54687538972158f07b2c948bc57d5890df22c0739098b3028"
+checksum = "0901fc8eb0aca4c83be0106d6f2db17d86a08dfc2c25f0e84464bf381158add6"
dependencies = [
"borsh-derive",
"cfg_aliases",
@@ -1034,27 +1035,38 @@ dependencies = [
[[package]]
name = "borsh-derive"
-version = "1.3.0"
+version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bf4918709cc4dd777ad2b6303ed03cb37f3ca0ccede8c1b0d28ac6db8f4710e0"
+checksum = "51670c3aa053938b0ee3bd67c3817e471e626151131b934038e83c5bf8de48f5"
dependencies = [
"once_cell",
- "proc-macro-crate 2.0.0",
+ "proc-macro-crate 3.1.0",
"proc-macro2",
"quote",
- "syn 2.0.55",
+ "syn 2.0.60",
"syn_derive",
]
[[package]]
name = "brotli"
-version = "3.4.0"
+version = "3.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "516074a47ef4bce09577a3b379392300159ce5b1ba2e501ff1c819950066100f"
+checksum = "d640d25bc63c50fb1f0b545ffd80207d2e10a4c965530809b40ba3386825c391"
dependencies = [
"alloc-no-stdlib",
"alloc-stdlib",
- "brotli-decompressor",
+ "brotli-decompressor 2.5.1",
+]
+
+[[package]]
+name = "brotli"
+version = "4.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "125740193d7fee5cc63ab9e16c2fdc4e07c74ba755cc53b327d6ea029e9fc569"
+dependencies = [
+ "alloc-no-stdlib",
+ "alloc-stdlib",
+ "brotli-decompressor 3.0.0",
]
[[package]]
@@ -1067,6 +1079,16 @@ dependencies = [
"alloc-stdlib",
]
+[[package]]
+name = "brotli-decompressor"
+version = "3.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "65622a320492e09b5e0ac436b14c54ff68199bac392d0e89a6832c4518eea525"
+dependencies = [
+ "alloc-no-stdlib",
+ "alloc-stdlib",
+]
+
[[package]]
name = "bstr"
version = "0.2.17"
@@ -1100,15 +1122,15 @@ dependencies = [
[[package]]
name = "bumpalo"
-version = "3.14.0"
+version = "3.16.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec"
+checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c"
[[package]]
name = "bytecheck"
-version = "0.6.11"
+version = "0.6.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8b6372023ac861f6e6dc89c8344a8f398fb42aaba2b5dbc649ca0c0e9dbcb627"
+checksum = "23cdc57ce23ac53c931e88a43d06d070a6fd142f2617be5855eb75efc9beb1c2"
dependencies = [
"bytecheck_derive",
"ptr_meta",
@@ -1117,26 +1139,20 @@ dependencies = [
[[package]]
name = "bytecheck_derive"
-version = "0.6.11"
+version = "0.6.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a7ec4c6f261935ad534c0c22dbef2201b45918860eb1c574b972bd213a76af61"
+checksum = "3db406d29fbcd95542e92559bed4d8ad92636d1ca8b3b72ede10b4bcc010e659"
dependencies = [
"proc-macro2",
"quote",
"syn 1.0.109",
]
-[[package]]
-name = "bytecount"
-version = "0.6.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e1e5f035d16fc623ae5f74981db80a439803888314e3a555fd6f04acd51a3205"
-
[[package]]
name = "bytemuck"
-version = "1.14.0"
+version = "1.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "374d28ec25809ee0e23827c2ab573d729e293f281dfe393500e7ad618baa61c6"
+checksum = "5d6d68c57235a3a081186990eca2867354726650f42f7516ca50c28d6281fd15"
[[package]]
name = "byteorder"
@@ -1146,9 +1162,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"
[[package]]
name = "bytes"
-version = "1.5.0"
+version = "1.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223"
+checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9"
dependencies = [
"serde",
]
@@ -1196,40 +1212,9 @@ dependencies = [
[[package]]
name = "cactus"
-version = "1.0.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cf034765b7d19a011c6d619e880582bf95e8186b580e6fab56589872dd87dcf5"
-
-[[package]]
-name = "camino"
-version = "1.1.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c59e92b5a388f549b863a7bea62612c09f24c8393560709a54558a9abdfb3b9c"
-dependencies = [
- "serde",
-]
-
-[[package]]
-name = "cargo-platform"
-version = "0.1.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e34637b3140142bdf929fb439e8aa4ebad7651ebf7b1080b3930aa16ac1459ff"
-dependencies = [
- "serde",
-]
-
-[[package]]
-name = "cargo_metadata"
-version = "0.14.2"
+version = "1.0.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4acbb09d9ee8e23699b9634375c72795d095bf268439da88562cf9b501f181fa"
-dependencies = [
- "camino",
- "cargo-platform",
- "semver",
- "serde",
- "serde_json",
-]
+checksum = "acbc26382d871df4b7442e3df10a9402bf3cf5e55cbd66f12be38861425f0564"
[[package]]
name = "caseless"
@@ -1302,9 +1287,9 @@ dependencies = [
[[package]]
name = "cc"
-version = "1.0.83"
+version = "1.0.94"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0"
+checksum = "17f6e324229dc011159fcc089755d1e2e216a90d43a7dea6853ca740b84f35e7"
dependencies = [
"jobserver",
"libc",
@@ -1365,9 +1350,9 @@ dependencies = [
[[package]]
name = "chrono"
-version = "0.4.35"
+version = "0.4.38"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8eaf5903dcbc0a39312feb77df2ff4c76387d591b9fc7b04a238dcf8bb62639a"
+checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401"
dependencies = [
"android-tzdata",
"iana-time-zone",
@@ -1375,14 +1360,14 @@ dependencies = [
"num-traits",
"serde",
"wasm-bindgen",
- "windows-targets 0.52.0",
+ "windows-targets 0.52.5",
]
[[package]]
name = "chrono-tz"
-version = "0.8.4"
+version = "0.8.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e23185c0e21df6ed832a12e2bda87c7d1def6842881fb634a8511ced741b0d76"
+checksum = "d59ae0466b83e838b81a54256c39d5d7c20b9d7daa10510a242d9b75abd5936e"
dependencies = [
"chrono",
"chrono-tz-build",
@@ -1408,9 +1393,9 @@ checksum = "6e4de3bc4ea267985becf712dc6d9eed8b04c953b3fcfb339ebc87acd9804901"
[[package]]
name = "ciborium"
-version = "0.2.1"
+version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "effd91f6c78e5a4ace8a5d3c0b6bfaec9e2baaef55f3efc00e45fb2e477ee926"
+checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e"
dependencies = [
"ciborium-io",
"ciborium-ll",
@@ -1419,18 +1404,18 @@ dependencies = [
[[package]]
name = "ciborium-io"
-version = "0.2.1"
+version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cdf919175532b369853f5d5e20b26b43112613fd6fe7aee757e35f7a44642656"
+checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757"
[[package]]
name = "ciborium-ll"
-version = "0.2.1"
+version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "defaa24ecc093c77630e6c15e17c51f5e187bf35ee514f4e2d67baaa96dae22b"
+checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9"
dependencies = [
"ciborium-io",
- "half 1.8.2",
+ "half 2.4.1",
]
[[package]]
@@ -1445,13 +1430,13 @@ dependencies = [
[[package]]
name = "clang-sys"
-version = "1.6.1"
+version = "1.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c688fc74432808e3eb684cae8830a86be1d66a2bd58e1f248ed0960a590baf6f"
+checksum = "67523a3b4be3ce1989d607a828d036249522dd9c1c8de7f4dd2dae43a37369d1"
dependencies = [
"glob",
"libc",
- "libloading",
+ "libloading 0.8.3",
]
[[package]]
@@ -1478,14 +1463,14 @@ dependencies = [
"bitflags 1.3.2",
"clap_lex 0.2.4",
"indexmap 1.9.3",
- "textwrap 0.16.0",
+ "textwrap 0.16.1",
]
[[package]]
name = "clap"
-version = "4.4.11"
+version = "4.5.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bfaff671f6b22ca62406885ece523383b9b64022e341e53e009a62ebc47a45f2"
+checksum = "90bc066a67923782aa8515dbaea16946c5bcc5addbd668bb80af688e53e548a0"
dependencies = [
"clap_builder",
"clap_derive",
@@ -1493,26 +1478,26 @@ dependencies = [
[[package]]
name = "clap_builder"
-version = "4.4.11"
+version = "4.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a216b506622bb1d316cd51328dce24e07bdff4a6128a47c7e7fad11878d5adbb"
+checksum = "ae129e2e766ae0ec03484e609954119f123cc1fe650337e155d03b022f24f7b4"
dependencies = [
"anstream",
"anstyle",
- "clap_lex 0.6.0",
- "strsim 0.10.0",
+ "clap_lex 0.7.0",
+ "strsim 0.11.1",
]
[[package]]
name = "clap_derive"
-version = "4.4.7"
+version = "4.5.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cf9804afaaf59a91e75b022a30fb7229a7901f60c755489cc61c9b423b836442"
+checksum = "528131438037fd55894f62d6e9f068b8f45ac57ffa77517819645d10aed04f64"
dependencies = [
- "heck 0.4.1",
+ "heck 0.5.0",
"proc-macro2",
"quote",
- "syn 2.0.55",
+ "syn 2.0.60",
]
[[package]]
@@ -1526,9 +1511,9 @@ dependencies = [
[[package]]
name = "clap_lex"
-version = "0.6.0"
+version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "702fc72eb24e5a1e48ce58027a675bc24edd52096d5397d4aea7c6dd9eca0bd1"
+checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce"
[[package]]
name = "client"
@@ -1556,7 +1541,7 @@ dependencies = [
"moka",
"parking_lot 0.12.1",
"prometheus",
- "prost 0.12.3",
+ "prost 0.12.4",
"rand",
"serde_json",
"snafu",
@@ -1597,7 +1582,7 @@ dependencies = [
"auth",
"catalog",
"chrono",
- "clap 4.4.11",
+ "clap 4.5.4",
"client",
"common-base",
"common-catalog",
@@ -1630,7 +1615,7 @@ dependencies = [
"nu-ansi-term",
"plugins",
"prometheus",
- "prost 0.12.3",
+ "prost 0.12.4",
"query",
"rand",
"regex",
@@ -1648,7 +1633,7 @@ dependencies = [
"tempfile",
"tikv-jemallocator",
"tokio",
- "toml 0.8.8",
+ "toml 0.8.12",
]
[[package]]
@@ -1659,9 +1644,9 @@ checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7"
[[package]]
name = "combine"
-version = "4.6.6"
+version = "4.6.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "35ed6e9d84f0b51a7f52daf1c7d71dd136fd7a3f41a8462b8cdb8c78d920fad4"
+checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd"
dependencies = [
"bytes",
"memchr",
@@ -1669,12 +1654,12 @@ dependencies = [
[[package]]
name = "comfy-table"
-version = "7.1.0"
+version = "7.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7c64043d6c7b7a4c58e39e7efccfdea7b93d885a795d0c054a69dbbf4dd52686"
+checksum = "b34115915337defe99b2aff5c2ce6771e5fbc4079f4b506301f5cf394c8452f7"
dependencies = [
- "strum 0.25.0",
- "strum_macros 0.25.3",
+ "strum 0.26.2",
+ "strum_macros 0.26.2",
"unicode-width",
]
@@ -1696,7 +1681,7 @@ dependencies = [
"paste",
"serde",
"snafu",
- "toml 0.8.8",
+ "toml 0.8.12",
]
[[package]]
@@ -1841,7 +1826,7 @@ dependencies = [
"datatypes",
"flatbuffers",
"lazy_static",
- "prost 0.12.3",
+ "prost 0.12.4",
"rand",
"snafu",
"tokio",
@@ -1878,7 +1863,7 @@ dependencies = [
"snafu",
"static_assertions",
"syn 1.0.109",
- "syn 2.0.55",
+ "syn 2.0.60",
]
[[package]]
@@ -1901,7 +1886,7 @@ dependencies = [
"api",
"async-recursion",
"async-trait",
- "base64 0.21.5",
+ "base64 0.21.7",
"bytes",
"chrono",
"common-base",
@@ -1926,7 +1911,7 @@ dependencies = [
"itertools 0.10.5",
"lazy_static",
"prometheus",
- "prost 0.12.3",
+ "prost 0.12.4",
"rand",
"regex",
"rskafka",
@@ -2056,7 +2041,7 @@ dependencies = [
"opentelemetry 0.21.0",
"opentelemetry-otlp",
"opentelemetry-semantic-conventions",
- "opentelemetry_sdk 0.21.1",
+ "opentelemetry_sdk 0.21.2",
"parking_lot 0.12.1",
"prometheus",
"serde",
@@ -2122,7 +2107,7 @@ dependencies = [
"serde_with",
"snafu",
"tokio",
- "toml 0.8.8",
+ "toml 0.8.12",
]
[[package]]
@@ -2155,15 +2140,15 @@ dependencies = [
[[package]]
name = "console"
-version = "0.15.7"
+version = "0.15.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c926e00cc70edefdc64d3a5ff31cc65bb97a3460097762bd23afb4d8145fccf8"
+checksum = "0e1f83fc076bd6dd27517eacdf25fef6c4dfe5f1d7448bafaaf3a26f13b5e4eb"
dependencies = [
"encode_unicode",
"lazy_static",
"libc",
"unicode-width",
- "windows-sys 0.45.0",
+ "windows-sys 0.52.0",
]
[[package]]
@@ -2216,9 +2201,9 @@ checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8"
[[package]]
name = "const-random"
-version = "0.1.17"
+version = "0.1.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5aaf16c9c2c612020bcfd042e170f6e32de9b9d75adb5277cdbbd2e2c8c8299a"
+checksum = "87e00182fe74b066627d63b85fd550ac2998d4b0bd86bfed477a0ae4c7c71359"
dependencies = [
"const-random-macro",
]
@@ -2267,18 +2252,18 @@ dependencies = [
[[package]]
name = "cpufeatures"
-version = "0.2.11"
+version = "0.2.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ce420fe07aecd3e67c5f910618fe65e94158f6dcc0adf44e00d69ce2bdfe0fd0"
+checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504"
dependencies = [
"libc",
]
[[package]]
name = "crc"
-version = "3.0.1"
+version = "3.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "86ec7a15cbe22e59248fc7eadb1907dab5ba09372595da4d73dd805ed4417dfe"
+checksum = "69e6e4d7b33a94f0991c26729976b10ebde1d34c3ee82408fb536164fa10d636"
dependencies = [
"crc-catalog",
]
@@ -2291,18 +2276,18 @@ checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5"
[[package]]
name = "crc32c"
-version = "0.6.4"
+version = "0.6.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d8f48d60e5b4d2c53d5c2b1d8a58c849a70ae5e5509b08a48d047e3b65714a74"
+checksum = "89254598aa9b9fa608de44b3ae54c810f0f06d755e24c50177f1f8f31ff50ce2"
dependencies = [
"rustc_version",
]
[[package]]
name = "crc32fast"
-version = "1.3.2"
+version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d"
+checksum = "b3855a8a784b474f333699ef2bbca9db2c4a1f6d9088a90a2d25b1eb53111eaa"
dependencies = [
"cfg-if 1.0.0",
]
@@ -2347,11 +2332,10 @@ dependencies = [
[[package]]
name = "crossbeam"
-version = "0.8.3"
+version = "0.8.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6eb9105919ca8e40d437fc9cbb8f1975d916f1bd28afe795a48aae32a2cc8920"
+checksum = "1137cd7e7fc0fb5d3c5a8678be38ec56e819125d8d7907411fe24ccb943faca8"
dependencies = [
- "cfg-if 1.0.0",
"crossbeam-channel",
"crossbeam-deque",
"crossbeam-epoch",
@@ -2361,54 +2345,46 @@ dependencies = [
[[package]]
name = "crossbeam-channel"
-version = "0.5.10"
+version = "0.5.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "82a9b73a36529d9c47029b9fb3a6f0ea3cc916a261195352ba19e770fc1748b2"
+checksum = "ab3db02a9c5b5121e1e42fbdb1aeb65f5e02624cc58c43f2884c6ccac0b82f95"
dependencies = [
- "cfg-if 1.0.0",
"crossbeam-utils",
]
[[package]]
name = "crossbeam-deque"
-version = "0.8.4"
+version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fca89a0e215bab21874660c67903c5f143333cab1da83d041c7ded6053774751"
+checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d"
dependencies = [
- "cfg-if 1.0.0",
"crossbeam-epoch",
"crossbeam-utils",
]
[[package]]
name = "crossbeam-epoch"
-version = "0.9.17"
+version = "0.9.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0e3681d554572a651dda4186cd47240627c3d0114d45a95f6ad27f2f22e7548d"
+checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e"
dependencies = [
- "autocfg",
- "cfg-if 1.0.0",
"crossbeam-utils",
]
[[package]]
name = "crossbeam-queue"
-version = "0.3.10"
+version = "0.3.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "adc6598521bb5a83d491e8c1fe51db7296019d2ca3cb93cc6c2a20369a4d78a2"
+checksum = "df0346b5d5e76ac2fe4e327c5fd1118d6be7c51dfb18f9b7922923f287471e35"
dependencies = [
- "cfg-if 1.0.0",
"crossbeam-utils",
]
[[package]]
name = "crossbeam-utils"
-version = "0.8.18"
+version = "0.8.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c3a430a770ebd84726f584a90ee7f020d28db52c6d02138900f22341f866d39c"
-dependencies = [
- "cfg-if 1.0.0",
-]
+checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345"
[[package]]
name = "crunchy"
@@ -2469,12 +2445,12 @@ dependencies = [
[[package]]
name = "darling"
-version = "0.20.3"
+version = "0.20.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0209d94da627ab5605dcccf08bb18afa5009cfbef48d8a8b7d7bdbc79be25c5e"
+checksum = "54e36fcd13ed84ffdfda6f5be89b31287cbb80c439841fe69e04841435464391"
dependencies = [
- "darling_core 0.20.3",
- "darling_macro 0.20.3",
+ "darling_core 0.20.8",
+ "darling_macro 0.20.8",
]
[[package]]
@@ -2493,16 +2469,16 @@ dependencies = [
[[package]]
name = "darling_core"
-version = "0.20.3"
+version = "0.20.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "177e3443818124b357d8e76f53be906d60937f0d3a90773a664fa63fa253e621"
+checksum = "9c2cf1c23a687a1feeb728783b993c4e1ad83d99f351801977dd809b48d0a70f"
dependencies = [
"fnv",
"ident_case",
"proc-macro2",
"quote",
"strsim 0.10.0",
- "syn 2.0.55",
+ "syn 2.0.60",
]
[[package]]
@@ -2518,13 +2494,13 @@ dependencies = [
[[package]]
name = "darling_macro"
-version = "0.20.3"
+version = "0.20.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "836a9bbc7ad63342d6d6e7b815ccab164bc77a2d95d84bc3117a8c0d5c98e2d5"
+checksum = "a668eda54683121533a393014d8692171709ff57a7d61f187b6e782719f8933f"
dependencies = [
- "darling_core 0.20.3",
+ "darling_core 0.20.8",
"quote",
- "syn 2.0.55",
+ "syn 2.0.60",
]
[[package]]
@@ -2551,12 +2527,12 @@ name = "datafusion"
version = "37.0.0"
source = "git+https://github.com/apache/arrow-datafusion.git?rev=34eda15b73a9e278af8844b30ed2f1c21c10359c#34eda15b73a9e278af8844b30ed2f1c21c10359c"
dependencies = [
- "ahash 0.8.6",
+ "ahash 0.8.11",
"arrow",
"arrow-array",
"arrow-ipc",
"arrow-schema",
- "async-compression 0.4.5",
+ "async-compression 0.4.8",
"async-trait",
"bytes",
"bzip2",
@@ -2576,10 +2552,10 @@ dependencies = [
"flate2",
"futures",
"glob",
- "half 2.3.1",
+ "half 2.4.1",
"hashbrown 0.14.3",
"indexmap 2.2.6",
- "itertools 0.12.0",
+ "itertools 0.12.1",
"log",
"num_cpus",
"object_store",
@@ -2594,7 +2570,7 @@ dependencies = [
"url",
"uuid",
"xz2",
- "zstd 0.13.0",
+ "zstd 0.13.1",
]
[[package]]
@@ -2602,13 +2578,13 @@ name = "datafusion-common"
version = "37.0.0"
source = "git+https://github.com/apache/arrow-datafusion.git?rev=34eda15b73a9e278af8844b30ed2f1c21c10359c#34eda15b73a9e278af8844b30ed2f1c21c10359c"
dependencies = [
- "ahash 0.8.6",
+ "ahash 0.8.11",
"arrow",
"arrow-array",
"arrow-buffer",
"arrow-schema",
"chrono",
- "half 2.3.1",
+ "half 2.4.1",
"instant",
"libc",
"num_cpus",
@@ -2650,7 +2626,7 @@ name = "datafusion-expr"
version = "37.0.0"
source = "git+https://github.com/apache/arrow-datafusion.git?rev=34eda15b73a9e278af8844b30ed2f1c21c10359c#34eda15b73a9e278af8844b30ed2f1c21c10359c"
dependencies = [
- "ahash 0.8.6",
+ "ahash 0.8.11",
"arrow",
"arrow-array",
"chrono",
@@ -2678,7 +2654,7 @@ dependencies = [
"datafusion-physical-expr",
"hashbrown 0.14.3",
"hex",
- "itertools 0.12.0",
+ "itertools 0.12.1",
"log",
"md-5",
"rand",
@@ -2716,7 +2692,7 @@ dependencies = [
"datafusion-execution",
"datafusion-expr",
"datafusion-functions",
- "itertools 0.12.0",
+ "itertools 0.12.1",
"log",
"paste",
]
@@ -2733,9 +2709,9 @@ dependencies = [
"datafusion-expr",
"datafusion-physical-expr",
"hashbrown 0.14.3",
- "itertools 0.12.0",
+ "itertools 0.12.1",
"log",
- "regex-syntax 0.8.2",
+ "regex-syntax 0.8.3",
]
[[package]]
@@ -2743,7 +2719,7 @@ name = "datafusion-physical-expr"
version = "37.0.0"
source = "git+https://github.com/apache/arrow-datafusion.git?rev=34eda15b73a9e278af8844b30ed2f1c21c10359c#34eda15b73a9e278af8844b30ed2f1c21c10359c"
dependencies = [
- "ahash 0.8.6",
+ "ahash 0.8.11",
"arrow",
"arrow-array",
"arrow-buffer",
@@ -2757,11 +2733,11 @@ dependencies = [
"datafusion-expr",
"datafusion-functions-aggregate",
"datafusion-physical-expr-common",
- "half 2.3.1",
+ "half 2.4.1",
"hashbrown 0.14.3",
"hex",
"indexmap 2.2.6",
- "itertools 0.12.0",
+ "itertools 0.12.1",
"log",
"paste",
"petgraph",
@@ -2783,7 +2759,7 @@ name = "datafusion-physical-plan"
version = "37.0.0"
source = "git+https://github.com/apache/arrow-datafusion.git?rev=34eda15b73a9e278af8844b30ed2f1c21c10359c#34eda15b73a9e278af8844b30ed2f1c21c10359c"
dependencies = [
- "ahash 0.8.6",
+ "ahash 0.8.11",
"arrow",
"arrow-array",
"arrow-buffer",
@@ -2799,10 +2775,10 @@ dependencies = [
"datafusion-physical-expr",
"datafusion-physical-expr-common",
"futures",
- "half 2.3.1",
+ "half 2.4.1",
"hashbrown 0.14.3",
"indexmap 2.2.6",
- "itertools 0.12.0",
+ "itertools 0.12.1",
"log",
"once_cell",
"parking_lot 0.12.1",
@@ -2834,10 +2810,10 @@ dependencies = [
"async-recursion",
"chrono",
"datafusion",
- "itertools 0.12.0",
+ "itertools 0.12.1",
"object_store",
- "prost 0.12.3",
- "prost-types 0.12.3",
+ "prost 0.12.4",
+ "prost-types 0.12.4",
"substrait 0.30.0",
]
@@ -2882,7 +2858,7 @@ dependencies = [
"mito2",
"object-store",
"prometheus",
- "prost 0.12.3",
+ "prost 0.12.4",
"query",
"reqwest",
"secrecy",
@@ -2894,7 +2870,7 @@ dependencies = [
"substrait 0.7.2",
"table",
"tokio",
- "toml 0.8.8",
+ "toml 0.8.12",
"tonic 0.11.0",
]
@@ -2944,9 +2920,9 @@ dependencies = [
[[package]]
name = "der"
-version = "0.7.8"
+version = "0.7.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c"
+checksum = "f55bf8e7b65898637379c1b74eb1551107c8294ed26d855ceb9fd1a09cfc9bc0"
dependencies = [
"const-oid 0.9.6",
"pem-rfc7468 0.7.0",
@@ -2955,9 +2931,9 @@ dependencies = [
[[package]]
name = "deranged"
-version = "0.3.10"
+version = "0.3.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8eb30d70a07a3b04884d2677f06bec33509dc67ca60d92949e5535352d3191dc"
+checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4"
dependencies = [
"powerfmt",
"serde",
@@ -2982,7 +2958,7 @@ checksum = "d150dea618e920167e5973d70ae6ece4385b7164e0d799fe7c122dd0a5d912ad"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.55",
+ "syn 2.0.60",
]
[[package]]
@@ -2993,7 +2969,7 @@ checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.55",
+ "syn 2.0.60",
]
[[package]]
@@ -3191,15 +3167,15 @@ checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1"
[[package]]
name = "dyn-clone"
-version = "1.0.16"
+version = "1.0.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "545b22097d44f8a9581187cdf93de7a71e4722bf51200cfaba810865b49a495d"
+checksum = "0d6ef0072f8a535281e4876be788938b528e9a1d43900b82c2569af7da799125"
[[package]]
name = "either"
-version = "1.9.0"
+version = "1.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07"
+checksum = "a47c1c47d2f5964e29c61246e81db715514cd532db6b5116a25ea3c03d6780a2"
[[package]]
name = "ena"
@@ -3218,9 +3194,9 @@ checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f"
[[package]]
name = "encoding_rs"
-version = "0.8.33"
+version = "0.8.34"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7268b386296a025e474d5140678f75d6de9493ae55a5d709eeb9dd08149945e1"
+checksum = "b45de904aa0b010bce2ab45264d0631681847fa7b6f2eaa7dab7619943bc4f59"
dependencies = [
"cfg-if 1.0.0",
]
@@ -3233,34 +3209,34 @@ checksum = "c34f04666d835ff5d62e058c3995147c06f42fe86ff053337632bca83e42702d"
[[package]]
name = "enum-iterator"
-version = "1.4.1"
+version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7add3873b5dd076766ee79c8e406ad1a472c385476b9e38849f8eec24f1be689"
+checksum = "9fd242f399be1da0a5354aa462d57b4ab2b4ee0683cc552f7c007d2d12d36e94"
dependencies = [
"enum-iterator-derive",
]
[[package]]
name = "enum-iterator-derive"
-version = "1.2.1"
+version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "eecf8589574ce9b895052fa12d69af7a233f99e6107f5cb8dd1044f2a17bfdcb"
+checksum = "03cdc46ec28bd728e67540c528013c6a10eb69a02eb31078a1bda695438cbfb8"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.55",
+ "syn 2.0.60",
]
[[package]]
name = "enum_dispatch"
-version = "0.3.12"
+version = "0.3.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8f33313078bb8d4d05a2733a94ac4c2d8a0df9a2b84424ebf4f33bfc224a890e"
+checksum = "aa18ce2bc66555b3218614519ac839ddb759a7d6720732f979ef8d13be147ecd"
dependencies = [
"once_cell",
"proc-macro2",
"quote",
- "syn 2.0.55",
+ "syn 2.0.60",
]
[[package]]
@@ -3271,9 +3247,9 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5"
[[package]]
name = "erased-serde"
-version = "0.4.1"
+version = "0.4.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4adbf0983fe06bd3a5c19c8477a637c2389feb0994eca7a59e3b961054aa7c0a"
+checksum = "2b73807008a3c7f171cc40312f37d95ef0396e048b5848d775f54b1a4dd4a0d3"
dependencies = [
"serde",
]
@@ -3288,15 +3264,6 @@ dependencies = [
"windows-sys 0.52.0",
]
-[[package]]
-name = "error-chain"
-version = "0.12.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2d2f06b9cac1506ece98fe3231e3cc9c4410ec3d5b1f24ae1c8946f0742cdefc"
-dependencies = [
- "version_check",
-]
-
[[package]]
name = "error-code"
version = "2.3.1"
@@ -3313,7 +3280,7 @@ version = "0.12.4"
source = "git+https://github.com/MichaelScofield/etcd-client.git?rev=4c371e9b3ea8e0a8ee2f9cbd7ded26e54a45df3b#4c371e9b3ea8e0a8ee2f9cbd7ded26e54a45df3b"
dependencies = [
"http",
- "prost 0.12.3",
+ "prost 0.12.4",
"tokio",
"tokio-stream",
"tonic 0.11.0",
@@ -3328,6 +3295,38 @@ version = "2.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0"
+[[package]]
+name = "event-listener"
+version = "4.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "67b215c49b2b248c855fb73579eb1f4f26c38ffdc12973e20e07b91d78d5646e"
+dependencies = [
+ "concurrent-queue",
+ "parking",
+ "pin-project-lite",
+]
+
+[[package]]
+name = "event-listener"
+version = "5.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6d9944b8ca13534cdfb2800775f8dd4902ff3fc75a50101466decadfdf322a24"
+dependencies = [
+ "concurrent-queue",
+ "parking",
+ "pin-project-lite",
+]
+
+[[package]]
+name = "event-listener-strategy"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "958e4d70b6d5e81971bebec42271ec641e7ff4e170a6fa605f2b8a8b65cb97d3"
+dependencies = [
+ "event-listener 4.0.3",
+ "pin-project-lite",
+]
+
[[package]]
name = "exitcode"
version = "1.1.2"
@@ -3359,18 +3358,9 @@ checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a"
[[package]]
name = "fastrand"
-version = "1.9.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be"
-dependencies = [
- "instant",
-]
-
-[[package]]
-name = "fastrand"
-version = "2.0.1"
+version = "2.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5"
+checksum = "658bd65b1cf4c852a3cc96f18a8ce7b5640f6b703f905c7d74532294c2a63984"
[[package]]
name = "fd-lock"
@@ -3379,7 +3369,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ef033ed5e9bad94e55838ca0ca906db0e043f517adda0c8b79c7a8c66c93c1b5"
dependencies = [
"cfg-if 1.0.0",
- "rustix 0.38.28",
+ "rustix 0.38.32",
"windows-sys 0.48.0",
]
@@ -3451,9 +3441,9 @@ checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80"
[[package]]
name = "flagset"
-version = "0.4.4"
+version = "0.4.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d52a7e408202050813e6f1d9addadcaafef3dca7530c7ddfb005d4081cce6779"
+checksum = "cdeb3aa5e95cf9aabc17f060cfa0ced7b83f042390760ca53bf09df9968acaa1"
[[package]]
name = "flatbuffers"
@@ -3505,7 +3495,7 @@ dependencies = [
"hydroflow",
"itertools 0.10.5",
"num-traits",
- "prost 0.12.3",
+ "prost 0.12.4",
"query",
"serde",
"serde_json",
@@ -3585,7 +3575,7 @@ dependencies = [
"operator",
"partition",
"prometheus",
- "prost 0.12.3",
+ "prost 0.12.4",
"query",
"raft-engine",
"script",
@@ -3599,7 +3589,7 @@ dependencies = [
"strfmt",
"table",
"tokio",
- "toml 0.8.8",
+ "toml 0.8.12",
"tonic 0.11.0",
"tower",
"uuid",
@@ -3630,7 +3620,7 @@ checksum = "b0fa992f1656e1707946bbba340ad244f0814009ef8c0118eb7b658395f19a2e"
dependencies = [
"frunk_proc_macro_helpers",
"quote",
- "syn 2.0.55",
+ "syn 2.0.60",
]
[[package]]
@@ -3642,7 +3632,7 @@ dependencies = [
"frunk_core",
"proc-macro2",
"quote",
- "syn 2.0.55",
+ "syn 2.0.60",
]
[[package]]
@@ -3654,7 +3644,7 @@ dependencies = [
"frunk_core",
"frunk_proc_macro_helpers",
"quote",
- "syn 2.0.55",
+ "syn 2.0.60",
]
[[package]]
@@ -3755,7 +3745,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.55",
+ "syn 2.0.60",
]
[[package]]
@@ -3772,9 +3762,9 @@ checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004"
[[package]]
name = "futures-timer"
-version = "3.0.2"
+version = "3.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c"
+checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24"
[[package]]
name = "futures-util"
@@ -3825,9 +3815,9 @@ dependencies = [
[[package]]
name = "getrandom"
-version = "0.2.11"
+version = "0.2.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fe9006bed769170c11f845cf00c7c1e9092aeb3f268e007c3e760ac68008070f"
+checksum = "94b22e06ecb0110981051723910cbf0b5f5e09a2062dd7663334ee79a9d1286c"
dependencies = [
"cfg-if 1.0.0",
"js-sys",
@@ -3860,7 +3850,7 @@ version = "0.18.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "232e6a7bfe35766bf715e55a88b39a700596c0ccfd88cd3680b4cdb40d66ef70"
dependencies = [
- "bitflags 2.4.1",
+ "bitflags 2.5.0",
"libc",
"libgit2-sys",
"log",
@@ -3878,7 +3868,7 @@ name = "greptime-proto"
version = "0.1.0"
source = "git+https://github.com/MichaelScofield/greptime-proto.git?rev=bdbd4cfa871ec8d192d3dbabf11debcb2cb67748#bdbd4cfa871ec8d192d3dbabf11debcb2cb67748"
dependencies = [
- "prost 0.12.3",
+ "prost 0.12.4",
"serde",
"serde_json",
"strum 0.25.0",
@@ -3889,9 +3879,9 @@ dependencies = [
[[package]]
name = "h2"
-version = "0.3.24"
+version = "0.3.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bb2c4422095b67ee78da96fbb51a4cc413b3b25883c7717ff7ca1ab31022c9c9"
+checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8"
dependencies = [
"bytes",
"fnv",
@@ -3908,15 +3898,15 @@ dependencies = [
[[package]]
name = "half"
-version = "1.8.2"
+version = "1.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7"
+checksum = "1b43ede17f21864e81be2fa654110bf1e793774238d86ef8555c37e6519c0403"
[[package]]
name = "half"
-version = "2.3.1"
+version = "2.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bc52e53916c08643f1b56ec082790d1e86a32e58dc5268f897f313fbae7b4872"
+checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888"
dependencies = [
"cfg-if 1.0.0",
"crunchy",
@@ -3929,7 +3919,7 @@ version = "0.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
dependencies = [
- "ahash 0.7.7",
+ "ahash 0.7.8",
]
[[package]]
@@ -3938,7 +3928,7 @@ version = "0.13.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e"
dependencies = [
- "ahash 0.8.6",
+ "ahash 0.8.11",
]
[[package]]
@@ -3947,7 +3937,7 @@ version = "0.14.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604"
dependencies = [
- "ahash 0.8.6",
+ "ahash 0.8.11",
"allocator-api2",
]
@@ -3966,7 +3956,7 @@ version = "7.5.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "765c9198f173dd59ce26ff9f95ef0aafd0a0fe01fb9d72841bc5066a4c06511d"
dependencies = [
- "base64 0.21.5",
+ "base64 0.21.7",
"byteorder",
"flate2",
"nom",
@@ -3979,7 +3969,7 @@ version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "06683b93020a07e3dbcf5f8c0f6d40080d725bea7936fc01ad345c01b97dc270"
dependencies = [
- "base64 0.21.5",
+ "base64 0.21.7",
"bytes",
"headers-core",
"http",
@@ -4023,9 +4013,9 @@ dependencies = [
[[package]]
name = "hermit-abi"
-version = "0.3.3"
+version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d77f7ec81a6d05a3abb01ab6eb7590f6083d08449fe5a1c8b1e620283546ccb7"
+checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024"
[[package]]
name = "hex"
@@ -4128,9 +4118,9 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9"
[[package]]
name = "human-panic"
-version = "1.2.2"
+version = "1.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7a79a67745be0cb8dd2771f03b24c2f25df98d5471fe7a595d668cfa2e6f843d"
+checksum = "c4f016c89920bbb30951a8405ecacbb4540db5524313b9445736e7e1855cf370"
dependencies = [
"anstream",
"anstyle",
@@ -4138,7 +4128,7 @@ dependencies = [
"os_info",
"serde",
"serde_derive",
- "toml 0.8.8",
+ "toml 0.8.12",
"uuid",
]
@@ -4199,7 +4189,7 @@ dependencies = [
"proc-macro-crate 1.3.1",
"proc-macro2",
"quote",
- "syn 2.0.55",
+ "syn 2.0.60",
]
[[package]]
@@ -4214,7 +4204,7 @@ dependencies = [
"rust-sitter",
"rust-sitter-tool",
"slotmap",
- "syn 2.0.55",
+ "syn 2.0.60",
]
[[package]]
@@ -4223,17 +4213,17 @@ version = "0.6.0"
source = "git+https://github.com/GreptimeTeam/hydroflow.git?rev=ba2df44efd42b7c4d37ebefbf82e77c6f1d4cb94#ba2df44efd42b7c4d37ebefbf82e77c6f1d4cb94"
dependencies = [
"auto_impl",
- "clap 4.4.11",
+ "clap 4.5.4",
"data-encoding",
"itertools 0.10.5",
- "prettyplease 0.2.15",
+ "prettyplease 0.2.19",
"proc-macro2",
"quote",
"regex",
"serde",
"serde_json",
"slotmap",
- "syn 2.0.55",
+ "syn 2.0.60",
"webbrowser",
]
@@ -4247,7 +4237,7 @@ dependencies = [
"proc-macro-crate 1.3.1",
"proc-macro2",
"quote",
- "syn 2.0.55",
+ "syn 2.0.60",
]
[[package]]
@@ -4267,7 +4257,7 @@ dependencies = [
"httpdate",
"itoa",
"pin-project-lite",
- "socket2 0.5.5",
+ "socket2 0.5.6",
"tokio",
"tower-service",
"tracing",
@@ -4302,16 +4292,16 @@ dependencies = [
[[package]]
name = "iana-time-zone"
-version = "0.1.58"
+version = "0.1.60"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8326b86b6cff230b97d0d312a6c40a60726df3332e721f72a1b035f451663b20"
+checksum = "e7ffbb5a1b541ea2561f8c41c087286cc091e21e556a4f09a8f6cbf17b69b141"
dependencies = [
"android_system_properties",
"core-foundation-sys",
"iana-time-zone-haiku",
"js-sys",
"wasm-bindgen",
- "windows-core 0.51.1",
+ "windows-core",
]
[[package]]
@@ -4381,10 +4371,10 @@ dependencies = [
"greptime-proto",
"mockall",
"pin-project",
- "prost 0.12.3",
+ "prost 0.12.4",
"rand",
"regex",
- "regex-automata 0.4.3",
+ "regex-automata 0.4.6",
"snafu",
"tempfile",
"tokio",
@@ -4415,9 +4405,9 @@ dependencies = [
[[package]]
name = "indicatif"
-version = "0.17.7"
+version = "0.17.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fb28741c9db9a713d93deb3bb9515c20788cef5815265bee4980e87bde7e0f25"
+checksum = "763a5a8f45087d6bcea4222e7b72c291a054edf80e4ef6efd2a4979878c7bea3"
dependencies = [
"console",
"instant",
@@ -4438,7 +4428,7 @@ version = "0.11.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "321f0f839cd44a4686e9504b0a62b4d69a50b62072144c71c68f5873c167b8d9"
dependencies = [
- "ahash 0.8.6",
+ "ahash 0.8.11",
"indexmap 2.2.6",
"is-terminal",
"itoa",
@@ -4517,9 +4507,9 @@ checksum = "924df4f0e24e2e7f9cdd90babb0b96f93b20f3ecfa949ea9e6613756b8c8e1bf"
[[package]]
name = "inventory"
-version = "0.3.14"
+version = "0.3.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c8573b2b1fb643a372c73b23f4da5f888677feef3305146d68a539250a9bccc7"
+checksum = "f958d3d68f4167080a18141e10381e7634563984a537f2a49a30fd8e53ac5767"
[[package]]
name = "io-lifetimes"
@@ -4527,7 +4517,7 @@ version = "1.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2"
dependencies = [
- "hermit-abi 0.3.3",
+ "hermit-abi 0.3.9",
"libc",
"windows-sys 0.48.0",
]
@@ -4540,9 +4530,9 @@ checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3"
[[package]]
name = "iri-string"
-version = "0.7.0"
+version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "21859b667d66a4c1dacd9df0863b3efb65785474255face87f5bca39dd8407c0"
+checksum = "7f5f6c2df22c009ac44f6f1499308e7a3ac7ba42cd2378475cc691510e1eef1b"
dependencies = [
"memchr",
"serde",
@@ -4563,13 +4553,13 @@ dependencies = [
[[package]]
name = "is-terminal"
-version = "0.4.9"
+version = "0.4.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b"
+checksum = "f23ff5ef2b80d608d61efee834934d862cd92461afc0560dedf493e4c033738b"
dependencies = [
- "hermit-abi 0.3.3",
- "rustix 0.38.28",
- "windows-sys 0.48.0",
+ "hermit-abi 0.3.9",
+ "libc",
+ "windows-sys 0.52.0",
]
[[package]]
@@ -4583,27 +4573,18 @@ dependencies = [
[[package]]
name = "itertools"
-version = "0.11.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57"
-dependencies = [
- "either",
-]
-
-[[package]]
-name = "itertools"
-version = "0.12.0"
+version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "25db6b064527c5d482d0423354fcd07a89a2dfe07b67892e62411946db7f07b0"
+checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569"
dependencies = [
"either",
]
[[package]]
name = "itoa"
-version = "1.0.10"
+version = "1.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c"
+checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b"
[[package]]
name = "jni"
@@ -4629,18 +4610,18 @@ checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130"
[[package]]
name = "jobserver"
-version = "0.1.27"
+version = "0.1.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8c37f63953c4c63420ed5fd3d6d398c719489b9f872b9fa683262f8edd363c7d"
+checksum = "685a7d121ee3f65ae4fddd72b25a04bb36b6af81bc0828f7d5434c0fe60fa3a2"
dependencies = [
"libc",
]
[[package]]
name = "js-sys"
-version = "0.3.66"
+version = "0.3.69"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cee9c64da59eae3b50095c18d3e74f8b73c0b86d2792824ff01bbce68ba229ca"
+checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d"
dependencies = [
"wasm-bindgen",
]
@@ -4658,14 +4639,14 @@ dependencies = [
[[package]]
name = "jsonwebtoken"
-version = "9.2.0"
+version = "9.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5c7ea04a7c5c055c175f189b6dc6ba036fd62306b58c66c9f6389036c503a3f4"
+checksum = "b9ae10193d25051e74945f1ea2d0b42e03cc3b890f7e4cc5faa44997d808193f"
dependencies = [
- "base64 0.21.5",
+ "base64 0.21.7",
"js-sys",
"pem",
- "ring 0.17.7",
+ "ring 0.17.8",
"serde",
"serde_json",
"simple_asn1",
@@ -4673,9 +4654,9 @@ dependencies = [
[[package]]
name = "keccak"
-version = "0.1.4"
+version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8f6d5ed8676d904364de097082f4e7d240b571b67989ced0240f08b7f966f940"
+checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654"
dependencies = [
"cpufeatures",
]
@@ -4868,6 +4849,16 @@ dependencies = [
"winapi",
]
+[[package]]
+name = "libloading"
+version = "0.8.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0c2a198fb6b0eada2a8df47933734e6d35d350665a33a3593d7164fa52c75c19"
+dependencies = [
+ "cfg-if 1.0.0",
+ "windows-targets 0.52.5",
+]
+
[[package]]
name = "libm"
version = "0.2.8"
@@ -4876,13 +4867,12 @@ checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058"
[[package]]
name = "libredox"
-version = "0.0.1"
+version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "85c833ca1e66078851dba29046874e38f08b2c883700aa29a03ddd3b23814ee8"
+checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d"
dependencies = [
- "bitflags 2.4.1",
+ "bitflags 2.5.0",
"libc",
- "redox_syscall 0.4.1",
]
[[package]]
@@ -4898,9 +4888,9 @@ dependencies = [
[[package]]
name = "libz-sys"
-version = "1.1.12"
+version = "1.1.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d97137b25e321a73eef1418d1d5d2eda4d77e12813f8e6dead84bc52c5870a7b"
+checksum = "5e143b5e666b2695d28f6bca6497720813f699c9602dd7f5cac91008b8ada7f9"
dependencies = [
"cc",
"libc",
@@ -4922,9 +4912,9 @@ checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4"
[[package]]
name = "linux-raw-sys"
-version = "0.4.12"
+version = "0.4.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c4cd1a83af159aa67994778be9070f0ae1bd732942279cabb14f86f986a21456"
+checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c"
[[package]]
name = "lock_api"
@@ -4938,9 +4928,9 @@ dependencies = [
[[package]]
name = "log"
-version = "0.4.20"
+version = "0.4.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f"
+checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c"
[[package]]
name = "log-store"
@@ -5032,9 +5022,9 @@ dependencies = [
[[package]]
name = "lru"
-version = "0.12.1"
+version = "0.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2994eeba8ed550fd9b47a0b38f0242bc3344e496483c6180b69139cc2fa5d1d7"
+checksum = "d3262e75e648fce39813cb56ac41f3c3e3f65217ebf3844d818d1f9398cfb0dc"
dependencies = [
"hashbrown 0.14.3",
]
@@ -5070,9 +5060,9 @@ dependencies = [
[[package]]
name = "lz4_flex"
-version = "0.11.2"
+version = "0.11.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "912b45c753ff5f7f5208307e8ace7d2a2e30d024e26d3509f3dce546c044ce15"
+checksum = "75761162ae2b0e580d7e7c390558127e5f01b4194debd6221fd8c207fc80e3f5"
dependencies = [
"twox-hash",
]
@@ -5090,23 +5080,14 @@ dependencies = [
[[package]]
name = "mac_address"
-version = "1.1.5"
+version = "1.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4863ee94f19ed315bf3bc00299338d857d4b5bc856af375cc97d237382ad3856"
+checksum = "5aa12182b93606fff55b70a5cfe6130eaf7407c2ea4f2c2bcc8b113b67c9928f"
dependencies = [
- "nix 0.23.2",
+ "nix 0.28.0",
"winapi",
]
-[[package]]
-name = "mach2"
-version = "0.4.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "19b955cdeb2a02b9117f121ce63aa52d08ade45de53e48fe6a38b39c10f6f709"
-dependencies = [
- "libc",
-]
-
[[package]]
name = "malloc_buf"
version = "0.0.6"
@@ -5177,9 +5158,9 @@ checksum = "490cc448043f947bae3cbee9c203358d62dbee0db12107a74be5c30ccfd09771"
[[package]]
name = "memchr"
-version = "2.7.1"
+version = "2.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149"
+checksum = "6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d"
[[package]]
name = "memcomparable"
@@ -5203,9 +5184,9 @@ dependencies = [
[[package]]
name = "memmap2"
-version = "0.9.3"
+version = "0.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "45fd3a57831bf88bc63f8cebc0cf956116276e97fef3966103e96416209f7c92"
+checksum = "fe751422e4a8caa417e13c3ea66452215d7d63e19e604f4980461212f3ae1322"
dependencies = [
"libc",
]
@@ -5230,9 +5211,9 @@ dependencies = [
[[package]]
name = "memoffset"
-version = "0.9.0"
+version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c"
+checksum = "488016bfae457b036d996092f6cb448677611ce4449e970ceaf42695203f218a"
dependencies = [
"autocfg",
]
@@ -5298,7 +5279,7 @@ dependencies = [
"once_cell",
"parking_lot 0.12.1",
"prometheus",
- "prost 0.12.3",
+ "prost 0.12.4",
"rand",
"regex",
"serde",
@@ -5310,7 +5291,7 @@ dependencies = [
"table",
"tokio",
"tokio-stream",
- "toml 0.8.8",
+ "toml 0.8.12",
"tonic 0.11.0",
"tower",
"tracing",
@@ -5344,7 +5325,7 @@ dependencies = [
"api",
"aquamarine",
"async-trait",
- "base64 0.21.5",
+ "base64 0.21.7",
"common-error",
"common-macro",
"common-query",
@@ -5390,9 +5371,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a"
[[package]]
name = "miniz_oxide"
-version = "0.7.1"
+version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7"
+checksum = "9d811f3e15f28568be3407c8e7fdb6514c1cda3cb30683f15b6a1a1dc4ea14a7"
dependencies = [
"adler",
]
@@ -5452,7 +5433,7 @@ dependencies = [
"paste",
"pin-project",
"prometheus",
- "prost 0.12.3",
+ "prost 0.12.4",
"puffin",
"rand",
"regex",
@@ -5467,7 +5448,7 @@ dependencies = [
"tokio",
"tokio-stream",
"tokio-util",
- "toml 0.8.8",
+ "toml 0.8.12",
"uuid",
]
@@ -5500,21 +5481,21 @@ dependencies = [
[[package]]
name = "moka"
-version = "0.12.1"
+version = "0.12.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d8017ec3548ffe7d4cef7ac0e12b044c01164a74c0f3119420faeaf13490ad8b"
+checksum = "87bfd249f570638bfb0b4f9d258e6b8cddd2a5a7d0ed47e8bb8b176bfc0e7a17"
dependencies = [
"async-lock",
"async-trait",
"crossbeam-channel",
"crossbeam-epoch",
"crossbeam-utils",
+ "event-listener 5.3.0",
"futures-util",
"once_cell",
"parking_lot 0.12.1",
"quanta",
"rustc_version",
- "skeptic",
"smallvec",
"tagptr",
"thiserror",
@@ -5537,6 +5518,12 @@ version = "0.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a"
+[[package]]
+name = "multimap"
+version = "0.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "defc4c55412d89136f966bbb339008b474350e5e6e78d2714439c386b3137a03"
+
[[package]]
name = "mur3"
version = "0.1.0"
@@ -5549,32 +5536,32 @@ version = "0.30.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "56b0d8a0db9bf6d2213e11f2c701cb91387b0614361625ab7b9743b41aa4938f"
dependencies = [
- "darling 0.20.3",
+ "darling 0.20.8",
"heck 0.4.1",
"num-bigint",
"proc-macro-crate 1.3.1",
"proc-macro-error",
"proc-macro2",
"quote",
- "syn 2.0.55",
+ "syn 2.0.60",
"termcolor",
"thiserror",
]
[[package]]
name = "mysql-common-derive"
-version = "0.31.0"
+version = "0.31.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c60492b5eb751e55b42d716b6b26dceb66767996cd7a5560a842fbf613ca2e92"
+checksum = "afe0450cc9344afff34915f8328600ab5ae19260802a334d0f72d2d5bdda3bfe"
dependencies = [
- "darling 0.20.3",
+ "darling 0.20.8",
"heck 0.4.1",
"num-bigint",
"proc-macro-crate 3.1.0",
"proc-macro-error",
"proc-macro2",
"quote",
- "syn 2.0.55",
+ "syn 2.0.60",
"termcolor",
"thiserror",
]
@@ -5605,7 +5592,7 @@ dependencies = [
"rustls-pemfile 1.0.4",
"serde",
"serde_json",
- "socket2 0.5.5",
+ "socket2 0.5.6",
"thiserror",
"tokio",
"tokio-rustls 0.24.1",
@@ -5613,7 +5600,7 @@ dependencies = [
"twox-hash",
"url",
"webpki",
- "webpki-roots 0.25.3",
+ "webpki-roots 0.25.4",
]
[[package]]
@@ -5622,10 +5609,10 @@ version = "0.31.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "06f19e4cfa0ab5a76b627cec2d81331c49b034988eaf302c3bafeada684eadef"
dependencies = [
- "base64 0.21.5",
+ "base64 0.21.7",
"bigdecimal",
"bindgen",
- "bitflags 2.4.1",
+ "bitflags 2.5.0",
"bitvec",
"btoi",
"byteorder",
@@ -5657,14 +5644,14 @@ dependencies = [
[[package]]
name = "mysql_common"
-version = "0.32.0"
+version = "0.32.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b73aacd01475af6d2efbdf489efd60fc519515ffe94edfd74236f954d521e31b"
+checksum = "0ccdc1fe2bb3ef97e07ba4397327ed45509a1e2e499e2f8265243879cbc7313c"
dependencies = [
- "base64 0.21.5",
+ "base64 0.21.7",
"bigdecimal",
"bindgen",
- "bitflags 2.4.1",
+ "bitflags 2.5.0",
"bitvec",
"btoi",
"byteorder",
@@ -5676,7 +5663,7 @@ dependencies = [
"flate2",
"frunk",
"lazy_static",
- "mysql-common-derive 0.31.0",
+ "mysql-common-derive 0.31.1",
"num-bigint",
"num-traits",
"rand",
@@ -5692,7 +5679,7 @@ dependencies = [
"thiserror",
"time",
"uuid",
- "zstd 0.13.0",
+ "zstd 0.13.1",
]
[[package]]
@@ -5732,9 +5719,9 @@ checksum = "27b02d87554356db9e9a873add8782d4ea6e3e58ea071a9adb9a2e8ddb884a8b"
[[package]]
name = "new_debug_unreachable"
-version = "1.0.4"
+version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e4a24736216ec316047a1fc4252e27dabb04218aa4a3f37c6e7ddbf1f9782b54"
+checksum = "650eef8c711430f1a879fdd01d4745a7deea475becfb90269c06775983bbf086"
[[package]]
name = "nibble_vec"
@@ -5747,42 +5734,42 @@ dependencies = [
[[package]]
name = "nix"
-version = "0.23.2"
+version = "0.25.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8f3790c00a0150112de0f4cd161e3d7fc4b2d8a5542ffc35f099a2562aecb35c"
+checksum = "f346ff70e7dbfd675fe90590b92d59ef2de15a8779ae305ebcbfd3f0caf59be4"
dependencies = [
+ "autocfg",
"bitflags 1.3.2",
- "cc",
"cfg-if 1.0.0",
"libc",
"memoffset 0.6.5",
+ "pin-utils",
]
[[package]]
name = "nix"
-version = "0.25.1"
+version = "0.26.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f346ff70e7dbfd675fe90590b92d59ef2de15a8779ae305ebcbfd3f0caf59be4"
+checksum = "598beaf3cc6fdd9a5dfb1630c2800c7acd31df7aaf0f565796fba2b53ca1af1b"
dependencies = [
- "autocfg",
"bitflags 1.3.2",
"cfg-if 1.0.0",
"libc",
- "memoffset 0.6.5",
+ "memoffset 0.7.1",
"pin-utils",
]
[[package]]
name = "nix"
-version = "0.26.4"
+version = "0.28.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "598beaf3cc6fdd9a5dfb1630c2800c7acd31df7aaf0f565796fba2b53ca1af1b"
+checksum = "ab2156c4fce2f8df6c499cc1c763e4394b7482525bf2a9701c9d79d215f519e4"
dependencies = [
- "bitflags 1.3.2",
+ "bitflags 2.5.0",
"cfg-if 1.0.0",
+ "cfg_aliases",
"libc",
- "memoffset 0.7.1",
- "pin-utils",
+ "memoffset 0.9.1",
]
[[package]]
@@ -5807,7 +5794,7 @@ version = "6.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6205bd8bb1e454ad2e27422015fb5e4f2bcc7e08fa8f27058670d208324a4d2d"
dependencies = [
- "bitflags 2.4.1",
+ "bitflags 2.5.0",
"crossbeam-channel",
"filetime",
"fsevent-sys",
@@ -5841,9 +5828,9 @@ dependencies = [
[[package]]
name = "num"
-version = "0.4.1"
+version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b05180d69e3da0e530ba2a1dae5110317e49e3b7f3d41be227dc5f92e49ee7af"
+checksum = "3135b08af27d103b0a51f2ae0f8632117b7b185ccf931445affa8df530576a41"
dependencies = [
"num-bigint",
"num-complex",
@@ -5883,22 +5870,28 @@ dependencies = [
[[package]]
name = "num-complex"
-version = "0.4.4"
+version = "0.4.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1ba157ca0885411de85d6ca030ba7e2a83a28636056c7c699b07c8b6f7383214"
+checksum = "23c6602fda94a57c990fe0df199a035d83576b496aa29f4e634a8ac6004e68a6"
dependencies = [
"num-traits",
]
+[[package]]
+name = "num-conv"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9"
+
[[package]]
name = "num-derive"
-version = "0.4.1"
+version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cfb77679af88f8b125209d354a202862602672222e7f2313fdd6dc349bad4712"
+checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.55",
+ "syn 2.0.60",
]
[[package]]
@@ -5913,19 +5906,18 @@ dependencies = [
[[package]]
name = "num-integer"
-version = "0.1.45"
+version = "0.1.46"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9"
+checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f"
dependencies = [
- "autocfg",
"num-traits",
]
[[package]]
name = "num-iter"
-version = "0.1.43"
+version = "0.1.44"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252"
+checksum = "d869c01cc0c455284163fd0092f1f93835385ccab5a98a0dcc497b2f8bf055a9"
dependencies = [
"autocfg",
"num-integer",
@@ -5946,9 +5938,9 @@ dependencies = [
[[package]]
name = "num-traits"
-version = "0.2.17"
+version = "0.2.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c"
+checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a"
dependencies = [
"autocfg",
"libm",
@@ -5960,7 +5952,7 @@ version = "1.16.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43"
dependencies = [
- "hermit-abi 0.3.3",
+ "hermit-abi 0.3.9",
"libc",
]
@@ -6048,7 +6040,7 @@ dependencies = [
"chrono",
"futures",
"humantime",
- "itertools 0.12.0",
+ "itertools 0.12.1",
"parking_lot 0.12.1",
"percent-encoding",
"snafu",
@@ -6079,7 +6071,7 @@ dependencies = [
"anyhow",
"async-trait",
"backon",
- "base64 0.21.5",
+ "base64 0.21.7",
"bytes",
"chrono",
"flagset",
@@ -6121,7 +6113,7 @@ dependencies = [
"async-trait",
"byteorder",
"chrono",
- "mysql_common 0.32.0",
+ "mysql_common 0.32.2",
"nom",
"pin-project-lite",
"tokio",
@@ -6177,7 +6169,7 @@ dependencies = [
"opentelemetry 0.21.0",
"opentelemetry-proto 0.4.0",
"opentelemetry-semantic-conventions",
- "opentelemetry_sdk 0.21.1",
+ "opentelemetry_sdk 0.21.2",
"prost 0.11.9",
"thiserror",
"tokio",
@@ -6191,7 +6183,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a2e155ce5cc812ea3d1dffbd1539aed653de4bf4882d60e6e04dcf0901d674e1"
dependencies = [
"opentelemetry 0.21.0",
- "opentelemetry_sdk 0.21.1",
+ "opentelemetry_sdk 0.21.2",
"prost 0.11.9",
"tonic 0.9.2",
]
@@ -6204,7 +6196,7 @@ checksum = "3a8fddc9b68f5b80dae9d6f510b88e02396f006ad48cac349411fbecc80caae4"
dependencies = [
"opentelemetry 0.22.0",
"opentelemetry_sdk 0.22.1",
- "prost 0.12.3",
+ "prost 0.12.4",
"tonic 0.11.0",
]
@@ -6219,9 +6211,9 @@ dependencies = [
[[package]]
name = "opentelemetry_sdk"
-version = "0.21.1"
+version = "0.21.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "968ba3f2ca03e90e5187f5e4f46c791ef7f2c163ae87789c8ce5f5ca3b7b7de5"
+checksum = "2f16aec8a98a457a52664d69e0091bac3a0abd18ead9b641cb00202ba4e0efe4"
dependencies = [
"async-trait",
"crossbeam-channel",
@@ -6379,9 +6371,9 @@ dependencies = [
[[package]]
name = "ordered-multimap"
-version = "0.7.1"
+version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a4d6a8c22fc714f0c2373e6091bf6f5e9b37b1bc0b1184874b7e0a4e303d318f"
+checksum = "49203cdcae0030493bad186b28da2fa25645fa276a51b6fec8010d281e02ef79"
dependencies = [
"dlv-list 0.5.2",
"hashbrown 0.14.3",
@@ -6389,13 +6381,13 @@ dependencies = [
[[package]]
name = "os_info"
-version = "3.7.0"
+version = "3.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "006e42d5b888366f1880eda20371fedde764ed2213dc8496f49622fa0c99cd5e"
+checksum = "ae99c7fa6dd38c7cafe1ec085e804f8f555a2f8659b0dbe03f1f9963a9b51092"
dependencies = [
"log",
"serde",
- "winapi",
+ "windows-sys 0.52.0",
]
[[package]]
@@ -6439,6 +6431,12 @@ dependencies = [
"winapi",
]
+[[package]]
+name = "parking"
+version = "2.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae"
+
[[package]]
name = "parking_lot"
version = "0.11.2"
@@ -6496,7 +6494,7 @@ version = "51.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "096795d4f47f65fd3ee1ec5a98b77ab26d602f2cc785b0e4be5443add17ecc32"
dependencies = [
- "ahash 0.8.6",
+ "ahash 0.8.11",
"arrow-array",
"arrow-buffer",
"arrow-cast",
@@ -6505,14 +6503,14 @@ dependencies = [
"arrow-schema",
"arrow-select",
"base64 0.22.0",
- "brotli",
+ "brotli 3.5.0",
"bytes",
"chrono",
"flate2",
"futures",
- "half 2.3.1",
+ "half 2.4.1",
"hashbrown 0.14.3",
- "lz4_flex 0.11.2",
+ "lz4_flex 0.11.3",
"num",
"num-bigint",
"object_store",
@@ -6522,7 +6520,7 @@ dependencies = [
"thrift",
"tokio",
"twox-hash",
- "zstd 0.13.0",
+ "zstd 0.13.1",
]
[[package]]
@@ -6585,19 +6583,13 @@ dependencies = [
"hmac",
]
-[[package]]
-name = "peeking_take_while"
-version = "0.1.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099"
-
[[package]]
name = "pem"
-version = "3.0.3"
+version = "3.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1b8fcc794035347fb64beda2d3b462595dd2753e3f268d89c5aae77e8cf2c310"
+checksum = "8e459365e590736a54c3fa561947c84837534b8e9af6fc5bf781307e82658fae"
dependencies = [
- "base64 0.21.5",
+ "base64 0.22.0",
"serde",
]
@@ -6633,9 +6625,9 @@ checksum = "df202b0b0f5b8e389955afd5f27b007b00fb948162953f1db9c70d2c7e3157d7"
[[package]]
name = "pest"
-version = "2.7.5"
+version = "2.7.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ae9cee2a55a544be8b89dc6848072af97a20f2422603c10865be2a42b580fff5"
+checksum = "311fb059dee1a7b802f036316d790138c613a4e8b180c822e3925a662e9f0c95"
dependencies = [
"memchr",
"thiserror",
@@ -6644,9 +6636,9 @@ dependencies = [
[[package]]
name = "pest_derive"
-version = "2.7.5"
+version = "2.7.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "81d78524685f5ef2a3b3bd1cafbc9fcabb036253d9b1463e726a91cd16e2dfc2"
+checksum = "f73541b156d32197eecda1a4014d7f868fd2bcb3c550d5386087cfba442bf69c"
dependencies = [
"pest",
"pest_generator",
@@ -6654,22 +6646,22 @@ dependencies = [
[[package]]
name = "pest_generator"
-version = "2.7.5"
+version = "2.7.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "68bd1206e71118b5356dae5ddc61c8b11e28b09ef6a31acbd15ea48a28e0c227"
+checksum = "c35eeed0a3fab112f75165fdc026b3913f4183133f19b49be773ac9ea966e8bd"
dependencies = [
"pest",
"pest_meta",
"proc-macro2",
"quote",
- "syn 2.0.55",
+ "syn 2.0.60",
]
[[package]]
name = "pest_meta"
-version = "2.7.5"
+version = "2.7.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7c747191d4ad9e4a4ab9c8798f1e82a39affe7ef9648390b7e5548d18e099de6"
+checksum = "2adbf29bb9776f28caece835398781ab24435585fe0d4dc1374a61db5accedca"
dependencies = [
"once_cell",
"pest",
@@ -6703,7 +6695,7 @@ dependencies = [
"md5",
"postgres-types",
"rand",
- "ring 0.17.7",
+ "ring 0.17.8",
"stringprep",
"thiserror",
"time",
@@ -6762,29 +6754,29 @@ dependencies = [
[[package]]
name = "pin-project"
-version = "1.1.3"
+version = "1.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fda4ed1c6c173e3fc7a83629421152e01d7b1f9b7f65fb301e490e8cfc656422"
+checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3"
dependencies = [
"pin-project-internal",
]
[[package]]
name = "pin-project-internal"
-version = "1.1.3"
+version = "1.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405"
+checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.55",
+ "syn 2.0.60",
]
[[package]]
name = "pin-project-lite"
-version = "0.2.13"
+version = "0.2.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58"
+checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02"
[[package]]
name = "pin-utils"
@@ -6809,7 +6801,7 @@ version = "0.7.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c8ffb9f10fa047879315e6625af03c164b16962a5368d724ed16323b68ace47f"
dependencies = [
- "der 0.7.8",
+ "der 0.7.9",
"pkcs8 0.10.2",
"spki 0.7.3",
]
@@ -6822,7 +6814,7 @@ checksum = "e847e2c91a18bfa887dd028ec33f2fe6f25db77db3619024764914affe8b69a6"
dependencies = [
"aes",
"cbc",
- "der 0.7.8",
+ "der 0.7.9",
"pbkdf2",
"scrypt",
"sha2",
@@ -6846,7 +6838,7 @@ version = "0.10.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7"
dependencies = [
- "der 0.7.8",
+ "der 0.7.9",
"pkcs5",
"rand_core",
"spki 0.7.3",
@@ -6854,9 +6846,9 @@ dependencies = [
[[package]]
name = "pkg-config"
-version = "0.3.28"
+version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "69d3587f8a9e599cc7ec2c00e331f71c4e69a5f9a4b8a6efd5b07466b9736f9a"
+checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec"
[[package]]
name = "plotters"
@@ -6921,7 +6913,7 @@ version = "0.6.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "49b6c5ef183cd3ab4ba005f1ca64c21e8bd97ce4699cfea9e8d9a2c4958ca520"
dependencies = [
- "base64 0.21.5",
+ "base64 0.21.7",
"byteorder",
"bytes",
"fallible-iterator",
@@ -6967,9 +6959,9 @@ dependencies = [
"nix 0.26.4",
"once_cell",
"parking_lot 0.12.1",
- "prost 0.12.3",
- "prost-build 0.12.3",
- "prost-derive 0.12.3",
+ "prost 0.12.4",
+ "prost-build 0.12.4",
+ "prost-derive 0.12.4",
"protobuf",
"sha2",
"smallvec",
@@ -7042,12 +7034,12 @@ dependencies = [
[[package]]
name = "prettyplease"
-version = "0.2.15"
+version = "0.2.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ae005bd773ab59b4725093fd7df83fd7892f7d8eafb48dbd7de6e024e4215f9d"
+checksum = "5ac2cf0f2e4f42b49f5ffd07dae8d746508ef7526c13940e5f524012ae6c6550"
dependencies = [
"proc-macro2",
- "syn 2.0.55",
+ "syn 2.0.60",
]
[[package]]
@@ -7060,22 +7052,13 @@ dependencies = [
"toml_edit 0.19.15",
]
-[[package]]
-name = "proc-macro-crate"
-version = "2.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7e8366a6159044a37876a2b9817124296703c586a5c92e2c53751fa06d8d43e8"
-dependencies = [
- "toml_edit 0.20.7",
-]
-
[[package]]
name = "proc-macro-crate"
version = "3.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6d37c51ca738a55da99dc0c4a34860fd675453b8b36209178c2249bb13651284"
dependencies = [
- "toml_edit 0.21.0",
+ "toml_edit 0.21.1",
]
[[package]]
@@ -7104,9 +7087,9 @@ dependencies = [
[[package]]
name = "proc-macro2"
-version = "1.0.79"
+version = "1.0.81"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e835ff2298f5721608eb1a980ecaee1aef2c132bf95ecc026a11b7bf3c01c02e"
+checksum = "3d1597b0c024618f09a9c3b8655b7e430397a36d23fdafec26d6965e9eec3eba"
dependencies = [
"unicode-ident",
]
@@ -7157,7 +7140,7 @@ dependencies = [
name = "promql"
version = "0.7.2"
dependencies = [
- "ahash 0.8.6",
+ "ahash 0.8.11",
"async-recursion",
"async-trait",
"bytemuck",
@@ -7178,7 +7161,7 @@ dependencies = [
"lazy_static",
"prometheus",
"promql-parser",
- "prost 0.12.3",
+ "prost 0.12.4",
"query",
"session",
"snafu",
@@ -7211,12 +7194,12 @@ dependencies = [
[[package]]
name = "prost"
-version = "0.12.3"
+version = "0.12.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "146c289cda302b98a28d40c8b3b90498d6e526dd24ac2ecea73e4e491685b94a"
+checksum = "d0f5d036824e4761737860779c906171497f6d55681139d8312388f8fe398922"
dependencies = [
"bytes",
- "prost-derive 0.12.3",
+ "prost-derive 0.12.4",
]
[[package]]
@@ -7230,7 +7213,7 @@ dependencies = [
"itertools 0.10.5",
"lazy_static",
"log",
- "multimap",
+ "multimap 0.8.3",
"petgraph",
"prettyplease 0.1.25",
"prost 0.11.9",
@@ -7243,24 +7226,23 @@ dependencies = [
[[package]]
name = "prost-build"
-version = "0.12.3"
+version = "0.12.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c55e02e35260070b6f716a2423c2ff1c3bb1642ddca6f99e1f26d06268a0e2d2"
+checksum = "80b776a1b2dc779f5ee0641f8ade0125bc1298dd41a9a0c16d8bd57b42d222b1"
dependencies = [
"bytes",
- "heck 0.4.1",
- "itertools 0.11.0",
+ "heck 0.5.0",
+ "itertools 0.12.1",
"log",
- "multimap",
+ "multimap 0.10.0",
"once_cell",
"petgraph",
- "prettyplease 0.2.15",
- "prost 0.12.3",
- "prost-types 0.12.3",
+ "prettyplease 0.2.19",
+ "prost 0.12.4",
+ "prost-types 0.12.4",
"regex",
- "syn 2.0.55",
+ "syn 2.0.60",
"tempfile",
- "which",
]
[[package]]
@@ -7278,15 +7260,15 @@ dependencies = [
[[package]]
name = "prost-derive"
-version = "0.12.3"
+version = "0.12.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "efb6c9a1dd1def8e2124d17e83a20af56f1570d6c2d2bd9e266ccb768df3840e"
+checksum = "19de2de2a00075bf566bee3bd4db014b11587e84184d3f7a791bc17f1a8e9e48"
dependencies = [
"anyhow",
- "itertools 0.11.0",
+ "itertools 0.12.1",
"proc-macro2",
"quote",
- "syn 2.0.55",
+ "syn 2.0.60",
]
[[package]]
@@ -7300,11 +7282,11 @@ dependencies = [
[[package]]
name = "prost-types"
-version = "0.12.3"
+version = "0.12.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "193898f59edcf43c26227dcd4c8427f00d99d61e95dcde58dabd49fa291d470e"
+checksum = "3235c33eb02c1f1e212abdbe34c78b264b038fb58ca612664343271e36e55ffe"
dependencies = [
- "prost 0.12.3",
+ "prost 0.12.4",
]
[[package]]
@@ -7372,7 +7354,7 @@ name = "puffin"
version = "0.7.2"
dependencies = [
"async-trait",
- "bitflags 2.4.1",
+ "bitflags 2.5.0",
"common-error",
"common-macro",
"derive_builder 0.12.0",
@@ -7385,17 +7367,6 @@ dependencies = [
"tokio-util",
]
-[[package]]
-name = "pulldown-cmark"
-version = "0.9.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "77a1a2f1f0a7ecff9c31abbe177637be0e97a0aef46cf8738ece09327985d998"
-dependencies = [
- "bitflags 1.3.2",
- "memchr",
- "unicase",
-]
-
[[package]]
name = "puruspe"
version = "0.1.5"
@@ -7420,7 +7391,7 @@ dependencies = [
"cfg-if 1.0.0",
"indoc",
"libc",
- "memoffset 0.9.0",
+ "memoffset 0.9.1",
"parking_lot 0.12.1",
"portable-atomic",
"pyo3-build-config",
@@ -7458,7 +7429,7 @@ dependencies = [
"proc-macro2",
"pyo3-macros-backend",
"quote",
- "syn 2.0.55",
+ "syn 2.0.60",
]
[[package]]
@@ -7471,18 +7442,17 @@ dependencies = [
"proc-macro2",
"pyo3-build-config",
"quote",
- "syn 2.0.55",
+ "syn 2.0.60",
]
[[package]]
name = "quanta"
-version = "0.11.1"
+version = "0.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a17e662a7a8291a865152364c20c7abc5e60486ab2001e8ec10b24862de0b9ab"
+checksum = "8e5167a477619228a0b284fac2674e3c388cba90631d7b7de620e6f1fcd08da5"
dependencies = [
"crossbeam-utils",
"libc",
- "mach2",
"once_cell",
"raw-cpuid",
"wasi",
@@ -7494,7 +7464,7 @@ dependencies = [
name = "query"
version = "0.7.2"
dependencies = [
- "ahash 0.8.6",
+ "ahash 0.8.11",
"api",
"approx_eq",
"arc-swap",
@@ -7576,9 +7546,9 @@ dependencies = [
[[package]]
name = "quote"
-version = "1.0.35"
+version = "1.0.36"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef"
+checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7"
dependencies = [
"proc-macro2",
]
@@ -7676,11 +7646,11 @@ dependencies = [
[[package]]
name = "raw-cpuid"
-version = "10.7.0"
+version = "11.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6c297679cb867470fa8c9f67dbba74a78d78e3e98d7cf2b08d6d71540f797332"
+checksum = "9d86a7c4638d42c44551f4791a20e687dbb4c3de1f33c43dd71e355cd429def1"
dependencies = [
- "bitflags 1.3.2",
+ "bitflags 2.5.0",
]
[[package]]
@@ -7697,9 +7667,9 @@ checksum = "60a357793950651c4ed0f3f52338f53b2f809f32d83a07f72909fa13e4c6c1e3"
[[package]]
name = "rayon"
-version = "1.8.0"
+version = "1.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9c27db03db7734835b3f53954b534c91069375ce6ccaa2e065441e07d9b6cdb1"
+checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa"
dependencies = [
"either",
"rayon-core",
@@ -7707,9 +7677,9 @@ dependencies = [
[[package]]
name = "rayon-core"
-version = "1.12.0"
+version = "1.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5ce3fb6ad83f861aac485e76e1985cd109d9a3713802152be56c3b1f0e0658ed"
+checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2"
dependencies = [
"crossbeam-deque",
"crossbeam-utils",
@@ -7735,9 +7705,9 @@ dependencies = [
[[package]]
name = "redox_users"
-version = "0.4.4"
+version = "0.4.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a18479200779601e498ada4e8c1e1f50e3ee19deb0259c25825a98b5603b2cb4"
+checksum = "bd283d9651eeda4b2a83a43c1c91b266c40fd76ecd39a50a8c630ae69dc72891"
dependencies = [
"getrandom",
"libredox",
@@ -7746,34 +7716,34 @@ dependencies = [
[[package]]
name = "ref-cast"
-version = "1.0.21"
+version = "1.0.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "53313ec9f12686aeeffb43462c3ac77aa25f590a5f630eb2cde0de59417b29c7"
+checksum = "c4846d4c50d1721b1a3bef8af76924eef20d5e723647333798c1b519b3a9473f"
dependencies = [
"ref-cast-impl",
]
[[package]]
name = "ref-cast-impl"
-version = "1.0.21"
+version = "1.0.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2566c4bf6845f2c2e83b27043c3f5dfcd5ba8f2937d6c00dc009bfb51a079dc4"
+checksum = "5fddb4f8d99b0a2ebafc65a87a69a7b9875e4b1ae1f00db265d300ef7f28bccc"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.55",
+ "syn 2.0.60",
]
[[package]]
name = "regex"
-version = "1.10.2"
+version = "1.10.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "380b951a9c5e80ddfd6136919eef32310721aa4aacd4889a8d39124b026ab343"
+checksum = "c117dbdfde9c8308975b6a18d71f3f385c89461f7b3fb054288ecf2a2058ba4c"
dependencies = [
"aho-corasick",
"memchr",
- "regex-automata 0.4.3",
- "regex-syntax 0.8.2",
+ "regex-automata 0.4.6",
+ "regex-syntax 0.8.3",
]
[[package]]
@@ -7787,13 +7757,13 @@ dependencies = [
[[package]]
name = "regex-automata"
-version = "0.4.3"
+version = "0.4.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5f804c7828047e88b2d32e2d7fe5a105da8ee3264f01902f796c8e067dc2483f"
+checksum = "86b83b8b9847f9bf95ef68afb0b8e6cdb80f498442f5179a29fad448fcc1eaea"
dependencies = [
"aho-corasick",
"memchr",
- "regex-syntax 0.8.2",
+ "regex-syntax 0.8.3",
]
[[package]]
@@ -7804,9 +7774,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1"
[[package]]
name = "regex-syntax"
-version = "0.8.2"
+version = "0.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f"
+checksum = "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56"
[[package]]
name = "regress"
@@ -7830,9 +7800,9 @@ dependencies = [
[[package]]
name = "rend"
-version = "0.4.1"
+version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a2571463863a6bd50c32f94402933f03457a3fbaf697a707c5be741e459f08fd"
+checksum = "71fe3824f5629716b1589be05dacd749f6aa084c87e00e016714a8cdfccc997c"
dependencies = [
"bytecheck",
]
@@ -7845,7 +7815,7 @@ checksum = "43e319d9de9ff4d941abf4ac718897118b0fe04577ea3f8e0f5788971784eef5"
dependencies = [
"anyhow",
"async-trait",
- "base64 0.21.5",
+ "base64 0.21.7",
"chrono",
"form_urlencoded",
"getrandom",
@@ -7870,11 +7840,11 @@ dependencies = [
[[package]]
name = "reqwest"
-version = "0.11.23"
+version = "0.11.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "37b1ae8d9ac08420c66222fb9096fc5de435c3c48542bc5336c51892cffafb41"
+checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62"
dependencies = [
- "base64 0.21.5",
+ "base64 0.21.7",
"bytes",
"encoding_rs",
"futures-core",
@@ -7898,6 +7868,7 @@ dependencies = [
"serde",
"serde_json",
"serde_urlencoded",
+ "sync_wrapper",
"system-configuration",
"tokio",
"tokio-rustls 0.24.1",
@@ -7972,23 +7943,24 @@ dependencies = [
[[package]]
name = "ring"
-version = "0.17.7"
+version = "0.17.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "688c63d65483050968b2a8937f7995f443e27041a0f7700aa59b0822aedebb74"
+checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d"
dependencies = [
"cc",
+ "cfg-if 1.0.0",
"getrandom",
"libc",
"spin 0.9.8",
"untrusted 0.9.0",
- "windows-sys 0.48.0",
+ "windows-sys 0.52.0",
]
[[package]]
name = "rkyv"
-version = "0.7.43"
+version = "0.7.44"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "527a97cdfef66f65998b5f3b637c26f5a5ec09cc52a3f9932313ac645f4190f5"
+checksum = "5cba464629b3394fc4dbc6f940ff8f5b4ff5c7aef40f29166fd4ad12acbc99c0"
dependencies = [
"bitvec",
"bytecheck",
@@ -8004,9 +7976,9 @@ dependencies = [
[[package]]
name = "rkyv_derive"
-version = "0.7.43"
+version = "0.7.44"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b5c462a1328c8e67e4d6dbad1eb0355dd43e8ab432c6e227a43657f16ade5033"
+checksum = "a7dddfff8de25e6f62b9d64e6e432bf1c6736c57d20323e15ee10435fbda7c65"
dependencies = [
"proc-macro2",
"quote",
@@ -8147,7 +8119,7 @@ dependencies = [
"proc-macro2",
"quote",
"rust-embed-utils",
- "syn 2.0.55",
+ "syn 2.0.60",
"walkdir",
]
@@ -8178,7 +8150,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3e0698206bcb8882bf2a9ecb4c1e7785db57ff052297085a6efd4fe42302068a"
dependencies = [
"cfg-if 1.0.0",
- "ordered-multimap 0.7.1",
+ "ordered-multimap 0.7.3",
]
[[package]]
@@ -8232,9 +8204,9 @@ dependencies = [
[[package]]
name = "rust_decimal"
-version = "1.33.1"
+version = "1.35.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "06676aec5ccb8fc1da723cc8c0f9a46549f21ebb8753d3915c6c41db1e7f1dc4"
+checksum = "1790d1c4c0ca81211399e0e0af16333276f375209e71a37b67698a373db5b47a"
dependencies = [
"arrayvec",
"borsh",
@@ -8283,14 +8255,14 @@ dependencies = [
[[package]]
name = "rustix"
-version = "0.38.28"
+version = "0.38.32"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "72e572a5e8ca657d7366229cdde4bd14c4eb5499a9573d4d366fe1b599daa316"
+checksum = "65e04861e65f21776e67888bfbea442b3642beaa0138fdb1dd7a84a52dffdb89"
dependencies = [
- "bitflags 2.4.1",
+ "bitflags 2.5.0",
"errno",
"libc",
- "linux-raw-sys 0.4.12",
+ "linux-raw-sys 0.4.13",
"windows-sys 0.52.0",
]
@@ -8313,21 +8285,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f9d5a6813c0759e4609cd494e8e725babae6a2ca7b62a5536a13daaec6fcb7ba"
dependencies = [
"log",
- "ring 0.17.7",
+ "ring 0.17.8",
"rustls-webpki 0.101.7",
"sct",
]
[[package]]
name = "rustls"
-version = "0.22.1"
+version = "0.22.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fe6b63262c9fcac8659abfaa96cac103d28166d3ff3eaf8f412e19f3ae9e5a48"
+checksum = "99008d7ad0bbbea527ec27bddbc0e432c5b87d8175178cee68d2eec9c4a1813c"
dependencies = [
"log",
- "ring 0.17.7",
+ "ring 0.17.8",
"rustls-pki-types",
- "rustls-webpki 0.102.0",
+ "rustls-webpki 0.102.2",
"subtle",
"zeroize",
]
@@ -8350,24 +8322,24 @@ version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c"
dependencies = [
- "base64 0.21.5",
+ "base64 0.21.7",
]
[[package]]
name = "rustls-pemfile"
-version = "2.0.0"
+version = "2.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "35e4980fa29e4c4b212ffb3db068a564cbf560e51d3944b7c88bd8bf5bec64f4"
+checksum = "29993a25686778eb88d4189742cd713c9bce943bc54251a33509dc63cbacf73d"
dependencies = [
- "base64 0.21.5",
+ "base64 0.22.0",
"rustls-pki-types",
]
[[package]]
name = "rustls-pki-types"
-version = "1.1.0"
+version = "1.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9e9d979b3ce68192e42760c7810125eb6cf2ea10efae545a156063e61f314e2a"
+checksum = "ecd36cc4259e3e4514335c4a138c6b43171a8d61d8f5c9348f9fc7529416f247"
[[package]]
name = "rustls-webpki"
@@ -8375,17 +8347,17 @@ version = "0.101.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765"
dependencies = [
- "ring 0.17.7",
+ "ring 0.17.8",
"untrusted 0.9.0",
]
[[package]]
name = "rustls-webpki"
-version = "0.102.0"
+version = "0.102.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "de2635c8bc2b88d367767c5de8ea1d8db9af3f6219eba28442242d9ab81d1b89"
+checksum = "faaa0a62740bedb9b2ef5afa303da42764c012f743917351dc9a237ea1663610"
dependencies = [
- "ring 0.17.7",
+ "ring 0.17.8",
"rustls-pki-types",
"untrusted 0.9.0",
]
@@ -8405,7 +8377,7 @@ name = "rustpython-codegen"
version = "0.2.0"
source = "git+https://github.com/discord9/RustPython?rev=9ed5137412#9ed51374125b5f1a9e5cee5dd7e27023b8591f1e"
dependencies = [
- "ahash 0.7.7",
+ "ahash 0.7.8",
"bitflags 1.3.2",
"indexmap 1.9.3",
"itertools 0.10.5",
@@ -8506,7 +8478,7 @@ name = "rustpython-parser"
version = "0.2.0"
source = "git+https://github.com/discord9/RustPython?rev=9ed5137412#9ed51374125b5f1a9e5cee5dd7e27023b8591f1e"
dependencies = [
- "ahash 0.7.7",
+ "ahash 0.7.8",
"anyhow",
"itertools 0.10.5",
"lalrpop",
@@ -8541,7 +8513,7 @@ version = "0.2.0"
source = "git+https://github.com/discord9/RustPython?rev=9ed5137412#9ed51374125b5f1a9e5cee5dd7e27023b8591f1e"
dependencies = [
"adler32",
- "ahash 0.7.7",
+ "ahash 0.7.8",
"ascii",
"base64 0.13.1",
"blake2",
@@ -8607,7 +8579,7 @@ name = "rustpython-vm"
version = "0.2.0"
source = "git+https://github.com/discord9/RustPython?rev=9ed5137412#9ed51374125b5f1a9e5cee5dd7e27023b8591f1e"
dependencies = [
- "ahash 0.7.7",
+ "ahash 0.7.8",
"ascii",
"atty",
"bitflags 1.3.2",
@@ -8619,7 +8591,7 @@ dependencies = [
"exitcode",
"getrandom",
"glob",
- "half 1.8.2",
+ "half 1.8.3",
"hex",
"indexmap 1.9.3",
"is-macro",
@@ -8675,9 +8647,9 @@ dependencies = [
[[package]]
name = "rustversion"
-version = "1.0.14"
+version = "1.0.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4"
+checksum = "80af6f9131f277a45a3fba6ce8e2258037bb0477a67e610d3c1fe046ab31de47"
[[package]]
name = "rustyline"
@@ -8727,9 +8699,9 @@ dependencies = [
[[package]]
name = "ryu"
-version = "1.0.16"
+version = "1.0.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f98d2aa92eebf49b69786be48e4477826b256916e84a57ff2a4f21923b48eb4c"
+checksum = "e86697c916019a8588c99b5fac3cead74ec0b4b819707a682fd4d23fa0ce1ba1"
[[package]]
name = "safe-lock"
@@ -8757,18 +8729,18 @@ dependencies = [
[[package]]
name = "safe-regex"
-version = "0.2.5"
+version = "0.2.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a15289bf322e0673d52756a18194167f2378ec1a15fe884af6e2d2cb934822b0"
+checksum = "e6ab4bc484ef480a9ce79b381efd7b6767700f514d47bc599036e9d6f7f3c49d"
dependencies = [
"safe-regex-macro",
]
[[package]]
name = "safe-regex-compiler"
-version = "0.2.5"
+version = "0.2.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fba76fae590a2aa665279deb1f57b5098cbace01a0c5e60e262fcf55f7c51542"
+checksum = "6d71f8c78bffb07962595e1bfa5ed11d24dd855eedc50b6a735f5ef648ce621b"
dependencies = [
"safe-proc-macro2",
"safe-quote",
@@ -8776,9 +8748,9 @@ dependencies = [
[[package]]
name = "safe-regex-macro"
-version = "0.2.5"
+version = "0.2.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "96c2e96b5c03f158d1b16ba79af515137795f4ad4e8de3f790518aae91f1d127"
+checksum = "0909ab4b77511df24201cd66541d6a028887c77ecc065f277c68a12a663274ef"
dependencies = [
"safe-proc-macro2",
"safe-regex-compiler",
@@ -8819,11 +8791,11 @@ checksum = "ece8e78b2f38ec51c51f5d475df0a7187ba5111b2a28bdc761ee05b075d40a71"
[[package]]
name = "schannel"
-version = "0.1.22"
+version = "0.1.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88"
+checksum = "fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534"
dependencies = [
- "windows-sys 0.48.0",
+ "windows-sys 0.52.0",
]
[[package]]
@@ -8930,7 +8902,7 @@ version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414"
dependencies = [
- "ring 0.17.7",
+ "ring 0.17.8",
"untrusted 0.9.0",
]
@@ -8949,7 +8921,7 @@ dependencies = [
"heck 0.4.1",
"proc-macro2",
"quote",
- "syn 2.0.55",
+ "syn 2.0.60",
]
[[package]]
@@ -8964,9 +8936,9 @@ dependencies = [
[[package]]
name = "security-framework"
-version = "2.9.2"
+version = "2.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de"
+checksum = "770452e37cad93e0a50d5abc3990d2bc351c36d0328f86cefec2f2fb206eaef6"
dependencies = [
"bitflags 1.3.2",
"core-foundation",
@@ -8977,9 +8949,9 @@ dependencies = [
[[package]]
name = "security-framework-sys"
-version = "2.9.1"
+version = "2.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a"
+checksum = "41f3cc463c0ef97e11c3461a9d3787412d30e8e7eb907c79180c4a57bf7c04ef"
dependencies = [
"core-foundation-sys",
"libc",
@@ -8990,9 +8962,6 @@ name = "semver"
version = "1.0.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "92d43fe69e652f3df9bdc2b85b2854a0825b86e4fb76bc44d945137d053639ca"
-dependencies = [
- "serde",
-]
[[package]]
name = "seq-macro"
@@ -9002,22 +8971,22 @@ checksum = "a3f0bf26fd526d2a95683cd0f87bf103b8539e2ca1ef48ce002d67aad59aa0b4"
[[package]]
name = "serde"
-version = "1.0.197"
+version = "1.0.198"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3fb1c873e1b9b056a4dc4c0c198b24c3ffa059243875552b2bd0933b1aee4ce2"
+checksum = "9846a40c979031340571da2545a4e5b7c4163bdae79b301d5f86d03979451fcc"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
-version = "1.0.197"
+version = "1.0.198"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b"
+checksum = "e88edab869b01783ba905e7d0153f9fc1a6505a96e4ad3018011eedb838566d9"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.55",
+ "syn 2.0.60",
]
[[package]]
@@ -9033,9 +9002,9 @@ dependencies = [
[[package]]
name = "serde_json"
-version = "1.0.115"
+version = "1.0.116"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "12dc5c46daa8e9fdf4f5e71b6cf9a53f2487da0e86e55808e2d35539666497dd"
+checksum = "3e17db7126d17feb94eb3fad46bf1a96b034e8aacbc2e775fe81505f8b0b2813"
dependencies = [
"indexmap 2.2.6",
"itoa",
@@ -9045,9 +9014,9 @@ dependencies = [
[[package]]
name = "serde_path_to_error"
-version = "0.1.14"
+version = "0.1.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4beec8bce849d58d06238cb50db2e1c417cfeafa4c63f692b15c82b7c80f8335"
+checksum = "af99884400da37c88f5e9146b7f1fd0fbcae8f6eec4e9da38b67d05486f814a6"
dependencies = [
"itoa",
"serde",
@@ -9055,13 +9024,13 @@ dependencies = [
[[package]]
name = "serde_repr"
-version = "0.1.17"
+version = "0.1.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3081f5ffbb02284dda55132aa26daecedd7372a42417bbbab6f14ab7d6bb9145"
+checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.55",
+ "syn 2.0.60",
]
[[package]]
@@ -9082,7 +9051,7 @@ dependencies = [
"proc-macro2",
"quote",
"serde",
- "syn 2.0.55",
+ "syn 2.0.60",
]
[[package]]
@@ -9099,16 +9068,17 @@ dependencies = [
[[package]]
name = "serde_with"
-version = "3.4.0"
+version = "3.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "64cd236ccc1b7a29e7e2739f27c0b2dd199804abc4290e32f59f3b68d6405c23"
+checksum = "ee80b0e361bbf88fd2f6e242ccd19cfda072cb0faa6ae694ecee08199938569a"
dependencies = [
- "base64 0.21.5",
+ "base64 0.21.7",
"chrono",
"hex",
"indexmap 1.9.3",
"indexmap 2.2.6",
"serde",
+ "serde_derive",
"serde_json",
"serde_with_macros",
"time",
@@ -9116,14 +9086,14 @@ dependencies = [
[[package]]
name = "serde_with_macros"
-version = "3.4.0"
+version = "3.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "93634eb5f75a2323b16de4748022ac4297f9e76b6dced2be287a099f41b5e788"
+checksum = "6561dc161a9224638a31d876ccdfefbc1df91d3f3a8342eddb35f055d48c7655"
dependencies = [
- "darling 0.20.3",
+ "darling 0.20.8",
"proc-macro2",
"quote",
- "syn 2.0.55",
+ "syn 2.0.60",
]
[[package]]
@@ -9153,7 +9123,7 @@ dependencies = [
"auth",
"axum",
"axum-macros",
- "base64 0.21.5",
+ "base64 0.21.7",
"bytes",
"catalog",
"chrono",
@@ -9206,14 +9176,14 @@ dependencies = [
"pprof",
"prometheus",
"promql-parser",
- "prost 0.12.3",
+ "prost 0.12.4",
"query",
"rand",
"regex",
"reqwest",
"rust-embed",
- "rustls 0.22.1",
- "rustls-pemfile 2.0.0",
+ "rustls 0.22.3",
+ "rustls-pemfile 2.1.2",
"rustls-pki-types",
"schemars",
"script",
@@ -9239,7 +9209,7 @@ dependencies = [
"tower",
"tower-http",
"urlencoding",
- "zstd 0.13.0",
+ "zstd 0.13.1",
]
[[package]]
@@ -9373,21 +9343,6 @@ version = "0.3.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d"
-[[package]]
-name = "skeptic"
-version = "0.13.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "16d23b015676c90a0f01c197bfdc786c20342c73a0afdda9025adb0bc42940a8"
-dependencies = [
- "bytecount",
- "cargo_metadata",
- "error-chain",
- "glob",
- "pulldown-cmark",
- "tempfile",
- "walkdir",
-]
-
[[package]]
name = "slab"
version = "0.4.9"
@@ -9409,15 +9364,15 @@ dependencies = [
[[package]]
name = "smallbitvec"
-version = "2.5.1"
+version = "2.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "75ce4f9dc4a41b4c3476cc925f1efb11b66df373a8fde5d4b8915fa91b5d995e"
+checksum = "fcc3fc564a4b53fd1e8589628efafe57602d91bde78be18186b5f61e8faea470"
[[package]]
name = "smallvec"
-version = "1.11.2"
+version = "1.13.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4dccd0940a2dcdf68d092b8cbab7dc0ad8fa938bf95787e1b916b0e3d0e8e970"
+checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67"
dependencies = [
"serde",
]
@@ -9462,12 +9417,12 @@ dependencies = [
[[package]]
name = "socket2"
-version = "0.5.5"
+version = "0.5.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9"
+checksum = "05ffd9c0a93b7543e062e759284fcf5f5e3b098501104bfbdde4d404db792871"
dependencies = [
"libc",
- "windows-sys 0.48.0",
+ "windows-sys 0.52.0",
]
[[package]]
@@ -9511,7 +9466,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d"
dependencies = [
"base64ct",
- "der 0.7.8",
+ "der 0.7.9",
]
[[package]]
@@ -9550,7 +9505,7 @@ version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ce81b7bd7c4493975347ef60d8c7e8b742d4694f4c49f93e0a12ea263938176c"
dependencies = [
- "itertools 0.12.0",
+ "itertools 0.12.1",
"nom",
"unicode_categories",
]
@@ -9575,7 +9530,7 @@ name = "sqlness-runner"
version = "0.7.2"
dependencies = [
"async-trait",
- "clap 4.4.11",
+ "clap 4.5.4",
"client",
"common-error",
"common-query",
@@ -9629,7 +9584,7 @@ checksum = "01b2e185515564f15375f593fb966b5718bc624ba77fe49fa4616ad619690554"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.55",
+ "syn 2.0.60",
]
[[package]]
@@ -9639,7 +9594,7 @@ source = "git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=c919990bf62ad
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.55",
+ "syn 2.0.60",
]
[[package]]
@@ -9658,7 +9613,7 @@ version = "0.6.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fa8241483a83a3f33aa5fff7e7d9def398ff9990b2752b6c6112b83c6d246029"
dependencies = [
- "ahash 0.7.7",
+ "ahash 0.7.8",
"atoi 1.0.0",
"base64 0.13.1",
"bitflags 1.3.2",
@@ -9671,7 +9626,7 @@ dependencies = [
"dirs 4.0.0",
"dotenvy",
"either",
- "event-listener",
+ "event-listener 2.5.3",
"futures-channel",
"futures-core",
"futures-intrusive",
@@ -9746,7 +9701,7 @@ version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1120e6a8cbd4d85d5532d2e8a245aef2128e1853981f8b6d9943264184843102"
dependencies = [
- "bitflags 2.4.1",
+ "bitflags 2.5.0",
"num_enum",
"optional",
]
@@ -9873,6 +9828,12 @@ version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623"
+[[package]]
+name = "strsim"
+version = "0.11.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f"
+
[[package]]
name = "strum"
version = "0.24.1"
@@ -9920,7 +9881,7 @@ dependencies = [
"proc-macro2",
"quote",
"rustversion",
- "syn 2.0.55",
+ "syn 2.0.60",
]
[[package]]
@@ -9933,7 +9894,7 @@ dependencies = [
"proc-macro2",
"quote",
"rustversion",
- "syn 2.0.55",
+ "syn 2.0.60",
]
[[package]]
@@ -9963,7 +9924,7 @@ dependencies = [
"datafusion-substrait",
"datatypes",
"promql",
- "prost 0.12.3",
+ "prost 0.12.4",
"session",
"snafu",
"substrait 0.17.1",
@@ -9978,16 +9939,16 @@ checksum = "f1e8440a1c9b95a7c9a00a19f78b980749e8c945eb880687a5d673cea83729c5"
dependencies = [
"git2",
"heck 0.4.1",
- "prettyplease 0.2.15",
- "prost 0.12.3",
- "prost-build 0.12.3",
- "prost-types 0.12.3",
+ "prettyplease 0.2.19",
+ "prost 0.12.4",
+ "prost-build 0.12.4",
+ "prost-types 0.12.4",
"schemars",
"semver",
"serde",
"serde_json",
"serde_yaml",
- "syn 2.0.55",
+ "syn 2.0.60",
"typify 0.0.14",
"walkdir",
]
@@ -9999,16 +9960,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ba959c71b2a1a341a94e1f362615d7e5f1a4de9d25d82fceea8160f79f1e1dfb"
dependencies = [
"heck 0.5.0",
- "prettyplease 0.2.15",
- "prost 0.12.3",
- "prost-build 0.12.3",
- "prost-types 0.12.3",
+ "prettyplease 0.2.19",
+ "prost 0.12.4",
+ "prost-build 0.12.4",
+ "prost-types 0.12.4",
"schemars",
"semver",
"serde",
"serde_json",
"serde_yaml",
- "syn 2.0.55",
+ "syn 2.0.60",
"typify 0.0.16",
"walkdir",
]
@@ -10026,7 +9987,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1cccfffbc6bb3bb2d3a26cd2077f4d055f6808d266f9d4d158797a4c60510dfe"
dependencies = [
"debugid",
- "memmap2 0.9.3",
+ "memmap2 0.9.4",
"stable_deref_trait",
"uuid",
]
@@ -10055,9 +10016,9 @@ dependencies = [
[[package]]
name = "syn"
-version = "2.0.55"
+version = "2.0.60"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "002a1b3dbf967edfafc32655d0f377ab0bb7b994aa1d32c8cc7e9b8bf3ebb8f0"
+checksum = "909518bc7b1c9b779f1bbf07f2929d35af9f0f37e47c6e9ef7f9dddc1e1821f3"
dependencies = [
"proc-macro2",
"quote",
@@ -10092,7 +10053,7 @@ dependencies = [
"proc-macro-error",
"proc-macro2",
"quote",
- "syn 2.0.55",
+ "syn 2.0.60",
]
[[package]]
@@ -10103,9 +10064,9 @@ checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160"
[[package]]
name = "sysinfo"
-version = "0.30.5"
+version = "0.30.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1fb4f3438c8f6389c864e61221cbc97e9bca98b4daf39a5beb7bea660f528bb2"
+checksum = "26d7c217777061d5a2d652aea771fb9ba98b6dade657204b08c4b9604d11555b"
dependencies = [
"cfg-if 1.0.0",
"core-foundation-sys",
@@ -10186,9 +10147,9 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369"
[[package]]
name = "target-lexicon"
-version = "0.12.12"
+version = "0.12.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "14c39fd04924ca3a864207c66fc2cd7d22d7c016007f9ce846cbb9326331930a"
+checksum = "e1fc403891a21bcfb7c37834ba66a547a8f402146eba7265b5a6d88059c9ff2f"
[[package]]
name = "temp-env"
@@ -10201,15 +10162,14 @@ dependencies = [
[[package]]
name = "tempfile"
-version = "3.8.1"
+version = "3.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7ef1adac450ad7f4b3c28589471ade84f25f731a7a0fe30d71dfa9f60fd808e5"
+checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1"
dependencies = [
"cfg-if 1.0.0",
- "fastrand 2.0.1",
- "redox_syscall 0.4.1",
- "rustix 0.38.28",
- "windows-sys 0.48.0",
+ "fastrand",
+ "rustix 0.38.32",
+ "windows-sys 0.52.0",
]
[[package]]
@@ -10225,9 +10185,9 @@ dependencies = [
[[package]]
name = "termcolor"
-version = "1.4.0"
+version = "1.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ff1bc3d3f05aff0403e8ac0d92ced918ec05b666a43f83297ccef5bea8a3d449"
+checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755"
dependencies = [
"winapi-util",
]
@@ -10318,7 +10278,7 @@ dependencies = [
"operator",
"partition",
"paste",
- "prost 0.12.3",
+ "prost 0.12.4",
"query",
"rand",
"rstest",
@@ -10340,7 +10300,7 @@ dependencies = [
"tonic 0.11.0",
"tower",
"uuid",
- "zstd 0.13.0",
+ "zstd 0.13.1",
]
[[package]]
@@ -10360,9 +10320,9 @@ checksum = "b7b3e525a49ec206798b40326a44121291b530c963cfb01018f63e135bac543d"
[[package]]
name = "textwrap"
-version = "0.16.0"
+version = "0.16.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d"
+checksum = "23d434d3f8967a09480fb04132ebe0a3e088c173e6d0ee7897abbdf4eab0f8b9"
[[package]]
name = "thiserror"
@@ -10381,7 +10341,7 @@ checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.55",
+ "syn 2.0.60",
]
[[package]]
@@ -10396,9 +10356,9 @@ dependencies = [
[[package]]
name = "thread_local"
-version = "1.1.7"
+version = "1.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152"
+checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c"
dependencies = [
"cfg-if 1.0.0",
"once_cell",
@@ -10448,12 +10408,13 @@ dependencies = [
[[package]]
name = "time"
-version = "0.3.31"
+version = "0.3.36"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f657ba42c3f86e7680e53c8cd3af8abbe56b5491790b46e22e19c0d57463583e"
+checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885"
dependencies = [
"deranged",
"itoa",
+ "num-conv",
"powerfmt",
"serde",
"time-core",
@@ -10468,10 +10429,11 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3"
[[package]]
name = "time-macros"
-version = "0.2.16"
+version = "0.2.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "26197e33420244aeb70c3e8c78376ca46571bc4e701e4791c2cd9f57dcb3a43f"
+checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf"
dependencies = [
+ "num-conv",
"time-core",
]
@@ -10529,9 +10491,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
[[package]]
name = "tokio"
-version = "1.36.0"
+version = "1.37.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "61285f6515fa018fb2d1e46eb21223fff441ee8db5d0f1435e8ab4f5cdb80931"
+checksum = "1adbebffeca75fcfd058afa480fb6c0b81e165a0323f9c9d39c9697e37c46787"
dependencies = [
"backtrace",
"bytes",
@@ -10541,7 +10503,7 @@ dependencies = [
"parking_lot 0.12.1",
"pin-project-lite",
"signal-hook-registry",
- "socket2 0.5.5",
+ "socket2 0.5.6",
"tokio-macros",
"tracing",
"windows-sys 0.48.0",
@@ -10565,7 +10527,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.55",
+ "syn 2.0.60",
]
[[package]]
@@ -10613,7 +10575,7 @@ dependencies = [
"postgres-protocol",
"postgres-types",
"rand",
- "socket2 0.5.5",
+ "socket2 0.5.6",
"tokio",
"tokio-util",
"whoami",
@@ -10626,8 +10588,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0ea13f22eda7127c827983bdaf0d7fff9df21c8817bab02815ac277a21143677"
dependencies = [
"futures",
- "ring 0.17.7",
- "rustls 0.22.1",
+ "ring 0.17.8",
+ "rustls 0.22.3",
"tokio",
"tokio-postgres",
"tokio-rustls 0.25.0",
@@ -10661,16 +10623,16 @@ version = "0.25.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f"
dependencies = [
- "rustls 0.22.1",
+ "rustls 0.22.3",
"rustls-pki-types",
"tokio",
]
[[package]]
name = "tokio-stream"
-version = "0.1.14"
+version = "0.1.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842"
+checksum = "267ac89e0bec6e691e5813911606935d77c476ff49024f98abcea3e7b15e37af"
dependencies = [
"futures-core",
"pin-project-lite",
@@ -10680,9 +10642,9 @@ dependencies = [
[[package]]
name = "tokio-test"
-version = "0.4.3"
+version = "0.4.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e89b3cbabd3ae862100094ae433e1def582cf86451b4e9bf83aa7ac1d8a7d719"
+checksum = "2468baabc3311435b55dd935f702f42cd1b8abb7e754fb7dfb16bd36aa88f9f7"
dependencies = [
"async-stream",
"bytes",
@@ -10717,14 +10679,14 @@ dependencies = [
[[package]]
name = "toml"
-version = "0.8.8"
+version = "0.8.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a1a195ec8c9da26928f773888e0742ca3ca1040c6cd859c919c9f59c1954ab35"
+checksum = "e9dd1545e8208b4a5af1aa9bbd0b4cf7e9ea08fabc5d0a5c67fcaafa17433aa3"
dependencies = [
"serde",
"serde_spanned",
"toml_datetime",
- "toml_edit 0.21.0",
+ "toml_edit 0.22.9",
]
[[package]]
@@ -10744,31 +10706,31 @@ checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421"
dependencies = [
"indexmap 2.2.6",
"toml_datetime",
- "winnow",
+ "winnow 0.5.40",
]
[[package]]
name = "toml_edit"
-version = "0.20.7"
+version = "0.21.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "70f427fce4d84c72b5b732388bf4a9f4531b53f74e2887e3ecb2481f68f66d81"
+checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1"
dependencies = [
"indexmap 2.2.6",
"toml_datetime",
- "winnow",
+ "winnow 0.5.40",
]
[[package]]
name = "toml_edit"
-version = "0.21.0"
+version = "0.22.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d34d383cd00a163b4a5b85053df514d45bc330f6de7737edfe0a93311d1eaa03"
+checksum = "8e40bb779c5187258fd7aad0eb68cb8706a0a81fa712fbea808ab43c4b8374c4"
dependencies = [
"indexmap 2.2.6",
"serde",
"serde_spanned",
"toml_datetime",
- "winnow",
+ "winnow 0.6.6",
]
[[package]]
@@ -10779,7 +10741,7 @@ checksum = "3082666a3a6433f7f511c7192923fa1fe07c69332d3c6a2e6bb040b569199d5a"
dependencies = [
"async-trait",
"axum",
- "base64 0.21.5",
+ "base64 0.21.7",
"bytes",
"futures-core",
"futures-util",
@@ -10808,7 +10770,7 @@ dependencies = [
"async-stream",
"async-trait",
"axum",
- "base64 0.21.5",
+ "base64 0.21.7",
"bytes",
"h2",
"http",
@@ -10817,8 +10779,8 @@ dependencies = [
"hyper-timeout",
"percent-encoding",
"pin-project",
- "prost 0.12.3",
- "rustls-pemfile 2.0.0",
+ "prost 0.12.4",
+ "rustls-pemfile 2.1.2",
"rustls-pki-types",
"tokio",
"tokio-rustls 0.25.0",
@@ -10848,11 +10810,11 @@ version = "0.10.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9d021fc044c18582b9a2408cd0dd05b1596e3ecdb5c4df822bb0183545683889"
dependencies = [
- "prettyplease 0.2.15",
+ "prettyplease 0.2.19",
"proc-macro2",
- "prost-build 0.12.3",
+ "prost-build 0.12.4",
"quote",
- "syn 2.0.55",
+ "syn 2.0.60",
]
[[package]]
@@ -10861,11 +10823,11 @@ version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "be4ef6dd70a610078cb4e338a0f79d06bc759ff1b22d2120c2ff02ae264ba9c2"
dependencies = [
- "prettyplease 0.2.15",
+ "prettyplease 0.2.19",
"proc-macro2",
- "prost-build 0.12.3",
+ "prost-build 0.12.4",
"quote",
- "syn 2.0.55",
+ "syn 2.0.60",
]
[[package]]
@@ -10874,8 +10836,8 @@ version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "548c227bd5c0fae5925812c4ec6c66ffcfced23ea370cb823f4d18f0fc1cb6a7"
dependencies = [
- "prost 0.12.3",
- "prost-types 0.12.3",
+ "prost 0.12.4",
+ "prost-types 0.12.4",
"tokio",
"tokio-stream",
"tonic 0.11.0",
@@ -10908,9 +10870,9 @@ version = "0.4.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "61c5bb1d698276a2443e5ecfabc1008bf15a36c12e6a7176e7bf089ea9131140"
dependencies = [
- "async-compression 0.4.5",
- "base64 0.21.5",
- "bitflags 2.4.1",
+ "async-compression 0.4.8",
+ "base64 0.21.7",
+ "bitflags 2.5.0",
"bytes",
"futures-core",
"futures-util",
@@ -10976,7 +10938,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.55",
+ "syn 2.0.60",
]
[[package]]
@@ -11020,7 +10982,7 @@ dependencies = [
"js-sys",
"once_cell",
"opentelemetry 0.21.0",
- "opentelemetry_sdk 0.21.1",
+ "opentelemetry_sdk 0.21.2",
"smallvec",
"tracing",
"tracing-core",
@@ -11136,7 +11098,7 @@ dependencies = [
"anyhow",
"cc",
"dirs 3.0.2",
- "libloading",
+ "libloading 0.7.4",
"once_cell",
"regex",
"serde",
@@ -11198,9 +11160,9 @@ checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825"
[[package]]
name = "typetag"
-version = "0.2.14"
+version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "196976efd4a62737b3a2b662cda76efb448d099b1049613d7a5d72743c611ce0"
+checksum = "661d18414ec032a49ece2d56eee03636e43c4e8d577047ab334c0ba892e29aaf"
dependencies = [
"erased-serde",
"inventory",
@@ -11211,13 +11173,13 @@ dependencies = [
[[package]]
name = "typetag-impl"
-version = "0.2.14"
+version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2eea6765137e2414c44c7b1e07c73965a118a72c46148e1e168b3fc9d3ccf3aa"
+checksum = "ac73887f47b9312552aa90ef477927ff014d63d1920ca8037c6c1951eab64bb1"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.55",
+ "syn 2.0.60",
]
[[package]]
@@ -11253,7 +11215,7 @@ dependencies = [
"regress 0.7.1",
"schemars",
"serde_json",
- "syn 2.0.55",
+ "syn 2.0.60",
"thiserror",
"unicode-ident",
]
@@ -11271,7 +11233,7 @@ dependencies = [
"regress 0.8.0",
"schemars",
"serde_json",
- "syn 2.0.55",
+ "syn 2.0.60",
"thiserror",
"unicode-ident",
]
@@ -11288,7 +11250,7 @@ dependencies = [
"serde",
"serde_json",
"serde_tokenstream",
- "syn 2.0.55",
+ "syn 2.0.60",
"typify-impl 0.0.14",
]
@@ -11304,7 +11266,7 @@ dependencies = [
"serde",
"serde_json",
"serde_tokenstream",
- "syn 2.0.55",
+ "syn 2.0.60",
"typify-impl 0.0.16",
]
@@ -11456,9 +11418,9 @@ dependencies = [
[[package]]
name = "unicode-bidi"
-version = "0.3.14"
+version = "0.3.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6f2528f27a9eb2b21e69c95319b30bd0efd85d09c379741b0f78ea1d86be2416"
+checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75"
[[package]]
name = "unicode-casing"
@@ -11474,18 +11436,18 @@ checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
[[package]]
name = "unicode-normalization"
-version = "0.1.22"
+version = "0.1.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921"
+checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5"
dependencies = [
"tinyvec",
]
[[package]]
name = "unicode-segmentation"
-version = "1.10.1"
+version = "1.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1dd624098567895118886609431a7c3b8f516e41d30e0643f03d94592a147e36"
+checksum = "d4c87d22b6e3f4a18d4d40ef354e97c90fcb14dd91d7dc0aa9d8a1172ebf7202"
[[package]]
name = "unicode-width"
@@ -11587,7 +11549,7 @@ checksum = "9881bea7cbe687e36c9ab3b778c36cd0487402e270304e8b1296d5085303c1a2"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.55",
+ "syn 2.0.60",
]
[[package]]
@@ -11679,11 +11641,17 @@ version = "0.11.0+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
+[[package]]
+name = "wasite"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b8dad83b4f25e74f184f64c43b150b91efe7647395b42289f38e50566d82855b"
+
[[package]]
name = "wasm-bindgen"
-version = "0.2.89"
+version = "0.2.92"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0ed0d4f68a3015cc185aff4db9506a015f4b96f95303897bfa23f846db54064e"
+checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8"
dependencies = [
"cfg-if 1.0.0",
"wasm-bindgen-macro",
@@ -11691,24 +11659,24 @@ dependencies = [
[[package]]
name = "wasm-bindgen-backend"
-version = "0.2.89"
+version = "0.2.92"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1b56f625e64f3a1084ded111c4d5f477df9f8c92df113852fa5a374dbda78826"
+checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da"
dependencies = [
"bumpalo",
"log",
"once_cell",
"proc-macro2",
"quote",
- "syn 2.0.55",
+ "syn 2.0.60",
"wasm-bindgen-shared",
]
[[package]]
name = "wasm-bindgen-futures"
-version = "0.4.39"
+version = "0.4.42"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ac36a15a220124ac510204aec1c3e5db8a22ab06fd6706d881dc6149f8ed9a12"
+checksum = "76bc14366121efc8dbb487ab05bcc9d346b3b5ec0eaa76e46594cabbe51762c0"
dependencies = [
"cfg-if 1.0.0",
"js-sys",
@@ -11718,9 +11686,9 @@ dependencies = [
[[package]]
name = "wasm-bindgen-macro"
-version = "0.2.89"
+version = "0.2.92"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0162dbf37223cd2afce98f3d0785506dcb8d266223983e4b5b525859e6e182b2"
+checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726"
dependencies = [
"quote",
"wasm-bindgen-macro-support",
@@ -11728,28 +11696,28 @@ dependencies = [
[[package]]
name = "wasm-bindgen-macro-support"
-version = "0.2.89"
+version = "0.2.92"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f0eb82fcb7930ae6219a7ecfd55b217f5f0893484b7a13022ebb2b2bf20b5283"
+checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.55",
+ "syn 2.0.60",
"wasm-bindgen-backend",
"wasm-bindgen-shared",
]
[[package]]
name = "wasm-bindgen-shared"
-version = "0.2.89"
+version = "0.2.92"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7ab9b36309365056cd639da3134bf87fa8f3d86008abf99e612384a6eecd459f"
+checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96"
[[package]]
name = "wasm-streams"
-version = "0.3.0"
+version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b4609d447824375f43e1ffbc051b50ad8f4b3ae8219680c94452ea05eb240ac7"
+checksum = "b65dc4c90b63b118468cf747d8bf3566c1913ef60be765b5730ead9e0a3ba129"
dependencies = [
"futures-util",
"js-sys",
@@ -11760,9 +11728,9 @@ dependencies = [
[[package]]
name = "web-sys"
-version = "0.3.66"
+version = "0.3.69"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "50c24a44ec86bb68fbecd1b3efed7e85ea5621b39b35ef2766b66cd984f8010f"
+checksum = "77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef"
dependencies = [
"js-sys",
"wasm-bindgen",
@@ -11780,9 +11748,9 @@ dependencies = [
[[package]]
name = "webbrowser"
-version = "0.8.12"
+version = "0.8.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "82b2391658b02c27719fc5a0a73d6e696285138e8b12fba9d4baa70451023c71"
+checksum = "db67ae75a9405634f5882791678772c94ff5f16a66535aae186e26aa0841fc8b"
dependencies = [
"core-foundation",
"home",
@@ -11801,7 +11769,7 @@ version = "0.22.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ed63aea5ce73d0ff405984102c42de94fc55a6b75765d621c65262469b3c9b53"
dependencies = [
- "ring 0.17.7",
+ "ring 0.17.8",
"untrusted 0.9.0",
]
@@ -11816,9 +11784,9 @@ dependencies = [
[[package]]
name = "webpki-roots"
-version = "0.25.3"
+version = "0.25.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1778a42e8b3b90bff8d0f5032bf22250792889a5cdc752aa0020c84abe3aaf10"
+checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1"
[[package]]
name = "which"
@@ -11829,24 +11797,25 @@ dependencies = [
"either",
"home",
"once_cell",
- "rustix 0.38.28",
+ "rustix 0.38.32",
]
[[package]]
name = "whoami"
-version = "1.4.1"
+version = "1.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "22fc3756b8a9133049b26c7f61ab35416c130e8c09b660f5b3958b446f52cc50"
+checksum = "a44ab49fad634e88f55bf8f9bb3abd2f27d7204172a112c7c9987e01c1c94ea9"
dependencies = [
- "wasm-bindgen",
+ "redox_syscall 0.4.1",
+ "wasite",
"web-sys",
]
[[package]]
name = "wide"
-version = "0.7.13"
+version = "0.7.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c68938b57b33da363195412cfc5fc37c9ed49aa9cfe2156fde64b8d2c9498242"
+checksum = "81a1851a719f11d1d2fea40e15c72f6c00de8c142d7ac47c1441cc7e4d0d5bc6"
dependencies = [
"bytemuck",
"safe_arch",
@@ -11908,17 +11877,8 @@ version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e48a53791691ab099e5e2ad123536d0fff50652600abaf43bbf952894110d0be"
dependencies = [
- "windows-core 0.52.0",
- "windows-targets 0.52.0",
-]
-
-[[package]]
-name = "windows-core"
-version = "0.51.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f1f8cf84f35d2db49a46868f947758c7a1138116f7fac3bc844f43ade1292e64"
-dependencies = [
- "windows-targets 0.48.5",
+ "windows-core",
+ "windows-targets 0.52.5",
]
[[package]]
@@ -11927,7 +11887,7 @@ version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9"
dependencies = [
- "windows-targets 0.52.0",
+ "windows-targets 0.52.5",
]
[[package]]
@@ -11954,7 +11914,7 @@ version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d"
dependencies = [
- "windows-targets 0.52.0",
+ "windows-targets 0.52.5",
]
[[package]]
@@ -11989,17 +11949,18 @@ dependencies = [
[[package]]
name = "windows-targets"
-version = "0.52.0"
+version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd"
+checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb"
dependencies = [
- "windows_aarch64_gnullvm 0.52.0",
- "windows_aarch64_msvc 0.52.0",
- "windows_i686_gnu 0.52.0",
- "windows_i686_msvc 0.52.0",
- "windows_x86_64_gnu 0.52.0",
- "windows_x86_64_gnullvm 0.52.0",
- "windows_x86_64_msvc 0.52.0",
+ "windows_aarch64_gnullvm 0.52.5",
+ "windows_aarch64_msvc 0.52.5",
+ "windows_i686_gnu 0.52.5",
+ "windows_i686_gnullvm",
+ "windows_i686_msvc 0.52.5",
+ "windows_x86_64_gnu 0.52.5",
+ "windows_x86_64_gnullvm 0.52.5",
+ "windows_x86_64_msvc 0.52.5",
]
[[package]]
@@ -12016,9 +11977,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8"
[[package]]
name = "windows_aarch64_gnullvm"
-version = "0.52.0"
+version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea"
+checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263"
[[package]]
name = "windows_aarch64_msvc"
@@ -12040,9 +12001,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc"
[[package]]
name = "windows_aarch64_msvc"
-version = "0.52.0"
+version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef"
+checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6"
[[package]]
name = "windows_i686_gnu"
@@ -12064,9 +12025,15 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e"
[[package]]
name = "windows_i686_gnu"
-version = "0.52.0"
+version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313"
+checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670"
+
+[[package]]
+name = "windows_i686_gnullvm"
+version = "0.52.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9"
[[package]]
name = "windows_i686_msvc"
@@ -12088,9 +12055,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406"
[[package]]
name = "windows_i686_msvc"
-version = "0.52.0"
+version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a"
+checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf"
[[package]]
name = "windows_x86_64_gnu"
@@ -12112,9 +12079,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e"
[[package]]
name = "windows_x86_64_gnu"
-version = "0.52.0"
+version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd"
+checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9"
[[package]]
name = "windows_x86_64_gnullvm"
@@ -12130,9 +12097,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc"
[[package]]
name = "windows_x86_64_gnullvm"
-version = "0.52.0"
+version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e"
+checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596"
[[package]]
name = "windows_x86_64_msvc"
@@ -12154,15 +12121,24 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538"
[[package]]
name = "windows_x86_64_msvc"
-version = "0.52.0"
+version = "0.52.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04"
+checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0"
+
+[[package]]
+name = "winnow"
+version = "0.5.40"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f593a95398737aeed53e489c785df13f3618e41dbcd6718c6addbf1395aa6876"
+dependencies = [
+ "memchr",
+]
[[package]]
name = "winnow"
-version = "0.5.30"
+version = "0.6.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9b5c3db89721d50d0e2a673f5043fc4722f76dcc352d7b1ab8b8288bed4ed2c5"
+checksum = "f0c976aaaa0e1f90dbb21e9587cdaf1d9679a1cde8875c0d6bd83ab96a208352"
dependencies = [
"memchr",
]
@@ -12204,10 +12180,10 @@ dependencies = [
"bcder",
"bytes",
"chrono",
- "der 0.7.8",
+ "der 0.7.9",
"hex",
"pem",
- "ring 0.17.7",
+ "ring 0.17.8",
"signature",
"spki 0.7.3",
"thiserror",
@@ -12216,9 +12192,9 @@ dependencies = [
[[package]]
name = "xml-rs"
-version = "0.8.19"
+version = "0.8.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0fcb9cbac069e033553e8bb871be2fbdffcab578eb25bd0f7c508cedc6dcd75a"
+checksum = "791978798f0597cfc70478424c2b4fdc2b7a8024aaff78497ef00f24ef674193"
[[package]]
name = "xz2"
@@ -12255,7 +12231,7 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.55",
+ "syn 2.0.60",
]
[[package]]
@@ -12275,7 +12251,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.55",
+ "syn 2.0.60",
]
[[package]]
@@ -12307,11 +12283,11 @@ dependencies = [
[[package]]
name = "zstd"
-version = "0.13.0"
+version = "0.13.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bffb3309596d527cfcba7dfc6ed6052f1d39dfbd7c867aa2e865e4a449c10110"
+checksum = "2d789b1514203a1120ad2429eae43a7bd32b90976a7bb8a05f7ec02fa88cc23a"
dependencies = [
- "zstd-safe 7.0.0",
+ "zstd-safe 7.1.0",
]
[[package]]
@@ -12336,18 +12312,18 @@ dependencies = [
[[package]]
name = "zstd-safe"
-version = "7.0.0"
+version = "7.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "43747c7422e2924c11144d5229878b98180ef8b06cca4ab5af37afc8a8d8ea3e"
+checksum = "1cd99b45c6bc03a018c8b8a86025678c87e55526064e38f9df301989dce7ec0a"
dependencies = [
"zstd-sys",
]
[[package]]
name = "zstd-sys"
-version = "2.0.9+zstd.1.5.5"
+version = "2.0.10+zstd.1.5.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9e16efa8a874a0481a574084d34cc26fdb3b99627480f785888deb6386506656"
+checksum = "c253a4914af5bafc8fa8c86ee400827e83cf6ec01195ec1f1ed8441bf00d65aa"
dependencies = [
"cc",
"pkg-config",
diff --git a/Cargo.toml b/Cargo.toml
index 06bdbc180862..f2508b169e59 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -70,7 +70,10 @@ license = "Apache-2.0"
clippy.print_stdout = "warn"
clippy.print_stderr = "warn"
clippy.implicit_clone = "warn"
+clippy.readonly_write_lock = "allow"
rust.unknown_lints = "deny"
+# Remove this after https://github.com/PyO3/pyo3/issues/4094
+rust.non_local_definitions = "allow"
[workspace.dependencies]
# We turn off default-features for some dependencies here so the workspaces which inherit them can
diff --git a/rust-toolchain.toml b/rust-toolchain.toml
index 81fce2619416..b92b3fa74b10 100644
--- a/rust-toolchain.toml
+++ b/rust-toolchain.toml
@@ -1,2 +1,2 @@
[toolchain]
-channel = "nightly-2023-12-19"
+channel = "nightly-2024-04-18"
diff --git a/src/auth/src/tests.rs b/src/auth/src/tests.rs
index 06a739241a9f..d3e8a41aa17a 100644
--- a/src/auth/src/tests.rs
+++ b/src/auth/src/tests.rs
@@ -45,9 +45,9 @@ impl Default for MockUserProvider {
impl MockUserProvider {
pub fn set_authorization_info(&mut self, info: DatabaseAuthInfo) {
- self.catalog = info.catalog.to_owned();
- self.schema = info.schema.to_owned();
- self.username = info.username.to_owned();
+ info.catalog.clone_into(&mut self.catalog);
+ info.schema.clone_into(&mut self.schema);
+ info.username.clone_into(&mut self.username);
}
}
diff --git a/src/catalog/src/information_schema/predicate.rs b/src/catalog/src/information_schema/predicate.rs
index 9afc83a389f5..243ff7053321 100644
--- a/src/catalog/src/information_schema/predicate.rs
+++ b/src/catalog/src/information_schema/predicate.rs
@@ -109,11 +109,7 @@ impl Predicate {
};
}
Predicate::Not(p) => {
- let Some(b) = p.eval(row) else {
- return None;
- };
-
- return Some(!b);
+ return Some(!p.eval(row)?);
}
}
@@ -125,13 +121,7 @@ impl Predicate {
fn from_expr(expr: DfExpr) -> Option<Predicate> {
match expr {
// NOT expr
- DfExpr::Not(expr) => {
- let Some(p) = Self::from_expr(*expr) else {
- return None;
- };
-
- Some(Predicate::Not(Box::new(p)))
- }
+ DfExpr::Not(expr) => Some(Predicate::Not(Box::new(Self::from_expr(*expr)?))),
// expr LIKE pattern
DfExpr::Like(Like {
negated,
@@ -178,25 +168,15 @@ impl Predicate {
}
// left AND right
(left, Operator::And, right) => {
- let Some(left) = Self::from_expr(left) else {
- return None;
- };
-
- let Some(right) = Self::from_expr(right) else {
- return None;
- };
+ let left = Self::from_expr(left)?;
+ let right = Self::from_expr(right)?;
Some(Predicate::And(Box::new(left), Box::new(right)))
}
// left OR right
(left, Operator::Or, right) => {
- let Some(left) = Self::from_expr(left) else {
- return None;
- };
-
- let Some(right) = Self::from_expr(right) else {
- return None;
- };
+ let left = Self::from_expr(left)?;
+ let right = Self::from_expr(right)?;
Some(Predicate::Or(Box::new(left), Box::new(right)))
}
diff --git a/src/catalog/src/kvbackend/client.rs b/src/catalog/src/kvbackend/client.rs
index 8c40c4369237..e72d2b527514 100644
--- a/src/catalog/src/kvbackend/client.rs
+++ b/src/catalog/src/kvbackend/client.rs
@@ -17,7 +17,6 @@ use std::fmt::Debug;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, Mutex};
use std::time::Duration;
-use std::usize;
use common_error::ext::BoxedError;
use common_meta::cache_invalidator::KvCacheInvalidator;
diff --git a/src/cmd/src/cli.rs b/src/cmd/src/cli.rs
index 409c8e4d54d0..35dc1e4ba7dc 100644
--- a/src/cmd/src/cli.rs
+++ b/src/cmd/src/cli.rs
@@ -84,10 +84,10 @@ impl Command {
let mut logging_opts = LoggingOptions::default();
if let Some(dir) = &cli_options.log_dir {
- logging_opts.dir = dir.clone();
+ logging_opts.dir.clone_from(dir);
}
- logging_opts.level = cli_options.log_level.clone();
+ logging_opts.level.clone_from(&cli_options.log_level);
Ok(Options::Cli(Box::new(logging_opts)))
}
diff --git a/src/cmd/src/datanode.rs b/src/cmd/src/datanode.rs
index 162419a3b43e..ff0d90409023 100644
--- a/src/cmd/src/datanode.rs
+++ b/src/cmd/src/datanode.rs
@@ -139,19 +139,19 @@ impl StartCommand {
)?;
if let Some(dir) = &cli_options.log_dir {
- opts.logging.dir = dir.clone();
+ opts.logging.dir.clone_from(dir);
}
if cli_options.log_level.is_some() {
- opts.logging.level = cli_options.log_level.clone();
+ opts.logging.level.clone_from(&cli_options.log_level);
}
if let Some(addr) = &self.rpc_addr {
- opts.rpc_addr = addr.clone();
+ opts.rpc_addr.clone_from(addr);
}
if self.rpc_hostname.is_some() {
- opts.rpc_hostname = self.rpc_hostname.clone();
+ opts.rpc_hostname.clone_from(&self.rpc_hostname);
}
if let Some(node_id) = self.node_id {
@@ -161,7 +161,8 @@ impl StartCommand {
if let Some(metasrv_addrs) = &self.metasrv_addr {
opts.meta_client
.get_or_insert_with(MetaClientOptions::default)
- .metasrv_addrs = metasrv_addrs.clone();
+ .metasrv_addrs
+ .clone_from(metasrv_addrs);
opts.mode = Mode::Distributed;
}
@@ -173,7 +174,7 @@ impl StartCommand {
}
if let Some(data_home) = &self.data_home {
- opts.storage.data_home = data_home.clone();
+ opts.storage.data_home.clone_from(data_home);
}
// `wal_dir` only affects raft-engine config.
@@ -191,7 +192,7 @@ impl StartCommand {
}
if let Some(http_addr) = &self.http_addr {
- opts.http.addr = http_addr.clone();
+ opts.http.addr.clone_from(http_addr);
}
if let Some(http_timeout) = self.http_timeout {
diff --git a/src/cmd/src/frontend.rs b/src/cmd/src/frontend.rs
index 3100182f72b7..0ff35846256f 100644
--- a/src/cmd/src/frontend.rs
+++ b/src/cmd/src/frontend.rs
@@ -157,11 +157,11 @@ impl StartCommand {
)?;
if let Some(dir) = &cli_options.log_dir {
- opts.logging.dir = dir.clone();
+ opts.logging.dir.clone_from(dir);
}
if cli_options.log_level.is_some() {
- opts.logging.level = cli_options.log_level.clone();
+ opts.logging.level.clone_from(&cli_options.log_level);
}
let tls_opts = TlsOption::new(
@@ -171,7 +171,7 @@ impl StartCommand {
);
if let Some(addr) = &self.http_addr {
- opts.http.addr = addr.clone()
+ opts.http.addr.clone_from(addr);
}
if let Some(http_timeout) = self.http_timeout {
@@ -183,24 +183,24 @@ impl StartCommand {
}
if let Some(addr) = &self.rpc_addr {
- opts.grpc.addr = addr.clone()
+ opts.grpc.addr.clone_from(addr);
}
if let Some(addr) = &self.mysql_addr {
opts.mysql.enable = true;
- opts.mysql.addr = addr.clone();
+ opts.mysql.addr.clone_from(addr);
opts.mysql.tls = tls_opts.clone();
}
if let Some(addr) = &self.postgres_addr {
opts.postgres.enable = true;
- opts.postgres.addr = addr.clone();
+ opts.postgres.addr.clone_from(addr);
opts.postgres.tls = tls_opts;
}
if let Some(addr) = &self.opentsdb_addr {
opts.opentsdb.enable = true;
- opts.opentsdb.addr = addr.clone();
+ opts.opentsdb.addr.clone_from(addr);
}
if let Some(enable) = self.influxdb_enable {
@@ -210,11 +210,12 @@ impl StartCommand {
if let Some(metasrv_addrs) = &self.metasrv_addr {
opts.meta_client
.get_or_insert_with(MetaClientOptions::default)
- .metasrv_addrs = metasrv_addrs.clone();
+ .metasrv_addrs
+ .clone_from(metasrv_addrs);
opts.mode = Mode::Distributed;
}
- opts.user_provider = self.user_provider.clone();
+ opts.user_provider.clone_from(&self.user_provider);
Ok(Options::Frontend(Box::new(opts)))
}
diff --git a/src/cmd/src/metasrv.rs b/src/cmd/src/metasrv.rs
index 29b0f517de7e..bc542ada3024 100644
--- a/src/cmd/src/metasrv.rs
+++ b/src/cmd/src/metasrv.rs
@@ -134,23 +134,23 @@ impl StartCommand {
)?;
if let Some(dir) = &cli_options.log_dir {
- opts.logging.dir = dir.clone();
+ opts.logging.dir.clone_from(dir);
}
if cli_options.log_level.is_some() {
- opts.logging.level = cli_options.log_level.clone();
+ opts.logging.level.clone_from(&cli_options.log_level);
}
if let Some(addr) = &self.bind_addr {
- opts.bind_addr = addr.clone();
+ opts.bind_addr.clone_from(addr);
}
if let Some(addr) = &self.server_addr {
- opts.server_addr = addr.clone();
+ opts.server_addr.clone_from(addr);
}
if let Some(addr) = &self.store_addr {
- opts.store_addr = addr.clone();
+ opts.store_addr.clone_from(addr);
}
if let Some(selector_type) = &self.selector {
@@ -168,7 +168,7 @@ impl StartCommand {
}
if let Some(http_addr) = &self.http_addr {
- opts.http.addr = http_addr.clone();
+ opts.http.addr.clone_from(http_addr);
}
if let Some(http_timeout) = self.http_timeout {
@@ -176,11 +176,11 @@ impl StartCommand {
}
if let Some(data_home) = &self.data_home {
- opts.data_home = data_home.clone();
+ opts.data_home.clone_from(data_home);
}
if !self.store_key_prefix.is_empty() {
- opts.store_key_prefix = self.store_key_prefix.clone()
+ opts.store_key_prefix.clone_from(&self.store_key_prefix)
}
if let Some(max_txn_ops) = self.max_txn_ops {
diff --git a/src/cmd/src/standalone.rs b/src/cmd/src/standalone.rs
index 694d10d6899c..2c7d4abad2b8 100644
--- a/src/cmd/src/standalone.rs
+++ b/src/cmd/src/standalone.rs
@@ -293,11 +293,11 @@ impl StartCommand {
opts.mode = Mode::Standalone;
if let Some(dir) = &cli_options.log_dir {
- opts.logging.dir = dir.clone();
+ opts.logging.dir.clone_from(dir);
}
if cli_options.log_level.is_some() {
- opts.logging.level = cli_options.log_level.clone();
+ opts.logging.level.clone_from(&cli_options.log_level);
}
let tls_opts = TlsOption::new(
@@ -307,11 +307,11 @@ impl StartCommand {
);
if let Some(addr) = &self.http_addr {
- opts.http.addr = addr.clone()
+ opts.http.addr.clone_from(addr);
}
if let Some(data_home) = &self.data_home {
- opts.storage.data_home = data_home.clone();
+ opts.storage.data_home.clone_from(data_home);
}
if let Some(addr) = &self.rpc_addr {
@@ -325,31 +325,31 @@ impl StartCommand {
}
.fail();
}
- opts.grpc.addr = addr.clone()
+ opts.grpc.addr.clone_from(addr)
}
if let Some(addr) = &self.mysql_addr {
opts.mysql.enable = true;
- opts.mysql.addr = addr.clone();
+ opts.mysql.addr.clone_from(addr);
opts.mysql.tls = tls_opts.clone();
}
if let Some(addr) = &self.postgres_addr {
opts.postgres.enable = true;
- opts.postgres.addr = addr.clone();
+ opts.postgres.addr.clone_from(addr);
opts.postgres.tls = tls_opts;
}
if let Some(addr) = &self.opentsdb_addr {
opts.opentsdb.enable = true;
- opts.opentsdb.addr = addr.clone();
+ opts.opentsdb.addr.clone_from(addr);
}
if self.influxdb_enable {
opts.influxdb.enable = self.influxdb_enable;
}
- opts.user_provider = self.user_provider.clone();
+ opts.user_provider.clone_from(&self.user_provider);
let metadata_store = opts.metadata_store.clone();
let procedure = opts.procedure.clone();
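The `x = y.clone()` to `x.clone_from(&y)` rewrites across the cmd crate match what clippy's `assigning_clones` lint on the newer toolchain suggests: `clone_from` can reuse the destination's existing allocation instead of building a fresh clone and dropping the old value. A minimal standalone sketch, with a made-up helper not taken from the patch:

fn apply_addr(current: &mut String, from_cli: &String) {
    // Behaviorally the same as `*current = from_cli.clone();`, but String's
    // clone_from may reuse `current`'s buffer when it is large enough.
    current.clone_from(from_cli);
}

fn main() {
    let mut addr = String::from("127.0.0.1:4000");
    apply_addr(&mut addr, &String::from("0.0.0.0:4001"));
    assert_eq!(addr, "0.0.0.0:4001");
}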
diff --git a/src/common/meta/src/key/datanode_table.rs b/src/common/meta/src/key/datanode_table.rs
index 82ddebc3206b..aa63eef09e1f 100644
--- a/src/common/meta/src/key/datanode_table.rs
+++ b/src/common/meta/src/key/datanode_table.rs
@@ -240,10 +240,14 @@ impl DatanodeTableManager {
// FIXME(weny): add unit tests.
let mut new_region_info = region_info.clone();
if need_update_options {
- new_region_info.region_options = new_region_options.clone();
+ new_region_info
+ .region_options
+ .clone_from(new_region_options);
}
if need_update_wal_options {
- new_region_info.region_wal_options = new_region_wal_options.clone();
+ new_region_info
+ .region_wal_options
+ .clone_from(new_region_wal_options);
}
let val = DatanodeTableValue::new(table_id, regions, new_region_info)
.try_as_raw_value()?;
diff --git a/src/common/meta/src/sequence.rs b/src/common/meta/src/sequence.rs
index 9e2469332a26..b1acc961d464 100644
--- a/src/common/meta/src/sequence.rs
+++ b/src/common/meta/src/sequence.rs
@@ -172,9 +172,7 @@ impl Inner {
if !res.success {
if let Some(kv) = res.prev_kv {
- expect = kv.value.clone();
-
- let v: [u8; 8] = match kv.value.try_into() {
+ let v: [u8; 8] = match kv.value.clone().try_into() {
Ok(a) => a,
Err(v) => {
return error::UnexpectedSequenceValueSnafu {
@@ -184,13 +182,12 @@ impl Inner {
}
};
let v = u64::from_le_bytes(v);
-
// If the existed value is smaller than the initial, we should start from the initial.
start = v.max(self.initial);
+ expect = kv.value;
} else {
- expect = vec![];
-
start = self.initial;
+ expect = vec![];
}
continue;
}
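The reordering above keeps the retry loop's semantics while dropping one assignment: the bytes are cloned only for the fallible `try_into`, and the original `kv.value` is moved into `expect` after its last use. A small sketch of the same ownership pattern, with made-up values:

fn main() {
    let value: Vec<u8> = 42u64.to_le_bytes().to_vec();
    // Clone only what the conversion consumes; the original stays usable.
    let bytes: [u8; 8] = value.clone().try_into().unwrap();
    let start = u64::from_le_bytes(bytes);
    // Move the original once nothing else needs it.
    let expect = value;
    assert_eq!(start, 42);
    assert_eq!(expect.len(), 8);
}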
diff --git a/src/common/meta/src/state_store.rs b/src/common/meta/src/state_store.rs
index eb3a91de173b..1cf1ea86492d 100644
--- a/src/common/meta/src/state_store.rs
+++ b/src/common/meta/src/state_store.rs
@@ -22,6 +22,7 @@ use common_procedure::store::util::multiple_value_stream;
use common_procedure::Result as ProcedureResult;
use futures::future::try_join_all;
use futures::StreamExt;
+use itertools::Itertools;
use snafu::ResultExt;
use crate::error::Result;
@@ -79,17 +80,21 @@ fn decode_kv(kv: KeyValue) -> Result<(String, Vec<u8>)> {
Ok((key, value))
}
-enum SplitValue<'a> {
- Single(&'a [u8]),
- Multiple(Vec<&'a [u8]>),
+enum SplitValue {
+ Single(Vec<u8>),
+ Multiple(Vec<Vec<u8>>),
}
-fn split_value(value: &[u8], max_value_size: Option<usize>) -> SplitValue<'_> {
+fn split_value(value: Vec<u8>, max_value_size: Option<usize>) -> SplitValue {
if let Some(max_value_size) = max_value_size {
if value.len() <= max_value_size {
SplitValue::Single(value)
} else {
- SplitValue::Multiple(value.chunks(max_value_size).collect::<Vec<_>>())
+ let mut values = vec![];
+ for chunk in value.into_iter().chunks(max_value_size).into_iter() {
+ values.push(chunk.collect());
+ }
+ SplitValue::Multiple(values)
}
} else {
SplitValue::Single(value)
@@ -99,10 +104,10 @@ fn split_value(value: &[u8], max_value_size: Option<usize>) -> SplitValue<'_> {
#[async_trait]
impl StateStore for KvStateStore {
async fn put(&self, key: &str, value: Vec<u8>) -> ProcedureResult<()> {
- let split = split_value(&value, self.max_value_size);
+ let split = split_value(value, self.max_value_size);
let key = with_prefix(key);
match split {
- SplitValue::Single(_) => {
+ SplitValue::Single(value) => {
self.kv_backend
.put(
PutRequest::new()
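`split_value` now takes the buffer by value and returns owned chunks, so `put` no longer has to keep a borrowed local alive across the match arms. A std-only sketch of the same splitting idea (the itertools-based version in the patch is equivalent in effect):

fn split_owned(value: Vec<u8>, max_value_size: usize) -> Vec<Vec<u8>> {
    if value.len() <= max_value_size {
        // Single piece: hand the buffer back untouched.
        vec![value]
    } else {
        // Multiple pieces: copy each fixed-size window into its own Vec.
        value.chunks(max_value_size).map(|c| c.to_vec()).collect()
    }
}

fn main() {
    assert_eq!(split_owned(vec![1, 2, 3, 4, 5], 2), vec![vec![1, 2], vec![3, 4], vec![5]]);
    assert_eq!(split_owned(vec![1, 2], 8), vec![vec![1, 2]]);
}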
diff --git a/src/common/meta/src/wal_options_allocator.rs b/src/common/meta/src/wal_options_allocator.rs
index 202b2958ba4a..ac55127894b6 100644
--- a/src/common/meta/src/wal_options_allocator.rs
+++ b/src/common/meta/src/wal_options_allocator.rs
@@ -163,7 +163,7 @@ mod tests {
let kv_backend = Arc::new(MemoryKvBackend::new()) as KvBackendRef;
let mut topic_manager = KafkaTopicManager::new(config.clone(), kv_backend);
// Replaces the default topic pool with the constructed topics.
- topic_manager.topic_pool = topics.clone();
+ topic_manager.topic_pool.clone_from(&topics);
// Replaces the default selector with a round-robin selector without shuffled.
topic_manager.topic_selector = Arc::new(RoundRobinTopicSelector::default());
diff --git a/src/common/meta/src/wal_options_allocator/kafka/topic_manager.rs b/src/common/meta/src/wal_options_allocator/kafka/topic_manager.rs
index ea2f89554bfa..ab64a6fa0fb0 100644
--- a/src/common/meta/src/wal_options_allocator/kafka/topic_manager.rs
+++ b/src/common/meta/src/wal_options_allocator/kafka/topic_manager.rs
@@ -291,7 +291,7 @@ mod tests {
let kv_backend = Arc::new(MemoryKvBackend::new()) as KvBackendRef;
let mut manager = TopicManager::new(config.clone(), kv_backend);
// Replaces the default topic pool with the constructed topics.
- manager.topic_pool = topics.clone();
+ manager.topic_pool.clone_from(&topics);
// Replaces the default selector with a round-robin selector without shuffled.
manager.topic_selector = Arc::new(RoundRobinTopicSelector::default());
manager.start().await.unwrap();
diff --git a/src/common/procedure/src/local/rwlock.rs b/src/common/procedure/src/local/rwlock.rs
index a1701320364c..538914e3950f 100644
--- a/src/common/procedure/src/local/rwlock.rs
+++ b/src/common/procedure/src/local/rwlock.rs
@@ -19,19 +19,19 @@ use std::sync::{Arc, Mutex};
use tokio::sync::{OwnedRwLockReadGuard, OwnedRwLockWriteGuard, RwLock};
pub enum OwnedKeyRwLockGuard {
- Read(OwnedRwLockReadGuard<()>),
- Write(OwnedRwLockWriteGuard<()>),
+ Read { _guard: OwnedRwLockReadGuard<()> },
+ Write { _guard: OwnedRwLockWriteGuard<()> },
}
impl From<OwnedRwLockReadGuard<()>> for OwnedKeyRwLockGuard {
fn from(guard: OwnedRwLockReadGuard<()>) -> Self {
- OwnedKeyRwLockGuard::Read(guard)
+ OwnedKeyRwLockGuard::Read { _guard: guard }
}
}
impl From<OwnedRwLockWriteGuard<()>> for OwnedKeyRwLockGuard {
fn from(guard: OwnedRwLockWriteGuard<()>) -> Self {
- OwnedKeyRwLockGuard::Write(guard)
+ OwnedKeyRwLockGuard::Write { _guard: guard }
}
}
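The tuple variants become struct variants with a `_guard` field because never-read fields now trip the compiler's `dead_code` lint; an underscore-prefixed named field keeps the RAII guard alive without the warning. A minimal sketch with a hypothetical wrapper type:

use std::sync::{Mutex, MutexGuard};

struct HeldLock<'a> {
    // Never read, only held: the leading underscore marks this as intentional.
    _guard: MutexGuard<'a, ()>,
}

fn main() {
    let lock = Mutex::new(());
    let held = HeldLock { _guard: lock.lock().unwrap() };
    assert!(lock.try_lock().is_err()); // still locked while the guard is held
    drop(held);
    assert!(lock.try_lock().is_ok()); // released once the guard is dropped
}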
diff --git a/src/common/query/src/logical_plan/expr.rs b/src/common/query/src/logical_plan/expr.rs
index 79f2363a6e12..c63de87f6147 100644
--- a/src/common/query/src/logical_plan/expr.rs
+++ b/src/common/query/src/logical_plan/expr.rs
@@ -44,9 +44,7 @@ pub fn build_filter_from_timestamp(
ts_col_name: &str,
time_range: Option<&TimestampRange>,
) -> Option<Expr> {
- let Some(time_range) = time_range else {
- return None;
- };
+ let time_range = time_range?;
let ts_col_expr = DfExpr::Column(Column {
relation: None,
name: ts_col_name.to_string(),
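Replacing the `let ... else { return None; }` form with `?` is the idiomatic early return once the function already yields an `Option`. A tiny sketch with a made-up function:

fn first_char(name: Option<&str>) -> Option<char> {
    // `?` returns None immediately when `name` is None.
    let name = name?;
    name.chars().next()
}

fn main() {
    assert_eq!(first_char(Some("ts")), Some('t'));
    assert_eq!(first_char(None), None);
}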
diff --git a/src/common/time/src/timestamp.rs b/src/common/time/src/timestamp.rs
index 95dba04adcae..a20a1925d040 100644
--- a/src/common/time/src/timestamp.rs
+++ b/src/common/time/src/timestamp.rs
@@ -381,7 +381,7 @@ impl Timestamp {
pub fn from_chrono_datetime(ndt: NaiveDateTime) -> Option<Self> {
let sec = ndt.and_utc().timestamp();
- let nsec = ndt.timestamp_subsec_nanos();
+ let nsec = ndt.and_utc().timestamp_subsec_nanos();
Timestamp::from_splits(sec, nsec)
}
@@ -1065,7 +1065,7 @@ mod tests {
let (sec, nsec) = Timestamp::new(i64::MIN, TimeUnit::Nanosecond).split();
let time = DateTime::from_timestamp(sec, nsec).unwrap().naive_utc();
assert_eq!(sec, time.and_utc().timestamp());
- assert_eq!(nsec, time.timestamp_subsec_nanos());
+ assert_eq!(nsec, time.and_utc().timestamp_subsec_nanos());
}
#[test]
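Recent chrono releases deprecate the timestamp accessors on `NaiveDateTime`, so both the conversion and the test route through `and_utc()` first. A sketch assuming a chrono 0.4.3x dependency:

use chrono::NaiveDateTime;

fn to_parts(ndt: NaiveDateTime) -> (i64, u32) {
    // Attach the UTC offset, then read seconds and sub-second nanos from the DateTime.
    let utc = ndt.and_utc();
    (utc.timestamp(), utc.timestamp_subsec_nanos())
}

fn main() {
    let ndt = chrono::DateTime::from_timestamp(42, 7).unwrap().naive_utc();
    assert_eq!(to_parts(ndt), (42, 7));
}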
diff --git a/src/index/src/inverted_index/format/writer/single.rs b/src/index/src/inverted_index/format/writer/single.rs
index c652c76390d4..07d10b388016 100644
--- a/src/index/src/inverted_index/format/writer/single.rs
+++ b/src/index/src/inverted_index/format/writer/single.rs
@@ -120,7 +120,7 @@ where
// update min/max, assume values are appended in lexicographic order
if stats.distinct_count == 1 {
- stats.min_value = value.clone();
+ stats.min_value.clone_from(&value);
}
stats.max_value = value;
}
diff --git a/src/meta-client/src/client.rs b/src/meta-client/src/client.rs
index ddb169ebc5d5..cb8341b3a88a 100644
--- a/src/meta-client/src/client.rs
+++ b/src/meta-client/src/client.rs
@@ -510,9 +510,6 @@ impl MetaClient {
#[cfg(test)]
mod tests {
use api::v1::meta::{HeartbeatRequest, Peer};
- use meta_srv::metasrv::SelectorContext;
- use meta_srv::selector::{Namespace, Selector, SelectorOptions};
- use meta_srv::Result as MetaResult;
use super::*;
use crate::{error, mocks};
@@ -662,36 +659,6 @@ mod tests {
});
}
- struct MockSelector;
-
- #[async_trait::async_trait]
- impl Selector for MockSelector {
- type Context = SelectorContext;
- type Output = Vec<Peer>;
-
- async fn select(
- &self,
- _ns: Namespace,
- _ctx: &Self::Context,
- _opts: SelectorOptions,
- ) -> MetaResult<Self::Output> {
- Ok(vec![
- Peer {
- id: 0,
- addr: "peer0".to_string(),
- },
- Peer {
- id: 1,
- addr: "peer1".to_string(),
- },
- Peer {
- id: 2,
- addr: "peer2".to_string(),
- },
- ])
- }
- }
-
#[tokio::test]
async fn test_range_get() {
let tc = new_client("test_range_get").await;
diff --git a/src/meta-srv/src/metasrv/builder.rs b/src/meta-srv/src/metasrv/builder.rs
index 003d8ba5c6d6..bd4ab0394dda 100644
--- a/src/meta-srv/src/metasrv/builder.rs
+++ b/src/meta-srv/src/metasrv/builder.rs
@@ -65,7 +65,7 @@ use crate::procedure::region_migration::DefaultContextFactory;
use crate::pubsub::PublishRef;
use crate::selector::lease_based::LeaseBasedSelector;
use crate::service::mailbox::MailboxRef;
-use crate::service::store::cached_kv::{CheckLeader, LeaderCachedKvBackend};
+use crate::service::store::cached_kv::LeaderCachedKvBackend;
use crate::state::State;
use crate::table_meta_alloc::MetasrvPeerAllocator;
@@ -436,13 +436,3 @@ impl Default for MetasrvBuilder {
Self::new()
}
}
-
-struct CheckLeaderByElection(Option<ElectionRef>);
-
-impl CheckLeader for CheckLeaderByElection {
- fn check(&self) -> bool {
- self.0
- .as_ref()
- .map_or(false, |election| election.is_leader())
- }
-}
diff --git a/src/meta-srv/src/selector/load_based.rs b/src/meta-srv/src/selector/load_based.rs
index 5f89a5cfeb2f..de9a293446a5 100644
--- a/src/meta-srv/src/selector/load_based.rs
+++ b/src/meta-srv/src/selector/load_based.rs
@@ -205,11 +205,9 @@ mod tests {
let alive_stat_kvs = filter_out_expired_datanode(stat_kvs, &lease_kvs);
assert_eq!(1, alive_stat_kvs.len());
- assert!(alive_stat_kvs
- .get(&StatKey {
- cluster_id: 1,
- node_id: 1
- })
- .is_some());
+ assert!(alive_stat_kvs.contains_key(&StatKey {
+ cluster_id: 1,
+ node_id: 1
+ }));
}
}
diff --git a/src/meta-srv/src/service/store/cached_kv.rs b/src/meta-srv/src/service/store/cached_kv.rs
index df898164d1b2..8772c47a1d70 100644
--- a/src/meta-srv/src/service/store/cached_kv.rs
+++ b/src/meta-srv/src/service/store/cached_kv.rs
@@ -263,7 +263,7 @@ impl KvBackend for LeaderCachedKvBackend {
.collect::<HashSet<_>>();
metrics::METRIC_META_KV_CACHE_HIT
- .with_label_values(&[&"batch_get"])
+ .with_label_values(&["batch_get"])
.inc_by(hit_keys.len() as u64);
let missed_keys = req
@@ -273,7 +273,7 @@ impl KvBackend for LeaderCachedKvBackend {
.cloned()
.collect::<Vec<_>>();
metrics::METRIC_META_KV_CACHE_MISS
- .with_label_values(&[&"batch_get"])
+ .with_label_values(&["batch_get"])
.inc_by(missed_keys.len() as u64);
let remote_req = BatchGetRequest { keys: missed_keys };
diff --git a/src/metric-engine/src/engine/create.rs b/src/metric-engine/src/engine/create.rs
index 8a7ee0006ced..a7e3c5c3647c 100644
--- a/src/metric-engine/src/engine/create.rs
+++ b/src/metric-engine/src/engine/create.rs
@@ -557,12 +557,12 @@ mod test {
let mut options = HashMap::new();
options.insert(PHYSICAL_TABLE_METADATA_KEY.to_string(), "value".to_string());
- request.options = options.clone();
+ request.options.clone_from(&options);
let result = MetricEngineInner::verify_region_create_request(&request);
assert!(result.is_ok());
options.insert(LOGICAL_TABLE_METADATA_KEY.to_string(), "value".to_string());
- request.options = options.clone();
+ request.options.clone_from(&options);
let result = MetricEngineInner::verify_region_create_request(&request);
assert!(result.is_err());
@@ -620,7 +620,7 @@ mod test {
data_region_request.primary_key,
vec![ReservedColumnId::table_id(), ReservedColumnId::tsid(), 1]
);
- assert!(data_region_request.options.get("ttl").is_some());
+ assert!(data_region_request.options.contains_key("ttl"));
// check create metadata region request
let metadata_region_request = engine_inner.create_request_for_metadata_region(&request);
@@ -628,6 +628,6 @@ mod test {
metadata_region_request.region_dir,
"/test_dir/metadata/".to_string()
);
- assert!(metadata_region_request.options.get("ttl").is_none());
+ assert!(!metadata_region_request.options.contains_key("ttl"));
}
}
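The `get(..).is_some()` and `get(..).is_none()` checks become `contains_key`, which is what clippy's `unnecessary_get_then_check` lint asks for. A minimal sketch with made-up keys:

use std::collections::HashMap;

fn main() {
    let mut options: HashMap<String, String> = HashMap::new();
    options.insert("ttl".to_string(), "7d".to_string());
    assert!(options.contains_key("ttl"));    // was: options.get("ttl").is_some()
    assert!(!options.contains_key("other")); // was: options.get("other").is_none()
}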
diff --git a/src/mito2/src/cache/cache_size.rs b/src/mito2/src/cache/cache_size.rs
index 3d79bcbe5801..2b63441e647f 100644
--- a/src/mito2/src/cache/cache_size.rs
+++ b/src/mito2/src/cache/cache_size.rs
@@ -137,6 +137,6 @@ mod tests {
fn test_parquet_meta_size() {
let metadata = parquet_meta();
- assert_eq!(956, parquet_meta_size(&metadata));
+ assert_eq!(964, parquet_meta_size(&metadata));
}
}
diff --git a/src/mito2/src/flush.rs b/src/mito2/src/flush.rs
index f3890cafda92..bdf27bdafdc4 100644
--- a/src/mito2/src/flush.rs
+++ b/src/mito2/src/flush.rs
@@ -497,9 +497,7 @@ impl FlushScheduler {
&mut self,
region_id: RegionId,
) -> Option<(Vec<SenderDdlRequest>, Vec<SenderWriteRequest>)> {
- let Some(flush_status) = self.region_status.get_mut(®ion_id) else {
- return None;
- };
+ let flush_status = self.region_status.get_mut(®ion_id)?;
// This region doesn't have running flush job.
flush_status.flushing = false;
diff --git a/src/mito2/src/region/options.rs b/src/mito2/src/region/options.rs
index a56976874bc0..5a2df0d8cc56 100644
--- a/src/mito2/src/region/options.rs
+++ b/src/mito2/src/region/options.rs
@@ -440,7 +440,7 @@ mod tests {
// No need to add compatible tests for RegionOptions since the above tests already check for compatibility.
#[test]
fn test_with_any_wal_options() {
- let all_wal_options = vec![
+ let all_wal_options = [
WalOptions::RaftEngine,
WalOptions::Kafka(KafkaWalOptions {
topic: "test_topic".to_string(),
diff --git a/src/mito2/src/sst/parquet/row_selection.rs b/src/mito2/src/sst/parquet/row_selection.rs
index 93accf11ac01..5ab961c22349 100644
--- a/src/mito2/src/sst/parquet/row_selection.rs
+++ b/src/mito2/src/sst/parquet/row_selection.rs
@@ -82,7 +82,7 @@ mod tests {
#[test]
fn test_non_contiguous_ranges() {
- let ranges = vec![1..3, 5..8];
+ let ranges = [1..3, 5..8];
let (selection, skipped) = row_selection_from_row_ranges(ranges.iter().cloned(), 10);
let expected = RowSelection::from(vec![
RowSelector::skip(1),
@@ -96,7 +96,7 @@ mod tests {
#[test]
fn test_empty_range() {
- let ranges = vec![];
+ let ranges = [];
let (selection, skipped) = row_selection_from_row_ranges(ranges.iter().cloned(), 10);
let expected = RowSelection::from(vec![]);
assert_eq!(selection, expected);
@@ -105,7 +105,7 @@ mod tests {
#[test]
fn test_adjacent_ranges() {
- let ranges = vec![1..2, 2..3];
+ let ranges = [1..2, 2..3];
let (selection, skipped) = row_selection_from_row_ranges(ranges.iter().cloned(), 10);
let expected = RowSelection::from(vec![RowSelector::skip(1), RowSelector::select(2)]);
assert_eq!(selection, expected);
@@ -114,7 +114,7 @@ mod tests {
#[test]
fn test_large_gap_between_ranges() {
- let ranges = vec![1..2, 100..101];
+ let ranges = [1..2, 100..101];
let (selection, skipped) = row_selection_from_row_ranges(ranges.iter().cloned(), 10240);
let expected = RowSelection::from(vec![
RowSelector::skip(1),
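Turning `vec![...]` literals that are only iterated into plain arrays follows clippy's `useless_vec` lint: the array lives on the stack and still offers `iter()`. A small sketch:

fn main() {
    let ranges = [1..3, 5..8]; // was: vec![1..3, 5..8]
    let selected: usize = ranges.iter().cloned().map(|r| r.len()).sum();
    assert_eq!(selected, 5);
}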
diff --git a/src/operator/src/statement/copy_table_from.rs b/src/operator/src/statement/copy_table_from.rs
index cb922742ff7b..378c3af31745 100644
--- a/src/operator/src/statement/copy_table_from.rs
+++ b/src/operator/src/statement/copy_table_from.rs
@@ -15,7 +15,6 @@
use std::collections::HashMap;
use std::future::Future;
use std::sync::Arc;
-use std::usize;
use client::{Output, OutputData, OutputMeta};
use common_base::readable_size::ReadableSize;
diff --git a/src/promql/src/planner.rs b/src/promql/src/planner.rs
index daa11025e440..67d5614093cd 100644
--- a/src/promql/src/planner.rs
+++ b/src/promql/src/planner.rs
@@ -791,7 +791,7 @@ impl PromPlanner {
}
// change the tag columns in context
- self.ctx.tag_columns = labels.labels.clone();
+ self.ctx.tag_columns.clone_from(&labels.labels);
// add timestamp column
exprs.push(self.create_time_index_column_expr()?);
diff --git a/src/query/src/optimizer.rs b/src/query/src/optimizer.rs
index cfce77f039d8..fc99f8be12b8 100644
--- a/src/query/src/optimizer.rs
+++ b/src/query/src/optimizer.rs
@@ -29,9 +29,6 @@ pub trait ExtensionAnalyzerRule {
ctx: &QueryEngineContext,
config: &ConfigOptions,
) -> Result<LogicalPlan>;
-
- /// A human readable name for this analyzer rule
- fn name(&self) -> &str;
}
pub mod order_hint;
diff --git a/src/query/src/optimizer/type_conversion.rs b/src/query/src/optimizer/type_conversion.rs
index 883d1aff5fa8..87d0dc29e8cd 100644
--- a/src/query/src/optimizer/type_conversion.rs
+++ b/src/query/src/optimizer/type_conversion.rs
@@ -121,10 +121,6 @@ impl ExtensionAnalyzerRule for TypeConversionRule {
})
.map(|x| x.data)
}
-
- fn name(&self) -> &str {
- "TypeConversionRule"
- }
}
struct TypeConverter {
diff --git a/src/query/src/tests/time_range_filter_test.rs b/src/query/src/tests/time_range_filter_test.rs
index b47ecce99f65..9c19b1f9b15a 100644
--- a/src/query/src/tests/time_range_filter_test.rs
+++ b/src/query/src/tests/time_range_filter_test.rs
@@ -58,7 +58,7 @@ struct DataSourceWrapper {
impl DataSource for DataSourceWrapper {
fn get_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream, BoxedError> {
- *self.filter.write().unwrap() = request.filters.clone();
+ self.filter.write().unwrap().clone_from(&request.filters);
self.inner.get_stream(request)
}
}
diff --git a/src/script/src/python/rspython/copr_impl.rs b/src/script/src/python/rspython/copr_impl.rs
index 89f12f66786b..1d5cb4e3bdf4 100644
--- a/src/script/src/python/rspython/copr_impl.rs
+++ b/src/script/src/python/rspython/copr_impl.rs
@@ -38,7 +38,7 @@ use crate::python::rspython::dataframe_impl::data_frame::set_dataframe_in_scope;
use crate::python::rspython::dataframe_impl::init_data_frame;
use crate::python::rspython::utils::{format_py_error, is_instance, py_obj_to_vec};
-thread_local!(static INTERPRETER: RefCell<Option<Rc<Interpreter>>> = RefCell::new(None));
+thread_local!(static INTERPRETER: RefCell<Option<Rc<Interpreter>>> = const { RefCell::new(None) });
/// Using `RustPython` to run a parsed `Coprocessor` struct as input to execute python code
pub(crate) fn rspy_exec_parsed(
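Wrapping the initializer in a `const { ... }` block satisfies clippy's `thread_local_initializer_can_be_made_const` and avoids the lazy runtime initialization check. A minimal sketch with a made-up thread-local:

use std::cell::RefCell;

thread_local!(static COUNTER: RefCell<u64> = const { RefCell::new(0) });

fn main() {
    COUNTER.with(|c| *c.borrow_mut() += 1);
    COUNTER.with(|c| assert_eq!(*c.borrow(), 1));
}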
diff --git a/src/sql/src/statements/explain.rs b/src/sql/src/statements/explain.rs
index fe953d66184d..5b3a2671f939 100644
--- a/src/sql/src/statements/explain.rs
+++ b/src/sql/src/statements/explain.rs
@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::fmt::{Display, Formatter};
+
use sqlparser::ast::Statement as SpStatement;
use sqlparser_derive::{Visit, VisitMut};
@@ -31,8 +33,8 @@ impl TryFrom<SpStatement> for Explain {
}
}
-impl ToString for Explain {
- fn to_string(&self) -> String {
- self.inner.to_string()
+impl Display for Explain {
+ fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+ write!(f, "{}", self.inner)
}
}
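Implementing `Display` instead of `ToString` is the form clippy's `to_string_trait_impl` lint expects; the blanket `impl<T: Display> ToString for T` then supplies `to_string()` for free. A sketch with a made-up wrapper type:

use std::fmt::{Display, Formatter};

struct Wrapper(String);

impl Display for Wrapper {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.0)
    }
}

fn main() {
    let w = Wrapper("EXPLAIN SELECT 1".to_string());
    // Provided by the blanket ToString impl over Display.
    assert_eq!(w.to_string(), "EXPLAIN SELECT 1");
    assert_eq!(format!("{w}"), "EXPLAIN SELECT 1");
}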
diff --git a/src/table/src/predicate.rs b/src/table/src/predicate.rs
index f7047926265a..aecb96f267ee 100644
--- a/src/table/src/predicate.rs
+++ b/src/table/src/predicate.rs
@@ -573,6 +573,7 @@ mod tests {
let file = std::fs::OpenOptions::new()
.write(true)
.create(true)
+ .truncate(true)
.open(path.clone())
.unwrap();
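Adding `truncate(true)` addresses clippy's `suspicious_open_options`: with only `create(true)` and `write(true)`, it is ambiguous whether an existing file should be truncated or appended to. A sketch with a hypothetical path:

use std::fs::OpenOptions;
use std::io::Write;

fn main() -> std::io::Result<()> {
    let mut file = OpenOptions::new()
        .write(true)
        .create(true)
        .truncate(true) // make the overwrite intent explicit
        .open("/tmp/example.parquet")?;
    file.write_all(b"example")?;
    Ok(())
}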
diff --git a/tests-fuzz/src/executor.rs b/tests-fuzz/src/executor.rs
deleted file mode 100644
index 064bd0ce3486..000000000000
--- a/tests-fuzz/src/executor.rs
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use std::fmt;
-
-#[async_trait::async_trait]
-pub(crate) trait DslExecutor<T, U> {
- type Error: Sync + Send + fmt::Debug;
-
- async fn execute(&self, input: &T) -> Result<U, Self::Error>;
-}
diff --git a/tests-fuzz/src/generator/insert_expr.rs b/tests-fuzz/src/generator/insert_expr.rs
index ec4ba4f61577..f3f0dba11646 100644
--- a/tests-fuzz/src/generator/insert_expr.rs
+++ b/tests-fuzz/src/generator/insert_expr.rs
@@ -50,7 +50,7 @@ impl<R: Rng + 'static> Generator<InsertIntoExpr, R> for InsertExprGenerator<R> {
let mut values_columns = vec![];
if omit_column_list {
// If omit column list, then all columns are required in the values list
- values_columns = self.table_ctx.columns.clone();
+ values_columns.clone_from(&self.table_ctx.columns);
} else {
for column in &self.table_ctx.columns {
let can_omit = column.is_nullable() || column.has_default_value();
diff --git a/tests-fuzz/src/lib.rs b/tests-fuzz/src/lib.rs
index 406927d6b46b..8900350224aa 100644
--- a/tests-fuzz/src/lib.rs
+++ b/tests-fuzz/src/lib.rs
@@ -12,11 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#![feature(associated_type_bounds)]
-
pub mod context;
pub mod error;
-pub mod executor;
pub mod fake;
pub mod generator;
pub mod ir;
diff --git a/tests/runner/src/util.rs b/tests/runner/src/util.rs
index 5ae63ede37c7..04c336e1485c 100644
--- a/tests/runner/src/util.rs
+++ b/tests/runner/src/util.rs
@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::fmt::Display;
use std::net::SocketAddr;
use std::path::PathBuf;
use std::process::Command;
@@ -24,42 +23,6 @@ use tokio::time;
/// Check port every 0.1 second.
const PORT_CHECK_INTERVAL: Duration = Duration::from_millis(100);
-const NULL_DATA_PLACEHOLDER: &str = "NULL";
-
-/// Helper struct for iterate over column with null_mask
-struct NullableColumnIter<N, B, D, T>
-where
- N: Iterator<Item = B>,
- B: AsRef<bool>,
- D: Iterator<Item = T>,
- T: Display,
-{
- null_iter: N,
- data_iter: D,
-}
-
-impl<N, B, D, T> Iterator for NullableColumnIter<N, B, D, T>
-where
- N: Iterator<Item = B>,
- B: AsRef<bool>,
- D: Iterator<Item = T>,
- T: Display,
-{
- type Item = String;
-
- fn next(&mut self) -> Option<Self::Item> {
- // iter the null_mask first
- if let Some(is_null) = self.null_iter.next() {
- if *is_null.as_ref() {
- Some(NULL_DATA_PLACEHOLDER.to_string())
- } else {
- self.data_iter.next().map(|data| data.to_string())
- }
- } else {
- None
- }
- }
-}
/// Get the dir of test cases. This function only works when the runner is run
/// under the project's dir because it depends on some envs set by cargo.
build
update toolchain to nightly-2024-04-18 (#3740)