| hash (string, length 40) | date (2022-04-19 15:26:27 – 2025-03-21 10:49:23) | author (string, 86 classes) | commit_message (string, length 12–115) | is_merge (bool, 1 class) | git_diff (string, length 214–553k, nullable) | type (string, 15 classes) | masked_commit_message (string, length 8–110) |
|---|---|---|---|---|---|---|---|
197c34bc176641fa749da63d4fda0d37036e0858
|
2023-04-25 17:37:57
|
Lei, HUANG
|
fix: grpc client keepalive (#1461)
| false
|
diff --git a/src/common/grpc/src/channel_manager.rs b/src/common/grpc/src/channel_manager.rs
index 70e70893d4c3..439e3e2fe955 100644
--- a/src/common/grpc/src/channel_manager.rs
+++ b/src/common/grpc/src/channel_manager.rs
@@ -230,9 +230,9 @@ impl Default for ChannelConfig {
rate_limit: None,
initial_stream_window_size: None,
initial_connection_window_size: None,
- http2_keep_alive_interval: None,
+ http2_keep_alive_interval: Some(Duration::from_secs(30)),
http2_keep_alive_timeout: None,
- http2_keep_alive_while_idle: None,
+ http2_keep_alive_while_idle: Some(true),
http2_adaptive_window: None,
tcp_keepalive: None,
tcp_nodelay: true,
@@ -497,9 +497,9 @@ mod tests {
rate_limit: None,
initial_stream_window_size: None,
initial_connection_window_size: None,
- http2_keep_alive_interval: None,
+ http2_keep_alive_interval: Some(Duration::from_secs(30)),
http2_keep_alive_timeout: None,
- http2_keep_alive_while_idle: None,
+ http2_keep_alive_while_idle: Some(true),
http2_adaptive_window: None,
tcp_keepalive: None,
tcp_nodelay: true,
|
fix
|
grpc client keepalive (#1461)
|
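The commit above changes `ChannelConfig`'s defaults so the gRPC client sends HTTP/2 keepalive pings every 30 seconds, even while the connection is idle, instead of never. A minimal sketch of what those defaults mean when building a client channel, assuming tonic's `Endpoint` API; the helper name and address are illustrative, not part of the diff:

```rust
use std::time::Duration;
use tonic::transport::Endpoint;

// Sketch only: an endpoint configured like the new ChannelConfig defaults.
// An HTTP/2 PING goes out every 30s and keeps going while the connection is
// idle, so a dead peer is detected instead of the channel hanging forever.
fn keepalive_endpoint() -> Endpoint {
    Endpoint::from_static("http://127.0.0.1:4001")
        .http2_keep_alive_interval(Duration::from_secs(30))
        .keep_alive_while_idle(true)
        .tcp_nodelay(true)
}
```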
7d77913e88505f16614a106304c10ad8f7751b9a
|
2023-02-07 06:17:06
|
elijah
|
chore: fix rfc typo (#952)
| false
|
diff --git a/docs/rfcs/2023-02-01-table-compaction.md b/docs/rfcs/2023-02-01-table-compaction.md
index 585151f5e1e5..645bf2d440b8 100644
--- a/docs/rfcs/2023-02-01-table-compaction.md
+++ b/docs/rfcs/2023-02-01-table-compaction.md
@@ -82,7 +82,7 @@ We can first group SSTs in level n into buckets according to some predefined tim
SSTs are compacted in a size-tired manner (find SSTs with similar size and compact them to level n+1).
SSTs from different time windows are neven compacted together.
That strategy guarantees SSTs in each level are mainly sorted in timestamp order which boosts queries with
-explict timestamp condition, while size-tired compaction minimizes the impact to foreground writes.
+explicit timestamp condition, while size-tired compaction minimizes the impact to foreground writes.
### Alternatives
|
chore
|
fix rfc typo (#952)
|
777bc3b89da75313e2059af3f4fd3008f1726f5d
|
2024-04-30 15:10:11
|
Yingwen
|
fix: compiler warnings on Windows (#3844)
| false
|
diff --git a/src/common/datasource/src/object_store.rs b/src/common/datasource/src/object_store.rs
index c9e36018c22b..d2ed0a4ad82d 100644
--- a/src/common/datasource/src/object_store.rs
+++ b/src/common/datasource/src/object_store.rs
@@ -35,7 +35,7 @@ pub fn parse_url(url: &str) -> Result<(String, Option<String>, String)> {
#[cfg(windows)]
{
// On Windows, the url may start with `C:/`.
- if let Some(_) = handle_windows_path(url) {
+ if handle_windows_path(url).is_some() {
return Ok((FS_SCHEMA.to_string(), None, url.to_string()));
}
}
diff --git a/src/metric-engine/src/test_util.rs b/src/metric-engine/src/test_util.rs
index 2da459419d65..79a523bb5758 100644
--- a/src/metric-engine/src/test_util.rs
+++ b/src/metric-engine/src/test_util.rs
@@ -291,7 +291,8 @@ pub fn build_rows(num_tags: usize, num_rows: usize) -> Vec<Row> {
#[cfg(test)]
mod test {
-
+ use object_store::services::Fs;
+ use object_store::ObjectStore;
use store_api::metric_engine_consts::{DATA_REGION_SUBDIR, METADATA_REGION_SUBDIR};
use super::*;
@@ -302,21 +303,21 @@ mod test {
let env = TestEnv::new().await;
env.init_metric_region().await;
let region_id = to_metadata_region_id(env.default_physical_region_id());
- let region_dir = join_dir(&env.data_home(), "test_metric_region");
- // `join_dir` doesn't suit windows path
- #[cfg(not(target_os = "windows"))]
- {
- // assert metadata region's dir
- let metadata_region_dir = join_dir(&region_dir, METADATA_REGION_SUBDIR);
- let exist = tokio::fs::try_exists(metadata_region_dir).await.unwrap();
- assert!(exist);
+ let mut builder = Fs::default();
+ builder.root(&env.data_home());
+ let object_store = ObjectStore::new(builder).unwrap().finish();
- // assert data region's dir
- let data_region_dir = join_dir(&region_dir, DATA_REGION_SUBDIR);
- let exist = tokio::fs::try_exists(data_region_dir).await.unwrap();
- assert!(exist);
- }
+ let region_dir = "test_metric_region";
+ // assert metadata region's dir
+ let metadata_region_dir = join_dir(region_dir, METADATA_REGION_SUBDIR);
+ let exist = object_store.is_exist(&metadata_region_dir).await.unwrap();
+ assert!(exist);
+
+ // assert data region's dir
+ let data_region_dir = join_dir(region_dir, DATA_REGION_SUBDIR);
+ let exist = object_store.is_exist(&data_region_dir).await.unwrap();
+ assert!(exist);
// check mito engine
let metadata_region_id = utils::to_metadata_region_id(region_id);
|
fix
|
compiler warnings on Windows (#3844)
|
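The first hunk of the commit above swaps `if let Some(_) = handle_windows_path(url)` for `handle_windows_path(url).is_some()`, the more direct form that lints flag when the bound value is discarded. A tiny standalone sketch of the idiom; `has_scheme` is a hypothetical example, not code from the diff:

```rust
// Matching on `Some(_)` just to throw the value away is redundant;
// `.is_some()` expresses the same presence check directly.
fn has_scheme(url: &str) -> bool {
    url.find("://").is_some()
}

fn main() {
    assert!(has_scheme("s3://bucket/key"));
    assert!(!has_scheme(r"C:\data\file.parquet"));
}
```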
fbd5316fdbc699a6ae2a0043a9a70904997c4b00
|
2025-02-13 10:44:39
|
yihong
|
perf: better performance for LastNonNullIter close #5229 about 10x times faster (#5518)
| false
|
diff --git a/src/mito2/src/read/dedup.rs b/src/mito2/src/read/dedup.rs
index a29781b94746..678c03fbf66d 100644
--- a/src/mito2/src/read/dedup.rs
+++ b/src/mito2/src/read/dedup.rs
@@ -502,6 +502,9 @@ pub(crate) struct LastNonNullIter<I> {
/// fetch a new batch.
/// The batch is always not empty.
current_batch: Option<Batch>,
+ /// The index of the current row in the current batch.
+ /// more to check issue #5229.
+ current_index: usize,
}
impl<I> LastNonNullIter<I> {
@@ -513,20 +516,9 @@ impl<I> LastNonNullIter<I> {
strategy: LastNonNull::new(false),
metrics: DedupMetrics::default(),
current_batch: None,
+ current_index: 0,
}
}
-
- /// Finds the index of the first row that has the same timestamp with the next row.
- /// If no duplicate rows, returns None.
- fn find_split_index(batch: &Batch) -> Option<usize> {
- if batch.num_rows() < 2 {
- return None;
- }
-
- // Safety: The batch is not empty.
- let timestamps = batch.timestamps_native().unwrap();
- timestamps.windows(2).position(|t| t[0] == t[1])
- }
}
impl<I: Iterator<Item = Result<Batch>>> LastNonNullIter<I> {
@@ -541,6 +533,7 @@ impl<I: Iterator<Item = Result<Batch>>> LastNonNullIter<I> {
};
self.current_batch = iter.next().transpose()?;
+ self.current_index = 0;
if self.current_batch.is_none() {
// The iterator is exhausted.
self.iter = None;
@@ -549,17 +542,21 @@ impl<I: Iterator<Item = Result<Batch>>> LastNonNullIter<I> {
}
if let Some(batch) = &self.current_batch {
- let Some(index) = Self::find_split_index(batch) else {
- // No duplicate rows in the current batch.
- return Ok(self.current_batch.take());
- };
-
- let first = batch.slice(0, index + 1);
- let batch = batch.slice(index + 1, batch.num_rows() - index - 1);
- // `index` is Some indicates that the batch has at least one row remaining.
- debug_assert!(!batch.is_empty());
- self.current_batch = Some(batch);
- return Ok(Some(first));
+ let n = batch.num_rows();
+ // Safety: The batch is not empty when accessed.
+ let timestamps = batch.timestamps_native().unwrap();
+ let mut pos = self.current_index;
+ while pos + 1 < n && timestamps[pos] != timestamps[pos + 1] {
+ pos += 1;
+ }
+ let segment = batch.slice(self.current_index, pos - self.current_index + 1);
+ if pos + 1 < n && timestamps[pos] == timestamps[pos + 1] {
+ self.current_index = pos + 1;
+ } else {
+ self.current_batch = None;
+ self.current_index = 0;
+ }
+ return Ok(Some(segment));
}
Ok(None)
|
perf
|
better performance for LastNonNullIter close #5229 about 10x times faster (#5518)
|
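The rewrite above replaces repeated batch slicing with a `current_index` cursor: each call scans forward until two consecutive rows share a timestamp and emits everything up to and including the first of the pair as one segment. A standalone sketch of that splitting rule over a plain timestamp slice; `next_segment` is a hypothetical helper, not the mito2 code:

```rust
/// Sketch of the segment rule: starting at `start`, advance while consecutive
/// timestamps differ; return the exclusive end of the segment and whether the
/// scan stopped at a duplicate-timestamp boundary.
fn next_segment(timestamps: &[i64], start: usize) -> (usize, bool) {
    let n = timestamps.len();
    let mut pos = start;
    while pos + 1 < n && timestamps[pos] != timestamps[pos + 1] {
        pos += 1;
    }
    let hit_duplicate = pos + 1 < n && timestamps[pos] == timestamps[pos + 1];
    (pos + 1, hit_duplicate)
}

fn main() {
    // Rows with timestamps 1, 2, 3 form the first segment; the second `3`
    // begins the next one, mirroring how the iterator advances current_index.
    assert_eq!(next_segment(&[1, 2, 3, 3, 4], 0), (3, true));
    assert_eq!(next_segment(&[3, 4], 0), (2, false));
}
```

With the cursor, each batch is scanned once and only the emitted segment is sliced, rather than re-slicing the remainder of the batch after every duplicate as the old `find_split_index` path did.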
e375060b73b00c55db66820ea800c6c2c49515d3
|
2024-01-31 12:51:30
|
LFC
|
refactor: add same SST files (#3270)
| false
|
diff --git a/src/mito2/src/sst/version.rs b/src/mito2/src/sst/version.rs
index c690e8e0860a..dc388c742d73 100644
--- a/src/mito2/src/sst/version.rs
+++ b/src/mito2/src/sst/version.rs
@@ -55,10 +55,10 @@ impl SstVersion {
) {
for file in files_to_add {
let level = file.level;
- let handle = FileHandle::new(file, file_purger.clone());
- let file_id = handle.file_id();
- let old = self.levels[level as usize].files.insert(file_id, handle);
- assert!(old.is_none(), "Adds an existing file: {file_id}");
+ self.levels[level as usize]
+ .files
+ .entry(file.file_id)
+ .or_insert_with(|| FileHandle::new(file, file_purger.clone()));
}
}
@@ -157,3 +157,32 @@ fn new_level_meta_vec() -> LevelMetaArray {
.try_into()
.unwrap() // safety: LevelMetaArray is a fixed length array with length MAX_LEVEL
}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::test_util::new_noop_file_purger;
+
+ #[test]
+ fn test_add_files() {
+ let purger = new_noop_file_purger();
+
+ let files = (1..=3)
+ .map(|_| FileMeta {
+ file_id: FileId::random(),
+ ..Default::default()
+ })
+ .collect::<Vec<_>>();
+
+ let mut version = SstVersion::new();
+ // files[1] is added multiple times, and that's ok.
+ version.add_files(purger.clone(), files[..=1].iter().cloned());
+ version.add_files(purger, files[1..].iter().cloned());
+
+ let added_files = &version.levels()[0].files;
+ assert_eq!(added_files.len(), 3);
+ files.iter().for_each(|f| {
+ assert!(added_files.contains_key(&f.file_id));
+ });
+ }
+}
|
refactor
|
add same SST files (#3270)
|
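The refactor above makes `SstVersion::add_files` idempotent: instead of asserting that a file id was not already present, it goes through the `HashMap` entry API so a repeated add is silently ignored. A minimal standalone sketch of the idiom, with `u64`/`String` standing in for `FileId`/`FileHandle`:

```rust
use std::collections::HashMap;

// or_insert_with only constructs and stores the value when the key is absent,
// so adding the same id twice keeps the first handle instead of panicking
// like the old `assert!(old.is_none(), ...)` did.
fn add_file(files: &mut HashMap<u64, String>, file_id: u64) {
    files
        .entry(file_id)
        .or_insert_with(|| format!("handle-for-{file_id}"));
}

fn main() {
    let mut files = HashMap::new();
    add_file(&mut files, 1);
    add_file(&mut files, 1); // duplicate add is a no-op
    add_file(&mut files, 2);
    assert_eq!(files.len(), 2);
}
```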
d83279567bc8f42140c20de8f53d9d68165d9103
|
2024-03-27 06:49:18
|
tison
|
feat(auth): watch file user provider (#3566)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index eeb75e0f0774..cb732d1ab0cb 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -687,8 +687,10 @@ dependencies = [
"async-trait",
"common-error",
"common-macro",
+ "common-telemetry",
"common-test-util",
"digest",
+ "notify",
"secrecy",
"sha1",
"snafu",
diff --git a/Cargo.toml b/Cargo.toml
index cebad1ef89b2..54354000b451 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -110,6 +110,7 @@ lazy_static = "1.4"
meter-core = { git = "https://github.com/GreptimeTeam/greptime-meter.git", rev = "80b72716dcde47ec4161478416a5c6c21343364d" }
mockall = "0.11.4"
moka = "0.12"
+notify = "6.1"
num_cpus = "1.16"
once_cell = "1.18"
opentelemetry-proto = { git = "https://github.com/waynexia/opentelemetry-rust.git", rev = "33841b38dda79b15f2024952be5f32533325ca02", features = [
diff --git a/src/auth/Cargo.toml b/src/auth/Cargo.toml
index a3e9f199a1b1..c10a38e86f83 100644
--- a/src/auth/Cargo.toml
+++ b/src/auth/Cargo.toml
@@ -16,7 +16,9 @@ api.workspace = true
async-trait.workspace = true
common-error.workspace = true
common-macro.workspace = true
+common-telemetry.workspace = true
digest = "0.10"
+notify.workspace = true
secrecy = { version = "0.8", features = ["serde", "alloc"] }
sha1 = "0.10"
snafu.workspace = true
diff --git a/src/auth/src/common.rs b/src/auth/src/common.rs
index 109a98175d4c..d8b70cea689c 100644
--- a/src/auth/src/common.rs
+++ b/src/auth/src/common.rs
@@ -22,6 +22,9 @@ use snafu::{ensure, OptionExt};
use crate::error::{IllegalParamSnafu, InvalidConfigSnafu, Result, UserPasswordMismatchSnafu};
use crate::user_info::DefaultUserInfo;
use crate::user_provider::static_user_provider::{StaticUserProvider, STATIC_USER_PROVIDER};
+use crate::user_provider::watch_file_user_provider::{
+ WatchFileUserProvider, WATCH_FILE_USER_PROVIDER,
+};
use crate::{UserInfoRef, UserProviderRef};
pub(crate) const DEFAULT_USERNAME: &str = "greptime";
@@ -43,6 +46,9 @@ pub fn user_provider_from_option(opt: &String) -> Result<UserProviderRef> {
StaticUserProvider::new(content).map(|p| Arc::new(p) as UserProviderRef)?;
Ok(provider)
}
+ WATCH_FILE_USER_PROVIDER => {
+ WatchFileUserProvider::new(content).map(|p| Arc::new(p) as UserProviderRef)
+ }
_ => InvalidConfigSnafu {
value: name.to_string(),
msg: "Invalid UserProviderOption",
diff --git a/src/auth/src/error.rs b/src/auth/src/error.rs
index 529d71165920..bb9f37e83b60 100644
--- a/src/auth/src/error.rs
+++ b/src/auth/src/error.rs
@@ -64,6 +64,13 @@ pub enum Error {
username: String,
},
+ #[snafu(display("Failed to initialize a watcher for file {}", path))]
+ FileWatch {
+ path: String,
+ #[snafu(source)]
+ error: notify::Error,
+ },
+
#[snafu(display("User is not authorized to perform this action"))]
PermissionDenied { location: Location },
}
@@ -73,6 +80,7 @@ impl ErrorExt for Error {
match self {
Error::InvalidConfig { .. } => StatusCode::InvalidArguments,
Error::IllegalParam { .. } => StatusCode::InvalidArguments,
+ Error::FileWatch { .. } => StatusCode::InvalidArguments,
Error::InternalState { .. } => StatusCode::Unexpected,
Error::Io { .. } => StatusCode::Internal,
Error::AuthBackend { .. } => StatusCode::Internal,
diff --git a/src/auth/src/user_provider.rs b/src/auth/src/user_provider.rs
index 1acf499a8d4c..4fab604e62ce 100644
--- a/src/auth/src/user_provider.rs
+++ b/src/auth/src/user_provider.rs
@@ -13,10 +13,24 @@
// limitations under the License.
pub(crate) mod static_user_provider;
+pub(crate) mod watch_file_user_provider;
+
+use std::collections::HashMap;
+use std::fs::File;
+use std::io;
+use std::io::BufRead;
+use std::path::Path;
+
+use secrecy::ExposeSecret;
+use snafu::{ensure, OptionExt, ResultExt};
use crate::common::{Identity, Password};
-use crate::error::Result;
-use crate::UserInfoRef;
+use crate::error::{
+ IllegalParamSnafu, InvalidConfigSnafu, IoSnafu, Result, UnsupportedPasswordTypeSnafu,
+ UserNotFoundSnafu, UserPasswordMismatchSnafu,
+};
+use crate::user_info::DefaultUserInfo;
+use crate::{auth_mysql, UserInfoRef};
#[async_trait::async_trait]
pub trait UserProvider: Send + Sync {
@@ -44,3 +58,88 @@ pub trait UserProvider: Send + Sync {
Ok(user_info)
}
}
+
+fn load_credential_from_file(filepath: &str) -> Result<Option<HashMap<String, Vec<u8>>>> {
+ // check valid path
+ let path = Path::new(filepath);
+ if !path.exists() {
+ return Ok(None);
+ }
+
+ ensure!(
+ path.is_file(),
+ InvalidConfigSnafu {
+ value: filepath,
+ msg: "UserProvider file must be a file",
+ }
+ );
+ let file = File::open(path).context(IoSnafu)?;
+ let credential = io::BufReader::new(file)
+ .lines()
+ .map_while(std::result::Result::ok)
+ .filter_map(|line| {
+ if let Some((k, v)) = line.split_once('=') {
+ Some((k.to_string(), v.as_bytes().to_vec()))
+ } else {
+ None
+ }
+ })
+ .collect::<HashMap<String, Vec<u8>>>();
+
+ ensure!(
+ !credential.is_empty(),
+ InvalidConfigSnafu {
+ value: filepath,
+ msg: "UserProvider's file must contains at least one valid credential",
+ }
+ );
+
+ Ok(Some(credential))
+}
+
+fn authenticate_with_credential(
+ users: &HashMap<String, Vec<u8>>,
+ input_id: Identity<'_>,
+ input_pwd: Password<'_>,
+) -> Result<UserInfoRef> {
+ match input_id {
+ Identity::UserId(username, _) => {
+ ensure!(
+ !username.is_empty(),
+ IllegalParamSnafu {
+ msg: "blank username"
+ }
+ );
+ let save_pwd = users.get(username).context(UserNotFoundSnafu {
+ username: username.to_string(),
+ })?;
+
+ match input_pwd {
+ Password::PlainText(pwd) => {
+ ensure!(
+ !pwd.expose_secret().is_empty(),
+ IllegalParamSnafu {
+ msg: "blank password"
+ }
+ );
+ if save_pwd == pwd.expose_secret().as_bytes() {
+ Ok(DefaultUserInfo::with_name(username))
+ } else {
+ UserPasswordMismatchSnafu {
+ username: username.to_string(),
+ }
+ .fail()
+ }
+ }
+ Password::MysqlNativePassword(auth_data, salt) => {
+ auth_mysql(auth_data, salt, username, save_pwd)
+ .map(|_| DefaultUserInfo::with_name(username))
+ }
+ Password::PgMD5(_, _) => UnsupportedPasswordTypeSnafu {
+ password_type: "pg_md5",
+ }
+ .fail(),
+ }
+ }
+ }
+}
diff --git a/src/auth/src/user_provider/static_user_provider.rs b/src/auth/src/user_provider/static_user_provider.rs
index e6d474389431..9e0567121920 100644
--- a/src/auth/src/user_provider/static_user_provider.rs
+++ b/src/auth/src/user_provider/static_user_provider.rs
@@ -13,21 +13,13 @@
// limitations under the License.
use std::collections::HashMap;
-use std::fs::File;
-use std::io;
-use std::io::BufRead;
-use std::path::Path;
use async_trait::async_trait;
-use secrecy::ExposeSecret;
-use snafu::{ensure, OptionExt, ResultExt};
+use snafu::OptionExt;
-use crate::error::{
- IllegalParamSnafu, InvalidConfigSnafu, IoSnafu, Result, UnsupportedPasswordTypeSnafu,
- UserNotFoundSnafu, UserPasswordMismatchSnafu,
-};
-use crate::user_info::DefaultUserInfo;
-use crate::{auth_mysql, Identity, Password, UserInfoRef, UserProvider};
+use crate::error::{InvalidConfigSnafu, Result};
+use crate::user_provider::{authenticate_with_credential, load_credential_from_file};
+use crate::{Identity, Password, UserInfoRef, UserProvider};
pub(crate) const STATIC_USER_PROVIDER: &str = "static_user_provider";
@@ -43,32 +35,12 @@ impl StaticUserProvider {
})?;
return match mode {
"file" => {
- // check valid path
- let path = Path::new(content);
- ensure!(path.exists() && path.is_file(), InvalidConfigSnafu {
- value: content.to_string(),
- msg: "StaticUserProviderOption file must be a valid file path",
- });
-
- let file = File::open(path).context(IoSnafu)?;
- let credential = io::BufReader::new(file)
- .lines()
- .map_while(std::result::Result::ok)
- .filter_map(|line| {
- if let Some((k, v)) = line.split_once('=') {
- Some((k.to_string(), v.as_bytes().to_vec()))
- } else {
- None
- }
- })
- .collect::<HashMap<String, Vec<u8>>>();
-
- ensure!(!credential.is_empty(), InvalidConfigSnafu {
- value: content.to_string(),
- msg: "StaticUserProviderOption file must contains at least one valid credential",
- });
-
- Ok(StaticUserProvider { users: credential, })
+ let users = load_credential_from_file(content)?
+ .context(InvalidConfigSnafu {
+ value: content.to_string(),
+ msg: "StaticFileUserProvider must be a valid file path",
+ })?;
+ Ok(StaticUserProvider { users })
}
"cmd" => content
.split(',')
@@ -96,51 +68,8 @@ impl UserProvider for StaticUserProvider {
STATIC_USER_PROVIDER
}
- async fn authenticate(
- &self,
- input_id: Identity<'_>,
- input_pwd: Password<'_>,
- ) -> Result<UserInfoRef> {
- match input_id {
- Identity::UserId(username, _) => {
- ensure!(
- !username.is_empty(),
- IllegalParamSnafu {
- msg: "blank username"
- }
- );
- let save_pwd = self.users.get(username).context(UserNotFoundSnafu {
- username: username.to_string(),
- })?;
-
- match input_pwd {
- Password::PlainText(pwd) => {
- ensure!(
- !pwd.expose_secret().is_empty(),
- IllegalParamSnafu {
- msg: "blank password"
- }
- );
- return if save_pwd == pwd.expose_secret().as_bytes() {
- Ok(DefaultUserInfo::with_name(username))
- } else {
- UserPasswordMismatchSnafu {
- username: username.to_string(),
- }
- .fail()
- };
- }
- Password::MysqlNativePassword(auth_data, salt) => {
- auth_mysql(auth_data, salt, username, save_pwd)
- .map(|_| DefaultUserInfo::with_name(username))
- }
- Password::PgMD5(_, _) => UnsupportedPasswordTypeSnafu {
- password_type: "pg_md5",
- }
- .fail(),
- }
- }
- }
+ async fn authenticate(&self, id: Identity<'_>, pwd: Password<'_>) -> Result<UserInfoRef> {
+ authenticate_with_credential(&self.users, id, pwd)
}
async fn authorize(
diff --git a/src/auth/src/user_provider/watch_file_user_provider.rs b/src/auth/src/user_provider/watch_file_user_provider.rs
new file mode 100644
index 000000000000..4a654f2f31a9
--- /dev/null
+++ b/src/auth/src/user_provider/watch_file_user_provider.rs
@@ -0,0 +1,215 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::collections::HashMap;
+use std::path::Path;
+use std::sync::mpsc::channel;
+use std::sync::{Arc, Mutex};
+
+use async_trait::async_trait;
+use common_telemetry::{info, warn};
+use notify::{EventKind, RecursiveMode, Watcher};
+use snafu::{ensure, ResultExt};
+
+use crate::error::{FileWatchSnafu, InvalidConfigSnafu, Result};
+use crate::user_info::DefaultUserInfo;
+use crate::user_provider::{authenticate_with_credential, load_credential_from_file};
+use crate::{Identity, Password, UserInfoRef, UserProvider};
+
+pub(crate) const WATCH_FILE_USER_PROVIDER: &str = "watch_file_user_provider";
+
+type WatchedCredentialRef = Arc<Mutex<Option<HashMap<String, Vec<u8>>>>>;
+
+/// A user provider that reads user credential from a file and watches the file for changes.
+///
+/// Empty file is invalid; but file not exist means every user can be authenticated.
+pub(crate) struct WatchFileUserProvider {
+ users: WatchedCredentialRef,
+}
+
+impl WatchFileUserProvider {
+ pub fn new(filepath: &str) -> Result<Self> {
+ let credential = load_credential_from_file(filepath)?;
+ let users = Arc::new(Mutex::new(credential));
+ let this = WatchFileUserProvider {
+ users: users.clone(),
+ };
+
+ let (tx, rx) = channel::<notify::Result<notify::Event>>();
+ let mut debouncer =
+ notify::recommended_watcher(tx).context(FileWatchSnafu { path: "<none>" })?;
+ let mut dir = Path::new(filepath).to_path_buf();
+ ensure!(
+ dir.pop(),
+ InvalidConfigSnafu {
+ value: filepath,
+ msg: "UserProvider path must be a file path",
+ }
+ );
+ debouncer
+ .watch(&dir, RecursiveMode::NonRecursive)
+ .context(FileWatchSnafu { path: filepath })?;
+
+ let filepath = filepath.to_string();
+ std::thread::spawn(move || {
+ let filename = Path::new(&filepath).file_name();
+ let _hold = debouncer;
+ while let Ok(res) = rx.recv() {
+ if let Ok(event) = res {
+ let is_this_file = event.paths.iter().any(|p| p.file_name() == filename);
+ let is_relevant_event = matches!(
+ event.kind,
+ EventKind::Modify(_) | EventKind::Create(_) | EventKind::Remove(_)
+ );
+ if is_this_file && is_relevant_event {
+ info!(?event.kind, "User provider file {} changed", &filepath);
+ match load_credential_from_file(&filepath) {
+ Ok(credential) => {
+ let mut users =
+ users.lock().expect("users credential must be valid");
+ #[cfg(not(test))]
+ info!("User provider file {filepath} reloaded");
+ #[cfg(test)]
+ info!("User provider file {filepath} reloaded: {credential:?}");
+ *users = credential;
+ }
+ Err(err) => {
+ warn!(
+ ?err,
+ "Fail to load credential from file {filepath}; keep the old one",
+ )
+ }
+ }
+ }
+ }
+ }
+ });
+
+ Ok(this)
+ }
+}
+
+#[async_trait]
+impl UserProvider for WatchFileUserProvider {
+ fn name(&self) -> &str {
+ WATCH_FILE_USER_PROVIDER
+ }
+
+ async fn authenticate(&self, id: Identity<'_>, password: Password<'_>) -> Result<UserInfoRef> {
+ let users = self.users.lock().expect("users credential must be valid");
+ if let Some(users) = users.as_ref() {
+ authenticate_with_credential(users, id, password)
+ } else {
+ match id {
+ Identity::UserId(id, _) => {
+ warn!(id, "User provider file not exist, allow all users");
+ Ok(DefaultUserInfo::with_name(id))
+ }
+ }
+ }
+ }
+
+ async fn authorize(&self, _: &str, _: &str, _: &UserInfoRef) -> Result<()> {
+ // default allow all
+ Ok(())
+ }
+}
+
+#[cfg(test)]
+pub mod test {
+ use std::time::{Duration, Instant};
+
+ use common_test_util::temp_dir::create_temp_dir;
+ use tokio::time::sleep;
+
+ use crate::user_provider::watch_file_user_provider::WatchFileUserProvider;
+ use crate::user_provider::{Identity, Password};
+ use crate::UserProvider;
+
+ async fn test_authenticate(
+ provider: &dyn UserProvider,
+ username: &str,
+ password: &str,
+ ok: bool,
+ timeout: Option<Duration>,
+ ) {
+ if let Some(timeout) = timeout {
+ let deadline = Instant::now().checked_add(timeout).unwrap();
+ loop {
+ let re = provider
+ .authenticate(
+ Identity::UserId(username, None),
+ Password::PlainText(password.to_string().into()),
+ )
+ .await;
+ if re.is_ok() == ok {
+ break;
+ } else if Instant::now() < deadline {
+ sleep(Duration::from_millis(100)).await;
+ } else {
+ panic!("timeout (username: {username}, password: {password}, expected: {ok})");
+ }
+ }
+ } else {
+ let re = provider
+ .authenticate(
+ Identity::UserId(username, None),
+ Password::PlainText(password.to_string().into()),
+ )
+ .await;
+ assert_eq!(
+ re.is_ok(),
+ ok,
+ "username: {}, password: {}",
+ username,
+ password
+ );
+ }
+ }
+
+ #[tokio::test]
+ async fn test_file_provider() {
+ common_telemetry::init_default_ut_logging();
+
+ let dir = create_temp_dir("test_file_provider");
+ let file_path = format!("{}/test_file_provider", dir.path().to_str().unwrap());
+
+ // write a tmp file
+ assert!(std::fs::write(&file_path, "root=123456\nadmin=654321\n").is_ok());
+ let provider = WatchFileUserProvider::new(file_path.as_str()).unwrap();
+ let timeout = Duration::from_secs(60);
+
+ test_authenticate(&provider, "root", "123456", true, None).await;
+ test_authenticate(&provider, "admin", "654321", true, None).await;
+ test_authenticate(&provider, "root", "654321", false, None).await;
+
+ // update the tmp file
+ assert!(std::fs::write(&file_path, "root=654321\n").is_ok());
+ test_authenticate(&provider, "root", "123456", false, Some(timeout)).await;
+ test_authenticate(&provider, "root", "654321", true, Some(timeout)).await;
+ test_authenticate(&provider, "admin", "654321", false, Some(timeout)).await;
+
+ // remove the tmp file
+ assert!(std::fs::remove_file(&file_path).is_ok());
+ test_authenticate(&provider, "root", "123456", true, Some(timeout)).await;
+ test_authenticate(&provider, "root", "654321", true, Some(timeout)).await;
+ test_authenticate(&provider, "admin", "654321", true, Some(timeout)).await;
+
+ // recreate the tmp file
+ assert!(std::fs::write(&file_path, "root=123456\n").is_ok());
+ test_authenticate(&provider, "root", "123456", true, Some(timeout)).await;
+ test_authenticate(&provider, "root", "654321", false, Some(timeout)).await;
+ test_authenticate(&provider, "admin", "654321", false, Some(timeout)).await;
+ }
+}
diff --git a/src/servers/Cargo.toml b/src/servers/Cargo.toml
index 4a54a49b272b..43dbc55703b6 100644
--- a/src/servers/Cargo.toml
+++ b/src/servers/Cargo.toml
@@ -56,7 +56,7 @@ influxdb_line_protocol = { git = "https://github.com/evenyag/influxdb_iox", bran
itertools.workspace = true
lazy_static.workspace = true
mime_guess = "2.0"
-notify = "6.1"
+notify.workspace = true
object-pool = "0.5"
once_cell.workspace = true
openmetrics-parser = "0.4"
diff --git a/src/servers/src/error.rs b/src/servers/src/error.rs
index 0546d2a262da..2d47547e65a6 100644
--- a/src/servers/src/error.rs
+++ b/src/servers/src/error.rs
@@ -449,8 +449,9 @@ pub enum Error {
))]
UnexpectedPhysicalTable { location: Location },
- #[snafu(display("Failed to initialize a watcher for file"))]
+ #[snafu(display("Failed to initialize a watcher for file {}", path))]
FileWatch {
+ path: String,
#[snafu(source)]
error: notify::Error,
},
diff --git a/src/servers/src/tls.rs b/src/servers/src/tls.rs
index f36970a42b30..2055081012ca 100644
--- a/src/servers/src/tls.rs
+++ b/src/servers/src/tls.rs
@@ -200,21 +200,21 @@ pub fn maybe_watch_tls_config(tls_server_config: Arc<ReloadableTlsServerConfig>)
let tls_server_config_for_watcher = tls_server_config.clone();
let (tx, rx) = channel::<notify::Result<notify::Event>>();
- let mut watcher = notify::recommended_watcher(tx).context(FileWatchSnafu)?;
+ let mut watcher = notify::recommended_watcher(tx).context(FileWatchSnafu { path: "<none>" })?;
+ let cert_path = tls_server_config.get_tls_option().cert_path();
watcher
- .watch(
- tls_server_config.get_tls_option().cert_path(),
- RecursiveMode::NonRecursive,
- )
- .context(FileWatchSnafu)?;
+ .watch(cert_path, RecursiveMode::NonRecursive)
+ .with_context(|_| FileWatchSnafu {
+ path: cert_path.display().to_string(),
+ })?;
+ let key_path = tls_server_config.get_tls_option().key_path();
watcher
- .watch(
- tls_server_config.get_tls_option().key_path(),
- RecursiveMode::NonRecursive,
- )
- .context(FileWatchSnafu)?;
+ .watch(key_path, RecursiveMode::NonRecursive)
+ .with_context(|_| FileWatchSnafu {
+ path: key_path.display().to_string(),
+ })?;
std::thread::spawn(move || {
let _watcher = watcher;
|
feat
|
watch file user provider (#3566)
|
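The new `WatchFileUserProvider` above reads a plain `username=password` file into a `HashMap`, reloads it whenever `notify` reports a change to the file, and treats a missing file as allowing every user. A minimal sketch of just the line format it accepts; `parse_credentials` is a hypothetical helper mirroring `load_credential_from_file` without the file I/O and validation:

```rust
use std::collections::HashMap;

// One `username=password` pair per line; lines without `=` are skipped.
// Passwords stay as raw bytes, matching the provider's HashMap<String, Vec<u8>>.
fn parse_credentials(content: &str) -> HashMap<String, Vec<u8>> {
    content
        .lines()
        .filter_map(|line| {
            line.split_once('=')
                .map(|(user, pwd)| (user.to_string(), pwd.as_bytes().to_vec()))
        })
        .collect()
}

fn main() {
    let users = parse_credentials("root=123456\nadmin=654321\n");
    assert_eq!(users.len(), 2);
    assert_eq!(users.get("root").map(Vec::as_slice), Some("123456".as_bytes()));
}
```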
62037ee4c80dc0e2ec2c4ed993d000dd659668cb
|
2024-04-24 12:39:06
|
irenjj
|
feat: impl Display for Statement (#3744)
| false
|
diff --git a/src/common/query/src/lib.rs b/src/common/query/src/lib.rs
index ca81ad9e41d4..3686cff83699 100644
--- a/src/common/query/src/lib.rs
+++ b/src/common/query/src/lib.rs
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::fmt::{Debug, Formatter};
+use std::fmt::{Debug, Display, Formatter};
use std::sync::Arc;
use api::greptime_proto::v1::add_column_location::LocationType;
@@ -126,6 +126,17 @@ pub enum AddColumnLocation {
After { column_name: String },
}
+impl Display for AddColumnLocation {
+ fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+ match self {
+ AddColumnLocation::First => write!(f, r#"FIRST"#),
+ AddColumnLocation::After { column_name } => {
+ write!(f, r#"AFTER {column_name}"#)
+ }
+ }
+ }
+}
+
impl From<&AddColumnLocation> for Location {
fn from(value: &AddColumnLocation) -> Self {
match value {
diff --git a/src/sql/src/statements.rs b/src/sql/src/statements.rs
index de35b71a90a8..27a0c9327a24 100644
--- a/src/sql/src/statements.rs
+++ b/src/sql/src/statements.rs
@@ -43,6 +43,7 @@ use datatypes::schema::constraint::{CURRENT_TIMESTAMP, CURRENT_TIMESTAMP_FN};
use datatypes::schema::{ColumnDefaultConstraint, ColumnSchema, COMMENT_KEY};
use datatypes::types::{cast, TimestampType};
use datatypes::value::{OrderedF32, OrderedF64, Value};
+use itertools::Itertools;
pub use option_map::OptionMap;
use snafu::{ensure, OptionExt, ResultExt};
use sqlparser::ast::{ExactNumberInfo, UnaryOperator};
@@ -58,6 +59,29 @@ use crate::error::{
SerializeColumnDefaultConstraintSnafu, TimestampOverflowSnafu, UnsupportedDefaultValueSnafu,
};
+const REDACTED_OPTIONS: [&str; 2] = ["access_key_id", "secret_access_key"];
+
+/// Convert the options into redacted and sorted key-value string. Options with key in
+/// [REDACTED_OPTIONS] will be converted into `<key> = '******'`.
+fn redact_and_sort_options(options: &OptionMap) -> Vec<String> {
+ let options = options.as_ref();
+ let mut result = Vec::with_capacity(options.len());
+ let keys = options.keys().sorted();
+ for key in keys {
+ if let Some(val) = options.get(key) {
+ let redacted = REDACTED_OPTIONS
+ .iter()
+ .any(|opt| opt.eq_ignore_ascii_case(key));
+ if redacted {
+ result.push(format!("{key} = '******'"));
+ } else {
+ result.push(format!("{key} = '{}'", val.escape_default()));
+ }
+ }
+ }
+ result
+}
+
fn parse_string_to_value(
column_name: &str,
s: String,
diff --git a/src/sql/src/statements/alter.rs b/src/sql/src/statements/alter.rs
index cf3dc1bf9179..a54ba2d41b74 100644
--- a/src/sql/src/statements/alter.rs
+++ b/src/sql/src/statements/alter.rs
@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::fmt::{Debug, Display};
+
use common_query::AddColumnLocation;
use sqlparser::ast::{ColumnDef, Ident, ObjectName, TableConstraint};
use sqlparser_derive::{Visit, VisitMut};
@@ -39,6 +41,14 @@ impl AlterTable {
}
}
+impl Display for AlterTable {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ let table_name = self.table_name();
+ let alter_operation = self.alter_operation();
+ write!(f, r#"ALTER TABLE {table_name} {alter_operation}"#)
+ }
+}
+
#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
pub enum AlterTableOperation {
/// `ADD <table_constraint>`
@@ -53,3 +63,100 @@ pub enum AlterTableOperation {
/// `RENAME <new_table_name>`
RenameTable { new_table_name: String },
}
+
+impl Display for AlterTableOperation {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ match self {
+ AlterTableOperation::AddConstraint(constraint) => write!(f, r#"ADD {constraint}"#),
+ AlterTableOperation::AddColumn {
+ column_def,
+ location,
+ } => {
+ if let Some(location) = location {
+ write!(f, r#"ADD COLUMN {column_def} {location}"#)
+ } else {
+ write!(f, r#"ADD COLUMN {column_def}"#)
+ }
+ }
+ AlterTableOperation::DropColumn { name } => write!(f, r#"DROP COLUMN {name}"#),
+ AlterTableOperation::RenameTable { new_table_name } => {
+ write!(f, r#"RENAME {new_table_name}"#)
+ }
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::assert_matches::assert_matches;
+
+ use crate::dialect::GreptimeDbDialect;
+ use crate::parser::{ParseOptions, ParserContext};
+ use crate::statements::statement::Statement;
+
+ #[test]
+ fn test_display_alter() {
+ let sql = r"alter table monitor add column app string default 'shop' primary key;";
+ let stmts =
+ ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default())
+ .unwrap();
+ assert_eq!(1, stmts.len());
+ assert_matches!(&stmts[0], Statement::Alter { .. });
+
+ match &stmts[0] {
+ Statement::Alter(set) => {
+ let new_sql = format!("\n{}", set);
+ assert_eq!(
+ r#"
+ALTER TABLE monitor ADD COLUMN app STRING DEFAULT 'shop' PRIMARY KEY"#,
+ &new_sql
+ );
+ }
+ _ => {
+ unreachable!();
+ }
+ }
+
+ let sql = r"alter table monitor drop column load_15;";
+ let stmts =
+ ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default())
+ .unwrap();
+ assert_eq!(1, stmts.len());
+ assert_matches!(&stmts[0], Statement::Alter { .. });
+
+ match &stmts[0] {
+ Statement::Alter(set) => {
+ let new_sql = format!("\n{}", set);
+ assert_eq!(
+ r#"
+ALTER TABLE monitor DROP COLUMN load_15"#,
+ &new_sql
+ );
+ }
+ _ => {
+ unreachable!();
+ }
+ }
+
+ let sql = r"alter table monitor rename monitor_new;";
+ let stmts =
+ ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default())
+ .unwrap();
+ assert_eq!(1, stmts.len());
+ assert_matches!(&stmts[0], Statement::Alter { .. });
+
+ match &stmts[0] {
+ Statement::Alter(set) => {
+ let new_sql = format!("\n{}", set);
+ assert_eq!(
+ r#"
+ALTER TABLE monitor RENAME monitor_new"#,
+ &new_sql
+ );
+ }
+ _ => {
+ unreachable!();
+ }
+ }
+ }
+}
diff --git a/src/sql/src/statements/copy.rs b/src/sql/src/statements/copy.rs
index 8d3104f29e69..c801c3bb62fc 100644
--- a/src/sql/src/statements/copy.rs
+++ b/src/sql/src/statements/copy.rs
@@ -12,10 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::fmt::Display;
+
use sqlparser::ast::ObjectName;
use sqlparser_derive::{Visit, VisitMut};
-use crate::statements::OptionMap;
+use crate::statements::{redact_and_sort_options, OptionMap};
#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
pub enum Copy {
@@ -23,18 +25,77 @@ pub enum Copy {
CopyDatabase(CopyDatabase),
}
+impl Display for Copy {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ match self {
+ Copy::CopyTable(s) => s.fmt(f),
+ Copy::CopyDatabase(s) => s.fmt(f),
+ }
+ }
+}
+
#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
pub enum CopyTable {
To(CopyTableArgument),
From(CopyTableArgument),
}
+impl Display for CopyTable {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(f, "COPY ")?;
+ let (with, connection) = match self {
+ CopyTable::To(args) => {
+ write!(f, "{} TO {}", &args.table_name, &args.location)?;
+ (&args.with, &args.connection)
+ }
+ CopyTable::From(args) => {
+ write!(f, "{} FROM {}", &args.table_name, &args.location)?;
+ (&args.with, &args.connection)
+ }
+ };
+ if !with.map.is_empty() {
+ let options = redact_and_sort_options(with);
+ write!(f, " WITH ({})", options.join(", "))?;
+ }
+ if !connection.map.is_empty() {
+ let options = redact_and_sort_options(connection);
+ write!(f, " CONNECTION ({})", options.join(", "))?;
+ }
+ Ok(())
+ }
+}
+
#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
pub enum CopyDatabase {
To(CopyDatabaseArgument),
From(CopyDatabaseArgument),
}
+impl Display for CopyDatabase {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(f, "COPY DATABASE ")?;
+ let (with, connection) = match self {
+ CopyDatabase::To(args) => {
+ write!(f, "{} TO {}", &args.database_name, &args.location)?;
+ (&args.with, &args.connection)
+ }
+ CopyDatabase::From(args) => {
+ write!(f, "{} FROM {}", &args.database_name, &args.location)?;
+ (&args.with, &args.connection)
+ }
+ };
+ if !with.map.is_empty() {
+ let options = redact_and_sort_options(with);
+ write!(f, " WITH ({})", options.join(", "))?;
+ }
+ if !connection.map.is_empty() {
+ let options = redact_and_sort_options(connection);
+ write!(f, " CONNECTION ({})", options.join(", "))?;
+ }
+ Ok(())
+ }
+}
+
#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
pub struct CopyDatabaseArgument {
pub database_name: ObjectName,
@@ -67,3 +128,112 @@ impl CopyTableArgument {
.cloned()
}
}
+
+#[cfg(test)]
+mod tests {
+ use std::assert_matches::assert_matches;
+
+ use crate::dialect::GreptimeDbDialect;
+ use crate::parser::{ParseOptions, ParserContext};
+ use crate::statements::statement::Statement;
+
+ #[test]
+ fn test_display_copy_from_tb() {
+ let sql = r"copy tbl from 's3://my-bucket/data.parquet'
+ with (format = 'parquet', pattern = '.*parquet.*')
+ connection(region = 'us-west-2', secret_access_key = '12345678');";
+ let stmts =
+ ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default())
+ .unwrap();
+ assert_eq!(1, stmts.len());
+ assert_matches!(&stmts[0], Statement::Copy { .. });
+
+ match &stmts[0] {
+ Statement::Copy(copy) => {
+ let new_sql = format!("{}", copy);
+ assert_eq!(
+ r#"COPY tbl FROM s3://my-bucket/data.parquet WITH (format = 'parquet', pattern = '.*parquet.*') CONNECTION (region = 'us-west-2', secret_access_key = '******')"#,
+ &new_sql
+ );
+ }
+ _ => {
+ unreachable!();
+ }
+ }
+ }
+
+ #[test]
+ fn test_display_copy_to_tb() {
+ let sql = r"copy tbl to 's3://my-bucket/data.parquet'
+ with (format = 'parquet', pattern = '.*parquet.*')
+ connection(region = 'us-west-2', secret_access_key = '12345678');";
+ let stmts =
+ ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default())
+ .unwrap();
+ assert_eq!(1, stmts.len());
+ assert_matches!(&stmts[0], Statement::Copy { .. });
+
+ match &stmts[0] {
+ Statement::Copy(copy) => {
+ let new_sql = format!("{}", copy);
+ assert_eq!(
+ r#"COPY tbl TO s3://my-bucket/data.parquet WITH (format = 'parquet', pattern = '.*parquet.*') CONNECTION (region = 'us-west-2', secret_access_key = '******')"#,
+ &new_sql
+ );
+ }
+ _ => {
+ unreachable!();
+ }
+ }
+ }
+
+ #[test]
+ fn test_display_copy_from_db() {
+ let sql = r"copy database db1 from 's3://my-bucket/data.parquet'
+ with (format = 'parquet', pattern = '.*parquet.*')
+ connection(region = 'us-west-2', secret_access_key = '12345678');";
+ let stmts =
+ ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default())
+ .unwrap();
+ assert_eq!(1, stmts.len());
+ assert_matches!(&stmts[0], Statement::Copy { .. });
+
+ match &stmts[0] {
+ Statement::Copy(copy) => {
+ let new_sql = format!("{}", copy);
+ assert_eq!(
+ r#"COPY DATABASE db1 FROM s3://my-bucket/data.parquet WITH (format = 'parquet', pattern = '.*parquet.*') CONNECTION (region = 'us-west-2', secret_access_key = '******')"#,
+ &new_sql
+ );
+ }
+ _ => {
+ unreachable!();
+ }
+ }
+ }
+
+ #[test]
+ fn test_display_copy_to_db() {
+ let sql = r"copy database db1 to 's3://my-bucket/data.parquet'
+ with (format = 'parquet', pattern = '.*parquet.*')
+ connection(region = 'us-west-2', secret_access_key = '12345678');";
+ let stmts =
+ ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default())
+ .unwrap();
+ assert_eq!(1, stmts.len());
+ assert_matches!(&stmts[0], Statement::Copy { .. });
+
+ match &stmts[0] {
+ Statement::Copy(copy) => {
+ let new_sql = format!("{}", copy);
+ assert_eq!(
+ r#"COPY DATABASE db1 TO s3://my-bucket/data.parquet WITH (format = 'parquet', pattern = '.*parquet.*') CONNECTION (region = 'us-west-2', secret_access_key = '******')"#,
+ &new_sql
+ );
+ }
+ _ => {
+ unreachable!();
+ }
+ }
+ }
+}
diff --git a/src/sql/src/statements/create.rs b/src/sql/src/statements/create.rs
index cfcbd8d68242..eb992b48ef45 100644
--- a/src/sql/src/statements/create.rs
+++ b/src/sql/src/statements/create.rs
@@ -20,7 +20,7 @@ use sqlparser::ast::Expr;
use sqlparser_derive::{Visit, VisitMut};
use crate::ast::{ColumnDef, Ident, ObjectName, SqlOption, TableConstraint, Value as SqlValue};
-use crate::statements::OptionMap;
+use crate::statements::{redact_and_sort_options, OptionMap};
const LINE_SEP: &str = ",\n";
const COMMA_SEP: &str = ", ";
@@ -47,6 +47,23 @@ macro_rules! format_list_comma {
};
}
+fn format_table_constraint(constraints: &[TableConstraint]) -> String {
+ constraints
+ .iter()
+ .map(|c| {
+ if is_time_index(c) {
+ let TableConstraint::Unique { columns, .. } = c else {
+ unreachable!()
+ };
+
+ format_indent!("{}TIME INDEX ({})", format_list_comma!(columns))
+ } else {
+ format_indent!(c)
+ }
+ })
+ .join(LINE_SEP)
+}
+
/// Time index name, used in table constraints.
pub const TIME_INDEX: &str = "__time_index";
@@ -74,58 +91,6 @@ pub struct CreateTable {
pub partitions: Option<Partitions>,
}
-impl CreateTable {
- fn format_constraints(&self) -> String {
- self.constraints
- .iter()
- .map(|c| {
- if is_time_index(c) {
- let TableConstraint::Unique { columns, .. } = c else {
- unreachable!()
- };
-
- format_indent!("{}TIME INDEX ({})", format_list_comma!(columns))
- } else {
- format_indent!(c)
- }
- })
- .join(LINE_SEP)
- }
-
- #[inline]
- fn format_partitions(&self) -> String {
- if let Some(partitions) = &self.partitions {
- format!("{}\n", partitions)
- } else {
- String::default()
- }
- }
-
- #[inline]
- fn format_if_not_exists(&self) -> &str {
- if self.if_not_exists {
- "IF NOT EXISTS"
- } else {
- ""
- }
- }
-
- #[inline]
- fn format_options(&self) -> String {
- if self.options.is_empty() {
- String::default()
- } else {
- let options: Vec<&SqlOption> = self.options.iter().sorted().collect();
- let options = format_list_indent!(options);
- format!(
- r#"WITH(
-{options}
-)"#
- )
- }
- }
-}
-
#[derive(Debug, PartialEq, Eq, Clone, Visit, VisitMut)]
pub struct Partitions {
pub column_list: Vec<Ident>,
@@ -166,36 +131,37 @@ impl Display for Partitions {
"PARTITION ON COLUMNS ({}) (\n{}\n)",
format_list_comma!(self.column_list),
format_list_indent!(self.exprs),
- )
- } else {
- write!(f, "")
+ )?;
}
+ Ok(())
}
}
impl Display for CreateTable {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
- let if_not_exists = self.format_if_not_exists();
- let name = &self.name;
- let columns = format_list_indent!(self.columns);
- let constraints = self.format_constraints();
- let partitions = self.format_partitions();
- let engine = &self.engine;
- let options = self.format_options();
- let maybe_external = if self.engine == FILE_ENGINE {
- "EXTERNAL "
- } else {
- ""
- };
- write!(
- f,
- r#"CREATE {maybe_external}TABLE {if_not_exists} {name} (
-{columns},
-{constraints}
-)
-{partitions}ENGINE={engine}
-{options}"#
- )
+ write!(f, "CREATE ")?;
+ if self.engine == FILE_ENGINE {
+ write!(f, "EXTERNAL ")?;
+ }
+ write!(f, "TABLE ")?;
+ if self.if_not_exists {
+ write!(f, "IF NOT EXISTS ")?;
+ }
+ writeln!(f, "{} (", &self.name)?;
+ writeln!(f, "{},", format_list_indent!(self.columns))?;
+ writeln!(f, "{}", format_table_constraint(&self.constraints))?;
+ writeln!(f, ")")?;
+ if let Some(partitions) = &self.partitions {
+ writeln!(f, "{partitions}")?;
+ }
+ writeln!(f, "ENGINE={}", &self.engine)?;
+ if !self.options.is_empty() {
+ writeln!(f, "WITH(")?;
+ let options: Vec<&SqlOption> = self.options.iter().sorted().collect();
+ writeln!(f, "{}", format_list_indent!(options))?;
+ write!(f, ")")?;
+ }
+ Ok(())
}
}
@@ -216,6 +182,16 @@ impl CreateDatabase {
}
}
+impl Display for CreateDatabase {
+ fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+ write!(f, "CREATE DATABASE ")?;
+ if self.if_not_exists {
+ write!(f, "IF NOT EXISTS ")?;
+ }
+ write!(f, "{}", &self.name)
+ }
+}
+
#[derive(Debug, PartialEq, Eq, Clone, Visit, VisitMut)]
pub struct CreateExternalTable {
/// Table name
@@ -229,6 +205,27 @@ pub struct CreateExternalTable {
pub engine: String,
}
+impl Display for CreateExternalTable {
+ fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+ write!(f, "CREATE EXTERNAL TABLE ")?;
+ if self.if_not_exists {
+ write!(f, "IF NOT EXISTS ")?;
+ }
+ writeln!(f, "{} (", &self.name)?;
+ writeln!(f, "{},", format_list_indent!(self.columns))?;
+ writeln!(f, "{}", format_table_constraint(&self.constraints))?;
+ writeln!(f, ")")?;
+ writeln!(f, "ENGINE={}", &self.engine)?;
+ if !self.options.map.is_empty() {
+ let options = redact_and_sort_options(&self.options);
+ writeln!(f, "WITH(")?;
+ writeln!(f, "{}", format_list_indent!(options))?;
+ write!(f, ")")?;
+ }
+ Ok(())
+ }
+}
+
#[derive(Debug, PartialEq, Eq, Clone, Visit, VisitMut)]
pub struct CreateTableLike {
/// Table name
@@ -237,6 +234,14 @@ pub struct CreateTableLike {
pub source_name: ObjectName,
}
+impl Display for CreateTableLike {
+ fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+ let table_name = &self.table_name;
+ let source_name = &self.source_name;
+ write!(f, r#"CREATE TABLE {table_name} LIKE {source_name}"#)
+ }
+}
+
#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
@@ -392,4 +397,116 @@ ENGINE=mito
ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default());
assert_matches!(result, Err(Error::InvalidTableOption { .. }))
}
+
+ #[test]
+ fn test_display_create_database() {
+ let sql = r"create database test;";
+ let stmts =
+ ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default())
+ .unwrap();
+ assert_eq!(1, stmts.len());
+ assert_matches!(&stmts[0], Statement::CreateDatabase { .. });
+
+ match &stmts[0] {
+ Statement::CreateDatabase(set) => {
+ let new_sql = format!("\n{}", set);
+ assert_eq!(
+ r#"
+CREATE DATABASE test"#,
+ &new_sql
+ );
+ }
+ _ => {
+ unreachable!();
+ }
+ }
+
+ let sql = r"create database if not exists test;";
+ let stmts =
+ ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default())
+ .unwrap();
+ assert_eq!(1, stmts.len());
+ assert_matches!(&stmts[0], Statement::CreateDatabase { .. });
+
+ match &stmts[0] {
+ Statement::CreateDatabase(set) => {
+ let new_sql = format!("\n{}", set);
+ assert_eq!(
+ r#"
+CREATE DATABASE IF NOT EXISTS test"#,
+ &new_sql
+ );
+ }
+ _ => {
+ unreachable!();
+ }
+ }
+ }
+
+ #[test]
+ fn test_display_create_table_like() {
+ let sql = r"create table t2 like t1;";
+ let stmts =
+ ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default())
+ .unwrap();
+ assert_eq!(1, stmts.len());
+ assert_matches!(&stmts[0], Statement::CreateTableLike { .. });
+
+ match &stmts[0] {
+ Statement::CreateTableLike(create) => {
+ let new_sql = format!("\n{}", create);
+ assert_eq!(
+ r#"
+CREATE TABLE t2 LIKE t1"#,
+ &new_sql
+ );
+ }
+ _ => {
+ unreachable!();
+ }
+ }
+ }
+
+ #[test]
+ fn test_display_create_external_table() {
+ let sql = r#"CREATE EXTERNAL TABLE city (
+ host string,
+ ts timestamp,
+ cpu float64 default 0,
+ memory float64,
+ TIME INDEX (ts),
+ PRIMARY KEY(host)
+) WITH (location='/var/data/city.csv', format='csv');"#;
+ let stmts =
+ ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default())
+ .unwrap();
+ assert_eq!(1, stmts.len());
+ assert_matches!(&stmts[0], Statement::CreateExternalTable { .. });
+
+ match &stmts[0] {
+ Statement::CreateExternalTable(create) => {
+ let new_sql = format!("\n{}", create);
+ assert_eq!(
+ r#"
+CREATE EXTERNAL TABLE city (
+ host STRING,
+ ts TIMESTAMP,
+ cpu FLOAT64 DEFAULT 0,
+ memory FLOAT64,
+ TIME INDEX (ts),
+ PRIMARY KEY (host)
+)
+ENGINE=file
+WITH(
+ format = 'csv',
+ location = '/var/data/city.csv'
+)"#,
+ &new_sql
+ );
+ }
+ _ => {
+ unreachable!();
+ }
+ }
+ }
}
diff --git a/src/sql/src/statements/describe.rs b/src/sql/src/statements/describe.rs
index 6fe190800900..743f2b0123c2 100644
--- a/src/sql/src/statements/describe.rs
+++ b/src/sql/src/statements/describe.rs
@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::fmt::Display;
+
use sqlparser::ast::ObjectName;
use sqlparser_derive::{Visit, VisitMut};
@@ -32,6 +34,13 @@ impl DescribeTable {
}
}
+impl Display for DescribeTable {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ let name = self.name();
+ write!(f, r#"DESCRIBE TABLE {name}"#)
+ }
+}
+
#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
@@ -108,4 +117,28 @@ mod tests {
)
.is_err());
}
+
+ #[test]
+ fn test_display_describe_table() {
+ let sql = r"describe table monitor;";
+ let stmts =
+ ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default())
+ .unwrap();
+ assert_eq!(1, stmts.len());
+ assert_matches!(&stmts[0], Statement::DescribeTable { .. });
+
+ match &stmts[0] {
+ Statement::DescribeTable(set) => {
+ let new_sql = format!("\n{}", set);
+ assert_eq!(
+ r#"
+DESCRIBE TABLE monitor"#,
+ &new_sql
+ );
+ }
+ _ => {
+ unreachable!();
+ }
+ }
+ }
}
diff --git a/src/sql/src/statements/drop.rs b/src/sql/src/statements/drop.rs
index 62da68a90c9c..4725f512816d 100644
--- a/src/sql/src/statements/drop.rs
+++ b/src/sql/src/statements/drop.rs
@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::fmt::Display;
+
use sqlparser::ast::ObjectName;
use sqlparser_derive::{Visit, VisitMut};
@@ -41,6 +43,17 @@ impl DropTable {
}
}
+impl Display for DropTable {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ f.write_str("DROP TABLE")?;
+ if self.drop_if_exists() {
+ f.write_str(" IF EXISTS")?;
+ }
+ let table_name = self.table_name();
+ write!(f, r#" {table_name}"#)
+ }
+}
+
/// DROP DATABASE statement.
#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
pub struct DropDatabase {
@@ -66,3 +79,113 @@ impl DropDatabase {
self.drop_if_exists
}
}
+
+impl Display for DropDatabase {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ f.write_str("DROP DATABASE")?;
+ if self.drop_if_exists() {
+ f.write_str(" IF EXISTS")?;
+ }
+ let name = self.name();
+ write!(f, r#" {name}"#)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::assert_matches::assert_matches;
+
+ use crate::dialect::GreptimeDbDialect;
+ use crate::parser::{ParseOptions, ParserContext};
+ use crate::statements::statement::Statement;
+
+ #[test]
+ fn test_display_drop_database() {
+ let sql = r"drop database test;";
+ let stmts =
+ ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default())
+ .unwrap();
+ assert_eq!(1, stmts.len());
+ assert_matches!(&stmts[0], Statement::DropDatabase { .. });
+
+ match &stmts[0] {
+ Statement::DropDatabase(set) => {
+ let new_sql = format!("\n{}", set);
+ assert_eq!(
+ r#"
+DROP DATABASE test"#,
+ &new_sql
+ );
+ }
+ _ => {
+ unreachable!();
+ }
+ }
+
+ let sql = r"drop database if exists test;";
+ let stmts =
+ ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default())
+ .unwrap();
+ assert_eq!(1, stmts.len());
+ assert_matches!(&stmts[0], Statement::DropDatabase { .. });
+
+ match &stmts[0] {
+ Statement::DropDatabase(set) => {
+ let new_sql = format!("\n{}", set);
+ assert_eq!(
+ r#"
+DROP DATABASE IF EXISTS test"#,
+ &new_sql
+ );
+ }
+ _ => {
+ unreachable!();
+ }
+ }
+ }
+
+ #[test]
+ fn test_display_drop_table() {
+ let sql = r"drop table test;";
+ let stmts =
+ ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default())
+ .unwrap();
+ assert_eq!(1, stmts.len());
+ assert_matches!(&stmts[0], Statement::DropTable { .. });
+
+ match &stmts[0] {
+ Statement::DropTable(set) => {
+ let new_sql = format!("\n{}", set);
+ assert_eq!(
+ r#"
+DROP TABLE test"#,
+ &new_sql
+ );
+ }
+ _ => {
+ unreachable!();
+ }
+ }
+
+ let sql = r"drop table if exists test;";
+ let stmts =
+ ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default())
+ .unwrap();
+ assert_eq!(1, stmts.len());
+ assert_matches!(&stmts[0], Statement::DropTable { .. });
+
+ match &stmts[0] {
+ Statement::DropTable(set) => {
+ let new_sql = format!("\n{}", set);
+ assert_eq!(
+ r#"
+DROP TABLE IF EXISTS test"#,
+ &new_sql
+ );
+ }
+ _ => {
+ unreachable!();
+ }
+ }
+ }
+}
diff --git a/src/sql/src/statements/set_variables.rs b/src/sql/src/statements/set_variables.rs
index 71d6849833a8..7a2a94a531df 100644
--- a/src/sql/src/statements/set_variables.rs
+++ b/src/sql/src/statements/set_variables.rs
@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::fmt::Display;
+
use sqlparser::ast::{Expr, ObjectName};
use sqlparser_derive::{Visit, VisitMut};
@@ -21,3 +23,50 @@ pub struct SetVariables {
pub variable: ObjectName,
pub value: Vec<Expr>,
}
+
+impl Display for SetVariables {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ let variable = &self.variable;
+ let value = &self
+ .value
+ .iter()
+ .map(|expr| format!("{}", expr))
+ .collect::<Vec<_>>()
+ .join(", ");
+
+ write!(f, r#"SET {variable} = {value}"#)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::assert_matches::assert_matches;
+
+ use crate::dialect::GreptimeDbDialect;
+ use crate::parser::{ParseOptions, ParserContext};
+ use crate::statements::statement::Statement;
+
+ #[test]
+ fn test_display_show_variables() {
+ let sql = r"set delayed_insert_timeout=300;";
+ let stmts =
+ ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default())
+ .unwrap();
+ assert_eq!(1, stmts.len());
+ assert_matches!(&stmts[0], Statement::SetVariables { .. });
+
+ match &stmts[0] {
+ Statement::SetVariables(set) => {
+ let new_sql = format!("\n{}", set);
+ assert_eq!(
+ r#"
+SET delayed_insert_timeout = 300"#,
+ &new_sql
+ );
+ }
+ _ => {
+ unreachable!();
+ }
+ }
+ }
+}
diff --git a/src/sql/src/statements/show.rs b/src/sql/src/statements/show.rs
index 13cbb2f69ce0..f20a6a59191c 100644
--- a/src/sql/src/statements/show.rs
+++ b/src/sql/src/statements/show.rs
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::fmt;
+use std::fmt::{self, Display};
use sqlparser_derive::{Visit, VisitMut};
@@ -26,7 +26,7 @@ pub enum ShowKind {
Where(Expr),
}
-impl fmt::Display for ShowKind {
+impl Display for ShowKind {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
ShowKind::All => write!(f, "ALL"),
@@ -51,6 +51,20 @@ pub struct ShowColumns {
pub full: bool,
}
+impl Display for ShowColumns {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "SHOW ")?;
+ if self.full {
+ write!(f, "FULL ")?;
+ }
+ write!(f, "COLUMNS IN {} ", &self.table)?;
+ if let Some(database) = &self.database {
+ write!(f, "IN {database} ")?;
+ }
+ write!(f, "{}", &self.kind)
+ }
+}
+
/// The SQL `SHOW INDEX` statement
#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
pub struct ShowIndex {
@@ -59,6 +73,16 @@ pub struct ShowIndex {
pub database: Option<String>,
}
+impl Display for ShowIndex {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "SHOW INDEX IN {} ", &self.table)?;
+ if let Some(database) = &self.database {
+ write!(f, "IN {database} ")?;
+ }
+ write!(f, "{}", &self.kind)
+ }
+}
+
impl ShowDatabases {
/// Creates a statement for `SHOW DATABASES`
pub fn new(kind: ShowKind) -> Self {
@@ -66,6 +90,13 @@ impl ShowDatabases {
}
}
+impl Display for ShowDatabases {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let kind = &self.kind;
+ write!(f, r#"SHOW DATABASES {kind}"#)
+ }
+}
+
/// SQL structure for `SHOW TABLES`.
#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
pub struct ShowTables {
@@ -74,18 +105,46 @@ pub struct ShowTables {
pub full: bool,
}
+impl Display for ShowTables {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "SHOW ")?;
+ if self.full {
+ write!(f, "FULL ")?;
+ }
+ write!(f, "TABLES ")?;
+ if let Some(database) = &self.database {
+ write!(f, "IN {database} ")?;
+ }
+ write!(f, "{}", &self.kind)
+ }
+}
+
/// SQL structure for `SHOW CREATE TABLE`.
#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
pub struct ShowCreateTable {
pub table_name: ObjectName,
}
+impl Display for ShowCreateTable {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let table_name = &self.table_name;
+ write!(f, r#"SHOW CREATE TABLE {table_name}"#)
+ }
+}
+
/// SQL structure for `SHOW VARIABLES xxx`.
#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
pub struct ShowVariables {
pub variable: ObjectName,
}
+impl Display for ShowVariables {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let variable = &self.variable;
+ write!(f, r#"SHOW VARIABLES {variable}"#)
+ }
+}
+
#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
@@ -171,4 +230,162 @@ mod tests {
)
.is_err());
}
+
+ #[test]
+ fn test_display_show_variables() {
+ let sql = r"show variables v1;";
+ let stmts: Vec<Statement> =
+ ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default())
+ .unwrap();
+ assert_eq!(1, stmts.len());
+ assert_matches!(&stmts[0], Statement::ShowVariables { .. });
+ match &stmts[0] {
+ Statement::ShowVariables(show) => {
+ let new_sql = format!("\n{}", show);
+ assert_eq!(
+ r#"
+SHOW VARIABLES v1"#,
+ &new_sql
+ );
+ }
+ _ => {
+ unreachable!();
+ }
+ }
+ }
+
+ #[test]
+ fn test_display_show_create_table() {
+ let sql = r"show create table monitor;";
+ let stmts: Vec<Statement> =
+ ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default())
+ .unwrap();
+ assert_eq!(1, stmts.len());
+ assert_matches!(&stmts[0], Statement::ShowCreateTable { .. });
+ match &stmts[0] {
+ Statement::ShowCreateTable(show) => {
+ let new_sql = format!("\n{}", show);
+ assert_eq!(
+ r#"
+SHOW CREATE TABLE monitor"#,
+ &new_sql
+ );
+ }
+ _ => {
+ unreachable!();
+ }
+ }
+ }
+
+ #[test]
+ fn test_display_show_index() {
+ let sql = r"show index from t1 from d1;";
+ let stmts: Vec<Statement> =
+ ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default())
+ .unwrap();
+ assert_eq!(1, stmts.len());
+ assert_matches!(&stmts[0], Statement::ShowIndex { .. });
+ match &stmts[0] {
+ Statement::ShowIndex(show) => {
+ let new_sql = format!("\n{}", show);
+ assert_eq!(
+ r#"
+SHOW INDEX IN t1 IN d1 ALL"#,
+ &new_sql
+ );
+ }
+ _ => {
+ unreachable!();
+ }
+ }
+ }
+
+ #[test]
+ fn test_display_show_columns() {
+ let sql = r"show full columns in t1 in d1;";
+ let stmts: Vec<Statement> =
+ ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default())
+ .unwrap();
+ assert_eq!(1, stmts.len());
+ assert_matches!(&stmts[0], Statement::ShowColumns { .. });
+ match &stmts[0] {
+ Statement::ShowColumns(show) => {
+ let new_sql = format!("\n{}", show);
+ assert_eq!(
+ r#"
+SHOW FULL COLUMNS IN t1 IN d1 ALL"#,
+ &new_sql
+ );
+ }
+ _ => {
+ unreachable!();
+ }
+ }
+ }
+
+ #[test]
+ fn test_display_show_tables() {
+ let sql = r"show full tables in d1;";
+ let stmts: Vec<Statement> =
+ ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default())
+ .unwrap();
+ assert_eq!(1, stmts.len());
+ assert_matches!(&stmts[0], Statement::ShowTables { .. });
+ match &stmts[0] {
+ Statement::ShowTables(show) => {
+ let new_sql = format!("\n{}", show);
+ assert_eq!(
+ r#"
+SHOW FULL TABLES IN d1 ALL"#,
+ &new_sql
+ );
+ }
+ _ => {
+ unreachable!();
+ }
+ }
+
+ let sql = r"show full tables;";
+ let stmts: Vec<Statement> =
+ ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default())
+ .unwrap();
+ assert_eq!(1, stmts.len());
+ assert_matches!(&stmts[0], Statement::ShowTables { .. });
+ match &stmts[0] {
+ Statement::ShowTables(show) => {
+ let new_sql = format!("\n{}", show);
+ assert_eq!(
+ r#"
+SHOW FULL TABLES ALL"#,
+ &new_sql
+ );
+ }
+ _ => {
+ unreachable!();
+ }
+ }
+ }
+
+ #[test]
+ fn test_display_show_databases() {
+ let sql = r"show databases;";
+ let stmts: Vec<Statement> =
+ ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default())
+ .unwrap();
+ assert_eq!(1, stmts.len());
+ assert_matches!(&stmts[0], Statement::ShowDatabases { .. });
+ match &stmts[0] {
+ Statement::ShowDatabases(show) => {
+ let new_sql = format!("\n{}", show);
+ assert_eq!(
+ r#"
+SHOW DATABASES ALL"#,
+ &new_sql
+ );
+ }
+ _ => {
+ unreachable!();
+ }
+ }
+ }
}
diff --git a/src/sql/src/statements/statement.rs b/src/sql/src/statements/statement.rs
index 5d1b5f47f893..70566893d65a 100644
--- a/src/sql/src/statements/statement.rs
+++ b/src/sql/src/statements/statement.rs
@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::fmt::Display;
+
use datafusion_sql::parser::Statement as DfStatement;
use sqlparser::ast::Statement as SpStatement;
use sqlparser_derive::{Visit, VisitMut};
@@ -89,6 +91,41 @@ pub enum Statement {
ShowVariables(ShowVariables),
}
+impl Display for Statement {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ match self {
+ Statement::Query(s) => s.inner.fmt(f),
+ Statement::Insert(s) => s.inner.fmt(f),
+ Statement::Delete(s) => s.inner.fmt(f),
+ Statement::CreateTable(s) => s.fmt(f),
+ Statement::CreateExternalTable(s) => s.fmt(f),
+ Statement::CreateTableLike(s) => s.fmt(f),
+ Statement::DropTable(s) => s.fmt(f),
+ Statement::DropDatabase(s) => s.fmt(f),
+ Statement::CreateDatabase(s) => s.fmt(f),
+ Statement::Alter(s) => s.fmt(f),
+ Statement::ShowDatabases(s) => s.fmt(f),
+ Statement::ShowTables(s) => s.fmt(f),
+ Statement::ShowColumns(s) => s.fmt(f),
+ Statement::ShowIndex(s) => s.fmt(f),
+ Statement::ShowCreateTable(s) => s.fmt(f),
+ Statement::DescribeTable(s) => s.fmt(f),
+ Statement::Explain(s) => s.fmt(f),
+ Statement::Copy(s) => s.fmt(f),
+ Statement::Tql(s) => s.fmt(f),
+ Statement::TruncateTable(s) => s.fmt(f),
+ Statement::SetVariables(s) => s.fmt(f),
+ Statement::ShowVariables(s) => s.fmt(f),
+ Statement::ShowCharset(kind) => {
+ write!(f, "SHOW CHARSET {kind}")
+ }
+ Statement::ShowCollation(kind) => {
+ write!(f, "SHOW COLLATION {kind}")
+ }
+ }
+ }
+}
+
/// Comment hints from SQL.
/// It'll be enabled when using `--comment` in mysql client.
/// Eg: `SELECT * FROM system.number LIMIT 1; -- { ErrorCode 25 }`
diff --git a/src/sql/src/statements/tql.rs b/src/sql/src/statements/tql.rs
index 6bc4136068ea..07f8cb8876ec 100644
--- a/src/sql/src/statements/tql.rs
+++ b/src/sql/src/statements/tql.rs
@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::fmt::Display;
+
use sqlparser_derive::{Visit, VisitMut};
#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
@@ -21,6 +23,32 @@ pub enum Tql {
Analyze(TqlAnalyze),
}
+impl Display for Tql {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ match self {
+ Tql::Eval(t) => t.fmt(f),
+ Tql::Explain(t) => t.fmt(f),
+ Tql::Analyze(t) => t.fmt(f),
+ }
+ }
+}
+
+// TODO: encapsulate shared TQL args into a struct and implement Display for it.
+fn format_tql(
+ f: &mut std::fmt::Formatter<'_>,
+ start: &str,
+ end: &str,
+ step: &str,
+ lookback: Option<&str>,
+ query: &str,
+) -> std::fmt::Result {
+ write!(f, "({start}, {end}, {step}")?;
+ if let Some(lookback) = lookback {
+ write!(f, ", {lookback}")?;
+ }
+ write!(f, ") {query}")
+}
+
/// TQL EVAL (<start>, <end>, <step>, [lookback]) <promql>
#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
pub struct TqlEval {
@@ -31,6 +59,20 @@ pub struct TqlEval {
pub query: String,
}
+impl Display for TqlEval {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(f, "TQL EVAL")?;
+ format_tql(
+ f,
+ &self.start,
+ &self.end,
+ &self.step,
+ self.lookback.as_deref(),
+ &self.query,
+ )
+ }
+}
+
/// TQL EXPLAIN [VERBOSE] [<start>, <end>, <step>, [lookback]] <promql>
/// doesn't execute the query but tells how the query would be executed (similar to SQL EXPLAIN).
#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
@@ -43,6 +85,23 @@ pub struct TqlExplain {
pub is_verbose: bool,
}
+impl Display for TqlExplain {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(f, "TQL EXPLAIN ")?;
+ if self.is_verbose {
+ write!(f, "VERBOSE ")?;
+ }
+ format_tql(
+ f,
+ &self.start,
+ &self.end,
+ &self.step,
+ self.lookback.as_deref(),
+ &self.query,
+ )
+ }
+}
+
/// TQL ANALYZE [VERBOSE] (<start>, <end>, <step>, [lookback]) <promql>
/// executes the plan and tells the detailed per-step execution time (similar to SQL ANALYZE).
#[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
@@ -55,6 +114,23 @@ pub struct TqlAnalyze {
pub is_verbose: bool,
}
+impl Display for TqlAnalyze {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(f, "TQL ANALYZE ")?;
+ if self.is_verbose {
+ write!(f, "VERBOSE ")?;
+ }
+ format_tql(
+ f,
+ &self.start,
+ &self.end,
+ &self.step,
+ self.lookback.as_deref(),
+ &self.query,
+ )
+ }
+}
+
/// Intermediate structure used to unify parameter mappings for various TQL operations.
/// This struct serves as a common parameter container for parsing TQL queries
/// and constructing corresponding TQL operations: `TqlEval`, `TqlAnalyze` or `TqlExplain`.
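
All three TQL `Display` impls above delegate the argument list to the shared `format_tql` helper, so an evaluated statement renders as `TQL EVAL(<start>, <end>, <step>[, <lookback>]) <promql>`. A minimal standalone sketch that mirrors that formatting (the argument values are illustrative, not taken from the commit):

```rust
use std::fmt::Write;

fn main() {
    // Hypothetical arguments of a TQL EVAL statement.
    let (start, end, step, lookback, query) = ("0", "100", "10s", None::<&str>, "up");

    // Mirrors format_tql: "(start, end, step[, lookback]) query".
    let mut out = String::from("TQL EVAL");
    write!(out, "({start}, {end}, {step}").unwrap();
    if let Some(lookback) = lookback {
        write!(out, ", {lookback}").unwrap();
    }
    write!(out, ") {query}").unwrap();

    assert_eq!(out, "TQL EVAL(0, 100, 10s) up");
}
```
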
diff --git a/src/sql/src/statements/transform/type_alias.rs b/src/sql/src/statements/transform/type_alias.rs
index 353c19f68c67..464d0ca0c01a 100644
--- a/src/sql/src/statements/transform/type_alias.rs
+++ b/src/sql/src/statements/transform/type_alias.rs
@@ -365,7 +365,7 @@ CREATE TABLE data_types (
match &stmts[0] {
Statement::CreateTable(c) => {
- let expected = r#"CREATE TABLE data_types (
+ let expected = r#"CREATE TABLE data_types (
s STRING,
tt TEXT,
mt TEXT,
diff --git a/src/sql/src/statements/truncate.rs b/src/sql/src/statements/truncate.rs
index aa08cde559b4..c1a063f959ce 100644
--- a/src/sql/src/statements/truncate.rs
+++ b/src/sql/src/statements/truncate.rs
@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::fmt::Display;
+
use sqlparser::ast::ObjectName;
use sqlparser_derive::{Visit, VisitMut};
@@ -31,3 +33,42 @@ impl TruncateTable {
&self.table_name
}
}
+
+impl Display for TruncateTable {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ let table_name = self.table_name();
+ write!(f, r#"TRUNCATE TABLE {table_name}"#)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::assert_matches::assert_matches;
+
+ use crate::dialect::GreptimeDbDialect;
+ use crate::parser::{ParseOptions, ParserContext};
+ use crate::statements::statement::Statement;
+
+ #[test]
+    fn test_display_for_truncate_table() {
+ let sql = r"truncate table t1;";
+ let stmts: Vec<Statement> =
+ ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default())
+ .unwrap();
+ assert_eq!(1, stmts.len());
+ assert_matches!(&stmts[0], Statement::TruncateTable { .. });
+ match &stmts[0] {
+ Statement::TruncateTable(trunc) => {
+ let new_sql = format!("\n{}", trunc);
+ assert_eq!(
+ r#"
+TRUNCATE TABLE t1"#,
+ &new_sql
+ );
+ }
+ _ => {
+ unreachable!();
+ }
+ }
+ }
+}
|
feat
|
impl Display for Statement (#3744)
|
5d1761f3e5ab8c5b3755c171138e176e8f0f0762
|
2025-02-05 16:15:51
|
yihong
|
docs: fix memory perf command wrong (#5470)
| false
|
diff --git a/docs/how-to/how-to-profile-memory.md b/docs/how-to/how-to-profile-memory.md
index 3284df9e328f..06a063acca4a 100644
--- a/docs/how-to/how-to-profile-memory.md
+++ b/docs/how-to/how-to-profile-memory.md
@@ -4,6 +4,16 @@ This crate provides an easy approach to dump memory profiling info.
## Prerequisites
### jemalloc
+jeprof is already compiled in the target directory of GreptimeDB. You can find the binary and use it.
+```
+# find jeprof binary
+find . -name 'jeprof'
+# add executable permission
+chmod +x <path_to_jeprof>
+```
+The path is usually under `./target/${PROFILE}/build/tikv-jemalloc-sys-${HASH}/out/build/bin/jeprof`.
+The `jeprof` shipped with the jemalloc package from the package manager may not have the `--collapsed` option.
+You may need to check whether the `jeprof` version is >= `5.3.0` if you want to install it from the package manager.
```bash
# for macOS
brew install jemalloc
@@ -23,7 +33,11 @@ curl https://raw.githubusercontent.com/brendangregg/FlameGraph/master/flamegraph
Start GreptimeDB instance with environment variables:
```bash
+# for Linux
MALLOC_CONF=prof:true ./target/debug/greptime standalone start
+
+# for macOS
+_RJEM_MALLOC_CONF=prof:true ./target/debug/greptime standalone start
```
Dump memory profiling data through HTTP API:
|
docs
|
fix memory perf command wrong (#5470)
|
6a12c27e78ad484a4848855dee7032436c5dd128
|
2024-01-22 19:44:03
|
dennis zhuang
|
feat: make query be aware of timezone setting (#3175)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index c9986c705860..abc90012a3ce 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1710,6 +1710,7 @@ dependencies = [
"paste",
"ron",
"serde",
+ "session",
"snafu",
"statrs",
]
diff --git a/src/cmd/src/cli/repl.rs b/src/cmd/src/cli/repl.rs
index 066d89e69d5b..c8f5ae841854 100644
--- a/src/cmd/src/cli/repl.rs
+++ b/src/cmd/src/cli/repl.rs
@@ -164,12 +164,13 @@ impl Repl {
let plan = query_engine
.planner()
- .plan(stmt, query_ctx)
+ .plan(stmt, query_ctx.clone())
.await
.context(PlanStatementSnafu)?;
- let LogicalPlan::DfPlan(plan) =
- query_engine.optimize(&plan).context(PlanStatementSnafu)?;
+ let LogicalPlan::DfPlan(plan) = query_engine
+ .optimize(&query_engine.engine_context(query_ctx), &plan)
+ .context(PlanStatementSnafu)?;
let plan = DFLogicalSubstraitConvertor {}
.encode(&plan)
diff --git a/src/common/function/Cargo.toml b/src/common/function/Cargo.toml
index 3db195668e60..05dbdce23f5c 100644
--- a/src/common/function/Cargo.toml
+++ b/src/common/function/Cargo.toml
@@ -19,6 +19,7 @@ num = "0.4"
num-traits = "0.2"
once_cell.workspace = true
paste = "1.0"
+session.workspace = true
snafu.workspace = true
statrs = "0.16"
diff --git a/src/common/function/src/function.rs b/src/common/function/src/function.rs
index 3b6a51b0289f..a1e43aca4a97 100644
--- a/src/common/function/src/function.rs
+++ b/src/common/function/src/function.rs
@@ -17,20 +17,20 @@ use std::sync::Arc;
use common_query::error::Result;
use common_query::prelude::Signature;
-use common_time::timezone::get_timezone;
-use common_time::Timezone;
use datatypes::data_type::ConcreteDataType;
use datatypes::vectors::VectorRef;
+use session::context::{QueryContextBuilder, QueryContextRef};
+/// The function execution context
#[derive(Clone)]
pub struct FunctionContext {
- pub timezone: Timezone,
+ pub query_ctx: QueryContextRef,
}
impl Default for FunctionContext {
fn default() -> Self {
Self {
- timezone: get_timezone(None).clone(),
+ query_ctx: QueryContextBuilder::default().build(),
}
}
}
diff --git a/src/common/function/src/scalars/date/date_format.rs b/src/common/function/src/scalars/date/date_format.rs
index d94f115e54a9..fc82dbe06edc 100644
--- a/src/common/function/src/scalars/date/date_format.rs
+++ b/src/common/function/src/scalars/date/date_format.rs
@@ -79,7 +79,7 @@ impl Function for DateFormatFunction {
let result = match (ts, format) {
(Some(ts), Some(fmt)) => Some(
- ts.as_formatted_string(&fmt, Some(&func_ctx.timezone))
+ ts.as_formatted_string(&fmt, Some(&func_ctx.query_ctx.timezone()))
.map_err(BoxedError::new)
.context(error::ExecuteSnafu)?,
),
@@ -96,7 +96,7 @@ impl Function for DateFormatFunction {
let result = match (date, format) {
(Some(date), Some(fmt)) => date
- .as_formatted_string(&fmt, Some(&func_ctx.timezone))
+ .as_formatted_string(&fmt, Some(&func_ctx.query_ctx.timezone()))
.map_err(BoxedError::new)
.context(error::ExecuteSnafu)?,
_ => None,
@@ -112,7 +112,7 @@ impl Function for DateFormatFunction {
let result = match (datetime, format) {
(Some(datetime), Some(fmt)) => datetime
- .as_formatted_string(&fmt, Some(&func_ctx.timezone))
+ .as_formatted_string(&fmt, Some(&func_ctx.query_ctx.timezone()))
.map_err(BoxedError::new)
.context(error::ExecuteSnafu)?,
_ => None,
diff --git a/src/common/function/src/scalars/timestamp/greatest.rs b/src/common/function/src/scalars/timestamp/greatest.rs
index fd3fe0a16897..e8dfd21a65b9 100644
--- a/src/common/function/src/scalars/timestamp/greatest.rs
+++ b/src/common/function/src/scalars/timestamp/greatest.rs
@@ -104,7 +104,6 @@ impl fmt::Display for GreatestFunction {
#[cfg(test)]
mod tests {
- use std::str::FromStr;
use std::sync::Arc;
use common_time::Date;
@@ -137,11 +136,11 @@ mod tests {
assert_eq!(result.len(), 2);
assert_eq!(
result.get(0),
- Value::Date(Date::from_str("2001-02-01").unwrap())
+ Value::Date(Date::from_str_utc("2001-02-01").unwrap())
);
assert_eq!(
result.get(1),
- Value::Date(Date::from_str("2012-12-23").unwrap())
+ Value::Date(Date::from_str_utc("2012-12-23").unwrap())
);
}
@@ -162,11 +161,11 @@ mod tests {
assert_eq!(result.len(), 2);
assert_eq!(
result.get(0),
- Value::Date(Date::from_str("1970-01-01").unwrap())
+ Value::Date(Date::from_str_utc("1970-01-01").unwrap())
);
assert_eq!(
result.get(1),
- Value::Date(Date::from_str("1970-01-03").unwrap())
+ Value::Date(Date::from_str_utc("1970-01-03").unwrap())
);
}
}
diff --git a/src/common/function/src/scalars/timestamp/to_unixtime.rs b/src/common/function/src/scalars/timestamp/to_unixtime.rs
index 4d914ecba919..cc297942d114 100644
--- a/src/common/function/src/scalars/timestamp/to_unixtime.rs
+++ b/src/common/function/src/scalars/timestamp/to_unixtime.rs
@@ -13,7 +13,6 @@
// limitations under the License.
use std::fmt;
-use std::str::FromStr;
use std::sync::Arc;
use common_query::error::{InvalidFuncArgsSnafu, Result, UnsupportedInputDataTypeSnafu};
@@ -31,16 +30,17 @@ pub struct ToUnixtimeFunction;
const NAME: &str = "to_unixtime";
-fn convert_to_seconds(arg: &str) -> Option<i64> {
- if let Ok(dt) = DateTime::from_str(arg) {
+fn convert_to_seconds(arg: &str, func_ctx: &FunctionContext) -> Option<i64> {
+ let timezone = &func_ctx.query_ctx.timezone();
+ if let Ok(dt) = DateTime::from_str(arg, Some(timezone)) {
return Some(dt.val() / 1000);
}
- if let Ok(ts) = Timestamp::from_str(arg) {
+ if let Ok(ts) = Timestamp::from_str(arg, Some(timezone)) {
return Some(ts.split().0);
}
- if let Ok(date) = Date::from_str(arg) {
+ if let Ok(date) = Date::from_str(arg, Some(timezone)) {
return Some(date.to_secs());
}
@@ -92,7 +92,7 @@ impl Function for ToUnixtimeFunction {
)
}
- fn eval(&self, _func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
+ fn eval(&self, func_ctx: FunctionContext, columns: &[VectorRef]) -> Result<VectorRef> {
ensure!(
columns.len() == 1,
InvalidFuncArgsSnafu {
@@ -108,7 +108,7 @@ impl Function for ToUnixtimeFunction {
match columns[0].data_type() {
ConcreteDataType::String(_) => Ok(Arc::new(Int64Vector::from(
(0..vector.len())
- .map(|i| convert_to_seconds(&vector.get(i).to_string()))
+ .map(|i| convert_to_seconds(&vector.get(i).to_string(), &func_ctx))
.collect::<Vec<_>>(),
))),
ConcreteDataType::Int64(_) | ConcreteDataType::Int32(_) => {
diff --git a/src/common/function/src/scalars/udf.rs b/src/common/function/src/scalars/udf.rs
index 5b91ad1302ac..0555581e6117 100644
--- a/src/common/function/src/scalars/udf.rs
+++ b/src/common/function/src/scalars/udf.rs
@@ -21,22 +21,24 @@ use common_query::prelude::{
use datatypes::error::Error as DataTypeError;
use datatypes::prelude::*;
use datatypes::vectors::Helper;
+use session::context::QueryContextRef;
use snafu::ResultExt;
use crate::function::{FunctionContext, FunctionRef};
-/// Create a ScalarUdf from function.
-pub fn create_udf(func: FunctionRef) -> ScalarUdf {
+/// Create a ScalarUdf from function and query context.
+pub fn create_udf(func: FunctionRef, query_ctx: QueryContextRef) -> ScalarUdf {
let func_cloned = func.clone();
let return_type: ReturnTypeFunction = Arc::new(move |input_types: &[ConcreteDataType]| {
Ok(Arc::new(func_cloned.return_type(input_types)?))
});
let func_cloned = func.clone();
+
let fun: ScalarFunctionImplementation = Arc::new(move |args: &[ColumnarValue]| {
- // FIXME(dennis): set the timezone into function context
- // Question: how to get the timezone from the query context?
- let func_ctx = FunctionContext::default();
+ let func_ctx = FunctionContext {
+ query_ctx: query_ctx.clone(),
+ };
let len = args
.iter()
@@ -72,6 +74,7 @@ mod tests {
use datatypes::prelude::{ScalarVector, Vector, VectorRef};
use datatypes::value::Value;
use datatypes::vectors::{BooleanVector, ConstantVector};
+ use session::context::QueryContextBuilder;
use super::*;
use crate::function::Function;
@@ -80,6 +83,7 @@ mod tests {
#[test]
fn test_create_udf() {
let f = Arc::new(TestAndFunction);
+ let query_ctx = QueryContextBuilder::default().build();
let args: Vec<VectorRef> = vec![
Arc::new(ConstantVector::new(
@@ -97,7 +101,7 @@ mod tests {
}
// create a udf and test it again
- let udf = create_udf(f.clone());
+ let udf = create_udf(f.clone(), query_ctx);
assert_eq!("test_and", udf.name);
assert_eq!(f.signature(), udf.signature);
diff --git a/src/common/query/src/logical_plan.rs b/src/common/query/src/logical_plan.rs
index 0b8d67ae6abb..ac20c74b5b58 100644
--- a/src/common/query/src/logical_plan.rs
+++ b/src/common/query/src/logical_plan.rs
@@ -134,7 +134,7 @@ mod tests {
assert_eq!(return_type, (udf.return_type)(&[]).unwrap());
// test into_df_udf
- let df_udf: DfScalarUDF = udf.into_df_udf();
+ let df_udf: DfScalarUDF = udf.into();
assert_eq!("and", df_udf.name);
let types = vec![DataType::Boolean, DataType::Boolean];
diff --git a/src/common/query/src/logical_plan/udf.rs b/src/common/query/src/logical_plan/udf.rs
index 6a48a7b97ff9..31d356174502 100644
--- a/src/common/query/src/logical_plan/udf.rs
+++ b/src/common/query/src/logical_plan/udf.rs
@@ -66,14 +66,15 @@ impl ScalarUdf {
fun: fun.clone(),
}
}
+}
- /// Cast self into datafusion UDF.
- pub fn into_df_udf(self) -> DfScalarUDF {
+impl From<ScalarUdf> for DfScalarUDF {
+ fn from(udf: ScalarUdf) -> Self {
DfScalarUDF::new(
- &self.name,
- &self.signature.into(),
- &to_df_return_type(self.return_type),
- &to_df_scalar_func(self.fun),
+ &udf.name,
+ &udf.signature.into(),
+ &to_df_return_type(udf.return_type),
+ &to_df_scalar_func(udf.fun),
)
}
}
diff --git a/src/common/time/src/date.rs b/src/common/time/src/date.rs
index d4182b7c1b6a..a04b529fd448 100644
--- a/src/common/time/src/date.rs
+++ b/src/common/time/src/date.rs
@@ -13,16 +13,16 @@
// limitations under the License.
use std::fmt::{Display, Formatter, Write};
-use std::str::FromStr;
-use chrono::{Datelike, Days, Months, NaiveDate, NaiveTime, TimeZone};
+use chrono::{Datelike, Days, LocalResult, Months, NaiveDate, NaiveTime, TimeZone};
use serde::{Deserialize, Serialize};
use serde_json::Value;
use snafu::ResultExt;
-use crate::error::{Error, ParseDateStrSnafu, Result};
+use crate::error::{InvalidDateStrSnafu, ParseDateStrSnafu, Result};
use crate::interval::Interval;
use crate::timezone::get_timezone;
+use crate::util::datetime_to_utc;
use crate::Timezone;
const UNIX_EPOCH_FROM_CE: i32 = 719_163;
@@ -40,16 +40,6 @@ impl From<Date> for Value {
}
}
-impl FromStr for Date {
- type Err = Error;
-
- fn from_str(s: &str) -> Result<Self> {
- let s = s.trim();
- let date = NaiveDate::parse_from_str(s, "%F").context(ParseDateStrSnafu { raw: s })?;
- Ok(Self(date.num_days_from_ce() - UNIX_EPOCH_FROM_CE))
- }
-}
-
impl From<i32> for Date {
fn from(v: i32) -> Self {
Self(v)
@@ -74,6 +64,26 @@ impl Display for Date {
}
impl Date {
+ /// Try parsing a string into [`Date`] with UTC timezone.
+ pub fn from_str_utc(s: &str) -> Result<Self> {
+ Self::from_str(s, None)
+ }
+
+ /// Try parsing a string into [`Date`] with given timezone.
+ pub fn from_str(s: &str, timezone: Option<&Timezone>) -> Result<Self> {
+ let s = s.trim();
+ let date = NaiveDate::parse_from_str(s, "%F").context(ParseDateStrSnafu { raw: s })?;
+ let Some(timezone) = timezone else {
+ return Ok(Self(date.num_days_from_ce() - UNIX_EPOCH_FROM_CE));
+ };
+
+ let datetime = date.and_time(NaiveTime::default());
+ match datetime_to_utc(&datetime, timezone) {
+ LocalResult::None => InvalidDateStrSnafu { raw: s }.fail(),
+ LocalResult::Single(utc) | LocalResult::Ambiguous(utc, _) => Ok(Date::from(utc.date())),
+ }
+ }
+
pub fn new(val: i32) -> Self {
Self(val)
}
@@ -168,23 +178,64 @@ mod tests {
pub fn test_date_parse() {
assert_eq!(
"1970-01-01",
- Date::from_str("1970-01-01").unwrap().to_string()
+ Date::from_str("1970-01-01", None).unwrap().to_string()
);
assert_eq!(
"1969-01-01",
- Date::from_str("1969-01-01").unwrap().to_string()
+ Date::from_str("1969-01-01", None).unwrap().to_string()
);
assert_eq!(
"1969-01-01",
- Date::from_str(" 1969-01-01 ")
+ Date::from_str(" 1969-01-01 ", None)
.unwrap()
.to_string()
);
let now = Utc::now().date_naive().format("%F").to_string();
- assert_eq!(now, Date::from_str(&now).unwrap().to_string());
+ assert_eq!(now, Date::from_str(&now, None).unwrap().to_string());
+
+ // with timezone
+ assert_eq!(
+ "1969-12-31",
+ Date::from_str(
+ "1970-01-01",
+ Some(&Timezone::from_tz_string("Asia/Shanghai").unwrap())
+ )
+ .unwrap()
+ .to_string()
+ );
+
+ assert_eq!(
+ "1969-12-31",
+ Date::from_str(
+ "1970-01-01",
+ Some(&Timezone::from_tz_string("+16:00").unwrap())
+ )
+ .unwrap()
+ .to_string()
+ );
+
+ assert_eq!(
+ "1970-01-01",
+ Date::from_str(
+ "1970-01-01",
+ Some(&Timezone::from_tz_string("-8:00").unwrap())
+ )
+ .unwrap()
+ .to_string()
+ );
+
+ assert_eq!(
+ "1970-01-01",
+ Date::from_str(
+ "1970-01-01",
+ Some(&Timezone::from_tz_string("-16:00").unwrap())
+ )
+ .unwrap()
+ .to_string()
+ );
}
#[test]
@@ -201,9 +252,9 @@ mod tests {
#[test]
pub fn test_min_max() {
- let mut date = Date::from_str("9999-12-31").unwrap();
+ let mut date = Date::from_str("9999-12-31", None).unwrap();
date.0 += 1000;
- assert_eq!(date, Date::from_str(&date.to_string()).unwrap());
+ assert_eq!(date, Date::from_str(&date.to_string(), None).unwrap());
}
#[test]
@@ -245,11 +296,11 @@ mod tests {
#[test]
fn test_to_secs() {
- let d = Date::from_str("1970-01-01").unwrap();
+ let d = Date::from_str("1970-01-01", None).unwrap();
assert_eq!(d.to_secs(), 0);
- let d = Date::from_str("1970-01-02").unwrap();
+ let d = Date::from_str("1970-01-02", None).unwrap();
assert_eq!(d.to_secs(), 24 * 3600);
- let d = Date::from_str("1970-01-03").unwrap();
+ let d = Date::from_str("1970-01-03", None).unwrap();
assert_eq!(d.to_secs(), 2 * 24 * 3600);
}
}
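
The timezone cases in `test_date_parse` above follow from the fact that a date string has no time component: it is read as midnight local time in the given timezone and then shifted to UTC, which can move the calendar day. A chrono-only sketch of that shift, assuming the `chrono` and `chrono-tz` crates this module already depends on:

```rust
use chrono::{NaiveDate, NaiveTime, TimeZone};
use chrono_tz::Tz;

fn main() {
    // Midnight of 1970-01-01 interpreted as local time in Asia/Shanghai (UTC+8 in 1970).
    let local = NaiveDate::from_ymd_opt(1970, 1, 1)
        .unwrap()
        .and_time(NaiveTime::default());
    let tz: Tz = "Asia/Shanghai".parse().unwrap();
    let utc = tz.from_local_datetime(&local).unwrap().naive_utc();

    // 1969-12-31 16:00:00 UTC, so the stored Date becomes 1969-12-31.
    assert_eq!(utc.date(), NaiveDate::from_ymd_opt(1969, 12, 31).unwrap());
}
```
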
diff --git a/src/common/time/src/datetime.rs b/src/common/time/src/datetime.rs
index 722467cfab1f..26df42490c98 100644
--- a/src/common/time/src/datetime.rs
+++ b/src/common/time/src/datetime.rs
@@ -13,16 +13,15 @@
// limitations under the License.
use std::fmt::{Display, Formatter, Write};
-use std::str::FromStr;
use std::time::Duration;
use chrono::{Days, LocalResult, Months, NaiveDateTime, TimeZone as ChronoTimeZone, Utc};
use serde::{Deserialize, Serialize};
use snafu::ResultExt;
-use crate::error::{Error, InvalidDateStrSnafu, Result};
+use crate::error::{InvalidDateStrSnafu, Result};
use crate::timezone::{get_timezone, Timezone};
-use crate::util::{format_utc_datetime, local_datetime_to_utc};
+use crate::util::{datetime_to_utc, format_utc_datetime};
use crate::{Date, Interval};
const DATETIME_FORMAT: &str = "%F %T";
@@ -60,17 +59,41 @@ impl From<NaiveDateTime> for DateTime {
}
}
-impl FromStr for DateTime {
- type Err = Error;
+impl From<i64> for DateTime {
+ fn from(v: i64) -> Self {
+ Self(v)
+ }
+}
+
+impl From<Date> for DateTime {
+ fn from(value: Date) -> Self {
+        // It's safe, i32 * 86400000 won't overflow
+ Self(value.to_secs() * 1000)
+ }
+}
- fn from_str(s: &str) -> Result<Self> {
+impl DateTime {
+ /// Try parsing a string into [`DateTime`] with the system timezone.
+ /// See `DateTime::from_str`.
+ pub fn from_str_system(s: &str) -> Result<Self> {
+ Self::from_str(s, None)
+ }
+
+ /// Try parsing a string into [`DateTime`] with the given timezone.
+    /// Supported formats:
+    /// - RFC3339 (any offset in the string is converted to naive UTC)
+    /// - `%F %T`, interpreted in the given timezone
+    /// - `%F %T%z`, with the timezone taken from the string
+ pub fn from_str(s: &str, timezone: Option<&Timezone>) -> Result<Self> {
let s = s.trim();
- let timestamp_millis = if let Ok(d) = NaiveDateTime::parse_from_str(s, DATETIME_FORMAT) {
- match local_datetime_to_utc(&d) {
+ let timestamp_millis = if let Ok(dt) = chrono::DateTime::parse_from_rfc3339(s) {
+ dt.naive_utc().timestamp_millis()
+ } else if let Ok(d) = NaiveDateTime::parse_from_str(s, DATETIME_FORMAT) {
+ match datetime_to_utc(&d, get_timezone(timezone)) {
LocalResult::None => {
return InvalidDateStrSnafu { raw: s }.fail();
}
- LocalResult::Single(d) | LocalResult::Ambiguous(d, _) => d.timestamp_millis(),
+ LocalResult::Single(utc) | LocalResult::Ambiguous(utc, _) => utc.timestamp_millis(),
}
} else if let Ok(v) = chrono::DateTime::parse_from_str(s, DATETIME_FORMAT_WITH_TZ) {
v.timestamp_millis()
@@ -80,22 +103,7 @@ impl FromStr for DateTime {
Ok(Self(timestamp_millis))
}
-}
-
-impl From<i64> for DateTime {
- fn from(v: i64) -> Self {
- Self(v)
- }
-}
-
-impl From<Date> for DateTime {
- fn from(value: Date) -> Self {
- // It's safe, i32 * 86400000 won't be overflow
- Self(value.to_secs() * 1000)
- }
-}
-impl DateTime {
/// Create a new [DateTime] from milliseconds elapsed since "1970-01-01 00:00:00 UTC" (UNIX Epoch).
pub fn new(millis: i64) -> Self {
Self(millis)
@@ -201,9 +209,9 @@ mod tests {
pub fn test_parse_from_string() {
set_default_timezone(Some("Asia/Shanghai")).unwrap();
let time = "1970-01-01 00:00:00+0800";
- let dt = DateTime::from_str(time).unwrap();
+ let dt = DateTime::from_str(time, None).unwrap();
assert_eq!(time, &dt.to_string());
- let dt = DateTime::from_str(" 1970-01-01 00:00:00+0800 ").unwrap();
+ let dt = DateTime::from_str(" 1970-01-01 00:00:00+0800 ", None).unwrap();
assert_eq!(time, &dt.to_string());
}
@@ -230,17 +238,63 @@ mod tests {
set_default_timezone(Some("Asia/Shanghai")).unwrap();
assert_eq!(
-28800000,
- DateTime::from_str("1970-01-01 00:00:00").unwrap().val()
+ DateTime::from_str("1970-01-01 00:00:00", None)
+ .unwrap()
+ .val()
+ );
+ assert_eq!(
+ 0,
+ DateTime::from_str("1970-01-01 08:00:00", None)
+ .unwrap()
+ .val()
+ );
+
+ assert_eq!(
+ 0,
+ DateTime::from_str(
+ "1970-01-01 08:00:00",
+ Some(&Timezone::from_tz_string("Asia/Shanghai").unwrap())
+ )
+ .unwrap()
+ .val()
+ );
+
+ assert_eq!(
+ -28800000,
+ DateTime::from_str(
+ "1970-01-01 00:00:00",
+ Some(&Timezone::from_tz_string("Asia/Shanghai").unwrap())
+ )
+ .unwrap()
+ .val()
+ );
+
+ assert_eq!(
+ 28800000,
+ DateTime::from_str(
+ "1970-01-01 00:00:00",
+ Some(&Timezone::from_tz_string("-8:00").unwrap())
+ )
+ .unwrap()
+ .val()
);
- assert_eq!(0, DateTime::from_str("1970-01-01 08:00:00").unwrap().val());
}
#[test]
fn test_parse_local_date_time_with_tz() {
- let ts = DateTime::from_str("1970-01-01 08:00:00+0000")
+ let ts = DateTime::from_str("1970-01-01 08:00:00+0000", None)
.unwrap()
.val();
assert_eq!(28800000, ts);
+
+ // the string has the time zone info, the argument doesn't change the result
+ let ts = DateTime::from_str(
+ "1970-01-01 08:00:00+0000",
+ Some(&Timezone::from_tz_string("-8:00").unwrap()),
+ )
+ .unwrap()
+ .val();
+ assert_eq!(28800000, ts);
}
#[test]
diff --git a/src/common/time/src/timestamp.rs b/src/common/time/src/timestamp.rs
index 2f13b09a1503..6aeac49b257e 100644
--- a/src/common/time/src/timestamp.rs
+++ b/src/common/time/src/timestamp.rs
@@ -16,21 +16,19 @@ use core::default::Default;
use std::cmp::Ordering;
use std::fmt::{Display, Formatter, Write};
use std::hash::{Hash, Hasher};
-use std::str::FromStr;
use std::time::Duration;
use arrow::datatypes::TimeUnit as ArrowTimeUnit;
use chrono::{
- DateTime, Days, Months, NaiveDate, NaiveDateTime, NaiveTime, TimeZone as ChronoTimeZone, Utc,
+ DateTime, Days, LocalResult, Months, NaiveDate, NaiveDateTime, NaiveTime,
+ TimeZone as ChronoTimeZone, Utc,
};
use serde::{Deserialize, Serialize};
use snafu::{OptionExt, ResultExt};
-use crate::error::{
- ArithmeticOverflowSnafu, Error, ParseTimestampSnafu, Result, TimestampOverflowSnafu,
-};
+use crate::error::{ArithmeticOverflowSnafu, ParseTimestampSnafu, Result, TimestampOverflowSnafu};
use crate::timezone::{get_timezone, Timezone};
-use crate::util::div_ceil;
+use crate::util::{datetime_to_utc, div_ceil};
use crate::{error, Interval};
/// Timestamp represents the value of units(seconds/milliseconds/microseconds/nanoseconds) elapsed
@@ -372,10 +370,12 @@ impl Timestamp {
pub fn from_chrono_date(date: NaiveDate) -> Option<Self> {
Timestamp::from_chrono_datetime(date.and_time(NaiveTime::default()))
}
-}
-impl FromStr for Timestamp {
- type Err = Error;
+ /// Accepts a string in RFC3339 / ISO8601 standard format and some variants and converts it to a nanosecond precision timestamp.
+    /// If no timezone is specified in the string, it is cast to a nanosecond epoch timestamp in UTC.
+ pub fn from_str_utc(s: &str) -> Result<Self> {
+ Self::from_str(s, None)
+ }
/// Accepts a string in RFC3339 / ISO8601 standard format and some variants and converts it to a nanosecond precision timestamp.
/// This code is copied from [arrow-datafusion](https://github.com/apache/arrow-datafusion/blob/arrow2/datafusion-physical-expr/src/arrow_temporal_util.rs#L71)
@@ -383,13 +383,13 @@ impl FromStr for Timestamp {
/// Supported format:
/// - `2022-09-20T14:16:43.012345Z` (Zulu timezone)
/// - `2022-09-20T14:16:43.012345+08:00` (Explicit offset)
- /// - `2022-09-20T14:16:43.012345` (Zulu timezone, with T)
+ /// - `2022-09-20T14:16:43.012345` (The given timezone, with T)
/// - `2022-09-20T14:16:43` (Zulu timezone, no fractional seconds, with T)
/// - `2022-09-20 14:16:43.012345Z` (Zulu timezone, without T)
- /// - `2022-09-20 14:16:43` (Zulu timezone, without T)
- /// - `2022-09-20 14:16:43.012345` (Zulu timezone, without T)
+ /// - `2022-09-20 14:16:43` (The given timezone, without T)
+ /// - `2022-09-20 14:16:43.012345` (The given timezone, without T)
#[allow(deprecated)]
- fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
+ pub fn from_str(s: &str, timezone: Option<&Timezone>) -> Result<Self> {
// RFC3339 timestamp (with a T)
let s = s.trim();
if let Ok(ts) = DateTime::parse_from_rfc3339(s) {
@@ -406,19 +406,19 @@ impl FromStr for Timestamp {
}
if let Ok(ts) = NaiveDateTime::parse_from_str(s, "%Y-%m-%dT%H:%M:%S") {
- return naive_datetime_to_timestamp(s, ts);
+ return naive_datetime_to_timestamp(s, ts, timezone);
}
if let Ok(ts) = NaiveDateTime::parse_from_str(s, "%Y-%m-%dT%H:%M:%S%.f") {
- return naive_datetime_to_timestamp(s, ts);
+ return naive_datetime_to_timestamp(s, ts, timezone);
}
if let Ok(ts) = NaiveDateTime::parse_from_str(s, "%Y-%m-%d %H:%M:%S") {
- return naive_datetime_to_timestamp(s, ts);
+ return naive_datetime_to_timestamp(s, ts, timezone);
}
if let Ok(ts) = NaiveDateTime::parse_from_str(s, "%Y-%m-%d %H:%M:%S%.f") {
- return naive_datetime_to_timestamp(s, ts);
+ return naive_datetime_to_timestamp(s, ts, timezone);
}
ParseTimestampSnafu { raw: s }.fail()
@@ -430,9 +430,19 @@ impl FromStr for Timestamp {
fn naive_datetime_to_timestamp(
s: &str,
datetime: NaiveDateTime,
+ timezone: Option<&Timezone>,
) -> crate::error::Result<Timestamp> {
- Timestamp::from_chrono_datetime(Utc.from_utc_datetime(&datetime).naive_utc())
- .context(ParseTimestampSnafu { raw: s })
+ let Some(timezone) = timezone else {
+ return Timestamp::from_chrono_datetime(Utc.from_utc_datetime(&datetime).naive_utc())
+ .context(ParseTimestampSnafu { raw: s });
+ };
+
+ match datetime_to_utc(&datetime, timezone) {
+ LocalResult::None => ParseTimestampSnafu { raw: s }.fail(),
+ LocalResult::Single(utc) | LocalResult::Ambiguous(utc, _) => {
+ Timestamp::from_chrono_datetime(utc).context(ParseTimestampSnafu { raw: s })
+ }
+ }
}
impl From<i64> for Timestamp {
@@ -786,7 +796,7 @@ mod tests {
// Input timestamp string is regarded as local timezone if no timezone is specified,
// but expected timestamp is in UTC timezone
fn check_from_str(s: &str, expect: &str) {
- let ts = Timestamp::from_str(s).unwrap();
+ let ts = Timestamp::from_str_utc(s).unwrap();
let time = ts.to_chrono_datetime().unwrap();
assert_eq!(expect, time.to_string());
}
@@ -812,7 +822,7 @@ mod tests {
fn test_to_iso8601_string() {
set_default_timezone(Some("Asia/Shanghai")).unwrap();
let datetime_str = "2020-09-08 13:42:29.042+0000";
- let ts = Timestamp::from_str(datetime_str).unwrap();
+ let ts = Timestamp::from_str_utc(datetime_str).unwrap();
assert_eq!("2020-09-08 21:42:29.042+0800", ts.to_iso8601_string());
let ts_millis = 1668070237000;
@@ -1079,17 +1089,17 @@ mod tests {
std::env::set_var("TZ", "Asia/Shanghai");
assert_eq!(
Timestamp::new(28800, TimeUnit::Second),
- Timestamp::from_str("1970-01-01 08:00:00.000").unwrap()
+ Timestamp::from_str_utc("1970-01-01 08:00:00.000").unwrap()
);
assert_eq!(
Timestamp::new(28800, TimeUnit::Second),
- Timestamp::from_str("1970-01-01 08:00:00").unwrap()
+ Timestamp::from_str_utc("1970-01-01 08:00:00").unwrap()
);
assert_eq!(
Timestamp::new(28800, TimeUnit::Second),
- Timestamp::from_str(" 1970-01-01 08:00:00 ").unwrap()
+ Timestamp::from_str_utc(" 1970-01-01 08:00:00 ").unwrap()
);
}
@@ -1286,7 +1296,7 @@ mod tests {
];
for s in valid_strings {
- Timestamp::from_str(s).unwrap();
+ Timestamp::from_str_utc(s).unwrap();
}
}
}
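
For a string without explicit zone information, the new `from_str` interprets the wall-clock time in the supplied timezone, while `from_str_utc` keeps the old UTC behaviour. A minimal sketch assuming the `common-time` paths used in the diffs above:

```rust
use common_time::timestamp::Timestamp;
use common_time::Timezone;

fn main() {
    let s = "2022-09-20 14:16:43";

    // No zone in the string: the supplied timezone decides the instant.
    let tz = Timezone::from_tz_string("+8:00").unwrap();
    let local = Timestamp::from_str(s, Some(&tz)).unwrap();

    // from_str_utc reads the same wall clock as UTC.
    let utc = Timestamp::from_str_utc(s).unwrap();

    // The UTC+8 reading is an instant 8 hours earlier than the UTC reading.
    assert_eq!(utc.split().0 - local.split().0, 8 * 3600);
}
```
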
diff --git a/src/common/time/src/util.rs b/src/common/time/src/util.rs
index 1a890ec2092f..6ce824764a2b 100644
--- a/src/common/time/src/util.rs
+++ b/src/common/time/src/util.rs
@@ -18,6 +18,7 @@ use chrono::{LocalResult, NaiveDateTime, TimeZone};
use chrono_tz::Tz;
use crate::timezone::get_timezone;
+use crate::Timezone;
pub fn format_utc_datetime(utc: &NaiveDateTime, pattern: &str) -> String {
match get_timezone(None) {
@@ -28,10 +29,20 @@ pub fn format_utc_datetime(utc: &NaiveDateTime, pattern: &str) -> String {
}
}
-pub fn local_datetime_to_utc(local: &NaiveDateTime) -> LocalResult<NaiveDateTime> {
- match get_timezone(None) {
- crate::Timezone::Offset(offset) => offset.from_local_datetime(local).map(|x| x.naive_utc()),
- crate::Timezone::Named(tz) => tz.from_local_datetime(local).map(|x| x.naive_utc()),
+pub fn system_datetime_to_utc(local: &NaiveDateTime) -> LocalResult<NaiveDateTime> {
+ datetime_to_utc(local, get_timezone(None))
+}
+
+/// Convert a local [`NaiveDateTime`] to UTC using the given timezone.
+pub fn datetime_to_utc(
+ datetime: &NaiveDateTime,
+ timezone: &Timezone,
+) -> LocalResult<NaiveDateTime> {
+ match timezone {
+ crate::Timezone::Offset(offset) => {
+ offset.from_local_datetime(datetime).map(|x| x.naive_utc())
+ }
+ crate::Timezone::Named(tz) => tz.from_local_datetime(datetime).map(|x| x.naive_utc()),
}
}
diff --git a/src/datanode/src/tests.rs b/src/datanode/src/tests.rs
index e04dd32907ee..1a38c88f0c8d 100644
--- a/src/datanode/src/tests.rs
+++ b/src/datanode/src/tests.rs
@@ -28,7 +28,7 @@ use query::dataframe::DataFrame;
use query::plan::LogicalPlan;
use query::planner::LogicalPlanner;
use query::query_engine::DescribeResult;
-use query::QueryEngine;
+use query::{QueryEngine, QueryEngineContext};
use session::context::QueryContextRef;
use store_api::metadata::RegionMetadataRef;
use store_api::region_engine::{RegionEngine, RegionRole, SetReadonlyResponse};
@@ -57,7 +57,11 @@ impl QueryEngine for MockQueryEngine {
"MockQueryEngine"
}
- async fn describe(&self, _plan: LogicalPlan) -> query::error::Result<DescribeResult> {
+ async fn describe(
+ &self,
+ _plan: LogicalPlan,
+ _query_ctx: QueryContextRef,
+ ) -> query::error::Result<DescribeResult> {
unimplemented!()
}
@@ -78,6 +82,10 @@ impl QueryEngine for MockQueryEngine {
fn read_table(&self, _table: TableRef) -> query::error::Result<DataFrame> {
unimplemented!()
}
+
+ fn engine_context(&self, _query_ctx: QueryContextRef) -> QueryEngineContext {
+ unimplemented!()
+ }
}
/// Create a region server without any engine
diff --git a/src/datatypes/src/types/cast.rs b/src/datatypes/src/types/cast.rs
index d92f5f9bbfbb..702244a14291 100644
--- a/src/datatypes/src/types/cast.rs
+++ b/src/datatypes/src/types/cast.rs
@@ -172,8 +172,6 @@ fn invalid_type_cast(src_value: &Value, dest_type: &ConcreteDataType) -> Error {
#[cfg(test)]
mod tests {
- use std::str::FromStr;
-
use common_base::bytes::StringBytes;
use common_time::time::Time;
use common_time::timezone::set_default_timezone;
@@ -283,7 +281,7 @@ mod tests {
// date -> other types
test_can_cast!(
- Value::Date(Date::from_str("2021-01-01").unwrap()),
+ Value::Date(Date::from_str_utc("2021-01-01").unwrap()),
null_datatype,
int32_datatype,
timestamp_second_datatype,
@@ -292,7 +290,7 @@ mod tests {
// datetime -> other types
test_can_cast!(
- Value::DateTime(DateTime::from_str("2021-01-01 00:00:00").unwrap()),
+ Value::DateTime(DateTime::from_str_system("2021-01-01 00:00:00").unwrap()),
null_datatype,
int64_datatype,
timestamp_second_datatype,
@@ -301,7 +299,7 @@ mod tests {
// timestamp -> other types
test_can_cast!(
- Value::Timestamp(Timestamp::from_str("2021-01-01 00:00:00").unwrap()),
+ Value::Timestamp(Timestamp::from_str_utc("2021-01-01 00:00:00").unwrap()),
null_datatype,
int64_datatype,
date_datatype,
diff --git a/src/datatypes/src/types/date_type.rs b/src/datatypes/src/types/date_type.rs
index a0df0b5a2151..5481b6fa5bcf 100644
--- a/src/datatypes/src/types/date_type.rs
+++ b/src/datatypes/src/types/date_type.rs
@@ -12,8 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::str::FromStr;
-
use arrow::datatypes::{DataType as ArrowDataType, Date32Type};
use common_time::Date;
use serde::{Deserialize, Serialize};
@@ -55,7 +53,7 @@ impl DataType for DateType {
fn try_cast(&self, from: Value) -> Option<Value> {
match from {
Value::Int32(v) => Some(Value::Date(Date::from(v))),
- Value::String(v) => Date::from_str(v.as_utf8()).map(Value::Date).ok(),
+ Value::String(v) => Date::from_str_utc(v.as_utf8()).map(Value::Date).ok(),
Value::Timestamp(v) => v.to_chrono_date().map(|date| Value::Date(date.into())),
Value::DateTime(v) => Some(Value::DateTime(v)),
_ => None,
@@ -111,32 +109,32 @@ mod tests {
fn test_date_cast() {
set_default_timezone(Some("Asia/Shanghai")).unwrap();
// timestamp -> date
- let ts = Value::Timestamp(Timestamp::from_str("2000-01-01 08:00:01").unwrap());
+ let ts = Value::Timestamp(Timestamp::from_str_utc("2000-01-01 08:00:01").unwrap());
let date = ConcreteDataType::date_datatype().try_cast(ts).unwrap();
- assert_eq!(date, Value::Date(Date::from_str("2000-01-01").unwrap()));
+ assert_eq!(date, Value::Date(Date::from_str_utc("2000-01-01").unwrap()));
// this case bind with Zulu timezone.
- let ts = Value::Timestamp(Timestamp::from_str("2000-01-02 07:59:59").unwrap());
+ let ts = Value::Timestamp(Timestamp::from_str_utc("2000-01-02 07:59:59").unwrap());
let date = ConcreteDataType::date_datatype().try_cast(ts).unwrap();
- assert_eq!(date, Value::Date(Date::from_str("2000-01-02").unwrap()));
+ assert_eq!(date, Value::Date(Date::from_str_utc("2000-01-02").unwrap()));
// while this case is offsetted to Asia/Shanghai.
- let ts = Value::Timestamp(Timestamp::from_str("2000-01-02 07:59:59+08:00").unwrap());
+ let ts = Value::Timestamp(Timestamp::from_str_utc("2000-01-02 07:59:59+08:00").unwrap());
let date = ConcreteDataType::date_datatype().try_cast(ts).unwrap();
- assert_eq!(date, Value::Date(Date::from_str("2000-01-01").unwrap()));
+ assert_eq!(date, Value::Date(Date::from_str_utc("2000-01-01").unwrap()));
// Int32 -> date
let val = Value::Int32(0);
let date = ConcreteDataType::date_datatype().try_cast(val).unwrap();
- assert_eq!(date, Value::Date(Date::from_str("1970-01-01").unwrap()));
+ assert_eq!(date, Value::Date(Date::from_str_utc("1970-01-01").unwrap()));
let val = Value::Int32(19614);
let date = ConcreteDataType::date_datatype().try_cast(val).unwrap();
- assert_eq!(date, Value::Date(Date::from_str("2023-09-14").unwrap()));
+ assert_eq!(date, Value::Date(Date::from_str_utc("2023-09-14").unwrap()));
// String -> date
let s = Value::String(StringBytes::from("1970-02-12"));
let date = ConcreteDataType::date_datatype().try_cast(s).unwrap();
- assert_eq!(date, Value::Date(Date::from_str("1970-02-12").unwrap()));
+ assert_eq!(date, Value::Date(Date::from_str_utc("1970-02-12").unwrap()));
}
}
diff --git a/src/datatypes/src/types/datetime_type.rs b/src/datatypes/src/types/datetime_type.rs
index 4e23982a2e34..699eea3067ea 100644
--- a/src/datatypes/src/types/datetime_type.rs
+++ b/src/datatypes/src/types/datetime_type.rs
@@ -12,8 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::str::FromStr;
-
use arrow::datatypes::{DataType as ArrowDataType, Date64Type};
use common_time::DateTime;
use serde::{Deserialize, Serialize};
@@ -54,7 +52,9 @@ impl DataType for DateTimeType {
match from {
Value::Int64(v) => Some(Value::DateTime(DateTime::from(v))),
Value::Timestamp(v) => v.to_chrono_datetime().map(|d| Value::DateTime(d.into())),
- Value::String(v) => DateTime::from_str(v.as_utf8()).map(Value::DateTime).ok(),
+ Value::String(v) => DateTime::from_str_system(v.as_utf8())
+ .map(Value::DateTime)
+ .ok(),
_ => None,
}
}
@@ -119,15 +119,15 @@ mod tests {
let dt = ConcreteDataType::datetime_datatype().try_cast(val).unwrap();
assert_eq!(
dt,
- Value::DateTime(DateTime::from_str("1970-01-01 00:00:00+0800").unwrap())
+ Value::DateTime(DateTime::from_str_system("1970-01-01 00:00:00+0800").unwrap())
);
// cast from Timestamp
- let val = Value::Timestamp(Timestamp::from_str("2020-09-08 21:42:29+0800").unwrap());
+ let val = Value::Timestamp(Timestamp::from_str_utc("2020-09-08 21:42:29+0800").unwrap());
let dt = ConcreteDataType::datetime_datatype().try_cast(val).unwrap();
assert_eq!(
dt,
- Value::DateTime(DateTime::from_str("2020-09-08 21:42:29+0800").unwrap())
+ Value::DateTime(DateTime::from_str_system("2020-09-08 21:42:29+0800").unwrap())
);
}
}
diff --git a/src/datatypes/src/types/timestamp_type.rs b/src/datatypes/src/types/timestamp_type.rs
index bca9d3e8e2e2..cda1c2603a60 100644
--- a/src/datatypes/src/types/timestamp_type.rs
+++ b/src/datatypes/src/types/timestamp_type.rs
@@ -12,8 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::str::FromStr;
-
use arrow::datatypes::{
DataType as ArrowDataType, TimeUnit as ArrowTimeUnit,
TimestampMicrosecondType as ArrowTimestampMicrosecondType,
@@ -132,7 +130,7 @@ macro_rules! impl_data_type_for_timestamp {
fn try_cast(&self, from: Value)-> Option<Value>{
match from {
Value::Timestamp(v) => v.convert_to(TimeUnit::$unit).map(Value::Timestamp),
- Value::String(v) => Timestamp::from_str(v.as_utf8()).map(Value::Timestamp).ok(),
+ Value::String(v) => Timestamp::from_str_utc(v.as_utf8()).map(Value::Timestamp).ok(),
Value::Int64(v) => Some(Value::Timestamp(Timestamp::new(v, TimeUnit::$unit))),
Value::DateTime(v) => Timestamp::new_second(v.val()).convert_to(TimeUnit::$unit).map(Value::Timestamp),
Value::Date(v) => Timestamp::new_second(v.to_secs()).convert_to(TimeUnit::$unit).map(Value::Timestamp),
@@ -259,7 +257,7 @@ mod tests {
assert_eq!(ts, Value::Timestamp(Timestamp::new_second(1234567)));
// Date -> TimestampMillisecond
- let d = Value::Date(Date::from_str("1970-01-01").unwrap());
+ let d = Value::Date(Date::from_str_utc("1970-01-01").unwrap());
let ts = ConcreteDataType::timestamp_millisecond_datatype()
.try_cast(d)
.unwrap();
diff --git a/src/datatypes/src/value.rs b/src/datatypes/src/value.rs
index fede1420bb06..bf3445a922d9 100644
--- a/src/datatypes/src/value.rs
+++ b/src/datatypes/src/value.rs
@@ -14,7 +14,6 @@
use std::cmp::Ordering;
use std::fmt::{Display, Formatter};
-use std::str::FromStr;
use std::sync::Arc;
use arrow::datatypes::{DataType as ArrowDataType, Field};
@@ -26,7 +25,7 @@ use common_time::datetime::DateTime;
use common_time::interval::IntervalUnit;
use common_time::time::Time;
use common_time::timestamp::{TimeUnit, Timestamp};
-use common_time::{Duration, Interval};
+use common_time::{Duration, Interval, Timezone};
use datafusion_common::ScalarValue;
pub use ordered_float::OrderedFloat;
use serde::{Deserialize, Serialize};
@@ -428,11 +427,15 @@ pub fn duration_to_scalar_value(unit: TimeUnit, val: Option<i64>) -> ScalarValue
}
}
-/// Convert [ScalarValue] to [Timestamp].
+/// Convert [`ScalarValue`] to [`Timestamp`].
+/// If it's `ScalarValue::Utf8`, try to parse it with the given timezone.
/// Return `None` if given scalar value cannot be converted to a valid timestamp.
-pub fn scalar_value_to_timestamp(scalar: &ScalarValue) -> Option<Timestamp> {
+pub fn scalar_value_to_timestamp(
+ scalar: &ScalarValue,
+ timezone: Option<&Timezone>,
+) -> Option<Timestamp> {
match scalar {
- ScalarValue::Utf8(Some(s)) => match Timestamp::from_str(s) {
+ ScalarValue::Utf8(Some(s)) => match Timestamp::from_str(s, timezone) {
Ok(t) => Some(t),
Err(e) => {
logging::error!(e;"Failed to convert string literal {s} to timestamp");
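
The practical effect of threading the timezone into `scalar_value_to_timestamp` is that string literals in query filters now resolve against the session timezone instead of always UTC. A small sketch assuming the updated helper above (the literal is illustrative):

```rust
use common_time::Timezone;
use datafusion_common::ScalarValue;
use datatypes::value::scalar_value_to_timestamp;

fn main() {
    let literal = ScalarValue::Utf8(Some("1970-01-01 08:00:00".to_string()));

    // With the session timezone set to Asia/Shanghai (UTC+8), this literal
    // denotes the Unix epoch; parsed as UTC it would be 8 hours later.
    let tz = Timezone::from_tz_string("Asia/Shanghai").unwrap();
    let ts = scalar_value_to_timestamp(&literal, Some(&tz)).unwrap();
    assert_eq!(ts.split().0, 0);
}
```
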
diff --git a/src/frontend/src/instance.rs b/src/frontend/src/instance.rs
index f73d490de74f..ae22121100b2 100644
--- a/src/frontend/src/instance.rs
+++ b/src/frontend/src/instance.rs
@@ -374,11 +374,11 @@ impl SqlQueryHandler for Instance {
let plan = self
.query_engine
.planner()
- .plan(QueryStatement::Sql(stmt), query_ctx)
+ .plan(QueryStatement::Sql(stmt), query_ctx.clone())
.await
.context(PlanStatementSnafu)?;
self.query_engine
- .describe(plan)
+ .describe(plan, query_ctx)
.await
.map(Some)
.context(error::DescribeStatementSnafu)
diff --git a/src/operator/src/statement.rs b/src/operator/src/statement.rs
index 93106b8587a1..86600a8c8b9a 100644
--- a/src/operator/src/statement.rs
+++ b/src/operator/src/statement.rs
@@ -21,7 +21,6 @@ mod dml;
mod show;
mod tql;
-use std::str::FromStr;
use std::sync::Arc;
use catalog::CatalogManagerRef;
@@ -331,8 +330,8 @@ fn to_copy_database_request(
.map_err(BoxedError::new)
.context(ExternalSnafu)?;
- let start_timestamp = extract_timestamp(&arg.with, COPY_DATABASE_TIME_START_KEY)?;
- let end_timestamp = extract_timestamp(&arg.with, COPY_DATABASE_TIME_END_KEY)?;
+ let start_timestamp = extract_timestamp(&arg.with, COPY_DATABASE_TIME_START_KEY, query_ctx)?;
+ let end_timestamp = extract_timestamp(&arg.with, COPY_DATABASE_TIME_END_KEY, query_ctx)?;
let time_range = match (start_timestamp, end_timestamp) {
(Some(start), Some(end)) => TimestampRange::new(start, end),
@@ -352,10 +351,14 @@ fn to_copy_database_request(
}
/// Extracts timestamp from a [HashMap<String, String>] with given key.
-fn extract_timestamp(map: &OptionMap, key: &str) -> Result<Option<Timestamp>> {
+fn extract_timestamp(
+ map: &OptionMap,
+ key: &str,
+ query_ctx: &QueryContextRef,
+) -> Result<Option<Timestamp>> {
map.get(key)
.map(|v| {
- Timestamp::from_str(v)
+ Timestamp::from_str(v, Some(&query_ctx.timezone()))
.map_err(|_| error::InvalidCopyParameterSnafu { key, value: v }.build())
})
.transpose()
diff --git a/src/query/src/datafusion.rs b/src/query/src/datafusion.rs
index b1c421b01cca..6dab18487c56 100644
--- a/src/query/src/datafusion.rs
+++ b/src/query/src/datafusion.rs
@@ -26,7 +26,6 @@ use common_base::Plugins;
use common_error::ext::BoxedError;
use common_function::function::FunctionRef;
use common_function::scalars::aggregate::AggregateFunctionMetaRef;
-use common_function::scalars::udf::create_udf;
use common_query::physical_plan::{DfPhysicalPlanAdapter, PhysicalPlan, PhysicalPlanAdapter};
use common_query::prelude::ScalarUdf;
use common_query::Output;
@@ -84,7 +83,7 @@ impl DatafusionQueryEngine {
plan: LogicalPlan,
query_ctx: QueryContextRef,
) -> Result<Output> {
- let mut ctx = QueryEngineContext::new(self.state.session_state(), query_ctx.clone());
+ let mut ctx = self.engine_context(query_ctx.clone());
// `create_physical_plan` will optimize logical plan internally
let physical_plan = self.create_physical_plan(&mut ctx, &plan).await?;
@@ -242,8 +241,13 @@ impl QueryEngine for DatafusionQueryEngine {
"datafusion"
}
- async fn describe(&self, plan: LogicalPlan) -> Result<DescribeResult> {
- let optimised_plan = self.optimize(&plan)?;
+ async fn describe(
+ &self,
+ plan: LogicalPlan,
+ query_ctx: QueryContextRef,
+ ) -> Result<DescribeResult> {
+ let ctx = self.engine_context(query_ctx);
+ let optimised_plan = self.optimize(&ctx, &plan)?;
Ok(DescribeResult {
schema: optimised_plan.schema()?,
logical_plan: optimised_plan,
@@ -259,10 +263,6 @@ impl QueryEngine for DatafusionQueryEngine {
}
}
- fn register_udf(&self, udf: ScalarUdf) {
- self.state.register_udf(udf);
- }
-
/// Note in SQL queries, aggregate names are looked up using
/// lowercase unless the query uses quotes. For example,
///
@@ -274,8 +274,15 @@ impl QueryEngine for DatafusionQueryEngine {
self.state.register_aggregate_function(func);
}
+ /// Register a [`ScalarUdf`].
+ fn register_udf(&self, udf: ScalarUdf) {
+ self.state.register_udf(udf);
+ }
+
+    /// Register a UDF function.
+    /// Will override the existing one if a function with the same name is already registered.
fn register_function(&self, func: FunctionRef) {
- self.state.register_udf(create_udf(func));
+ self.state.register_function(func);
}
fn read_table(&self, table: TableRef) -> Result<DataFrame> {
@@ -287,18 +294,31 @@ impl QueryEngine for DatafusionQueryEngine {
.context(QueryExecutionSnafu)?,
))
}
+
+ fn engine_context(&self, query_ctx: QueryContextRef) -> QueryEngineContext {
+ QueryEngineContext::new(self.state.session_state(), query_ctx)
+ }
}
impl LogicalOptimizer for DatafusionQueryEngine {
#[tracing::instrument(skip_all)]
- fn optimize(&self, plan: &LogicalPlan) -> Result<LogicalPlan> {
+ fn optimize(&self, context: &QueryEngineContext, plan: &LogicalPlan) -> Result<LogicalPlan> {
let _timer = metrics::METRIC_OPTIMIZE_LOGICAL_ELAPSED.start_timer();
match plan {
LogicalPlan::DfPlan(df_plan) => {
+ // Optimized by extension rules
+ let optimized_plan = self
+ .state
+ .optimize_by_extension_rules(df_plan.clone(), context)
+ .context(error::DatafusionSnafu)
+ .map_err(BoxedError::new)
+ .context(QueryExecutionSnafu)?;
+
+ // Optimized by datafusion optimizer
let optimized_plan = self
.state
.session_state()
- .optimize(df_plan)
+ .optimize(&optimized_plan)
.context(error::DatafusionSnafu)
.map_err(BoxedError::new)
.context(QueryExecutionSnafu)?;
@@ -654,7 +674,7 @@ mod tests {
let DescribeResult {
schema,
logical_plan,
- } = engine.describe(plan).await.unwrap();
+ } = engine.describe(plan, QueryContext::arc()).await.unwrap();
assert_eq!(
schema.column_schemas()[0],
diff --git a/src/query/src/datafusion/planner.rs b/src/query/src/datafusion/planner.rs
index 983c4155f7e1..4e4b02b0fdb8 100644
--- a/src/query/src/datafusion/planner.rs
+++ b/src/query/src/datafusion/planner.rs
@@ -18,6 +18,7 @@ use std::sync::Arc;
use arrow_schema::DataType;
use catalog::table_source::DfTableSourceProvider;
+use common_function::scalars::udf::create_udf;
use common_query::logical_plan::create_aggregate_function;
use datafusion::catalog::TableReference;
use datafusion::error::Result as DfResult;
@@ -41,6 +42,7 @@ pub struct DfContextProviderAdapter {
session_state: SessionState,
tables: HashMap<String, Arc<dyn TableSource>>,
table_provider: DfTableSourceProvider,
+ query_ctx: QueryContextRef,
}
impl DfContextProviderAdapter {
@@ -67,6 +69,7 @@ impl DfContextProviderAdapter {
session_state,
tables,
table_provider,
+ query_ctx,
})
}
}
@@ -104,7 +107,10 @@ impl ContextProvider for DfContextProviderAdapter {
}
fn get_function_meta(&self, name: &str) -> Option<Arc<ScalarUDF>> {
- self.session_state.scalar_functions().get(name).cloned()
+ self.engine_state.udf_function(name).map_or_else(
+ || self.session_state.scalar_functions().get(name).cloned(),
+ |func| Some(Arc::new(create_udf(func, self.query_ctx.clone()).into())),
+ )
}
fn get_aggregate_meta(&self, name: &str) -> Option<Arc<AggregateUDF>> {
diff --git a/src/query/src/error.rs b/src/query/src/error.rs
index c343d25d51f5..96db70810481 100644
--- a/src/query/src/error.rs
+++ b/src/query/src/error.rs
@@ -115,6 +115,9 @@ pub enum Error {
location: Location,
},
+ #[snafu(display("Invalid timestamp `{}`", raw))]
+ InvalidTimestamp { raw: String, location: Location },
+
#[snafu(display("Failed to parse float number `{}`", raw))]
ParseFloat {
raw: String,
@@ -271,6 +274,7 @@ impl ErrorExt for Error {
| UnknownTable { .. }
| TimeIndexNotFound { .. }
| ParseTimestamp { .. }
+ | InvalidTimestamp { .. }
| ParseFloat { .. }
| MissingRequiredField { .. }
| BuildRegex { .. }
diff --git a/src/query/src/logical_optimizer.rs b/src/query/src/logical_optimizer.rs
index 97e5a70d4a07..ab9bff445879 100644
--- a/src/query/src/logical_optimizer.rs
+++ b/src/query/src/logical_optimizer.rs
@@ -14,7 +14,10 @@
use crate::error::Result;
use crate::plan::LogicalPlan;
+use crate::QueryEngineContext;
+/// Logical plan optimizer that rewrites the [`LogicalPlan`] in some way.
pub trait LogicalOptimizer {
- fn optimize(&self, plan: &LogicalPlan) -> Result<LogicalPlan>;
+ /// Optimize the `plan`
+ fn optimize(&self, context: &QueryEngineContext, plan: &LogicalPlan) -> Result<LogicalPlan>;
}
diff --git a/src/query/src/optimizer.rs b/src/query/src/optimizer.rs
index 34108fe4889d..cfce77f039d8 100644
--- a/src/query/src/optimizer.rs
+++ b/src/query/src/optimizer.rs
@@ -12,6 +12,28 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use datafusion_common::config::ConfigOptions;
+use datafusion_common::Result;
+use datafusion_expr::LogicalPlan;
+
+use crate::QueryEngineContext;
+
+/// [`ExtensionAnalyzerRule`]s transform [`LogicalPlan`]s in some way to make
+/// the plan valid prior to the rest of the DataFusion optimization process.
+/// It's an extension of datafusion [`AnalyzerRule`]s but accepts [`QueryEngineContext`] as the second parameter.
+pub trait ExtensionAnalyzerRule {
+ /// Rewrite `plan`
+ fn analyze(
+ &self,
+ plan: LogicalPlan,
+ ctx: &QueryEngineContext,
+ config: &ConfigOptions,
+ ) -> Result<LogicalPlan>;
+
+ /// A human readable name for this analyzer rule
+ fn name(&self) -> &str;
+}
+
pub mod order_hint;
pub mod string_normalization;
pub mod type_conversion;
diff --git a/src/query/src/optimizer/type_conversion.rs b/src/query/src/optimizer/type_conversion.rs
index 07cba75d7f44..aa1fc73d4c1a 100644
--- a/src/query/src/optimizer/type_conversion.rs
+++ b/src/query/src/optimizer/type_conversion.rs
@@ -12,9 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::str::FromStr;
-
use common_time::timestamp::{TimeUnit, Timestamp};
+use common_time::Timezone;
use datafusion::config::ConfigOptions;
use datafusion_common::tree_node::{Transformed, TreeNode, TreeNodeRewriter};
use datafusion_common::{DFSchemaRef, DataFusionError, Result, ScalarValue};
@@ -22,9 +21,12 @@ use datafusion_expr::expr::InList;
use datafusion_expr::{
Between, BinaryExpr, Expr, ExprSchemable, Filter, LogicalPlan, Operator, TableScan,
};
-use datafusion_optimizer::analyzer::AnalyzerRule;
use datatypes::arrow::compute;
use datatypes::arrow::datatypes::DataType;
+use session::context::QueryContextRef;
+
+use crate::optimizer::ExtensionAnalyzerRule;
+use crate::QueryEngineContext;
/// TypeConversionRule converts some literal values in logical plan to other types according
/// to data type of corresponding columns.
@@ -33,12 +35,18 @@ use datatypes::arrow::datatypes::DataType;
/// - string literal of boolean is converted to `Expr::Literal(ScalarValue::Boolean)`
pub struct TypeConversionRule;
-impl AnalyzerRule for TypeConversionRule {
- fn analyze(&self, plan: LogicalPlan, _config: &ConfigOptions) -> Result<LogicalPlan> {
+impl ExtensionAnalyzerRule for TypeConversionRule {
+ fn analyze(
+ &self,
+ plan: LogicalPlan,
+ ctx: &QueryEngineContext,
+ _config: &ConfigOptions,
+ ) -> Result<LogicalPlan> {
plan.transform(&|plan| match plan {
LogicalPlan::Filter(filter) => {
let mut converter = TypeConverter {
schema: filter.input.schema().clone(),
+ query_ctx: ctx.query_ctx(),
};
let rewritten = filter.predicate.clone().rewrite(&mut converter)?;
Ok(Transformed::Yes(LogicalPlan::Filter(Filter::try_new(
@@ -56,6 +64,7 @@ impl AnalyzerRule for TypeConversionRule {
}) => {
let mut converter = TypeConverter {
schema: projected_schema.clone(),
+ query_ctx: ctx.query_ctx(),
};
let rewrite_filters = filters
.into_iter()
@@ -76,7 +85,6 @@ impl AnalyzerRule for TypeConversionRule {
| LogicalPlan::Repartition { .. }
| LogicalPlan::Extension { .. }
| LogicalPlan::Sort { .. }
- | LogicalPlan::Explain { .. }
| LogicalPlan::Limit { .. }
| LogicalPlan::Union { .. }
| LogicalPlan::Join { .. }
@@ -86,6 +94,7 @@ impl AnalyzerRule for TypeConversionRule {
| LogicalPlan::Analyze { .. } => {
let mut converter = TypeConverter {
schema: plan.schema().clone(),
+ query_ctx: ctx.query_ctx(),
};
let inputs = plan.inputs().into_iter().cloned().collect::<Vec<_>>();
let expr = plan
@@ -98,6 +107,7 @@ impl AnalyzerRule for TypeConversionRule {
}
LogicalPlan::Subquery { .. }
+ | LogicalPlan::Explain { .. }
| LogicalPlan::SubqueryAlias { .. }
| LogicalPlan::EmptyRelation(_)
| LogicalPlan::Prepare(_)
@@ -116,6 +126,7 @@ impl AnalyzerRule for TypeConversionRule {
}
struct TypeConverter {
+ query_ctx: QueryContextRef,
schema: DFSchemaRef,
}
@@ -129,9 +140,15 @@ impl TypeConverter {
None
}
- fn cast_scalar_value(value: &ScalarValue, target_type: &DataType) -> Result<ScalarValue> {
+ fn cast_scalar_value(
+ &self,
+ value: &ScalarValue,
+ target_type: &DataType,
+ ) -> Result<ScalarValue> {
match (target_type, value) {
- (DataType::Timestamp(_, _), ScalarValue::Utf8(Some(v))) => string_to_timestamp_ms(v),
+ (DataType::Timestamp(_, _), ScalarValue::Utf8(Some(v))) => {
+ string_to_timestamp_ms(v, Some(self.query_ctx.timezone().as_ref()))
+ }
(DataType::Boolean, ScalarValue::Utf8(Some(v))) => match v.to_lowercase().as_str() {
"true" => Ok(ScalarValue::Boolean(Some(true))),
"false" => Ok(ScalarValue::Boolean(Some(false))),
@@ -167,7 +184,7 @@ impl TypeConverter {
match (left, right) {
(Expr::Column(col), Expr::Literal(value)) => {
- let casted_right = Self::cast_scalar_value(value, target_type)?;
+ let casted_right = self.cast_scalar_value(value, target_type)?;
if casted_right.is_null() {
return Err(DataFusionError::Plan(format!(
"column:{col:?}. Casting value:{value:?} to {target_type:?} is invalid",
@@ -176,7 +193,7 @@ impl TypeConverter {
Ok((left.clone(), Expr::Literal(casted_right)))
}
(Expr::Literal(value), Expr::Column(col)) => {
- let casted_left = Self::cast_scalar_value(value, target_type)?;
+ let casted_left = self.cast_scalar_value(value, target_type)?;
if casted_left.is_null() {
return Err(DataFusionError::Plan(format!(
"column:{col:?}. Casting value:{value:?} to {target_type:?} is invalid",
@@ -273,8 +290,9 @@ fn timestamp_to_timestamp_ms_expr(val: i64, unit: TimeUnit) -> Expr {
Expr::Literal(ScalarValue::TimestampMillisecond(Some(timestamp), None))
}
-fn string_to_timestamp_ms(string: &str) -> Result<ScalarValue> {
- let ts = Timestamp::from_str(string).map_err(|e| DataFusionError::External(Box::new(e)))?;
+fn string_to_timestamp_ms(string: &str, timezone: Option<&Timezone>) -> Result<ScalarValue> {
+ let ts = Timestamp::from_str(string, timezone)
+ .map_err(|e| DataFusionError::External(Box::new(e)))?;
let value = Some(ts.value());
let scalar = match ts.unit() {
@@ -295,19 +313,38 @@ mod tests {
use datafusion_common::{Column, DFField, DFSchema};
use datafusion_expr::{AggregateFunction, LogicalPlanBuilder};
use datafusion_sql::TableReference;
+ use session::context::QueryContext;
use super::*;
#[test]
fn test_string_to_timestamp_ms() {
assert_eq!(
- string_to_timestamp_ms("2022-02-02 19:00:00+08:00").unwrap(),
+ string_to_timestamp_ms("2022-02-02 19:00:00+08:00", None).unwrap(),
ScalarValue::TimestampSecond(Some(1643799600), None)
);
assert_eq!(
- string_to_timestamp_ms("2009-02-13 23:31:30Z").unwrap(),
+ string_to_timestamp_ms("2009-02-13 23:31:30Z", None).unwrap(),
ScalarValue::TimestampSecond(Some(1234567890), None)
);
+
+ assert_eq!(
+ string_to_timestamp_ms(
+ "2009-02-13 23:31:30",
+ Some(&Timezone::from_tz_string("Asia/Shanghai").unwrap())
+ )
+ .unwrap(),
+ ScalarValue::TimestampSecond(Some(1234567890 - 8 * 3600), None)
+ );
+
+ assert_eq!(
+ string_to_timestamp_ms(
+ "2009-02-13 23:31:30",
+ Some(&Timezone::from_tz_string("-8:00").unwrap())
+ )
+ .unwrap(),
+ ScalarValue::TimestampSecond(Some(1234567890 + 8 * 3600), None)
+ );
}
#[test]
@@ -363,7 +400,10 @@ mod tests {
)
.unwrap(),
);
- let mut converter = TypeConverter { schema };
+ let mut converter = TypeConverter {
+ schema,
+ query_ctx: QueryContext::arc(),
+ };
assert_eq!(
Expr::Column(Column::from_name("ts")).gt(Expr::Literal(ScalarValue::TimestampSecond(
@@ -395,7 +435,10 @@ mod tests {
)
.unwrap(),
);
- let mut converter = TypeConverter { schema };
+ let mut converter = TypeConverter {
+ schema,
+ query_ctx: QueryContext::arc(),
+ };
assert_eq!(
Expr::Column(Column::from_name(col_name))
@@ -442,9 +485,10 @@ mod tests {
.unwrap()
.build()
.unwrap();
+ let context = QueryEngineContext::mock();
let transformed_plan = TypeConversionRule
- .analyze(plan, &ConfigOptions::default())
+ .analyze(plan, &context, &ConfigOptions::default())
.unwrap();
let expected = String::from(
"Aggregate: groupBy=[[]], aggr=[[COUNT(column1)]]\
@@ -457,6 +501,8 @@ mod tests {
#[test]
fn test_reverse_non_ts_type() {
+ let context = QueryEngineContext::mock();
+
let plan =
LogicalPlanBuilder::values(vec![vec![Expr::Literal(ScalarValue::Float64(Some(1.0)))]])
.unwrap()
@@ -473,7 +519,7 @@ mod tests {
.build()
.unwrap();
let transformed_plan = TypeConversionRule
- .analyze(plan, &ConfigOptions::default())
+ .analyze(plan, &context, &ConfigOptions::default())
.unwrap();
let expected = String::from(
"Filter: Utf8(\"1.2345\") < column1\
diff --git a/src/query/src/parser.rs b/src/query/src/parser.rs
index 7ab3df442626..0ecd38188636 100644
--- a/src/query/src/parser.rs
+++ b/src/query/src/parser.rs
@@ -168,7 +168,6 @@ impl QueryLanguageParser {
}
fn parse_promql_timestamp(timestamp: &str) -> Result<SystemTime> {
- // FIXME(dennis): aware of timezone
// try rfc3339 format
let rfc3339_result = DateTime::parse_from_rfc3339(timestamp)
.context(ParseTimestampSnafu { raw: timestamp })
diff --git a/src/query/src/planner.rs b/src/query/src/planner.rs
index 97e8d3d931ee..cf3f47761192 100644
--- a/src/query/src/planner.rs
+++ b/src/query/src/planner.rs
@@ -26,12 +26,12 @@ use session::context::QueryContextRef;
use snafu::ResultExt;
use sql::statements::statement::Statement;
-use crate::error::{PlanSqlSnafu, QueryPlanSnafu, Result, SqlSnafu};
+use crate::error::{DataFusionSnafu, PlanSqlSnafu, QueryPlanSnafu, Result, SqlSnafu};
use crate::parser::QueryStatement;
use crate::plan::LogicalPlan;
use crate::query_engine::QueryEngineState;
use crate::range_select::plan_rewrite::RangePlanRewriter;
-use crate::DfContextProviderAdapter;
+use crate::{DfContextProviderAdapter, QueryEngineContext};
#[async_trait]
pub trait LogicalPlanner: Send + Sync {
@@ -66,7 +66,7 @@ impl DfLogicalPlanner {
self.engine_state.clone(),
self.session_state.clone(),
&df_stmt,
- query_ctx,
+ query_ctx.clone(),
)
.await?;
@@ -81,9 +81,17 @@ impl DfLogicalPlanner {
let result = sql_to_rel
.statement_to_plan(df_stmt)
.context(PlanSqlSnafu)?;
- let plan = RangePlanRewriter::new(table_provider)
+ let plan = RangePlanRewriter::new(table_provider, query_ctx.clone())
.rewrite(result)
.await?;
+
+ // Optimize logical plan by extension rules
+ let context = QueryEngineContext::new(self.session_state.clone(), query_ctx);
+ let plan = self
+ .engine_state
+ .optimize_by_extension_rules(plan, &context)
+ .context(DataFusionSnafu)?;
+
Ok(LogicalPlan::DfPlan(plan))
}
diff --git a/src/query/src/query_engine.rs b/src/query/src/query_engine.rs
index ec271a818fc9..2ae0b298281d 100644
--- a/src/query/src/query_engine.rs
+++ b/src/query/src/query_engine.rs
@@ -56,22 +56,40 @@ pub trait QueryEngine: Send + Sync {
/// so that it can be downcast to a specific implementation.
fn as_any(&self) -> &dyn Any;
+ /// Returns the logical planner
fn planner(&self) -> Arc<dyn LogicalPlanner>;
+ /// Returns the query engine name.
fn name(&self) -> &str;
- async fn describe(&self, plan: LogicalPlan) -> Result<DescribeResult>;
+ /// Describe the given [`LogicalPlan`].
+ async fn describe(
+ &self,
+ plan: LogicalPlan,
+ query_ctx: QueryContextRef,
+ ) -> Result<DescribeResult>;
+ /// Execute the given [`LogicalPlan`].
async fn execute(&self, plan: LogicalPlan, query_ctx: QueryContextRef) -> Result<Output>;
+ /// Register a [`ScalarUdf`].
fn register_udf(&self, udf: ScalarUdf);
+ /// Register an aggregate function.
+ ///
+ /// # Panics
+ /// Will panic if the function with same name is already registered.
fn register_aggregate_function(&self, func: AggregateFunctionMetaRef);
+ /// Register a SQL function.
+ /// Will override if the function with same name is already registered.
fn register_function(&self, func: FunctionRef);
/// Create a DataFrame from a table.
fn read_table(&self, table: TableRef) -> Result<DataFrame>;
+
+ /// Create a [`QueryEngineContext`].
+ fn engine_context(&self, query_ctx: QueryContextRef) -> QueryEngineContext;
}
pub struct QueryEngineFactory {
@@ -118,6 +136,7 @@ impl QueryEngineFactory {
}
}
+/// Register all functions implemented by GreptimeDB
fn register_functions(query_engine: &Arc<DatafusionQueryEngine>) {
for func in FUNCTION_REGISTRY.functions() {
query_engine.register_function(func);
diff --git a/src/query/src/query_engine/context.rs b/src/query/src/query_engine/context.rs
index 29eb2171c20e..d5361b4e5f2c 100644
--- a/src/query/src/query_engine/context.rs
+++ b/src/query/src/query_engine/context.rs
@@ -51,4 +51,23 @@ impl QueryEngineContext {
state.runtime_env().clone(),
))
}
+
+ /// Mock an engine context for unit tests.
+ #[cfg(any(test, feature = "test"))]
+ pub fn mock() -> Self {
+ use common_base::Plugins;
+ use session::context::QueryContext;
+
+ use crate::query_engine::QueryEngineState;
+
+ let state = Arc::new(QueryEngineState::new(
+ catalog::memory::new_memory_catalog_manager().unwrap(),
+ None,
+ None,
+ false,
+ Plugins::default(),
+ ));
+
+ QueryEngineContext::new(state.session_state(), QueryContext::arc())
+ }
}
diff --git a/src/query/src/query_engine/state.rs b/src/query/src/query_engine/state.rs
index 4da21338df5c..a8259694fa88 100644
--- a/src/query/src/query_engine/state.rs
+++ b/src/query/src/query_engine/state.rs
@@ -19,9 +19,11 @@ use std::sync::{Arc, RwLock};
use async_trait::async_trait;
use catalog::CatalogManagerRef;
use common_base::Plugins;
+use common_function::function::FunctionRef;
use common_function::scalars::aggregate::AggregateFunctionMetaRef;
use common_query::physical_plan::SessionContext;
use common_query::prelude::ScalarUdf;
+use common_telemetry::warn;
use datafusion::catalog::MemoryCatalogList;
use datafusion::dataframe::DataFrame;
use datafusion::error::Result as DfResult;
@@ -42,10 +44,12 @@ use crate::dist_plan::{DistExtensionPlanner, DistPlannerAnalyzer};
use crate::optimizer::order_hint::OrderHintRule;
use crate::optimizer::string_normalization::StringNormalizationRule;
use crate::optimizer::type_conversion::TypeConversionRule;
+use crate::optimizer::ExtensionAnalyzerRule;
use crate::query_engine::options::QueryOptions;
use crate::range_select::planner::RangeSelectPlanner;
use crate::region_query::RegionQueryHandlerRef;
use crate::table_mutation::TableMutationHandlerRef;
+use crate::QueryEngineContext;
/// Query engine global state
// TODO(yingwen): This QueryEngineState still relies on datafusion, maybe we can define a trait for it,
@@ -56,7 +60,9 @@ pub struct QueryEngineState {
df_context: SessionContext,
catalog_manager: CatalogManagerRef,
table_mutation_handler: Option<TableMutationHandlerRef>,
+ udf_functions: Arc<RwLock<HashMap<String, FunctionRef>>>,
aggregate_functions: Arc<RwLock<HashMap<String, AggregateFunctionMetaRef>>>,
+ extension_rules: Vec<Arc<dyn ExtensionAnalyzerRule + Send + Sync>>,
plugins: Plugins,
}
@@ -78,9 +84,12 @@ impl QueryEngineState {
) -> Self {
let runtime_env = Arc::new(RuntimeEnv::default());
let session_config = SessionConfig::new().with_create_default_catalog_and_schema(false);
- // Apply the type conversion rule first.
+ // Apply extension rules
+ let mut extension_rules = Vec::new();
+ // The [`TypeConversionRule`] must be at first
+ extension_rules.insert(0, Arc::new(TypeConversionRule) as _);
+ // Apply the datafusion rules
let mut analyzer = Analyzer::new();
- analyzer.rules.insert(0, Arc::new(TypeConversionRule));
analyzer.rules.insert(0, Arc::new(StringNormalizationRule));
Self::remove_analyzer_rule(&mut analyzer.rules, CountWildcardRule {}.name());
analyzer.rules.insert(0, Arc::new(CountWildcardRule {}));
@@ -110,7 +119,9 @@ impl QueryEngineState {
catalog_manager: catalog_list,
table_mutation_handler,
aggregate_functions: Arc::new(RwLock::new(HashMap::new())),
+ extension_rules,
plugins,
+ udf_functions: Arc::new(RwLock::new(HashMap::new())),
}
}
@@ -118,12 +129,44 @@ impl QueryEngineState {
rules.retain(|rule| rule.name() != name);
}
- /// Register a udf function
- // TODO(dennis): manage UDFs by ourself.
- pub fn register_udf(&self, udf: ScalarUdf) {
- self.df_context.register_udf(udf.into_df_udf());
+ /// Optimize the logical plan by the extension anayzer rules.
+ pub fn optimize_by_extension_rules(
+ &self,
+ plan: DfLogicalPlan,
+ context: &QueryEngineContext,
+ ) -> DfResult<DfLogicalPlan> {
+ self.extension_rules
+ .iter()
+ .try_fold(plan, |acc_plan, rule| {
+ rule.analyze(acc_plan, context, self.session_state().config_options())
+ })
+ }
+
+ /// Register an udf function.
+ /// Will override if the function with same name is already registered.
+ pub fn register_function(&self, func: FunctionRef) {
+ let name = func.name().to_string();
+ let x = self
+ .udf_functions
+ .write()
+ .unwrap()
+ .insert(name.clone(), func);
+
+ if x.is_some() {
+ warn!("Already registered udf function '{name}'");
+ }
}
+ /// Retrieve the udf function by name
+ pub fn udf_function(&self, function_name: &str) -> Option<FunctionRef> {
+ self.udf_functions
+ .read()
+ .unwrap()
+ .get(function_name)
+ .cloned()
+ }
+
+ /// Retrieve the aggregate function by name
pub fn aggregate_function(&self, function_name: &str) -> Option<AggregateFunctionMetaRef> {
self.aggregate_functions
.read()
@@ -132,6 +175,11 @@ impl QueryEngineState {
.cloned()
}
+ /// Register a [`ScalarUdf`].
+ pub fn register_udf(&self, udf: ScalarUdf) {
+ self.df_context.register_udf(udf.into());
+ }
+
/// Register an aggregate function.
///
/// # Panics
diff --git a/src/query/src/range_select/plan_rewrite.rs b/src/query/src/range_select/plan_rewrite.rs
index dcb1ba7de5c8..5b93180b0d2b 100644
--- a/src/query/src/range_select/plan_rewrite.rs
+++ b/src/query/src/range_select/plan_rewrite.rs
@@ -13,7 +13,6 @@
// limitations under the License.
use std::collections::BTreeSet;
-use std::str::FromStr;
use std::sync::Arc;
use std::time::Duration;
@@ -22,7 +21,7 @@ use async_recursion::async_recursion;
use catalog::table_source::DfTableSourceProvider;
use common_time::interval::NANOS_PER_MILLI;
use common_time::timestamp::TimeUnit;
-use common_time::{Interval, Timestamp};
+use common_time::{Interval, Timestamp, Timezone};
use datafusion::datasource::DefaultTableSource;
use datafusion::prelude::Column;
use datafusion::scalar::ScalarValue;
@@ -35,6 +34,7 @@ use datafusion_expr::{
};
use datatypes::prelude::ConcreteDataType;
use promql_parser::util::parse_duration;
+use session::context::QueryContextRef;
use snafu::{ensure, OptionExt, ResultExt};
use table::table::adapter::DfTableProviderAdapter;
@@ -56,6 +56,7 @@ pub struct RangeExprRewriter<'a> {
/// Use `BTreeSet` to avoid in case like `avg(a) RANGE '5m' + avg(a) RANGE '5m'`, duplicate range expr `avg(a) RANGE '5m'` be calculate twice
range_fn: BTreeSet<RangeFn>,
sub_aggr: &'a Aggregate,
+ query_ctx: &'a QueryContextRef,
}
impl<'a> RangeExprRewriter<'a> {
@@ -134,7 +135,7 @@ fn parse_duration_expr(args: &[Expr], i: usize) -> DFResult<Duration> {
/// 1. NOW: align to current execute time
/// 2. Timestamp string: align to specific timestamp
/// 3. leave empty (as Default Option): align to unix epoch 0
-fn parse_align_to(args: &[Expr], i: usize) -> DFResult<i64> {
+fn parse_align_to(args: &[Expr], i: usize, timezone: Option<&Timezone>) -> DFResult<i64> {
let s = parse_str_expr(args, i)?;
let upper = s.to_uppercase();
match upper.as_str() {
@@ -143,7 +144,8 @@ fn parse_align_to(args: &[Expr], i: usize) -> DFResult<i64> {
"" => return Ok(0),
_ => (),
}
- Timestamp::from_str(s)
+
+ Timestamp::from_str(s, timezone)
.map_err(|e| {
DataFusionError::Plan(format!(
"Illegal `align to` argument `{}` in range select query, can't be parse as NOW/CALENDAR/Timestamp, error: {}",
@@ -206,7 +208,11 @@ impl<'a> TreeNodeRewriter for RangeExprRewriter<'a> {
.map_err(|e| DataFusionError::Plan(e.to_string()))?;
let by = parse_expr_list(&func.args, 4, byc)?;
let align = parse_duration_expr(&func.args, byc + 4)?;
- let align_to = parse_align_to(&func.args, byc + 5)?;
+ let align_to = parse_align_to(
+ &func.args,
+ byc + 5,
+ Some(self.query_ctx.timezone().as_ref()),
+ )?;
let mut data_type = range_expr.get_type(self.input_plan.schema())?;
let mut need_cast = false;
let fill = Fill::try_from_str(parse_str_expr(&func.args, 2)?, &data_type)?;
@@ -247,11 +253,15 @@ impl<'a> TreeNodeRewriter for RangeExprRewriter<'a> {
/// collecting info we need to generate RangeSelect Query LogicalPlan and rewrite th original LogicalPlan.
pub struct RangePlanRewriter {
table_provider: DfTableSourceProvider,
+ query_ctx: QueryContextRef,
}
impl RangePlanRewriter {
- pub fn new(table_provider: DfTableSourceProvider) -> Self {
- Self { table_provider }
+ pub fn new(table_provider: DfTableSourceProvider, query_ctx: QueryContextRef) -> Self {
+ Self {
+ table_provider,
+ query_ctx,
+ }
}
pub async fn rewrite(&mut self, plan: LogicalPlan) -> Result<LogicalPlan> {
@@ -295,6 +305,7 @@ impl RangePlanRewriter {
by: vec![],
range_fn: BTreeSet::new(),
sub_aggr: aggr_plan,
+ query_ctx: &self.query_ctx,
};
let new_expr = expr
.iter()
@@ -747,15 +758,28 @@ mod test {
fn test_parse_align_to() {
// test NOW
let args = vec![Expr::Literal(ScalarValue::Utf8(Some("NOW".into())))];
- let epsinon = parse_align_to(&args, 0).unwrap() - Timestamp::current_millis().value();
+ let epsinon = parse_align_to(&args, 0, None).unwrap() - Timestamp::current_millis().value();
assert!(epsinon.abs() < 100);
// test default
let args = vec![Expr::Literal(ScalarValue::Utf8(Some("".into())))];
- assert!(parse_align_to(&args, 0).unwrap() == 0);
+ assert!(parse_align_to(&args, 0, None).unwrap() == 0);
// test Timestamp
let args = vec![Expr::Literal(ScalarValue::Utf8(Some(
"1970-01-01T00:00:00+08:00".into(),
)))];
- assert!(parse_align_to(&args, 0).unwrap() == -8 * 60 * 60 * 1000);
+ assert!(parse_align_to(&args, 0, None).unwrap() == -8 * 60 * 60 * 1000);
+ // timezone
+ let args = vec![Expr::Literal(ScalarValue::Utf8(Some(
+ "1970-01-01T00:00:00".into(),
+ )))];
+ assert!(
+ parse_align_to(
+ &args,
+ 0,
+ Some(&Timezone::from_tz_string("Asia/Shanghai").unwrap())
+ )
+ .unwrap()
+ == -8 * 60 * 60 * 1000
+ );
}
}
diff --git a/src/query/src/tests.rs b/src/query/src/tests.rs
index 1c43cfd4b202..8496a648b680 100644
--- a/src/query/src/tests.rs
+++ b/src/query/src/tests.rs
@@ -40,7 +40,7 @@ async fn exec_selection(engine: QueryEngineRef, sql: &str) -> Vec<RecordBatch> {
let stmt = QueryLanguageParser::parse_sql(sql, &query_ctx).unwrap();
let plan = engine
.planner()
- .plan(stmt, QueryContext::arc())
+ .plan(stmt, query_ctx.clone())
.await
.unwrap();
let Output::Stream(stream) = engine.execute(plan, query_ctx).await.unwrap() else {
diff --git a/src/script/src/python/engine.rs b/src/script/src/python/engine.rs
index 2730f28d9a84..70ff108271c1 100644
--- a/src/script/src/python/engine.rs
+++ b/src/script/src/python/engine.rs
@@ -150,7 +150,7 @@ impl Function for PyUDF {
fn eval(
&self,
- _func_ctx: common_function::function::FunctionContext,
+ func_ctx: common_function::function::FunctionContext,
columns: &[datatypes::vectors::VectorRef],
) -> common_query::error::Result<datatypes::vectors::VectorRef> {
// FIXME(discord9): exec_parsed require a RecordBatch(basically a Vector+Schema), where schema can't pop out from nowhere, right?
@@ -158,15 +158,17 @@ impl Function for PyUDF {
let columns = columns.to_vec();
let rb = Some(RecordBatch::new(schema, columns).context(UdfTempRecordBatchSnafu)?);
- // FIXME(dennis): Create EvalContext from FunctionContext.
- let res = exec_parsed(&self.copr, &rb, &HashMap::new(), &EvalContext::default()).map_err(
- |err| {
- PyUdfSnafu {
- msg: format!("{err:#?}"),
- }
- .build()
+ let res = exec_parsed(
+ &self.copr,
+ &rb,
+ &HashMap::new(),
+ &EvalContext {
+ query_ctx: func_ctx.query_ctx.clone(),
},
- )?;
+ )
+ .map_err(BoxedError::new)
+ .context(common_query::error::ExecuteSnafu)?;
+
let len = res.columns().len();
if len == 0 {
return PyUdfSnafu {
diff --git a/src/servers/src/mysql/federated.rs b/src/servers/src/mysql/federated.rs
index 2efc45128a1f..5744894b9f91 100644
--- a/src/servers/src/mysql/federated.rs
+++ b/src/servers/src/mysql/federated.rs
@@ -37,8 +37,8 @@ static SHOW_LOWER_CASE_PATTERN: Lazy<Regex> =
Lazy::new(|| Regex::new("(?i)^(SHOW VARIABLES LIKE 'lower_case_table_names'(.*))").unwrap());
static SHOW_COLLATION_PATTERN: Lazy<Regex> =
Lazy::new(|| Regex::new("(?i)^(show collation where(.*))").unwrap());
-static SHOW_VARIABLES_PATTERN: Lazy<Regex> =
- Lazy::new(|| Regex::new("(?i)^(SHOW VARIABLES(.*))").unwrap());
+static SHOW_VARIABLES_LIKE_PATTERN: Lazy<Regex> =
+ Lazy::new(|| Regex::new("(?i)^(SHOW VARIABLES( LIKE (.*))?)").unwrap());
static SELECT_DATABASE_PATTERN: Lazy<Regex> =
Lazy::new(|| Regex::new(r"(?i)^(SELECT DATABASE\(\s*\))").unwrap());
@@ -247,7 +247,8 @@ fn check_show_variables(query: &str) -> Option<Output> {
Some(show_variables("sql_mode", "ONLY_FULL_GROUP_BY STRICT_TRANS_TABLES NO_ZERO_IN_DATE NO_ZERO_DATE ERROR_FOR_DIVISION_BY_ZERO NO_ENGINE_SUBSTITUTION"))
} else if SHOW_LOWER_CASE_PATTERN.is_match(query) {
Some(show_variables("lower_case_table_names", "0"))
- } else if SHOW_COLLATION_PATTERN.is_match(query) || SHOW_VARIABLES_PATTERN.is_match(query) {
+ } else if SHOW_COLLATION_PATTERN.is_match(query) || SHOW_VARIABLES_LIKE_PATTERN.is_match(query)
+ {
Some(show_variables("", ""))
} else {
None
diff --git a/src/servers/tests/mod.rs b/src/servers/tests/mod.rs
index dabde7e00013..ff60ae007fa6 100644
--- a/src/servers/tests/mod.rs
+++ b/src/servers/tests/mod.rs
@@ -99,10 +99,10 @@ impl SqlQueryHandler for DummyInstance {
let plan = self
.query_engine
.planner()
- .plan(QueryStatement::Sql(stmt), query_ctx)
+ .plan(QueryStatement::Sql(stmt), query_ctx.clone())
.await
.unwrap();
- let schema = self.query_engine.describe(plan).await.unwrap();
+ let schema = self.query_engine.describe(plan, query_ctx).await.unwrap();
Ok(Some(schema))
} else {
Ok(None)
diff --git a/src/sql/src/statements.rs b/src/sql/src/statements.rs
index c8da838234b9..724c4cc42297 100644
--- a/src/sql/src/statements.rs
+++ b/src/sql/src/statements.rs
@@ -74,7 +74,7 @@ fn parse_string_to_value(
match data_type {
ConcreteDataType::String(_) => Ok(Value::String(s.into())),
ConcreteDataType::Date(_) => {
- if let Ok(date) = common_time::date::Date::from_str(&s) {
+ if let Ok(date) = common_time::date::Date::from_str_utc(&s) {
Ok(Value::Date(date))
} else {
ParseSqlValueSnafu {
@@ -84,7 +84,7 @@ fn parse_string_to_value(
}
}
ConcreteDataType::DateTime(_) => {
- if let Ok(datetime) = common_time::datetime::DateTime::from_str(&s) {
+ if let Ok(datetime) = common_time::datetime::DateTime::from_str_system(&s) {
Ok(Value::DateTime(datetime))
} else {
ParseSqlValueSnafu {
@@ -94,7 +94,7 @@ fn parse_string_to_value(
}
}
ConcreteDataType::Timestamp(t) => {
- if let Ok(ts) = Timestamp::from_str(&s) {
+ if let Ok(ts) = Timestamp::from_str_utc(&s) {
Ok(Value::Timestamp(ts.convert_to(t.unit()).context(
TimestampOverflowSnafu {
timestamp: ts,
diff --git a/src/table/src/predicate.rs b/src/table/src/predicate.rs
index 514541f2dd5c..bd5d6d2d1818 100644
--- a/src/table/src/predicate.rs
+++ b/src/table/src/predicate.rs
@@ -19,6 +19,7 @@ use common_telemetry::{error, warn};
use common_time::range::TimestampRange;
use common_time::timestamp::TimeUnit;
use common_time::Timestamp;
+use datafusion::common::ScalarValue;
use datafusion::physical_optimizer::pruning::{PruningPredicate, PruningStatistics};
use datafusion_common::ToDFSchema;
use datafusion_expr::expr::InList;
@@ -34,6 +35,22 @@ use crate::error;
#[cfg(test)]
mod stats;
+/// Assert the scalar value is not utf8. Returns `None` if it's utf8.
+/// In theory, it should be converted to a timestamp scalar value by `TypeConversionRule`.
+macro_rules! return_none_if_utf8 {
+ ($lit: ident) => {
+ if matches!($lit, ScalarValue::Utf8(_)) {
+ warn!(
+ "Unexpected ScalarValue::Utf8 in time range predicate: {:?}. Maybe it's an implicit bug, please report it to https://github.com/GreptimeTeam/greptimedb/issues",
+ $lit
+ );
+
+ // Make the predicate ineffective.
+ return None;
+ }
+ };
+}
+
#[derive(Debug, Clone)]
pub struct Predicate {
/// logical exprs
@@ -282,7 +299,9 @@ impl<'a> TimeRangePredicateBuilder<'a> {
if col.name != self.ts_col_name {
return None;
}
- scalar_value_to_timestamp(lit).map(|t| (t, reverse))
+
+ return_none_if_utf8!(lit);
+ scalar_value_to_timestamp(lit, None).map(|t| (t, reverse))
}
fn extract_from_between_expr(
@@ -305,9 +324,12 @@ impl<'a> TimeRangePredicateBuilder<'a> {
match (low, high) {
(DfExpr::Literal(low), DfExpr::Literal(high)) => {
- let low_opt =
- scalar_value_to_timestamp(low).and_then(|ts| ts.convert_to(self.ts_col_unit));
- let high_opt = scalar_value_to_timestamp(high)
+ return_none_if_utf8!(low);
+ return_none_if_utf8!(high);
+
+ let low_opt = scalar_value_to_timestamp(low, None)
+ .and_then(|ts| ts.convert_to(self.ts_col_unit));
+ let high_opt = scalar_value_to_timestamp(high, None)
.and_then(|ts| ts.convert_to_ceil(self.ts_col_unit));
Some(TimestampRange::new_inclusive(low_opt, high_opt))
}
@@ -338,7 +360,8 @@ impl<'a> TimeRangePredicateBuilder<'a> {
let mut init_range = TimestampRange::empty();
for expr in list {
if let DfExpr::Literal(scalar) = expr {
- if let Some(timestamp) = scalar_value_to_timestamp(scalar) {
+ return_none_if_utf8!(scalar);
+ if let Some(timestamp) = scalar_value_to_timestamp(scalar, None) {
init_range = init_range.or(&TimestampRange::single(timestamp))
} else {
// TODO(hl): maybe we should raise an error here since cannot parse
diff --git a/tests/cases/standalone/common/system/timezone.result b/tests/cases/standalone/common/system/timezone.result
new file mode 100644
index 000000000000..8f3c59fbd248
--- /dev/null
+++ b/tests/cases/standalone/common/system/timezone.result
@@ -0,0 +1,271 @@
+--- tests for timezone ---
+SHOW VARIABLES time_zone;
+
++-----------+
+| TIME_ZONE |
++-----------+
+| UTC |
++-----------+
+
+SHOW VARIABLES system_time_zone;
+
++------------------+
+| SYSTEM_TIME_ZONE |
++------------------+
+| UTC |
++------------------+
+
+CREATE TABLE test(d double, ts timestamp_ms time index);
+
+Affected Rows: 0
+
+INSERT INTO test values
+ (1, '2024-01-01 00:00:00'),
+ (2, '2024-01-02 08:00:00'),
+ (3, '2024-01-03 16:00:00'),
+ (4, '2024-01-04 00:00:00'),
+ (5, '2024-01-05 00:00:00+08:00');
+
+Affected Rows: 5
+
+SELECT * from test;
+
++-----+---------------------+
+| d | ts |
++-----+---------------------+
+| 1.0 | 2024-01-01T00:00:00 |
+| 2.0 | 2024-01-02T08:00:00 |
+| 3.0 | 2024-01-03T16:00:00 |
+| 4.0 | 2024-01-04T00:00:00 |
+| 5.0 | 2024-01-04T16:00:00 |
++-----+---------------------+
+
+SELECT * from test where ts >= '2024-01-02 08:00:00';
+
++-----+---------------------+
+| d | ts |
++-----+---------------------+
+| 2.0 | 2024-01-02T08:00:00 |
+| 3.0 | 2024-01-03T16:00:00 |
+| 4.0 | 2024-01-04T00:00:00 |
+| 5.0 | 2024-01-04T16:00:00 |
++-----+---------------------+
+
+SELECT * from test where ts <= '2024-01-03 16:00:00';
+
++-----+---------------------+
+| d | ts |
++-----+---------------------+
+| 1.0 | 2024-01-01T00:00:00 |
+| 2.0 | 2024-01-02T08:00:00 |
+| 3.0 | 2024-01-03T16:00:00 |
++-----+---------------------+
+
+select date_format(ts, '%Y-%m-%d %H:%M:%S:%3f') from test;
+
++----------------------------------------------------+
+| date_format(test.ts,Utf8("%Y-%m-%d %H:%M:%S:%3f")) |
++----------------------------------------------------+
+| 2024-01-01 00:00:00:000 |
+| 2024-01-02 08:00:00:000 |
+| 2024-01-03 16:00:00:000 |
+| 2024-01-04 00:00:00:000 |
+| 2024-01-04 16:00:00:000 |
++----------------------------------------------------+
+
+select to_unixtime('2024-01-02 00:00:00');
+
++------------------------------------------+
+| to_unixtime(Utf8("2024-01-02 00:00:00")) |
++------------------------------------------+
+| 1704153600 |
++------------------------------------------+
+
+select to_unixtime('2024-01-02T00:00:00+08:00');
+
++------------------------------------------------+
+| to_unixtime(Utf8("2024-01-02T00:00:00+08:00")) |
++------------------------------------------------+
+| 1704124800 |
++------------------------------------------------+
+
+--- UTC+8 ---
+SET TIME_ZONE = '+8:00';
+
+Affected Rows: 0
+
+SHOW VARIABLES time_zone;
+
++-----------+
+| TIME_ZONE |
++-----------+
+| +08:00 |
++-----------+
+
+SHOW VARIABLES system_time_zone;
+
++------------------+
+| SYSTEM_TIME_ZONE |
++------------------+
+| UTC |
++------------------+
+
+SELECT * from test;
+
++-----+---------------------+
+| d | ts |
++-----+---------------------+
+| 1.0 | 2024-01-01T00:00:00 |
+| 2.0 | 2024-01-02T08:00:00 |
+| 3.0 | 2024-01-03T16:00:00 |
+| 4.0 | 2024-01-04T00:00:00 |
+| 5.0 | 2024-01-04T16:00:00 |
++-----+---------------------+
+
+SELECT * from test where ts >= '2024-01-02 08:00:00';
+
++-----+---------------------+
+| d | ts |
++-----+---------------------+
+| 2.0 | 2024-01-02T08:00:00 |
+| 3.0 | 2024-01-03T16:00:00 |
+| 4.0 | 2024-01-04T00:00:00 |
+| 5.0 | 2024-01-04T16:00:00 |
++-----+---------------------+
+
+SELECT * from test where ts <= '2024-01-03 16:00:00';
+
++-----+---------------------+
+| d | ts |
++-----+---------------------+
+| 1.0 | 2024-01-01T00:00:00 |
+| 2.0 | 2024-01-02T08:00:00 |
++-----+---------------------+
+
+select date_format(ts, '%Y-%m-%d %H:%M:%S:%3f') from test;
+
++----------------------------------------------------+
+| date_format(test.ts,Utf8("%Y-%m-%d %H:%M:%S:%3f")) |
++----------------------------------------------------+
+| 2024-01-01 08:00:00:000 |
+| 2024-01-02 16:00:00:000 |
+| 2024-01-04 00:00:00:000 |
+| 2024-01-04 08:00:00:000 |
+| 2024-01-05 00:00:00:000 |
++----------------------------------------------------+
+
+select to_unixtime('2024-01-02 00:00:00');
+
++------------------------------------------+
+| to_unixtime(Utf8("2024-01-02 00:00:00")) |
++------------------------------------------+
+| 1704124800 |
++------------------------------------------+
+
+select to_unixtime('2024-01-02 00:00:00+08:00');
+
++------------------------------------------------+
+| to_unixtime(Utf8("2024-01-02 00:00:00+08:00")) |
++------------------------------------------------+
+| 1704124800 |
++------------------------------------------------+
+
+--- UTC-8 ---
+SET TIME_ZONE = '-8:00';
+
+Affected Rows: 0
+
+SHOW VARIABLES time_zone;
+
++-----------+
+| TIME_ZONE |
++-----------+
+| -08:00 |
++-----------+
+
+SHOW VARIABLES system_time_zone;
+
++------------------+
+| SYSTEM_TIME_ZONE |
++------------------+
+| UTC |
++------------------+
+
+SELECT * from test;
+
++-----+---------------------+
+| d | ts |
++-----+---------------------+
+| 1.0 | 2024-01-01T00:00:00 |
+| 2.0 | 2024-01-02T08:00:00 |
+| 3.0 | 2024-01-03T16:00:00 |
+| 4.0 | 2024-01-04T00:00:00 |
+| 5.0 | 2024-01-04T16:00:00 |
++-----+---------------------+
+
+SELECT * from test where ts >= '2024-01-02 08:00:00';
+
++-----+---------------------+
+| d | ts |
++-----+---------------------+
+| 3.0 | 2024-01-03T16:00:00 |
+| 4.0 | 2024-01-04T00:00:00 |
+| 5.0 | 2024-01-04T16:00:00 |
++-----+---------------------+
+
+SELECT * from test where ts <= '2024-01-03 16:00:00';
+
++-----+---------------------+
+| d | ts |
++-----+---------------------+
+| 1.0 | 2024-01-01T00:00:00 |
+| 2.0 | 2024-01-02T08:00:00 |
+| 3.0 | 2024-01-03T16:00:00 |
+| 4.0 | 2024-01-04T00:00:00 |
++-----+---------------------+
+
+select date_format(ts, '%Y-%m-%d %H:%M:%S:%3f') from test;
+
++----------------------------------------------------+
+| date_format(test.ts,Utf8("%Y-%m-%d %H:%M:%S:%3f")) |
++----------------------------------------------------+
+| 2023-12-31 16:00:00:000 |
+| 2024-01-02 00:00:00:000 |
+| 2024-01-03 08:00:00:000 |
+| 2024-01-03 16:00:00:000 |
+| 2024-01-04 08:00:00:000 |
++----------------------------------------------------+
+
+select to_unixtime('2024-01-02 00:00:00');
+
++------------------------------------------+
+| to_unixtime(Utf8("2024-01-02 00:00:00")) |
++------------------------------------------+
+| 1704182400 |
++------------------------------------------+
+
+select to_unixtime('2024-01-02 00:00:00+08:00');
+
++------------------------------------------------+
+| to_unixtime(Utf8("2024-01-02 00:00:00+08:00")) |
++------------------------------------------------+
+| 1704124800 |
++------------------------------------------------+
+
+drop table test;
+
+Affected Rows: 0
+
+-- revert timezone to UTC
+SET TIME_ZONE = 'UTC';
+
+Affected Rows: 0
+
+SHOW VARIABLES time_zone;
+
++-----------+
+| TIME_ZONE |
++-----------+
+| UTC |
++-----------+
+
diff --git a/tests/cases/standalone/common/system/timezone.sql b/tests/cases/standalone/common/system/timezone.sql
new file mode 100644
index 000000000000..4b3d5f4f2d14
--- /dev/null
+++ b/tests/cases/standalone/common/system/timezone.sql
@@ -0,0 +1,70 @@
+--- tests for timezone ---
+SHOW VARIABLES time_zone;
+
+SHOW VARIABLES system_time_zone;
+
+CREATE TABLE test(d double, ts timestamp_ms time index);
+
+INSERT INTO test values
+ (1, '2024-01-01 00:00:00'),
+ (2, '2024-01-02 08:00:00'),
+ (3, '2024-01-03 16:00:00'),
+ (4, '2024-01-04 00:00:00'),
+ (5, '2024-01-05 00:00:00+08:00');
+
+SELECT * from test;
+
+SELECT * from test where ts >= '2024-01-02 08:00:00';
+
+SELECT * from test where ts <= '2024-01-03 16:00:00';
+
+select date_format(ts, '%Y-%m-%d %H:%M:%S:%3f') from test;
+
+select to_unixtime('2024-01-02 00:00:00');
+
+select to_unixtime('2024-01-02T00:00:00+08:00');
+
+--- UTC+8 ---
+SET TIME_ZONE = '+8:00';
+
+SHOW VARIABLES time_zone;
+
+SHOW VARIABLES system_time_zone;
+
+SELECT * from test;
+
+SELECT * from test where ts >= '2024-01-02 08:00:00';
+
+SELECT * from test where ts <= '2024-01-03 16:00:00';
+
+select date_format(ts, '%Y-%m-%d %H:%M:%S:%3f') from test;
+
+select to_unixtime('2024-01-02 00:00:00');
+
+select to_unixtime('2024-01-02 00:00:00+08:00');
+
+--- UTC-8 ---
+SET TIME_ZONE = '-8:00';
+
+SHOW VARIABLES time_zone;
+
+SHOW VARIABLES system_time_zone;
+
+SELECT * from test;
+
+SELECT * from test where ts >= '2024-01-02 08:00:00';
+
+SELECT * from test where ts <= '2024-01-03 16:00:00';
+
+select date_format(ts, '%Y-%m-%d %H:%M:%S:%3f') from test;
+
+select to_unixtime('2024-01-02 00:00:00');
+
+select to_unixtime('2024-01-02 00:00:00+08:00');
+
+drop table test;
+
+-- revert timezone to UTC
+SET TIME_ZONE = 'UTC';
+
+SHOW VARIABLES time_zone;
diff --git a/tests/cases/standalone/common/types/timestamp/timestamp.result b/tests/cases/standalone/common/types/timestamp/timestamp.result
index 6eaeb2e7a4f1..2c21d1390ca1 100644
--- a/tests/cases/standalone/common/types/timestamp/timestamp.result
+++ b/tests/cases/standalone/common/types/timestamp/timestamp.result
@@ -101,8 +101,21 @@ SELECT t%t FROM timestamp;
Error: 3000(PlanQuery), Failed to plan SQL: Error during planning: Cannot get result type for temporal operation Timestamp(Millisecond, None) % Timestamp(Millisecond, None): Invalid argument error: Invalid timestamp arithmetic operation: Timestamp(Millisecond, None) % Timestamp(Millisecond, None)
--- TODO(dennis): It can't run on distributed mode, uncomment it when the issue is fixed: https://github.com/GreptimeTeam/greptimedb/issues/2071 --
--- SELECT t-t FROM timestamp; --
+SELECT t-t FROM timestamp;
+
++---------------------------+
+| timestamp.t - timestamp.t |
++---------------------------+
+| PT0S |
+| |
+| PT0S |
+| PT0S |
+| PT0S |
+| PT0S |
+| PT0S |
+| PT0S |
++---------------------------+
+
SELECT EXTRACT(YEAR from TIMESTAMP '1992-01-01 01:01:01');
+-----------------------------------------------------+
diff --git a/tests/cases/standalone/common/types/timestamp/timestamp.sql b/tests/cases/standalone/common/types/timestamp/timestamp.sql
index aa97e778bfa8..e2924bc2e0f6 100644
--- a/tests/cases/standalone/common/types/timestamp/timestamp.sql
+++ b/tests/cases/standalone/common/types/timestamp/timestamp.sql
@@ -32,8 +32,7 @@ SELECT t/t FROM timestamp;
SELECT t%t FROM timestamp;
--- TODO(dennis): It can't run on distributed mode, uncomment it when the issue is fixed: https://github.com/GreptimeTeam/greptimedb/issues/2071 --
--- SELECT t-t FROM timestamp; --
+SELECT t-t FROM timestamp;
SELECT EXTRACT(YEAR from TIMESTAMP '1992-01-01 01:01:01');
diff --git a/tests/runner/src/env.rs b/tests/runner/src/env.rs
index 2378f088513e..8d1f189b7266 100644
--- a/tests/runner/src/env.rs
+++ b/tests/runner/src/env.rs
@@ -411,13 +411,32 @@ impl Database for GreptimeDB {
}
let mut client = self.client.lock().await;
+
if query.trim().to_lowercase().starts_with("use ") {
+ // use [db]
let database = query
.split_ascii_whitespace()
.nth(1)
.expect("Illegal `USE` statement: expecting a database.")
.trim_end_matches(';');
client.set_schema(database);
+ Box::new(ResultDisplayer {
+ result: Ok(Output::AffectedRows(0)),
+ }) as _
+ } else if query.trim().to_lowercase().starts_with("set time_zone") {
+ // set time_zone='xxx'
+ let timezone = query
+ .split('=')
+ .nth(1)
+ .expect("Illegal `SET TIMEZONE` statement: expecting a timezone expr.")
+ .trim()
+ .strip_prefix('\'')
+ .unwrap()
+ .strip_suffix("';")
+ .unwrap();
+
+ client.set_timezone(timezone);
+
Box::new(ResultDisplayer {
result: Ok(Output::AffectedRows(0)),
}) as _
type: feat
masked_commit_message: make query be aware of timezone setting (#3175)

hash: 8a400669aadf226f8c6fb2d944d402df6f8adb34
date: 2022-09-19 13:09:53
author: Ning Sun
commit_message: feat: postgre wire protocol for frontend (#269)
is_merge: false
git_diff:
diff --git a/src/cmd/src/frontend.rs b/src/cmd/src/frontend.rs
index b77294ce0220..9256ba7d69f8 100644
--- a/src/cmd/src/frontend.rs
+++ b/src/cmd/src/frontend.rs
@@ -38,6 +38,9 @@ struct StartCommand {
grpc_addr: Option<String>,
#[clap(long)]
mysql_addr: Option<String>,
+ #[cfg(feature = "postgres")]
+ #[clap(long)]
+ postgres_addr: Option<String>,
#[clap(short, long)]
config_file: Option<String>,
}
@@ -69,6 +72,10 @@ impl TryFrom<StartCommand> for FrontendOptions {
if let Some(addr) = cmd.mysql_addr {
opts.mysql_addr = Some(addr);
}
+ #[cfg(feature = "postgres")]
+ if let Some(addr) = cmd.postgres_addr {
+ opts.postgres_addr = Some(addr);
+ }
Ok(opts)
}
}
@@ -83,15 +90,24 @@ mod tests {
http_addr: Some("127.0.0.1:1234".to_string()),
grpc_addr: None,
mysql_addr: Some("127.0.0.1:5678".to_string()),
+ #[cfg(feature = "postgres")]
+ postgres_addr: Some("127.0.0.1:5432".to_string()),
config_file: None,
};
let opts: FrontendOptions = command.try_into().unwrap();
assert_eq!(opts.http_addr, Some("127.0.0.1:1234".to_string()));
assert_eq!(opts.mysql_addr, Some("127.0.0.1:5678".to_string()));
+ #[cfg(feature = "postgres")]
+ assert_eq!(opts.postgres_addr, Some("127.0.0.1:5432".to_string()));
let default_opts = FrontendOptions::default();
assert_eq!(opts.grpc_addr, default_opts.grpc_addr);
assert_eq!(opts.mysql_runtime_size, default_opts.mysql_runtime_size);
+ #[cfg(feature = "postgres")]
+ assert_eq!(
+ opts.postgres_runtime_size,
+ default_opts.postgres_runtime_size
+ );
}
}
diff --git a/src/datatypes/src/arrow_array.rs b/src/datatypes/src/arrow_array.rs
index f784c9574bd0..8107754ddc52 100644
--- a/src/datatypes/src/arrow_array.rs
+++ b/src/datatypes/src/arrow_array.rs
@@ -80,7 +80,9 @@ pub fn arrow_array_get(array: &dyn Array, idx: usize) -> Result<Value> {
mod test {
use arrow::array::Int64Array as ArrowI64Array;
use arrow::array::*;
- use common_time::timestamp::TimeUnit;
+ use arrow::buffer::Buffer;
+ use arrow::datatypes::{DataType, TimeUnit as ArrowTimeUnit};
+ use common_time::timestamp::{TimeUnit, Timestamp};
use super::*;
use crate::prelude::Vector;
@@ -142,5 +144,25 @@ mod test {
value,
Value::Timestamp(Timestamp::new(2, TimeUnit::Millisecond))
);
+
+ let array4 = PrimitiveArray::<i64>::from_data(
+ DataType::Timestamp(ArrowTimeUnit::Millisecond, None),
+ Buffer::from_slice(&vec![1, 2, 3, 4]),
+ None,
+ );
+ assert_eq!(
+ Value::Timestamp(Timestamp::new(1, TimeUnit::Millisecond)),
+ arrow_array_get(&array4, 0).unwrap()
+ );
+
+ let array4 = PrimitiveArray::<i64>::from_data(
+ DataType::Timestamp(ArrowTimeUnit::Nanosecond, None),
+ Buffer::from_slice(&vec![1, 2, 3, 4]),
+ None,
+ );
+ assert_eq!(
+ Value::Timestamp(Timestamp::new(1, TimeUnit::Nanosecond)),
+ arrow_array_get(&array4, 0).unwrap()
+ );
}
}
diff --git a/src/frontend/Cargo.toml b/src/frontend/Cargo.toml
index 3fea2cf4f2c5..b8a3341bb461 100644
--- a/src/frontend/Cargo.toml
+++ b/src/frontend/Cargo.toml
@@ -37,3 +37,7 @@ futures = "0.3"
tempdir = "0.3"
tonic = "0.8"
tower = "0.4"
+
+[features]
+default = ["postgres"]
+postgres = ["servers/postgres"]
diff --git a/src/frontend/src/frontend.rs b/src/frontend/src/frontend.rs
index 4464062566e5..6f55cc1e9da3 100644
--- a/src/frontend/src/frontend.rs
+++ b/src/frontend/src/frontend.rs
@@ -13,6 +13,10 @@ pub struct FrontendOptions {
pub grpc_addr: Option<String>,
pub mysql_addr: Option<String>,
pub mysql_runtime_size: u32,
+ #[cfg(feature = "postgres")]
+ pub postgres_addr: Option<String>,
+ #[cfg(feature = "postgres")]
+ pub postgres_runtime_size: u32,
}
impl Default for FrontendOptions {
@@ -22,6 +26,10 @@ impl Default for FrontendOptions {
grpc_addr: Some("0.0.0.0:4001".to_string()),
mysql_addr: Some("0.0.0.0:4002".to_string()),
mysql_runtime_size: 2,
+ #[cfg(feature = "postgres")]
+ postgres_addr: Some("0.0.0.0:4003".to_string()),
+ #[cfg(feature = "postgres")]
+ postgres_runtime_size: 2,
}
}
}
diff --git a/src/frontend/src/server.rs b/src/frontend/src/server.rs
index 19d10a792a72..12f6d07766ae 100644
--- a/src/frontend/src/server.rs
+++ b/src/frontend/src/server.rs
@@ -5,6 +5,8 @@ use common_runtime::Builder as RuntimeBuilder;
use servers::grpc::GrpcServer;
use servers::http::HttpServer;
use servers::mysql::server::MysqlServer;
+#[cfg(feature = "postgres")]
+use servers::postgres::PostgresServer;
use servers::server::Server;
use snafu::ResultExt;
use tokio::try_join;
@@ -55,10 +57,32 @@ impl Services {
None
};
+ #[cfg(feature = "postgres")]
+ let postgres_server_and_addr = if let Some(pg_addr) = &opts.postgres_addr {
+ let pg_addr = parse_addr(pg_addr)?;
+
+ let pg_io_runtime = Arc::new(
+ RuntimeBuilder::default()
+ .worker_threads(opts.postgres_runtime_size as usize)
+ .thread_name("pg-io-handlers")
+ .build()
+ .context(error::RuntimeResourceSnafu)?,
+ );
+
+ let pg_server =
+ Box::new(PostgresServer::new(instance.clone(), pg_io_runtime)) as Box<dyn Server>;
+
+ Some((pg_server, pg_addr))
+ } else {
+ None
+ };
+
try_join!(
start_server(http_server_and_addr),
start_server(grpc_server_and_addr),
- start_server(mysql_server_and_addr)
+ start_server(mysql_server_and_addr),
+ #[cfg(feature = "postgres")]
+ start_server(postgres_server_and_addr),
)
.context(error::StartServerSnafu)?;
Ok(())
type: feat
masked_commit_message: postgre wire protocol for frontend (#269)

hash: d39bafcfbdaa0cb70ae7e0e500a26a8351377249
date: 2024-08-26 18:34:06
author: liyang
commit_message: fix: change toolchain file name (#4621)
is_merge: false
git_diff:
diff --git a/.github/workflows/release-dev-builder-images.yaml b/.github/workflows/release-dev-builder-images.yaml
index 36a296c55c50..78e66e80925e 100644
--- a/.github/workflows/release-dev-builder-images.yaml
+++ b/.github/workflows/release-dev-builder-images.yaml
@@ -39,7 +39,7 @@ jobs:
commitShortSHA=`echo ${{ github.sha }} | cut -c1-8`
buildTime=`date +%Y%m%d%H%M%S`
BUILD_VERSION="$commitShortSHA-$buildTime"
- RUST_TOOLCHAIN_VERSION=$(cat rust_toolchain.toml | grep -Eo '[0-9]{4}-[0-9]{2}-[0-9]{2}')
+ RUST_TOOLCHAIN_VERSION=$(cat rust-toolchain.toml | grep -Eo '[0-9]{4}-[0-9]{2}-[0-9]{2}')
VERSION="${RUST_TOOLCHAIN_VERSION}-${BUILD_VERSION}"
echo "VERSION=${VERSION}" >> $GITHUB_ENV
echo "version=VERSION" >> $GITHUB_OUTPUT
type: fix
masked_commit_message: change toolchain file name (#4621)

hash: 85eebcb16f1807224f514590cb38125f14016651
date: 2023-11-24 00:24:59
author: Ning Sun
commit_message: fix: correct mysql timestamp encoding for binary protocol (#2797)
is_merge: false
git_diff:
diff --git a/Cargo.lock b/Cargo.lock
index def2104e34be..da0b43f05d94 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -840,17 +840,6 @@ dependencies = [
"tokio",
]
-[[package]]
-name = "bigdecimal"
-version = "0.3.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a6773ddc0eafc0e509fb60e48dff7f450f8e674a0686ae8605e8d9901bd5eefa"
-dependencies = [
- "num-bigint",
- "num-integer",
- "num-traits",
-]
-
[[package]]
name = "bigdecimal"
version = "0.4.2"
@@ -1019,6 +1008,15 @@ dependencies = [
"regex-automata 0.1.10",
]
+[[package]]
+name = "btoi"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9dd6407f73a9b8b6162d8a2ef999fe6afd7cc15902ebf42c5cd296addf17e0ad"
+dependencies = [
+ "num-traits",
+]
+
[[package]]
name = "build-data"
version = "0.1.5"
@@ -1643,7 +1641,7 @@ name = "common-decimal"
version = "0.4.3"
dependencies = [
"arrow",
- "bigdecimal 0.4.2",
+ "bigdecimal",
"common-error",
"common-macro",
"rust_decimal",
@@ -4120,6 +4118,15 @@ dependencies = [
"cpufeatures",
]
+[[package]]
+name = "keyed_priority_queue"
+version = "0.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4ee7893dab2e44ae5f9d0173f26ff4aa327c10b01b06a72b52dd9405b628640d"
+dependencies = [
+ "indexmap 2.1.0",
+]
+
[[package]]
name = "lalrpop"
version = "0.19.12"
@@ -4166,15 +4173,6 @@ version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55"
-[[package]]
-name = "lexical"
-version = "6.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c7aefb36fd43fef7003334742cbf77b243fcd36418a1d1bdd480d613a67968f6"
-dependencies = [
- "lexical-core",
-]
-
[[package]]
name = "lexical-core"
version = "0.8.5"
@@ -4424,11 +4422,11 @@ dependencies = [
[[package]]
name = "lru"
-version = "0.10.1"
+version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "718e8fae447df0c7e1ba7f5189829e63fd536945c8988d61444c19039f16b670"
+checksum = "2994eeba8ed550fd9b47a0b38f0242bc3344e496483c6180b69139cc2fa5d1d7"
dependencies = [
- "hashbrown 0.13.2",
+ "hashbrown 0.14.2",
]
[[package]]
@@ -4908,8 +4906,9 @@ dependencies = [
[[package]]
name = "mysql_async"
-version = "0.32.1"
-source = "git+https://github.com/blackbeam/mysql_async.git?rev=32c6f2a986789f97108502c2d0c755a089411b66#32c6f2a986789f97108502c2d0c755a089411b66"
+version = "0.33.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6750b17ce50f8f112ef1a8394121090d47c596b56a6a17569ca680a9626e2ef2"
dependencies = [
"bytes",
"crossbeam",
@@ -4917,15 +4916,16 @@ dependencies = [
"futures-core",
"futures-sink",
"futures-util",
+ "keyed_priority_queue",
"lazy_static",
"lru",
"mio",
"mysql_common",
"once_cell",
- "pem 2.0.1",
+ "pem 3.0.2",
"percent-encoding",
"pin-project",
- "priority-queue",
+ "rand",
"rustls 0.21.9",
"rustls-pemfile",
"serde",
@@ -4938,20 +4938,21 @@ dependencies = [
"twox-hash",
"url",
"webpki",
- "webpki-roots 0.23.1",
+ "webpki-roots 0.25.3",
]
[[package]]
name = "mysql_common"
-version = "0.30.6"
+version = "0.31.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "57349d5a326b437989b6ee4dc8f2f34b0cc131202748414712a8e7d98952fc8c"
+checksum = "06f19e4cfa0ab5a76b627cec2d81331c49b034988eaf302c3bafeada684eadef"
dependencies = [
"base64 0.21.5",
- "bigdecimal 0.3.1",
+ "bigdecimal",
"bindgen",
"bitflags 2.4.1",
"bitvec",
+ "btoi",
"byteorder",
"bytes",
"cc",
@@ -4961,7 +4962,6 @@ dependencies = [
"flate2",
"frunk",
"lazy_static",
- "lexical",
"mysql-common-derive",
"num-bigint",
"num-traits",
@@ -4978,6 +4978,7 @@ dependencies = [
"thiserror",
"time",
"uuid",
+ "zstd 0.12.4",
]
[[package]]
@@ -5349,9 +5350,9 @@ dependencies = [
[[package]]
name = "opensrv-mysql"
-version = "0.4.1"
+version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5c66063eb6aca9e6b5354f91db29f7244a8e7f9c01219b3ce76a5340a78d9f6f"
+checksum = "208bfa36c4b4a8d6ac90eda62e34efa66f7e692df91bd3626bc47329844a86b1"
dependencies = [
"async-trait",
"byteorder",
@@ -5821,6 +5822,16 @@ dependencies = [
"serde",
]
+[[package]]
+name = "pem"
+version = "3.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3163d2912b7c3b52d651a055f2c7eec9ba5cd22d26ef75b8dd3a59980b185923"
+dependencies = [
+ "base64 0.21.5",
+ "serde",
+]
+
[[package]]
name = "pem-rfc7468"
version = "0.3.1"
@@ -6218,16 +6229,6 @@ dependencies = [
"syn 2.0.39",
]
-[[package]]
-name = "priority-queue"
-version = "1.3.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fff39edfcaec0d64e8d0da38564fad195d2d51b680940295fcc307366e101e61"
-dependencies = [
- "autocfg",
- "indexmap 1.9.3",
-]
-
[[package]]
name = "proc-macro-crate"
version = "1.3.1"
@@ -7408,7 +7409,7 @@ checksum = "629648aced5775d558af50b2b4c7b02983a04b312126d45eeead26e7caa498b9"
dependencies = [
"log",
"ring 0.17.5",
- "rustls-webpki 0.101.7",
+ "rustls-webpki",
"sct",
]
@@ -7433,16 +7434,6 @@ dependencies = [
"base64 0.21.5",
]
-[[package]]
-name = "rustls-webpki"
-version = "0.100.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5f6a5fc258f1c1276dfe3016516945546e2d5383911efc0fc4f1cdc5df3a4ae3"
-dependencies = [
- "ring 0.16.20",
- "untrusted 0.7.1",
-]
-
[[package]]
name = "rustls-webpki"
version = "0.101.7"
@@ -9211,6 +9202,7 @@ dependencies = [
"itertools 0.10.5",
"meta-client",
"meta-srv",
+ "mysql_async",
"num_cpus",
"object-store",
"once_cell",
@@ -9236,6 +9228,7 @@ dependencies = [
"substrait 0.4.3",
"table",
"tempfile",
+ "time",
"tokio",
"tokio-postgres",
"tonic 0.10.2",
@@ -10477,12 +10470,9 @@ dependencies = [
[[package]]
name = "webpki-roots"
-version = "0.23.1"
+version = "0.25.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b03058f88386e5ff5310d9111d53f48b17d732b401aeb83a8d5190f2ac459338"
-dependencies = [
- "rustls-webpki 0.100.3",
-]
+checksum = "1778a42e8b3b90bff8d0f5032bf22250792889a5cdc752aa0020c84abe3aaf10"
[[package]]
name = "which"
diff --git a/src/common/time/src/datetime.rs b/src/common/time/src/datetime.rs
index 451dc57cf7ed..7f1517523072 100644
--- a/src/common/time/src/datetime.rs
+++ b/src/common/time/src/datetime.rs
@@ -15,10 +15,11 @@
use std::fmt::{Display, Formatter};
use std::str::FromStr;
-use chrono::{LocalResult, NaiveDateTime};
+use chrono::{LocalResult, NaiveDateTime, TimeZone as ChronoTimeZone, Utc};
use serde::{Deserialize, Serialize};
use crate::error::{Error, InvalidDateStrSnafu, Result};
+use crate::timezone::TimeZone;
use crate::util::{format_utc_datetime, local_datetime_to_utc};
use crate::Date;
@@ -108,6 +109,15 @@ impl DateTime {
NaiveDateTime::from_timestamp_millis(self.0)
}
+ pub fn to_chrono_datetime_with_timezone(&self, tz: Option<TimeZone>) -> Option<NaiveDateTime> {
+ let datetime = self.to_chrono_datetime();
+ datetime.map(|v| match tz {
+ Some(TimeZone::Offset(offset)) => offset.from_utc_datetime(&v).naive_local(),
+ Some(TimeZone::Named(tz)) => tz.from_utc_datetime(&v).naive_local(),
+ None => Utc.from_utc_datetime(&v).naive_local(),
+ })
+ }
+
/// Convert to [common_time::date].
pub fn to_date(&self) -> Option<Date> {
self.to_chrono_datetime().map(|d| Date::from(d.date()))
diff --git a/src/common/time/src/timestamp.rs b/src/common/time/src/timestamp.rs
index 1036b86a22f2..898e1c7b39c4 100644
--- a/src/common/time/src/timestamp.rs
+++ b/src/common/time/src/timestamp.rs
@@ -21,7 +21,7 @@ use std::time::Duration;
use arrow::datatypes::TimeUnit as ArrowTimeUnit;
use chrono::{
- DateTime, LocalResult, NaiveDate, NaiveDateTime, NaiveTime, TimeZone as ChronoTimeZone,
+ DateTime, LocalResult, NaiveDate, NaiveDateTime, NaiveTime, TimeZone as ChronoTimeZone, Utc,
};
use serde::{Deserialize, Serialize};
use snafu::{OptionExt, ResultExt};
@@ -252,6 +252,15 @@ impl Timestamp {
NaiveDateTime::from_timestamp_opt(sec, nsec)
}
+ pub fn to_chrono_datetime_with_timezone(&self, tz: Option<TimeZone>) -> Option<NaiveDateTime> {
+ let datetime = self.to_chrono_datetime();
+ datetime.map(|v| match tz {
+ Some(TimeZone::Offset(offset)) => offset.from_utc_datetime(&v).naive_local(),
+ Some(TimeZone::Named(tz)) => tz.from_utc_datetime(&v).naive_local(),
+ None => Utc.from_utc_datetime(&v).naive_local(),
+ })
+ }
+
/// Convert timestamp to chrono date.
pub fn to_chrono_date(&self) -> Option<NaiveDate> {
self.to_chrono_datetime().map(|ndt| ndt.date())
diff --git a/src/servers/Cargo.toml b/src/servers/Cargo.toml
index caeb4863e362..0ea4641cd5ee 100644
--- a/src/servers/Cargo.toml
+++ b/src/servers/Cargo.toml
@@ -54,7 +54,7 @@ lazy_static.workspace = true
mime_guess = "2.0"
once_cell.workspace = true
openmetrics-parser = "0.4"
-opensrv-mysql = "0.4"
+opensrv-mysql = "0.5"
opentelemetry-proto.workspace = true
parking_lot = "0.12"
pgwire = "0.16"
@@ -103,7 +103,7 @@ catalog = { workspace = true, features = ["testing"] }
client.workspace = true
common-base.workspace = true
common-test-util.workspace = true
-mysql_async = { git = "https://github.com/blackbeam/mysql_async.git", rev = "32c6f2a986789f97108502c2d0c755a089411b66", default-features = false, features = [
+mysql_async = { version = "0.33", default-features = false, features = [
"default-rustls",
] }
rand.workspace = true
diff --git a/src/servers/src/mysql/writer.rs b/src/servers/src/mysql/writer.rs
index f76f0f5826d2..6d92fb3804e6 100644
--- a/src/servers/src/mysql/writer.rs
+++ b/src/servers/src/mysql/writer.rs
@@ -192,9 +192,11 @@ impl<'a, W: AsyncWrite + Unpin> MysqlResultWriter<'a, W> {
Value::String(v) => row_writer.write_col(v.as_utf8())?,
Value::Binary(v) => row_writer.write_col(v.deref())?,
Value::Date(v) => row_writer.write_col(v.to_chrono_date())?,
- Value::DateTime(v) => row_writer.write_col(v.to_chrono_datetime())?,
+ // convert datetime and timestamp to timezone of current connection
+ Value::DateTime(v) => row_writer
+ .write_col(v.to_chrono_datetime_with_timezone(query_context.time_zone()))?,
Value::Timestamp(v) => row_writer
- .write_col(v.to_timezone_aware_string(query_context.time_zone()))?,
+ .write_col(v.to_chrono_datetime_with_timezone(query_context.time_zone()))?,
Value::Interval(v) => row_writer.write_col(v.to_iso8601_string())?,
Value::Duration(v) => row_writer.write_col(v.to_std_duration())?,
Value::List(_) => {
diff --git a/tests-integration/Cargo.toml b/tests-integration/Cargo.toml
index 4c09640de424..bb613302f79d 100644
--- a/tests-integration/Cargo.toml
+++ b/tests-integration/Cargo.toml
@@ -36,6 +36,9 @@ frontend = { workspace = true, features = ["testing"] }
futures.workspace = true
meta-client.workspace = true
meta-srv = { workspace = true, features = ["mock"] }
+mysql_async = { version = "0.33", default-features = false, features = [
+ "default-rustls",
+] }
object-store.workspace = true
once_cell.workspace = true
operator.workspace = true
@@ -59,6 +62,7 @@ sqlx = { version = "0.6", features = [
substrait.workspace = true
table.workspace = true
tempfile.workspace = true
+time = "0.3"
tokio.workspace = true
tonic.workspace = true
tower = "0.4"
diff --git a/tests-integration/tests/sql.rs b/tests-integration/tests/sql.rs
index 1f1a17164d33..d3c7914b4291 100644
--- a/tests-integration/tests/sql.rs
+++ b/tests-integration/tests/sql.rs
@@ -56,6 +56,7 @@ macro_rules! sql_tests {
test_mysql_auth,
test_mysql_crud,
test_mysql_timezone,
+ test_mysql_async_timestamp,
test_postgres_auth,
test_postgres_crud,
test_postgres_parameter_inference,
@@ -423,3 +424,158 @@ pub async fn test_postgres_parameter_inference(store_type: StorageType) {
let _ = fe_pg_server.shutdown().await;
guard.remove_all().await;
}
+
+pub async fn test_mysql_async_timestamp(store_type: StorageType) {
+ use mysql_async::prelude::*;
+ use time::PrimitiveDateTime;
+
+ #[derive(Debug)]
+ struct CpuMetric {
+ hostname: String,
+ environment: String,
+ usage_user: f64,
+ usage_system: f64,
+ usage_idle: f64,
+ ts: i64,
+ }
+
+ impl CpuMetric {
+ fn new(
+ hostname: String,
+ environment: String,
+ usage_user: f64,
+ usage_system: f64,
+ usage_idle: f64,
+ ts: i64,
+ ) -> Self {
+ Self {
+ hostname,
+ environment,
+ usage_user,
+ usage_system,
+ usage_idle,
+ ts,
+ }
+ }
+ }
+ common_telemetry::init_default_ut_logging();
+
+ let (addr, mut guard, fe_mysql_server) = setup_mysql_server(store_type, "sql_timestamp").await;
+ let url = format!("mysql://{addr}/public");
+ let opts = mysql_async::Opts::from_url(&url).unwrap();
+ let mut conn = mysql_async::Conn::new(opts)
+ .await
+ .expect("create connection failure");
+
+ r"CREATE TABLE IF NOT EXISTS cpu_metrics (
+ hostname STRING,
+ environment STRING,
+ usage_user DOUBLE,
+ usage_system DOUBLE,
+ usage_idle DOUBLE,
+ ts TIMESTAMP,
+ TIME INDEX(ts),
+ PRIMARY KEY(hostname, environment)
+);"
+ .ignore(&mut conn)
+ .await
+ .expect("create table failure");
+
+ let metrics = vec![
+ CpuMetric::new(
+ "host0".into(),
+ "test".into(),
+ 32f64,
+ 3f64,
+ 4f64,
+ 1680307200050,
+ ),
+ CpuMetric::new(
+ "host1".into(),
+ "test".into(),
+ 29f64,
+ 32f64,
+ 50f64,
+ 1680307200050,
+ ),
+ CpuMetric::new(
+ "host0".into(),
+ "test".into(),
+ 32f64,
+ 3f64,
+ 4f64,
+ 1680307260050,
+ ),
+ CpuMetric::new(
+ "host1".into(),
+ "test".into(),
+ 29f64,
+ 32f64,
+ 50f64,
+ 1680307260050,
+ ),
+ CpuMetric::new(
+ "host0".into(),
+ "test".into(),
+ 32f64,
+ 3f64,
+ 4f64,
+ 1680307320050,
+ ),
+ CpuMetric::new(
+ "host1".into(),
+ "test".into(),
+ 29f64,
+ 32f64,
+ 50f64,
+ 1680307320050,
+ ),
+ ];
+
+ r"INSERT INTO cpu_metrics (hostname, environment, usage_user, usage_system, usage_idle, ts)
+ VALUES (:hostname, :environment, :usage_user, :usage_system, :usage_idle, :ts)"
+ .with(metrics.iter().map(|metric| {
+ params! {
+ "hostname" => &metric.hostname,
+ "environment" => &metric.environment,
+ "usage_user" => metric.usage_user,
+ "usage_system" => metric.usage_system,
+ "usage_idle" => metric.usage_idle,
+ "ts" => metric.ts,
+ }
+ }))
+ .batch(&mut conn)
+ .await
+ .expect("insert data failure");
+
+ // query data
+ let loaded_metrics = "SELECT * FROM cpu_metrics"
+ .with(())
+ .map(
+ &mut conn,
+ |(hostname, environment, usage_user, usage_system, usage_idle, raw_ts): (
+ String,
+ String,
+ f64,
+ f64,
+ f64,
+ PrimitiveDateTime,
+ )| {
+ let ts = raw_ts.assume_utc().unix_timestamp() * 1000;
+ CpuMetric::new(
+ hostname,
+ environment,
+ usage_user,
+ usage_system,
+ usage_idle,
+ ts,
+ )
+ },
+ )
+ .await
+ .expect("query data failure");
+ assert_eq!(loaded_metrics.len(), 6);
+
+ let _ = fe_mysql_server.shutdown().await;
+ guard.remove_all().await;
+}
|
fix
|
correct mysql timestamp encoding for binary protocol (#2797)
|
a6893aad421fcc242a67b859a7aa21624bbe8a17
|
2024-12-11 13:34:02
|
jeremyhi
|
chore: set store_key_prefix for all kvbackend (#5132)
| false
|
diff --git a/src/meta-srv/src/bootstrap.rs b/src/meta-srv/src/bootstrap.rs
index 47afa0ab416b..85770e1f3d4d 100644
--- a/src/meta-srv/src/bootstrap.rs
+++ b/src/meta-srv/src/bootstrap.rs
@@ -206,43 +206,41 @@ pub async fn metasrv_builder(
plugins: Plugins,
kv_backend: Option<KvBackendRef>,
) -> Result<MetasrvBuilder> {
- let (kv_backend, election) = match (kv_backend, &opts.backend) {
+ let (mut kv_backend, election) = match (kv_backend, &opts.backend) {
(Some(kv_backend), _) => (kv_backend, None),
(None, BackendImpl::MemoryStore) => (Arc::new(MemoryKvBackend::new()) as _, None),
(None, BackendImpl::EtcdStore) => {
let etcd_client = create_etcd_client(opts).await?;
- let kv_backend = {
- let etcd_backend =
- EtcdStore::with_etcd_client(etcd_client.clone(), opts.max_txn_ops);
- if !opts.store_key_prefix.is_empty() {
- Arc::new(ChrootKvBackend::new(
- opts.store_key_prefix.clone().into_bytes(),
- etcd_backend,
- ))
- } else {
- etcd_backend
- }
- };
- (
- kv_backend,
- Some(
- EtcdElection::with_etcd_client(
- &opts.server_addr,
- etcd_client.clone(),
- opts.store_key_prefix.clone(),
- )
- .await?,
- ),
+ let kv_backend = EtcdStore::with_etcd_client(etcd_client.clone(), opts.max_txn_ops);
+ let election = EtcdElection::with_etcd_client(
+ &opts.server_addr,
+ etcd_client,
+ opts.store_key_prefix.clone(),
)
+ .await?;
+
+ (kv_backend, Some(election))
}
#[cfg(feature = "pg_kvbackend")]
(None, BackendImpl::PostgresStore) => {
let pg_client = create_postgres_client(opts).await?;
let kv_backend = PgStore::with_pg_client(pg_client).await.unwrap();
+ // TODO(jeremy, weny): implement election for postgres
(kv_backend, None)
}
};
+ if !opts.store_key_prefix.is_empty() {
+ info!(
+ "using chroot kv backend with prefix: {prefix}",
+ prefix = opts.store_key_prefix
+ );
+ kv_backend = Arc::new(ChrootKvBackend::new(
+ opts.store_key_prefix.clone().into_bytes(),
+ kv_backend,
+ ))
+ }
+
let in_memory = Arc::new(MemoryKvBackend::new()) as ResettableKvBackendRef;
let selector = match opts.selector {
diff --git a/src/meta-srv/src/metasrv.rs b/src/meta-srv/src/metasrv.rs
index 716b85f83485..da614ac9b943 100644
--- a/src/meta-srv/src/metasrv.rs
+++ b/src/meta-srv/src/metasrv.rs
@@ -470,6 +470,10 @@ impl Metasrv {
});
}
} else {
+ warn!(
+ "Ensure only one instance of Metasrv is running, as there is no election service."
+ );
+
if let Err(e) = self.wal_options_allocator.start().await {
error!(e; "Failed to start wal options allocator");
}
|
chore
|
set store_key_prefix for all kvbackend (#5132)
|
be22da775a1464176f1ee6cf2ef857f3b289625a
|
2025-01-09 16:43:48
|
Weny Xu
|
build: disable local IP detection feature in Android binary (#5327)
| false
|
diff --git a/src/meta-srv/Cargo.toml b/src/meta-srv/Cargo.toml
index 4804b3ba7186..d875095f7544 100644
--- a/src/meta-srv/Cargo.toml
+++ b/src/meta-srv/Cargo.toml
@@ -11,6 +11,9 @@ pg_kvbackend = ["dep:tokio-postgres", "common-meta/pg_kvbackend"]
[lints]
workspace = true
+[target.'cfg(not(target_os = "android"))'.dependencies]
+local-ip-address.workspace = true
+
[dependencies]
api.workspace = true
async-trait = "0.1"
@@ -45,7 +48,6 @@ humantime.workspace = true
humantime-serde.workspace = true
itertools.workspace = true
lazy_static.workspace = true
-local-ip-address.workspace = true
once_cell.workspace = true
parking_lot.workspace = true
prometheus.workspace = true
diff --git a/src/meta-srv/src/metasrv.rs b/src/meta-srv/src/metasrv.rs
index 029b049d68b5..092b6a33598b 100644
--- a/src/meta-srv/src/metasrv.rs
+++ b/src/meta-srv/src/metasrv.rs
@@ -204,6 +204,7 @@ impl Configurable for MetasrvOptions {
impl MetasrvOptions {
/// Detect server address if `auto_server_addr` is true.
+ #[cfg(not(target_os = "android"))]
pub fn detect_server_addr(&mut self) {
if self.server_addr.is_empty() {
match local_ip_address::local_ip() {
@@ -225,6 +226,13 @@ impl MetasrvOptions {
}
}
}
+
+ #[cfg(target_os = "android")]
+ pub fn detect_hostname(&mut self) {
+ if self.server_addr.is_empty() {
+ common_telemetry::debug!("detect local IP is not supported on Android");
+ }
+ }
}
pub struct MetasrvInfo {
diff --git a/src/servers/Cargo.toml b/src/servers/Cargo.toml
index 2013b6e1e677..bc1992ddeecc 100644
--- a/src/servers/Cargo.toml
+++ b/src/servers/Cargo.toml
@@ -14,6 +14,9 @@ testing = []
[lints]
workspace = true
+[target.'cfg(not(target_os = "android"))'.dependencies]
+local-ip-address.workspace = true
+
[dependencies]
ahash = "0.8"
api.workspace = true
@@ -65,7 +68,6 @@ influxdb_line_protocol = { git = "https://github.com/evenyag/influxdb_iox", bran
itertools.workspace = true
jsonb.workspace = true
lazy_static.workspace = true
-local-ip-address.workspace = true
log-query.workspace = true
loki-api = "0.1"
mime_guess = "2.0"
diff --git a/src/servers/src/grpc.rs b/src/servers/src/grpc.rs
index 8225f7cd8cdd..795884d492c1 100644
--- a/src/servers/src/grpc.rs
+++ b/src/servers/src/grpc.rs
@@ -67,6 +67,7 @@ pub struct GrpcOptions {
impl GrpcOptions {
/// Detect hostname if `auto_hostname` is true.
+ #[cfg(not(target_os = "android"))]
pub fn detect_hostname(&mut self) {
if self.hostname.is_empty() {
match local_ip_address::local_ip() {
@@ -88,6 +89,13 @@ impl GrpcOptions {
}
}
}
+
+ #[cfg(target_os = "android")]
+ pub fn detect_hostname(&mut self) {
+ if self.hostname.is_empty() {
+ common_telemetry::debug!("detect local IP is not supported on Android");
+ }
+ }
}
const DEFAULT_GRPC_ADDR_PORT: &str = "4001";
|
build
|
disable local IP detection feature in Android binary (#5327)
|
336b941113e79e92df8a44228571b5ef74289fd0
|
2025-02-11 13:46:27
|
Zhenchi
|
feat: change puffin stager eviction policy (#5511)
| false
|
diff --git a/src/puffin/src/puffin_manager/stager/bounded_stager.rs b/src/puffin/src/puffin_manager/stager/bounded_stager.rs
index 476eaf9bb62a..d74c9f821337 100644
--- a/src/puffin/src/puffin_manager/stager/bounded_stager.rs
+++ b/src/puffin/src/puffin_manager/stager/bounded_stager.rs
@@ -26,6 +26,7 @@ use common_runtime::runtime::RuntimeTrait;
use common_telemetry::{info, warn};
use futures::{FutureExt, StreamExt};
use moka::future::Cache;
+use moka::policy::EvictionPolicy;
use sha2::{Digest, Sha256};
use snafu::ResultExt;
use tokio::fs;
@@ -81,6 +82,7 @@ impl BoundedStager {
let cache = Cache::builder()
.max_capacity(capacity)
.weigher(|_: &String, v: &CacheValue| v.weight())
+ .eviction_policy(EvictionPolicy::lru())
.async_eviction_listener(move |k, v, _| {
let recycle_bin = recycle_bin_cloned.clone();
async move {
|
feat
|
change puffin stager eviction policy (#5511)
|
2dcc67769e7174ebabc6de9c4e2e1ae37d6e179a
|
2023-08-11 15:04:58
|
LFC
|
fix: runs sqlness test on windows-latest-8-cores (#2158)
| false
|
diff --git a/.github/workflows/develop.yml b/.github/workflows/develop.yml
index ad624837cd26..1f2ffa64aa17 100644
--- a/.github/workflows/develop.yml
+++ b/.github/workflows/develop.yml
@@ -76,7 +76,7 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
- os: [ ubuntu-latest-8-cores, windows-latest ]
+ os: [ ubuntu-latest-8-cores, windows-latest-8-cores ]
timeout-minutes: 60
steps:
- uses: actions/checkout@v3
|
fix
|
runs sqlness test on windows-latest-8-cores (#2158)
|
fe34ebf7703a2c4827d7fe99df40df1be952bb4a
|
2024-05-16 07:28:45
|
Ning Sun
|
test: give windows file watcher more time (#3953)
| false
|
diff --git a/src/servers/src/tls.rs b/src/servers/src/tls.rs
index 9a3082b46c7e..b2b35505968f 100644
--- a/src/servers/src/tls.rs
+++ b/src/servers/src/tls.rs
@@ -427,7 +427,13 @@ mod tests {
.expect("failed to copy key to tmpdir");
// waiting for async load
- std::thread::sleep(std::time::Duration::from_millis(300));
+ #[cfg(not(target_os = "windows"))]
+ let timeout_millis = 300;
+ #[cfg(target_os = "windows")]
+ let timeout_millis = 2000;
+
+ std::thread::sleep(std::time::Duration::from_millis(timeout_millis));
+
assert!(server_config.get_version() > 1);
assert!(server_config.get_server_config().is_some());
}
|
test
|
give windows file watcher more time (#3953)
|
872ac8058f89a22d82fc8df785494901531f101e
|
2022-11-16 12:29:48
|
LFC
|
feat: distributed execute gRPC and Prometheus query in Frontend (#520)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index b1a8f2b8c33b..5ea4f8e48568 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1179,10 +1179,13 @@ dependencies = [
"async-trait",
"common-base",
"common-error",
+ "common-query",
+ "common-recordbatch",
"common-runtime",
"criterion 0.4.0",
"dashmap",
"datafusion",
+ "datatypes",
"rand 0.8.5",
"snafu",
"tokio",
diff --git a/config/datanode.example.toml b/config/datanode.example.toml
index e0503f3d8d2c..df8e5a94fb9c 100644
--- a/config/datanode.example.toml
+++ b/config/datanode.example.toml
@@ -1,9 +1,9 @@
node_id = 42
mode = 'distributed'
-rpc_addr = '0.0.0.0:3001'
+rpc_addr = '127.0.0.1:3001'
wal_dir = '/tmp/greptimedb/wal'
rpc_runtime_size = 8
-mysql_addr = '0.0.0.0:3306'
+mysql_addr = '127.0.0.1:3306'
mysql_runtime_size = 4
[storage]
diff --git a/config/frontend.example.toml b/config/frontend.example.toml
index a9cb1e969f0d..87719e8b6dbf 100644
--- a/config/frontend.example.toml
+++ b/config/frontend.example.toml
@@ -1,6 +1,6 @@
mode = 'distributed'
datanode_rpc_addr = '127.0.0.1:3001'
-http_addr = '0.0.0.0:4000'
+http_addr = '127.0.0.1:4000'
[meta_client_opts]
metasrv_addr = '1.1.1.1:3002'
diff --git a/config/metasrv.example.toml b/config/metasrv.example.toml
index 00ca366dc0e4..5531e6e4a650 100644
--- a/config/metasrv.example.toml
+++ b/config/metasrv.example.toml
@@ -1,4 +1,4 @@
bind_addr = '127.0.0.1:3002'
-server_addr = '0.0.0.0:3002'
-store_addr = '127.0.0.1:2380'
-datanode_lease_secs = 30
+server_addr = '127.0.0.1:3002'
+store_addr = '127.0.0.1:2379'
+datanode_lease_secs = 15
diff --git a/config/standalone.example.toml b/config/standalone.example.toml
index 1c3d3f3fad5b..fa7ab8725612 100644
--- a/config/standalone.example.toml
+++ b/config/standalone.example.toml
@@ -1,7 +1,7 @@
node_id = 0
mode = 'standalone'
-http_addr = '0.0.0.0:4000'
-datanode_mysql_addr = '0.0.0.0:3306'
+http_addr = '127.0.0.1:4000'
+datanode_mysql_addr = '127.0.0.1:3306'
datanode_mysql_runtime_size = 4
wal_dir = '/tmp/greptimedb/wal/'
@@ -10,18 +10,18 @@ type = 'File'
data_dir = '/tmp/greptimedb/data/'
[grpc_options]
-addr = '0.0.0.0:4001'
+addr = '127.0.0.1:4001'
runtime_size = 8
[mysql_options]
-addr = '0.0.0.0:4002'
+addr = '127.0.0.1:4002'
runtime_size = 2
[influxdb_options]
enable = true
[opentsdb_options]
-addr = '0.0.0.0:4242'
+addr = '127.0.0.1:4242'
enable = true
runtime_size = 2
@@ -29,6 +29,6 @@ runtime_size = 2
enable = true
[postgres_options]
-addr = '0.0.0.0:4003'
+addr = '127.0.0.1:4003'
runtime_size = 2
check_pwd = false
diff --git a/src/client/src/database.rs b/src/client/src/database.rs
index 689edfe145f9..3415acd2eced 100644
--- a/src/client/src/database.rs
+++ b/src/client/src/database.rs
@@ -15,6 +15,7 @@
use std::sync::Arc;
use api::v1::codec::SelectResult as GrpcSelectResult;
+use api::v1::column::SemanticType;
use api::v1::{
object_expr, object_result, select_expr, DatabaseRequest, ExprHeader, InsertExpr,
MutateResult as GrpcMutateResult, ObjectExpr, ObjectResult as GrpcObjectResult, PhysicalPlan,
@@ -219,7 +220,12 @@ impl TryFrom<ObjectResult> for Output {
.map(|(column, vector)| {
let datatype = vector.data_type();
// nullable or not, does not affect the output
- ColumnSchema::new(&column.column_name, datatype, true)
+ let mut column_schema =
+ ColumnSchema::new(&column.column_name, datatype, true);
+ if column.semantic_type == SemanticType::Timestamp as i32 {
+ column_schema = column_schema.with_time_index(true);
+ }
+ column_schema
})
.collect::<Vec<ColumnSchema>>();
@@ -251,7 +257,7 @@ impl TryFrom<ObjectResult> for Output {
mod tests {
use api::helper::ColumnDataTypeWrapper;
use api::v1::Column;
- use datanode::server::grpc::select::{null_mask, values};
+ use common_grpc::select::{null_mask, values};
use datatypes::vectors::{
BinaryVector, BooleanVector, DateTimeVector, DateVector, Float32Vector, Float64Vector,
Int16Vector, Int32Vector, Int64Vector, Int8Vector, StringVector, UInt16Vector,
diff --git a/src/cmd/src/datanode.rs b/src/cmd/src/datanode.rs
index c08d41539add..692c120b64e1 100644
--- a/src/cmd/src/datanode.rs
+++ b/src/cmd/src/datanode.rs
@@ -145,9 +145,9 @@ mod tests {
)),
};
let options: DatanodeOptions = cmd.try_into().unwrap();
- assert_eq!("0.0.0.0:3001".to_string(), options.rpc_addr);
+ assert_eq!("127.0.0.1:3001".to_string(), options.rpc_addr);
assert_eq!("/tmp/greptimedb/wal".to_string(), options.wal_dir);
- assert_eq!("0.0.0.0:3306".to_string(), options.mysql_addr);
+ assert_eq!("127.0.0.1:3306".to_string(), options.mysql_addr);
assert_eq!(4, options.mysql_runtime_size);
assert_eq!(
"1.1.1.1:3002".to_string(),
diff --git a/src/cmd/src/metasrv.rs b/src/cmd/src/metasrv.rs
index a6cb988cc1c4..3d9018527d06 100644
--- a/src/cmd/src/metasrv.rs
+++ b/src/cmd/src/metasrv.rs
@@ -104,13 +104,13 @@ mod tests {
fn test_read_from_cmd() {
let cmd = StartCommand {
bind_addr: Some("127.0.0.1:3002".to_string()),
- server_addr: Some("0.0.0.0:3002".to_string()),
+ server_addr: Some("127.0.0.1:3002".to_string()),
store_addr: Some("127.0.0.1:2380".to_string()),
config_file: None,
};
let options: MetaSrvOptions = cmd.try_into().unwrap();
assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);
- assert_eq!("0.0.0.0:3002".to_string(), options.server_addr);
+ assert_eq!("127.0.0.1:3002".to_string(), options.server_addr);
assert_eq!("127.0.0.1:2380".to_string(), options.store_addr);
}
@@ -127,8 +127,8 @@ mod tests {
};
let options: MetaSrvOptions = cmd.try_into().unwrap();
assert_eq!("127.0.0.1:3002".to_string(), options.bind_addr);
- assert_eq!("0.0.0.0:3002".to_string(), options.server_addr);
- assert_eq!("127.0.0.1:2380".to_string(), options.store_addr);
- assert_eq!(30, options.datanode_lease_secs);
+ assert_eq!("127.0.0.1:3002".to_string(), options.server_addr);
+ assert_eq!("127.0.0.1:2379".to_string(), options.store_addr);
+ assert_eq!(15, options.datanode_lease_secs);
}
}
diff --git a/src/cmd/src/standalone.rs b/src/cmd/src/standalone.rs
index 60c6fae6fe56..c0152273b96e 100644
--- a/src/cmd/src/standalone.rs
+++ b/src/cmd/src/standalone.rs
@@ -77,7 +77,7 @@ pub struct StandaloneOptions {
impl Default for StandaloneOptions {
fn default() -> Self {
Self {
- http_addr: Some("0.0.0.0:4000".to_string()),
+ http_addr: Some("127.0.0.1:4000".to_string()),
grpc_options: Some(GrpcOptions::default()),
mysql_options: Some(MysqlOptions::default()),
postgres_options: Some(PostgresOptions::default()),
@@ -87,7 +87,7 @@ impl Default for StandaloneOptions {
mode: Mode::Standalone,
wal_dir: "/tmp/greptimedb/wal".to_string(),
storage: ObjectStoreConfig::default(),
- datanode_mysql_addr: "0.0.0.0:3306".to_string(),
+ datanode_mysql_addr: "127.0.0.1:3306".to_string(),
datanode_mysql_runtime_size: 4,
}
}
@@ -274,12 +274,15 @@ mod tests {
let fe_opts = FrontendOptions::try_from(cmd).unwrap();
assert_eq!(Mode::Standalone, fe_opts.mode);
assert_eq!("127.0.0.1:3001".to_string(), fe_opts.datanode_rpc_addr);
- assert_eq!(Some("0.0.0.0:4000".to_string()), fe_opts.http_addr);
+ assert_eq!(Some("127.0.0.1:4000".to_string()), fe_opts.http_addr);
assert_eq!(
- "0.0.0.0:4001".to_string(),
+ "127.0.0.1:4001".to_string(),
fe_opts.grpc_options.unwrap().addr
);
- assert_eq!("0.0.0.0:4002", fe_opts.mysql_options.as_ref().unwrap().addr);
+ assert_eq!(
+ "127.0.0.1:4002",
+ fe_opts.mysql_options.as_ref().unwrap().addr
+ );
assert_eq!(2, fe_opts.mysql_options.as_ref().unwrap().runtime_size);
assert!(fe_opts.influxdb_options.as_ref().unwrap().enable);
}
diff --git a/src/common/grpc/Cargo.toml b/src/common/grpc/Cargo.toml
index 0e1c8283d4e3..eef5357a3f5c 100644
--- a/src/common/grpc/Cargo.toml
+++ b/src/common/grpc/Cargo.toml
@@ -9,7 +9,10 @@ api = { path = "../../api" }
async-trait = "0.1"
common-base = { path = "../base" }
common-error = { path = "../error" }
+common-query = { path = "../query" }
+common-recordbatch = { path = "../recordbatch" }
common-runtime = { path = "../runtime" }
+datatypes = { path = "../../datatypes" }
dashmap = "5.4"
datafusion = { git = "https://github.com/apache/arrow-datafusion.git", branch = "arrow2", features = [
"simd",
diff --git a/src/common/grpc/src/error.rs b/src/common/grpc/src/error.rs
index 9ec77d7355b6..bfc326e5978a 100644
--- a/src/common/grpc/src/error.rs
+++ b/src/common/grpc/src/error.rs
@@ -69,6 +69,21 @@ pub enum Error {
source: tonic::transport::Error,
backtrace: Backtrace,
},
+
+ #[snafu(display("Failed to collect RecordBatches, source: {}", source))]
+ CollectRecordBatches {
+ #[snafu(backtrace)]
+ source: common_recordbatch::error::Error,
+ },
+
+ #[snafu(display("Failed to convert Arrow type: {}", from))]
+ Conversion { from: String, backtrace: Backtrace },
+
+ #[snafu(display("Column datatype error, source: {}", source))]
+ ColumnDataType {
+ #[snafu(backtrace)]
+ source: api::error::Error,
+ },
}
impl ErrorExt for Error {
@@ -83,7 +98,10 @@ impl ErrorExt for Error {
}
Error::NewProjection { .. }
| Error::DecodePhysicalPlanNode { .. }
- | Error::CreateChannel { .. } => StatusCode::Internal,
+ | Error::CreateChannel { .. }
+ | Error::Conversion { .. } => StatusCode::Internal,
+ Error::CollectRecordBatches { source } => source.status_code(),
+ Error::ColumnDataType { source } => source.status_code(),
}
}
diff --git a/src/common/grpc/src/lib.rs b/src/common/grpc/src/lib.rs
index f489a9ee93bb..8444de9086bd 100644
--- a/src/common/grpc/src/lib.rs
+++ b/src/common/grpc/src/lib.rs
@@ -15,6 +15,7 @@
pub mod channel_manager;
pub mod error;
pub mod physical;
+pub mod select;
pub mod writer;
pub use error::Error;
diff --git a/src/datanode/src/server/grpc/select.rs b/src/common/grpc/src/select.rs
similarity index 98%
rename from src/datanode/src/server/grpc/select.rs
rename to src/common/grpc/src/select.rs
index 769cb62f0892..58a3f10bfa96 100644
--- a/src/datanode/src/server/grpc/select.rs
+++ b/src/common/grpc/src/select.rs
@@ -21,6 +21,7 @@ use api::v1::column::{SemanticType, Values};
use api::v1::{Column, ObjectResult};
use arrow::array::{Array, BooleanArray, PrimitiveArray};
use common_base::BitVec;
+use common_error::prelude::ErrorExt;
use common_error::status_code::StatusCode;
use common_query::Output;
use common_recordbatch::{util, RecordBatches, SendableRecordBatchStream};
@@ -30,7 +31,7 @@ use snafu::{OptionExt, ResultExt};
use crate::error::{self, ConversionSnafu, Result};
-pub async fn to_object_result(output: Result<Output>) -> ObjectResult {
+pub async fn to_object_result(output: std::result::Result<Output, impl ErrorExt>) -> ObjectResult {
let result = match output {
Ok(Output::AffectedRows(rows)) => Ok(ObjectResultBuilder::new()
.status_code(StatusCode::Success as u32)
@@ -208,7 +209,7 @@ mod tests {
use datatypes::schema::Schema;
use datatypes::vectors::{UInt32Vector, VectorRef};
- use crate::server::grpc::select::{null_mask, try_convert, values};
+ use crate::select::{null_mask, try_convert, values};
#[test]
fn test_convert_record_batches_to_select_result() {
diff --git a/src/common/query/src/error.rs b/src/common/query/src/error.rs
index e90df0c300ae..90a127a64ced 100644
--- a/src/common/query/src/error.rs
+++ b/src/common/query/src/error.rs
@@ -114,6 +114,12 @@ pub enum InnerError {
#[snafu(backtrace)]
source: DataTypeError,
},
+
+ #[snafu(display("Failed to execute physical plan, source: {}", source))]
+ ExecutePhysicalPlan {
+ #[snafu(backtrace)]
+ source: BoxedError,
+ },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -141,6 +147,7 @@ impl ErrorExt for InnerError {
InnerError::UnsupportedInputDataType { .. } => StatusCode::InvalidArguments,
InnerError::ConvertDfRecordBatchStream { source, .. } => source.status_code(),
+ InnerError::ExecutePhysicalPlan { source } => source.status_code(),
}
}
@@ -165,6 +172,12 @@ impl From<Error> for DataFusionError {
}
}
+impl From<BoxedError> for Error {
+ fn from(source: BoxedError) -> Self {
+ InnerError::ExecutePhysicalPlan { source }.into()
+ }
+}
+
#[cfg(test)]
mod tests {
use arrow::error::ArrowError;
diff --git a/src/datanode/src/datanode.rs b/src/datanode/src/datanode.rs
index 78da04cea733..3eb9a967dc11 100644
--- a/src/datanode/src/datanode.rs
+++ b/src/datanode/src/datanode.rs
@@ -55,9 +55,9 @@ impl Default for DatanodeOptions {
fn default() -> Self {
Self {
node_id: 0,
- rpc_addr: "0.0.0.0:3001".to_string(),
+ rpc_addr: "127.0.0.1:3001".to_string(),
rpc_runtime_size: 8,
- mysql_addr: "0.0.0.0:3306".to_string(),
+ mysql_addr: "127.0.0.1:3306".to_string(),
mysql_runtime_size: 2,
meta_client_opts: MetaClientOpts::default(),
wal_dir: "/tmp/greptimedb/wal".to_string(),
diff --git a/src/datanode/src/error.rs b/src/datanode/src/error.rs
index f5593db57cc1..d837b7799d41 100644
--- a/src/datanode/src/error.rs
+++ b/src/datanode/src/error.rs
@@ -145,9 +145,6 @@ pub enum Error {
backtrace: Backtrace,
},
- #[snafu(display("Failed to convert datafusion type: {}", from))]
- Conversion { from: String },
-
#[snafu(display("Unsupported expr type: {}", name))]
UnsupportedExpr { name: String },
@@ -229,12 +226,6 @@ pub enum Error {
source: script::error::Error,
},
- #[snafu(display("Failed to collect RecordBatches, source: {}", source))]
- CollectRecordBatches {
- #[snafu(backtrace)]
- source: common_recordbatch::error::Error,
- },
-
#[snafu(display(
"Failed to parse string to timestamp, string: {}, source: {}",
raw,
@@ -338,7 +329,6 @@ impl ErrorExt for Error {
| Error::CreateDir { .. }
| Error::InsertSystemCatalog { .. }
| Error::RegisterSchema { .. }
- | Error::Conversion { .. }
| Error::IntoPhysicalPlan { .. }
| Error::UnsupportedExpr { .. }
| Error::ColumnDataType { .. }
@@ -349,8 +339,6 @@ impl ErrorExt for Error {
Error::StartScriptManager { source } => source.status_code(),
Error::OpenStorageEngine { source } => source.status_code(),
Error::RuntimeResource { .. } => StatusCode::RuntimeResourcesExhausted,
- Error::CollectRecordBatches { source } => source.status_code(),
-
Error::MetaClientInit { source, .. } => source.status_code(),
Error::InsertData { source, .. } => source.status_code(),
Error::EmptyInsertBatch => StatusCode::InvalidArguments,
diff --git a/src/datanode/src/instance/grpc.rs b/src/datanode/src/instance/grpc.rs
index 5a58709d45df..c863743d2998 100644
--- a/src/datanode/src/instance/grpc.rs
+++ b/src/datanode/src/instance/grpc.rs
@@ -21,6 +21,7 @@ use async_trait::async_trait;
use common_catalog::consts::DEFAULT_CATALOG_NAME;
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
+use common_grpc::select::to_object_result;
use common_insert::insertion_expr_to_request;
use common_query::Output;
use query::plan::LogicalPlan;
@@ -36,7 +37,6 @@ use crate::error::{
};
use crate::instance::Instance;
use crate::server::grpc::plan::PhysicalPlanner;
-use crate::server::grpc::select::to_object_result;
impl Instance {
pub async fn execute_grpc_insert(
diff --git a/src/datanode/src/server/grpc.rs b/src/datanode/src/server/grpc.rs
index 2900ba751c08..1a75f4c571ad 100644
--- a/src/datanode/src/server/grpc.rs
+++ b/src/datanode/src/server/grpc.rs
@@ -14,4 +14,3 @@
mod ddl;
pub(crate) mod plan;
-pub mod select;
diff --git a/src/datatypes/src/data_type.rs b/src/datatypes/src/data_type.rs
index ddd3e3f4bcf0..e14a3d8e8480 100644
--- a/src/datatypes/src/data_type.rs
+++ b/src/datatypes/src/data_type.rs
@@ -70,7 +70,7 @@ impl ConcreteDataType {
matches!(self, ConcreteDataType::Boolean(_))
}
- pub fn is_string(&self) -> bool {
+ pub fn stringifiable(&self) -> bool {
matches!(
self,
ConcreteDataType::String(_)
diff --git a/src/frontend/src/error.rs b/src/frontend/src/error.rs
index ad5ba0a6e7f6..8b71372e505e 100644
--- a/src/frontend/src/error.rs
+++ b/src/frontend/src/error.rs
@@ -389,6 +389,30 @@ pub enum Error {
#[snafu(backtrace)]
source: datatypes::error::Error,
},
+
+ #[snafu(display("Failed to build DataFusion logical plan, source: {}", source))]
+ BuildDfLogicalPlan {
+ source: datafusion_common::DataFusionError,
+ backtrace: Backtrace,
+ },
+
+ #[snafu(display("Failed to convert Arrow schema, source: {}", source))]
+ ConvertArrowSchema {
+ #[snafu(backtrace)]
+ source: datatypes::error::Error,
+ },
+
+ #[snafu(display("Failed to collect Recordbatch stream, source: {}", source))]
+ CollectRecordbatchStream {
+ #[snafu(backtrace)]
+ source: common_recordbatch::error::Error,
+ },
+
+ #[snafu(display("Failed to create Recordbatches, source: {}", source))]
+ CreateRecordbatches {
+ #[snafu(backtrace)]
+ source: common_recordbatch::error::Error,
+ },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -418,7 +442,8 @@ impl ErrorExt for Error {
Error::ConvertColumnDefaultConstraint { source, .. }
| Error::ConvertScalarValue { source, .. }
- | Error::VectorComputation { source } => source.status_code(),
+ | Error::VectorComputation { source }
+ | Error::ConvertArrowSchema { source } => source.status_code(),
Error::ConnectDatanode { source, .. }
| Error::RequestDatanode { source }
@@ -434,7 +459,8 @@ impl ErrorExt for Error {
| Error::FindLeaderPeer { .. }
| Error::FindRegionPartition { .. }
| Error::IllegalTableRoutesData { .. }
- | Error::UnsupportedExpr { .. } => StatusCode::Internal,
+ | Error::UnsupportedExpr { .. }
+ | Error::BuildDfLogicalPlan { .. } => StatusCode::Internal,
Error::IllegalFrontendState { .. } | Error::IncompleteGrpcResult { .. } => {
StatusCode::Unexpected
@@ -467,6 +493,9 @@ impl ErrorExt for Error {
Error::ExecuteSql { source, .. } => source.status_code(),
Error::InsertBatchToRequest { source, .. } => source.status_code(),
Error::CreateDatabase { source, .. } => source.status_code(),
+ Error::CollectRecordbatchStream { source } | Error::CreateRecordbatches { source } => {
+ source.status_code()
+ }
}
}
diff --git a/src/frontend/src/frontend.rs b/src/frontend/src/frontend.rs
index c35b9a61c529..f9ee4c3a7c28 100644
--- a/src/frontend/src/frontend.rs
+++ b/src/frontend/src/frontend.rs
@@ -44,7 +44,7 @@ pub struct FrontendOptions {
impl Default for FrontendOptions {
fn default() -> Self {
Self {
- http_addr: Some("0.0.0.0:4000".to_string()),
+ http_addr: Some("127.0.0.1:4000".to_string()),
grpc_options: Some(GrpcOptions::default()),
mysql_options: Some(MysqlOptions::default()),
postgres_options: Some(PostgresOptions::default()),
diff --git a/src/frontend/src/grpc.rs b/src/frontend/src/grpc.rs
index 8c20556f1af9..49044dfc4f45 100644
--- a/src/frontend/src/grpc.rs
+++ b/src/frontend/src/grpc.rs
@@ -23,7 +23,7 @@ pub struct GrpcOptions {
impl Default for GrpcOptions {
fn default() -> Self {
Self {
- addr: "0.0.0.0:4001".to_string(),
+ addr: "127.0.0.1:4001".to_string(),
runtime_size: 8,
}
}
diff --git a/src/frontend/src/instance.rs b/src/frontend/src/instance.rs
index e34e8a271f68..51fbe1706caf 100644
--- a/src/frontend/src/instance.rs
+++ b/src/frontend/src/instance.rs
@@ -26,8 +26,8 @@ use api::v1::alter_expr::Kind;
use api::v1::codec::InsertBatch;
use api::v1::object_expr::Expr;
use api::v1::{
- admin_expr, insert_expr, AddColumns, AdminExpr, AdminResult, AlterExpr, CreateDatabaseExpr,
- CreateExpr, InsertExpr, ObjectExpr, ObjectResult as GrpcObjectResult,
+ admin_expr, insert_expr, select_expr, AddColumns, AdminExpr, AdminResult, AlterExpr,
+ CreateDatabaseExpr, CreateExpr, InsertExpr, ObjectExpr, ObjectResult as GrpcObjectResult,
};
use async_trait::async_trait;
use catalog::remote::MetaKvBackend;
@@ -37,6 +37,7 @@ use client::{Client, Database, Select};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_error::prelude::{BoxedError, StatusCode};
use common_grpc::channel_manager::{ChannelConfig, ChannelManager};
+use common_grpc::select::to_object_result;
use common_query::Output;
use common_telemetry::{debug, error, info};
use distributed::DistInstance;
@@ -531,21 +532,26 @@ impl Instance {
}
}
+fn parse_stmt(sql: &str) -> Result<Statement> {
+ let mut stmt = ParserContext::create_with_dialect(sql, &GenericDialect {})
+ .context(error::ParseSqlSnafu)?;
+ // TODO(LFC): Support executing multiple SQL queries,
+ // which seems to be a major change to our whole server framework?
+ ensure!(
+ stmt.len() == 1,
+ error::InvalidSqlSnafu {
+ err_msg: "Currently executing multiple SQL queries are not supported."
+ }
+ );
+ Ok(stmt.remove(0))
+}
+
#[async_trait]
impl SqlQueryHandler for Instance {
async fn do_query(&self, query: &str) -> server_error::Result<Output> {
- let mut stmt = ParserContext::create_with_dialect(query, &GenericDialect {})
+ let stmt = parse_stmt(query)
.map_err(BoxedError::new)
.context(server_error::ExecuteQuerySnafu { query })?;
- if stmt.len() != 1 {
- // TODO(LFC): Support executing multiple SQLs,
- // which seems to be a major change to our whole server framework?
- return server_error::NotSupportedSnafu {
- feat: "Only one SQL is allowed to be executed at one time.",
- }
- .fail();
- }
- let stmt = stmt.remove(0);
match stmt {
Statement::Query(_) => self
@@ -680,16 +686,40 @@ impl GrpcQueryHandler for Instance {
query: format!("{:?}", query),
})
}
-
- // FIXME(hl): refactor
- _ => self
- .database(DEFAULT_SCHEMA_NAME)
- .object(query.clone())
- .await
- .map_err(BoxedError::new)
- .with_context(|_| server_error::ExecuteQuerySnafu {
- query: format!("{:?}", query),
- }),
+ Expr::Select(select) => {
+ let select = select
+ .expr
+ .as_ref()
+ .context(server_error::InvalidQuerySnafu {
+ reason: "empty query",
+ })?;
+ match select {
+ select_expr::Expr::Sql(sql) => {
+ let output = SqlQueryHandler::do_query(self, sql).await;
+ Ok(to_object_result(output).await)
+ }
+ _ => {
+ if self.dist_instance.is_some() {
+ return server_error::NotSupportedSnafu {
+ feat: "Executing plan directly in Frontend.",
+ }
+ .fail();
+ }
+ // FIXME(hl): refactor
+ self.database(DEFAULT_SCHEMA_NAME)
+ .object(query.clone())
+ .await
+ .map_err(BoxedError::new)
+ .with_context(|_| server_error::ExecuteQuerySnafu {
+ query: format!("{:?}", query),
+ })
+ }
+ }
+ }
+ _ => server_error::NotSupportedSnafu {
+ feat: "Currently only insert and select is supported in GRPC service.",
+ }
+ .fail(),
}
} else {
server_error::InvalidQuerySnafu {
diff --git a/src/frontend/src/instance/prometheus.rs b/src/frontend/src/instance/prometheus.rs
index 224cb6bed70e..e0b81c008c24 100644
--- a/src/frontend/src/instance/prometheus.rs
+++ b/src/frontend/src/instance/prometheus.rs
@@ -15,9 +15,11 @@
use api::prometheus::remote::read_request::ResponseType;
use api::prometheus::remote::{Query, QueryResult, ReadRequest, ReadResponse, WriteRequest};
use async_trait::async_trait;
-use client::{Database, ObjectResult, Select};
+use client::{ObjectResult, Select};
use common_error::prelude::BoxedError;
+use common_grpc::select::to_object_result;
use common_telemetry::logging;
+use futures_util::TryFutureExt;
use prost::Message;
use servers::error::{self, Result as ServerResult};
use servers::prometheus::{self, Metrics};
@@ -25,7 +27,7 @@ use servers::query_handler::{PrometheusProtocolHandler, PrometheusResponse};
use snafu::{OptionExt, ResultExt};
use crate::frontend::Mode;
-use crate::instance::Instance;
+use crate::instance::{parse_stmt, Instance};
const SAMPLES_RESPONSE_TYPE: i32 = ResponseType::Samples as i32;
@@ -73,31 +75,37 @@ fn object_result_to_query_result(
})
}
-async fn handle_remote_queries(
- db: &Database,
- queries: &[Query],
-) -> ServerResult<Vec<(String, ObjectResult)>> {
- let mut results = Vec::with_capacity(queries.len());
-
- for q in queries {
- let (table_name, sql) = prometheus::query_to_sql(db.name(), q)?;
-
- logging::debug!(
- "prometheus remote read, table: {}, sql: {}",
- table_name,
- sql
- );
-
- let object_result = db
- .select(Select::Sql(sql.clone()))
- .await
+impl Instance {
+ async fn handle_remote_queries(
+ &self,
+ db: &str,
+ queries: &[Query],
+ ) -> ServerResult<Vec<(String, ObjectResult)>> {
+ let mut results = Vec::with_capacity(queries.len());
+
+ for query in queries {
+ let (table_name, sql) = prometheus::query_to_sql(db, query)?;
+ logging::debug!(
+ "prometheus remote read, table: {}, sql: {}",
+ table_name,
+ sql
+ );
+
+ let object_result = if let Some(dist_instance) = &self.dist_instance {
+ let output = futures::future::ready(parse_stmt(&sql))
+ .and_then(|stmt| dist_instance.handle_sql(&sql, stmt))
+ .await;
+ to_object_result(output).await.try_into()
+ } else {
+ self.database(db).select(Select::Sql(sql.clone())).await
+ }
.map_err(BoxedError::new)
.context(error::ExecuteQuerySnafu { query: sql })?;
- results.push((table_name, object_result));
+ results.push((table_name, object_result));
+ }
+ Ok(results)
}
-
- Ok(results)
}
#[async_trait]
@@ -138,7 +146,9 @@ impl PrometheusProtocolHandler for Instance {
let response_type = negotiate_response_type(&request.accepted_response_types)?;
// TODO(dennis): use read_hints to speedup query if possible
- let results = handle_remote_queries(&self.database(database), &request.queries).await?;
+ let results = self
+ .handle_remote_queries(database, &request.queries)
+ .await?;
match response_type {
ResponseType::Samples => {
diff --git a/src/frontend/src/lib.rs b/src/frontend/src/lib.rs
index 1059f1081de4..82807d058221 100644
--- a/src/frontend/src/lib.rs
+++ b/src/frontend/src/lib.rs
@@ -22,7 +22,6 @@ pub mod frontend;
pub mod grpc;
pub mod influxdb;
pub mod instance;
-pub(crate) mod mock;
pub mod mysql;
pub mod opentsdb;
pub mod partitioning;
diff --git a/src/frontend/src/mysql.rs b/src/frontend/src/mysql.rs
index 56ef19834a35..71bb600753a5 100644
--- a/src/frontend/src/mysql.rs
+++ b/src/frontend/src/mysql.rs
@@ -23,7 +23,7 @@ pub struct MysqlOptions {
impl Default for MysqlOptions {
fn default() -> Self {
Self {
- addr: "0.0.0.0:4002".to_string(),
+ addr: "127.0.0.1:4002".to_string(),
runtime_size: 2,
}
}
diff --git a/src/frontend/src/opentsdb.rs b/src/frontend/src/opentsdb.rs
index c905189a17ec..16cc5c5fa8a3 100644
--- a/src/frontend/src/opentsdb.rs
+++ b/src/frontend/src/opentsdb.rs
@@ -23,7 +23,7 @@ pub struct OpentsdbOptions {
impl Default for OpentsdbOptions {
fn default() -> Self {
Self {
- addr: "0.0.0.0:4242".to_string(),
+ addr: "127.0.0.1:4242".to_string(),
runtime_size: 2,
}
}
diff --git a/src/frontend/src/postgres.rs b/src/frontend/src/postgres.rs
index a77243ce3715..41a11233bc33 100644
--- a/src/frontend/src/postgres.rs
+++ b/src/frontend/src/postgres.rs
@@ -24,7 +24,7 @@ pub struct PostgresOptions {
impl Default for PostgresOptions {
fn default() -> Self {
Self {
- addr: "0.0.0.0:4003".to_string(),
+ addr: "127.0.0.1:4003".to_string(),
runtime_size: 2,
check_pwd: false,
}
diff --git a/src/frontend/src/table.rs b/src/frontend/src/table.rs
index 9d34f1a81cdc..da41bbe46cc0 100644
--- a/src/frontend/src/table.rs
+++ b/src/frontend/src/table.rs
@@ -20,6 +20,7 @@ use std::sync::Arc;
use async_trait::async_trait;
use client::Database;
+use common_error::prelude::BoxedError;
use common_query::error::Result as QueryResult;
use common_query::logical_plan::Expr;
use common_query::physical_plan::{PhysicalPlan, PhysicalPlanRef};
@@ -40,7 +41,6 @@ use tokio::sync::RwLock;
use crate::datanode::DatanodeClients;
use crate::error::{self, Error, Result};
-use crate::mock::{DatanodeInstance, TableScanPlan};
use crate::partitioning::columns::RangeColumnsPartitionRule;
use crate::partitioning::range::RangePartitionRule;
use crate::partitioning::{
@@ -48,7 +48,10 @@ use crate::partitioning::{
};
use crate::spliter::WriteSpliter;
use crate::table::route::TableRoutes;
+use crate::table::scan::{DatanodeInstance, TableScanPlan};
+
pub mod insert;
+pub(crate) mod scan;
#[derive(Clone)]
pub struct DistTable {
@@ -399,7 +402,7 @@ impl PhysicalPlan for DistTableScan {
_runtime: Arc<RuntimeEnv>,
) -> QueryResult<SendableRecordBatchStream> {
let exec = &self.partition_execs[partition];
- exec.maybe_init().await;
+ exec.maybe_init().await.map_err(BoxedError::new)?;
Ok(exec.as_stream().await)
}
}
@@ -415,14 +418,14 @@ struct PartitionExec {
}
impl PartitionExec {
- async fn maybe_init(&self) {
+ async fn maybe_init(&self) -> Result<()> {
if self.batches.read().await.is_some() {
- return;
+ return Ok(());
}
let mut batches = self.batches.write().await;
if batches.is_some() {
- return;
+ return Ok(());
}
let plan = TableScanPlan {
@@ -431,8 +434,9 @@ impl PartitionExec {
filters: self.filters.clone(),
limit: self.limit,
};
- let result = self.datanode_instance.grpc_table_scan(plan).await;
+ let result = self.datanode_instance.grpc_table_scan(plan).await?;
let _ = batches.insert(result);
+ Ok(())
}
async fn as_stream(&self) -> SendableRecordBatchStream {
diff --git a/src/frontend/src/table/insert.rs b/src/frontend/src/table/insert.rs
index 09dabcb02aad..ceb6780e13c8 100644
--- a/src/frontend/src/table/insert.rs
+++ b/src/frontend/src/table/insert.rs
@@ -29,7 +29,7 @@ use table::requests::InsertRequest;
use super::DistTable;
use crate::error;
use crate::error::Result;
-use crate::mock::DatanodeInstance;
+use crate::table::scan::DatanodeInstance;
impl DistTable {
pub async fn dist_insert(
diff --git a/src/frontend/src/mock.rs b/src/frontend/src/table/scan.rs
similarity index 69%
rename from src/frontend/src/mock.rs
rename to src/frontend/src/table/scan.rs
index 9dfbcd256b17..1919dc0fb6c7 100644
--- a/src/frontend/src/mock.rs
+++ b/src/frontend/src/table/scan.rs
@@ -12,8 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// FIXME(LFC): no mock
-
use std::fmt::Formatter;
use std::sync::Arc;
@@ -24,13 +22,16 @@ use common_query::Output;
use common_recordbatch::{util, RecordBatches};
use datafusion::logical_plan::{LogicalPlan as DfLogicPlan, LogicalPlanBuilder};
use datafusion_expr::Expr as DfExpr;
-use datatypes::prelude::Value;
+use datatypes::prelude::*;
use datatypes::schema::SchemaRef;
use meta_client::rpc::TableName;
use query::plan::LogicalPlan;
+use snafu::ResultExt;
use table::table::adapter::DfTableProviderAdapter;
use table::TableRef;
+use crate::error::{self, Result};
+
#[derive(Clone)]
pub struct DatanodeInstance {
table: TableRef,
@@ -52,25 +53,33 @@ impl DatanodeInstance {
self.db.insert(request).await
}
- pub(crate) async fn grpc_table_scan(&self, plan: TableScanPlan) -> RecordBatches {
- let logical_plan = self.build_logical_plan(&plan);
- common_telemetry::info!("logical_plan: {:?}", logical_plan);
- // TODO(LFC): Directly pass in logical plan to GRPC interface when our substrait codec supports filter.
- let sql = to_sql(logical_plan);
- let result = self.db.select(Select::Sql(sql)).await.unwrap();
+ pub(crate) async fn grpc_table_scan(&self, plan: TableScanPlan) -> Result<RecordBatches> {
+ let logical_plan = self.build_logical_plan(&plan)?;
- let output: Output = result.try_into().unwrap();
- let recordbatches = match output {
- Output::Stream(stream) => util::collect(stream).await.unwrap(),
- Output::RecordBatches(x) => x.take(),
+ // TODO(LFC): Directly pass in logical plan to GRPC interface when our substrait codec supports filter.
+ let sql = to_sql(logical_plan)?;
+
+ let output = self
+ .db
+ .select(Select::Sql(sql))
+ .await
+ .and_then(Output::try_from)
+ .context(error::SelectSnafu)?;
+
+ Ok(match output {
+ Output::Stream(stream) => {
+ let schema = stream.schema();
+ let batches = util::collect(stream)
+ .await
+ .context(error::CollectRecordbatchStreamSnafu)?;
+ RecordBatches::try_new(schema, batches).context(error::CreateRecordbatchesSnafu)?
+ }
+ Output::RecordBatches(x) => x,
_ => unreachable!(),
- };
-
- let schema = recordbatches.first().unwrap().schema.clone();
- RecordBatches::try_new(schema, recordbatches).unwrap()
+ })
}
- fn build_logical_plan(&self, table_scan: &TableScanPlan) -> LogicalPlan {
+ fn build_logical_plan(&self, table_scan: &TableScanPlan) -> Result<LogicalPlan> {
let table_provider = Arc::new(DfTableProviderAdapter::new(self.table.clone()));
let mut builder = LogicalPlanBuilder::scan_with_filters(
@@ -83,13 +92,16 @@ impl DatanodeInstance {
.map(|x| x.df_expr().clone())
.collect::<Vec<_>>(),
)
- .unwrap();
+ .context(error::BuildDfLogicalPlanSnafu)?;
+
if let Some(limit) = table_scan.limit {
- builder = builder.limit(limit).unwrap();
+ builder = builder
+ .limit(limit)
+ .context(error::BuildDfLogicalPlanSnafu)?;
}
- let plan = builder.build().unwrap();
- LogicalPlan::DfPlan(plan)
+ let plan = builder.build().context(error::BuildDfLogicalPlanSnafu)?;
+ Ok(LogicalPlan::DfPlan(plan))
}
}
@@ -101,14 +113,20 @@ pub(crate) struct TableScanPlan {
pub limit: Option<usize>,
}
-fn to_sql(plan: LogicalPlan) -> String {
+fn to_sql(plan: LogicalPlan) -> Result<String> {
let LogicalPlan::DfPlan(plan) = plan;
let table_scan = match plan {
DfLogicPlan::TableScan(table_scan) => table_scan,
_ => unreachable!("unknown plan: {:?}", plan),
};
- let schema: SchemaRef = Arc::new(table_scan.source.schema().try_into().unwrap());
+ let schema: SchemaRef = Arc::new(
+ table_scan
+ .source
+ .schema()
+ .try_into()
+ .context(error::ConvertArrowSchemaSnafu)?,
+ );
let projection = table_scan
.projection
.map(|x| {
@@ -131,7 +149,7 @@ fn to_sql(plan: LogicalPlan) -> String {
.filters
.iter()
.map(expr_to_sql)
- .collect::<Vec<String>>()
+ .collect::<Result<Vec<String>>>()?
.join(" AND ");
if !filters.is_empty() {
sql.push_str(" where ");
@@ -142,30 +160,31 @@ fn to_sql(plan: LogicalPlan) -> String {
sql.push_str(" limit ");
sql.push_str(&limit.to_string());
}
- sql
+ Ok(sql)
}
-fn expr_to_sql(expr: &DfExpr) -> String {
- match expr {
+fn expr_to_sql(expr: &DfExpr) -> Result<String> {
+ Ok(match expr {
DfExpr::BinaryExpr {
ref left,
ref right,
ref op,
} => format!(
"{} {} {}",
- expr_to_sql(left.as_ref()),
+ expr_to_sql(left.as_ref())?,
op,
- expr_to_sql(right.as_ref())
+ expr_to_sql(right.as_ref())?
),
DfExpr::Column(c) => c.name.clone(),
DfExpr::Literal(sv) => {
- let v: Value = Value::try_from(sv.clone()).unwrap();
- if v.data_type().is_string() {
+ let v: Value = Value::try_from(sv.clone())
+ .with_context(|_| error::ConvertScalarValueSnafu { value: sv.clone() })?;
+ if matches!(v.data_type(), ConcreteDataType::String(_)) {
format!("'{}'", sv)
} else {
format!("{}", sv)
}
}
_ => unimplemented!("not implemented for {:?}", expr),
- }
+ })
}
diff --git a/src/frontend/src/tests.rs b/src/frontend/src/tests.rs
index 18ec172c10c1..7e59bb3908cf 100644
--- a/src/frontend/src/tests.rs
+++ b/src/frontend/src/tests.rs
@@ -69,8 +69,8 @@ pub(crate) async fn create_datanode_client(
// Move client to an option so we can _move_ the inner value
// on the first attempt to connect. All other attempts will fail.
let mut client = Some(client);
- // "0.0.0.0:3001" is just a placeholder, does not actually connect to it.
- let addr = "0.0.0.0:3001";
+ // "127.0.0.1:3001" is just a placeholder, does not actually connect to it.
+ let addr = "127.0.0.1:3001";
let channel_manager = ChannelManager::new();
channel_manager
.reset_with_connector(
diff --git a/src/meta-srv/src/handler/datanode_lease.rs b/src/meta-srv/src/handler/datanode_lease.rs
index ad2467287d62..66f94eeb31b0 100644
--- a/src/meta-srv/src/handler/datanode_lease.rs
+++ b/src/meta-srv/src/handler/datanode_lease.rs
@@ -78,7 +78,7 @@ mod tests {
let kv_store = Arc::new(MemStore::new());
let ctx = Context {
datanode_lease_secs: 30,
- server_addr: "0.0.0.0:0000".to_string(),
+ server_addr: "127.0.0.1:0000".to_string(),
kv_store,
election: None,
skip_all: Arc::new(AtomicBool::new(false)),
diff --git a/src/meta-srv/src/handler/response_header.rs b/src/meta-srv/src/handler/response_header.rs
index 97981eb497ce..509d3e9aefdb 100644
--- a/src/meta-srv/src/handler/response_header.rs
+++ b/src/meta-srv/src/handler/response_header.rs
@@ -55,7 +55,7 @@ mod tests {
let kv_store = Arc::new(MemStore::new());
let ctx = Context {
datanode_lease_secs: 30,
- server_addr: "0.0.0.0:0000".to_string(),
+ server_addr: "127.0.0.1:0000".to_string(),
kv_store,
election: None,
skip_all: Arc::new(AtomicBool::new(false)),
diff --git a/src/meta-srv/src/keys.rs b/src/meta-srv/src/keys.rs
index db292042c7f6..71a24acbd681 100644
--- a/src/meta-srv/src/keys.rs
+++ b/src/meta-srv/src/keys.rs
@@ -182,7 +182,7 @@ mod tests {
fn test_datanode_lease_value() {
let value = LeaseValue {
timestamp_millis: 111,
- node_addr: "0.0.0.0:3002".to_string(),
+ node_addr: "127.0.0.1:3002".to_string(),
};
let value_bytes: Vec<u8> = value.clone().try_into().unwrap();
diff --git a/src/meta-srv/src/metasrv.rs b/src/meta-srv/src/metasrv.rs
index e0f2548ce1f2..e6a8373f18c2 100644
--- a/src/meta-srv/src/metasrv.rs
+++ b/src/meta-srv/src/metasrv.rs
@@ -42,9 +42,9 @@ pub struct MetaSrvOptions {
impl Default for MetaSrvOptions {
fn default() -> Self {
Self {
- bind_addr: "0.0.0.0:3002".to_string(),
- server_addr: "0.0.0.0:3002".to_string(),
- store_addr: "0.0.0.0:2379".to_string(),
+ bind_addr: "127.0.0.1:3002".to_string(),
+ server_addr: "127.0.0.1:3002".to_string(),
+ store_addr: "127.0.0.1:2379".to_string(),
datanode_lease_secs: 15,
}
}
diff --git a/src/sql/src/statements.rs b/src/sql/src/statements.rs
index 9d24b5ebf0de..3b9ee90b2951 100644
--- a/src/sql/src/statements.rs
+++ b/src/sql/src/statements.rs
@@ -74,7 +74,7 @@ fn parse_string_to_value(
data_type: &ConcreteDataType,
) -> Result<Value> {
ensure!(
- data_type.is_string(),
+ data_type.stringifiable(),
ColumnTypeMismatchSnafu {
column_name,
expect: data_type.clone(),
|
feat
|
distributed execute gRPC and Prometheus query in Frontend (#520)
|
4fe7e162af000f198df5bcca0ef350c4442f4fd9
|
2023-10-10 12:52:12
|
Wei
|
fix: human_time mismatch (#2558)
| false
|
diff --git a/src/common/time/src/datetime.rs b/src/common/time/src/datetime.rs
index 39f451db8e11..451dc57cf7ed 100644
--- a/src/common/time/src/datetime.rs
+++ b/src/common/time/src/datetime.rs
@@ -93,18 +93,22 @@ impl From<Date> for DateTime {
}
impl DateTime {
- pub fn new(val: i64) -> Self {
- Self(val)
+ /// Create a new [DateTime] from milliseconds elapsed since "1970-01-01 00:00:00 UTC" (UNIX Epoch).
+ pub fn new(millis: i64) -> Self {
+ Self(millis)
}
+ /// Get the milliseconds elapsed since "1970-01-01 00:00:00 UTC" (UNIX Epoch).
pub fn val(&self) -> i64 {
self.0
}
+ /// Convert to [NaiveDateTime].
pub fn to_chrono_datetime(&self) -> Option<NaiveDateTime> {
NaiveDateTime::from_timestamp_millis(self.0)
}
+ /// Convert to [common_time::date].
pub fn to_date(&self) -> Option<Date> {
self.to_chrono_datetime().map(|d| Date::from(d.date()))
}
diff --git a/src/meta-srv/src/service/admin/node_lease.rs b/src/meta-srv/src/service/admin/node_lease.rs
index 3655cd3aae11..4b94674a25eb 100644
--- a/src/meta-srv/src/service/admin/node_lease.rs
+++ b/src/meta-srv/src/service/admin/node_lease.rs
@@ -43,7 +43,7 @@ impl HttpHandler for NodeLeaseHandler {
.into_iter()
.map(|(k, v)| HumanLease {
name: k,
- human_time: common_time::DateTime::new(v.timestamp_millis / 1000).to_string(),
+ human_time: common_time::DateTime::new(v.timestamp_millis).to_string(),
lease: v,
})
.collect::<Vec<_>>();
|
fix
|
human_time mismatch (#2558)
|
beb07fc8959fab3c37be19cd449f27c8f1b2fc1c
|
2022-12-05 17:29:23
|
Ruihang Xia
|
feat: new datatypes subcrate based on the official arrow (#705)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 2badba5f8c8e..a6f9216b6d22 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -40,6 +40,19 @@ dependencies = [
"version_check",
]
+[[package]]
+name = "ahash"
+version = "0.8.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bf6ccdb167abbf410dcb915cabd428929d7f6a04980b54a11f26a39f1c7f7107"
+dependencies = [
+ "cfg-if",
+ "const-random",
+ "getrandom 0.2.7",
+ "once_cell",
+ "version_check",
+]
+
[[package]]
name = "aho-corasick"
version = "0.7.19"
@@ -182,8 +195,8 @@ dependencies = [
"bitflags",
"chrono",
"csv",
- "flatbuffers",
- "half",
+ "flatbuffers 2.1.1",
+ "half 1.8.2",
"hex",
"indexmap",
"lazy_static",
@@ -197,6 +210,72 @@ dependencies = [
"serde_json",
]
+[[package]]
+name = "arrow"
+version = "26.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e24e2bcd431a4aa0ff003fdd2dc21c78cfb42f31459c89d2312c2746fe17a5ac"
+dependencies = [
+ "ahash 0.8.2",
+ "arrow-array",
+ "arrow-buffer",
+ "arrow-data",
+ "arrow-schema",
+ "arrow-select",
+ "bitflags",
+ "chrono",
+ "csv",
+ "flatbuffers 22.9.29",
+ "half 2.1.0",
+ "hashbrown",
+ "indexmap",
+ "lazy_static",
+ "lexical-core",
+ "multiversion",
+ "num",
+ "regex",
+ "regex-syntax",
+ "serde_json",
+]
+
+[[package]]
+name = "arrow-array"
+version = "26.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c9044300874385f19e77cbf90911e239bd23630d8f23bb0f948f9067998a13b7"
+dependencies = [
+ "ahash 0.8.2",
+ "arrow-buffer",
+ "arrow-data",
+ "arrow-schema",
+ "chrono",
+ "half 2.1.0",
+ "hashbrown",
+ "num",
+]
+
+[[package]]
+name = "arrow-buffer"
+version = "26.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "78476cbe9e3f808dcecab86afe42d573863c63e149c62e6e379ed2522743e626"
+dependencies = [
+ "half 2.1.0",
+ "num",
+]
+
+[[package]]
+name = "arrow-data"
+version = "26.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4d916feee158c485dad4f701cba31bc9a90a8db87d9df8e2aa8adc0c20a2bbb9"
+dependencies = [
+ "arrow-buffer",
+ "arrow-schema",
+ "half 2.1.0",
+ "num",
+]
+
[[package]]
name = "arrow-format"
version = "0.4.0"
@@ -207,13 +286,32 @@ dependencies = [
"serde",
]
+[[package]]
+name = "arrow-schema"
+version = "26.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0f9406eb7834ca6bd8350d1baa515d18b9fcec487eddacfb62f5e19511f7bd37"
+
+[[package]]
+name = "arrow-select"
+version = "26.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6593a01586751c74498495d2f5a01fcd438102b52965c11dd98abf4ebcacef37"
+dependencies = [
+ "arrow-array",
+ "arrow-buffer",
+ "arrow-data",
+ "arrow-schema",
+ "num",
+]
+
[[package]]
name = "arrow2"
version = "0.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2e387b20dd573a96f36b173d9027483898f944d696521afd74e2caa3c813d86e"
dependencies = [
- "ahash",
+ "ahash 0.7.6",
"arrow-format",
"base64",
"bytemuck",
@@ -551,7 +649,7 @@ checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd"
name = "benchmarks"
version = "0.1.0"
dependencies = [
- "arrow",
+ "arrow 10.0.0",
"clap 4.0.18",
"client",
"indicatif",
@@ -961,7 +1059,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "213030a2b5a4e0c0892b6652260cf6ccac84827b83a85a534e178e3906c4cf1b"
dependencies = [
"ciborium-io",
- "half",
+ "half 1.8.2",
]
[[package]]
@@ -1207,7 +1305,7 @@ dependencies = [
"common-function-macro",
"common-query",
"common-time",
- "datafusion-common",
+ "datafusion-common 7.0.0",
"datatypes",
"libc",
"num",
@@ -1283,7 +1381,7 @@ dependencies = [
"common-recordbatch",
"common-time",
"datafusion",
- "datafusion-common",
+ "datafusion-common 7.0.0",
"datafusion-expr",
"datatypes",
"snafu",
@@ -1297,7 +1395,7 @@ version = "0.1.0"
dependencies = [
"common-error",
"datafusion",
- "datafusion-common",
+ "datafusion-common 7.0.0",
"datatypes",
"futures",
"paste",
@@ -1412,6 +1510,28 @@ dependencies = [
"tracing-subscriber",
]
+[[package]]
+name = "const-random"
+version = "0.1.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "368a7a772ead6ce7e1de82bfb04c485f3db8ec744f72925af5735e29a22cc18e"
+dependencies = [
+ "const-random-macro",
+ "proc-macro-hack",
+]
+
+[[package]]
+name = "const-random-macro"
+version = "0.1.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9d7d6ab3c3a2282db210df5f02c4dab6e0a7057af0fb7ebd4070f30fe05c0ddb"
+dependencies = [
+ "getrandom 0.2.7",
+ "once_cell",
+ "proc-macro-hack",
+ "tiny-keccak",
+]
+
[[package]]
name = "constant_time_eq"
version = "0.1.5"
@@ -1724,12 +1844,12 @@ name = "datafusion"
version = "7.0.0"
source = "git+https://github.com/apache/arrow-datafusion.git?branch=arrow2#744b2626081db95a254fc882820fc7812f95aa51"
dependencies = [
- "ahash",
+ "ahash 0.7.6",
"arrow2",
"async-trait",
"chrono",
"comfy-table 5.0.1",
- "datafusion-common",
+ "datafusion-common 7.0.0",
"datafusion-expr",
"datafusion-physical-expr",
"futures",
@@ -1744,7 +1864,7 @@ dependencies = [
"pin-project-lite",
"rand 0.8.5",
"smallvec",
- "sqlparser",
+ "sqlparser 0.15.0",
"tempfile",
"tokio",
"tokio-stream",
@@ -1758,7 +1878,19 @@ dependencies = [
"arrow2",
"ordered-float 2.10.0",
"parquet2",
- "sqlparser",
+ "sqlparser 0.15.0",
+]
+
+[[package]]
+name = "datafusion-common"
+version = "14.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "15f1ffcbc1f040c9ab99f41db1c743d95aff267bb2e7286aaa010738b7402251"
+dependencies = [
+ "arrow 26.0.0",
+ "chrono",
+ "ordered-float 3.1.0",
+ "sqlparser 0.26.0",
]
[[package]]
@@ -1766,10 +1898,10 @@ name = "datafusion-expr"
version = "7.0.0"
source = "git+https://github.com/apache/arrow-datafusion.git?branch=arrow2#744b2626081db95a254fc882820fc7812f95aa51"
dependencies = [
- "ahash",
+ "ahash 0.7.6",
"arrow2",
- "datafusion-common",
- "sqlparser",
+ "datafusion-common 7.0.0",
+ "sqlparser 0.15.0",
]
[[package]]
@@ -1777,12 +1909,12 @@ name = "datafusion-physical-expr"
version = "7.0.0"
source = "git+https://github.com/apache/arrow-datafusion.git?branch=arrow2#744b2626081db95a254fc882820fc7812f95aa51"
dependencies = [
- "ahash",
+ "ahash 0.7.6",
"arrow2",
"blake2",
"blake3",
"chrono",
- "datafusion-common",
+ "datafusion-common 7.0.0",
"datafusion-expr",
"hashbrown",
"lazy_static",
@@ -1818,7 +1950,7 @@ dependencies = [
"common-telemetry",
"common-time",
"datafusion",
- "datafusion-common",
+ "datafusion-common 7.0.0",
"datatypes",
"frontend",
"futures",
@@ -1857,7 +1989,26 @@ dependencies = [
"common-base",
"common-error",
"common-time",
- "datafusion-common",
+ "datafusion-common 7.0.0",
+ "enum_dispatch",
+ "num",
+ "num-traits",
+ "ordered-float 3.1.0",
+ "paste",
+ "serde",
+ "serde_json",
+ "snafu",
+]
+
+[[package]]
+name = "datatypes2"
+version = "0.1.0"
+dependencies = [
+ "arrow 26.0.0",
+ "common-base",
+ "common-error",
+ "common-time",
+ "datafusion-common 14.0.0",
"enum_dispatch",
"num",
"num-traits",
@@ -2159,6 +2310,16 @@ dependencies = [
"thiserror",
]
+[[package]]
+name = "flatbuffers"
+version = "22.9.29"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8ce016b9901aef3579617931fbb2df8fc9a9f7cb95a16eb8acc8148209bb9e70"
+dependencies = [
+ "bitflags",
+ "thiserror",
+]
+
[[package]]
name = "flate2"
version = "1.0.24"
@@ -2215,7 +2376,7 @@ dependencies = [
"common-telemetry",
"common-time",
"datafusion",
- "datafusion-common",
+ "datafusion-common 7.0.0",
"datafusion-expr",
"datanode",
"datatypes",
@@ -2235,7 +2396,7 @@ dependencies = [
"session",
"snafu",
"sql",
- "sqlparser",
+ "sqlparser 0.15.0",
"store-api",
"table",
"tempdir",
@@ -2517,6 +2678,16 @@ version = "1.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7"
+[[package]]
+name = "half"
+version = "2.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ad6a9459c9c30b177b925162351f97e7d967c7ea8bab3b8352805327daf45554"
+dependencies = [
+ "crunchy",
+ "num-traits",
+]
+
[[package]]
name = "hash_hasher"
version = "2.0.3"
@@ -2529,7 +2700,7 @@ version = "0.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
dependencies = [
- "ahash",
+ "ahash 0.7.6",
]
[[package]]
@@ -3218,7 +3389,7 @@ version = "0.20.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7b9b8653cec6897f73b519a43fba5ee3d50f62fe9af80b428accdcc093b4a849"
dependencies = [
- "ahash",
+ "ahash 0.7.6",
"metrics-macros",
"portable-atomic",
]
@@ -3324,7 +3495,7 @@ dependencies = [
"common-telemetry",
"common-time",
"datafusion",
- "datafusion-common",
+ "datafusion-common 7.0.0",
"datatypes",
"futures",
"log-store",
@@ -3884,7 +4055,7 @@ version = "10.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "53e9c8fc20af9b92d85d42ec86e5217b2eaf1340fbba75c4b4296de764ea7921"
dependencies = [
- "arrow",
+ "arrow 10.0.0",
"base64",
"brotli",
"byteorder",
@@ -4504,7 +4675,7 @@ dependencies = [
"common-telemetry",
"common-time",
"datafusion",
- "datafusion-common",
+ "datafusion-common 7.0.0",
"datafusion-physical-expr",
"datatypes",
"format_num",
@@ -5035,7 +5206,7 @@ name = "rustpython-compiler-core"
version = "0.1.2"
source = "git+https://github.com/RustPython/RustPython?rev=02a1d1d#02a1d1d7db57afbb78049599c2585cc7cd59e6d3"
dependencies = [
- "ahash",
+ "ahash 0.7.6",
"indexmap",
"itertools",
"log",
@@ -5077,7 +5248,7 @@ name = "rustpython-parser"
version = "0.1.2"
source = "git+https://github.com/RustPython/RustPython?rev=02a1d1d#02a1d1d7db57afbb78049599c2585cc7cd59e6d3"
dependencies = [
- "ahash",
+ "ahash 0.7.6",
"lalrpop-util",
"log",
"num-bigint",
@@ -5106,7 +5277,7 @@ version = "0.1.2"
source = "git+https://github.com/RustPython/RustPython?rev=02a1d1d#02a1d1d7db57afbb78049599c2585cc7cd59e6d3"
dependencies = [
"adler32",
- "ahash",
+ "ahash 0.7.6",
"ascii",
"atty",
"bitflags",
@@ -5118,7 +5289,7 @@ dependencies = [
"exitcode",
"flate2",
"getrandom 0.2.7",
- "half",
+ "half 1.8.2",
"hex",
"hexf-parse",
"indexmap",
@@ -5343,7 +5514,7 @@ dependencies = [
"common-time",
"console",
"datafusion",
- "datafusion-common",
+ "datafusion-common 7.0.0",
"datafusion-expr",
"datafusion-physical-expr",
"datatypes",
@@ -5428,7 +5599,7 @@ version = "0.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2bef2ebfde456fb76bbcf9f59315333decc4fda0b2b44b420243c11e0f5ec1f5"
dependencies = [
- "half",
+ "half 1.8.2",
"serde",
]
@@ -5775,7 +5946,7 @@ dependencies = [
"mito",
"once_cell",
"snafu",
- "sqlparser",
+ "sqlparser 0.15.0",
]
[[package]]
@@ -5813,6 +5984,15 @@ dependencies = [
"log",
]
+[[package]]
+name = "sqlparser"
+version = "0.26.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "86be66ea0b2b22749cfa157d16e2e84bf793e626a3375f4d378dc289fa03affb"
+dependencies = [
+ "log",
+]
+
[[package]]
name = "sre-engine"
version = "0.1.2"
@@ -6118,7 +6298,7 @@ dependencies = [
"common-recordbatch",
"common-telemetry",
"datafusion",
- "datafusion-common",
+ "datafusion-common 7.0.0",
"datafusion-expr",
"datatypes",
"derive_builder",
diff --git a/Cargo.toml b/Cargo.toml
index d49c91a2661a..512f0906482c 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -20,6 +20,7 @@ members = [
"src/common/time",
"src/datanode",
"src/datatypes",
+ "src/datatypes2",
"src/frontend",
"src/log-store",
"src/meta-client",
diff --git a/src/common/recordbatch/src/recordbatch.rs b/src/common/recordbatch/src/recordbatch.rs
index b768a2f0bcc8..5fc886f8b9d1 100644
--- a/src/common/recordbatch/src/recordbatch.rs
+++ b/src/common/recordbatch/src/recordbatch.rs
@@ -23,6 +23,7 @@ use snafu::ResultExt;
use crate::error::{self, Result};
+// TODO(yingwen): We should hold vectors in the RecordBatch.
#[derive(Clone, Debug, PartialEq)]
pub struct RecordBatch {
pub schema: SchemaRef,
@@ -103,6 +104,7 @@ impl<'a> Iterator for RecordBatchRowIterator<'a> {
} else {
let mut row = Vec::with_capacity(self.columns);
+ // TODO(yingwen): Get from the vector if RecordBatch also holds vectors.
for col in 0..self.columns {
let column_array = self.record_batch.df_recordbatch.column(col);
match arrow_array_get(column_array.as_ref(), self.row_cursor)
diff --git a/src/common/time/src/timestamp.rs b/src/common/time/src/timestamp.rs
index fd0f148d9658..5ff20f702be3 100644
--- a/src/common/time/src/timestamp.rs
+++ b/src/common/time/src/timestamp.rs
@@ -147,6 +147,18 @@ impl From<i64> for Timestamp {
}
}
+impl From<Timestamp> for i64 {
+ fn from(t: Timestamp) -> Self {
+ t.value
+ }
+}
+
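+/// Converts a [Timestamp] into a JSON string value in ISO 8601 format.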
+impl From<Timestamp> for serde_json::Value {
+ fn from(d: Timestamp) -> Self {
+ serde_json::Value::String(d.to_iso8601_string())
+ }
+}
+
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum TimeUnit {
Second,
@@ -197,6 +209,7 @@ impl Hash for Timestamp {
#[cfg(test)]
mod tests {
use chrono::Offset;
+ use serde_json::Value;
use super::*;
@@ -318,4 +331,39 @@ mod tests {
let ts = Timestamp::from_millis(ts_millis);
assert_eq!("1969-12-31 23:59:58.999+0000", ts.to_iso8601_string());
}
+
+ #[test]
+ fn test_serialize_to_json_value() {
+ assert_eq!(
+ "1970-01-01 00:00:01+0000",
+ match serde_json::Value::from(Timestamp::new(1, TimeUnit::Second)) {
+ Value::String(s) => s,
+ _ => unreachable!(),
+ }
+ );
+
+ assert_eq!(
+ "1970-01-01 00:00:00.001+0000",
+ match serde_json::Value::from(Timestamp::new(1, TimeUnit::Millisecond)) {
+ Value::String(s) => s,
+ _ => unreachable!(),
+ }
+ );
+
+ assert_eq!(
+ "1970-01-01 00:00:00.000001+0000",
+ match serde_json::Value::from(Timestamp::new(1, TimeUnit::Microsecond)) {
+ Value::String(s) => s,
+ _ => unreachable!(),
+ }
+ );
+
+ assert_eq!(
+ "1970-01-01 00:00:00.000000001+0000",
+ match serde_json::Value::from(Timestamp::new(1, TimeUnit::Nanosecond)) {
+ Value::String(s) => s,
+ _ => unreachable!(),
+ }
+ );
+ }
}
diff --git a/src/datatypes2/Cargo.toml b/src/datatypes2/Cargo.toml
new file mode 100644
index 000000000000..34941606d4d2
--- /dev/null
+++ b/src/datatypes2/Cargo.toml
@@ -0,0 +1,24 @@
+[package]
+name = "datatypes2"
+version = "0.1.0"
+edition = "2021"
+license = "Apache-2.0"
+
+[features]
+default = []
+test = []
+
+[dependencies]
+common-base = { path = "../common/base" }
+common-error = { path = "../common/error" }
+common-time = { path = "../common/time" }
+datafusion-common = "14.0"
+enum_dispatch = "0.3"
+num = "0.4"
+num-traits = "0.2"
+ordered-float = { version = "3.0", features = ["serde"] }
+paste = "1.0"
+serde = { version = "1.0", features = ["derive"] }
+serde_json = "1.0"
+snafu = { version = "0.7", features = ["backtraces"] }
+arrow = "26.0"
diff --git a/src/datatypes2/src/arrow_array.rs b/src/datatypes2/src/arrow_array.rs
new file mode 100644
index 000000000000..7405c8a665af
--- /dev/null
+++ b/src/datatypes2/src/arrow_array.rs
@@ -0,0 +1,242 @@
+// Copyright 2022 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use arrow::array::{
+ Array, BooleanArray, Date32Array, Date64Array, Float32Array, Float64Array, Int16Array,
+ Int32Array, Int64Array, Int8Array, ListArray, UInt16Array, UInt32Array, UInt64Array,
+ UInt8Array,
+};
+use arrow::datatypes::DataType;
+use common_time::timestamp::TimeUnit;
+use common_time::Timestamp;
+use snafu::OptionExt;
+
+use crate::data_type::ConcreteDataType;
+use crate::error::{ConversionSnafu, Result};
+use crate::value::{ListValue, Value};
+
+pub type BinaryArray = arrow::array::LargeBinaryArray;
+pub type MutableBinaryArray = arrow::array::LargeBinaryBuilder;
+pub type StringArray = arrow::array::StringArray;
+pub type MutableStringArray = arrow::array::StringBuilder;
+
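+/// Downcasts `$arr` (a `&dyn Array`) to the concrete `$CastType`, returning a
+/// `Conversion` error if the runtime type does not match.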
+macro_rules! cast_array {
+ ($arr: ident, $CastType: ty) => {
+ $arr.as_any()
+ .downcast_ref::<$CastType>()
+ .with_context(|| ConversionSnafu {
+ from: format!("{:?}", $arr.data_type()),
+ })?
+ };
+}
+
+// TODO(yingwen): Remove this function.
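+/// Reads the element at `idx` from the arrow `array` and converts it into a [Value].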
+pub fn arrow_array_get(array: &dyn Array, idx: usize) -> Result<Value> {
+ if array.is_null(idx) {
+ return Ok(Value::Null);
+ }
+
+ let result = match array.data_type() {
+ DataType::Null => Value::Null,
+ DataType::Boolean => Value::Boolean(cast_array!(array, BooleanArray).value(idx)),
+ DataType::Binary => Value::Binary(cast_array!(array, BinaryArray).value(idx).into()),
+ DataType::Int8 => Value::Int8(cast_array!(array, Int8Array).value(idx)),
+ DataType::Int16 => Value::Int16(cast_array!(array, Int16Array).value(idx)),
+ DataType::Int32 => Value::Int32(cast_array!(array, Int32Array).value(idx)),
+ DataType::Int64 => Value::Int64(cast_array!(array, Int64Array).value(idx)),
+ DataType::UInt8 => Value::UInt8(cast_array!(array, UInt8Array).value(idx)),
+ DataType::UInt16 => Value::UInt16(cast_array!(array, UInt16Array).value(idx)),
+ DataType::UInt32 => Value::UInt32(cast_array!(array, UInt32Array).value(idx)),
+ DataType::UInt64 => Value::UInt64(cast_array!(array, UInt64Array).value(idx)),
+ DataType::Float32 => Value::Float32(cast_array!(array, Float32Array).value(idx).into()),
+ DataType::Float64 => Value::Float64(cast_array!(array, Float64Array).value(idx).into()),
+ DataType::Utf8 => Value::String(cast_array!(array, StringArray).value(idx).into()),
+ DataType::Date32 => Value::Date(cast_array!(array, Date32Array).value(idx).into()),
+ DataType::Date64 => Value::DateTime(cast_array!(array, Date64Array).value(idx).into()),
+ DataType::Timestamp(t, _) => match t {
+ arrow::datatypes::TimeUnit::Second => Value::Timestamp(Timestamp::new(
+ cast_array!(array, arrow::array::TimestampSecondArray).value(idx),
+ TimeUnit::Second,
+ )),
+ arrow::datatypes::TimeUnit::Millisecond => Value::Timestamp(Timestamp::new(
+ cast_array!(array, arrow::array::TimestampMillisecondArray).value(idx),
+ TimeUnit::Millisecond,
+ )),
+ arrow::datatypes::TimeUnit::Microsecond => Value::Timestamp(Timestamp::new(
+ cast_array!(array, arrow::array::TimestampMicrosecondArray).value(idx),
+ TimeUnit::Microsecond,
+ )),
+ arrow::datatypes::TimeUnit::Nanosecond => Value::Timestamp(Timestamp::new(
+ cast_array!(array, arrow::array::TimestampNanosecondArray).value(idx),
+ TimeUnit::Nanosecond,
+ )),
+ },
+ DataType::List(_) => {
+ let array = cast_array!(array, ListArray).value(idx);
+ let item_type = ConcreteDataType::try_from(array.data_type())?;
+ let values = (0..array.len())
+ .map(|i| arrow_array_get(&*array, i))
+ .collect::<Result<Vec<Value>>>()?;
+ Value::List(ListValue::new(Some(Box::new(values)), item_type))
+ }
+ _ => unimplemented!("Arrow array datatype: {:?}", array.data_type()),
+ };
+
+ Ok(result)
+}
+
+#[cfg(test)]
+mod test {
+ use std::sync::Arc;
+
+ use arrow::array::{
+ BooleanArray, Float32Array, Float64Array, Int16Array, Int32Array, Int64Array, Int8Array,
+ LargeBinaryArray, TimestampMicrosecondArray, TimestampMillisecondArray,
+ TimestampNanosecondArray, TimestampSecondArray, UInt16Array, UInt32Array, UInt64Array,
+ UInt8Array,
+ };
+ use arrow::datatypes::Int32Type;
+ use common_time::timestamp::{TimeUnit, Timestamp};
+ use paste::paste;
+
+ use super::*;
+ use crate::data_type::ConcreteDataType;
+ use crate::types::TimestampType;
+
+ macro_rules! test_arrow_array_get_for_timestamps {
+ ( $($unit: ident), *) => {
+ $(
+ paste! {
+ let mut builder = arrow::array::[<Timestamp $unit Array>]::builder(3);
+ builder.append_value(1);
+ builder.append_value(0);
+ builder.append_value(-1);
+ let ts_array = Arc::new(builder.finish()) as Arc<dyn Array>;
+ let v = arrow_array_get(&ts_array, 1).unwrap();
+ assert_eq!(
+ ConcreteDataType::Timestamp(TimestampType::$unit(
+ $crate::types::[<Timestamp $unit Type>]::default(),
+ )),
+ v.data_type()
+ );
+ }
+ )*
+ };
+ }
+
+ #[test]
+ fn test_timestamp_array() {
+ test_arrow_array_get_for_timestamps![Second, Millisecond, Microsecond, Nanosecond];
+ }
+
+ #[test]
+ fn test_arrow_array_access() {
+ let array1 = BooleanArray::from(vec![true, true, false, false]);
+ assert_eq!(Value::Boolean(true), arrow_array_get(&array1, 1).unwrap());
+ let array1 = Int8Array::from(vec![1, 2, 3, 4]);
+ assert_eq!(Value::Int8(2), arrow_array_get(&array1, 1).unwrap());
+ let array1 = UInt8Array::from(vec![1, 2, 3, 4]);
+ assert_eq!(Value::UInt8(2), arrow_array_get(&array1, 1).unwrap());
+ let array1 = Int16Array::from(vec![1, 2, 3, 4]);
+ assert_eq!(Value::Int16(2), arrow_array_get(&array1, 1).unwrap());
+ let array1 = UInt16Array::from(vec![1, 2, 3, 4]);
+ assert_eq!(Value::UInt16(2), arrow_array_get(&array1, 1).unwrap());
+ let array1 = Int32Array::from(vec![1, 2, 3, 4]);
+ assert_eq!(Value::Int32(2), arrow_array_get(&array1, 1).unwrap());
+ let array1 = UInt32Array::from(vec![1, 2, 3, 4]);
+ assert_eq!(Value::UInt32(2), arrow_array_get(&array1, 1).unwrap());
+ let array = Int64Array::from(vec![1, 2, 3, 4]);
+ assert_eq!(Value::Int64(2), arrow_array_get(&array, 1).unwrap());
+ let array1 = UInt64Array::from(vec![1, 2, 3, 4]);
+ assert_eq!(Value::UInt64(2), arrow_array_get(&array1, 1).unwrap());
+ let array1 = Float32Array::from(vec![1f32, 2f32, 3f32, 4f32]);
+ assert_eq!(
+ Value::Float32(2f32.into()),
+ arrow_array_get(&array1, 1).unwrap()
+ );
+ let array1 = Float64Array::from(vec![1f64, 2f64, 3f64, 4f64]);
+ assert_eq!(
+ Value::Float64(2f64.into()),
+ arrow_array_get(&array1, 1).unwrap()
+ );
+
+ let array2 = StringArray::from(vec![Some("hello"), None, Some("world")]);
+ assert_eq!(
+ Value::String("hello".into()),
+ arrow_array_get(&array2, 0).unwrap()
+ );
+ assert_eq!(Value::Null, arrow_array_get(&array2, 1).unwrap());
+
+ let array3 = LargeBinaryArray::from(vec![
+ Some("hello".as_bytes()),
+ None,
+ Some("world".as_bytes()),
+ ]);
+ assert_eq!(Value::Null, arrow_array_get(&array3, 1).unwrap());
+
+ let array = TimestampSecondArray::from(vec![1, 2, 3]);
+ let value = arrow_array_get(&array, 1).unwrap();
+ assert_eq!(value, Value::Timestamp(Timestamp::new(2, TimeUnit::Second)));
+ let array = TimestampMillisecondArray::from(vec![1, 2, 3]);
+ let value = arrow_array_get(&array, 1).unwrap();
+ assert_eq!(
+ value,
+ Value::Timestamp(Timestamp::new(2, TimeUnit::Millisecond))
+ );
+ let array = TimestampMicrosecondArray::from(vec![1, 2, 3]);
+ let value = arrow_array_get(&array, 1).unwrap();
+ assert_eq!(
+ value,
+ Value::Timestamp(Timestamp::new(2, TimeUnit::Microsecond))
+ );
+ let array = TimestampNanosecondArray::from(vec![1, 2, 3]);
+ let value = arrow_array_get(&array, 1).unwrap();
+ assert_eq!(
+ value,
+ Value::Timestamp(Timestamp::new(2, TimeUnit::Nanosecond))
+ );
+
+ // test list array
+ let data = vec![
+ Some(vec![Some(1), Some(2), Some(3)]),
+ None,
+ Some(vec![Some(4), None, Some(6)]),
+ ];
+ let arrow_array = ListArray::from_iter_primitive::<Int32Type, _, _>(data);
+
+ let v0 = arrow_array_get(&arrow_array, 0).unwrap();
+ match v0 {
+ Value::List(list) => {
+ assert!(matches!(list.datatype(), ConcreteDataType::Int32(_)));
+ let items = list.items().as_ref().unwrap();
+ assert_eq!(
+ **items,
+ vec![Value::Int32(1), Value::Int32(2), Value::Int32(3)]
+ );
+ }
+ _ => unreachable!(),
+ }
+
+ assert_eq!(Value::Null, arrow_array_get(&arrow_array, 1).unwrap());
+ let v2 = arrow_array_get(&arrow_array, 2).unwrap();
+ match v2 {
+ Value::List(list) => {
+ assert!(matches!(list.datatype(), ConcreteDataType::Int32(_)));
+ let items = list.items().as_ref().unwrap();
+ assert_eq!(**items, vec![Value::Int32(4), Value::Null, Value::Int32(6)]);
+ }
+ _ => unreachable!(),
+ }
+ }
+}
diff --git a/src/datatypes2/src/data_type.rs b/src/datatypes2/src/data_type.rs
new file mode 100644
index 000000000000..0d06d566b667
--- /dev/null
+++ b/src/datatypes2/src/data_type.rs
@@ -0,0 +1,486 @@
+// Copyright 2022 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::sync::Arc;
+
+use arrow::datatypes::{DataType as ArrowDataType, TimeUnit as ArrowTimeUnit};
+use common_time::timestamp::TimeUnit;
+use paste::paste;
+use serde::{Deserialize, Serialize};
+
+use crate::error::{self, Error, Result};
+use crate::type_id::LogicalTypeId;
+use crate::types::{
+ BinaryType, BooleanType, DateTimeType, DateType, Float32Type, Float64Type, Int16Type,
+ Int32Type, Int64Type, Int8Type, ListType, NullType, StringType, TimestampMicrosecondType,
+ TimestampMillisecondType, TimestampNanosecondType, TimestampSecondType, TimestampType,
+ UInt16Type, UInt32Type, UInt64Type, UInt8Type,
+};
+use crate::value::Value;
+use crate::vectors::MutableVector;
+
+#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
+#[enum_dispatch::enum_dispatch(DataType)]
+pub enum ConcreteDataType {
+ Null(NullType),
+ Boolean(BooleanType),
+
+ // Numeric types:
+ Int8(Int8Type),
+ Int16(Int16Type),
+ Int32(Int32Type),
+ Int64(Int64Type),
+ UInt8(UInt8Type),
+ UInt16(UInt16Type),
+ UInt32(UInt32Type),
+ UInt64(UInt64Type),
+ Float32(Float32Type),
+ Float64(Float64Type),
+
+ // String types:
+ Binary(BinaryType),
+ String(StringType),
+
+ // Date types:
+ Date(DateType),
+ DateTime(DateTimeType),
+ Timestamp(TimestampType),
+
+ // Compound types:
+ List(ListType),
+}
+
+// TODO(yingwen): Refactor these `is_xxx()` methods, such as by adding a `properties()`
+// method to the `DataType` trait that returns all these properties.
+impl ConcreteDataType {
+ pub fn is_float(&self) -> bool {
+ matches!(
+ self,
+ ConcreteDataType::Float64(_) | ConcreteDataType::Float32(_)
+ )
+ }
+
+ pub fn is_boolean(&self) -> bool {
+ matches!(self, ConcreteDataType::Boolean(_))
+ }
+
+ pub fn is_stringifiable(&self) -> bool {
+ matches!(
+ self,
+ ConcreteDataType::String(_)
+ | ConcreteDataType::Date(_)
+ | ConcreteDataType::DateTime(_)
+ | ConcreteDataType::Timestamp(_)
+ )
+ }
+
+ pub fn is_signed(&self) -> bool {
+ matches!(
+ self,
+ ConcreteDataType::Int8(_)
+ | ConcreteDataType::Int16(_)
+ | ConcreteDataType::Int32(_)
+ | ConcreteDataType::Int64(_)
+ | ConcreteDataType::Date(_)
+ | ConcreteDataType::DateTime(_)
+ | ConcreteDataType::Timestamp(_)
+ )
+ }
+
+ pub fn is_unsigned(&self) -> bool {
+ matches!(
+ self,
+ ConcreteDataType::UInt8(_)
+ | ConcreteDataType::UInt16(_)
+ | ConcreteDataType::UInt32(_)
+ | ConcreteDataType::UInt64(_)
+ )
+ }
+
+ pub fn numerics() -> Vec<ConcreteDataType> {
+ vec![
+ ConcreteDataType::int8_datatype(),
+ ConcreteDataType::int16_datatype(),
+ ConcreteDataType::int32_datatype(),
+ ConcreteDataType::int64_datatype(),
+ ConcreteDataType::uint8_datatype(),
+ ConcreteDataType::uint16_datatype(),
+ ConcreteDataType::uint32_datatype(),
+ ConcreteDataType::uint64_datatype(),
+ ConcreteDataType::float32_datatype(),
+ ConcreteDataType::float64_datatype(),
+ ]
+ }
+
+ /// Convert arrow data type to [ConcreteDataType].
+ ///
+ /// # Panics
+    /// Panics if the given arrow data type is not supported.
+ pub fn from_arrow_type(dt: &ArrowDataType) -> Self {
+ ConcreteDataType::try_from(dt).expect("Unimplemented type")
+ }
+
+ pub fn is_null(&self) -> bool {
+ matches!(self, ConcreteDataType::Null(NullType))
+ }
+}
+
+impl TryFrom<&ArrowDataType> for ConcreteDataType {
+ type Error = Error;
+
+ fn try_from(dt: &ArrowDataType) -> Result<ConcreteDataType> {
+ let concrete_type = match dt {
+ ArrowDataType::Null => Self::null_datatype(),
+ ArrowDataType::Boolean => Self::boolean_datatype(),
+ ArrowDataType::UInt8 => Self::uint8_datatype(),
+ ArrowDataType::UInt16 => Self::uint16_datatype(),
+ ArrowDataType::UInt32 => Self::uint32_datatype(),
+ ArrowDataType::UInt64 => Self::uint64_datatype(),
+ ArrowDataType::Int8 => Self::int8_datatype(),
+ ArrowDataType::Int16 => Self::int16_datatype(),
+ ArrowDataType::Int32 => Self::int32_datatype(),
+ ArrowDataType::Int64 => Self::int64_datatype(),
+ ArrowDataType::Float32 => Self::float32_datatype(),
+ ArrowDataType::Float64 => Self::float64_datatype(),
+ ArrowDataType::Date32 => Self::date_datatype(),
+ ArrowDataType::Date64 => Self::datetime_datatype(),
+ ArrowDataType::Timestamp(u, _) => ConcreteDataType::from_arrow_time_unit(u),
+ ArrowDataType::Binary | ArrowDataType::LargeBinary => Self::binary_datatype(),
+ ArrowDataType::Utf8 | ArrowDataType::LargeUtf8 => Self::string_datatype(),
+ ArrowDataType::List(field) => Self::List(ListType::new(
+ ConcreteDataType::from_arrow_type(field.data_type()),
+ )),
+ _ => {
+ return error::UnsupportedArrowTypeSnafu {
+ arrow_type: dt.clone(),
+ }
+ .fail()
+ }
+ };
+
+ Ok(concrete_type)
+ }
+}
+
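+/// Generates `xxx_datatype()` constructors (e.g. `int32_datatype()`) for all the
+/// listed concrete types.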
+macro_rules! impl_new_concrete_type_functions {
+ ($($Type: ident), +) => {
+ paste! {
+ impl ConcreteDataType {
+ $(
+ pub fn [<$Type:lower _datatype>]() -> ConcreteDataType {
+ ConcreteDataType::$Type([<$Type Type>]::default())
+ }
+ )+
+ }
+ }
+ }
+}
+
+impl_new_concrete_type_functions!(
+ Null, Boolean, UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64, Float32, Float64,
+ Binary, Date, DateTime, String
+);
+
+impl ConcreteDataType {
+ pub fn timestamp_second_datatype() -> Self {
+ ConcreteDataType::Timestamp(TimestampType::Second(TimestampSecondType::default()))
+ }
+
+ pub fn timestamp_millisecond_datatype() -> Self {
+ ConcreteDataType::Timestamp(TimestampType::Millisecond(
+ TimestampMillisecondType::default(),
+ ))
+ }
+
+ pub fn timestamp_microsecond_datatype() -> Self {
+ ConcreteDataType::Timestamp(TimestampType::Microsecond(
+ TimestampMicrosecondType::default(),
+ ))
+ }
+
+ pub fn timestamp_nanosecond_datatype() -> Self {
+ ConcreteDataType::Timestamp(TimestampType::Nanosecond(TimestampNanosecondType::default()))
+ }
+
+ pub fn timestamp_datatype(unit: TimeUnit) -> Self {
+ match unit {
+ TimeUnit::Second => Self::timestamp_second_datatype(),
+ TimeUnit::Millisecond => Self::timestamp_millisecond_datatype(),
+ TimeUnit::Microsecond => Self::timestamp_microsecond_datatype(),
+ TimeUnit::Nanosecond => Self::timestamp_nanosecond_datatype(),
+ }
+ }
+
+    /// Converts an arrow time unit to the corresponding timestamp [ConcreteDataType].
+ pub fn from_arrow_time_unit(t: &ArrowTimeUnit) -> Self {
+ match t {
+ ArrowTimeUnit::Second => Self::timestamp_second_datatype(),
+ ArrowTimeUnit::Millisecond => Self::timestamp_millisecond_datatype(),
+ ArrowTimeUnit::Microsecond => Self::timestamp_microsecond_datatype(),
+ ArrowTimeUnit::Nanosecond => Self::timestamp_nanosecond_datatype(),
+ }
+ }
+
+ pub fn list_datatype(item_type: ConcreteDataType) -> ConcreteDataType {
+ ConcreteDataType::List(ListType::new(item_type))
+ }
+}
+
+/// Data type abstraction.
+#[enum_dispatch::enum_dispatch]
+pub trait DataType: std::fmt::Debug + Send + Sync {
+ /// Name of this data type.
+ fn name(&self) -> &str;
+
+    /// Returns the id of the logical data type.
+ fn logical_type_id(&self) -> LogicalTypeId;
+
+ /// Returns the default value of this type.
+ fn default_value(&self) -> Value;
+
+    /// Converts this type to [arrow::datatypes::DataType].
+ fn as_arrow_type(&self) -> ArrowDataType;
+
+    /// Creates a mutable vector of this type with the given `capacity`.
+ fn create_mutable_vector(&self, capacity: usize) -> Box<dyn MutableVector>;
+
+ /// Returns true if the data type is compatible with timestamp type so we can
+ /// use it as a timestamp.
+ fn is_timestamp_compatible(&self) -> bool;
+}
+
+pub type DataTypeRef = Arc<dyn DataType>;
+
+#[cfg(test)]
+mod tests {
+ use arrow::datatypes::Field;
+
+ use super::*;
+
+ #[test]
+ fn test_concrete_type_as_datatype_trait() {
+ let concrete_type = ConcreteDataType::boolean_datatype();
+
+ assert_eq!("Boolean", concrete_type.name());
+ assert_eq!(Value::Boolean(false), concrete_type.default_value());
+ assert_eq!(LogicalTypeId::Boolean, concrete_type.logical_type_id());
+ assert_eq!(ArrowDataType::Boolean, concrete_type.as_arrow_type());
+ }
+
+ #[test]
+ fn test_from_arrow_type() {
+ assert!(matches!(
+ ConcreteDataType::from_arrow_type(&ArrowDataType::Null),
+ ConcreteDataType::Null(_)
+ ));
+ assert!(matches!(
+ ConcreteDataType::from_arrow_type(&ArrowDataType::Boolean),
+ ConcreteDataType::Boolean(_)
+ ));
+ assert!(matches!(
+ ConcreteDataType::from_arrow_type(&ArrowDataType::Binary),
+ ConcreteDataType::Binary(_)
+ ));
+ assert!(matches!(
+ ConcreteDataType::from_arrow_type(&ArrowDataType::LargeBinary),
+ ConcreteDataType::Binary(_)
+ ));
+ assert!(matches!(
+ ConcreteDataType::from_arrow_type(&ArrowDataType::Int8),
+ ConcreteDataType::Int8(_)
+ ));
+ assert!(matches!(
+ ConcreteDataType::from_arrow_type(&ArrowDataType::Int16),
+ ConcreteDataType::Int16(_)
+ ));
+ assert!(matches!(
+ ConcreteDataType::from_arrow_type(&ArrowDataType::Int32),
+ ConcreteDataType::Int32(_)
+ ));
+ assert!(matches!(
+ ConcreteDataType::from_arrow_type(&ArrowDataType::Int64),
+ ConcreteDataType::Int64(_)
+ ));
+ assert!(matches!(
+ ConcreteDataType::from_arrow_type(&ArrowDataType::UInt8),
+ ConcreteDataType::UInt8(_)
+ ));
+ assert!(matches!(
+ ConcreteDataType::from_arrow_type(&ArrowDataType::UInt16),
+ ConcreteDataType::UInt16(_)
+ ));
+ assert!(matches!(
+ ConcreteDataType::from_arrow_type(&ArrowDataType::UInt32),
+ ConcreteDataType::UInt32(_)
+ ));
+ assert!(matches!(
+ ConcreteDataType::from_arrow_type(&ArrowDataType::UInt64),
+ ConcreteDataType::UInt64(_)
+ ));
+ assert!(matches!(
+ ConcreteDataType::from_arrow_type(&ArrowDataType::Float32),
+ ConcreteDataType::Float32(_)
+ ));
+ assert!(matches!(
+ ConcreteDataType::from_arrow_type(&ArrowDataType::Float64),
+ ConcreteDataType::Float64(_)
+ ));
+ assert!(matches!(
+ ConcreteDataType::from_arrow_type(&ArrowDataType::Utf8),
+ ConcreteDataType::String(_)
+ ));
+ assert_eq!(
+ ConcreteDataType::from_arrow_type(&ArrowDataType::List(Box::new(Field::new(
+ "item",
+ ArrowDataType::Int32,
+ true,
+ )))),
+ ConcreteDataType::List(ListType::new(ConcreteDataType::int32_datatype()))
+ );
+ assert!(matches!(
+ ConcreteDataType::from_arrow_type(&ArrowDataType::Date32),
+ ConcreteDataType::Date(_)
+ ));
+ }
+
+ #[test]
+ fn test_from_arrow_timestamp() {
+ assert_eq!(
+ ConcreteDataType::timestamp_millisecond_datatype(),
+ ConcreteDataType::from_arrow_time_unit(&ArrowTimeUnit::Millisecond)
+ );
+ assert_eq!(
+ ConcreteDataType::timestamp_microsecond_datatype(),
+ ConcreteDataType::from_arrow_time_unit(&ArrowTimeUnit::Microsecond)
+ );
+ assert_eq!(
+ ConcreteDataType::timestamp_nanosecond_datatype(),
+ ConcreteDataType::from_arrow_time_unit(&ArrowTimeUnit::Nanosecond)
+ );
+ assert_eq!(
+ ConcreteDataType::timestamp_second_datatype(),
+ ConcreteDataType::from_arrow_time_unit(&ArrowTimeUnit::Second)
+ );
+ }
+
+ #[test]
+ fn test_is_timestamp_compatible() {
+ assert!(ConcreteDataType::timestamp_datatype(TimeUnit::Second).is_timestamp_compatible());
+ assert!(
+ ConcreteDataType::timestamp_datatype(TimeUnit::Millisecond).is_timestamp_compatible()
+ );
+ assert!(
+ ConcreteDataType::timestamp_datatype(TimeUnit::Microsecond).is_timestamp_compatible()
+ );
+ assert!(
+ ConcreteDataType::timestamp_datatype(TimeUnit::Nanosecond).is_timestamp_compatible()
+ );
+ assert!(ConcreteDataType::timestamp_second_datatype().is_timestamp_compatible());
+ assert!(ConcreteDataType::timestamp_millisecond_datatype().is_timestamp_compatible());
+ assert!(ConcreteDataType::timestamp_microsecond_datatype().is_timestamp_compatible());
+ assert!(ConcreteDataType::timestamp_nanosecond_datatype().is_timestamp_compatible());
+ assert!(ConcreteDataType::int64_datatype().is_timestamp_compatible());
+ assert!(!ConcreteDataType::null_datatype().is_timestamp_compatible());
+ assert!(!ConcreteDataType::binary_datatype().is_timestamp_compatible());
+ assert!(!ConcreteDataType::boolean_datatype().is_timestamp_compatible());
+ assert!(!ConcreteDataType::date_datatype().is_timestamp_compatible());
+ assert!(!ConcreteDataType::datetime_datatype().is_timestamp_compatible());
+ assert!(!ConcreteDataType::string_datatype().is_timestamp_compatible());
+ assert!(!ConcreteDataType::int32_datatype().is_timestamp_compatible());
+ assert!(!ConcreteDataType::uint64_datatype().is_timestamp_compatible());
+ }
+
+ #[test]
+ fn test_is_null() {
+ assert!(ConcreteDataType::null_datatype().is_null());
+ assert!(!ConcreteDataType::int32_datatype().is_null());
+ }
+
+ #[test]
+ fn test_is_float() {
+ assert!(!ConcreteDataType::int32_datatype().is_float());
+ assert!(ConcreteDataType::float32_datatype().is_float());
+ assert!(ConcreteDataType::float64_datatype().is_float());
+ }
+
+ #[test]
+ fn test_is_boolean() {
+ assert!(!ConcreteDataType::int32_datatype().is_boolean());
+ assert!(!ConcreteDataType::float32_datatype().is_boolean());
+ assert!(ConcreteDataType::boolean_datatype().is_boolean());
+ }
+
+ #[test]
+ fn test_is_stringifiable() {
+ assert!(!ConcreteDataType::int32_datatype().is_stringifiable());
+ assert!(!ConcreteDataType::float32_datatype().is_stringifiable());
+ assert!(ConcreteDataType::string_datatype().is_stringifiable());
+ assert!(ConcreteDataType::date_datatype().is_stringifiable());
+ assert!(ConcreteDataType::datetime_datatype().is_stringifiable());
+ assert!(ConcreteDataType::timestamp_second_datatype().is_stringifiable());
+ assert!(ConcreteDataType::timestamp_millisecond_datatype().is_stringifiable());
+ assert!(ConcreteDataType::timestamp_microsecond_datatype().is_stringifiable());
+ assert!(ConcreteDataType::timestamp_nanosecond_datatype().is_stringifiable());
+ }
+
+ #[test]
+ fn test_is_signed() {
+ assert!(ConcreteDataType::int8_datatype().is_signed());
+ assert!(ConcreteDataType::int16_datatype().is_signed());
+ assert!(ConcreteDataType::int32_datatype().is_signed());
+ assert!(ConcreteDataType::int64_datatype().is_signed());
+ assert!(ConcreteDataType::date_datatype().is_signed());
+ assert!(ConcreteDataType::datetime_datatype().is_signed());
+ assert!(ConcreteDataType::timestamp_second_datatype().is_signed());
+ assert!(ConcreteDataType::timestamp_millisecond_datatype().is_signed());
+ assert!(ConcreteDataType::timestamp_microsecond_datatype().is_signed());
+ assert!(ConcreteDataType::timestamp_nanosecond_datatype().is_signed());
+
+ assert!(!ConcreteDataType::uint8_datatype().is_signed());
+ assert!(!ConcreteDataType::uint16_datatype().is_signed());
+ assert!(!ConcreteDataType::uint32_datatype().is_signed());
+ assert!(!ConcreteDataType::uint64_datatype().is_signed());
+
+ assert!(!ConcreteDataType::float32_datatype().is_signed());
+ assert!(!ConcreteDataType::float64_datatype().is_signed());
+ }
+
+ #[test]
+ fn test_is_unsigned() {
+ assert!(!ConcreteDataType::int8_datatype().is_unsigned());
+ assert!(!ConcreteDataType::int16_datatype().is_unsigned());
+ assert!(!ConcreteDataType::int32_datatype().is_unsigned());
+ assert!(!ConcreteDataType::int64_datatype().is_unsigned());
+ assert!(!ConcreteDataType::date_datatype().is_unsigned());
+ assert!(!ConcreteDataType::datetime_datatype().is_unsigned());
+ assert!(!ConcreteDataType::timestamp_second_datatype().is_unsigned());
+ assert!(!ConcreteDataType::timestamp_millisecond_datatype().is_unsigned());
+ assert!(!ConcreteDataType::timestamp_microsecond_datatype().is_unsigned());
+ assert!(!ConcreteDataType::timestamp_nanosecond_datatype().is_unsigned());
+
+ assert!(ConcreteDataType::uint8_datatype().is_unsigned());
+ assert!(ConcreteDataType::uint16_datatype().is_unsigned());
+ assert!(ConcreteDataType::uint32_datatype().is_unsigned());
+ assert!(ConcreteDataType::uint64_datatype().is_unsigned());
+
+ assert!(!ConcreteDataType::float32_datatype().is_unsigned());
+ assert!(!ConcreteDataType::float64_datatype().is_unsigned());
+ }
+
+ #[test]
+ fn test_numerics() {
+ let nums = ConcreteDataType::numerics();
+ assert_eq!(10, nums.len());
+ }
+}
diff --git a/src/datatypes2/src/error.rs b/src/datatypes2/src/error.rs
new file mode 100644
index 000000000000..50b49cf2b4bb
--- /dev/null
+++ b/src/datatypes2/src/error.rs
@@ -0,0 +1,144 @@
+// Copyright 2022 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::any::Any;
+
+use common_error::prelude::{ErrorCompat, ErrorExt, Snafu, StatusCode};
+use snafu::Backtrace;
+
+#[derive(Debug, Snafu)]
+#[snafu(visibility(pub))]
+pub enum Error {
+ #[snafu(display("Failed to serialize data, source: {}", source))]
+ Serialize {
+ source: serde_json::Error,
+ backtrace: Backtrace,
+ },
+
+ #[snafu(display("Failed to deserialize data, source: {}, json: {}", source, json))]
+ Deserialize {
+ source: serde_json::Error,
+ backtrace: Backtrace,
+ json: String,
+ },
+
+ #[snafu(display("Failed to convert datafusion type: {}", from))]
+ Conversion { from: String, backtrace: Backtrace },
+
+    #[snafu(display("Bad array access, index out of bounds: {}, size: {}", index, size))]
+ BadArrayAccess {
+ index: usize,
+ size: usize,
+ backtrace: Backtrace,
+ },
+
+ #[snafu(display("Unknown vector, {}", msg))]
+ UnknownVector { msg: String, backtrace: Backtrace },
+
+ #[snafu(display("Unsupported arrow data type, type: {:?}", arrow_type))]
+ UnsupportedArrowType {
+ arrow_type: arrow::datatypes::DataType,
+ backtrace: Backtrace,
+ },
+
+ #[snafu(display("Timestamp column {} not found", name,))]
+ TimestampNotFound { name: String, backtrace: Backtrace },
+
+ #[snafu(display(
+ "Failed to parse version in schema meta, value: {}, source: {}",
+ value,
+ source
+ ))]
+ ParseSchemaVersion {
+ value: String,
+ source: std::num::ParseIntError,
+ backtrace: Backtrace,
+ },
+
+ #[snafu(display("Invalid timestamp index: {}", index))]
+ InvalidTimestampIndex { index: usize, backtrace: Backtrace },
+
+ #[snafu(display("Duplicate timestamp index, exists: {}, new: {}", exists, new))]
+ DuplicateTimestampIndex {
+ exists: usize,
+ new: usize,
+ backtrace: Backtrace,
+ },
+
+ #[snafu(display("{}", msg))]
+ CastType { msg: String, backtrace: Backtrace },
+
+ #[snafu(display("Arrow failed to compute, source: {}", source))]
+ ArrowCompute {
+ source: arrow::error::ArrowError,
+ backtrace: Backtrace,
+ },
+
+ #[snafu(display("Unsupported column default constraint expression: {}", expr))]
+ UnsupportedDefaultExpr { expr: String, backtrace: Backtrace },
+
+ #[snafu(display("Default value should not be null for non null column"))]
+ NullDefault { backtrace: Backtrace },
+
+ #[snafu(display("Incompatible default value type, reason: {}", reason))]
+ DefaultValueType {
+ reason: String,
+ backtrace: Backtrace,
+ },
+
+ #[snafu(display("Duplicated metadata for {}", key))]
+ DuplicateMeta { key: String, backtrace: Backtrace },
+}
+
+impl ErrorExt for Error {
+ fn status_code(&self) -> StatusCode {
+ // Inner encoding and decoding error should not be exposed to users.
+ StatusCode::Internal
+ }
+
+ fn backtrace_opt(&self) -> Option<&Backtrace> {
+ ErrorCompat::backtrace(self)
+ }
+
+ fn as_any(&self) -> &dyn Any {
+ self
+ }
+}
+
+pub type Result<T> = std::result::Result<T, Error>;
+
+#[cfg(test)]
+mod tests {
+ use std::collections::HashMap;
+
+ use snafu::ResultExt;
+
+ use super::*;
+
+ #[test]
+ pub fn test_error() {
+ let mut map = HashMap::new();
+ map.insert(true, 1);
+ map.insert(false, 2);
+
+ let result = serde_json::to_string(&map).context(SerializeSnafu);
+ assert!(result.is_err(), "serialize result is: {:?}", result);
+ let err = serde_json::to_string(&map)
+ .context(SerializeSnafu)
+ .err()
+ .unwrap();
+ assert!(err.backtrace_opt().is_some());
+ assert_eq!(StatusCode::Internal, err.status_code());
+ }
+}
diff --git a/src/datatypes2/src/lib.rs b/src/datatypes2/src/lib.rs
new file mode 100644
index 000000000000..256d347eacb0
--- /dev/null
+++ b/src/datatypes2/src/lib.rs
@@ -0,0 +1,33 @@
+// Copyright 2022 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#![feature(generic_associated_types)]
+#![feature(assert_matches)]
+
+pub mod arrow_array;
+pub mod data_type;
+pub mod error;
+pub mod macros;
+pub mod prelude;
+mod scalars;
+pub mod schema;
+pub mod serialize;
+mod timestamp;
+pub mod type_id;
+pub mod types;
+pub mod value;
+pub mod vectors;
+
+pub use arrow;
+pub use error::{Error, Result};
diff --git a/src/datatypes2/src/macros.rs b/src/datatypes2/src/macros.rs
new file mode 100644
index 000000000000..37c0a42e3f55
--- /dev/null
+++ b/src/datatypes2/src/macros.rs
@@ -0,0 +1,68 @@
+// Copyright 2022 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Some helper macros for datatypes, copied from Databend.
+
+/// Apply the macro rules to all primitive types.
+#[macro_export]
+macro_rules! for_all_primitive_types {
+ ($macro:tt $(, $x:tt)*) => {
+ $macro! {
+ [$($x),*],
+ { i8 },
+ { i16 },
+ { i32 },
+ { i64 },
+ { u8 },
+ { u16 },
+ { u32 },
+ { u64 },
+ { f32 },
+ { f64 }
+ }
+ };
+}
+
+/// Match the logical type and apply `$body` to all primitive types and
+/// `$nbody` to other types.
+#[macro_export]
+macro_rules! with_match_primitive_type_id {
+ ($key_type:expr, | $_:tt $T:ident | $body:tt, $nbody:tt) => {{
+ macro_rules! __with_ty__ {
+ ( $_ $T:ident ) => {
+ $body
+ };
+ }
+
+ use $crate::type_id::LogicalTypeId;
+ use $crate::types::{
+ Float32Type, Float64Type, Int16Type, Int32Type, Int64Type, Int8Type, UInt16Type,
+ UInt32Type, UInt64Type, UInt8Type,
+ };
+ match $key_type {
+ LogicalTypeId::Int8 => __with_ty__! { Int8Type },
+ LogicalTypeId::Int16 => __with_ty__! { Int16Type },
+ LogicalTypeId::Int32 => __with_ty__! { Int32Type },
+ LogicalTypeId::Int64 => __with_ty__! { Int64Type },
+ LogicalTypeId::UInt8 => __with_ty__! { UInt8Type },
+ LogicalTypeId::UInt16 => __with_ty__! { UInt16Type },
+ LogicalTypeId::UInt32 => __with_ty__! { UInt32Type },
+ LogicalTypeId::UInt64 => __with_ty__! { UInt64Type },
+ LogicalTypeId::Float32 => __with_ty__! { Float32Type },
+ LogicalTypeId::Float64 => __with_ty__! { Float64Type },
+
+ _ => $nbody,
+ }
+ }};
+}
diff --git a/src/datatypes2/src/prelude.rs b/src/datatypes2/src/prelude.rs
new file mode 100644
index 000000000000..f6bd298316db
--- /dev/null
+++ b/src/datatypes2/src/prelude.rs
@@ -0,0 +1,20 @@
+// Copyright 2022 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+pub use crate::data_type::{ConcreteDataType, DataType, DataTypeRef};
+pub use crate::macros::*;
+pub use crate::scalars::{Scalar, ScalarRef, ScalarVector, ScalarVectorBuilder};
+pub use crate::type_id::LogicalTypeId;
+pub use crate::value::{Value, ValueRef};
+pub use crate::vectors::{MutableVector, Validity, Vector, VectorRef};
diff --git a/src/datatypes2/src/scalars.rs b/src/datatypes2/src/scalars.rs
new file mode 100644
index 000000000000..327ebaa629a2
--- /dev/null
+++ b/src/datatypes2/src/scalars.rs
@@ -0,0 +1,443 @@
+// Copyright 2022 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::any::Any;
+
+use common_time::{Date, DateTime};
+
+use crate::types::{
+ Float32Type, Float64Type, Int16Type, Int32Type, Int64Type, Int8Type, UInt16Type, UInt32Type,
+ UInt64Type, UInt8Type,
+};
+use crate::value::{ListValue, ListValueRef, Value};
+use crate::vectors::{
+ BinaryVector, BooleanVector, DateTimeVector, DateVector, ListVector, MutableVector,
+ PrimitiveVector, StringVector, Vector,
+};
+
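+/// Estimates the builder capacity from the iterator's size hint: the upper bound if
+/// present, otherwise the lower bound, or 1024 when nothing is known.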
+fn get_iter_capacity<T, I: Iterator<Item = T>>(iter: &I) -> usize {
+ match iter.size_hint() {
+ (_lower, Some(upper)) => upper,
+ (0, None) => 1024,
+ (lower, None) => lower,
+ }
+}
+
+/// Owned scalar value, such as primitive types, bool,
+/// `Vec<u8>` and so on.
+pub trait Scalar: 'static + Sized + Default + Any
+where
+ for<'a> Self::VectorType: ScalarVector<RefItem<'a> = Self::RefType<'a>>,
+{
+ type VectorType: ScalarVector<OwnedItem = Self>;
+ type RefType<'a>: ScalarRef<'a, ScalarType = Self>
+ where
+ Self: 'a;
+ /// Get a reference of the current value.
+ fn as_scalar_ref(&self) -> Self::RefType<'_>;
+
+ /// Upcast GAT type's lifetime.
+ fn upcast_gat<'short, 'long: 'short>(long: Self::RefType<'long>) -> Self::RefType<'short>;
+}
+
+pub trait ScalarRef<'a>: std::fmt::Debug + Clone + Copy + Send + 'a {
+ /// The corresponding [`Scalar`] type.
+ type ScalarType: Scalar<RefType<'a> = Self>;
+
+ /// Convert the reference into an owned value.
+ fn to_owned_scalar(&self) -> Self::ScalarType;
+}
+
+/// A sub trait of Vector to add scalar operation support.
+// This implementation refers to Databend's [ScalarColumn](https://github.com/datafuselabs/databend/blob/main/common/datavalues/src/scalars/type_.rs)
+// and skyzh's [type-exercise-in-rust](https://github.com/skyzh/type-exercise-in-rust).
+pub trait ScalarVector: Vector + Send + Sync + Sized + 'static
+where
+ for<'a> Self::OwnedItem: Scalar<RefType<'a> = Self::RefItem<'a>>,
+{
+ type OwnedItem: Scalar<VectorType = Self>;
+ /// The reference item of this vector.
+ type RefItem<'a>: ScalarRef<'a, ScalarType = Self::OwnedItem>
+ where
+ Self: 'a;
+
+ /// Iterator type of this vector.
+ type Iter<'a>: Iterator<Item = Option<Self::RefItem<'a>>>
+ where
+ Self: 'a;
+
+ /// Builder type to build this vector.
+ type Builder: ScalarVectorBuilder<VectorType = Self>;
+
+    /// Returns a reference to the element at the given position.
+    ///
+    /// Note: `get()` has bad performance, so avoid calling this function inside a loop.
+ ///
+ /// # Panics
+ /// Panics if `idx >= self.len()`.
+ fn get_data(&self, idx: usize) -> Option<Self::RefItem<'_>>;
+
+ /// Returns iterator of current vector.
+ fn iter_data(&self) -> Self::Iter<'_>;
+
+ fn from_slice(data: &[Self::RefItem<'_>]) -> Self {
+ let mut builder = Self::Builder::with_capacity(data.len());
+ for item in data {
+ builder.push(Some(*item));
+ }
+ builder.finish()
+ }
+
+ fn from_iterator<'a>(it: impl Iterator<Item = Self::RefItem<'a>>) -> Self {
+ let mut builder = Self::Builder::with_capacity(get_iter_capacity(&it));
+ for item in it {
+ builder.push(Some(item));
+ }
+ builder.finish()
+ }
+
+ fn from_owned_iterator(it: impl Iterator<Item = Option<Self::OwnedItem>>) -> Self {
+ let mut builder = Self::Builder::with_capacity(get_iter_capacity(&it));
+ for item in it {
+ match item {
+ Some(item) => builder.push(Some(item.as_scalar_ref())),
+ None => builder.push(None),
+ }
+ }
+ builder.finish()
+ }
+
+ fn from_vec<I: Into<Self::OwnedItem>>(values: Vec<I>) -> Self {
+ let it = values.into_iter();
+ let mut builder = Self::Builder::with_capacity(get_iter_capacity(&it));
+ for item in it {
+ builder.push(Some(item.into().as_scalar_ref()));
+ }
+ builder.finish()
+ }
+}
+
+/// A trait over all vector builders.
+pub trait ScalarVectorBuilder: MutableVector {
+ type VectorType: ScalarVector<Builder = Self>;
+
+ /// Create a new builder with initial `capacity`.
+ fn with_capacity(capacity: usize) -> Self;
+
+ /// Push a value into the builder.
+ fn push(&mut self, value: Option<<Self::VectorType as ScalarVector>::RefItem<'_>>);
+
+ /// Finish build and return a new vector.
+ fn finish(&mut self) -> Self::VectorType;
+}
+
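+/// Implements [`Scalar`] and [`ScalarRef`] for a native numeric type backed by
+/// `PrimitiveVector<$DataType>`.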
+macro_rules! impl_scalar_for_native {
+ ($Native: ident, $DataType: ident) => {
+ impl Scalar for $Native {
+ type VectorType = PrimitiveVector<$DataType>;
+ type RefType<'a> = $Native;
+
+ #[inline]
+ fn as_scalar_ref(&self) -> $Native {
+ *self
+ }
+
+ #[allow(clippy::needless_lifetimes)]
+ #[inline]
+ fn upcast_gat<'short, 'long: 'short>(long: $Native) -> $Native {
+ long
+ }
+ }
+
+ /// Implement [`ScalarRef`] for primitive types. Note that primitive types are both [`Scalar`] and [`ScalarRef`].
+ impl<'a> ScalarRef<'a> for $Native {
+ type ScalarType = $Native;
+
+ #[inline]
+ fn to_owned_scalar(&self) -> $Native {
+ *self
+ }
+ }
+ };
+}
+
+impl_scalar_for_native!(u8, UInt8Type);
+impl_scalar_for_native!(u16, UInt16Type);
+impl_scalar_for_native!(u32, UInt32Type);
+impl_scalar_for_native!(u64, UInt64Type);
+impl_scalar_for_native!(i8, Int8Type);
+impl_scalar_for_native!(i16, Int16Type);
+impl_scalar_for_native!(i32, Int32Type);
+impl_scalar_for_native!(i64, Int64Type);
+impl_scalar_for_native!(f32, Float32Type);
+impl_scalar_for_native!(f64, Float64Type);
+
+impl Scalar for bool {
+ type VectorType = BooleanVector;
+ type RefType<'a> = bool;
+
+ #[inline]
+ fn as_scalar_ref(&self) -> bool {
+ *self
+ }
+
+ #[allow(clippy::needless_lifetimes)]
+ #[inline]
+ fn upcast_gat<'short, 'long: 'short>(long: bool) -> bool {
+ long
+ }
+}
+
+impl<'a> ScalarRef<'a> for bool {
+ type ScalarType = bool;
+
+ #[inline]
+ fn to_owned_scalar(&self) -> bool {
+ *self
+ }
+}
+
+impl Scalar for String {
+ type VectorType = StringVector;
+ type RefType<'a> = &'a str;
+
+ #[inline]
+ fn as_scalar_ref(&self) -> &str {
+ self
+ }
+
+ #[inline]
+ fn upcast_gat<'short, 'long: 'short>(long: &'long str) -> &'short str {
+ long
+ }
+}
+
+impl<'a> ScalarRef<'a> for &'a str {
+ type ScalarType = String;
+
+ #[inline]
+ fn to_owned_scalar(&self) -> String {
+ self.to_string()
+ }
+}
+
+impl Scalar for Vec<u8> {
+ type VectorType = BinaryVector;
+ type RefType<'a> = &'a [u8];
+
+ #[inline]
+ fn as_scalar_ref(&self) -> &[u8] {
+ self
+ }
+
+ #[inline]
+ fn upcast_gat<'short, 'long: 'short>(long: &'long [u8]) -> &'short [u8] {
+ long
+ }
+}
+
+impl<'a> ScalarRef<'a> for &'a [u8] {
+ type ScalarType = Vec<u8>;
+
+ #[inline]
+ fn to_owned_scalar(&self) -> Vec<u8> {
+ self.to_vec()
+ }
+}
+
+impl Scalar for Date {
+ type VectorType = DateVector;
+ type RefType<'a> = Date;
+
+ fn as_scalar_ref(&self) -> Self::RefType<'_> {
+ *self
+ }
+
+ fn upcast_gat<'short, 'long: 'short>(long: Self::RefType<'long>) -> Self::RefType<'short> {
+ long
+ }
+}
+
+impl<'a> ScalarRef<'a> for Date {
+ type ScalarType = Date;
+
+ fn to_owned_scalar(&self) -> Self::ScalarType {
+ *self
+ }
+}
+
+impl Scalar for DateTime {
+ type VectorType = DateTimeVector;
+ type RefType<'a> = DateTime;
+
+ fn as_scalar_ref(&self) -> Self::RefType<'_> {
+ *self
+ }
+
+ fn upcast_gat<'short, 'long: 'short>(long: Self::RefType<'long>) -> Self::RefType<'short> {
+ long
+ }
+}
+
+impl<'a> ScalarRef<'a> for DateTime {
+ type ScalarType = DateTime;
+
+ fn to_owned_scalar(&self) -> Self::ScalarType {
+ *self
+ }
+}
+
+// Timestamp types implement Scalar and ScalarRef in `src/timestamp.rs`.
+
+impl Scalar for ListValue {
+ type VectorType = ListVector;
+ type RefType<'a> = ListValueRef<'a>;
+
+ fn as_scalar_ref(&self) -> Self::RefType<'_> {
+ ListValueRef::Ref { val: self }
+ }
+
+ fn upcast_gat<'short, 'long: 'short>(long: Self::RefType<'long>) -> Self::RefType<'short> {
+ long
+ }
+}
+
+impl<'a> ScalarRef<'a> for ListValueRef<'a> {
+ type ScalarType = ListValue;
+
+ fn to_owned_scalar(&self) -> Self::ScalarType {
+ match self {
+ ListValueRef::Indexed { vector, idx } => match vector.get(*idx) {
+                // Normally we should not get `Value::Null` if the `ListValueRef` comes
+                // from the iterator of the ListVector, but we avoid panicking and just
+                // return a default list value in that case since `ListValueRef` may
+                // be constructed manually.
+ Value::Null => ListValue::default(),
+ Value::List(v) => v,
+ _ => unreachable!(),
+ },
+ ListValueRef::Ref { val } => (*val).clone(),
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::data_type::ConcreteDataType;
+ use crate::timestamp::TimestampSecond;
+ use crate::vectors::{BinaryVector, Int32Vector, ListVectorBuilder, TimestampSecondVector};
+
+ fn build_vector_from_slice<T: ScalarVector>(items: &[Option<T::RefItem<'_>>]) -> T {
+ let mut builder = T::Builder::with_capacity(items.len());
+ for item in items {
+ builder.push(*item);
+ }
+ builder.finish()
+ }
+
+ fn assert_vector_eq<'a, T: ScalarVector>(expect: &[Option<T::RefItem<'a>>], vector: &'a T)
+ where
+ T::RefItem<'a>: PartialEq + std::fmt::Debug,
+ {
+ for (a, b) in expect.iter().zip(vector.iter_data()) {
+ assert_eq!(*a, b);
+ }
+ }
+
+ #[test]
+ fn test_build_i32_vector() {
+ let expect = vec![Some(1), Some(2), Some(3), None, Some(5)];
+ let vector: Int32Vector = build_vector_from_slice(&expect);
+ assert_vector_eq(&expect, &vector);
+ }
+
+ #[test]
+ fn test_build_binary_vector() {
+ let expect: Vec<Option<&'static [u8]>> = vec![
+ Some(b"a"),
+ Some(b"b"),
+ Some(b"c"),
+ None,
+ Some(b"e"),
+ Some(b""),
+ ];
+ let vector: BinaryVector = build_vector_from_slice(&expect);
+ assert_vector_eq(&expect, &vector);
+ }
+
+ #[test]
+ fn test_build_date_vector() {
+ let expect: Vec<Option<Date>> = vec![
+ Some(Date::new(0)),
+ Some(Date::new(-1)),
+ None,
+ Some(Date::new(1)),
+ ];
+ let vector: DateVector = build_vector_from_slice(&expect);
+ assert_vector_eq(&expect, &vector);
+ }
+
+ #[test]
+ fn test_date_scalar() {
+ let date = Date::new(1);
+ assert_eq!(date, date.as_scalar_ref());
+ assert_eq!(date, date.to_owned_scalar());
+ }
+
+ #[test]
+ fn test_datetime_scalar() {
+ let dt = DateTime::new(123);
+ assert_eq!(dt, dt.as_scalar_ref());
+ assert_eq!(dt, dt.to_owned_scalar());
+ }
+
+ #[test]
+ fn test_list_value_scalar() {
+ let list_value = ListValue::new(
+ Some(Box::new(vec![Value::Int32(123)])),
+ ConcreteDataType::int32_datatype(),
+ );
+ let list_ref = ListValueRef::Ref { val: &list_value };
+ assert_eq!(list_ref, list_value.as_scalar_ref());
+ assert_eq!(list_value, list_ref.to_owned_scalar());
+
+ let mut builder =
+ ListVectorBuilder::with_type_capacity(ConcreteDataType::int32_datatype(), 1);
+ builder.push(None);
+ builder.push(Some(list_value.as_scalar_ref()));
+ let vector = builder.finish();
+
+ let ref_on_vec = ListValueRef::Indexed {
+ vector: &vector,
+ idx: 0,
+ };
+ assert_eq!(ListValue::default(), ref_on_vec.to_owned_scalar());
+ let ref_on_vec = ListValueRef::Indexed {
+ vector: &vector,
+ idx: 1,
+ };
+ assert_eq!(list_value, ref_on_vec.to_owned_scalar());
+ }
+
+ #[test]
+ fn test_build_timestamp_vector() {
+ let expect: Vec<Option<TimestampSecond>> = vec![Some(10.into()), None, Some(42.into())];
+ let vector: TimestampSecondVector = build_vector_from_slice(&expect);
+ assert_vector_eq(&expect, &vector);
+ let val = vector.get_data(0).unwrap();
+ assert_eq!(val, val.as_scalar_ref());
+ assert_eq!(TimestampSecond::from(10), val.to_owned_scalar());
+ }
+}
diff --git a/src/datatypes2/src/schema.rs b/src/datatypes2/src/schema.rs
new file mode 100644
index 000000000000..328fe0de24dc
--- /dev/null
+++ b/src/datatypes2/src/schema.rs
@@ -0,0 +1,430 @@
+// Copyright 2022 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+mod column_schema;
+mod constraint;
+mod raw;
+
+use std::collections::HashMap;
+use std::sync::Arc;
+
+use arrow::datatypes::{Field, Schema as ArrowSchema};
+use snafu::{ensure, ResultExt};
+
+use crate::data_type::DataType;
+use crate::error::{self, Error, Result};
+pub use crate::schema::column_schema::{ColumnSchema, Metadata};
+pub use crate::schema::constraint::ColumnDefaultConstraint;
+pub use crate::schema::raw::RawSchema;
+
+/// Key used to store version number of the schema in metadata.
+const VERSION_KEY: &str = "greptime:version";
+
+/// A common schema, should be immutable.
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct Schema {
+ column_schemas: Vec<ColumnSchema>,
+ name_to_index: HashMap<String, usize>,
+ arrow_schema: Arc<ArrowSchema>,
+ /// Index of the timestamp key column.
+ ///
+    /// The timestamp key column is the column that holds the timestamp and forms part of
+    /// the primary key. `None` means there is no timestamp key column.
+ timestamp_index: Option<usize>,
+ /// Version of the schema.
+ ///
+    /// The initial value is zero. The version should be bumped after altering the schema.
+ version: u32,
+}
+
+impl Schema {
+ /// Initial version of the schema.
+ pub const INITIAL_VERSION: u32 = 0;
+
+ /// Create a schema from a vector of [ColumnSchema].
+ ///
+ /// # Panics
+ /// Panics when ColumnSchema's `default_constraint` can't be serialized into json.
+ pub fn new(column_schemas: Vec<ColumnSchema>) -> Schema {
+ // Builder won't fail in this case
+ SchemaBuilder::try_from(column_schemas)
+ .unwrap()
+ .build()
+ .unwrap()
+ }
+
+    /// Try to create a schema from a vector of [ColumnSchema].
+ pub fn try_new(column_schemas: Vec<ColumnSchema>) -> Result<Schema> {
+ SchemaBuilder::try_from(column_schemas)?.build()
+ }
+
+ #[inline]
+ pub fn arrow_schema(&self) -> &Arc<ArrowSchema> {
+ &self.arrow_schema
+ }
+
+ #[inline]
+ pub fn column_schemas(&self) -> &[ColumnSchema] {
+ &self.column_schemas
+ }
+
+ pub fn column_schema_by_name(&self, name: &str) -> Option<&ColumnSchema> {
+ self.name_to_index
+ .get(name)
+ .map(|index| &self.column_schemas[*index])
+ }
+
+    /// Retrieve the column's name by index.
+    ///
+    /// # Panics
+    /// Panics if the index is out of bounds of the column schemas.
+ #[inline]
+ pub fn column_name_by_index(&self, idx: usize) -> &str {
+ &self.column_schemas[idx].name
+ }
+
+ #[inline]
+ pub fn column_index_by_name(&self, name: &str) -> Option<usize> {
+ self.name_to_index.get(name).copied()
+ }
+
+ #[inline]
+ pub fn contains_column(&self, name: &str) -> bool {
+ self.name_to_index.contains_key(name)
+ }
+
+ #[inline]
+ pub fn num_columns(&self) -> usize {
+ self.column_schemas.len()
+ }
+
+ #[inline]
+ pub fn is_empty(&self) -> bool {
+ self.column_schemas.is_empty()
+ }
+
+ /// Returns index of the timestamp key column.
+ #[inline]
+ pub fn timestamp_index(&self) -> Option<usize> {
+ self.timestamp_index
+ }
+
+ #[inline]
+ pub fn timestamp_column(&self) -> Option<&ColumnSchema> {
+ self.timestamp_index.map(|idx| &self.column_schemas[idx])
+ }
+
+ #[inline]
+ pub fn version(&self) -> u32 {
+ self.version
+ }
+
+ #[inline]
+ pub fn metadata(&self) -> &HashMap<String, String> {
+ &self.arrow_schema.metadata
+ }
+}
+
+#[derive(Default)]
+pub struct SchemaBuilder {
+ column_schemas: Vec<ColumnSchema>,
+ name_to_index: HashMap<String, usize>,
+ fields: Vec<Field>,
+ timestamp_index: Option<usize>,
+ version: u32,
+ metadata: HashMap<String, String>,
+}
+
+impl TryFrom<Vec<ColumnSchema>> for SchemaBuilder {
+ type Error = Error;
+
+ fn try_from(column_schemas: Vec<ColumnSchema>) -> Result<SchemaBuilder> {
+ SchemaBuilder::try_from_columns(column_schemas)
+ }
+}
+
+impl SchemaBuilder {
+ pub fn try_from_columns(column_schemas: Vec<ColumnSchema>) -> Result<Self> {
+ let FieldsAndIndices {
+ fields,
+ name_to_index,
+ timestamp_index,
+ } = collect_fields(&column_schemas)?;
+
+ Ok(Self {
+ column_schemas,
+ name_to_index,
+ fields,
+ timestamp_index,
+ ..Default::default()
+ })
+ }
+
+ pub fn version(mut self, version: u32) -> Self {
+ self.version = version;
+ self
+ }
+
+    /// Add a key-value pair to the metadata.
+    ///
+    /// Old metadata with the same key is overwritten.
+ pub fn add_metadata(mut self, key: impl Into<String>, value: impl Into<String>) -> Self {
+ self.metadata.insert(key.into(), value.into());
+ self
+ }
+
+ pub fn build(mut self) -> Result<Schema> {
+ if let Some(timestamp_index) = self.timestamp_index {
+ validate_timestamp_index(&self.column_schemas, timestamp_index)?;
+ }
+
+ self.metadata
+ .insert(VERSION_KEY.to_string(), self.version.to_string());
+
+ let arrow_schema = ArrowSchema::new(self.fields).with_metadata(self.metadata);
+
+ Ok(Schema {
+ column_schemas: self.column_schemas,
+ name_to_index: self.name_to_index,
+ arrow_schema: Arc::new(arrow_schema),
+ timestamp_index: self.timestamp_index,
+ version: self.version,
+ })
+ }
+}
+
+struct FieldsAndIndices {
+ fields: Vec<Field>,
+ name_to_index: HashMap<String, usize>,
+ timestamp_index: Option<usize>,
+}
+
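+/// Collects arrow fields and the name-to-index map from `column_schemas`, and records
+/// the position of the (unique) time index column if there is one.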
+fn collect_fields(column_schemas: &[ColumnSchema]) -> Result<FieldsAndIndices> {
+ let mut fields = Vec::with_capacity(column_schemas.len());
+ let mut name_to_index = HashMap::with_capacity(column_schemas.len());
+ let mut timestamp_index = None;
+ for (index, column_schema) in column_schemas.iter().enumerate() {
+ if column_schema.is_time_index() {
+ ensure!(
+ timestamp_index.is_none(),
+ error::DuplicateTimestampIndexSnafu {
+ exists: timestamp_index.unwrap(),
+ new: index,
+ }
+ );
+ timestamp_index = Some(index);
+ }
+ let field = Field::try_from(column_schema)?;
+ fields.push(field);
+ name_to_index.insert(column_schema.name.clone(), index);
+ }
+
+ Ok(FieldsAndIndices {
+ fields,
+ name_to_index,
+ timestamp_index,
+ })
+}
+
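+/// Ensures the timestamp index is in bounds and that the column at that index has a
+/// timestamp-compatible type and is flagged as the time index.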
+fn validate_timestamp_index(column_schemas: &[ColumnSchema], timestamp_index: usize) -> Result<()> {
+ ensure!(
+ timestamp_index < column_schemas.len(),
+ error::InvalidTimestampIndexSnafu {
+ index: timestamp_index,
+ }
+ );
+
+ let column_schema = &column_schemas[timestamp_index];
+ ensure!(
+ column_schema.data_type.is_timestamp_compatible(),
+ error::InvalidTimestampIndexSnafu {
+ index: timestamp_index,
+ }
+ );
+ ensure!(
+ column_schema.is_time_index(),
+ error::InvalidTimestampIndexSnafu {
+ index: timestamp_index,
+ }
+ );
+
+ Ok(())
+}
+
+pub type SchemaRef = Arc<Schema>;
+
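+// Rebuilds the schema from an arrow schema: the column schemas, the name-to-index map,
+// the time index and the version are all re-derived from the arrow fields and the
+// schema metadata.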
+impl TryFrom<Arc<ArrowSchema>> for Schema {
+ type Error = Error;
+
+ fn try_from(arrow_schema: Arc<ArrowSchema>) -> Result<Schema> {
+ let mut column_schemas = Vec::with_capacity(arrow_schema.fields.len());
+ let mut name_to_index = HashMap::with_capacity(arrow_schema.fields.len());
+ for field in &arrow_schema.fields {
+ let column_schema = ColumnSchema::try_from(field)?;
+ name_to_index.insert(field.name().to_string(), column_schemas.len());
+ column_schemas.push(column_schema);
+ }
+
+ let mut timestamp_index = None;
+ for (index, column_schema) in column_schemas.iter().enumerate() {
+ if column_schema.is_time_index() {
+ validate_timestamp_index(&column_schemas, index)?;
+ ensure!(
+ timestamp_index.is_none(),
+ error::DuplicateTimestampIndexSnafu {
+ exists: timestamp_index.unwrap(),
+ new: index,
+ }
+ );
+ timestamp_index = Some(index);
+ }
+ }
+
+ let version = try_parse_version(&arrow_schema.metadata, VERSION_KEY)?;
+
+ Ok(Self {
+ column_schemas,
+ name_to_index,
+ arrow_schema,
+ timestamp_index,
+ version,
+ })
+ }
+}
+
+impl TryFrom<ArrowSchema> for Schema {
+ type Error = Error;
+
+ fn try_from(arrow_schema: ArrowSchema) -> Result<Schema> {
+ let arrow_schema = Arc::new(arrow_schema);
+
+ Schema::try_from(arrow_schema)
+ }
+}
+
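+/// Parses the schema version from the metadata map, falling back to
+/// [Schema::INITIAL_VERSION] when the key is absent.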
+fn try_parse_version(metadata: &HashMap<String, String>, key: &str) -> Result<u32> {
+ if let Some(value) = metadata.get(key) {
+ let version = value
+ .parse()
+ .context(error::ParseSchemaVersionSnafu { value })?;
+
+ Ok(version)
+ } else {
+ Ok(Schema::INITIAL_VERSION)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::data_type::ConcreteDataType;
+
+ #[test]
+ fn test_build_empty_schema() {
+ let schema = SchemaBuilder::default().build().unwrap();
+ assert_eq!(0, schema.num_columns());
+ assert!(schema.is_empty());
+ }
+
+ #[test]
+ fn test_schema_no_timestamp() {
+ let column_schemas = vec![
+ ColumnSchema::new("col1", ConcreteDataType::int32_datatype(), false),
+ ColumnSchema::new("col2", ConcreteDataType::float64_datatype(), true),
+ ];
+ let schema = Schema::new(column_schemas.clone());
+
+ assert_eq!(2, schema.num_columns());
+ assert!(!schema.is_empty());
+ assert!(schema.timestamp_index().is_none());
+ assert!(schema.timestamp_column().is_none());
+ assert_eq!(Schema::INITIAL_VERSION, schema.version());
+
+ for column_schema in &column_schemas {
+ let found = schema.column_schema_by_name(&column_schema.name).unwrap();
+ assert_eq!(column_schema, found);
+ }
+ assert!(schema.column_schema_by_name("col3").is_none());
+
+ let new_schema = Schema::try_from(schema.arrow_schema().clone()).unwrap();
+
+ assert_eq!(schema, new_schema);
+ assert_eq!(column_schemas, schema.column_schemas());
+ }
+
+ #[test]
+ fn test_metadata() {
+ let column_schemas = vec![ColumnSchema::new(
+ "col1",
+ ConcreteDataType::int32_datatype(),
+ false,
+ )];
+ let schema = SchemaBuilder::try_from(column_schemas)
+ .unwrap()
+ .add_metadata("k1", "v1")
+ .build()
+ .unwrap();
+
+ assert_eq!("v1", schema.metadata().get("k1").unwrap());
+ }
+
+ #[test]
+ fn test_schema_with_timestamp() {
+ let column_schemas = vec![
+ ColumnSchema::new("col1", ConcreteDataType::int32_datatype(), true),
+ ColumnSchema::new(
+ "ts",
+ ConcreteDataType::timestamp_millisecond_datatype(),
+ false,
+ )
+ .with_time_index(true),
+ ];
+ let schema = SchemaBuilder::try_from(column_schemas.clone())
+ .unwrap()
+ .version(123)
+ .build()
+ .unwrap();
+
+ assert_eq!(1, schema.timestamp_index().unwrap());
+ assert_eq!(&column_schemas[1], schema.timestamp_column().unwrap());
+ assert_eq!(123, schema.version());
+
+ let new_schema = Schema::try_from(schema.arrow_schema().clone()).unwrap();
+ assert_eq!(1, schema.timestamp_index().unwrap());
+ assert_eq!(schema, new_schema);
+ }
+
+ #[test]
+ fn test_schema_wrong_timestamp() {
+ let column_schemas = vec![
+ ColumnSchema::new("col1", ConcreteDataType::int32_datatype(), true)
+ .with_time_index(true),
+ ColumnSchema::new("col2", ConcreteDataType::float64_datatype(), false),
+ ];
+ assert!(SchemaBuilder::try_from(column_schemas)
+ .unwrap()
+ .build()
+ .is_err());
+
+ let column_schemas = vec![
+ ColumnSchema::new("col1", ConcreteDataType::int32_datatype(), true),
+ ColumnSchema::new("col2", ConcreteDataType::float64_datatype(), false)
+ .with_time_index(true),
+ ];
+
+ assert!(SchemaBuilder::try_from(column_schemas)
+ .unwrap()
+ .build()
+ .is_err());
+ }
+}
diff --git a/src/datatypes2/src/schema/column_schema.rs b/src/datatypes2/src/schema/column_schema.rs
new file mode 100644
index 000000000000..0577ca6affca
--- /dev/null
+++ b/src/datatypes2/src/schema/column_schema.rs
@@ -0,0 +1,305 @@
+// Copyright 2022 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::collections::BTreeMap;
+
+use arrow::datatypes::Field;
+use serde::{Deserialize, Serialize};
+use snafu::{ensure, ResultExt};
+
+use crate::data_type::{ConcreteDataType, DataType};
+use crate::error::{self, Error, Result};
+use crate::schema::constraint::ColumnDefaultConstraint;
+use crate::vectors::VectorRef;
+
+pub type Metadata = BTreeMap<String, String>;
+
+/// Key used to store whether the column is the time index in the arrow field's metadata.
+const TIME_INDEX_KEY: &str = "greptime:time_index";
+/// Key used to store the default constraint in the arrow field's metadata.
+const DEFAULT_CONSTRAINT_KEY: &str = "greptime:default_constraint";
+
+/// Schema of a column, used as an immutable struct.
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
+pub struct ColumnSchema {
+ pub name: String,
+ pub data_type: ConcreteDataType,
+ is_nullable: bool,
+ is_time_index: bool,
+ default_constraint: Option<ColumnDefaultConstraint>,
+ metadata: Metadata,
+}
+
+impl ColumnSchema {
+ pub fn new<T: Into<String>>(
+ name: T,
+ data_type: ConcreteDataType,
+ is_nullable: bool,
+ ) -> ColumnSchema {
+ ColumnSchema {
+ name: name.into(),
+ data_type,
+ is_nullable,
+ is_time_index: false,
+ default_constraint: None,
+ metadata: Metadata::new(),
+ }
+ }
+
+ #[inline]
+ pub fn is_time_index(&self) -> bool {
+ self.is_time_index
+ }
+
+ #[inline]
+ pub fn is_nullable(&self) -> bool {
+ self.is_nullable
+ }
+
+ #[inline]
+ pub fn default_constraint(&self) -> Option<&ColumnDefaultConstraint> {
+ self.default_constraint.as_ref()
+ }
+
+ #[inline]
+ pub fn metadata(&self) -> &Metadata {
+ &self.metadata
+ }
+
+ pub fn with_time_index(mut self, is_time_index: bool) -> Self {
+ self.is_time_index = is_time_index;
+ if is_time_index {
+ self.metadata
+ .insert(TIME_INDEX_KEY.to_string(), "true".to_string());
+ } else {
+ self.metadata.remove(TIME_INDEX_KEY);
+ }
+ self
+ }
+
+ pub fn with_default_constraint(
+ mut self,
+ default_constraint: Option<ColumnDefaultConstraint>,
+ ) -> Result<Self> {
+ if let Some(constraint) = &default_constraint {
+ constraint.validate(&self.data_type, self.is_nullable)?;
+ }
+
+ self.default_constraint = default_constraint;
+ Ok(self)
+ }
+
+ /// Creates a new [`ColumnSchema`] with given metadata.
+ pub fn with_metadata(mut self, metadata: Metadata) -> Self {
+ self.metadata = metadata;
+ self
+ }
+
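+    /// Creates a vector of `num_rows` default values for this column, or `None` if the
+    /// column has no default constraint and is not nullable.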
+ pub fn create_default_vector(&self, num_rows: usize) -> Result<Option<VectorRef>> {
+ match &self.default_constraint {
+ Some(c) => c
+ .create_default_vector(&self.data_type, self.is_nullable, num_rows)
+ .map(Some),
+ None => {
+ if self.is_nullable {
+ // No default constraint, use null as default value.
+ // TODO(yingwen): Use NullVector once it supports setting logical type.
+ ColumnDefaultConstraint::null_value()
+ .create_default_vector(&self.data_type, self.is_nullable, num_rows)
+ .map(Some)
+ } else {
+ Ok(None)
+ }
+ }
+ }
+ }
+}
+
+impl TryFrom<&Field> for ColumnSchema {
+ type Error = Error;
+
+ fn try_from(field: &Field) -> Result<ColumnSchema> {
+ let data_type = ConcreteDataType::try_from(field.data_type())?;
+ let mut metadata = field.metadata().cloned().unwrap_or_default();
+ let default_constraint = match metadata.remove(DEFAULT_CONSTRAINT_KEY) {
+ Some(json) => {
+ Some(serde_json::from_str(&json).context(error::DeserializeSnafu { json })?)
+ }
+ None => None,
+ };
+ let is_time_index = metadata.contains_key(TIME_INDEX_KEY);
+
+ Ok(ColumnSchema {
+ name: field.name().clone(),
+ data_type,
+ is_nullable: field.is_nullable(),
+ is_time_index,
+ default_constraint,
+ metadata,
+ })
+ }
+}
+
+impl TryFrom<&ColumnSchema> for Field {
+ type Error = Error;
+
+ fn try_from(column_schema: &ColumnSchema) -> Result<Field> {
+ let mut metadata = column_schema.metadata.clone();
+ if let Some(value) = &column_schema.default_constraint {
+            // Adds an additional metadata entry to store the default constraint.
+ let old = metadata.insert(
+ DEFAULT_CONSTRAINT_KEY.to_string(),
+ serde_json::to_string(&value).context(error::SerializeSnafu)?,
+ );
+
+ ensure!(
+ old.is_none(),
+ error::DuplicateMetaSnafu {
+ key: DEFAULT_CONSTRAINT_KEY,
+ }
+ );
+ }
+
+ Ok(Field::new(
+ &column_schema.name,
+ column_schema.data_type.as_arrow_type(),
+ column_schema.is_nullable(),
+ )
+ .with_metadata(Some(metadata)))
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use arrow::datatypes::DataType as ArrowDataType;
+
+ use super::*;
+ use crate::value::Value;
+
+ #[test]
+ fn test_column_schema() {
+ let column_schema = ColumnSchema::new("test", ConcreteDataType::int32_datatype(), true);
+ let field = Field::try_from(&column_schema).unwrap();
+ assert_eq!("test", field.name());
+ assert_eq!(ArrowDataType::Int32, *field.data_type());
+ assert!(field.is_nullable());
+
+ let new_column_schema = ColumnSchema::try_from(&field).unwrap();
+ assert_eq!(column_schema, new_column_schema);
+ }
+
+ #[test]
+ fn test_column_schema_with_default_constraint() {
+ let column_schema = ColumnSchema::new("test", ConcreteDataType::int32_datatype(), true)
+ .with_default_constraint(Some(ColumnDefaultConstraint::Value(Value::from(99))))
+ .unwrap();
+ assert!(column_schema
+ .metadata()
+ .get(DEFAULT_CONSTRAINT_KEY)
+ .is_none());
+
+ let field = Field::try_from(&column_schema).unwrap();
+ assert_eq!("test", field.name());
+ assert_eq!(ArrowDataType::Int32, *field.data_type());
+ assert!(field.is_nullable());
+ assert_eq!(
+ "{\"Value\":{\"Int32\":99}}",
+ field
+ .metadata()
+ .unwrap()
+ .get(DEFAULT_CONSTRAINT_KEY)
+ .unwrap()
+ );
+
+ let new_column_schema = ColumnSchema::try_from(&field).unwrap();
+ assert_eq!(column_schema, new_column_schema);
+ }
+
+ #[test]
+ fn test_column_schema_with_metadata() {
+ let mut metadata = Metadata::new();
+ metadata.insert("k1".to_string(), "v1".to_string());
+ let column_schema = ColumnSchema::new("test", ConcreteDataType::int32_datatype(), true)
+ .with_metadata(metadata)
+ .with_default_constraint(Some(ColumnDefaultConstraint::null_value()))
+ .unwrap();
+ assert_eq!("v1", column_schema.metadata().get("k1").unwrap());
+ assert!(column_schema
+ .metadata()
+ .get(DEFAULT_CONSTRAINT_KEY)
+ .is_none());
+
+ let field = Field::try_from(&column_schema).unwrap();
+ assert_eq!("v1", field.metadata().unwrap().get("k1").unwrap());
+ assert!(field
+ .metadata()
+ .unwrap()
+ .get(DEFAULT_CONSTRAINT_KEY)
+ .is_some());
+
+ let new_column_schema = ColumnSchema::try_from(&field).unwrap();
+ assert_eq!(column_schema, new_column_schema);
+ }
+
+ #[test]
+ fn test_column_schema_with_duplicate_metadata() {
+ let mut metadata = Metadata::new();
+ metadata.insert(DEFAULT_CONSTRAINT_KEY.to_string(), "v1".to_string());
+ let column_schema = ColumnSchema::new("test", ConcreteDataType::int32_datatype(), true)
+ .with_metadata(metadata)
+ .with_default_constraint(Some(ColumnDefaultConstraint::null_value()))
+ .unwrap();
+ Field::try_from(&column_schema).unwrap_err();
+ }
+
+ #[test]
+ fn test_column_schema_invalid_default_constraint() {
+ ColumnSchema::new("test", ConcreteDataType::int32_datatype(), false)
+ .with_default_constraint(Some(ColumnDefaultConstraint::null_value()))
+ .unwrap_err();
+ }
+
+ #[test]
+ fn test_column_default_constraint_try_into_from() {
+ let default_constraint = ColumnDefaultConstraint::Value(Value::from(42i64));
+
+ let bytes: Vec<u8> = default_constraint.clone().try_into().unwrap();
+ let from_value = ColumnDefaultConstraint::try_from(&bytes[..]).unwrap();
+
+ assert_eq!(default_constraint, from_value);
+ }
+
+ #[test]
+ fn test_column_schema_create_default_null() {
+ // Implicit default null.
+ let column_schema = ColumnSchema::new("test", ConcreteDataType::int32_datatype(), true);
+ let v = column_schema.create_default_vector(5).unwrap().unwrap();
+ assert_eq!(5, v.len());
+ assert!(v.only_null());
+
+ // Explicit default null.
+ let column_schema = ColumnSchema::new("test", ConcreteDataType::int32_datatype(), true)
+ .with_default_constraint(Some(ColumnDefaultConstraint::null_value()))
+ .unwrap();
+ let v = column_schema.create_default_vector(5).unwrap().unwrap();
+ assert_eq!(5, v.len());
+ assert!(v.only_null());
+ }
+
+ #[test]
+ fn test_column_schema_no_default() {
+ let column_schema = ColumnSchema::new("test", ConcreteDataType::int32_datatype(), false);
+ assert!(column_schema.create_default_vector(5).unwrap().is_none());
+ }
+}
diff --git a/src/datatypes2/src/schema/constraint.rs b/src/datatypes2/src/schema/constraint.rs
new file mode 100644
index 000000000000..4dd3ecc14b7f
--- /dev/null
+++ b/src/datatypes2/src/schema/constraint.rs
@@ -0,0 +1,306 @@
+// Copyright 2022 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::fmt::{Display, Formatter};
+use std::sync::Arc;
+
+use common_time::util;
+use serde::{Deserialize, Serialize};
+use snafu::{ensure, ResultExt};
+
+use crate::data_type::{ConcreteDataType, DataType};
+use crate::error::{self, Result};
+use crate::value::Value;
+use crate::vectors::{Int64Vector, TimestampMillisecondVector, VectorRef};
+
+const CURRENT_TIMESTAMP: &str = "current_timestamp()";
+
+/// Column's default constraint.
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
+pub enum ColumnDefaultConstraint {
+ // A function invocation
+ // TODO(dennis): we save the function expression here, maybe use a struct in future.
+ Function(String),
+ // A value
+ Value(Value),
+}
+
+impl TryFrom<&[u8]> for ColumnDefaultConstraint {
+ type Error = error::Error;
+
+ fn try_from(bytes: &[u8]) -> Result<Self> {
+ let json = String::from_utf8_lossy(bytes);
+ serde_json::from_str(&json).context(error::DeserializeSnafu { json })
+ }
+}
+
+impl TryFrom<ColumnDefaultConstraint> for Vec<u8> {
+ type Error = error::Error;
+
+ fn try_from(value: ColumnDefaultConstraint) -> std::result::Result<Self, Self::Error> {
+ let s = serde_json::to_string(&value).context(error::SerializeSnafu)?;
+ Ok(s.into_bytes())
+ }
+}
+
+impl Display for ColumnDefaultConstraint {
+ fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+ match self {
+ ColumnDefaultConstraint::Function(expr) => write!(f, "{}", expr),
+ ColumnDefaultConstraint::Value(v) => write!(f, "{}", v),
+ }
+ }
+}
+
+impl ColumnDefaultConstraint {
+ /// Returns a default null constraint.
+ pub fn null_value() -> ColumnDefaultConstraint {
+ ColumnDefaultConstraint::Value(Value::Null)
+ }
+
+ /// Check whether the constraint is valid for columns with given `data_type`
+ /// and `is_nullable` attributes.
+ pub fn validate(&self, data_type: &ConcreteDataType, is_nullable: bool) -> Result<()> {
+ ensure!(is_nullable || !self.maybe_null(), error::NullDefaultSnafu);
+
+ match self {
+ ColumnDefaultConstraint::Function(expr) => {
+ ensure!(
+ expr == CURRENT_TIMESTAMP,
+ error::UnsupportedDefaultExprSnafu { expr }
+ );
+ ensure!(
+ data_type.is_timestamp_compatible(),
+ error::DefaultValueTypeSnafu {
+                        reason: "return value of the function must have timestamp type",
+ }
+ );
+ }
+ ColumnDefaultConstraint::Value(v) => {
+ if !v.is_null() {
+                    // Whether the value could be nullable has been checked before; we only
+                    // need to check the type compatibility here.
+ ensure!(
+ data_type.logical_type_id() == v.logical_type_id(),
+ error::DefaultValueTypeSnafu {
+ reason: format!(
+ "column has type {:?} but default value has type {:?}",
+ data_type.logical_type_id(),
+ v.logical_type_id()
+ ),
+ }
+ );
+ }
+ }
+ }
+
+ Ok(())
+ }
+
+    /// Create a vector that contains `num_rows` default values for the given `data_type`.
+    ///
+    /// If `is_nullable` is `false`, this method returns an error when the created
+    /// default value is null.
+ ///
+ /// # Panics
+ /// Panics if `num_rows == 0`.
+ pub fn create_default_vector(
+ &self,
+ data_type: &ConcreteDataType,
+ is_nullable: bool,
+ num_rows: usize,
+ ) -> Result<VectorRef> {
+ assert!(num_rows > 0);
+
+ match self {
+ ColumnDefaultConstraint::Function(expr) => {
+                // Functions must also ensure their return value is not null when
+                // the column is not nullable.
+ match &expr[..] {
+                    // TODO(dennis): we only support current_timestamp right now,
+                    // it's better to use an expression framework in the future.
+ CURRENT_TIMESTAMP => create_current_timestamp_vector(data_type, num_rows),
+ _ => error::UnsupportedDefaultExprSnafu { expr }.fail(),
+ }
+ }
+ ColumnDefaultConstraint::Value(v) => {
+ ensure!(is_nullable || !v.is_null(), error::NullDefaultSnafu);
+
+ // TODO(yingwen):
+ // 1. For null value, we could use NullVector once it supports custom logical type.
+                // 2. For non null value, we could use ConstantVector, but it would cause any code
+                //    that attempts to downcast the vector to fail if it doesn't check whether
+                //    the vector is constant first.
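+                // Build a one-element vector holding the default value and replicate it
+                // to `num_rows` rows.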
+ let mut mutable_vector = data_type.create_mutable_vector(1);
+ mutable_vector.push_value_ref(v.as_value_ref())?;
+ let base_vector = mutable_vector.to_vector();
+ Ok(base_vector.replicate(&[num_rows]))
+ }
+ }
+ }
+
+    /// Returns true if this constraint might create NULL.
+ fn maybe_null(&self) -> bool {
+        // Once we support more functions, we may return true if the given function
+        // could return null.
+ matches!(self, ColumnDefaultConstraint::Value(Value::Null))
+ }
+}
+
+fn create_current_timestamp_vector(
+ data_type: &ConcreteDataType,
+ num_rows: usize,
+) -> Result<VectorRef> {
+    // FIXME(yingwen): We should implement cast in VectorOp so we could cast the millisecond vector
+    // to other data types and avoid this match.
+ match data_type {
+ ConcreteDataType::Timestamp(_) => Ok(Arc::new(TimestampMillisecondVector::from_values(
+ std::iter::repeat(util::current_time_millis()).take(num_rows),
+ ))),
+ ConcreteDataType::Int64(_) => Ok(Arc::new(Int64Vector::from_values(
+ std::iter::repeat(util::current_time_millis()).take(num_rows),
+ ))),
+ _ => error::DefaultValueTypeSnafu {
+ reason: format!(
+                "Cannot assign current timestamp to {:?} type",
+ data_type
+ ),
+ }
+ .fail(),
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::error::Error;
+ use crate::vectors::Int32Vector;
+
+ #[test]
+ fn test_null_default_constraint() {
+ let constraint = ColumnDefaultConstraint::null_value();
+ assert!(constraint.maybe_null());
+ let constraint = ColumnDefaultConstraint::Value(Value::Int32(10));
+ assert!(!constraint.maybe_null());
+ }
+
+ #[test]
+ fn test_validate_null_constraint() {
+ let constraint = ColumnDefaultConstraint::null_value();
+ let data_type = ConcreteDataType::int32_datatype();
+ constraint.validate(&data_type, false).unwrap_err();
+ constraint.validate(&data_type, true).unwrap();
+ }
+
+ #[test]
+ fn test_validate_value_constraint() {
+ let constraint = ColumnDefaultConstraint::Value(Value::Int32(10));
+ let data_type = ConcreteDataType::int32_datatype();
+ constraint.validate(&data_type, false).unwrap();
+ constraint.validate(&data_type, true).unwrap();
+
+ constraint
+ .validate(&ConcreteDataType::uint32_datatype(), true)
+ .unwrap_err();
+ }
+
+ #[test]
+ fn test_validate_function_constraint() {
+ let constraint = ColumnDefaultConstraint::Function(CURRENT_TIMESTAMP.to_string());
+ constraint
+ .validate(&ConcreteDataType::timestamp_millisecond_datatype(), false)
+ .unwrap();
+ constraint
+ .validate(&ConcreteDataType::boolean_datatype(), false)
+ .unwrap_err();
+
+ let constraint = ColumnDefaultConstraint::Function("hello()".to_string());
+ constraint
+ .validate(&ConcreteDataType::timestamp_millisecond_datatype(), false)
+ .unwrap_err();
+ }
+
+ #[test]
+ fn test_create_default_vector_by_null() {
+ let constraint = ColumnDefaultConstraint::null_value();
+ let data_type = ConcreteDataType::int32_datatype();
+ constraint
+ .create_default_vector(&data_type, false, 10)
+ .unwrap_err();
+
+ let constraint = ColumnDefaultConstraint::null_value();
+ let v = constraint
+ .create_default_vector(&data_type, true, 3)
+ .unwrap();
+ assert_eq!(3, v.len());
+ for i in 0..v.len() {
+ assert_eq!(Value::Null, v.get(i));
+ }
+ }
+
+ #[test]
+ fn test_create_default_vector_by_value() {
+ let constraint = ColumnDefaultConstraint::Value(Value::Int32(10));
+ let data_type = ConcreteDataType::int32_datatype();
+ let v = constraint
+ .create_default_vector(&data_type, false, 4)
+ .unwrap();
+ let expect: VectorRef = Arc::new(Int32Vector::from_values(vec![10; 4]));
+ assert_eq!(expect, v);
+ }
+
+ #[test]
+ fn test_create_default_vector_by_func() {
+ let constraint = ColumnDefaultConstraint::Function(CURRENT_TIMESTAMP.to_string());
+ // Timestamp type.
+ let data_type = ConcreteDataType::timestamp_millisecond_datatype();
+ let v = constraint
+ .create_default_vector(&data_type, false, 4)
+ .unwrap();
+ assert_eq!(4, v.len());
+ assert!(
+ matches!(v.get(0), Value::Timestamp(_)),
+ "v {:?} is not timestamp",
+ v.get(0)
+ );
+
+ // Int64 type.
+ let data_type = ConcreteDataType::int64_datatype();
+ let v = constraint
+ .create_default_vector(&data_type, false, 4)
+ .unwrap();
+ assert_eq!(4, v.len());
+ assert!(
+ matches!(v.get(0), Value::Int64(_)),
+            "v {:?} is not int64",
+ v.get(0)
+ );
+
+ let constraint = ColumnDefaultConstraint::Function("no".to_string());
+ let data_type = ConcreteDataType::timestamp_millisecond_datatype();
+ constraint
+ .create_default_vector(&data_type, false, 4)
+ .unwrap_err();
+ }
+
+ #[test]
+ fn test_create_by_func_and_invalid_type() {
+ let constraint = ColumnDefaultConstraint::Function(CURRENT_TIMESTAMP.to_string());
+ let data_type = ConcreteDataType::boolean_datatype();
+ let err = constraint
+ .create_default_vector(&data_type, false, 4)
+ .unwrap_err();
+ assert!(matches!(err, Error::DefaultValueType { .. }), "{:?}", err);
+ }
+}
diff --git a/src/datatypes2/src/schema/raw.rs b/src/datatypes2/src/schema/raw.rs
new file mode 100644
index 000000000000..75f0853b4b74
--- /dev/null
+++ b/src/datatypes2/src/schema/raw.rs
@@ -0,0 +1,77 @@
+// Copyright 2022 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use serde::{Deserialize, Serialize};
+
+use crate::error::{Error, Result};
+use crate::schema::{ColumnSchema, Schema, SchemaBuilder};
+
+/// Struct used to serialize and deserialize [`Schema`](crate::schema::Schema).
+///
+/// This struct only contains necessary data to recover the Schema.
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
+pub struct RawSchema {
+ pub column_schemas: Vec<ColumnSchema>,
+ pub timestamp_index: Option<usize>,
+ pub version: u32,
+}
+
+impl TryFrom<RawSchema> for Schema {
+ type Error = Error;
+
+ fn try_from(raw: RawSchema) -> Result<Schema> {
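+        // The time index is re-derived from the column schemas' time-index flag,
+        // so `raw.timestamp_index` is not read here.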
+ SchemaBuilder::try_from(raw.column_schemas)?
+ .version(raw.version)
+ .build()
+ }
+}
+
+impl From<&Schema> for RawSchema {
+ fn from(schema: &Schema) -> RawSchema {
+ RawSchema {
+ column_schemas: schema.column_schemas.clone(),
+ timestamp_index: schema.timestamp_index,
+ version: schema.version,
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::data_type::ConcreteDataType;
+
+ #[test]
+ fn test_raw_convert() {
+ let column_schemas = vec![
+ ColumnSchema::new("col1", ConcreteDataType::int32_datatype(), true),
+ ColumnSchema::new(
+ "ts",
+ ConcreteDataType::timestamp_millisecond_datatype(),
+ false,
+ )
+ .with_time_index(true),
+ ];
+ let schema = SchemaBuilder::try_from(column_schemas)
+ .unwrap()
+ .version(123)
+ .build()
+ .unwrap();
+
+ let raw = RawSchema::from(&schema);
+ let schema_new = Schema::try_from(raw).unwrap();
+
+ assert_eq!(schema, schema_new);
+ }
+}
diff --git a/src/datatypes2/src/serialize.rs b/src/datatypes2/src/serialize.rs
new file mode 100644
index 000000000000..1cbf04cedd79
--- /dev/null
+++ b/src/datatypes2/src/serialize.rs
@@ -0,0 +1,20 @@
+// Copyright 2022 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::error::Result;
+
+pub trait Serializable: Send + Sync {
+ /// Serialize a column of value with given type to JSON value
+ fn serialize_to_json(&self) -> Result<Vec<serde_json::Value>>;
+}
diff --git a/src/datatypes2/src/timestamp.rs b/src/datatypes2/src/timestamp.rs
new file mode 100644
index 000000000000..f14e91a6c614
--- /dev/null
+++ b/src/datatypes2/src/timestamp.rs
@@ -0,0 +1,135 @@
+// Copyright 2022 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+use common_time::timestamp::TimeUnit;
+use common_time::Timestamp;
+use paste::paste;
+use serde::{Deserialize, Serialize};
+
+use crate::prelude::{Scalar, Value, ValueRef};
+use crate::scalars::ScalarRef;
+use crate::types::{
+ TimestampMicrosecondType, TimestampMillisecondType, TimestampNanosecondType,
+ TimestampSecondType, WrapperType,
+};
+use crate::vectors::{
+ TimestampMicrosecondVector, TimestampMillisecondVector, TimestampNanosecondVector,
+ TimestampSecondVector,
+};
+
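+/// Defines a `Timestamp<Unit>` wrapper type (e.g. `TimestampSecond`) around [Timestamp]
+/// and implements `Scalar`, `ScalarRef`, `WrapperType` and the `Value` conversions for it.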
+macro_rules! define_timestamp_with_unit {
+ ($unit: ident) => {
+ paste! {
+ #[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
+ pub struct [<Timestamp $unit>](pub Timestamp);
+
+ impl [<Timestamp $unit>] {
+ pub fn new(val: i64) -> Self {
+ Self(Timestamp::new(val, TimeUnit::$unit))
+ }
+ }
+
+ impl Default for [<Timestamp $unit>] {
+ fn default() -> Self {
+ Self::new(0)
+ }
+ }
+
+ impl From<[<Timestamp $unit>]> for Value {
+ fn from(t: [<Timestamp $unit>]) -> Value {
+ Value::Timestamp(t.0)
+ }
+ }
+
+ impl From<[<Timestamp $unit>]> for serde_json::Value {
+ fn from(t: [<Timestamp $unit>]) -> Self {
+ t.0.into()
+ }
+ }
+
+ impl From<[<Timestamp $unit>]> for ValueRef<'static> {
+ fn from(t: [<Timestamp $unit>]) -> Self {
+ ValueRef::Timestamp(t.0)
+ }
+ }
+
+ impl Scalar for [<Timestamp $unit>] {
+ type VectorType = [<Timestamp $unit Vector>];
+ type RefType<'a> = [<Timestamp $unit>];
+
+ fn as_scalar_ref(&self) -> Self::RefType<'_> {
+ *self
+ }
+
+ fn upcast_gat<'short, 'long: 'short>(
+ long: Self::RefType<'long>,
+ ) -> Self::RefType<'short> {
+ long
+ }
+ }
+
+ impl<'a> ScalarRef<'a> for [<Timestamp $unit>] {
+ type ScalarType = [<Timestamp $unit>];
+
+ fn to_owned_scalar(&self) -> Self::ScalarType {
+ *self
+ }
+ }
+
+ impl WrapperType for [<Timestamp $unit>] {
+ type LogicalType = [<Timestamp $unit Type>];
+ type Native = i64;
+
+ fn from_native(value: Self::Native) -> Self {
+ Self::new(value)
+ }
+
+ fn into_native(self) -> Self::Native {
+ self.0.into()
+ }
+ }
+
+ impl From<i64> for [<Timestamp $unit>] {
+ fn from(val: i64) -> Self {
+ [<Timestamp $unit>]::from_native(val)
+ }
+ }
+ }
+ };
+}
+
+define_timestamp_with_unit!(Second);
+define_timestamp_with_unit!(Millisecond);
+define_timestamp_with_unit!(Microsecond);
+define_timestamp_with_unit!(Nanosecond);
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_timestamp_scalar() {
+ let ts = TimestampSecond::new(123);
+ assert_eq!(ts, ts.as_scalar_ref());
+ assert_eq!(ts, ts.to_owned_scalar());
+ let ts = TimestampMillisecond::new(123);
+ assert_eq!(ts, ts.as_scalar_ref());
+ assert_eq!(ts, ts.to_owned_scalar());
+ let ts = TimestampMicrosecond::new(123);
+ assert_eq!(ts, ts.as_scalar_ref());
+ assert_eq!(ts, ts.to_owned_scalar());
+ let ts = TimestampNanosecond::new(123);
+ assert_eq!(ts, ts.as_scalar_ref());
+ assert_eq!(ts, ts.to_owned_scalar());
+ }
+}
diff --git a/src/datatypes2/src/type_id.rs b/src/datatypes2/src/type_id.rs
new file mode 100644
index 000000000000..bcb7ea52b129
--- /dev/null
+++ b/src/datatypes2/src/type_id.rs
@@ -0,0 +1,93 @@
+// Copyright 2022 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/// Unique identifier for logical data type.
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub enum LogicalTypeId {
+ Null,
+
+ // Numeric types:
+ Boolean,
+ Int8,
+ Int16,
+ Int32,
+ Int64,
+ UInt8,
+ UInt16,
+ UInt32,
+ UInt64,
+ Float32,
+ Float64,
+
+ // String types:
+ String,
+ Binary,
+
+ // Date & Time types:
+ /// Date representing the elapsed time since UNIX epoch (1970-01-01)
+ /// in days (32 bits).
+ Date,
+ /// Datetime representing the elapsed time since UNIX epoch (1970-01-01) in
+ /// seconds/milliseconds/microseconds/nanoseconds, determined by precision.
+ DateTime,
+
+ TimestampSecond,
+ TimestampMillisecond,
+ TimestampMicrosecond,
+ TimestampNanosecond,
+
+ List,
+}
+
+impl LogicalTypeId {
+    /// Create ConcreteDataType based on this id. This method is for tests only as it
+    /// would lose some info.
+ ///
+ /// # Panics
+ /// Panics if data type is not supported.
+ #[cfg(any(test, feature = "test"))]
+ pub fn data_type(&self) -> crate::data_type::ConcreteDataType {
+ use crate::data_type::ConcreteDataType;
+
+ match self {
+ LogicalTypeId::Null => ConcreteDataType::null_datatype(),
+ LogicalTypeId::Boolean => ConcreteDataType::boolean_datatype(),
+ LogicalTypeId::Int8 => ConcreteDataType::int8_datatype(),
+ LogicalTypeId::Int16 => ConcreteDataType::int16_datatype(),
+ LogicalTypeId::Int32 => ConcreteDataType::int32_datatype(),
+ LogicalTypeId::Int64 => ConcreteDataType::int64_datatype(),
+ LogicalTypeId::UInt8 => ConcreteDataType::uint8_datatype(),
+ LogicalTypeId::UInt16 => ConcreteDataType::uint16_datatype(),
+ LogicalTypeId::UInt32 => ConcreteDataType::uint32_datatype(),
+ LogicalTypeId::UInt64 => ConcreteDataType::uint64_datatype(),
+ LogicalTypeId::Float32 => ConcreteDataType::float32_datatype(),
+ LogicalTypeId::Float64 => ConcreteDataType::float64_datatype(),
+ LogicalTypeId::String => ConcreteDataType::string_datatype(),
+ LogicalTypeId::Binary => ConcreteDataType::binary_datatype(),
+ LogicalTypeId::Date => ConcreteDataType::date_datatype(),
+ LogicalTypeId::DateTime => ConcreteDataType::datetime_datatype(),
+ LogicalTypeId::TimestampSecond => ConcreteDataType::timestamp_second_datatype(),
+ LogicalTypeId::TimestampMillisecond => {
+ ConcreteDataType::timestamp_millisecond_datatype()
+ }
+ LogicalTypeId::TimestampMicrosecond => {
+ ConcreteDataType::timestamp_microsecond_datatype()
+ }
+ LogicalTypeId::TimestampNanosecond => ConcreteDataType::timestamp_nanosecond_datatype(),
+ LogicalTypeId::List => {
+ ConcreteDataType::list_datatype(ConcreteDataType::null_datatype())
+ }
+ }
+ }
+}
diff --git a/src/datatypes2/src/types.rs b/src/datatypes2/src/types.rs
new file mode 100644
index 000000000000..186704fdfdb3
--- /dev/null
+++ b/src/datatypes2/src/types.rs
@@ -0,0 +1,37 @@
+// Copyright 2022 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+mod binary_type;
+mod boolean_type;
+mod date_type;
+mod datetime_type;
+mod list_type;
+mod null_type;
+mod primitive_type;
+mod string_type;
+
+mod timestamp_type;
+
+pub use binary_type::BinaryType;
+pub use boolean_type::BooleanType;
+pub use date_type::DateType;
+pub use datetime_type::DateTimeType;
+pub use list_type::ListType;
+pub use null_type::NullType;
+pub use primitive_type::{
+ Float32Type, Float64Type, Int16Type, Int32Type, Int64Type, Int8Type, LogicalPrimitiveType,
+ NativeType, UInt16Type, UInt32Type, UInt64Type, UInt8Type, WrapperType,
+};
+pub use string_type::StringType;
+pub use timestamp_type::*;
diff --git a/src/datatypes2/src/types/binary_type.rs b/src/datatypes2/src/types/binary_type.rs
new file mode 100644
index 000000000000..0d06724fffb4
--- /dev/null
+++ b/src/datatypes2/src/types/binary_type.rs
@@ -0,0 +1,60 @@
+// Copyright 2022 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::sync::Arc;
+
+use arrow::datatypes::DataType as ArrowDataType;
+use common_base::bytes::StringBytes;
+use serde::{Deserialize, Serialize};
+
+use crate::data_type::{DataType, DataTypeRef};
+use crate::scalars::ScalarVectorBuilder;
+use crate::type_id::LogicalTypeId;
+use crate::value::Value;
+use crate::vectors::{BinaryVectorBuilder, MutableVector};
+
+#[derive(Debug, Default, Clone, PartialEq, Eq, Serialize, Deserialize)]
+pub struct BinaryType;
+
+impl BinaryType {
+ pub fn arc() -> DataTypeRef {
+ Arc::new(Self)
+ }
+}
+
+impl DataType for BinaryType {
+ fn name(&self) -> &str {
+ "Binary"
+ }
+
+ fn logical_type_id(&self) -> LogicalTypeId {
+ LogicalTypeId::Binary
+ }
+
+ fn default_value(&self) -> Value {
+ StringBytes::default().into()
+ }
+
+ fn as_arrow_type(&self) -> ArrowDataType {
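+        // Binary data is stored as arrow `LargeBinary` (64-bit offsets).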
+ ArrowDataType::LargeBinary
+ }
+
+ fn create_mutable_vector(&self, capacity: usize) -> Box<dyn MutableVector> {
+ Box::new(BinaryVectorBuilder::with_capacity(capacity))
+ }
+
+ fn is_timestamp_compatible(&self) -> bool {
+ false
+ }
+}
diff --git a/src/datatypes2/src/types/boolean_type.rs b/src/datatypes2/src/types/boolean_type.rs
new file mode 100644
index 000000000000..36d92169eb01
--- /dev/null
+++ b/src/datatypes2/src/types/boolean_type.rs
@@ -0,0 +1,59 @@
+// Copyright 2022 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::sync::Arc;
+
+use arrow::datatypes::DataType as ArrowDataType;
+use serde::{Deserialize, Serialize};
+
+use crate::data_type::{DataType, DataTypeRef};
+use crate::scalars::ScalarVectorBuilder;
+use crate::type_id::LogicalTypeId;
+use crate::value::Value;
+use crate::vectors::{BooleanVectorBuilder, MutableVector};
+
+#[derive(Debug, Default, Clone, PartialEq, Eq, Serialize, Deserialize)]
+pub struct BooleanType;
+
+impl BooleanType {
+ pub fn arc() -> DataTypeRef {
+ Arc::new(Self)
+ }
+}
+
+impl DataType for BooleanType {
+ fn name(&self) -> &str {
+ "Boolean"
+ }
+
+ fn logical_type_id(&self) -> LogicalTypeId {
+ LogicalTypeId::Boolean
+ }
+
+ fn default_value(&self) -> Value {
+ bool::default().into()
+ }
+
+ fn as_arrow_type(&self) -> ArrowDataType {
+ ArrowDataType::Boolean
+ }
+
+ fn create_mutable_vector(&self, capacity: usize) -> Box<dyn MutableVector> {
+ Box::new(BooleanVectorBuilder::with_capacity(capacity))
+ }
+
+ fn is_timestamp_compatible(&self) -> bool {
+ false
+ }
+}
diff --git a/src/datatypes2/src/types/date_type.rs b/src/datatypes2/src/types/date_type.rs
new file mode 100644
index 000000000000..052b837a3d58
--- /dev/null
+++ b/src/datatypes2/src/types/date_type.rs
@@ -0,0 +1,90 @@
+// Copyright 2022 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use arrow::datatypes::{DataType as ArrowDataType, Date32Type};
+use common_time::Date;
+use serde::{Deserialize, Serialize};
+use snafu::OptionExt;
+
+use crate::data_type::{ConcreteDataType, DataType};
+use crate::error::{self, Result};
+use crate::scalars::ScalarVectorBuilder;
+use crate::type_id::LogicalTypeId;
+use crate::types::LogicalPrimitiveType;
+use crate::value::{Value, ValueRef};
+use crate::vectors::{DateVector, DateVectorBuilder, MutableVector, Vector};
+
+/// Data type for Date (YYYY-MM-DD).
+#[derive(Debug, Default, Clone, PartialEq, Eq, Serialize, Deserialize)]
+pub struct DateType;
+
+impl DataType for DateType {
+ fn name(&self) -> &str {
+ "Date"
+ }
+
+ fn logical_type_id(&self) -> LogicalTypeId {
+ LogicalTypeId::Date
+ }
+
+ fn default_value(&self) -> Value {
+ Value::Date(Default::default())
+ }
+
+ fn as_arrow_type(&self) -> ArrowDataType {
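+        // Dates are stored as arrow `Date32` (days since the UNIX epoch).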
+ ArrowDataType::Date32
+ }
+
+ fn create_mutable_vector(&self, capacity: usize) -> Box<dyn MutableVector> {
+ Box::new(DateVectorBuilder::with_capacity(capacity))
+ }
+
+ fn is_timestamp_compatible(&self) -> bool {
+ false
+ }
+}
+
+impl LogicalPrimitiveType for DateType {
+ type ArrowPrimitive = Date32Type;
+ type Native = i32;
+ type Wrapper = Date;
+
+ fn build_data_type() -> ConcreteDataType {
+ ConcreteDataType::date_datatype()
+ }
+
+ fn type_name() -> &'static str {
+ "Date"
+ }
+
+ fn cast_vector(vector: &dyn Vector) -> Result<&DateVector> {
+ vector
+ .as_any()
+ .downcast_ref::<DateVector>()
+ .with_context(|| error::CastTypeSnafu {
+ msg: format!("Failed to cast {} to DateVector", vector.vector_type_name(),),
+ })
+ }
+
+ fn cast_value_ref(value: ValueRef) -> Result<Option<Date>> {
+ match value {
+ ValueRef::Null => Ok(None),
+ ValueRef::Date(v) => Ok(Some(v)),
+ other => error::CastTypeSnafu {
+ msg: format!("Failed to cast value {:?} to Date", other,),
+ }
+ .fail(),
+ }
+ }
+}
diff --git a/src/datatypes2/src/types/datetime_type.rs b/src/datatypes2/src/types/datetime_type.rs
new file mode 100644
index 000000000000..d74a02effe4f
--- /dev/null
+++ b/src/datatypes2/src/types/datetime_type.rs
@@ -0,0 +1,91 @@
+// Copyright 2022 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use arrow::datatypes::{DataType as ArrowDataType, Date64Type};
+use common_time::DateTime;
+use serde::{Deserialize, Serialize};
+use snafu::OptionExt;
+
+use crate::data_type::{ConcreteDataType, DataType};
+use crate::error::{self, Result};
+use crate::prelude::{LogicalTypeId, MutableVector, ScalarVectorBuilder, Value, ValueRef, Vector};
+use crate::types::LogicalPrimitiveType;
+use crate::vectors::{DateTimeVector, DateTimeVectorBuilder, PrimitiveVector};
+
+/// Data type for [`DateTime`].
+#[derive(Debug, Default, Clone, PartialEq, Eq, Serialize, Deserialize)]
+pub struct DateTimeType;
+
+impl DataType for DateTimeType {
+ fn name(&self) -> &str {
+ "DateTime"
+ }
+
+ fn logical_type_id(&self) -> LogicalTypeId {
+ LogicalTypeId::DateTime
+ }
+
+ fn default_value(&self) -> Value {
+ Value::DateTime(DateTime::default())
+ }
+
+ fn as_arrow_type(&self) -> ArrowDataType {
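+        // DateTimes are stored as arrow `Date64` (milliseconds since the UNIX epoch).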
+ ArrowDataType::Date64
+ }
+
+ fn create_mutable_vector(&self, capacity: usize) -> Box<dyn MutableVector> {
+ Box::new(DateTimeVectorBuilder::with_capacity(capacity))
+ }
+
+ fn is_timestamp_compatible(&self) -> bool {
+ false
+ }
+}
+
+impl LogicalPrimitiveType for DateTimeType {
+ type ArrowPrimitive = Date64Type;
+ type Native = i64;
+ type Wrapper = DateTime;
+
+ fn build_data_type() -> ConcreteDataType {
+ ConcreteDataType::datetime_datatype()
+ }
+
+ fn type_name() -> &'static str {
+ "DateTime"
+ }
+
+ fn cast_vector(vector: &dyn Vector) -> Result<&PrimitiveVector<Self>> {
+ vector
+ .as_any()
+ .downcast_ref::<DateTimeVector>()
+ .with_context(|| error::CastTypeSnafu {
+ msg: format!(
+ "Failed to cast {} to DateTimeVector",
+ vector.vector_type_name()
+ ),
+ })
+ }
+
+ fn cast_value_ref(value: ValueRef) -> Result<Option<Self::Wrapper>> {
+ match value {
+ ValueRef::Null => Ok(None),
+ ValueRef::DateTime(v) => Ok(Some(v)),
+ other => error::CastTypeSnafu {
+ msg: format!("Failed to cast value {:?} to DateTime", other,),
+ }
+ .fail(),
+ }
+ }
+}
diff --git a/src/datatypes2/src/types/list_type.rs b/src/datatypes2/src/types/list_type.rs
new file mode 100644
index 000000000000..b9875ca36263
--- /dev/null
+++ b/src/datatypes2/src/types/list_type.rs
@@ -0,0 +1,95 @@
+// Copyright 2022 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use arrow::datatypes::{DataType as ArrowDataType, Field};
+use serde::{Deserialize, Serialize};
+
+use crate::data_type::{ConcreteDataType, DataType};
+use crate::type_id::LogicalTypeId;
+use crate::value::{ListValue, Value};
+use crate::vectors::{ListVectorBuilder, MutableVector};
+
+/// Used to represent the List datatype.
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
+pub struct ListType {
+ /// The type of List's item.
+ // Use Box to avoid recursive dependency, as enum ConcreteDataType depends on ListType.
+ item_type: Box<ConcreteDataType>,
+}
+
+impl Default for ListType {
+ fn default() -> Self {
+ ListType::new(ConcreteDataType::null_datatype())
+ }
+}
+
+impl ListType {
+ /// Create a new `ListType` whose item's data type is `item_type`.
+ pub fn new(item_type: ConcreteDataType) -> Self {
+ ListType {
+ item_type: Box::new(item_type),
+ }
+ }
+}
+
+impl DataType for ListType {
+ fn name(&self) -> &str {
+ "List"
+ }
+
+ fn logical_type_id(&self) -> LogicalTypeId {
+ LogicalTypeId::List
+ }
+
+ fn default_value(&self) -> Value {
+ Value::List(ListValue::new(None, *self.item_type.clone()))
+ }
+
+ fn as_arrow_type(&self) -> ArrowDataType {
+ let field = Box::new(Field::new("item", self.item_type.as_arrow_type(), true));
+ ArrowDataType::List(field)
+ }
+
+ fn create_mutable_vector(&self, capacity: usize) -> Box<dyn MutableVector> {
+ Box::new(ListVectorBuilder::with_type_capacity(
+ *self.item_type.clone(),
+ capacity,
+ ))
+ }
+
+ fn is_timestamp_compatible(&self) -> bool {
+ false
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::value::ListValue;
+
+ #[test]
+ fn test_list_type() {
+ let t = ListType::new(ConcreteDataType::boolean_datatype());
+ assert_eq!("List", t.name());
+ assert_eq!(LogicalTypeId::List, t.logical_type_id());
+ assert_eq!(
+ Value::List(ListValue::new(None, ConcreteDataType::boolean_datatype())),
+ t.default_value()
+ );
+ assert_eq!(
+ ArrowDataType::List(Box::new(Field::new("item", ArrowDataType::Boolean, true))),
+ t.as_arrow_type()
+ );
+ }
+}
diff --git a/src/datatypes2/src/types/null_type.rs b/src/datatypes2/src/types/null_type.rs
new file mode 100644
index 000000000000..b9bb2dc7526d
--- /dev/null
+++ b/src/datatypes2/src/types/null_type.rs
@@ -0,0 +1,58 @@
+// Copyright 2022 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::sync::Arc;
+
+use arrow::datatypes::DataType as ArrowDataType;
+use serde::{Deserialize, Serialize};
+
+use crate::data_type::{DataType, DataTypeRef};
+use crate::type_id::LogicalTypeId;
+use crate::value::Value;
+use crate::vectors::{MutableVector, NullVectorBuilder};
+
+#[derive(Debug, Default, Clone, PartialEq, Eq, Serialize, Deserialize)]
+pub struct NullType;
+
+impl NullType {
+ pub fn arc() -> DataTypeRef {
+ Arc::new(NullType)
+ }
+}
+
+impl DataType for NullType {
+ fn name(&self) -> &str {
+ "Null"
+ }
+
+ fn logical_type_id(&self) -> LogicalTypeId {
+ LogicalTypeId::Null
+ }
+
+ fn default_value(&self) -> Value {
+ Value::Null
+ }
+
+ fn as_arrow_type(&self) -> ArrowDataType {
+ ArrowDataType::Null
+ }
+
+ fn create_mutable_vector(&self, _capacity: usize) -> Box<dyn MutableVector> {
+ Box::new(NullVectorBuilder::default())
+ }
+
+ fn is_timestamp_compatible(&self) -> bool {
+ false
+ }
+}
diff --git a/src/datatypes2/src/types/primitive_type.rs b/src/datatypes2/src/types/primitive_type.rs
new file mode 100644
index 000000000000..e389ca13bf91
--- /dev/null
+++ b/src/datatypes2/src/types/primitive_type.rs
@@ -0,0 +1,358 @@
+// Copyright 2022 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::cmp::Ordering;
+
+use arrow::datatypes::{ArrowNativeType, ArrowPrimitiveType, DataType as ArrowDataType};
+use common_time::{Date, DateTime};
+use num::NumCast;
+use serde::{Deserialize, Serialize};
+use snafu::OptionExt;
+
+use crate::data_type::{ConcreteDataType, DataType};
+use crate::error::{self, Result};
+use crate::scalars::{Scalar, ScalarRef, ScalarVectorBuilder};
+use crate::type_id::LogicalTypeId;
+use crate::types::{DateTimeType, DateType};
+use crate::value::{Value, ValueRef};
+use crate::vectors::{MutableVector, PrimitiveVector, PrimitiveVectorBuilder, Vector};
+
+/// Data types that can be used as arrow's native type.
+pub trait NativeType: ArrowNativeType + NumCast {
+ /// Largest numeric type this primitive type can be cast to.
+ type LargestType: NativeType;
+}
+
+macro_rules! impl_native_type {
+ ($Type: ident, $LargestType: ident) => {
+ impl NativeType for $Type {
+ type LargestType = $LargestType;
+ }
+ };
+}
+
+impl_native_type!(u8, u64);
+impl_native_type!(u16, u64);
+impl_native_type!(u32, u64);
+impl_native_type!(u64, u64);
+impl_native_type!(i8, i64);
+impl_native_type!(i16, i64);
+impl_native_type!(i32, i64);
+impl_native_type!(i64, i64);
+impl_native_type!(f32, f64);
+impl_native_type!(f64, f64);
+
+/// Represents a wrapper type that wraps a native type using the `newtype pattern`,
+/// for example [Date](`common_time::Date`) wraps the underlying native type `i32`.
+pub trait WrapperType:
+ Copy
+ + Scalar
+ + PartialEq
+ + Into<Value>
+ + Into<ValueRef<'static>>
+ + Serialize
+ + Into<serde_json::Value>
+{
+ /// Logical primitive type that this wrapper type belongs to.
+ type LogicalType: LogicalPrimitiveType<Wrapper = Self, Native = Self::Native>;
+ /// The underlying native type.
+ type Native: NativeType;
+
+ /// Convert native type into this wrapper type.
+ fn from_native(value: Self::Native) -> Self;
+
+ /// Convert this wrapper type into native type.
+ fn into_native(self) -> Self::Native;
+}
+
+/// Trait bridging the logical primitive type with [ArrowPrimitiveType].
+pub trait LogicalPrimitiveType: 'static + Sized {
+ /// Arrow primitive type of this logical type.
+ type ArrowPrimitive: ArrowPrimitiveType<Native = Self::Native>;
+ /// Native (physical) type of this logical type.
+ type Native: NativeType;
+ /// Wrapper type that the vector returns.
+ type Wrapper: WrapperType<LogicalType = Self, Native = Self::Native>
+ + for<'a> Scalar<VectorType = PrimitiveVector<Self>, RefType<'a> = Self::Wrapper>
+ + for<'a> ScalarRef<'a, ScalarType = Self::Wrapper>;
+
+ /// Construct the data type struct.
+ fn build_data_type() -> ConcreteDataType;
+
+ /// Return the name of the type.
+ fn type_name() -> &'static str;
+
+    /// Dynamically casts the vector to the concrete vector type.
+ fn cast_vector(vector: &dyn Vector) -> Result<&PrimitiveVector<Self>>;
+
+ /// Cast value ref to the primitive type.
+ fn cast_value_ref(value: ValueRef) -> Result<Option<Self::Wrapper>>;
+}
+
+/// A newtype for [WrapperType] that complements it with an `Ord` implementation. Wrapping
+/// non-ordered primitive types like `f32` and `f64` in `OrdPrimitive` makes them usable in
+/// places that require `Ord`, for example in `Median` or `Percentile` UDAFs.
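+///
+/// A minimal usage sketch (an `ignore`d doctest; assumes `OrdPrimitive` is in scope):
+///
+/// ```ignore
+/// use std::collections::BinaryHeap;
+///
+/// // `f64` itself is not `Ord`, but `OrdPrimitive<f64>` is, so it can live in a max-heap.
+/// let mut heap = BinaryHeap::new();
+/// heap.push(OrdPrimitive::<f64>(3.0));
+/// heap.push(OrdPrimitive::<f64>(1.0));
+/// assert_eq!(3.0, heap.peek().unwrap().as_primitive());
+/// ```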
+#[derive(Debug, Clone, Copy, PartialEq)]
+pub struct OrdPrimitive<T: WrapperType>(pub T);
+
+impl<T: WrapperType> OrdPrimitive<T> {
+ pub fn as_primitive(&self) -> T {
+ self.0
+ }
+}
+
+impl<T: WrapperType> Eq for OrdPrimitive<T> {}
+
+impl<T: WrapperType> PartialOrd for OrdPrimitive<T> {
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ Some(self.cmp(other))
+ }
+}
+
+impl<T: WrapperType> Ord for OrdPrimitive<T> {
+ fn cmp(&self, other: &Self) -> Ordering {
+ Into::<Value>::into(self.0).cmp(&Into::<Value>::into(other.0))
+ }
+}
+
+impl<T: WrapperType> From<OrdPrimitive<T>> for Value {
+ fn from(p: OrdPrimitive<T>) -> Self {
+ p.0.into()
+ }
+}
+
+macro_rules! impl_wrapper {
+ ($Type: ident, $LogicalType: ident) => {
+ impl WrapperType for $Type {
+ type LogicalType = $LogicalType;
+ type Native = $Type;
+
+ fn from_native(value: Self::Native) -> Self {
+ value
+ }
+
+ fn into_native(self) -> Self::Native {
+ self
+ }
+ }
+ };
+}
+
+impl_wrapper!(u8, UInt8Type);
+impl_wrapper!(u16, UInt16Type);
+impl_wrapper!(u32, UInt32Type);
+impl_wrapper!(u64, UInt64Type);
+impl_wrapper!(i8, Int8Type);
+impl_wrapper!(i16, Int16Type);
+impl_wrapper!(i32, Int32Type);
+impl_wrapper!(i64, Int64Type);
+impl_wrapper!(f32, Float32Type);
+impl_wrapper!(f64, Float64Type);
+
+impl WrapperType for Date {
+ type LogicalType = DateType;
+ type Native = i32;
+
+ fn from_native(value: i32) -> Self {
+ Date::new(value)
+ }
+
+ fn into_native(self) -> i32 {
+ self.val()
+ }
+}
+
+impl WrapperType for DateTime {
+ type LogicalType = DateTimeType;
+ type Native = i64;
+
+ fn from_native(value: Self::Native) -> Self {
+ DateTime::new(value)
+ }
+
+ fn into_native(self) -> Self::Native {
+ self.val()
+ }
+}
+
+macro_rules! define_logical_primitive_type {
+ ($Native: ident, $TypeId: ident, $DataType: ident) => {
+        // We need to define it as an empty struct `struct DataType {}` instead of a unit
+        // struct `struct DataType;` to ensure the serialized JSON string is compatible with
+        // the previous implementation.
+ #[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize)]
+ pub struct $DataType {}
+
+ impl LogicalPrimitiveType for $DataType {
+ type ArrowPrimitive = arrow::datatypes::$DataType;
+ type Native = $Native;
+ type Wrapper = $Native;
+
+ fn build_data_type() -> ConcreteDataType {
+ ConcreteDataType::$TypeId($DataType::default())
+ }
+
+ fn type_name() -> &'static str {
+ stringify!($TypeId)
+ }
+
+ fn cast_vector(vector: &dyn Vector) -> Result<&PrimitiveVector<$DataType>> {
+ vector
+ .as_any()
+ .downcast_ref::<PrimitiveVector<$DataType>>()
+ .with_context(|| error::CastTypeSnafu {
+ msg: format!(
+ "Failed to cast {} to vector of primitive type {}",
+ vector.vector_type_name(),
+ stringify!($TypeId)
+ ),
+ })
+ }
+
+ fn cast_value_ref(value: ValueRef) -> Result<Option<$Native>> {
+ match value {
+ ValueRef::Null => Ok(None),
+ ValueRef::$TypeId(v) => Ok(Some(v.into())),
+ other => error::CastTypeSnafu {
+ msg: format!(
+ "Failed to cast value {:?} to primitive type {}",
+ other,
+ stringify!($TypeId),
+ ),
+ }
+ .fail(),
+ }
+ }
+ }
+ };
+}
+
+macro_rules! define_non_timestamp_primitive {
+ ($Native: ident, $TypeId: ident, $DataType: ident) => {
+ define_logical_primitive_type!($Native, $TypeId, $DataType);
+
+ impl DataType for $DataType {
+ fn name(&self) -> &str {
+ stringify!($TypeId)
+ }
+
+ fn logical_type_id(&self) -> LogicalTypeId {
+ LogicalTypeId::$TypeId
+ }
+
+ fn default_value(&self) -> Value {
+ $Native::default().into()
+ }
+
+ fn as_arrow_type(&self) -> ArrowDataType {
+ ArrowDataType::$TypeId
+ }
+
+ fn create_mutable_vector(&self, capacity: usize) -> Box<dyn MutableVector> {
+ Box::new(PrimitiveVectorBuilder::<$DataType>::with_capacity(capacity))
+ }
+
+ fn is_timestamp_compatible(&self) -> bool {
+ false
+ }
+ }
+ };
+}
+
+define_non_timestamp_primitive!(u8, UInt8, UInt8Type);
+define_non_timestamp_primitive!(u16, UInt16, UInt16Type);
+define_non_timestamp_primitive!(u32, UInt32, UInt32Type);
+define_non_timestamp_primitive!(u64, UInt64, UInt64Type);
+define_non_timestamp_primitive!(i8, Int8, Int8Type);
+define_non_timestamp_primitive!(i16, Int16, Int16Type);
+define_non_timestamp_primitive!(i32, Int32, Int32Type);
+define_non_timestamp_primitive!(f32, Float32, Float32Type);
+define_non_timestamp_primitive!(f64, Float64, Float64Type);
+
+// Timestamp primitive:
+define_logical_primitive_type!(i64, Int64, Int64Type);
+
+impl DataType for Int64Type {
+ fn name(&self) -> &str {
+ "Int64"
+ }
+
+ fn logical_type_id(&self) -> LogicalTypeId {
+ LogicalTypeId::Int64
+ }
+
+ fn default_value(&self) -> Value {
+ Value::Int64(0)
+ }
+
+ fn as_arrow_type(&self) -> ArrowDataType {
+ ArrowDataType::Int64
+ }
+
+ fn create_mutable_vector(&self, capacity: usize) -> Box<dyn MutableVector> {
+ Box::new(PrimitiveVectorBuilder::<Int64Type>::with_capacity(capacity))
+ }
+
+ fn is_timestamp_compatible(&self) -> bool {
+ true
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::collections::BinaryHeap;
+
+ use super::*;
+
+ #[test]
+ fn test_ord_primitive() {
+ struct Foo<T>
+ where
+ T: WrapperType,
+ {
+ heap: BinaryHeap<OrdPrimitive<T>>,
+ }
+
+ impl<T> Foo<T>
+ where
+ T: WrapperType,
+ {
+ fn push(&mut self, value: T) {
+ let value = OrdPrimitive::<T>(value);
+ self.heap.push(value);
+ }
+ }
+
+ macro_rules! test {
+ ($Type:ident) => {
+ let mut foo = Foo::<$Type> {
+ heap: BinaryHeap::new(),
+ };
+ foo.push($Type::default());
+ };
+ }
+
+ test!(u8);
+ test!(u16);
+ test!(u32);
+ test!(u64);
+ test!(i8);
+ test!(i16);
+ test!(i32);
+ test!(i64);
+ test!(f32);
+ test!(f64);
+ }
+}
diff --git a/src/datatypes2/src/types/string_type.rs b/src/datatypes2/src/types/string_type.rs
new file mode 100644
index 000000000000..799cbbbdd345
--- /dev/null
+++ b/src/datatypes2/src/types/string_type.rs
@@ -0,0 +1,60 @@
+// Copyright 2022 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::sync::Arc;
+
+use arrow::datatypes::DataType as ArrowDataType;
+use common_base::bytes::StringBytes;
+use serde::{Deserialize, Serialize};
+
+use crate::data_type::{DataType, DataTypeRef};
+use crate::prelude::ScalarVectorBuilder;
+use crate::type_id::LogicalTypeId;
+use crate::value::Value;
+use crate::vectors::{MutableVector, StringVectorBuilder};
+
+#[derive(Debug, Default, Clone, PartialEq, Eq, Serialize, Deserialize)]
+pub struct StringType;
+
+impl StringType {
+ pub fn arc() -> DataTypeRef {
+ Arc::new(Self)
+ }
+}
+
+impl DataType for StringType {
+ fn name(&self) -> &str {
+ "String"
+ }
+
+ fn logical_type_id(&self) -> LogicalTypeId {
+ LogicalTypeId::String
+ }
+
+ fn default_value(&self) -> Value {
+ StringBytes::default().into()
+ }
+
+ fn as_arrow_type(&self) -> ArrowDataType {
+ ArrowDataType::Utf8
+ }
+
+ fn create_mutable_vector(&self, capacity: usize) -> Box<dyn MutableVector> {
+ Box::new(StringVectorBuilder::with_capacity(capacity))
+ }
+
+ fn is_timestamp_compatible(&self) -> bool {
+ false
+ }
+}
diff --git a/src/datatypes2/src/types/timestamp_type.rs b/src/datatypes2/src/types/timestamp_type.rs
new file mode 100644
index 000000000000..fe86eeb8fdbc
--- /dev/null
+++ b/src/datatypes2/src/types/timestamp_type.rs
@@ -0,0 +1,140 @@
+// Copyright 2022 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use arrow::datatypes::{
+ DataType as ArrowDataType, TimeUnit as ArrowTimeUnit,
+ TimestampMicrosecondType as ArrowTimestampMicrosecondType,
+ TimestampMillisecondType as ArrowTimestampMillisecondType,
+ TimestampNanosecondType as ArrowTimestampNanosecondType,
+ TimestampSecondType as ArrowTimestampSecondType,
+};
+use common_time::timestamp::TimeUnit;
+use common_time::Timestamp;
+use enum_dispatch::enum_dispatch;
+use paste::paste;
+use serde::{Deserialize, Serialize};
+use snafu::OptionExt;
+
+use crate::data_type::ConcreteDataType;
+use crate::error;
+use crate::prelude::{
+ DataType, LogicalTypeId, MutableVector, ScalarVectorBuilder, Value, ValueRef, Vector,
+};
+use crate::timestamp::{
+ TimestampMicrosecond, TimestampMillisecond, TimestampNanosecond, TimestampSecond,
+};
+use crate::types::LogicalPrimitiveType;
+use crate::vectors::{
+ PrimitiveVector, TimestampMicrosecondVector, TimestampMicrosecondVectorBuilder,
+ TimestampMillisecondVector, TimestampMillisecondVectorBuilder, TimestampNanosecondVector,
+ TimestampNanosecondVectorBuilder, TimestampSecondVector, TimestampSecondVectorBuilder,
+};
+
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
+#[enum_dispatch(DataType)]
+pub enum TimestampType {
+ Second(TimestampSecondType),
+ Millisecond(TimestampMillisecondType),
+ Microsecond(TimestampMicrosecondType),
+ Nanosecond(TimestampNanosecondType),
+}
+
+macro_rules! impl_data_type_for_timestamp {
+ ($unit: ident) => {
+ paste! {
+ #[derive(Debug, Default, Clone, PartialEq, Eq, Serialize, Deserialize)]
+ pub struct [<Timestamp $unit Type>];
+
+ impl DataType for [<Timestamp $unit Type>] {
+ fn name(&self) -> &str {
+ stringify!([<Timestamp $unit Type>])
+ }
+
+ fn logical_type_id(&self) -> LogicalTypeId {
+ LogicalTypeId::[<Timestamp $unit>]
+ }
+
+ fn default_value(&self) -> Value {
+ Value::Timestamp(Timestamp::new(0, TimeUnit::$unit))
+ }
+
+ fn as_arrow_type(&self) -> ArrowDataType {
+ ArrowDataType::Timestamp(ArrowTimeUnit::$unit, None)
+ }
+
+ fn create_mutable_vector(&self, capacity: usize) -> Box<dyn MutableVector> {
+ Box::new([<Timestamp $unit Vector Builder>]::with_capacity(capacity))
+ }
+
+ fn is_timestamp_compatible(&self) -> bool {
+ true
+ }
+ }
+
+
+ impl LogicalPrimitiveType for [<Timestamp $unit Type>] {
+ type ArrowPrimitive = [<Arrow Timestamp $unit Type>];
+ type Native = i64;
+ type Wrapper = [<Timestamp $unit>];
+
+ fn build_data_type() -> ConcreteDataType {
+ ConcreteDataType::Timestamp(TimestampType::$unit(
+ [<Timestamp $unit Type>]::default(),
+ ))
+ }
+
+ fn type_name() -> &'static str {
+ stringify!([<Timestamp $unit Type>])
+ }
+
+ fn cast_vector(vector: &dyn Vector) -> crate::Result<&PrimitiveVector<Self>> {
+ vector
+ .as_any()
+ .downcast_ref::<[<Timestamp $unit Vector>]>()
+ .with_context(|| error::CastTypeSnafu {
+ msg: format!(
+ "Failed to cast {} to {}",
+ vector.vector_type_name(), stringify!([<Timestamp $unit Vector>])
+ ),
+ })
+ }
+
+ fn cast_value_ref(value: ValueRef) -> crate::Result<Option<Self::Wrapper>> {
+ match value {
+ ValueRef::Null => Ok(None),
+ ValueRef::Timestamp(t) => match t.unit() {
+ TimeUnit::$unit => Ok(Some([<Timestamp $unit>](t))),
+ other => error::CastTypeSnafu {
+ msg: format!(
+ "Failed to cast Timestamp value with different unit {:?} to {}",
+ other, stringify!([<Timestamp $unit>])
+ ),
+ }
+ .fail(),
+ },
+ other => error::CastTypeSnafu {
+ msg: format!("Failed to cast value {:?} to {}", other, stringify!([<Timestamp $unit>])),
+ }
+ .fail(),
+ }
+ }
+ }
+ }
+ }
+}
+
+impl_data_type_for_timestamp!(Nanosecond);
+impl_data_type_for_timestamp!(Second);
+impl_data_type_for_timestamp!(Millisecond);
+impl_data_type_for_timestamp!(Microsecond);
diff --git a/src/datatypes2/src/value.rs b/src/datatypes2/src/value.rs
new file mode 100644
index 000000000000..bade88d419af
--- /dev/null
+++ b/src/datatypes2/src/value.rs
@@ -0,0 +1,1275 @@
+// Copyright 2022 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::cmp::Ordering;
+use std::fmt::{Display, Formatter};
+
+use common_base::bytes::{Bytes, StringBytes};
+use common_time::date::Date;
+use common_time::datetime::DateTime;
+use common_time::timestamp::{TimeUnit, Timestamp};
+use datafusion_common::ScalarValue;
+pub use ordered_float::OrderedFloat;
+use serde::{Deserialize, Serialize};
+
+use crate::error::{self, Result};
+use crate::prelude::*;
+use crate::type_id::LogicalTypeId;
+use crate::vectors::ListVector;
+
+pub type OrderedF32 = OrderedFloat<f32>;
+pub type OrderedF64 = OrderedFloat<f64>;
+
+/// Value holds a single arbitrary value of any [DataType](crate::data_type::DataType).
+///
+/// Comparison between values of different types (except Null) is not allowed.
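+///
+/// A minimal construction sketch (an `ignore`d doctest; assumes `Value` and
+/// `ConcreteDataType` are in scope):
+///
+/// ```ignore
+/// // Plain Rust values convert into `Value` through the `From` implementations below.
+/// let v: Value = 42i32.into();
+/// assert_eq!(Value::Int32(42), v);
+/// assert_eq!(ConcreteDataType::int32_datatype(), v.data_type());
+/// ```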
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
+pub enum Value {
+ Null,
+
+ // Numeric types:
+ Boolean(bool),
+ UInt8(u8),
+ UInt16(u16),
+ UInt32(u32),
+ UInt64(u64),
+ Int8(i8),
+ Int16(i16),
+ Int32(i32),
+ Int64(i64),
+ Float32(OrderedF32),
+ Float64(OrderedF64),
+
+ // String types:
+ String(StringBytes),
+ Binary(Bytes),
+
+ // Date & Time types:
+ Date(Date),
+ DateTime(DateTime),
+ Timestamp(Timestamp),
+
+ List(ListValue),
+}
+
+impl Display for Value {
+ fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+ match self {
+ Value::Null => write!(f, "{}", self.data_type().name()),
+ Value::Boolean(v) => write!(f, "{}", v),
+ Value::UInt8(v) => write!(f, "{}", v),
+ Value::UInt16(v) => write!(f, "{}", v),
+ Value::UInt32(v) => write!(f, "{}", v),
+ Value::UInt64(v) => write!(f, "{}", v),
+ Value::Int8(v) => write!(f, "{}", v),
+ Value::Int16(v) => write!(f, "{}", v),
+ Value::Int32(v) => write!(f, "{}", v),
+ Value::Int64(v) => write!(f, "{}", v),
+ Value::Float32(v) => write!(f, "{}", v),
+ Value::Float64(v) => write!(f, "{}", v),
+ Value::String(v) => write!(f, "{}", v.as_utf8()),
+ Value::Binary(v) => {
+ let hex = v
+ .iter()
+ .map(|b| format!("{:02x}", b))
+ .collect::<Vec<String>>()
+ .join("");
+ write!(f, "{}", hex)
+ }
+ Value::Date(v) => write!(f, "{}", v),
+ Value::DateTime(v) => write!(f, "{}", v),
+ Value::Timestamp(v) => write!(f, "{}", v.to_iso8601_string()),
+ Value::List(v) => {
+ let default = Box::new(vec![]);
+ let items = v.items().as_ref().unwrap_or(&default);
+ let items = items
+ .iter()
+ .map(|i| i.to_string())
+ .collect::<Vec<String>>()
+ .join(", ");
+ write!(f, "{}[{}]", v.datatype.name(), items)
+ }
+ }
+ }
+}
+
+impl Value {
+ /// Returns data type of the value.
+ ///
+ /// # Panics
+ /// Panics if the data type is not supported.
+ pub fn data_type(&self) -> ConcreteDataType {
+ // TODO(yingwen): Implement this once all data types are implemented.
+ match self {
+ Value::Null => ConcreteDataType::null_datatype(),
+ Value::Boolean(_) => ConcreteDataType::boolean_datatype(),
+ Value::UInt8(_) => ConcreteDataType::uint8_datatype(),
+ Value::UInt16(_) => ConcreteDataType::uint16_datatype(),
+ Value::UInt32(_) => ConcreteDataType::uint32_datatype(),
+ Value::UInt64(_) => ConcreteDataType::uint64_datatype(),
+ Value::Int8(_) => ConcreteDataType::int8_datatype(),
+ Value::Int16(_) => ConcreteDataType::int16_datatype(),
+ Value::Int32(_) => ConcreteDataType::int32_datatype(),
+ Value::Int64(_) => ConcreteDataType::int64_datatype(),
+ Value::Float32(_) => ConcreteDataType::float32_datatype(),
+ Value::Float64(_) => ConcreteDataType::float64_datatype(),
+ Value::String(_) => ConcreteDataType::string_datatype(),
+ Value::Binary(_) => ConcreteDataType::binary_datatype(),
+ Value::Date(_) => ConcreteDataType::date_datatype(),
+ Value::DateTime(_) => ConcreteDataType::datetime_datatype(),
+ Value::Timestamp(v) => ConcreteDataType::timestamp_datatype(v.unit()),
+ Value::List(list) => ConcreteDataType::list_datatype(list.datatype().clone()),
+ }
+ }
+
+ /// Returns true if this is a null value.
+ pub fn is_null(&self) -> bool {
+ matches!(self, Value::Null)
+ }
+
+ /// Cast itself to [ListValue].
+ pub fn as_list(&self) -> Result<Option<&ListValue>> {
+ match self {
+ Value::Null => Ok(None),
+ Value::List(v) => Ok(Some(v)),
+ other => error::CastTypeSnafu {
+ msg: format!("Failed to cast {:?} to list value", other),
+ }
+ .fail(),
+ }
+ }
+
+ /// Cast itself to [ValueRef].
+ pub fn as_value_ref(&self) -> ValueRef {
+ match self {
+ Value::Null => ValueRef::Null,
+ Value::Boolean(v) => ValueRef::Boolean(*v),
+ Value::UInt8(v) => ValueRef::UInt8(*v),
+ Value::UInt16(v) => ValueRef::UInt16(*v),
+ Value::UInt32(v) => ValueRef::UInt32(*v),
+ Value::UInt64(v) => ValueRef::UInt64(*v),
+ Value::Int8(v) => ValueRef::Int8(*v),
+ Value::Int16(v) => ValueRef::Int16(*v),
+ Value::Int32(v) => ValueRef::Int32(*v),
+ Value::Int64(v) => ValueRef::Int64(*v),
+ Value::Float32(v) => ValueRef::Float32(*v),
+ Value::Float64(v) => ValueRef::Float64(*v),
+ Value::String(v) => ValueRef::String(v.as_utf8()),
+ Value::Binary(v) => ValueRef::Binary(v),
+ Value::Date(v) => ValueRef::Date(*v),
+ Value::DateTime(v) => ValueRef::DateTime(*v),
+ Value::List(v) => ValueRef::List(ListValueRef::Ref { val: v }),
+ Value::Timestamp(v) => ValueRef::Timestamp(*v),
+ }
+ }
+
+ /// Returns the logical type of the value.
+ pub fn logical_type_id(&self) -> LogicalTypeId {
+ match self {
+ Value::Null => LogicalTypeId::Null,
+ Value::Boolean(_) => LogicalTypeId::Boolean,
+ Value::UInt8(_) => LogicalTypeId::UInt8,
+ Value::UInt16(_) => LogicalTypeId::UInt16,
+ Value::UInt32(_) => LogicalTypeId::UInt32,
+ Value::UInt64(_) => LogicalTypeId::UInt64,
+ Value::Int8(_) => LogicalTypeId::Int8,
+ Value::Int16(_) => LogicalTypeId::Int16,
+ Value::Int32(_) => LogicalTypeId::Int32,
+ Value::Int64(_) => LogicalTypeId::Int64,
+ Value::Float32(_) => LogicalTypeId::Float32,
+ Value::Float64(_) => LogicalTypeId::Float64,
+ Value::String(_) => LogicalTypeId::String,
+ Value::Binary(_) => LogicalTypeId::Binary,
+ Value::List(_) => LogicalTypeId::List,
+ Value::Date(_) => LogicalTypeId::Date,
+ Value::DateTime(_) => LogicalTypeId::DateTime,
+ Value::Timestamp(t) => match t.unit() {
+ TimeUnit::Second => LogicalTypeId::TimestampSecond,
+ TimeUnit::Millisecond => LogicalTypeId::TimestampMillisecond,
+ TimeUnit::Microsecond => LogicalTypeId::TimestampMicrosecond,
+ TimeUnit::Nanosecond => LogicalTypeId::TimestampNanosecond,
+ },
+ }
+ }
+}
+
+macro_rules! impl_ord_for_value_like {
+ ($Type: ident, $left: ident, $right: ident) => {
+ if $left.is_null() && !$right.is_null() {
+ return Ordering::Less;
+ } else if !$left.is_null() && $right.is_null() {
+ return Ordering::Greater;
+ } else {
+ match ($left, $right) {
+ ($Type::Null, $Type::Null) => Ordering::Equal,
+ ($Type::Boolean(v1), $Type::Boolean(v2)) => v1.cmp(v2),
+ ($Type::UInt8(v1), $Type::UInt8(v2)) => v1.cmp(v2),
+ ($Type::UInt16(v1), $Type::UInt16(v2)) => v1.cmp(v2),
+ ($Type::UInt32(v1), $Type::UInt32(v2)) => v1.cmp(v2),
+ ($Type::UInt64(v1), $Type::UInt64(v2)) => v1.cmp(v2),
+ ($Type::Int8(v1), $Type::Int8(v2)) => v1.cmp(v2),
+ ($Type::Int16(v1), $Type::Int16(v2)) => v1.cmp(v2),
+ ($Type::Int32(v1), $Type::Int32(v2)) => v1.cmp(v2),
+ ($Type::Int64(v1), $Type::Int64(v2)) => v1.cmp(v2),
+ ($Type::Float32(v1), $Type::Float32(v2)) => v1.cmp(v2),
+ ($Type::Float64(v1), $Type::Float64(v2)) => v1.cmp(v2),
+ ($Type::String(v1), $Type::String(v2)) => v1.cmp(v2),
+ ($Type::Binary(v1), $Type::Binary(v2)) => v1.cmp(v2),
+ ($Type::Date(v1), $Type::Date(v2)) => v1.cmp(v2),
+ ($Type::DateTime(v1), $Type::DateTime(v2)) => v1.cmp(v2),
+ ($Type::Timestamp(v1), $Type::Timestamp(v2)) => v1.cmp(v2),
+ ($Type::List(v1), $Type::List(v2)) => v1.cmp(v2),
+ _ => panic!(
+ "Cannot compare different values {:?} and {:?}",
+ $left, $right
+ ),
+ }
+ }
+ };
+}
+
+impl PartialOrd for Value {
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ Some(self.cmp(other))
+ }
+}
+
+impl Ord for Value {
+ fn cmp(&self, other: &Self) -> Ordering {
+ impl_ord_for_value_like!(Value, self, other)
+ }
+}
+
+macro_rules! impl_value_from {
+ ($Variant: ident, $Type: ident) => {
+ impl From<$Type> for Value {
+ fn from(value: $Type) -> Self {
+ Value::$Variant(value.into())
+ }
+ }
+
+ impl From<Option<$Type>> for Value {
+ fn from(value: Option<$Type>) -> Self {
+ match value {
+ Some(v) => Value::$Variant(v.into()),
+ None => Value::Null,
+ }
+ }
+ }
+ };
+}
+
+impl_value_from!(Boolean, bool);
+impl_value_from!(UInt8, u8);
+impl_value_from!(UInt16, u16);
+impl_value_from!(UInt32, u32);
+impl_value_from!(UInt64, u64);
+impl_value_from!(Int8, i8);
+impl_value_from!(Int16, i16);
+impl_value_from!(Int32, i32);
+impl_value_from!(Int64, i64);
+impl_value_from!(Float32, f32);
+impl_value_from!(Float64, f64);
+impl_value_from!(String, StringBytes);
+impl_value_from!(Binary, Bytes);
+impl_value_from!(Date, Date);
+impl_value_from!(DateTime, DateTime);
+impl_value_from!(Timestamp, Timestamp);
+
+impl From<String> for Value {
+ fn from(string: String) -> Value {
+ Value::String(string.into())
+ }
+}
+
+impl From<&str> for Value {
+ fn from(string: &str) -> Value {
+ Value::String(string.into())
+ }
+}
+
+impl From<Vec<u8>> for Value {
+ fn from(bytes: Vec<u8>) -> Value {
+ Value::Binary(bytes.into())
+ }
+}
+
+impl From<&[u8]> for Value {
+ fn from(bytes: &[u8]) -> Value {
+ Value::Binary(bytes.into())
+ }
+}
+
+impl TryFrom<Value> for serde_json::Value {
+ type Error = serde_json::Error;
+
+ fn try_from(value: Value) -> serde_json::Result<serde_json::Value> {
+ let json_value = match value {
+ Value::Null => serde_json::Value::Null,
+ Value::Boolean(v) => serde_json::Value::Bool(v),
+ Value::UInt8(v) => serde_json::Value::from(v),
+ Value::UInt16(v) => serde_json::Value::from(v),
+ Value::UInt32(v) => serde_json::Value::from(v),
+ Value::UInt64(v) => serde_json::Value::from(v),
+ Value::Int8(v) => serde_json::Value::from(v),
+ Value::Int16(v) => serde_json::Value::from(v),
+ Value::Int32(v) => serde_json::Value::from(v),
+ Value::Int64(v) => serde_json::Value::from(v),
+ Value::Float32(v) => serde_json::Value::from(v.0),
+ Value::Float64(v) => serde_json::Value::from(v.0),
+ Value::String(bytes) => serde_json::Value::String(bytes.as_utf8().to_string()),
+ Value::Binary(bytes) => serde_json::to_value(bytes)?,
+ Value::Date(v) => serde_json::Value::Number(v.val().into()),
+ Value::DateTime(v) => serde_json::Value::Number(v.val().into()),
+ Value::List(v) => serde_json::to_value(v)?,
+ Value::Timestamp(v) => serde_json::to_value(v.value())?,
+ };
+
+ Ok(json_value)
+ }
+}
+
+// TODO(yingwen): Consider removing the `datatype` field from `ListValue`.
+/// List value.
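+///
+/// A minimal construction sketch (an `ignore`d doctest; assumes the surrounding types are in scope):
+///
+/// ```ignore
+/// // The datatype tags the element type so that empty lists stay distinguishable.
+/// let list = ListValue::new(
+///     Some(Box::new(vec![Value::Int32(1), Value::Int32(2)])),
+///     ConcreteDataType::int32_datatype(),
+/// );
+/// assert_eq!(&ConcreteDataType::int32_datatype(), list.datatype());
+/// ```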
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct ListValue {
+ /// List of nested Values (boxed to reduce size_of(Value))
+ #[allow(clippy::box_collection)]
+ items: Option<Box<Vec<Value>>>,
+    /// Inner values' datatype, used to distinguish empty lists of different datatypes.
+    /// Due to a DataFusion restriction, the null datatype cannot be used for an empty list.
+ datatype: ConcreteDataType,
+}
+
+impl Eq for ListValue {}
+
+impl ListValue {
+ pub fn new(items: Option<Box<Vec<Value>>>, datatype: ConcreteDataType) -> Self {
+ Self { items, datatype }
+ }
+
+ pub fn items(&self) -> &Option<Box<Vec<Value>>> {
+ &self.items
+ }
+
+ pub fn datatype(&self) -> &ConcreteDataType {
+ &self.datatype
+ }
+}
+
+impl Default for ListValue {
+ fn default() -> ListValue {
+ ListValue::new(None, ConcreteDataType::null_datatype())
+ }
+}
+
+impl PartialOrd for ListValue {
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ Some(self.cmp(other))
+ }
+}
+
+impl Ord for ListValue {
+ fn cmp(&self, other: &Self) -> Ordering {
+ assert_eq!(
+ self.datatype, other.datatype,
+ "Cannot compare different datatypes!"
+ );
+ self.items.cmp(&other.items)
+ }
+}
+
+impl TryFrom<ScalarValue> for Value {
+ type Error = error::Error;
+
+ fn try_from(v: ScalarValue) -> Result<Self> {
+ let v = match v {
+ ScalarValue::Null => Value::Null,
+ ScalarValue::Boolean(b) => Value::from(b),
+ ScalarValue::Float32(f) => Value::from(f),
+ ScalarValue::Float64(f) => Value::from(f),
+ ScalarValue::Int8(i) => Value::from(i),
+ ScalarValue::Int16(i) => Value::from(i),
+ ScalarValue::Int32(i) => Value::from(i),
+ ScalarValue::Int64(i) => Value::from(i),
+ ScalarValue::UInt8(u) => Value::from(u),
+ ScalarValue::UInt16(u) => Value::from(u),
+ ScalarValue::UInt32(u) => Value::from(u),
+ ScalarValue::UInt64(u) => Value::from(u),
+ ScalarValue::Utf8(s) | ScalarValue::LargeUtf8(s) => {
+ Value::from(s.map(StringBytes::from))
+ }
+ ScalarValue::Binary(b)
+ | ScalarValue::LargeBinary(b)
+ | ScalarValue::FixedSizeBinary(_, b) => Value::from(b.map(Bytes::from)),
+ ScalarValue::List(vs, field) => {
+ let items = if let Some(vs) = vs {
+ let vs = vs
+ .into_iter()
+ .map(ScalarValue::try_into)
+ .collect::<Result<_>>()?;
+ Some(Box::new(vs))
+ } else {
+ None
+ };
+ let datatype = ConcreteDataType::try_from(field.data_type())?;
+ Value::List(ListValue::new(items, datatype))
+ }
+ ScalarValue::Date32(d) => d.map(|x| Value::Date(Date::new(x))).unwrap_or(Value::Null),
+ ScalarValue::Date64(d) => d
+ .map(|x| Value::DateTime(DateTime::new(x)))
+ .unwrap_or(Value::Null),
+ ScalarValue::TimestampSecond(t, _) => t
+ .map(|x| Value::Timestamp(Timestamp::new(x, TimeUnit::Second)))
+ .unwrap_or(Value::Null),
+ ScalarValue::TimestampMillisecond(t, _) => t
+ .map(|x| Value::Timestamp(Timestamp::new(x, TimeUnit::Millisecond)))
+ .unwrap_or(Value::Null),
+ ScalarValue::TimestampMicrosecond(t, _) => t
+ .map(|x| Value::Timestamp(Timestamp::new(x, TimeUnit::Microsecond)))
+ .unwrap_or(Value::Null),
+ ScalarValue::TimestampNanosecond(t, _) => t
+ .map(|x| Value::Timestamp(Timestamp::new(x, TimeUnit::Nanosecond)))
+ .unwrap_or(Value::Null),
+ ScalarValue::Decimal128(_, _, _)
+ | ScalarValue::Time64(_)
+ | ScalarValue::IntervalYearMonth(_)
+ | ScalarValue::IntervalDayTime(_)
+ | ScalarValue::IntervalMonthDayNano(_)
+ | ScalarValue::Struct(_, _)
+ | ScalarValue::Dictionary(_, _) => {
+ return error::UnsupportedArrowTypeSnafu {
+ arrow_type: v.get_datatype(),
+ }
+ .fail()
+ }
+ };
+ Ok(v)
+ }
+}
+
+/// Reference to [Value].
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum ValueRef<'a> {
+ Null,
+
+ // Numeric types:
+ Boolean(bool),
+ UInt8(u8),
+ UInt16(u16),
+ UInt32(u32),
+ UInt64(u64),
+ Int8(i8),
+ Int16(i16),
+ Int32(i32),
+ Int64(i64),
+ Float32(OrderedF32),
+ Float64(OrderedF64),
+
+ // String types:
+ String(&'a str),
+ Binary(&'a [u8]),
+
+ // Date & Time types:
+ Date(Date),
+ DateTime(DateTime),
+ Timestamp(Timestamp),
+ List(ListValueRef<'a>),
+}
+
+macro_rules! impl_as_for_value_ref {
+ ($value: ident, $Variant: ident) => {
+ match $value {
+ ValueRef::Null => Ok(None),
+ ValueRef::$Variant(v) => Ok(Some(*v)),
+ other => error::CastTypeSnafu {
+ msg: format!(
+ "Failed to cast value ref {:?} to {}",
+ other,
+ stringify!($Variant)
+ ),
+ }
+ .fail(),
+ }
+ };
+}
+
+impl<'a> ValueRef<'a> {
+ /// Returns true if this is null.
+ pub fn is_null(&self) -> bool {
+ matches!(self, ValueRef::Null)
+ }
+
+ /// Cast itself to binary slice.
+ pub fn as_binary(&self) -> Result<Option<&[u8]>> {
+ impl_as_for_value_ref!(self, Binary)
+ }
+
+ /// Cast itself to string slice.
+ pub fn as_string(&self) -> Result<Option<&str>> {
+ impl_as_for_value_ref!(self, String)
+ }
+
+ /// Cast itself to boolean.
+ pub fn as_boolean(&self) -> Result<Option<bool>> {
+ impl_as_for_value_ref!(self, Boolean)
+ }
+
+ /// Cast itself to [Date].
+ pub fn as_date(&self) -> Result<Option<Date>> {
+ impl_as_for_value_ref!(self, Date)
+ }
+
+ /// Cast itself to [DateTime].
+ pub fn as_datetime(&self) -> Result<Option<DateTime>> {
+ impl_as_for_value_ref!(self, DateTime)
+ }
+
+    /// Cast itself to [Timestamp].
+    pub fn as_timestamp(&self) -> Result<Option<Timestamp>> {
+ impl_as_for_value_ref!(self, Timestamp)
+ }
+
+ /// Cast itself to [ListValueRef].
+ pub fn as_list(&self) -> Result<Option<ListValueRef>> {
+ impl_as_for_value_ref!(self, List)
+ }
+}
+
+impl<'a> PartialOrd for ValueRef<'a> {
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ Some(self.cmp(other))
+ }
+}
+
+impl<'a> Ord for ValueRef<'a> {
+ fn cmp(&self, other: &Self) -> Ordering {
+ impl_ord_for_value_like!(ValueRef, self, other)
+ }
+}
+
+macro_rules! impl_value_ref_from {
+ ($Variant:ident, $Type:ident) => {
+ impl From<$Type> for ValueRef<'_> {
+ fn from(value: $Type) -> Self {
+ ValueRef::$Variant(value.into())
+ }
+ }
+
+ impl From<Option<$Type>> for ValueRef<'_> {
+ fn from(value: Option<$Type>) -> Self {
+ match value {
+ Some(v) => ValueRef::$Variant(v.into()),
+ None => ValueRef::Null,
+ }
+ }
+ }
+ };
+}
+
+impl_value_ref_from!(Boolean, bool);
+impl_value_ref_from!(UInt8, u8);
+impl_value_ref_from!(UInt16, u16);
+impl_value_ref_from!(UInt32, u32);
+impl_value_ref_from!(UInt64, u64);
+impl_value_ref_from!(Int8, i8);
+impl_value_ref_from!(Int16, i16);
+impl_value_ref_from!(Int32, i32);
+impl_value_ref_from!(Int64, i64);
+impl_value_ref_from!(Float32, f32);
+impl_value_ref_from!(Float64, f64);
+impl_value_ref_from!(Date, Date);
+impl_value_ref_from!(DateTime, DateTime);
+impl_value_ref_from!(Timestamp, Timestamp);
+
+impl<'a> From<&'a str> for ValueRef<'a> {
+ fn from(string: &'a str) -> ValueRef<'a> {
+ ValueRef::String(string)
+ }
+}
+
+impl<'a> From<&'a [u8]> for ValueRef<'a> {
+ fn from(bytes: &'a [u8]) -> ValueRef<'a> {
+ ValueRef::Binary(bytes)
+ }
+}
+
+impl<'a> From<Option<ListValueRef<'a>>> for ValueRef<'a> {
+ fn from(list: Option<ListValueRef>) -> ValueRef {
+ match list {
+ Some(v) => ValueRef::List(v),
+ None => ValueRef::Null,
+ }
+ }
+}
+
+/// Reference to a [ListValue].
+///
+/// For now, comparison still requires some allocation (a call to `to_value()`);
+/// it could be avoided by downcasting and comparing the underlying array slices
+/// if this ever becomes a bottleneck.
+#[derive(Debug, Clone, Copy)]
+pub enum ListValueRef<'a> {
+    // TODO(yingwen): Consider replacing this with VectorRef.
+ Indexed { vector: &'a ListVector, idx: usize },
+ Ref { val: &'a ListValue },
+}
+
+impl<'a> ListValueRef<'a> {
+    /// Converts self to [Value]. This method clones the underlying data.
+ fn to_value(self) -> Value {
+ match self {
+ ListValueRef::Indexed { vector, idx } => vector.get(idx),
+ ListValueRef::Ref { val } => Value::List(val.clone()),
+ }
+ }
+}
+
+impl<'a> PartialEq for ListValueRef<'a> {
+ fn eq(&self, other: &Self) -> bool {
+ self.to_value().eq(&other.to_value())
+ }
+}
+
+impl<'a> Eq for ListValueRef<'a> {}
+
+impl<'a> Ord for ListValueRef<'a> {
+ fn cmp(&self, other: &Self) -> Ordering {
+ // Respect the order of `Value` by converting into value before comparison.
+ self.to_value().cmp(&other.to_value())
+ }
+}
+
+impl<'a> PartialOrd for ListValueRef<'a> {
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ Some(self.cmp(other))
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use arrow::datatypes::DataType as ArrowDataType;
+ use num_traits::Float;
+
+ use super::*;
+
+ #[test]
+ fn test_try_from_scalar_value() {
+ assert_eq!(
+ Value::Boolean(true),
+ ScalarValue::Boolean(Some(true)).try_into().unwrap()
+ );
+ assert_eq!(
+ Value::Boolean(false),
+ ScalarValue::Boolean(Some(false)).try_into().unwrap()
+ );
+ assert_eq!(Value::Null, ScalarValue::Boolean(None).try_into().unwrap());
+
+ assert_eq!(
+ Value::Float32(1.0f32.into()),
+ ScalarValue::Float32(Some(1.0f32)).try_into().unwrap()
+ );
+ assert_eq!(Value::Null, ScalarValue::Float32(None).try_into().unwrap());
+
+ assert_eq!(
+ Value::Float64(2.0f64.into()),
+ ScalarValue::Float64(Some(2.0f64)).try_into().unwrap()
+ );
+ assert_eq!(Value::Null, ScalarValue::Float64(None).try_into().unwrap());
+
+ assert_eq!(
+ Value::Int8(i8::MAX),
+ ScalarValue::Int8(Some(i8::MAX)).try_into().unwrap()
+ );
+ assert_eq!(Value::Null, ScalarValue::Int8(None).try_into().unwrap());
+
+ assert_eq!(
+ Value::Int16(i16::MAX),
+ ScalarValue::Int16(Some(i16::MAX)).try_into().unwrap()
+ );
+ assert_eq!(Value::Null, ScalarValue::Int16(None).try_into().unwrap());
+
+ assert_eq!(
+ Value::Int32(i32::MAX),
+ ScalarValue::Int32(Some(i32::MAX)).try_into().unwrap()
+ );
+ assert_eq!(Value::Null, ScalarValue::Int32(None).try_into().unwrap());
+
+ assert_eq!(
+ Value::Int64(i64::MAX),
+ ScalarValue::Int64(Some(i64::MAX)).try_into().unwrap()
+ );
+ assert_eq!(Value::Null, ScalarValue::Int64(None).try_into().unwrap());
+
+ assert_eq!(
+ Value::UInt8(u8::MAX),
+ ScalarValue::UInt8(Some(u8::MAX)).try_into().unwrap()
+ );
+ assert_eq!(Value::Null, ScalarValue::UInt8(None).try_into().unwrap());
+
+ assert_eq!(
+ Value::UInt16(u16::MAX),
+ ScalarValue::UInt16(Some(u16::MAX)).try_into().unwrap()
+ );
+ assert_eq!(Value::Null, ScalarValue::UInt16(None).try_into().unwrap());
+
+ assert_eq!(
+ Value::UInt32(u32::MAX),
+ ScalarValue::UInt32(Some(u32::MAX)).try_into().unwrap()
+ );
+ assert_eq!(Value::Null, ScalarValue::UInt32(None).try_into().unwrap());
+
+ assert_eq!(
+ Value::UInt64(u64::MAX),
+ ScalarValue::UInt64(Some(u64::MAX)).try_into().unwrap()
+ );
+ assert_eq!(Value::Null, ScalarValue::UInt64(None).try_into().unwrap());
+
+ assert_eq!(
+ Value::from("hello"),
+ ScalarValue::Utf8(Some("hello".to_string()))
+ .try_into()
+ .unwrap()
+ );
+ assert_eq!(Value::Null, ScalarValue::Utf8(None).try_into().unwrap());
+
+ assert_eq!(
+ Value::from("large_hello"),
+ ScalarValue::LargeUtf8(Some("large_hello".to_string()))
+ .try_into()
+ .unwrap()
+ );
+ assert_eq!(
+ Value::Null,
+ ScalarValue::LargeUtf8(None).try_into().unwrap()
+ );
+
+ assert_eq!(
+ Value::from("world".as_bytes()),
+ ScalarValue::Binary(Some("world".as_bytes().to_vec()))
+ .try_into()
+ .unwrap()
+ );
+ assert_eq!(Value::Null, ScalarValue::Binary(None).try_into().unwrap());
+
+ assert_eq!(
+ Value::from("large_world".as_bytes()),
+ ScalarValue::LargeBinary(Some("large_world".as_bytes().to_vec()))
+ .try_into()
+ .unwrap()
+ );
+ assert_eq!(
+ Value::Null,
+ ScalarValue::LargeBinary(None).try_into().unwrap()
+ );
+
+ assert_eq!(
+ Value::List(ListValue::new(
+ Some(Box::new(vec![Value::Int32(1), Value::Null])),
+ ConcreteDataType::int32_datatype()
+ )),
+ ScalarValue::new_list(
+ Some(vec![ScalarValue::Int32(Some(1)), ScalarValue::Int32(None)]),
+ ArrowDataType::Int32,
+ )
+ .try_into()
+ .unwrap()
+ );
+ assert_eq!(
+ Value::List(ListValue::new(None, ConcreteDataType::uint32_datatype())),
+ ScalarValue::new_list(None, ArrowDataType::UInt32)
+ .try_into()
+ .unwrap()
+ );
+
+ assert_eq!(
+ Value::Date(Date::new(123)),
+ ScalarValue::Date32(Some(123)).try_into().unwrap()
+ );
+ assert_eq!(Value::Null, ScalarValue::Date32(None).try_into().unwrap());
+
+ assert_eq!(
+ Value::DateTime(DateTime::new(456)),
+ ScalarValue::Date64(Some(456)).try_into().unwrap()
+ );
+ assert_eq!(Value::Null, ScalarValue::Date64(None).try_into().unwrap());
+
+ assert_eq!(
+ Value::Timestamp(Timestamp::new(1, TimeUnit::Second)),
+ ScalarValue::TimestampSecond(Some(1), None)
+ .try_into()
+ .unwrap()
+ );
+ assert_eq!(
+ Value::Null,
+ ScalarValue::TimestampSecond(None, None).try_into().unwrap()
+ );
+
+ assert_eq!(
+ Value::Timestamp(Timestamp::new(1, TimeUnit::Millisecond)),
+ ScalarValue::TimestampMillisecond(Some(1), None)
+ .try_into()
+ .unwrap()
+ );
+ assert_eq!(
+ Value::Null,
+ ScalarValue::TimestampMillisecond(None, None)
+ .try_into()
+ .unwrap()
+ );
+
+ assert_eq!(
+ Value::Timestamp(Timestamp::new(1, TimeUnit::Microsecond)),
+ ScalarValue::TimestampMicrosecond(Some(1), None)
+ .try_into()
+ .unwrap()
+ );
+ assert_eq!(
+ Value::Null,
+ ScalarValue::TimestampMicrosecond(None, None)
+ .try_into()
+ .unwrap()
+ );
+
+ assert_eq!(
+ Value::Timestamp(Timestamp::new(1, TimeUnit::Nanosecond)),
+ ScalarValue::TimestampNanosecond(Some(1), None)
+ .try_into()
+ .unwrap()
+ );
+ assert_eq!(
+ Value::Null,
+ ScalarValue::TimestampNanosecond(None, None)
+ .try_into()
+ .unwrap()
+ );
+
+ let result: Result<Value> = ScalarValue::Decimal128(Some(1), 0, 0).try_into();
+        assert!(result
+            .unwrap_err()
+            .to_string()
+            .contains("Unsupported arrow data type"));
+ }
+
+ #[test]
+ fn test_value_from_inner() {
+ assert_eq!(Value::Boolean(true), Value::from(true));
+ assert_eq!(Value::Boolean(false), Value::from(false));
+
+ assert_eq!(Value::UInt8(u8::MIN), Value::from(u8::MIN));
+ assert_eq!(Value::UInt8(u8::MAX), Value::from(u8::MAX));
+
+ assert_eq!(Value::UInt16(u16::MIN), Value::from(u16::MIN));
+ assert_eq!(Value::UInt16(u16::MAX), Value::from(u16::MAX));
+
+ assert_eq!(Value::UInt32(u32::MIN), Value::from(u32::MIN));
+ assert_eq!(Value::UInt32(u32::MAX), Value::from(u32::MAX));
+
+ assert_eq!(Value::UInt64(u64::MIN), Value::from(u64::MIN));
+ assert_eq!(Value::UInt64(u64::MAX), Value::from(u64::MAX));
+
+ assert_eq!(Value::Int8(i8::MIN), Value::from(i8::MIN));
+ assert_eq!(Value::Int8(i8::MAX), Value::from(i8::MAX));
+
+ assert_eq!(Value::Int16(i16::MIN), Value::from(i16::MIN));
+ assert_eq!(Value::Int16(i16::MAX), Value::from(i16::MAX));
+
+ assert_eq!(Value::Int32(i32::MIN), Value::from(i32::MIN));
+ assert_eq!(Value::Int32(i32::MAX), Value::from(i32::MAX));
+
+ assert_eq!(Value::Int64(i64::MIN), Value::from(i64::MIN));
+ assert_eq!(Value::Int64(i64::MAX), Value::from(i64::MAX));
+
+ assert_eq!(
+ Value::Float32(OrderedFloat(f32::MIN)),
+ Value::from(f32::MIN)
+ );
+ assert_eq!(
+ Value::Float32(OrderedFloat(f32::MAX)),
+ Value::from(f32::MAX)
+ );
+
+ assert_eq!(
+ Value::Float64(OrderedFloat(f64::MIN)),
+ Value::from(f64::MIN)
+ );
+ assert_eq!(
+ Value::Float64(OrderedFloat(f64::MAX)),
+ Value::from(f64::MAX)
+ );
+
+ let string_bytes = StringBytes::from("hello");
+ assert_eq!(
+ Value::String(string_bytes.clone()),
+ Value::from(string_bytes)
+ );
+
+ let bytes = Bytes::from(b"world".as_slice());
+ assert_eq!(Value::Binary(bytes.clone()), Value::from(bytes));
+ }
+
+ fn check_type_and_value(data_type: &ConcreteDataType, value: &Value) {
+ assert_eq!(*data_type, value.data_type());
+ assert_eq!(data_type.logical_type_id(), value.logical_type_id());
+ }
+
+ #[test]
+ fn test_value_datatype() {
+ check_type_and_value(&ConcreteDataType::boolean_datatype(), &Value::Boolean(true));
+ check_type_and_value(&ConcreteDataType::uint8_datatype(), &Value::UInt8(u8::MIN));
+ check_type_and_value(
+ &ConcreteDataType::uint16_datatype(),
+ &Value::UInt16(u16::MIN),
+ );
+ check_type_and_value(
+ &ConcreteDataType::uint16_datatype(),
+ &Value::UInt16(u16::MAX),
+ );
+ check_type_and_value(
+ &ConcreteDataType::uint32_datatype(),
+ &Value::UInt32(u32::MIN),
+ );
+ check_type_and_value(
+ &ConcreteDataType::uint64_datatype(),
+ &Value::UInt64(u64::MIN),
+ );
+ check_type_and_value(&ConcreteDataType::int8_datatype(), &Value::Int8(i8::MIN));
+ check_type_and_value(&ConcreteDataType::int16_datatype(), &Value::Int16(i16::MIN));
+ check_type_and_value(&ConcreteDataType::int32_datatype(), &Value::Int32(i32::MIN));
+ check_type_and_value(&ConcreteDataType::int64_datatype(), &Value::Int64(i64::MIN));
+ check_type_and_value(
+ &ConcreteDataType::float32_datatype(),
+ &Value::Float32(OrderedFloat(f32::MIN)),
+ );
+ check_type_and_value(
+ &ConcreteDataType::float64_datatype(),
+ &Value::Float64(OrderedFloat(f64::MIN)),
+ );
+ check_type_and_value(
+ &ConcreteDataType::string_datatype(),
+ &Value::String(StringBytes::from("hello")),
+ );
+ check_type_and_value(
+ &ConcreteDataType::binary_datatype(),
+ &Value::Binary(Bytes::from(b"world".as_slice())),
+ );
+ check_type_and_value(
+ &ConcreteDataType::list_datatype(ConcreteDataType::int32_datatype()),
+ &Value::List(ListValue::new(
+ Some(Box::new(vec![Value::Int32(10)])),
+ ConcreteDataType::int32_datatype(),
+ )),
+ );
+ check_type_and_value(
+ &ConcreteDataType::list_datatype(ConcreteDataType::null_datatype()),
+ &Value::List(ListValue::default()),
+ );
+ check_type_and_value(
+ &ConcreteDataType::date_datatype(),
+ &Value::Date(Date::new(1)),
+ );
+ check_type_and_value(
+ &ConcreteDataType::datetime_datatype(),
+ &Value::DateTime(DateTime::new(1)),
+ );
+ check_type_and_value(
+ &ConcreteDataType::timestamp_millisecond_datatype(),
+ &Value::Timestamp(Timestamp::from_millis(1)),
+ );
+ }
+
+ #[test]
+ fn test_value_from_string() {
+ let hello = "hello".to_string();
+ assert_eq!(
+ Value::String(StringBytes::from(hello.clone())),
+ Value::from(hello)
+ );
+
+ let world = "world";
+ assert_eq!(Value::String(StringBytes::from(world)), Value::from(world));
+ }
+
+ #[test]
+ fn test_value_from_bytes() {
+ let hello = b"hello".to_vec();
+ assert_eq!(
+ Value::Binary(Bytes::from(hello.clone())),
+ Value::from(hello)
+ );
+
+ let world: &[u8] = b"world";
+ assert_eq!(Value::Binary(Bytes::from(world)), Value::from(world));
+ }
+
+ fn to_json(value: Value) -> serde_json::Value {
+ value.try_into().unwrap()
+ }
+
+ #[test]
+ fn test_to_json_value() {
+ assert_eq!(serde_json::Value::Null, to_json(Value::Null));
+ assert_eq!(serde_json::Value::Bool(true), to_json(Value::Boolean(true)));
+ assert_eq!(
+ serde_json::Value::Number(20u8.into()),
+ to_json(Value::UInt8(20))
+ );
+ assert_eq!(
+ serde_json::Value::Number(20i8.into()),
+ to_json(Value::Int8(20))
+ );
+ assert_eq!(
+ serde_json::Value::Number(2000u16.into()),
+ to_json(Value::UInt16(2000))
+ );
+ assert_eq!(
+ serde_json::Value::Number(2000i16.into()),
+ to_json(Value::Int16(2000))
+ );
+ assert_eq!(
+ serde_json::Value::Number(3000u32.into()),
+ to_json(Value::UInt32(3000))
+ );
+ assert_eq!(
+ serde_json::Value::Number(3000i32.into()),
+ to_json(Value::Int32(3000))
+ );
+ assert_eq!(
+ serde_json::Value::Number(4000u64.into()),
+ to_json(Value::UInt64(4000))
+ );
+ assert_eq!(
+ serde_json::Value::Number(4000i64.into()),
+ to_json(Value::Int64(4000))
+ );
+ assert_eq!(
+ serde_json::Value::from(125.0f32),
+ to_json(Value::Float32(125.0.into()))
+ );
+ assert_eq!(
+ serde_json::Value::from(125.0f64),
+ to_json(Value::Float64(125.0.into()))
+ );
+ assert_eq!(
+ serde_json::Value::String(String::from("hello")),
+ to_json(Value::String(StringBytes::from("hello")))
+ );
+ assert_eq!(
+ serde_json::Value::from(b"world".as_slice()),
+ to_json(Value::Binary(Bytes::from(b"world".as_slice())))
+ );
+ assert_eq!(
+ serde_json::Value::Number(5000i32.into()),
+ to_json(Value::Date(Date::new(5000)))
+ );
+ assert_eq!(
+ serde_json::Value::Number(5000i64.into()),
+ to_json(Value::DateTime(DateTime::new(5000)))
+ );
+
+ assert_eq!(
+ serde_json::Value::Number(1.into()),
+ to_json(Value::Timestamp(Timestamp::from_millis(1)))
+ );
+
+ let json_value: serde_json::Value =
+ serde_json::from_str(r#"{"items":[{"Int32":123}],"datatype":{"Int32":{}}}"#).unwrap();
+ assert_eq!(
+ json_value,
+ to_json(Value::List(ListValue {
+ items: Some(Box::new(vec![Value::Int32(123)])),
+ datatype: ConcreteDataType::int32_datatype(),
+ }))
+ );
+ }
+
+ #[test]
+ fn test_null_value() {
+ assert!(Value::Null.is_null());
+ assert!(!Value::Boolean(true).is_null());
+ assert!(Value::Null < Value::Boolean(false));
+ assert!(Value::Boolean(true) > Value::Null);
+ assert!(Value::Null < Value::Int32(10));
+ assert!(Value::Int32(10) > Value::Null);
+ }
+
+ #[test]
+ fn test_null_value_ref() {
+ assert!(ValueRef::Null.is_null());
+ assert!(!ValueRef::Boolean(true).is_null());
+ assert!(ValueRef::Null < ValueRef::Boolean(false));
+ assert!(ValueRef::Boolean(true) > ValueRef::Null);
+ assert!(ValueRef::Null < ValueRef::Int32(10));
+ assert!(ValueRef::Int32(10) > ValueRef::Null);
+ }
+
+ #[test]
+ fn test_as_value_ref() {
+ macro_rules! check_as_value_ref {
+ ($Variant: ident, $data: expr) => {
+ let value = Value::$Variant($data);
+ let value_ref = value.as_value_ref();
+ let expect_ref = ValueRef::$Variant($data);
+
+ assert_eq!(expect_ref, value_ref);
+ };
+ }
+
+ assert_eq!(ValueRef::Null, Value::Null.as_value_ref());
+ check_as_value_ref!(Boolean, true);
+ check_as_value_ref!(UInt8, 123);
+ check_as_value_ref!(UInt16, 123);
+ check_as_value_ref!(UInt32, 123);
+ check_as_value_ref!(UInt64, 123);
+ check_as_value_ref!(Int8, -12);
+ check_as_value_ref!(Int16, -12);
+ check_as_value_ref!(Int32, -12);
+ check_as_value_ref!(Int64, -12);
+ check_as_value_ref!(Float32, OrderedF32::from(16.0));
+ check_as_value_ref!(Float64, OrderedF64::from(16.0));
+ check_as_value_ref!(Timestamp, Timestamp::from_millis(1));
+
+ assert_eq!(
+ ValueRef::String("hello"),
+ Value::String("hello".into()).as_value_ref()
+ );
+ assert_eq!(
+ ValueRef::Binary(b"hello"),
+ Value::Binary("hello".as_bytes().into()).as_value_ref()
+ );
+
+ check_as_value_ref!(Date, Date::new(103));
+ check_as_value_ref!(DateTime, DateTime::new(1034));
+
+ let list = ListValue {
+ items: None,
+ datatype: ConcreteDataType::int32_datatype(),
+ };
+ assert_eq!(
+ ValueRef::List(ListValueRef::Ref { val: &list }),
+ Value::List(list.clone()).as_value_ref()
+ );
+ }
+
+ #[test]
+ fn test_value_ref_as() {
+ macro_rules! check_as_null {
+ ($method: ident) => {
+ assert_eq!(None, ValueRef::Null.$method().unwrap());
+ };
+ }
+
+ check_as_null!(as_binary);
+ check_as_null!(as_string);
+ check_as_null!(as_boolean);
+ check_as_null!(as_date);
+ check_as_null!(as_datetime);
+ check_as_null!(as_list);
+
+ macro_rules! check_as_correct {
+ ($data: expr, $Variant: ident, $method: ident) => {
+ assert_eq!(Some($data), ValueRef::$Variant($data).$method().unwrap());
+ };
+ }
+
+ check_as_correct!("hello", String, as_string);
+ check_as_correct!("hello".as_bytes(), Binary, as_binary);
+ check_as_correct!(true, Boolean, as_boolean);
+ check_as_correct!(Date::new(123), Date, as_date);
+ check_as_correct!(DateTime::new(12), DateTime, as_datetime);
+ let list = ListValue {
+ items: None,
+ datatype: ConcreteDataType::int32_datatype(),
+ };
+ check_as_correct!(ListValueRef::Ref { val: &list }, List, as_list);
+
+ let wrong_value = ValueRef::Int32(12345);
+ assert!(wrong_value.as_binary().is_err());
+ assert!(wrong_value.as_string().is_err());
+ assert!(wrong_value.as_boolean().is_err());
+ assert!(wrong_value.as_date().is_err());
+ assert!(wrong_value.as_datetime().is_err());
+ assert!(wrong_value.as_list().is_err());
+ }
+
+ #[test]
+ fn test_display() {
+ assert_eq!(Value::Null.to_string(), "Null");
+ assert_eq!(Value::UInt8(8).to_string(), "8");
+ assert_eq!(Value::UInt16(16).to_string(), "16");
+ assert_eq!(Value::UInt32(32).to_string(), "32");
+ assert_eq!(Value::UInt64(64).to_string(), "64");
+ assert_eq!(Value::Int8(-8).to_string(), "-8");
+ assert_eq!(Value::Int16(-16).to_string(), "-16");
+ assert_eq!(Value::Int32(-32).to_string(), "-32");
+ assert_eq!(Value::Int64(-64).to_string(), "-64");
+ assert_eq!(Value::Float32((-32.123).into()).to_string(), "-32.123");
+ assert_eq!(Value::Float64((-64.123).into()).to_string(), "-64.123");
+ assert_eq!(Value::Float64(OrderedF64::infinity()).to_string(), "inf");
+ assert_eq!(Value::Float64(OrderedF64::nan()).to_string(), "NaN");
+ assert_eq!(Value::String(StringBytes::from("123")).to_string(), "123");
+ assert_eq!(
+ Value::Binary(Bytes::from(vec![1, 2, 3])).to_string(),
+ "010203"
+ );
+ assert_eq!(Value::Date(Date::new(0)).to_string(), "1970-01-01");
+ assert_eq!(
+ Value::DateTime(DateTime::new(0)).to_string(),
+ "1970-01-01 00:00:00"
+ );
+ assert_eq!(
+ Value::Timestamp(Timestamp::new(1000, TimeUnit::Millisecond)).to_string(),
+ "1970-01-01 00:00:01+0000"
+ );
+ assert_eq!(
+ Value::List(ListValue::new(
+ Some(Box::new(vec![Value::Int8(1), Value::Int8(2)])),
+ ConcreteDataType::int8_datatype(),
+ ))
+ .to_string(),
+ "Int8[1, 2]"
+ );
+ assert_eq!(
+ Value::List(ListValue::new(
+ Some(Box::new(vec![])),
+ ConcreteDataType::timestamp_second_datatype(),
+ ))
+ .to_string(),
+ "TimestampSecondType[]"
+ );
+ assert_eq!(
+ Value::List(ListValue::new(
+ Some(Box::new(vec![])),
+ ConcreteDataType::timestamp_millisecond_datatype(),
+ ))
+ .to_string(),
+ "TimestampMillisecondType[]"
+ );
+ assert_eq!(
+ Value::List(ListValue::new(
+ Some(Box::new(vec![])),
+ ConcreteDataType::timestamp_microsecond_datatype(),
+ ))
+ .to_string(),
+ "TimestampMicrosecondType[]"
+ );
+ assert_eq!(
+ Value::List(ListValue::new(
+ Some(Box::new(vec![])),
+ ConcreteDataType::timestamp_nanosecond_datatype(),
+ ))
+ .to_string(),
+ "TimestampNanosecondType[]"
+ );
+ }
+}
diff --git a/src/datatypes2/src/vectors.rs b/src/datatypes2/src/vectors.rs
new file mode 100644
index 000000000000..38fa762d4b3c
--- /dev/null
+++ b/src/datatypes2/src/vectors.rs
@@ -0,0 +1,309 @@
+// Copyright 2022 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::any::Any;
+use std::fmt::Debug;
+use std::sync::Arc;
+
+use arrow::array::{Array, ArrayRef};
+use snafu::ensure;
+
+use crate::data_type::ConcreteDataType;
+use crate::error::{self, Result};
+use crate::serialize::Serializable;
+use crate::value::{Value, ValueRef};
+use crate::vectors::operations::VectorOp;
+
+mod binary;
+mod boolean;
+mod constant;
+mod date;
+mod datetime;
+mod eq;
+mod helper;
+mod list;
+mod null;
+mod operations;
+mod primitive;
+mod string;
+mod timestamp;
+mod validity;
+
+pub use binary::{BinaryVector, BinaryVectorBuilder};
+pub use boolean::{BooleanVector, BooleanVectorBuilder};
+pub use constant::ConstantVector;
+pub use date::{DateVector, DateVectorBuilder};
+pub use datetime::{DateTimeVector, DateTimeVectorBuilder};
+pub use helper::Helper;
+pub use list::{ListIter, ListVector, ListVectorBuilder};
+pub use null::{NullVector, NullVectorBuilder};
+pub use primitive::{
+ Float32Vector, Float32VectorBuilder, Float64Vector, Float64VectorBuilder, Int16Vector,
+ Int16VectorBuilder, Int32Vector, Int32VectorBuilder, Int64Vector, Int64VectorBuilder,
+ Int8Vector, Int8VectorBuilder, PrimitiveIter, PrimitiveVector, PrimitiveVectorBuilder,
+ UInt16Vector, UInt16VectorBuilder, UInt32Vector, UInt32VectorBuilder, UInt64Vector,
+ UInt64VectorBuilder, UInt8Vector, UInt8VectorBuilder,
+};
+pub use string::{StringVector, StringVectorBuilder};
+pub use timestamp::{
+ TimestampMicrosecondVector, TimestampMicrosecondVectorBuilder, TimestampMillisecondVector,
+ TimestampMillisecondVectorBuilder, TimestampNanosecondVector, TimestampNanosecondVectorBuilder,
+ TimestampSecondVector, TimestampSecondVectorBuilder,
+};
+pub use validity::Validity;
+
+// TODO(yingwen): arrow 28.0 implements Clone for all arrays; we could upgrade to it and simplify
+// some code in methods such as `to_arrow_array()` and `to_boxed_arrow_array()`.
+/// Vector of data values.
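+///
+/// A minimal usage sketch (an `ignore`d doctest; mirrors the tests at the bottom of this file):
+///
+/// ```ignore
+/// // Build a vector from an arrow array through `Helper`, then read it back by index.
+/// let array: ArrayRef = Arc::new(Int32Array::from(vec![1, 2, 3]));
+/// let vector = Helper::try_into_vector(array).unwrap();
+/// assert_eq!(3, vector.len());
+/// assert_eq!(Value::Int32(2), vector.get(1));
+/// ```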
+pub trait Vector: Send + Sync + Serializable + Debug + VectorOp {
+ /// Returns the data type of the vector.
+ ///
+ /// This may require heap allocation.
+ fn data_type(&self) -> ConcreteDataType;
+
+    /// Returns the name of the vector type.
+    fn vector_type_name(&self) -> String;
+
+ /// Returns the vector as [Any](std::any::Any) so that it can be
+ /// downcast to a specific implementation.
+ fn as_any(&self) -> &dyn Any;
+
+ /// Returns number of elements in the vector.
+ fn len(&self) -> usize;
+
+ /// Returns whether the vector is empty.
+ fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+
+ /// Convert this vector to a new arrow [ArrayRef].
+ fn to_arrow_array(&self) -> ArrayRef;
+
+ /// Convert this vector to a new boxed arrow [Array].
+ fn to_boxed_arrow_array(&self) -> Box<dyn Array>;
+
+ /// Returns the validity of the Array.
+ fn validity(&self) -> Validity;
+
+    /// Returns the memory size of the vector.
+ fn memory_size(&self) -> usize;
+
+ /// The number of null slots on this [`Vector`].
+ /// # Implementation
+ /// This is `O(1)`.
+ fn null_count(&self) -> usize;
+
+    /// Returns true when it's a `ConstantVector`.
+ fn is_const(&self) -> bool {
+ false
+ }
+
+    /// Returns whether the value at the given row is null.
+ fn is_null(&self, row: usize) -> bool;
+
+    /// Returns true if the only value this vector can contain is NULL.
+ fn only_null(&self) -> bool {
+ self.null_count() == self.len()
+ }
+
+ /// Slices the `Vector`, returning a new `VectorRef`.
+ ///
+ /// # Panics
+ /// This function panics if `offset + length > self.len()`.
+ fn slice(&self, offset: usize, length: usize) -> VectorRef;
+
+    /// Returns a clone of the value at `index`.
+    ///
+    /// # Panics
+    /// Panics if `index` is out of bounds.
+ fn get(&self, index: usize) -> Value;
+
+    /// Returns a clone of the value at `index`, or an error if `index`
+    /// is out of bounds.
+ fn try_get(&self, index: usize) -> Result<Value> {
+ ensure!(
+ index < self.len(),
+ error::BadArrayAccessSnafu {
+ index,
+ size: self.len()
+ }
+ );
+ Ok(self.get(index))
+ }
+
+    /// Returns a reference to the value at `index`.
+    ///
+    /// # Panics
+    /// Panics if `index` is out of bounds.
+ fn get_ref(&self, index: usize) -> ValueRef;
+}
+
+pub type VectorRef = Arc<dyn Vector>;
+
+/// Mutable vector that could be used to build an immutable vector.
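+///
+/// A minimal usage sketch (an `ignore`d doctest; assumes `ConcreteDataType` and `ValueRef`
+/// are in scope):
+///
+/// ```ignore
+/// // Builders are usually obtained from a data type and turned back into a vector.
+/// let mut builder = ConcreteDataType::int32_datatype().create_mutable_vector(3);
+/// builder.push_value_ref(ValueRef::Int32(5)).unwrap();
+/// let vector = builder.to_vector();
+/// assert_eq!(1, vector.len());
+/// ```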
+pub trait MutableVector: Send + Sync {
+ /// Returns the data type of the vector.
+ fn data_type(&self) -> ConcreteDataType;
+
+ /// Returns the length of the vector.
+ fn len(&self) -> usize;
+
+ /// Returns whether the vector is empty.
+ fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+
+ /// Convert to Any, to enable dynamic casting.
+ fn as_any(&self) -> &dyn Any;
+
+ /// Convert to mutable Any, to enable dynamic casting.
+ fn as_mut_any(&mut self) -> &mut dyn Any;
+
+ /// Convert `self` to an (immutable) [VectorRef] and reset `self`.
+ fn to_vector(&mut self) -> VectorRef;
+
+    /// Pushes a value ref to this mutable vector.
+    ///
+    /// Returns an error if the data type does not match.
+ fn push_value_ref(&mut self, value: ValueRef) -> Result<()>;
+
+    /// Extends this mutable vector with a slice of `vector`.
+    ///
+    /// Returns an error if the data type does not match.
+ ///
+ /// # Panics
+ /// Panics if `offset + length > vector.len()`.
+ fn extend_slice_of(&mut self, vector: &dyn Vector, offset: usize, length: usize) -> Result<()>;
+}
+
+/// Helper to define `try_from_arrow_array(array: arrow::array::ArrayRef)` function.
+macro_rules! impl_try_from_arrow_array_for_vector {
+ ($Array: ident, $Vector: ident) => {
+ impl $Vector {
+ pub fn try_from_arrow_array(
+ array: impl AsRef<dyn arrow::array::Array>,
+ ) -> crate::error::Result<$Vector> {
+ use snafu::OptionExt;
+
+ let data = array
+ .as_ref()
+ .as_any()
+ .downcast_ref::<$Array>()
+ .with_context(|| crate::error::ConversionSnafu {
+ from: std::format!("{:?}", array.as_ref().data_type()),
+ })?
+ .data()
+ .clone();
+
+ let concrete_array = $Array::from(data);
+ Ok($Vector::from(concrete_array))
+ }
+ }
+ };
+}
+
+macro_rules! impl_validity_for_vector {
+ ($array: expr) => {
+ Validity::from_array_data($array.data())
+ };
+}
+
+macro_rules! impl_get_for_vector {
+ ($array: expr, $index: ident) => {
+ if $array.is_valid($index) {
+ // Safety: The index have been checked by `is_valid()`.
+ unsafe { $array.value_unchecked($index).into() }
+ } else {
+ Value::Null
+ }
+ };
+}
+
+macro_rules! impl_get_ref_for_vector {
+ ($array: expr, $index: ident) => {
+ if $array.is_valid($index) {
+ // Safety: The index have been checked by `is_valid()`.
+ unsafe { $array.value_unchecked($index).into() }
+ } else {
+ ValueRef::Null
+ }
+ };
+}
+
+macro_rules! impl_extend_for_builder {
+ ($mutable_vector: expr, $vector: ident, $VectorType: ident, $offset: ident, $length: ident) => {{
+ use snafu::OptionExt;
+
+ let sliced_vector = $vector.slice($offset, $length);
+ let concrete_vector = sliced_vector
+ .as_any()
+ .downcast_ref::<$VectorType>()
+ .with_context(|| crate::error::CastTypeSnafu {
+ msg: format!(
+ "Failed to cast vector from {} to {}",
+ $vector.vector_type_name(),
+ stringify!($VectorType)
+ ),
+ })?;
+ for value in concrete_vector.iter_data() {
+ $mutable_vector.push(value);
+ }
+ Ok(())
+ }};
+}
+
+pub(crate) use {
+ impl_extend_for_builder, impl_get_for_vector, impl_get_ref_for_vector,
+ impl_try_from_arrow_array_for_vector, impl_validity_for_vector,
+};
+
+#[cfg(test)]
+pub mod tests {
+ use arrow::array::{Array, Int32Array, UInt8Array};
+ use serde_json;
+
+ use super::*;
+ use crate::data_type::DataType;
+ use crate::types::{Int32Type, LogicalPrimitiveType};
+ use crate::vectors::helper::Helper;
+
+ #[test]
+ fn test_df_columns_to_vector() {
+ let df_column: Arc<dyn Array> = Arc::new(Int32Array::from(vec![1, 2, 3]));
+ let vector = Helper::try_into_vector(df_column).unwrap();
+ assert_eq!(
+ Int32Type::build_data_type().as_arrow_type(),
+ vector.data_type().as_arrow_type()
+ );
+ }
+
+ #[test]
+ fn test_serialize_i32_vector() {
+ let df_column: Arc<dyn Array> = Arc::new(Int32Array::from(vec![1, 2, 3]));
+ let json_value = Helper::try_into_vector(df_column)
+ .unwrap()
+ .serialize_to_json()
+ .unwrap();
+ assert_eq!("[1,2,3]", serde_json::to_string(&json_value).unwrap());
+ }
+
+ #[test]
+ fn test_serialize_i8_vector() {
+ let df_column: Arc<dyn Array> = Arc::new(UInt8Array::from(vec![1, 2, 3]));
+ let json_value = Helper::try_into_vector(df_column)
+ .unwrap()
+ .serialize_to_json()
+ .unwrap();
+ assert_eq!("[1,2,3]", serde_json::to_string(&json_value).unwrap());
+ }
+}
diff --git a/src/datatypes2/src/vectors/binary.rs b/src/datatypes2/src/vectors/binary.rs
new file mode 100644
index 000000000000..3b5defc8ec6e
--- /dev/null
+++ b/src/datatypes2/src/vectors/binary.rs
@@ -0,0 +1,353 @@
+// Copyright 2022 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::any::Any;
+use std::sync::Arc;
+
+use arrow::array::{Array, ArrayBuilder, ArrayData, ArrayIter, ArrayRef};
+use snafu::ResultExt;
+
+use crate::arrow_array::{BinaryArray, MutableBinaryArray};
+use crate::data_type::ConcreteDataType;
+use crate::error::{self, Result};
+use crate::scalars::{ScalarVector, ScalarVectorBuilder};
+use crate::serialize::Serializable;
+use crate::value::{Value, ValueRef};
+use crate::vectors::{self, MutableVector, Validity, Vector, VectorRef};
+
+/// Vector of binary strings.
+#[derive(Debug, PartialEq)]
+pub struct BinaryVector {
+ array: BinaryArray,
+}
+
+impl BinaryVector {
+ pub(crate) fn as_arrow(&self) -> &dyn Array {
+ &self.array
+ }
+
+ fn to_array_data(&self) -> ArrayData {
+ self.array.data().clone()
+ }
+
+ fn from_array_data(data: ArrayData) -> BinaryVector {
+ BinaryVector {
+ array: BinaryArray::from(data),
+ }
+ }
+}
+
+impl From<BinaryArray> for BinaryVector {
+ fn from(array: BinaryArray) -> Self {
+ Self { array }
+ }
+}
+
+impl From<Vec<Option<Vec<u8>>>> for BinaryVector {
+ fn from(data: Vec<Option<Vec<u8>>>) -> Self {
+ Self {
+ array: BinaryArray::from_iter(data),
+ }
+ }
+}
+
+impl Vector for BinaryVector {
+ fn data_type(&self) -> ConcreteDataType {
+ ConcreteDataType::binary_datatype()
+ }
+
+ fn vector_type_name(&self) -> String {
+ "BinaryVector".to_string()
+ }
+
+ fn as_any(&self) -> &dyn Any {
+ self
+ }
+
+ fn len(&self) -> usize {
+ self.array.len()
+ }
+
+ fn to_arrow_array(&self) -> ArrayRef {
+ let data = self.to_array_data();
+ Arc::new(BinaryArray::from(data))
+ }
+
+ fn to_boxed_arrow_array(&self) -> Box<dyn Array> {
+ let data = self.to_array_data();
+ Box::new(BinaryArray::from(data))
+ }
+
+ fn validity(&self) -> Validity {
+ vectors::impl_validity_for_vector!(self.array)
+ }
+
+ fn memory_size(&self) -> usize {
+ self.array.get_buffer_memory_size()
+ }
+
+ fn null_count(&self) -> usize {
+ self.array.null_count()
+ }
+
+ fn is_null(&self, row: usize) -> bool {
+ self.array.is_null(row)
+ }
+
+ fn slice(&self, offset: usize, length: usize) -> VectorRef {
+ let data = self.array.data().slice(offset, length);
+ Arc::new(Self::from_array_data(data))
+ }
+
+ fn get(&self, index: usize) -> Value {
+ vectors::impl_get_for_vector!(self.array, index)
+ }
+
+ fn get_ref(&self, index: usize) -> ValueRef {
+ vectors::impl_get_ref_for_vector!(self.array, index)
+ }
+}
+
+impl ScalarVector for BinaryVector {
+ type OwnedItem = Vec<u8>;
+ type RefItem<'a> = &'a [u8];
+ type Iter<'a> = ArrayIter<&'a BinaryArray>;
+ type Builder = BinaryVectorBuilder;
+
+ fn get_data(&self, idx: usize) -> Option<Self::RefItem<'_>> {
+ if self.array.is_valid(idx) {
+ Some(self.array.value(idx))
+ } else {
+ None
+ }
+ }
+
+ fn iter_data(&self) -> Self::Iter<'_> {
+ self.array.iter()
+ }
+}
+
+pub struct BinaryVectorBuilder {
+ mutable_array: MutableBinaryArray,
+}
+
+impl MutableVector for BinaryVectorBuilder {
+ fn data_type(&self) -> ConcreteDataType {
+ ConcreteDataType::binary_datatype()
+ }
+
+ fn len(&self) -> usize {
+ self.mutable_array.len()
+ }
+
+ fn as_any(&self) -> &dyn Any {
+ self
+ }
+
+ fn as_mut_any(&mut self) -> &mut dyn Any {
+ self
+ }
+
+ fn to_vector(&mut self) -> VectorRef {
+ Arc::new(self.finish())
+ }
+
+ fn push_value_ref(&mut self, value: ValueRef) -> Result<()> {
+ match value.as_binary()? {
+ Some(v) => self.mutable_array.append_value(v),
+ None => self.mutable_array.append_null(),
+ }
+ Ok(())
+ }
+
+ fn extend_slice_of(&mut self, vector: &dyn Vector, offset: usize, length: usize) -> Result<()> {
+ vectors::impl_extend_for_builder!(self, vector, BinaryVector, offset, length)
+ }
+}
+
+impl ScalarVectorBuilder for BinaryVectorBuilder {
+ type VectorType = BinaryVector;
+
+ fn with_capacity(capacity: usize) -> Self {
+ Self {
+ mutable_array: MutableBinaryArray::with_capacity(capacity, 0),
+ }
+ }
+
+ fn push(&mut self, value: Option<<Self::VectorType as ScalarVector>::RefItem<'_>>) {
+ match value {
+ Some(v) => self.mutable_array.append_value(v),
+ None => self.mutable_array.append_null(),
+ }
+ }
+
+ fn finish(&mut self) -> Self::VectorType {
+ BinaryVector {
+ array: self.mutable_array.finish(),
+ }
+ }
+}
+
+impl Serializable for BinaryVector {
+ fn serialize_to_json(&self) -> Result<Vec<serde_json::Value>> {
+ self.iter_data()
+ .map(|v| match v {
+                None => Ok(serde_json::Value::Null), // if the binary value is absent, map to JSON null
+ Some(vec) => serde_json::to_value(vec),
+ })
+ .collect::<serde_json::Result<_>>()
+ .context(error::SerializeSnafu)
+ }
+}
+
+vectors::impl_try_from_arrow_array_for_vector!(BinaryArray, BinaryVector);
+
+#[cfg(test)]
+mod tests {
+ use arrow::datatypes::DataType as ArrowDataType;
+ use common_base::bytes::Bytes;
+ use serde_json;
+
+ use super::*;
+ use crate::arrow_array::BinaryArray;
+ use crate::data_type::DataType;
+ use crate::serialize::Serializable;
+ use crate::types::BinaryType;
+
+ #[test]
+ fn test_binary_vector_misc() {
+ let v = BinaryVector::from(BinaryArray::from_iter_values(&[
+ vec![1, 2, 3],
+ vec![1, 2, 3],
+ ]));
+
+ assert_eq!(2, v.len());
+ assert_eq!("BinaryVector", v.vector_type_name());
+ assert!(!v.is_const());
+ assert!(v.validity().is_all_valid());
+ assert!(!v.only_null());
+ assert_eq!(128, v.memory_size());
+
+ for i in 0..2 {
+ assert!(!v.is_null(i));
+ assert_eq!(Value::Binary(Bytes::from(vec![1, 2, 3])), v.get(i));
+ assert_eq!(ValueRef::Binary(&[1, 2, 3]), v.get_ref(i));
+ }
+
+ let arrow_arr = v.to_arrow_array();
+ assert_eq!(2, arrow_arr.len());
+ assert_eq!(&ArrowDataType::LargeBinary, arrow_arr.data_type());
+ }
+
+ #[test]
+ fn test_serialize_binary_vector_to_json() {
+ let vector = BinaryVector::from(BinaryArray::from_iter_values(&[
+ vec![1, 2, 3],
+ vec![1, 2, 3],
+ ]));
+
+ let json_value = vector.serialize_to_json().unwrap();
+ assert_eq!(
+ "[[1,2,3],[1,2,3]]",
+ serde_json::to_string(&json_value).unwrap()
+ );
+ }
+
+ #[test]
+ fn test_serialize_binary_vector_with_null_to_json() {
+ let mut builder = BinaryVectorBuilder::with_capacity(4);
+ builder.push(Some(&[1, 2, 3]));
+ builder.push(None);
+ builder.push(Some(&[4, 5, 6]));
+ let vector = builder.finish();
+
+ let json_value = vector.serialize_to_json().unwrap();
+ assert_eq!(
+ "[[1,2,3],null,[4,5,6]]",
+ serde_json::to_string(&json_value).unwrap()
+ );
+ }
+
+ #[test]
+ fn test_from_arrow_array() {
+ let arrow_array = BinaryArray::from_iter_values(&[vec![1, 2, 3], vec![1, 2, 3]]);
+ let original = BinaryArray::from(arrow_array.data().clone());
+ let vector = BinaryVector::from(arrow_array);
+ assert_eq!(original, vector.array);
+ }
+
+ #[test]
+ fn test_binary_vector_build_get() {
+ let mut builder = BinaryVectorBuilder::with_capacity(4);
+ builder.push(Some(b"hello"));
+ builder.push(Some(b"happy"));
+ builder.push(Some(b"world"));
+ builder.push(None);
+
+ let vector = builder.finish();
+ assert_eq!(b"hello", vector.get_data(0).unwrap());
+ assert_eq!(None, vector.get_data(3));
+
+ assert_eq!(Value::Binary(b"hello".as_slice().into()), vector.get(0));
+ assert_eq!(Value::Null, vector.get(3));
+
+ let mut iter = vector.iter_data();
+ assert_eq!(b"hello", iter.next().unwrap().unwrap());
+ assert_eq!(b"happy", iter.next().unwrap().unwrap());
+ assert_eq!(b"world", iter.next().unwrap().unwrap());
+ assert_eq!(None, iter.next().unwrap());
+ assert_eq!(None, iter.next());
+ }
+
+ #[test]
+ fn test_binary_vector_validity() {
+ let mut builder = BinaryVectorBuilder::with_capacity(4);
+ builder.push(Some(b"hello"));
+ builder.push(Some(b"world"));
+ let vector = builder.finish();
+ assert_eq!(0, vector.null_count());
+ assert!(vector.validity().is_all_valid());
+
+ let mut builder = BinaryVectorBuilder::with_capacity(3);
+ builder.push(Some(b"hello"));
+ builder.push(None);
+ builder.push(Some(b"world"));
+ let vector = builder.finish();
+ assert_eq!(1, vector.null_count());
+ let validity = vector.validity();
+ assert!(!validity.is_set(1));
+
+ assert_eq!(1, validity.null_count());
+ assert!(!validity.is_set(1));
+ }
+
+ #[test]
+ fn test_binary_vector_builder() {
+ let input = BinaryVector::from_slice(&[b"world", b"one", b"two"]);
+
+ let mut builder = BinaryType::default().create_mutable_vector(3);
+ builder
+ .push_value_ref(ValueRef::Binary("hello".as_bytes()))
+ .unwrap();
+ assert!(builder.push_value_ref(ValueRef::Int32(123)).is_err());
+ builder.extend_slice_of(&input, 1, 2).unwrap();
+ assert!(builder
+ .extend_slice_of(&crate::vectors::Int32Vector::from_slice(&[13]), 0, 1)
+ .is_err());
+ let vector = builder.to_vector();
+
+ let expect: VectorRef = Arc::new(BinaryVector::from_slice(&[b"hello", b"one", b"two"]));
+ assert_eq!(expect, vector);
+ }
+}
diff --git a/src/datatypes2/src/vectors/boolean.rs b/src/datatypes2/src/vectors/boolean.rs
new file mode 100644
index 000000000000..2b4e5b8e10d9
--- /dev/null
+++ b/src/datatypes2/src/vectors/boolean.rs
@@ -0,0 +1,371 @@
+// Copyright 2022 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::any::Any;
+use std::borrow::Borrow;
+use std::sync::Arc;
+
+use arrow::array::{
+ Array, ArrayBuilder, ArrayData, ArrayIter, ArrayRef, BooleanArray, BooleanBuilder,
+};
+use snafu::ResultExt;
+
+use crate::data_type::ConcreteDataType;
+use crate::error::Result;
+use crate::scalars::{ScalarVector, ScalarVectorBuilder};
+use crate::serialize::Serializable;
+use crate::value::{Value, ValueRef};
+use crate::vectors::{self, MutableVector, Validity, Vector, VectorRef};
+
+/// Vector of boolean.
+#[derive(Debug, PartialEq)]
+pub struct BooleanVector {
+ array: BooleanArray,
+}
+
+impl BooleanVector {
+ pub(crate) fn as_arrow(&self) -> &dyn Array {
+ &self.array
+ }
+
+ pub(crate) fn as_boolean_array(&self) -> &BooleanArray {
+ &self.array
+ }
+
+ fn to_array_data(&self) -> ArrayData {
+ self.array.data().clone()
+ }
+
+ fn from_array_data(data: ArrayData) -> BooleanVector {
+ BooleanVector {
+ array: BooleanArray::from(data),
+ }
+ }
+
+ pub(crate) fn false_count(&self) -> usize {
+ self.array.false_count()
+ }
+}
+
+impl From<Vec<bool>> for BooleanVector {
+ fn from(data: Vec<bool>) -> Self {
+ BooleanVector {
+ array: BooleanArray::from(data),
+ }
+ }
+}
+
+impl From<BooleanArray> for BooleanVector {
+ fn from(array: BooleanArray) -> Self {
+ Self { array }
+ }
+}
+
+impl From<Vec<Option<bool>>> for BooleanVector {
+ fn from(data: Vec<Option<bool>>) -> Self {
+ BooleanVector {
+ array: BooleanArray::from(data),
+ }
+ }
+}
+
+impl<Ptr: Borrow<Option<bool>>> FromIterator<Ptr> for BooleanVector {
+ fn from_iter<I: IntoIterator<Item = Ptr>>(iter: I) -> Self {
+ BooleanVector {
+ array: BooleanArray::from_iter(iter),
+ }
+ }
+}
+
+impl Vector for BooleanVector {
+ fn data_type(&self) -> ConcreteDataType {
+ ConcreteDataType::boolean_datatype()
+ }
+
+ fn vector_type_name(&self) -> String {
+ "BooleanVector".to_string()
+ }
+
+ fn as_any(&self) -> &dyn Any {
+ self
+ }
+
+ fn len(&self) -> usize {
+ self.array.len()
+ }
+
+ fn to_arrow_array(&self) -> ArrayRef {
+ let data = self.to_array_data();
+ Arc::new(BooleanArray::from(data))
+ }
+
+ fn to_boxed_arrow_array(&self) -> Box<dyn Array> {
+ let data = self.to_array_data();
+ Box::new(BooleanArray::from(data))
+ }
+
+ fn validity(&self) -> Validity {
+ vectors::impl_validity_for_vector!(self.array)
+ }
+
+ fn memory_size(&self) -> usize {
+ self.array.get_buffer_memory_size()
+ }
+
+ fn null_count(&self) -> usize {
+ self.array.null_count()
+ }
+
+ fn is_null(&self, row: usize) -> bool {
+ self.array.is_null(row)
+ }
+
+ fn slice(&self, offset: usize, length: usize) -> VectorRef {
+ let data = self.array.data().slice(offset, length);
+ Arc::new(Self::from_array_data(data))
+ }
+
+ fn get(&self, index: usize) -> Value {
+ vectors::impl_get_for_vector!(self.array, index)
+ }
+
+ fn get_ref(&self, index: usize) -> ValueRef {
+ vectors::impl_get_ref_for_vector!(self.array, index)
+ }
+}
+
+impl ScalarVector for BooleanVector {
+ type OwnedItem = bool;
+ type RefItem<'a> = bool;
+ type Iter<'a> = ArrayIter<&'a BooleanArray>;
+ type Builder = BooleanVectorBuilder;
+
+ fn get_data(&self, idx: usize) -> Option<Self::RefItem<'_>> {
+ if self.array.is_valid(idx) {
+ Some(self.array.value(idx))
+ } else {
+ None
+ }
+ }
+
+ fn iter_data(&self) -> Self::Iter<'_> {
+ self.array.iter()
+ }
+}
+
+pub struct BooleanVectorBuilder {
+ mutable_array: BooleanBuilder,
+}
+
+impl MutableVector for BooleanVectorBuilder {
+ fn data_type(&self) -> ConcreteDataType {
+ ConcreteDataType::boolean_datatype()
+ }
+
+ fn len(&self) -> usize {
+ self.mutable_array.len()
+ }
+
+ fn as_any(&self) -> &dyn Any {
+ self
+ }
+
+ fn as_mut_any(&mut self) -> &mut dyn Any {
+ self
+ }
+
+ fn to_vector(&mut self) -> VectorRef {
+ Arc::new(self.finish())
+ }
+
+ fn push_value_ref(&mut self, value: ValueRef) -> Result<()> {
+ match value.as_boolean()? {
+ Some(v) => self.mutable_array.append_value(v),
+ None => self.mutable_array.append_null(),
+ }
+ Ok(())
+ }
+
+ fn extend_slice_of(&mut self, vector: &dyn Vector, offset: usize, length: usize) -> Result<()> {
+ vectors::impl_extend_for_builder!(self, vector, BooleanVector, offset, length)
+ }
+}
+
+impl ScalarVectorBuilder for BooleanVectorBuilder {
+ type VectorType = BooleanVector;
+
+ fn with_capacity(capacity: usize) -> Self {
+ Self {
+ mutable_array: BooleanBuilder::with_capacity(capacity),
+ }
+ }
+
+ fn push(&mut self, value: Option<<Self::VectorType as ScalarVector>::RefItem<'_>>) {
+ match value {
+ Some(v) => self.mutable_array.append_value(v),
+ None => self.mutable_array.append_null(),
+ }
+ }
+
+ fn finish(&mut self) -> Self::VectorType {
+ BooleanVector {
+ array: self.mutable_array.finish(),
+ }
+ }
+}
+
+impl Serializable for BooleanVector {
+ fn serialize_to_json(&self) -> Result<Vec<serde_json::Value>> {
+ self.iter_data()
+ .map(serde_json::to_value)
+ .collect::<serde_json::Result<_>>()
+ .context(crate::error::SerializeSnafu)
+ }
+}
+
+vectors::impl_try_from_arrow_array_for_vector!(BooleanArray, BooleanVector);
+
+#[cfg(test)]
+mod tests {
+ use arrow::datatypes::DataType as ArrowDataType;
+ use serde_json;
+
+ use super::*;
+ use crate::data_type::DataType;
+ use crate::serialize::Serializable;
+ use crate::types::BooleanType;
+
+ #[test]
+ fn test_boolean_vector_misc() {
+ let bools = vec![true, false, true, true, false, false, true, true, false];
+ let v = BooleanVector::from(bools.clone());
+ assert_eq!(9, v.len());
+ assert_eq!("BooleanVector", v.vector_type_name());
+ assert!(!v.is_const());
+ assert!(v.validity().is_all_valid());
+ assert!(!v.only_null());
+ assert_eq!(64, v.memory_size());
+
+ for (i, b) in bools.iter().enumerate() {
+ assert!(!v.is_null(i));
+ assert_eq!(Value::Boolean(*b), v.get(i));
+ assert_eq!(ValueRef::Boolean(*b), v.get_ref(i));
+ }
+
+ let arrow_arr = v.to_arrow_array();
+ assert_eq!(9, arrow_arr.len());
+ assert_eq!(&ArrowDataType::Boolean, arrow_arr.data_type());
+ }
+
+ #[test]
+ fn test_serialize_boolean_vector_to_json() {
+ let vector = BooleanVector::from(vec![true, false, true, true, false, false]);
+
+ let json_value = vector.serialize_to_json().unwrap();
+ assert_eq!(
+ "[true,false,true,true,false,false]",
+ serde_json::to_string(&json_value).unwrap(),
+ );
+ }
+
+ #[test]
+ fn test_serialize_boolean_vector_with_null_to_json() {
+ let vector = BooleanVector::from(vec![Some(true), None, Some(false)]);
+
+ let json_value = vector.serialize_to_json().unwrap();
+ assert_eq!(
+ "[true,null,false]",
+ serde_json::to_string(&json_value).unwrap(),
+ );
+ }
+
+ #[test]
+ fn test_boolean_vector_from_vec() {
+ let input = vec![false, true, false, true];
+ let vec = BooleanVector::from(input.clone());
+ assert_eq!(4, vec.len());
+ for (i, v) in input.into_iter().enumerate() {
+ assert_eq!(Some(v), vec.get_data(i), "failed at {}", i)
+ }
+ }
+
+ #[test]
+ fn test_boolean_vector_from_iter() {
+ let input = vec![Some(false), Some(true), Some(false), Some(true)];
+ let vec = input.iter().collect::<BooleanVector>();
+ assert_eq!(4, vec.len());
+ for (i, v) in input.into_iter().enumerate() {
+ assert_eq!(v, vec.get_data(i), "failed at {}", i)
+ }
+ }
+
+ #[test]
+ fn test_boolean_vector_from_vec_option() {
+ let input = vec![Some(false), Some(true), None, Some(true)];
+ let vec = BooleanVector::from(input.clone());
+ assert_eq!(4, vec.len());
+ for (i, v) in input.into_iter().enumerate() {
+ assert_eq!(v, vec.get_data(i), "failed at {}", i)
+ }
+ }
+
+ #[test]
+ fn test_boolean_vector_build_get() {
+ let input = [Some(true), None, Some(false)];
+ let mut builder = BooleanVectorBuilder::with_capacity(3);
+ for v in input {
+ builder.push(v);
+ }
+ let vector = builder.finish();
+ assert_eq!(input.len(), vector.len());
+
+ let res: Vec<_> = vector.iter_data().collect();
+ assert_eq!(input, &res[..]);
+
+ for (i, v) in input.into_iter().enumerate() {
+ assert_eq!(v, vector.get_data(i));
+ assert_eq!(Value::from(v), vector.get(i));
+ }
+ }
+
+ #[test]
+ fn test_boolean_vector_validity() {
+ let vector = BooleanVector::from(vec![Some(true), None, Some(false)]);
+ assert_eq!(1, vector.null_count());
+ let validity = vector.validity();
+ assert_eq!(1, validity.null_count());
+ assert!(!validity.is_set(1));
+
+ let vector = BooleanVector::from(vec![true, false, false]);
+ assert_eq!(0, vector.null_count());
+ assert!(vector.validity().is_all_valid());
+ }
+
+ #[test]
+ fn test_boolean_vector_builder() {
+ let input = BooleanVector::from_slice(&[true, false, true]);
+
+ let mut builder = BooleanType::default().create_mutable_vector(3);
+ builder.push_value_ref(ValueRef::Boolean(true)).unwrap();
+ assert!(builder.push_value_ref(ValueRef::Int32(123)).is_err());
+ builder.extend_slice_of(&input, 1, 2).unwrap();
+ assert!(builder
+ .extend_slice_of(&crate::vectors::Int32Vector::from_slice(&[13]), 0, 1)
+ .is_err());
+ let vector = builder.to_vector();
+
+ let expect: VectorRef = Arc::new(BooleanVector::from_slice(&[true, false, true]));
+ assert_eq!(expect, vector);
+ }
+}
diff --git a/src/datatypes2/src/vectors/constant.rs b/src/datatypes2/src/vectors/constant.rs
new file mode 100644
index 000000000000..87739e91318b
--- /dev/null
+++ b/src/datatypes2/src/vectors/constant.rs
@@ -0,0 +1,218 @@
+// Copyright 2022 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::any::Any;
+use std::fmt;
+use std::sync::Arc;
+
+use arrow::array::{Array, ArrayRef};
+use snafu::ResultExt;
+
+use crate::data_type::ConcreteDataType;
+use crate::error::{Result, SerializeSnafu};
+use crate::serialize::Serializable;
+use crate::value::{Value, ValueRef};
+use crate::vectors::{BooleanVector, Helper, Validity, Vector, VectorRef};
+
+#[derive(Clone)]
+pub struct ConstantVector {
+ length: usize,
+ vector: VectorRef,
+}
+
+impl ConstantVector {
+ /// Create a new [ConstantVector].
+ ///
+ /// # Panics
+ /// Panics if `vector.len() != 1`.
+ pub fn new(vector: VectorRef, length: usize) -> Self {
+ assert_eq!(1, vector.len());
+
+ // Avoid const recursion.
+ if vector.is_const() {
+ let vec: &ConstantVector = unsafe { Helper::static_cast(&vector) };
+ return Self::new(vec.inner().clone(), length);
+ }
+ Self { vector, length }
+ }
+
+ pub fn inner(&self) -> &VectorRef {
+ &self.vector
+ }
+
+ /// Returns the constant value.
+ pub fn get_constant_ref(&self) -> ValueRef {
+ self.vector.get_ref(0)
+ }
+
+ pub(crate) fn replicate_vector(&self, offsets: &[usize]) -> VectorRef {
+ assert_eq!(offsets.len(), self.len());
+
+ if offsets.is_empty() {
+ return self.slice(0, 0);
+ }
+
+ Arc::new(ConstantVector::new(
+ self.vector.clone(),
+ *offsets.last().unwrap(),
+ ))
+ }
+
+ pub(crate) fn filter_vector(&self, filter: &BooleanVector) -> Result<VectorRef> {
+ let length = self.len() - filter.false_count();
+ if length == self.len() {
+ return Ok(Arc::new(self.clone()));
+ }
+ Ok(Arc::new(ConstantVector::new(self.inner().clone(), length)))
+ }
+}
+
+impl Vector for ConstantVector {
+ fn data_type(&self) -> ConcreteDataType {
+ self.vector.data_type()
+ }
+
+ fn vector_type_name(&self) -> String {
+ "ConstantVector".to_string()
+ }
+
+ fn as_any(&self) -> &dyn Any {
+ self
+ }
+
+ fn len(&self) -> usize {
+ self.length
+ }
+
+ fn to_arrow_array(&self) -> ArrayRef {
+ let v = self.vector.replicate(&[self.length]);
+ v.to_arrow_array()
+ }
+
+ fn to_boxed_arrow_array(&self) -> Box<dyn Array> {
+ let v = self.vector.replicate(&[self.length]);
+ v.to_boxed_arrow_array()
+ }
+
+ fn is_const(&self) -> bool {
+ true
+ }
+
+ fn validity(&self) -> Validity {
+ if self.vector.is_null(0) {
+ Validity::all_null(self.length)
+ } else {
+ Validity::all_valid(self.length)
+ }
+ }
+
+ fn memory_size(&self) -> usize {
+ self.vector.memory_size()
+ }
+
+ fn is_null(&self, _row: usize) -> bool {
+ self.vector.is_null(0)
+ }
+
+ fn only_null(&self) -> bool {
+ self.vector.is_null(0)
+ }
+
+ fn slice(&self, _offset: usize, length: usize) -> VectorRef {
+ Arc::new(Self {
+ vector: self.vector.clone(),
+ length,
+ })
+ }
+
+ fn get(&self, _index: usize) -> Value {
+ self.vector.get(0)
+ }
+
+ fn get_ref(&self, _index: usize) -> ValueRef {
+ self.vector.get_ref(0)
+ }
+
+ fn null_count(&self) -> usize {
+ if self.only_null() {
+ self.len()
+ } else {
+ 0
+ }
+ }
+}
+
+impl fmt::Debug for ConstantVector {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(f, "ConstantVector([{:?}; {}])", self.get(0), self.len())
+ }
+}
+
+impl Serializable for ConstantVector {
+ fn serialize_to_json(&self) -> Result<Vec<serde_json::Value>> {
+ std::iter::repeat(self.get(0))
+ .take(self.len())
+ .map(serde_json::Value::try_from)
+ .collect::<serde_json::Result<_>>()
+ .context(SerializeSnafu)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use arrow::datatypes::DataType as ArrowDataType;
+
+ use super::*;
+ use crate::vectors::Int32Vector;
+
+ #[test]
+ fn test_constant_vector_misc() {
+ let a = Int32Vector::from_slice(vec![1]);
+ let c = ConstantVector::new(Arc::new(a), 10);
+
+ assert_eq!("ConstantVector", c.vector_type_name());
+ assert!(c.is_const());
+ assert_eq!(10, c.len());
+ assert!(c.validity().is_all_valid());
+ assert!(!c.only_null());
+ assert_eq!(64, c.memory_size());
+
+ for i in 0..10 {
+ assert!(!c.is_null(i));
+ assert_eq!(Value::Int32(1), c.get(i));
+ }
+
+ let arrow_arr = c.to_arrow_array();
+ assert_eq!(10, arrow_arr.len());
+ assert_eq!(&ArrowDataType::Int32, arrow_arr.data_type());
+ }
+
+ #[test]
+ fn test_debug_null_array() {
+ let a = Int32Vector::from_slice(vec![1]);
+ let c = ConstantVector::new(Arc::new(a), 10);
+
+ let s = format!("{:?}", c);
+ assert_eq!(s, "ConstantVector([Int32(1); 10])");
+ }
+
+ #[test]
+ fn test_serialize_json() {
+ let a = Int32Vector::from_slice(vec![1]);
+ let c = ConstantVector::new(Arc::new(a), 10);
+
+ let s = serde_json::to_string(&c.serialize_to_json().unwrap()).unwrap();
+ assert_eq!(s, "[1,1,1,1,1,1,1,1,1,1]");
+ }
+}
diff --git a/src/datatypes2/src/vectors/date.rs b/src/datatypes2/src/vectors/date.rs
new file mode 100644
index 000000000000..d0a66b80fb63
--- /dev/null
+++ b/src/datatypes2/src/vectors/date.rs
@@ -0,0 +1,103 @@
+// Copyright 2022 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::types::DateType;
+use crate::vectors::{PrimitiveVector, PrimitiveVectorBuilder};
+
+/// Vector of [`Date`](common_time::Date).
+pub type DateVector = PrimitiveVector<DateType>;
+/// Builder for [`DateVector`].
+pub type DateVectorBuilder = PrimitiveVectorBuilder<DateType>;
+
+#[cfg(test)]
+mod tests {
+ use std::sync::Arc;
+
+ use arrow::array::Array;
+ use common_time::date::Date;
+
+ use super::*;
+ use crate::data_type::DataType;
+ use crate::scalars::{ScalarVector, ScalarVectorBuilder};
+ use crate::serialize::Serializable;
+ use crate::types::DateType;
+ use crate::value::{Value, ValueRef};
+ use crate::vectors::{Vector, VectorRef};
+
+ #[test]
+ fn test_build_date_vector() {
+ let mut builder = DateVectorBuilder::with_capacity(4);
+ builder.push(Some(Date::new(1)));
+ builder.push(None);
+ builder.push(Some(Date::new(-1)));
+ let vector = builder.finish();
+ assert_eq!(3, vector.len());
+ assert_eq!(Value::Date(Date::new(1)), vector.get(0));
+ assert_eq!(ValueRef::Date(Date::new(1)), vector.get_ref(0));
+ assert_eq!(Some(Date::new(1)), vector.get_data(0));
+ assert_eq!(None, vector.get_data(1));
+ assert_eq!(Value::Null, vector.get(1));
+ assert_eq!(ValueRef::Null, vector.get_ref(1));
+ assert_eq!(Some(Date::new(-1)), vector.get_data(2));
+ let mut iter = vector.iter_data();
+ assert_eq!(Some(Date::new(1)), iter.next().unwrap());
+ assert_eq!(None, iter.next().unwrap());
+ assert_eq!(Some(Date::new(-1)), iter.next().unwrap());
+ }
+
+ #[test]
+ fn test_date_scalar() {
+ let vector = DateVector::from_slice(&[1, 2]);
+ assert_eq!(2, vector.len());
+ assert_eq!(Some(Date::new(1)), vector.get_data(0));
+ assert_eq!(Some(Date::new(2)), vector.get_data(1));
+ }
+
+ #[test]
+ fn test_date_vector_builder() {
+ let input = DateVector::from_slice(&[1, 2, 3]);
+
+ let mut builder = DateType::default().create_mutable_vector(3);
+ builder
+ .push_value_ref(ValueRef::Date(Date::new(5)))
+ .unwrap();
+ assert!(builder.push_value_ref(ValueRef::Int32(123)).is_err());
+ builder.extend_slice_of(&input, 1, 2).unwrap();
+ assert!(builder
+ .extend_slice_of(&crate::vectors::Int32Vector::from_slice(&[13]), 0, 1)
+ .is_err());
+ let vector = builder.to_vector();
+
+ let expect: VectorRef = Arc::new(DateVector::from_slice(&[5, 2, 3]));
+ assert_eq!(expect, vector);
+ }
+
+ #[test]
+ fn test_date_from_arrow() {
+ let vector = DateVector::from_slice(&[1, 2]);
+ let arrow = vector.as_arrow().slice(0, vector.len());
+ let vector2 = DateVector::try_from_arrow_array(&arrow).unwrap();
+ assert_eq!(vector, vector2);
+ }
+
+ #[test]
+ fn test_serialize_date_vector() {
+ let vector = DateVector::from_slice(&[-1, 0, 1]);
+ let serialized_json = serde_json::to_string(&vector.serialize_to_json().unwrap()).unwrap();
+ assert_eq!(
+ r#"["1969-12-31","1970-01-01","1970-01-02"]"#,
+ serialized_json
+ );
+ }
+}
diff --git a/src/datatypes2/src/vectors/datetime.rs b/src/datatypes2/src/vectors/datetime.rs
new file mode 100644
index 000000000000..a40a3e54d330
--- /dev/null
+++ b/src/datatypes2/src/vectors/datetime.rs
@@ -0,0 +1,116 @@
+// Copyright 2022 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::types::DateTimeType;
+use crate::vectors::{PrimitiveVector, PrimitiveVectorBuilder};
+
+/// Vector of [`DateTime`](common_time::DateTime).
+pub type DateTimeVector = PrimitiveVector<DateTimeType>;
+/// Builder for [`DateTimeVector`].
+pub type DateTimeVectorBuilder = PrimitiveVectorBuilder<DateTimeType>;
+
+#[cfg(test)]
+mod tests {
+ use std::sync::Arc;
+
+ use arrow::array::{Array, PrimitiveArray};
+ use common_time::DateTime;
+ use datafusion_common::from_slice::FromSlice;
+
+ use super::*;
+ use crate::data_type::DataType;
+ use crate::prelude::{
+ ConcreteDataType, ScalarVector, ScalarVectorBuilder, Value, ValueRef, Vector, VectorRef,
+ };
+ use crate::serialize::Serializable;
+
+ #[test]
+ fn test_datetime_vector() {
+ let v = DateTimeVector::new(PrimitiveArray::from_slice(&[1, 2, 3]));
+ assert_eq!(ConcreteDataType::datetime_datatype(), v.data_type());
+ assert_eq!(3, v.len());
+ assert_eq!("DateTimeVector", v.vector_type_name());
+ assert_eq!(
+ &arrow::datatypes::DataType::Date64,
+ v.to_arrow_array().data_type()
+ );
+
+ assert_eq!(Some(DateTime::new(1)), v.get_data(0));
+ assert_eq!(Value::DateTime(DateTime::new(1)), v.get(0));
+ assert_eq!(ValueRef::DateTime(DateTime::new(1)), v.get_ref(0));
+
+ let mut iter = v.iter_data();
+ assert_eq!(Some(DateTime::new(1)), iter.next().unwrap());
+ assert_eq!(Some(DateTime::new(2)), iter.next().unwrap());
+ assert_eq!(Some(DateTime::new(3)), iter.next().unwrap());
+ assert!(!v.is_null(0));
+ assert_eq!(64, v.memory_size());
+
+ if let Value::DateTime(d) = v.get(0) {
+ assert_eq!(1, d.val());
+ } else {
+ unreachable!()
+ }
+ assert_eq!(
+ "[\"1970-01-01 00:00:01\",\"1970-01-01 00:00:02\",\"1970-01-01 00:00:03\"]",
+ serde_json::to_string(&v.serialize_to_json().unwrap()).unwrap()
+ );
+ }
+
+ #[test]
+ fn test_datetime_vector_builder() {
+ let mut builder = DateTimeVectorBuilder::with_capacity(3);
+ builder.push(Some(DateTime::new(1)));
+ builder.push(None);
+ builder.push(Some(DateTime::new(-1)));
+
+ let v = builder.finish();
+ assert_eq!(ConcreteDataType::datetime_datatype(), v.data_type());
+ assert_eq!(Value::DateTime(DateTime::new(1)), v.get(0));
+ assert_eq!(Value::Null, v.get(1));
+ assert_eq!(Value::DateTime(DateTime::new(-1)), v.get(2));
+
+ let input = DateTimeVector::from_wrapper_slice(&[
+ DateTime::new(1),
+ DateTime::new(2),
+ DateTime::new(3),
+ ]);
+
+ let mut builder = DateTimeType::default().create_mutable_vector(3);
+ builder
+ .push_value_ref(ValueRef::DateTime(DateTime::new(5)))
+ .unwrap();
+ assert!(builder.push_value_ref(ValueRef::Int32(123)).is_err());
+ builder.extend_slice_of(&input, 1, 2).unwrap();
+ assert!(builder
+ .extend_slice_of(&crate::vectors::Int32Vector::from_slice(&[13]), 0, 1)
+ .is_err());
+ let vector = builder.to_vector();
+
+ let expect: VectorRef = Arc::new(DateTimeVector::from_wrapper_slice(&[
+ DateTime::new(5),
+ DateTime::new(2),
+ DateTime::new(3),
+ ]));
+ assert_eq!(expect, vector);
+ }
+
+ #[test]
+ fn test_datetime_from_arrow() {
+ let vector = DateTimeVector::from_wrapper_slice(&[DateTime::new(1), DateTime::new(2)]);
+ let arrow = vector.as_arrow().slice(0, vector.len());
+ let vector2 = DateTimeVector::try_from_arrow_array(&arrow).unwrap();
+ assert_eq!(vector, vector2);
+ }
+}
diff --git a/src/datatypes2/src/vectors/eq.rs b/src/datatypes2/src/vectors/eq.rs
new file mode 100644
index 000000000000..55359026d479
--- /dev/null
+++ b/src/datatypes2/src/vectors/eq.rs
@@ -0,0 +1,228 @@
+// Copyright 2022 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::sync::Arc;
+
+use crate::data_type::DataType;
+use crate::types::TimestampType;
+use crate::vectors::constant::ConstantVector;
+use crate::vectors::{
+ BinaryVector, BooleanVector, DateTimeVector, DateVector, ListVector, PrimitiveVector,
+ StringVector, TimestampMicrosecondVector, TimestampMillisecondVector,
+ TimestampNanosecondVector, TimestampSecondVector, Vector,
+};
+use crate::with_match_primitive_type_id;
+
+impl Eq for dyn Vector + '_ {}
+
+impl PartialEq for dyn Vector + '_ {
+ fn eq(&self, other: &dyn Vector) -> bool {
+ equal(self, other)
+ }
+}
+
+impl PartialEq<dyn Vector> for Arc<dyn Vector + '_> {
+ fn eq(&self, other: &dyn Vector) -> bool {
+ equal(&**self, other)
+ }
+}
+
+macro_rules! is_vector_eq {
+ ($VectorType: ident, $lhs: ident, $rhs: ident) => {{
+ let lhs = $lhs.as_any().downcast_ref::<$VectorType>().unwrap();
+ let rhs = $rhs.as_any().downcast_ref::<$VectorType>().unwrap();
+
+ lhs == rhs
+ }};
+}
+
+fn equal(lhs: &dyn Vector, rhs: &dyn Vector) -> bool {
+ if lhs.data_type() != rhs.data_type() || lhs.len() != rhs.len() {
+ return false;
+ }
+
+ if lhs.is_const() || rhs.is_const() {
+ // Length has been checked before, so we only need to compare inner
+ // vector here.
+ return equal(
+ &**lhs
+ .as_any()
+ .downcast_ref::<ConstantVector>()
+ .unwrap()
+ .inner(),
+            &**rhs
+ .as_any()
+ .downcast_ref::<ConstantVector>()
+ .unwrap()
+ .inner(),
+ );
+ }
+
+ use crate::data_type::ConcreteDataType::*;
+
+ let lhs_type = lhs.data_type();
+ match lhs.data_type() {
+ Null(_) => true,
+ Boolean(_) => is_vector_eq!(BooleanVector, lhs, rhs),
+ Binary(_) => is_vector_eq!(BinaryVector, lhs, rhs),
+ String(_) => is_vector_eq!(StringVector, lhs, rhs),
+ Date(_) => is_vector_eq!(DateVector, lhs, rhs),
+ DateTime(_) => is_vector_eq!(DateTimeVector, lhs, rhs),
+ Timestamp(t) => match t {
+ TimestampType::Second(_) => {
+ is_vector_eq!(TimestampSecondVector, lhs, rhs)
+ }
+ TimestampType::Millisecond(_) => {
+ is_vector_eq!(TimestampMillisecondVector, lhs, rhs)
+ }
+ TimestampType::Microsecond(_) => {
+ is_vector_eq!(TimestampMicrosecondVector, lhs, rhs)
+ }
+ TimestampType::Nanosecond(_) => {
+ is_vector_eq!(TimestampNanosecondVector, lhs, rhs)
+ }
+ },
+ List(_) => is_vector_eq!(ListVector, lhs, rhs),
+ UInt8(_) | UInt16(_) | UInt32(_) | UInt64(_) | Int8(_) | Int16(_) | Int32(_) | Int64(_)
+ | Float32(_) | Float64(_) => {
+ with_match_primitive_type_id!(lhs_type.logical_type_id(), |$T| {
+ let lhs = lhs.as_any().downcast_ref::<PrimitiveVector<$T>>().unwrap();
+ let rhs = rhs.as_any().downcast_ref::<PrimitiveVector<$T>>().unwrap();
+
+ lhs == rhs
+ },
+ {
+ unreachable!("should not compare {} with {}", lhs.vector_type_name(), rhs.vector_type_name())
+ })
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::vectors::{
+ list, Float32Vector, Float64Vector, Int16Vector, Int32Vector, Int64Vector, Int8Vector,
+ NullVector, UInt16Vector, UInt32Vector, UInt64Vector, UInt8Vector, VectorRef,
+ };
+
+ fn assert_vector_ref_eq(vector: VectorRef) {
+ let rhs = vector.clone();
+ assert_eq!(vector, rhs);
+ assert_dyn_vector_eq(&*vector, &*rhs);
+ }
+
+ fn assert_dyn_vector_eq(lhs: &dyn Vector, rhs: &dyn Vector) {
+ assert_eq!(lhs, rhs);
+ }
+
+ fn assert_vector_ref_ne(lhs: VectorRef, rhs: VectorRef) {
+ assert_ne!(lhs, rhs);
+ }
+
+ #[test]
+ fn test_vector_eq() {
+ assert_vector_ref_eq(Arc::new(BinaryVector::from(vec![
+ Some(b"hello".to_vec()),
+ Some(b"world".to_vec()),
+ ])));
+ assert_vector_ref_eq(Arc::new(BooleanVector::from(vec![true, false])));
+ assert_vector_ref_eq(Arc::new(ConstantVector::new(
+ Arc::new(BooleanVector::from(vec![true])),
+ 5,
+ )));
+ assert_vector_ref_eq(Arc::new(BooleanVector::from(vec![true, false])));
+ assert_vector_ref_eq(Arc::new(DateVector::from(vec![Some(100), Some(120)])));
+ assert_vector_ref_eq(Arc::new(DateTimeVector::from(vec![Some(100), Some(120)])));
+ assert_vector_ref_eq(Arc::new(TimestampSecondVector::from_values([100, 120])));
+ assert_vector_ref_eq(Arc::new(TimestampMillisecondVector::from_values([
+ 100, 120,
+ ])));
+ assert_vector_ref_eq(Arc::new(TimestampMicrosecondVector::from_values([
+ 100, 120,
+ ])));
+ assert_vector_ref_eq(Arc::new(TimestampNanosecondVector::from_values([100, 120])));
+
+ let list_vector = list::tests::new_list_vector(&[
+ Some(vec![Some(1), Some(2)]),
+ None,
+ Some(vec![Some(3), Some(4)]),
+ ]);
+ assert_vector_ref_eq(Arc::new(list_vector));
+
+ assert_vector_ref_eq(Arc::new(NullVector::new(4)));
+ assert_vector_ref_eq(Arc::new(StringVector::from(vec![
+ Some("hello"),
+ Some("world"),
+ ])));
+
+ assert_vector_ref_eq(Arc::new(Int8Vector::from_slice(&[1, 2, 3, 4])));
+ assert_vector_ref_eq(Arc::new(UInt8Vector::from_slice(&[1, 2, 3, 4])));
+ assert_vector_ref_eq(Arc::new(Int16Vector::from_slice(&[1, 2, 3, 4])));
+ assert_vector_ref_eq(Arc::new(UInt16Vector::from_slice(&[1, 2, 3, 4])));
+ assert_vector_ref_eq(Arc::new(Int32Vector::from_slice(&[1, 2, 3, 4])));
+ assert_vector_ref_eq(Arc::new(UInt32Vector::from_slice(&[1, 2, 3, 4])));
+ assert_vector_ref_eq(Arc::new(Int64Vector::from_slice(&[1, 2, 3, 4])));
+ assert_vector_ref_eq(Arc::new(UInt64Vector::from_slice(&[1, 2, 3, 4])));
+ assert_vector_ref_eq(Arc::new(Float32Vector::from_slice(&[1.0, 2.0, 3.0, 4.0])));
+ assert_vector_ref_eq(Arc::new(Float64Vector::from_slice(&[1.0, 2.0, 3.0, 4.0])));
+ }
+
+ #[test]
+ fn test_vector_ne() {
+ assert_vector_ref_ne(
+ Arc::new(Int32Vector::from_slice(&[1, 2, 3, 4])),
+ Arc::new(Int32Vector::from_slice(&[1, 2])),
+ );
+ assert_vector_ref_ne(
+ Arc::new(Int32Vector::from_slice(&[1, 2, 3, 4])),
+ Arc::new(Int8Vector::from_slice(&[1, 2, 3, 4])),
+ );
+ assert_vector_ref_ne(
+ Arc::new(Int32Vector::from_slice(&[1, 2, 3, 4])),
+ Arc::new(BooleanVector::from(vec![true, true])),
+ );
+ assert_vector_ref_ne(
+ Arc::new(ConstantVector::new(
+ Arc::new(BooleanVector::from(vec![true])),
+ 5,
+ )),
+ Arc::new(ConstantVector::new(
+ Arc::new(BooleanVector::from(vec![true])),
+ 4,
+ )),
+ );
+ assert_vector_ref_ne(
+ Arc::new(ConstantVector::new(
+ Arc::new(BooleanVector::from(vec![true])),
+ 5,
+ )),
+ Arc::new(ConstantVector::new(
+ Arc::new(BooleanVector::from(vec![false])),
+ 4,
+ )),
+ );
+ assert_vector_ref_ne(
+ Arc::new(ConstantVector::new(
+ Arc::new(BooleanVector::from(vec![true])),
+ 5,
+ )),
+ Arc::new(ConstantVector::new(
+ Arc::new(Int32Vector::from_slice(vec![1])),
+ 4,
+ )),
+ );
+ assert_vector_ref_ne(Arc::new(NullVector::new(5)), Arc::new(NullVector::new(8)));
+ }
+}
diff --git a/src/datatypes2/src/vectors/helper.rs b/src/datatypes2/src/vectors/helper.rs
new file mode 100644
index 000000000000..f3236ca0ec42
--- /dev/null
+++ b/src/datatypes2/src/vectors/helper.rs
@@ -0,0 +1,431 @@
+// Copyright 2022 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Vector helper functions, inspired by Databend's `Series` module.
+
+use std::any::Any;
+use std::sync::Arc;
+
+use arrow::array::{Array, ArrayRef, StringArray};
+use arrow::compute;
+use arrow::compute::kernels::comparison;
+use arrow::datatypes::{DataType as ArrowDataType, TimeUnit};
+use datafusion_common::ScalarValue;
+use snafu::{OptionExt, ResultExt};
+
+use crate::data_type::ConcreteDataType;
+use crate::error::{self, Result};
+use crate::scalars::{Scalar, ScalarVectorBuilder};
+use crate::value::{ListValue, ListValueRef};
+use crate::vectors::{
+ BinaryVector, BooleanVector, ConstantVector, DateTimeVector, DateVector, Float32Vector,
+ Float64Vector, Int16Vector, Int32Vector, Int64Vector, Int8Vector, ListVector,
+ ListVectorBuilder, MutableVector, NullVector, StringVector, TimestampMicrosecondVector,
+ TimestampMillisecondVector, TimestampNanosecondVector, TimestampSecondVector, UInt16Vector,
+ UInt32Vector, UInt64Vector, UInt8Vector, Vector, VectorRef,
+};
+
+/// Helper functions for `Vector`.
+pub struct Helper;
+
+impl Helper {
+    /// Get a pointer to the underlying data of this vector.
+    /// Can be useful for fast comparisons.
+    /// # Safety
+    /// Assumes that the `vector` is of type `T`.
+ pub unsafe fn static_cast<T: Any>(vector: &VectorRef) -> &T {
+ let object = vector.as_ref();
+ debug_assert!(object.as_any().is::<T>());
+ &*(object as *const dyn Vector as *const T)
+ }
+
+ pub fn check_get_scalar<T: Scalar>(vector: &VectorRef) -> Result<&<T as Scalar>::VectorType> {
+ let arr = vector
+ .as_any()
+ .downcast_ref::<<T as Scalar>::VectorType>()
+ .with_context(|| error::UnknownVectorSnafu {
+ msg: format!(
+ "downcast vector error, vector type: {:?}, expected vector: {:?}",
+ vector.vector_type_name(),
+ std::any::type_name::<T>(),
+ ),
+ });
+ arr
+ }
+
+ pub fn check_get<T: 'static + Vector>(vector: &VectorRef) -> Result<&T> {
+ let arr = vector
+ .as_any()
+ .downcast_ref::<T>()
+ .with_context(|| error::UnknownVectorSnafu {
+ msg: format!(
+ "downcast vector error, vector type: {:?}, expected vector: {:?}",
+ vector.vector_type_name(),
+ std::any::type_name::<T>(),
+ ),
+ });
+ arr
+ }
+
+ pub fn check_get_mutable_vector<T: 'static + MutableVector>(
+ vector: &mut dyn MutableVector,
+ ) -> Result<&mut T> {
+ let ty = vector.data_type();
+ let arr = vector
+ .as_mut_any()
+ .downcast_mut()
+ .with_context(|| error::UnknownVectorSnafu {
+ msg: format!(
+ "downcast vector error, vector type: {:?}, expected vector: {:?}",
+ ty,
+ std::any::type_name::<T>(),
+ ),
+ });
+ arr
+ }
+
+ pub fn check_get_scalar_vector<T: Scalar>(
+ vector: &VectorRef,
+ ) -> Result<&<T as Scalar>::VectorType> {
+ let arr = vector
+ .as_any()
+ .downcast_ref::<<T as Scalar>::VectorType>()
+ .with_context(|| error::UnknownVectorSnafu {
+ msg: format!(
+ "downcast vector error, vector type: {:?}, expected vector: {:?}",
+ vector.vector_type_name(),
+ std::any::type_name::<T>(),
+ ),
+ });
+ arr
+ }
+
+    /// Try to cast a DataFusion [ScalarValue] into a vector.
+ pub fn try_from_scalar_value(value: ScalarValue, length: usize) -> Result<VectorRef> {
+ let vector = match value {
+ ScalarValue::Null => ConstantVector::new(Arc::new(NullVector::new(1)), length),
+ ScalarValue::Boolean(v) => {
+ ConstantVector::new(Arc::new(BooleanVector::from(vec![v])), length)
+ }
+ ScalarValue::Float32(v) => {
+ ConstantVector::new(Arc::new(Float32Vector::from(vec![v])), length)
+ }
+ ScalarValue::Float64(v) => {
+ ConstantVector::new(Arc::new(Float64Vector::from(vec![v])), length)
+ }
+ ScalarValue::Int8(v) => {
+ ConstantVector::new(Arc::new(Int8Vector::from(vec![v])), length)
+ }
+ ScalarValue::Int16(v) => {
+ ConstantVector::new(Arc::new(Int16Vector::from(vec![v])), length)
+ }
+ ScalarValue::Int32(v) => {
+ ConstantVector::new(Arc::new(Int32Vector::from(vec![v])), length)
+ }
+ ScalarValue::Int64(v) => {
+ ConstantVector::new(Arc::new(Int64Vector::from(vec![v])), length)
+ }
+ ScalarValue::UInt8(v) => {
+ ConstantVector::new(Arc::new(UInt8Vector::from(vec![v])), length)
+ }
+ ScalarValue::UInt16(v) => {
+ ConstantVector::new(Arc::new(UInt16Vector::from(vec![v])), length)
+ }
+ ScalarValue::UInt32(v) => {
+ ConstantVector::new(Arc::new(UInt32Vector::from(vec![v])), length)
+ }
+ ScalarValue::UInt64(v) => {
+ ConstantVector::new(Arc::new(UInt64Vector::from(vec![v])), length)
+ }
+ ScalarValue::Utf8(v) | ScalarValue::LargeUtf8(v) => {
+ ConstantVector::new(Arc::new(StringVector::from(vec![v])), length)
+ }
+ ScalarValue::Binary(v)
+ | ScalarValue::LargeBinary(v)
+ | ScalarValue::FixedSizeBinary(_, v) => {
+ ConstantVector::new(Arc::new(BinaryVector::from(vec![v])), length)
+ }
+ ScalarValue::List(v, field) => {
+ let item_type = ConcreteDataType::try_from(field.data_type())?;
+ let mut builder = ListVectorBuilder::with_type_capacity(item_type.clone(), 1);
+ if let Some(values) = v {
+ let values = values
+ .into_iter()
+ .map(ScalarValue::try_into)
+ .collect::<Result<_>>()?;
+ let list_value = ListValue::new(Some(Box::new(values)), item_type);
+ builder.push(Some(ListValueRef::Ref { val: &list_value }));
+ } else {
+ builder.push(None);
+ }
+ let list_vector = builder.to_vector();
+ ConstantVector::new(list_vector, length)
+ }
+ ScalarValue::Date32(v) => {
+ ConstantVector::new(Arc::new(DateVector::from(vec![v])), length)
+ }
+ ScalarValue::Date64(v) => {
+ ConstantVector::new(Arc::new(DateTimeVector::from(vec![v])), length)
+ }
+ ScalarValue::TimestampSecond(v, _) => {
+ // Timezone is unimplemented now.
+ ConstantVector::new(Arc::new(TimestampSecondVector::from(vec![v])), length)
+ }
+ ScalarValue::TimestampMillisecond(v, _) => {
+ // Timezone is unimplemented now.
+ ConstantVector::new(Arc::new(TimestampMillisecondVector::from(vec![v])), length)
+ }
+ ScalarValue::TimestampMicrosecond(v, _) => {
+ // Timezone is unimplemented now.
+ ConstantVector::new(Arc::new(TimestampMicrosecondVector::from(vec![v])), length)
+ }
+ ScalarValue::TimestampNanosecond(v, _) => {
+ // Timezone is unimplemented now.
+ ConstantVector::new(Arc::new(TimestampNanosecondVector::from(vec![v])), length)
+ }
+ ScalarValue::Decimal128(_, _, _)
+ | ScalarValue::Time64(_)
+ | ScalarValue::IntervalYearMonth(_)
+ | ScalarValue::IntervalDayTime(_)
+ | ScalarValue::IntervalMonthDayNano(_)
+ | ScalarValue::Struct(_, _)
+ | ScalarValue::Dictionary(_, _) => {
+ return error::ConversionSnafu {
+ from: format!("Unsupported scalar value: {}", value),
+ }
+ .fail()
+ }
+ };
+
+ Ok(Arc::new(vector))
+ }
+
+    /// Try to cast an arrow array into a vector.
+    ///
+    /// # Panics
+    /// Panics if the given arrow data type is not supported.
+ pub fn try_into_vector(array: impl AsRef<dyn Array>) -> Result<VectorRef> {
+ Ok(match array.as_ref().data_type() {
+ ArrowDataType::Null => Arc::new(NullVector::try_from_arrow_array(array)?),
+ ArrowDataType::Boolean => Arc::new(BooleanVector::try_from_arrow_array(array)?),
+ ArrowDataType::LargeBinary => Arc::new(BinaryVector::try_from_arrow_array(array)?),
+ ArrowDataType::Int8 => Arc::new(Int8Vector::try_from_arrow_array(array)?),
+ ArrowDataType::Int16 => Arc::new(Int16Vector::try_from_arrow_array(array)?),
+ ArrowDataType::Int32 => Arc::new(Int32Vector::try_from_arrow_array(array)?),
+ ArrowDataType::Int64 => Arc::new(Int64Vector::try_from_arrow_array(array)?),
+ ArrowDataType::UInt8 => Arc::new(UInt8Vector::try_from_arrow_array(array)?),
+ ArrowDataType::UInt16 => Arc::new(UInt16Vector::try_from_arrow_array(array)?),
+ ArrowDataType::UInt32 => Arc::new(UInt32Vector::try_from_arrow_array(array)?),
+ ArrowDataType::UInt64 => Arc::new(UInt64Vector::try_from_arrow_array(array)?),
+ ArrowDataType::Float32 => Arc::new(Float32Vector::try_from_arrow_array(array)?),
+ ArrowDataType::Float64 => Arc::new(Float64Vector::try_from_arrow_array(array)?),
+ ArrowDataType::Utf8 => Arc::new(StringVector::try_from_arrow_array(array)?),
+ ArrowDataType::Date32 => Arc::new(DateVector::try_from_arrow_array(array)?),
+ ArrowDataType::Date64 => Arc::new(DateTimeVector::try_from_arrow_array(array)?),
+ ArrowDataType::List(_) => Arc::new(ListVector::try_from_arrow_array(array)?),
+ ArrowDataType::Timestamp(unit, _) => match unit {
+ TimeUnit::Second => Arc::new(TimestampSecondVector::try_from_arrow_array(array)?),
+ TimeUnit::Millisecond => {
+ Arc::new(TimestampMillisecondVector::try_from_arrow_array(array)?)
+ }
+ TimeUnit::Microsecond => {
+ Arc::new(TimestampMicrosecondVector::try_from_arrow_array(array)?)
+ }
+ TimeUnit::Nanosecond => {
+ Arc::new(TimestampNanosecondVector::try_from_arrow_array(array)?)
+ }
+ },
+ ArrowDataType::Float16
+ | ArrowDataType::Time32(_)
+ | ArrowDataType::Time64(_)
+ | ArrowDataType::Duration(_)
+ | ArrowDataType::Interval(_)
+ | ArrowDataType::Binary
+ | ArrowDataType::FixedSizeBinary(_)
+ | ArrowDataType::LargeUtf8
+ | ArrowDataType::LargeList(_)
+ | ArrowDataType::FixedSizeList(_, _)
+ | ArrowDataType::Struct(_)
+ | ArrowDataType::Union(_, _, _)
+ | ArrowDataType::Dictionary(_, _)
+ | ArrowDataType::Decimal128(_, _)
+ | ArrowDataType::Decimal256(_, _)
+ | ArrowDataType::Map(_, _) => {
+ unimplemented!("Arrow array datatype: {:?}", array.as_ref().data_type())
+ }
+ })
+ }
+
+ /// Try to cast slice of `arrays` to vectors.
+ pub fn try_into_vectors(arrays: &[ArrayRef]) -> Result<Vec<VectorRef>> {
+ arrays.iter().map(Self::try_into_vector).collect()
+ }
+
+    /// Perform the SQL `LIKE` operation on `names` against the pattern `s`.
+ pub fn like_utf8(names: Vec<String>, s: &str) -> Result<VectorRef> {
+ let array = StringArray::from(names);
+
+ let filter = comparison::like_utf8_scalar(&array, s).context(error::ArrowComputeSnafu)?;
+
+ let result = compute::filter(&array, &filter).context(error::ArrowComputeSnafu)?;
+ Helper::try_into_vector(result)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use arrow::array::{
+ ArrayRef, BooleanArray, Date32Array, Date64Array, Float32Array, Float64Array, Int16Array,
+ Int32Array, Int64Array, Int8Array, LargeBinaryArray, ListArray, NullArray,
+ TimestampMicrosecondArray, TimestampMillisecondArray, TimestampNanosecondArray,
+ TimestampSecondArray, UInt16Array, UInt32Array, UInt64Array, UInt8Array,
+ };
+ use arrow::datatypes::{Field, Int32Type};
+ use common_time::{Date, DateTime};
+
+ use super::*;
+ use crate::value::Value;
+ use crate::vectors::ConcreteDataType;
+
+ #[test]
+ fn test_try_into_vectors() {
+ let arrays: Vec<ArrayRef> = vec![
+ Arc::new(Int32Array::from(vec![1])),
+ Arc::new(Int32Array::from(vec![2])),
+ Arc::new(Int32Array::from(vec![3])),
+ ];
+ let vectors = Helper::try_into_vectors(&arrays);
+ assert!(vectors.is_ok());
+ let vectors = vectors.unwrap();
+ vectors.iter().for_each(|v| assert_eq!(1, v.len()));
+ assert_eq!(Value::Int32(1), vectors[0].get(0));
+ assert_eq!(Value::Int32(2), vectors[1].get(0));
+ assert_eq!(Value::Int32(3), vectors[2].get(0));
+ }
+
+ #[test]
+ fn test_try_into_date_vector() {
+ let vector = DateVector::from(vec![Some(1), Some(2), None]);
+ let arrow_array = vector.to_arrow_array();
+ assert_eq!(&ArrowDataType::Date32, arrow_array.data_type());
+ let vector_converted = Helper::try_into_vector(arrow_array).unwrap();
+ assert_eq!(vector.len(), vector_converted.len());
+ for i in 0..vector_converted.len() {
+ assert_eq!(vector.get(i), vector_converted.get(i));
+ }
+ }
+
+ #[test]
+ fn test_try_from_scalar_date_value() {
+ let vector = Helper::try_from_scalar_value(ScalarValue::Date32(Some(42)), 3).unwrap();
+ assert_eq!(ConcreteDataType::date_datatype(), vector.data_type());
+ assert_eq!(3, vector.len());
+ for i in 0..vector.len() {
+ assert_eq!(Value::Date(Date::new(42)), vector.get(i));
+ }
+ }
+
+ #[test]
+ fn test_try_from_scalar_datetime_value() {
+ let vector = Helper::try_from_scalar_value(ScalarValue::Date64(Some(42)), 3).unwrap();
+ assert_eq!(ConcreteDataType::datetime_datatype(), vector.data_type());
+ assert_eq!(3, vector.len());
+ for i in 0..vector.len() {
+ assert_eq!(Value::DateTime(DateTime::new(42)), vector.get(i));
+ }
+ }
+
+ #[test]
+ fn test_try_from_list_value() {
+ let value = ScalarValue::List(
+ Some(vec![
+ ScalarValue::Int32(Some(1)),
+ ScalarValue::Int32(Some(2)),
+ ]),
+ Box::new(Field::new("item", ArrowDataType::Int32, true)),
+ );
+ let vector = Helper::try_from_scalar_value(value, 3).unwrap();
+ assert_eq!(
+ ConcreteDataType::list_datatype(ConcreteDataType::int32_datatype()),
+ vector.data_type()
+ );
+ assert_eq!(3, vector.len());
+ for i in 0..vector.len() {
+ let v = vector.get(i);
+ let items = v.as_list().unwrap().unwrap().items().as_ref().unwrap();
+ assert_eq!(vec![Value::Int32(1), Value::Int32(2)], **items);
+ }
+ }
+
+ #[test]
+ fn test_like_utf8() {
+ fn assert_vector(expected: Vec<&str>, actual: &VectorRef) {
+ let actual = actual.as_any().downcast_ref::<StringVector>().unwrap();
+ assert_eq!(*actual, StringVector::from(expected));
+ }
+
+ let names: Vec<String> = vec!["greptime", "hello", "public", "world"]
+ .into_iter()
+ .map(|x| x.to_string())
+ .collect();
+
+ let ret = Helper::like_utf8(names.clone(), "%ll%").unwrap();
+ assert_vector(vec!["hello"], &ret);
+
+ let ret = Helper::like_utf8(names.clone(), "%time").unwrap();
+ assert_vector(vec!["greptime"], &ret);
+
+ let ret = Helper::like_utf8(names.clone(), "%ld").unwrap();
+ assert_vector(vec!["world"], &ret);
+
+ let ret = Helper::like_utf8(names, "%").unwrap();
+ assert_vector(vec!["greptime", "hello", "public", "world"], &ret);
+ }
+
+ fn check_try_into_vector(array: impl Array + 'static) {
+ let array: ArrayRef = Arc::new(array);
+ let vector = Helper::try_into_vector(array.clone()).unwrap();
+ assert_eq!(&array, &vector.to_arrow_array());
+ }
+
+ #[test]
+ fn test_try_into_vector() {
+ check_try_into_vector(NullArray::new(2));
+ check_try_into_vector(BooleanArray::from(vec![true, false]));
+ check_try_into_vector(LargeBinaryArray::from(vec![
+ "hello".as_bytes(),
+ "world".as_bytes(),
+ ]));
+ check_try_into_vector(Int8Array::from(vec![1, 2, 3]));
+ check_try_into_vector(Int16Array::from(vec![1, 2, 3]));
+ check_try_into_vector(Int32Array::from(vec![1, 2, 3]));
+ check_try_into_vector(Int64Array::from(vec![1, 2, 3]));
+ check_try_into_vector(UInt8Array::from(vec![1, 2, 3]));
+ check_try_into_vector(UInt16Array::from(vec![1, 2, 3]));
+ check_try_into_vector(UInt32Array::from(vec![1, 2, 3]));
+ check_try_into_vector(UInt64Array::from(vec![1, 2, 3]));
+ check_try_into_vector(Float32Array::from(vec![1.0, 2.0, 3.0]));
+ check_try_into_vector(Float64Array::from(vec![1.0, 2.0, 3.0]));
+ check_try_into_vector(StringArray::from(vec!["hello", "world"]));
+ check_try_into_vector(Date32Array::from(vec![1, 2, 3]));
+ check_try_into_vector(Date64Array::from(vec![1, 2, 3]));
+ let data = vec![None, Some(vec![Some(6), Some(7)])];
+ let list_array = ListArray::from_iter_primitive::<Int32Type, _, _>(data);
+ check_try_into_vector(list_array);
+ check_try_into_vector(TimestampSecondArray::from(vec![1, 2, 3]));
+ check_try_into_vector(TimestampMillisecondArray::from(vec![1, 2, 3]));
+ check_try_into_vector(TimestampMicrosecondArray::from(vec![1, 2, 3]));
+ check_try_into_vector(TimestampNanosecondArray::from(vec![1, 2, 3]));
+ }
+}
diff --git a/src/datatypes2/src/vectors/list.rs b/src/datatypes2/src/vectors/list.rs
new file mode 100644
index 000000000000..747e03557ba2
--- /dev/null
+++ b/src/datatypes2/src/vectors/list.rs
@@ -0,0 +1,747 @@
+// Copyright 2022 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::any::Any;
+use std::sync::Arc;
+
+use arrow::array::{
+ Array, ArrayData, ArrayRef, BooleanBufferBuilder, Int32BufferBuilder, ListArray,
+};
+use arrow::buffer::Buffer;
+use arrow::datatypes::DataType as ArrowDataType;
+use serde_json::Value as JsonValue;
+
+use crate::data_type::{ConcreteDataType, DataType};
+use crate::error::Result;
+use crate::scalars::{ScalarVector, ScalarVectorBuilder};
+use crate::serialize::Serializable;
+use crate::types::ListType;
+use crate::value::{ListValue, ListValueRef, Value, ValueRef};
+use crate::vectors::{self, Helper, MutableVector, Validity, Vector, VectorRef};
+
+/// Vector of Lists, basically backed by Arrow's `ListArray`.
+#[derive(Debug, PartialEq)]
+pub struct ListVector {
+ array: ListArray,
+ /// The datatype of the items in the list.
+ item_type: ConcreteDataType,
+}
+
+impl ListVector {
+    /// Iterates over elements as [VectorRef].
+ pub fn values_iter(&self) -> impl Iterator<Item = Result<Option<VectorRef>>> + '_ {
+ self.array
+ .iter()
+ .map(|value_opt| value_opt.map(Helper::try_into_vector).transpose())
+ }
+
+ fn to_array_data(&self) -> ArrayData {
+ self.array.data().clone()
+ }
+
+ fn from_array_data_and_type(data: ArrayData, item_type: ConcreteDataType) -> Self {
+ Self {
+ array: ListArray::from(data),
+ item_type,
+ }
+ }
+
+ pub(crate) fn as_arrow(&self) -> &dyn Array {
+ &self.array
+ }
+}
+
+impl Vector for ListVector {
+ fn data_type(&self) -> ConcreteDataType {
+ ConcreteDataType::List(ListType::new(self.item_type.clone()))
+ }
+
+ fn vector_type_name(&self) -> String {
+ "ListVector".to_string()
+ }
+
+ fn as_any(&self) -> &dyn Any {
+ self
+ }
+
+ fn len(&self) -> usize {
+ self.array.len()
+ }
+
+ fn to_arrow_array(&self) -> ArrayRef {
+ let data = self.to_array_data();
+ Arc::new(ListArray::from(data))
+ }
+
+ fn to_boxed_arrow_array(&self) -> Box<dyn Array> {
+ let data = self.to_array_data();
+ Box::new(ListArray::from(data))
+ }
+
+ fn validity(&self) -> Validity {
+ vectors::impl_validity_for_vector!(self.array)
+ }
+
+ fn memory_size(&self) -> usize {
+ self.array.get_buffer_memory_size()
+ }
+
+ fn null_count(&self) -> usize {
+ self.array.null_count()
+ }
+
+ fn is_null(&self, row: usize) -> bool {
+ self.array.is_null(row)
+ }
+
+ fn slice(&self, offset: usize, length: usize) -> VectorRef {
+ let data = self.array.data().slice(offset, length);
+ Arc::new(Self::from_array_data_and_type(data, self.item_type.clone()))
+ }
+
+ fn get(&self, index: usize) -> Value {
+ if !self.array.is_valid(index) {
+ return Value::Null;
+ }
+
+ let array = &self.array.value(index);
+ let vector = Helper::try_into_vector(array).unwrap_or_else(|_| {
+ panic!(
+ "arrow array with datatype {:?} cannot converted to our vector",
+ array.data_type()
+ )
+ });
+ let values = (0..vector.len())
+ .map(|i| vector.get(i))
+ .collect::<Vec<Value>>();
+ Value::List(ListValue::new(
+ Some(Box::new(values)),
+ self.item_type.clone(),
+ ))
+ }
+
+ fn get_ref(&self, index: usize) -> ValueRef {
+ ValueRef::List(ListValueRef::Indexed {
+ vector: self,
+ idx: index,
+ })
+ }
+}
+
+impl Serializable for ListVector {
+ fn serialize_to_json(&self) -> Result<Vec<JsonValue>> {
+ self.array
+ .iter()
+ .map(|v| match v {
+ None => Ok(JsonValue::Null),
+ Some(v) => Helper::try_into_vector(v)
+ .and_then(|v| v.serialize_to_json())
+ .map(JsonValue::Array),
+ })
+ .collect()
+ }
+}
+
+impl From<ListArray> for ListVector {
+ fn from(array: ListArray) -> Self {
+ let item_type = ConcreteDataType::from_arrow_type(match array.data_type() {
+ ArrowDataType::List(field) => field.data_type(),
+ other => panic!(
+ "Try to create ListVector from an arrow array with type {:?}",
+ other
+ ),
+ });
+ Self { array, item_type }
+ }
+}
+
+vectors::impl_try_from_arrow_array_for_vector!(ListArray, ListVector);
+
+pub struct ListIter<'a> {
+ vector: &'a ListVector,
+ idx: usize,
+}
+
+impl<'a> ListIter<'a> {
+ fn new(vector: &'a ListVector) -> ListIter {
+ ListIter { vector, idx: 0 }
+ }
+}
+
+impl<'a> Iterator for ListIter<'a> {
+ type Item = Option<ListValueRef<'a>>;
+
+ #[inline]
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.idx >= self.vector.len() {
+ return None;
+ }
+
+ let idx = self.idx;
+ self.idx += 1;
+
+ if self.vector.is_null(idx) {
+ return Some(None);
+ }
+
+ Some(Some(ListValueRef::Indexed {
+ vector: self.vector,
+ idx,
+ }))
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (self.vector.len(), Some(self.vector.len()))
+ }
+}
+
+impl ScalarVector for ListVector {
+ type OwnedItem = ListValue;
+ type RefItem<'a> = ListValueRef<'a>;
+ type Iter<'a> = ListIter<'a>;
+ type Builder = ListVectorBuilder;
+
+ fn get_data(&self, idx: usize) -> Option<Self::RefItem<'_>> {
+ if self.array.is_valid(idx) {
+ Some(ListValueRef::Indexed { vector: self, idx })
+ } else {
+ None
+ }
+ }
+
+ fn iter_data(&self) -> Self::Iter<'_> {
+ ListIter::new(self)
+ }
+}
+
+// Ported from arrow's GenericListBuilder.
+// See https://github.com/apache/arrow-rs/blob/94565bca99b5d9932a3e9a8e094aaf4e4384b1e5/arrow-array/src/builder/generic_list_builder.rs
+/// [ListVector] builder.
+pub struct ListVectorBuilder {
+ item_type: ConcreteDataType,
+ offsets_builder: Int32BufferBuilder,
+ null_buffer_builder: NullBufferBuilder,
+ values_builder: Box<dyn MutableVector>,
+}
+
+impl ListVectorBuilder {
+ /// Creates a new [`ListVectorBuilder`]. `item_type` is the data type of the list item, `capacity`
+ /// is the number of items to pre-allocate space for in this builder.
+ pub fn with_type_capacity(item_type: ConcreteDataType, capacity: usize) -> ListVectorBuilder {
+ let mut offsets_builder = Int32BufferBuilder::new(capacity + 1);
+ offsets_builder.append(0);
+        // The actual required capacity of the child builder might be greater than the capacity
+        // of the `ListVector` if each list contains more than one element.
+ let values_builder = item_type.create_mutable_vector(capacity);
+
+ ListVectorBuilder {
+ item_type,
+ offsets_builder,
+ null_buffer_builder: NullBufferBuilder::new(capacity),
+ values_builder,
+ }
+ }
+
+ /// Finish the current variable-length list vector slot.
+ fn finish_list(&mut self, is_valid: bool) {
+ self.offsets_builder
+ .append(i32::try_from(self.values_builder.len()).unwrap());
+ self.null_buffer_builder.append(is_valid);
+ }
+
+ fn push_null(&mut self) {
+ self.finish_list(false);
+ }
+
+ fn push_list_value(&mut self, list_value: &ListValue) -> Result<()> {
+ if let Some(items) = list_value.items() {
+ for item in &**items {
+ self.values_builder.push_value_ref(item.as_value_ref())?;
+ }
+ }
+
+ self.finish_list(true);
+ Ok(())
+ }
+}
+
+impl MutableVector for ListVectorBuilder {
+ fn data_type(&self) -> ConcreteDataType {
+ ConcreteDataType::list_datatype(self.item_type.clone())
+ }
+
+ fn len(&self) -> usize {
+ self.null_buffer_builder.len()
+ }
+
+ fn as_any(&self) -> &dyn Any {
+ self
+ }
+
+ fn as_mut_any(&mut self) -> &mut dyn Any {
+ self
+ }
+
+ fn to_vector(&mut self) -> VectorRef {
+ Arc::new(self.finish())
+ }
+
+ fn push_value_ref(&mut self, value: ValueRef) -> Result<()> {
+ if let Some(list_ref) = value.as_list()? {
+ match list_ref {
+ ListValueRef::Indexed { vector, idx } => match vector.get(idx).as_list()? {
+ Some(list_value) => self.push_list_value(list_value)?,
+ None => self.push_null(),
+ },
+ ListValueRef::Ref { val } => self.push_list_value(val)?,
+ }
+ } else {
+ self.push_null();
+ }
+
+ Ok(())
+ }
+
+ fn extend_slice_of(&mut self, vector: &dyn Vector, offset: usize, length: usize) -> Result<()> {
+ for idx in offset..offset + length {
+ let value = vector.get_ref(idx);
+ self.push_value_ref(value)?;
+ }
+
+ Ok(())
+ }
+}
+
+impl ScalarVectorBuilder for ListVectorBuilder {
+ type VectorType = ListVector;
+
+ fn with_capacity(_capacity: usize) -> Self {
+ panic!("Must use ListVectorBuilder::with_type_capacity()");
+ }
+
+ fn push(&mut self, value: Option<<Self::VectorType as ScalarVector>::RefItem<'_>>) {
+        // We expect the input ListValue to have the same inner type as the builder when using
+        // push(), so just panic if `push_value_ref()` returns an error, which indicates an
+        // invalid input value type.
+ self.push_value_ref(value.into()).unwrap_or_else(|e| {
+ panic!(
+ "Failed to push value, expect value type {:?}, err:{}",
+ self.item_type, e
+ );
+ });
+ }
+
+ fn finish(&mut self) -> Self::VectorType {
+ let len = self.len();
+ let values_vector = self.values_builder.to_vector();
+ let values_arr = values_vector.to_arrow_array();
+ let values_data = values_arr.data();
+
+ let offset_buffer = self.offsets_builder.finish();
+ let null_bit_buffer = self.null_buffer_builder.finish();
+ // Re-initialize the offsets_builder.
+ self.offsets_builder.append(0);
+ let data_type = ConcreteDataType::list_datatype(self.item_type.clone()).as_arrow_type();
+ let array_data_builder = ArrayData::builder(data_type)
+ .len(len)
+ .add_buffer(offset_buffer)
+ .add_child_data(values_data.clone())
+ .null_bit_buffer(null_bit_buffer);
+
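+        // Safety: the offsets, child data and null bitmap are built together by this builder
+        // (mirroring arrow's GenericListBuilder), so it should be safe to skip validation here.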
+ let array_data = unsafe { array_data_builder.build_unchecked() };
+ let array = ListArray::from(array_data);
+
+ ListVector {
+ array,
+ item_type: self.item_type.clone(),
+ }
+ }
+}
+
+// Ported from https://github.com/apache/arrow-rs/blob/94565bca99b5d9932a3e9a8e094aaf4e4384b1e5/arrow-array/src/builder/null_buffer_builder.rs
+/// Builder for creating the null bit buffer.
+/// This builder only materializes the buffer when we append `false`.
+/// If you only append `true`s to the builder, what you get will be
+/// `None` when calling [`finish`](#method.finish).
+/// This optimization is **very** important for performance.
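+///
+/// For example, appending `[true, true]` and then calling `finish()` returns `None`, while
+/// appending `[true, false]` materializes the bitmap and `finish()` returns `Some(buffer)`.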
+#[derive(Debug)]
+struct NullBufferBuilder {
+ bitmap_builder: Option<BooleanBufferBuilder>,
+ /// Store the length of the buffer before materializing.
+ len: usize,
+ capacity: usize,
+}
+
+impl NullBufferBuilder {
+ /// Creates a new empty builder.
+ /// `capacity` is the number of bits in the null buffer.
+ fn new(capacity: usize) -> Self {
+ Self {
+ bitmap_builder: None,
+ len: 0,
+ capacity,
+ }
+ }
+
+ fn len(&self) -> usize {
+ if let Some(b) = &self.bitmap_builder {
+ b.len()
+ } else {
+ self.len
+ }
+ }
+
+ /// Appends a `true` into the builder
+ /// to indicate that this item is not null.
+ #[inline]
+ fn append_non_null(&mut self) {
+ if let Some(buf) = self.bitmap_builder.as_mut() {
+ buf.append(true)
+ } else {
+ self.len += 1;
+ }
+ }
+
+ /// Appends a `false` into the builder
+ /// to indicate that this item is null.
+ #[inline]
+ fn append_null(&mut self) {
+ self.materialize_if_needed();
+ self.bitmap_builder.as_mut().unwrap().append(false);
+ }
+
+ /// Appends a boolean value into the builder.
+ #[inline]
+ fn append(&mut self, not_null: bool) {
+ if not_null {
+ self.append_non_null()
+ } else {
+ self.append_null()
+ }
+ }
+
+ /// Builds the null buffer and resets the builder.
+ /// Returns `None` if the builder only contains `true`s.
+ fn finish(&mut self) -> Option<Buffer> {
+ let buf = self.bitmap_builder.as_mut().map(|b| b.finish());
+ self.bitmap_builder = None;
+ self.len = 0;
+ buf
+ }
+
+ #[inline]
+ fn materialize_if_needed(&mut self) {
+ if self.bitmap_builder.is_none() {
+ self.materialize()
+ }
+ }
+
+ #[cold]
+ fn materialize(&mut self) {
+ if self.bitmap_builder.is_none() {
+ let mut b = BooleanBufferBuilder::new(self.len.max(self.capacity));
+ b.append_n(self.len, true);
+ self.bitmap_builder = Some(b);
+ }
+ }
+}
+
+#[cfg(test)]
+pub mod tests {
+ use arrow::array::{Int32Array, Int32Builder, ListBuilder};
+ use serde_json::json;
+
+ use super::*;
+ use crate::scalars::ScalarRef;
+ use crate::types::ListType;
+ use crate::vectors::Int32Vector;
+
+ pub fn new_list_vector(data: &[Option<Vec<Option<i32>>>]) -> ListVector {
+ let mut builder =
+ ListVectorBuilder::with_type_capacity(ConcreteDataType::int32_datatype(), 8);
+ for vec_opt in data {
+ if let Some(vec) = vec_opt {
+ let values = vec.iter().map(|v| Value::from(*v)).collect();
+ let values = Some(Box::new(values));
+ let list_value = ListValue::new(values, ConcreteDataType::int32_datatype());
+
+ builder.push(Some(ListValueRef::Ref { val: &list_value }));
+ } else {
+ builder.push(None);
+ }
+ }
+
+ builder.finish()
+ }
+
+ fn new_list_array(data: &[Option<Vec<Option<i32>>>]) -> ListArray {
+ let mut builder = ListBuilder::new(Int32Builder::new());
+ for vec_opt in data {
+ if let Some(vec) = vec_opt {
+ for value_opt in vec {
+ builder.values().append_option(*value_opt);
+ }
+
+ builder.append(true);
+ } else {
+ builder.append(false);
+ }
+ }
+
+ builder.finish()
+ }
+
+ #[test]
+ fn test_list_vector() {
+ let data = vec![
+ Some(vec![Some(1), Some(2), Some(3)]),
+ None,
+ Some(vec![Some(4), None, Some(6)]),
+ ];
+
+ let list_vector = new_list_vector(&data);
+
+ assert_eq!(
+ ConcreteDataType::List(ListType::new(ConcreteDataType::int32_datatype())),
+ list_vector.data_type()
+ );
+ assert_eq!("ListVector", list_vector.vector_type_name());
+ assert_eq!(3, list_vector.len());
+ assert!(!list_vector.is_null(0));
+ assert!(list_vector.is_null(1));
+ assert!(!list_vector.is_null(2));
+
+ let arrow_array = new_list_array(&data);
+ assert_eq!(
+ arrow_array,
+ *list_vector
+ .to_arrow_array()
+ .as_any()
+ .downcast_ref::<ListArray>()
+ .unwrap()
+ );
+ let validity = list_vector.validity();
+ assert!(!validity.is_all_null());
+ assert!(!validity.is_all_valid());
+ assert!(validity.is_set(0));
+ assert!(!validity.is_set(1));
+ assert!(validity.is_set(2));
+ assert_eq!(256, list_vector.memory_size());
+
+ let slice = list_vector.slice(0, 2).to_arrow_array();
+ let sliced_array = slice.as_any().downcast_ref::<ListArray>().unwrap();
+ assert_eq!(
+ Int32Array::from_iter_values([1, 2, 3]),
+ *sliced_array
+ .value(0)
+ .as_any()
+ .downcast_ref::<Int32Array>()
+ .unwrap()
+ );
+ assert!(sliced_array.is_null(1));
+
+ assert_eq!(
+ Value::List(ListValue::new(
+ Some(Box::new(vec![
+ Value::Int32(1),
+ Value::Int32(2),
+ Value::Int32(3)
+ ])),
+ ConcreteDataType::int32_datatype()
+ )),
+ list_vector.get(0)
+ );
+ let value_ref = list_vector.get_ref(0);
+ assert!(matches!(
+ value_ref,
+ ValueRef::List(ListValueRef::Indexed { .. })
+ ));
+ let value_ref = list_vector.get_ref(1);
+ if let ValueRef::List(ListValueRef::Indexed { idx, .. }) = value_ref {
+ assert_eq!(1, idx);
+ } else {
+ unreachable!()
+ }
+ assert_eq!(Value::Null, list_vector.get(1));
+ assert_eq!(
+ Value::List(ListValue::new(
+ Some(Box::new(vec![
+ Value::Int32(4),
+ Value::Null,
+ Value::Int32(6)
+ ])),
+ ConcreteDataType::int32_datatype()
+ )),
+ list_vector.get(2)
+ );
+ }
+
+ #[test]
+ fn test_from_arrow_array() {
+ let data = vec![
+ Some(vec![Some(1), Some(2), Some(3)]),
+ None,
+ Some(vec![Some(4), None, Some(6)]),
+ ];
+
+ let arrow_array = new_list_array(&data);
+ let array_ref: ArrayRef = Arc::new(arrow_array);
+ let expect = new_list_vector(&data);
+
+ // Test try from ArrayRef
+ let list_vector = ListVector::try_from_arrow_array(array_ref).unwrap();
+ assert_eq!(expect, list_vector);
+
+ // Test from
+ let arrow_array = new_list_array(&data);
+ let list_vector = ListVector::from(arrow_array);
+ assert_eq!(expect, list_vector);
+ }
+
+ #[test]
+ fn test_iter_list_vector_values() {
+ let data = vec![
+ Some(vec![Some(1), Some(2), Some(3)]),
+ None,
+ Some(vec![Some(4), None, Some(6)]),
+ ];
+
+ let list_vector = new_list_vector(&data);
+
+ assert_eq!(
+ ConcreteDataType::List(ListType::new(ConcreteDataType::int32_datatype())),
+ list_vector.data_type()
+ );
+ let mut iter = list_vector.values_iter();
+ assert_eq!(
+ Arc::new(Int32Vector::from_slice(&[1, 2, 3])) as VectorRef,
+ *iter.next().unwrap().unwrap().unwrap()
+ );
+ assert!(iter.next().unwrap().unwrap().is_none());
+ assert_eq!(
+ Arc::new(Int32Vector::from(vec![Some(4), None, Some(6)])) as VectorRef,
+ *iter.next().unwrap().unwrap().unwrap(),
+ );
+ assert!(iter.next().is_none())
+ }
+
+ #[test]
+ fn test_serialize_to_json() {
+ let data = vec![
+ Some(vec![Some(1), Some(2), Some(3)]),
+ None,
+ Some(vec![Some(4), None, Some(6)]),
+ ];
+
+ let list_vector = new_list_vector(&data);
+ assert_eq!(
+ vec![json!([1, 2, 3]), json!(null), json!([4, null, 6]),],
+ list_vector.serialize_to_json().unwrap()
+ );
+ }
+
+ #[test]
+ fn test_list_vector_builder() {
+ let mut builder =
+ ListType::new(ConcreteDataType::int32_datatype()).create_mutable_vector(3);
+ builder
+ .push_value_ref(ValueRef::List(ListValueRef::Ref {
+ val: &ListValue::new(
+ Some(Box::new(vec![
+ Value::Int32(4),
+ Value::Null,
+ Value::Int32(6),
+ ])),
+ ConcreteDataType::int32_datatype(),
+ ),
+ }))
+ .unwrap();
+ assert!(builder.push_value_ref(ValueRef::Int32(123)).is_err());
+
+ let data = vec![
+ Some(vec![Some(1), Some(2), Some(3)]),
+ None,
+ Some(vec![Some(7), Some(8), None]),
+ ];
+ let input = new_list_vector(&data);
+ builder.extend_slice_of(&input, 1, 2).unwrap();
+ assert!(builder
+ .extend_slice_of(&crate::vectors::Int32Vector::from_slice(&[13]), 0, 1)
+ .is_err());
+ let vector = builder.to_vector();
+
+ let expect: VectorRef = Arc::new(new_list_vector(&[
+ Some(vec![Some(4), None, Some(6)]),
+ None,
+ Some(vec![Some(7), Some(8), None]),
+ ]));
+ assert_eq!(expect, vector);
+ }
+
+ #[test]
+ fn test_list_vector_for_scalar() {
+ let mut builder =
+ ListVectorBuilder::with_type_capacity(ConcreteDataType::int32_datatype(), 2);
+ builder.push(None);
+ builder.push(Some(ListValueRef::Ref {
+ val: &ListValue::new(
+ Some(Box::new(vec![
+ Value::Int32(4),
+ Value::Null,
+ Value::Int32(6),
+ ])),
+ ConcreteDataType::int32_datatype(),
+ ),
+ }));
+ let vector = builder.finish();
+
+ let expect = new_list_vector(&[None, Some(vec![Some(4), None, Some(6)])]);
+ assert_eq!(expect, vector);
+
+ assert!(vector.get_data(0).is_none());
+ assert_eq!(
+ ListValueRef::Indexed {
+ vector: &vector,
+ idx: 1
+ },
+ vector.get_data(1).unwrap()
+ );
+ assert_eq!(
+ *vector.get(1).as_list().unwrap().unwrap(),
+ vector.get_data(1).unwrap().to_owned_scalar()
+ );
+
+ let mut iter = vector.iter_data();
+ assert!(iter.next().unwrap().is_none());
+ assert_eq!(
+ ListValueRef::Indexed {
+ vector: &vector,
+ idx: 1
+ },
+ iter.next().unwrap().unwrap()
+ );
+ assert!(iter.next().is_none());
+
+ let mut iter = vector.iter_data();
+ assert_eq!(2, iter.size_hint().0);
+ assert_eq!(
+ ListValueRef::Indexed {
+ vector: &vector,
+ idx: 1
+ },
+ iter.nth(1).unwrap().unwrap()
+ );
+ }
+}
diff --git a/src/datatypes2/src/vectors/null.rs b/src/datatypes2/src/vectors/null.rs
new file mode 100644
index 000000000000..bb66e09b392b
--- /dev/null
+++ b/src/datatypes2/src/vectors/null.rs
@@ -0,0 +1,282 @@
+// Copyright 2022 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::any::Any;
+use std::fmt;
+use std::sync::Arc;
+
+use arrow::array::{Array, ArrayData, ArrayRef, NullArray};
+use snafu::{ensure, OptionExt};
+
+use crate::data_type::ConcreteDataType;
+use crate::error::{self, Result};
+use crate::serialize::Serializable;
+use crate::types::NullType;
+use crate::value::{Value, ValueRef};
+use crate::vectors::{self, MutableVector, Validity, Vector, VectorRef};
+
+/// A vector where all elements are nulls.
+#[derive(PartialEq)]
+pub struct NullVector {
+ array: NullArray,
+}
+
+// TODO(yingwen): Support null vector with other logical types.
+impl NullVector {
+ /// Create a new `NullVector` with `n` elements.
+ pub fn new(n: usize) -> Self {
+ Self {
+ array: NullArray::new(n),
+ }
+ }
+
+ pub(crate) fn as_arrow(&self) -> &dyn Array {
+ &self.array
+ }
+
+ fn to_array_data(&self) -> ArrayData {
+ self.array.data().clone()
+ }
+}
+
+impl From<NullArray> for NullVector {
+ fn from(array: NullArray) -> Self {
+ Self { array }
+ }
+}
+
+impl Vector for NullVector {
+ fn data_type(&self) -> ConcreteDataType {
+ ConcreteDataType::Null(NullType::default())
+ }
+
+ fn vector_type_name(&self) -> String {
+ "NullVector".to_string()
+ }
+
+ fn as_any(&self) -> &dyn Any {
+ self
+ }
+
+ fn len(&self) -> usize {
+ self.array.len()
+ }
+
+ fn to_arrow_array(&self) -> ArrayRef {
+        // TODO(yingwen): Replace with clone after upgrading to arrow 28.0.
+ let data = self.to_array_data();
+ Arc::new(NullArray::from(data))
+ }
+
+ fn to_boxed_arrow_array(&self) -> Box<dyn Array> {
+ let data = self.to_array_data();
+ Box::new(NullArray::from(data))
+ }
+
+ fn validity(&self) -> Validity {
+ Validity::all_null(self.array.len())
+ }
+
+ fn memory_size(&self) -> usize {
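+        // A `NullArray` holds no value buffers, so report zero memory usage.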
+ 0
+ }
+
+ fn null_count(&self) -> usize {
+ self.array.null_count()
+ }
+
+ fn is_null(&self, _row: usize) -> bool {
+ true
+ }
+
+ fn only_null(&self) -> bool {
+ true
+ }
+
+ fn slice(&self, _offset: usize, length: usize) -> VectorRef {
+ Arc::new(Self::new(length))
+ }
+
+ fn get(&self, _index: usize) -> Value {
+ // Skips bound check for null array.
+ Value::Null
+ }
+
+ fn get_ref(&self, _index: usize) -> ValueRef {
+ // Skips bound check for null array.
+ ValueRef::Null
+ }
+}
+
+impl fmt::Debug for NullVector {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(f, "NullVector({})", self.len())
+ }
+}
+
+impl Serializable for NullVector {
+ fn serialize_to_json(&self) -> Result<Vec<serde_json::Value>> {
+ Ok(std::iter::repeat(serde_json::Value::Null)
+ .take(self.len())
+ .collect())
+ }
+}
+
+vectors::impl_try_from_arrow_array_for_vector!(NullArray, NullVector);
+
+#[derive(Default)]
+pub struct NullVectorBuilder {
+ length: usize,
+}
+
+impl MutableVector for NullVectorBuilder {
+ fn data_type(&self) -> ConcreteDataType {
+ ConcreteDataType::null_datatype()
+ }
+
+ fn len(&self) -> usize {
+ self.length
+ }
+
+ fn as_any(&self) -> &dyn Any {
+ self
+ }
+
+ fn as_mut_any(&mut self) -> &mut dyn Any {
+ self
+ }
+
+ fn to_vector(&mut self) -> VectorRef {
+ let vector = Arc::new(NullVector::new(self.length));
+ self.length = 0;
+ vector
+ }
+
+ fn push_value_ref(&mut self, value: ValueRef) -> Result<()> {
+ ensure!(
+ value.is_null(),
+ error::CastTypeSnafu {
+ msg: format!("Failed to cast value ref {:?} to null", value),
+ }
+ );
+
+ self.length += 1;
+ Ok(())
+ }
+
+ fn extend_slice_of(&mut self, vector: &dyn Vector, offset: usize, length: usize) -> Result<()> {
+ vector
+ .as_any()
+ .downcast_ref::<NullVector>()
+ .with_context(|| error::CastTypeSnafu {
+ msg: format!(
+ "Failed to convert vector from {} to NullVector",
+ vector.vector_type_name()
+ ),
+ })?;
+ assert!(
+ offset + length <= vector.len(),
+ "offset {} + length {} must less than {}",
+ offset,
+ length,
+ vector.len()
+ );
+
+ self.length += length;
+ Ok(())
+ }
+}
+
+pub(crate) fn replicate_null(vector: &NullVector, offsets: &[usize]) -> VectorRef {
+ assert_eq!(offsets.len(), vector.len());
+
+ if offsets.is_empty() {
+ return vector.slice(0, 0);
+ }
+
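+    // The last offset is the total number of elements after replication.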
+ Arc::new(NullVector::new(*offsets.last().unwrap()))
+}
+
+#[cfg(test)]
+mod tests {
+ use serde_json;
+
+ use super::*;
+ use crate::data_type::DataType;
+
+ #[test]
+ fn test_null_vector_misc() {
+ let v = NullVector::new(32);
+
+ assert_eq!(v.len(), 32);
+ assert_eq!(0, v.memory_size());
+ let arrow_arr = v.to_arrow_array();
+ assert_eq!(arrow_arr.null_count(), 32);
+
+ let array2 = arrow_arr.slice(8, 16);
+ assert_eq!(array2.len(), 16);
+ assert_eq!(array2.null_count(), 16);
+
+ assert_eq!("NullVector", v.vector_type_name());
+ assert!(!v.is_const());
+ assert!(v.validity().is_all_null());
+ assert!(v.only_null());
+
+ for i in 0..32 {
+ assert!(v.is_null(i));
+ assert_eq!(Value::Null, v.get(i));
+ assert_eq!(ValueRef::Null, v.get_ref(i));
+ }
+ }
+
+ #[test]
+ fn test_debug_null_vector() {
+ let array = NullVector::new(1024 * 1024);
+ assert_eq!(format!("{:?}", array), "NullVector(1048576)");
+ }
+
+ #[test]
+ fn test_serialize_json() {
+ let vector = NullVector::new(3);
+ let json_value = vector.serialize_to_json().unwrap();
+ assert_eq!(
+ "[null,null,null]",
+ serde_json::to_string(&json_value).unwrap()
+ );
+ }
+
+ #[test]
+ fn test_null_vector_validity() {
+ let vector = NullVector::new(5);
+ assert!(vector.validity().is_all_null());
+ assert_eq!(5, vector.null_count());
+ }
+
+ #[test]
+ fn test_null_vector_builder() {
+ let mut builder = NullType::default().create_mutable_vector(3);
+ builder.push_value_ref(ValueRef::Null).unwrap();
+ assert!(builder.push_value_ref(ValueRef::Int32(123)).is_err());
+
+ let input = NullVector::new(3);
+ builder.extend_slice_of(&input, 1, 2).unwrap();
+ assert!(builder
+ .extend_slice_of(&crate::vectors::Int32Vector::from_slice(&[13]), 0, 1)
+ .is_err());
+ let vector = builder.to_vector();
+
+ let expect: VectorRef = Arc::new(input);
+ assert_eq!(expect, vector);
+ }
+}
diff --git a/src/datatypes2/src/vectors/operations.rs b/src/datatypes2/src/vectors/operations.rs
new file mode 100644
index 000000000000..70ddb4a0317a
--- /dev/null
+++ b/src/datatypes2/src/vectors/operations.rs
@@ -0,0 +1,127 @@
+// Copyright 2022 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+mod filter;
+mod find_unique;
+mod replicate;
+
+use common_base::BitVec;
+
+use crate::error::Result;
+use crate::types::LogicalPrimitiveType;
+use crate::vectors::constant::ConstantVector;
+use crate::vectors::{
+ BinaryVector, BooleanVector, ListVector, NullVector, PrimitiveVector, StringVector, Vector,
+ VectorRef,
+};
+
+/// Vector compute operations.
+pub trait VectorOp {
+    /// Copies each element according to the `offsets` parameter.
+    /// - the `i-th` element is copied `offsets[i] - offsets[i - 1]` times
+    /// - the `0-th` element is copied `offsets[0]` times
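+    ///
+    /// For example, replicating `[a, b, c]` with `offsets = [2, 3, 5]` yields
+    /// `[a, a, b, c, c]`.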
+ ///
+ /// # Panics
+ /// Panics if `offsets.len() != self.len()`.
+ fn replicate(&self, offsets: &[usize]) -> VectorRef;
+
+    /// Marks the `i-th` bit of `selected` as `true` if the `i-th` element of `self` is unique,
+    /// which means no element before it has the same value as it.
+ ///
+ /// The caller should ensure
+ /// 1. the length of `selected` bitmap is equal to `vector.len()`.
+ /// 2. `vector` and `prev_vector` are sorted.
+ ///
+ /// If there are multiple duplicate elements, this function retains the **first** element.
+    /// The first element is considered unique if it is different from its previous element,
+    /// that is, the last element of `prev_vector`.
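+    ///
+    /// For example, starting from an all-`false` bitmap, calling this on a sorted vector
+    /// `[1, 1, 2, 3, 3]` with no `prev_vector` sets `selected` to `[true, false, true, true, false]`.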
+ ///
+ /// # Panics
+ /// Panics if
+ /// - `selected.len() < self.len()`.
+ /// - `prev_vector` and `self` have different data types.
+ fn find_unique(&self, selected: &mut BitVec, prev_vector: Option<&dyn Vector>);
+
+    /// Filters the vector, returning elements matching the `filter` (i.e. where the values are true).
+    ///
+    /// Note that nulls in `filter` are interpreted as `false`, which leads to those elements being masked out.
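+    ///
+    /// For example, filtering `[7, 8, 9]` with `[true, false, true]` returns `[7, 9]`.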
+ fn filter(&self, filter: &BooleanVector) -> Result<VectorRef>;
+}
+
+macro_rules! impl_scalar_vector_op {
+ ($($VectorType: ident),+) => {$(
+ impl VectorOp for $VectorType {
+ fn replicate(&self, offsets: &[usize]) -> VectorRef {
+ replicate::replicate_scalar(self, offsets)
+ }
+
+ fn find_unique(&self, selected: &mut BitVec, prev_vector: Option<&dyn Vector>) {
+ let prev_vector = prev_vector.map(|pv| pv.as_any().downcast_ref::<$VectorType>().unwrap());
+ find_unique::find_unique_scalar(self, selected, prev_vector);
+ }
+
+ fn filter(&self, filter: &BooleanVector) -> Result<VectorRef> {
+ filter::filter_non_constant!(self, $VectorType, filter)
+ }
+ }
+ )+};
+}
+
+impl_scalar_vector_op!(BinaryVector, BooleanVector, ListVector, StringVector);
+
+impl<T: LogicalPrimitiveType> VectorOp for PrimitiveVector<T> {
+ fn replicate(&self, offsets: &[usize]) -> VectorRef {
+ std::sync::Arc::new(replicate::replicate_primitive(self, offsets))
+ }
+
+ fn find_unique(&self, selected: &mut BitVec, prev_vector: Option<&dyn Vector>) {
+ let prev_vector =
+ prev_vector.and_then(|pv| pv.as_any().downcast_ref::<PrimitiveVector<T>>());
+ find_unique::find_unique_scalar(self, selected, prev_vector);
+ }
+
+ fn filter(&self, filter: &BooleanVector) -> Result<VectorRef> {
+ filter::filter_non_constant!(self, PrimitiveVector<T>, filter)
+ }
+}
+
+impl VectorOp for NullVector {
+ fn replicate(&self, offsets: &[usize]) -> VectorRef {
+ replicate::replicate_null(self, offsets)
+ }
+
+ fn find_unique(&self, selected: &mut BitVec, prev_vector: Option<&dyn Vector>) {
+ let prev_vector = prev_vector.and_then(|pv| pv.as_any().downcast_ref::<NullVector>());
+ find_unique::find_unique_null(self, selected, prev_vector);
+ }
+
+ fn filter(&self, filter: &BooleanVector) -> Result<VectorRef> {
+ filter::filter_non_constant!(self, NullVector, filter)
+ }
+}
+
+impl VectorOp for ConstantVector {
+ fn replicate(&self, offsets: &[usize]) -> VectorRef {
+ self.replicate_vector(offsets)
+ }
+
+ fn find_unique(&self, selected: &mut BitVec, prev_vector: Option<&dyn Vector>) {
+ let prev_vector = prev_vector.and_then(|pv| pv.as_any().downcast_ref::<ConstantVector>());
+ find_unique::find_unique_constant(self, selected, prev_vector);
+ }
+
+ fn filter(&self, filter: &BooleanVector) -> Result<VectorRef> {
+ self.filter_vector(filter)
+ }
+}
diff --git a/src/datatypes2/src/vectors/operations/filter.rs b/src/datatypes2/src/vectors/operations/filter.rs
new file mode 100644
index 000000000000..8368a6afb4c4
--- /dev/null
+++ b/src/datatypes2/src/vectors/operations/filter.rs
@@ -0,0 +1,145 @@
+// Copyright 2022 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+macro_rules! filter_non_constant {
+ ($vector: expr, $VectorType: ty, $filter: ident) => {{
+ use std::sync::Arc;
+
+ use arrow::compute;
+ use snafu::ResultExt;
+
+ let arrow_array = $vector.as_arrow();
+ let filtered = compute::filter(arrow_array, $filter.as_boolean_array())
+ .context(crate::error::ArrowComputeSnafu)?;
+ Ok(Arc::new(<$VectorType>::try_from_arrow_array(filtered)?))
+ }};
+}
+
+pub(crate) use filter_non_constant;
+
+#[cfg(test)]
+mod tests {
+ use std::sync::Arc;
+
+ use common_time::{Date, DateTime};
+
+ use crate::scalars::ScalarVector;
+ use crate::timestamp::{
+ TimestampMicrosecond, TimestampMillisecond, TimestampNanosecond, TimestampSecond,
+ };
+ use crate::types::WrapperType;
+ use crate::vectors::constant::ConstantVector;
+ use crate::vectors::{
+ BooleanVector, Int32Vector, NullVector, StringVector, VectorOp, VectorRef,
+ };
+
+ fn check_filter_primitive(expect: &[i32], input: &[i32], filter: &[bool]) {
+ let v = Int32Vector::from_slice(&input);
+ let filter = BooleanVector::from_slice(filter);
+ let out = v.filter(&filter).unwrap();
+
+ let expect: VectorRef = Arc::new(Int32Vector::from_slice(&expect));
+ assert_eq!(expect, out);
+ }
+
+ #[test]
+ fn test_filter_primitive() {
+ check_filter_primitive(&[], &[], &[]);
+ check_filter_primitive(&[5], &[5], &[true]);
+ check_filter_primitive(&[], &[5], &[false]);
+ check_filter_primitive(&[], &[5, 6], &[false, false]);
+ check_filter_primitive(&[5, 6], &[5, 6], &[true, true]);
+ check_filter_primitive(&[], &[5, 6, 7], &[false, false, false]);
+ check_filter_primitive(&[5], &[5, 6, 7], &[true, false, false]);
+ check_filter_primitive(&[6], &[5, 6, 7], &[false, true, false]);
+ check_filter_primitive(&[7], &[5, 6, 7], &[false, false, true]);
+ check_filter_primitive(&[5, 7], &[5, 6, 7], &[true, false, true]);
+ }
+
+ fn check_filter_constant(expect_length: usize, input_length: usize, filter: &[bool]) {
+ let v = ConstantVector::new(Arc::new(Int32Vector::from_slice(&[123])), input_length);
+ let filter = BooleanVector::from_slice(filter);
+ let out = v.filter(&filter).unwrap();
+
+ assert!(out.is_const());
+ assert_eq!(expect_length, out.len());
+ }
+
+ #[test]
+ fn test_filter_constant() {
+ check_filter_constant(0, 0, &[]);
+ check_filter_constant(1, 1, &[true]);
+ check_filter_constant(0, 1, &[false]);
+ check_filter_constant(1, 2, &[false, true]);
+ check_filter_constant(2, 2, &[true, true]);
+ check_filter_constant(1, 4, &[false, false, false, true]);
+ check_filter_constant(2, 4, &[false, true, false, true]);
+ }
+
+ #[test]
+ fn test_filter_scalar() {
+ let v = StringVector::from_slice(&["0", "1", "2", "3"]);
+ let filter = BooleanVector::from_slice(&[false, true, false, true]);
+ let out = v.filter(&filter).unwrap();
+
+ let expect: VectorRef = Arc::new(StringVector::from_slice(&["1", "3"]));
+ assert_eq!(expect, out);
+ }
+
+ #[test]
+ fn test_filter_null() {
+ let v = NullVector::new(5);
+ let filter = BooleanVector::from_slice(&[false, true, false, true, true]);
+ let out = v.filter(&filter).unwrap();
+
+ let expect: VectorRef = Arc::new(NullVector::new(3));
+ assert_eq!(expect, out);
+ }
+
+ macro_rules! impl_filter_date_like_test {
+ ($VectorType: ident, $ValueType: ident, $method: ident) => {{
+ use std::sync::Arc;
+
+ use $crate::vectors::{$VectorType, VectorRef};
+
+ let v = $VectorType::from_iterator((0..5).map($ValueType::$method));
+ let filter = BooleanVector::from_slice(&[false, true, false, true, true]);
+ let out = v.filter(&filter).unwrap();
+
+ let expect: VectorRef = Arc::new($VectorType::from_iterator(
+ [1, 3, 4].into_iter().map($ValueType::$method),
+ ));
+ assert_eq!(expect, out);
+ }};
+ }
+
+ #[test]
+ fn test_filter_date_like() {
+ impl_filter_date_like_test!(DateVector, Date, new);
+ impl_filter_date_like_test!(DateTimeVector, DateTime, new);
+
+ impl_filter_date_like_test!(TimestampSecondVector, TimestampSecond, from_native);
+ impl_filter_date_like_test!(
+ TimestampMillisecondVector,
+ TimestampMillisecond,
+ from_native
+ );
+ impl_filter_date_like_test!(
+ TimestampMicrosecondVector,
+ TimestampMicrosecond,
+ from_native
+ );
+ impl_filter_date_like_test!(TimestampNanosecondVector, TimestampNanosecond, from_native);
+ }
+}
diff --git a/src/datatypes2/src/vectors/operations/find_unique.rs b/src/datatypes2/src/vectors/operations/find_unique.rs
new file mode 100644
index 000000000000..7116a9e90d53
--- /dev/null
+++ b/src/datatypes2/src/vectors/operations/find_unique.rs
@@ -0,0 +1,367 @@
+// Copyright 2022 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use common_base::BitVec;
+
+use crate::scalars::ScalarVector;
+use crate::vectors::constant::ConstantVector;
+use crate::vectors::{NullVector, Vector};
+
+// To implement `find_unique()` correctly, keep in mind that it always marks an element as
+// selected when the element is different from the previous one, and leaves `selected` unchanged
+// in any other case.
+pub(crate) fn find_unique_scalar<'a, T: ScalarVector>(
+ vector: &'a T,
+ selected: &'a mut BitVec,
+ prev_vector: Option<&'a T>,
+) where
+ T::RefItem<'a>: PartialEq,
+{
+ assert!(selected.len() >= vector.len());
+
+ if vector.is_empty() {
+ return;
+ }
+
+ for ((i, current), next) in vector
+ .iter_data()
+ .enumerate()
+ .zip(vector.iter_data().skip(1))
+ {
+ if current != next {
+            // If the next element is different from the current one, mark it as selected.
+ selected.set(i + 1, true);
+ }
+ }
+
+    // Marks the first element as selected if it is different from the previous element, otherwise
+    // keeps the selected bitmap unchanged.
+ let is_first_not_duplicate = prev_vector
+ .map(|pv| {
+ if pv.is_empty() {
+ true
+ } else {
+ let last = pv.get_data(pv.len() - 1);
+ last != vector.get_data(0)
+ }
+ })
+ .unwrap_or(true);
+ if is_first_not_duplicate {
+ selected.set(0, true);
+ }
+}
+
+pub(crate) fn find_unique_null(
+ vector: &NullVector,
+ selected: &mut BitVec,
+ prev_vector: Option<&NullVector>,
+) {
+ if vector.is_empty() {
+ return;
+ }
+
+ let is_first_not_duplicate = prev_vector.map(NullVector::is_empty).unwrap_or(true);
+ if is_first_not_duplicate {
+ selected.set(0, true);
+ }
+}
+
+pub(crate) fn find_unique_constant(
+ vector: &ConstantVector,
+ selected: &mut BitVec,
+ prev_vector: Option<&ConstantVector>,
+) {
+ if vector.is_empty() {
+ return;
+ }
+
+ let is_first_not_duplicate = prev_vector
+ .map(|pv| {
+ if pv.is_empty() {
+ true
+ } else {
+ vector.get_constant_ref() != pv.get_constant_ref()
+ }
+ })
+ .unwrap_or(true);
+
+ if is_first_not_duplicate {
+ selected.set(0, true);
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::sync::Arc;
+
+ use common_time::{Date, DateTime};
+
+ use super::*;
+ use crate::timestamp::*;
+ use crate::vectors::{Int32Vector, StringVector, Vector, VectorOp};
+
+ fn check_bitmap(expect: &[bool], selected: &BitVec) {
+ let actual = selected.iter().collect::<Vec<_>>();
+ assert_eq!(expect, actual);
+ }
+
+ fn check_find_unique_scalar(expect: &[bool], input: &[i32], prev: Option<&[i32]>) {
+ check_find_unique_scalar_opt(expect, input.iter().map(|v| Some(*v)), prev);
+ }
+
+ fn check_find_unique_scalar_opt(
+ expect: &[bool],
+ input: impl Iterator<Item = Option<i32>>,
+ prev: Option<&[i32]>,
+ ) {
+ let input = Int32Vector::from(input.collect::<Vec<_>>());
+ let prev = prev.map(Int32Vector::from_slice);
+
+ let mut selected = BitVec::repeat(false, input.len());
+ input.find_unique(&mut selected, prev.as_ref().map(|v| v as _));
+
+ check_bitmap(expect, &selected);
+ }
+
+ #[test]
+ fn test_find_unique_scalar() {
+ check_find_unique_scalar(&[], &[], None);
+ check_find_unique_scalar(&[true], &[1], None);
+ check_find_unique_scalar(&[true, false], &[1, 1], None);
+ check_find_unique_scalar(&[true, true], &[1, 2], None);
+ check_find_unique_scalar(&[true, true, true, true], &[1, 2, 3, 4], None);
+ check_find_unique_scalar(&[true, false, true, false], &[1, 1, 3, 3], None);
+ check_find_unique_scalar(&[true, false, false, false, true], &[2, 2, 2, 2, 3], None);
+
+ check_find_unique_scalar(&[true], &[5], Some(&[]));
+ check_find_unique_scalar(&[true], &[5], Some(&[3]));
+ check_find_unique_scalar(&[false], &[5], Some(&[5]));
+ check_find_unique_scalar(&[false], &[5], Some(&[4, 5]));
+ check_find_unique_scalar(&[false, true], &[5, 6], Some(&[4, 5]));
+ check_find_unique_scalar(&[false, true, false], &[5, 6, 6], Some(&[4, 5]));
+ check_find_unique_scalar(
+ &[false, true, false, true, true],
+ &[5, 6, 6, 7, 8],
+ Some(&[4, 5]),
+ );
+
+ check_find_unique_scalar_opt(
+ &[true, true, false, true, false],
+ [Some(1), Some(2), Some(2), None, None].into_iter(),
+ None,
+ );
+ }
+
+ #[test]
+ fn test_find_unique_scalar_multi_times_with_prev() {
+ let prev = Int32Vector::from_slice(&[1]);
+
+ let v1 = Int32Vector::from_slice(&[2, 3, 4]);
+ let mut selected = BitVec::repeat(false, v1.len());
+ v1.find_unique(&mut selected, Some(&prev));
+
+        // Though the elements in v2 are the same as prev, the previously selected flags should still be kept.
+ let v2 = Int32Vector::from_slice(&[1, 1, 1]);
+ v2.find_unique(&mut selected, Some(&prev));
+
+ check_bitmap(&[true, true, true], &selected);
+ }
+
+ fn new_bitmap(bits: &[bool]) -> BitVec {
+ BitVec::from_iter(bits)
+ }
+
+ #[test]
+ fn test_find_unique_scalar_with_prev() {
+ let prev = Int32Vector::from_slice(&[1]);
+
+ let mut selected = new_bitmap(&[true, false, true, false]);
+ let v = Int32Vector::from_slice(&[2, 3, 4, 5]);
+ v.find_unique(&mut selected, Some(&prev));
+ // All elements are different.
+ check_bitmap(&[true, true, true, true], &selected);
+
+ let mut selected = new_bitmap(&[true, false, true, false]);
+ let v = Int32Vector::from_slice(&[1, 2, 3, 4]);
+ v.find_unique(&mut selected, Some(&prev));
+        // Though the first element is a duplicate, we keep the flag unchanged.
+ check_bitmap(&[true, true, true, true], &selected);
+
+ // Same case as above, but now `prev` is None.
+ let mut selected = new_bitmap(&[true, false, true, false]);
+ let v = Int32Vector::from_slice(&[1, 2, 3, 4]);
+ v.find_unique(&mut selected, None);
+ check_bitmap(&[true, true, true, true], &selected);
+
+ // Same case as above, but now `prev` is empty.
+ let mut selected = new_bitmap(&[true, false, true, false]);
+ let v = Int32Vector::from_slice(&[1, 2, 3, 4]);
+ v.find_unique(&mut selected, Some(&Int32Vector::from_slice(&[])));
+ check_bitmap(&[true, true, true, true], &selected);
+
+ let mut selected = new_bitmap(&[false, false, false, false]);
+ let v = Int32Vector::from_slice(&[2, 2, 4, 5]);
+ v.find_unique(&mut selected, Some(&prev));
+        // Only v[1] is a duplicate.
+ check_bitmap(&[true, false, true, true], &selected);
+ }
+
+ fn check_find_unique_null(len: usize) {
+ let input = NullVector::new(len);
+ let mut selected = BitVec::repeat(false, input.len());
+ input.find_unique(&mut selected, None);
+
+ let mut expect = vec![false; len];
+ if !expect.is_empty() {
+ expect[0] = true;
+ }
+ check_bitmap(&expect, &selected);
+
+ let mut selected = BitVec::repeat(false, input.len());
+ let prev = Some(NullVector::new(1));
+ input.find_unique(&mut selected, prev.as_ref().map(|v| v as _));
+ let expect = vec![false; len];
+ check_bitmap(&expect, &selected);
+ }
+
+ #[test]
+ fn test_find_unique_null() {
+ for len in 0..5 {
+ check_find_unique_null(len);
+ }
+ }
+
+ #[test]
+ fn test_find_unique_null_with_prev() {
+ let prev = NullVector::new(1);
+
+ // Keep flags unchanged.
+ let mut selected = new_bitmap(&[true, false, true, false]);
+ let v = NullVector::new(4);
+ v.find_unique(&mut selected, Some(&prev));
+ check_bitmap(&[true, false, true, false], &selected);
+
+ // Keep flags unchanged.
+ let mut selected = new_bitmap(&[false, false, true, false]);
+ v.find_unique(&mut selected, Some(&prev));
+ check_bitmap(&[false, false, true, false], &selected);
+
+ // Prev is None, select first element.
+ let mut selected = new_bitmap(&[false, false, true, false]);
+ v.find_unique(&mut selected, None);
+ check_bitmap(&[true, false, true, false], &selected);
+
+ // Prev is empty, select first element.
+ let mut selected = new_bitmap(&[false, false, true, false]);
+ v.find_unique(&mut selected, Some(&NullVector::new(0)));
+ check_bitmap(&[true, false, true, false], &selected);
+ }
+
+ fn check_find_unique_constant(len: usize) {
+ let input = ConstantVector::new(Arc::new(Int32Vector::from_slice(&[8])), len);
+ let mut selected = BitVec::repeat(false, len);
+ input.find_unique(&mut selected, None);
+
+ let mut expect = vec![false; len];
+ if !expect.is_empty() {
+ expect[0] = true;
+ }
+ check_bitmap(&expect, &selected);
+
+ let mut selected = BitVec::repeat(false, len);
+ let prev = Some(ConstantVector::new(
+ Arc::new(Int32Vector::from_slice(&[8])),
+ 1,
+ ));
+ input.find_unique(&mut selected, prev.as_ref().map(|v| v as _));
+ let expect = vec![false; len];
+ check_bitmap(&expect, &selected);
+ }
+
+ #[test]
+ fn test_find_unique_constant() {
+ for len in 0..5 {
+ check_find_unique_constant(len);
+ }
+ }
+
+ #[test]
+ fn test_find_unique_constant_with_prev() {
+ let prev = ConstantVector::new(Arc::new(Int32Vector::from_slice(&[1])), 1);
+
+ // Keep flags unchanged.
+ let mut selected = new_bitmap(&[true, false, true, false]);
+ let v = ConstantVector::new(Arc::new(Int32Vector::from_slice(&[1])), 4);
+ v.find_unique(&mut selected, Some(&prev));
+ check_bitmap(&[true, false, true, false], &selected);
+
+ // Keep flags unchanged.
+ let mut selected = new_bitmap(&[false, false, true, false]);
+ v.find_unique(&mut selected, Some(&prev));
+ check_bitmap(&[false, false, true, false], &selected);
+
+ // Prev is None, select first element.
+ let mut selected = new_bitmap(&[false, false, true, false]);
+ v.find_unique(&mut selected, None);
+ check_bitmap(&[true, false, true, false], &selected);
+
+ // Prev is empty, select first element.
+ let mut selected = new_bitmap(&[false, false, true, false]);
+ v.find_unique(
+ &mut selected,
+ Some(&ConstantVector::new(
+ Arc::new(Int32Vector::from_slice(&[1])),
+ 0,
+ )),
+ );
+ check_bitmap(&[true, false, true, false], &selected);
+
+ // Different constant vector.
+ let mut selected = new_bitmap(&[false, false, true, false]);
+ let v = ConstantVector::new(Arc::new(Int32Vector::from_slice(&[2])), 4);
+ v.find_unique(&mut selected, Some(&prev));
+ check_bitmap(&[true, false, true, false], &selected);
+ }
+
+ #[test]
+ fn test_find_unique_string() {
+ let input = StringVector::from_slice(&["a", "a", "b", "c"]);
+ let mut selected = BitVec::repeat(false, 4);
+ input.find_unique(&mut selected, None);
+ let expect = vec![true, false, true, true];
+ check_bitmap(&expect, &selected);
+ }
+
+ macro_rules! impl_find_unique_date_like_test {
+ ($VectorType: ident, $ValueType: ident, $method: ident) => {{
+ use $crate::vectors::$VectorType;
+
+ let v = $VectorType::from_iterator([8, 8, 9, 10].into_iter().map($ValueType::$method));
+ let mut selected = BitVec::repeat(false, 4);
+ v.find_unique(&mut selected, None);
+ let expect = vec![true, false, true, true];
+ check_bitmap(&expect, &selected);
+ }};
+ }
+
+ #[test]
+ fn test_find_unique_date_like() {
+ impl_find_unique_date_like_test!(DateVector, Date, new);
+ impl_find_unique_date_like_test!(DateTimeVector, DateTime, new);
+ impl_find_unique_date_like_test!(TimestampSecondVector, TimestampSecond, from);
+ impl_find_unique_date_like_test!(TimestampMillisecondVector, TimestampMillisecond, from);
+ impl_find_unique_date_like_test!(TimestampMicrosecondVector, TimestampMicrosecond, from);
+ impl_find_unique_date_like_test!(TimestampNanosecondVector, TimestampNanosecond, from);
+ }
+}
diff --git a/src/datatypes2/src/vectors/operations/replicate.rs b/src/datatypes2/src/vectors/operations/replicate.rs
new file mode 100644
index 000000000000..8216517fc62d
--- /dev/null
+++ b/src/datatypes2/src/vectors/operations/replicate.rs
@@ -0,0 +1,170 @@
+// Copyright 2022 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::prelude::*;
+pub(crate) use crate::vectors::null::replicate_null;
+pub(crate) use crate::vectors::primitive::replicate_primitive;
+
+pub(crate) fn replicate_scalar<C: ScalarVector>(c: &C, offsets: &[usize]) -> VectorRef {
+ assert_eq!(offsets.len(), c.len());
+
+ if offsets.is_empty() {
+ return c.slice(0, 0);
+ }
+ let mut builder = <<C as ScalarVector>::Builder>::with_capacity(c.len());
+
+ let mut previous_offset = 0;
+ for (i, offset) in offsets.iter().enumerate() {
+ let data = c.get_data(i);
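+        // Copy the `i`-th element `*offset - previous_offset` times.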
+ for _ in previous_offset..*offset {
+ builder.push(data);
+ }
+ previous_offset = *offset;
+ }
+ builder.to_vector()
+}
+
+#[cfg(test)]
+mod tests {
+ use std::sync::Arc;
+
+ use common_time::timestamp::TimeUnit;
+ use common_time::{Date, DateTime, Timestamp};
+ use paste::paste;
+
+ use super::*;
+ use crate::vectors::constant::ConstantVector;
+ use crate::vectors::{Int32Vector, NullVector, StringVector, VectorOp};
+
+ #[test]
+ fn test_replicate_primitive() {
+ let v = Int32Vector::from_iterator(0..5);
+ let offsets = [0, 1, 2, 3, 4];
+
+ let v = v.replicate(&offsets);
+ assert_eq!(4, v.len());
+
+ for i in 0..4 {
+ assert_eq!(Value::Int32(i as i32 + 1), v.get(i));
+ }
+ }
+
+ #[test]
+ fn test_replicate_nullable_primitive() {
+ let v = Int32Vector::from(vec![None, Some(1), None, Some(2)]);
+ let offsets = [2, 4, 6, 8];
+ let v = v.replicate(&offsets);
+ assert_eq!(8, v.len());
+
+ let expect: VectorRef = Arc::new(Int32Vector::from(vec![
+ None,
+ None,
+ Some(1),
+ Some(1),
+ None,
+ None,
+ Some(2),
+ Some(2),
+ ]));
+ assert_eq!(expect, v);
+ }
+
+ #[test]
+ fn test_replicate_scalar() {
+ let v = StringVector::from_slice(&["0", "1", "2", "3"]);
+ let offsets = [1, 3, 5, 6];
+
+ let v = v.replicate(&offsets);
+ assert_eq!(6, v.len());
+
+ let expect: VectorRef = Arc::new(StringVector::from_slice(&["0", "1", "1", "2", "2", "3"]));
+ assert_eq!(expect, v);
+ }
+
+ #[test]
+ fn test_replicate_constant() {
+ let v = Arc::new(StringVector::from_slice(&["hello"]));
+ let cv = ConstantVector::new(v.clone(), 2);
+ let offsets = [1, 4];
+
+ let cv = cv.replicate(&offsets);
+ assert_eq!(4, cv.len());
+
+ let expect: VectorRef = Arc::new(ConstantVector::new(v, 4));
+ assert_eq!(expect, cv);
+ }
+
+ #[test]
+ fn test_replicate_null() {
+ let v = NullVector::new(0);
+ let offsets = [];
+ let v = v.replicate(&offsets);
+ assert!(v.is_empty());
+
+ let v = NullVector::new(3);
+ let offsets = [1, 3, 5];
+
+ let v = v.replicate(&offsets);
+ assert_eq!(5, v.len());
+ }
+
+ macro_rules! impl_replicate_date_like_test {
+ ($VectorType: ident, $ValueType: ident, $method: ident) => {{
+ use $crate::vectors::$VectorType;
+
+ let v = $VectorType::from_iterator((0..5).map($ValueType::$method));
+ let offsets = [0, 1, 2, 3, 4];
+
+ let v = v.replicate(&offsets);
+ assert_eq!(4, v.len());
+
+ for i in 0..4 {
+ assert_eq!(
+ Value::$ValueType($ValueType::$method((i as i32 + 1).into())),
+ v.get(i)
+ );
+ }
+ }};
+ }
+
+ macro_rules! impl_replicate_timestamp_test {
+ ($unit: ident) => {{
+ paste!{
+ use $crate::vectors::[<Timestamp $unit Vector>];
+ use $crate::timestamp::[<Timestamp $unit>];
+ let v = [<Timestamp $unit Vector>]::from_iterator((0..5).map([<Timestamp $unit>]::from));
+ let offsets = [0, 1, 2, 3, 4];
+ let v = v.replicate(&offsets);
+ assert_eq!(4, v.len());
+ for i in 0..4 {
+ assert_eq!(
+ Value::Timestamp(Timestamp::new(i as i64 + 1, TimeUnit::$unit)),
+ v.get(i)
+ );
+ }
+ }
+ }};
+ }
+
+ #[test]
+ fn test_replicate_date_like() {
+ impl_replicate_date_like_test!(DateVector, Date, new);
+ impl_replicate_date_like_test!(DateTimeVector, DateTime, new);
+
+ impl_replicate_timestamp_test!(Second);
+ impl_replicate_timestamp_test!(Millisecond);
+ impl_replicate_timestamp_test!(Microsecond);
+ impl_replicate_timestamp_test!(Nanosecond);
+ }
+}
diff --git a/src/datatypes2/src/vectors/primitive.rs b/src/datatypes2/src/vectors/primitive.rs
new file mode 100644
index 000000000000..7829c3173131
--- /dev/null
+++ b/src/datatypes2/src/vectors/primitive.rs
@@ -0,0 +1,552 @@
+// Copyright 2022 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::any::Any;
+use std::fmt;
+use std::sync::Arc;
+
+use arrow::array::{
+ Array, ArrayBuilder, ArrayData, ArrayIter, ArrayRef, PrimitiveArray, PrimitiveBuilder,
+};
+use serde_json::Value as JsonValue;
+use snafu::OptionExt;
+
+use crate::data_type::ConcreteDataType;
+use crate::error::{self, Result};
+use crate::scalars::{Scalar, ScalarRef, ScalarVector, ScalarVectorBuilder};
+use crate::serialize::Serializable;
+use crate::types::{
+ Float32Type, Float64Type, Int16Type, Int32Type, Int64Type, Int8Type, LogicalPrimitiveType,
+ UInt16Type, UInt32Type, UInt64Type, UInt8Type, WrapperType,
+};
+use crate::value::{Value, ValueRef};
+use crate::vectors::{self, MutableVector, Validity, Vector, VectorRef};
+
+pub type UInt8Vector = PrimitiveVector<UInt8Type>;
+pub type UInt16Vector = PrimitiveVector<UInt16Type>;
+pub type UInt32Vector = PrimitiveVector<UInt32Type>;
+pub type UInt64Vector = PrimitiveVector<UInt64Type>;
+
+pub type Int8Vector = PrimitiveVector<Int8Type>;
+pub type Int16Vector = PrimitiveVector<Int16Type>;
+pub type Int32Vector = PrimitiveVector<Int32Type>;
+pub type Int64Vector = PrimitiveVector<Int64Type>;
+
+pub type Float32Vector = PrimitiveVector<Float32Type>;
+pub type Float64Vector = PrimitiveVector<Float64Type>;
+
+/// Vector for primitive data types.
+pub struct PrimitiveVector<T: LogicalPrimitiveType> {
+ array: PrimitiveArray<T::ArrowPrimitive>,
+}
+
+impl<T: LogicalPrimitiveType> PrimitiveVector<T> {
+ pub fn new(array: PrimitiveArray<T::ArrowPrimitive>) -> Self {
+ Self { array }
+ }
+
+ pub fn try_from_arrow_array(array: impl AsRef<dyn Array>) -> Result<Self> {
+ let data = array
+ .as_ref()
+ .as_any()
+ .downcast_ref::<PrimitiveArray<T::ArrowPrimitive>>()
+ .with_context(|| error::ConversionSnafu {
+ from: format!("{:?}", array.as_ref().data_type()),
+ })?
+ .data()
+ .clone();
+ let concrete_array = PrimitiveArray::<T::ArrowPrimitive>::from(data);
+ Ok(Self::new(concrete_array))
+ }
+
+ pub fn from_slice<P: AsRef<[T::Native]>>(slice: P) -> Self {
+ let iter = slice.as_ref().iter().copied();
+ Self {
+ array: PrimitiveArray::from_iter_values(iter),
+ }
+ }
+
+ pub fn from_wrapper_slice<P: AsRef<[T::Wrapper]>>(slice: P) -> Self {
+ let iter = slice.as_ref().iter().copied().map(WrapperType::into_native);
+ Self {
+ array: PrimitiveArray::from_iter_values(iter),
+ }
+ }
+
+ pub fn from_vec(array: Vec<T::Native>) -> Self {
+ Self {
+ array: PrimitiveArray::from_iter_values(array),
+ }
+ }
+
+ pub fn from_values<I: IntoIterator<Item = T::Native>>(iter: I) -> Self {
+ Self {
+ array: PrimitiveArray::from_iter_values(iter),
+ }
+ }
+
+ pub(crate) fn as_arrow(&self) -> &PrimitiveArray<T::ArrowPrimitive> {
+ &self.array
+ }
+
+ fn to_array_data(&self) -> ArrayData {
+ self.array.data().clone()
+ }
+
+ fn from_array_data(data: ArrayData) -> Self {
+ Self {
+ array: PrimitiveArray::from(data),
+ }
+ }
+
+ // To distinguish from `Vector::slice()`.
+ fn get_slice(&self, offset: usize, length: usize) -> Self {
+ let data = self.array.data().slice(offset, length);
+ Self::from_array_data(data)
+ }
+}
+
+impl<T: LogicalPrimitiveType> Vector for PrimitiveVector<T> {
+ fn data_type(&self) -> ConcreteDataType {
+ T::build_data_type()
+ }
+
+ fn vector_type_name(&self) -> String {
+ format!("{}Vector", T::type_name())
+ }
+
+ fn as_any(&self) -> &dyn Any {
+ self
+ }
+
+ fn len(&self) -> usize {
+ self.array.len()
+ }
+
+ fn to_arrow_array(&self) -> ArrayRef {
+ let data = self.to_array_data();
+ Arc::new(PrimitiveArray::<T::ArrowPrimitive>::from(data))
+ }
+
+ fn to_boxed_arrow_array(&self) -> Box<dyn Array> {
+ let data = self.to_array_data();
+ Box::new(PrimitiveArray::<T::ArrowPrimitive>::from(data))
+ }
+
+ fn validity(&self) -> Validity {
+ vectors::impl_validity_for_vector!(self.array)
+ }
+
+ fn memory_size(&self) -> usize {
+ self.array.get_buffer_memory_size()
+ }
+
+ fn null_count(&self) -> usize {
+ self.array.null_count()
+ }
+
+ fn is_null(&self, row: usize) -> bool {
+ self.array.is_null(row)
+ }
+
+ fn slice(&self, offset: usize, length: usize) -> VectorRef {
+ let data = self.array.data().slice(offset, length);
+ Arc::new(Self::from_array_data(data))
+ }
+
+ fn get(&self, index: usize) -> Value {
+ if self.array.is_valid(index) {
+ // Safety: The index has been checked by `is_valid()`.
+ let wrapper = unsafe { T::Wrapper::from_native(self.array.value_unchecked(index)) };
+ wrapper.into()
+ } else {
+ Value::Null
+ }
+ }
+
+ fn get_ref(&self, index: usize) -> ValueRef {
+ if self.array.is_valid(index) {
+ // Safety: The index has been checked by `is_valid()`.
+ let wrapper = unsafe { T::Wrapper::from_native(self.array.value_unchecked(index)) };
+ wrapper.into()
+ } else {
+ ValueRef::Null
+ }
+ }
+}
+
+impl<T: LogicalPrimitiveType> fmt::Debug for PrimitiveVector<T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_struct("PrimitiveVector")
+ .field("array", &self.array)
+ .finish()
+ }
+}
+
+impl<T: LogicalPrimitiveType> From<PrimitiveArray<T::ArrowPrimitive>> for PrimitiveVector<T> {
+ fn from(array: PrimitiveArray<T::ArrowPrimitive>) -> Self {
+ Self { array }
+ }
+}
+
+impl<T: LogicalPrimitiveType> From<Vec<Option<T::Native>>> for PrimitiveVector<T> {
+ fn from(v: Vec<Option<T::Native>>) -> Self {
+ Self {
+ array: PrimitiveArray::from_iter(v),
+ }
+ }
+}
+
+pub struct PrimitiveIter<'a, T: LogicalPrimitiveType> {
+ iter: ArrayIter<&'a PrimitiveArray<T::ArrowPrimitive>>,
+}
+
+impl<'a, T: LogicalPrimitiveType> Iterator for PrimitiveIter<'a, T> {
+ type Item = Option<T::Wrapper>;
+
+ fn next(&mut self) -> Option<Option<T::Wrapper>> {
+ self.iter
+ .next()
+ .map(|item| item.map(T::Wrapper::from_native))
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+}
+
+impl<T: LogicalPrimitiveType> ScalarVector for PrimitiveVector<T> {
+ type OwnedItem = T::Wrapper;
+ type RefItem<'a> = T::Wrapper;
+ type Iter<'a> = PrimitiveIter<'a, T>;
+ type Builder = PrimitiveVectorBuilder<T>;
+
+ fn get_data(&self, idx: usize) -> Option<Self::RefItem<'_>> {
+ if self.array.is_valid(idx) {
+ Some(T::Wrapper::from_native(self.array.value(idx)))
+ } else {
+ None
+ }
+ }
+
+ fn iter_data(&self) -> Self::Iter<'_> {
+ PrimitiveIter {
+ iter: self.array.iter(),
+ }
+ }
+}
+
+impl<T: LogicalPrimitiveType> Serializable for PrimitiveVector<T> {
+ fn serialize_to_json(&self) -> Result<Vec<JsonValue>> {
+ let res = self
+ .iter_data()
+ .map(|v| match v {
+ None => serde_json::Value::Null,
+ // use WrapperType's Into<serde_json::Value> bound instead of
+ // serde_json::to_value to facilitate customized serialization
+ // for WrapperType
+ Some(v) => v.into(),
+ })
+ .collect::<Vec<_>>();
+ Ok(res)
+ }
+}
+
+impl<T: LogicalPrimitiveType> PartialEq for PrimitiveVector<T> {
+ fn eq(&self, other: &PrimitiveVector<T>) -> bool {
+ self.array == other.array
+ }
+}
+
+pub type UInt8VectorBuilder = PrimitiveVectorBuilder<UInt8Type>;
+pub type UInt16VectorBuilder = PrimitiveVectorBuilder<UInt16Type>;
+pub type UInt32VectorBuilder = PrimitiveVectorBuilder<UInt32Type>;
+pub type UInt64VectorBuilder = PrimitiveVectorBuilder<UInt64Type>;
+
+pub type Int8VectorBuilder = PrimitiveVectorBuilder<Int8Type>;
+pub type Int16VectorBuilder = PrimitiveVectorBuilder<Int16Type>;
+pub type Int32VectorBuilder = PrimitiveVectorBuilder<Int32Type>;
+pub type Int64VectorBuilder = PrimitiveVectorBuilder<Int64Type>;
+
+pub type Float32VectorBuilder = PrimitiveVectorBuilder<Float32Type>;
+pub type Float64VectorBuilder = PrimitiveVectorBuilder<Float64Type>;
+
+/// Builder to build a primitive vector.
+pub struct PrimitiveVectorBuilder<T: LogicalPrimitiveType> {
+ mutable_array: PrimitiveBuilder<T::ArrowPrimitive>,
+}
+
+impl<T: LogicalPrimitiveType> MutableVector for PrimitiveVectorBuilder<T> {
+ fn data_type(&self) -> ConcreteDataType {
+ T::build_data_type()
+ }
+
+ fn len(&self) -> usize {
+ self.mutable_array.len()
+ }
+
+ fn as_any(&self) -> &dyn Any {
+ self
+ }
+
+ fn as_mut_any(&mut self) -> &mut dyn Any {
+ self
+ }
+
+ fn to_vector(&mut self) -> VectorRef {
+ Arc::new(self.finish())
+ }
+
+ fn push_value_ref(&mut self, value: ValueRef) -> Result<()> {
+ let primitive = T::cast_value_ref(value)?;
+ match primitive {
+ Some(v) => self.mutable_array.append_value(v.into_native()),
+ None => self.mutable_array.append_null(),
+ }
+ Ok(())
+ }
+
+ fn extend_slice_of(&mut self, vector: &dyn Vector, offset: usize, length: usize) -> Result<()> {
+ let primitive = T::cast_vector(vector)?;
+ // Slice the underlying array to avoid creating a new Arc.
+ let slice = primitive.get_slice(offset, length);
+ for v in slice.iter_data() {
+ self.push(v);
+ }
+ Ok(())
+ }
+}
+
+impl<T> ScalarVectorBuilder for PrimitiveVectorBuilder<T>
+where
+ T: LogicalPrimitiveType,
+ T::Wrapper: Scalar<VectorType = PrimitiveVector<T>>,
+ for<'a> T::Wrapper: ScalarRef<'a, ScalarType = T::Wrapper>,
+ for<'a> T::Wrapper: Scalar<RefType<'a> = T::Wrapper>,
+{
+ type VectorType = PrimitiveVector<T>;
+
+ fn with_capacity(capacity: usize) -> Self {
+ Self {
+ mutable_array: PrimitiveBuilder::with_capacity(capacity),
+ }
+ }
+
+ fn push(&mut self, value: Option<<Self::VectorType as ScalarVector>::RefItem<'_>>) {
+ self.mutable_array
+ .append_option(value.map(|v| v.into_native()));
+ }
+
+ fn finish(&mut self) -> Self::VectorType {
+ PrimitiveVector {
+ array: self.mutable_array.finish(),
+ }
+ }
+}
+
+pub(crate) fn replicate_primitive<T: LogicalPrimitiveType>(
+ vector: &PrimitiveVector<T>,
+ offsets: &[usize],
+) -> PrimitiveVector<T> {
+ assert_eq!(offsets.len(), vector.len());
+
+ if offsets.is_empty() {
+ return vector.get_slice(0, 0);
+ }
+
+ let mut builder = PrimitiveVectorBuilder::<T>::with_capacity(*offsets.last().unwrap() as usize);
+
+ let mut previous_offset = 0;
+
+ for (offset, value) in offsets.iter().zip(vector.array.iter()) {
+ let repeat_times = *offset - previous_offset;
+ match value {
+ Some(data) => {
+ unsafe {
+ // Safety: std::iter::Repeat and std::iter::Take implement TrustedLen.
+ builder
+ .mutable_array
+ .append_trusted_len_iter(std::iter::repeat(data).take(repeat_times));
+ }
+ }
+ None => {
+ builder.mutable_array.append_nulls(repeat_times);
+ }
+ }
+ previous_offset = *offset;
+ }
+ builder.finish()
+}
+
+#[cfg(test)]
+mod tests {
+ use arrow::array::Int32Array;
+ use arrow::datatypes::DataType as ArrowDataType;
+ use serde_json;
+
+ use super::*;
+ use crate::data_type::DataType;
+ use crate::serialize::Serializable;
+ use crate::types::Int64Type;
+
+ fn check_vec(v: Int32Vector) {
+ assert_eq!(4, v.len());
+ assert_eq!("Int32Vector", v.vector_type_name());
+ assert!(!v.is_const());
+ assert!(v.validity().is_all_valid());
+ assert!(!v.only_null());
+
+ for i in 0..4 {
+ assert!(!v.is_null(i));
+ assert_eq!(Value::Int32(i as i32 + 1), v.get(i));
+ assert_eq!(ValueRef::Int32(i as i32 + 1), v.get_ref(i));
+ }
+
+ let json_value = v.serialize_to_json().unwrap();
+ assert_eq!("[1,2,3,4]", serde_json::to_string(&json_value).unwrap(),);
+
+ let arrow_arr = v.to_arrow_array();
+ assert_eq!(4, arrow_arr.len());
+ assert_eq!(&ArrowDataType::Int32, arrow_arr.data_type());
+ }
+
+ #[test]
+ fn test_from_values() {
+ let v = Int32Vector::from_values(vec![1, 2, 3, 4]);
+ check_vec(v);
+ }
+
+ #[test]
+ fn test_from_vec() {
+ let v = Int32Vector::from_vec(vec![1, 2, 3, 4]);
+ check_vec(v);
+ }
+
+ #[test]
+ fn test_from_slice() {
+ let v = Int32Vector::from_slice(vec![1, 2, 3, 4]);
+ check_vec(v);
+ }
+
+ #[test]
+ fn test_serialize_primitive_vector_with_null_to_json() {
+ let input = [Some(1i32), Some(2i32), None, Some(4i32), None];
+ let mut builder = Int32VectorBuilder::with_capacity(input.len());
+ for v in input {
+ builder.push(v);
+ }
+ let vector = builder.finish();
+
+ let json_value = vector.serialize_to_json().unwrap();
+ assert_eq!(
+ "[1,2,null,4,null]",
+ serde_json::to_string(&json_value).unwrap(),
+ );
+ }
+
+ #[test]
+ fn test_from_arrow_array() {
+ let arrow_array = Int32Array::from(vec![1, 2, 3, 4]);
+ let v = Int32Vector::from(arrow_array);
+ check_vec(v);
+ }
+
+ #[test]
+ fn test_primitive_vector_build_get() {
+ let input = [Some(1i32), Some(2i32), None, Some(4i32), None];
+ let mut builder = Int32VectorBuilder::with_capacity(input.len());
+ for v in input {
+ builder.push(v);
+ }
+ let vector = builder.finish();
+ assert_eq!(input.len(), vector.len());
+
+ for (i, v) in input.into_iter().enumerate() {
+ assert_eq!(v, vector.get_data(i));
+ assert_eq!(Value::from(v), vector.get(i));
+ }
+
+ let res: Vec<_> = vector.iter_data().collect();
+ assert_eq!(input, &res[..]);
+ }
+
+ #[test]
+ fn test_primitive_vector_validity() {
+ let input = [Some(1i32), Some(2i32), None, None];
+ let mut builder = Int32VectorBuilder::with_capacity(input.len());
+ for v in input {
+ builder.push(v);
+ }
+ let vector = builder.finish();
+ assert_eq!(2, vector.null_count());
+ let validity = vector.validity();
+ assert_eq!(2, validity.null_count());
+ assert!(!validity.is_set(2));
+ assert!(!validity.is_set(3));
+
+ let vector = Int32Vector::from_slice(vec![1, 2, 3, 4]);
+ assert_eq!(0, vector.null_count());
+ assert!(vector.validity().is_all_valid());
+ }
+
+ #[test]
+ fn test_memory_size() {
+ let v = Int32Vector::from_slice((0..5).collect::<Vec<i32>>());
+ assert_eq!(64, v.memory_size());
+ let v = Int64Vector::from(vec![Some(0i64), Some(1i64), Some(2i64), None, None]);
+ assert_eq!(128, v.memory_size());
+ }
+
+ #[test]
+ fn test_primitive_vector_builder() {
+ let mut builder = Int64Type::default().create_mutable_vector(3);
+ builder.push_value_ref(ValueRef::Int64(123)).unwrap();
+ assert!(builder.push_value_ref(ValueRef::Int32(123)).is_err());
+
+ let input = Int64Vector::from_slice(&[7, 8, 9]);
+ builder.extend_slice_of(&input, 1, 2).unwrap();
+ assert!(builder
+ .extend_slice_of(&Int32Vector::from_slice(&[13]), 0, 1)
+ .is_err());
+ let vector = builder.to_vector();
+
+ let expect: VectorRef = Arc::new(Int64Vector::from_slice(&[123, 8, 9]));
+ assert_eq!(expect, vector);
+ }
+
+ #[test]
+ fn test_from_wrapper_slice() {
+ macro_rules! test_from_wrapper_slice {
+ ($vec: ident, $ty: ident) => {
+ let from_wrapper_slice = $vec::from_wrapper_slice(&[
+ $ty::from_native($ty::MAX),
+ $ty::from_native($ty::MIN),
+ ]);
+ let from_slice = $vec::from_slice(&[$ty::MAX, $ty::MIN]);
+ assert_eq!(from_wrapper_slice, from_slice);
+ };
+ }
+
+ test_from_wrapper_slice!(UInt8Vector, u8);
+ test_from_wrapper_slice!(Int8Vector, i8);
+ test_from_wrapper_slice!(UInt16Vector, u16);
+ test_from_wrapper_slice!(Int16Vector, i16);
+ test_from_wrapper_slice!(UInt32Vector, u32);
+ test_from_wrapper_slice!(Int32Vector, i32);
+ test_from_wrapper_slice!(UInt64Vector, u64);
+ test_from_wrapper_slice!(Int64Vector, i64);
+ test_from_wrapper_slice!(Float32Vector, f32);
+ test_from_wrapper_slice!(Float64Vector, f64);
+ }
+}
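[Editor's note, not part of the commit above] The `replicate_primitive` helper treats `offsets` as cumulative end positions rather than per-row repeat counts, so row i is repeated `offsets[i] - offsets[i-1]` times (with the offset before the first row taken as 0). A minimal standalone sketch of that arithmetic, where `repeat_counts` is a hypothetical helper written only for illustration:

// Mirrors the offset handling in `replicate_primitive`: each offset is a running
// end position, so the repeat count for row i is offsets[i] - offsets[i - 1].
fn repeat_counts(offsets: &[usize]) -> Vec<usize> {
    let mut previous = 0;
    offsets
        .iter()
        .map(|end| {
            let count = *end - previous;
            previous = *end;
            count
        })
        .collect()
}

fn main() {
    // With values [10, 20, 30] and offsets [2, 2, 5], row 0 repeats twice,
    // row 1 is dropped, and row 2 repeats three times: [10, 10, 30, 30, 30].
    assert_eq!(repeat_counts(&[2, 2, 5]), vec![2, 0, 3]);
}

This is also why `replicate_primitive` asserts `offsets.len() == vector.len()` and sizes its builder from `offsets.last()`.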
diff --git a/src/datatypes2/src/vectors/string.rs b/src/datatypes2/src/vectors/string.rs
new file mode 100644
index 000000000000..252116b3b2dd
--- /dev/null
+++ b/src/datatypes2/src/vectors/string.rs
@@ -0,0 +1,370 @@
+// Copyright 2022 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::any::Any;
+use std::sync::Arc;
+
+use arrow::array::{Array, ArrayBuilder, ArrayData, ArrayIter, ArrayRef};
+use snafu::ResultExt;
+
+use crate::arrow_array::{MutableStringArray, StringArray};
+use crate::data_type::ConcreteDataType;
+use crate::error::{self, Result};
+use crate::scalars::{ScalarVector, ScalarVectorBuilder};
+use crate::serialize::Serializable;
+use crate::value::{Value, ValueRef};
+use crate::vectors::{self, MutableVector, Validity, Vector, VectorRef};
+
+/// Vector of strings.
+#[derive(Debug, PartialEq)]
+pub struct StringVector {
+ array: StringArray,
+}
+
+impl StringVector {
+ pub(crate) fn as_arrow(&self) -> &dyn Array {
+ &self.array
+ }
+
+ fn to_array_data(&self) -> ArrayData {
+ self.array.data().clone()
+ }
+
+ fn from_array_data(data: ArrayData) -> Self {
+ Self {
+ array: StringArray::from(data),
+ }
+ }
+}
+
+impl From<StringArray> for StringVector {
+ fn from(array: StringArray) -> Self {
+ Self { array }
+ }
+}
+
+impl From<Vec<Option<String>>> for StringVector {
+ fn from(data: Vec<Option<String>>) -> Self {
+ Self {
+ array: StringArray::from_iter(data),
+ }
+ }
+}
+
+impl From<Vec<Option<&str>>> for StringVector {
+ fn from(data: Vec<Option<&str>>) -> Self {
+ Self {
+ array: StringArray::from_iter(data),
+ }
+ }
+}
+
+impl From<&[Option<String>]> for StringVector {
+ fn from(data: &[Option<String>]) -> Self {
+ Self {
+ array: StringArray::from_iter(data),
+ }
+ }
+}
+
+impl From<&[Option<&str>]> for StringVector {
+ fn from(data: &[Option<&str>]) -> Self {
+ Self {
+ array: StringArray::from_iter(data),
+ }
+ }
+}
+
+impl From<Vec<String>> for StringVector {
+ fn from(data: Vec<String>) -> Self {
+ Self {
+ array: StringArray::from_iter(data.into_iter().map(Some)),
+ }
+ }
+}
+
+impl From<Vec<&str>> for StringVector {
+ fn from(data: Vec<&str>) -> Self {
+ Self {
+ array: StringArray::from_iter(data.into_iter().map(Some)),
+ }
+ }
+}
+
+impl Vector for StringVector {
+ fn data_type(&self) -> ConcreteDataType {
+ ConcreteDataType::string_datatype()
+ }
+
+ fn vector_type_name(&self) -> String {
+ "StringVector".to_string()
+ }
+
+ fn as_any(&self) -> &dyn Any {
+ self
+ }
+
+ fn len(&self) -> usize {
+ self.array.len()
+ }
+
+ fn to_arrow_array(&self) -> ArrayRef {
+ let data = self.to_array_data();
+ Arc::new(StringArray::from(data))
+ }
+
+ fn to_boxed_arrow_array(&self) -> Box<dyn Array> {
+ let data = self.to_array_data();
+ Box::new(StringArray::from(data))
+ }
+
+ fn validity(&self) -> Validity {
+ vectors::impl_validity_for_vector!(self.array)
+ }
+
+ fn memory_size(&self) -> usize {
+ self.array.get_buffer_memory_size()
+ }
+
+ fn null_count(&self) -> usize {
+ self.array.null_count()
+ }
+
+ fn is_null(&self, row: usize) -> bool {
+ self.array.is_null(row)
+ }
+
+ fn slice(&self, offset: usize, length: usize) -> VectorRef {
+ let data = self.array.data().slice(offset, length);
+ Arc::new(Self::from_array_data(data))
+ }
+
+ fn get(&self, index: usize) -> Value {
+ vectors::impl_get_for_vector!(self.array, index)
+ }
+
+ fn get_ref(&self, index: usize) -> ValueRef {
+ vectors::impl_get_ref_for_vector!(self.array, index)
+ }
+}
+
+impl ScalarVector for StringVector {
+ type OwnedItem = String;
+ type RefItem<'a> = &'a str;
+ type Iter<'a> = ArrayIter<&'a StringArray>;
+ type Builder = StringVectorBuilder;
+
+ fn get_data(&self, idx: usize) -> Option<Self::RefItem<'_>> {
+ if self.array.is_valid(idx) {
+ Some(self.array.value(idx))
+ } else {
+ None
+ }
+ }
+
+ fn iter_data(&self) -> Self::Iter<'_> {
+ self.array.iter()
+ }
+}
+
+pub struct StringVectorBuilder {
+ mutable_array: MutableStringArray,
+}
+
+impl MutableVector for StringVectorBuilder {
+ fn data_type(&self) -> ConcreteDataType {
+ ConcreteDataType::string_datatype()
+ }
+
+ fn len(&self) -> usize {
+ self.mutable_array.len()
+ }
+
+ fn as_any(&self) -> &dyn Any {
+ self
+ }
+
+ fn as_mut_any(&mut self) -> &mut dyn Any {
+ self
+ }
+
+ fn to_vector(&mut self) -> VectorRef {
+ Arc::new(self.finish())
+ }
+
+ fn push_value_ref(&mut self, value: ValueRef) -> Result<()> {
+ match value.as_string()? {
+ Some(v) => self.mutable_array.append_value(v),
+ None => self.mutable_array.append_null(),
+ }
+ Ok(())
+ }
+
+ fn extend_slice_of(&mut self, vector: &dyn Vector, offset: usize, length: usize) -> Result<()> {
+ vectors::impl_extend_for_builder!(self, vector, StringVector, offset, length)
+ }
+}
+
+impl ScalarVectorBuilder for StringVectorBuilder {
+ type VectorType = StringVector;
+
+ fn with_capacity(capacity: usize) -> Self {
+ Self {
+ mutable_array: MutableStringArray::with_capacity(capacity, 0),
+ }
+ }
+
+ fn push(&mut self, value: Option<<Self::VectorType as ScalarVector>::RefItem<'_>>) {
+ match value {
+ Some(v) => self.mutable_array.append_value(v),
+ None => self.mutable_array.append_null(),
+ }
+ }
+
+ fn finish(&mut self) -> Self::VectorType {
+ StringVector {
+ array: self.mutable_array.finish(),
+ }
+ }
+}
+
+impl Serializable for StringVector {
+ fn serialize_to_json(&self) -> Result<Vec<serde_json::Value>> {
+ self.iter_data()
+ .map(serde_json::to_value)
+ .collect::<serde_json::Result<_>>()
+ .context(error::SerializeSnafu)
+ }
+}
+
+vectors::impl_try_from_arrow_array_for_vector!(StringArray, StringVector);
+
+#[cfg(test)]
+mod tests {
+ use arrow::datatypes::DataType;
+
+ use super::*;
+
+ #[test]
+ fn test_string_vector_build_get() {
+ let mut builder = StringVectorBuilder::with_capacity(4);
+ builder.push(Some("hello"));
+ builder.push(None);
+ builder.push(Some("world"));
+ let vector = builder.finish();
+
+ assert_eq!(Some("hello"), vector.get_data(0));
+ assert_eq!(None, vector.get_data(1));
+ assert_eq!(Some("world"), vector.get_data(2));
+
+ // Get out of bound
+ assert!(vector.try_get(3).is_err());
+
+ assert_eq!(Value::String("hello".into()), vector.get(0));
+ assert_eq!(Value::Null, vector.get(1));
+ assert_eq!(Value::String("world".into()), vector.get(2));
+
+ let mut iter = vector.iter_data();
+ assert_eq!("hello", iter.next().unwrap().unwrap());
+ assert_eq!(None, iter.next().unwrap());
+ assert_eq!("world", iter.next().unwrap().unwrap());
+ assert_eq!(None, iter.next());
+ }
+
+ #[test]
+ fn test_string_vector_builder() {
+ let mut builder = StringVectorBuilder::with_capacity(3);
+ builder.push_value_ref(ValueRef::String("hello")).unwrap();
+ assert!(builder.push_value_ref(ValueRef::Int32(123)).is_err());
+
+ let input = StringVector::from_slice(&["world", "one", "two"]);
+ builder.extend_slice_of(&input, 1, 2).unwrap();
+ assert!(builder
+ .extend_slice_of(&crate::vectors::Int32Vector::from_slice(&[13]), 0, 1)
+ .is_err());
+ let vector = builder.to_vector();
+
+ let expect: VectorRef = Arc::new(StringVector::from_slice(&["hello", "one", "two"]));
+ assert_eq!(expect, vector);
+ }
+
+ #[test]
+ fn test_string_vector_misc() {
+ let strs = vec!["hello", "greptime", "rust"];
+ let v = StringVector::from(strs.clone());
+ assert_eq!(3, v.len());
+ assert_eq!("StringVector", v.vector_type_name());
+ assert!(!v.is_const());
+ assert!(v.validity().is_all_valid());
+ assert!(!v.only_null());
+ assert_eq!(128, v.memory_size());
+
+ for (i, s) in strs.iter().enumerate() {
+ assert_eq!(Value::from(*s), v.get(i));
+ assert_eq!(ValueRef::from(*s), v.get_ref(i));
+ assert_eq!(Value::from(*s), v.try_get(i).unwrap());
+ }
+
+ let arrow_arr = v.to_arrow_array();
+ assert_eq!(3, arrow_arr.len());
+ assert_eq!(&DataType::Utf8, arrow_arr.data_type());
+ }
+
+ #[test]
+ fn test_serialize_string_vector() {
+ let mut builder = StringVectorBuilder::with_capacity(3);
+ builder.push(Some("hello"));
+ builder.push(None);
+ builder.push(Some("world"));
+ let string_vector = builder.finish();
+ let serialized =
+ serde_json::to_string(&string_vector.serialize_to_json().unwrap()).unwrap();
+ assert_eq!(r#"["hello",null,"world"]"#, serialized);
+ }
+
+ #[test]
+ fn test_from_arrow_array() {
+ let mut builder = MutableStringArray::new();
+ builder.append_option(Some("A"));
+ builder.append_option(Some("B"));
+ builder.append_null();
+ builder.append_option(Some("D"));
+ let string_array: StringArray = builder.finish();
+ let vector = StringVector::from(string_array);
+ assert_eq!(
+ r#"["A","B",null,"D"]"#,
+ serde_json::to_string(&vector.serialize_to_json().unwrap()).unwrap(),
+ );
+ }
+
+ #[test]
+ fn test_from_non_option_string() {
+ let nul = String::from_utf8(vec![0]).unwrap();
+ let corpus = vec!["😅😅😅", "😍😍😍😍", "🥵🥵", nul.as_str()];
+ let vector = StringVector::from(corpus);
+ let serialized = serde_json::to_string(&vector.serialize_to_json().unwrap()).unwrap();
+ assert_eq!(r#"["😅😅😅","😍😍😍😍","🥵🥵","\u0000"]"#, serialized);
+
+ let corpus = vec![
+ "🀀🀀🀀".to_string(),
+ "🀁🀁🀁".to_string(),
+ "🀂🀂🀂".to_string(),
+ "🀃🀃🀃".to_string(),
+ "🀆🀆".to_string(),
+ ];
+ let vector = StringVector::from(corpus);
+ let serialized = serde_json::to_string(&vector.serialize_to_json().unwrap()).unwrap();
+ assert_eq!(r#"["🀀🀀🀀","🀁🀁🀁","🀂🀂🀂","🀃🀃🀃","🀆🀆"]"#, serialized);
+ }
+}
diff --git a/src/datatypes2/src/vectors/timestamp.rs b/src/datatypes2/src/vectors/timestamp.rs
new file mode 100644
index 000000000000..5d9f7f2ed1fc
--- /dev/null
+++ b/src/datatypes2/src/vectors/timestamp.rs
@@ -0,0 +1,31 @@
+// Copyright 2022 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::types::{
+ TimestampMicrosecondType, TimestampMillisecondType, TimestampNanosecondType,
+ TimestampSecondType,
+};
+use crate::vectors::{PrimitiveVector, PrimitiveVectorBuilder};
+
+pub type TimestampSecondVector = PrimitiveVector<TimestampSecondType>;
+pub type TimestampSecondVectorBuilder = PrimitiveVectorBuilder<TimestampSecondType>;
+
+pub type TimestampMillisecondVector = PrimitiveVector<TimestampMillisecondType>;
+pub type TimestampMillisecondVectorBuilder = PrimitiveVectorBuilder<TimestampMillisecondType>;
+
+pub type TimestampMicrosecondVector = PrimitiveVector<TimestampMicrosecondType>;
+pub type TimestampMicrosecondVectorBuilder = PrimitiveVectorBuilder<TimestampMicrosecondType>;
+
+pub type TimestampNanosecondVector = PrimitiveVector<TimestampNanosecondType>;
+pub type TimestampNanosecondVectorBuilder = PrimitiveVectorBuilder<TimestampNanosecondType>;
diff --git a/src/datatypes2/src/vectors/validity.rs b/src/datatypes2/src/vectors/validity.rs
new file mode 100644
index 000000000000..01c7faa7895b
--- /dev/null
+++ b/src/datatypes2/src/vectors/validity.rs
@@ -0,0 +1,159 @@
+// Copyright 2022 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use arrow::array::ArrayData;
+use arrow::bitmap::Bitmap;
+
+#[derive(Debug, PartialEq)]
+enum ValidityKind<'a> {
+ /// Whether each array slot is valid or not (null).
+ Slots {
+ bitmap: &'a Bitmap,
+ len: usize,
+ null_count: usize,
+ },
+ /// All slots are valid.
+ AllValid { len: usize },
+ /// All slots are null.
+ AllNull { len: usize },
+}
+
+/// Validity of a vector.
+#[derive(Debug, PartialEq)]
+pub struct Validity<'a> {
+ kind: ValidityKind<'a>,
+}
+
+impl<'a> Validity<'a> {
+ /// Creates a `Validity` from [`ArrayData`].
+ pub fn from_array_data(data: &'a ArrayData) -> Validity<'a> {
+ match data.null_bitmap() {
+ Some(bitmap) => Validity {
+ kind: ValidityKind::Slots {
+ bitmap,
+ len: data.len(),
+ null_count: data.null_count(),
+ },
+ },
+ None => Validity::all_valid(data.len()),
+ }
+ }
+
+ /// Returns a `Validity` in which all elements are valid.
+ pub fn all_valid(len: usize) -> Validity<'a> {
+ Validity {
+ kind: ValidityKind::AllValid { len },
+ }
+ }
+
+ /// Returns a `Validity` in which all elements are null.
+ pub fn all_null(len: usize) -> Validity<'a> {
+ Validity {
+ kind: ValidityKind::AllNull { len },
+ }
+ }
+
+ /// Returns whether `i-th` bit is set.
+ pub fn is_set(&self, i: usize) -> bool {
+ match self.kind {
+ ValidityKind::Slots { bitmap, .. } => bitmap.is_set(i),
+ ValidityKind::AllValid { len } => i < len,
+ ValidityKind::AllNull { .. } => false,
+ }
+ }
+
+ /// Returns true if all bits are null.
+ pub fn is_all_null(&self) -> bool {
+ match self.kind {
+ ValidityKind::Slots {
+ len, null_count, ..
+ } => len == null_count,
+ ValidityKind::AllValid { .. } => false,
+ ValidityKind::AllNull { .. } => true,
+ }
+ }
+
+ /// Returns true if all bits are valid.
+ pub fn is_all_valid(&self) -> bool {
+ match self.kind {
+ ValidityKind::Slots { null_count, .. } => null_count == 0,
+ ValidityKind::AllValid { .. } => true,
+ ValidityKind::AllNull { .. } => false,
+ }
+ }
+
+ /// The number of null slots in the [`Vector`] this validity describes.
+ pub fn null_count(&self) -> usize {
+ match self.kind {
+ ValidityKind::Slots { null_count, .. } => null_count,
+ ValidityKind::AllValid { .. } => 0,
+ ValidityKind::AllNull { len } => len,
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use arrow::array::{Array, Int32Array};
+
+ use super::*;
+
+ #[test]
+ fn test_all_valid() {
+ let validity = Validity::all_valid(5);
+ assert!(validity.is_all_valid());
+ assert!(!validity.is_all_null());
+ assert_eq!(0, validity.null_count());
+ for i in 0..5 {
+ assert!(validity.is_set(i));
+ }
+ assert!(!validity.is_set(5));
+ }
+
+ #[test]
+ fn test_all_null() {
+ let validity = Validity::all_null(5);
+ assert!(validity.is_all_null());
+ assert!(!validity.is_all_valid());
+ assert_eq!(5, validity.null_count());
+ for i in 0..5 {
+ assert!(!validity.is_set(i));
+ }
+ assert!(!validity.is_set(5));
+ }
+
+ #[test]
+ fn test_from_array_data() {
+ let array = Int32Array::from_iter([None, Some(1), None]);
+ let validity = Validity::from_array_data(array.data());
+ assert_eq!(2, validity.null_count());
+ assert!(!validity.is_set(0));
+ assert!(validity.is_set(1));
+ assert!(!validity.is_set(2));
+ assert!(!validity.is_all_null());
+ assert!(!validity.is_all_valid());
+
+ let array = Int32Array::from_iter([None, None]);
+ let validity = Validity::from_array_data(array.data());
+ assert!(validity.is_all_null());
+ assert!(!validity.is_all_valid());
+ assert_eq!(2, validity.null_count());
+
+ let array = Int32Array::from_iter_values([1, 2]);
+ let validity = Validity::from_array_data(array.data());
+ assert!(!validity.is_all_null());
+ assert!(validity.is_all_valid());
+ assert_eq!(0, validity.null_count());
+ }
+}
|
feat
|
new datatypes subcrate based on the official arrow (#705)
|
3477fde0e5a53c7cfc3dd4f7b470e53d7007dbf2
|
2024-05-17 17:40:28
|
discord9
|
feat(flow): tumble window func (#3968)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 23bf8f1200ab..88e0a92b043b 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3835,8 +3835,11 @@ dependencies = [
"common-decimal",
"common-error",
"common-frontend",
+ "common-function",
"common-macro",
"common-meta",
+ "common-query",
+ "common-recordbatch",
"common-runtime",
"common-telemetry",
"common-time",
diff --git a/src/common/meta/src/ddl/create_flow.rs b/src/common/meta/src/ddl/create_flow.rs
index 474eec53c3d8..a7fb59b05623 100644
--- a/src/common/meta/src/ddl/create_flow.rs
+++ b/src/common/meta/src/ddl/create_flow.rs
@@ -119,12 +119,11 @@ impl CreateFlowProcedure {
&sink_table_name.table_name,
))
.await?;
- ensure!(
- !exists,
- error::TableAlreadyExistsSnafu {
- table_name: sink_table_name.to_string(),
- }
- );
+ // TODO(discord9): due to undefined behavior in flow's plan in how to transform types in mfp, sometimes flow can't deduce the correct schema
+ // and requires the sink table to be created manually
+ if exists {
+ common_telemetry::warn!("Table already exists, table: {}", sink_table_name);
+ }
self.collect_source_tables().await?;
self.allocate_flow_id().await?;
diff --git a/src/flow/Cargo.toml b/src/flow/Cargo.toml
index 3d674bbb12b8..1f1bd1562f0a 100644
--- a/src/flow/Cargo.toml
+++ b/src/flow/Cargo.toml
@@ -26,7 +26,10 @@ futures = "0.3"
# This fork is simply for keeping our dependency in our org, and pin the version
# it is the same with upstream repo
async-trait.workspace = true
+common-function.workspace = true
common-meta.workspace = true
+common-query.workspace = true
+common-recordbatch.workspace = true
enum-as-inner = "0.6.0"
greptime-proto.workspace = true
hydroflow = { git = "https://github.com/GreptimeTeam/hydroflow.git", branch = "main" }
diff --git a/src/flow/src/adapter.rs b/src/flow/src/adapter.rs
index f440043f66fe..f75288831041 100644
--- a/src/flow/src/adapter.rs
+++ b/src/flow/src/adapter.rs
@@ -18,7 +18,7 @@
use std::collections::{BTreeMap, HashMap};
use std::sync::Arc;
-use std::time::Instant;
+use std::time::{Instant, SystemTime};
use api::v1::{RowDeleteRequest, RowDeleteRequests, RowInsertRequest, RowInsertRequests};
use catalog::CatalogManagerRef;
@@ -49,7 +49,7 @@ use crate::adapter::worker::{create_worker, Worker, WorkerHandle};
use crate::compute::ErrCollector;
use crate::expr::GlobalId;
use crate::repr::{self, DiffRow, Row};
-use crate::transform::sql_to_flow_plan;
+use crate::transform::{register_function_to_query_engine, sql_to_flow_plan};
pub(crate) mod error;
mod flownode_impl;
@@ -120,6 +120,8 @@ impl FlownodeBuilder {
);
let query_engine = query_engine_factory.query_engine();
+ register_function_to_query_engine(&query_engine);
+
let (tx, rx) = oneshot::channel();
let node_id = Some(self.flow_node_id);
@@ -261,7 +263,7 @@ impl FlownodeManager {
let ctx = Arc::new(QueryContext::with(&catalog, &schema));
// TODO(discord9): instead of auto build table from request schema, actually build table
// before `create flow` to be able to assign pk and ts etc.
- let (primary_keys, schema) = if let Some(table_id) = self
+ let (primary_keys, schema, is_auto_create) = if let Some(table_id) = self
.table_info_source
.get_table_id_from_name(&table_name)
.await?
@@ -278,54 +280,65 @@ impl FlownodeManager {
.map(|i| meta.schema.column_schemas[i].name.clone())
.collect_vec();
let schema = meta.schema.column_schemas;
- (primary_keys, schema)
+ let is_auto_create = schema
+ .last()
+ .map(|s| s.name == "__ts_placeholder")
+ .unwrap_or(false);
+ (primary_keys, schema, is_auto_create)
} else {
- // TODO(discord9): get ts column from `RelationType` once we are done rewriting flow plan to attach ts
- let (primary_keys, schema) = {
- let node_ctx = self.node_context.lock().await;
- let gid: GlobalId = node_ctx
- .table_repr
- .get_by_name(&table_name)
- .map(|x| x.1)
- .unwrap();
- let schema = node_ctx
- .schema
- .get(&gid)
- .with_context(|| TableNotFoundSnafu {
- name: format!("Table name = {:?}", table_name),
- })?
- .clone();
- // TODO(discord9): use default key from schema
- let primary_keys = schema
- .keys
- .first()
- .map(|v| {
- v.column_indices
- .iter()
- .map(|i| format!("Col_{i}"))
- .collect_vec()
- })
- .unwrap_or_default();
- let ts_col = ColumnSchema::new(
- "ts",
- ConcreteDataType::timestamp_millisecond_datatype(),
- true,
- )
- .with_time_index(true);
-
- let wout_ts = schema
- .column_types
- .into_iter()
- .enumerate()
- .map(|(idx, typ)| {
- ColumnSchema::new(format!("Col_{idx}"), typ.scalar_type, typ.nullable)
- })
- .collect_vec();
- let mut with_ts = wout_ts.clone();
- with_ts.push(ts_col);
- (primary_keys, with_ts)
- };
- (primary_keys, schema)
+ // TODO(discord9): consider removing buggy auto create by schema
+
+ let node_ctx = self.node_context.lock().await;
+ let gid: GlobalId = node_ctx
+ .table_repr
+ .get_by_name(&table_name)
+ .map(|x| x.1)
+ .unwrap();
+ let schema = node_ctx
+ .schema
+ .get(&gid)
+ .with_context(|| TableNotFoundSnafu {
+ name: format!("Table name = {:?}", table_name),
+ })?
+ .clone();
+ // TODO(discord9): use default key from schema
+ let primary_keys = schema
+ .keys
+ .first()
+ .map(|v| {
+ v.column_indices
+ .iter()
+ .map(|i| format!("Col_{i}"))
+ .collect_vec()
+ })
+ .unwrap_or_default();
+ let update_at = ColumnSchema::new(
+ "update_at",
+ ConcreteDataType::timestamp_millisecond_datatype(),
+ true,
+ );
+ // TODO(discord9): inferring the time index from the flow plan is still bugged, so we have to set one manually
+ let ts_col = ColumnSchema::new(
+ "__ts_placeholder",
+ ConcreteDataType::timestamp_millisecond_datatype(),
+ true,
+ )
+ .with_time_index(true);
+
+ let wout_ts = schema
+ .column_types
+ .into_iter()
+ .enumerate()
+ .map(|(idx, typ)| {
+ ColumnSchema::new(format!("Col_{idx}"), typ.scalar_type, typ.nullable)
+ })
+ .collect_vec();
+
+ let mut with_ts = wout_ts.clone();
+ with_ts.push(update_at);
+ with_ts.push(ts_col);
+
+ (primary_keys, with_ts, true)
};
let proto_schema = column_schemas_to_proto(schema, &primary_keys)?;
@@ -336,16 +349,32 @@ impl FlownodeManager {
table_name.join("."),
reqs
);
-
+ let now = SystemTime::now();
+ let now = now
+ .duration_since(SystemTime::UNIX_EPOCH)
+ .map(|s| s.as_millis() as repr::Timestamp)
+ .unwrap_or_else(|_| {
+ -(SystemTime::UNIX_EPOCH
+ .duration_since(now)
+ .unwrap()
+ .as_millis() as repr::Timestamp)
+ });
for req in reqs {
match req {
DiffRequest::Insert(insert) => {
let rows_proto: Vec<v1::Row> = insert
.into_iter()
.map(|(mut row, _ts)| {
- row.extend(Some(Value::from(
- common_time::Timestamp::new_millisecond(0),
- )));
+ // `update_at` col
+ row.extend([Value::from(common_time::Timestamp::new_millisecond(
+ now,
+ ))]);
+ // ts col, if auto create
+ if is_auto_create {
+ row.extend([Value::from(
+ common_time::Timestamp::new_millisecond(0),
+ )]);
+ }
row.into()
})
.collect::<Vec<_>>();
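[Editor's note, not part of the commit above] The `update_at` value computed above maps `SystemTime` to signed milliseconds so that times before the Unix epoch become negative instead of panicking. A standalone sketch of the same logic; `millis_since_epoch` is an illustrative name and `i64` stands in for `repr::Timestamp`:

use std::time::{Duration, SystemTime};

// Positive for times after the epoch, negative for times before it.
fn millis_since_epoch(now: SystemTime) -> i64 {
    now.duration_since(SystemTime::UNIX_EPOCH)
        .map(|d| d.as_millis() as i64)
        .unwrap_or_else(|_| {
            -(SystemTime::UNIX_EPOCH
                .duration_since(now)
                .unwrap()
                .as_millis() as i64)
        })
}

fn main() {
    let before_epoch = SystemTime::UNIX_EPOCH - Duration::from_millis(1_500);
    assert_eq!(millis_since_epoch(before_epoch), -1_500);
}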
diff --git a/src/flow/src/adapter/node_context.rs b/src/flow/src/adapter/node_context.rs
index 82900aac3644..b1d01373fb8a 100644
--- a/src/flow/src/adapter/node_context.rs
+++ b/src/flow/src/adapter/node_context.rs
@@ -30,7 +30,7 @@ use crate::expr::GlobalId;
use crate::repr::{DiffRow, RelationType, BROADCAST_CAP};
/// A context that holds the information of the dataflow
-#[derive(Default)]
+#[derive(Default, Debug)]
pub struct FlownodeContext {
/// mapping from source table to tasks, useful for schedule which task to run when a source table is updated
pub source_to_tasks: BTreeMap<TableId, BTreeSet<FlowId>>,
@@ -64,6 +64,7 @@ pub struct FlownodeContext {
///
/// receiver still use tokio broadcast channel, since only sender side need to know
/// backpressure and adjust dataflow running duration to avoid blocking
+#[derive(Debug)]
pub struct SourceSender {
sender: broadcast::Sender<DiffRow>,
send_buf: VecDeque<DiffRow>,
diff --git a/src/flow/src/compute/render.rs b/src/flow/src/compute/render.rs
index 8279974b4781..44b025359852 100644
--- a/src/flow/src/compute/render.rs
+++ b/src/flow/src/compute/render.rs
@@ -223,11 +223,11 @@ mod test {
use hydroflow::scheduled::graph::Hydroflow;
use hydroflow::scheduled::graph_ext::GraphExt;
use hydroflow::scheduled::handoff::VecHandoff;
+ use pretty_assertions::{assert_eq, assert_ne};
use super::*;
use crate::expr::BinaryFunc;
use crate::repr::Row;
-
pub fn run_and_check(
state: &mut DataflowState,
df: &mut Hydroflow,
diff --git a/src/flow/src/compute/render/reduce.rs b/src/flow/src/compute/render/reduce.rs
index c43ce54f8cac..e46f8c2bedc3 100644
--- a/src/flow/src/compute/render/reduce.rs
+++ b/src/flow/src/compute/render/reduce.rs
@@ -739,6 +739,7 @@ mod test {
use std::cell::RefCell;
use std::rc::Rc;
+ use common_time::{DateTime, Interval, Timestamp};
use datatypes::data_type::{ConcreteDataType, ConcreteDataType as CDT};
use hydroflow::scheduled::graph::Hydroflow;
@@ -748,6 +749,165 @@ mod test {
use crate::expr::{self, AggregateFunc, BinaryFunc, GlobalId, MapFilterProject, UnaryFunc};
use crate::repr::{ColumnType, RelationType};
+ /// SELECT sum(number) FROM numbers_with_ts GROUP BY tumble(ts, '1 second', '2021-07-01 00:00:00')
+ /// input table columns: number, ts
+ /// expected: sum(number), window_start, window_end
+ #[test]
+ fn test_tumble_group_by() {
+ let mut df = Hydroflow::new();
+ let mut state = DataflowState::default();
+ let mut ctx = harness_test_ctx(&mut df, &mut state);
+ const START: i64 = 1625097600000;
+ let rows = vec![
+ (1u32, START + 1000),
+ (2u32, START + 1500),
+ (3u32, START + 2000),
+ (1u32, START + 2500),
+ (2u32, START + 3000),
+ (3u32, START + 3500),
+ ];
+ let rows = rows
+ .into_iter()
+ .map(|(number, ts)| {
+ (
+ Row::new(vec![number.into(), Timestamp::new_millisecond(ts).into()]),
+ 1,
+ 1,
+ )
+ })
+ .collect_vec();
+
+ let collection = ctx.render_constant(rows.clone());
+ ctx.insert_global(GlobalId::User(1), collection);
+
+ let aggr_expr = AggregateExpr {
+ func: AggregateFunc::SumUInt32,
+ expr: ScalarExpr::Column(0),
+ distinct: false,
+ };
+ let expected = TypedPlan {
+ typ: RelationType::new(vec![
+ ColumnType::new(CDT::uint64_datatype(), true), // sum(number)
+ ColumnType::new(CDT::datetime_datatype(), false), // window start
+ ColumnType::new(CDT::datetime_datatype(), false), // window end
+ ]),
+ // TODO(discord9): mfp indirectly ref to key columns
+ /*
+ .with_key(vec![1])
+ .with_time_index(Some(0)),*/
+ plan: Plan::Mfp {
+ input: Box::new(
+ Plan::Reduce {
+ input: Box::new(
+ Plan::Get {
+ id: crate::expr::Id::Global(GlobalId::User(1)),
+ }
+ .with_types(RelationType::new(vec![
+ ColumnType::new(ConcreteDataType::uint32_datatype(), false),
+ ColumnType::new(ConcreteDataType::datetime_datatype(), false),
+ ])),
+ ),
+ key_val_plan: KeyValPlan {
+ key_plan: MapFilterProject::new(2)
+ .map(vec![
+ ScalarExpr::Column(1).call_unary(
+ UnaryFunc::TumbleWindowFloor {
+ window_size: Interval::from_month_day_nano(
+ 0,
+ 0,
+ 1_000_000_000,
+ ),
+ start_time: Some(DateTime::new(1625097600000)),
+ },
+ ),
+ ScalarExpr::Column(1).call_unary(
+ UnaryFunc::TumbleWindowCeiling {
+ window_size: Interval::from_month_day_nano(
+ 0,
+ 0,
+ 1_000_000_000,
+ ),
+ start_time: Some(DateTime::new(1625097600000)),
+ },
+ ),
+ ])
+ .unwrap()
+ .project(vec![2, 3])
+ .unwrap()
+ .into_safe(),
+ val_plan: MapFilterProject::new(2)
+ .project(vec![0, 1])
+ .unwrap()
+ .into_safe(),
+ },
+ reduce_plan: ReducePlan::Accumulable(AccumulablePlan {
+ full_aggrs: vec![aggr_expr.clone()],
+ simple_aggrs: vec![AggrWithIndex::new(aggr_expr.clone(), 0, 0)],
+ distinct_aggrs: vec![],
+ }),
+ }
+ .with_types(
+ RelationType::new(vec![
+ ColumnType::new(CDT::datetime_datatype(), false), // window start
+ ColumnType::new(CDT::datetime_datatype(), false), // window end
+ ColumnType::new(CDT::uint64_datatype(), true), //sum(number)
+ ])
+ .with_key(vec![1])
+ .with_time_index(Some(0)),
+ ),
+ ),
+ mfp: MapFilterProject::new(3)
+ .map(vec![
+ ScalarExpr::Column(2),
+ ScalarExpr::Column(3),
+ ScalarExpr::Column(0),
+ ScalarExpr::Column(1),
+ ])
+ .unwrap()
+ .project(vec![4, 5, 6])
+ .unwrap(),
+ },
+ };
+
+ let bundle = ctx.render_plan(expected).unwrap();
+
+ let output = get_output_handle(&mut ctx, bundle);
+ drop(ctx);
+ let expected = BTreeMap::from([(
+ 1,
+ vec![
+ (
+ Row::new(vec![
+ 3u64.into(),
+ Timestamp::new_millisecond(START + 1000).into(),
+ Timestamp::new_millisecond(START + 2000).into(),
+ ]),
+ 1,
+ 1,
+ ),
+ (
+ Row::new(vec![
+ 4u64.into(),
+ Timestamp::new_millisecond(START + 2000).into(),
+ Timestamp::new_millisecond(START + 3000).into(),
+ ]),
+ 1,
+ 1,
+ ),
+ (
+ Row::new(vec![
+ 5u64.into(),
+ Timestamp::new_millisecond(START + 3000).into(),
+ Timestamp::new_millisecond(START + 4000).into(),
+ ]),
+ 1,
+ 1,
+ ),
+ ],
+ )]);
+ run_and_check(&mut state, &mut df, 1..2, expected, output);
+ }
+
/// select avg(number) from number;
#[test]
fn test_avg_eval() {
diff --git a/src/flow/src/expr/func.rs b/src/flow/src/expr/func.rs
index 12335fdf1f9c..7957f70cb6c4 100644
--- a/src/flow/src/expr/func.rs
+++ b/src/flow/src/expr/func.rs
@@ -17,8 +17,10 @@
use std::collections::HashMap;
use std::sync::OnceLock;
+use common_error::ext::BoxedError;
use common_telemetry::debug;
-use common_time::DateTime;
+use common_time::timestamp::TimeUnit;
+use common_time::{DateTime, Timestamp};
use datafusion_expr::Operator;
use datatypes::data_type::ConcreteDataType;
use datatypes::types::cast;
@@ -30,14 +32,14 @@ use snafu::{ensure, OptionExt, ResultExt};
use strum::{EnumIter, IntoEnumIterator};
use substrait::df_logical_plan::consumer::name_to_op;
-use crate::adapter::error::{Error, InvalidQuerySnafu, PlanSnafu};
+use crate::adapter::error::{Error, ExternalSnafu, InvalidQuerySnafu, PlanSnafu};
use crate::expr::error::{
- CastValueSnafu, DivisionByZeroSnafu, EvalError, InternalSnafu, TryFromValueSnafu,
- TypeMismatchSnafu,
+ CastValueSnafu, DivisionByZeroSnafu, EvalError, InternalSnafu, OverflowSnafu,
+ TryFromValueSnafu, TypeMismatchSnafu,
};
use crate::expr::signature::{GenericFn, Signature};
-use crate::expr::{InvalidArgumentSnafu, ScalarExpr};
-use crate::repr::{value_to_internal_ts, Row};
+use crate::expr::{InvalidArgumentSnafu, ScalarExpr, TypedExpr};
+use crate::repr::{self, value_to_internal_ts, Row};
/// UnmaterializableFunc is a function that can't be eval independently,
/// and require special handling
@@ -45,6 +47,11 @@ use crate::repr::{value_to_internal_ts, Row};
pub enum UnmaterializableFunc {
Now,
CurrentSchema,
+ TumbleWindow {
+ ts: Box<TypedExpr>,
+ window_size: common_time::Interval,
+ start_time: Option<DateTime>,
+ },
}
impl UnmaterializableFunc {
@@ -61,14 +68,51 @@ impl UnmaterializableFunc {
output: ConcreteDataType::string_datatype(),
generic_fn: GenericFn::CurrentSchema,
},
+ Self::TumbleWindow { .. } => Signature {
+ input: smallvec![ConcreteDataType::timestamp_millisecond_datatype()],
+ output: ConcreteDataType::timestamp_millisecond_datatype(),
+ generic_fn: GenericFn::TumbleWindow,
+ },
}
}
/// Create a UnmaterializableFunc from a string of the function name
- pub fn from_str(name: &str) -> Result<Self, Error> {
- match name {
+ pub fn from_str_args(name: &str, args: Vec<TypedExpr>) -> Result<Self, Error> {
+ match name.to_lowercase().as_str() {
"now" => Ok(Self::Now),
"current_schema" => Ok(Self::CurrentSchema),
+ "tumble" => {
+ let ts = args.first().context(InvalidQuerySnafu {
+ reason: "Tumble window function requires a timestamp argument",
+ })?;
+ let window_size = args
+ .get(1)
+ .and_then(|expr| expr.expr.as_literal())
+ .context(InvalidQuerySnafu {
+ reason: "Tumble window function requires a window size argument"
+ })?.as_string() // TODO(discord9): since the df-to-substrait convertor does not support the interval type yet, we take a string and cast it to an interval instead
+ .map(|s|cast(Value::from(s), &ConcreteDataType::interval_month_day_nano_datatype())).transpose().map_err(BoxedError::new).context(
+ ExternalSnafu
+ )?.and_then(|v|v.as_interval())
+ .with_context(||InvalidQuerySnafu {
+ reason: format!("Tumble window function requires window size argument to be a string describing an interval, found {:?}", args.get(1))
+ })?;
+ let start_time = match args.get(2) {
+ Some(start_time) => start_time.expr.as_literal(),
+ None => None,
+ }
+ .map(|s| cast(s.clone(), &ConcreteDataType::datetime_datatype())).transpose().map_err(BoxedError::new).context(ExternalSnafu)?.map(|v|v.as_datetime().with_context(
+ ||InvalidQuerySnafu {
+ reason: format!("Tumble window function requires start time argument to be a datetime described as a string, found {:?}", args.get(2))
+ }
+ )).transpose()?;
+
+ Ok(Self::TumbleWindow {
+ ts: Box::new(ts.clone()),
+ window_size,
+ start_time,
+ })
+ }
_ => InvalidQuerySnafu {
reason: format!("Unknown unmaterializable function: {}", name),
}
@@ -87,6 +131,14 @@ pub enum UnaryFunc {
IsFalse,
StepTimestamp,
Cast(ConcreteDataType),
+ TumbleWindowFloor {
+ window_size: common_time::Interval,
+ start_time: Option<DateTime>,
+ },
+ TumbleWindowCeiling {
+ window_size: common_time::Interval,
+ start_time: Option<DateTime>,
+ },
}
impl UnaryFunc {
@@ -118,6 +170,16 @@ impl UnaryFunc {
output: to.clone(),
generic_fn: GenericFn::Cast,
},
+ Self::TumbleWindowFloor { .. } => Signature {
+ input: smallvec![ConcreteDataType::timestamp_millisecond_datatype()],
+ output: ConcreteDataType::timestamp_millisecond_datatype(),
+ generic_fn: GenericFn::TumbleWindow,
+ },
+ Self::TumbleWindowCeiling { .. } => Signature {
+ input: smallvec![ConcreteDataType::timestamp_millisecond_datatype()],
+ output: ConcreteDataType::timestamp_millisecond_datatype(),
+ generic_fn: GenericFn::TumbleWindow,
+ },
}
}
@@ -211,10 +273,51 @@ impl UnaryFunc {
debug!("Cast to type: {to:?}, result: {:?}", res);
res
}
+ Self::TumbleWindowFloor {
+ window_size,
+ start_time,
+ } => {
+ let ts = get_ts_as_millisecond(arg)?;
+ let start_time = start_time.map(|t| t.val()).unwrap_or(0);
+ let window_size = (window_size.to_nanosecond() / 1_000_000) as repr::Duration; // nanosecond to millisecond
+ let window_start = start_time + (ts - start_time) / window_size * window_size;
+
+ let ret = Timestamp::new_millisecond(window_start);
+ Ok(Value::from(ret))
+ }
+ Self::TumbleWindowCeiling {
+ window_size,
+ start_time,
+ } => {
+ let ts = get_ts_as_millisecond(arg)?;
+ let start_time = start_time.map(|t| t.val()).unwrap_or(0);
+ let window_size = (window_size.to_nanosecond() / 1_000_000) as repr::Duration; // nanosecond to millisecond
+ let window_start = start_time + (ts - start_time) / window_size * window_size;
+
+ let window_end = window_start + window_size;
+ let ret = Timestamp::new_millisecond(window_end);
+ Ok(Value::from(ret))
+ }
}
}
}
+fn get_ts_as_millisecond(arg: Value) -> Result<repr::Timestamp, EvalError> {
+ let ts = if let Some(ts) = arg.as_timestamp() {
+ ts.convert_to(TimeUnit::Millisecond)
+ .context(OverflowSnafu)?
+ .value()
+ } else if let Some(ts) = arg.as_datetime() {
+ ts.val()
+ } else {
+ InvalidArgumentSnafu {
+ reason: "Expect input to be timestamp or datetime type",
+ }
+ .fail()?
+ };
+ Ok(ts)
+}
+
/// BinaryFunc is a function that takes two arguments.
/// Also notice this enum doesn't contain function arguments, since the arguments are stored in the expression.
///
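[Editor's note, not part of the commit above] Both `TumbleWindowFloor` and `TumbleWindowCeiling` derive the window start as `start_time + (ts - start_time) / window_size * window_size` (integer division), and the ceiling simply adds one window size. A standalone sketch using the constants from the reduce test earlier in this diff; `tumble_bounds` is an illustrative helper, not an API introduced by the commit:

// All values are milliseconds since the Unix epoch.
fn tumble_bounds(ts: i64, start_time: i64, window_size: i64) -> (i64, i64) {
    let window_start = start_time + (ts - start_time) / window_size * window_size;
    (window_start, window_start + window_size)
}

fn main() {
    let start = 1_625_097_600_000; // 2021-07-01 00:00:00 UTC
    // An event 1.5s after the window origin with 1s windows falls in [start+1000, start+2000).
    assert_eq!(
        tumble_bounds(start + 1_500, start, 1_000),
        (start + 1_000, start + 2_000)
    );
}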
diff --git a/src/flow/src/expr/scalar.rs b/src/flow/src/expr/scalar.rs
index 098de9c102e1..53c570e7a6c2 100644
--- a/src/flow/src/expr/scalar.rs
+++ b/src/flow/src/expr/scalar.rs
@@ -26,10 +26,10 @@ use crate::adapter::error::{
};
use crate::expr::error::{EvalError, InvalidArgumentSnafu, OptimizeSnafu};
use crate::expr::func::{BinaryFunc, UnaryFunc, UnmaterializableFunc, VariadicFunc};
-use crate::repr::ColumnType;
+use crate::repr::{ColumnType, RelationType};
/// A scalar expression with a known type.
-#[derive(Debug, Clone)]
+#[derive(Ord, PartialOrd, Clone, Debug, Eq, PartialEq, Serialize, Deserialize, Hash)]
pub struct TypedExpr {
/// The expression.
pub expr: ScalarExpr,
@@ -43,7 +43,73 @@ impl TypedExpr {
}
}
-/// TODO(discord9): add tumble function here
+impl TypedExpr {
+ /// Expand a multi-value expression into multiple expressions with new indices
+ pub fn expand_multi_value(
+ input_typ: &RelationType,
+ exprs: &[TypedExpr],
+ ) -> Result<Vec<TypedExpr>, Error> {
+ // old indices in mfp, expanded expr
+ let mut ret = vec![];
+ let input_arity = input_typ.column_types.len();
+ for (old_idx, expr) in exprs.iter().enumerate() {
+ if let ScalarExpr::CallUnmaterializable(UnmaterializableFunc::TumbleWindow {
+ ts,
+ window_size,
+ start_time,
+ }) = &expr.expr
+ {
+ let floor = UnaryFunc::TumbleWindowFloor {
+ window_size: *window_size,
+ start_time: *start_time,
+ };
+ let ceil = UnaryFunc::TumbleWindowCeiling {
+ window_size: *window_size,
+ start_time: *start_time,
+ };
+ let floor = ScalarExpr::CallUnary {
+ func: floor,
+ expr: Box::new(ts.expr.clone()),
+ }
+ .with_type(ts.typ.clone());
+ ret.push((None, floor));
+
+ let ceil = ScalarExpr::CallUnary {
+ func: ceil,
+ expr: Box::new(ts.expr.clone()),
+ }
+ .with_type(ts.typ.clone());
+ ret.push((None, ceil));
+ } else {
+ ret.push((Some(input_arity + old_idx), expr.clone()))
+ }
+ }
+
+ // get shuffled index(old_idx -> new_idx)
+ // note: indices are offset by input_arity because mfp is designed to first include input columns, then intermediate columns
+ let shuffle = ret
+ .iter()
+ .map(|(old_idx, _)| *old_idx) // [Option<opt_idx>]
+ .enumerate()
+ .map(|(new, old)| (old, new + input_arity))
+ .flat_map(|(old, new)| old.map(|o| (o, new)))
+ .chain((0..input_arity).map(|i| (i, i))) // also remember to chain the input columns as not changed
+ .collect::<BTreeMap<_, _>>();
+
+ // shuffle expr's index
+ let exprs = ret
+ .into_iter()
+ .map(|(_, mut expr)| {
+ // invariant: it is expected that no expr will try to refer to the column being expanded
+ expr.expr.permute_map(&shuffle)?;
+ Ok(expr)
+ })
+ .collect::<Result<Vec<_>, _>>()?;
+
+ Ok(exprs)
+ }
+}
+
/// A scalar expression, which can be evaluated to a value.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum ScalarExpr {
@@ -84,6 +150,10 @@ pub enum ScalarExpr {
}
impl ScalarExpr {
+ pub fn with_type(self, typ: ColumnType) -> TypedExpr {
+ TypedExpr::new(self, typ)
+ }
+
/// try to determine the type of the expression
pub fn typ(&self, context: &[ColumnType]) -> Result<ColumnType, Error> {
match self {
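[Editor's note, not part of the commit above] In `expand_multi_value`, a tumble group expression expands into a window-floor and a window-ceiling column, so every later group expression's slot in the mfp shifts while the input columns keep their indices; the shuffle map records exactly that. A standalone sketch of how the map is built; `shuffle_for` and the literal slot list are illustrative only, the real code derives them from `TypedExpr`s:

use std::collections::BTreeMap;

// `expanded_slots` holds, per expanded expression, the old mfp index it came from
// (None for the two columns produced by the tumble expansion).
fn shuffle_for(input_arity: usize, expanded_slots: &[Option<usize>]) -> BTreeMap<usize, usize> {
    expanded_slots
        .iter()
        .enumerate()
        .map(|(new, old)| (*old, new + input_arity))
        .flat_map(|(old, new)| old.map(|o| (o, new)))
        .chain((0..input_arity).map(|i| (i, i)))
        .collect()
}

fn main() {
    // input_arity = 2 and group exprs [tumble(ts), e1, e2]: the tumble expands into two
    // slots (None, None), while e1 and e2 originally sat at mfp indices 3 and 4.
    let shuffle = shuffle_for(2, &[None, None, Some(3), Some(4)]);
    // Input columns stay put; e1 and e2 each shift one slot to the right.
    assert_eq!(shuffle, BTreeMap::from([(0, 0), (1, 1), (3, 4), (4, 5)]));
}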
diff --git a/src/flow/src/expr/signature.rs b/src/flow/src/expr/signature.rs
index a7615502a520..d61a60dea5e2 100644
--- a/src/flow/src/expr/signature.rs
+++ b/src/flow/src/expr/signature.rs
@@ -64,4 +64,5 @@ pub enum GenericFn {
// unmaterialized func
Now,
CurrentSchema,
+ TumbleWindow,
}
diff --git a/src/flow/src/repr/relation.rs b/src/flow/src/repr/relation.rs
index 9494a013bb75..59edb31616fa 100644
--- a/src/flow/src/repr/relation.rs
+++ b/src/flow/src/repr/relation.rs
@@ -206,6 +206,15 @@ impl RelationType {
self
}
+ /// will also remove the time index from the keys if it is present in them
+ pub fn with_time_index(mut self, time_index: Option<usize>) -> Self {
+ self.time_index = time_index;
+ for key in &mut self.keys {
+ key.remove_col(time_index.unwrap_or(usize::MAX));
+ }
+ self
+ }
+
/// Computes the number of columns in the relation.
pub fn arity(&self) -> usize {
self.column_types.len()
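[Editor's note, not part of the commit above] Because the aggregation output keys are built as all group columns first and the time index is applied afterwards, promoting the window-start column to the time index removes it from the key set; that is how the expected plans in this diff arrive at `.with_key(vec![1]).with_time_index(Some(0))`. A simplified standalone sketch where `strip_time_index` stands in for the `Key::remove_col` call above, assuming it drops the matching column index:

fn strip_time_index(mut key: Vec<usize>, time_index: Option<usize>) -> Vec<usize> {
    if let Some(ti) = time_index {
        key.retain(|col| *col != ti);
    }
    key
}

fn main() {
    // Keys start as both group columns [0, 1]; column 0 becomes the time index.
    assert_eq!(strip_time_index(vec![0, 1], Some(0)), vec![1]);
}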
diff --git a/src/flow/src/transform.rs b/src/flow/src/transform.rs
index 8eca0788e8a1..9fe0b73d3642 100644
--- a/src/flow/src/transform.rs
+++ b/src/flow/src/transform.rs
@@ -130,12 +130,60 @@ pub async fn sql_to_flow_plan(
Ok(flow_plan)
}
+/// register flow-specific functions to the query engine
+pub fn register_function_to_query_engine(engine: &Arc<dyn QueryEngine>) {
+ engine.register_function(Arc::new(TumbleFunction {}));
+}
+
+#[derive(Debug)]
+pub struct TumbleFunction {}
+
+const TUMBLE_NAME: &str = "tumble";
+
+impl std::fmt::Display for TumbleFunction {
+ fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
+ write!(f, "{}", TUMBLE_NAME.to_ascii_uppercase())
+ }
+}
+
+impl common_function::function::Function for TumbleFunction {
+ fn name(&self) -> &str {
+ TUMBLE_NAME
+ }
+
+ fn return_type(&self, _input_types: &[CDT]) -> common_query::error::Result<CDT> {
+ Ok(CDT::datetime_datatype())
+ }
+
+ fn signature(&self) -> common_query::prelude::Signature {
+ common_query::prelude::Signature::variadic_any(common_query::prelude::Volatility::Immutable)
+ }
+
+ fn eval(
+ &self,
+ _func_ctx: common_function::function::FunctionContext,
+ _columns: &[datatypes::prelude::VectorRef],
+ ) -> common_query::error::Result<datatypes::prelude::VectorRef> {
+ UnexpectedSnafu {
+ reason: "Tumble function is not implemented for the datafusion executor",
+ }
+ .fail()
+ .map_err(BoxedError::new)
+ .context(common_query::error::ExecuteSnafu)
+ }
+}
+
#[cfg(test)]
mod test {
use std::sync::Arc;
use catalog::RegisterTableRequest;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, NUMBERS_TABLE_ID};
+ use common_time::{Date, DateTime};
+ use datatypes::prelude::*;
+ use datatypes::schema::Schema;
+ use datatypes::vectors::VectorRef;
+ use itertools::Itertools;
use prost::Message;
use query::parser::QueryLanguageParser;
use query::plan::LogicalPlan;
@@ -144,23 +192,45 @@ mod test {
use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};
use substrait_proto::proto;
use table::table::numbers::{NumbersTable, NUMBERS_TABLE_NAME};
+ use table::test_util::MemTable;
use super::*;
use crate::adapter::node_context::IdToNameMap;
use crate::repr::ColumnType;
pub fn create_test_ctx() -> FlownodeContext {
- let gid = GlobalId::User(0);
- let name = [
- "greptime".to_string(),
- "public".to_string(),
- "numbers".to_string(),
- ];
- let schema = RelationType::new(vec![ColumnType::new(CDT::uint32_datatype(), false)]);
+ let mut schemas = HashMap::new();
let mut tri_map = IdToNameMap::new();
- tri_map.insert(Some(name.clone()), Some(0), gid);
+ {
+ let gid = GlobalId::User(0);
+ let name = [
+ "greptime".to_string(),
+ "public".to_string(),
+ "numbers".to_string(),
+ ];
+ let schema = RelationType::new(vec![ColumnType::new(CDT::uint32_datatype(), false)]);
+
+ tri_map.insert(Some(name.clone()), Some(1024), gid);
+ schemas.insert(gid, schema);
+ }
+
+ {
+ let gid = GlobalId::User(1);
+ let name = [
+ "greptime".to_string(),
+ "public".to_string(),
+ "numbers_with_ts".to_string(),
+ ];
+ let schema = RelationType::new(vec![
+ ColumnType::new(CDT::uint32_datatype(), false),
+ ColumnType::new(CDT::datetime_datatype(), false),
+ ]);
+ schemas.insert(gid, schema);
+ tri_map.insert(Some(name.clone()), Some(1025), gid);
+ }
+
FlownodeContext {
- schema: HashMap::from([(gid, schema)]),
+ schema: schemas,
table_repr: tri_map,
query_context: Some(Arc::new(QueryContext::with("greptime", "public"))),
..Default::default()
@@ -177,9 +247,37 @@ mod test {
table: NumbersTable::table(NUMBERS_TABLE_ID),
};
catalog_list.register_table_sync(req).unwrap();
+
+ let schema = vec![
+ datatypes::schema::ColumnSchema::new("number", CDT::uint32_datatype(), false),
+ datatypes::schema::ColumnSchema::new("ts", CDT::datetime_datatype(), false),
+ ];
+ let mut columns = vec![];
+ let numbers = (1..=10).collect_vec();
+ let column: VectorRef = Arc::new(<u32 as Scalar>::VectorType::from_vec(numbers));
+ columns.push(column);
+
+ let ts = (1..=10).collect_vec();
+ let column: VectorRef = Arc::new(<DateTime as Scalar>::VectorType::from_vec(ts));
+ columns.push(column);
+
+ let schema = Arc::new(Schema::new(schema));
+ let recordbatch = common_recordbatch::RecordBatch::new(schema, columns).unwrap();
+ let table = MemTable::table("numbers_with_ts", recordbatch);
+
+ let req_with_ts = RegisterTableRequest {
+ catalog: DEFAULT_CATALOG_NAME.to_string(),
+ schema: DEFAULT_SCHEMA_NAME.to_string(),
+ table_name: "numbers_with_ts".to_string(),
+ table_id: 1024,
+ table,
+ };
+ catalog_list.register_table_sync(req_with_ts).unwrap();
+
let factory = query::QueryEngineFactory::new(catalog_list, None, None, None, false);
let engine = factory.query_engine();
+ engine.register_function(Arc::new(TumbleFunction {}));
assert_eq!("datafusion", engine.name());
engine
diff --git a/src/flow/src/transform/aggr.rs b/src/flow/src/transform/aggr.rs
index 3f3bf3fb7c9f..d21df2cf6907 100644
--- a/src/flow/src/transform/aggr.rs
+++ b/src/flow/src/transform/aggr.rs
@@ -302,8 +302,26 @@ impl TypedPlan {
return not_impl_err!("Aggregate without an input is not supported");
};
- let group_exprs =
- TypedExpr::from_substrait_agg_grouping(ctx, &agg.groupings, &input.typ, extensions)?;
+ let group_exprs = {
+ let group_exprs = TypedExpr::from_substrait_agg_grouping(
+ ctx,
+ &agg.groupings,
+ &input.typ,
+ extensions,
+ )?;
+
+ TypedExpr::expand_multi_value(&input.typ, &group_exprs)?
+ };
+
+ let time_index = group_exprs.iter().position(|expr| {
+ matches!(
+ &expr.expr,
+ ScalarExpr::CallUnary {
+ func: UnaryFunc::TumbleWindowFloor { .. },
+ expr: _
+ }
+ )
+ });
let (mut aggr_exprs, post_mfp) =
AggregateExpr::from_substrait_agg_measures(ctx, &agg.measures, &input.typ, extensions)?;
@@ -314,6 +332,7 @@ impl TypedPlan {
input.typ.column_types.len(),
)?;
+ // output type is group_exprs + aggr_exprs
let output_type = {
let mut output_types = Vec::new();
// first append group_expr as key, then aggr_expr as value
@@ -332,7 +351,8 @@ impl TypedPlan {
} else {
RelationType::new(output_types).with_key((0..group_exprs.len()).collect_vec())
}
- };
+ }
+ .with_time_index(time_index);
// copy aggr_exprs to full_aggrs, and split them into simple_aggrs and distinct_aggrs
// also set them input/output column
@@ -406,6 +426,7 @@ impl TypedPlan {
#[cfg(test)]
mod test {
+ use common_time::{DateTime, Interval};
use datatypes::prelude::ConcreteDataType;
use pretty_assertions::{assert_eq, assert_ne};
@@ -414,6 +435,106 @@ mod test {
use crate::repr::{self, ColumnType, RelationType};
use crate::transform::test::{create_test_ctx, create_test_query_engine, sql_to_substrait};
+ #[tokio::test]
+ async fn test_tumble_parse() {
+ let engine = create_test_query_engine();
+ let sql = "SELECT sum(number) FROM numbers_with_ts GROUP BY tumble(ts, '1 hour', '2021-07-01 00:00:00')";
+ let plan = sql_to_substrait(engine.clone(), sql).await;
+
+ let mut ctx = create_test_ctx();
+ let flow_plan = TypedPlan::from_substrait_plan(&mut ctx, &plan).unwrap();
+
+ let aggr_expr = AggregateExpr {
+ func: AggregateFunc::SumUInt32,
+ expr: ScalarExpr::Column(0),
+ distinct: false,
+ };
+ let expected = TypedPlan {
+ typ: RelationType::new(vec![
+ ColumnType::new(CDT::uint64_datatype(), true), // sum(number)
+ ColumnType::new(CDT::datetime_datatype(), false), // window start
+ ColumnType::new(CDT::datetime_datatype(), false), // window end
+ ]),
+            // TODO(discord9): mfp indirectly refers to key columns
+ /*
+ .with_key(vec![1])
+ .with_time_index(Some(0)),*/
+ plan: Plan::Mfp {
+ input: Box::new(
+ Plan::Reduce {
+ input: Box::new(
+ Plan::Get {
+ id: crate::expr::Id::Global(GlobalId::User(1)),
+ }
+ .with_types(RelationType::new(vec![
+ ColumnType::new(ConcreteDataType::uint32_datatype(), false),
+ ColumnType::new(ConcreteDataType::datetime_datatype(), false),
+ ])),
+ ),
+ key_val_plan: KeyValPlan {
+ key_plan: MapFilterProject::new(2)
+ .map(vec![
+ ScalarExpr::Column(1).call_unary(
+ UnaryFunc::TumbleWindowFloor {
+ window_size: Interval::from_month_day_nano(
+ 0,
+ 0,
+ 3_600_000_000_000,
+ ),
+ start_time: Some(DateTime::new(1625097600000)),
+ },
+ ),
+ ScalarExpr::Column(1).call_unary(
+ UnaryFunc::TumbleWindowCeiling {
+ window_size: Interval::from_month_day_nano(
+ 0,
+ 0,
+ 3_600_000_000_000,
+ ),
+ start_time: Some(DateTime::new(1625097600000)),
+ },
+ ),
+ ])
+ .unwrap()
+ .project(vec![2, 3])
+ .unwrap()
+ .into_safe(),
+ val_plan: MapFilterProject::new(2)
+ .project(vec![0, 1])
+ .unwrap()
+ .into_safe(),
+ },
+ reduce_plan: ReducePlan::Accumulable(AccumulablePlan {
+ full_aggrs: vec![aggr_expr.clone()],
+ simple_aggrs: vec![AggrWithIndex::new(aggr_expr.clone(), 0, 0)],
+ distinct_aggrs: vec![],
+ }),
+ }
+ .with_types(
+ RelationType::new(vec![
+ ColumnType::new(CDT::datetime_datatype(), false), // window start
+ ColumnType::new(CDT::datetime_datatype(), false), // window end
+ ColumnType::new(CDT::uint64_datatype(), true), //sum(number)
+ ])
+ .with_key(vec![1])
+ .with_time_index(Some(0)),
+ ),
+ ),
+ mfp: MapFilterProject::new(3)
+ .map(vec![
+ ScalarExpr::Column(2),
+ ScalarExpr::Column(3),
+ ScalarExpr::Column(0),
+ ScalarExpr::Column(1),
+ ])
+ .unwrap()
+ .project(vec![4, 5, 6])
+ .unwrap(),
+ },
+ };
+ assert_eq!(flow_plan, expected);
+ }
+
#[tokio::test]
async fn test_avg_group_by() {
let engine = create_test_query_engine();
@@ -514,7 +635,8 @@ mod test {
let plan = sql_to_substrait(engine.clone(), sql).await;
let mut ctx = create_test_ctx();
- let flow_plan = TypedPlan::from_substrait_plan(&mut ctx, &plan);
+
+ let flow_plan = TypedPlan::from_substrait_plan(&mut ctx, &plan).unwrap();
let aggr_exprs = vec![
AggregateExpr {
@@ -587,7 +709,7 @@ mod test {
.unwrap(),
},
};
- assert_eq!(flow_plan.unwrap(), expected);
+ assert_eq!(flow_plan, expected);
}
#[tokio::test]
diff --git a/src/flow/src/transform/expr.rs b/src/flow/src/transform/expr.rs
index 72d5da620d40..7e0dc2df3b62 100644
--- a/src/flow/src/transform/expr.rs
+++ b/src/flow/src/transform/expr.rs
@@ -71,7 +71,7 @@ impl TypedExpr {
),
})?;
let arg_len = f.arguments.len();
- let arg_exprs: Vec<TypedExpr> = f
+ let arg_typed_exprs: Vec<TypedExpr> = f
.arguments
.iter()
.map(|arg| match &arg.arg_type {
@@ -83,7 +83,8 @@ impl TypedExpr {
.try_collect()?;
// literal's type is determined by the function and type of other args
- let (arg_exprs, arg_types): (Vec<_>, Vec<_>) = arg_exprs
+ let (arg_exprs, arg_types): (Vec<_>, Vec<_>) = arg_typed_exprs
+ .clone()
.into_iter()
.map(
|TypedExpr {
@@ -174,7 +175,9 @@ impl TypedExpr {
};
expr.optimize();
Ok(TypedExpr::new(expr, ret_type))
- } else if let Ok(func) = UnmaterializableFunc::from_str(fn_name) {
+ } else if let Ok(func) =
+ UnmaterializableFunc::from_str_args(fn_name, arg_typed_exprs)
+ {
let ret_type = ColumnType::new_nullable(func.signature().output.clone());
Ok(TypedExpr::new(
ScalarExpr::CallUnmaterializable(func),
diff --git a/src/flow/src/transform/plan.rs b/src/flow/src/transform/plan.rs
index 083b4a7a1f05..0dedc9e5356b 100644
--- a/src/flow/src/transform/plan.rs
+++ b/src/flow/src/transform/plan.rs
@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::collections::BTreeMap;
+
use itertools::Itertools;
use snafu::OptionExt;
use substrait_proto::proto::expression::MaskExpression;
@@ -22,8 +24,8 @@ use substrait_proto::proto::{plan_rel, Plan as SubPlan, Rel};
use crate::adapter::error::{
Error, InvalidQuerySnafu, NotImplementedSnafu, PlanSnafu, UnexpectedSnafu,
};
-use crate::expr::{MapFilterProject, TypedExpr};
-use crate::plan::{Plan, TypedPlan};
+use crate::expr::{MapFilterProject, ScalarExpr, TypedExpr, UnaryFunc};
+use crate::plan::{KeyValPlan, Plan, ReducePlan, TypedPlan};
use crate::repr::{self, RelationType};
use crate::transform::{substrait_proto, FlownodeContext, FunctionExtensions};
@@ -75,6 +77,7 @@ impl TypedPlan {
} else {
return not_impl_err!("Projection without an input is not supported");
};
+
let mut exprs: Vec<TypedExpr> = vec![];
for e in &p.expressions {
let expr = TypedExpr::from_substrait_rex(e, &input.typ, extensions)?;
@@ -97,6 +100,127 @@ impl TypedPlan {
};
Ok(TypedPlan { typ, plan })
} else {
+            /// if the reduce_plan contains special functions like tumble floor/ceiling, add them to the proj_exprs
+ fn rewrite_projection_after_reduce(
+ key_val_plan: KeyValPlan,
+ _reduce_plan: ReducePlan,
+ reduce_output_type: &RelationType,
+ proj_exprs: &mut Vec<TypedExpr>,
+ ) -> Result<(), Error> {
+ // TODO: get keys correctly
+ let key_exprs = key_val_plan
+ .key_plan
+ .projection
+ .clone()
+ .into_iter()
+ .map(|i| {
+ if i < key_val_plan.key_plan.input_arity {
+ ScalarExpr::Column(i)
+ } else {
+ key_val_plan.key_plan.expressions
+ [i - key_val_plan.key_plan.input_arity]
+ .clone()
+ }
+ })
+ .collect_vec();
+ let mut shift_offset = 0;
+ let special_keys = key_exprs
+ .into_iter()
+ .enumerate()
+ .filter(|(_idx, p)| {
+ if matches!(
+ p,
+ ScalarExpr::CallUnary {
+ func: UnaryFunc::TumbleWindowFloor { .. },
+ ..
+ } | ScalarExpr::CallUnary {
+ func: UnaryFunc::TumbleWindowCeiling { .. },
+ ..
+ }
+ ) {
+ if matches!(
+ p,
+ ScalarExpr::CallUnary {
+ func: UnaryFunc::TumbleWindowFloor { .. },
+ ..
+ }
+ ) {
+ shift_offset += 1;
+ }
+ true
+ } else {
+ false
+ }
+ })
+ .collect_vec();
+ let spec_key_arity = special_keys.len();
+ if spec_key_arity == 0 {
+ return Ok(());
+ }
+
+ {
+ // shift proj_exprs to the right by spec_key_arity
+ let max_used_col_in_proj = proj_exprs
+ .iter()
+ .map(|expr| {
+ expr.expr
+ .get_all_ref_columns()
+ .into_iter()
+ .max()
+ .unwrap_or_default()
+ })
+ .max()
+ .unwrap_or_default();
+
+ let shuffle = (0..=max_used_col_in_proj)
+ .map(|col| (col, col + shift_offset))
+ .collect::<BTreeMap<_, _>>();
+ for proj_expr in proj_exprs.iter_mut() {
+ proj_expr.expr.permute_map(&shuffle)?;
+ } // add key to the end
+ for (key_idx, _key_expr) in special_keys {
+                    // here we assume the output type of the reduce operator is just the key columns first, then the value columns appended
+ proj_exprs.push(
+ ScalarExpr::Column(key_idx).with_type(
+ reduce_output_type.column_types[key_idx].clone(),
+ ),
+ );
+ }
+ }
+
+ Ok(())
+ }
+
+ match input.plan.clone() {
+ Plan::Reduce {
+ key_val_plan,
+ reduce_plan,
+ ..
+ } => {
+ rewrite_projection_after_reduce(
+ key_val_plan,
+ reduce_plan,
+ &input.typ,
+ &mut exprs,
+ )?;
+ }
+ Plan::Mfp { input, mfp: _ } => {
+ if let Plan::Reduce {
+ key_val_plan,
+ reduce_plan,
+ ..
+ } = input.plan
+ {
+ rewrite_projection_after_reduce(
+ key_val_plan,
+ reduce_plan,
+ &input.typ,
+ &mut exprs,
+ )?;
+ }
+ }
+ _ => (),
+ }
input.projection(exprs)
}
}
|
feat
|
tumble window func (#3968)
|
122bd5f0ab465537c878fbab50e8072db4adcce9
|
2023-05-16 04:58:24
|
Eugene Tolbakov
|
feat(tql): add initial implementation for explain & analyze (#1427)
| false
|
diff --git a/src/datanode/src/instance/sql.rs b/src/datanode/src/instance/sql.rs
index d38e0390b159..f0eda492b4ba 100644
--- a/src/datanode/src/instance/sql.rs
+++ b/src/datanode/src/instance/sql.rs
@@ -171,9 +171,7 @@ impl Instance {
) -> Result<Output> {
let query = PromQuery {
query: promql.to_string(),
- start: "0".to_string(),
- end: "0".to_string(),
- step: "5m".to_string(),
+ ..PromQuery::default()
};
let mut stmt = QueryLanguageParser::parse_promql(&query).context(ExecuteSqlSnafu)?;
match &mut stmt {
diff --git a/src/frontend/src/statement/tql.rs b/src/frontend/src/statement/tql.rs
index 0f4a4f50a9e2..9b7f72c821db 100644
--- a/src/frontend/src/statement/tql.rs
+++ b/src/frontend/src/statement/tql.rs
@@ -12,20 +12,20 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::collections::HashMap;
+
use common_query::Output;
-use query::parser::{PromQuery, QueryLanguageParser};
+use query::parser::{PromQuery, QueryLanguageParser, ANALYZE_NODE_NAME, EXPLAIN_NODE_NAME};
use session::context::QueryContextRef;
use snafu::ResultExt;
use sql::statements::tql::Tql;
-use crate::error::{
- ExecLogicalPlanSnafu, NotSupportedSnafu, ParseQuerySnafu, PlanStatementSnafu, Result,
-};
+use crate::error::{ExecLogicalPlanSnafu, ParseQuerySnafu, PlanStatementSnafu, Result};
use crate::statement::StatementExecutor;
impl StatementExecutor {
pub(super) async fn execute_tql(&self, tql: Tql, query_ctx: QueryContextRef) -> Result<Output> {
- let plan = match tql {
+ let stmt = match tql {
Tql::Eval(eval) => {
let promql = PromQuery {
start: eval.start,
@@ -33,20 +33,39 @@ impl StatementExecutor {
step: eval.step,
query: eval.query,
};
- let stmt = QueryLanguageParser::parse_promql(&promql).context(ParseQuerySnafu)?;
- self.query_engine
- .planner()
- .plan(stmt, query_ctx.clone())
- .await
- .context(PlanStatementSnafu)?
+ QueryLanguageParser::parse_promql(&promql).context(ParseQuerySnafu)?
}
- Tql::Explain(_) => {
- return NotSupportedSnafu {
- feat: "TQL EXPLAIN",
- }
- .fail()
+ Tql::Explain(explain) => {
+ let promql = PromQuery {
+ query: explain.query,
+ ..PromQuery::default()
+ };
+ let params = HashMap::from([("name".to_string(), EXPLAIN_NODE_NAME.to_string())]);
+ QueryLanguageParser::parse_promql(&promql)
+ .context(ParseQuerySnafu)?
+ .post_process(params)
+ .unwrap()
+ }
+ Tql::Analyze(tql_analyze) => {
+ let promql = PromQuery {
+ start: tql_analyze.start,
+ end: tql_analyze.end,
+ step: tql_analyze.step,
+ query: tql_analyze.query,
+ };
+ let params = HashMap::from([("name".to_string(), ANALYZE_NODE_NAME.to_string())]);
+ QueryLanguageParser::parse_promql(&promql)
+ .context(ParseQuerySnafu)?
+ .post_process(params)
+ .unwrap()
}
};
+ let plan = self
+ .query_engine
+ .planner()
+ .plan(stmt, query_ctx.clone())
+ .await
+ .context(PlanStatementSnafu)?;
self.query_engine
.execute(plan, query_ctx)
.await
diff --git a/src/promql/src/planner.rs b/src/promql/src/planner.rs
index 69a79309789a..13743cd2eaf3 100644
--- a/src/promql/src/planner.rs
+++ b/src/promql/src/planner.rs
@@ -393,10 +393,30 @@ impl PromPlanner {
.build()
.context(DataFusionPlanningSnafu)?
}
- PromExpr::Extension(_) => UnsupportedExprSnafu {
- name: "Prom Extension",
+ PromExpr::Extension(promql_parser::parser::ast::Extension { expr }) => {
+ let children = expr.children();
+ let plan = self.prom_expr_to_plan(children[0].clone()).await?;
+ // Wrapper for the explanation/analyze of the existing plan
+ // https://docs.rs/datafusion-expr/latest/datafusion_expr/logical_plan/builder/struct.LogicalPlanBuilder.html#method.explain
+                // if `analyze` is true, runs the actual plan and produces
+                // information about metrics during the run.
+                // if `verbose` is true, prints out additional details when the VERBOSE keyword is specified
+ match expr.name() {
+ "ANALYZE" => LogicalPlanBuilder::from(plan)
+ .explain(false, true)
+ .unwrap()
+ .build()
+ .context(DataFusionPlanningSnafu)?,
+ "EXPLAIN" => LogicalPlanBuilder::from(plan)
+ .explain(false, false)
+ .unwrap()
+ .build()
+ .context(DataFusionPlanningSnafu)?,
+ _ => LogicalPlanBuilder::empty(true)
+ .build()
+ .context(DataFusionPlanningSnafu)?,
+ }
}
- .fail()?,
};
Ok(res)
}
@@ -559,7 +579,7 @@ impl PromPlanner {
Ok(logical_plan)
}
- /// Convert [AggModifier] to [Column] exprs for aggregation.
+ /// Convert [LabelModifier] to [Column] exprs for aggregation.
/// Timestamp column and tag columns will be included.
///
/// # Side effect
diff --git a/src/query/src/error.rs b/src/query/src/error.rs
index 393728d9a959..3661dd7b5da9 100644
--- a/src/query/src/error.rs
+++ b/src/query/src/error.rs
@@ -26,6 +26,12 @@ pub enum Error {
#[snafu(display("Unsupported expr type: {}", name))]
UnsupportedExpr { name: String, location: Location },
+ #[snafu(display("Operation {} not implemented yet", operation))]
+ Unimplemented {
+ operation: String,
+ location: Location,
+ },
+
#[snafu(display("General catalog error: {}", source))]
Catalog {
#[snafu(backtrace)]
@@ -183,6 +189,7 @@ impl ErrorExt for Error {
match self {
QueryParse { .. } | MultipleStatements { .. } => StatusCode::InvalidSyntax,
UnsupportedExpr { .. }
+ | Unimplemented { .. }
| CatalogNotFound { .. }
| SchemaNotFound { .. }
| TableNotFound { .. }
diff --git a/src/query/src/parser.rs b/src/query/src/parser.rs
index 799dd6997b64..49971771e80a 100644
--- a/src/query/src/parser.rs
+++ b/src/query/src/parser.rs
@@ -12,6 +12,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::any::Any;
+use std::collections::HashMap;
+use std::sync::Arc;
use std::time::{Duration, SystemTime};
use chrono::DateTime;
@@ -19,7 +22,9 @@ use common_error::ext::PlainError;
use common_error::prelude::BoxedError;
use common_error::status_code::StatusCode;
use common_telemetry::timer;
-use promql_parser::parser::EvalStmt;
+use promql_parser::parser::ast::{Extension as NodeExtension, ExtensionExpr};
+use promql_parser::parser::Expr::Extension;
+use promql_parser::parser::{EvalStmt, Expr, ValueType};
use snafu::ResultExt;
use sql::dialect::GenericDialect;
use sql::parser::ParserContext;
@@ -27,10 +32,13 @@ use sql::statements::statement::Statement;
use crate::error::{
MultipleStatementsSnafu, ParseFloatSnafu, ParseTimestampSnafu, QueryParseSnafu, Result,
+ UnimplementedSnafu,
};
use crate::metrics::{METRIC_PARSE_PROMQL_ELAPSED, METRIC_PARSE_SQL_ELAPSED};
const DEFAULT_LOOKBACK: u64 = 5 * 60; // 5m
+pub const EXPLAIN_NODE_NAME: &str = "EXPLAIN";
+pub const ANALYZE_NODE_NAME: &str = "ANALYZE";
#[derive(Debug, Clone)]
pub enum QueryStatement {
@@ -38,6 +46,43 @@ pub enum QueryStatement {
Promql(EvalStmt),
}
+impl QueryStatement {
+ pub fn post_process(&self, params: HashMap<String, String>) -> Result<QueryStatement> {
+ match self {
+ QueryStatement::Sql(_) => UnimplementedSnafu {
+ operation: "sql post process",
+ }
+ .fail(),
+ QueryStatement::Promql(eval_stmt) => {
+ let node_name = match params.get("name") {
+ Some(name) => name.as_str(),
+ None => "",
+ };
+ let extension_node = Self::create_extension_node(node_name, &eval_stmt.expr);
+ Ok(QueryStatement::Promql(EvalStmt {
+ expr: Extension(extension_node.unwrap()),
+ start: eval_stmt.start,
+ end: eval_stmt.end,
+ interval: eval_stmt.interval,
+ lookback_delta: eval_stmt.lookback_delta,
+ }))
+ }
+ }
+ }
+
+ fn create_extension_node(node_name: &str, expr: &Expr) -> Option<NodeExtension> {
+ match node_name {
+ ANALYZE_NODE_NAME => Some(NodeExtension {
+ expr: Arc::new(AnalyzeExpr { expr: expr.clone() }),
+ }),
+ EXPLAIN_NODE_NAME => Some(NodeExtension {
+ expr: Arc::new(ExplainExpr { expr: expr.clone() }),
+ }),
+ _ => None,
+ }
+ }
+}
+
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct PromQuery {
pub query: String,
@@ -46,6 +91,17 @@ pub struct PromQuery {
pub step: String,
}
+impl Default for PromQuery {
+ fn default() -> Self {
+ PromQuery {
+ query: String::new(),
+ start: String::from("0"),
+ end: String::from("0"),
+ step: String::from("5m"),
+ }
+ }
+}
+
pub struct QueryLanguageParser {}
impl QueryLanguageParser {
@@ -66,7 +122,6 @@ impl QueryLanguageParser {
}
}
- // TODO(ruihang): implement this method when parser is ready.
pub fn parse_promql(query: &PromQuery) -> Result<QueryStatement> {
let _timer = timer!(METRIC_PARSE_PROMQL_ELAPSED);
@@ -142,6 +197,51 @@ fn max_system_timestamp() -> SystemTime {
.unwrap()
}
+macro_rules! define_node_ast_extension {
+ ($name:ident, $name_expr:ident, $expr_type:ty, $extension_name:expr) => {
+ /// The implementation of the `$name_expr` extension AST node
+ #[derive(Debug, Clone)]
+ pub struct $name_expr {
+ pub expr: $expr_type,
+ }
+
+ impl ExtensionExpr for $name_expr {
+ fn as_any(&self) -> &dyn Any {
+ self
+ }
+
+ fn name(&self) -> &str {
+ $extension_name
+ }
+
+ fn value_type(&self) -> ValueType {
+ self.expr.value_type()
+ }
+
+ fn children(&self) -> &[Expr] {
+ std::slice::from_ref(&self.expr)
+ }
+ }
+
+ #[allow(rustdoc::broken_intra_doc_links)]
+ #[derive(Debug, Clone)]
+ pub struct $name {
+ pub expr: Arc<$name_expr>,
+ }
+
+ impl $name {
+ pub fn new(expr: $expr_type) -> Self {
+ Self {
+ expr: Arc::new($name_expr { expr }),
+ }
+ }
+ }
+ };
+}
+
+define_node_ast_extension!(Analyze, AnalyzeExpr, Expr, ANALYZE_NODE_NAME);
+define_node_ast_extension!(Explain, ExplainExpr, Expr, EXPLAIN_NODE_NAME);
+
#[cfg(test)]
mod test {
use super::*;
diff --git a/src/sql/src/parsers/tql_parser.rs b/src/sql/src/parsers/tql_parser.rs
index 8b82a9082ee9..9ef6d97ea4ba 100644
--- a/src/sql/src/parsers/tql_parser.rs
+++ b/src/sql/src/parsers/tql_parser.rs
@@ -20,7 +20,7 @@ use sqlparser::tokenizer::Token;
use crate::error::{self, Result};
use crate::parser::ParserContext;
use crate::statements::statement::Statement;
-use crate::statements::tql::{Tql, TqlEval, TqlExplain};
+use crate::statements::tql::{Tql, TqlAnalyze, TqlEval, TqlExplain};
pub const TQL: &str = "TQL";
const EVAL: &str = "EVAL";
@@ -31,6 +31,7 @@ use sqlparser::parser::Parser;
/// TQL extension parser, including:
/// - TQL EVAL <query>
/// - TQL EXPLAIN <query>
+/// - TQL ANALYZE <query>
impl<'a> ParserContext<'a> {
pub(crate) fn parse_tql(&mut self) -> Result<Statement> {
self.parser.next_token();
@@ -53,6 +54,11 @@ impl<'a> ParserContext<'a> {
self.parse_tql_explain()
}
+ Keyword::ANALYZE => {
+ self.parser.next_token();
+ self.parse_tql_analyze()
+ .context(error::SyntaxSnafu { sql: self.sql })
+ }
_ => self.unsupported(self.peek_token_as_string()),
}
}
@@ -122,10 +128,39 @@ impl<'a> ParserContext<'a> {
}
fn parse_tql_explain(&mut self) -> Result<Statement> {
- let query = Self::parse_tql_query(&mut self.parser, self.sql, EXPLAIN)
+ let parser = &mut self.parser;
+ let delimiter = match parser.expect_token(&Token::LParen) {
+ Ok(_) => ")",
+ Err(_) => EXPLAIN,
+ };
+ let start = Self::parse_string_or_number(parser, Token::Comma).unwrap_or("0".to_string());
+ let end = Self::parse_string_or_number(parser, Token::Comma).unwrap_or("0".to_string());
+ let step = Self::parse_string_or_number(parser, Token::RParen).unwrap_or("5m".to_string());
+ let query = Self::parse_tql_query(parser, self.sql, delimiter)
.context(error::SyntaxSnafu { sql: self.sql })?;
- Ok(Statement::Tql(Tql::Explain(TqlExplain { query })))
+ Ok(Statement::Tql(Tql::Explain(TqlExplain {
+ query,
+ start,
+ end,
+ step,
+ })))
+ }
+
+ // TODO code reuse from `parse_tql_eval`
+ fn parse_tql_analyze(&mut self) -> std::result::Result<Statement, ParserError> {
+ let parser = &mut self.parser;
+ parser.expect_token(&Token::LParen)?;
+ let start = Self::parse_string_or_number(parser, Token::Comma)?;
+ let end = Self::parse_string_or_number(parser, Token::Comma)?;
+ let step = Self::parse_string_or_number(parser, Token::RParen)?;
+ let query = Self::parse_tql_query(parser, self.sql, ")")?;
+ Ok(Statement::Tql(Tql::Analyze(TqlAnalyze {
+ start,
+ end,
+ step,
+ query,
+ })))
}
}
@@ -135,7 +170,7 @@ mod tests {
use super::*;
#[test]
- fn test_parse_tql() {
+ fn test_parse_tql_eval() {
let sql = "TQL EVAL (1676887657, 1676887659, '1m') http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m";
let mut result = ParserContext::create_with_dialect(sql, &GenericDialect {}).unwrap();
@@ -191,7 +226,10 @@ mod tests {
}
_ => unreachable!(),
}
+ }
+ #[test]
+ fn test_parse_tql_explain() {
let sql = "TQL EXPLAIN http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m";
let mut result = ParserContext::create_with_dialect(sql, &GenericDialect {}).unwrap();
@@ -201,6 +239,42 @@ mod tests {
match statement {
Statement::Tql(Tql::Explain(explain)) => {
assert_eq!(explain.query, "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m");
+ assert_eq!(explain.start, "0");
+ assert_eq!(explain.end, "0");
+ assert_eq!(explain.step, "5m");
+ }
+ _ => unreachable!(),
+ }
+
+ let sql = "TQL EXPLAIN (20,100,10) http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m";
+
+ let mut result = ParserContext::create_with_dialect(sql, &GenericDialect {}).unwrap();
+ assert_eq!(1, result.len());
+
+ let statement = result.remove(0);
+ match statement {
+ Statement::Tql(Tql::Explain(explain)) => {
+ assert_eq!(explain.query, "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m");
+ assert_eq!(explain.start, "20");
+ assert_eq!(explain.end, "100");
+ assert_eq!(explain.step, "10");
+ }
+ _ => unreachable!(),
+ }
+ }
+
+ #[test]
+ fn test_parse_tql_analyze() {
+ let sql = "TQL ANALYZE (1676887657.1, 1676887659.5, 30.3) http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m";
+ let mut result = ParserContext::create_with_dialect(sql, &GenericDialect {}).unwrap();
+ assert_eq!(1, result.len());
+ let statement = result.remove(0);
+ match statement {
+ Statement::Tql(Tql::Analyze(analyze)) => {
+ assert_eq!(analyze.start, "1676887657.1");
+ assert_eq!(analyze.end, "1676887659.5");
+ assert_eq!(analyze.step, "30.3");
+ assert_eq!(analyze.query, "http_requests_total{environment=~'staging|testing|development',method!='GET'} @ 1609746000 offset 5m");
}
_ => unreachable!(),
}
diff --git a/src/sql/src/statements/tql.rs b/src/sql/src/statements/tql.rs
index 6967c46fb7ab..d9b7187bf733 100644
--- a/src/sql/src/statements/tql.rs
+++ b/src/sql/src/statements/tql.rs
@@ -15,6 +15,7 @@
pub enum Tql {
Eval(TqlEval),
Explain(TqlExplain),
+ Analyze(TqlAnalyze),
}
#[derive(Debug, Clone, PartialEq, Eq)]
@@ -25,7 +26,20 @@ pub struct TqlEval {
pub query: String,
}
+/// TQL EXPLAIN (like SQL EXPLAIN): doesn't execute the query but tells how the query would be executed.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct TqlExplain {
+ pub start: String,
+ pub end: String,
+ pub step: String,
+ pub query: String,
+}
+
+/// TQL ANALYZE (like SQL ANALYZE): executes the plan and tells the detailed per-step execution time.
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct TqlAnalyze {
+ pub start: String,
+ pub end: String,
+ pub step: String,
pub query: String,
}
diff --git a/tests-integration/src/tests/promql_test.rs b/tests-integration/src/tests/promql_test.rs
index 9e8fbfb1ad61..61b1f177f010 100644
--- a/tests-integration/src/tests/promql_test.rs
+++ b/tests-integration/src/tests/promql_test.rs
@@ -42,9 +42,7 @@ async fn create_insert_query_assert(
let query = PromQuery {
query: promql.to_string(),
- start: "0".to_string(),
- end: "0".to_string(),
- step: "5m".to_string(),
+ ..PromQuery::default()
};
let QueryStatement::Promql(mut eval_stmt) = QueryLanguageParser::parse_promql(&query).unwrap() else { unreachable!() };
eval_stmt.start = start;
diff --git a/tests/cases/standalone/common/tql/analyze.result b/tests/cases/standalone/common/tql/analyze.result
new file mode 100644
index 000000000000..cfe9239883dd
--- /dev/null
+++ b/tests/cases/standalone/common/tql/analyze.result
@@ -0,0 +1,31 @@
+CREATE TABLE test(i DOUBLE, j TIMESTAMP TIME INDEX, k STRING PRIMARY KEY);
+
+Affected Rows: 0
+
+INSERT INTO test VALUES (1, 1, "a"), (1, 1, "b"), (2, 2, "a");
+
+Affected Rows: 3
+
+-- SQLNESS REPLACE (metrics.*) REDACTED
+-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED
+-- SQLNESS REPLACE (-+) -
+-- SQLNESS REPLACE (\s\s+) _
+TQL ANALYZE (0, 10, '5s') test;
+
++-+-+
+| plan_type_| plan_|
++-+-+
+| Plan with Metrics | CoalescePartitionsExec, REDACTED
+|_|_PromInstantManipulateExec: range=[0..10000], lookback=[300000], interval=[5000], time index=[j], REDACTED
+|_|_PromSeriesNormalizeExec: offset=[0], time index=[j], filter NaN: [false], REDACTED
+|_|_PromSeriesDivideExec: tags=["k"], REDACTED
+|_|_RepartitionExec: partitioning=REDACTED
+|_|_RepartitionExec: partitioning=REDACTED
+|_|_ExecutionPlan(PlaceHolder), REDACTED
+|_|_|
++-+-+
+
+DROP TABLE test;
+
+Affected Rows: 1
+
diff --git a/tests/cases/standalone/common/tql/analyze.sql b/tests/cases/standalone/common/tql/analyze.sql
new file mode 100644
index 000000000000..677e9069bcd9
--- /dev/null
+++ b/tests/cases/standalone/common/tql/analyze.sql
@@ -0,0 +1,13 @@
+CREATE TABLE test(i DOUBLE, j TIMESTAMP TIME INDEX, k STRING PRIMARY KEY);
+
+-- insert two points at 1ms and one point at 2ms
+INSERT INTO test VALUES (1, 1, "a"), (1, 1, "b"), (2, 2, "a");
+
+-- analyze at 0s, 5s and 10s. No point at 0s.
+-- SQLNESS REPLACE (metrics.*) REDACTED
+-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED
+-- SQLNESS REPLACE (-+) -
+-- SQLNESS REPLACE (\s\s+) _
+TQL ANALYZE (0, 10, '5s') test;
+
+DROP TABLE test;
diff --git a/tests/cases/standalone/common/tql/explain.result b/tests/cases/standalone/common/tql/explain.result
new file mode 100644
index 000000000000..ee22ed598da1
--- /dev/null
+++ b/tests/cases/standalone/common/tql/explain.result
@@ -0,0 +1,31 @@
+CREATE TABLE test(i DOUBLE, j TIMESTAMP TIME INDEX, k STRING PRIMARY KEY);
+
+Affected Rows: 0
+
+INSERT INTO test VALUES (1, 1, "a"), (1, 1, "b"), (2, 2, "a");
+
+Affected Rows: 3
+
+-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED
+TQL EXPLAIN (0, 10, '5s') test;
+
++---------------+---------------------------------------------------------------------------------------------------------------------------------------------------+
+| plan_type | plan |
++---------------+---------------------------------------------------------------------------------------------------------------------------------------------------+
+| logical_plan | PromInstantManipulate: range=[0..0], lookback=[300000], interval=[300000], time index=[j] |
+| | PromSeriesNormalize: offset=[0], time index=[j], filter NaN: [false] |
+| | PromSeriesDivide: tags=["k"] |
+| | Sort: test.k DESC NULLS LAST, test.j DESC NULLS LAST |
+| | TableScan: test projection=[i, j, k], partial_filters=[j >= TimestampMillisecond(-300000, None), j <= TimestampMillisecond(300000, None)] |
+| physical_plan | PromInstantManipulateExec: range=[0..0], lookback=[300000], interval=[300000], time index=[j] |
+| | PromSeriesNormalizeExec: offset=[0], time index=[j], filter NaN: [false] |
+| | PromSeriesDivideExec: tags=["k"] |
+| | RepartitionExec: partitioning=REDACTED
+| | ExecutionPlan(PlaceHolder) |
+| | |
++---------------+---------------------------------------------------------------------------------------------------------------------------------------------------+
+
+DROP TABLE test;
+
+Affected Rows: 1
+
diff --git a/tests/cases/standalone/common/tql/explain.sql b/tests/cases/standalone/common/tql/explain.sql
new file mode 100644
index 000000000000..448c185c01b7
--- /dev/null
+++ b/tests/cases/standalone/common/tql/explain.sql
@@ -0,0 +1,10 @@
+CREATE TABLE test(i DOUBLE, j TIMESTAMP TIME INDEX, k STRING PRIMARY KEY);
+
+-- insert two points at 1ms and one point at 2ms
+INSERT INTO test VALUES (1, 1, "a"), (1, 1, "b"), (2, 2, "a");
+
+-- explain at 0s, 5s and 10s. No point at 0s.
+-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED
+TQL EXPLAIN (0, 10, '5s') test;
+
+DROP TABLE test;
|
feat
|
add initial implementation for explain & analyze (#1427)
|
8a74bd36f5316614a8289076b2719730bde88d01
|
2023-12-12 15:15:09
|
Ruihang Xia
|
style: rename `*Adaptor` to `*Adapter` (#2914)
| false
|
diff --git a/src/catalog/src/information_schema.rs b/src/catalog/src/information_schema.rs
index ec118f942cad..484529cd57a4 100644
--- a/src/catalog/src/information_schema.rs
+++ b/src/catalog/src/information_schema.rs
@@ -20,7 +20,7 @@ use std::sync::{Arc, Weak};
use common_catalog::consts::INFORMATION_SCHEMA_NAME;
use common_error::ext::BoxedError;
-use common_recordbatch::{RecordBatchStreamAdaptor, SendableRecordBatchStream};
+use common_recordbatch::{RecordBatchStreamWrapper, SendableRecordBatchStream};
use datatypes::schema::SchemaRef;
use futures_util::StreamExt;
use snafu::ResultExt;
@@ -171,7 +171,7 @@ impl DataSource for InformationTableDataSource {
None => batch,
});
- let stream = RecordBatchStreamAdaptor {
+ let stream = RecordBatchStreamWrapper {
schema: projected_schema,
stream: Box::pin(stream),
output_ordering: None,
diff --git a/src/client/src/database.rs b/src/client/src/database.rs
index ebbb4fa60a44..4060cc4797fc 100644
--- a/src/client/src/database.rs
+++ b/src/client/src/database.rs
@@ -27,7 +27,7 @@ use common_error::ext::{BoxedError, ErrorExt};
use common_grpc::flight::{FlightDecoder, FlightMessage};
use common_query::Output;
use common_recordbatch::error::ExternalSnafu;
-use common_recordbatch::RecordBatchStreamAdaptor;
+use common_recordbatch::RecordBatchStreamWrapper;
use common_telemetry::logging;
use common_telemetry::tracing_context::W3cTrace;
use futures_util::StreamExt;
@@ -315,7 +315,7 @@ impl Database {
yield Ok(record_batch);
}
}));
- let record_batch_stream = RecordBatchStreamAdaptor {
+ let record_batch_stream = RecordBatchStreamWrapper {
schema,
stream,
output_ordering: None,
diff --git a/src/client/src/region.rs b/src/client/src/region.rs
index 95bef40b2ac2..3967c23ed047 100644
--- a/src/client/src/region.rs
+++ b/src/client/src/region.rs
@@ -23,7 +23,7 @@ use common_grpc::flight::{FlightDecoder, FlightMessage};
use common_meta::datanode_manager::{AffectedRows, Datanode};
use common_meta::error::{self as meta_error, Result as MetaResult};
use common_recordbatch::error::ExternalSnafu;
-use common_recordbatch::{RecordBatchStreamAdaptor, SendableRecordBatchStream};
+use common_recordbatch::{RecordBatchStreamWrapper, SendableRecordBatchStream};
use common_telemetry::error;
use prost::Message;
use snafu::{location, Location, OptionExt, ResultExt};
@@ -136,7 +136,7 @@ impl RegionRequester {
yield Ok(record_batch);
}
}));
- let record_batch_stream = RecordBatchStreamAdaptor {
+ let record_batch_stream = RecordBatchStreamWrapper {
schema,
stream,
output_ordering: None,
diff --git a/src/common/recordbatch/src/lib.rs b/src/common/recordbatch/src/lib.rs
index 0e34dc4cff04..1b0d33f915a2 100644
--- a/src/common/recordbatch/src/lib.rs
+++ b/src/common/recordbatch/src/lib.rs
@@ -202,16 +202,16 @@ impl Stream for SimpleRecordBatchStream {
}
/// Adapt a [Stream] of [RecordBatch] to a [RecordBatchStream].
-pub struct RecordBatchStreamAdaptor<S> {
+pub struct RecordBatchStreamWrapper<S> {
pub schema: SchemaRef,
pub stream: S,
pub output_ordering: Option<Vec<OrderOption>>,
}
-impl<S> RecordBatchStreamAdaptor<S> {
- /// Creates a RecordBatchStreamAdaptor without output ordering requirement.
- pub fn new(schema: SchemaRef, stream: S) -> RecordBatchStreamAdaptor<S> {
- RecordBatchStreamAdaptor {
+impl<S> RecordBatchStreamWrapper<S> {
+ /// Creates a [RecordBatchStreamWrapper] without output ordering requirement.
+ pub fn new(schema: SchemaRef, stream: S) -> RecordBatchStreamWrapper<S> {
+ RecordBatchStreamWrapper {
schema,
stream,
output_ordering: None,
@@ -220,7 +220,7 @@ impl<S> RecordBatchStreamAdaptor<S> {
}
impl<S: Stream<Item = Result<RecordBatch>> + Unpin> RecordBatchStream
- for RecordBatchStreamAdaptor<S>
+ for RecordBatchStreamWrapper<S>
{
fn schema(&self) -> SchemaRef {
self.schema.clone()
@@ -231,7 +231,7 @@ impl<S: Stream<Item = Result<RecordBatch>> + Unpin> RecordBatchStream
}
}
-impl<S: Stream<Item = Result<RecordBatch>> + Unpin> Stream for RecordBatchStreamAdaptor<S> {
+impl<S: Stream<Item = Result<RecordBatch>> + Unpin> Stream for RecordBatchStreamWrapper<S> {
type Item = Result<RecordBatch>;
fn poll_next(mut self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
diff --git a/src/frontend/src/server.rs b/src/frontend/src/server.rs
index 5831a9ab3215..712bbce0b6f2 100644
--- a/src/frontend/src/server.rs
+++ b/src/frontend/src/server.rs
@@ -25,8 +25,8 @@ use servers::metrics_handler::MetricsHandler;
use servers::mysql::server::{MysqlServer, MysqlSpawnConfig, MysqlSpawnRef};
use servers::opentsdb::OpentsdbServer;
use servers::postgres::PostgresServer;
-use servers::query_handler::grpc::ServerGrpcQueryHandlerAdaptor;
-use servers::query_handler::sql::ServerSqlQueryHandlerAdaptor;
+use servers::query_handler::grpc::ServerGrpcQueryHandlerAdapter;
+use servers::query_handler::sql::ServerSqlQueryHandlerAdapter;
use servers::server::{Server, ServerHandler, ServerHandlers};
use snafu::ResultExt;
@@ -70,7 +70,7 @@ impl Services {
};
let grpc_server = GrpcServer::new(
Some(grpc_config),
- Some(ServerGrpcQueryHandlerAdaptor::arc(instance.clone())),
+ Some(ServerGrpcQueryHandlerAdapter::arc(instance.clone())),
Some(instance.clone()),
None,
None,
@@ -88,8 +88,8 @@ impl Services {
let mut http_server_builder = HttpServerBuilder::new(http_options.clone());
let _ = http_server_builder
- .with_sql_handler(ServerSqlQueryHandlerAdaptor::arc(instance.clone()))
- .with_grpc_handler(ServerGrpcQueryHandlerAdaptor::arc(instance.clone()));
+ .with_sql_handler(ServerSqlQueryHandlerAdapter::arc(instance.clone()))
+ .with_grpc_handler(ServerGrpcQueryHandlerAdapter::arc(instance.clone()));
if let Some(user_provider) = user_provider.clone() {
let _ = http_server_builder.with_user_provider(user_provider);
@@ -137,7 +137,7 @@ impl Services {
let mysql_server = MysqlServer::create_server(
mysql_io_runtime,
Arc::new(MysqlSpawnRef::new(
- ServerSqlQueryHandlerAdaptor::arc(instance.clone()),
+ ServerSqlQueryHandlerAdapter::arc(instance.clone()),
user_provider.clone(),
)),
Arc::new(MysqlSpawnConfig::new(
@@ -167,7 +167,7 @@ impl Services {
);
let pg_server = Box::new(PostgresServer::new(
- ServerSqlQueryHandlerAdaptor::arc(instance.clone()),
+ ServerSqlQueryHandlerAdapter::arc(instance.clone()),
opts.tls.clone(),
pg_io_runtime,
user_provider.clone(),
diff --git a/src/mito2/src/read/seq_scan.rs b/src/mito2/src/read/seq_scan.rs
index 31956a0c2d22..f963568cad59 100644
--- a/src/mito2/src/read/seq_scan.rs
+++ b/src/mito2/src/read/seq_scan.rs
@@ -20,7 +20,7 @@ use std::time::{Duration, Instant};
use async_stream::try_stream;
use common_error::ext::BoxedError;
use common_recordbatch::error::ExternalSnafu;
-use common_recordbatch::{RecordBatch, RecordBatchStreamAdaptor, SendableRecordBatchStream};
+use common_recordbatch::{RecordBatch, RecordBatchStreamWrapper, SendableRecordBatchStream};
use common_telemetry::{debug, error};
use common_time::range::TimestampRange;
use snafu::ResultExt;
@@ -164,7 +164,7 @@ impl SeqScan {
// Update metrics.
READ_STAGE_ELAPSED.with_label_values(&["total"]).observe(metrics.scan_cost.as_secs_f64());
};
- let stream = Box::pin(RecordBatchStreamAdaptor::new(
+ let stream = Box::pin(RecordBatchStreamWrapper::new(
self.mapper.output_schema(),
Box::pin(stream),
));
diff --git a/src/query/src/dist_plan/merge_scan.rs b/src/query/src/dist_plan/merge_scan.rs
index 845e7b813d6c..80f1492d459e 100644
--- a/src/query/src/dist_plan/merge_scan.rs
+++ b/src/query/src/dist_plan/merge_scan.rs
@@ -25,7 +25,7 @@ use common_query::physical_plan::TaskContext;
use common_recordbatch::adapter::DfRecordBatchStreamAdapter;
use common_recordbatch::error::ExternalSnafu;
use common_recordbatch::{
- DfSendableRecordBatchStream, RecordBatch, RecordBatchStreamAdaptor, SendableRecordBatchStream,
+ DfSendableRecordBatchStream, RecordBatch, RecordBatchStreamWrapper, SendableRecordBatchStream,
};
use common_telemetry::tracing;
use common_telemetry::tracing_context::TracingContext;
@@ -217,7 +217,7 @@ impl MergeScanExec {
}
}));
- Ok(Box::pin(RecordBatchStreamAdaptor {
+ Ok(Box::pin(RecordBatchStreamWrapper {
schema: self.schema.clone(),
stream,
output_ordering: None,
diff --git a/src/servers/src/http.rs b/src/servers/src/http.rs
index 9fdbad5237a9..864ed652ca00 100644
--- a/src/servers/src/http.rs
+++ b/src/servers/src/http.rs
@@ -802,8 +802,8 @@ mod test {
use super::*;
use crate::error::Error;
- use crate::query_handler::grpc::{GrpcQueryHandler, ServerGrpcQueryHandlerAdaptor};
- use crate::query_handler::sql::{ServerSqlQueryHandlerAdaptor, SqlQueryHandler};
+ use crate::query_handler::grpc::{GrpcQueryHandler, ServerGrpcQueryHandlerAdapter};
+ use crate::query_handler::sql::{ServerSqlQueryHandlerAdapter, SqlQueryHandler};
struct DummyInstance {
_tx: mpsc::Sender<(String, Vec<u8>)>,
@@ -869,8 +869,8 @@ mod test {
fn make_test_app(tx: mpsc::Sender<(String, Vec<u8>)>) -> Router {
let instance = Arc::new(DummyInstance { _tx: tx });
- let sql_instance = ServerSqlQueryHandlerAdaptor::arc(instance.clone());
- let grpc_instance = ServerGrpcQueryHandlerAdaptor::arc(instance);
+ let sql_instance = ServerSqlQueryHandlerAdapter::arc(instance.clone());
+ let grpc_instance = ServerGrpcQueryHandlerAdapter::arc(instance);
let server = HttpServerBuilder::new(HttpOptions::default())
.with_sql_handler(sql_instance)
.with_grpc_handler(grpc_instance)
diff --git a/src/servers/src/query_handler/grpc.rs b/src/servers/src/query_handler/grpc.rs
index ce99c31dde63..01464012d647 100644
--- a/src/servers/src/query_handler/grpc.rs
+++ b/src/servers/src/query_handler/grpc.rs
@@ -37,16 +37,16 @@ pub trait GrpcQueryHandler {
) -> std::result::Result<Output, Self::Error>;
}
-pub struct ServerGrpcQueryHandlerAdaptor<E>(GrpcQueryHandlerRef<E>);
+pub struct ServerGrpcQueryHandlerAdapter<E>(GrpcQueryHandlerRef<E>);
-impl<E> ServerGrpcQueryHandlerAdaptor<E> {
+impl<E> ServerGrpcQueryHandlerAdapter<E> {
pub fn arc(handler: GrpcQueryHandlerRef<E>) -> Arc<Self> {
Arc::new(Self(handler))
}
}
#[async_trait]
-impl<E> GrpcQueryHandler for ServerGrpcQueryHandlerAdaptor<E>
+impl<E> GrpcQueryHandler for ServerGrpcQueryHandlerAdapter<E>
where
E: ErrorExt + Send + Sync + 'static,
{
diff --git a/src/servers/src/query_handler/sql.rs b/src/servers/src/query_handler/sql.rs
index cc788ad27213..79e63f86e7a7 100644
--- a/src/servers/src/query_handler/sql.rs
+++ b/src/servers/src/query_handler/sql.rs
@@ -64,16 +64,16 @@ pub trait SqlQueryHandler {
) -> std::result::Result<bool, Self::Error>;
}
-pub struct ServerSqlQueryHandlerAdaptor<E>(SqlQueryHandlerRef<E>);
+pub struct ServerSqlQueryHandlerAdapter<E>(SqlQueryHandlerRef<E>);
-impl<E> ServerSqlQueryHandlerAdaptor<E> {
+impl<E> ServerSqlQueryHandlerAdapter<E> {
pub fn arc(handler: SqlQueryHandlerRef<E>) -> Arc<Self> {
Arc::new(Self(handler))
}
}
#[async_trait]
-impl<E> SqlQueryHandler for ServerSqlQueryHandlerAdaptor<E>
+impl<E> SqlQueryHandler for ServerSqlQueryHandlerAdapter<E>
where
E: ErrorExt + Send + Sync + 'static,
{
diff --git a/tests-integration/src/test_util.rs b/tests-integration/src/test_util.rs
index ad69c40ada33..6bb91b89eb98 100644
--- a/tests-integration/src/test_util.rs
+++ b/tests-integration/src/test_util.rs
@@ -45,8 +45,8 @@ use servers::http::{HttpOptions, HttpServerBuilder};
use servers::metrics_handler::MetricsHandler;
use servers::mysql::server::{MysqlServer, MysqlSpawnConfig, MysqlSpawnRef};
use servers::postgres::PostgresServer;
-use servers::query_handler::grpc::ServerGrpcQueryHandlerAdaptor;
-use servers::query_handler::sql::{ServerSqlQueryHandlerAdaptor, SqlQueryHandler};
+use servers::query_handler::grpc::ServerGrpcQueryHandlerAdapter;
+use servers::query_handler::sql::{ServerSqlQueryHandlerAdapter, SqlQueryHandler};
use servers::server::Server;
use servers::Mode;
use session::context::QueryContext;
@@ -378,8 +378,8 @@ pub async fn setup_test_http_app(store_type: StorageType, name: &str) -> (Router
..Default::default()
};
let http_server = HttpServerBuilder::new(http_opts)
- .with_sql_handler(ServerSqlQueryHandlerAdaptor::arc(instance.instance.clone()))
- .with_grpc_handler(ServerGrpcQueryHandlerAdaptor::arc(
+ .with_sql_handler(ServerSqlQueryHandlerAdapter::arc(instance.instance.clone()))
+ .with_grpc_handler(ServerGrpcQueryHandlerAdapter::arc(
instance.instance.clone(),
))
.with_metrics_handler(MetricsHandler)
@@ -412,8 +412,8 @@ pub async fn setup_test_http_app_with_frontend_and_user_provider(
let mut http_server = HttpServerBuilder::new(http_opts);
http_server
- .with_sql_handler(ServerSqlQueryHandlerAdaptor::arc(instance.instance.clone()))
- .with_grpc_handler(ServerGrpcQueryHandlerAdaptor::arc(
+ .with_sql_handler(ServerSqlQueryHandlerAdapter::arc(instance.instance.clone()))
+ .with_grpc_handler(ServerGrpcQueryHandlerAdapter::arc(
instance.instance.clone(),
))
.with_script_handler(instance.instance.clone())
@@ -449,8 +449,8 @@ pub async fn setup_test_prom_app_with_frontend(
};
let frontend_ref = instance.instance.clone();
let http_server = HttpServerBuilder::new(http_opts)
- .with_sql_handler(ServerSqlQueryHandlerAdaptor::arc(frontend_ref.clone()))
- .with_grpc_handler(ServerGrpcQueryHandlerAdaptor::arc(frontend_ref.clone()))
+ .with_sql_handler(ServerSqlQueryHandlerAdapter::arc(frontend_ref.clone()))
+ .with_grpc_handler(ServerGrpcQueryHandlerAdapter::arc(frontend_ref.clone()))
.with_script_handler(frontend_ref.clone())
.with_prom_handler(frontend_ref.clone())
.with_prometheus_handler(frontend_ref)
@@ -493,14 +493,14 @@ pub async fn setup_grpc_server_with(
let fe_instance_ref = instance.instance.clone();
let flight_handler = Arc::new(GreptimeRequestHandler::new(
- ServerGrpcQueryHandlerAdaptor::arc(fe_instance_ref.clone()),
+ ServerGrpcQueryHandlerAdapter::arc(fe_instance_ref.clone()),
user_provider.clone(),
runtime.clone(),
));
let fe_grpc_server = Arc::new(GrpcServer::new(
grpc_config,
- Some(ServerGrpcQueryHandlerAdaptor::arc(fe_instance_ref.clone())),
+ Some(ServerGrpcQueryHandlerAdapter::arc(fe_instance_ref.clone())),
Some(fe_instance_ref.clone()),
Some(flight_handler),
None,
@@ -563,7 +563,7 @@ pub async fn setup_mysql_server_with_user_provider(
let fe_mysql_server = Arc::new(MysqlServer::create_server(
runtime,
Arc::new(MysqlSpawnRef::new(
- ServerSqlQueryHandlerAdaptor::arc(fe_instance_ref),
+ ServerSqlQueryHandlerAdapter::arc(fe_instance_ref),
user_provider,
)),
Arc::new(MysqlSpawnConfig::new(
@@ -615,7 +615,7 @@ pub async fn setup_pg_server_with_user_provider(
..Default::default()
};
let fe_pg_server = Arc::new(Box::new(PostgresServer::new(
- ServerSqlQueryHandlerAdaptor::arc(fe_instance_ref),
+ ServerSqlQueryHandlerAdapter::arc(fe_instance_ref),
opts.tls.clone(),
runtime,
user_provider,
|
style
|
rename `*Adaptor` to `*Adapter` (#2914)
|
93d9f48dd7f7ec206085194cf0c30327de0447b5
|
2024-02-23 12:00:09
|
dependabot[bot]
|
build(deps): bump libgit2-sys from 0.16.1+1.7.1 to 0.16.2+1.7.2 (#3367)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index d5790c499c58..84d28103f312 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4695,9 +4695,9 @@ checksum = "302d7ab3130588088d277783b1e2d2e10c9e9e4a16dd9050e6ec93fb3e7048f4"
[[package]]
name = "libgit2-sys"
-version = "0.16.1+1.7.1"
+version = "0.16.2+1.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f2a2bb3680b094add03bb3732ec520ece34da31a8cd2d633d1389d0f0fb60d0c"
+checksum = "ee4126d8b4ee5c9d9ea891dd875cfdc1e9d0950437179104b183d7d8a74d24e8"
dependencies = [
"cc",
"libc",
|
build
|
bump libgit2-sys from 0.16.1+1.7.1 to 0.16.2+1.7.2 (#3367)
|
9dd6e033a72b42d9f2da7bb7dbd479aec755c432
|
2024-05-28 08:51:43
|
tison
|
refactor: move Database to client crate behind testing feature (#4059)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 62261bce1688..aeaec198f71f 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -904,7 +904,6 @@ dependencies = [
"rskafka",
"serde",
"store-api",
- "tests-integration",
"tokio",
"toml 0.8.12",
"uuid",
@@ -9631,7 +9630,6 @@ dependencies = [
"strum 0.25.0",
"table",
"tempfile",
- "tests-integration",
"tikv-jemalloc-ctl",
"tokio",
"tokio-postgres",
@@ -9996,7 +9994,6 @@ dependencies = [
"serde_json",
"sqlness",
"tempfile",
- "tests-integration",
"tinytemplate",
"tokio",
]
diff --git a/Cargo.toml b/Cargo.toml
index 8b02b3f0ce96..aa10d987d274 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -233,8 +233,6 @@ sql = { path = "src/sql" }
store-api = { path = "src/store-api" }
substrait = { path = "src/common/substrait" }
table = { path = "src/table" }
-# TODO some code depends on this
-tests-integration = { path = "tests-integration" }
[workspace.dependencies.meter-macros]
git = "https://github.com/GreptimeTeam/greptime-meter.git"
diff --git a/benchmarks/Cargo.toml b/benchmarks/Cargo.toml
index ed7f038596e3..dc5bf8ba9727 100644
--- a/benchmarks/Cargo.toml
+++ b/benchmarks/Cargo.toml
@@ -12,7 +12,7 @@ api.workspace = true
arrow.workspace = true
chrono.workspace = true
clap.workspace = true
-client.workspace = true
+client = { workspace = true, features = ["testing"] }
common-base.workspace = true
common-telemetry.workspace = true
common-wal.workspace = true
@@ -33,8 +33,6 @@ rand.workspace = true
rskafka.workspace = true
serde.workspace = true
store-api.workspace = true
-# TODO depend `Database` client
-tests-integration.workspace = true
tokio.workspace = true
toml.workspace = true
uuid.workspace = true
diff --git a/tests-integration/src/database.rs b/src/client/src/database.rs
similarity index 80%
rename from tests-integration/src/database.rs
rename to src/client/src/database.rs
index a378c68ea423..e310a73e584d 100644
--- a/tests-integration/src/database.rs
+++ b/src/client/src/database.rs
@@ -23,8 +23,6 @@ use api::v1::{
};
use arrow_flight::Ticket;
use async_stream::stream;
-use client::error::{ConvertFlightDataSnafu, Error, IllegalFlightMessagesSnafu, ServerSnafu};
-use client::{from_grpc_response, Client, Result};
use common_error::ext::{BoxedError, ErrorExt};
use common_grpc::flight::{FlightDecoder, FlightMessage};
use common_query::Output;
@@ -37,7 +35,8 @@ use prost::Message;
use snafu::{ensure, ResultExt};
use tonic::transport::Channel;
-pub const DEFAULT_LOOKBACK_STRING: &str = "5m";
+use crate::error::{ConvertFlightDataSnafu, Error, IllegalFlightMessagesSnafu, ServerSnafu};
+use crate::{from_grpc_response, Client, Result};
#[derive(Clone, Debug, Default)]
pub struct Database {
@@ -105,10 +104,18 @@ impl Database {
self.catalog = catalog.into();
}
+ pub fn catalog(&self) -> &String {
+ &self.catalog
+ }
+
pub fn set_schema(&mut self, schema: impl Into<String>) {
self.schema = schema.into();
}
+ pub fn schema(&self) -> &String {
+ &self.schema
+ }
+
pub fn set_timezone(&mut self, timezone: impl Into<String>) {
self.timezone = timezone.into();
}
@@ -156,6 +163,13 @@ impl Database {
.await
}
+ pub async fn logical_plan(&self, logical_plan: Vec<u8>) -> Result<Output> {
+ self.do_get(Request::Query(QueryRequest {
+ query: Some(Query::LogicalPlan(logical_plan)),
+ }))
+ .await
+ }
+
pub async fn create(&self, expr: CreateTableExpr) -> Result<Output> {
self.do_get(Request::Ddl(DdlRequest {
expr: Some(DdlExpr::CreateTable(expr)),
@@ -269,17 +283,12 @@ struct FlightContext {
#[cfg(test)]
mod tests {
+ use std::assert_matches::assert_matches;
+
use api::v1::auth_header::AuthScheme;
use api::v1::{AuthHeader, Basic};
- use clap::Parser;
- use client::Client;
- use cmd::error::Result as CmdResult;
- use cmd::options::GlobalOptions;
- use cmd::{cli, standalone, App};
- use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
- use common_telemetry::logging::LoggingOptions;
- use super::{Database, FlightContext};
+ use super::*;
#[test]
fn test_flight_ctx() {
@@ -295,76 +304,11 @@ mod tests {
auth_scheme: Some(basic),
});
- assert!(matches!(
+ assert_matches!(
ctx.auth_header,
Some(AuthHeader {
auth_scheme: Some(AuthScheme::Basic(_)),
})
- ))
- }
-
- #[tokio::test(flavor = "multi_thread")]
- async fn test_export_create_table_with_quoted_names() -> CmdResult<()> {
- let output_dir = tempfile::tempdir().unwrap();
-
- let standalone = standalone::Command::parse_from([
- "standalone",
- "start",
- "--data-home",
- &*output_dir.path().to_string_lossy(),
- ]);
-
- let standalone_opts = standalone.load_options(&GlobalOptions::default()).unwrap();
- let mut instance = standalone.build(standalone_opts).await?;
- instance.start().await?;
-
- let client = Client::with_urls(["127.0.0.1:4001"]);
- let database = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
- database
- .sql(r#"CREATE DATABASE "cli.export.create_table";"#)
- .await
- .unwrap();
- database
- .sql(
- r#"CREATE TABLE "cli.export.create_table"."a.b.c"(
- ts TIMESTAMP,
- TIME INDEX (ts)
- ) engine=mito;
- "#,
- )
- .await
- .unwrap();
-
- let output_dir = tempfile::tempdir().unwrap();
- let cli = cli::Command::parse_from([
- "cli",
- "export",
- "--addr",
- "127.0.0.1:4000",
- "--output-dir",
- &*output_dir.path().to_string_lossy(),
- "--target",
- "create-table",
- ]);
- let mut cli_app = cli.build(LoggingOptions::default()).await?;
- cli_app.start().await?;
-
- instance.stop().await?;
-
- let output_file = output_dir
- .path()
- .join("greptime-cli.export.create_table.sql");
- let res = std::fs::read_to_string(output_file).unwrap();
- let expect = r#"CREATE TABLE IF NOT EXISTS "a.b.c" (
- "ts" TIMESTAMP(3) NOT NULL,
- TIME INDEX ("ts")
-)
-
-ENGINE=mito
-;
-"#;
- assert_eq!(res.trim(), expect.trim());
-
- Ok(())
+ )
}
}
diff --git a/src/client/src/lib.rs b/src/client/src/lib.rs
index be8346faf7b0..0741c8e1c7a6 100644
--- a/src/client/src/lib.rs
+++ b/src/client/src/lib.rs
@@ -12,8 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+#![feature(assert_matches)]
+
mod client;
pub mod client_manager;
+#[cfg(feature = "testing")]
+mod database;
pub mod error;
pub mod load_balance;
mod metrics;
@@ -29,6 +33,8 @@ pub use common_recordbatch::{RecordBatches, SendableRecordBatchStream};
use snafu::OptionExt;
pub use self::client::Client;
+#[cfg(feature = "testing")]
+pub use self::database::Database;
pub use self::error::{Error, Result};
use crate::error::{IllegalDatabaseResponseSnafu, ServerSnafu};
diff --git a/src/cmd/Cargo.toml b/src/cmd/Cargo.toml
index c1a7dcdaec9c..9bc3d77564ad 100644
--- a/src/cmd/Cargo.toml
+++ b/src/cmd/Cargo.toml
@@ -80,6 +80,7 @@ tracing-appender = "0.2"
tikv-jemallocator = "0.5"
[dev-dependencies]
+client = { workspace = true, features = ["testing"] }
common-test-util.workspace = true
serde.workspace = true
temp-env = "0.3"
diff --git a/src/cmd/src/cli.rs b/src/cmd/src/cli.rs
index 3df64b91ebb5..fc443a169d2b 100644
--- a/src/cmd/src/cli.rs
+++ b/src/cmd/src/cli.rs
@@ -22,8 +22,8 @@ mod helper;
// Wait for https://github.com/GreptimeTeam/greptimedb/issues/2373
#[allow(unused)]
-// mod repl;
-// TODO(weny): Removes it
+mod repl;
+// TODO(tisonkun): migrate deprecated methods
#[allow(deprecated)]
mod upgrade;
@@ -31,8 +31,8 @@ use async_trait::async_trait;
use bench::BenchTableMetadataCommand;
use clap::Parser;
use common_telemetry::logging::{LoggingOptions, TracingOptions};
+pub use repl::Repl;
use tracing_appender::non_blocking::WorkerGuard;
-// pub use repl::Repl;
use upgrade::UpgradeCommand;
use self::export::ExportCommand;
diff --git a/src/cmd/src/cli/export.rs b/src/cmd/src/cli/export.rs
index b83ac16bfefe..00d916407458 100644
--- a/src/cmd/src/cli/export.rs
+++ b/src/cmd/src/cli/export.rs
@@ -434,3 +434,80 @@ fn split_database(database: &str) -> Result<(String, Option<String>)> {
Ok((catalog.to_string(), Some(schema.to_string())))
}
}
+
+#[cfg(test)]
+mod tests {
+ use clap::Parser;
+ use client::{Client, Database};
+ use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
+ use common_telemetry::logging::LoggingOptions;
+
+ use crate::error::Result as CmdResult;
+ use crate::options::GlobalOptions;
+ use crate::{cli, standalone, App};
+
+ #[tokio::test(flavor = "multi_thread")]
+ async fn test_export_create_table_with_quoted_names() -> CmdResult<()> {
+ let output_dir = tempfile::tempdir().unwrap();
+
+ let standalone = standalone::Command::parse_from([
+ "standalone",
+ "start",
+ "--data-home",
+ &*output_dir.path().to_string_lossy(),
+ ]);
+
+ let standalone_opts = standalone.load_options(&GlobalOptions::default()).unwrap();
+ let mut instance = standalone.build(standalone_opts).await?;
+ instance.start().await?;
+
+ let client = Client::with_urls(["127.0.0.1:4001"]);
+ let database = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
+ database
+ .sql(r#"CREATE DATABASE "cli.export.create_table";"#)
+ .await
+ .unwrap();
+ database
+ .sql(
+ r#"CREATE TABLE "cli.export.create_table"."a.b.c"(
+ ts TIMESTAMP,
+ TIME INDEX (ts)
+ ) engine=mito;
+ "#,
+ )
+ .await
+ .unwrap();
+
+ let output_dir = tempfile::tempdir().unwrap();
+ let cli = cli::Command::parse_from([
+ "cli",
+ "export",
+ "--addr",
+ "127.0.0.1:4000",
+ "--output-dir",
+ &*output_dir.path().to_string_lossy(),
+ "--target",
+ "create-table",
+ ]);
+ let mut cli_app = cli.build(LoggingOptions::default()).await?;
+ cli_app.start().await?;
+
+ instance.stop().await?;
+
+ let output_file = output_dir
+ .path()
+ .join("greptime-cli.export.create_table.sql");
+ let res = std::fs::read_to_string(output_file).unwrap();
+ let expect = r#"CREATE TABLE IF NOT EXISTS "a.b.c" (
+ "ts" TIMESTAMP(3) NOT NULL,
+ TIME INDEX ("ts")
+)
+
+ENGINE=mito
+;
+"#;
+ assert_eq!(res.trim(), expect.trim());
+
+ Ok(())
+ }
+}
diff --git a/src/cmd/src/cli/repl.rs b/src/cmd/src/cli/repl.rs
index 6759a923fc43..a9e2e21967f9 100644
--- a/src/cmd/src/cli/repl.rs
+++ b/src/cmd/src/cli/repl.rs
@@ -16,14 +16,18 @@ use std::path::PathBuf;
use std::sync::Arc;
use std::time::Instant;
+use cache::{
+ build_fundamental_cache_registry, with_default_composite_cache_registry, TABLE_CACHE_NAME,
+ TABLE_ROUTE_CACHE_NAME,
+};
use catalog::kvbackend::{
- CachedMetaKvBackend, CachedMetaKvBackendBuilder, KvBackendCatalogManager,
+ CachedMetaKvBackend, CachedMetaKvBackendBuilder, KvBackendCatalogManager, MetaKvBackend,
};
-use client::{Client, OutputData, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
+use client::{Client, Database, OutputData, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_base::Plugins;
use common_config::Mode;
use common_error::ext::ErrorExt;
-use common_meta::cache_invalidator::MultiCacheInvalidator;
+use common_meta::cache::{CacheRegistryBuilder, LayeredCacheRegistryBuilder};
use common_query::Output;
use common_recordbatch::RecordBatches;
use common_telemetry::debug;
@@ -38,12 +42,13 @@ use query::QueryEngine;
use rustyline::error::ReadlineError;
use rustyline::Editor;
use session::context::QueryContext;
-use snafu::ResultExt;
+use snafu::{OptionExt, ResultExt};
use substrait::{DFLogicalSubstraitConvertor, SubstraitPlan};
use crate::cli::cmd::ReplCommand;
use crate::cli::helper::RustylineHelper;
use crate::cli::AttachCommand;
+use crate::error;
use crate::error::{
CollectRecordBatchesSnafu, ParseSqlSnafu, PlanStatementSnafu, PrettyPrintRecordBatchesSnafu,
ReadlineSnafu, ReplCreationSnafu, RequestDatabaseSnafu, Result, StartMetaClientSnafu,
@@ -257,19 +262,42 @@ async fn create_query_engine(meta_addr: &str) -> Result<DatafusionQueryEngine> {
let cached_meta_backend =
Arc::new(CachedMetaKvBackendBuilder::new(meta_client.clone()).build());
- let multi_cache_invalidator = Arc::new(MultiCacheInvalidator::with_invalidators(vec![
- cached_meta_backend.clone(),
- ]));
- let catalog_list = KvBackendCatalogManager::new(
+ let layered_cache_builder = LayeredCacheRegistryBuilder::default().add_cache_registry(
+ CacheRegistryBuilder::default()
+ .add_cache(cached_meta_backend.clone())
+ .build(),
+ );
+ let fundamental_cache_registry =
+ build_fundamental_cache_registry(Arc::new(MetaKvBackend::new(meta_client.clone())));
+ let layered_cache_registry = Arc::new(
+ with_default_composite_cache_registry(
+ layered_cache_builder.add_cache_registry(fundamental_cache_registry),
+ )
+ .context(error::BuildCacheRegistrySnafu)?
+ .build(),
+ );
+
+ let table_cache = layered_cache_registry
+ .get()
+ .context(error::CacheRequiredSnafu {
+ name: TABLE_CACHE_NAME,
+ })?;
+ let table_route_cache = layered_cache_registry
+ .get()
+ .context(error::CacheRequiredSnafu {
+ name: TABLE_ROUTE_CACHE_NAME,
+ })?;
+ let catalog_manager = KvBackendCatalogManager::new(
Mode::Distributed,
Some(meta_client.clone()),
cached_meta_backend.clone(),
- multi_cache_invalidator,
+ table_cache,
+ table_route_cache,
)
.await;
let plugins: Plugins = Default::default();
let state = Arc::new(QueryEngineState::new(
- catalog_list,
+ catalog_manager,
None,
None,
None,
diff --git a/src/cmd/src/error.rs b/src/cmd/src/error.rs
index 0e1fec26dfa4..a2a880fa6c1d 100644
--- a/src/cmd/src/error.rs
+++ b/src/cmd/src/error.rs
@@ -163,6 +163,15 @@ pub enum Error {
location: Location,
},
+ #[snafu(display("Failed to request database, sql: {sql}"))]
+ RequestDatabase {
+ sql: String,
+ #[snafu(source)]
+ source: client::Error,
+ #[snafu(implicit)]
+ location: Location,
+ },
+
#[snafu(display("Failed to collect RecordBatches"))]
CollectRecordBatches {
#[snafu(implicit)]
@@ -354,6 +363,7 @@ impl ErrorExt for Error {
Error::ReplCreation { .. } | Error::Readline { .. } | Error::HttpQuerySql { .. } => {
StatusCode::Internal
}
+ Error::RequestDatabase { source, .. } => source.status_code(),
Error::CollectRecordBatches { source, .. }
| Error::PrettyPrintRecordBatches { source, .. } => source.status_code(),
Error::StartMetaClient { source, .. } => source.status_code(),
diff --git a/src/servers/Cargo.toml b/src/servers/Cargo.toml
index b355539ea713..369476e6e5ba 100644
--- a/src/servers/Cargo.toml
+++ b/src/servers/Cargo.toml
@@ -111,7 +111,7 @@ tikv-jemalloc-ctl = { version = "0.5", features = ["use_std"] }
[dev-dependencies]
auth = { workspace = true, features = ["testing"] }
catalog = { workspace = true, features = ["testing"] }
-client.workspace = true
+client = { workspace = true, features = ["testing"] }
common-base.workspace = true
common-test-util.workspace = true
criterion = "0.4"
@@ -125,8 +125,6 @@ serde_json.workspace = true
session = { workspace = true, features = ["testing"] }
table.workspace = true
tempfile = "3.0.0"
-# TODO depend `Database` client
-tests-integration.workspace = true
tokio-postgres = "0.7"
tokio-postgres-rustls = "0.11"
tokio-test = "0.4"
diff --git a/src/servers/tests/grpc/mod.rs b/src/servers/tests/grpc/mod.rs
index 4155f5eac738..9faad45b0c24 100644
--- a/src/servers/tests/grpc/mod.rs
+++ b/src/servers/tests/grpc/mod.rs
@@ -21,7 +21,7 @@ use arrow_flight::flight_service_server::{FlightService, FlightServiceServer};
use async_trait::async_trait;
use auth::tests::MockUserProvider;
use auth::UserProviderRef;
-use client::{Client, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
+use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_runtime::{Builder as RuntimeBuilder, Runtime};
use servers::error::{Result, StartGrpcSnafu, TcpBindSnafu};
use servers::grpc::flight::FlightCraftWrapper;
@@ -31,7 +31,6 @@ use servers::server::Server;
use snafu::ResultExt;
use table::test_util::MemTable;
use table::TableRef;
-use tests_integration::database::Database;
use tokio::net::TcpListener;
use tokio_stream::wrappers::TcpListenerStream;
use tonic::codec::CompressionEncoding;
diff --git a/tests-integration/src/lib.rs b/tests-integration/src/lib.rs
index e4db599fd5e7..d3e700151345 100644
--- a/tests-integration/src/lib.rs
+++ b/tests-integration/src/lib.rs
@@ -15,7 +15,6 @@
#![feature(assert_matches)]
pub mod cluster;
-pub mod database;
mod grpc;
mod influxdb;
mod instance;
diff --git a/tests-integration/src/test_util.rs b/tests-integration/src/test_util.rs
index 35ce52e273bd..7cbd640820b1 100644
--- a/tests-integration/src/test_util.rs
+++ b/tests-integration/src/test_util.rs
@@ -21,6 +21,7 @@ use std::time::Duration;
use auth::UserProviderRef;
use axum::Router;
use catalog::kvbackend::KvBackendCatalogManager;
+use client::Database;
use common_base::secrets::ExposeSecret;
use common_config::Configurable;
use common_meta::key::catalog_name::CatalogNameKey;
@@ -56,7 +57,6 @@ use servers::tls::ReloadableTlsServerConfig;
use servers::Mode;
use session::context::QueryContext;
-use crate::database::Database;
use crate::standalone::{GreptimeDbStandalone, GreptimeDbStandaloneBuilder};
pub const PEER_PLACEHOLDER_ADDR: &str = "127.0.0.1:3001";
diff --git a/tests-integration/tests/grpc.rs b/tests-integration/tests/grpc.rs
index 48419498e726..7d1f9d57768f 100644
--- a/tests-integration/tests/grpc.rs
+++ b/tests-integration/tests/grpc.rs
@@ -22,7 +22,7 @@ use api::v1::{
PromqlRequest, RequestHeader, SemanticType,
};
use auth::user_provider_from_option;
-use client::{Client, OutputData, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
+use client::{Client, Database, OutputData, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_catalog::consts::MITO_ENGINE;
use common_grpc::channel_manager::ClientTlsOption;
use common_query::Output;
@@ -36,7 +36,6 @@ use servers::http::prometheus::{
};
use servers::server::Server;
use servers::tls::{TlsMode, TlsOption};
-use tests_integration::database::Database;
use tests_integration::test_util::{
setup_grpc_server, setup_grpc_server_with, setup_grpc_server_with_user_provider, StorageType,
};
diff --git a/tests/runner/Cargo.toml b/tests/runner/Cargo.toml
index 7cb36c1645cb..6118c863fb15 100644
--- a/tests/runner/Cargo.toml
+++ b/tests/runner/Cargo.toml
@@ -10,7 +10,7 @@ workspace = true
[dependencies]
async-trait = "0.1"
clap.workspace = true
-client.workspace = true
+client = { workspace = true, features = ["testing"] }
common-error.workspace = true
common-query.workspace = true
common-recordbatch.workspace = true
@@ -19,7 +19,5 @@ serde.workspace = true
serde_json.workspace = true
sqlness = { version = "0.5" }
tempfile.workspace = true
-# TODO depend `Database` client
-tests-integration.workspace = true
tinytemplate = "1.2"
tokio.workspace = true
diff --git a/tests/runner/src/env.rs b/tests/runner/src/env.rs
index 399f65840b9c..ea3e3e1bc10a 100644
--- a/tests/runner/src/env.rs
+++ b/tests/runner/src/env.rs
@@ -24,13 +24,14 @@ use std::time::Duration;
use async_trait::async_trait;
use client::error::ServerSnafu;
-use client::{Client, Error as ClientError, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
+use client::{
+ Client, Database as DB, Error as ClientError, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME,
+};
use common_error::ext::ErrorExt;
use common_query::{Output, OutputData};
use common_recordbatch::RecordBatches;
use serde::Serialize;
use sqlness::{Database, EnvController, QueryContext};
-use tests_integration::database::Database as DB;
use tinytemplate::TinyTemplate;
use tokio::sync::Mutex as TokioMutex;
|
refactor
|
move Database to client crate behind testing feature (#4059)
|
58bdf27068a18aa1830d772da752653fd553dc87
|
2023-03-09 19:46:48
|
Ning Sun
|
fix: make pyo3 optional again (#1153)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 259ffeb60f51..0e93f76436e8 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -860,9 +860,9 @@ dependencies = [
[[package]]
name = "block-buffer"
-version = "0.10.3"
+version = "0.10.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e"
+checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71"
dependencies = [
"generic-array",
]
@@ -3641,9 +3641,9 @@ dependencies = [
[[package]]
name = "libc"
-version = "0.2.139"
+version = "0.2.140"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "201de327520df007757c1f0adce6e827fe8562fbc28bfd9c15571c66ca1f5f79"
+checksum = "99227334921fae1a979cf0bfdfcc6b3e5ce376ef57e16fb6fb3ea2ed6095f80c"
[[package]]
name = "libloading"
@@ -5165,16 +5165,18 @@ dependencies = [
[[package]]
name = "polling"
-version = "2.5.2"
+version = "2.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "22122d5ec4f9fe1b3916419b76be1e80bcb93f618d071d2edf841b137b2a2bd6"
+checksum = "7e1f879b2998099c2d69ab9605d145d5b661195627eccc680002c4918a7fb6fa"
dependencies = [
"autocfg",
+ "bitflags",
"cfg-if 1.0.0",
+ "concurrent-queue",
"libc",
"log",
- "wepoll-ffi",
- "windows-sys 0.42.0",
+ "pin-project-lite",
+ "windows-sys 0.45.0",
]
[[package]]
@@ -6724,9 +6726,9 @@ checksum = "e6b44e8fc93a14e66336d230954dda83d18b4605ccace8fe09bc7514a71ad0bc"
[[package]]
name = "serde"
-version = "1.0.153"
+version = "1.0.154"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3a382c72b4ba118526e187430bb4963cd6d55051ebf13d9b25574d379cc98d20"
+checksum = "8cdd151213925e7f1ab45a9bbfb129316bd00799784b174b7cc7bcd16961c49e"
dependencies = [
"serde_derive",
]
@@ -6743,9 +6745,9 @@ dependencies = [
[[package]]
name = "serde_derive"
-version = "1.0.153"
+version = "1.0.154"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1ef476a5790f0f6decbc66726b6e5d63680ed518283e64c7df415989d880954f"
+checksum = "4fc80d722935453bcafdc2c9a73cd6fac4dc1938f0346035d84bf99fa9e33217"
dependencies = [
"proc-macro2",
"quote",
@@ -8518,9 +8520,9 @@ dependencies = [
[[package]]
name = "unicode-bidi"
-version = "0.3.10"
+version = "0.3.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d54675592c1dbefd78cbd98db9bacd89886e1ca50692a0692baefffdeb92dd58"
+checksum = "524b68aca1d05e03fdf03fcdce2c6c94b6daf6d16861ddaa7e4f2b6638a9052c"
[[package]]
name = "unicode-casing"
@@ -8845,15 +8847,6 @@ dependencies = [
"webpki",
]
-[[package]]
-name = "wepoll-ffi"
-version = "0.1.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d743fdedc5c64377b5fc2bc036b01c7fd642205a0d96356034ae3404d49eb7fb"
-dependencies = [
- "cc",
-]
-
[[package]]
name = "which"
version = "4.4.0"
diff --git a/Cargo.toml b/Cargo.toml
index 276c800def4c..01eb2ad28938 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -50,7 +50,7 @@ edition = "2021"
license = "Apache-2.0"
[workspace.dependencies]
-arrow = { version = "33.0", features = ["pyarrow"] }
+arrow = { version = "33.0" }
arrow-array = "33.0"
arrow-flight = "33.0"
arrow-schema = { version = "33.0", features = ["serde"] }
diff --git a/src/script/Cargo.toml b/src/script/Cargo.toml
index 114ac8736440..1e759ade536e 100644
--- a/src/script/Cargo.toml
+++ b/src/script/Cargo.toml
@@ -6,7 +6,7 @@ license.workspace = true
[features]
default = ["python"]
-pyo3_backend = ["pyo3"]
+pyo3_backend = ["dep:pyo3", "arrow/pyarrow"]
python = [
"dep:datafusion",
"dep:datafusion-common",
|
fix
|
make pyo3 optional again (#1153)
|
e47ef1f0d2666a0d36c8db3b56b81a082dbe6dcc
|
2023-06-20 16:33:52
|
JeremyHi
|
chore: minor fix (#1801)
| false
|
diff --git a/src/meta-client/src/client.rs b/src/meta-client/src/client.rs
index 78a612f4a270..5842b431c48e 100644
--- a/src/meta-client/src/client.rs
+++ b/src/meta-client/src/client.rs
@@ -756,7 +756,7 @@ mod tests {
let tc = new_client("test_batch_put").await;
let mut req = BatchPutRequest::new();
- for i in 0..256 {
+ for i in 0..275 {
req = req.add_kv(
tc.key(&format!("key-{}", i)),
format!("value-{}", i).into_bytes(),
@@ -769,7 +769,7 @@ mod tests {
let req = RangeRequest::new().with_prefix(tc.key("key-"));
let res = tc.client.range(req).await;
let kvs = res.unwrap().take_kvs();
- assert_eq!(256, kvs.len());
+ assert_eq!(275, kvs.len());
}
#[tokio::test]
diff --git a/src/meta-srv/src/service/store/etcd.rs b/src/meta-srv/src/service/store/etcd.rs
index 22834b355b3b..c97543137c52 100644
--- a/src/meta-srv/src/service/store/etcd.rs
+++ b/src/meta-srv/src/service/store/etcd.rs
@@ -32,6 +32,10 @@ use crate::error::Result;
use crate::metrics::METRIC_META_KV_REQUEST;
use crate::service::store::kv::{KvStore, KvStoreRef};
+// Maximum number of operations permitted in a transaction.
+// The etcd default configuration's `--max-txn-ops` is 128.
+//
+// For more detail, see: https://etcd.io/docs/v3.5/op-guide/configuration/
const MAX_TXN_SIZE: usize = 128;
pub struct EtcdStore {
@@ -55,7 +59,7 @@ impl EtcdStore {
Ok(Arc::new(Self { client }))
}
- async fn do_multi_txn(&self, mut txn_ops: Vec<TxnOp>) -> Result<Vec<TxnResponse>> {
+ async fn do_multi_txn(&self, txn_ops: Vec<TxnOp>) -> Result<Vec<TxnResponse>> {
if txn_ops.len() < MAX_TXN_SIZE {
// fast path
let txn = Txn::new().and_then(txn_ops);
@@ -68,36 +72,17 @@ impl EtcdStore {
return Ok(vec![txn_res]);
}
- let mut txns = vec![];
- loop {
- if txn_ops.is_empty() {
- break;
- }
-
- if txn_ops.len() < MAX_TXN_SIZE {
- let txn = Txn::new().and_then(txn_ops);
- txns.push(txn);
- break;
- }
-
- let part = txn_ops.drain(..MAX_TXN_SIZE).collect::<Vec<_>>();
- let txn = Txn::new().and_then(part);
- txns.push(txn);
- }
+ let txns = txn_ops
+ .chunks(MAX_TXN_SIZE)
+ .map(|part| async move {
+ let txn = Txn::new().and_then(part);
+ self.client.kv_client().txn(txn).await
+ })
+ .collect::<Vec<_>>();
- let mut txn_responses = Vec::with_capacity(txns.len());
- // Considering the pressure on etcd, it would be more appropriate to execute txn in
- // a serial manner.
- for txn in txns {
- let txn_res = self
- .client
- .kv_client()
- .txn(txn)
- .await
- .context(error::EtcdFailedSnafu)?;
- txn_responses.push(txn_res);
- }
- Ok(txn_responses)
+ futures::future::try_join_all(txns)
+ .await
+ .context(error::EtcdFailedSnafu)
}
}
@@ -241,7 +226,7 @@ impl KvStore for EtcdStore {
prev_kvs.push(KvPair::from_etcd_kv(prev_kv));
}
}
- _ => unreachable!(), // never get here
+ _ => unreachable!(),
}
}
}
@@ -283,7 +268,7 @@ impl KvStore for EtcdStore {
prev_kvs.push(KvPair::from_etcd_kv(kv));
});
}
- _ => unreachable!(), // never get here
+ _ => unreachable!(),
}
}
}
@@ -343,7 +328,7 @@ impl KvStore for EtcdStore {
let prev_kv = match op_res {
TxnOpResponse::Put(res) => res.prev_key().map(KvPair::from_etcd_kv),
TxnOpResponse::Get(res) => res.kvs().first().map(KvPair::from_etcd_kv),
- _ => unreachable!(), // never get here
+ _ => unreachable!(),
};
let header = Some(ResponseHeader::success(cluster_id));
|
chore
|
minor fix (#1801)
|
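
The etcd change above replaces a manual drain loop with `chunks()` plus `futures::future::try_join_all`, which also moves the chunked transactions from serial to concurrent execution. A minimal standalone sketch of the same pattern, assuming placeholder `TxnOp`/`TxnResponse`/`Error` types and an `exec_txn` helper that are not the project's real API:

    async fn do_multi_txn(ops: Vec<TxnOp>, max_txn_size: usize) -> Result<Vec<TxnResponse>, Error> {
        // e.g. 300 ops with a limit of 128 become 3 transactions of 128, 128 and 44 ops
        let txns = ops
            .chunks(max_txn_size)
            .map(|chunk| exec_txn(chunk.to_vec()))
            .collect::<Vec<_>>();
        // fail fast on the first error, otherwise keep the responses in chunk order
        futures::future::try_join_all(txns).await
    }
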
5dc7ce179153dbc945571cc69cc9d9cf194178e4
|
2023-12-18 08:36:11
|
Wei
|
fix: typos and bit operations (#2944)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index ffb2589a0dde..5aa53779d444 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3589,7 +3589,7 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"
[[package]]
name = "greptime-proto"
version = "0.1.0"
-source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=b1d403088f02136bcebde53d604f491c260ca8e2#b1d403088f02136bcebde53d604f491c260ca8e2"
+source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=a31ea166fc015ea7ff111ac94e26c3a5d64364d2#a31ea166fc015ea7ff111ac94e26c3a5d64364d2"
dependencies = [
"prost 0.12.2",
"serde",
diff --git a/Cargo.toml b/Cargo.toml
index 06f8475f6a3c..10f5a99a7b31 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -88,7 +88,7 @@ etcd-client = "0.12"
fst = "0.4.7"
futures = "0.3"
futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "b1d403088f02136bcebde53d604f491c260ca8e2" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "a31ea166fc015ea7ff111ac94e26c3a5d64364d2" }
humantime-serde = "1.1"
itertools = "0.10"
lazy_static = "1.4"
diff --git a/src/api/src/helper.rs b/src/api/src/helper.rs
index 40b8d1533125..ff4911e1a29c 100644
--- a/src/api/src/helper.rs
+++ b/src/api/src/helper.rs
@@ -535,11 +535,8 @@ pub fn convert_i128_to_interval(v: i128) -> v1::IntervalMonthDayNano {
/// Convert common decimal128 to grpc decimal128 without precision and scale.
pub fn convert_to_pb_decimal128(v: Decimal128) -> v1::Decimal128 {
- let value = v.val();
- v1::Decimal128 {
- hi: (value >> 64) as i64,
- lo: value as i64,
- }
+ let (hi, lo) = v.split_value();
+ v1::Decimal128 { hi, lo }
}
pub fn pb_value_to_value_ref<'a>(
@@ -580,9 +577,9 @@ pub fn pb_value_to_value_ref<'a>(
ValueData::TimeMillisecondValue(t) => ValueRef::Time(Time::new_millisecond(*t)),
ValueData::TimeMicrosecondValue(t) => ValueRef::Time(Time::new_microsecond(*t)),
ValueData::TimeNanosecondValue(t) => ValueRef::Time(Time::new_nanosecond(*t)),
- ValueData::IntervalYearMonthValues(v) => ValueRef::Interval(Interval::from_i32(*v)),
- ValueData::IntervalDayTimeValues(v) => ValueRef::Interval(Interval::from_i64(*v)),
- ValueData::IntervalMonthDayNanoValues(v) => {
+ ValueData::IntervalYearMonthValue(v) => ValueRef::Interval(Interval::from_i32(*v)),
+ ValueData::IntervalDayTimeValue(v) => ValueRef::Interval(Interval::from_i64(*v)),
+ ValueData::IntervalMonthDayNanoValue(v) => {
let interval = Interval::from_month_day_nano(v.months, v.days, v.nanoseconds);
ValueRef::Interval(interval)
}
@@ -986,13 +983,13 @@ pub fn to_proto_value(value: Value) -> Option<v1::Value> {
},
Value::Interval(v) => match v.unit() {
IntervalUnit::YearMonth => v1::Value {
- value_data: Some(ValueData::IntervalYearMonthValues(v.to_i32())),
+ value_data: Some(ValueData::IntervalYearMonthValue(v.to_i32())),
},
IntervalUnit::DayTime => v1::Value {
- value_data: Some(ValueData::IntervalDayTimeValues(v.to_i64())),
+ value_data: Some(ValueData::IntervalDayTimeValue(v.to_i64())),
},
IntervalUnit::MonthDayNano => v1::Value {
- value_data: Some(ValueData::IntervalMonthDayNanoValues(
+ value_data: Some(ValueData::IntervalMonthDayNanoValue(
convert_i128_to_interval(v.to_i128()),
)),
},
@@ -1011,12 +1008,9 @@ pub fn to_proto_value(value: Value) -> Option<v1::Value> {
value_data: Some(ValueData::DurationNanosecondValue(v.value())),
},
},
- Value::Decimal128(v) => {
- let (hi, lo) = v.split_value();
- v1::Value {
- value_data: Some(ValueData::Decimal128Value(v1::Decimal128 { hi, lo })),
- }
- }
+ Value::Decimal128(v) => v1::Value {
+ value_data: Some(ValueData::Decimal128Value(convert_to_pb_decimal128(v))),
+ },
Value::List(_) => return None,
};
@@ -1051,9 +1045,9 @@ pub fn proto_value_type(value: &v1::Value) -> Option<ColumnDataType> {
ValueData::TimeMillisecondValue(_) => ColumnDataType::TimeMillisecond,
ValueData::TimeMicrosecondValue(_) => ColumnDataType::TimeMicrosecond,
ValueData::TimeNanosecondValue(_) => ColumnDataType::TimeNanosecond,
- ValueData::IntervalYearMonthValues(_) => ColumnDataType::IntervalYearMonth,
- ValueData::IntervalDayTimeValues(_) => ColumnDataType::IntervalDayTime,
- ValueData::IntervalMonthDayNanoValues(_) => ColumnDataType::IntervalMonthDayNano,
+ ValueData::IntervalYearMonthValue(_) => ColumnDataType::IntervalYearMonth,
+ ValueData::IntervalDayTimeValue(_) => ColumnDataType::IntervalDayTime,
+ ValueData::IntervalMonthDayNanoValue(_) => ColumnDataType::IntervalMonthDayNano,
ValueData::DurationSecondValue(_) => ColumnDataType::DurationSecond,
ValueData::DurationMillisecondValue(_) => ColumnDataType::DurationMillisecond,
ValueData::DurationMicrosecondValue(_) => ColumnDataType::DurationMicrosecond,
@@ -1109,10 +1103,10 @@ pub fn value_to_grpc_value(value: Value) -> GrpcValue {
TimeUnit::Nanosecond => ValueData::TimeNanosecondValue(v.value()),
}),
Value::Interval(v) => Some(match v.unit() {
- IntervalUnit::YearMonth => ValueData::IntervalYearMonthValues(v.to_i32()),
- IntervalUnit::DayTime => ValueData::IntervalDayTimeValues(v.to_i64()),
+ IntervalUnit::YearMonth => ValueData::IntervalYearMonthValue(v.to_i32()),
+ IntervalUnit::DayTime => ValueData::IntervalDayTimeValue(v.to_i64()),
IntervalUnit::MonthDayNano => {
- ValueData::IntervalMonthDayNanoValues(convert_i128_to_interval(v.to_i128()))
+ ValueData::IntervalMonthDayNanoValue(convert_i128_to_interval(v.to_i128()))
}
}),
Value::Duration(v) => Some(match v.unit() {
@@ -1121,10 +1115,7 @@ pub fn value_to_grpc_value(value: Value) -> GrpcValue {
TimeUnit::Microsecond => ValueData::DurationMicrosecondValue(v.value()),
TimeUnit::Nanosecond => ValueData::DurationNanosecondValue(v.value()),
}),
- Value::Decimal128(v) => {
- let (hi, lo) = v.split_value();
- Some(ValueData::Decimal128Value(v1::Decimal128 { hi, lo }))
- }
+ Value::Decimal128(v) => Some(ValueData::Decimal128Value(convert_to_pb_decimal128(v))),
Value::List(_) => unreachable!(),
},
}
diff --git a/src/common/decimal/src/decimal128.rs b/src/common/decimal/src/decimal128.rs
index ce23fcf98a97..d742be5876f4 100644
--- a/src/common/decimal/src/decimal128.rs
+++ b/src/common/decimal/src/decimal128.rs
@@ -110,9 +110,15 @@ impl Decimal128 {
}
/// Convert from precision, scale, a i128 value which
- /// represents by two i64 value(high-64 bit, low-64 bit).
+    /// is represented by an i64 + i64 pair (high 64 bits, low 64 bits).
pub fn from_value_precision_scale(hi: i64, lo: i64, precision: u8, scale: i8) -> Self {
- let value = (hi as i128) << 64 | lo as i128;
+ // 128 64 0
+ // +-------+-------+-------+-------+-------+-------+-------+-------+
+ // | hi | lo |
+ // +-------+-------+-------+-------+-------+-------+-------+-------+
+ let hi = (hi as u128 & u64::MAX as u128) << 64;
+ let lo = lo as u128 & u64::MAX as u128;
+ let value = (hi | lo) as i128;
Self::new(value, precision, scale)
}
}
@@ -429,4 +435,30 @@ mod tests {
let decimal2 = Decimal128::from_str("1234567890.123").unwrap();
assert_eq!(decimal1.partial_cmp(&decimal2), None);
}
+
+ #[test]
+ fn test_convert_with_i128() {
+ let test_decimal128_eq = |value| {
+ let decimal1 =
+ Decimal128::new(value, DECIMAL128_MAX_PRECISION, DECIMAL128_DEFAULT_SCALE);
+ let (hi, lo) = decimal1.split_value();
+ let decimal2 = Decimal128::from_value_precision_scale(
+ hi,
+ lo,
+ DECIMAL128_MAX_PRECISION,
+ DECIMAL128_DEFAULT_SCALE,
+ );
+ assert_eq!(decimal1, decimal2);
+ };
+
+ test_decimal128_eq(1 << 63);
+
+ test_decimal128_eq(0);
+ test_decimal128_eq(1234567890);
+ test_decimal128_eq(-1234567890);
+ test_decimal128_eq(32781372819372817382183218i128);
+ test_decimal128_eq(-32781372819372817382183218i128);
+ test_decimal128_eq(i128::MAX);
+ test_decimal128_eq(i128::MIN);
+ }
}
diff --git a/src/common/time/src/interval.rs b/src/common/time/src/interval.rs
index 716602b8e2a8..892e4807c499 100644
--- a/src/common/time/src/interval.rs
+++ b/src/common/time/src/interval.rs
@@ -258,21 +258,24 @@ impl Interval {
}
pub fn to_i128(&self) -> i128 {
- let mut result = 0;
- result |= self.months as i128;
- result <<= 32;
- result |= self.days as i128;
- result <<= 64;
- result |= self.nsecs as i128;
- result
+ // 128 96 64 0
+ // +-------+-------+-------+-------+-------+-------+-------+-------+
+ // | months | days | nanoseconds |
+ // +-------+-------+-------+-------+-------+-------+-------+-------+
+ let months = (self.months as u128 & u32::MAX as u128) << 96;
+ let days = (self.days as u128 & u32::MAX as u128) << 64;
+ let nsecs = self.nsecs as u128 & u64::MAX as u128;
+ (months | days | nsecs) as i128
}
pub fn to_i64(&self) -> i64 {
- let mut result = 0;
- result |= self.days as i64;
- result <<= 32;
- result |= self.nsecs / NANOS_PER_MILLI;
- result
+ // 64 32 0
+ // +-------+-------+-------+-------+-------+-------+-------+-------+
+ // | days | milliseconds |
+ // +-------+-------+-------+-------+-------+-------+-------+-------+
+ let days = (self.days as u64 & u32::MAX as u64) << 32;
+ let milliseconds = (self.nsecs / NANOS_PER_MILLI) as u64 & u32::MAX as u64;
+ (days | milliseconds) as i64
}
pub fn to_i32(&self) -> i32 {
@@ -635,9 +638,25 @@ mod tests {
#[test]
fn test_interval_i128_convert() {
- let interval = Interval::from_month_day_nano(1, 1, 1);
- let interval_i128 = interval.to_i128();
- assert_eq!(interval_i128, 79228162532711081667253501953);
+ let test_interval_eq = |month, day, nano| {
+ let interval = Interval::from_month_day_nano(month, day, nano);
+ let interval_i128 = interval.to_i128();
+ let interval2 = Interval::from_i128(interval_i128);
+ assert_eq!(interval, interval2);
+ };
+
+ test_interval_eq(1, 2, 3);
+ test_interval_eq(1, -2, 3);
+ test_interval_eq(1, -2, -3);
+ test_interval_eq(-1, -2, -3);
+ test_interval_eq(i32::MAX, i32::MAX, i64::MAX);
+ test_interval_eq(i32::MIN, i32::MAX, i64::MAX);
+ test_interval_eq(i32::MAX, i32::MIN, i64::MAX);
+ test_interval_eq(i32::MAX, i32::MAX, i64::MIN);
+ test_interval_eq(i32::MIN, i32::MIN, i64::MAX);
+ test_interval_eq(i32::MAX, i32::MIN, i64::MIN);
+ test_interval_eq(i32::MIN, i32::MAX, i64::MIN);
+ test_interval_eq(i32::MIN, i32::MIN, i64::MIN);
}
#[test]
diff --git a/src/operator/src/req_convert/common.rs b/src/operator/src/req_convert/common.rs
index 694906b82833..3c63d0f4b075 100644
--- a/src/operator/src/req_convert/common.rs
+++ b/src/operator/src/req_convert/common.rs
@@ -148,17 +148,17 @@ fn push_column_to_rows(column: Column, rows: &mut [Row]) -> Result<()> {
(TimeNanosecond, TimeNanosecondValue, time_nanosecond_values),
(
IntervalYearMonth,
- IntervalYearMonthValues,
+ IntervalYearMonthValue,
interval_year_month_values
),
(
IntervalDayTime,
- IntervalDayTimeValues,
+ IntervalDayTimeValue,
interval_day_time_values
),
(
IntervalMonthDayNano,
- IntervalMonthDayNanoValues,
+ IntervalMonthDayNanoValue,
interval_month_day_nano_values
),
(DurationSecond, DurationSecondValue, duration_second_values),
|
fix
|
typos and bit operations (#2944)
|
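
The decimal part of the fix above is a sign-extension issue: with `(hi as i128) << 64 | lo as i128`, a low half whose top bit is set sign-extends across the upper 64 bits and corrupts `hi`. A self-contained round-trip check of the masked form, using only plain Rust and no project types:

    fn join(hi: i64, lo: i64) -> i128 {
        // mask both halves so a negative `lo` cannot sign-extend into the high bits
        let hi = (hi as u128 & u64::MAX as u128) << 64;
        let lo = lo as u128 & u64::MAX as u128;
        (hi | lo) as i128
    }

    fn main() {
        // 2^64 - 1 splits into hi = 0, lo = -1; the masked form reassembles it,
        // while the unmasked formula returns -1 for the same inputs.
        assert_eq!(join(0, -1), (1i128 << 64) - 1);
    }
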
5eb2c609a34563df5e50e547515b009783eb0efb
|
2023-08-01 20:48:31
|
shuiyisong
|
fix: auth in grpc (#2056)
| false
|
diff --git a/src/servers/src/grpc/handler.rs b/src/servers/src/grpc/handler.rs
index 097294bcd541..9216f3113924 100644
--- a/src/servers/src/grpc/handler.rs
+++ b/src/servers/src/grpc/handler.rs
@@ -70,7 +70,10 @@ impl GreptimeRequestHandler {
let header = request.header.as_ref();
let query_ctx = create_query_context(header);
- let _ = self.auth(header, &query_ctx).await?;
+ if let Err(e) = self.auth(header, &query_ctx).await? {
+ return Ok(Err(e));
+ }
+
let handler = self.handler.clone();
let request_type = request_type(&query);
let db = query_ctx.get_db_string();
diff --git a/tests-integration/src/test_util.rs b/tests-integration/src/test_util.rs
index 5d56ddb47258..b1a10beb7593 100644
--- a/tests-integration/src/test_util.rs
+++ b/tests-integration/src/test_util.rs
@@ -48,6 +48,7 @@ use object_store::services::{Azblob, Gcs, Oss, S3};
use object_store::test_util::TempFolder;
use object_store::ObjectStore;
use secrecy::ExposeSecret;
+use servers::auth::UserProviderRef;
use servers::grpc::GrpcServer;
use servers::http::{HttpOptions, HttpServerBuilder};
use servers::metrics_handler::MetricsHandler;
@@ -404,6 +405,14 @@ pub async fn setup_test_http_app(store_type: StorageType, name: &str) -> (Router
pub async fn setup_test_http_app_with_frontend(
store_type: StorageType,
name: &str,
+) -> (Router, TestGuard) {
+ setup_test_http_app_with_frontend_and_user_provider(store_type, name, None).await
+}
+
+pub async fn setup_test_http_app_with_frontend_and_user_provider(
+ store_type: StorageType,
+ name: &str,
+ user_provider: Option<UserProviderRef>,
) -> (Router, TestGuard) {
let (opts, guard) = create_tmp_dir_and_datanode_opts(store_type, name);
let (instance, heartbeat) = Instance::with_mock_meta_client(&opts).await.unwrap();
@@ -429,12 +438,20 @@ pub async fn setup_test_http_app_with_frontend(
};
let frontend_ref = Arc::new(frontend);
- let http_server = HttpServerBuilder::new(http_opts)
+ let mut http_server = HttpServerBuilder::new(http_opts);
+
+ http_server
.with_sql_handler(ServerSqlQueryHandlerAdaptor::arc(frontend_ref.clone()))
.with_grpc_handler(ServerGrpcQueryHandlerAdaptor::arc(frontend_ref.clone()))
.with_script_handler(frontend_ref)
- .with_greptime_config_options(opts.to_toml_string())
- .build();
+ .with_greptime_config_options(opts.to_toml_string());
+
+ if let Some(user_provider) = user_provider {
+ http_server.with_user_provider(user_provider);
+ }
+
+ let http_server = http_server.build();
+
let app = http_server.build(http_server.make_app());
(app, guard)
}
@@ -532,6 +549,14 @@ pub async fn setup_test_prom_app_with_frontend(
pub async fn setup_grpc_server(
store_type: StorageType,
name: &str,
+) -> (String, TestGuard, Arc<GrpcServer>) {
+ setup_grpc_server_with_user_provider(store_type, name, None).await
+}
+
+pub async fn setup_grpc_server_with_user_provider(
+ store_type: StorageType,
+ name: &str,
+ user_provider: Option<UserProviderRef>,
) -> (String, TestGuard, Arc<GrpcServer>) {
common_telemetry::init_default_ut_logging();
@@ -557,7 +582,7 @@ pub async fn setup_grpc_server(
let fe_grpc_server = Arc::new(GrpcServer::new(
ServerGrpcQueryHandlerAdaptor::arc(fe_instance_ref.clone()),
Some(fe_instance_ref.clone()),
- None,
+ user_provider,
runtime,
));
@@ -587,6 +612,14 @@ pub async fn check_output_stream(output: Output, expected: &str) {
pub async fn setup_mysql_server(
store_type: StorageType,
name: &str,
+) -> (String, TestGuard, Arc<Box<dyn Server>>) {
+ setup_mysql_server_with_user_provider(store_type, name, None).await
+}
+
+pub async fn setup_mysql_server_with_user_provider(
+ store_type: StorageType,
+ name: &str,
+ user_provider: Option<UserProviderRef>,
) -> (String, TestGuard, Arc<Box<dyn Server>>) {
common_telemetry::init_default_ut_logging();
@@ -619,7 +652,7 @@ pub async fn setup_mysql_server(
runtime,
Arc::new(MysqlSpawnRef::new(
ServerSqlQueryHandlerAdaptor::arc(fe_instance_ref),
- None,
+ user_provider,
)),
Arc::new(MysqlSpawnConfig::new(
false,
@@ -643,6 +676,14 @@ pub async fn setup_mysql_server(
pub async fn setup_pg_server(
store_type: StorageType,
name: &str,
+) -> (String, TestGuard, Arc<Box<dyn Server>>) {
+ setup_pg_server_with_user_provider(store_type, name, None).await
+}
+
+pub async fn setup_pg_server_with_user_provider(
+ store_type: StorageType,
+ name: &str,
+ user_provider: Option<UserProviderRef>,
) -> (String, TestGuard, Arc<Box<dyn Server>>) {
common_telemetry::init_default_ut_logging();
@@ -675,7 +716,7 @@ pub async fn setup_pg_server(
ServerSqlQueryHandlerAdaptor::arc(fe_instance_ref),
opts.tls.clone(),
runtime,
- None,
+ user_provider,
)) as Box<dyn Server>);
let fe_pg_addr_clone = fe_pg_addr.clone();
diff --git a/tests-integration/tests/grpc.rs b/tests-integration/tests/grpc.rs
index 364e306e54cd..a65004c71cf8 100644
--- a/tests-integration/tests/grpc.rs
+++ b/tests-integration/tests/grpc.rs
@@ -12,19 +12,24 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::sync::Arc;
+
use api::v1::alter_expr::Kind;
use api::v1::promql_request::Promql;
use api::v1::{
- column, AddColumn, AddColumns, AlterExpr, Column, ColumnDataType, ColumnDef, CreateTableExpr,
- InsertRequest, InsertRequests, PromInstantQuery, PromRangeQuery, PromqlRequest, RequestHeader,
- SemanticType, TableId,
+ column, AddColumn, AddColumns, AlterExpr, Basic, Column, ColumnDataType, ColumnDef,
+ CreateTableExpr, InsertRequest, InsertRequests, PromInstantQuery, PromRangeQuery,
+ PromqlRequest, RequestHeader, SemanticType, TableId,
};
use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_catalog::consts::{MIN_USER_TABLE_ID, MITO_ENGINE};
use common_query::Output;
+use servers::auth::user_provider::StaticUserProvider;
use servers::prometheus::{PromData, PromSeries, PrometheusJsonResponse, PrometheusResponse};
use servers::server::Server;
-use tests_integration::test_util::{setup_grpc_server, StorageType};
+use tests_integration::test_util::{
+ setup_grpc_server, setup_grpc_server_with_user_provider, StorageType,
+};
#[macro_export]
macro_rules! grpc_test {
@@ -60,6 +65,7 @@ macro_rules! grpc_tests {
test_auto_create_table,
test_insert_and_select,
test_dbname,
+ test_grpc_auth,
test_health_check,
test_prom_gateway_query,
);
@@ -111,6 +117,60 @@ pub async fn test_dbname(store_type: StorageType) {
guard.remove_all().await;
}
+pub async fn test_grpc_auth(store_type: StorageType) {
+ let user_provider = StaticUserProvider::try_from("cmd:greptime_user=greptime_pwd").unwrap();
+
+ let (addr, mut guard, fe_grpc_server) = setup_grpc_server_with_user_provider(
+ store_type,
+ "auto_create_table",
+ Some(Arc::new(user_provider)),
+ )
+ .await;
+
+ let grpc_client = Client::with_urls(vec![addr]);
+ let mut db = Database::new_with_dbname(
+ format!("{}-{}", DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME),
+ grpc_client,
+ );
+
+ // 1. test without auth
+ let re = db.sql("show tables;").await;
+ assert!(re.is_err());
+ assert!(matches!(
+ re,
+ Err(client::Error::FlightGet {
+ tonic_code: tonic::Code::Unauthenticated,
+ ..
+ })
+ ));
+
+ // 2. test wrong auth
+ db.set_auth(api::v1::auth_header::AuthScheme::Basic(Basic {
+ username: "greptime_user".to_string(),
+ password: "wrong_pwd".to_string(),
+ }));
+ let re = db.sql("show tables;").await;
+ assert!(re.is_err());
+ assert!(matches!(
+ re,
+ Err(client::Error::FlightGet {
+ tonic_code: tonic::Code::Unauthenticated,
+ ..
+ })
+ ));
+
+ // 3. test right auth
+ db.set_auth(api::v1::auth_header::AuthScheme::Basic(Basic {
+ username: "greptime_user".to_string(),
+ password: "greptime_pwd".to_string(),
+ }));
+ let re = db.sql("show tables;").await;
+ assert!(re.is_ok());
+
+ let _ = fe_grpc_server.shutdown().await;
+ guard.remove_all().await;
+}
+
pub async fn test_auto_create_table(store_type: StorageType) {
let (addr, mut guard, fe_grpc_server) =
setup_grpc_server(store_type, "auto_create_table").await;
diff --git a/tests-integration/tests/http.rs b/tests-integration/tests/http.rs
index a056e9cac791..dca65775db62 100644
--- a/tests-integration/tests/http.rs
+++ b/tests-integration/tests/http.rs
@@ -12,15 +12,19 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::sync::Arc;
+
use axum::http::StatusCode;
use axum_test_helper::TestClient;
use common_error::status_code::StatusCode as ErrorCode;
use serde_json::json;
+use servers::auth::user_provider::StaticUserProvider;
use servers::http::handler::HealthResponse;
use servers::http::{JsonOutput, JsonResponse};
use servers::prometheus::{PrometheusJsonResponse, PrometheusResponse};
use tests_integration::test_util::{
- setup_test_http_app, setup_test_http_app_with_frontend, setup_test_prom_app_with_frontend,
+ setup_test_http_app, setup_test_http_app_with_frontend,
+ setup_test_http_app_with_frontend_and_user_provider, setup_test_prom_app_with_frontend,
StorageType,
};
@@ -53,6 +57,7 @@ macro_rules! http_tests {
http_test!(
$service,
+ test_http_auth,
test_sql_api,
test_prometheus_promql_api,
test_prom_http_api,
@@ -67,6 +72,47 @@ macro_rules! http_tests {
};
}
+pub async fn test_http_auth(store_type: StorageType) {
+ common_telemetry::init_default_ut_logging();
+
+ let user_provider = StaticUserProvider::try_from("cmd:greptime_user=greptime_pwd").unwrap();
+
+ let (app, mut guard) = setup_test_http_app_with_frontend_and_user_provider(
+ store_type,
+ "sql_api",
+ Some(Arc::new(user_provider)),
+ )
+ .await;
+ let client = TestClient::new(app);
+
+ // 1. no auth
+ let res = client
+ .get("/v1/sql?db=public&sql=show tables;")
+ .send()
+ .await;
+ assert_eq!(res.status(), StatusCode::UNAUTHORIZED);
+
+ // 2. wrong auth
+ let res = client
+ .get("/v1/sql?db=public&sql=show tables;")
+ .header("Authorization", "basic Z3JlcHRpbWVfdXNlcjp3cm9uZ19wd2Q=")
+ .send()
+ .await;
+ assert_eq!(res.status(), StatusCode::UNAUTHORIZED);
+
+ // 3. right auth
+ let res = client
+ .get("/v1/sql?db=public&sql=show tables;")
+ .header(
+ "Authorization",
+ "basic Z3JlcHRpbWVfdXNlcjpncmVwdGltZV9wd2Q=",
+ )
+ .send()
+ .await;
+ assert_eq!(res.status(), StatusCode::OK);
+ guard.remove_all().await;
+}
+
pub async fn test_sql_api(store_type: StorageType) {
common_telemetry::init_default_ut_logging();
let (app, mut guard) = setup_test_http_app_with_frontend(store_type, "sql_api").await;
diff --git a/tests-integration/tests/sql.rs b/tests-integration/tests/sql.rs
index 1d1d2f824ee2..e4652d9665e4 100644
--- a/tests-integration/tests/sql.rs
+++ b/tests-integration/tests/sql.rs
@@ -11,11 +11,17 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::sync::Arc;
+
use chrono::{DateTime, NaiveDate, NaiveDateTime, Utc};
-use sqlx::mysql::MySqlPoolOptions;
-use sqlx::postgres::PgPoolOptions;
+use servers::auth::user_provider::StaticUserProvider;
+use sqlx::mysql::{MySqlDatabaseError, MySqlPoolOptions};
+use sqlx::postgres::{PgDatabaseError, PgPoolOptions};
use sqlx::Row;
-use tests_integration::test_util::{setup_mysql_server, setup_pg_server, StorageType};
+use tests_integration::test_util::{
+ setup_mysql_server, setup_mysql_server_with_user_provider, setup_pg_server,
+ setup_pg_server_with_user_provider, StorageType,
+};
#[macro_export]
macro_rules! sql_test {
@@ -47,13 +53,72 @@ macro_rules! sql_tests {
sql_test!(
$service,
+ test_mysql_auth,
test_mysql_crud,
+ test_postgres_auth,
test_postgres_crud,
);
)*
};
}
+pub async fn test_mysql_auth(store_type: StorageType) {
+ let user_provider = StaticUserProvider::try_from("cmd:greptime_user=greptime_pwd").unwrap();
+ let (addr, mut guard, fe_mysql_server) = setup_mysql_server_with_user_provider(
+ store_type,
+ "sql_crud",
+ Some(Arc::new(user_provider)),
+ )
+ .await;
+
+ // 1. no auth
+ let conn_re = MySqlPoolOptions::new()
+ .max_connections(2)
+ .connect(&format!("mysql://{addr}/public"))
+ .await;
+
+ assert!(conn_re.is_err());
+ assert_eq!(
+ conn_re
+ .err()
+ .unwrap()
+ .into_database_error()
+ .unwrap()
+ .downcast::<MySqlDatabaseError>()
+ .code(),
+ Some("28000")
+ );
+
+ // 2. wrong pwd
+ let conn_re = MySqlPoolOptions::new()
+ .max_connections(2)
+ .connect(&format!("mysql://greptime_user:wrong_pwd@{addr}/public"))
+ .await;
+
+ assert!(conn_re.is_err());
+ assert_eq!(
+ conn_re
+ .err()
+ .unwrap()
+ .into_database_error()
+ .unwrap()
+ .downcast::<MySqlDatabaseError>()
+ .code(),
+ Some("28000")
+ );
+
+ // 3. right pwd
+ let conn_re = MySqlPoolOptions::new()
+ .max_connections(2)
+ .connect(&format!("mysql://greptime_user:greptime_pwd@{addr}/public"))
+ .await;
+
+ assert!(conn_re.is_ok());
+
+ let _ = fe_mysql_server.shutdown().await;
+ guard.remove_all().await;
+}
+
pub async fn test_mysql_crud(store_type: StorageType) {
let (addr, mut guard, fe_mysql_server) = setup_mysql_server(store_type, "sql_crud").await;
@@ -136,6 +201,62 @@ pub async fn test_mysql_crud(store_type: StorageType) {
guard.remove_all().await;
}
+pub async fn test_postgres_auth(store_type: StorageType) {
+ let user_provider = StaticUserProvider::try_from("cmd:greptime_user=greptime_pwd").unwrap();
+ let (addr, mut guard, fe_pg_server) =
+ setup_pg_server_with_user_provider(store_type, "sql_crud", Some(Arc::new(user_provider)))
+ .await;
+
+ // 1. no auth
+ let conn_re = PgPoolOptions::new()
+ .max_connections(2)
+ .connect(&format!("postgres://{addr}/public"))
+ .await;
+
+ assert!(conn_re.is_err());
+ assert_eq!(
+ conn_re
+ .err()
+ .unwrap()
+ .into_database_error()
+ .unwrap()
+ .downcast::<PgDatabaseError>()
+ .code(),
+ "28P01"
+ );
+
+ // 2. wrong pwd
+ let conn_re = PgPoolOptions::new()
+ .max_connections(2)
+ .connect(&format!("postgres://greptime_user:wrong_pwd@{addr}/public"))
+ .await;
+
+ assert!(conn_re.is_err());
+ assert_eq!(
+ conn_re
+ .err()
+ .unwrap()
+ .into_database_error()
+ .unwrap()
+ .downcast::<PgDatabaseError>()
+ .code(),
+ "28P01"
+ );
+
+    // 3. right pwd
+ let conn_re = PgPoolOptions::new()
+ .max_connections(2)
+ .connect(&format!(
+ "postgres://greptime_user:greptime_pwd@{addr}/public"
+ ))
+ .await;
+
+ assert!(conn_re.is_ok());
+
+ let _ = fe_pg_server.shutdown().await;
+ guard.remove_all().await;
+}
+
pub async fn test_postgres_crud(store_type: StorageType) {
let (addr, mut guard, fe_pg_server) = setup_pg_server(store_type, "sql_crud").await;
|
fix
|
auth in grpc (#2056)
|
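
The hard-coded `Authorization` headers in the HTTP auth test above are simply `basic` followed by the base64 of `user:password`. A small sketch of producing such a header value, assuming the `base64` crate's `Engine` API (version not pinned here):

    use base64::engine::general_purpose::STANDARD;
    use base64::Engine;

    fn basic_header(user: &str, pwd: &str) -> String {
        // "basic " + base64("user:password")
        format!("basic {}", STANDARD.encode(format!("{user}:{pwd}")))
    }

    fn main() {
        // matches the valid-credentials header used in the test above
        assert_eq!(
            basic_header("greptime_user", "greptime_pwd"),
            "basic Z3JlcHRpbWVfdXNlcjpncmVwdGltZV9wd2Q="
        );
    }
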
216f2200079df100da5fe68cbb3a569700fe20c0
|
2023-07-31 09:08:27
|
zyy17
|
fix: restore 'aarch64/compile-python.sh' to fix the failed release temporarily (#2046)
| false
|
diff --git a/docker/aarch64/compile-python.sh b/docker/aarch64/compile-python.sh
new file mode 100755
index 000000000000..ba0bef763de9
--- /dev/null
+++ b/docker/aarch64/compile-python.sh
@@ -0,0 +1,87 @@
+#!/usr/bin/env bash
+
+set -e
+
+# this script will download Python source code, compile it, and install it to /usr/local/lib
+# then use this python to compile cross-compiled python for aarch64
+ARCH=$1
+PYTHON_VERSION=3.10.10
+PYTHON_SOURCE_DIR=Python-${PYTHON_VERSION}
+PYTHON_INSTALL_PATH_AMD64=${PWD}/python-${PYTHON_VERSION}/amd64
+PYTHON_INSTALL_PATH_AARCH64=${PWD}/python-${PYTHON_VERSION}/aarch64
+
+function download_python_source_code() {
+ wget https://www.python.org/ftp/python/$PYTHON_VERSION/Python-$PYTHON_VERSION.tgz
+ tar -xvf Python-$PYTHON_VERSION.tgz
+}
+
+function compile_for_amd64_platform() {
+ mkdir -p "$PYTHON_INSTALL_PATH_AMD64"
+
+ echo "Compiling for amd64 platform..."
+
+ ./configure \
+ --prefix="$PYTHON_INSTALL_PATH_AMD64" \
+ --enable-shared \
+ ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes \
+ ac_cv_have_long_long_format=yes \
+ --disable-ipv6 ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no
+
+ make
+ make install
+}
+
+# explain Python compile options here a bit:
+# --enable-shared: enable building a shared Python library (default is no) but we do need it for calling from rust
+# CC, CXX, AR, LD, RANLIB: set the compiler, archiver, linker, and ranlib programs to use
+# build: the machine you are building on, host: the machine you will run the compiled program on
+# --with-system-ffi: build _ctypes module using an installed ffi library, see Doc/library/ctypes.rst; not used here (TODO: could remove)
+# ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes:
+# allow cross-compiled python to have -pthread set for CXX, see https://github.com/python/cpython/pull/22525
+# ac_cv_have_long_long_format=yes: target platform supports long long type
+# disable-ipv6: disable ipv6 support, we don't need it in here
+# ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no: disable pty support, we don't need it in here
+function compile_for_aarch64_platform() {
+ export LD_LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LD_LIBRARY_PATH
+ export LIBRARY_PATH=$PYTHON_INSTALL_PATH_AMD64/lib:$LIBRARY_PATH
+ export PATH=$PYTHON_INSTALL_PATH_AMD64/bin:$PATH
+
+ mkdir -p "$PYTHON_INSTALL_PATH_AARCH64"
+
+ echo "Compiling for aarch64 platform..."
+ echo "LD_LIBRARY_PATH: $LD_LIBRARY_PATH"
+ echo "LIBRARY_PATH: $LIBRARY_PATH"
+ echo "PATH: $PATH"
+
+ ./configure --build=x86_64-linux-gnu --host=aarch64-linux-gnu \
+ --prefix="$PYTHON_INSTALL_PATH_AARCH64" --enable-optimizations \
+ CC=aarch64-linux-gnu-gcc \
+ CXX=aarch64-linux-gnu-g++ \
+ AR=aarch64-linux-gnu-ar \
+ LD=aarch64-linux-gnu-ld \
+ RANLIB=aarch64-linux-gnu-ranlib \
+ --enable-shared \
+ ac_cv_pthread_is_default=no ac_cv_pthread=yes ac_cv_cxx_thread=yes \
+ ac_cv_have_long_long_format=yes \
+ --disable-ipv6 ac_cv_file__dev_ptmx=no ac_cv_file__dev_ptc=no
+
+ make
+ make altinstall
+}
+
+# Main script starts here.
+download_python_source_code
+
+# Enter the python source code directory.
+cd $PYTHON_SOURCE_DIR || exit 1
+
+# Build local python first, then build cross-compiled python.
+compile_for_amd64_platform
+
+# Clean the build directory.
+make clean && make distclean
+
+# Cross compile python for aarch64.
+if [ "$ARCH" = "aarch64-unknown-linux-gnu" ]; then
+ compile_for_aarch64_platform
+fi
|
fix
|
restore 'aarch64/compile-python.sh' to fix the failed release temporarily (#2046)
|
59b31372aa28f95f65c6ed4b62f8d07352059860
|
2025-02-05 08:54:22
|
discord9
|
feat(cli): add proxy options (#5459)
| false
|
diff --git a/src/cli/src/database.rs b/src/cli/src/database.rs
index 7152aac59270..24c4514fbc4c 100644
--- a/src/cli/src/database.rs
+++ b/src/cli/src/database.rs
@@ -17,6 +17,7 @@ use std::time::Duration;
use base64::engine::general_purpose;
use base64::Engine;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
+use common_error::ext::BoxedError;
use humantime::format_duration;
use serde_json::Value;
use servers::http::header::constants::GREPTIME_DB_HEADER_TIMEOUT;
@@ -24,7 +25,9 @@ use servers::http::result::greptime_result_v1::GreptimedbV1Response;
use servers::http::GreptimeQueryOutput;
use snafu::ResultExt;
-use crate::error::{HttpQuerySqlSnafu, Result, SerdeJsonSnafu};
+use crate::error::{
+ BuildClientSnafu, HttpQuerySqlSnafu, ParseProxyOptsSnafu, Result, SerdeJsonSnafu,
+};
#[derive(Debug, Clone)]
pub struct DatabaseClient {
@@ -32,6 +35,23 @@ pub struct DatabaseClient {
catalog: String,
auth_header: Option<String>,
timeout: Duration,
+ proxy: Option<reqwest::Proxy>,
+}
+
+pub fn parse_proxy_opts(
+ proxy: Option<String>,
+ no_proxy: bool,
+) -> std::result::Result<Option<reqwest::Proxy>, BoxedError> {
+ if no_proxy {
+ return Ok(None);
+ }
+ proxy
+ .map(|proxy| {
+ reqwest::Proxy::all(proxy)
+ .context(ParseProxyOptsSnafu)
+ .map_err(BoxedError::new)
+ })
+ .transpose()
}
impl DatabaseClient {
@@ -40,6 +60,7 @@ impl DatabaseClient {
catalog: String,
auth_basic: Option<String>,
timeout: Duration,
+ proxy: Option<reqwest::Proxy>,
) -> Self {
let auth_header = if let Some(basic) = auth_basic {
let encoded = general_purpose::STANDARD.encode(basic);
@@ -48,11 +69,18 @@ impl DatabaseClient {
None
};
+ if let Some(ref proxy) = proxy {
+ common_telemetry::info!("Using proxy: {:?}", proxy);
+ } else {
+ common_telemetry::info!("Using system proxy(if any)");
+ }
+
Self {
addr,
catalog,
auth_header,
timeout,
+ proxy,
}
}
@@ -67,7 +95,13 @@ impl DatabaseClient {
("db", format!("{}-{}", self.catalog, schema)),
("sql", sql.to_string()),
];
- let mut request = reqwest::Client::new()
+ let client = self
+ .proxy
+ .clone()
+ .map(|proxy| reqwest::Client::builder().proxy(proxy).build())
+ .unwrap_or_else(|| Ok(reqwest::Client::new()))
+ .context(BuildClientSnafu)?;
+ let mut request = client
.post(&url)
.form(¶ms)
.header("Content-Type", "application/x-www-form-urlencoded");
diff --git a/src/cli/src/error.rs b/src/cli/src/error.rs
index bf0b6342c1f9..1b79ee759be1 100644
--- a/src/cli/src/error.rs
+++ b/src/cli/src/error.rs
@@ -86,6 +86,22 @@ pub enum Error {
location: Location,
},
+ #[snafu(display("Failed to parse proxy options: {}", error))]
+ ParseProxyOpts {
+ #[snafu(source)]
+ error: reqwest::Error,
+ #[snafu(implicit)]
+ location: Location,
+ },
+
+ #[snafu(display("Failed to build reqwest client: {}", error))]
+ BuildClient {
+ #[snafu(implicit)]
+ location: Location,
+ #[snafu(source)]
+ error: reqwest::Error,
+ },
+
#[snafu(display("Invalid REPL command: {reason}"))]
InvalidReplCommand { reason: String },
@@ -278,7 +294,8 @@ impl ErrorExt for Error {
| Error::InitTimezone { .. }
| Error::ConnectEtcd { .. }
| Error::CreateDir { .. }
- | Error::EmptyResult { .. } => StatusCode::InvalidArguments,
+ | Error::EmptyResult { .. }
+ | Error::ParseProxyOpts { .. } => StatusCode::InvalidArguments,
Error::StartProcedureManager { source, .. }
| Error::StopProcedureManager { source, .. } => source.status_code(),
@@ -298,7 +315,8 @@ impl ErrorExt for Error {
Error::SerdeJson { .. }
| Error::FileIo { .. }
| Error::SpawnThread { .. }
- | Error::InitTlsProvider { .. } => StatusCode::Unexpected,
+ | Error::InitTlsProvider { .. }
+ | Error::BuildClient { .. } => StatusCode::Unexpected,
Error::Other { source, .. } => source.status_code(),
diff --git a/src/cli/src/export.rs b/src/cli/src/export.rs
index 91e4be22bb93..846e2a49adc6 100644
--- a/src/cli/src/export.rs
+++ b/src/cli/src/export.rs
@@ -28,7 +28,7 @@ use tokio::io::{AsyncWriteExt, BufWriter};
use tokio::sync::Semaphore;
use tokio::time::Instant;
-use crate::database::DatabaseClient;
+use crate::database::{parse_proxy_opts, DatabaseClient};
use crate::error::{EmptyResultSnafu, Error, FileIoSnafu, Result, SchemaNotFoundSnafu};
use crate::{database, Tool};
@@ -91,19 +91,30 @@ pub struct ExportCommand {
/// The default behavior will disable server-side default timeout(i.e. `0s`).
#[clap(long, value_parser = humantime::parse_duration)]
timeout: Option<Duration>,
+
+    /// The proxy server address to connect to; if set, it overrides the system proxy.
+ ///
+ /// The default behavior will use the system proxy if neither `proxy` nor `no_proxy` is set.
+ #[clap(long)]
+ proxy: Option<String>,
+
+    /// Disable any proxy; if set, no proxy will be used.
+ #[clap(long)]
+ no_proxy: bool,
}
impl ExportCommand {
pub async fn build(&self) -> std::result::Result<Box<dyn Tool>, BoxedError> {
let (catalog, schema) =
database::split_database(&self.database).map_err(BoxedError::new)?;
-
+ let proxy = parse_proxy_opts(self.proxy.clone(), self.no_proxy)?;
let database_client = DatabaseClient::new(
self.addr.clone(),
catalog.clone(),
self.auth_basic.clone(),
// Treats `None` as `0s` to disable server-side default timeout.
self.timeout.unwrap_or_default(),
+ proxy,
);
Ok(Box::new(Export {
diff --git a/src/cli/src/import.rs b/src/cli/src/import.rs
index f76560fbcd55..7cff2fd37f24 100644
--- a/src/cli/src/import.rs
+++ b/src/cli/src/import.rs
@@ -25,7 +25,7 @@ use snafu::{OptionExt, ResultExt};
use tokio::sync::Semaphore;
use tokio::time::Instant;
-use crate::database::DatabaseClient;
+use crate::database::{parse_proxy_opts, DatabaseClient};
use crate::error::{Error, FileIoSnafu, Result, SchemaNotFoundSnafu};
use crate::{database, Tool};
@@ -76,18 +76,30 @@ pub struct ImportCommand {
/// The default behavior will disable server-side default timeout(i.e. `0s`).
#[clap(long, value_parser = humantime::parse_duration)]
timeout: Option<Duration>,
+
+    /// The proxy server address to connect to; if set, it overrides the system proxy.
+ ///
+ /// The default behavior will use the system proxy if neither `proxy` nor `no_proxy` is set.
+ #[clap(long)]
+ proxy: Option<String>,
+
+    /// Disable any proxy; if set, no proxy will be used.
+ #[clap(long, default_value = "false")]
+ no_proxy: bool,
}
impl ImportCommand {
pub async fn build(&self) -> std::result::Result<Box<dyn Tool>, BoxedError> {
let (catalog, schema) =
database::split_database(&self.database).map_err(BoxedError::new)?;
+ let proxy = parse_proxy_opts(self.proxy.clone(), self.no_proxy)?;
let database_client = DatabaseClient::new(
self.addr.clone(),
catalog.clone(),
self.auth_basic.clone(),
// Treats `None` as `0s` to disable server-side default timeout.
self.timeout.unwrap_or_default(),
+ proxy,
);
Ok(Box::new(Import {
|
feat
|
add proxy options (#5459)
|
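
The proxy flags added above map onto three client configurations: an explicit proxy, the system proxy, or no proxy at all. A hedged sketch of that pattern with reqwest's public builder API; this is not the commit's exact code (the commit keeps `Client::new()` for the `no_proxy` case), and it assumes current `Proxy`/`ClientBuilder` signatures:

    fn build_client(proxy: Option<String>, no_proxy: bool) -> reqwest::Result<reqwest::Client> {
        if no_proxy {
            // explicitly disable proxies, including any system/environment proxy
            return reqwest::Client::builder().no_proxy().build();
        }
        match proxy {
            // an explicit proxy overrides whatever the environment configures
            Some(url) => reqwest::Client::builder()
                .proxy(reqwest::Proxy::all(url)?)
                .build(),
            // otherwise fall back to reqwest's default (the system proxy, if any)
            None => Ok(reqwest::Client::new()),
        }
    }
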
841744e4a367f2f9fd649d65f6bc1e4c69e03193
|
2022-05-09 15:20:49
|
Lei, Huang
|
fix: remove direct unwraps (#23)
| false
|
diff --git a/src/sql/src/error.rs b/src/sql/src/error.rs
index 5bf2768d9026..9188d8432b16 100644
--- a/src/sql/src/error.rs
+++ b/src/sql/src/error.rs
@@ -2,6 +2,7 @@ use std::any::Any;
use common_error::prelude::*;
use sqlparser::parser::ParserError;
+use sqlparser::tokenizer::TokenizerError;
/// SQL parser errors.
// Now the error in parser does not contains backtrace to avoid generating backtrace
@@ -29,6 +30,9 @@ pub enum Error {
// Syntax error from sql parser.
#[snafu(display("Syntax error, sql: {}, source: {}", sql, source))]
Syntax { sql: String, source: ParserError },
+
+ #[snafu(display("Tokenizer error, sql: {}, source: {}", sql, source))]
+ Tokenizer { sql: String, source: TokenizerError },
}
impl ErrorExt for Error {
@@ -37,7 +41,7 @@ impl ErrorExt for Error {
match self {
Unsupported { .. } => StatusCode::Unsupported,
- Unexpected { .. } | Syntax { .. } => StatusCode::InvalidSyntax,
+ Unexpected { .. } | Syntax { .. } | Tokenizer { .. } => StatusCode::InvalidSyntax,
}
}
@@ -94,4 +98,18 @@ mod tests {
};
assert_eq!(StatusCode::Unsupported, err.status_code());
}
+
+ #[test]
+ pub fn test_tokenizer_error() {
+ let err = Error::Tokenizer {
+ sql: "".to_string(),
+ source: sqlparser::tokenizer::TokenizerError {
+ message: "tokenizer error".to_string(),
+ col: 1,
+ line: 1,
+ },
+ };
+
+ assert_eq!(StatusCode::InvalidSyntax, err.status_code());
+ }
}
diff --git a/src/sql/src/parser.rs b/src/sql/src/parser.rs
index cc3660756deb..419d0a44d37e 100644
--- a/src/sql/src/parser.rs
+++ b/src/sql/src/parser.rs
@@ -4,7 +4,7 @@ use sqlparser::keywords::Keyword;
use sqlparser::parser::Parser;
use sqlparser::tokenizer::{Token, Tokenizer};
-use crate::error::{self, Error};
+use crate::error::{self, Error, TokenizerSnafu};
use crate::statements::show_database::SqlShowDatabase;
use crate::statements::show_kind::ShowKind;
use crate::statements::statement::Statement;
@@ -23,7 +23,7 @@ impl<'a> ParserContext<'a> {
let mut stmts: Vec<Statement> = Vec::new();
let mut tokenizer = Tokenizer::new(dialect, sql);
- let tokens: Vec<Token> = tokenizer.tokenize().unwrap();
+ let tokens: Vec<Token> = tokenizer.tokenize().context(TokenizerSnafu { sql })?;
let mut parser_ctx = ParserContext {
sql,
@@ -135,22 +135,21 @@ impl<'a> ParserContext<'a> {
))),
Token::Word(w) => match w.keyword {
Keyword::LIKE => Ok(Statement::ShowDatabases(SqlShowDatabase::new(
- ShowKind::Like(
- self.parser
- .parse_identifier()
- .context(error::UnexpectedSnafu {
- sql: self.sql,
- expected: "LIKE",
- actual: tok.to_string(),
- })
- .unwrap(),
- ),
+ ShowKind::Like(self.parser.parse_identifier().with_context(|_| {
+ error::UnexpectedSnafu {
+ sql: self.sql,
+ expected: "LIKE",
+ actual: tok.to_string(),
+ }
+ })?),
))),
Keyword::WHERE => Ok(Statement::ShowDatabases(SqlShowDatabase::new(
- ShowKind::Where(self.parser.parse_expr().context(error::UnexpectedSnafu {
- sql: self.sql.to_string(),
- expected: "some valid expression".to_string(),
- actual: self.peek_token_as_string(),
+ ShowKind::Where(self.parser.parse_expr().with_context(|_| {
+ error::UnexpectedSnafu {
+ sql: self.sql,
+ expected: "some valid expression",
+ actual: self.peek_token_as_string(),
+ }
})?),
))),
_ => self.unsupported(self.peek_token_as_string()),
|
fix
|
remove direct unwraps (#23)
|
52d627e37db26b9830024de9b005444207299436
|
2024-09-19 10:44:47
|
shuiyisong
|
chore: add log ingest interceptor (#4734)
| false
|
diff --git a/src/frontend/src/instance/log_handler.rs b/src/frontend/src/instance/log_handler.rs
index 7edda5ccf130..441501b242c1 100644
--- a/src/frontend/src/instance/log_handler.rs
+++ b/src/frontend/src/instance/log_handler.rs
@@ -20,7 +20,10 @@ use auth::{PermissionChecker, PermissionCheckerRef, PermissionReq};
use client::Output;
use common_error::ext::BoxedError;
use pipeline::{GreptimeTransformer, Pipeline, PipelineInfo, PipelineVersion};
-use servers::error::{AuthSnafu, ExecuteGrpcRequestSnafu, PipelineSnafu, Result as ServerResult};
+use servers::error::{
+ AuthSnafu, Error as ServerError, ExecuteGrpcRequestSnafu, PipelineSnafu, Result as ServerResult,
+};
+use servers::interceptor::{LogIngestInterceptor, LogIngestInterceptorRef};
use servers::query_handler::LogHandler;
use session::context::QueryContextRef;
use snafu::ResultExt;
@@ -40,6 +43,12 @@ impl LogHandler for Instance {
.check_permission(ctx.current_user(), PermissionReq::LogWrite)
.context(AuthSnafu)?;
+ let log = self
+ .plugins
+ .get::<LogIngestInterceptorRef<ServerError>>()
+ .as_ref()
+ .pre_ingest(log, ctx.clone())?;
+
self.handle_log_inserts(log, ctx).await
}
diff --git a/src/frontend/src/server.rs b/src/frontend/src/server.rs
index 9660b2762053..115002c3aba9 100644
--- a/src/frontend/src/server.rs
+++ b/src/frontend/src/server.rs
@@ -18,11 +18,13 @@ use std::sync::Arc;
use auth::UserProviderRef;
use common_base::Plugins;
use common_config::{Configurable, Mode};
+use servers::error::Error as ServerError;
use servers::grpc::builder::GrpcServerBuilder;
use servers::grpc::greptime_handler::GreptimeRequestHandler;
use servers::grpc::{GrpcOptions, GrpcServer, GrpcServerConfig};
use servers::http::event::LogValidatorRef;
use servers::http::{HttpServer, HttpServerBuilder};
+use servers::interceptor::LogIngestInterceptorRef;
use servers::metrics_handler::MetricsHandler;
use servers::mysql::server::{MysqlServer, MysqlSpawnConfig, MysqlSpawnRef};
use servers::postgres::PostgresServer;
@@ -81,8 +83,10 @@ where
Some(self.instance.clone()),
);
- builder = builder
- .with_log_ingest_handler(self.instance.clone(), self.plugins.get::<LogValidatorRef>());
+ let validator = self.plugins.get::<LogValidatorRef>();
+ let ingest_interceptor = self.plugins.get::<LogIngestInterceptorRef<ServerError>>();
+ builder =
+ builder.with_log_ingest_handler(self.instance.clone(), validator, ingest_interceptor);
if let Some(user_provider) = self.plugins.get::<UserProviderRef>() {
builder = builder.with_user_provider(user_provider);
diff --git a/src/pipeline/src/etl/processor/dissect.rs b/src/pipeline/src/etl/processor/dissect.rs
index dca88d38430d..f9925916fce4 100644
--- a/src/pipeline/src/etl/processor/dissect.rs
+++ b/src/pipeline/src/etl/processor/dissect.rs
@@ -15,7 +15,6 @@
use std::ops::Deref;
use ahash::{HashMap, HashMapExt, HashSet, HashSetExt};
-use common_telemetry::warn;
use itertools::Itertools;
use crate::etl::field::{Fields, InputFieldInfo, OneInputMultiOutputField};
@@ -742,11 +741,8 @@ impl DissectProcessor {
let chs = val.chars().collect::<Vec<char>>();
for pattern in &self.patterns {
- match self.process_pattern(&chs, pattern) {
- Ok(map) => return Ok(map),
- Err(e) => {
- warn!("dissect processor: {}", e);
- }
+ if let Ok(map) = self.process_pattern(&chs, pattern) {
+ return Ok(map);
}
}
diff --git a/src/servers/src/http.rs b/src/servers/src/http.rs
index 956a650fccae..18388998e792 100644
--- a/src/servers/src/http.rs
+++ b/src/servers/src/http.rs
@@ -63,6 +63,7 @@ use crate::http::prometheus::{
build_info_query, format_query, instant_query, label_values_query, labels_query, range_query,
series_query,
};
+use crate::interceptor::LogIngestInterceptorRef;
use crate::metrics::http_metrics_layer;
use crate::metrics_handler::MetricsHandler;
use crate::prometheus_handler::PrometheusHandlerRef;
@@ -596,11 +597,16 @@ impl HttpServerBuilder {
self,
handler: LogHandlerRef,
validator: Option<LogValidatorRef>,
+ ingest_interceptor: Option<LogIngestInterceptorRef<Error>>,
) -> Self {
Self {
router: self.router.nest(
&format!("/{HTTP_API_VERSION}/events"),
- HttpServer::route_log(handler, validator),
+ HttpServer::route_log(LogState {
+ log_handler: handler,
+ log_validator: validator,
+ ingest_interceptor,
+ }),
),
..self
}
@@ -739,10 +745,7 @@ impl HttpServer {
.with_state(metrics_handler)
}
- fn route_log<S>(
- log_handler: LogHandlerRef,
- log_validator: Option<LogValidatorRef>,
- ) -> Router<S> {
+ fn route_log<S>(log_state: LogState) -> Router<S> {
Router::new()
.route("/logs", routing::post(event::log_ingester))
.route(
@@ -759,10 +762,7 @@ impl HttpServer {
.layer(HandleErrorLayer::new(handle_error))
.layer(RequestDecompressionLayer::new()),
)
- .with_state(LogState {
- log_handler,
- log_validator,
- })
+ .with_state(log_state)
}
fn route_sql<S>(api_state: ApiState) -> ApiRouter<S> {
diff --git a/src/servers/src/http/event.rs b/src/servers/src/http/event.rs
index dbd7f1232a1b..f0a0902837a9 100644
--- a/src/servers/src/http/event.rs
+++ b/src/servers/src/http/event.rs
@@ -37,11 +37,13 @@ use session::context::{Channel, QueryContext, QueryContextRef};
use snafu::{ensure, OptionExt, ResultExt};
use crate::error::{
- InvalidParameterSnafu, ParseJsonSnafu, PipelineSnafu, Result, UnsupportedContentTypeSnafu,
+ Error, InvalidParameterSnafu, ParseJsonSnafu, PipelineSnafu, Result,
+ UnsupportedContentTypeSnafu,
};
use crate::http::greptime_manage_resp::GreptimedbManageResponse;
use crate::http::greptime_result_v1::GreptimedbV1Response;
use crate::http::HttpResponse;
+use crate::interceptor::{LogIngestInterceptor, LogIngestInterceptorRef};
use crate::metrics::{
METRIC_FAILURE_VALUE, METRIC_HTTP_LOGS_INGESTION_COUNTER, METRIC_HTTP_LOGS_INGESTION_ELAPSED,
METRIC_HTTP_LOGS_TRANSFORM_ELAPSED, METRIC_SUCCESS_VALUE,
@@ -378,6 +380,11 @@ pub async fn log_ingester(
query_ctx.set_channel(Channel::Http);
let query_ctx = Arc::new(query_ctx);
+ let value = log_state
+ .ingest_interceptor
+ .as_ref()
+ .pre_pipeline(value, query_ctx.clone())?;
+
ingest_logs_inner(
handler,
pipeline_name,
@@ -506,6 +513,7 @@ pub type LogValidatorRef = Arc<dyn LogValidator + 'static>;
pub struct LogState {
pub log_handler: LogHandlerRef,
pub log_validator: Option<LogValidatorRef>,
+ pub ingest_interceptor: Option<LogIngestInterceptorRef<Error>>,
}
#[cfg(test)]
diff --git a/src/servers/src/interceptor.rs b/src/servers/src/interceptor.rs
index e4aceeb4422d..d3478a56ea62 100644
--- a/src/servers/src/interceptor.rs
+++ b/src/servers/src/interceptor.rs
@@ -23,6 +23,7 @@ use common_error::ext::ErrorExt;
use common_query::Output;
use query::parser::PromQuery;
use query::plan::LogicalPlan;
+use serde_json::Value;
use session::context::QueryContextRef;
use sql::statements::statement::Statement;
@@ -397,3 +398,61 @@ impl<E: ErrorExt> PromStoreProtocolInterceptor for Option<PromStoreProtocolInter
}
}
}
+
+/// LogIngestInterceptor can track the life cycle of a log ingestion request
+/// and customize or abort its execution at a given point.
+pub trait LogIngestInterceptor {
+ type Error: ErrorExt;
+
+ /// Called before pipeline execution.
+ fn pre_pipeline(
+ &self,
+ values: Vec<Value>,
+ _query_ctx: QueryContextRef,
+ ) -> Result<Vec<Value>, Self::Error> {
+ Ok(values)
+ }
+
+ /// Called before insertion.
+ fn pre_ingest(
+ &self,
+ request: RowInsertRequests,
+ _query_ctx: QueryContextRef,
+ ) -> Result<RowInsertRequests, Self::Error> {
+ Ok(request)
+ }
+}
+
+pub type LogIngestInterceptorRef<E> =
+ Arc<dyn LogIngestInterceptor<Error = E> + Send + Sync + 'static>;
+
+impl<E> LogIngestInterceptor for Option<&LogIngestInterceptorRef<E>>
+where
+ E: ErrorExt,
+{
+ type Error = E;
+
+ fn pre_pipeline(
+ &self,
+ values: Vec<Value>,
+ query_ctx: QueryContextRef,
+ ) -> Result<Vec<Value>, Self::Error> {
+ if let Some(this) = self {
+ this.pre_pipeline(values, query_ctx)
+ } else {
+ Ok(values)
+ }
+ }
+
+ fn pre_ingest(
+ &self,
+ request: RowInsertRequests,
+ query_ctx: QueryContextRef,
+ ) -> Result<RowInsertRequests, Self::Error> {
+ if let Some(this) = self {
+ this.pre_ingest(request, query_ctx)
+ } else {
+ Ok(request)
+ }
+ }
+}
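For illustration only (this block is not part of the commit's diff): a plugin opts in by implementing the trait above and registering the trait object so that the frontend lookup `plugins.get::<LogIngestInterceptorRef<ServerError>>()` returns it. A minimal sketch follows; the import paths and the `Plugins::insert` call in the trailing comment are assumptions, not code from this commit.

use serde_json::Value;
use servers::error::Error as ServerError;
use servers::interceptor::{LogIngestInterceptor, LogIngestInterceptorRef};
use session::context::QueryContextRef;

/// Drops raw log values whose serialized form exceeds a size limit before the pipeline runs.
struct SizeLimitInterceptor {
    max_len: usize,
}

impl LogIngestInterceptor for SizeLimitInterceptor {
    type Error = ServerError;

    fn pre_pipeline(
        &self,
        values: Vec<Value>,
        _query_ctx: QueryContextRef,
    ) -> Result<Vec<Value>, Self::Error> {
        // Keep only the values that fit the limit; `pre_ingest` keeps its default pass-through.
        Ok(values
            .into_iter()
            .filter(|v| v.to_string().len() <= self.max_len)
            .collect())
    }
}

// Registration during plugin setup (the exact `Plugins` API is an assumption):
// plugins.insert::<LogIngestInterceptorRef<ServerError>>(
//     std::sync::Arc::new(SizeLimitInterceptor { max_len: 64 * 1024 }),
// );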
diff --git a/tests-integration/src/test_util.rs b/tests-integration/src/test_util.rs
index cf125a577634..a055527e2b65 100644
--- a/tests-integration/src/test_util.rs
+++ b/tests-integration/src/test_util.rs
@@ -424,7 +424,7 @@ pub async fn setup_test_http_app_with_frontend_and_user_provider(
ServerSqlQueryHandlerAdapter::arc(instance.instance.clone()),
Some(instance.instance.clone()),
)
- .with_log_ingest_handler(instance.instance.clone(), None)
+ .with_log_ingest_handler(instance.instance.clone(), None, None)
.with_otlp_handler(instance.instance.clone())
.with_greptime_config_options(instance.opts.to_toml().unwrap());
|
chore
|
add log ingest interceptor (#4734)
|
d45b04180c940545ef99c19659b4f55610f0a01d
|
2024-08-29 14:06:41
|
LFC
|
feat: pre-download the ingested sst (#4636)
| false
|
diff --git a/src/mito2/src/cache/write_cache.rs b/src/mito2/src/cache/write_cache.rs
index a95bcff15f45..a21cbb5f6b60 100644
--- a/src/mito2/src/cache/write_cache.rs
+++ b/src/mito2/src/cache/write_cache.rs
@@ -27,7 +27,10 @@ use snafu::ResultExt;
use crate::access_layer::{new_fs_cache_store, SstWriteRequest};
use crate::cache::file_cache::{FileCache, FileCacheRef, FileType, IndexKey, IndexValue};
use crate::error::{self, Result};
-use crate::metrics::{FLUSH_ELAPSED, UPLOAD_BYTES_TOTAL};
+use crate::metrics::{
+ FLUSH_ELAPSED, UPLOAD_BYTES_TOTAL, WRITE_CACHE_DOWNLOAD_BYTES_TOTAL,
+ WRITE_CACHE_DOWNLOAD_ELAPSED,
+};
use crate::sst::index::intermediate::IntermediateManager;
use crate::sst::index::puffin_manager::PuffinManagerFactory;
use crate::sst::index::IndexerBuilder;
@@ -166,6 +169,79 @@ impl WriteCache {
Ok(Some(sst_info))
}
+ /// Downloads a file in `remote_path` from the remote object store to the local cache
+ /// (specified by `index_key`).
+ pub(crate) async fn download(
+ &self,
+ index_key: IndexKey,
+ remote_path: &str,
+ remote_store: &ObjectStore,
+ ) -> Result<()> {
+ const DOWNLOAD_READER_CONCURRENCY: usize = 8;
+ const DOWNLOAD_READER_CHUNK_SIZE: ReadableSize = ReadableSize::mb(8);
+
+ let file_type = index_key.file_type;
+ let timer = WRITE_CACHE_DOWNLOAD_ELAPSED
+ .with_label_values(&[match file_type {
+ FileType::Parquet => "download_parquet",
+ FileType::Puffin => "download_puffin",
+ }])
+ .start_timer();
+
+ let remote_metadata = remote_store
+ .stat(remote_path)
+ .await
+ .context(error::OpenDalSnafu)?;
+ let reader = remote_store
+ .reader_with(remote_path)
+ .concurrent(DOWNLOAD_READER_CONCURRENCY)
+ .chunk(DOWNLOAD_READER_CHUNK_SIZE.as_bytes() as usize)
+ .await
+ .context(error::OpenDalSnafu)?
+ .into_futures_async_read(0..remote_metadata.content_length())
+ .await
+ .context(error::OpenDalSnafu)?;
+
+ let cache_path = self.file_cache.cache_file_path(index_key);
+ let mut writer = self
+ .file_cache
+ .local_store()
+ .writer(&cache_path)
+ .await
+ .context(error::OpenDalSnafu)?
+ .into_futures_async_write();
+
+ let region_id = index_key.region_id;
+ let file_id = index_key.file_id;
+ let bytes_written =
+ futures::io::copy(reader, &mut writer)
+ .await
+ .context(error::DownloadSnafu {
+ region_id,
+ file_id,
+ file_type,
+ })?;
+ writer.close().await.context(error::DownloadSnafu {
+ region_id,
+ file_id,
+ file_type,
+ })?;
+
+ WRITE_CACHE_DOWNLOAD_BYTES_TOTAL.inc_by(bytes_written);
+
+ let elapsed = timer.stop_and_record();
+ debug!(
+ "Successfully download file '{}' to local '{}', file size: {}, region: {}, cost: {:?}s",
+ remote_path, cache_path, bytes_written, region_id, elapsed,
+ );
+
+ let index_value = IndexValue {
+ file_size: bytes_written as _,
+ };
+ self.file_cache.put(index_key, index_value).await;
+ Ok(())
+ }
+
/// Uploads a Parquet file or a Puffin file to the remote object store.
async fn upload(
&self,
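A sketch of the intended call pattern for the new `download` method, adapted (and simplified) from the worker change later in this commit: build the cache `IndexKey`, resolve the remote SST path, download it into the write cache, then warm the parquet metadata cache from the now-local file.

let index_key = IndexKey::new(region_id, file_meta.file_id, FileType::Parquet);
let remote_path = location::sst_file_path(layer.region_dir(), file_meta.file_id);

if write_cache
    .download(index_key, &remote_path, layer.object_store())
    .await
    .is_ok()
{
    // The parquet file is already local, so filling the metadata cache here is cheap.
    let _ = write_cache
        .file_cache()
        .get_parquet_meta_data(index_key)
        .await;
}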
diff --git a/src/mito2/src/engine/edit_region_test.rs b/src/mito2/src/engine/edit_region_test.rs
index 8dd682a37269..b13691fb856a 100644
--- a/src/mito2/src/engine/edit_region_test.rs
+++ b/src/mito2/src/engine/edit_region_test.rs
@@ -12,21 +12,96 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::sync::Arc;
+use std::sync::{Arc, Mutex};
+use std::time::Duration;
use object_store::ObjectStore;
use store_api::region_engine::RegionEngine;
use store_api::region_request::RegionRequest;
use store_api::storage::RegionId;
-use tokio::sync::Barrier;
+use tokio::sync::{oneshot, Barrier};
use crate::config::MitoConfig;
+use crate::engine::listener::EventListener;
use crate::engine::MitoEngine;
use crate::manifest::action::RegionEdit;
use crate::region::MitoRegionRef;
use crate::sst::file::{FileId, FileMeta};
use crate::test_util::{CreateRequestBuilder, TestEnv};
+#[tokio::test]
+async fn test_edit_region_fill_cache() {
+ let mut env = TestEnv::new();
+
+ struct EditRegionListener {
+ tx: Mutex<Option<oneshot::Sender<FileId>>>,
+ }
+
+ impl EventListener for EditRegionListener {
+ fn on_file_cache_filled(&self, file_id: FileId) {
+ let mut tx = self.tx.lock().unwrap();
+ tx.take().unwrap().send(file_id).unwrap();
+ }
+ }
+
+ let (tx, rx) = oneshot::channel();
+ let engine = env
+ .create_engine_with(
+ MitoConfig {
+ // Write cache must be enabled to download the ingested SST file.
+ enable_experimental_write_cache: true,
+ ..Default::default()
+ },
+ None,
+ Some(Arc::new(EditRegionListener {
+ tx: Mutex::new(Some(tx)),
+ })),
+ )
+ .await;
+
+ let region_id = RegionId::new(1, 1);
+ engine
+ .handle_request(
+ region_id,
+ RegionRequest::Create(CreateRequestBuilder::new().build()),
+ )
+ .await
+ .unwrap();
+ let region = engine.get_region(region_id).unwrap();
+
+ let file_id = FileId::random();
+ // Simulating the ingestion of an SST file.
+ env.get_object_store()
+ .unwrap()
+ .write(
+ &format!("{}/{}.parquet", region.region_dir(), file_id),
+ b"x".as_slice(),
+ )
+ .await
+ .unwrap();
+
+ let edit = RegionEdit {
+ files_to_add: vec![FileMeta {
+ region_id: region.region_id,
+ file_id,
+ level: 0,
+ ..Default::default()
+ }],
+ files_to_remove: vec![],
+ compaction_time_window: None,
+ flushed_entry_id: None,
+ flushed_sequence: None,
+ };
+ engine.edit_region(region.region_id, edit).await.unwrap();
+
+ // Asserts that the background downloading of the SST succeeded.
+ let actual = tokio::time::timeout(Duration::from_secs(9), rx)
+ .await
+ .unwrap()
+ .unwrap();
+ assert_eq!(file_id, actual);
+}
+
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn test_edit_region_concurrently() {
const EDITS_PER_TASK: usize = 10;
diff --git a/src/mito2/src/engine/listener.rs b/src/mito2/src/engine/listener.rs
index ee6966270147..beea4add1ea8 100644
--- a/src/mito2/src/engine/listener.rs
+++ b/src/mito2/src/engine/listener.rs
@@ -22,6 +22,8 @@ use common_telemetry::info;
use store_api::storage::RegionId;
use tokio::sync::Notify;
+use crate::sst::file::FileId;
+
/// Mito engine background event listener.
#[async_trait]
pub trait EventListener: Send + Sync {
@@ -61,6 +63,9 @@ pub trait EventListener: Send + Sync {
fn on_recv_requests(&self, request_num: usize) {
let _ = request_num;
}
+
+ /// Notifies the listener that the file cache is filled when, for example, editing a region.
+ fn on_file_cache_filled(&self, _file_id: FileId) {}
}
pub type EventListenerRef = Arc<dyn EventListener>;
diff --git a/src/mito2/src/error.rs b/src/mito2/src/error.rs
index 1f60eee8831e..2038656a2333 100644
--- a/src/mito2/src/error.rs
+++ b/src/mito2/src/error.rs
@@ -639,6 +639,22 @@ pub enum Error {
location: Location,
},
+ #[snafu(display(
+ "Failed to download file, region_id: {}, file_id: {}, file_type: {:?}",
+ region_id,
+ file_id,
+ file_type,
+ ))]
+ Download {
+ region_id: RegionId,
+ file_id: FileId,
+ file_type: FileType,
+ #[snafu(source)]
+ error: std::io::Error,
+ #[snafu(implicit)]
+ location: Location,
+ },
+
#[snafu(display(
"Failed to upload file, region_id: {}, file_id: {}, file_type: {:?}",
region_id,
@@ -965,7 +981,7 @@ impl ErrorExt for Error {
FilterRecordBatch { source, .. } => source.status_code(),
- Upload { .. } => StatusCode::StorageUnavailable,
+ Download { .. } | Upload { .. } => StatusCode::StorageUnavailable,
ChecksumMismatch { .. } => StatusCode::Unexpected,
RegionStopped { .. } => StatusCode::RegionNotReady,
TimeRangePredicateOverflow { .. } => StatusCode::InvalidArguments,
diff --git a/src/mito2/src/metrics.rs b/src/mito2/src/metrics.rs
index 963330948955..355c0fba4714 100644
--- a/src/mito2/src/metrics.rs
+++ b/src/mito2/src/metrics.rs
@@ -189,6 +189,17 @@ lazy_static! {
&[TYPE_LABEL]
)
.unwrap();
+ /// Download bytes counter in the write cache.
+ pub static ref WRITE_CACHE_DOWNLOAD_BYTES_TOTAL: IntCounter = register_int_counter!(
+ "mito_write_cache_download_bytes_total",
+ "mito write cache download bytes total",
+ ).unwrap();
+ /// Timer of the downloading task in the write cache.
+ pub static ref WRITE_CACHE_DOWNLOAD_ELAPSED: HistogramVec = register_histogram_vec!(
+ "mito_write_cache_download_elapsed",
+ "mito write cache download elapsed",
+ &[TYPE_LABEL],
+ ).unwrap();
/// Upload bytes counter.
pub static ref UPLOAD_BYTES_TOTAL: IntCounter = register_int_counter!(
"mito_upload_bytes_total",
diff --git a/src/mito2/src/worker.rs b/src/mito2/src/worker.rs
index 3aff7764f082..242d48c45f8a 100644
--- a/src/mito2/src/worker.rs
+++ b/src/mito2/src/worker.rs
@@ -57,6 +57,7 @@ use crate::request::{
BackgroundNotify, DdlRequest, SenderDdlRequest, SenderWriteRequest, WorkerRequest,
};
use crate::schedule::scheduler::{LocalScheduler, SchedulerRef};
+use crate::sst::file::FileId;
use crate::sst::index::intermediate::IntermediateManager;
use crate::sst::index::puffin_manager::PuffinManagerFactory;
use crate::time_provider::{StdTimeProvider, TimeProviderRef};
@@ -950,6 +951,13 @@ impl WorkerListener {
// Avoid compiler warning.
let _ = request_num;
}
+
+ pub(crate) fn on_file_cache_filled(&self, _file_id: FileId) {
+ #[cfg(any(test, feature = "test"))]
+ if let Some(listener) = &self.listener {
+ listener.on_file_cache_filled(_file_id);
+ }
+ }
}
#[cfg(test)]
diff --git a/src/mito2/src/worker/handle_manifest.rs b/src/mito2/src/worker/handle_manifest.rs
index 1f8dfac60816..b9b20423d2a4 100644
--- a/src/mito2/src/worker/handle_manifest.rs
+++ b/src/mito2/src/worker/handle_manifest.rs
@@ -21,6 +21,8 @@ use std::collections::{HashMap, VecDeque};
use common_telemetry::{info, warn};
use store_api::storage::RegionId;
+use crate::cache::file_cache::{FileType, IndexKey};
+use crate::cache::CacheManagerRef;
use crate::error::{RegionBusySnafu, RegionNotFoundSnafu, Result};
use crate::manifest::action::{
RegionChange, RegionEdit, RegionMetaAction, RegionMetaActionList, RegionTruncate,
@@ -30,7 +32,8 @@ use crate::request::{
BackgroundNotify, OptionOutputTx, RegionChangeResult, RegionEditRequest, RegionEditResult,
TruncateResult, WorkerRequest,
};
-use crate::worker::RegionWorkerLoop;
+use crate::sst::location;
+use crate::worker::{RegionWorkerLoop, WorkerListener};
pub(crate) type RegionEditQueues = HashMap<RegionId, RegionEditQueue>;
@@ -105,10 +108,12 @@ impl<S> RegionWorkerLoop<S> {
}
let request_sender = self.sender.clone();
+ let cache_manager = self.cache_manager.clone();
+ let listener = self.listener.clone();
// Now the region is in editing state.
// Updates manifest in background.
common_runtime::spawn_global(async move {
- let result = edit_region(&region, edit.clone()).await;
+ let result = edit_region(&region, edit.clone(), cache_manager, listener).await;
let notify = WorkerRequest::Background {
region_id,
notify: BackgroundNotify::RegionEdit(RegionEditResult {
@@ -286,8 +291,40 @@ impl<S> RegionWorkerLoop<S> {
}
/// Checks the edit, writes and applies it.
-async fn edit_region(region: &MitoRegionRef, edit: RegionEdit) -> Result<()> {
+async fn edit_region(
+ region: &MitoRegionRef,
+ edit: RegionEdit,
+ cache_manager: CacheManagerRef,
+ listener: WorkerListener,
+) -> Result<()> {
let region_id = region.region_id;
+ if let Some(write_cache) = cache_manager.write_cache() {
+ for file_meta in &edit.files_to_add {
+ let write_cache = write_cache.clone();
+ let layer = region.access_layer.clone();
+ let listener = listener.clone();
+
+ let index_key = IndexKey::new(region_id, file_meta.file_id, FileType::Parquet);
+ let remote_path = location::sst_file_path(layer.region_dir(), file_meta.file_id);
+ common_runtime::spawn_global(async move {
+ if write_cache
+ .download(index_key, &remote_path, layer.object_store())
+ .await
+ .is_ok()
+ {
+ // Triggers the filling of the parquet metadata cache.
+ // The parquet file is already downloaded.
+ let _ = write_cache
+ .file_cache()
+ .get_parquet_meta_data(index_key)
+ .await;
+
+ listener.on_file_cache_filled(index_key.file_id);
+ }
+ });
+ }
+ }
+
info!("Applying {edit:?} to region {}", region_id);
let action_list = RegionMetaActionList::with_action(RegionMetaAction::Edit(edit));
|
feat
|
pre-download the ingested sst (#4636)
|
a63fa76b7b40b61c572d0fc1327edfd733d7428a
|
2023-10-25 11:58:32
|
tison
|
docs: Update README.md (#2653)
| false
|
diff --git a/README.md b/README.md
index eb42d7d1ce55..94a45dad3b36 100644
--- a/README.md
+++ b/README.md
@@ -184,6 +184,6 @@ Please refer to [contribution guidelines](CONTRIBUTING.md) for more information.
## Acknowledgement
- GreptimeDB uses [Apache Arrow](https://arrow.apache.org/) as the memory model and [Apache Parquet](https://parquet.apache.org/) as the persistent file format.
- GreptimeDB's query engine is powered by [Apache Arrow DataFusion](https://github.com/apache/arrow-datafusion).
-- [OpenDAL](https://github.com/datafuselabs/opendal) from [Datafuse Labs](https://github.com/datafuselabs) gives GreptimeDB a very general and elegant data access abstraction layer.
-- GreptimeDB’s meta service is based on [etcd](https://etcd.io/).
+- [Apache OpenDAL (incubating)](https://opendal.apache.org) gives GreptimeDB a very general and elegant data access abstraction layer.
+- GreptimeDB's meta service is based on [etcd](https://etcd.io/).
- GreptimeDB uses [RustPython](https://github.com/RustPython/RustPython) for experimental embedded python scripting.
|
docs
|
Update README.md (#2653)
|
25645a33036266b4ea29009b92596bd7813354b7
|
2025-03-12 19:16:56
|
Yingwen
|
feat: expose virtual_host_style config for s3 storage (#5696)
| false
|
diff --git a/config/datanode.example.toml b/config/datanode.example.toml
index 52eaea91903c..a346975c0d88 100644
--- a/config/datanode.example.toml
+++ b/config/datanode.example.toml
@@ -231,6 +231,7 @@ overwrite_entry_start_id = false
# secret_access_key = "123456"
# endpoint = "https://s3.amazonaws.com"
# region = "us-west-2"
+# enable_virtual_host_style = false
# Example of using Oss as the storage.
# [storage]
diff --git a/config/standalone.example.toml b/config/standalone.example.toml
index c42966e41075..63540ee4b7e4 100644
--- a/config/standalone.example.toml
+++ b/config/standalone.example.toml
@@ -318,6 +318,7 @@ retry_delay = "500ms"
# secret_access_key = "123456"
# endpoint = "https://s3.amazonaws.com"
# region = "us-west-2"
+# enable_virtual_host_style = false
# Example of using Oss as the storage.
# [storage]
diff --git a/src/datanode/src/config.rs b/src/datanode/src/config.rs
index c873e6ba7c3d..a9697f22be28 100644
--- a/src/datanode/src/config.rs
+++ b/src/datanode/src/config.rs
@@ -171,6 +171,10 @@ pub struct S3Config {
pub secret_access_key: SecretString,
pub endpoint: Option<String>,
pub region: Option<String>,
+ /// Enable virtual host style so that opendal will send API requests in virtual host style instead of path style.
+ /// By default, opendal sends API requests to https://s3.us-east-1.amazonaws.com/bucket_name
+ /// When enabled, opendal sends API requests to https://bucket_name.s3.us-east-1.amazonaws.com
+ pub enable_virtual_host_style: bool,
#[serde(flatten)]
pub cache: ObjectStorageCacheConfig,
pub http_client: HttpClientConfig,
@@ -185,6 +189,7 @@ impl PartialEq for S3Config {
&& self.secret_access_key.expose_secret() == other.secret_access_key.expose_secret()
&& self.endpoint == other.endpoint
&& self.region == other.region
+ && self.enable_virtual_host_style == other.enable_virtual_host_style
&& self.cache == other.cache
&& self.http_client == other.http_client
}
@@ -289,6 +294,7 @@ impl Default for S3Config {
root: String::default(),
access_key_id: SecretString::from(String::default()),
secret_access_key: SecretString::from(String::default()),
+ enable_virtual_host_style: false,
endpoint: Option::default(),
region: Option::default(),
cache: ObjectStorageCacheConfig::default(),
diff --git a/src/datanode/src/store/s3.rs b/src/datanode/src/store/s3.rs
index 946558328e89..0e51a071aee8 100644
--- a/src/datanode/src/store/s3.rs
+++ b/src/datanode/src/store/s3.rs
@@ -41,10 +41,13 @@ pub(crate) async fn new_s3_object_store(s3_config: &S3Config) -> Result<ObjectSt
if s3_config.endpoint.is_some() {
builder = builder.endpoint(s3_config.endpoint.as_ref().unwrap());
- };
+ }
if s3_config.region.is_some() {
builder = builder.region(s3_config.region.as_ref().unwrap());
- };
+ }
+ if s3_config.enable_virtual_host_style {
+ builder = builder.enable_virtual_host_style();
+ }
Ok(ObjectStore::new(builder)
.context(error::InitBackendSnafu)?
diff --git a/tests-integration/tests/http.rs b/tests-integration/tests/http.rs
index 40a8547db071..c6fd59a63169 100644
--- a/tests-integration/tests/http.rs
+++ b/tests-integration/tests/http.rs
@@ -1098,6 +1098,7 @@ fn drop_lines_with_inconsistent_results(input: String) -> String {
"root =",
"endpoint =",
"region =",
+ "enable_virtual_host_style =",
"cache_path =",
"cache_capacity =",
"sas_token =",
|
feat
|
expose virtual_host_style config for s3 storage (#5696)
|
9d8f72d6111eab2496db3c016b4d3997deaf2186
|
2024-05-08 12:00:28
|
Yingwen
|
fix: add data type to vector cache key (#3876)
| false
|
diff --git a/src/mito2/src/cache.rs b/src/mito2/src/cache.rs
index 33e72e1b7011..892ee4fb9274 100644
--- a/src/mito2/src/cache.rs
+++ b/src/mito2/src/cache.rs
@@ -29,7 +29,7 @@ use datatypes::vectors::VectorRef;
use moka::sync::Cache;
use parquet::column::page::Page;
use parquet::file::metadata::ParquetMetaData;
-use store_api::storage::RegionId;
+use store_api::storage::{ConcreteDataType, RegionId};
use crate::cache::cache_size::parquet_meta_size;
use crate::cache::file_cache::{FileType, IndexKey};
@@ -123,16 +123,21 @@ impl CacheManager {
}
/// Gets a vector with repeated value for specific `key`.
- pub fn get_repeated_vector(&self, key: &Value) -> Option<VectorRef> {
+ pub fn get_repeated_vector(
+ &self,
+ data_type: &ConcreteDataType,
+ value: &Value,
+ ) -> Option<VectorRef> {
self.vector_cache.as_ref().and_then(|vector_cache| {
- let value = vector_cache.get(key);
+ let value = vector_cache.get(&(data_type.clone(), value.clone()));
update_hit_miss(value, VECTOR_TYPE)
})
}
/// Puts a vector with repeated value into the cache.
- pub fn put_repeated_vector(&self, key: Value, vector: VectorRef) {
+ pub fn put_repeated_vector(&self, value: Value, vector: VectorRef) {
if let Some(cache) = &self.vector_cache {
+ let key = (vector.data_type(), value);
CACHE_BYTES
.with_label_values(&[VECTOR_TYPE])
.add(vector_cache_weight(&key, &vector).into());
@@ -249,9 +254,9 @@ fn meta_cache_weight(k: &SstMetaKey, v: &Arc<ParquetMetaData>) -> u32 {
(k.estimated_size() + parquet_meta_size(v)) as u32
}
-fn vector_cache_weight(_k: &Value, v: &VectorRef) -> u32 {
+fn vector_cache_weight(_k: &(ConcreteDataType, Value), v: &VectorRef) -> u32 {
// We ignore the heap size of `Value`.
- (mem::size_of::<Value>() + v.memory_size()) as u32
+ (mem::size_of::<ConcreteDataType>() + mem::size_of::<Value>() + v.memory_size()) as u32
}
fn page_cache_weight(k: &PageKey, v: &Arc<PageValue>) -> u32 {
@@ -323,7 +328,7 @@ type SstMetaCache = Cache<SstMetaKey, Arc<ParquetMetaData>>;
/// Maps [Value] to a vector that holds this value repeatedly.
///
/// e.g. `"hello" => ["hello", "hello", "hello"]`
-type VectorCache = Cache<Value, VectorRef>;
+type VectorCache = Cache<(ConcreteDataType, Value), VectorRef>;
/// Maps (region, file, row group, column) to [PageValue].
type PageCache = Cache<PageKey, Arc<PageValue>>;
@@ -353,7 +358,9 @@ mod tests {
let value = Value::Int64(10);
let vector: VectorRef = Arc::new(Int64Vector::from_slice([10, 10, 10, 10]));
cache.put_repeated_vector(value.clone(), vector.clone());
- assert!(cache.get_repeated_vector(&value).is_none());
+ assert!(cache
+ .get_repeated_vector(&ConcreteDataType::int64_datatype(), &value)
+ .is_none());
let key = PageKey {
region_id,
@@ -394,10 +401,14 @@ mod tests {
fn test_repeated_vector_cache() {
let cache = CacheManager::builder().vector_cache_size(4096).build();
let value = Value::Int64(10);
- assert!(cache.get_repeated_vector(&value).is_none());
+ assert!(cache
+ .get_repeated_vector(&ConcreteDataType::int64_datatype(), &value)
+ .is_none());
let vector: VectorRef = Arc::new(Int64Vector::from_slice([10, 10, 10, 10]));
cache.put_repeated_vector(value.clone(), vector.clone());
- let cached = cache.get_repeated_vector(&value).unwrap();
+ let cached = cache
+ .get_repeated_vector(&ConcreteDataType::int64_datatype(), &value)
+ .unwrap();
assert_eq!(vector, cached);
}
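For intuition, an illustrative sketch (not from the commit) of what the extra key component buys: two columns of different logical types can share the same `Value` — most obviously `Value::Null` on nullable tags — and with a plain `Value` key they would collide, so a lookup could hand back a vector of the wrong type. Keying on the data type as well keeps such entries apart. The `StringVector` construction below is an assumption about the datatypes API.

let cache = CacheManager::builder().vector_cache_size(4096).build();

// A repeated-null vector cached for a *string* tag column.
// (StringVector::from(Vec<Option<String>>) is assumed to exist.)
let string_nulls: VectorRef = Arc::new(StringVector::from(vec![None::<String>; 4]));
cache.put_repeated_vector(Value::Null, string_nulls);

// The same Value::Null looked up for an *int64* column now misses instead of
// returning the string vector, because the cache key is (data type, value).
assert!(cache
    .get_repeated_vector(&ConcreteDataType::int64_datatype(), &Value::Null)
    .is_none());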
diff --git a/src/mito2/src/engine/basic_test.rs b/src/mito2/src/engine/basic_test.rs
index 64c13778e3d4..a0f6b6df441b 100644
--- a/src/mito2/src/engine/basic_test.rs
+++ b/src/mito2/src/engine/basic_test.rs
@@ -17,13 +17,15 @@
use std::collections::HashMap;
use api::v1::value::ValueData;
-use api::v1::Rows;
+use api::v1::{Rows, SemanticType};
use common_base::readable_size::ReadableSize;
use common_error::ext::ErrorExt;
use common_error::status_code::StatusCode;
use common_recordbatch::RecordBatches;
use datatypes::prelude::ConcreteDataType;
-use store_api::region_request::{RegionOpenRequest, RegionPutRequest};
+use datatypes::schema::ColumnSchema;
+use store_api::metadata::ColumnMetadata;
+use store_api::region_request::{RegionCreateRequest, RegionOpenRequest, RegionPutRequest};
use store_api::storage::RegionId;
use super::*;
@@ -598,3 +600,102 @@ async fn test_engine_with_write_cache() {
+-------+---------+---------------------+";
assert_eq!(expected, batches.pretty_print().unwrap());
}
+
+#[tokio::test]
+async fn test_cache_null_primary_key() {
+ let mut env = TestEnv::new();
+ let engine = env
+ .create_engine(MitoConfig {
+ vector_cache_size: ReadableSize::mb(32),
+ ..Default::default()
+ })
+ .await;
+
+ let region_id = RegionId::new(1, 1);
+ let column_metadatas = vec![
+ ColumnMetadata {
+ column_schema: ColumnSchema::new("tag_0", ConcreteDataType::string_datatype(), true),
+ semantic_type: SemanticType::Tag,
+ column_id: 1,
+ },
+ ColumnMetadata {
+ column_schema: ColumnSchema::new("tag_1", ConcreteDataType::int64_datatype(), true),
+ semantic_type: SemanticType::Tag,
+ column_id: 2,
+ },
+ ColumnMetadata {
+ column_schema: ColumnSchema::new("field_0", ConcreteDataType::float64_datatype(), true),
+ semantic_type: SemanticType::Field,
+ column_id: 3,
+ },
+ ColumnMetadata {
+ column_schema: ColumnSchema::new(
+ "ts",
+ ConcreteDataType::timestamp_millisecond_datatype(),
+ false,
+ ),
+ semantic_type: SemanticType::Timestamp,
+ column_id: 4,
+ },
+ ];
+ let request = RegionCreateRequest {
+ engine: MITO_ENGINE_NAME.to_string(),
+ column_metadatas,
+ primary_key: vec![1, 2],
+ options: HashMap::new(),
+ region_dir: "test".to_string(),
+ };
+
+ let column_schemas = rows_schema(&request);
+ engine
+ .handle_request(region_id, RegionRequest::Create(request))
+ .await
+ .unwrap();
+
+ let rows = Rows {
+ schema: column_schemas,
+ rows: vec![
+ api::v1::Row {
+ values: vec![
+ api::v1::Value {
+ value_data: Some(ValueData::StringValue("1".to_string())),
+ },
+ api::v1::Value { value_data: None },
+ api::v1::Value {
+ value_data: Some(ValueData::F64Value(10.0)),
+ },
+ api::v1::Value {
+ value_data: Some(ValueData::TimestampMillisecondValue(1000)),
+ },
+ ],
+ },
+ api::v1::Row {
+ values: vec![
+ api::v1::Value { value_data: None },
+ api::v1::Value {
+ value_data: Some(ValueData::I64Value(200)),
+ },
+ api::v1::Value {
+ value_data: Some(ValueData::F64Value(20.0)),
+ },
+ api::v1::Value {
+ value_data: Some(ValueData::TimestampMillisecondValue(2000)),
+ },
+ ],
+ },
+ ],
+ };
+ put_rows(&engine, region_id, rows).await;
+
+ let request = ScanRequest::default();
+ let stream = engine.handle_query(region_id, request).await.unwrap();
+ let batches = RecordBatches::try_collect(stream).await.unwrap();
+ let expected = "\
++-------+-------+---------+---------------------+
+| tag_0 | tag_1 | field_0 | ts |
++-------+-------+---------+---------------------+
+| | 200 | 20.0 | 1970-01-01T00:00:02 |
+| 1 | | 10.0 | 1970-01-01T00:00:01 |
++-------+-------+---------+---------------------+";
+ assert_eq!(expected, batches.pretty_print().unwrap());
+}
diff --git a/src/mito2/src/read/projection.rs b/src/mito2/src/read/projection.rs
index a6fab7267103..375248d4186c 100644
--- a/src/mito2/src/read/projection.rs
+++ b/src/mito2/src/read/projection.rs
@@ -241,7 +241,7 @@ fn repeated_vector_with_cache(
num_rows: usize,
cache_manager: &CacheManager,
) -> common_recordbatch::error::Result<VectorRef> {
- if let Some(vector) = cache_manager.get_repeated_vector(value) {
+ if let Some(vector) = cache_manager.get_repeated_vector(data_type, value) {
// Tries to get the vector from cache manager. If the vector doesn't
// have enough length, creates a new one.
match vector.len().cmp(&num_rows) {
@@ -366,9 +366,15 @@ mod tests {
+---------------------+----+----+----+----+";
assert_eq!(expect, print_record_batch(record_batch));
- assert!(cache.get_repeated_vector(&Value::Int64(1)).is_some());
- assert!(cache.get_repeated_vector(&Value::Int64(2)).is_some());
- assert!(cache.get_repeated_vector(&Value::Int64(3)).is_none());
+ assert!(cache
+ .get_repeated_vector(&ConcreteDataType::int64_datatype(), &Value::Int64(1))
+ .is_some());
+ assert!(cache
+ .get_repeated_vector(&ConcreteDataType::int64_datatype(), &Value::Int64(2))
+ .is_some());
+ assert!(cache
+ .get_repeated_vector(&ConcreteDataType::int64_datatype(), &Value::Int64(3))
+ .is_none());
let record_batch = mapper.convert(&batch, Some(&cache)).unwrap();
assert_eq!(expect, print_record_batch(record_batch));
}
diff --git a/tests/cases/standalone/common/insert/nullable_tag.result b/tests/cases/standalone/common/insert/nullable_tag.result
new file mode 100644
index 000000000000..8605b8f577c5
--- /dev/null
+++ b/tests/cases/standalone/common/insert/nullable_tag.result
@@ -0,0 +1,54 @@
+CREATE TABLE `esT`(
+ `eT` TIMESTAMP(3) TIME INDEX,
+ `eAque` BOOLEAN,
+ `DoLOruM` INT,
+ `repudiAndae` STRING,
+ `ULLaM` BOOLEAN,
+ `COnSECTeTuR` SMALLINT DEFAULT -31852,
+ `DOLOrIBUS` FLOAT NOT NULL,
+ `QUiS` SMALLINT NULL,
+ `consEquatuR` BOOLEAN NOT NULL,
+ `vERO` BOOLEAN,
+ PRIMARY KEY(`repudiAndae`, `ULLaM`, `DoLOruM`)
+);
+
+Affected Rows: 0
+
+INSERT INTO `esT` (
+ `consEquatuR`,
+ `eAque`,
+ `eT`,
+ `repudiAndae`,
+ `DOLOrIBUS`
+)
+VALUES
+(
+ false,
+ false,
+ '+234049-06-04 01:11:41.163+0000',
+ 'hello',
+ 0.97377783
+),
+(
+ false,
+ true,
+ '-19578-12-20 11:45:59.875+0000',
+ NULL,
+ 0.3535998
+);
+
+Affected Rows: 2
+
+SELECT * FROM `esT` order by `eT` desc;
+
++----------------------------+-------+---------+-------------+-------+-------------+------------+------+-------------+------+
+| eT | eAque | DoLOruM | repudiAndae | ULLaM | COnSECTeTuR | DOLOrIBUS | QUiS | consEquatuR | vERO |
++----------------------------+-------+---------+-------------+-------+-------------+------------+------+-------------+------+
+| +234049-06-04T01:11:41.163 | false | | hello | | -31852 | 0.97377783 | | false | |
+| -19578-12-20T11:45:59.875 | true | | | | -31852 | 0.3535998 | | false | |
++----------------------------+-------+---------+-------------+-------+-------------+------------+------+-------------+------+
+
+DROP TABLE `esT`;
+
+Affected Rows: 0
+
diff --git a/tests/cases/standalone/common/insert/nullable_tag.sql b/tests/cases/standalone/common/insert/nullable_tag.sql
new file mode 100644
index 000000000000..e73973a430e0
--- /dev/null
+++ b/tests/cases/standalone/common/insert/nullable_tag.sql
@@ -0,0 +1,40 @@
+CREATE TABLE `esT`(
+ `eT` TIMESTAMP(3) TIME INDEX,
+ `eAque` BOOLEAN,
+ `DoLOruM` INT,
+ `repudiAndae` STRING,
+ `ULLaM` BOOLEAN,
+ `COnSECTeTuR` SMALLINT DEFAULT -31852,
+ `DOLOrIBUS` FLOAT NOT NULL,
+ `QUiS` SMALLINT NULL,
+ `consEquatuR` BOOLEAN NOT NULL,
+ `vERO` BOOLEAN,
+ PRIMARY KEY(`repudiAndae`, `ULLaM`, `DoLOruM`)
+);
+
+INSERT INTO `esT` (
+ `consEquatuR`,
+ `eAque`,
+ `eT`,
+ `repudiAndae`,
+ `DOLOrIBUS`
+)
+VALUES
+(
+ false,
+ false,
+ '+234049-06-04 01:11:41.163+0000',
+ 'hello',
+ 0.97377783
+),
+(
+ false,
+ true,
+ '-19578-12-20 11:45:59.875+0000',
+ NULL,
+ 0.3535998
+);
+
+SELECT * FROM `esT` order by `eT` desc;
+
+DROP TABLE `esT`;
|
fix
|
add data type to vector cache key (#3876)
|
b03cb3860e0d5fa81301c54035b55e5785c1aed3
|
2024-06-06 12:15:19
|
Jeremyhi
|
chore: reduce some burden on the write path (#4110)
| false
|
diff --git a/src/metric-engine/src/engine.rs b/src/metric-engine/src/engine.rs
index 3d3ab8c77dbf..f12e29aed00a 100644
--- a/src/metric-engine/src/engine.rs
+++ b/src/metric-engine/src/engine.rs
@@ -145,19 +145,7 @@ impl RegionEngine for MetricEngine {
.alter_region(region_id, alter, &mut extension_return_value)
.await
}
- RegionRequest::Flush(_) => {
- if self.inner.is_physical_region(region_id) {
- self.inner
- .mito
- .handle_request(region_id, request)
- .await
- .context(error::MitoFlushOperationSnafu)
- .map(|response| response.affected_rows)
- } else {
- UnsupportedRegionRequestSnafu { request }.fail()
- }
- }
- RegionRequest::Compact(_) => {
+ RegionRequest::Flush(_) | RegionRequest::Compact(_) => {
if self.inner.is_physical_region(region_id) {
self.inner
.mito
diff --git a/src/metric-engine/src/engine/put.rs b/src/metric-engine/src/engine/put.rs
index 72c2aeb18034..9534a69f7d4f 100644
--- a/src/metric-engine/src/engine/put.rs
+++ b/src/metric-engine/src/engine/put.rs
@@ -17,7 +17,7 @@ use std::hash::Hash;
use api::v1::value::ValueData;
use api::v1::{ColumnDataType, ColumnSchema, Row, Rows, SemanticType};
use common_telemetry::{error, info};
-use snafu::OptionExt;
+use snafu::{ensure, OptionExt};
use store_api::metric_engine_consts::{
DATA_SCHEMA_TABLE_ID_COLUMN_NAME, DATA_SCHEMA_TSID_COLUMN_NAME,
};
@@ -26,7 +26,8 @@ use store_api::storage::{RegionId, TableId};
use crate::engine::MetricEngineInner;
use crate::error::{
- ColumnNotFoundSnafu, ForbiddenPhysicalAlterSnafu, LogicalRegionNotFoundSnafu, Result,
+ ColumnNotFoundSnafu, ForbiddenPhysicalAlterSnafu, LogicalRegionNotFoundSnafu,
+ PhysicalRegionNotFoundSnafu, Result,
};
use crate::metrics::{FORBIDDEN_OPERATION_COUNT, MITO_OPERATION_ELAPSED};
use crate::utils::to_data_region_id;
@@ -101,7 +102,7 @@ impl MetricEngineInner {
physical_region_id: RegionId,
request: &RegionPutRequest,
) -> Result<()> {
- // check if the region exists
+ // Check if the region exists
let data_region_id = to_data_region_id(physical_region_id);
let state = self.state.read().unwrap();
if !state.is_logical_region_exist(logical_region_id) {
@@ -112,15 +113,22 @@ impl MetricEngineInner {
.fail();
}
- // check if the columns exist
+ // Check if a physical column exists
+ let physical_columns =
+ state
+ .physical_columns()
+ .get(&data_region_id)
+ .context(PhysicalRegionNotFoundSnafu {
+ region_id: data_region_id,
+ })?;
for col in &request.rows.schema {
- if !state.is_physical_column_exist(data_region_id, &col.column_name)? {
- return ColumnNotFoundSnafu {
+ ensure!(
+ physical_columns.contains(&col.column_name),
+ ColumnNotFoundSnafu {
name: col.column_name.clone(),
region_id: logical_region_id,
}
- .fail();
- }
+ );
}
Ok(())
diff --git a/src/metric-engine/src/engine/state.rs b/src/metric-engine/src/engine/state.rs
index cc36fe27df64..5214ae1fbfcb 100644
--- a/src/metric-engine/src/engine/state.rs
+++ b/src/metric-engine/src/engine/state.rs
@@ -132,23 +132,6 @@ impl MetricEngineState {
Ok(())
}
- /// Check if a physical column exists.
- pub fn is_physical_column_exist(
- &self,
- physical_region_id: RegionId,
- column_name: &str,
- ) -> Result<bool> {
- let data_region_id = to_data_region_id(physical_region_id);
- let exist = self
- .physical_columns()
- .get(&data_region_id)
- .context(PhysicalRegionNotFoundSnafu {
- region_id: data_region_id,
- })?
- .contains(column_name);
- Ok(exist)
- }
-
pub fn is_logical_region_exist(&self, logical_region_id: RegionId) -> bool {
self.logical_regions().contains_key(&logical_region_id)
}
diff --git a/src/operator/src/error.rs b/src/operator/src/error.rs
index 1eccbe081c58..3d89a6ed43ad 100644
--- a/src/operator/src/error.rs
+++ b/src/operator/src/error.rs
@@ -642,20 +642,6 @@ pub enum Error {
location: Location,
},
- #[snafu(display("Do not support {} in multiple catalogs", ddl_name))]
- DdlWithMultiCatalogs {
- ddl_name: String,
- #[snafu(implicit)]
- location: Location,
- },
-
- #[snafu(display("Do not support {} in multiple schemas", ddl_name))]
- DdlWithMultiSchemas {
- ddl_name: String,
- #[snafu(implicit)]
- location: Location,
- },
-
#[snafu(display("Empty {} expr", name))]
EmptyDdlExpr {
name: String,
@@ -835,9 +821,7 @@ impl ErrorExt for Error {
Error::ColumnDefaultValue { source, .. } => source.status_code(),
- Error::DdlWithMultiCatalogs { .. }
- | Error::DdlWithMultiSchemas { .. }
- | Error::EmptyDdlExpr { .. }
+ Error::EmptyDdlExpr { .. }
| Error::InvalidPartitionRule { .. }
| Error::ParseSqlValue { .. }
| Error::InvalidTimestampRange { .. } => StatusCode::InvalidArguments,
diff --git a/src/operator/src/insert.rs b/src/operator/src/insert.rs
index 5cd6846c0e10..7a04ebdee5d9 100644
--- a/src/operator/src/insert.rs
+++ b/src/operator/src/insert.rs
@@ -394,11 +394,10 @@ impl Inserter {
Some(table) => {
let table_info = table.table_info();
table_name_to_ids.insert(table_info.name.clone(), table_info.table_id());
-
- // TODO(jeremy): alter in batch? (from `handle_metric_row_inserts`)
validate_request_with_table(req, &table)?;
- let alter_expr = self.get_alter_table_expr_on_demand(req, table, ctx)?;
- if let Some(alter_expr) = alter_expr {
+ if let Some(alter_expr) =
+ self.get_alter_table_expr_on_demand(req, table, ctx)?
+ {
alter_tables.push(alter_expr);
}
}
@@ -592,15 +591,12 @@ impl Inserter {
physical_table: &str,
statement_executor: &StatementExecutor,
) -> Result<Vec<TableRef>> {
+ let catalog_name = ctx.current_catalog();
+ let schema_name = ctx.current_schema();
let create_table_exprs = create_tables
.iter()
.map(|req| {
- let table_ref = TableReference::full(
- ctx.current_catalog(),
- ctx.current_schema(),
- &req.table_name,
- );
-
+ let table_ref = TableReference::full(catalog_name, schema_name, &req.table_name);
let request_schema = req.rows.as_ref().unwrap().schema.as_slice();
let mut create_table_expr = build_create_table_expr(&table_ref, request_schema)?;
@@ -615,7 +611,7 @@ impl Inserter {
.collect::<Result<Vec<_>>>()?;
let res = statement_executor
- .create_logical_tables(&create_table_exprs, ctx.clone())
+ .create_logical_tables(catalog_name, schema_name, &create_table_exprs, ctx.clone())
.await;
match res {
diff --git a/src/operator/src/statement/ddl.rs b/src/operator/src/statement/ddl.rs
index 251c5529bf0f..5e70569c8ad6 100644
--- a/src/operator/src/statement/ddl.rs
+++ b/src/operator/src/statement/ddl.rs
@@ -67,13 +67,12 @@ use table::TableRef;
use super::StatementExecutor;
use crate::error::{
self, AlterExprToRequestSnafu, CatalogSnafu, ColumnDataTypeSnafu, ColumnNotFoundSnafu,
- CreateLogicalTablesSnafu, CreateTableInfoSnafu, DdlWithMultiCatalogsSnafu,
- DdlWithMultiSchemasSnafu, DeserializePartitionSnafu, EmptyDdlExprSnafu, ExtractTableNamesSnafu,
- FlowNotFoundSnafu, InvalidPartitionColumnsSnafu, InvalidPartitionRuleSnafu,
- InvalidTableNameSnafu, InvalidViewNameSnafu, InvalidViewStmtSnafu, ParseSqlValueSnafu, Result,
- SchemaInUseSnafu, SchemaNotFoundSnafu, SubstraitCodecSnafu, TableAlreadyExistsSnafu,
- TableMetadataManagerSnafu, TableNotFoundSnafu, UnrecognizedTableOptionSnafu,
- ViewAlreadyExistsSnafu,
+ CreateLogicalTablesSnafu, CreateTableInfoSnafu, DeserializePartitionSnafu, EmptyDdlExprSnafu,
+ ExtractTableNamesSnafu, FlowNotFoundSnafu, InvalidPartitionColumnsSnafu,
+ InvalidPartitionRuleSnafu, InvalidTableNameSnafu, InvalidViewNameSnafu, InvalidViewStmtSnafu,
+ ParseSqlValueSnafu, Result, SchemaInUseSnafu, SchemaNotFoundSnafu, SubstraitCodecSnafu,
+ TableAlreadyExistsSnafu, TableMetadataManagerSnafu, TableNotFoundSnafu,
+ UnrecognizedTableOptionSnafu, ViewAlreadyExistsSnafu,
};
use crate::expr_factory;
use crate::statement::show::create_partitions_stmt;
@@ -157,8 +156,15 @@ impl StatementExecutor {
.table_options
.contains_key(LOGICAL_TABLE_METADATA_KEY)
{
+ let catalog_name = &create_table.catalog_name;
+ let schema_name = &create_table.schema_name;
return self
- .create_logical_tables(&[create_table.clone()], query_ctx)
+ .create_logical_tables(
+ catalog_name,
+ schema_name,
+ &[create_table.clone()],
+ query_ctx,
+ )
.await?
.into_iter()
.next()
@@ -260,6 +266,8 @@ impl StatementExecutor {
#[tracing::instrument(skip_all)]
pub async fn create_logical_tables(
&self,
+ catalog_name: &str,
+ schema_name: &str,
create_table_exprs: &[CreateTableExpr],
query_context: QueryContextRef,
) -> Result<Vec<TableRef>> {
@@ -267,35 +275,16 @@ impl StatementExecutor {
ensure!(
!create_table_exprs.is_empty(),
EmptyDdlExprSnafu {
- name: "create table"
- }
- );
- ensure!(
- create_table_exprs
- .windows(2)
- .all(|expr| expr[0].catalog_name == expr[1].catalog_name),
- DdlWithMultiCatalogsSnafu {
- ddl_name: "create tables"
- }
- );
- let catalog_name = create_table_exprs[0].catalog_name.to_string();
-
- ensure!(
- create_table_exprs
- .windows(2)
- .all(|expr| expr[0].schema_name == expr[1].schema_name),
- DdlWithMultiSchemasSnafu {
- ddl_name: "create tables"
+ name: "create logic tables"
}
);
- let schema_name = create_table_exprs[0].schema_name.to_string();
// Check table names
for create_table in create_table_exprs {
ensure!(
NAME_PATTERN_REG.is_match(&create_table.table_name),
InvalidTableNameSnafu {
- table_name: create_table.table_name.clone(),
+ table_name: &create_table.table_name,
}
);
}
@@ -303,11 +292,11 @@ impl StatementExecutor {
let schema = self
.table_metadata_manager
.schema_manager()
- .get(SchemaNameKey::new(&catalog_name, &schema_name))
+ .get(SchemaNameKey::new(catalog_name, schema_name))
.await
.context(TableMetadataManagerSnafu)?
.context(SchemaNotFoundSnafu {
- schema_info: &schema_name,
+ schema_info: schema_name,
})?;
let mut raw_tables_info = create_table_exprs
@@ -626,7 +615,7 @@ impl StatementExecutor {
ensure!(
!alter_table_exprs.is_empty(),
EmptyDdlExprSnafu {
- name: "alter table"
+ name: "alter logical tables"
}
);
|
chore
|
reduce some burden on the write path (#4110)
|
58c7858cd48dc2dd988abcf1acf1e3ca1900c2da
|
2024-03-26 19:59:53
|
JeremyHi
|
feat: update physical table schema on alter logical tables (#3585)
| false
|
diff --git a/src/cmd/src/frontend.rs b/src/cmd/src/frontend.rs
index 124330ee477e..39c7982eae79 100644
--- a/src/cmd/src/frontend.rs
+++ b/src/cmd/src/frontend.rs
@@ -16,7 +16,7 @@ use std::sync::Arc;
use std::time::Duration;
use async_trait::async_trait;
-use catalog::kvbackend::CachedMetaKvBackendBuilder;
+use catalog::kvbackend::{CachedMetaKvBackendBuilder, KvBackendCatalogManager};
use clap::Parser;
use client::client_manager::DatanodeClients;
use common_meta::heartbeat::handler::parse_mailbox_message::ParseMailboxMessageHandler;
@@ -248,11 +248,12 @@ impl StartCommand {
.build();
let cached_meta_backend = Arc::new(cached_meta_backend);
+ let catalog_manager =
+ KvBackendCatalogManager::new(cached_meta_backend.clone(), cached_meta_backend.clone());
+
let executor = HandlerGroupExecutor::new(vec![
Arc::new(ParseMailboxMessageHandler),
- Arc::new(InvalidateTableCacheHandler::new(
- cached_meta_backend.clone(),
- )),
+ Arc::new(InvalidateTableCacheHandler::new(catalog_manager.clone())),
]);
let heartbeat_task = HeartbeatTask::new(
@@ -263,10 +264,11 @@ impl StartCommand {
let mut instance = FrontendBuilder::new(
cached_meta_backend.clone(),
+ catalog_manager.clone(),
Arc::new(DatanodeClients::default()),
meta_client,
)
- .with_cache_invalidator(cached_meta_backend)
+ .with_cache_invalidator(catalog_manager.clone())
.with_plugin(plugins.clone())
.with_heartbeat_task(heartbeat_task)
.try_build()
diff --git a/src/cmd/src/standalone.rs b/src/cmd/src/standalone.rs
index c8b0385cfe44..4082b8aa9e14 100644
--- a/src/cmd/src/standalone.rs
+++ b/src/cmd/src/standalone.rs
@@ -16,10 +16,11 @@ use std::sync::Arc;
use std::{fs, path};
use async_trait::async_trait;
+use catalog::kvbackend::KvBackendCatalogManager;
use clap::Parser;
use common_catalog::consts::MIN_USER_TABLE_ID;
use common_config::{metadata_store_dir, KvBackendConfig};
-use common_meta::cache_invalidator::DummyCacheInvalidator;
+use common_meta::cache_invalidator::{CacheInvalidatorRef, DummyCacheInvalidator};
use common_meta::datanode_manager::DatanodeManagerRef;
use common_meta::ddl::table_meta::{TableMetadataAllocator, TableMetadataAllocatorRef};
use common_meta::ddl::ProcedureExecutorRef;
@@ -399,6 +400,9 @@ impl StartCommand {
.await
.context(StartFrontendSnafu)?;
+ let catalog_manager =
+ KvBackendCatalogManager::new(kv_backend.clone(), Arc::new(DummyCacheInvalidator));
+
let builder =
DatanodeBuilder::new(dn_opts, fe_plugins.clone()).with_kv_backend(kv_backend.clone());
let datanode = builder.build().await.context(StartDatanodeSnafu)?;
@@ -429,15 +433,22 @@ impl StartCommand {
table_metadata_manager,
procedure_manager.clone(),
datanode_manager.clone(),
+ catalog_manager.clone(),
table_meta_allocator,
)
.await?;
- let mut frontend = FrontendBuilder::new(kv_backend, datanode_manager, ddl_task_executor)
- .with_plugin(fe_plugins.clone())
- .try_build()
- .await
- .context(StartFrontendSnafu)?;
+ let mut frontend = FrontendBuilder::new(
+ kv_backend,
+ catalog_manager.clone(),
+ datanode_manager,
+ ddl_task_executor,
+ )
+ .with_plugin(fe_plugins.clone())
+ .with_cache_invalidator(catalog_manager)
+ .try_build()
+ .await
+ .context(StartFrontendSnafu)?;
let servers = Services::new(fe_opts.clone(), Arc::new(frontend.clone()), fe_plugins)
.build()
@@ -459,13 +470,14 @@ impl StartCommand {
table_metadata_manager: TableMetadataManagerRef,
procedure_manager: ProcedureManagerRef,
datanode_manager: DatanodeManagerRef,
+ cache_invalidator: CacheInvalidatorRef,
table_meta_allocator: TableMetadataAllocatorRef,
) -> Result<ProcedureExecutorRef> {
let procedure_executor: ProcedureExecutorRef = Arc::new(
DdlManager::try_new(
procedure_manager,
datanode_manager,
- Arc::new(DummyCacheInvalidator),
+ cache_invalidator,
table_metadata_manager,
table_meta_allocator,
Arc::new(MemoryRegionKeeper::default()),
diff --git a/src/common/meta/src/ddl.rs b/src/common/meta/src/ddl.rs
index efef95916a18..4f2fd6f6df1d 100644
--- a/src/common/meta/src/ddl.rs
+++ b/src/common/meta/src/ddl.rs
@@ -35,6 +35,7 @@ pub mod create_table;
mod create_table_template;
pub mod drop_database;
pub mod drop_table;
+mod physical_table_metadata;
pub mod table_meta;
#[cfg(any(test, feature = "testing"))]
pub mod test_util;
diff --git a/src/common/meta/src/ddl/alter_logical_tables.rs b/src/common/meta/src/ddl/alter_logical_tables.rs
index a109919d2095..b2b8b6858bd1 100644
--- a/src/common/meta/src/ddl/alter_logical_tables.rs
+++ b/src/common/meta/src/ddl/alter_logical_tables.rs
@@ -15,25 +15,29 @@
mod check;
mod metadata;
mod region_request;
+mod table_cache_keys;
mod update_metadata;
use async_trait::async_trait;
use common_procedure::error::{FromJsonSnafu, Result as ProcedureResult, ToJsonSnafu};
use common_procedure::{Context, LockKey, Procedure, Status};
+use common_telemetry::{info, warn};
use futures_util::future;
use itertools::Itertools;
use serde::{Deserialize, Serialize};
-use snafu::ResultExt;
+use snafu::{ensure, ResultExt};
+use store_api::metadata::ColumnMetadata;
+use store_api::metric_engine_consts::ALTER_PHYSICAL_EXTENSION_KEY;
use strum::AsRefStr;
use table::metadata::TableId;
use crate::ddl::utils::add_peer_context_if_needed;
-use crate::ddl::DdlContext;
-use crate::error::{Error, Result};
-use crate::instruction::CacheIdent;
+use crate::ddl::{physical_table_metadata, DdlContext};
+use crate::error::{DecodeJsonSnafu, Error, MetadataCorruptionSnafu, Result};
use crate::key::table_info::TableInfoValue;
use crate::key::table_route::PhysicalTableRouteValue;
-use crate::lock_key::{CatalogLock, SchemaLock, TableLock, TableNameLock};
+use crate::key::DeserializedValueWithBytes;
+use crate::lock_key::{CatalogLock, SchemaLock, TableLock};
use crate::rpc::ddl::AlterTableTask;
use crate::rpc::router::{find_leader_regions, find_leaders};
use crate::{cache_invalidator, metrics, ClusterId};
@@ -60,8 +64,9 @@ impl AlterLogicalTablesProcedure {
tasks,
table_info_values: vec![],
physical_table_id,
+ physical_table_info: None,
physical_table_route: None,
- cache_invalidate_keys: vec![],
+ physical_columns: vec![],
},
}
}
@@ -79,11 +84,24 @@ impl AlterLogicalTablesProcedure {
// Checks the physical table, must after [fill_table_info_values]
self.check_physical_table().await?;
// Fills the physical table info
- self.fill_physical_table_route().await?;
- // Filter the tasks
+ self.fill_physical_table_info().await?;
+ // Filter the finished tasks
let finished_tasks = self.check_finished_tasks()?;
- if finished_tasks.iter().all(|x| *x) {
- return Ok(Status::done());
+ let already_finished_count = finished_tasks
+ .iter()
+ .map(|x| if *x { 1 } else { 0 })
+ .sum::<usize>();
+ let apply_tasks_count = self.data.tasks.len();
+ if already_finished_count == apply_tasks_count {
+ info!("All the alter tasks are finished, will skip the procedure.");
+ // Re-invalidate the table cache
+ self.data.state = AlterTablesState::InvalidateTableCache;
+ return Ok(Status::executing(true));
+ } else if already_finished_count > 0 {
+ info!(
+ "There are {} alter tasks, {} of them were already finished.",
+ apply_tasks_count, already_finished_count
+ );
}
self.filter_task(&finished_tasks)?;
@@ -116,17 +134,61 @@ impl AlterLogicalTablesProcedure {
}
}
- future::join_all(alter_region_tasks)
+ // Collects responses from all the alter region tasks.
+ let phy_raw_schemas = future::join_all(alter_region_tasks)
.await
.into_iter()
+ .map(|res| res.map(|mut res| res.extension.remove(ALTER_PHYSICAL_EXTENSION_KEY)))
.collect::<Result<Vec<_>>>()?;
- self.data.state = AlterTablesState::UpdateMetadata;
+ if phy_raw_schemas.is_empty() {
+ self.data.state = AlterTablesState::UpdateMetadata;
+ return Ok(Status::executing(true));
+ }
+
+ // Verify all the physical schemas are the same
+ // Safety: previous check ensures this vec is not empty
+ let first = phy_raw_schemas.first().unwrap();
+ ensure!(
+ phy_raw_schemas.iter().all(|x| x == first),
+ MetadataCorruptionSnafu {
+ err_msg: "The physical schemas from datanodes are not the same."
+ }
+ );
+ // Decodes the physical raw schemas
+ if let Some(phy_raw_schema) = first {
+ self.data.physical_columns =
+ ColumnMetadata::decode_list(phy_raw_schema).context(DecodeJsonSnafu)?;
+ } else {
+ warn!("altering logical table result doesn't contains extension key `{ALTER_PHYSICAL_EXTENSION_KEY}`,leaving the physical table's schema unchanged");
+ }
+
+ self.data.state = AlterTablesState::UpdateMetadata;
Ok(Status::executing(true))
}
pub(crate) async fn on_update_metadata(&mut self) -> Result<Status> {
+ if !self.data.physical_columns.is_empty() {
+ let physical_table_info = self.data.physical_table_info.as_ref().unwrap();
+
+ // Generates new table info
+ let old_raw_table_info = physical_table_info.table_info.clone();
+ let new_raw_table_info = physical_table_metadata::build_new_physical_table_info(
+ old_raw_table_info,
+ &self.data.physical_columns,
+ );
+
+ // Updates physical table's metadata
+ self.context
+ .table_metadata_manager
+ .update_table_info(
+ DeserializedValueWithBytes::from_inner(physical_table_info.clone()),
+ new_raw_table_info,
+ )
+ .await?;
+ }
+
let table_info_values = self.build_update_metadata()?;
let manager = &self.context.table_metadata_manager;
let chunk_size = manager.batch_update_table_info_value_chunk_size();
@@ -151,15 +213,12 @@ impl AlterLogicalTablesProcedure {
}
pub(crate) async fn on_invalidate_table_cache(&mut self) -> Result<Status> {
- let to_invalidate = self
- .data
- .cache_invalidate_keys
- .drain(..)
- .map(CacheIdent::TableId)
- .collect::<Vec<_>>();
+ let ctx = cache_invalidator::Context::default();
+ let to_invalidate = self.build_table_cache_keys_to_invalidate();
+
self.context
.cache_invalidator
- .invalidate(&cache_invalidator::Context::default(), to_invalidate)
+ .invalidate(&ctx, to_invalidate)
.await?;
Ok(Status::done())
}
@@ -212,17 +271,13 @@ impl Procedure for AlterLogicalTablesProcedure {
lock_key.push(CatalogLock::Read(table_ref.catalog).into());
lock_key.push(SchemaLock::read(table_ref.catalog, table_ref.schema).into());
lock_key.push(TableLock::Write(self.data.physical_table_id).into());
+ lock_key.extend(
+ self.data
+ .table_info_values
+ .iter()
+ .map(|table| TableLock::Write(table.table_info.ident.table_id).into()),
+ );
- for task in &self.data.tasks {
- lock_key.push(
- TableNameLock::new(
- &task.alter_table.catalog_name,
- &task.alter_table.schema_name,
- &task.alter_table.table_name,
- )
- .into(),
- );
- }
LockKey::new(lock_key)
}
}
@@ -237,8 +292,9 @@ pub struct AlterTablesData {
table_info_values: Vec<TableInfoValue>,
/// Physical table info
physical_table_id: TableId,
+ physical_table_info: Option<TableInfoValue>,
physical_table_route: Option<PhysicalTableRouteValue>,
- cache_invalidate_keys: Vec<TableId>,
+ physical_columns: Vec<ColumnMetadata>,
}
#[derive(Debug, Serialize, Deserialize, AsRefStr)]
diff --git a/src/common/meta/src/ddl/alter_logical_tables/metadata.rs b/src/common/meta/src/ddl/alter_logical_tables/metadata.rs
index 74747abb858e..60dde7a6341e 100644
--- a/src/common/meta/src/ddl/alter_logical_tables/metadata.rs
+++ b/src/common/meta/src/ddl/alter_logical_tables/metadata.rs
@@ -17,9 +17,13 @@ use snafu::OptionExt;
use table::metadata::TableId;
use crate::ddl::alter_logical_tables::AlterLogicalTablesProcedure;
-use crate::error::{Result, TableInfoNotFoundSnafu, TableNotFoundSnafu};
+use crate::error::{
+ AlterLogicalTablesInvalidArgumentsSnafu, Result, TableInfoNotFoundSnafu, TableNotFoundSnafu,
+ TableRouteNotFoundSnafu,
+};
use crate::key::table_info::TableInfoValue;
use crate::key::table_name::TableNameKey;
+use crate::key::table_route::TableRouteValue;
use crate::rpc::ddl::AlterTableTask;
impl AlterLogicalTablesProcedure {
@@ -46,21 +50,38 @@ impl AlterLogicalTablesProcedure {
}
})
.collect();
- self.data.cache_invalidate_keys = self
- .data
- .table_info_values
- .iter()
- .map(|table| table.table_info.ident.table_id)
- .collect();
Ok(())
}
- pub(crate) async fn fill_physical_table_route(&mut self) -> Result<()> {
- let table_route_manager = self.context.table_metadata_manager.table_route_manager();
- let (_, physical_table_route) = table_route_manager
- .get_physical_table_route(self.data.physical_table_id)
+ pub(crate) async fn fill_physical_table_info(&mut self) -> Result<()> {
+ let (physical_table_info, physical_table_route) = self
+ .context
+ .table_metadata_manager
+ .get_full_table_info(self.data.physical_table_id)
.await?;
+
+ let physical_table_info = physical_table_info
+ .with_context(|| TableInfoNotFoundSnafu {
+ table: format!("table id - {}", self.data.physical_table_id),
+ })?
+ .into_inner();
+ let physical_table_route = physical_table_route
+ .context(TableRouteNotFoundSnafu {
+ table_id: self.data.physical_table_id,
+ })?
+ .into_inner();
+
+ self.data.physical_table_info = Some(physical_table_info);
+ let TableRouteValue::Physical(physical_table_route) = physical_table_route else {
+ return AlterLogicalTablesInvalidArgumentsSnafu {
+ err_msg: format!(
+ "expected a physical table but got a logical table: {:?}",
+ self.data.physical_table_id
+ ),
+ }
+ .fail();
+ };
self.data.physical_table_route = Some(physical_table_route);
Ok(())
@@ -87,7 +108,7 @@ impl AlterLogicalTablesProcedure {
table_info_map
.remove(table_id)
.with_context(|| TableInfoNotFoundSnafu {
- table_name: extract_table_name(task),
+ table: extract_table_name(task),
})?;
table_info_values.push(table_info_value);
}
diff --git a/src/common/meta/src/ddl/alter_logical_tables/table_cache_keys.rs b/src/common/meta/src/ddl/alter_logical_tables/table_cache_keys.rs
new file mode 100644
index 000000000000..23cf22e2c02c
--- /dev/null
+++ b/src/common/meta/src/ddl/alter_logical_tables/table_cache_keys.rs
@@ -0,0 +1,51 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use table::metadata::RawTableInfo;
+
+use crate::ddl::alter_logical_tables::AlterLogicalTablesProcedure;
+use crate::instruction::CacheIdent;
+use crate::table_name::TableName;
+
+impl AlterLogicalTablesProcedure {
+ pub(crate) fn build_table_cache_keys_to_invalidate(&self) -> Vec<CacheIdent> {
+ let mut cache_keys = self
+ .data
+ .table_info_values
+ .iter()
+ .flat_map(|table| {
+ vec![
+ CacheIdent::TableId(table.table_info.ident.table_id),
+ CacheIdent::TableName(extract_table_name(&table.table_info)),
+ ]
+ })
+ .collect::<Vec<_>>();
+ cache_keys.push(CacheIdent::TableId(self.data.physical_table_id));
+ // Safety: physical_table_info already filled in previous steps
+ let physical_table_info = &self.data.physical_table_info.as_ref().unwrap().table_info;
+ cache_keys.push(CacheIdent::TableName(extract_table_name(
+ physical_table_info,
+ )));
+
+ cache_keys
+ }
+}
+
+fn extract_table_name(table_info: &RawTableInfo) -> TableName {
+ TableName::new(
+ &table_info.catalog_name,
+ &table_info.schema_name,
+ &table_info.name,
+ )
+}
diff --git a/src/common/meta/src/ddl/alter_logical_tables/update_metadata.rs b/src/common/meta/src/ddl/alter_logical_tables/update_metadata.rs
index e9ba0e72226f..b31e0a879935 100644
--- a/src/common/meta/src/ddl/alter_logical_tables/update_metadata.rs
+++ b/src/common/meta/src/ddl/alter_logical_tables/update_metadata.rs
@@ -23,10 +23,14 @@ use crate::key::table_info::TableInfoValue;
use crate::rpc::ddl::AlterTableTask;
impl AlterLogicalTablesProcedure {
- pub(crate) fn build_update_metadata(&mut self) -> Result<Vec<(TableInfoValue, RawTableInfo)>> {
+ pub(crate) fn build_update_metadata(&self) -> Result<Vec<(TableInfoValue, RawTableInfo)>> {
let mut table_info_values_to_update = Vec::with_capacity(self.data.tasks.len());
- let table_info_values = std::mem::take(&mut self.data.table_info_values);
- for (task, table) in self.data.tasks.iter().zip(table_info_values.into_iter()) {
+ for (task, table) in self
+ .data
+ .tasks
+ .iter()
+ .zip(self.data.table_info_values.iter())
+ {
table_info_values_to_update.push(self.build_new_table_info(task, table)?);
}
@@ -36,7 +40,7 @@ impl AlterLogicalTablesProcedure {
fn build_new_table_info(
&self,
task: &AlterTableTask,
- table: TableInfoValue,
+ table: &TableInfoValue,
) -> Result<(TableInfoValue, RawTableInfo)> {
// Builds new_meta
let table_info = TableInfo::try_from(table.table_info.clone())
@@ -61,6 +65,6 @@ impl AlterLogicalTablesProcedure {
let mut raw_table_info = RawTableInfo::from(new_table);
raw_table_info.sort_columns();
- Ok((table, raw_table_info))
+ Ok((table.clone(), raw_table_info))
}
}
diff --git a/src/common/meta/src/ddl/create_logical_tables.rs b/src/common/meta/src/ddl/create_logical_tables.rs
index 80cba554c3e7..df64b8e286e3 100644
--- a/src/common/meta/src/ddl/create_logical_tables.rs
+++ b/src/common/meta/src/ddl/create_logical_tables.rs
@@ -12,12 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::collections::{HashMap, HashSet};
+use std::collections::HashMap;
use std::ops::Deref;
use api::v1::region::region_request::Body as PbRegionRequest;
use api::v1::region::{CreateRequests, RegionRequest, RegionRequestHeader};
-use api::v1::{CreateTableExpr, SemanticType};
+use api::v1::CreateTableExpr;
use async_trait::async_trait;
use common_procedure::error::{FromJsonSnafu, Result as ProcedureResult, ToJsonSnafu};
use common_procedure::{Context as ProcedureContext, LockKey, Procedure, Status};
@@ -36,7 +36,7 @@ use table::metadata::{RawTableInfo, TableId};
use crate::cache_invalidator::Context;
use crate::ddl::create_table_template::{build_template, CreateRequestBuilder};
use crate::ddl::utils::{add_peer_context_if_needed, handle_retry_error, region_storage_path};
-use crate::ddl::DdlContext;
+use crate::ddl::{physical_table_metadata, DdlContext};
use crate::error::{
DecodeJsonSnafu, MetadataCorruptionSnafu, Result, TableAlreadyExistsSnafu,
TableInfoNotFoundSnafu,
@@ -50,6 +50,7 @@ use crate::lock_key::{CatalogLock, SchemaLock, TableLock, TableNameLock};
use crate::peer::Peer;
use crate::rpc::ddl::CreateTableTask;
use crate::rpc::router::{find_leader_regions, find_leaders, RegionRoute};
+use crate::table_name::TableName;
use crate::{metrics, ClusterId};
pub struct CreateLogicalTablesProcedure {
@@ -226,11 +227,11 @@ impl CreateLogicalTablesProcedure {
let physical_table_info = self
.context
.table_metadata_manager
- .get_full_table_info(self.data.physical_table_id)
+ .table_info_manager()
+ .get(self.data.physical_table_id)
.await?
- .0
- .context(TableInfoNotFoundSnafu {
- table_name: format!("table id - {}", self.data.physical_table_id),
+ .with_context(|| TableInfoNotFoundSnafu {
+ table: format!("table id - {}", self.data.physical_table_id),
})?;
// generate new table info
@@ -238,6 +239,12 @@ impl CreateLogicalTablesProcedure {
.data
.build_new_physical_table_info(&physical_table_info);
+ let physical_table_name = TableName::new(
+ &new_table_info.catalog_name,
+ &new_table_info.schema_name,
+ &new_table_info.name,
+ );
+
// update physical table's metadata
self.context
.table_metadata_manager
@@ -249,7 +256,10 @@ impl CreateLogicalTablesProcedure {
.cache_invalidator
.invalidate(
&Context::default(),
- vec![CacheIdent::TableId(self.data.physical_table_id)],
+ vec![
+ CacheIdent::TableId(self.data.physical_table_id),
+ CacheIdent::TableName(physical_table_name),
+ ],
)
.await?;
} else {
@@ -358,8 +368,7 @@ impl CreateLogicalTablesProcedure {
self.data.state = CreateTablesState::CreateMetadata;
- // Ensures the procedures after the crash start from the `DatanodeCreateRegions` stage.
- Ok(Status::executing(false))
+ Ok(Status::executing(true))
}
}
@@ -479,38 +488,12 @@ impl CreateTablesData {
&mut self,
old_table_info: &DeserializedValueWithBytes<TableInfoValue>,
) -> RawTableInfo {
- let mut raw_table_info = old_table_info.deref().table_info.clone();
-
- let existing_primary_key = raw_table_info
- .meta
- .schema
- .column_schemas
- .iter()
- .map(|col| col.name.clone())
- .collect::<HashSet<_>>();
- let primary_key_indices = &mut raw_table_info.meta.primary_key_indices;
- let value_indices = &mut raw_table_info.meta.value_indices;
- value_indices.clear();
- let time_index = &mut raw_table_info.meta.schema.timestamp_index;
- let columns = &mut raw_table_info.meta.schema.column_schemas;
- columns.clear();
-
- for (idx, col) in self.physical_columns.drain(..).enumerate() {
- match col.semantic_type {
- SemanticType::Tag => {
- // push new primary key to the end.
- if !existing_primary_key.contains(&col.column_schema.name) {
- primary_key_indices.push(idx);
- }
- }
- SemanticType::Field => value_indices.push(idx),
- SemanticType::Timestamp => *time_index = Some(idx),
- }
-
- columns.push(col.column_schema);
- }
+ let raw_table_info = old_table_info.deref().table_info.clone();
- raw_table_info
+ physical_table_metadata::build_new_physical_table_info(
+ raw_table_info,
+ &self.physical_columns,
+ )
}
}
diff --git a/src/common/meta/src/ddl/physical_table_metadata.rs b/src/common/meta/src/ddl/physical_table_metadata.rs
new file mode 100644
index 000000000000..df66995bd883
--- /dev/null
+++ b/src/common/meta/src/ddl/physical_table_metadata.rs
@@ -0,0 +1,56 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::collections::HashSet;
+
+use api::v1::SemanticType;
+use store_api::metadata::ColumnMetadata;
+use table::metadata::RawTableInfo;
+
+/// Generate the new physical table info.
+pub(crate) fn build_new_physical_table_info(
+ mut raw_table_info: RawTableInfo,
+ physical_columns: &[ColumnMetadata],
+) -> RawTableInfo {
+ let existing_columns = raw_table_info
+ .meta
+ .schema
+ .column_schemas
+ .iter()
+ .map(|col| col.name.clone())
+ .collect::<HashSet<_>>();
+ let primary_key_indices = &mut raw_table_info.meta.primary_key_indices;
+ let value_indices = &mut raw_table_info.meta.value_indices;
+ value_indices.clear();
+ let time_index = &mut raw_table_info.meta.schema.timestamp_index;
+ let columns = &mut raw_table_info.meta.schema.column_schemas;
+ columns.clear();
+
+ for (idx, col) in physical_columns.iter().enumerate() {
+ match col.semantic_type {
+ SemanticType::Tag => {
+ // push new primary key to the end.
+ if !existing_columns.contains(&col.column_schema.name) {
+ primary_key_indices.push(idx);
+ }
+ }
+ SemanticType::Field => value_indices.push(idx),
+ SemanticType::Timestamp => *time_index = Some(idx),
+ }
+
+ columns.push(col.column_schema.clone());
+ }
+
+ raw_table_info
+}
diff --git a/src/common/meta/src/ddl_manager.rs b/src/common/meta/src/ddl_manager.rs
index 2ee5ebc49e09..059094d819a9 100644
--- a/src/common/meta/src/ddl_manager.rs
+++ b/src/common/meta/src/ddl_manager.rs
@@ -369,7 +369,7 @@ async fn handle_truncate_table_task(
table_metadata_manager.get_full_table_info(table_id).await?;
let table_info_value = table_info_value.with_context(|| error::TableInfoNotFoundSnafu {
- table_name: table_ref.to_string(),
+ table: table_ref.to_string(),
})?;
let table_route_value =
@@ -421,7 +421,7 @@ async fn handle_alter_table_task(
.get(table_id)
.await?
.with_context(|| error::TableInfoNotFoundSnafu {
- table_name: table_ref.to_string(),
+ table: table_ref.to_string(),
})?;
let physical_table_id = ddl_manager
@@ -439,7 +439,7 @@ async fn handle_alter_table_task(
.get(physical_table_id)
.await?
.with_context(|| error::TableInfoNotFoundSnafu {
- table_name: table_ref.to_string(),
+ table: table_ref.to_string(),
})?
.table_info;
Some((
@@ -488,7 +488,7 @@ async fn handle_drop_table_task(
.await?;
let table_info_value = table_info_value.with_context(|| error::TableInfoNotFoundSnafu {
- table_name: table_ref.to_string(),
+ table: table_ref.to_string(),
})?;
let table_route_value =
diff --git a/src/common/meta/src/error.rs b/src/common/meta/src/error.rs
index bc85bfe262cb..ff067aa609a9 100644
--- a/src/common/meta/src/error.rs
+++ b/src/common/meta/src/error.rs
@@ -89,11 +89,8 @@ pub enum Error {
#[snafu(display("Unexpected sequence value: {}", err_msg))]
UnexpectedSequenceValue { err_msg: String, location: Location },
- #[snafu(display("Table info not found: {}", table_name))]
- TableInfoNotFound {
- table_name: String,
- location: Location,
- },
+ #[snafu(display("Table info not found: {}", table))]
+ TableInfoNotFound { table: String, location: Location },
#[snafu(display("Failed to register procedure loader, type name: {}", type_name))]
RegisterProcedureLoader {
diff --git a/src/frontend/src/instance/builder.rs b/src/frontend/src/instance/builder.rs
index bb99c2c5ee4a..1464a11c27d1 100644
--- a/src/frontend/src/instance/builder.rs
+++ b/src/frontend/src/instance/builder.rs
@@ -14,7 +14,7 @@
use std::sync::Arc;
-use catalog::kvbackend::KvBackendCatalogManager;
+use catalog::CatalogManagerRef;
use common_base::Plugins;
use common_meta::cache_invalidator::{CacheInvalidatorRef, DummyCacheInvalidator};
use common_meta::datanode_manager::DatanodeManagerRef;
@@ -41,6 +41,7 @@ use crate::script::ScriptExecutor;
pub struct FrontendBuilder {
kv_backend: KvBackendRef,
cache_invalidator: Option<CacheInvalidatorRef>,
+ catalog_manager: CatalogManagerRef,
datanode_manager: DatanodeManagerRef,
plugins: Option<Plugins>,
procedure_executor: ProcedureExecutorRef,
@@ -50,12 +51,14 @@ pub struct FrontendBuilder {
impl FrontendBuilder {
pub fn new(
kv_backend: KvBackendRef,
+ catalog_manager: CatalogManagerRef,
datanode_manager: DatanodeManagerRef,
procedure_executor: ProcedureExecutorRef,
) -> Self {
Self {
kv_backend,
cache_invalidator: None,
+ catalog_manager,
datanode_manager,
plugins: None,
procedure_executor,
@@ -89,29 +92,27 @@ impl FrontendBuilder {
let datanode_manager = self.datanode_manager;
let plugins = self.plugins.unwrap_or_default();
- let catalog_manager = KvBackendCatalogManager::new(
- kv_backend.clone(),
- self.cache_invalidator
- .unwrap_or_else(|| Arc::new(DummyCacheInvalidator)),
- );
-
let partition_manager = Arc::new(PartitionRuleManager::new(kv_backend.clone()));
+ let cache_invalidator = self
+ .cache_invalidator
+ .unwrap_or_else(|| Arc::new(DummyCacheInvalidator));
+
let region_query_handler =
FrontendRegionQueryHandler::arc(partition_manager.clone(), datanode_manager.clone());
let inserter = Arc::new(Inserter::new(
- catalog_manager.clone(),
+ self.catalog_manager.clone(),
partition_manager.clone(),
datanode_manager.clone(),
));
let deleter = Arc::new(Deleter::new(
- catalog_manager.clone(),
+ self.catalog_manager.clone(),
partition_manager.clone(),
datanode_manager.clone(),
));
let requester = Arc::new(Requester::new(
- catalog_manager.clone(),
+ self.catalog_manager.clone(),
partition_manager,
datanode_manager.clone(),
));
@@ -126,7 +127,7 @@ impl FrontendBuilder {
));
let query_engine = QueryEngineFactory::new_with_plugins(
- catalog_manager.clone(),
+ self.catalog_manager.clone(),
Some(region_query_handler.clone()),
Some(table_mutation_handler),
Some(procedure_service_handler),
@@ -135,22 +136,23 @@ impl FrontendBuilder {
)
.query_engine();
- let script_executor =
- Arc::new(ScriptExecutor::new(catalog_manager.clone(), query_engine.clone()).await?);
+ let script_executor = Arc::new(
+ ScriptExecutor::new(self.catalog_manager.clone(), query_engine.clone()).await?,
+ );
let statement_executor = Arc::new(StatementExecutor::new(
- catalog_manager.clone(),
+ self.catalog_manager.clone(),
query_engine.clone(),
self.procedure_executor,
kv_backend.clone(),
- catalog_manager.clone(),
+ cache_invalidator,
inserter.clone(),
));
plugins.insert::<StatementExecutorRef>(statement_executor.clone());
Ok(Instance {
- catalog_manager,
+ catalog_manager: self.catalog_manager,
script_executor,
statement_executor,
query_engine,
diff --git a/src/meta-srv/src/procedure/tests.rs b/src/meta-srv/src/procedure/tests.rs
index 3caf1b63dd5c..d40ca633ff5d 100644
--- a/src/meta-srv/src/procedure/tests.rs
+++ b/src/meta-srv/src/procedure/tests.rs
@@ -303,7 +303,7 @@ async fn test_on_datanode_create_logical_regions() {
});
let status = procedure.on_datanode_create_regions().await.unwrap();
- assert!(matches!(status, Status::Executing { persist: false }));
+ assert!(matches!(status, Status::Executing { persist: true }));
assert!(matches!(
procedure.data.state(),
&CreateTablesState::CreateMetadata
diff --git a/src/operator/src/statement/ddl.rs b/src/operator/src/statement/ddl.rs
index e16aa82073a3..49f1ef689528 100644
--- a/src/operator/src/statement/ddl.rs
+++ b/src/operator/src/statement/ddl.rs
@@ -487,26 +487,26 @@ impl StatementExecutor {
#[tracing::instrument(skip_all)]
pub async fn alter_table_inner(&self, expr: AlterExpr) -> Result<Output> {
let catalog_name = if expr.catalog_name.is_empty() {
- DEFAULT_CATALOG_NAME
+ DEFAULT_CATALOG_NAME.to_string()
} else {
- expr.catalog_name.as_str()
+ expr.catalog_name.clone()
};
let schema_name = if expr.schema_name.is_empty() {
- DEFAULT_SCHEMA_NAME
+ DEFAULT_SCHEMA_NAME.to_string()
} else {
- expr.schema_name.as_str()
+ expr.schema_name.clone()
};
- let table_name = expr.table_name.as_str();
+ let table_name = expr.table_name.clone();
let table = self
.catalog_manager
- .table(catalog_name, schema_name, table_name)
+ .table(&catalog_name, &schema_name, &table_name)
.await
.context(CatalogSnafu)?
.with_context(|| TableNotFoundSnafu {
- table_name: format_full_table_name(catalog_name, schema_name, table_name),
+ table_name: format_full_table_name(&catalog_name, &schema_name, &table_name),
})?;
let table_id = table.table_info().ident.table_id;
@@ -518,8 +518,54 @@ impl StatementExecutor {
expr
);
- let req = SubmitDdlTaskRequest {
- task: DdlTask::new_alter_table(expr.clone()),
+ let physical_table_id = self
+ .table_metadata_manager
+ .table_route_manager()
+ .get_physical_table_id(table_id)
+ .await
+ .context(TableMetadataManagerSnafu)?;
+
+ let (req, invalidate_keys) = if physical_table_id == table_id {
+ // This is physical table
+ let req = SubmitDdlTaskRequest {
+ task: DdlTask::new_alter_table(expr),
+ };
+
+ let invalidate_keys = vec![
+ CacheIdent::TableId(table_id),
+ CacheIdent::TableName(TableName::new(catalog_name, schema_name, table_name)),
+ ];
+
+ (req, invalidate_keys)
+ } else {
+ // This is logical table
+ let req = SubmitDdlTaskRequest {
+ task: DdlTask::new_alter_logical_tables(vec![expr]),
+ };
+
+ let mut invalidate_keys = vec![
+ CacheIdent::TableId(physical_table_id),
+ CacheIdent::TableId(table_id),
+ CacheIdent::TableName(TableName::new(catalog_name, schema_name, table_name)),
+ ];
+
+ let physical_table = self
+ .table_metadata_manager
+ .table_info_manager()
+ .get(physical_table_id)
+ .await
+ .context(TableMetadataManagerSnafu)?
+ .map(|x| x.into_inner());
+ if let Some(physical_table) = physical_table {
+ let physical_table_name = TableName::new(
+ physical_table.table_info.catalog_name,
+ physical_table.table_info.schema_name,
+ physical_table.table_info.name,
+ );
+ invalidate_keys.push(CacheIdent::TableName(physical_table_name));
+ }
+
+ (req, invalidate_keys)
};
self.procedure_executor
@@ -529,13 +575,7 @@ impl StatementExecutor {
// Invalidates local cache ASAP.
self.cache_invalidator
- .invalidate(
- &Context::default(),
- vec![
- CacheIdent::TableId(table_id),
- CacheIdent::TableName(TableName::new(catalog_name, schema_name, table_name)),
- ],
- )
+ .invalidate(&Context::default(), invalidate_keys)
.await
.context(error::InvalidateTableCacheSnafu)?;
diff --git a/tests-integration/src/cluster.rs b/tests-integration/src/cluster.rs
index 197d3dead15a..d427b2a8f27b 100644
--- a/tests-integration/src/cluster.rs
+++ b/tests-integration/src/cluster.rs
@@ -20,7 +20,7 @@ use std::time::Duration;
use api::v1::meta::Role;
use api::v1::region::region_server::RegionServer;
use arrow_flight::flight_service_server::FlightServiceServer;
-use catalog::kvbackend::{CachedMetaKvBackendBuilder, MetaKvBackend};
+use catalog::kvbackend::{CachedMetaKvBackendBuilder, KvBackendCatalogManager, MetaKvBackend};
use client::client_manager::DatanodeClients;
use client::Client;
use common_base::Plugins;
@@ -353,11 +353,12 @@ impl GreptimeDbClusterBuilder {
let cached_meta_backend =
Arc::new(CachedMetaKvBackendBuilder::new(meta_client.clone()).build());
+ let catalog_manager =
+ KvBackendCatalogManager::new(cached_meta_backend.clone(), cached_meta_backend.clone());
+
let handlers_executor = HandlerGroupExecutor::new(vec![
Arc::new(ParseMailboxMessageHandler),
- Arc::new(InvalidateTableCacheHandler::new(
- cached_meta_backend.clone(),
- )),
+ Arc::new(InvalidateTableCacheHandler::new(catalog_manager.clone())),
]);
let heartbeat_task = HeartbeatTask::new(
@@ -366,13 +367,17 @@ impl GreptimeDbClusterBuilder {
Arc::new(handlers_executor),
);
- let instance =
- FrontendBuilder::new(cached_meta_backend.clone(), datanode_clients, meta_client)
- .with_cache_invalidator(cached_meta_backend)
- .with_heartbeat_task(heartbeat_task)
- .try_build()
- .await
- .unwrap();
+ let instance = FrontendBuilder::new(
+ cached_meta_backend.clone(),
+ catalog_manager.clone(),
+ datanode_clients,
+ meta_client,
+ )
+ .with_cache_invalidator(catalog_manager)
+ .with_heartbeat_task(heartbeat_task)
+ .try_build()
+ .await
+ .unwrap();
Arc::new(instance)
}
diff --git a/tests-integration/src/standalone.rs b/tests-integration/src/standalone.rs
index b0749c3a23f3..0c9a58284cca 100644
--- a/tests-integration/src/standalone.rs
+++ b/tests-integration/src/standalone.rs
@@ -14,6 +14,7 @@
use std::sync::Arc;
+use catalog::kvbackend::KvBackendCatalogManager;
use cmd::options::MixOptions;
use common_base::Plugins;
use common_catalog::consts::MIN_USER_TABLE_ID;
@@ -124,6 +125,9 @@ impl GreptimeDbStandaloneBuilder {
let table_metadata_manager = Arc::new(TableMetadataManager::new(kv_backend.clone()));
table_metadata_manager.init().await.unwrap();
+ let catalog_manager =
+ KvBackendCatalogManager::new(kv_backend.clone(), Arc::new(DummyCacheInvalidator));
+
let datanode_manager = Arc::new(StandaloneDatanodeManager(datanode.region_server()));
let table_id_sequence = Arc::new(
@@ -154,12 +158,17 @@ impl GreptimeDbStandaloneBuilder {
.unwrap(),
);
- let instance =
- FrontendBuilder::new(kv_backend.clone(), datanode_manager, ddl_task_executor)
- .with_plugin(plugins)
- .try_build()
- .await
- .unwrap();
+ let instance = FrontendBuilder::new(
+ kv_backend.clone(),
+ catalog_manager.clone(),
+ datanode_manager,
+ ddl_task_executor,
+ )
+ .with_plugin(plugins)
+ .with_cache_invalidator(catalog_manager)
+ .try_build()
+ .await
+ .unwrap();
procedure_manager.start().await.unwrap();
wal_options_allocator.start().await.unwrap();
diff --git a/tests/cases/standalone/common/alter/alter_metric_table.result b/tests/cases/standalone/common/alter/alter_metric_table.result
new file mode 100644
index 000000000000..8ae541b71e07
--- /dev/null
+++ b/tests/cases/standalone/common/alter/alter_metric_table.result
@@ -0,0 +1,110 @@
+CREATE TABLE phy (ts timestamp time index, val double) engine=metric with ("physical_metric_table" = "");
+
+Affected Rows: 0
+
+SHOW TABLES;
+
++---------+
+| Tables |
++---------+
+| numbers |
+| phy |
++---------+
+
+CREATE TABLE t1 (ts timestamp time index, val double, host string primary key) engine = metric with ("on_physical_table" = "phy");
+
+Affected Rows: 0
+
+CREATE TABLE t2 (ts timestamp time index, job string primary key, val double) engine = metric with ("on_physical_table" = "phy");
+
+Affected Rows: 0
+
+DESC TABLE t1;
+
++--------+----------------------+-----+------+---------+---------------+
+| Column | Type | Key | Null | Default | Semantic Type |
++--------+----------------------+-----+------+---------+---------------+
+| host | String | PRI | YES | | TAG |
+| ts | TimestampMillisecond | PRI | NO | | TIMESTAMP |
+| val | Float64 | | YES | | FIELD |
++--------+----------------------+-----+------+---------+---------------+
+
+DESC TABLE t2;
+
++--------+----------------------+-----+------+---------+---------------+
+| Column | Type | Key | Null | Default | Semantic Type |
++--------+----------------------+-----+------+---------+---------------+
+| job | String | PRI | YES | | TAG |
+| ts | TimestampMillisecond | PRI | NO | | TIMESTAMP |
+| val | Float64 | | YES | | FIELD |
++--------+----------------------+-----+------+---------+---------------+
+
+DESC TABLE phy;
+
++------------+----------------------+-----+------+---------+---------------+
+| Column | Type | Key | Null | Default | Semantic Type |
++------------+----------------------+-----+------+---------+---------------+
+| ts | TimestampMillisecond | | NO | | FIELD |
+| val | Float64 | | YES | | FIELD |
+| __table_id | UInt32 | PRI | NO | | TAG |
+| __tsid | UInt64 | PRI | NO | | TAG |
+| host | String | PRI | YES | | TAG |
+| job | String | PRI | YES | | TAG |
++------------+----------------------+-----+------+---------+---------------+
+
+ALTER TABLE t1 ADD COLUMN k STRING PRIMARY KEY;
+
+Affected Rows: 0
+
+ALTER TABLE t2 ADD COLUMN k STRING PRIMARY KEY;
+
+Affected Rows: 0
+
+DESC TABLE t1;
+
++--------+----------------------+-----+------+---------+---------------+
+| Column | Type | Key | Null | Default | Semantic Type |
++--------+----------------------+-----+------+---------+---------------+
+| host | String | PRI | YES | | TAG |
+| k | String | PRI | YES | | TAG |
+| ts | TimestampMillisecond | PRI | NO | | TIMESTAMP |
+| val | Float64 | | YES | | FIELD |
++--------+----------------------+-----+------+---------+---------------+
+
+DESC TABLE t2;
+
++--------+----------------------+-----+------+---------+---------------+
+| Column | Type | Key | Null | Default | Semantic Type |
++--------+----------------------+-----+------+---------+---------------+
+| job | String | PRI | YES | | TAG |
+| k | String | PRI | YES | | TAG |
+| ts | TimestampMillisecond | PRI | NO | | TIMESTAMP |
+| val | Float64 | | YES | | FIELD |
++--------+----------------------+-----+------+---------+---------------+
+
+DESC TABLE phy;
+
++------------+----------------------+-----+------+---------+---------------+
+| Column | Type | Key | Null | Default | Semantic Type |
++------------+----------------------+-----+------+---------+---------------+
+| ts | TimestampMillisecond | | NO | | FIELD |
+| val | Float64 | | YES | | FIELD |
+| __table_id | UInt32 | PRI | NO | | TAG |
+| __tsid | UInt64 | PRI | NO | | TAG |
+| host | String | PRI | YES | | TAG |
+| job | String | PRI | YES | | TAG |
+| k | String | PRI | YES | | TAG |
++------------+----------------------+-----+------+---------+---------------+
+
+DROP TABLE t1;
+
+Affected Rows: 0
+
+DROP TABLE t2;
+
+Affected Rows: 0
+
+DROP TABLE phy;
+
+Affected Rows: 0
+
diff --git a/tests/cases/standalone/common/alter/alter_metric_table.sql b/tests/cases/standalone/common/alter/alter_metric_table.sql
new file mode 100644
index 000000000000..579dd90c4896
--- /dev/null
+++ b/tests/cases/standalone/common/alter/alter_metric_table.sql
@@ -0,0 +1,29 @@
+CREATE TABLE phy (ts timestamp time index, val double) engine=metric with ("physical_metric_table" = "");
+
+SHOW TABLES;
+
+CREATE TABLE t1 (ts timestamp time index, val double, host string primary key) engine = metric with ("on_physical_table" = "phy");
+
+CREATE TABLE t2 (ts timestamp time index, job string primary key, val double) engine = metric with ("on_physical_table" = "phy");
+
+DESC TABLE t1;
+
+DESC TABLE t2;
+
+DESC TABLE phy;
+
+ALTER TABLE t1 ADD COLUMN k STRING PRIMARY KEY;
+
+ALTER TABLE t2 ADD COLUMN k STRING PRIMARY KEY;
+
+DESC TABLE t1;
+
+DESC TABLE t2;
+
+DESC TABLE phy;
+
+DROP TABLE t1;
+
+DROP TABLE t2;
+
+DROP TABLE phy;
diff --git a/tests/cases/standalone/common/create/create_metric_table.result b/tests/cases/standalone/common/create/create_metric_table.result
index b6578c5ca965..0a153ec733fc 100644
--- a/tests/cases/standalone/common/create/create_metric_table.result
+++ b/tests/cases/standalone/common/create/create_metric_table.result
@@ -11,6 +11,15 @@ SHOW TABLES;
| phy |
+---------+
+DESC TABLE phy;
+
++--------+----------------------+-----+------+---------+---------------+
+| Column | Type | Key | Null | Default | Semantic Type |
++--------+----------------------+-----+------+---------+---------------+
+| ts | TimestampMillisecond | PRI | NO | | TIMESTAMP |
+| val | Float64 | | YES | | FIELD |
++--------+----------------------+-----+------+---------+---------------+
+
CREATE TABLE t1 (ts timestamp time index, val double, host string primary key) engine = metric with ("on_physical_table" = "phy");
Affected Rows: 0
diff --git a/tests/cases/standalone/common/create/create_metric_table.sql b/tests/cases/standalone/common/create/create_metric_table.sql
index 28b3083d9037..fcc41ff11541 100644
--- a/tests/cases/standalone/common/create/create_metric_table.sql
+++ b/tests/cases/standalone/common/create/create_metric_table.sql
@@ -2,6 +2,8 @@ CREATE TABLE phy (ts timestamp time index, val double) engine=metric with ("phys
SHOW TABLES;
+DESC TABLE phy;
+
CREATE TABLE t1 (ts timestamp time index, val double, host string primary key) engine = metric with ("on_physical_table" = "phy");
CREATE TABLE t2 (ts timestamp time index, job string primary key, val double) engine = metric with ("on_physical_table" = "phy");
|
feat
|
update physical table schema on alter logical tables (#3585)
|
e54415e7234eeb997324175051552d48ea25a8ae
|
2023-07-03 09:38:47
|
Cao Zhengjia
|
feat: Make heartbeat intervals configurable in Frontend and Datanode (#1864)
| false
|
diff --git a/config/datanode.example.toml b/config/datanode.example.toml
index 70b75d361de9..ebde9c81e3e7 100644
--- a/config/datanode.example.toml
+++ b/config/datanode.example.toml
@@ -10,6 +10,8 @@ rpc_addr = "127.0.0.1:3001"
rpc_hostname = "127.0.0.1"
# The number of gRPC server worker threads, 8 by default.
rpc_runtime_size = 8
+# Interval for sending heartbeat messages to the Metasrv in milliseconds, 5000 by default.
+heartbeat_interval_millis = 5000
# Metasrv client options.
[meta_client_options]
diff --git a/config/frontend.example.toml b/config/frontend.example.toml
index e8020a2b69cc..4ac7e97125c9 100644
--- a/config/frontend.example.toml
+++ b/config/frontend.example.toml
@@ -1,5 +1,9 @@
# Node running mode, see `standalone.example.toml`.
mode = "distributed"
+# Interval for sending heartbeat messages to the Metasrv in milliseconds, 5000 by default.
+heartbeat_interval_millis = 5000
+# Interval for retrying to send heartbeat messages in milliseconds, 5000 by default.
+retry_interval_millis = 5000
# HTTP server options, see `standalone.example.toml`.
[http_options]
diff --git a/src/cmd/src/standalone.rs b/src/cmd/src/standalone.rs
index d6ad7560710b..fb791f21197d 100644
--- a/src/cmd/src/standalone.rs
+++ b/src/cmd/src/standalone.rs
@@ -132,6 +132,7 @@ impl StandaloneOptions {
prom_options: self.prom_options,
meta_client_options: None,
logging: self.logging,
+ ..Default::default()
}
}
diff --git a/src/frontend/src/frontend.rs b/src/frontend/src/frontend.rs
index ab8dbde174f9..960d4eb68964 100644
--- a/src/frontend/src/frontend.rs
+++ b/src/frontend/src/frontend.rs
@@ -27,6 +27,8 @@ use crate::service_config::{
#[serde(default)]
pub struct FrontendOptions {
pub mode: Mode,
+ pub heartbeat_interval_millis: u64,
+ pub retry_interval_millis: u64,
pub http_options: Option<HttpOptions>,
pub grpc_options: Option<GrpcOptions>,
pub mysql_options: Option<MysqlOptions>,
@@ -43,6 +45,8 @@ impl Default for FrontendOptions {
fn default() -> Self {
Self {
mode: Mode::Standalone,
+ heartbeat_interval_millis: 5000,
+ retry_interval_millis: 5000,
http_options: Some(HttpOptions::default()),
grpc_options: Some(GrpcOptions::default()),
mysql_options: Some(MysqlOptions::default()),
diff --git a/src/frontend/src/heartbeat.rs b/src/frontend/src/heartbeat.rs
index 3860f93bcc56..a486f9bfbe24 100644
--- a/src/frontend/src/heartbeat.rs
+++ b/src/frontend/src/heartbeat.rs
@@ -43,14 +43,14 @@ pub struct HeartbeatTask {
impl HeartbeatTask {
pub fn new(
meta_client: Arc<MetaClient>,
- report_interval: u64,
- retry_interval: u64,
+ heartbeat_interval_millis: u64,
+ retry_interval_millis: u64,
resp_handler_executor: HeartbeatResponseHandlerExecutorRef,
) -> Self {
HeartbeatTask {
meta_client,
- report_interval,
- retry_interval,
+ report_interval: heartbeat_interval_millis,
+ retry_interval: retry_interval_millis,
resp_handler_executor,
}
}
@@ -92,7 +92,7 @@ impl HeartbeatTask {
Err(e) => {
error!(e; "Occur error while reading heartbeat response");
capture_self
- .start_with_retry(Duration::from_secs(retry_interval))
+ .start_with_retry(Duration::from_millis(retry_interval))
.await;
break;
@@ -136,7 +136,7 @@ impl HeartbeatTask {
}
}
_ = &mut sleep => {
- sleep.as_mut().reset(Instant::now() + Duration::from_secs(report_interval));
+ sleep.as_mut().reset(Instant::now() + Duration::from_millis(report_interval));
Some(HeartbeatRequest::default())
}
};
diff --git a/src/frontend/src/instance.rs b/src/frontend/src/instance.rs
index 57e79eb6897b..db885e65c132 100644
--- a/src/frontend/src/instance.rs
+++ b/src/frontend/src/instance.rs
@@ -136,13 +136,14 @@ impl Instance {
let datanode_clients = Arc::new(DatanodeClients::default());
- Self::try_new_distributed_with(meta_client, datanode_clients, plugins).await
+ Self::try_new_distributed_with(meta_client, datanode_clients, plugins, opts).await
}
pub async fn try_new_distributed_with(
meta_client: Arc<MetaClient>,
datanode_clients: Arc<DatanodeClients>,
plugins: Arc<Plugins>,
+ opts: &FrontendOptions,
) -> Result<Self> {
let meta_backend = Arc::new(CachedMetaKvBackend::new(meta_client.clone()));
let table_routes = Arc::new(TableRoutes::new(meta_client.clone()));
@@ -195,8 +196,8 @@ impl Instance {
let heartbeat_task = Some(HeartbeatTask::new(
meta_client,
- 5,
- 5,
+ opts.heartbeat_interval_millis,
+ opts.retry_interval_millis,
Arc::new(handlers_executor),
));
diff --git a/tests-integration/src/cluster.rs b/tests-integration/src/cluster.rs
index 1943edc7f659..8e250c398c66 100644
--- a/tests-integration/src/cluster.rs
+++ b/tests-integration/src/cluster.rs
@@ -29,6 +29,7 @@ use common_test_util::temp_dir::create_temp_dir;
use datanode::datanode::{DatanodeOptions, ObjectStoreConfig};
use datanode::heartbeat::HeartbeatTask;
use datanode::instance::Instance as DatanodeInstance;
+use frontend::frontend::FrontendOptions;
use frontend::instance::{FrontendInstance, Instance as FeInstance};
use meta_client::client::MetaClientBuilder;
use meta_srv::cluster::MetaPeerClientRef;
@@ -221,11 +222,14 @@ impl GreptimeDbClusterBuilder {
meta_client.start(&[&meta_srv.server_addr]).await.unwrap();
let meta_client = Arc::new(meta_client);
+ let frontend_opts = FrontendOptions::default();
+
Arc::new(
FeInstance::try_new_distributed_with(
meta_client,
datanode_clients,
Arc::new(Plugins::default()),
+ &frontend_opts,
)
.await
.unwrap(),
|
feat
|
Make heartbeat intervals configurable in Frontend and Datanode (#1864)
|
47a796c0ba7d9a1add0ffd59dd5eb5850ae47905
|
2023-08-02 07:50:49
|
zyy17
|
fix: incorrect github token secret name (#2061)
| false
|
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index a68c4a3107df..7b1e2448ff2c 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -140,7 +140,7 @@ jobs:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.EC2_RUNNER_REGION }}
- github-token: ${{ secrets.GITHUB_TOKEN }}
+ github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
image-id: ${{ vars.EC2_RUNNER_LINUX_AMD64_IMAGE_ID }}
security-group-id: ${{ vars.EC2_RUNNER_SECURITY_GROUP_ID }}
subnet-id: ${{ vars.EC2_RUNNER_SUBNET_ID }}
@@ -154,7 +154,7 @@ jobs:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.EC2_RUNNER_REGION }}
- github-token: ${{ secrets.GITHUB_TOKEN }}
+ github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
image-id: ${{ vars.EC2_RUNNER_LINUX_ARM64_IMAGE_ID }}
security-group-id: ${{ vars.EC2_RUNNER_SECURITY_GROUP_ID }}
subnet-id: ${{ vars.EC2_RUNNER_SUBNET_ID }}
@@ -367,7 +367,7 @@ jobs:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.EC2_RUNNER_REGION }}
- github-token: ${{ secrets.GITHUB_TOKEN }}
+ github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
stop-linux-arm64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
name: Stop linux-arm64 runner
@@ -392,4 +392,4 @@ jobs:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.EC2_RUNNER_REGION }}
- github-token: ${{ secrets.GITHUB_TOKEN }}
+ github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
|
fix
|
incorrect github token secret name (#2061)
|
2b6b979d5a98c61eba24fb64c543446c2546fdd6
|
2022-11-21 11:45:47
|
Lei, HUANG
|
fix: remove datanode mysql options in standalone mode (#595)
| false
|
diff --git a/config/standalone.example.toml b/config/standalone.example.toml
index cf981a300f47..dc8b0519ab65 100644
--- a/config/standalone.example.toml
+++ b/config/standalone.example.toml
@@ -1,8 +1,6 @@
node_id = 0
mode = 'standalone'
http_addr = '127.0.0.1:4000'
-datanode_mysql_addr = '127.0.0.1:4406'
-datanode_mysql_runtime_size = 4
wal_dir = '/tmp/greptimedb/wal/'
[storage]
diff --git a/src/cmd/src/standalone.rs b/src/cmd/src/standalone.rs
index a61038979c4a..39b0b14fa66e 100644
--- a/src/cmd/src/standalone.rs
+++ b/src/cmd/src/standalone.rs
@@ -71,8 +71,6 @@ pub struct StandaloneOptions {
pub mode: Mode,
pub wal_dir: String,
pub storage: ObjectStoreConfig,
- pub datanode_mysql_addr: String,
- pub datanode_mysql_runtime_size: usize,
}
impl Default for StandaloneOptions {
@@ -88,8 +86,6 @@ impl Default for StandaloneOptions {
mode: Mode::Standalone,
wal_dir: "/tmp/greptimedb/wal".to_string(),
storage: ObjectStoreConfig::default(),
- datanode_mysql_addr: "127.0.0.1:4406".to_string(),
- datanode_mysql_runtime_size: 4,
}
}
}
@@ -114,8 +110,6 @@ impl StandaloneOptions {
DatanodeOptions {
wal_dir: self.wal_dir,
storage: self.storage,
- mysql_addr: self.datanode_mysql_addr,
- mysql_runtime_size: self.datanode_mysql_runtime_size,
..Default::default()
}
}
|
fix
|
remove datanode mysql options in standalone mode (#595)
|
e16f093282831661260ca7ffbf6474f0a1f513dc
|
2023-12-29 13:47:22
|
Weny Xu
|
test(remote_wal): add sqlness with kafka wal (#3027)
| false
|
diff --git a/.github/workflows/develop.yml b/.github/workflows/develop.yml
index f345f0607077..f58e9546a903 100644
--- a/.github/workflows/develop.yml
+++ b/.github/workflows/develop.yml
@@ -104,6 +104,37 @@ jobs:
path: ${{ runner.temp }}/greptime-*.log
retention-days: 3
+ sqlness-kafka-wal:
+ name: Sqlness Test with Kafka Wal
+ if: github.event.pull_request.draft == false
+ runs-on: ${{ matrix.os }}
+ strategy:
+ matrix:
+ os: [ ubuntu-20.04-8-cores ]
+ timeout-minutes: 60
+ steps:
+ - uses: actions/checkout@v3
+ - uses: arduino/setup-protoc@v1
+ with:
+ repo-token: ${{ secrets.GITHUB_TOKEN }}
+ - uses: dtolnay/rust-toolchain@master
+ with:
+ toolchain: ${{ env.RUST_TOOLCHAIN }}
+ - name: Rust Cache
+ uses: Swatinem/rust-cache@v2
+ - name: Setup kafka server
+ working-directory: tests-integration/fixtures/kafka
+ run: docker compose -f docker-compose-standalone.yml up -d --wait
+ - name: Run sqlness
+ run: cargo sqlness -w kafka -k 127.0.0.1:9092
+ - name: Upload sqlness logs
+ if: always()
+ uses: actions/upload-artifact@v3
+ with:
+ name: sqlness-logs
+ path: ${{ runner.temp }}/greptime-*.log
+ retention-days: 3
+
fmt:
name: Rustfmt
if: github.event.pull_request.draft == false
diff --git a/Cargo.lock b/Cargo.lock
index abe0acb61213..d179ea6c8c54 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -8859,6 +8859,7 @@ dependencies = [
"common-recordbatch",
"common-time",
"serde",
+ "serde_json",
"sqlness",
"tinytemplate",
"tokio",
diff --git a/src/common/meta/src/wal/kafka.rs b/src/common/meta/src/wal/kafka.rs
index 0a61b6015dfc..6719f2f63849 100644
--- a/src/common/meta/src/wal/kafka.rs
+++ b/src/common/meta/src/wal/kafka.rs
@@ -27,6 +27,7 @@ pub use crate::wal::kafka::topic_manager::TopicManager;
/// Configurations for kafka wal.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
+#[serde(default)]
pub struct KafkaConfig {
/// The broker endpoints of the Kafka cluster.
pub broker_endpoints: Vec<String>,
diff --git a/src/log-store/src/kafka/log_store.rs b/src/log-store/src/kafka/log_store.rs
index df64fa66571f..36c86987041b 100644
--- a/src/log-store/src/kafka/log_store.rs
+++ b/src/log-store/src/kafka/log_store.rs
@@ -197,8 +197,6 @@ impl LogStore for KafkaLogStore {
&& entry.ns.region_id == region_id
{
yield Ok(entries);
- } else {
- yield Ok(vec![]);
}
// Terminates the stream if the entry with the end offset was read.
diff --git a/src/meta-srv/src/metasrv.rs b/src/meta-srv/src/metasrv.rs
index dba3c4485002..c5b11874d1c4 100644
--- a/src/meta-srv/src/metasrv.rs
+++ b/src/meta-srv/src/metasrv.rs
@@ -330,6 +330,9 @@ impl MetaSrv {
info!("MetaSrv stopped");
});
} else {
+ if let Err(e) = self.wal_options_allocator.start().await {
+ error!(e; "Failed to start wal options allocator");
+ }
// Always load kv into cached kv store.
self.leader_cached_kv_backend
.load()
diff --git a/tests-integration/fixtures/kafka/README.md b/tests-integration/fixtures/kafka/README.md
new file mode 100644
index 000000000000..9d49a2289309
--- /dev/null
+++ b/tests-integration/fixtures/kafka/README.md
@@ -0,0 +1,19 @@
+## Starts a standalone kafka
+```bash
+docker compose -f docker-compose-standalone.yml up kafka -d
+```
+
+## Lists running services
+```bash
+docker compose -f docker-compose-standalone.yml ps
+```
+
+## Stops the standalone kafka
+```bash
+docker compose -f docker-compose-standalone.yml stop kafka
+```
+
+## Stops and removes the standalone kafka
+```bash
+docker compose -f docker-compose-standalone.yml down kafka
+```
\ No newline at end of file
diff --git a/tests/cases/standalone/common/types/string/scan_big_varchar.result b/tests/cases/standalone/common/types/string/scan_big_varchar.result
index d132adce3f29..5a14cc0e1996 100644
--- a/tests/cases/standalone/common/types/string/scan_big_varchar.result
+++ b/tests/cases/standalone/common/types/string/scan_big_varchar.result
@@ -126,102 +126,22 @@ SELECT COUNT(*), COUNT(a), MAX(LENGTH(a)), SUM(LENGTH(a)) FROM bigtable;
| 128 | 128 | 10000 | 1280000 |
+----------+-------------------+-----------------------------------+-----------------------------------+
-INSERT INTO bigtable SELECT a, to_unixtime(ts) * 53 FROM bigtable;
-
-Affected Rows: 128
-
-SELECT COUNT(*), COUNT(a), MAX(LENGTH(a)), SUM(LENGTH(a)) FROM bigtable;
-
-+----------+-------------------+-----------------------------------+-----------------------------------+
-| COUNT(*) | COUNT(bigtable.a) | MAX(character_length(bigtable.a)) | SUM(character_length(bigtable.a)) |
-+----------+-------------------+-----------------------------------+-----------------------------------+
-| 256 | 256 | 10000 | 2560000 |
-+----------+-------------------+-----------------------------------+-----------------------------------+
-
-INSERT INTO bigtable SELECT a, to_unixtime(ts) * 57 FROM bigtable;
-
-Affected Rows: 256
-
-SELECT COUNT(*), COUNT(a), MAX(LENGTH(a)), SUM(LENGTH(a)) FROM bigtable;
-
-+----------+-------------------+-----------------------------------+-----------------------------------+
-| COUNT(*) | COUNT(bigtable.a) | MAX(character_length(bigtable.a)) | SUM(character_length(bigtable.a)) |
-+----------+-------------------+-----------------------------------+-----------------------------------+
-| 512 | 512 | 10000 | 5120000 |
-+----------+-------------------+-----------------------------------+-----------------------------------+
-
-INSERT INTO bigtable SELECT a, to_unixtime(ts) * 61 FROM bigtable;
-
-Affected Rows: 512
-
-SELECT COUNT(*), COUNT(a), MAX(LENGTH(a)), SUM(LENGTH(a)) FROM bigtable;
-
-+----------+-------------------+-----------------------------------+-----------------------------------+
-| COUNT(*) | COUNT(bigtable.a) | MAX(character_length(bigtable.a)) | SUM(character_length(bigtable.a)) |
-+----------+-------------------+-----------------------------------+-----------------------------------+
-| 1024 | 1024 | 10000 | 10240000 |
-+----------+-------------------+-----------------------------------+-----------------------------------+
-
-INSERT INTO bigtable SELECT a, to_unixtime(ts) * 63 FROM bigtable;
-
-Affected Rows: 1024
-
-SELECT COUNT(*), COUNT(a), MAX(LENGTH(a)), SUM(LENGTH(a)) FROM bigtable;
-
-+----------+-------------------+-----------------------------------+-----------------------------------+
-| COUNT(*) | COUNT(bigtable.a) | MAX(character_length(bigtable.a)) | SUM(character_length(bigtable.a)) |
-+----------+-------------------+-----------------------------------+-----------------------------------+
-| 2048 | 2048 | 10000 | 20480000 |
-+----------+-------------------+-----------------------------------+-----------------------------------+
-
-INSERT INTO bigtable SELECT a, to_unixtime(ts) * 67 FROM bigtable;
-
-Affected Rows: 2048
-
-SELECT COUNT(*), COUNT(a), MAX(LENGTH(a)), SUM(LENGTH(a)) FROM bigtable;
-
-+----------+-------------------+-----------------------------------+-----------------------------------+
-| COUNT(*) | COUNT(bigtable.a) | MAX(character_length(bigtable.a)) | SUM(character_length(bigtable.a)) |
-+----------+-------------------+-----------------------------------+-----------------------------------+
-| 4096 | 4096 | 10000 | 40960000 |
-+----------+-------------------+-----------------------------------+-----------------------------------+
-
-INSERT INTO bigtable SELECT a, to_unixtime(ts) * 71 FROM bigtable;
-
-Affected Rows: 4096
-
-SELECT COUNT(*), COUNT(a), MAX(LENGTH(a)), SUM(LENGTH(a)) FROM bigtable;
-
-+----------+-------------------+-----------------------------------+-----------------------------------+
-| COUNT(*) | COUNT(bigtable.a) | MAX(character_length(bigtable.a)) | SUM(character_length(bigtable.a)) |
-+----------+-------------------+-----------------------------------+-----------------------------------+
-| 8192 | 8192 | 10000 | 81920000 |
-+----------+-------------------+-----------------------------------+-----------------------------------+
-
-INSERT INTO bigtable SELECT a, to_unixtime(ts) * 73 FROM bigtable;
-
-Affected Rows: 8192
-
-SELECT COUNT(*), COUNT(a), MAX(LENGTH(a)), SUM(LENGTH(a)) FROM bigtable;
-
-+----------+-------------------+-----------------------------------+-----------------------------------+
-| COUNT(*) | COUNT(bigtable.a) | MAX(character_length(bigtable.a)) | SUM(character_length(bigtable.a)) |
-+----------+-------------------+-----------------------------------+-----------------------------------+
-| 16384 | 16384 | 10000 | 163840000 |
-+----------+-------------------+-----------------------------------+-----------------------------------+
-
-INSERT INTO bigtable SELECT a, to_unixtime(ts) * 79 FROM bigtable;
-
-Affected Rows: 16384
-
-SELECT COUNT(*), COUNT(a), MAX(LENGTH(a)), SUM(LENGTH(a)) FROM bigtable;
-
-+----------+-------------------+-----------------------------------+-----------------------------------+
-| COUNT(*) | COUNT(bigtable.a) | MAX(character_length(bigtable.a)) | SUM(character_length(bigtable.a)) |
-+----------+-------------------+-----------------------------------+-----------------------------------+
-| 32768 | 32768 | 10000 | 327680000 |
-+----------+-------------------+-----------------------------------+-----------------------------------+
-
+-- INSERT INTO bigtable SELECT a, to_unixtime(ts) * 53 FROM bigtable;
+-- SELECT COUNT(*), COUNT(a), MAX(LENGTH(a)), SUM(LENGTH(a)) FROM bigtable;
+-- INSERT INTO bigtable SELECT a, to_unixtime(ts) * 57 FROM bigtable;
+-- SELECT COUNT(*), COUNT(a), MAX(LENGTH(a)), SUM(LENGTH(a)) FROM bigtable;
+-- INSERT INTO bigtable SELECT a, to_unixtime(ts) * 61 FROM bigtable;
+-- SELECT COUNT(*), COUNT(a), MAX(LENGTH(a)), SUM(LENGTH(a)) FROM bigtable;
+-- INSERT INTO bigtable SELECT a, to_unixtime(ts) * 63 FROM bigtable;
+-- SELECT COUNT(*), COUNT(a), MAX(LENGTH(a)), SUM(LENGTH(a)) FROM bigtable;
+-- INSERT INTO bigtable SELECT a, to_unixtime(ts) * 67 FROM bigtable;
+-- SELECT COUNT(*), COUNT(a), MAX(LENGTH(a)), SUM(LENGTH(a)) FROM bigtable;
+-- INSERT INTO bigtable SELECT a, to_unixtime(ts) * 71 FROM bigtable;
+-- SELECT COUNT(*), COUNT(a), MAX(LENGTH(a)), SUM(LENGTH(a)) FROM bigtable;
+-- INSERT INTO bigtable SELECT a, to_unixtime(ts) * 73 FROM bigtable;
+-- SELECT COUNT(*), COUNT(a), MAX(LENGTH(a)), SUM(LENGTH(a)) FROM bigtable;
+-- INSERT INTO bigtable SELECT a, to_unixtime(ts) * 79 FROM bigtable;
+-- SELECT COUNT(*), COUNT(a), MAX(LENGTH(a)), SUM(LENGTH(a)) FROM bigtable;
DROP TABLE test;
Affected Rows: 0
diff --git a/tests/cases/standalone/common/types/string/scan_big_varchar.sql b/tests/cases/standalone/common/types/string/scan_big_varchar.sql
index 81bcb19da9ed..d9ce27e041f8 100644
--- a/tests/cases/standalone/common/types/string/scan_big_varchar.sql
+++ b/tests/cases/standalone/common/types/string/scan_big_varchar.sql
@@ -51,38 +51,38 @@ INSERT INTO bigtable SELECT a, to_unixtime(ts) * 51 FROM bigtable;
SELECT COUNT(*), COUNT(a), MAX(LENGTH(a)), SUM(LENGTH(a)) FROM bigtable;
-INSERT INTO bigtable SELECT a, to_unixtime(ts) * 53 FROM bigtable;
+-- INSERT INTO bigtable SELECT a, to_unixtime(ts) * 53 FROM bigtable;
-SELECT COUNT(*), COUNT(a), MAX(LENGTH(a)), SUM(LENGTH(a)) FROM bigtable;
+-- SELECT COUNT(*), COUNT(a), MAX(LENGTH(a)), SUM(LENGTH(a)) FROM bigtable;
-INSERT INTO bigtable SELECT a, to_unixtime(ts) * 57 FROM bigtable;
+-- INSERT INTO bigtable SELECT a, to_unixtime(ts) * 57 FROM bigtable;
-SELECT COUNT(*), COUNT(a), MAX(LENGTH(a)), SUM(LENGTH(a)) FROM bigtable;
+-- SELECT COUNT(*), COUNT(a), MAX(LENGTH(a)), SUM(LENGTH(a)) FROM bigtable;
-INSERT INTO bigtable SELECT a, to_unixtime(ts) * 61 FROM bigtable;
+-- INSERT INTO bigtable SELECT a, to_unixtime(ts) * 61 FROM bigtable;
-SELECT COUNT(*), COUNT(a), MAX(LENGTH(a)), SUM(LENGTH(a)) FROM bigtable;
+-- SELECT COUNT(*), COUNT(a), MAX(LENGTH(a)), SUM(LENGTH(a)) FROM bigtable;
-INSERT INTO bigtable SELECT a, to_unixtime(ts) * 63 FROM bigtable;
+-- INSERT INTO bigtable SELECT a, to_unixtime(ts) * 63 FROM bigtable;
-SELECT COUNT(*), COUNT(a), MAX(LENGTH(a)), SUM(LENGTH(a)) FROM bigtable;
+-- SELECT COUNT(*), COUNT(a), MAX(LENGTH(a)), SUM(LENGTH(a)) FROM bigtable;
-INSERT INTO bigtable SELECT a, to_unixtime(ts) * 67 FROM bigtable;
+-- INSERT INTO bigtable SELECT a, to_unixtime(ts) * 67 FROM bigtable;
-SELECT COUNT(*), COUNT(a), MAX(LENGTH(a)), SUM(LENGTH(a)) FROM bigtable;
+-- SELECT COUNT(*), COUNT(a), MAX(LENGTH(a)), SUM(LENGTH(a)) FROM bigtable;
-INSERT INTO bigtable SELECT a, to_unixtime(ts) * 71 FROM bigtable;
+-- INSERT INTO bigtable SELECT a, to_unixtime(ts) * 71 FROM bigtable;
-SELECT COUNT(*), COUNT(a), MAX(LENGTH(a)), SUM(LENGTH(a)) FROM bigtable;
+-- SELECT COUNT(*), COUNT(a), MAX(LENGTH(a)), SUM(LENGTH(a)) FROM bigtable;
-INSERT INTO bigtable SELECT a, to_unixtime(ts) * 73 FROM bigtable;
+-- INSERT INTO bigtable SELECT a, to_unixtime(ts) * 73 FROM bigtable;
-SELECT COUNT(*), COUNT(a), MAX(LENGTH(a)), SUM(LENGTH(a)) FROM bigtable;
+-- SELECT COUNT(*), COUNT(a), MAX(LENGTH(a)), SUM(LENGTH(a)) FROM bigtable;
-INSERT INTO bigtable SELECT a, to_unixtime(ts) * 79 FROM bigtable;
+-- INSERT INTO bigtable SELECT a, to_unixtime(ts) * 79 FROM bigtable;
-SELECT COUNT(*), COUNT(a), MAX(LENGTH(a)), SUM(LENGTH(a)) FROM bigtable;
+-- SELECT COUNT(*), COUNT(a), MAX(LENGTH(a)), SUM(LENGTH(a)) FROM bigtable;
DROP TABLE test;
diff --git a/tests/conf/datanode-test.toml.template b/tests/conf/datanode-test.toml.template
index 55d0c2f1fe4c..04968bc95690 100644
--- a/tests/conf/datanode-test.toml.template
+++ b/tests/conf/datanode-test.toml.template
@@ -6,12 +6,18 @@ rpc_hostname = '127.0.0.1'
rpc_runtime_size = 8
[wal]
+{{ if is_raft_engine }}
provider = "raft_engine"
file_size = '1GB'
purge_interval = '10m'
purge_threshold = '10GB'
read_batch_size = 128
sync_write = false
+{{ else }}
+provider = "kafka"
+broker_endpoints = {kafka_wal_broker_endpoints | unescaped}
+linger = "5ms"
+{{ endif }}
[storage]
type = 'File'
diff --git a/tests/conf/metasrv-test.toml.template b/tests/conf/metasrv-test.toml.template
new file mode 100644
index 000000000000..ecd69473249a
--- /dev/null
+++ b/tests/conf/metasrv-test.toml.template
@@ -0,0 +1,10 @@
+[wal]
+{{ if is_raft_engine }}
+provider = "raft_engine"
+{{ else }}
+provider = "kafka"
+broker_endpoints = {kafka_wal_broker_endpoints | unescaped}
+num_topics = 64
+selector_type = "round_robin"
+topic_name_prefix = "distributed_test_greptimedb_wal_topic"
+{{ endif }}
diff --git a/tests/conf/standalone-test.toml.template b/tests/conf/standalone-test.toml.template
index f0ddc38d048e..2e30ac35c266 100644
--- a/tests/conf/standalone-test.toml.template
+++ b/tests/conf/standalone-test.toml.template
@@ -3,12 +3,18 @@ enable_memory_catalog = false
require_lease_before_startup = true
[wal]
+{{ if is_raft_engine }}
provider = "raft_engine"
file_size = '1GB'
purge_interval = '10m'
purge_threshold = '10GB'
read_batch_size = 128
sync_write = false
+{{ else }}
+provider = "kafka"
+broker_endpoints = {kafka_wal_broker_endpoints | unescaped}
+linger = "5ms"
+{{ endif }}
[storage]
type = 'File'
diff --git a/tests/runner/Cargo.toml b/tests/runner/Cargo.toml
index 7b9141776fd8..b2757f479dd6 100644
--- a/tests/runner/Cargo.toml
+++ b/tests/runner/Cargo.toml
@@ -15,6 +15,7 @@ common-query.workspace = true
common-recordbatch.workspace = true
common-time.workspace = true
serde.workspace = true
+serde_json.workspace = true
sqlness = { version = "0.5" }
tinytemplate = "1.2"
tokio.workspace = true
diff --git a/tests/runner/src/env.rs b/tests/runner/src/env.rs
index 1bd7ad36496a..76946ad6ba60 100644
--- a/tests/runner/src/env.rs
+++ b/tests/runner/src/env.rs
@@ -41,10 +41,17 @@ const METASRV_ADDR: &str = "127.0.0.1:3002";
const SERVER_ADDR: &str = "127.0.0.1:4001";
const DEFAULT_LOG_LEVEL: &str = "--log-level=debug,hyper=warn,tower=warn,datafusion=warn,reqwest=warn,sqlparser=warn,h2=info,opendal=info";
+#[derive(Clone)]
+pub enum WalConfig {
+ RaftEngine,
+ Kafka { broker_endpoints: Vec<String> },
+}
+
#[derive(Clone)]
pub struct Env {
data_home: PathBuf,
server_addr: Option<String>,
+ wal: WalConfig,
}
#[allow(clippy::print_stdout)]
@@ -68,10 +75,11 @@ impl EnvController for Env {
#[allow(clippy::print_stdout)]
impl Env {
- pub fn new(data_home: PathBuf, server_addr: Option<String>) -> Self {
+ pub fn new(data_home: PathBuf, server_addr: Option<String>, wal: WalConfig) -> Self {
Self {
data_home,
server_addr,
+ wal,
}
}
@@ -81,7 +89,7 @@ impl Env {
} else {
Self::build_db().await;
- let db_ctx = GreptimeDBContext::new();
+ let db_ctx = GreptimeDBContext::new(self.wal.clone());
let server_process = self.start_server("standalone", &db_ctx, true).await;
@@ -106,7 +114,7 @@ impl Env {
} else {
Self::build_db().await;
- let db_ctx = GreptimeDBContext::new();
+ let db_ctx = GreptimeDBContext::new(self.wal.clone());
// start a distributed GreptimeDB
let meta_server = self.start_server("metasrv", &db_ctx, true).await;
@@ -145,6 +153,7 @@ impl Env {
ctx: GreptimeDBContext {
time: 0,
datanode_id: Default::default(),
+ wal: self.wal.clone(),
},
is_standalone: false,
env: self.clone(),
@@ -178,6 +187,7 @@ impl Env {
.create(true)
.write(true)
.truncate(truncate_log)
+ .append(!truncate_log)
.open(log_file_name)
.unwrap();
@@ -214,6 +224,8 @@ impl Env {
"--enable-region-failover".to_string(),
"false".to_string(),
"--http-addr=127.0.0.1:5002".to_string(),
+ "-c".to_string(),
+ self.generate_config_file(subcommand, db_ctx),
];
(args, METASRV_ADDR.to_string())
}
@@ -321,6 +333,8 @@ impl Env {
wal_dir: String,
data_home: String,
procedure_dir: String,
+ is_raft_engine: bool,
+ kafka_wal_broker_endpoints: String,
}
let data_home = self
@@ -334,6 +348,8 @@ impl Env {
wal_dir,
data_home: data_home.display().to_string(),
procedure_dir,
+ is_raft_engine: db_ctx.is_raft_engine(),
+ kafka_wal_broker_endpoints: db_ctx.kafka_wal_broker_endpoints(),
};
let rendered = tt.render(subcommand, &ctx).unwrap();
@@ -447,13 +463,28 @@ struct GreptimeDBContext {
/// Start time in millisecond
time: i64,
datanode_id: AtomicU32,
+ wal: WalConfig,
}
impl GreptimeDBContext {
- pub fn new() -> Self {
+ pub fn new(wal: WalConfig) -> Self {
Self {
time: common_time::util::current_time_millis(),
datanode_id: AtomicU32::new(0),
+ wal,
+ }
+ }
+
+ fn is_raft_engine(&self) -> bool {
+ matches!(self.wal, WalConfig::RaftEngine)
+ }
+
+ fn kafka_wal_broker_endpoints(&self) -> String {
+ match &self.wal {
+ WalConfig::RaftEngine => String::new(),
+ WalConfig::Kafka { broker_endpoints } => {
+ serde_json::to_string(&broker_endpoints).unwrap()
+ }
}
}
diff --git a/tests/runner/src/main.rs b/tests/runner/src/main.rs
index 9c93e628a484..5fdddbd7149a 100644
--- a/tests/runner/src/main.rs
+++ b/tests/runner/src/main.rs
@@ -14,13 +14,20 @@
use std::path::PathBuf;
-use clap::Parser;
-use env::Env;
+use clap::{Parser, ValueEnum};
+use env::{Env, WalConfig};
use sqlness::{ConfigBuilder, Runner};
mod env;
mod util;
+#[derive(ValueEnum, Debug, Clone)]
+#[clap(rename_all = "snake_case")]
+enum Wal {
+ RaftEngine,
+ Kafka,
+}
+
#[derive(Parser, Debug)]
#[clap(author, version, about, long_about = None)]
/// SQL Harness for GrepTimeDB
@@ -41,9 +48,17 @@ struct Args {
#[clap(short, long, default_value = ".*")]
test_filter: String,
- /// Address of the server
+ /// Address of the server.
#[clap(short, long)]
server_addr: Option<String>,
+
+ /// The type of Wal.
+ #[clap(short, long, default_value = "raft_engine")]
+ wal: Wal,
+
+ /// The kafka wal broker endpoints.
+ #[clap(short, long, default_value = "127.0.0.1:9092")]
+ kafka_wal_broker_endpoints: String,
}
#[tokio::main]
@@ -63,6 +78,18 @@ async fn main() {
.env_config_file(args.env_config_file)
.build()
.unwrap();
- let runner = Runner::new(config, Env::new(data_home, args.server_addr));
+
+ let wal = match args.wal {
+ Wal::RaftEngine => WalConfig::RaftEngine,
+ Wal::Kafka => WalConfig::Kafka {
+ broker_endpoints: args
+ .kafka_wal_broker_endpoints
+ .split(',')
+ .map(|s| s.trim().to_string())
+ .collect(),
+ },
+ };
+
+ let runner = Runner::new(config, Env::new(data_home, args.server_addr, wal));
runner.run().await.unwrap();
}
|
test
|
add sqlness with kafka wal (#3027)
|
02b18fbca14a86f48d0d6ba7e6d8338e0a7054df
|
2024-03-05 15:17:32
|
Lei, HUANG
|
feat: decode prom requests to grpc (#3425)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index a8de2387aeac..e031e23632a7 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -5921,6 +5921,15 @@ dependencies = [
"memchr",
]
+[[package]]
+name = "object-pool"
+version = "0.5.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ee9a3e7196d09ec86002b939f1576e8e446d58def8fd48fe578e2c72d5328d68"
+dependencies = [
+ "parking_lot 0.11.2",
+]
+
[[package]]
name = "object-store"
version = "0.6.0"
@@ -9054,6 +9063,7 @@ dependencies = [
"common-test-util",
"common-time",
"common-version",
+ "criterion",
"datafusion",
"datafusion-common",
"datafusion-expr",
@@ -9073,6 +9083,7 @@ dependencies = [
"mime_guess",
"mysql_async",
"notify",
+ "object-pool",
"once_cell",
"openmetrics-parser",
"opensrv-mysql",
diff --git a/src/frontend/src/instance/prom_store.rs b/src/frontend/src/instance/prom_store.rs
index 5e21188294c5..22402bebff25 100644
--- a/src/frontend/src/instance/prom_store.rs
+++ b/src/frontend/src/instance/prom_store.rs
@@ -16,6 +16,7 @@ use std::sync::Arc;
use api::prom_store::remote::read_request::ResponseType;
use api::prom_store::remote::{Query, QueryResult, ReadRequest, ReadResponse, WriteRequest};
+use api::v1::RowInsertRequests;
use async_trait::async_trait;
use auth::{PermissionChecker, PermissionCheckerRef, PermissionReq};
use common_catalog::format_full_table_name;
@@ -174,7 +175,7 @@ impl PromStoreProtocolHandler for Instance {
.get::<PromStoreProtocolInterceptorRef<servers::error::Error>>();
interceptor_ref.pre_write(&request, ctx.clone())?;
- let (requests, samples) = prom_store::to_grpc_row_insert_requests(request)?;
+ let (requests, samples) = prom_store::to_grpc_row_insert_requests(&request)?;
if with_metric_engine {
let physical_table = ctx
.extension(PHYSICAL_TABLE_PARAM)
@@ -197,6 +198,38 @@ impl PromStoreProtocolHandler for Instance {
Ok(())
}
+ async fn write_fast(
+ &self,
+ request: RowInsertRequests,
+ ctx: QueryContextRef,
+ with_metric_engine: bool,
+ ) -> ServerResult<()> {
+ self.plugins
+ .get::<PermissionCheckerRef>()
+ .as_ref()
+ .check_permission(ctx.current_user(), PermissionReq::PromStoreWrite)
+ .context(AuthSnafu)?;
+
+ if with_metric_engine {
+ let physical_table = ctx
+ .extension(PHYSICAL_TABLE_PARAM)
+ .unwrap_or(GREPTIME_PHYSICAL_TABLE)
+ .to_string();
+ let _ = self
+ .handle_metric_row_inserts(request, ctx.clone(), physical_table.to_string())
+ .await
+ .map_err(BoxedError::new)
+ .context(error::ExecuteGrpcQuerySnafu)?;
+ } else {
+ let _ = self
+ .handle_row_inserts(request, ctx.clone())
+ .await
+ .map_err(BoxedError::new)
+ .context(error::ExecuteGrpcQuerySnafu)?;
+ }
+ Ok(())
+ }
+
async fn read(
&self,
request: ReadRequest,
@@ -276,7 +309,7 @@ impl PromStoreProtocolHandler for ExportMetricHandler {
ctx: QueryContextRef,
_: bool,
) -> ServerResult<()> {
- let (requests, _) = prom_store::to_grpc_row_insert_requests(request)?;
+ let (requests, _) = prom_store::to_grpc_row_insert_requests(&request)?;
self.inserter
.handle_metric_row_inserts(
requests,
@@ -290,6 +323,15 @@ impl PromStoreProtocolHandler for ExportMetricHandler {
Ok(())
}
+ async fn write_fast(
+ &self,
+ _request: RowInsertRequests,
+ _ctx: QueryContextRef,
+ _with_metric_engine: bool,
+ ) -> ServerResult<()> {
+ unimplemented!()
+ }
+
async fn read(
&self,
_request: ReadRequest,
diff --git a/src/servers/Cargo.toml b/src/servers/Cargo.toml
index 69f318815493..a9363a0efe78 100644
--- a/src/servers/Cargo.toml
+++ b/src/servers/Cargo.toml
@@ -60,6 +60,7 @@ itertools.workspace = true
lazy_static.workspace = true
mime_guess = "2.0"
notify = "6.1"
+object-pool = "0.5"
once_cell.workspace = true
openmetrics-parser = "0.4"
opensrv-mysql = "0.7.0"
@@ -114,6 +115,7 @@ catalog = { workspace = true, features = ["testing"] }
client.workspace = true
common-base.workspace = true
common-test-util.workspace = true
+criterion = "0.4"
mysql_async = { version = "0.33", default-features = false, features = [
"default-rustls",
] }
@@ -129,3 +131,7 @@ tokio-test = "0.4"
[build-dependencies]
common-version.workspace = true
+
+[[bench]]
+name = "bench_prom"
+harness = false
diff --git a/src/servers/benches/bench_prom.rs b/src/servers/benches/bench_prom.rs
new file mode 100644
index 000000000000..df052844f144
--- /dev/null
+++ b/src/servers/benches/bench_prom.rs
@@ -0,0 +1,21 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use criterion::criterion_main;
+
+mod prom_decode;
+
+criterion_main! {
+ prom_decode::benches
+}
diff --git a/src/servers/benches/prom_decode.rs b/src/servers/benches/prom_decode.rs
new file mode 100644
index 000000000000..4b40683cf08c
--- /dev/null
+++ b/src/servers/benches/prom_decode.rs
@@ -0,0 +1,53 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::time::Duration;
+
+use api::prom_store::remote::WriteRequest;
+use bytes::Bytes;
+use criterion::{criterion_group, criterion_main, Criterion};
+use prost::Message;
+use servers::prom_store::to_grpc_row_insert_requests;
+use servers::proto::PromWriteRequest;
+
+fn bench_decode_prom_request(c: &mut Criterion) {
+ let mut d = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR"));
+ d.push("benches");
+ d.push("write_request.pb.data");
+
+ let data = Bytes::from(std::fs::read(d).unwrap());
+
+ let mut request = WriteRequest::default();
+ let mut prom_request = PromWriteRequest::default();
+ c.benchmark_group("decode")
+ .measurement_time(Duration::from_secs(3))
+ .bench_function("write_request", |b| {
+ b.iter(|| {
+ request.clear();
+ let data = data.clone();
+ request.merge(data).unwrap();
+ to_grpc_row_insert_requests(&request).unwrap();
+ });
+ })
+ .bench_function("prom_write_request", |b| {
+ b.iter(|| {
+ let data = data.clone();
+ prom_request.merge(data).unwrap();
+ prom_request.as_row_insert_requests();
+ });
+ });
+}
+
+criterion_group!(benches, bench_decode_prom_request);
+criterion_main!(benches);
diff --git a/src/servers/benches/write_request.pb.data b/src/servers/benches/write_request.pb.data
new file mode 100644
index 000000000000..19219dc4ee17
Binary files /dev/null and b/src/servers/benches/write_request.pb.data differ
diff --git a/src/servers/src/http/prom_store.rs b/src/servers/src/http/prom_store.rs
index 9188755028b5..5e30b47fc0d0 100644
--- a/src/servers/src/http/prom_store.rs
+++ b/src/servers/src/http/prom_store.rs
@@ -15,14 +15,18 @@
use std::sync::Arc;
use api::prom_store::remote::{ReadRequest, WriteRequest};
+use api::v1::RowInsertRequests;
use axum::extract::{Query, RawBody, State};
use axum::http::{header, StatusCode};
use axum::response::IntoResponse;
use axum::Extension;
+use bytes::Bytes;
use common_catalog::consts::DEFAULT_SCHEMA_NAME;
use common_query::prelude::GREPTIME_PHYSICAL_TABLE;
use common_telemetry::tracing;
use hyper::Body;
+use lazy_static::lazy_static;
+use object_pool::Pool;
use prost::Message;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
@@ -31,9 +35,14 @@ use snafu::prelude::*;
use crate::error::{self, Result, UnexpectedPhysicalTableSnafu};
use crate::prom_store::snappy_decompress;
+use crate::proto::PromWriteRequest;
use crate::query_handler::{PromStoreProtocolHandlerRef, PromStoreResponse};
pub const PHYSICAL_TABLE_PARAM: &str = "physical_table";
+lazy_static! {
+ static ref PROM_WRITE_REQUEST_POOL: Pool<PromWriteRequest> =
+ Pool::new(256, PromWriteRequest::default);
+}
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
pub struct DatabaseQuery {
@@ -91,14 +100,15 @@ pub async fn remote_write(
.with_label_values(&[db.as_str()])
.start_timer();
- let request = decode_remote_write_request(body).await?;
+ let request = decode_remote_write_request_to_row_inserts(body).await?;
+
if let Some(physical_table) = params.physical_table {
let mut new_query_ctx = query_ctx.as_ref().clone();
new_query_ctx.set_extension(PHYSICAL_TABLE_PARAM, physical_table);
query_ctx = Arc::new(new_query_ctx);
}
- handler.write(request, query_ctx, true).await?;
+ handler.write_fast(request, query_ctx, true).await?;
Ok((StatusCode::NO_CONTENT, ()))
}
@@ -136,6 +146,23 @@ pub async fn remote_read(
handler.read(request, query_ctx).await
}
+async fn decode_remote_write_request_to_row_inserts(body: Body) -> Result<RowInsertRequests> {
+ let _timer = crate::metrics::METRIC_HTTP_PROM_STORE_DECODE_ELAPSED.start_timer();
+ let body = hyper::body::to_bytes(body)
+ .await
+ .context(error::HyperSnafu)?;
+
+ let buf = Bytes::from(snappy_decompress(&body[..])?);
+
+ let mut request = PROM_WRITE_REQUEST_POOL.pull(PromWriteRequest::default);
+ request
+ .merge(buf)
+ .context(error::DecodePromRemoteRequestSnafu)?;
+ let (requests, samples) = request.as_row_insert_requests();
+ crate::metrics::METRIC_HTTP_PROM_STORE_DECODE_NUM_SERIES.observe(samples as f64);
+ Ok(requests)
+}
+
async fn decode_remote_write_request(body: Body) -> Result<WriteRequest> {
let _timer = crate::metrics::METRIC_HTTP_PROM_STORE_DECODE_ELAPSED.start_timer();
let body = hyper::body::to_bytes(body)
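For context on the pooling above: the object-pool crate hands out reusable values via pull and takes them back when the returned guard is dropped. Below is a rough, self-contained sketch of that pattern; handle_request and the Vec<u8> buffer are invented purely for illustration, and only the Pool::new / pull calls mirror the real PROM_WRITE_REQUEST_POOL usage in the diff.

use object_pool::Pool;

// `handle_request` and the Vec<u8> buffer are invented for this sketch; only the
// Pool::new / pull calls mirror the actual PROM_WRITE_REQUEST_POOL usage above.
fn handle_request(pool: &Pool<Vec<u8>>, body: &[u8]) {
    // Pull a pooled buffer; the closure builds a fresh one if none is available,
    // just like `PROM_WRITE_REQUEST_POOL.pull(PromWriteRequest::default)`.
    let mut buf = pool.pull(Vec::new);
    buf.clear(); // pooled objects keep their previous contents, so clear before reuse
    buf.extend_from_slice(body);
    // ... decode from `buf` here ...
    // Dropping `buf` (an object_pool::Reusable) hands the allocation back to the
    // pool instead of freeing it.
}

fn main() {
    let pool: Pool<Vec<u8>> = Pool::new(4, Vec::new);
    handle_request(&pool, b"first snappy-decompressed body");
    handle_request(&pool, b"second body");
}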
diff --git a/src/servers/src/lib.rs b/src/servers/src/lib.rs
index 4d04661c5eb6..efa6084eae85 100644
--- a/src/servers/src/lib.rs
+++ b/src/servers/src/lib.rs
@@ -36,9 +36,13 @@ pub mod mysql;
pub mod opentsdb;
pub mod otlp;
pub mod postgres;
+mod prom_row_builder;
pub mod prom_store;
pub mod prometheus_handler;
+pub mod proto;
pub mod query_handler;
+#[allow(clippy::all)]
+mod repeated_field;
mod row_writer;
pub mod server;
mod shutdown;
diff --git a/src/servers/src/prom_row_builder.rs b/src/servers/src/prom_row_builder.rs
new file mode 100644
index 000000000000..20a049f472b4
--- /dev/null
+++ b/src/servers/src/prom_row_builder.rs
@@ -0,0 +1,272 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::collections::hash_map::Entry;
+use std::collections::HashMap;
+use std::string::ToString;
+
+use api::prom_store::remote::Sample;
+use api::v1::value::ValueData;
+use api::v1::{
+ ColumnDataType, ColumnSchema, Row, RowInsertRequest, RowInsertRequests, Rows, SemanticType,
+ Value,
+};
+use common_query::prelude::{GREPTIME_TIMESTAMP, GREPTIME_VALUE};
+
+use crate::proto::PromLabel;
+use crate::repeated_field::Clear;
+
+/// [TablesBuilder] serves as an intermediate container to build [RowInsertRequests].
+#[derive(Default)]
+pub(crate) struct TablesBuilder {
+ tables: HashMap<String, TableBuilder>,
+}
+
+impl Clear for TablesBuilder {
+ fn clear(&mut self) {
+ self.tables.clear();
+ }
+}
+
+impl TablesBuilder {
+ /// Gets the table builder for the given table name. Creates an empty [TableBuilder] if it does not exist.
+ pub(crate) fn get_or_create_table_builder(
+ &mut self,
+ table_name: String,
+ label_num: usize,
+ row_num: usize,
+ ) -> &mut TableBuilder {
+ self.tables
+ .entry(table_name)
+ .or_insert_with(|| TableBuilder::with_capacity(label_num + 2, row_num))
+ }
+
+ /// Converts [TablesBuilder] to [RowInsertRequests] along with the total row count, and clears the inner state.
+ pub(crate) fn as_insert_requests(&mut self) -> (RowInsertRequests, usize) {
+ let mut total_rows = 0;
+ let inserts = self
+ .tables
+ .drain()
+ .map(|(name, mut table)| {
+ total_rows += table.num_rows();
+ table.as_row_insert_request(name)
+ })
+ .collect();
+ (RowInsertRequests { inserts }, total_rows)
+ }
+}
+
+/// Builder for one table.
+pub(crate) struct TableBuilder {
+ /// Column schemas.
+ schema: Vec<ColumnSchema>,
+ /// Rows written.
+ rows: Vec<Row>,
+ /// Indices of columns inside `schema`.
+ col_indexes: HashMap<String, usize>,
+}
+
+impl Default for TableBuilder {
+ fn default() -> Self {
+ Self::with_capacity(2, 0)
+ }
+}
+
+impl TableBuilder {
+ pub(crate) fn with_capacity(cols: usize, rows: usize) -> Self {
+ let mut col_indexes = HashMap::with_capacity(cols);
+ col_indexes.insert(GREPTIME_TIMESTAMP.to_string(), 0);
+ col_indexes.insert(GREPTIME_VALUE.to_string(), 1);
+
+ let mut schema = Vec::with_capacity(cols);
+ schema.push(ColumnSchema {
+ column_name: GREPTIME_TIMESTAMP.to_string(),
+ datatype: ColumnDataType::TimestampMillisecond as i32,
+ semantic_type: SemanticType::Timestamp as i32,
+ datatype_extension: None,
+ });
+
+ schema.push(ColumnSchema {
+ column_name: GREPTIME_VALUE.to_string(),
+ datatype: ColumnDataType::Float64 as i32,
+ semantic_type: SemanticType::Field as i32,
+ datatype_extension: None,
+ });
+
+ Self {
+ schema,
+ rows: Vec::with_capacity(rows),
+ col_indexes,
+ }
+ }
+
+ /// Total number of rows inside table builder.
+ fn num_rows(&self) -> usize {
+ self.rows.len()
+ }
+
+ /// Adds a set of labels and samples to table builder.
+ pub(crate) fn add_labels_and_samples(&mut self, labels: &[PromLabel], samples: &[Sample]) {
+ let mut row = vec![Value { value_data: None }; self.col_indexes.len()];
+
+ for PromLabel { name, value } in labels {
+ // safety: we expect all labels to be UTF-8 encoded strings.
+ let tag_name = unsafe { String::from_utf8_unchecked(name.to_vec()) };
+ let tag_value = unsafe { String::from_utf8_unchecked(value.to_vec()) };
+ let tag_value = Some(ValueData::StringValue(tag_value));
+ let tag_num = self.col_indexes.len();
+
+ match self.col_indexes.entry(tag_name) {
+ Entry::Occupied(e) => {
+ row[*e.get()].value_data = tag_value;
+ }
+ Entry::Vacant(e) => {
+ let column_name = e.key().clone();
+ e.insert(tag_num);
+ self.schema.push(ColumnSchema {
+ column_name,
+ datatype: ColumnDataType::String as i32,
+ semantic_type: SemanticType::Tag as i32,
+ datatype_extension: None,
+ });
+ row.push(Value {
+ value_data: tag_value,
+ });
+ }
+ }
+ }
+
+ if samples.len() == 1 {
+ let sample = &samples[0];
+ row[0].value_data = Some(ValueData::TimestampMillisecondValue(sample.timestamp));
+ row[1].value_data = Some(ValueData::F64Value(sample.value));
+ self.rows.push(Row { values: row });
+ return;
+ }
+ for sample in samples {
+ row[0].value_data = Some(ValueData::TimestampMillisecondValue(sample.timestamp));
+ row[1].value_data = Some(ValueData::F64Value(sample.value));
+ self.rows.push(Row {
+ values: row.clone(),
+ });
+ }
+ }
+
+ /// Converts [TableBuilder] to [RowInsertRequest] and clears buffered data.
+ pub(crate) fn as_row_insert_request(&mut self, table_name: String) -> RowInsertRequest {
+ let mut rows = std::mem::take(&mut self.rows);
+ let schema = std::mem::take(&mut self.schema);
+ let col_num = schema.len();
+ for row in &mut rows {
+ if row.values.len() < col_num {
+ row.values.resize(col_num, Value { value_data: None });
+ }
+ }
+
+ RowInsertRequest {
+ table_name,
+ rows: Some(Rows { schema, rows }),
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use api::prom_store::remote::Sample;
+ use api::v1::value::ValueData;
+ use api::v1::Value;
+ use bytes::Bytes;
+
+ use crate::prom_row_builder::TableBuilder;
+ use crate::proto::PromLabel;
+ #[test]
+ fn test_table_builder() {
+ let mut builder = TableBuilder::default();
+ builder.add_labels_and_samples(
+ &[
+ PromLabel {
+ name: Bytes::from("tag0"),
+ value: Bytes::from("v0"),
+ },
+ PromLabel {
+ name: Bytes::from("tag1"),
+ value: Bytes::from("v1"),
+ },
+ ],
+ &[Sample {
+ value: 0.0,
+ timestamp: 0,
+ }],
+ );
+
+ builder.add_labels_and_samples(
+ &[
+ PromLabel {
+ name: Bytes::from("tag0"),
+ value: Bytes::from("v0"),
+ },
+ PromLabel {
+ name: Bytes::from("tag2"),
+ value: Bytes::from("v2"),
+ },
+ ],
+ &[Sample {
+ value: 0.1,
+ timestamp: 1,
+ }],
+ );
+
+ let request = builder.as_row_insert_request("test".to_string());
+ let rows = request.rows.unwrap().rows;
+ assert_eq!(2, rows.len());
+
+ assert_eq!(
+ vec![
+ Value {
+ value_data: Some(ValueData::TimestampMillisecondValue(0))
+ },
+ Value {
+ value_data: Some(ValueData::F64Value(0.0))
+ },
+ Value {
+ value_data: Some(ValueData::StringValue("v0".to_string()))
+ },
+ Value {
+ value_data: Some(ValueData::StringValue("v1".to_string()))
+ },
+ Value { value_data: None },
+ ],
+ rows[0].values
+ );
+
+ assert_eq!(
+ vec![
+ Value {
+ value_data: Some(ValueData::TimestampMillisecondValue(1))
+ },
+ Value {
+ value_data: Some(ValueData::F64Value(0.1))
+ },
+ Value {
+ value_data: Some(ValueData::StringValue("v0".to_string()))
+ },
+ Value { value_data: None },
+ Value {
+ value_data: Some(ValueData::StringValue("v2".to_string()))
+ },
+ ],
+ rows[1].values
+ );
+ }
+}
diff --git a/src/servers/src/prom_store.rs b/src/servers/src/prom_store.rs
index f86d30781c82..7553d9791225 100644
--- a/src/servers/src/prom_store.rs
+++ b/src/servers/src/prom_store.rs
@@ -39,6 +39,8 @@ use crate::row_writer::{self, MultiTableData};
pub const METRIC_NAME_LABEL: &str = "__name__";
+pub const METRIC_NAME_LABEL_BYTES: &[u8] = b"__name__";
+
/// Metrics for push gateway protocol
pub struct Metrics {
pub exposition: MetricsExposition<PrometheusType, PrometheusValue>,
@@ -300,12 +302,12 @@ fn recordbatch_to_timeseries(table: &str, recordbatch: RecordBatch) -> Result<Ve
Ok(timeseries_map.into_values().collect())
}
-pub fn to_grpc_row_insert_requests(request: WriteRequest) -> Result<(RowInsertRequests, usize)> {
+pub fn to_grpc_row_insert_requests(request: &WriteRequest) -> Result<(RowInsertRequests, usize)> {
let _timer = crate::metrics::METRIC_HTTP_PROM_STORE_CONVERT_ELAPSED.start_timer();
let mut multi_table_data = MultiTableData::new();
- for series in request.timeseries {
+ for series in &request.timeseries {
let table_name = &series
.labels
.iter()
@@ -329,11 +331,11 @@ pub fn to_grpc_row_insert_requests(request: WriteRequest) -> Result<(RowInsertRe
);
// labels
- let kvs = series.labels.into_iter().filter_map(|label| {
+ let kvs = series.labels.iter().filter_map(|label| {
if label.name == METRIC_NAME_LABEL {
None
} else {
- Some((label.name, label.value))
+ Some((label.name.clone(), label.value.clone()))
}
});
@@ -649,7 +651,7 @@ mod tests {
..Default::default()
};
- let mut exprs = to_grpc_row_insert_requests(write_request)
+ let mut exprs = to_grpc_row_insert_requests(&write_request)
.unwrap()
.0
.inserts;
diff --git a/src/servers/src/proto.rs b/src/servers/src/proto.rs
new file mode 100644
index 000000000000..1a96cd9ed8b2
--- /dev/null
+++ b/src/servers/src/proto.rs
@@ -0,0 +1,304 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::ops::Deref;
+
+use api::prom_store::remote::Sample;
+use api::v1::RowInsertRequests;
+use bytes::{Buf, Bytes};
+use prost::encoding::message::merge;
+use prost::encoding::{decode_key, decode_varint, DecodeContext, WireType};
+use prost::DecodeError;
+
+use crate::prom_row_builder::TablesBuilder;
+use crate::prom_store::METRIC_NAME_LABEL_BYTES;
+use crate::repeated_field::{Clear, RepeatedField};
+
+impl Clear for Sample {
+ fn clear(&mut self) {}
+}
+
+#[derive(Default, Clone)]
+pub struct PromLabel {
+ pub name: Bytes,
+ pub value: Bytes,
+}
+
+impl Clear for PromLabel {
+ fn clear(&mut self) {
+ self.name.clear();
+ self.value.clear();
+ }
+}
+
+impl PromLabel {
+ pub fn merge_field<B>(
+ &mut self,
+ tag: u32,
+ wire_type: WireType,
+ buf: &mut B,
+ ctx: DecodeContext,
+ ) -> Result<(), DecodeError>
+ where
+ B: Buf,
+ {
+ const STRUCT_NAME: &str = "PromLabel";
+ match tag {
+ 1u32 => {
+ // decode label name
+ let value = &mut self.name;
+ prost::encoding::bytes::merge(wire_type, value, buf, ctx).map_err(|mut error| {
+ error.push(STRUCT_NAME, "name");
+ error
+ })
+ }
+ 2u32 => {
+ // decode label value
+ let value = &mut self.value;
+ prost::encoding::bytes::merge(wire_type, value, buf, ctx).map_err(|mut error| {
+ error.push(STRUCT_NAME, "value");
+ error
+ })
+ }
+ _ => prost::encoding::skip_field(wire_type, tag, buf, ctx),
+ }
+ }
+}
+
+#[derive(Default)]
+pub struct PromTimeSeries {
+ pub table_name: String,
+ pub labels: RepeatedField<PromLabel>,
+ pub samples: RepeatedField<Sample>,
+}
+
+impl Clear for PromTimeSeries {
+ fn clear(&mut self) {
+ self.table_name.clear();
+ self.labels.clear();
+ self.samples.clear();
+ }
+}
+
+impl PromTimeSeries {
+ pub fn merge_field<B>(
+ &mut self,
+ tag: u32,
+ wire_type: WireType,
+ buf: &mut B,
+ ctx: DecodeContext,
+ ) -> Result<(), DecodeError>
+ where
+ B: Buf,
+ {
+ const STRUCT_NAME: &str = "PromTimeSeries";
+ match tag {
+ 1u32 => {
+ // decode labels
+ let label = self.labels.push_default();
+
+ let len = decode_varint(buf).map_err(|mut error| {
+ error.push(STRUCT_NAME, "labels");
+ error
+ })?;
+ let remaining = buf.remaining();
+ if len > remaining as u64 {
+ return Err(DecodeError::new("buffer underflow"));
+ }
+
+ let limit = remaining - len as usize;
+ while buf.remaining() > limit {
+ let (tag, wire_type) = decode_key(buf)?;
+ label.merge_field(tag, wire_type, buf, ctx.clone())?;
+ }
+ if buf.remaining() != limit {
+ return Err(DecodeError::new("delimited length exceeded"));
+ }
+ if label.name.deref() == METRIC_NAME_LABEL_BYTES {
+ // safety: we expect all labels to be UTF-8 encoded strings.
+ let table_name = unsafe { String::from_utf8_unchecked(label.value.to_vec()) };
+ self.table_name = table_name;
+ self.labels.truncate(self.labels.len() - 1); // remove last label
+ }
+ Ok(())
+ }
+ 2u32 => {
+ let sample = self.samples.push_default();
+ merge(WireType::LengthDelimited, sample, buf, ctx).map_err(|mut error| {
+ error.push(STRUCT_NAME, "samples");
+ error
+ })?;
+ Ok(())
+ }
+ // skip exemplars
+ 3u32 => prost::encoding::skip_field(wire_type, tag, buf, ctx),
+ _ => prost::encoding::skip_field(wire_type, tag, buf, ctx),
+ }
+ }
+
+ fn add_to_table_data(&mut self, table_builders: &mut TablesBuilder) {
+ let label_num = self.labels.len();
+ let row_num = self.samples.len();
+ let table_data = table_builders.get_or_create_table_builder(
+ std::mem::take(&mut self.table_name),
+ label_num,
+ row_num,
+ );
+ table_data.add_labels_and_samples(self.labels.as_slice(), self.samples.as_slice());
+ self.labels.clear();
+ self.samples.clear();
+ }
+}
+
+#[derive(Default)]
+pub struct PromWriteRequest {
+ table_data: TablesBuilder,
+ series: PromTimeSeries,
+}
+
+impl Clear for PromWriteRequest {
+ fn clear(&mut self) {
+ self.table_data.clear();
+ }
+}
+
+impl PromWriteRequest {
+ pub fn as_row_insert_requests(&mut self) -> (RowInsertRequests, usize) {
+ self.table_data.as_insert_requests()
+ }
+
+ pub fn merge<B>(&mut self, mut buf: B) -> Result<(), DecodeError>
+ where
+ B: Buf,
+ Self: Sized,
+ {
+ const STRUCT_NAME: &str = "PromWriteRequest";
+ let ctx = DecodeContext::default();
+ while buf.has_remaining() {
+ let (tag, wire_type) = decode_key(&mut buf)?;
+ assert_eq!(WireType::LengthDelimited, wire_type);
+ match tag {
+ 1u32 => {
+ // decode TimeSeries
+ let len = decode_varint(&mut buf).map_err(|mut e| {
+ e.push(STRUCT_NAME, "timeseries");
+ e
+ })?;
+ let remaining = buf.remaining();
+ if len > remaining as u64 {
+ return Err(DecodeError::new("buffer underflow"));
+ }
+
+ let limit = remaining - len as usize;
+ while buf.remaining() > limit {
+ let (tag, wire_type) = decode_key(&mut buf)?;
+ self.series
+ .merge_field(tag, wire_type, &mut buf, ctx.clone())?;
+ }
+ if buf.remaining() != limit {
+ return Err(DecodeError::new("delimited length exceeded"));
+ }
+ self.series.add_to_table_data(&mut self.table_data);
+ }
+ 3u32 => {
+ // we can ignore metadata for now.
+ prost::encoding::skip_field(wire_type, tag, &mut buf, ctx.clone())?;
+ }
+ _ => prost::encoding::skip_field(wire_type, tag, &mut buf, ctx.clone())?,
+ }
+ }
+ Ok(())
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::collections::{HashMap, HashSet};
+
+ use api::prom_store::remote::WriteRequest;
+ use api::v1::RowInsertRequests;
+ use bytes::Bytes;
+ use prost::Message;
+
+ use crate::prom_store::to_grpc_row_insert_requests;
+ use crate::proto::PromWriteRequest;
+ use crate::repeated_field::Clear;
+
+ fn check_deserialized(
+ prom_write_request: &mut PromWriteRequest,
+ data: &Bytes,
+ expected_samples: usize,
+ expected_rows: &RowInsertRequests,
+ ) {
+ prom_write_request.clear();
+ prom_write_request.merge(data.clone()).unwrap();
+ let (prom_rows, samples) = prom_write_request.as_row_insert_requests();
+
+ assert_eq!(expected_samples, samples);
+ assert_eq!(expected_rows.inserts.len(), prom_rows.inserts.len());
+
+ let schemas = expected_rows
+ .inserts
+ .iter()
+ .map(|r| {
+ (
+ r.table_name.clone(),
+ r.rows
+ .as_ref()
+ .unwrap()
+ .schema
+ .iter()
+ .map(|c| (c.column_name.clone(), c.datatype, c.semantic_type))
+ .collect::<HashSet<_>>(),
+ )
+ })
+ .collect::<HashMap<_, _>>();
+
+ for r in &prom_rows.inserts {
+ let expected = schemas.get(&r.table_name).unwrap();
+ assert_eq!(
+ expected,
+ &r.rows
+ .as_ref()
+ .unwrap()
+ .schema
+ .iter()
+ .map(|c| { (c.column_name.clone(), c.datatype, c.semantic_type) })
+ .collect()
+ );
+ }
+ }
+
+ // Ensures `WriteRequest` and `PromWriteRequest` produce the same gRPC request.
+ #[test]
+ fn test_decode_write_request() {
+ let mut d = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR"));
+ d.push("benches");
+ d.push("write_request.pb.data");
+ let data = Bytes::from(std::fs::read(d).unwrap());
+
+ let (expected_rows, expected_samples) =
+ to_grpc_row_insert_requests(&WriteRequest::decode(data.clone()).unwrap()).unwrap();
+
+ let mut prom_write_request = PromWriteRequest::default();
+ for _ in 0..3 {
+ check_deserialized(
+ &mut prom_write_request,
+ &data,
+ expected_samples,
+ &expected_rows,
+ );
+ }
+ }
+}
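PromWriteRequest::merge above decodes the write request by hand with prost's low-level helpers (decode_key, decode_varint, skip_field) rather than a generated struct. A minimal sketch of the same length-delimited decoding loop follows; the Greeting message and decode_greeting function are hypothetical, and only the prost helper calls match the code above.

use bytes::Buf;
use prost::encoding::{decode_key, decode_varint, skip_field, DecodeContext, WireType};
use prost::DecodeError;

// Hypothetical message for illustration only:  message Greeting { string name = 1; }
fn decode_greeting<B: Buf>(mut buf: B) -> Result<String, DecodeError> {
    let ctx = DecodeContext::default();
    let mut name = String::new();
    while buf.has_remaining() {
        let (tag, wire_type) = decode_key(&mut buf)?;
        match (tag, wire_type) {
            (1, WireType::LengthDelimited) => {
                // Length-delimited field: a varint length prefix, then the payload.
                let len = decode_varint(&mut buf)? as usize;
                if len > buf.remaining() {
                    return Err(DecodeError::new("buffer underflow"));
                }
                let payload = buf.copy_to_bytes(len);
                name = String::from_utf8(payload.to_vec())
                    .map_err(|_| DecodeError::new("name is not valid utf-8"))?;
            }
            // Unknown fields are skipped, exactly as in the decoder above.
            _ => skip_field(wire_type, tag, &mut buf, ctx.clone())?,
        }
    }
    Ok(name)
}

fn main() {
    // 0x0A = key for field 1 / wire type 2 (length-delimited), 0x05 = payload length.
    let encoded: &[u8] = &[0x0A, 0x05, b'h', b'e', b'l', b'l', b'o'];
    assert_eq!(decode_greeting(encoded).unwrap(), "hello");
}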
diff --git a/src/servers/src/query_handler.rs b/src/servers/src/query_handler.rs
index d36b7418b022..347ec524565a 100644
--- a/src/servers/src/query_handler.rs
+++ b/src/servers/src/query_handler.rs
@@ -29,6 +29,7 @@ use std::collections::HashMap;
use std::sync::Arc;
use api::prom_store::remote::{ReadRequest, WriteRequest};
+use api::v1::RowInsertRequests;
use async_trait::async_trait;
use common_query::Output;
use opentelemetry_proto::tonic::collector::metrics::v1::{
@@ -95,6 +96,15 @@ pub trait PromStoreProtocolHandler {
ctx: QueryContextRef,
with_metric_engine: bool,
) -> Result<()>;
+
+ /// Handling prometheus remote write requests that are already decoded into [RowInsertRequests]
+ async fn write_fast(
+ &self,
+ request: RowInsertRequests,
+ ctx: QueryContextRef,
+ with_metric_engine: bool,
+ ) -> Result<()>;
+
/// Handling prometheus remote read requests
async fn read(&self, request: ReadRequest, ctx: QueryContextRef) -> Result<PromStoreResponse>;
/// Handling push gateway requests
diff --git a/src/servers/src/repeated_field.rs b/src/servers/src/repeated_field.rs
new file mode 100644
index 000000000000..0e3baf16a576
--- /dev/null
+++ b/src/servers/src/repeated_field.rs
@@ -0,0 +1,540 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Copyright (c) 2019 Stepan Koltsov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
+// OR OTHER DEALINGS IN THE SOFTWARE.
+
+/// ! The [Clear] trait and [RepeatedField] are taken from [rust-protobuf](https://github.com/stepancheg/rust-protobuf/tree/master/protobuf-examples/vs-prost)
+/// to leverage the pooling mechanism to avoid frequent heap allocation/deallocation when decoding deeply nested structs.
+use std::borrow::Borrow;
+use std::cmp::Ordering;
+use std::default::Default;
+use std::hash::{Hash, Hasher};
+use std::iter::{FromIterator, IntoIterator};
+use std::ops::{Deref, DerefMut, Index, IndexMut};
+use std::{fmt, slice, vec};
+
+use bytes::Bytes;
+
+/// anything that can be cleared
+pub trait Clear {
+ /// Clear this object, making it equivalent to a newly created one.
+ fn clear(&mut self);
+}
+
+impl<T> Clear for Option<T> {
+ fn clear(&mut self) {
+ self.take();
+ }
+}
+
+impl Clear for String {
+ fn clear(&mut self) {
+ String::clear(self);
+ }
+}
+
+impl<T> Clear for Vec<T> {
+ fn clear(&mut self) {
+ Vec::clear(self);
+ }
+}
+
+impl Clear for Bytes {
+ fn clear(&mut self) {
+ Bytes::clear(self);
+ }
+}
+
+/// Wrapper around vector to avoid deallocations on clear.
+pub struct RepeatedField<T> {
+ vec: Vec<T>,
+ len: usize,
+}
+
+impl<T> RepeatedField<T> {
+ /// Return number of elements in this container.
+ #[inline]
+ pub fn len(&self) -> usize {
+ self.len
+ }
+
+ /// Clear.
+ #[inline]
+ pub fn clear(&mut self) {
+ self.len = 0;
+ }
+}
+
+impl<T> Default for RepeatedField<T> {
+ #[inline]
+ fn default() -> RepeatedField<T> {
+ RepeatedField {
+ vec: Vec::new(),
+ len: 0,
+ }
+ }
+}
+
+impl<T> RepeatedField<T> {
+ /// Create new empty container.
+ #[inline]
+ pub fn new() -> RepeatedField<T> {
+ Default::default()
+ }
+
+ /// Create a container with data from the given vec.
+ #[inline]
+ pub fn from_vec(vec: Vec<T>) -> RepeatedField<T> {
+ let len = vec.len();
+ RepeatedField { vec, len }
+ }
+
+ /// Convert data into vec.
+ #[inline]
+ pub fn into_vec(self) -> Vec<T> {
+ let mut vec = self.vec;
+ vec.truncate(self.len);
+ vec
+ }
+
+ /// Return current capacity.
+ #[inline]
+ pub fn capacity(&self) -> usize {
+ self.vec.capacity()
+ }
+
+ /// View data as slice.
+ #[inline]
+ pub fn as_slice<'a>(&'a self) -> &'a [T] {
+ &self.vec[..self.len]
+ }
+
+ /// View data as mutable slice.
+ #[inline]
+ pub fn as_mut_slice<'a>(&'a mut self) -> &'a mut [T] {
+ &mut self.vec[..self.len]
+ }
+
+ /// Get subslice of this container.
+ #[inline]
+ pub fn slice(&self, start: usize, end: usize) -> &[T] {
+ &self.as_ref()[start..end]
+ }
+
+ /// Get mutable subslice of this container.
+ #[inline]
+ pub fn slice_mut(&mut self, start: usize, end: usize) -> &mut [T] {
+ &mut self.as_mut_slice()[start..end]
+ }
+
+ /// Get slice from given index.
+ #[inline]
+ pub fn slice_from(&self, start: usize) -> &[T] {
+ &self.as_ref()[start..]
+ }
+
+ /// Get mutable slice from given index.
+ #[inline]
+ pub fn slice_from_mut(&mut self, start: usize) -> &mut [T] {
+ &mut self.as_mut_slice()[start..]
+ }
+
+ /// Get slice to given index.
+ #[inline]
+ pub fn slice_to(&self, end: usize) -> &[T] {
+ &self.as_ref()[..end]
+ }
+
+ /// Get mutable slice to given index.
+ #[inline]
+ pub fn slice_to_mut(&mut self, end: usize) -> &mut [T] {
+ &mut self.as_mut_slice()[..end]
+ }
+
+ /// View this container as two slices split at given index.
+ #[inline]
+ pub fn split_at<'a>(&'a self, mid: usize) -> (&'a [T], &'a [T]) {
+ self.as_ref().split_at(mid)
+ }
+
+ /// View this container as two mutable slices split at given index.
+ #[inline]
+ pub fn split_at_mut<'a>(&'a mut self, mid: usize) -> (&'a mut [T], &'a mut [T]) {
+ self.as_mut_slice().split_at_mut(mid)
+ }
+
+ /// View all but the first element of this container.
+ #[inline]
+ pub fn tail(&self) -> &[T] {
+ &self.as_ref()[1..]
+ }
+
+ /// Last element of this container.
+ #[inline]
+ pub fn last(&self) -> Option<&T> {
+ self.as_ref().last()
+ }
+
+ /// Mutable last element of this container.
+ #[inline]
+ pub fn last_mut<'a>(&'a mut self) -> Option<&'a mut T> {
+ self.as_mut_slice().last_mut()
+ }
+
+ /// View all but the last element of this container.
+ #[inline]
+ pub fn init<'a>(&'a self) -> &'a [T] {
+ let s = self.as_ref();
+ &s[0..s.len() - 1]
+ }
+
+ /// Push an element to the end.
+ #[inline]
+ pub fn push(&mut self, value: T) {
+ if self.len == self.vec.len() {
+ self.vec.push(value);
+ } else {
+ self.vec[self.len] = value;
+ }
+ self.len += 1;
+ }
+
+ /// Pop last element.
+ #[inline]
+ pub fn pop(&mut self) -> Option<T> {
+ if self.len == 0 {
+ None
+ } else {
+ self.vec.truncate(self.len);
+ self.len -= 1;
+ self.vec.pop()
+ }
+ }
+
+ /// Insert an element at specified position.
+ #[inline]
+ pub fn insert(&mut self, index: usize, value: T) {
+ assert!(index <= self.len);
+ self.vec.insert(index, value);
+ self.len += 1;
+ }
+
+ /// Remove an element from specified position.
+ #[inline]
+ pub fn remove(&mut self, index: usize) -> T {
+ assert!(index < self.len);
+ self.len -= 1;
+ self.vec.remove(index)
+ }
+
+ /// Retains only the elements specified by the predicate.
+ ///
+ /// In other words, remove all elements `e` such that `f(&e)` returns `false`.
+ /// This method operates in place, visiting each element exactly once in the
+ /// original order, and preserves the order of the retained elements.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use protobuf::RepeatedField;
+ ///
+ /// let mut vec = RepeatedField::from(vec![1, 2, 3, 4]);
+ /// vec.retain(|&x| x % 2 == 0);
+ /// assert_eq!(vec, RepeatedField::from(vec![2, 4]));
+ /// ```
+ pub fn retain<F>(&mut self, f: F)
+ where
+ F: FnMut(&T) -> bool,
+ {
+ // suboptimal
+ self.vec.truncate(self.len);
+ self.vec.retain(f);
+ self.len = self.vec.len();
+ }
+
+ /// Truncate at specified length.
+ #[inline]
+ pub fn truncate(&mut self, len: usize) {
+ if self.len > len {
+ self.len = len;
+ }
+ }
+
+ /// Reverse in place.
+ #[inline]
+ pub fn reverse(&mut self) {
+ self.as_mut_slice().reverse()
+ }
+
+ /// Into owned iterator.
+ #[inline]
+ pub fn into_iter(mut self) -> vec::IntoIter<T> {
+ self.vec.truncate(self.len);
+ self.vec.into_iter()
+ }
+
+ /// Immutable data iterator.
+ #[inline]
+ pub fn iter<'a>(&'a self) -> slice::Iter<'a, T> {
+ self.as_ref().iter()
+ }
+
+ /// Mutable data iterator.
+ #[inline]
+ pub fn iter_mut<'a>(&'a mut self) -> slice::IterMut<'a, T> {
+ self.as_mut_slice().iter_mut()
+ }
+
+ /// Sort elements with given comparator.
+ #[inline]
+ pub fn sort_by<F>(&mut self, compare: F)
+ where
+ F: Fn(&T, &T) -> Ordering,
+ {
+ self.as_mut_slice().sort_by(compare)
+ }
+
+ /// Get data as raw pointer.
+ #[inline]
+ pub fn as_ptr(&self) -> *const T {
+ self.vec.as_ptr()
+ }
+
+ /// Get data as a mutable raw pointer.
+ #[inline]
+ pub fn as_mut_ptr(&mut self) -> *mut T {
+ self.vec.as_mut_ptr()
+ }
+}
+
+impl<T: Default + Clear> RepeatedField<T> {
+ /// Push default value.
+ /// This operation could be faster than `rf.push(Default::default())`,
+ /// because it may reuse a previously allocated and cleared element.
+ pub fn push_default<'a>(&'a mut self) -> &'a mut T {
+ if self.len == self.vec.len() {
+ self.vec.push(Default::default());
+ } else {
+ self.vec[self.len].clear();
+ }
+ self.len += 1;
+ self.last_mut().unwrap()
+ }
+}
+
+impl<T> From<Vec<T>> for RepeatedField<T> {
+ #[inline]
+ fn from(values: Vec<T>) -> RepeatedField<T> {
+ RepeatedField::from_vec(values)
+ }
+}
+
+impl<'a, T: Clone> From<&'a [T]> for RepeatedField<T> {
+ #[inline]
+ fn from(values: &'a [T]) -> RepeatedField<T> {
+ RepeatedField::from_slice(values)
+ }
+}
+
+impl<T> Into<Vec<T>> for RepeatedField<T> {
+ #[inline]
+ fn into(self) -> Vec<T> {
+ self.into_vec()
+ }
+}
+
+impl<T: Clone> RepeatedField<T> {
+ /// Copy slice data to `RepeatedField`
+ #[inline]
+ pub fn from_slice(values: &[T]) -> RepeatedField<T> {
+ RepeatedField::from_vec(values.to_vec())
+ }
+
+ /// Copy slice data to `RepeatedField`
+ #[inline]
+ pub fn from_ref<X: AsRef<[T]>>(values: X) -> RepeatedField<T> {
+ RepeatedField::from_slice(values.as_ref())
+ }
+
+ /// Copy this data into new vec.
+ #[inline]
+ pub fn to_vec(&self) -> Vec<T> {
+ self.as_ref().to_vec()
+ }
+}
+
+impl<T: Clone> Clone for RepeatedField<T> {
+ #[inline]
+ fn clone(&self) -> RepeatedField<T> {
+ RepeatedField {
+ vec: self.to_vec(),
+ len: self.len(),
+ }
+ }
+}
+
+impl<T> FromIterator<T> for RepeatedField<T> {
+ #[inline]
+ fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> RepeatedField<T> {
+ RepeatedField::from_vec(FromIterator::from_iter(iter))
+ }
+}
+
+impl<'a, T> IntoIterator for &'a RepeatedField<T> {
+ type Item = &'a T;
+ type IntoIter = slice::Iter<'a, T>;
+
+ fn into_iter(self) -> slice::Iter<'a, T> {
+ self.iter()
+ }
+}
+
+impl<'a, T> IntoIterator for &'a mut RepeatedField<T> {
+ type Item = &'a mut T;
+ type IntoIter = slice::IterMut<'a, T>;
+
+ fn into_iter(self) -> slice::IterMut<'a, T> {
+ self.iter_mut()
+ }
+}
+
+impl<'a, T> IntoIterator for RepeatedField<T> {
+ type Item = T;
+ type IntoIter = vec::IntoIter<T>;
+
+ fn into_iter(self) -> vec::IntoIter<T> {
+ self.into_iter()
+ }
+}
+
+impl<T: PartialEq> PartialEq for RepeatedField<T> {
+ #[inline]
+ fn eq(&self, other: &RepeatedField<T>) -> bool {
+ self.as_ref() == other.as_ref()
+ }
+}
+
+impl<T: Eq> Eq for RepeatedField<T> {}
+
+impl<T: PartialEq> PartialEq<[T]> for RepeatedField<T> {
+ fn eq(&self, other: &[T]) -> bool {
+ self.as_slice() == other
+ }
+}
+
+impl<T: PartialEq> PartialEq<RepeatedField<T>> for [T] {
+ fn eq(&self, other: &RepeatedField<T>) -> bool {
+ self == other.as_slice()
+ }
+}
+
+impl<T: PartialEq> RepeatedField<T> {
+ /// True iff this container contains given element.
+ #[inline]
+ pub fn contains(&self, value: &T) -> bool {
+ self.as_ref().contains(value)
+ }
+}
+
+impl<T: Hash> Hash for RepeatedField<T> {
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ self.as_ref().hash(state);
+ }
+}
+
+impl<T> AsRef<[T]> for RepeatedField<T> {
+ #[inline]
+ fn as_ref<'a>(&'a self) -> &'a [T] {
+ &self.vec[..self.len]
+ }
+}
+
+impl<T> Borrow<[T]> for RepeatedField<T> {
+ #[inline]
+ fn borrow(&self) -> &[T] {
+ &self.vec[..self.len]
+ }
+}
+
+impl<T> Deref for RepeatedField<T> {
+ type Target = [T];
+ #[inline]
+ fn deref(&self) -> &[T] {
+ &self.vec[..self.len]
+ }
+}
+
+impl<T> DerefMut for RepeatedField<T> {
+ #[inline]
+ fn deref_mut(&mut self) -> &mut [T] {
+ &mut self.vec[..self.len]
+ }
+}
+
+impl<T> Index<usize> for RepeatedField<T> {
+ type Output = T;
+
+ #[inline]
+ fn index<'a>(&'a self, index: usize) -> &'a T {
+ &self.as_ref()[index]
+ }
+}
+
+impl<T> IndexMut<usize> for RepeatedField<T> {
+ #[inline]
+ fn index_mut<'a>(&'a mut self, index: usize) -> &'a mut T {
+ &mut self.as_mut_slice()[index]
+ }
+}
+
+impl<T> Extend<T> for RepeatedField<T> {
+ fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
+ self.vec.truncate(self.len);
+ self.vec.extend(iter);
+ self.len = self.vec.len();
+ }
+}
+
+impl<'a, T: Copy + 'a> Extend<&'a T> for RepeatedField<T> {
+ fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
+ self.vec.truncate(self.len);
+ self.vec.extend(iter);
+ self.len = self.vec.len();
+ }
+}
+
+impl<T: fmt::Debug> fmt::Debug for RepeatedField<T> {
+ #[inline]
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ self.as_ref().fmt(f)
+ }
+}
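The vendored RepeatedField above avoids deallocation by tracking a logical len next to the backing Vec: clear() only resets len, and push_default() reuses a previously allocated element when one is available. The following simplified, String-only sketch (StringPool is a made-up type, not the real RepeatedField) shows the same idea.

// `StringPool` is a made-up, String-only simplification of the RepeatedField idea.
struct StringPool {
    vec: Vec<String>,
    len: usize,
}

impl StringPool {
    fn new() -> Self {
        Self { vec: Vec::new(), len: 0 }
    }

    // Keep every String (and its heap buffer) alive; only the logical length resets.
    fn clear(&mut self) {
        self.len = 0;
    }

    // Hand out a cleared slot, reusing an already-allocated String when possible.
    fn push_default(&mut self) -> &mut String {
        if self.len == self.vec.len() {
            self.vec.push(String::new()); // only allocates a slot the first time through
        } else {
            self.vec[self.len].clear(); // reuse: empty the old String, keep its capacity
        }
        self.len += 1;
        &mut self.vec[self.len - 1]
    }
}

fn main() {
    let mut labels = StringPool::new();
    labels.push_default().push_str("__name__");
    let first_capacity = labels.vec[0].capacity();

    labels.clear(); // logical length back to 0, allocation retained
    labels.push_default().push_str("job");

    // The reused slot kept its original buffer, so no new heap allocation was needed.
    assert!(labels.vec[0].capacity() >= first_capacity);
}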
diff --git a/src/servers/tests/http/prom_store_test.rs b/src/servers/tests/http/prom_store_test.rs
index 44564c367b4f..f77c7d7cb27d 100644
--- a/src/servers/tests/http/prom_store_test.rs
+++ b/src/servers/tests/http/prom_store_test.rs
@@ -18,6 +18,7 @@ use api::prom_store::remote::{
LabelMatcher, Query, QueryResult, ReadRequest, ReadResponse, WriteRequest,
};
use api::v1::greptime_request::Request;
+use api::v1::RowInsertRequests;
use async_trait::async_trait;
use axum::Router;
use axum_test_helper::TestClient;
@@ -64,6 +65,16 @@ impl PromStoreProtocolHandler for DummyInstance {
Ok(())
}
+
+ async fn write_fast(
+ &self,
+ _request: RowInsertRequests,
+ _ctx: QueryContextRef,
+ _with_metric_engine: bool,
+ ) -> Result<()> {
+ Ok(())
+ }
+
async fn read(&self, request: ReadRequest, ctx: QueryContextRef) -> Result<PromStoreResponse> {
let _ = self
.tx
@@ -141,6 +152,7 @@ fn make_test_app(tx: mpsc::Sender<(String, Vec<u8>)>) -> Router {
#[tokio::test]
async fn test_prometheus_remote_write_read() {
+ common_telemetry::init_default_ut_logging();
let (tx, mut rx) = mpsc::channel(100);
let app = make_test_app(tx);
@@ -219,28 +231,17 @@ async fn test_prometheus_remote_write_read() {
requests.push(s);
}
- assert_eq!(4, requests.len());
+ assert_eq!(2, requests.len());
- assert_eq!("public", requests[0].0);
- assert_eq!("prometheus", requests[1].0);
- assert_eq!("prometheus", requests[2].0);
- assert_eq!("public", requests[3].0);
-
- assert_eq!(
- write_request,
- WriteRequest::decode(&(requests[0].1)[..]).unwrap()
- );
- assert_eq!(
- write_request,
- WriteRequest::decode(&(requests[1].1)[..]).unwrap()
- );
+ assert_eq!("prometheus", requests[0].0);
+ assert_eq!("public", requests[1].0);
assert_eq!(
read_request,
- ReadRequest::decode(&(requests[2].1)[..]).unwrap()
+ ReadRequest::decode(&(requests[0].1)[..]).unwrap()
);
assert_eq!(
read_request,
- ReadRequest::decode(&(requests[3].1)[..]).unwrap()
+ ReadRequest::decode(&(requests[1].1)[..]).unwrap()
);
}
|
feat
|
decode prom requests to grpc (#3425)
|
2fd1075c4fa3200d951bf7690a0d11fc0223300e
|
2023-05-15 18:39:09
|
Ruihang Xia
|
fix: uses nextest in the Release CI (#1582)
| false
|
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index b07c98707fee..d501c424ea2c 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -84,13 +84,14 @@ jobs:
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
targets: ${{ matrix.arch }}
-
+ - name: Install latest nextest release
+ uses: taiki-e/install-action@nextest
- name: Output package versions
run: protoc --version ; cargo version ; rustc --version ; gcc --version ; g++ --version
- # - name: Run tests
- # if: env.DISABLE_RUN_TESTS == 'false'
- # run: make unit-test integration-test sqlness-test
+ - name: Run tests
+ if: env.DISABLE_RUN_TESTS == 'false'
+ run: make test sqlness-test
- name: Run cargo build
if: contains(matrix.arch, 'darwin') || contains(matrix.opts, 'pyo3_backend') == false
@@ -200,13 +201,14 @@ jobs:
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
targets: ${{ matrix.arch }}
-
+ - name: Install latest nextest release
+ uses: taiki-e/install-action@nextest
- name: Output package versions
run: protoc --version ; cargo version ; rustc --version ; gcc --version ; g++ --version
- name: Run tests
if: env.DISABLE_RUN_TESTS == 'false'
- run: make unit-test integration-test sqlness-test
+ run: make test sqlness-test
- name: Run cargo build
if: contains(matrix.arch, 'darwin') || contains(matrix.opts, 'pyo3_backend') == false
diff --git a/Makefile b/Makefile
index f9ef92a6c964..1e3dd4eb0a64 100644
--- a/Makefile
+++ b/Makefile
@@ -33,13 +33,12 @@ docker-image: ## Build docker image.
##@ Test
-.PHONY: unit-test
-unit-test: ## Run unit test.
- cargo test --workspace
+test: nextest ## Run unit and integration tests.
+ cargo nextest run
-.PHONY: integration-test
-integration-test: ## Run integation test.
- cargo test integration
+.PHONY: nextest ## Install nextest tools.
+nextest:
+ cargo --list | grep nextest || cargo install cargo-nextest --locked
.PHONY: sqlness-test
sqlness-test: ## Run sqlness test.
|
fix
|
uses nextest in the Release CI (#1582)
|
8bade8f8e495e7fc2551bb0daac8261e06bc37f2
|
2024-01-25 19:28:43
|
Weny Xu
|
fix: fix create table ddl return incorrect table id (#3232)
| false
|
diff --git a/src/common/meta/src/ddl/create_table.rs b/src/common/meta/src/ddl/create_table.rs
index ca9ac1e0b0a0..848e9f2d8eed 100644
--- a/src/common/meta/src/ddl/create_table.rs
+++ b/src/common/meta/src/ddl/create_table.rs
@@ -104,18 +104,18 @@ impl CreateTableProcedure {
/// Checks whether the table exists.
async fn on_prepare(&mut self) -> Result<Status> {
let expr = &self.creator.data.task.create_table;
- let exist = self
+ let table_name_value = self
.context
.table_metadata_manager
.table_name_manager()
- .exists(TableNameKey::new(
+ .get(TableNameKey::new(
&expr.catalog_name,
&expr.schema_name,
&expr.table_name,
))
.await?;
- if exist {
+ if let Some(value) = table_name_value {
ensure!(
self.creator.data.task.create_table.create_if_not_exists,
error::TableAlreadyExistsSnafu {
@@ -123,7 +123,8 @@ impl CreateTableProcedure {
}
);
- return Ok(Status::done());
+ let table_id = value.table_id();
+ return Ok(Status::done_with_output(table_id));
}
self.creator.data.state = CreateTableState::DatanodeCreateRegions;
@@ -315,7 +316,7 @@ impl CreateTableProcedure {
.await?;
info!("Created table metadata for table {table_id}");
- Ok(Status::done())
+ Ok(Status::done_with_output(table_id))
}
}
diff --git a/src/common/meta/src/ddl_manager.rs b/src/common/meta/src/ddl_manager.rs
index 655d66126b8b..d7c74ae51988 100644
--- a/src/common/meta/src/ddl_manager.rs
+++ b/src/common/meta/src/ddl_manager.rs
@@ -15,7 +15,7 @@
use std::collections::HashMap;
use std::sync::Arc;
-use common_procedure::{watcher, ProcedureId, ProcedureManagerRef, ProcedureWithId};
+use common_procedure::{watcher, Output, ProcedureId, ProcedureManagerRef, ProcedureWithId};
use common_telemetry::tracing_context::{FutureExt, TracingContext};
use common_telemetry::{info, tracing};
use snafu::{OptionExt, ResultExt};
@@ -163,7 +163,7 @@ impl DdlManager {
alter_table_task: AlterTableTask,
table_info_value: DeserializedValueWithBytes<TableInfoValue>,
physical_table_info: Option<(TableId, TableName)>,
- ) -> Result<ProcedureId> {
+ ) -> Result<(ProcedureId, Option<Output>)> {
let context = self.create_context();
let procedure = AlterTableProcedure::new(
@@ -187,7 +187,7 @@ impl DdlManager {
create_table_task: CreateTableTask,
table_route: TableRouteValue,
region_wal_options: HashMap<RegionNumber, String>,
- ) -> Result<ProcedureId> {
+ ) -> Result<(ProcedureId, Option<Output>)> {
let context = self.create_context();
let procedure = CreateTableProcedure::new(
@@ -211,7 +211,7 @@ impl DdlManager {
drop_table_task: DropTableTask,
table_info_value: DeserializedValueWithBytes<TableInfoValue>,
table_route_value: DeserializedValueWithBytes<TableRouteValue>,
- ) -> Result<ProcedureId> {
+ ) -> Result<(ProcedureId, Option<Output>)> {
let context = self.create_context();
let procedure = DropTableProcedure::new(
@@ -235,7 +235,7 @@ impl DdlManager {
truncate_table_task: TruncateTableTask,
table_info_value: DeserializedValueWithBytes<TableInfoValue>,
region_routes: Vec<RegionRoute>,
- ) -> Result<ProcedureId> {
+ ) -> Result<(ProcedureId, Option<Output>)> {
let context = self.create_context();
let procedure = TruncateTableProcedure::new(
cluster_id,
@@ -250,7 +250,10 @@ impl DdlManager {
self.submit_procedure(procedure_with_id).await
}
- async fn submit_procedure(&self, procedure_with_id: ProcedureWithId) -> Result<ProcedureId> {
+ async fn submit_procedure(
+ &self,
+ procedure_with_id: ProcedureWithId,
+ ) -> Result<(ProcedureId, Option<Output>)> {
let procedure_id = procedure_with_id.id;
let mut watcher = self
@@ -259,11 +262,11 @@ impl DdlManager {
.await
.context(SubmitProcedureSnafu)?;
- watcher::wait(&mut watcher)
+ let output = watcher::wait(&mut watcher)
.await
.context(WaitProcedureSnafu)?;
- Ok(procedure_id)
+ Ok((procedure_id, output))
}
}
@@ -288,7 +291,7 @@ async fn handle_truncate_table_task(
let table_route = table_route_value.into_inner().region_routes()?.clone();
- let id = ddl_manager
+ let (id, _) = ddl_manager
.submit_truncate_table_task(
cluster_id,
truncate_table_task,
@@ -363,7 +366,7 @@ async fn handle_alter_table_task(
))
};
- let id = ddl_manager
+ let (id, _) = ddl_manager
.submit_alter_table_task(
cluster_id,
alter_table_task,
@@ -405,7 +408,7 @@ async fn handle_drop_table_task(
let table_route_value =
DeserializedValueWithBytes::from_inner(TableRouteValue::Physical(table_route_value));
- let id = ddl_manager
+ let (id, _) = ddl_manager
.submit_drop_table_task(
cluster_id,
drop_table_task,
@@ -443,7 +446,7 @@ async fn handle_create_table_task(
create_table_task.table_info.ident.table_id = table_id;
- let id = ddl_manager
+ let (id, output) = ddl_manager
.submit_create_table_task(
cluster_id,
create_table_task,
@@ -451,8 +454,10 @@ async fn handle_create_table_task(
region_wal_options,
)
.await?;
+ let output = output.context(error::ProcedureOutputSnafu)?;
- info!("Table: {table_id:?} is created via procedure_id {id:?}");
+ let table_id = *(output.downcast_ref::<u32>().unwrap());
+ info!("Table: {table_id} is created via procedure_id {id:?}");
Ok(SubmitDdlTaskResponse {
key: id.to_string().into(),
diff --git a/src/common/meta/src/error.rs b/src/common/meta/src/error.rs
index 9da624fab9c2..246a09711d9a 100644
--- a/src/common/meta/src/error.rs
+++ b/src/common/meta/src/error.rs
@@ -118,6 +118,9 @@ pub enum Error {
source: datatypes::Error,
},
+ #[snafu(display("Failed to get procedure output"))]
+ ProcedureOutput { location: Location },
+
#[snafu(display("Primary key '{key}' not found when creating region request"))]
PrimaryKeyNotFound { key: String, location: Location },
@@ -396,7 +399,8 @@ impl ErrorExt for Error {
| ProduceRecord { .. }
| CreateKafkaWalTopic { .. }
| EmptyTopicPool { .. }
- | UnexpectedLogicalRouteTable { .. } => StatusCode::Unexpected,
+ | UnexpectedLogicalRouteTable { .. }
+ | ProcedureOutput { .. } => StatusCode::Unexpected,
SendMessage { .. }
| GetKvCache { .. }
diff --git a/src/common/procedure/src/lib.rs b/src/common/procedure/src/lib.rs
index 338664057200..36ccd6b19050 100644
--- a/src/common/procedure/src/lib.rs
+++ b/src/common/procedure/src/lib.rs
@@ -25,7 +25,7 @@ pub mod watcher;
pub use crate::error::{Error, Result};
pub use crate::procedure::{
- BoxedProcedure, Context, ContextProvider, LockKey, Procedure, ProcedureId, ProcedureManager,
- ProcedureManagerRef, ProcedureState, ProcedureWithId, Status, StringKey,
+ BoxedProcedure, Context, ContextProvider, LockKey, Output, Procedure, ProcedureId,
+ ProcedureManager, ProcedureManagerRef, ProcedureState, ProcedureWithId, Status, StringKey,
};
pub use crate::watcher::Watcher;
diff --git a/src/common/procedure/src/procedure.rs b/src/common/procedure/src/procedure.rs
index a60d935c3e25..93d8711edd5a 100644
--- a/src/common/procedure/src/procedure.rs
+++ b/src/common/procedure/src/procedure.rs
@@ -58,9 +58,9 @@ impl Status {
}
/// Returns a [Status::Done] with output.
- pub fn done_with_output(output: Output) -> Status {
+ pub fn done_with_output<T: Any + Send + Sync>(output: T) -> Status {
Status::Done {
- output: Some(output),
+ output: Some(Arc::new(output)),
}
}
/// Returns `true` if the procedure is done.
diff --git a/src/common/procedure/src/watcher.rs b/src/common/procedure/src/watcher.rs
index 93aa91d5bc35..de15b545c1d0 100644
--- a/src/common/procedure/src/watcher.rs
+++ b/src/common/procedure/src/watcher.rs
@@ -89,7 +89,7 @@ mod tests {
self.error = !self.error;
Err(Error::retry_later(MockError::new(StatusCode::Internal)))
} else {
- Ok(Status::done_with_output(Arc::new("hello")))
+ Ok(Status::done_with_output("hello"))
}
}
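This commit makes Status::done_with_output generic over any Any + Send + Sync value, and handle_create_table_task recovers the table id with downcast_ref::<u32>(). A small sketch of that type-erased output pattern follows; the local Output alias and the free done_with_output function are assumptions that approximate the real common-procedure types.

use std::any::Any;
use std::sync::Arc;

// Local stand-in for common-procedure's Output; the diff wraps values in Arc::new
// and later calls downcast_ref::<u32>(), so Arc<dyn Any + Send + Sync> is assumed here.
type Output = Arc<dyn Any + Send + Sync>;

fn done_with_output<T: Any + Send + Sync>(output: T) -> Output {
    Arc::new(output)
}

fn main() {
    // e.g. a freshly created table id flowing out of the create-table procedure
    let output = done_with_output(1024u32);
    let table_id = *output.downcast_ref::<u32>().expect("output should be a u32");
    assert_eq!(table_id, 1024);
}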
|
fix
|
fix create table ddl return incorrect table id (#3232)
|
68b59e0e5e28b1d6e2e0a89f570545319c1edf86
|
2024-08-31 08:46:01
|
Ruihang Xia
|
feat: remove the requirement that partition column must be PK (#4647)
| false
|
diff --git a/src/operator/src/error.rs b/src/operator/src/error.rs
index e7526c4092fe..2473128137df 100644
--- a/src/operator/src/error.rs
+++ b/src/operator/src/error.rs
@@ -676,18 +676,6 @@ pub enum Error {
location: Location,
},
- #[snafu(display(
- "Invalid partition columns when creating table '{}', reason: {}",
- table,
- reason
- ))]
- InvalidPartitionColumns {
- table: String,
- reason: String,
- #[snafu(implicit)]
- location: Location,
- },
-
#[snafu(display("Failed to prepare file table"))]
PrepareFileTable {
#[snafu(implicit)]
@@ -812,7 +800,6 @@ impl ErrorExt for Error {
| Error::ProjectSchema { .. }
| Error::UnsupportedFormat { .. }
| Error::ColumnNoneDefaultValue { .. }
- | Error::InvalidPartitionColumns { .. }
| Error::PrepareFileTable { .. }
| Error::InferFileTableSchema { .. }
| Error::SchemaIncompatible { .. }
diff --git a/src/operator/src/statement/ddl.rs b/src/operator/src/statement/ddl.rs
index 6e646bc5fe5c..afef6d590d5d 100644
--- a/src/operator/src/statement/ddl.rs
+++ b/src/operator/src/statement/ddl.rs
@@ -70,11 +70,11 @@ use super::StatementExecutor;
use crate::error::{
self, AlterExprToRequestSnafu, CatalogSnafu, ColumnDataTypeSnafu, ColumnNotFoundSnafu,
CreateLogicalTablesSnafu, CreateTableInfoSnafu, DeserializePartitionSnafu, EmptyDdlExprSnafu,
- ExtractTableNamesSnafu, FlowNotFoundSnafu, InvalidPartitionColumnsSnafu,
- InvalidPartitionRuleSnafu, InvalidPartitionSnafu, InvalidTableNameSnafu, InvalidViewNameSnafu,
- InvalidViewStmtSnafu, ParseSqlValueSnafu, Result, SchemaInUseSnafu, SchemaNotFoundSnafu,
- SchemaReadOnlySnafu, SubstraitCodecSnafu, TableAlreadyExistsSnafu, TableMetadataManagerSnafu,
- TableNotFoundSnafu, UnrecognizedTableOptionSnafu, ViewAlreadyExistsSnafu,
+ ExtractTableNamesSnafu, FlowNotFoundSnafu, InvalidPartitionRuleSnafu, InvalidPartitionSnafu,
+ InvalidTableNameSnafu, InvalidViewNameSnafu, InvalidViewStmtSnafu, ParseSqlValueSnafu, Result,
+ SchemaInUseSnafu, SchemaNotFoundSnafu, SchemaReadOnlySnafu, SubstraitCodecSnafu,
+ TableAlreadyExistsSnafu, TableMetadataManagerSnafu, TableNotFoundSnafu,
+ UnrecognizedTableOptionSnafu, ViewAlreadyExistsSnafu,
};
use crate::expr_factory;
use crate::statement::show::create_partitions_stmt;
@@ -239,7 +239,6 @@ impl StatementExecutor {
);
let (partitions, partition_cols) = parse_partitions(create_table, partitions, &query_ctx)?;
- validate_partition_columns(create_table, &partition_cols)?;
let mut table_info = create_table_info(create_table, partition_cols, schema_opts)?;
let resp = self
@@ -1209,22 +1208,6 @@ impl StatementExecutor {
}
}
-fn validate_partition_columns(
- create_table: &CreateTableExpr,
- partition_cols: &[String],
-) -> Result<()> {
- ensure!(
- partition_cols
- .iter()
- .all(|col| &create_table.time_index == col || create_table.primary_keys.contains(col)),
- InvalidPartitionColumnsSnafu {
- table: &create_table.table_name,
- reason: "partition column must belongs to primary keys or equals to time index"
- }
- );
- Ok(())
-}
-
/// Parse partition statement [Partitions] into [MetaPartition] and partition columns.
fn parse_partitions(
create_table: &CreateTableExpr,
@@ -1519,31 +1502,6 @@ mod test {
assert!(NAME_PATTERN_REG.is_match("hello"));
}
- #[test]
- fn test_validate_partition_columns() {
- let create_table = CreateTableExpr {
- table_name: "my_table".to_string(),
- time_index: "ts".to_string(),
- primary_keys: vec!["a".to_string(), "b".to_string()],
- ..Default::default()
- };
-
- assert!(validate_partition_columns(&create_table, &[]).is_ok());
- assert!(validate_partition_columns(&create_table, &["ts".to_string()]).is_ok());
- assert!(validate_partition_columns(&create_table, &["a".to_string()]).is_ok());
- assert!(
- validate_partition_columns(&create_table, &["b".to_string(), "a".to_string()]).is_ok()
- );
-
- assert_eq!(
- validate_partition_columns(&create_table, &["a".to_string(), "c".to_string()])
- .unwrap_err()
- .to_string(),
- "Invalid partition columns when creating table 'my_table', \
- reason: partition column must belongs to primary keys or equals to time index",
- );
- }
-
#[tokio::test]
#[ignore = "TODO(ruihang): WIP new partition rule"]
async fn test_parse_partitions() {
diff --git a/src/sql/src/parsers/create_parser.rs b/src/sql/src/parsers/create_parser.rs
index 98638df3f21d..de81bd1a2144 100644
--- a/src/sql/src/parsers/create_parser.rs
+++ b/src/sql/src/parsers/create_parser.rs
@@ -380,7 +380,7 @@ impl<'a> ParserContext<'a> {
Ok(options.into())
}
- /// "PARTITION BY ..." clause
+ /// "PARTITION ON COLUMNS (...)" clause
fn parse_partitions(&mut self) -> Result<Option<Partitions>> {
if !self.parser.parse_keyword(Keyword::PARTITION) {
return Ok(None);
|
feat
|
remove the requirement that partition column must be PK (#4647)
|
2f2609d8c645e8cf4b68b9ad1a50ec320b75fb9c
|
2023-02-07 12:52:32
|
Ruihang Xia
|
build(ci): disable release workflow for forked repo (#954)
| false
|
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 24e3fa854c43..658dd86a815d 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -40,6 +40,7 @@ jobs:
os: macos-latest
file: greptime-darwin-amd64
runs-on: ${{ matrix.os }}
+ if: github.repository == 'GreptimeTeam/greptimedb'
steps:
- name: Checkout sources
uses: actions/checkout@v3
@@ -132,6 +133,7 @@ jobs:
name: Release artifacts
needs: [build]
runs-on: ubuntu-latest
+ if: github.repository == 'GreptimeTeam/greptimedb'
steps:
- name: Checkout sources
uses: actions/checkout@v3
@@ -174,6 +176,7 @@ jobs:
name: Build docker image
needs: [build]
runs-on: ubuntu-latest
+ if: github.repository == 'GreptimeTeam/greptimedb'
steps:
- name: Checkout sources
uses: actions/checkout@v3
|
build
|
disable release workflow for forked repo (#954)
|
af13eeaad321277de95c6bf362a3b2ae7e4d8ac7
|
2023-12-28 09:31:42
|
dennis zhuang
|
feat: adds character_sets, collations and events etc. (#3017)
| false
|
diff --git a/src/catalog/src/information_schema.rs b/src/catalog/src/information_schema.rs
index 0ed20844bf30..5bbfb1c6d88f 100644
--- a/src/catalog/src/information_schema.rs
+++ b/src/catalog/src/information_schema.rs
@@ -51,6 +51,11 @@ lazy_static! {
COLUMN_PRIVILEGES,
COLUMN_STATISTICS,
BUILD_INFO,
+ CHARACTER_SETS,
+ COLLATIONS,
+ COLLATION_CHARACTER_SET_APPLICABILITY,
+ CHECK_CONSTRAINTS,
+ EVENTS,
];
}
@@ -125,7 +130,7 @@ impl InformationSchemaProvider {
// Add memory tables
for name in MEMORY_TABLES.iter() {
- tables.insert((*name).to_string(), self.build_table(name).unwrap());
+ tables.insert((*name).to_string(), self.build_table(name).expect(name));
}
self.tables = tables;
@@ -156,6 +161,13 @@ impl InformationSchemaProvider {
COLUMN_PRIVILEGES => setup_memory_table!(COLUMN_PRIVILEGES),
COLUMN_STATISTICS => setup_memory_table!(COLUMN_STATISTICS),
BUILD_INFO => setup_memory_table!(BUILD_INFO),
+ CHARACTER_SETS => setup_memory_table!(CHARACTER_SETS),
+ COLLATIONS => setup_memory_table!(COLLATIONS),
+ COLLATION_CHARACTER_SET_APPLICABILITY => {
+ setup_memory_table!(COLLATION_CHARACTER_SET_APPLICABILITY)
+ }
+ CHECK_CONSTRAINTS => setup_memory_table!(CHECK_CONSTRAINTS),
+ EVENTS => setup_memory_table!(EVENTS),
_ => None,
}
}
diff --git a/src/catalog/src/information_schema/memory_table/tables.rs b/src/catalog/src/information_schema/memory_table/tables.rs
index e19b8d08d1ef..abb719ca1b4b 100644
--- a/src/catalog/src/information_schema/memory_table/tables.rs
+++ b/src/catalog/src/information_schema/memory_table/tables.rs
@@ -17,7 +17,7 @@ use std::sync::Arc;
use common_catalog::consts::MITO_ENGINE;
use datatypes::prelude::{ConcreteDataType, VectorRef};
use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
-use datatypes::vectors::StringVector;
+use datatypes::vectors::{Int64Vector, StringVector};
use crate::information_schema::table_names::*;
@@ -97,6 +97,92 @@ pub fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>) {
],
),
+ CHARACTER_SETS => (
+ vec![
+ string_column("CHARACTER_SET_NAME"),
+ string_column("DEFAULT_COLLATE_NAME"),
+ string_column("DESCRIPTION"),
+ bigint_column("MAXLEN"),
+ ],
+ vec![
+ Arc::new(StringVector::from(vec!["utf8"])),
+ Arc::new(StringVector::from(vec!["utf8_bin"])),
+ Arc::new(StringVector::from(vec!["UTF-8 Unicode"])),
+ Arc::new(Int64Vector::from_slice([4])),
+ ],
+ ),
+
+ COLLATIONS => (
+ vec![
+ string_column("COLLATION_NAME"),
+ string_column("CHARACTER_SET_NAME"),
+ bigint_column("ID"),
+ string_column("IS_DEFAULT"),
+ string_column("IS_COMPILED"),
+ bigint_column("SORTLEN"),
+ ],
+ vec![
+ Arc::new(StringVector::from(vec!["utf8_bin"])),
+ Arc::new(StringVector::from(vec!["utf8"])),
+ Arc::new(Int64Vector::from_slice([1])),
+ Arc::new(StringVector::from(vec!["Yes"])),
+ Arc::new(StringVector::from(vec!["Yes"])),
+ Arc::new(Int64Vector::from_slice([1])),
+ ],
+ ),
+
+ COLLATION_CHARACTER_SET_APPLICABILITY => (
+ vec![
+ string_column("COLLATION_NAME"),
+ string_column("CHARACTER_SET_NAME"),
+ ],
+ vec![
+ Arc::new(StringVector::from(vec!["utf8_bin"])),
+ Arc::new(StringVector::from(vec!["utf8"])),
+ ],
+ ),
+
+ CHECK_CONSTRAINTS => (
+ string_columns(&[
+ "CONSTRAINT_CATALOG",
+ "CONSTRAINT_SCHEMA",
+ "CONSTRAINT_NAME",
+ "CHECK_CLAUSE",
+ ]),
+            // Check constraints are not supported yet
+ vec![],
+ ),
+
+ EVENTS => (
+ vec![
+ string_column("EVENT_CATALOG"),
+ string_column("EVENT_SCHEMA"),
+ string_column("EVENT_NAME"),
+ string_column("DEFINER"),
+ string_column("TIME_ZONE"),
+ string_column("EVENT_BODY"),
+ string_column("EVENT_DEFINITION"),
+ string_column("EVENT_TYPE"),
+ datetime_column("EXECUTE_AT"),
+ bigint_column("INTERVAL_VALUE"),
+ string_column("INTERVAL_FIELD"),
+ string_column("SQL_MODE"),
+ datetime_column("STARTS"),
+ datetime_column("ENDS"),
+ string_column("STATUS"),
+ string_column("ON_COMPLETION"),
+ datetime_column("CREATED"),
+ datetime_column("LAST_ALTERED"),
+ datetime_column("LAST_EXECUTED"),
+ string_column("EVENT_COMMENT"),
+ bigint_column("ORIGINATOR"),
+ string_column("CHARACTER_SET_CLIENT"),
+ string_column("COLLATION_CONNECTION"),
+ string_column("DATABASE_COLLATION"),
+ ],
+ vec![],
+ ),
+
_ => unreachable!("Unknown table in information_schema: {}", table_name),
};
@@ -115,6 +201,22 @@ fn string_column(name: &str) -> ColumnSchema {
)
}
+fn bigint_column(name: &str) -> ColumnSchema {
+ ColumnSchema::new(
+ str::to_lowercase(name),
+ ConcreteDataType::int64_datatype(),
+ false,
+ )
+}
+
+fn datetime_column(name: &str) -> ColumnSchema {
+ ColumnSchema::new(
+ str::to_lowercase(name),
+ ConcreteDataType::datetime_datatype(),
+ false,
+ )
+}
+
#[cfg(test)]
mod tests {
use super::*;
diff --git a/src/catalog/src/information_schema/table_names.rs b/src/catalog/src/information_schema/table_names.rs
index 1bd0df8a90ce..996a2e035f48 100644
--- a/src/catalog/src/information_schema/table_names.rs
+++ b/src/catalog/src/information_schema/table_names.rs
@@ -20,3 +20,8 @@ pub const ENGINES: &str = "engines";
pub const COLUMN_PRIVILEGES: &str = "column_privileges";
pub const COLUMN_STATISTICS: &str = "column_statistics";
pub const BUILD_INFO: &str = "build_info";
+pub const CHARACTER_SETS: &str = "character_sets";
+pub const COLLATIONS: &str = "collations";
+pub const COLLATION_CHARACTER_SET_APPLICABILITY: &str = "collation_character_set_applicability";
+pub const CHECK_CONSTRAINTS: &str = "check_constraints";
+pub const EVENTS: &str = "events";
diff --git a/src/common/catalog/src/consts.rs b/src/common/catalog/src/consts.rs
index a02b0f87bf3a..d9cb1ba54b50 100644
--- a/src/common/catalog/src/consts.rs
+++ b/src/common/catalog/src/consts.rs
@@ -44,6 +44,16 @@ pub const INFORMATION_SCHEMA_COLUMN_PRIVILEGES_TABLE_ID: u32 = 6;
pub const INFORMATION_SCHEMA_COLUMN_STATISTICS_TABLE_ID: u32 = 7;
/// id for information_schema.build_info
pub const INFORMATION_SCHEMA_BUILD_INFO_TABLE_ID: u32 = 8;
+/// id for information_schema.CHARACTER_SETS
+pub const INFORMATION_SCHEMA_CHARACTER_SETS_TABLE_ID: u32 = 9;
+/// id for information_schema.COLLATIONS
+pub const INFORMATION_SCHEMA_COLLATIONS_TABLE_ID: u32 = 10;
+/// id for information_schema.COLLATIONS
+pub const INFORMATION_SCHEMA_COLLATION_CHARACTER_SET_APPLICABILITY_TABLE_ID: u32 = 11;
+/// id for information_schema.CHECK_CONSTRAINTS
+pub const INFORMATION_SCHEMA_CHECK_CONSTRAINTS_TABLE_ID: u32 = 12;
+/// id for information_schema.EVENTS
+pub const INFORMATION_SCHEMA_EVENTS_TABLE_ID: u32 = 13;
/// ----- End of information_schema tables -----
pub const MITO_ENGINE: &str = "mito";
diff --git a/tests/cases/standalone/common/show/show_databases_tables.result b/tests/cases/standalone/common/show/show_databases_tables.result
index ca4dc2c5b4aa..0d43286ccc88 100644
--- a/tests/cases/standalone/common/show/show_databases_tables.result
+++ b/tests/cases/standalone/common/show/show_databases_tables.result
@@ -17,14 +17,19 @@ Affected Rows: 0
show tables;
-+-------------------+
-| Tables |
-+-------------------+
-| build_info |
-| column_privileges |
-| column_statistics |
-| columns |
-| engines |
-| tables |
-+-------------------+
++---------------------------------------+
+| Tables |
++---------------------------------------+
+| build_info |
+| character_sets |
+| check_constraints |
+| collation_character_set_applicability |
+| collations |
+| column_privileges |
+| column_statistics |
+| columns |
+| engines |
+| events |
+| tables |
++---------------------------------------+
diff --git a/tests/cases/standalone/common/system/information_schema.result b/tests/cases/standalone/common/system/information_schema.result
index e05ec496d1a8..c3949ca644ba 100644
--- a/tests/cases/standalone/common/system/information_schema.result
+++ b/tests/cases/standalone/common/system/information_schema.result
@@ -9,59 +9,104 @@ from information_schema.tables
where table_name != 'scripts'
order by table_schema, table_name;
-+---------------+--------------------+-------------------+-----------------+----------+-------------+
-| table_catalog | table_schema | table_name | table_type | table_id | engine |
-+---------------+--------------------+-------------------+-----------------+----------+-------------+
-| greptime | information_schema | build_info | LOCAL TEMPORARY | 8 | |
-| greptime | information_schema | column_privileges | LOCAL TEMPORARY | 6 | |
-| greptime | information_schema | column_statistics | LOCAL TEMPORARY | 7 | |
-| greptime | information_schema | columns | LOCAL TEMPORARY | 4 | |
-| greptime | information_schema | engines | LOCAL TEMPORARY | 5 | |
-| greptime | information_schema | tables | LOCAL TEMPORARY | 3 | |
-| greptime | public | numbers | LOCAL TEMPORARY | 2 | test_engine |
-+---------------+--------------------+-------------------+-----------------+----------+-------------+
++---------------+--------------------+---------------------------------------+-----------------+----------+-------------+
+| table_catalog | table_schema | table_name | table_type | table_id | engine |
++---------------+--------------------+---------------------------------------+-----------------+----------+-------------+
+| greptime | information_schema | build_info | LOCAL TEMPORARY | 8 | |
+| greptime | information_schema | character_sets | LOCAL TEMPORARY | 9 | |
+| greptime | information_schema | check_constraints | LOCAL TEMPORARY | 12 | |
+| greptime | information_schema | collation_character_set_applicability | LOCAL TEMPORARY | 11 | |
+| greptime | information_schema | collations | LOCAL TEMPORARY | 10 | |
+| greptime | information_schema | column_privileges | LOCAL TEMPORARY | 6 | |
+| greptime | information_schema | column_statistics | LOCAL TEMPORARY | 7 | |
+| greptime | information_schema | columns | LOCAL TEMPORARY | 4 | |
+| greptime | information_schema | engines | LOCAL TEMPORARY | 5 | |
+| greptime | information_schema | events | LOCAL TEMPORARY | 13 | |
+| greptime | information_schema | tables | LOCAL TEMPORARY | 3 | |
+| greptime | public | numbers | LOCAL TEMPORARY | 2 | test_engine |
++---------------+--------------------+---------------------------------------+-----------------+----------+-------------+
select * from information_schema.columns order by table_schema, table_name;
-+---------------+--------------------+-------------------+------------------+-----------+---------------+
-| table_catalog | table_schema | table_name | column_name | data_type | semantic_type |
-+---------------+--------------------+-------------------+------------------+-----------+---------------+
-| greptime | information_schema | build_info | pkg_version | String | FIELD |
-| greptime | information_schema | build_info | git_dirty | String | FIELD |
-| greptime | information_schema | build_info | git_commit_short | String | FIELD |
-| greptime | information_schema | build_info | git_commit | String | FIELD |
-| greptime | information_schema | build_info | git_branch | String | FIELD |
-| greptime | information_schema | column_privileges | grantee | String | FIELD |
-| greptime | information_schema | column_privileges | is_grantable | String | FIELD |
-| greptime | information_schema | column_privileges | privilege_type | String | FIELD |
-| greptime | information_schema | column_privileges | column_name | String | FIELD |
-| greptime | information_schema | column_privileges | table_name | String | FIELD |
-| greptime | information_schema | column_privileges | table_schema | String | FIELD |
-| greptime | information_schema | column_privileges | table_catalog | String | FIELD |
-| greptime | information_schema | column_statistics | histogram | String | FIELD |
-| greptime | information_schema | column_statistics | column_name | String | FIELD |
-| greptime | information_schema | column_statistics | table_name | String | FIELD |
-| greptime | information_schema | column_statistics | schema_name | String | FIELD |
-| greptime | information_schema | columns | table_name | String | FIELD |
-| greptime | information_schema | columns | semantic_type | String | FIELD |
-| greptime | information_schema | columns | data_type | String | FIELD |
-| greptime | information_schema | columns | column_name | String | FIELD |
-| greptime | information_schema | columns | table_schema | String | FIELD |
-| greptime | information_schema | columns | table_catalog | String | FIELD |
-| greptime | information_schema | engines | savepoints | String | FIELD |
-| greptime | information_schema | engines | xa | String | FIELD |
-| greptime | information_schema | engines | transactions | String | FIELD |
-| greptime | information_schema | engines | comment | String | FIELD |
-| greptime | information_schema | engines | support | String | FIELD |
-| greptime | information_schema | engines | engine | String | FIELD |
-| greptime | information_schema | tables | table_schema | String | FIELD |
-| greptime | information_schema | tables | table_catalog | String | FIELD |
-| greptime | information_schema | tables | engine | String | FIELD |
-| greptime | information_schema | tables | table_id | UInt32 | FIELD |
-| greptime | information_schema | tables | table_type | String | FIELD |
-| greptime | information_schema | tables | table_name | String | FIELD |
-| greptime | public | numbers | number | UInt32 | TAG |
-+---------------+--------------------+-------------------+------------------+-----------+---------------+
++---------------+--------------------+---------------------------------------+----------------------+-----------+---------------+
+| table_catalog | table_schema | table_name | column_name | data_type | semantic_type |
++---------------+--------------------+---------------------------------------+----------------------+-----------+---------------+
+| greptime | information_schema | build_info | git_branch | String | FIELD |
+| greptime | information_schema | build_info | git_commit | String | FIELD |
+| greptime | information_schema | build_info | git_commit_short | String | FIELD |
+| greptime | information_schema | build_info | git_dirty | String | FIELD |
+| greptime | information_schema | build_info | pkg_version | String | FIELD |
+| greptime | information_schema | character_sets | maxlen | Int64 | FIELD |
+| greptime | information_schema | character_sets | character_set_name | String | FIELD |
+| greptime | information_schema | character_sets | default_collate_name | String | FIELD |
+| greptime | information_schema | character_sets | description | String | FIELD |
+| greptime | information_schema | check_constraints | check_clause | String | FIELD |
+| greptime | information_schema | check_constraints | constraint_name | String | FIELD |
+| greptime | information_schema | check_constraints | constraint_schema | String | FIELD |
+| greptime | information_schema | check_constraints | constraint_catalog | String | FIELD |
+| greptime | information_schema | collation_character_set_applicability | character_set_name | String | FIELD |
+| greptime | information_schema | collation_character_set_applicability | collation_name | String | FIELD |
+| greptime | information_schema | collations | collation_name | String | FIELD |
+| greptime | information_schema | collations | character_set_name | String | FIELD |
+| greptime | information_schema | collations | id | Int64 | FIELD |
+| greptime | information_schema | collations | is_default | String | FIELD |
+| greptime | information_schema | collations | is_compiled | String | FIELD |
+| greptime | information_schema | collations | sortlen | Int64 | FIELD |
+| greptime | information_schema | column_privileges | table_catalog | String | FIELD |
+| greptime | information_schema | column_privileges | grantee | String | FIELD |
+| greptime | information_schema | column_privileges | privilege_type | String | FIELD |
+| greptime | information_schema | column_privileges | is_grantable | String | FIELD |
+| greptime | information_schema | column_privileges | column_name | String | FIELD |
+| greptime | information_schema | column_privileges | table_name | String | FIELD |
+| greptime | information_schema | column_privileges | table_schema | String | FIELD |
+| greptime | information_schema | column_statistics | schema_name | String | FIELD |
+| greptime | information_schema | column_statistics | table_name | String | FIELD |
+| greptime | information_schema | column_statistics | column_name | String | FIELD |
+| greptime | information_schema | column_statistics | histogram | String | FIELD |
+| greptime | information_schema | columns | table_catalog | String | FIELD |
+| greptime | information_schema | columns | table_schema | String | FIELD |
+| greptime | information_schema | columns | semantic_type | String | FIELD |
+| greptime | information_schema | columns | data_type | String | FIELD |
+| greptime | information_schema | columns | column_name | String | FIELD |
+| greptime | information_schema | columns | table_name | String | FIELD |
+| greptime | information_schema | engines | savepoints | String | FIELD |
+| greptime | information_schema | engines | xa | String | FIELD |
+| greptime | information_schema | engines | transactions | String | FIELD |
+| greptime | information_schema | engines | comment | String | FIELD |
+| greptime | information_schema | engines | support | String | FIELD |
+| greptime | information_schema | engines | engine | String | FIELD |
+| greptime | information_schema | events | sql_mode | String | FIELD |
+| greptime | information_schema | events | interval_value | Int64 | FIELD |
+| greptime | information_schema | events | database_collation | String | FIELD |
+| greptime | information_schema | events | collation_connection | String | FIELD |
+| greptime | information_schema | events | character_set_client | String | FIELD |
+| greptime | information_schema | events | originator | Int64 | FIELD |
+| greptime | information_schema | events | event_catalog | String | FIELD |
+| greptime | information_schema | events | event_schema | String | FIELD |
+| greptime | information_schema | events | event_name | String | FIELD |
+| greptime | information_schema | events | definer | String | FIELD |
+| greptime | information_schema | events | time_zone | String | FIELD |
+| greptime | information_schema | events | event_body | String | FIELD |
+| greptime | information_schema | events | event_definition | String | FIELD |
+| greptime | information_schema | events | event_type | String | FIELD |
+| greptime | information_schema | events | execute_at | DateTime | FIELD |
+| greptime | information_schema | events | event_comment | String | FIELD |
+| greptime | information_schema | events | interval_field | String | FIELD |
+| greptime | information_schema | events | last_executed | DateTime | FIELD |
+| greptime | information_schema | events | starts | DateTime | FIELD |
+| greptime | information_schema | events | ends | DateTime | FIELD |
+| greptime | information_schema | events | status | String | FIELD |
+| greptime | information_schema | events | on_completion | String | FIELD |
+| greptime | information_schema | events | created | DateTime | FIELD |
+| greptime | information_schema | events | last_altered | DateTime | FIELD |
+| greptime | information_schema | tables | table_catalog | String | FIELD |
+| greptime | information_schema | tables | table_schema | String | FIELD |
+| greptime | information_schema | tables | table_name | String | FIELD |
+| greptime | information_schema | tables | table_type | String | FIELD |
+| greptime | information_schema | tables | table_id | UInt32 | FIELD |
+| greptime | information_schema | tables | engine | String | FIELD |
+| greptime | public | numbers | number | UInt32 | TAG |
++---------------+--------------------+---------------------------------------+----------------------+-----------+---------------+
create
database my_db;
@@ -197,6 +242,48 @@ select * from COLUMN_STATISTICS;
+-------------+------------+-------------+-----------+
+-------------+------------+-------------+-----------+
+select * from CHARACTER_SETS;
+
++--------------------+----------------------+---------------+--------+
+| character_set_name | default_collate_name | description | maxlen |
++--------------------+----------------------+---------------+--------+
+| utf8 | utf8_bin | UTF-8 Unicode | 4 |
++--------------------+----------------------+---------------+--------+
+
+select * from COLLATIONS;
+
++----------------+--------------------+----+------------+-------------+---------+
+| collation_name | character_set_name | id | is_default | is_compiled | sortlen |
++----------------+--------------------+----+------------+-------------+---------+
+| utf8_bin | utf8 | 1 | Yes | Yes | 1 |
++----------------+--------------------+----+------------+-------------+---------+
+
+select * from COLLATION_CHARACTER_SET_APPLICABILITY;
+
++----------------+--------------------+
+| collation_name | character_set_name |
++----------------+--------------------+
+| utf8_bin | utf8 |
++----------------+--------------------+
+
+desc table CHECK_CONSTRAINTS;
+
++--------------------+--------+-----+------+---------+---------------+
+| Column | Type | Key | Null | Default | Semantic Type |
++--------------------+--------+-----+------+---------+---------------+
+| constraint_catalog | String | | NO | | FIELD |
+| constraint_schema | String | | NO | | FIELD |
+| constraint_name | String | | NO | | FIELD |
+| check_clause | String | | NO | | FIELD |
++--------------------+--------+-----+------+---------+---------------+
+
+select * from CHECK_CONSTRAINTS;
+
++--------------------+-------------------+-----------------+--------------+
+| constraint_catalog | constraint_schema | constraint_name | check_clause |
++--------------------+-------------------+-----------------+--------------+
++--------------------+-------------------+-----------------+--------------+
+
use public;
Affected Rows: 0
diff --git a/tests/cases/standalone/common/system/information_schema.sql b/tests/cases/standalone/common/system/information_schema.sql
index 8916842af42d..34db6c144f4f 100644
--- a/tests/cases/standalone/common/system/information_schema.sql
+++ b/tests/cases/standalone/common/system/information_schema.sql
@@ -60,4 +60,14 @@ desc table COLUMN_STATISTICS;
select * from COLUMN_STATISTICS;
+select * from CHARACTER_SETS;
+
+select * from COLLATIONS;
+
+select * from COLLATION_CHARACTER_SET_APPLICABILITY;
+
+desc table CHECK_CONSTRAINTS;
+
+select * from CHECK_CONSTRAINTS;
+
use public;
|
feat
|
adds character_sets, collations and events etc. (#3017)
|
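The information_schema record above registers each new memory table as a pair of column schemas and constant column vectors, where an empty vector list means "no rows". A simplified, dependency-free sketch of that pairing and its length invariant (the `Column`/`MemoryTable` types here are illustrative stand-ins, not the `datatypes` crate API):

```rust
/// Illustrative column description; the real code uses `ColumnSchema` with
/// concrete data types such as `int64_datatype()` or `datetime_datatype()`.
struct Column {
    name: String,
}

/// A memory table is a schema plus one constant vector of values per column.
struct MemoryTable {
    columns: Vec<Column>,
    values: Vec<Vec<String>>, // one Vec per column; an empty list means "no rows"
}

impl MemoryTable {
    fn new(columns: Vec<Column>, values: Vec<Vec<String>>) -> MemoryTable {
        // Either the table has no rows (e.g. check_constraints, events) or
        // there is exactly one value vector per column, all of equal length.
        assert!(values.is_empty() || values.len() == columns.len());
        if let Some(first) = values.first() {
            assert!(values.iter().all(|v| v.len() == first.len()));
        }
        MemoryTable { columns, values }
    }
}

fn main() {
    let character_sets = MemoryTable::new(
        vec![
            Column { name: "character_set_name".into() },
            Column { name: "default_collate_name".into() },
        ],
        vec![vec!["utf8".into()], vec!["utf8_bin".into()]],
    );
    for column in &character_sets.columns {
        println!("column: {}", column.name);
    }
    assert_eq!(character_sets.values[0], vec!["utf8".to_string()]);
}
```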
3887d207b6ce0d180a3ba8787a94df23a773674d
|
2024-02-26 15:07:54
|
Ning Sun
|
feat: make tls certificates/keys reloadable (part 1) (#3335)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 84d28103f312..29ea367b9825 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3585,6 +3585,15 @@ dependencies = [
"winapi",
]
+[[package]]
+name = "fsevent-sys"
+version = "4.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "76ee7a02da4d231650c7cea31349b889be2f45ddb3ef3032d2ec8185f6313fd2"
+dependencies = [
+ "libc",
+]
+
[[package]]
name = "fst"
version = "0.4.7"
@@ -4369,6 +4378,26 @@ dependencies = [
"snafu",
]
+[[package]]
+name = "inotify"
+version = "0.9.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f8069d3ec154eb856955c1c0fbffefbf5f3c40a104ec912d4797314c1801abff"
+dependencies = [
+ "bitflags 1.3.2",
+ "inotify-sys",
+ "libc",
+]
+
+[[package]]
+name = "inotify-sys"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e05c02b5e89bff3b946cedeca278abc628fe811e604f027c45a8aa3cf793d0eb"
+dependencies = [
+ "libc",
+]
+
[[package]]
name = "instant"
version = "0.1.12"
@@ -4566,6 +4595,26 @@ dependencies = [
"indexmap 2.1.0",
]
+[[package]]
+name = "kqueue"
+version = "1.0.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7447f1ca1b7b563588a205fe93dea8df60fd981423a768bc1c0ded35ed147d0c"
+dependencies = [
+ "kqueue-sys",
+ "libc",
+]
+
+[[package]]
+name = "kqueue-sys"
+version = "1.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ed9625ffda8729b85e45cf04090035ac368927b8cebc34898e7c120f52e4838b"
+dependencies = [
+ "bitflags 1.3.2",
+ "libc",
+]
+
[[package]]
name = "lalrpop"
version = "0.19.12"
@@ -5655,6 +5704,25 @@ version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be"
+[[package]]
+name = "notify"
+version = "6.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6205bd8bb1e454ad2e27422015fb5e4f2bcc7e08fa8f27058670d208324a4d2d"
+dependencies = [
+ "bitflags 2.4.1",
+ "crossbeam-channel",
+ "filetime",
+ "fsevent-sys",
+ "inotify",
+ "kqueue",
+ "libc",
+ "log",
+ "mio",
+ "walkdir",
+ "windows-sys 0.48.0",
+]
+
[[package]]
name = "ntapi"
version = "0.4.1"
@@ -8995,6 +9063,7 @@ dependencies = [
"lazy_static",
"mime_guess",
"mysql_async",
+ "notify",
"once_cell",
"openmetrics-parser",
"opensrv-mysql",
@@ -9027,6 +9096,7 @@ dependencies = [
"sql",
"strum 0.25.0",
"table",
+ "tempfile",
"tikv-jemalloc-ctl",
"tokio",
"tokio-postgres",
diff --git a/src/frontend/src/server.rs b/src/frontend/src/server.rs
index 9fcc7372c858..4866dd7fab78 100644
--- a/src/frontend/src/server.rs
+++ b/src/frontend/src/server.rs
@@ -18,7 +18,6 @@ use std::sync::Arc;
use auth::UserProviderRef;
use common_base::Plugins;
use common_runtime::Builder as RuntimeBuilder;
-use servers::error::InternalIoSnafu;
use servers::grpc::builder::GrpcServerBuilder;
use servers::grpc::greptime_handler::GreptimeRequestHandler;
use servers::grpc::{GrpcServer, GrpcServerConfig};
@@ -30,6 +29,7 @@ use servers::postgres::PostgresServer;
use servers::query_handler::grpc::ServerGrpcQueryHandlerAdapter;
use servers::query_handler::sql::ServerSqlQueryHandlerAdapter;
use servers::server::{Server, ServerHandlers};
+use servers::tls::{watch_tls_config, ReloadableTlsServerConfig};
use snafu::ResultExt;
use crate::error::{self, Result, StartServerSnafu};
@@ -195,6 +195,12 @@ where
let opts = &opts.mysql;
let mysql_addr = parse_addr(&opts.addr)?;
+ let tls_server_config = Arc::new(
+ ReloadableTlsServerConfig::try_new(opts.tls.clone()).context(StartServerSnafu)?,
+ );
+
+ watch_tls_config(tls_server_config.clone()).context(StartServerSnafu)?;
+
let mysql_io_runtime = Arc::new(
RuntimeBuilder::default()
.worker_threads(opts.runtime_size)
@@ -210,11 +216,7 @@ where
)),
Arc::new(MysqlSpawnConfig::new(
opts.tls.should_force_tls(),
- opts.tls
- .setup()
- .context(InternalIoSnafu)
- .context(StartServerSnafu)?
- .map(Arc::new),
+ tls_server_config,
opts.reject_no_database.unwrap_or(false),
)),
);
@@ -226,6 +228,12 @@ where
let opts = &opts.postgres;
let pg_addr = parse_addr(&opts.addr)?;
+ let tls_server_config = Arc::new(
+ ReloadableTlsServerConfig::try_new(opts.tls.clone()).context(StartServerSnafu)?,
+ );
+
+ watch_tls_config(tls_server_config.clone()).context(StartServerSnafu)?;
+
let pg_io_runtime = Arc::new(
RuntimeBuilder::default()
.worker_threads(opts.runtime_size)
@@ -236,7 +244,8 @@ where
let pg_server = Box::new(PostgresServer::new(
ServerSqlQueryHandlerAdapter::arc(instance.clone()),
- opts.tls.clone(),
+ opts.tls.should_force_tls(),
+ tls_server_config,
pg_io_runtime,
user_provider.clone(),
)) as Box<dyn Server>;
diff --git a/src/servers/Cargo.toml b/src/servers/Cargo.toml
index d0aba8af463d..69f318815493 100644
--- a/src/servers/Cargo.toml
+++ b/src/servers/Cargo.toml
@@ -59,6 +59,7 @@ influxdb_line_protocol = { git = "https://github.com/evenyag/influxdb_iox", bran
itertools.workspace = true
lazy_static.workspace = true
mime_guess = "2.0"
+notify = "6.1"
once_cell.workspace = true
openmetrics-parser = "0.4"
opensrv-mysql = "0.7.0"
@@ -121,6 +122,7 @@ script = { workspace = true, features = ["python"] }
serde_json.workspace = true
session = { workspace = true, features = ["testing"] }
table.workspace = true
+tempfile = "3.0.0"
tokio-postgres = "0.7"
tokio-postgres-rustls = "0.11"
tokio-test = "0.4"
diff --git a/src/servers/src/error.rs b/src/servers/src/error.rs
index 4ebbdc55445c..2640454a94af 100644
--- a/src/servers/src/error.rs
+++ b/src/servers/src/error.rs
@@ -441,6 +441,12 @@ pub enum Error {
"Invalid parameter, physical_table is not expected when metric engine is disabled"
))]
UnexpectedPhysicalTable { location: Location },
+
+ #[snafu(display("Failed to initialize a watcher for file"))]
+ FileWatch {
+ #[snafu(source)]
+ error: notify::Error,
+ },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -462,7 +468,8 @@ impl ErrorExt for Error {
| CatalogError { .. }
| GrpcReflectionService { .. }
| BuildHttpResponse { .. }
- | Arrow { .. } => StatusCode::Internal,
+ | Arrow { .. }
+ | FileWatch { .. } => StatusCode::Internal,
UnsupportedDataType { .. } => StatusCode::Unsupported,
diff --git a/src/servers/src/mysql/server.rs b/src/servers/src/mysql/server.rs
index 785a47a120bf..a71a1dc62313 100644
--- a/src/servers/src/mysql/server.rs
+++ b/src/servers/src/mysql/server.rs
@@ -33,6 +33,7 @@ use crate::error::{Error, Result};
use crate::mysql::handler::MysqlInstanceShim;
use crate::query_handler::sql::ServerSqlQueryHandlerRef;
use crate::server::{AbortableStream, BaseTcpServer, Server};
+use crate::tls::ReloadableTlsServerConfig;
// Default size of ResultSet write buffer: 100KB
const DEFAULT_RESULT_SET_WRITE_BUFFER_SIZE: usize = 100 * 1024;
@@ -68,7 +69,7 @@ impl MysqlSpawnRef {
pub struct MysqlSpawnConfig {
// tls config
force_tls: bool,
- tls: Option<Arc<ServerConfig>>,
+ tls: Arc<ReloadableTlsServerConfig>,
// other shim config
reject_no_database: bool,
}
@@ -76,7 +77,7 @@ pub struct MysqlSpawnConfig {
impl MysqlSpawnConfig {
pub fn new(
force_tls: bool,
- tls: Option<Arc<ServerConfig>>,
+ tls: Arc<ReloadableTlsServerConfig>,
reject_no_database: bool,
) -> MysqlSpawnConfig {
MysqlSpawnConfig {
@@ -87,7 +88,7 @@ impl MysqlSpawnConfig {
}
fn tls(&self) -> Option<Arc<ServerConfig>> {
- self.tls.clone()
+ self.tls.get_server_config()
}
}
diff --git a/src/servers/src/postgres/server.rs b/src/servers/src/postgres/server.rs
index 6a4d7a112dda..3ed9f5f40ebf 100644
--- a/src/servers/src/postgres/server.rs
+++ b/src/servers/src/postgres/server.rs
@@ -29,19 +29,20 @@ use super::{MakePostgresServerHandler, MakePostgresServerHandlerBuilder};
use crate::error::Result;
use crate::query_handler::sql::ServerSqlQueryHandlerRef;
use crate::server::{AbortableStream, BaseTcpServer, Server};
-use crate::tls::TlsOption;
+use crate::tls::ReloadableTlsServerConfig;
pub struct PostgresServer {
base_server: BaseTcpServer,
make_handler: Arc<MakePostgresServerHandler>,
- tls: TlsOption,
+ tls_server_config: Arc<ReloadableTlsServerConfig>,
}
impl PostgresServer {
/// Creates a new Postgres server with provided query_handler and async runtime
pub fn new(
query_handler: ServerSqlQueryHandlerRef,
- tls: TlsOption,
+ force_tls: bool,
+ tls_server_config: Arc<ReloadableTlsServerConfig>,
io_runtime: Arc<Runtime>,
user_provider: Option<UserProviderRef>,
) -> PostgresServer {
@@ -49,14 +50,14 @@ impl PostgresServer {
MakePostgresServerHandlerBuilder::default()
.query_handler(query_handler.clone())
.user_provider(user_provider.clone())
- .force_tls(tls.should_force_tls())
+ .force_tls(force_tls)
.build()
.unwrap(),
);
PostgresServer {
base_server: BaseTcpServer::create_server("Postgres", io_runtime),
make_handler,
- tls,
+ tls_server_config,
}
}
@@ -64,12 +65,16 @@ impl PostgresServer {
&self,
io_runtime: Arc<Runtime>,
accepting_stream: AbortableStream,
- tls_acceptor: Option<Arc<TlsAcceptor>>,
) -> impl Future<Output = ()> {
let handler_maker = self.make_handler.clone();
+ let tls_server_config = self.tls_server_config.clone();
accepting_stream.for_each(move |tcp_stream| {
let io_runtime = io_runtime.clone();
- let tls_acceptor = tls_acceptor.clone();
+
+ let tls_acceptor = tls_server_config
+ .get_server_config()
+ .map(|server_config| Arc::new(TlsAcceptor::from(server_config)));
+
let handler_maker = handler_maker.clone();
async move {
@@ -119,14 +124,8 @@ impl Server for PostgresServer {
async fn start(&self, listening: SocketAddr) -> Result<SocketAddr> {
let (stream, addr) = self.base_server.bind(listening).await?;
- debug!("Starting PostgreSQL with TLS option: {:?}", self.tls);
- let tls_acceptor = self
- .tls
- .setup()?
- .map(|server_conf| Arc::new(TlsAcceptor::from(Arc::new(server_conf))));
-
let io_runtime = self.base_server.io_runtime();
- let join_handle = common_runtime::spawn_read(self.accept(io_runtime, stream, tls_acceptor));
+ let join_handle = common_runtime::spawn_read(self.accept(io_runtime, stream));
self.base_server.start_with(join_handle).await?;
Ok(addr)
diff --git a/src/servers/src/tls.rs b/src/servers/src/tls.rs
index deb695bb4558..1c0be507e723 100644
--- a/src/servers/src/tls.rs
+++ b/src/servers/src/tls.rs
@@ -13,14 +13,23 @@
// limitations under the License.
use std::fs::File;
-use std::io::{BufReader, Error, ErrorKind};
-
+use std::io::{BufReader, Error as IoError, ErrorKind};
+use std::path::Path;
+use std::sync::atomic::{AtomicUsize, Ordering};
+use std::sync::mpsc::channel;
+use std::sync::{Arc, RwLock};
+
+use common_telemetry::{error, info};
+use notify::{EventKind, RecursiveMode, Watcher};
use rustls::ServerConfig;
use rustls_pemfile::{certs, pkcs8_private_keys, rsa_private_keys};
use rustls_pki_types::{CertificateDer, PrivateKeyDer};
use serde::{Deserialize, Serialize};
+use snafu::ResultExt;
use strum::EnumString;
+use crate::error::{FileWatchSnafu, InternalIoSnafu, Result};
+
/// TlsMode is used for Mysql and Postgres server start up.
#[derive(Debug, Default, Serialize, Deserialize, Clone, PartialEq, Eq, EnumString)]
#[serde(rename_all = "snake_case")]
@@ -73,27 +82,38 @@ impl TlsOption {
tls_option
}
- pub fn setup(&self) -> Result<Option<ServerConfig>, Error> {
+ pub fn setup(&self) -> Result<Option<ServerConfig>> {
if let TlsMode::Disable = self.mode {
return Ok(None);
}
- let cert = certs(&mut BufReader::new(File::open(&self.cert_path)?))
- .collect::<Result<Vec<CertificateDer>, Error>>()?;
+ let cert = certs(&mut BufReader::new(
+ File::open(&self.cert_path).context(InternalIoSnafu)?,
+ ))
+ .collect::<std::result::Result<Vec<CertificateDer>, IoError>>()
+ .context(InternalIoSnafu)?;
let key = {
- let mut pkcs8 = pkcs8_private_keys(&mut BufReader::new(File::open(&self.key_path)?))
- .map(|key| key.map(PrivateKeyDer::from))
- .collect::<Result<Vec<PrivateKeyDer>, Error>>()?;
+ let mut pkcs8 = pkcs8_private_keys(&mut BufReader::new(
+ File::open(&self.key_path).context(InternalIoSnafu)?,
+ ))
+ .map(|key| key.map(PrivateKeyDer::from))
+ .collect::<std::result::Result<Vec<PrivateKeyDer>, IoError>>()
+ .context(InternalIoSnafu)?;
+
if !pkcs8.is_empty() {
pkcs8.remove(0)
} else {
- let mut rsa = rsa_private_keys(&mut BufReader::new(File::open(&self.key_path)?))
- .map(|key| key.map(PrivateKeyDer::from))
- .collect::<Result<Vec<PrivateKeyDer>, Error>>()?;
+ let mut rsa = rsa_private_keys(&mut BufReader::new(
+ File::open(&self.key_path).context(InternalIoSnafu)?,
+ ))
+ .map(|key| key.map(PrivateKeyDer::from))
+ .collect::<std::result::Result<Vec<PrivateKeyDer>, IoError>>()
+ .context(InternalIoSnafu)?;
if !rsa.is_empty() {
rsa.remove(0)
} else {
- return Err(Error::new(ErrorKind::InvalidInput, "invalid key"));
+ return Err(IoError::new(ErrorKind::InvalidInput, "invalid key"))
+ .context(InternalIoSnafu);
}
}
};
@@ -110,6 +130,104 @@ impl TlsOption {
pub fn should_force_tls(&self) -> bool {
!matches!(self.mode, TlsMode::Disable | TlsMode::Prefer)
}
+
+ pub fn cert_path(&self) -> &Path {
+ Path::new(&self.cert_path)
+ }
+
+ pub fn key_path(&self) -> &Path {
+ Path::new(&self.key_path)
+ }
+}
+
+/// A mutable container for TLS server config
+///
+/// This struct allows dynamic reloading of server certificates and keys
+pub struct ReloadableTlsServerConfig {
+ tls_option: TlsOption,
+ config: RwLock<Option<Arc<ServerConfig>>>,
+ version: AtomicUsize,
+}
+
+impl ReloadableTlsServerConfig {
+ /// Create server config by loading configuration from `TlsOption`
+ pub fn try_new(tls_option: TlsOption) -> Result<ReloadableTlsServerConfig> {
+ let server_config = tls_option.setup()?;
+ Ok(Self {
+ tls_option,
+ config: RwLock::new(server_config.map(Arc::new)),
+ version: AtomicUsize::new(0),
+ })
+ }
+
+ /// Reread server certificates and keys from file system.
+ pub fn reload(&self) -> Result<()> {
+ let server_config = self.tls_option.setup()?;
+ *self.config.write().unwrap() = server_config.map(Arc::new);
+ self.version.fetch_add(1, Ordering::Relaxed);
+ Ok(())
+ }
+
+    /// Get the server config held by this container
+ pub fn get_server_config(&self) -> Option<Arc<ServerConfig>> {
+ self.config.read().unwrap().clone()
+ }
+
+ /// Get associated `TlsOption`
+ pub fn get_tls_option(&self) -> &TlsOption {
+ &self.tls_option
+ }
+
+    /// Get the version of the current config
+    ///
+    /// This version is automatically incremented whenever the server config is reloaded.
+ pub fn get_version(&self) -> usize {
+ self.version.load(Ordering::Relaxed)
+ }
+}
+
+pub fn watch_tls_config(tls_server_config: Arc<ReloadableTlsServerConfig>) -> Result<()> {
+ if tls_server_config.get_tls_option().mode == TlsMode::Disable {
+ return Ok(());
+ }
+
+ let tls_server_config_for_watcher = tls_server_config.clone();
+
+ let (tx, rx) = channel::<notify::Result<notify::Event>>();
+ let mut watcher = notify::recommended_watcher(tx).context(FileWatchSnafu)?;
+
+ watcher
+ .watch(
+ tls_server_config.get_tls_option().cert_path(),
+ RecursiveMode::NonRecursive,
+ )
+ .context(FileWatchSnafu)?;
+
+ watcher
+ .watch(
+ tls_server_config.get_tls_option().key_path(),
+ RecursiveMode::NonRecursive,
+ )
+ .context(FileWatchSnafu)?;
+
+ std::thread::spawn(move || {
+ let _watcher = watcher;
+ while let Ok(res) = rx.recv() {
+ if let Ok(event) = res {
+ match event.kind {
+ EventKind::Modify(_) | EventKind::Create(_) => {
+ info!("Detected TLS cert/key file change: {:?}", event);
+ if let Err(err) = tls_server_config_for_watcher.reload() {
+ error!(err; "Failed to reload TLS server config");
+ }
+ }
+ _ => {}
+ }
+ }
+ }
+ });
+
+ Ok(())
}
#[cfg(test)]
@@ -237,4 +355,44 @@ mod tests {
assert!(!t.key_path.is_empty());
assert!(!t.cert_path.is_empty());
}
+
+ #[test]
+ fn test_tls_file_change_watch() {
+ let dir = tempfile::tempdir().unwrap();
+        let cert_path = dir.path().join("server.crt");
+ let key_path = dir.path().join("server.key");
+
+ std::fs::copy("tests/ssl/server.crt", &cert_path).expect("failed to copy cert to tmpdir");
+ std::fs::copy("tests/ssl/server-rsa.key", &key_path).expect("failed to copy key to tmpdir");
+
+ let server_tls = TlsOption {
+ mode: TlsMode::Require,
+ cert_path: cert_path
+ .clone()
+ .into_os_string()
+ .into_string()
+ .expect("failed to convert path to string"),
+ key_path: key_path
+ .clone()
+ .into_os_string()
+ .into_string()
+ .expect("failed to convert path to string"),
+ };
+
+ let server_config = Arc::new(
+ ReloadableTlsServerConfig::try_new(server_tls).expect("failed to create server config"),
+ );
+ watch_tls_config(server_config.clone()).expect("failed to watch server config");
+
+ assert_eq!(0, server_config.get_version());
+ assert!(server_config.get_server_config().is_some());
+
+ std::fs::copy("tests/ssl/server-pkcs8.key", &key_path)
+ .expect("failed to copy key to tmpdir");
+
+ // waiting for async load
+ std::thread::sleep(std::time::Duration::from_millis(100));
+ assert!(server_config.get_version() > 1);
+ assert!(server_config.get_server_config().is_some());
+ }
}
diff --git a/src/servers/tests/mysql/mysql_server_test.rs b/src/servers/tests/mysql/mysql_server_test.rs
index 18f5865a05e9..3cbac4ee9369 100644
--- a/src/servers/tests/mysql/mysql_server_test.rs
+++ b/src/servers/tests/mysql/mysql_server_test.rs
@@ -30,7 +30,7 @@ use rand::Rng;
use servers::error::Result;
use servers::mysql::server::{MysqlServer, MysqlSpawnConfig, MysqlSpawnRef};
use servers::server::Server;
-use servers::tls::TlsOption;
+use servers::tls::{ReloadableTlsServerConfig, TlsOption};
use table::test_util::MemTable;
use table::TableRef;
@@ -59,12 +59,17 @@ fn create_mysql_server(table: TableRef, opts: MysqlOpts<'_>) -> Result<Box<dyn S
provider.set_authorization_info(auth_info);
}
+ let tls_server_config = Arc::new(
+ ReloadableTlsServerConfig::try_new(opts.tls.clone())
+ .expect("Failed to load certificates and keys"),
+ );
+
Ok(MysqlServer::create_server(
io_runtime,
Arc::new(MysqlSpawnRef::new(query_handler, Some(Arc::new(provider)))),
Arc::new(MysqlSpawnConfig::new(
opts.tls.should_force_tls(),
- opts.tls.setup()?.map(Arc::new),
+ tls_server_config,
opts.reject_no_database,
)),
))
diff --git a/src/servers/tests/postgres/mod.rs b/src/servers/tests/postgres/mod.rs
index dc3ae93a6e9f..7668723df36b 100644
--- a/src/servers/tests/postgres/mod.rs
+++ b/src/servers/tests/postgres/mod.rs
@@ -29,7 +29,7 @@ use rustls_pki_types::{CertificateDer, ServerName};
use servers::error::Result;
use servers::postgres::PostgresServer;
use servers::server::Server;
-use servers::tls::TlsOption;
+use servers::tls::{ReloadableTlsServerConfig, TlsOption};
use table::test_util::MemTable;
use table::TableRef;
use tokio_postgres::{Client, Error as PgError, NoTls, SimpleQueryMessage};
@@ -60,9 +60,15 @@ fn create_postgres_server(
None
};
+ let tls_server_config = Arc::new(
+ ReloadableTlsServerConfig::try_new(tls.clone())
+ .expect("Failed to load certificates and keys"),
+ );
+
Ok(Box::new(PostgresServer::new(
instance,
- tls,
+ tls.should_force_tls(),
+ tls_server_config,
io_runtime,
user_provider,
)))
diff --git a/tests-integration/src/test_util.rs b/tests-integration/src/test_util.rs
index 0028c559b1a0..a584de8bef3c 100644
--- a/tests-integration/src/test_util.rs
+++ b/tests-integration/src/test_util.rs
@@ -50,6 +50,7 @@ use servers::postgres::PostgresServer;
use servers::query_handler::grpc::ServerGrpcQueryHandlerAdapter;
use servers::query_handler::sql::{ServerSqlQueryHandlerAdapter, SqlQueryHandler};
use servers::server::Server;
+use servers::tls::ReloadableTlsServerConfig;
use servers::Mode;
use session::context::QueryContext;
@@ -568,7 +569,10 @@ pub async fn setup_mysql_server_with_user_provider(
)),
Arc::new(MysqlSpawnConfig::new(
false,
- opts.tls.setup().unwrap().map(Arc::new),
+ Arc::new(
+ ReloadableTlsServerConfig::try_new(opts.tls.clone())
+ .expect("Failed to load certificates and keys"),
+ ),
opts.reject_no_database.unwrap_or(false),
)),
));
@@ -614,9 +618,15 @@ pub async fn setup_pg_server_with_user_provider(
addr: fe_pg_addr.clone(),
..Default::default()
};
+ let tls_server_config = Arc::new(
+ ReloadableTlsServerConfig::try_new(opts.tls.clone())
+ .expect("Failed to load certificates and keys"),
+ );
+
let fe_pg_server = Arc::new(Box::new(PostgresServer::new(
ServerSqlQueryHandlerAdapter::arc(fe_instance_ref),
- opts.tls.clone(),
+ opts.tls.should_force_tls(),
+ tls_server_config,
runtime,
user_provider,
)) as Box<dyn Server>);
|
feat
|
make tls certificates/keys reloadable (part 1) (#3335)
|
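The TLS record above revolves around one pattern: a shared, versioned container that holds the active `ServerConfig` and is refreshed by a background file watcher. A minimal std-only sketch of that hot-reload container, assuming simplified stand-ins for the rustls and notify types (none of the names below are the crate's real API):

```rust
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, RwLock};

/// Stand-in for `rustls::ServerConfig`.
struct ServerConfig {
    cert_pem: String,
}

/// Versioned, reloadable holder for the active TLS config.
struct ReloadableConfig {
    current: RwLock<Option<Arc<ServerConfig>>>,
    version: AtomicUsize,
}

impl ReloadableConfig {
    fn new(initial: Option<ServerConfig>) -> Self {
        Self {
            current: RwLock::new(initial.map(Arc::new)),
            version: AtomicUsize::new(0),
        }
    }

    /// Called by the file watcher whenever the cert/key files change.
    fn reload(&self, fresh: ServerConfig) {
        *self.current.write().unwrap() = Some(Arc::new(fresh));
        self.version.fetch_add(1, Ordering::Relaxed);
    }

    /// Each new connection grabs the latest config; connections already
    /// established keep the `Arc` they cloned earlier.
    fn get(&self) -> Option<Arc<ServerConfig>> {
        self.current.read().unwrap().clone()
    }

    fn current_version(&self) -> usize {
        self.version.load(Ordering::Relaxed)
    }
}

fn main() {
    let config = Arc::new(ReloadableConfig::new(Some(ServerConfig {
        cert_pem: "old cert".to_string(),
    })));

    // In the real server a `notify` watcher thread calls `reload` on file
    // change events; here we simply simulate one reload.
    config.reload(ServerConfig {
        cert_pem: "renewed cert".to_string(),
    });

    assert_eq!(config.current_version(), 1);
    assert_eq!(config.get().unwrap().cert_pem, "renewed cert");
}
```

Because acceptors fetch the config per connection instead of once at startup, a certificate rotation takes effect on the next connection without restarting the server or disturbing in-flight sessions.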
99565a367688f4a81a1954385a1a4170a8cff102
|
2023-12-26 12:23:34
|
Ruihang Xia
|
fix: update doc label on pr edit (#3005)
| false
|
diff --git a/.github/workflows/doc-label.yml b/.github/workflows/doc-label.yml
index ba8bf61f759d..298c7e3cecce 100644
--- a/.github/workflows/doc-label.yml
+++ b/.github/workflows/doc-label.yml
@@ -1,7 +1,7 @@
name: "PR Doc Labeler"
on:
pull_request:
- types: [opened, edited]
+ types: [opened, edited, synchronize, ready_for_review, auto_merge_enabled, labeled, unlabeled]
permissions:
pull-requests: write
@@ -16,4 +16,5 @@ jobs:
with:
configuration-path: .github/doc-label-config.yml
enable-versioned-regex: false
- repo-token: ${{ github.token }}
+ repo-token: ${{ secrets.GITHUB_TOKEN }}
+ sync-labels: 1
|
fix
|
update doc label on pr edit (#3005)
|
9d7fea902e6a87d0decf3b96f51e81c2c1569e73
|
2024-12-16 11:47:27
|
shuiyisong
|
chore: remove unused dep (#5163)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index b60615c8e54c..df817dc201cd 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -222,26 +222,6 @@ dependencies = [
"num-traits",
]
-[[package]]
-name = "approx_eq"
-version = "0.1.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b3f9eb837c6a783fbf002e3e5cc7925a3aa6893d6d42f9169517528983777590"
-
-[[package]]
-name = "aquamarine"
-version = "0.3.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d1da02abba9f9063d786eab1509833ebb2fac0f966862ca59439c76b9c566760"
-dependencies = [
- "include_dir",
- "itertools 0.10.5",
- "proc-macro-error",
- "proc-macro2",
- "quote",
- "syn 1.0.109",
-]
-
[[package]]
name = "arbitrary"
version = "1.3.2"
@@ -1310,7 +1290,6 @@ dependencies = [
"common-meta",
"moka",
"snafu 0.8.5",
- "substrait 0.12.0",
]
[[package]]
@@ -1349,7 +1328,6 @@ dependencies = [
"catalog",
"chrono",
"common-catalog",
- "common-config",
"common-error",
"common-macro",
"common-meta",
@@ -1358,7 +1336,6 @@ dependencies = [
"common-recordbatch",
"common-runtime",
"common-telemetry",
- "common-test-util",
"common-time",
"common-version",
"dashmap",
@@ -1369,7 +1346,6 @@ dependencies = [
"humantime",
"itertools 0.10.5",
"lazy_static",
- "log-store",
"meta-client",
"moka",
"object-store",
@@ -1693,7 +1669,6 @@ dependencies = [
"common-grpc",
"common-macro",
"common-meta",
- "common-options",
"common-procedure",
"common-query",
"common-recordbatch",
@@ -1722,7 +1697,6 @@ dependencies = [
"store-api",
"substrait 0.12.0",
"table",
- "temp-env",
"tempfile",
"tokio",
"tracing-appender",
@@ -1746,8 +1720,6 @@ dependencies = [
"common-query",
"common-recordbatch",
"common-telemetry",
- "datanode",
- "derive-new 0.5.9",
"enum_dispatch",
"futures-util",
"lazy_static",
@@ -1928,13 +1900,6 @@ dependencies = [
[[package]]
name = "common-catalog"
version = "0.12.0"
-dependencies = [
- "chrono",
- "common-error",
- "common-macro",
- "snafu 0.8.5",
- "tokio",
-]
[[package]]
name = "common-config"
@@ -1978,7 +1943,6 @@ dependencies = [
"datafusion",
"datatypes",
"derive_builder 0.12.0",
- "dotenv",
"futures",
"lazy_static",
"object-store",
@@ -2022,15 +1986,10 @@ dependencies = [
name = "common-frontend"
version = "0.12.0"
dependencies = [
- "api",
"async-trait",
- "common-base",
"common-error",
"common-macro",
- "common-query",
- "session",
"snafu 0.8.5",
- "sql",
]
[[package]]
@@ -2064,7 +2023,6 @@ dependencies = [
"num-traits",
"once_cell",
"paste",
- "ron",
"s2",
"serde",
"serde_json",
@@ -2353,8 +2311,6 @@ dependencies = [
"snafu 0.8.5",
"tempfile",
"tokio",
- "tokio-metrics",
- "tokio-metrics-collector",
"tokio-test",
"tokio-util",
]
@@ -2834,16 +2790,6 @@ dependencies = [
"memchr",
]
-[[package]]
-name = "ctor"
-version = "0.1.26"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6d2301688392eb071b0bf1a37be05c469d3cc4dbbd95df672fe28ab021e6a096"
-dependencies = [
- "quote",
- "syn 1.0.109",
-]
-
[[package]]
name = "darling"
version = "0.14.4"
@@ -3386,17 +3332,6 @@ dependencies = [
"syn 1.0.109",
]
-[[package]]
-name = "derive-new"
-version = "0.5.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3418329ca0ad70234b9735dc4ceed10af4df60eff9c8e7b06cb5e520d92c3535"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn 1.0.109",
-]
-
[[package]]
name = "derive-new"
version = "0.7.0"
@@ -3919,7 +3854,6 @@ dependencies = [
"common-error",
"common-macro",
"common-procedure",
- "common-procedure-test",
"common-query",
"common-recordbatch",
"common-telemetry",
@@ -4067,7 +4001,6 @@ dependencies = [
"itertools 0.10.5",
"lazy_static",
"meta-client",
- "minstant",
"nom",
"num-traits",
"operator",
@@ -4114,15 +4047,6 @@ dependencies = [
"percent-encoding",
]
-[[package]]
-name = "format_num"
-version = "0.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "14ac05eb8d2eb4ed1eeff847911deae077b0b53332465de9d6a26b0ea9961bc8"
-dependencies = [
- "regex",
-]
-
[[package]]
name = "fragile"
version = "2.0.0"
@@ -4145,7 +4069,6 @@ dependencies = [
"common-config",
"common-datasource",
"common-error",
- "common-frontend",
"common-function",
"common-grpc",
"common-macro",
@@ -4167,7 +4090,6 @@ dependencies = [
"lazy_static",
"log-store",
"meta-client",
- "meta-srv",
"opentelemetry-proto 0.5.0",
"operator",
"partition",
@@ -5244,25 +5166,6 @@ version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cb56e1aa765b4b4f3aadfab769793b7087bb03a4ea4920644a6d238e2df5b9ed"
-[[package]]
-name = "include_dir"
-version = "0.7.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "923d117408f1e49d914f1a379a309cffe4f18c05cf4e3d12e613a15fc81bd0dd"
-dependencies = [
- "include_dir_macros",
-]
-
-[[package]]
-name = "include_dir_macros"
-version = "0.7.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7cab85a7ed0bd5f0e76d93846e0147172bed2e2d3f859bcc33a8d9699cad1a75"
-dependencies = [
- "proc-macro2",
- "quote",
-]
-
[[package]]
name = "index"
version = "0.12.0"
@@ -6535,7 +6438,6 @@ name = "metric-engine"
version = "0.12.0"
dependencies = [
"api",
- "aquamarine",
"async-trait",
"base64 0.21.7",
"common-base",
@@ -6600,16 +6502,6 @@ dependencies = [
"adler2",
]
-[[package]]
-name = "minstant"
-version = "0.1.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1fb9b5c752f145ac5046bccc3c4f62892e3c950c1d1eab80c5949cd68a2078db"
-dependencies = [
- "ctor",
- "web-time 1.1.0",
-]
-
[[package]]
name = "mio"
version = "0.8.11"
@@ -6639,7 +6531,6 @@ name = "mito2"
version = "0.12.0"
dependencies = [
"api",
- "aquamarine",
"async-channel 1.9.0",
"async-stream",
"async-trait",
@@ -6653,7 +6544,6 @@ dependencies = [
"common-function",
"common-macro",
"common-meta",
- "common-procedure-test",
"common-query",
"common-recordbatch",
"common-runtime",
@@ -8090,7 +7980,7 @@ dependencies = [
"async-trait",
"bytes",
"chrono",
- "derive-new 0.7.0",
+ "derive-new",
"futures",
"hex",
"lazy-regex",
@@ -8230,7 +8120,6 @@ dependencies = [
"query",
"rayon",
"regex",
- "ron",
"serde",
"serde_json",
"session",
@@ -8642,10 +8531,7 @@ dependencies = [
"greptime-proto",
"lazy_static",
"prometheus",
- "promql-parser",
"prost 0.12.6",
- "query",
- "session",
"snafu 0.8.5",
"tokio",
]
@@ -8993,7 +8879,6 @@ version = "0.12.0"
dependencies = [
"ahash 0.8.11",
"api",
- "approx_eq",
"arc-swap",
"arrow",
"arrow-schema",
@@ -9025,7 +8910,6 @@ dependencies = [
"datafusion-sql",
"datatypes",
"fastrand",
- "format_num",
"futures",
"futures-util",
"greptime-proto",
@@ -9053,9 +8937,7 @@ dependencies = [
"sql",
"sqlparser 0.45.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=54a267ac89c09b11c0c88934690530807185d3e7)",
"statrs",
- "stats-cli",
"store-api",
- "streaming-stats",
"substrait 0.12.0",
"table",
"tokio",
@@ -10546,7 +10428,6 @@ dependencies = [
"datatypes",
"futures",
"lazy_static",
- "log-store",
"once_cell",
"operator",
"paste",
@@ -10569,7 +10450,6 @@ dependencies = [
"sql",
"table",
"tokio",
- "tokio-test",
]
[[package]]
@@ -10911,7 +10791,6 @@ dependencies = [
"tokio-postgres-rustls",
"tokio-rustls 0.26.0",
"tokio-stream",
- "tokio-test",
"tokio-util",
"tonic 0.11.0",
"tonic-reflection",
@@ -11545,22 +11424,11 @@ dependencies = [
"rand",
]
-[[package]]
-name = "stats-cli"
-version = "3.0.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c8786c4fc8a91bc4fcd90aed33413f79e4dc9811f24ba14d1d59adf57cf1c871"
-dependencies = [
- "clap 2.34.0",
- "num-traits",
-]
-
[[package]]
name = "store-api"
version = "0.12.0"
dependencies = [
"api",
- "aquamarine",
"async-stream",
"async-trait",
"common-base",
@@ -11596,15 +11464,6 @@ version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9091b6114800a5f2141aee1d1b9d6ca3592ac062dc5decb3764ec5895a47b4eb"
-[[package]]
-name = "streaming-stats"
-version = "0.2.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b0d670ce4e348a2081843569e0f79b21c99c91bb9028b3b3ecb0f050306de547"
-dependencies = [
- "num-traits",
-]
-
[[package]]
name = "strfmt"
version = "0.2.4"
@@ -12562,30 +12421,6 @@ dependencies = [
"syn 2.0.90",
]
-[[package]]
-name = "tokio-metrics"
-version = "0.3.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "eace09241d62c98b7eeb1107d4c5c64ca3bd7da92e8c218c153ab3a78f9be112"
-dependencies = [
- "futures-util",
- "pin-project-lite",
- "tokio",
- "tokio-stream",
-]
-
-[[package]]
-name = "tokio-metrics-collector"
-version = "0.2.1"
-source = "git+https://github.com/MichaelScofield/tokio-metrics-collector.git?rev=89d692d5753d28564a7aac73c6ac5aba22243ba0#89d692d5753d28564a7aac73c6ac5aba22243ba0"
-dependencies = [
- "lazy_static",
- "parking_lot 0.12.3",
- "prometheus",
- "tokio",
- "tokio-metrics",
-]
-
[[package]]
name = "tokio-postgres"
version = "0.7.12"
@@ -13012,7 +12847,7 @@ dependencies = [
"tracing-core",
"tracing-log 0.2.0",
"tracing-subscriber",
- "web-time 0.2.4",
+ "web-time",
]
[[package]]
@@ -13783,16 +13618,6 @@ dependencies = [
"wasm-bindgen",
]
-[[package]]
-name = "web-time"
-version = "1.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb"
-dependencies = [
- "js-sys",
- "wasm-bindgen",
-]
-
[[package]]
name = "webbrowser"
version = "0.8.15"
diff --git a/src/cache/Cargo.toml b/src/cache/Cargo.toml
index 9a2888e5fc13..07870fa904a5 100644
--- a/src/cache/Cargo.toml
+++ b/src/cache/Cargo.toml
@@ -11,4 +11,3 @@ common-macro.workspace = true
common-meta.workspace = true
moka.workspace = true
snafu.workspace = true
-substrait.workspace = true
diff --git a/src/catalog/Cargo.toml b/src/catalog/Cargo.toml
index a5ad92e8917c..b7e19a44b9ff 100644
--- a/src/catalog/Cargo.toml
+++ b/src/catalog/Cargo.toml
@@ -18,7 +18,6 @@ async-stream.workspace = true
async-trait = "0.1"
bytes.workspace = true
common-catalog.workspace = true
-common-config.workspace = true
common-error.workspace = true
common-macro.workspace = true
common-meta.workspace = true
@@ -58,7 +57,5 @@ catalog = { workspace = true, features = ["testing"] }
chrono.workspace = true
common-meta = { workspace = true, features = ["testing"] }
common-query = { workspace = true, features = ["testing"] }
-common-test-util.workspace = true
-log-store.workspace = true
object-store.workspace = true
tokio.workspace = true
diff --git a/src/cli/Cargo.toml b/src/cli/Cargo.toml
index b49aa00ee2cc..de2abc15f18e 100644
--- a/src/cli/Cargo.toml
+++ b/src/cli/Cargo.toml
@@ -23,7 +23,6 @@ common-error.workspace = true
common-grpc.workspace = true
common-macro.workspace = true
common-meta.workspace = true
-common-options.workspace = true
common-procedure.workspace = true
common-query.workspace = true
common-recordbatch.workspace = true
@@ -61,5 +60,4 @@ client = { workspace = true, features = ["testing"] }
common-test-util.workspace = true
common-version.workspace = true
serde.workspace = true
-temp-env = "0.3"
tempfile.workspace = true
diff --git a/src/client/Cargo.toml b/src/client/Cargo.toml
index 9d198ab9fbae..f8702fe6ac16 100644
--- a/src/client/Cargo.toml
+++ b/src/client/Cargo.toml
@@ -42,8 +42,6 @@ tonic.workspace = true
[dev-dependencies]
common-grpc-expr.workspace = true
-datanode.workspace = true
-derive-new = "0.5"
tracing = "0.1"
[dev-dependencies.substrait_proto]
diff --git a/src/common/catalog/Cargo.toml b/src/common/catalog/Cargo.toml
index 61f49ab0e4b3..051675fe93e1 100644
--- a/src/common/catalog/Cargo.toml
+++ b/src/common/catalog/Cargo.toml
@@ -8,10 +8,5 @@ license.workspace = true
workspace = true
[dependencies]
-common-error.workspace = true
-common-macro.workspace = true
-snafu.workspace = true
[dev-dependencies]
-chrono.workspace = true
-tokio.workspace = true
diff --git a/src/common/datasource/Cargo.toml b/src/common/datasource/Cargo.toml
index 65f1d18a6603..16137e6b3e51 100644
--- a/src/common/datasource/Cargo.toml
+++ b/src/common/datasource/Cargo.toml
@@ -48,5 +48,4 @@ url = "2.3"
[dev-dependencies]
common-telemetry.workspace = true
common-test-util.workspace = true
-dotenv.workspace = true
uuid.workspace = true
diff --git a/src/common/frontend/Cargo.toml b/src/common/frontend/Cargo.toml
index 2aa111fa1af0..7c3b705bddcd 100644
--- a/src/common/frontend/Cargo.toml
+++ b/src/common/frontend/Cargo.toml
@@ -5,12 +5,7 @@ edition.workspace = true
license.workspace = true
[dependencies]
-api.workspace = true
async-trait.workspace = true
-common-base.workspace = true
common-error.workspace = true
common-macro.workspace = true
-common-query.workspace = true
-session.workspace = true
snafu.workspace = true
-sql.workspace = true
diff --git a/src/common/function/Cargo.toml b/src/common/function/Cargo.toml
index 29cefb1e7547..e7cc25ca1325 100644
--- a/src/common/function/Cargo.toml
+++ b/src/common/function/Cargo.toml
@@ -51,6 +51,5 @@ wkt = { version = "0.11", optional = true }
[dev-dependencies]
approx = "0.5"
-ron = "0.7"
serde = { version = "1.0", features = ["derive"] }
tokio.workspace = true
diff --git a/src/common/runtime/Cargo.toml b/src/common/runtime/Cargo.toml
index c249ba221ecd..7a12a03ba9cd 100644
--- a/src/common/runtime/Cargo.toml
+++ b/src/common/runtime/Cargo.toml
@@ -35,8 +35,6 @@ serde_json.workspace = true
snafu.workspace = true
tempfile.workspace = true
tokio.workspace = true
-tokio-metrics = "0.3"
-tokio-metrics-collector = { git = "https://github.com/MichaelScofield/tokio-metrics-collector.git", rev = "89d692d5753d28564a7aac73c6ac5aba22243ba0" }
tokio-util.workspace = true
[dev-dependencies]
diff --git a/src/file-engine/Cargo.toml b/src/file-engine/Cargo.toml
index f9cd1113f535..1a665d667607 100644
--- a/src/file-engine/Cargo.toml
+++ b/src/file-engine/Cargo.toml
@@ -38,5 +38,4 @@ tokio.workspace = true
[dev-dependencies]
api.workspace = true
-common-procedure-test.workspace = true
common-test-util.workspace = true
diff --git a/src/flow/Cargo.toml b/src/flow/Cargo.toml
index ed2a1dc1c474..ffba0618daaf 100644
--- a/src/flow/Cargo.toml
+++ b/src/flow/Cargo.toml
@@ -47,7 +47,6 @@ hydroflow = { git = "https://github.com/GreptimeTeam/hydroflow.git", branch = "m
itertools.workspace = true
lazy_static.workspace = true
meta-client.workspace = true
-minstant = "0.1.7"
nom = "7.1.3"
num-traits = "0.2"
operator.workspace = true
diff --git a/src/frontend/Cargo.toml b/src/frontend/Cargo.toml
index 01f06eb03338..e21819c568f2 100644
--- a/src/frontend/Cargo.toml
+++ b/src/frontend/Cargo.toml
@@ -25,7 +25,6 @@ common-catalog.workspace = true
common-config.workspace = true
common-datasource.workspace = true
common-error.workspace = true
-common-frontend.workspace = true
common-function.workspace = true
common-grpc.workspace = true
common-macro.workspace = true
@@ -71,7 +70,6 @@ common-test-util.workspace = true
datanode.workspace = true
datatypes.workspace = true
futures = "0.3"
-meta-srv = { workspace = true, features = ["mock"] }
serde_json.workspace = true
strfmt = "0.2"
tower.workspace = true
diff --git a/src/metric-engine/Cargo.toml b/src/metric-engine/Cargo.toml
index 85aa371594e8..666ac09faa75 100644
--- a/src/metric-engine/Cargo.toml
+++ b/src/metric-engine/Cargo.toml
@@ -9,7 +9,6 @@ workspace = true
[dependencies]
api.workspace = true
-aquamarine.workspace = true
async-trait.workspace = true
base64.workspace = true
common-base.workspace = true
diff --git a/src/mito2/Cargo.toml b/src/mito2/Cargo.toml
index eecb79440a2e..181ba0f43407 100644
--- a/src/mito2/Cargo.toml
+++ b/src/mito2/Cargo.toml
@@ -13,7 +13,6 @@ workspace = true
[dependencies]
api.workspace = true
-aquamarine.workspace = true
async-channel = "1.9"
async-stream.workspace = true
async-trait = "0.1"
@@ -77,7 +76,6 @@ uuid.workspace = true
[dev-dependencies]
common-function.workspace = true
common-meta = { workspace = true, features = ["testing"] }
-common-procedure-test.workspace = true
common-test-util.workspace = true
criterion = "0.4"
dotenv.workspace = true
diff --git a/src/pipeline/Cargo.toml b/src/pipeline/Cargo.toml
index 4657f39a6866..9c26d1a52fa6 100644
--- a/src/pipeline/Cargo.toml
+++ b/src/pipeline/Cargo.toml
@@ -63,7 +63,6 @@ yaml-rust = "0.4"
catalog = { workspace = true, features = ["testing"] }
criterion = { version = "0.4", features = ["html_reports"] }
rayon = "1.0"
-ron = "0.7"
serde = { version = "1.0", features = ["derive"] }
session = { workspace = true, features = ["testing"] }
diff --git a/src/promql/Cargo.toml b/src/promql/Cargo.toml
index 4039328528c3..7b51651a7ca9 100644
--- a/src/promql/Cargo.toml
+++ b/src/promql/Cargo.toml
@@ -22,11 +22,8 @@ futures = "0.3"
greptime-proto.workspace = true
lazy_static.workspace = true
prometheus.workspace = true
-promql-parser.workspace = true
prost.workspace = true
snafu.workspace = true
[dev-dependencies]
-query.workspace = true
-session = { workspace = true, features = ["testing"] }
tokio.workspace = true
diff --git a/src/query/Cargo.toml b/src/query/Cargo.toml
index 863a5a1c33d3..8139ea3aafbb 100644
--- a/src/query/Cargo.toml
+++ b/src/query/Cargo.toml
@@ -67,13 +67,11 @@ tokio.workspace = true
uuid.workspace = true
[dev-dependencies]
-approx_eq = "0.1"
arrow.workspace = true
catalog = { workspace = true, features = ["testing"] }
common-macro.workspace = true
common-query = { workspace = true, features = ["testing"] }
fastrand = "2.0"
-format_num = "0.1"
num = "0.4"
num-traits = "0.2"
paste = "1.0"
@@ -83,8 +81,6 @@ serde.workspace = true
serde_json.workspace = true
session = { workspace = true, features = ["testing"] }
statrs = "0.16"
-stats-cli = "3.0"
store-api.workspace = true
-streaming-stats = "0.2"
table = { workspace = true, features = ["testing"] }
tokio-stream.workspace = true
diff --git a/src/script/Cargo.toml b/src/script/Cargo.toml
index 88d10c9509c7..136eb3c4fc4c 100644
--- a/src/script/Cargo.toml
+++ b/src/script/Cargo.toml
@@ -80,13 +80,11 @@ tokio.workspace = true
catalog = { workspace = true, features = ["testing"] }
common-test-util.workspace = true
criterion = { version = "0.4", features = ["html_reports", "async_tokio"] }
-log-store.workspace = true
operator.workspace = true
rayon = "1.0"
ron = "0.7"
serde = { version = "1.0", features = ["derive"] }
session = { workspace = true, features = ["testing"] }
-tokio-test = "0.4"
[[bench]]
name = "py_benchmark"
diff --git a/src/servers/Cargo.toml b/src/servers/Cargo.toml
index ddfeaf27bd45..a90fb880e20d 100644
--- a/src/servers/Cargo.toml
+++ b/src/servers/Cargo.toml
@@ -134,7 +134,6 @@ table.workspace = true
tempfile = "3.0.0"
tokio-postgres = "0.7"
tokio-postgres-rustls = "0.12"
-tokio-test = "0.4"
[target.'cfg(unix)'.dev-dependencies]
pprof = { version = "0.13", features = ["criterion", "flamegraph"] }
diff --git a/src/store-api/Cargo.toml b/src/store-api/Cargo.toml
index 7c974661e315..1214ae3d4001 100644
--- a/src/store-api/Cargo.toml
+++ b/src/store-api/Cargo.toml
@@ -9,7 +9,6 @@ workspace = true
[dependencies]
api.workspace = true
-aquamarine.workspace = true
async-trait.workspace = true
common-base.workspace = true
common-error.workspace = true
|
chore
|
remove unused dep (#5163)
|
da68d8ce4b0e3f6f5314de5ef361268812632098
|
2023-11-20 12:17:42
|
fys
|
feat: add random weighted choose in load_based selector (#2234)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index e08c49877238..eb8bac66e396 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4723,6 +4723,7 @@ dependencies = [
"h2",
"http-body",
"humantime-serde",
+ "itertools 0.10.5",
"lazy_static",
"once_cell",
"parking_lot 0.12.1",
diff --git a/src/meta-client/src/client.rs b/src/meta-client/src/client.rs
index 641eace2fbcc..a5e9d35c8aa6 100644
--- a/src/meta-client/src/client.rs
+++ b/src/meta-client/src/client.rs
@@ -385,7 +385,7 @@ impl MetaClient {
mod tests {
use api::v1::meta::{HeartbeatRequest, Peer};
use meta_srv::metasrv::SelectorContext;
- use meta_srv::selector::{Namespace, Selector};
+ use meta_srv::selector::{Namespace, Selector, SelectorOptions};
use meta_srv::Result as MetaResult;
use super::*;
@@ -547,7 +547,12 @@ mod tests {
type Context = SelectorContext;
type Output = Vec<Peer>;
- async fn select(&self, _ns: Namespace, _ctx: &Self::Context) -> MetaResult<Self::Output> {
+ async fn select(
+ &self,
+ _ns: Namespace,
+ _ctx: &Self::Context,
+ _opts: SelectorOptions,
+ ) -> MetaResult<Self::Output> {
Ok(vec![
Peer {
id: 0,
diff --git a/src/meta-srv/Cargo.toml b/src/meta-srv/Cargo.toml
index 829c315708a2..f38f82be64bd 100644
--- a/src/meta-srv/Cargo.toml
+++ b/src/meta-srv/Cargo.toml
@@ -34,6 +34,7 @@ futures.workspace = true
h2 = "0.3"
http-body = "0.4"
humantime-serde.workspace = true
+itertools.workspace = true
lazy_static.workspace = true
once_cell.workspace = true
parking_lot = "0.12"
diff --git a/src/meta-srv/src/bootstrap.rs b/src/meta-srv/src/bootstrap.rs
index 975a3fb66f87..dcf112e2e754 100644
--- a/src/meta-srv/src/bootstrap.rs
+++ b/src/meta-srv/src/bootstrap.rs
@@ -188,7 +188,7 @@ pub async fn build_meta_srv(opts: &MetaSrvOptions, plugins: Plugins) -> Result<M
let in_memory = Arc::new(MemoryKvBackend::new()) as ResettableKvBackendRef;
let selector = match opts.selector {
- SelectorType::LoadBased => Arc::new(LoadBasedSelector) as SelectorRef,
+ SelectorType::LoadBased => Arc::new(LoadBasedSelector::default()) as SelectorRef,
SelectorType::LeaseBased => Arc::new(LeaseBasedSelector) as SelectorRef,
};
diff --git a/src/meta-srv/src/error.rs b/src/meta-srv/src/error.rs
index 649383f06d1b..600c188e3a91 100644
--- a/src/meta-srv/src/error.rs
+++ b/src/meta-srv/src/error.rs
@@ -18,6 +18,7 @@ use common_macro::stack_trace_debug;
use common_meta::peer::Peer;
use common_meta::DatanodeId;
use common_runtime::JoinError;
+use rand::distributions::WeightedError;
use servers::define_into_tonic_status;
use snafu::{Location, Snafu};
use store_api::storage::RegionId;
@@ -114,13 +115,13 @@ pub enum Error {
},
#[snafu(display(
- "Failed to request Datanode, expected: {}, but only {} available",
- expected,
+ "Failed to request Datanode, required: {}, but only {} available",
+ required,
available
))]
NoEnoughAvailableDatanode {
location: Location,
- expected: usize,
+ required: usize,
available: usize,
},
@@ -562,6 +563,16 @@ pub enum Error {
operation: String,
location: Location,
},
+
+ #[snafu(display("Failed to set weight array"))]
+ WeightArray {
+ #[snafu(source)]
+ error: WeightedError,
+ location: Location,
+ },
+
+ #[snafu(display("Weight array is not set"))]
+ NotSetWeightArray { location: Location },
}
impl Error {
@@ -611,6 +622,8 @@ impl ErrorExt for Error {
| Error::NoEnoughAvailableDatanode { .. }
| Error::PublishMessage { .. }
| Error::Join { .. }
+ | Error::WeightArray { .. }
+ | Error::NotSetWeightArray { .. }
| Error::Unsupported { .. } => StatusCode::Internal,
Error::TableAlreadyExists { .. } => StatusCode::TableAlreadyExists,
Error::EmptyKey { .. }
diff --git a/src/meta-srv/src/handler/collect_stats_handler.rs b/src/meta-srv/src/handler/collect_stats_handler.rs
index a52a69be6e52..536748aaaf08 100644
--- a/src/meta-srv/src/handler/collect_stats_handler.rs
+++ b/src/meta-srv/src/handler/collect_stats_handler.rs
@@ -42,10 +42,7 @@ impl HeartbeatHandler for CollectStatsHandler {
match Stat::try_from(req.clone()) {
Ok(stat) => {
- // If stat is empty, it means the request is a mailbox response
- if !stat.is_empty() {
- let _ = acc.stat.insert(stat);
- }
+ let _ = acc.stat.insert(stat);
}
Err(err) => {
warn!("Incomplete heartbeat data: {:?}, err: {:?}", req, err);
diff --git a/src/meta-srv/src/handler/mailbox_handler.rs b/src/meta-srv/src/handler/mailbox_handler.rs
index 8ffbf9aa21ef..4bc3b543ba06 100644
--- a/src/meta-srv/src/handler/mailbox_handler.rs
+++ b/src/meta-srv/src/handler/mailbox_handler.rs
@@ -34,6 +34,7 @@ impl HeartbeatHandler for MailboxHandler {
) -> Result<()> {
if let Some(message) = &req.mailbox_message {
ctx.mailbox.on_recv(message.id, Ok(message.clone())).await?;
+ ctx.set_skip_all();
}
Ok(())
diff --git a/src/meta-srv/src/handler/node_stat.rs b/src/meta-srv/src/handler/node_stat.rs
index 609e806296d6..4e9343da35b7 100644
--- a/src/meta-srv/src/handler/node_stat.rs
+++ b/src/meta-srv/src/handler/node_stat.rs
@@ -23,7 +23,7 @@ use store_api::storage::RegionId;
use crate::error::{Error, InvalidHeartbeatRequestSnafu};
use crate::keys::StatKey;
-#[derive(Debug, Default, Serialize, Deserialize)]
+#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct Stat {
pub timestamp_millis: i64,
pub cluster_id: u64,
@@ -42,7 +42,7 @@ pub struct Stat {
pub node_epoch: u64,
}
-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RegionStat {
/// The region_id.
pub id: u64,
diff --git a/src/meta-srv/src/keys.rs b/src/meta-srv/src/keys.rs
index 0bfd3bb733ed..cb67af41cbd8 100644
--- a/src/meta-srv/src/keys.rs
+++ b/src/meta-srv/src/keys.rs
@@ -187,7 +187,7 @@ impl TryFrom<Vec<u8>> for StatKey {
}
}
-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(transparent)]
pub struct StatValue {
pub stats: Vec<Stat>,
@@ -198,6 +198,11 @@ impl StatValue {
pub fn region_num(&self) -> Option<u64> {
self.stats.last().map(|x| x.region_num)
}
+
+ /// Get the latest node addr.
+ pub fn node_addr(&self) -> Option<String> {
+ self.stats.last().map(|x| x.addr.clone())
+ }
}
impl TryFrom<StatValue> for Vec<u8> {
@@ -365,6 +370,32 @@ mod tests {
assert_eq!(new_value, value);
}
+ #[test]
+ fn test_get_addr_from_stat_val() {
+ let empty = StatValue { stats: vec![] };
+ let addr = empty.node_addr();
+ assert!(addr.is_none());
+
+ let stat_val = StatValue {
+ stats: vec![
+ Stat {
+ addr: "1".to_string(),
+ ..Default::default()
+ },
+ Stat {
+ addr: "2".to_string(),
+ ..Default::default()
+ },
+ Stat {
+ addr: "3".to_string(),
+ ..Default::default()
+ },
+ ],
+ };
+ let addr = stat_val.node_addr().unwrap();
+ assert_eq!("3", addr);
+ }
+
#[test]
fn test_get_region_num_from_stat_val() {
let empty = StatValue { stats: vec![] };
diff --git a/src/meta-srv/src/procedure/region_failover.rs b/src/meta-srv/src/procedure/region_failover.rs
index 1ab93ce8e6b6..2694f8c88a13 100644
--- a/src/meta-srv/src/procedure/region_failover.rs
+++ b/src/meta-srv/src/procedure/region_failover.rs
@@ -400,7 +400,7 @@ mod tests {
use crate::cluster::MetaPeerClientBuilder;
use crate::handler::{HeartbeatMailbox, Pusher, Pushers};
use crate::lock::memory::MemLock;
- use crate::selector::{Namespace, Selector};
+ use crate::selector::{Namespace, Selector, SelectorOptions};
use crate::service::mailbox::Channel;
use crate::test_util;
@@ -413,7 +413,12 @@ mod tests {
type Context = SelectorContext;
type Output = Vec<Peer>;
- async fn select(&self, _ns: Namespace, _ctx: &Self::Context) -> Result<Self::Output> {
+ async fn select(
+ &self,
+ _ns: Namespace,
+ _ctx: &Self::Context,
+ _opts: SelectorOptions,
+ ) -> Result<Self::Output> {
let mut rng = rand::thread_rng();
let mut nodes = self.nodes.clone();
nodes.shuffle(&mut rng);
@@ -711,7 +716,12 @@ mod tests {
type Context = SelectorContext;
type Output = Vec<Peer>;
- async fn select(&self, _ns: Namespace, _ctx: &Self::Context) -> Result<Self::Output> {
+ async fn select(
+ &self,
+ _ns: Namespace,
+ _ctx: &Self::Context,
+ _opts: SelectorOptions,
+ ) -> Result<Self::Output> {
let mut peers = self.peers.lock().unwrap();
Ok(if let Some(Some(peer)) = peers.pop() {
vec![peer]
diff --git a/src/meta-srv/src/procedure/region_failover/failover_start.rs b/src/meta-srv/src/procedure/region_failover/failover_start.rs
index e31de684ca3b..d3c2dea8f261 100644
--- a/src/meta-srv/src/procedure/region_failover/failover_start.rs
+++ b/src/meta-srv/src/procedure/region_failover/failover_start.rs
@@ -24,6 +24,7 @@ use snafu::ensure;
use super::deactivate_region::DeactivateRegion;
use super::{RegionFailoverContext, State};
use crate::error::{RegionFailoverCandidatesNotFoundSnafu, Result, RetryLaterSnafu};
+use crate::selector::SelectorOptions;
#[derive(Serialize, Deserialize, Debug)]
pub(super) struct RegionFailoverStart {
@@ -50,9 +51,10 @@ impl RegionFailoverStart {
selector_ctx.table_id = Some(failed_region.table_id);
let cluster_id = failed_region.cluster_id;
+ let opts = SelectorOptions::default();
let candidates = ctx
.selector
- .select(cluster_id, &selector_ctx)
+ .select(cluster_id, &selector_ctx, opts)
.await?
.iter()
.filter_map(|p| {
diff --git a/src/meta-srv/src/selector.rs b/src/meta-srv/src/selector.rs
index 58a83429c7b1..140f4e5711d5 100644
--- a/src/meta-srv/src/selector.rs
+++ b/src/meta-srv/src/selector.rs
@@ -12,8 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+mod common;
pub mod lease_based;
pub mod load_based;
+mod weight_compute;
+mod weighted_choose;
use serde::{Deserialize, Serialize};
@@ -27,7 +30,29 @@ pub trait Selector: Send + Sync {
type Context;
type Output;
- async fn select(&self, ns: Namespace, ctx: &Self::Context) -> Result<Self::Output>;
+ async fn select(
+ &self,
+ ns: Namespace,
+ ctx: &Self::Context,
+ opts: SelectorOptions,
+ ) -> Result<Self::Output>;
+}
+
+#[derive(Debug)]
+pub struct SelectorOptions {
+ /// Minimum number of selected results.
+ pub min_required_items: usize,
+ /// Whether duplicates are allowed in the selected result, default false.
+ pub allow_duplication: bool,
+}
+
+impl Default for SelectorOptions {
+ fn default() -> Self {
+ Self {
+ min_required_items: 1,
+ allow_duplication: false,
+ }
+ }
}
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize, Default)]
diff --git a/src/meta-srv/src/selector/common.rs b/src/meta-srv/src/selector/common.rs
new file mode 100644
index 000000000000..8d807b7eb26c
--- /dev/null
+++ b/src/meta-srv/src/selector/common.rs
@@ -0,0 +1,192 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::collections::HashSet;
+
+use api::v1::meta::Peer;
+use snafu::ensure;
+
+use super::weighted_choose::{WeightedChoose, WeightedItem};
+use crate::error;
+use crate::error::Result;
+use crate::selector::SelectorOptions;
+
+/// According to the `opts`, choose peers from the `weight_array` through `weighted_choose`.
+pub fn choose_peers<W>(
+ mut weight_array: Vec<WeightedItem<Peer>>,
+ opts: &SelectorOptions,
+ weighted_choose: &mut W,
+) -> Result<Vec<Peer>>
+where
+ W: WeightedChoose<Peer>,
+{
+ let min_required_items = opts.min_required_items;
+ ensure!(
+ !weight_array.is_empty(),
+ error::NoEnoughAvailableDatanodeSnafu {
+ required: min_required_items,
+ available: 0_usize,
+ }
+ );
+
+ if opts.allow_duplication {
+ weighted_choose.set_weight_array(weight_array)?;
+ (0..min_required_items)
+ .map(|_| weighted_choose.choose_one())
+ .collect::<Result<_>>()
+ } else {
+ let weight_array_len = weight_array.len();
+
+ // When opts.allow_duplication is false, we need to check that the length of the weighted array is greater than
+ // or equal to min_required_items, otherwise it may cause an infinite loop.
+ ensure!(
+ weight_array_len >= min_required_items,
+ error::NoEnoughAvailableDatanodeSnafu {
+ required: min_required_items,
+ available: weight_array_len,
+ }
+ );
+
+ if weight_array_len == min_required_items {
+ return Ok(weight_array.into_iter().map(|item| item.item).collect());
+ }
+
+ weighted_choose.set_weight_array(weight_array.clone())?;
+
+ // Assume min_required_items is 3, weight_array_len is 100, then we can choose 3 items from the weight array
+ // and return. But assume min_required_items is 99, weight_array_len is 100. It's not cheap to choose 99 items
+ // from the weight array. So we can reverse choose 1 item from the weight array, and return the remaining 99
+ // items.
+ if min_required_items * 2 > weight_array_len {
+ let select_num = weight_array_len - min_required_items;
+ let mut selected = HashSet::with_capacity(select_num);
+ while selected.len() < select_num {
+ let item = weighted_choose.reverse_choose_one()?;
+ selected.insert(item);
+ }
+ weight_array.retain(|item| !selected.contains(&item.item));
+ Ok(weight_array.into_iter().map(|item| item.item).collect())
+ } else {
+ let mut selected = HashSet::with_capacity(min_required_items);
+ while selected.len() < min_required_items {
+ let item = weighted_choose.choose_one()?;
+ selected.insert(item);
+ }
+ Ok(selected.into_iter().collect())
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::collections::HashSet;
+
+ use api::v1::meta::Peer;
+
+ use crate::selector::common::choose_peers;
+ use crate::selector::weighted_choose::{RandomWeightedChoose, WeightedItem};
+ use crate::selector::SelectorOptions;
+
+ #[test]
+ fn test_choose_peers() {
+ let weight_array = vec![
+ WeightedItem {
+ item: Peer {
+ id: 1,
+ addr: "127.0.0.1:3001".to_string(),
+ },
+ weight: 1,
+ reverse_weight: 1,
+ },
+ WeightedItem {
+ item: Peer {
+ id: 2,
+ addr: "127.0.0.1:3001".to_string(),
+ },
+ weight: 1,
+ reverse_weight: 1,
+ },
+ WeightedItem {
+ item: Peer {
+ id: 3,
+ addr: "127.0.0.1:3001".to_string(),
+ },
+ weight: 1,
+ reverse_weight: 1,
+ },
+ WeightedItem {
+ item: Peer {
+ id: 4,
+ addr: "127.0.0.1:3001".to_string(),
+ },
+ weight: 1,
+ reverse_weight: 1,
+ },
+ WeightedItem {
+ item: Peer {
+ id: 5,
+ addr: "127.0.0.1:3001".to_string(),
+ },
+ weight: 1,
+ reverse_weight: 1,
+ },
+ ];
+
+ for i in 1..=5 {
+ let opts = SelectorOptions {
+ min_required_items: i,
+ allow_duplication: false,
+ };
+
+ let selected_peers: HashSet<_> = choose_peers(
+ weight_array.clone(),
+ &opts,
+ &mut RandomWeightedChoose::default(),
+ )
+ .unwrap()
+ .into_iter()
+ .collect();
+
+ assert_eq!(i, selected_peers.len());
+ }
+
+ let opts = SelectorOptions {
+ min_required_items: 6,
+ allow_duplication: false,
+ };
+
+ let selected_result = choose_peers(
+ weight_array.clone(),
+ &opts,
+ &mut RandomWeightedChoose::default(),
+ );
+ assert!(selected_result.is_err());
+
+ for i in 1..=50 {
+ let opts = SelectorOptions {
+ min_required_items: i,
+ allow_duplication: true,
+ };
+
+ let selected_peers = choose_peers(
+ weight_array.clone(),
+ &opts,
+ &mut RandomWeightedChoose::default(),
+ )
+ .unwrap();
+
+ assert_eq!(i, selected_peers.len());
+ }
+ }
+}
diff --git a/src/meta-srv/src/selector/lease_based.rs b/src/meta-srv/src/selector/lease_based.rs
index 83a42605103e..268371a003f7 100644
--- a/src/meta-srv/src/selector/lease_based.rs
+++ b/src/meta-srv/src/selector/lease_based.rs
@@ -13,13 +13,13 @@
// limitations under the License.
use api::v1::meta::Peer;
-use rand::seq::SliceRandom;
-use rand::thread_rng;
use crate::error::Result;
use crate::lease;
use crate::metasrv::SelectorContext;
-use crate::selector::{Namespace, Selector};
+use crate::selector::common::choose_peers;
+use crate::selector::weighted_choose::{RandomWeightedChoose, WeightedItem};
+use crate::selector::{Namespace, Selector, SelectorOptions};
pub struct LeaseBasedSelector;
@@ -28,24 +28,33 @@ impl Selector for LeaseBasedSelector {
type Context = SelectorContext;
type Output = Vec<Peer>;
- async fn select(&self, ns: Namespace, ctx: &Self::Context) -> Result<Self::Output> {
- // filter out the nodes out lease
- let mut lease_kvs: Vec<_> =
- lease::alive_datanodes(ns, &ctx.meta_peer_client, ctx.datanode_lease_secs)
- .await?
- .into_iter()
- .collect();
-
- lease_kvs.shuffle(&mut thread_rng());
-
- let peers = lease_kvs
+ async fn select(
+ &self,
+ ns: Namespace,
+ ctx: &Self::Context,
+ opts: SelectorOptions,
+ ) -> Result<Self::Output> {
+ // 1. get alive datanodes.
+ let lease_kvs =
+ lease::alive_datanodes(ns, &ctx.meta_peer_client, ctx.datanode_lease_secs).await?;
+
+ // 2. compute weight array, but the weight of each item is the same.
+ let weight_array = lease_kvs
.into_iter()
- .map(|(k, v)| Peer {
- id: k.node_id,
- addr: v.node_addr,
+ .map(|(k, v)| WeightedItem {
+ item: Peer {
+ id: k.node_id,
+ addr: v.node_addr.clone(),
+ },
+ weight: 1,
+ reverse_weight: 1,
})
- .collect::<Vec<_>>();
+ .collect();
+
+ // 3. choose peers by weight_array.
+ let weighted_choose = &mut RandomWeightedChoose::default();
+ let selected = choose_peers(weight_array, &opts, weighted_choose)?;
- Ok(peers)
+ Ok(selected)
}
}
diff --git a/src/meta-srv/src/selector/load_based.rs b/src/meta-srv/src/selector/load_based.rs
index e816cc5cdc4c..114a48beff72 100644
--- a/src/meta-srv/src/selector/load_based.rs
+++ b/src/meta-srv/src/selector/load_based.rs
@@ -12,22 +12,125 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::collections::HashMap;
+
use api::v1::meta::Peer;
use common_meta::key::TableMetadataManager;
use common_meta::rpc::router::find_leaders;
-use common_telemetry::warn;
+use common_telemetry::{debug, info};
+use parking_lot::RwLock;
use snafu::ResultExt;
use table::metadata::TableId;
use crate::error::{self, Result};
-use crate::keys::{LeaseKey, LeaseValue, StatKey};
+use crate::keys::{LeaseKey, LeaseValue, StatKey, StatValue};
use crate::lease;
use crate::metasrv::SelectorContext;
-use crate::selector::{Namespace, Selector};
+use crate::selector::common::choose_peers;
+use crate::selector::weight_compute::{RegionNumsBasedWeightCompute, WeightCompute};
+use crate::selector::weighted_choose::{RandomWeightedChoose, WeightedChoose};
+use crate::selector::{Namespace, Selector, SelectorOptions};
+
+pub struct LoadBasedSelector<W, C> {
+ weighted_choose: RwLock<W>,
+ weight_compute: C,
+}
+
+impl<W, C> LoadBasedSelector<W, C> {
+ pub fn new(weighted_choose: W, weight_compute: C) -> Self {
+ Self {
+ weighted_choose: RwLock::new(weighted_choose),
+ weight_compute,
+ }
+ }
+}
+
+impl Default for LoadBasedSelector<RandomWeightedChoose<Peer>, RegionNumsBasedWeightCompute> {
+ fn default() -> Self {
+ Self {
+ weighted_choose: RwLock::new(RandomWeightedChoose::default()),
+ weight_compute: RegionNumsBasedWeightCompute,
+ }
+ }
+}
-const MAX_REGION_NUMBER: u64 = u64::MAX;
+#[async_trait::async_trait]
+impl<W, C> Selector for LoadBasedSelector<W, C>
+where
+ W: WeightedChoose<Peer>,
+ C: WeightCompute<Source = HashMap<StatKey, StatValue>>,
+{
+ type Context = SelectorContext;
+ type Output = Vec<Peer>;
+
+ async fn select(
+ &self,
+ ns: Namespace,
+ ctx: &Self::Context,
+ opts: SelectorOptions,
+ ) -> Result<Self::Output> {
+ // 1. get alive datanodes.
+ let lease_kvs =
+ lease::alive_datanodes(ns, &ctx.meta_peer_client, ctx.datanode_lease_secs).await?;
+
+ // 2. get stat kvs and filter out expired datanodes.
+ let stat_keys = lease_kvs.keys().map(|k| k.into()).collect();
+ let stat_kvs = filter_out_expired_datanode(
+ ctx.meta_peer_client.get_dn_stat_kvs(stat_keys).await?,
+ &lease_kvs,
+ );
+
+ // 3. try to make the regions of a table distributed on different datanodes as much as possible.
+ let stat_kvs = if let Some(table_id) = ctx.table_id {
+ let table_metadata_manager = TableMetadataManager::new(ctx.kv_backend.clone());
+ let leader_peer_ids = get_leader_peer_ids(&table_metadata_manager, table_id).await?;
+ let filter_result = filter_out_datanode_by_table(&stat_kvs, &leader_peer_ids);
+ if filter_result.is_empty() {
+ info!("The regions of the table cannot be allocated to completely different datanodes, table id: {}.", table_id);
+ stat_kvs
+ } else {
+ filter_result
+ }
+ } else {
+ stat_kvs
+ };
-pub struct LoadBasedSelector;
+ // 4. compute weight array.
+ let weight_array = self.weight_compute.compute(&stat_kvs);
+
+ // 5. choose peers by weight_array.
+ let mut weighted_choose = self.weighted_choose.write();
+ let selected = choose_peers(weight_array, &opts, &mut *weighted_choose)?;
+
+ debug!(
+ "LoadBasedSelector select peers: {:?}, namespace: {}, opts: {:?}.",
+ selected, ns, opts,
+ );
+
+ Ok(selected)
+ }
+}
+
+fn filter_out_expired_datanode(
+ mut stat_kvs: HashMap<StatKey, StatValue>,
+ lease_kvs: &HashMap<LeaseKey, LeaseValue>,
+) -> HashMap<StatKey, StatValue> {
+ lease_kvs
+ .iter()
+ .filter_map(|(lease_k, _)| stat_kvs.remove_entry(&lease_k.into()))
+ .collect()
+}
+
+fn filter_out_datanode_by_table(
+ stat_kvs: &HashMap<StatKey, StatValue>,
+ leader_peer_ids: &[u64],
+) -> HashMap<StatKey, StatValue> {
+ stat_kvs
+ .iter()
+ .filter(|(stat_k, _)| leader_peer_ids.contains(&stat_k.node_id))
+ .map(|(stat_k, stat_v)| (*stat_k, stat_v.clone()))
+ .collect()
+}
async fn get_leader_peer_ids(
table_metadata_manager: &TableMetadataManager,
@@ -48,59 +151,58 @@ async fn get_leader_peer_ids(
})
}
-#[async_trait::async_trait]
-impl Selector for LoadBasedSelector {
- type Context = SelectorContext;
- type Output = Vec<Peer>;
-
- async fn select(&self, ns: Namespace, ctx: &Self::Context) -> Result<Self::Output> {
- // get alive datanodes
- let lease_kvs =
- lease::alive_datanodes(ns, &ctx.meta_peer_client, ctx.datanode_lease_secs).await?;
- if lease_kvs.is_empty() {
- return Ok(vec![]);
- }
+#[cfg(test)]
+mod tests {
+ use std::collections::HashMap;
- let stat_keys: Vec<StatKey> = lease_kvs.keys().map(|k| k.into()).collect();
- let stat_kvs = ctx.meta_peer_client.get_dn_stat_kvs(stat_keys).await?;
+ use crate::keys::{LeaseKey, LeaseValue, StatKey, StatValue};
+ use crate::selector::load_based::filter_out_expired_datanode;
- let leader_peer_ids = if let Some(table_id) = ctx.table_id {
- let table_metadata_manager = TableMetadataManager::new(ctx.kv_backend.clone());
+ #[test]
+ fn test_filter_out_expired_datanode() {
+ let mut stat_kvs = HashMap::new();
+ stat_kvs.insert(
+ StatKey {
+ cluster_id: 1,
+ node_id: 0,
+ },
+ StatValue { stats: vec![] },
+ );
+ stat_kvs.insert(
+ StatKey {
+ cluster_id: 1,
+ node_id: 1,
+ },
+ StatValue { stats: vec![] },
+ );
+ stat_kvs.insert(
+ StatKey {
+ cluster_id: 1,
+ node_id: 2,
+ },
+ StatValue { stats: vec![] },
+ );
- get_leader_peer_ids(&table_metadata_manager, table_id).await?
- } else {
- Vec::new()
- };
-
- let mut tuples: Vec<(LeaseKey, LeaseValue, u64)> = lease_kvs
- .into_iter()
- .filter(|(lease_k, _)| !leader_peer_ids.contains(&lease_k.node_id))
- .map(|(lease_k, lease_v)| {
- let stat_key: StatKey = (&lease_k).into();
- let region_num = match stat_kvs
- .get(&stat_key)
- .and_then(|stat_val| stat_val.region_num())
- {
- Some(region_num) => region_num,
- None => {
- warn!("Failed to get stat_val by stat_key {:?}", stat_key);
- MAX_REGION_NUMBER
- }
- };
-
- (lease_k, lease_v, region_num)
- })
- .collect();
+ let mut lease_kvs = HashMap::new();
+ lease_kvs.insert(
+ LeaseKey {
+ cluster_id: 1,
+ node_id: 1,
+ },
+ LeaseValue {
+ timestamp_millis: 0,
+ node_addr: "127.0.0.1:3002".to_string(),
+ },
+ );
- // sort the datanodes according to the number of regions
- tuples.sort_by(|a, b| a.2.cmp(&b.2));
+ let alive_stat_kvs = filter_out_expired_datanode(stat_kvs, &lease_kvs);
- Ok(tuples
- .into_iter()
- .map(|(lease_key, lease_val, _)| Peer {
- id: lease_key.node_id,
- addr: lease_val.node_addr,
+ assert_eq!(1, alive_stat_kvs.len());
+ assert!(alive_stat_kvs
+ .get(&StatKey {
+ cluster_id: 1,
+ node_id: 1
})
- .collect())
+ .is_some());
}
}
diff --git a/src/meta-srv/src/selector/weight_compute.rs b/src/meta-srv/src/selector/weight_compute.rs
new file mode 100644
index 000000000000..8971f73accbc
--- /dev/null
+++ b/src/meta-srv/src/selector/weight_compute.rs
@@ -0,0 +1,237 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::collections::HashMap;
+
+use api::v1::meta::Peer;
+use itertools::{Itertools, MinMaxResult};
+
+use crate::keys::{StatKey, StatValue};
+use crate::selector::weighted_choose::WeightedItem;
+
+/// The [`WeightCompute`] trait is used to compute the weight array by heartbeats.
+pub trait WeightCompute: Send + Sync {
+ type Source;
+
+ fn compute(&self, stat_kvs: &Self::Source) -> Vec<WeightedItem<Peer>>;
+}
+
+/// The ['RegionNumsBasedWeightCompute'] calculates a weighted list based on the region number obtained from the heartbeat.
+///
+/// # How to calculate the weighted list?
+/// weight = max_region_num - current_region_num + (max_region_num - min_region_num);
+///
+/// # How to calculate the reverse weighted list?
+/// reverse_weight = region_num - min_region_num + (max_region_num - min_region_num);
+pub struct RegionNumsBasedWeightCompute;
+
+impl WeightCompute for RegionNumsBasedWeightCompute {
+ type Source = HashMap<StatKey, StatValue>;
+
+ fn compute(&self, stat_kvs: &HashMap<StatKey, StatValue>) -> Vec<WeightedItem<Peer>> {
+ let mut region_nums = Vec::with_capacity(stat_kvs.len());
+ let mut peers = Vec::with_capacity(stat_kvs.len());
+
+ for (stat_k, stat_v) in stat_kvs {
+ let Some(region_num) = stat_v.region_num() else {
+ continue;
+ };
+ let Some(node_addr) = stat_v.node_addr() else {
+ continue;
+ };
+
+ let peer = Peer {
+ id: stat_k.node_id,
+ addr: node_addr,
+ };
+
+ region_nums.push(region_num);
+ peers.push(peer);
+ }
+
+ if region_nums.is_empty() {
+ return vec![];
+ }
+
+ let (min_weight, max_weight) = match region_nums.iter().minmax() {
+ // unreachable safety: region_nums is not empty
+ MinMaxResult::NoElements => unreachable!(),
+ MinMaxResult::OneElement(minmax) => (*minmax, *minmax),
+ MinMaxResult::MinMax(min, max) => (*min, *max),
+ };
+
+ let base_weight = match max_weight - min_weight {
+ 0 => 1,
+ x => x,
+ };
+
+ peers
+ .into_iter()
+ .zip(region_nums)
+ .map(|(peer, region_num)| WeightedItem {
+ item: peer,
+ weight: (max_weight - region_num + base_weight) as usize,
+ reverse_weight: (region_num - min_weight + base_weight) as usize,
+ })
+ .collect()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::collections::HashMap;
+
+ use api::v1::meta::Peer;
+ use store_api::region_engine::RegionRole;
+
+ use super::{RegionNumsBasedWeightCompute, WeightCompute};
+ use crate::handler::node_stat::{RegionStat, Stat};
+ use crate::keys::{StatKey, StatValue};
+
+ #[test]
+ fn test_weight_compute() {
+ let mut stat_kvs: HashMap<StatKey, StatValue> = HashMap::default();
+ let stat_key = StatKey {
+ cluster_id: 1,
+ node_id: 1,
+ };
+ let stat_val = StatValue {
+ stats: vec![mock_stat_1()],
+ };
+ stat_kvs.insert(stat_key, stat_val);
+ let stat_key = StatKey {
+ cluster_id: 1,
+ node_id: 2,
+ };
+ let stat_val = StatValue {
+ stats: vec![mock_stat_2()],
+ };
+ stat_kvs.insert(stat_key, stat_val);
+ let stat_key = StatKey {
+ cluster_id: 1,
+ node_id: 3,
+ };
+ let stat_val = StatValue {
+ stats: vec![mock_stat_3()],
+ };
+ stat_kvs.insert(stat_key, stat_val);
+
+ let compute = RegionNumsBasedWeightCompute;
+ let weight_array = compute.compute(&stat_kvs);
+
+ let mut expected = HashMap::new();
+ expected.insert(
+ Peer {
+ id: 1,
+ addr: "127.0.0.1:3001".to_string(),
+ },
+ 4,
+ );
+ expected.insert(
+ Peer {
+ id: 2,
+ addr: "127.0.0.1:3002".to_string(),
+ },
+ 3,
+ );
+ expected.insert(
+ Peer {
+ id: 3,
+ addr: "127.0.0.1:3003".to_string(),
+ },
+ 2,
+ );
+ for weight in weight_array.iter() {
+ assert_eq!(*expected.get(&weight.item).unwrap(), weight.weight,);
+ }
+
+ let mut expected = HashMap::new();
+ expected.insert(
+ Peer {
+ id: 1,
+ addr: "127.0.0.1:3001".to_string(),
+ },
+ 2,
+ );
+ expected.insert(
+ Peer {
+ id: 2,
+ addr: "127.0.0.1:3002".to_string(),
+ },
+ 3,
+ );
+ expected.insert(
+ Peer {
+ id: 3,
+ addr: "127.0.0.1:3003".to_string(),
+ },
+ 4,
+ );
+
+ for weight in weight_array.iter() {
+ assert_eq!(weight.reverse_weight, *expected.get(&weight.item).unwrap());
+ }
+ }
+
+ fn mock_stat_1() -> Stat {
+ Stat {
+ addr: "127.0.0.1:3001".to_string(),
+ region_num: 11,
+ region_stats: vec![RegionStat {
+ id: 111,
+ rcus: 1,
+ wcus: 1,
+ approximate_bytes: 1,
+ approximate_rows: 1,
+ engine: "mito2".to_string(),
+ role: RegionRole::Leader,
+ }],
+ ..Default::default()
+ }
+ }
+
+ fn mock_stat_2() -> Stat {
+ Stat {
+ addr: "127.0.0.1:3002".to_string(),
+ region_num: 12,
+ region_stats: vec![RegionStat {
+ id: 112,
+ rcus: 1,
+ wcus: 1,
+ approximate_bytes: 1,
+ approximate_rows: 1,
+ engine: "mito2".to_string(),
+ role: RegionRole::Leader,
+ }],
+ ..Default::default()
+ }
+ }
+
+ fn mock_stat_3() -> Stat {
+ Stat {
+ addr: "127.0.0.1:3003".to_string(),
+ region_num: 13,
+ region_stats: vec![RegionStat {
+ id: 113,
+ rcus: 1,
+ wcus: 1,
+ approximate_bytes: 1,
+ approximate_rows: 1,
+ engine: "mito2".to_string(),
+ role: RegionRole::Leader,
+ }],
+ ..Default::default()
+ }
+ }
+}
diff --git a/src/meta-srv/src/selector/weighted_choose.rs b/src/meta-srv/src/selector/weighted_choose.rs
new file mode 100644
index 000000000000..9e9f63abadfd
--- /dev/null
+++ b/src/meta-srv/src/selector/weighted_choose.rs
@@ -0,0 +1,176 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use rand::distributions::WeightedIndex;
+use rand::prelude::Distribution;
+use rand::thread_rng;
+use snafu::{ensure, ResultExt};
+
+use crate::error;
+use crate::error::Result;
+
+/// A common trait for weighted balance algorithms.
+pub trait WeightedChoose<Item>: Send + Sync {
+    /// The method will re-set the weight array.
+ ///
+ /// Note:
+    /// 1. make sure the weight_array is not empty;
+    /// 2. make sure the total weight is greater than 0.
+ /// Otherwise an error will be returned.
+ fn set_weight_array(&mut self, weight_array: Vec<WeightedItem<Item>>) -> Result<()>;
+
+ /// The method will choose one item.
+ ///
+    /// If the weight_array has not been set before, an error will be returned.
+ fn choose_one(&mut self) -> Result<Item>;
+
+ /// The method will reverse choose one item.
+ ///
+    /// If the weight_array has not been set before, an error will be returned.
+ fn reverse_choose_one(&mut self) -> Result<Item>;
+}
+
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct WeightedItem<Item> {
+ pub item: Item,
+ pub weight: usize,
+ pub reverse_weight: usize,
+}
+
+/// An implementation of weighted balance: random weighted choose.
+///
+/// The algorithm is as follows:
+///
+/// ```text
+/// random value
+/// ─────────────────────────────────▶
+/// │
+/// ▼
+/// ┌─────────────────┬─────────┬──────────────────────┬─────┬─────────────────┐
+/// │element_0 │element_1│element_2 │... │element_n │
+/// └─────────────────┴─────────┴──────────────────────┴─────┴─────────────────┘
+/// ```
+pub struct RandomWeightedChoose<Item> {
+ items: Vec<WeightedItem<Item>>,
+ weighted_index: Option<WeightedIndex<usize>>,
+ reverse_weighted_index: Option<WeightedIndex<usize>>,
+}
+
+impl<Item> Default for RandomWeightedChoose<Item> {
+ fn default() -> Self {
+ Self {
+ items: Vec::default(),
+ weighted_index: None,
+ reverse_weighted_index: None,
+ }
+ }
+}
+
+impl<Item> WeightedChoose<Item> for RandomWeightedChoose<Item>
+where
+ Item: Clone + Send + Sync,
+{
+ fn set_weight_array(&mut self, weight_array: Vec<WeightedItem<Item>>) -> Result<()> {
+ self.weighted_index = Some(
+ WeightedIndex::new(weight_array.iter().map(|item| item.weight))
+ .context(error::WeightArraySnafu)?,
+ );
+
+ self.reverse_weighted_index = Some(
+ WeightedIndex::new(weight_array.iter().map(|item| item.reverse_weight))
+ .context(error::WeightArraySnafu)?,
+ );
+
+ self.items = weight_array;
+
+ Ok(())
+ }
+
+ fn choose_one(&mut self) -> Result<Item> {
+ ensure!(
+ !self.items.is_empty() && self.weighted_index.is_some(),
+ error::NotSetWeightArraySnafu
+ );
+
+ // unwrap safety: whether weighted_index is none has been checked before.
+ let weighted_index = self.weighted_index.as_ref().unwrap();
+
+ Ok(self.items[weighted_index.sample(&mut thread_rng())]
+ .item
+ .clone())
+ }
+
+ fn reverse_choose_one(&mut self) -> Result<Item> {
+ ensure!(
+ !self.items.is_empty() && self.reverse_weighted_index.is_some(),
+ error::NotSetWeightArraySnafu
+ );
+
+ // unwrap safety: whether reverse_weighted_index is none has been checked before.
+ let reverse_weighted_index = self.reverse_weighted_index.as_ref().unwrap();
+
+ Ok(self.items[reverse_weighted_index.sample(&mut thread_rng())]
+ .item
+ .clone())
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::{RandomWeightedChoose, WeightedChoose, WeightedItem};
+
+ #[test]
+ fn test_random_weighted_choose() {
+ let mut choose = RandomWeightedChoose::default();
+ choose
+ .set_weight_array(vec![
+ WeightedItem {
+ item: 1,
+ weight: 100,
+ reverse_weight: 0,
+ },
+ WeightedItem {
+ item: 2,
+ weight: 0,
+ reverse_weight: 100,
+ },
+ ])
+ .unwrap();
+ for _ in 0..100 {
+ let ret = choose.choose_one().unwrap();
+ assert_eq!(1, ret);
+ }
+
+ for _ in 0..100 {
+ let ret = choose.reverse_choose_one().unwrap();
+ assert_eq!(2, ret);
+ }
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_random_weighted_choose_should_panic() {
+ let mut choose: RandomWeightedChoose<u32> = RandomWeightedChoose::default();
+ choose.set_weight_array(vec![]).unwrap();
+ let _ = choose.choose_one().unwrap();
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_random_reverse_weighted_choose_should_panic() {
+ let mut choose: RandomWeightedChoose<u32> = RandomWeightedChoose::default();
+ choose.set_weight_array(vec![]).unwrap();
+ let _ = choose.reverse_choose_one().unwrap();
+ }
+}
diff --git a/src/meta-srv/src/table_meta_alloc.rs b/src/meta-srv/src/table_meta_alloc.rs
index cfefbba0c282..60fba7539059 100644
--- a/src/meta-srv/src/table_meta_alloc.rs
+++ b/src/meta-srv/src/table_meta_alloc.rs
@@ -26,6 +26,7 @@ use table::metadata::RawTableInfo;
use crate::error::{self, Result, TooManyPartitionsSnafu};
use crate::metasrv::{SelectorContext, SelectorRef};
+use crate::selector::SelectorOptions;
pub struct MetaSrvTableMetadataAllocator {
ctx: SelectorContext,
@@ -78,12 +79,21 @@ async fn handle_create_region_routes(
selector: &SelectorRef,
table_id_sequence: &SequenceRef,
) -> Result<(TableId, Vec<RegionRoute>)> {
- let mut peers = selector.select(cluster_id, ctx).await?;
+ let mut peers = selector
+ .select(
+ cluster_id,
+ ctx,
+ SelectorOptions {
+ min_required_items: partitions.len(),
+ allow_duplication: true,
+ },
+ )
+ .await?;
if peers.len() < partitions.len() {
warn!("Create table failed due to no enough available datanodes, table: {}, partition number: {}, datanode number: {}", format_full_table_name(&table_info.catalog_name,&table_info.schema_name,&table_info.name), partitions.len(), peers.len());
return error::NoEnoughAvailableDatanodeSnafu {
- expected: partitions.len(),
+ required: partitions.len(),
available: peers.len(),
}
.fail();
diff --git a/tests-integration/src/grpc.rs b/tests-integration/src/grpc.rs
index 127598468c2c..123e864dd553 100644
--- a/tests-integration/src/grpc.rs
+++ b/tests-integration/src/grpc.rs
@@ -524,7 +524,7 @@ CREATE TABLE {table_name} (
.iter()
.map(|(k, v)| (v[0], *k))
.collect::<HashMap<u32, u64>>();
- assert_eq!(region_to_dn_map.len(), expected_distribution.len());
+ assert!(region_to_dn_map.len() <= instance.datanodes().len());
let stmt = QueryLanguageParser::parse_sql(&format!(
"SELECT ts, a, b FROM {table_name} ORDER BY ts"
diff --git a/tests-integration/src/instance.rs b/tests-integration/src/instance.rs
index 3bff7bf6b984..ac5a2e4b3ca9 100644
--- a/tests-integration/src/instance.rs
+++ b/tests-integration/src/instance.rs
@@ -221,7 +221,7 @@ mod tests {
.iter()
.map(|(k, v)| (v[0], *k))
.collect::<HashMap<u32, u64>>();
- assert_eq!(region_to_dn_map.len(), expected_distribution.len());
+ assert!(region_to_dn_map.len() <= instance.datanodes().len());
let stmt = QueryLanguageParser::parse_sql("SELECT ts, host FROM demo ORDER BY ts").unwrap();
let LogicalPlan::DfPlan(plan) = instance
diff --git a/tests-integration/tests/region_failover.rs b/tests-integration/tests/region_failover.rs
index de60fd8f84dd..612a1fd4abab 100644
--- a/tests-integration/tests/region_failover.rs
+++ b/tests-integration/tests/region_failover.rs
@@ -30,7 +30,7 @@ use futures::TryStreamExt;
use meta_srv::error::Result as MetaResult;
use meta_srv::metasrv::{SelectorContext, SelectorRef};
use meta_srv::procedure::region_failover::{RegionFailoverContext, RegionFailoverProcedure};
-use meta_srv::selector::{Namespace, Selector};
+use meta_srv::selector::{Namespace, Selector, SelectorOptions};
use servers::query_handler::sql::SqlQueryHandler;
use session::context::{QueryContext, QueryContextRef};
use table::metadata::TableId;
@@ -325,7 +325,12 @@ impl Selector for ForeignNodeSelector {
type Context = SelectorContext;
type Output = Vec<Peer>;
- async fn select(&self, _ns: Namespace, _ctx: &Self::Context) -> MetaResult<Self::Output> {
+ async fn select(
+ &self,
+ _ns: Namespace,
+ _ctx: &Self::Context,
+ _opts: SelectorOptions,
+ ) -> MetaResult<Self::Output> {
Ok(vec![self.foreign.clone()])
}
}
|
feat
|
add random weighted choose in load_based selector (#2234)
|
75b8afe04359c90e5631ec3164bf42a787fe7e82
|
2023-02-15 11:44:07
|
Lei, HUANG
|
feat: compaction integration (#997)
| false
|
diff --git a/config/datanode.example.toml b/config/datanode.example.toml
index 5fc00df1b8ee..c68f379c9c12 100644
--- a/config/datanode.example.toml
+++ b/config/datanode.example.toml
@@ -24,3 +24,7 @@ metasrv_addrs = ['127.0.0.1:3002']
timeout_millis = 3000
connect_timeout_millis = 5000
tcp_nodelay = false
+
+[compaction]
+max_inflight_task = 4
+max_file_in_level0 = 16
diff --git a/src/catalog/src/system.rs b/src/catalog/src/system.rs
index bcc9e4444605..2ac85407830f 100644
--- a/src/catalog/src/system.rs
+++ b/src/catalog/src/system.rs
@@ -399,6 +399,7 @@ mod tests {
use mito::config::EngineConfig;
use mito::engine::MitoEngine;
use object_store::ObjectStore;
+ use storage::compaction::noop::NoopCompactionScheduler;
use storage::config::EngineConfig as StorageEngineConfig;
use storage::EngineImpl;
use table::metadata::TableType;
@@ -485,12 +486,14 @@ mod tests {
.build()
.unwrap();
let object_store = ObjectStore::new(accessor);
+ let noop_compaction_scheduler = Arc::new(NoopCompactionScheduler::default());
let table_engine = Arc::new(MitoEngine::new(
EngineConfig::default(),
EngineImpl::new(
StorageEngineConfig::default(),
Arc::new(NoopLogStore::default()),
object_store.clone(),
+ noop_compaction_scheduler,
),
object_store,
));
diff --git a/src/cmd/src/datanode.rs b/src/cmd/src/datanode.rs
index 4c1f066f1d2f..f17542c86aec 100644
--- a/src/cmd/src/datanode.rs
+++ b/src/cmd/src/datanode.rs
@@ -143,7 +143,7 @@ mod tests {
use std::assert_matches::assert_matches;
use std::time::Duration;
- use datanode::datanode::ObjectStoreConfig;
+ use datanode::datanode::{CompactionConfig, ObjectStoreConfig};
use servers::Mode;
use super::*;
@@ -181,6 +181,14 @@ mod tests {
ObjectStoreConfig::S3 { .. } => unreachable!(),
ObjectStoreConfig::Oss { .. } => unreachable!(),
};
+
+ assert_eq!(
+ CompactionConfig {
+ max_inflight_task: 4,
+ max_file_in_level0: 16,
+ },
+ options.compaction
+ );
}
#[test]
diff --git a/src/common/time/src/range.rs b/src/common/time/src/range.rs
index 08f5642066cc..ed08014505d1 100644
--- a/src/common/time/src/range.rs
+++ b/src/common/time/src/range.rs
@@ -205,7 +205,7 @@ impl TimestampRange {
pub fn new_inclusive(start: Option<Timestamp>, end: Option<Timestamp>) -> Self {
// check for emptiness
if let (Some(start_ts), Some(end_ts)) = (start, end) {
- if start_ts >= end_ts {
+ if start_ts > end_ts {
return Self::empty();
}
}
@@ -462,4 +462,29 @@ mod tests {
assert!(!full.intersects(&empty));
}
+
+ #[test]
+ fn test_new_inclusive() {
+ let range = TimestampRange::new_inclusive(
+ Some(Timestamp::new_millisecond(1)),
+ Some(Timestamp::new_millisecond(3)),
+ );
+ assert!(!range.is_empty());
+ assert!(range.contains(&Timestamp::new_millisecond(1)));
+ assert!(range.contains(&Timestamp::new_millisecond(3)));
+
+ let range = TimestampRange::new_inclusive(
+ Some(Timestamp::new_millisecond(1)),
+ Some(Timestamp::new_millisecond(1)),
+ );
+ assert!(!range.is_empty());
+ assert_eq!(1, range.start.unwrap().value());
+ assert!(range.contains(&Timestamp::new_millisecond(1)));
+
+ let range = TimestampRange::new_inclusive(
+ Some(Timestamp::new_millisecond(2)),
+ Some(Timestamp::new_millisecond(1)),
+ );
+ assert!(range.is_empty());
+ }
}
diff --git a/src/datanode/src/datanode.rs b/src/datanode/src/datanode.rs
index bf2a43a4ff29..5b1773583bdf 100644
--- a/src/datanode/src/datanode.rs
+++ b/src/datanode/src/datanode.rs
@@ -20,6 +20,8 @@ use common_telemetry::info;
use meta_client::MetaClientOpts;
use serde::{Deserialize, Serialize};
use servers::Mode;
+use storage::compaction::CompactionSchedulerConfig;
+use storage::config::EngineConfig as StorageEngineConfig;
use crate::error::Result;
use crate::instance::{Instance, InstanceRef};
@@ -104,6 +106,40 @@ impl Default for WalConfig {
}
}
+/// Options for table compaction
+#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)]
+pub struct CompactionConfig {
+ /// Max task number that can concurrently run.
+ pub max_inflight_task: usize,
+ /// Max files in level 0 to trigger compaction.
+ pub max_file_in_level0: usize,
+}
+
+impl Default for CompactionConfig {
+ fn default() -> Self {
+ Self {
+ max_inflight_task: 4,
+ max_file_in_level0: 8,
+ }
+ }
+}
+
+impl From<&DatanodeOptions> for CompactionSchedulerConfig {
+ fn from(value: &DatanodeOptions) -> Self {
+ Self {
+ max_inflight_task: value.compaction.max_inflight_task,
+ }
+ }
+}
+
+impl From<&DatanodeOptions> for StorageEngineConfig {
+ fn from(value: &DatanodeOptions) -> Self {
+ Self {
+ max_files_in_l0: value.compaction.max_file_in_level0,
+ }
+ }
+}
+
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(default)]
pub struct DatanodeOptions {
@@ -117,6 +153,7 @@ pub struct DatanodeOptions {
pub wal: WalConfig,
pub storage: ObjectStoreConfig,
pub enable_memory_catalog: bool,
+ pub compaction: CompactionConfig,
pub mode: Mode,
}
@@ -133,6 +170,7 @@ impl Default for DatanodeOptions {
wal: WalConfig::default(),
storage: ObjectStoreConfig::default(),
enable_memory_catalog: false,
+ compaction: CompactionConfig::default(),
mode: Mode::Standalone,
}
}
diff --git a/src/datanode/src/instance.rs b/src/datanode/src/instance.rs
index f0395bbbe90f..1b828ad3def6 100644
--- a/src/datanode/src/instance.rs
+++ b/src/datanode/src/instance.rs
@@ -38,8 +38,12 @@ use object_store::{util, ObjectStore};
use query::query_engine::{QueryEngineFactory, QueryEngineRef};
use servers::Mode;
use snafu::prelude::*;
+use storage::compaction::{
+ CompactionSchedulerConfig, CompactionSchedulerRef, LocalCompactionScheduler, SimplePicker,
+};
use storage::config::EngineConfig as StorageEngineConfig;
use storage::EngineImpl;
+use store_api::logstore::LogStore;
use table::table::numbers::NumbersTable;
use table::table::TableIdProviderRef;
use table::Table;
@@ -92,12 +96,15 @@ impl Instance {
}
};
+ let compaction_scheduler = create_compaction_scheduler(opts);
+
let table_engine = Arc::new(DefaultEngine::new(
TableEngineConfig::default(),
EngineImpl::new(
- StorageEngineConfig::default(),
+ StorageEngineConfig::from(opts),
logstore.clone(),
object_store.clone(),
+ compaction_scheduler,
),
object_store,
));
@@ -204,6 +211,13 @@ impl Instance {
}
}
+fn create_compaction_scheduler<S: LogStore>(opts: &DatanodeOptions) -> CompactionSchedulerRef<S> {
+ let picker = SimplePicker::default();
+ let config = CompactionSchedulerConfig::from(opts);
+ let scheduler = LocalCompactionScheduler::new(config, picker);
+ Arc::new(scheduler)
+}
+
pub(crate) async fn new_object_store(store_config: &ObjectStoreConfig) -> Result<ObjectStore> {
let object_store = match store_config {
ObjectStoreConfig::File { .. } => new_fs_object_store(store_config).await,
diff --git a/src/datanode/src/mock.rs b/src/datanode/src/mock.rs
index 3d0e4bd6eaee..ed764c2fae3d 100644
--- a/src/datanode/src/mock.rs
+++ b/src/datanode/src/mock.rs
@@ -24,6 +24,7 @@ use mito::config::EngineConfig as TableEngineConfig;
use query::QueryEngineFactory;
use servers::Mode;
use snafu::ResultExt;
+use storage::compaction::noop::NoopCompactionScheduler;
use storage::config::EngineConfig as StorageEngineConfig;
use storage::EngineImpl;
use table::metadata::TableId;
@@ -46,12 +47,14 @@ impl Instance {
let object_store = new_object_store(&opts.storage).await?;
let logstore = Arc::new(create_log_store(&opts.wal).await?);
let meta_client = Arc::new(mock_meta_client(meta_srv, opts.node_id.unwrap_or(42)).await);
+ let compaction_scheduler = Arc::new(NoopCompactionScheduler::default());
let table_engine = Arc::new(DefaultEngine::new(
TableEngineConfig::default(),
EngineImpl::new(
StorageEngineConfig::default(),
logstore.clone(),
object_store.clone(),
+ compaction_scheduler,
),
object_store,
));
diff --git a/src/datanode/src/sql.rs b/src/datanode/src/sql.rs
index fdb8770ea754..1ff9206adbd2 100644
--- a/src/datanode/src/sql.rs
+++ b/src/datanode/src/sql.rs
@@ -150,6 +150,7 @@ mod tests {
use query::parser::{QueryLanguageParser, QueryStatement};
use query::QueryEngineFactory;
use sql::statements::statement::Statement;
+ use storage::compaction::noop::NoopCompactionScheduler;
use storage::config::EngineConfig as StorageEngineConfig;
use storage::EngineImpl;
use table::engine::TableReference;
@@ -209,7 +210,7 @@ mod tests {
let store_dir = dir.path().to_string_lossy();
let accessor = Builder::default().root(&store_dir).build().unwrap();
let object_store = ObjectStore::new(accessor);
-
+ let compaction_scheduler = Arc::new(NoopCompactionScheduler::default());
let sql = r#"insert into demo(host, cpu, memory, ts) values
('host1', 66.6, 1024, 1655276557000),
('host2', 88.8, 333.3, 1655276558000)
@@ -221,6 +222,7 @@ mod tests {
StorageEngineConfig::default(),
Arc::new(NoopLogStore::default()),
object_store.clone(),
+ compaction_scheduler,
),
object_store,
));
diff --git a/src/mito/src/engine.rs b/src/mito/src/engine.rs
index f9bde0feb795..e29d4fa98534 100644
--- a/src/mito/src/engine.rs
+++ b/src/mito/src/engine.rs
@@ -605,6 +605,7 @@ mod tests {
Float64Vector, Int32Vector, StringVector, TimestampMillisecondVector, VectorRef,
};
use log_store::NoopLogStore;
+ use storage::compaction::noop::NoopCompactionScheduler;
use storage::config::EngineConfig as StorageEngineConfig;
use storage::region::RegionImpl;
use storage::EngineImpl;
@@ -643,13 +644,14 @@ mod tests {
let (dir, object_store) =
test_util::new_test_object_store("test_insert_with_column_default_constraint").await;
-
+ let compaction_scheduler = Arc::new(NoopCompactionScheduler::default());
let table_engine = MitoEngine::new(
EngineConfig::default(),
EngineImpl::new(
StorageEngineConfig::default(),
Arc::new(NoopLogStore::default()),
object_store.clone(),
+ compaction_scheduler,
),
object_store,
);
diff --git a/src/mito/src/table/test_util.rs b/src/mito/src/table/test_util.rs
index 35721d892431..4b4681f15cf5 100644
--- a/src/mito/src/table/test_util.rs
+++ b/src/mito/src/table/test_util.rs
@@ -23,6 +23,7 @@ use datatypes::vectors::VectorRef;
use log_store::NoopLogStore;
use object_store::services::fs::Builder;
use object_store::ObjectStore;
+use storage::compaction::noop::NoopCompactionScheduler;
use storage::config::EngineConfig as StorageEngineConfig;
use storage::EngineImpl;
use table::engine::{EngineContext, TableEngine};
@@ -127,11 +128,12 @@ pub struct TestEngineComponents {
pub async fn setup_test_engine_and_table() -> TestEngineComponents {
let (dir, object_store) = new_test_object_store("setup_test_engine_and_table").await;
-
+ let compaction_scheduler = Arc::new(NoopCompactionScheduler::default());
let storage_engine = EngineImpl::new(
StorageEngineConfig::default(),
Arc::new(NoopLogStore::default()),
object_store.clone(),
+ compaction_scheduler,
);
let table_engine = MitoEngine::new(
EngineConfig::default(),
diff --git a/src/script/src/manager.rs b/src/script/src/manager.rs
index 11f3a42ccd6c..637e1194a742 100644
--- a/src/script/src/manager.rs
+++ b/src/script/src/manager.rs
@@ -118,6 +118,7 @@ mod tests {
use log_store::raft_engine::log_store::RaftEngineLogStore;
use log_store::LogConfig;
use mito::engine::MitoEngine;
+ use storage::compaction::noop::NoopCompactionScheduler;
use storage::config::EngineConfig as StorageEngineConfig;
use storage::EngineImpl;
use tempdir::TempDir;
@@ -135,12 +136,14 @@ mod tests {
};
let log_store = RaftEngineLogStore::try_new(log_config).await.unwrap();
+ let compaction_scheduler = Arc::new(NoopCompactionScheduler::default());
let mock_engine = Arc::new(DefaultEngine::new(
TableEngineConfig::default(),
EngineImpl::new(
StorageEngineConfig::default(),
Arc::new(log_store),
object_store.clone(),
+ compaction_scheduler,
),
object_store,
));
diff --git a/src/script/src/python/builtins.rs b/src/script/src/python/builtins.rs
index db1f62208d33..71815f175038 100644
--- a/src/script/src/python/builtins.rs
+++ b/src/script/src/python/builtins.rs
@@ -962,7 +962,7 @@ pub(crate) mod greptime_builtin {
Ok(obj) => match py_vec_obj_to_array(&obj, vm, 1){
Ok(v) => if v.len()==1{
Ok(v)
- }else{
+ } else {
Err(vm.new_runtime_error(format!("Expect return's length to be at most one, found to be length of {}.", v.len())))
},
Err(err) => Err(vm
diff --git a/src/storage/src/compaction.rs b/src/storage/src/compaction.rs
index 4dfd973ae55b..87650ad157b0 100644
--- a/src/storage/src/compaction.rs
+++ b/src/storage/src/compaction.rs
@@ -13,9 +13,22 @@
// limitations under the License.
mod dedup_deque;
+pub mod noop;
mod picker;
mod rate_limit;
mod scheduler;
mod strategy;
mod task;
mod writer;
+
+use std::sync::Arc;
+
+pub use picker::{Picker, PickerContext, SimplePicker};
+pub use scheduler::{
+ CompactionRequest, CompactionRequestImpl, CompactionScheduler, CompactionSchedulerConfig,
+ LocalCompactionScheduler,
+};
+pub use task::{CompactionTask, CompactionTaskImpl};
+
+pub type CompactionSchedulerRef<S> =
+ Arc<dyn CompactionScheduler<CompactionRequestImpl<S>> + Send + Sync>;
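An illustrative sketch, not taken from the commit above: the re-exports make the scheduler pluggable, so production code can wire a SimplePicker into a LocalCompactionScheduler while tests pass a NoopCompactionScheduler, both behind the same CompactionSchedulerRef alias. The helper function names below are invented; the types and constructors are the ones shown in this change.

use std::sync::Arc;

use storage::compaction::noop::NoopCompactionScheduler;
use storage::compaction::{
    CompactionSchedulerConfig, CompactionSchedulerRef, LocalCompactionScheduler, SimplePicker,
};
use store_api::logstore::LogStore;

// Real scheduler: default time-window picker, at most 4 in-flight tasks by default.
fn real_scheduler<S: LogStore>() -> CompactionSchedulerRef<S> {
    Arc::new(LocalCompactionScheduler::new(
        CompactionSchedulerConfig::default(),
        SimplePicker::default(),
    ))
}

// Test scheduler: accepts every request and does nothing, as the unit tests in this change do.
fn noop_scheduler<S: LogStore>() -> CompactionSchedulerRef<S> {
    Arc::new(NoopCompactionScheduler::default())
}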
diff --git a/src/storage/src/compaction/noop.rs b/src/storage/src/compaction/noop.rs
new file mode 100644
index 000000000000..3cb24e31a89c
--- /dev/null
+++ b/src/storage/src/compaction/noop.rs
@@ -0,0 +1,79 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::fmt::{Debug, Formatter};
+use std::marker::PhantomData;
+
+use store_api::storage::RegionId;
+
+use crate::compaction::{
+ CompactionRequest, CompactionScheduler, CompactionTask, Picker, PickerContext,
+};
+
+pub struct NoopCompactionScheduler<R> {
+ _phantom_data: PhantomData<R>,
+}
+
+impl<R> Default for NoopCompactionScheduler<R> {
+ fn default() -> Self {
+ Self {
+ _phantom_data: Default::default(),
+ }
+ }
+}
+
+impl<R> Debug for NoopCompactionScheduler<R> {
+ fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+ f.debug_struct("NoopCompactionScheduler<...>").finish()
+ }
+}
+
+#[derive(Default, Debug)]
+pub struct NoopCompactionRequest;
+
+#[derive(Default, Debug)]
+pub struct NoopCompactionPicker;
+
+impl<R, T: CompactionTask> Picker<R, T> for NoopCompactionPicker {
+ fn pick(&self, _ctx: &PickerContext, _req: &R) -> crate::error::Result<Option<T>> {
+ Ok(None)
+ }
+}
+
+#[derive(Debug)]
+pub struct NoopCompactionTask;
+
+#[async_trait::async_trait]
+impl CompactionTask for NoopCompactionTask {
+ async fn run(self) -> crate::error::Result<()> {
+ Ok(())
+ }
+}
+
+impl CompactionRequest for NoopCompactionRequest {
+ fn region_id(&self) -> RegionId {
+ 0
+ }
+}
+
+#[async_trait::async_trait]
+impl<R: CompactionRequest> CompactionScheduler<R> for NoopCompactionScheduler<R> {
+ async fn schedule(&self, _request: R) -> crate::error::Result<bool> {
+ Ok(true)
+ }
+
+ async fn stop(&self) -> crate::error::Result<()> {
+ Ok(())
+ }
+}
diff --git a/src/storage/src/compaction/picker.rs b/src/storage/src/compaction/picker.rs
index 39b3223ed780..51edbb77ec01 100644
--- a/src/storage/src/compaction/picker.rs
+++ b/src/storage/src/compaction/picker.rs
@@ -13,12 +13,13 @@
// limitations under the License.
use std::marker::PhantomData;
+use std::sync::Arc;
use common_telemetry::debug;
use store_api::logstore::LogStore;
use crate::compaction::scheduler::CompactionRequestImpl;
-use crate::compaction::strategy::StrategyRef;
+use crate::compaction::strategy::{SimpleTimeWindowStrategy, StrategyRef};
use crate::compaction::task::{CompactionTask, CompactionTaskImpl};
/// Picker picks input SST files and builds the compaction task.
@@ -30,12 +31,17 @@ pub trait Picker<R, T: CompactionTask>: Send + 'static {
pub struct PickerContext {}
/// L0 -> L1 compaction based on time windows.
-pub(crate) struct SimplePicker<S> {
+pub struct SimplePicker<S> {
strategy: StrategyRef,
_phantom_data: PhantomData<S>,
}
-#[allow(unused)]
+impl<S> Default for SimplePicker<S> {
+ fn default() -> Self {
+ Self::new(Arc::new(SimpleTimeWindowStrategy {}))
+ }
+}
+
impl<S> SimplePicker<S> {
pub fn new(strategy: StrategyRef) -> Self {
Self {
@@ -51,7 +57,7 @@ impl<S: LogStore> Picker<CompactionRequestImpl<S>, CompactionTaskImpl<S>> for Si
ctx: &PickerContext,
req: &CompactionRequestImpl<S>,
) -> crate::error::Result<Option<CompactionTaskImpl<S>>> {
- let levels = &req.levels;
+ let levels = &req.levels();
for level_num in 0..levels.level_num() {
let level = levels.level(level_num as u8);
@@ -67,7 +73,7 @@ impl<S: LogStore> Picker<CompactionRequestImpl<S>, CompactionTaskImpl<S>> for Si
outputs, level_num
);
return Ok(Some(CompactionTaskImpl {
- schema: req.schema.clone(),
+ schema: req.schema(),
sst_layer: req.sst_layer.clone(),
outputs,
writer: req.writer.clone(),
diff --git a/src/storage/src/compaction/rate_limit.rs b/src/storage/src/compaction/rate_limit.rs
index 6b96afe7ee45..5df5744dadf5 100644
--- a/src/storage/src/compaction/rate_limit.rs
+++ b/src/storage/src/compaction/rate_limit.rs
@@ -50,7 +50,6 @@ pub struct MaxInflightTaskLimiter<R> {
_phantom_data: PhantomData<R>,
}
-#[allow(unused)]
impl<R> MaxInflightTaskLimiter<R> {
pub fn new(max_inflight_task: usize) -> Self {
Self {
diff --git a/src/storage/src/compaction/scheduler.rs b/src/storage/src/compaction/scheduler.rs
index 17bbad72710a..d21894d1daa3 100644
--- a/src/storage/src/compaction/scheduler.rs
+++ b/src/storage/src/compaction/scheduler.rs
@@ -12,14 +12,15 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::fmt::{Debug, Formatter};
use std::marker::PhantomData;
use std::sync::{Arc, Mutex, RwLock};
use async_trait::async_trait;
-use common_telemetry::{debug, info};
+use common_telemetry::{debug, error, info};
use snafu::ResultExt;
use store_api::logstore::LogStore;
-use table::metadata::TableId;
+use store_api::storage::RegionId;
use tokio::sync::Notify;
use tokio::task::JoinHandle;
use tokio_util::sync::CancellationToken;
@@ -38,11 +39,9 @@ use crate::sst::AccessLayerRef;
use crate::version::LevelMetasRef;
use crate::wal::Wal;
-/// Table compaction request.
+/// Region compaction request.
pub struct CompactionRequestImpl<S: LogStore> {
- table_id: TableId,
- pub levels: LevelMetasRef,
- pub schema: RegionSchemaRef,
+ pub region_id: RegionId,
pub sst_layer: AccessLayerRef,
pub writer: RegionWriterRef,
pub shared: SharedDataRef,
@@ -50,36 +49,48 @@ pub struct CompactionRequestImpl<S: LogStore> {
pub wal: Wal<S>,
}
+impl<S: LogStore> CompactionRequestImpl<S> {
+ #[inline]
+ pub(crate) fn schema(&self) -> RegionSchemaRef {
+ self.shared.version_control.current().schema().clone()
+ }
+
+ #[inline]
+ pub(crate) fn levels(&self) -> LevelMetasRef {
+ self.shared.version_control.current().ssts().clone()
+ }
+}
+
impl<S: LogStore> CompactionRequest for CompactionRequestImpl<S> {
#[inline]
- fn table_id(&self) -> TableId {
- self.table_id
+ fn region_id(&self) -> RegionId {
+ self.region_id
}
}
pub trait CompactionRequest: Send + Sync + 'static {
- fn table_id(&self) -> TableId;
+ fn region_id(&self) -> RegionId;
}
#[derive(Debug)]
pub struct CompactionSchedulerConfig {
- max_inflight_task: usize,
+ pub max_inflight_task: usize,
}
impl Default for CompactionSchedulerConfig {
fn default() -> Self {
Self {
- max_inflight_task: 16,
+ max_inflight_task: 4,
}
}
}
/// CompactionScheduler defines a set of API to schedule compaction tasks.
#[async_trait]
-pub trait CompactionScheduler<R> {
+pub trait CompactionScheduler<R>: Debug {
/// Schedules a compaction request.
/// Returns true if request is scheduled. Returns false if task queue already
- /// contains the request with same table id.
+ /// contains the request with same region id.
async fn schedule(&self, request: R) -> Result<bool>;
/// Stops compaction scheduler.
@@ -87,14 +98,22 @@ pub trait CompactionScheduler<R> {
}
/// Compaction task scheduler based on local state.
-#[allow(unused)]
pub struct LocalCompactionScheduler<R: CompactionRequest> {
- request_queue: Arc<RwLock<DedupDeque<TableId, R>>>,
+ request_queue: Arc<RwLock<DedupDeque<RegionId, R>>>,
cancel_token: CancellationToken,
task_notifier: Arc<Notify>,
join_handle: Mutex<Option<JoinHandle<()>>>,
}
+impl<R> Debug for LocalCompactionScheduler<R>
+where
+ R: CompactionRequest + Send + Sync,
+{
+ fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+ f.debug_struct("LocalCompactionScheduler<...>").finish()
+ }
+}
+
#[async_trait]
impl<R> CompactionScheduler<R> for LocalCompactionScheduler<R>
where
@@ -103,11 +122,11 @@ where
async fn schedule(&self, request: R) -> Result<bool> {
debug!(
"Schedule request: {}, queue size: {}",
- request.table_id(),
+ request.region_id(),
self.remaining_requests().await
);
let mut queue = self.request_queue.write().unwrap();
- let res = queue.push_back(request.table_id(), request);
+ let res = queue.push_back(request.region_id(), request);
self.task_notifier.notify_one();
Ok(res)
}
@@ -122,7 +141,6 @@ where
}
}
-#[allow(unused)]
impl<R> LocalCompactionScheduler<R>
where
R: CompactionRequest,
@@ -132,7 +150,7 @@ where
T: CompactionTask,
P: Picker<R, T> + Send + Sync,
{
- let request_queue: Arc<RwLock<DedupDeque<TableId, R>>> =
+ let request_queue: Arc<RwLock<DedupDeque<RegionId, R>>> =
Arc::new(RwLock::new(DedupDeque::default()));
let cancel_token = CancellationToken::new();
let task_notifier = Arc::new(Notify::new());
@@ -164,9 +182,8 @@ where
}
}
-#[allow(unused)]
struct CompactionHandler<R, T: CompactionTask, P: Picker<R, T>> {
- req_queue: Arc<RwLock<DedupDeque<TableId, R>>>,
+ req_queue: Arc<RwLock<DedupDeque<RegionId, R>>>,
cancel_token: CancellationToken,
task_notifier: Arc<Notify>,
limiter: Arc<CascadeRateLimiter<R>>,
@@ -174,9 +191,8 @@ struct CompactionHandler<R, T: CompactionTask, P: Picker<R, T>> {
_phantom_data: PhantomData<T>,
}
-#[allow(unused)]
impl<R: CompactionRequest, T: CompactionTask, P: Picker<R, T>> CompactionHandler<R, T, P> {
- /// Runs table compaction requests dispatch loop.
+ /// Runs region compaction requests dispatch loop.
pub async fn run(&self) {
let task_notifier = self.task_notifier.clone();
let limiter = self.limiter.clone();
@@ -186,15 +202,19 @@ impl<R: CompactionRequest, T: CompactionTask, P: Picker<R, T>> CompactionHandler
// poll requests as many as possible until rate limited, and then wait for
// notification (some task's finished).
debug!("Notified, queue size: {:?}", self.req_queue.read().unwrap().len());
- while let Some((table_id, req)) = self.poll_task().await {
+ while let Some((region_id, req)) = self.poll_task().await {
if let Ok(token) = limiter.acquire_token(&req) {
- debug!("Executing compaction request: {}", table_id);
- self.handle_compaction_request(req, token).await;
+ debug!("Executing compaction request: {}", region_id);
+ if let Err(e) = self.handle_compaction_request(req, token).await {
+ error!(e; "Failed to submit compaction task for region: {}", region_id);
+ } else {
+ info!("Submitted region compaction task: {}", region_id);
+ }
} else {
// compaction rate limited, put back to req queue to wait for next
// schedule
- debug!("Put back request {}, queue size: {}", table_id, self.req_queue.read().unwrap().len());
- self.put_back_req(table_id, req).await;
+ debug!("Put back request {}, queue size: {}", region_id, self.req_queue.read().unwrap().len());
+ self.put_back_req(region_id, req).await;
break;
}
}
@@ -208,35 +228,36 @@ impl<R: CompactionRequest, T: CompactionTask, P: Picker<R, T>> CompactionHandler
}
#[inline]
- async fn poll_task(&self) -> Option<(TableId, R)> {
+ async fn poll_task(&self) -> Option<(RegionId, R)> {
let mut queue = self.req_queue.write().unwrap();
queue.pop_front()
}
/// Puts request back to the front of request queue.
#[inline]
- async fn put_back_req(&self, table_id: TableId, req: R) {
+ async fn put_back_req(&self, region_id: RegionId, req: R) {
let mut queue = self.req_queue.write().unwrap();
- queue.push_front(table_id, req);
+ queue.push_front(region_id, req);
}
// Handles compaction request, submit task to bg runtime.
- async fn handle_compaction_request(
- &self,
- mut req: R,
- token: BoxedRateLimitToken,
- ) -> Result<()> {
+ async fn handle_compaction_request(&self, req: R, token: BoxedRateLimitToken) -> Result<()> {
let cloned_notify = self.task_notifier.clone();
- let table_id = req.table_id();
+ let region_id = req.region_id();
let Some(task) = self.build_compaction_task(req).await? else {
- info!("No file needs compaction in table: {}", table_id);
+ info!("No file needs compaction in region: {}", region_id);
return Ok(());
};
+ debug!("Compaction task, region: {}, task: {:?}", region_id, task);
// TODO(hl): we need to keep a track of task handle here to allow task cancellation.
common_runtime::spawn_bg(async move {
- task.run().await; // TODO(hl): handle errors
-
+ if let Err(e) = task.run().await {
+ // TODO(hl): maybe resubmit compaction task on failure?
+ error!(e; "Failed to compact region: {}", region_id);
+ } else {
+ info!("Successfully compacted region: {}", region_id);
+ }
// releases rate limit token
token.try_release();
// notify scheduler to schedule next task when current task finishes.
@@ -246,7 +267,6 @@ impl<R: CompactionRequest, T: CompactionTask, P: Picker<R, T>> CompactionHandler
Ok(())
}
- // TODO(hl): generate compaction task(find SSTs to compact along with the output of compaction)
async fn build_compaction_task(&self, req: R) -> crate::error::Result<Option<T>> {
let ctx = PickerContext {};
self.picker.pick(&ctx, &req)
@@ -333,12 +353,12 @@ mod tests {
#[derive(Default, Debug)]
struct MockRequest {
- table_id: TableId,
+ region_id: RegionId,
}
impl CompactionRequest for MockRequest {
- fn table_id(&self) -> TableId {
- self.table_id
+ fn region_id(&self) -> RegionId {
+ self.region_id
}
}
@@ -356,12 +376,12 @@ mod tests {
);
scheduler
- .schedule(MockRequest { table_id: 1 })
+ .schedule(MockRequest { region_id: 1 })
.await
.unwrap();
scheduler
- .schedule(MockRequest { table_id: 2 })
+ .schedule(MockRequest { region_id: 2 })
.await
.unwrap();
@@ -390,7 +410,7 @@ mod tests {
for i in 0..task_size {
scheduler
.schedule(MockRequest {
- table_id: i as TableId,
+ region_id: i as RegionId,
})
.await
.unwrap();
@@ -420,7 +440,7 @@ mod tests {
for i in 0..task_size / 2 {
scheduler
.schedule(MockRequest {
- table_id: i as TableId,
+ region_id: i as RegionId,
})
.await
.unwrap();
@@ -430,7 +450,7 @@ mod tests {
for i in task_size / 2..task_size {
scheduler
.schedule(MockRequest {
- table_id: i as TableId,
+ region_id: i as RegionId,
})
.await
.unwrap();
@@ -453,7 +473,7 @@ mod tests {
let mut scheduled_task = 0;
for _ in 0..10 {
if scheduler
- .schedule(MockRequest { table_id: 1 })
+ .schedule(MockRequest { region_id: 1 })
.await
.unwrap()
{
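An illustrative sketch, not from the diff: the dedup-by-region contract described in the CompactionScheduler docs is what keeps repeated flushes of one region from queueing duplicate compactions. It reuses the MockRequest type from the tests above; the function name is invented.

async fn dedup_by_region(scheduler: &LocalCompactionScheduler<MockRequest>) {
    // The first request for region 1 is accepted into the queue.
    assert!(scheduler.schedule(MockRequest { region_id: 1 }).await.unwrap());
    // While region 1 is still queued, a second request for it returns false
    // instead of piling up another entry.
    let _deduped = scheduler.schedule(MockRequest { region_id: 1 }).await.unwrap();
    // A request for a different region is queued independently.
    assert!(scheduler.schedule(MockRequest { region_id: 2 }).await.unwrap());
}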
diff --git a/src/storage/src/compaction/strategy.rs b/src/storage/src/compaction/strategy.rs
index b1b11e422738..ff3e7fc4cd53 100644
--- a/src/storage/src/compaction/strategy.rs
+++ b/src/storage/src/compaction/strategy.rs
@@ -43,13 +43,14 @@ impl Strategy for SimpleTimeWindowStrategy {
return vec![];
}
let files = find_compactable_files(level);
+ debug!("Compactable files found: {:?}", files);
if files.is_empty() {
return vec![];
}
let time_bucket = infer_time_bucket(&files);
let buckets = calculate_time_buckets(time_bucket, &files);
- debug!("File buckets: {:?}", buckets);
+ debug!("File bucket:{}, file groups: {:?}", time_bucket, buckets);
buckets
.into_iter()
.map(|(bound, files)| CompactionOutput {
@@ -89,12 +90,7 @@ fn calculate_time_buckets(bucket_sec: i64, files: &[FileHandle]) -> HashMap<i64,
.push(file.clone());
}
} else {
- // Files without timestamp range is assign to a special bucket `i64::MAX`,
- // so that they can be compacted together.
- buckets
- .entry(i64::MAX)
- .or_insert_with(Vec::new)
- .push(file.clone());
+ warn!("Found corrupted SST without timestamp bounds: {:?}", file);
}
}
buckets
@@ -303,19 +299,7 @@ mod tests {
&[(0, &["a"]), (10, &["a"])],
);
- // files without timestamp are align to a special bucket: i64::MAX
- check_bucket_calculation(
- 10,
- vec![FileHandle::new(FileMeta {
- file_name: "a".to_string(),
- time_range: None,
- level: 0,
- })],
- &[(i64::MAX, &["a"])],
- );
-
// file with a large time range
-
let expected = (0..(TIME_BUCKETS[4] / TIME_BUCKETS[0]))
.into_iter()
.map(|b| (b * TIME_BUCKETS[0], &["a"] as _))
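A small worked example, not part of the diff: under the bucketing exercised by check_bucket_calculation above, a file is assigned to every bucket its time range overlaps, keyed by the bucket-aligned lower bound, and files with no time range are now only logged as corrupted instead of being grouped under i64::MAX. Roughly, with a 10-second bucket:

// bucket_sec = 10
//   file "a" covering [0s, 9s]    -> bucket 0
//   file "b" covering [5s, 15s]   -> buckets 0 and 10 (straddles the boundary)
//   file "c" without a time range -> skipped with a warn! after this change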
diff --git a/src/storage/src/compaction/task.rs b/src/storage/src/compaction/task.rs
index 4aaafe2bc962..94322cadece1 100644
--- a/src/storage/src/compaction/task.rs
+++ b/src/storage/src/compaction/task.rs
@@ -13,9 +13,9 @@
// limitations under the License.
use std::collections::HashSet;
+use std::fmt::{Debug, Formatter};
use common_telemetry::{error, info};
-use object_store::ObjectStore;
use store_api::logstore::LogStore;
use uuid::Uuid;
@@ -25,17 +25,15 @@ use crate::manifest::action::RegionEdit;
use crate::manifest::region::RegionManifest;
use crate::region::{RegionWriterRef, SharedDataRef};
use crate::schema::RegionSchemaRef;
-use crate::sst::parquet::{ParquetWriter, Source};
-use crate::sst::{AccessLayerRef, FileHandle, FileMeta, Level, SstInfo, WriteOptions};
+use crate::sst::{AccessLayerRef, FileHandle, FileMeta, Level, Source, SstInfo, WriteOptions};
use crate::wal::Wal;
#[async_trait::async_trait]
-pub trait CompactionTask: Send + Sync + 'static {
+pub trait CompactionTask: Debug + Send + Sync + 'static {
async fn run(self) -> Result<()>;
}
-#[allow(unused)]
-pub(crate) struct CompactionTaskImpl<S: LogStore> {
+pub struct CompactionTaskImpl<S: LogStore> {
pub schema: RegionSchemaRef,
pub sst_layer: AccessLayerRef,
pub outputs: Vec<CompactionOutput>,
@@ -45,6 +43,14 @@ pub(crate) struct CompactionTaskImpl<S: LogStore> {
pub manifest: RegionManifest,
}
+impl<S: LogStore> Debug for CompactionTaskImpl<S> {
+ fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+ f.debug_struct("CompactionTaskImpl")
+ .field("region_name", &self.shared_data.name())
+ .finish()
+ }
+}
+
impl<S: LogStore> Drop for CompactionTaskImpl<S> {
fn drop(&mut self) {
self.mark_files_compacting(false);
@@ -60,7 +66,6 @@ impl<S: LogStore> CompactionTaskImpl<S> {
for output in self.outputs.drain(..) {
let schema = self.schema.clone();
let sst_layer = self.sst_layer.clone();
- let object_store = self.sst_layer.object_store();
compacted_inputs.extend(output.inputs.iter().map(|f| FileMeta {
file_name: f.file_name().to_string(),
time_range: *f.time_range(),
@@ -69,7 +74,7 @@ impl<S: LogStore> CompactionTaskImpl<S> {
// TODO(hl): Maybe spawn to runtime to exploit in-job parallelism.
futs.push(async move {
- match output.build(schema, sst_layer, object_store).await {
+ match output.build(schema, sst_layer).await {
Ok(meta) => Ok(meta),
Err(e) => Err(e),
}
@@ -137,17 +142,9 @@ impl<S: LogStore> CompactionTask for CompactionTaskImpl<S> {
}
}
-#[allow(unused)]
-pub(crate) struct CompactionInput {
- input_level: u8,
- output_level: u8,
- file: FileHandle,
-}
-
/// Many-to-many compaction can be decomposed to a many-to-one compaction from level n to level n+1
/// and a many-to-one compaction from level n+1 to level n+1.
#[derive(Debug)]
-#[allow(unused)]
pub struct CompactionOutput {
/// Compaction output file level.
pub(crate) output_level: Level,
@@ -160,15 +157,10 @@ pub struct CompactionOutput {
}
impl CompactionOutput {
- async fn build(
- &self,
- schema: RegionSchemaRef,
- sst_layer: AccessLayerRef,
- object_store: ObjectStore,
- ) -> Result<FileMeta> {
+ async fn build(&self, schema: RegionSchemaRef, sst_layer: AccessLayerRef) -> Result<FileMeta> {
let reader = build_sst_reader(
schema,
- sst_layer,
+ sst_layer.clone(),
&self.inputs,
self.bucket_bound,
self.bucket_bound + self.bucket,
@@ -176,10 +168,10 @@ impl CompactionOutput {
.await?;
let output_file_name = format!("{}.parquet", Uuid::new_v4().hyphenated());
let opts = WriteOptions {};
- let SstInfo { time_range } =
- ParquetWriter::new(&output_file_name, Source::Reader(reader), object_store)
- .write_sst(&opts)
- .await?;
+
+ let SstInfo { time_range } = sst_layer
+ .write_sst(&output_file_name, Source::Reader(reader), &opts)
+ .await?;
Ok(FileMeta {
file_name: output_file_name,
@@ -197,10 +189,18 @@ pub mod tests {
use crate::compaction::task::CompactionTask;
pub type CallbackRef = Arc<dyn Fn() + Send + Sync>;
+
pub struct NoopCompactionTask {
pub cbs: Vec<CallbackRef>,
}
+ impl Debug for NoopCompactionTask {
+ fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+ f.debug_struct("storage::compaction::task::tests::NoopCompactionTask")
+ .finish()
+ }
+ }
+
impl NoopCompactionTask {
pub fn new(cbs: Vec<CallbackRef>) -> Self {
Self { cbs }
diff --git a/src/storage/src/compaction/writer.rs b/src/storage/src/compaction/writer.rs
index e19877444ac4..880a13f2f3a2 100644
--- a/src/storage/src/compaction/writer.rs
+++ b/src/storage/src/compaction/writer.rs
@@ -102,8 +102,8 @@ mod tests {
};
use crate::metadata::RegionMetadata;
use crate::sst;
- use crate::sst::parquet::{ParquetWriter, Source};
- use crate::sst::{FileMeta, FsAccessLayer, SstInfo, WriteOptions};
+ use crate::sst::parquet::ParquetWriter;
+ use crate::sst::{FileMeta, FsAccessLayer, Source, SstInfo, WriteOptions};
use crate::test_util::descriptor_util::RegionDescBuilder;
fn schema_for_test() -> RegionSchemaRef {
diff --git a/src/storage/src/config.rs b/src/storage/src/config.rs
index 1f6bf31efc3f..26f2741461a4 100644
--- a/src/storage/src/config.rs
+++ b/src/storage/src/config.rs
@@ -14,5 +14,13 @@
//! storage engine config
-#[derive(Debug, Default, Clone)]
-pub struct EngineConfig {}
+#[derive(Debug, Clone)]
+pub struct EngineConfig {
+ pub max_files_in_l0: usize,
+}
+
+impl Default for EngineConfig {
+ fn default() -> Self {
+ Self { max_files_in_l0: 8 }
+ }
+}
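A minimal sketch, not from the commit: the new field is a plain public knob, so callers can override the level-0 threshold when building the engine config.

use storage::config::EngineConfig;

#[test]
fn l0_threshold_config() {
    // The default allows up to 8 SSTs in level 0 before compaction is considered.
    assert_eq!(EngineConfig::default().max_files_in_l0, 8);

    // A larger threshold trades more read amplification for fewer compaction runs.
    let _relaxed = EngineConfig { max_files_in_l0: 16 };
}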
diff --git a/src/storage/src/engine.rs b/src/storage/src/engine.rs
index 8664facc9bdf..c1dbf8968f6b 100644
--- a/src/storage/src/engine.rs
+++ b/src/storage/src/engine.rs
@@ -25,6 +25,7 @@ use store_api::storage::{
};
use crate::background::JobPoolImpl;
+use crate::compaction::CompactionSchedulerRef;
use crate::config::EngineConfig;
use crate::error::{self, Error, Result};
use crate::flush::{FlushSchedulerImpl, FlushSchedulerRef, FlushStrategyRef, SizeBasedStrategy};
@@ -84,9 +85,19 @@ impl<S: LogStore> StorageEngine for EngineImpl<S> {
}
impl<S: LogStore> EngineImpl<S> {
- pub fn new(config: EngineConfig, log_store: Arc<S>, object_store: ObjectStore) -> Self {
+ pub fn new(
+ config: EngineConfig,
+ log_store: Arc<S>,
+ object_store: ObjectStore,
+ compaction_scheduler: CompactionSchedulerRef<S>,
+ ) -> Self {
Self {
- inner: Arc::new(EngineInner::new(config, log_store, object_store)),
+ inner: Arc::new(EngineInner::new(
+ config,
+ log_store,
+ object_store,
+ compaction_scheduler,
+ )),
}
}
}
@@ -210,13 +221,19 @@ struct EngineInner<S: LogStore> {
memtable_builder: MemtableBuilderRef,
flush_scheduler: FlushSchedulerRef,
flush_strategy: FlushStrategyRef,
+ compaction_scheduler: CompactionSchedulerRef<S>,
+ config: Arc<EngineConfig>,
}
impl<S: LogStore> EngineInner<S> {
- pub fn new(_config: EngineConfig, log_store: Arc<S>, object_store: ObjectStore) -> Self {
+ pub fn new(
+ config: EngineConfig,
+ log_store: Arc<S>,
+ object_store: ObjectStore,
+ compaction_scheduler: CompactionSchedulerRef<S>,
+ ) -> Self {
let job_pool = Arc::new(JobPoolImpl {});
let flush_scheduler = Arc::new(FlushSchedulerImpl::new(job_pool));
-
Self {
object_store,
log_store,
@@ -224,6 +241,8 @@ impl<S: LogStore> EngineInner<S> {
memtable_builder: Arc::new(DefaultMemtableBuilder::default()),
flush_scheduler,
flush_strategy: Arc::new(SizeBasedStrategy::default()),
+ compaction_scheduler,
+ config: Arc::new(config),
}
}
@@ -320,6 +339,8 @@ impl<S: LogStore> EngineInner<S> {
memtable_builder: self.memtable_builder.clone(),
flush_scheduler: self.flush_scheduler.clone(),
flush_strategy: self.flush_strategy.clone(),
+ compaction_scheduler: self.compaction_scheduler.clone(),
+ engine_config: self.config.clone(),
}
}
}
@@ -333,6 +354,7 @@ mod tests {
use tempdir::TempDir;
use super::*;
+ use crate::compaction::noop::NoopCompactionScheduler;
use crate::test_util::descriptor_util::RegionDescBuilder;
#[tokio::test]
@@ -347,7 +369,14 @@ mod tests {
let config = EngineConfig::default();
- let engine = EngineImpl::new(config, Arc::new(log_store), object_store);
+ let compaction_scheduler = Arc::new(NoopCompactionScheduler::default());
+
+ let engine = EngineImpl::new(
+ config,
+ Arc::new(log_store),
+ object_store,
+ compaction_scheduler,
+ );
let region_name = "region-0";
let desc = RegionDescBuilder::new(region_name)
diff --git a/src/storage/src/flush.rs b/src/storage/src/flush.rs
index e61bc244092e..278a3aa4dc4e 100644
--- a/src/storage/src/flush.rs
+++ b/src/storage/src/flush.rs
@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::future::Future;
+use std::pin::Pin;
use std::sync::Arc;
use async_trait::async_trait;
@@ -27,7 +29,7 @@ use crate::manifest::action::*;
use crate::manifest::region::RegionManifest;
use crate::memtable::{IterContext, MemtableId, MemtableRef};
use crate::region::{RegionWriterRef, SharedDataRef};
-use crate::sst::{AccessLayerRef, FileMeta, SstInfo, WriteOptions};
+use crate::sst::{AccessLayerRef, FileMeta, Source, SstInfo, WriteOptions};
use crate::wal::Wal;
/// Default write buffer size (32M).
@@ -142,6 +144,8 @@ impl FlushScheduler for FlushSchedulerImpl {
pub type FlushSchedulerRef = Arc<dyn FlushScheduler>;
+pub type FlushCallback = Pin<Box<dyn Future<Output = ()> + Send + 'static>>;
+
pub struct FlushJob<S: LogStore> {
/// Max memtable id in these memtables,
/// used to remove immutable memtables in current version.
@@ -160,10 +164,12 @@ pub struct FlushJob<S: LogStore> {
pub wal: Wal<S>,
/// Region manifest service, used to persist metadata.
pub manifest: RegionManifest,
+ /// Callbacks that get invoked on flush success.
+ pub on_success: Option<FlushCallback>,
}
impl<S: LogStore> FlushJob<S> {
- async fn write_memtables_to_layer(&self, ctx: &Context) -> Result<Vec<FileMeta>> {
+ async fn write_memtables_to_layer(&mut self, ctx: &Context) -> Result<Vec<FileMeta>> {
if ctx.is_cancelled() {
return CancelledSnafu {}.fail();
}
@@ -184,10 +190,11 @@ impl<S: LogStore> FlushJob<S> {
let file_name = Self::generate_sst_file_name();
// TODO(hl): Check if random file name already exists in meta.
let iter = m.iter(&iter_ctx)?;
+ let sst_layer = self.sst_layer.clone();
+
futures.push(async move {
- let SstInfo { time_range } = self
- .sst_layer
- .write_sst(&file_name, iter, &WriteOptions::default())
+ let SstInfo { time_range } = sst_layer
+ .write_sst(&file_name, Source::Iter(iter), &WriteOptions::default())
.await?;
Ok(FileMeta {
@@ -209,7 +216,7 @@ impl<S: LogStore> FlushJob<S> {
Ok(metas)
}
- async fn write_manifest_and_apply(&self, file_metas: &[FileMeta]) -> Result<()> {
+ async fn write_manifest_and_apply(&mut self, file_metas: &[FileMeta]) -> Result<()> {
let edit = RegionEdit {
region_version: self.shared.version_control.metadata().version(),
flushed_sequence: Some(self.flush_sequence),
@@ -241,6 +248,10 @@ impl<S: LogStore> Job for FlushJob<S> {
async fn run(&mut self, ctx: &Context) -> Result<()> {
let file_metas = self.write_memtables_to_layer(ctx).await?;
self.write_manifest_and_apply(&file_metas).await?;
+
+ if let Some(cb) = self.on_success.take() {
+ cb.await;
+ }
Ok(())
}
}
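A sketch, not part of the diff: a FlushCallback is just a pinned, boxed future that the flush job awaits after a successful flush. The region writer builds one that schedules a compaction, but any post-flush hook has the same shape; the hook body and name below are invented.

use crate::flush::FlushCallback;

fn post_flush_hook() -> FlushCallback {
    Box::pin(async move {
        // e.g. schedule a compaction request or bump a metric once the
        // manifest edit for this flush has been applied.
    })
}
// The job then carries it: FlushJob { on_success: Some(post_flush_hook()), .. }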
diff --git a/src/storage/src/lib.rs b/src/storage/src/lib.rs
index cc6c592e471a..78c573217dc1 100644
--- a/src/storage/src/lib.rs
+++ b/src/storage/src/lib.rs
@@ -17,7 +17,7 @@
mod background;
mod chunk;
pub mod codec;
-mod compaction;
+pub mod compaction;
pub mod config;
mod engine;
pub mod error;
diff --git a/src/storage/src/region.rs b/src/storage/src/region.rs
index 05e66352fa33..558674ce7cbb 100644
--- a/src/storage/src/region.rs
+++ b/src/storage/src/region.rs
@@ -28,6 +28,8 @@ use store_api::storage::{
WriteResponse,
};
+use crate::compaction::CompactionSchedulerRef;
+use crate::config::EngineConfig;
use crate::error::{self, Error, Result};
use crate::flush::{FlushSchedulerRef, FlushStrategyRef};
use crate::manifest::action::{
@@ -107,13 +109,15 @@ impl<S: LogStore> Region for RegionImpl<S> {
///
/// Contains all necessary storage related components needed by the region, such as logstore,
/// manifest, memtable builder.
-pub struct StoreConfig<S> {
+pub struct StoreConfig<S: LogStore> {
pub log_store: Arc<S>,
pub sst_layer: AccessLayerRef,
pub manifest: RegionManifest,
pub memtable_builder: MemtableBuilderRef,
pub flush_scheduler: FlushSchedulerRef,
pub flush_strategy: FlushStrategyRef,
+ pub compaction_scheduler: CompactionSchedulerRef<S>,
+ pub engine_config: Arc<EngineConfig>,
}
pub type RecoverdMetadata = (SequenceNumber, (ManifestVersion, RawRegionMetadata));
@@ -163,10 +167,14 @@ impl<S: LogStore> RegionImpl<S> {
name,
version_control: Arc::new(version_control),
}),
- writer: Arc::new(RegionWriter::new(store_config.memtable_builder)),
+ writer: Arc::new(RegionWriter::new(
+ store_config.memtable_builder,
+ store_config.engine_config.clone(),
+ )),
wal,
flush_strategy: store_config.flush_strategy,
flush_scheduler: store_config.flush_scheduler,
+ compaction_scheduler: store_config.compaction_scheduler,
sst_layer: store_config.sst_layer,
manifest: store_config.manifest,
});
@@ -236,11 +244,15 @@ impl<S: LogStore> RegionImpl<S> {
version_control,
});
- let writer = Arc::new(RegionWriter::new(store_config.memtable_builder));
+ let writer = Arc::new(RegionWriter::new(
+ store_config.memtable_builder,
+ store_config.engine_config.clone(),
+ ));
let writer_ctx = WriterContext {
shared: &shared,
flush_strategy: &store_config.flush_strategy,
flush_scheduler: &store_config.flush_scheduler,
+ compaction_scheduler: &store_config.compaction_scheduler,
sst_layer: &store_config.sst_layer,
wal: &wal,
writer: &writer,
@@ -257,6 +269,7 @@ impl<S: LogStore> RegionImpl<S> {
wal,
flush_strategy: store_config.flush_strategy,
flush_scheduler: store_config.flush_scheduler,
+ compaction_scheduler: store_config.compaction_scheduler,
sst_layer: store_config.sst_layer,
manifest: store_config.manifest,
});
@@ -387,6 +400,7 @@ impl<S: LogStore> RegionImpl<S> {
shared: &inner.shared,
flush_strategy: &inner.flush_strategy,
flush_scheduler: &inner.flush_scheduler,
+ compaction_scheduler: &inner.compaction_scheduler,
sst_layer: &inner.sst_layer,
wal: &inner.wal,
writer: &inner.writer,
@@ -429,6 +443,7 @@ struct RegionInner<S: LogStore> {
wal: Wal<S>,
flush_strategy: FlushStrategyRef,
flush_scheduler: FlushSchedulerRef,
+ compaction_scheduler: CompactionSchedulerRef<S>,
sst_layer: AccessLayerRef,
manifest: RegionManifest,
}
@@ -467,6 +482,7 @@ impl<S: LogStore> RegionInner<S> {
shared: &self.shared,
flush_strategy: &self.flush_strategy,
flush_scheduler: &self.flush_scheduler,
+ compaction_scheduler: &self.compaction_scheduler,
sst_layer: &self.sst_layer,
wal: &self.wal,
writer: &self.writer,
diff --git a/src/storage/src/region/writer.rs b/src/storage/src/region/writer.rs
index 24607aee83e0..678ec20b5f16 100644
--- a/src/storage/src/region/writer.rs
+++ b/src/storage/src/region/writer.rs
@@ -14,7 +14,8 @@
use std::sync::Arc;
-use common_telemetry::logging;
+use common_telemetry::tracing::log::info;
+use common_telemetry::{error, logging};
use futures::TryStreamExt;
use snafu::ResultExt;
use store_api::logstore::LogStore;
@@ -23,8 +24,10 @@ use store_api::storage::{AlterRequest, SequenceNumber, WriteContext, WriteRespon
use tokio::sync::Mutex;
use crate::background::JobHandle;
+use crate::compaction::{CompactionRequestImpl, CompactionSchedulerRef};
+use crate::config::EngineConfig;
use crate::error::{self, Result};
-use crate::flush::{FlushJob, FlushSchedulerRef, FlushStrategyRef};
+use crate::flush::{FlushCallback, FlushJob, FlushSchedulerRef, FlushStrategyRef};
use crate::manifest::action::{
RawRegionMetadata, RegionChange, RegionEdit, RegionMetaAction, RegionMetaActionList,
};
@@ -34,7 +37,7 @@ use crate::proto::wal::WalHeader;
use crate::region::{RecoverdMetadata, RecoveredMetadataMap, RegionManifest, SharedDataRef};
use crate::schema::compat::CompatWrite;
use crate::sst::AccessLayerRef;
-use crate::version::{VersionControl, VersionControlRef, VersionEdit};
+use crate::version::{VersionControl, VersionControlRef, VersionEdit, VersionRef};
use crate::wal::Wal;
use crate::write_batch::WriteBatch;
@@ -56,9 +59,9 @@ pub struct RegionWriter {
}
impl RegionWriter {
- pub fn new(memtable_builder: MemtableBuilderRef) -> RegionWriter {
+ pub fn new(memtable_builder: MemtableBuilderRef, config: Arc<EngineConfig>) -> RegionWriter {
RegionWriter {
- inner: Mutex::new(WriterInner::new(memtable_builder)),
+ inner: Mutex::new(WriterInner::new(memtable_builder, config)),
version_mutex: Mutex::new(()),
}
}
@@ -241,6 +244,7 @@ pub struct WriterContext<'a, S: LogStore> {
pub shared: &'a SharedDataRef,
pub flush_strategy: &'a FlushStrategyRef,
pub flush_scheduler: &'a FlushSchedulerRef,
+ pub compaction_scheduler: &'a CompactionSchedulerRef<S>,
pub sst_layer: &'a AccessLayerRef,
pub wal: &'a Wal<S>,
pub writer: &'a RegionWriterRef,
@@ -271,13 +275,15 @@ impl<'a, S: LogStore> AlterContext<'a, S> {
struct WriterInner {
memtable_builder: MemtableBuilderRef,
flush_handle: Option<JobHandle>,
+ engine_config: Arc<EngineConfig>,
}
impl WriterInner {
- fn new(memtable_builder: MemtableBuilderRef) -> WriterInner {
+ fn new(memtable_builder: MemtableBuilderRef, engine_config: Arc<EngineConfig>) -> WriterInner {
WriterInner {
memtable_builder,
flush_handle: None,
+ engine_config,
}
}
@@ -541,6 +547,8 @@ impl WriterInner {
return Ok(());
}
+ let cb = Self::build_flush_callback(¤t_version, ctx, &self.engine_config);
+
let flush_req = FlushJob {
max_memtable_id: max_memtable_id.unwrap(),
memtables: mem_to_flush,
@@ -551,6 +559,7 @@ impl WriterInner {
writer: ctx.writer.clone(),
wal: ctx.wal.clone(),
manifest: ctx.manifest.clone(),
+ on_success: cb,
};
let flush_handle = ctx
@@ -561,4 +570,51 @@ impl WriterInner {
Ok(())
}
+
+ fn build_flush_callback<S: LogStore>(
+ version: &VersionRef,
+ ctx: &WriterContext<S>,
+ config: &Arc<EngineConfig>,
+ ) -> Option<FlushCallback> {
+ let region_id = version.metadata().id();
+ let compaction_request = CompactionRequestImpl {
+ region_id,
+ sst_layer: ctx.sst_layer.clone(),
+ writer: ctx.writer.clone(),
+ shared: ctx.shared.clone(),
+ manifest: ctx.manifest.clone(),
+ wal: ctx.wal.clone(),
+ };
+ let compaction_scheduler = ctx.compaction_scheduler.clone();
+ let shared_data = ctx.shared.clone();
+ let max_files_in_l0 = config.max_files_in_l0;
+ let schedule_compaction_cb = Box::pin(async move {
+ let level0_file_num = shared_data
+ .version_control
+ .current()
+ .ssts()
+ .level(0)
+ .file_num();
+
+ if level0_file_num <= max_files_in_l0 {
+ info!(
+ "No enough SST files in level 0 (threshold: {}), skip compaction",
+ max_files_in_l0
+ );
+ return;
+ }
+ match compaction_scheduler.schedule(compaction_request).await {
+ Ok(scheduled) => {
+ info!(
+ "Schedule region {} compaction request result: {}",
+ region_id, scheduled
+ )
+ }
+ Err(e) => {
+ error!(e;"Failed to schedule region compaction request {}", region_id);
+ }
+ }
+ });
+ Some(schedule_compaction_cb)
+ }
}
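Not part of the diff, just the arithmetic spelled out: the flush callback above gates compaction on the level-0 file count, so with the default EngineConfig (max_files_in_l0 = 8) a region only gets a compaction request once the ninth level-0 SST lands. The predicate, pulled out purely for illustration:

/// Mirrors the early-return in build_flush_callback: compact only when L0 exceeds the threshold.
fn should_schedule_compaction(level0_file_num: usize, max_files_in_l0: usize) -> bool {
    level0_file_num > max_files_in_l0
}
// should_schedule_compaction(8, 8) == false  (skip, logged as not enough SST files)
// should_schedule_compaction(9, 8) == true   (a CompactionRequestImpl is scheduled)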
diff --git a/src/storage/src/sst.rs b/src/storage/src/sst.rs
index 1caa0a4de48b..bd667ebb683e 100644
--- a/src/storage/src/sst.rs
+++ b/src/storage/src/sst.rs
@@ -23,13 +23,15 @@ use common_time::range::TimestampRange;
use common_time::Timestamp;
use object_store::{util, ObjectStore};
use serde::{Deserialize, Serialize};
+use store_api::storage::ChunkReader;
use table::predicate::Predicate;
+use crate::chunk::ChunkReaderImpl;
use crate::error::Result;
use crate::memtable::BoxedBatchIterator;
-use crate::read::BoxedBatchReader;
+use crate::read::{Batch, BoxedBatchReader};
use crate::schema::ProjectedSchemaRef;
-use crate::sst::parquet::{ParquetReader, ParquetWriter, Source};
+use crate::sst::parquet::{ParquetReader, ParquetWriter};
/// Maximum level of SSTs.
pub const MAX_LEVEL: u8 = 2;
@@ -111,7 +113,7 @@ pub struct LevelMeta {
}
impl LevelMeta {
- pub fn new_empty(level: Level) -> Self {
+ pub fn new(level: Level) -> Self {
Self {
level,
files: HashMap::new(),
@@ -132,6 +134,12 @@ impl LevelMeta {
self.level
}
+ /// Returns number of SST files in level.
+ #[inline]
+ pub fn file_num(&self) -> usize {
+ self.files.len()
+ }
+
pub fn files(&self) -> impl Iterator<Item = &FileHandle> {
self.files.values()
}
@@ -140,7 +148,7 @@ impl LevelMeta {
fn new_level_meta_vec() -> LevelMetaVec {
(0u8..MAX_LEVEL)
.into_iter()
- .map(LevelMeta::new_empty)
+ .map(LevelMeta::new)
.collect::<Vec<_>>()
.try_into()
.unwrap() // safety: LevelMetaVec is a fixed length array with length MAX_LEVEL
@@ -243,7 +251,7 @@ pub trait AccessLayer: Send + Sync + std::fmt::Debug {
async fn write_sst(
&self,
file_name: &str,
- iter: BoxedBatchIterator,
+ source: Source,
opts: &WriteOptions,
) -> Result<SstInfo>;
@@ -256,6 +264,33 @@ pub trait AccessLayer: Send + Sync + std::fmt::Debug {
pub type AccessLayerRef = Arc<dyn AccessLayer>;
+/// Parquet writer data source.
+pub enum Source {
+ /// Writes rows from memtable to parquet
+ Iter(BoxedBatchIterator),
+ /// Writes rows from ChunkReaderImpl (maybe a set of SSTs) to parquet.
+ Reader(ChunkReaderImpl),
+}
+
+impl Source {
+ async fn next_batch(&mut self) -> Result<Option<Batch>> {
+ match self {
+ Source::Iter(iter) => iter.next().transpose(),
+ Source::Reader(reader) => reader
+ .next_chunk()
+ .await
+ .map(|p| p.map(|chunk| Batch::new(chunk.columns))),
+ }
+ }
+
+ fn projected_schema(&self) -> ProjectedSchemaRef {
+ match self {
+ Source::Iter(iter) => iter.schema(),
+ Source::Reader(reader) => reader.projected_schema().clone(),
+ }
+ }
+}
+
/// Sst access layer based on local file system.
#[derive(Debug)]
pub struct FsAccessLayer {
@@ -282,13 +317,13 @@ impl AccessLayer for FsAccessLayer {
async fn write_sst(
&self,
file_name: &str,
- iter: BoxedBatchIterator,
+ source: Source,
opts: &WriteOptions,
) -> Result<SstInfo> {
// Now we only supports parquet format. We may allow caller to specific SST format in
// WriteOptions in the future.
let file_path = self.sst_file_path(file_name);
- let writer = ParquetWriter::new(&file_path, Source::Iter(iter), self.object_store.clone());
+ let writer = ParquetWriter::new(&file_path, source, self.object_store.clone());
writer.write_sst(opts).await
}
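A hedged sketch, not from the commit: with Source moved behind the access layer, both producers call the same entry point — flush passes Source::Iter over a memtable iterator, compaction passes Source::Reader over a merged chunk reader. The helper name below is invented; the write_sst signature is the one in this diff.

use crate::error::Result;
use crate::sst::{AccessLayerRef, Source, SstInfo, WriteOptions};

async fn write_one_sst(layer: &AccessLayerRef, file_name: &str, source: Source) -> Result<SstInfo> {
    // Source::Iter(..) when flushing memtables, Source::Reader(..) when compacting SSTs.
    layer.write_sst(file_name, source, &WriteOptions {}).await
}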
diff --git a/src/storage/src/sst/parquet.rs b/src/storage/src/sst/parquet.rs
index 972894e40469..e106145c82ea 100644
--- a/src/storage/src/sst/parquet.rs
+++ b/src/storage/src/sst/parquet.rs
@@ -45,21 +45,18 @@ use parquet::file::properties::WriterProperties;
use parquet::format::FileMetaData;
use parquet::schema::types::SchemaDescriptor;
use snafu::{OptionExt, ResultExt};
-use store_api::storage::ChunkReader;
use table::predicate::Predicate;
use tokio::io::BufReader;
-use crate::chunk::ChunkReaderImpl;
use crate::error::{
self, DecodeParquetTimeRangeSnafu, NewRecordBatchSnafu, ReadObjectSnafu, ReadParquetSnafu,
Result, WriteObjectSnafu, WriteParquetSnafu,
};
-use crate::memtable::BoxedBatchIterator;
use crate::read::{Batch, BatchReader};
use crate::schema::compat::ReadAdapter;
use crate::schema::{ProjectedSchemaRef, StoreSchema, StoreSchemaRef};
use crate::sst;
-use crate::sst::SstInfo;
+use crate::sst::{Source, SstInfo};
/// Parquet sst writer.
pub struct ParquetWriter<'a> {
file_path: &'a str,
@@ -321,7 +318,6 @@ impl<'a> ParquetReader<'a> {
// checks if converting time range unit into ts col unit will result into rounding error.
if time_unit_lossy(&self.time_range, ts_col_unit) {
let filter = RowFilter::new(vec![Box::new(PlainTimestampRowFilter::new(
- ts_col_idx,
self.time_range,
projection,
))]);
@@ -343,15 +339,9 @@ impl<'a> ParquetReader<'a> {
.and_then(|s| s.convert_to(ts_col_unit))
.map(|t| t.value()),
) {
- Box::new(FastTimestampRowFilter::new(
- ts_col_idx, projection, lower, upper,
- )) as _
+ Box::new(FastTimestampRowFilter::new(projection, lower, upper)) as _
} else {
- Box::new(PlainTimestampRowFilter::new(
- ts_col_idx,
- self.time_range,
- projection,
- )) as _
+ Box::new(PlainTimestampRowFilter::new(self.time_range, projection)) as _
};
let filter = RowFilter::new(vec![row_filter]);
Some(filter)
@@ -372,21 +362,14 @@ fn time_unit_lossy(range: &TimestampRange, ts_col_unit: TimeUnit) -> bool {
/// `FastTimestampRowFilter` is used to filter rows within given timestamp range when reading
/// row groups from parquet files, while avoids fetching all columns from SSTs file.
struct FastTimestampRowFilter {
- timestamp_index: usize,
lower_bound: i64,
upper_bound: i64,
projection: ProjectionMask,
}
impl FastTimestampRowFilter {
- fn new(
- ts_col_idx: usize,
- projection: ProjectionMask,
- lower_bound: i64,
- upper_bound: i64,
- ) -> Self {
+ fn new(projection: ProjectionMask, lower_bound: i64, upper_bound: i64) -> Self {
Self {
- timestamp_index: ts_col_idx,
lower_bound,
upper_bound,
projection,
@@ -401,7 +384,8 @@ impl ArrowPredicate for FastTimestampRowFilter {
/// Selects the rows matching given time range.
fn evaluate(&mut self, batch: RecordBatch) -> std::result::Result<BooleanArray, ArrowError> {
- let ts_col = batch.column(self.timestamp_index);
+ // the projection has only timestamp column, so we can safely take the first column in batch.
+ let ts_col = batch.column(0);
macro_rules! downcast_and_compute {
($typ: ty) => {
@@ -443,15 +427,13 @@ impl ArrowPredicate for FastTimestampRowFilter {
/// [PlainTimestampRowFilter] iterates each element in timestamp column, build a [Timestamp] struct
/// and checks if given time range contains the timestamp.
struct PlainTimestampRowFilter {
- timestamp_index: usize,
time_range: TimestampRange,
projection: ProjectionMask,
}
impl PlainTimestampRowFilter {
- fn new(timestamp_index: usize, time_range: TimestampRange, projection: ProjectionMask) -> Self {
+ fn new(time_range: TimestampRange, projection: ProjectionMask) -> Self {
Self {
- timestamp_index,
time_range,
projection,
}
@@ -464,7 +446,8 @@ impl ArrowPredicate for PlainTimestampRowFilter {
}
fn evaluate(&mut self, batch: RecordBatch) -> std::result::Result<BooleanArray, ArrowError> {
- let ts_col = batch.column(self.timestamp_index);
+ // the projection has only timestamp column, so we can safely take the first column in batch.
+ let ts_col = batch.column(0);
macro_rules! downcast_and_compute {
($array_ty: ty, $unit: ident) => {{
@@ -532,33 +515,6 @@ impl BatchReader for ChunkStream {
}
}
-/// Parquet writer data source.
-pub enum Source {
- /// Writes rows from memtable to parquet
- Iter(BoxedBatchIterator),
- /// Writes row from ChunkReaderImpl (maybe a set of SSTs) to parquet.
- Reader(ChunkReaderImpl),
-}
-
-impl Source {
- async fn next_batch(&mut self) -> Result<Option<Batch>> {
- match self {
- Source::Iter(iter) => iter.next().transpose(),
- Source::Reader(reader) => reader
- .next_chunk()
- .await
- .map(|p| p.map(|chunk| Batch::new(chunk.columns))),
- }
- }
-
- fn projected_schema(&self) -> ProjectedSchemaRef {
- match self {
- Source::Iter(iter) => iter.schema(),
- Source::Reader(reader) => reader.projected_schema().clone(),
- }
- }
-}
-
#[cfg(test)]
mod tests {
use std::sync::Arc;
diff --git a/src/storage/src/test_util/config_util.rs b/src/storage/src/test_util/config_util.rs
index 70ebdcf897bd..ca3d3fb57cfb 100644
--- a/src/storage/src/test_util/config_util.rs
+++ b/src/storage/src/test_util/config_util.rs
@@ -20,6 +20,7 @@ use object_store::backend::fs::Builder;
use object_store::ObjectStore;
use crate::background::JobPoolImpl;
+use crate::compaction::noop::NoopCompactionScheduler;
use crate::engine;
use crate::flush::{FlushSchedulerImpl, SizeBasedStrategy};
use crate::manifest::region::RegionManifest;
@@ -51,7 +52,7 @@ pub async fn new_store_config(
..Default::default()
};
let log_store = Arc::new(RaftEngineLogStore::try_new(log_config).await.unwrap());
-
+ let compaction_scheduler = Arc::new(NoopCompactionScheduler::default());
StoreConfig {
log_store,
sst_layer,
@@ -59,5 +60,7 @@ pub async fn new_store_config(
memtable_builder: Arc::new(DefaultMemtableBuilder::default()),
flush_scheduler,
flush_strategy: Arc::new(SizeBasedStrategy::default()),
+ compaction_scheduler,
+ engine_config: Default::default(),
}
}
diff --git a/src/storage/src/version.rs b/src/storage/src/version.rs
index 4f333dc7c059..6f84b0c5f58b 100644
--- a/src/storage/src/version.rs
+++ b/src/storage/src/version.rs
@@ -243,7 +243,7 @@ impl Version {
);
info!(
- "After region compaction, region: {}, SST files: {:?}",
+ "After apply edit, region: {}, SST files: {:?}",
self.metadata.id(),
merged_ssts
);
|
feat
|
compaction integration (#997)
|
6344b1e0db77fdc89d9b649413a648307b4873d1
|
2023-12-05 08:05:23
|
LFC
|
fix: fragile integration tests (#2870)
| false
|
diff --git a/tests-integration/src/cluster.rs b/tests-integration/src/cluster.rs
index 3de98634762e..f114e3015cac 100644
--- a/tests-integration/src/cluster.rs
+++ b/tests-integration/src/cluster.rs
@@ -270,7 +270,8 @@ impl GreptimeDbClusterBuilder {
Arc::new(handlers_executor),
);
- let instance = FrontendBuilder::new(meta_backend, datanode_clients, meta_client)
+ let instance = FrontendBuilder::new(meta_backend.clone(), datanode_clients, meta_client)
+ .with_cache_invalidator(meta_backend)
.with_heartbeat_task(heartbeat_task)
.try_build()
.await
|
fix
|
fragile integration tests (#2870)
|
0b0ed03ee63c7e5baaeda8b5cc93f22cbdc86124
|
2024-07-25 12:15:43
|
Lei, HUANG
|
fix(metrics): RowGroupLastRowCachedReader metrics (#4418)
| false
|
diff --git a/src/mito2/src/cache.rs b/src/mito2/src/cache.rs
index 7a9f879669e2..5ae81aa037f2 100644
--- a/src/mito2/src/cache.rs
+++ b/src/mito2/src/cache.rs
@@ -179,10 +179,7 @@ impl CacheManager {
) -> Option<Arc<SelectorResultValue>> {
self.selector_result_cache
.as_ref()
- .and_then(|selector_result_cache| {
- let value = selector_result_cache.get(selector_key);
- update_hit_miss(value, SELECTOR_RESULT_TYPE)
- })
+ .and_then(|selector_result_cache| selector_result_cache.get(selector_key))
}
/// Puts result of the selector into the cache.
@@ -209,6 +206,16 @@ impl CacheManager {
}
}
+/// Increases selector cache miss metrics.
+pub fn selector_result_cache_miss() {
+ CACHE_MISS.with_label_values(&[SELECTOR_RESULT_TYPE]).inc()
+}
+
+/// Increases selector cache hit metrics.
+pub fn selector_result_cache_hit() {
+ CACHE_HIT.with_label_values(&[SELECTOR_RESULT_TYPE]).inc()
+}
+
/// Builder to construct a [CacheManager].
#[derive(Default)]
pub struct CacheManagerBuilder {
diff --git a/src/mito2/src/read/last_row.rs b/src/mito2/src/read/last_row.rs
index f75a8094d8b3..40767bc48314 100644
--- a/src/mito2/src/read/last_row.rs
+++ b/src/mito2/src/read/last_row.rs
@@ -19,7 +19,10 @@ use std::sync::Arc;
use async_trait::async_trait;
use store_api::storage::TimeSeriesRowSelector;
-use crate::cache::{CacheManagerRef, SelectorResultKey, SelectorResultValue};
+use crate::cache::{
+ selector_result_cache_hit, selector_result_cache_miss, CacheManagerRef, SelectorResultKey,
+ SelectorResultValue,
+};
use crate::error::Result;
use crate::read::{Batch, BatchReader, BoxedBatchReader};
use crate::sst::file::FileId;
@@ -92,7 +95,7 @@ impl RowGroupLastRowCachedReader {
};
let Some(cache_manager) = cache_manager else {
- return Self::Miss(RowGroupLastRowReader::new(key, row_group_reader, None));
+ return Self::new_miss(key, row_group_reader, None);
};
if let Some(value) = cache_manager.get_selector_result(&key) {
let schema_matches = value.projection
@@ -102,22 +105,34 @@ impl RowGroupLastRowCachedReader {
.projection_indices();
if schema_matches {
// Schema matches, use cache batches.
- Self::Hit(LastRowCacheReader { value, idx: 0 })
+ Self::new_hit(value)
} else {
- Self::Miss(RowGroupLastRowReader::new(
- key,
- row_group_reader,
- Some(cache_manager),
- ))
+ Self::new_miss(key, row_group_reader, Some(cache_manager))
}
} else {
- Self::Miss(RowGroupLastRowReader::new(
- key,
- row_group_reader,
- Some(cache_manager),
- ))
+ Self::new_miss(key, row_group_reader, Some(cache_manager))
}
}
+
+ /// Creates new Hit variant and updates metrics.
+ fn new_hit(value: Arc<SelectorResultValue>) -> Self {
+ selector_result_cache_hit();
+ Self::Hit(LastRowCacheReader { value, idx: 0 })
+ }
+
+ /// Creates new Miss variant and updates metrics.
+ fn new_miss(
+ key: SelectorResultKey,
+ row_group_reader: RowGroupReader,
+ cache_manager: Option<CacheManagerRef>,
+ ) -> Self {
+ selector_result_cache_miss();
+ Self::Miss(RowGroupLastRowReader::new(
+ key,
+ row_group_reader,
+ cache_manager,
+ ))
+ }
}
#[async_trait]
|
fix
|
RowGroupLastRowCachedReader metrics (#4418)
|
7437820bdc866865d5f0f2e33683f79efba8366d
|
2023-06-09 11:29:56
|
Ning Sun
|
ci: correct data type for input and event check (#1752)
| false
|
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 17a0614fcc3a..6f896741c993 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -29,7 +29,7 @@ env:
CARGO_PROFILE: nightly
# Controls whether to run tests, include unit-test, integration-test and sqlness.
- DISABLE_RUN_TESTS: ${{ github.event.inputs.skip_test || false }}
+ DISABLE_RUN_TESTS: ${{ inputs.skip_test || false }}
jobs:
build-macos:
@@ -290,7 +290,7 @@ jobs:
name: Build docker image
needs: [build-linux, build-macos]
runs-on: ubuntu-latest
- if: github.repository == 'GreptimeTeam/greptimedb' && !(github.event.inputs.dry_run || false)
+ if: github.repository == 'GreptimeTeam/greptimedb' && !(inputs.dry_run || false)
steps:
- name: Checkout sources
uses: actions/checkout@v3
@@ -376,7 +376,7 @@ jobs:
# Release artifacts only when all the artifacts are built successfully.
needs: [build-linux, build-macos, docker]
runs-on: ubuntu-latest
- if: github.repository == 'GreptimeTeam/greptimedb' && !(github.event.inputs.dry_run || false)
+ if: github.repository == 'GreptimeTeam/greptimedb' && !(inputs.dry_run || false)
steps:
- name: Checkout sources
uses: actions/checkout@v3
@@ -386,7 +386,7 @@ jobs:
- name: Configure scheduled build version # the version would be ${SCHEDULED_BUILD_VERSION_PREFIX}-${SCHEDULED_PERIOD}-YYYYMMDD, like v0.2.0-nigthly-20230313.
shell: bash
- if: github.event_name == 'schedule'
+ if: github.event_name != 'push'
run: |
buildTime=`date "+%Y%m%d"`
SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-${{ env.SCHEDULED_PERIOD }}-$buildTime
@@ -404,13 +404,13 @@ jobs:
fi
- name: Create scheduled build git tag
- if: github.event_name == 'schedule'
+ if: github.event_name != 'push'
run: |
git tag ${{ env.SCHEDULED_BUILD_VERSION }}
- name: Publish scheduled release # configure the different release title and tags.
uses: ncipollo/release-action@v1
- if: github.event_name == 'schedule'
+ if: github.event_name != 'push'
with:
name: "Release ${{ env.SCHEDULED_BUILD_VERSION }}"
prerelease: ${{ env.prerelease }}
@@ -422,7 +422,7 @@ jobs:
- name: Publish release
uses: ncipollo/release-action@v1
- if: github.event_name != 'schedule'
+ if: github.event_name == 'push'
with:
name: "${{ github.ref_name }}"
prerelease: ${{ env.prerelease }}
@@ -435,7 +435,7 @@ jobs:
name: Push docker image to alibaba cloud container registry
needs: [docker]
runs-on: ubuntu-latest
- if: github.repository == 'GreptimeTeam/greptimedb' && !(github.event.inputs.dry_run || false)
+ if: github.repository == 'GreptimeTeam/greptimedb' && !(inputs.dry_run || false)
continue-on-error: true
steps:
- name: Checkout sources
@@ -456,7 +456,7 @@ jobs:
- name: Configure scheduled build image tag # the tag would be ${SCHEDULED_BUILD_VERSION_PREFIX}-YYYYMMDD-${SCHEDULED_PERIOD}
shell: bash
- if: github.event_name == 'schedule'
+ if: github.event_name != 'push'
run: |
buildTime=`date "+%Y%m%d"`
SCHEDULED_BUILD_VERSION=${{ env.SCHEDULED_BUILD_VERSION_PREFIX }}-$buildTime-${{ env.SCHEDULED_PERIOD }}
@@ -464,7 +464,7 @@ jobs:
- name: Configure tag # If the release tag is v0.1.0, then the image version tag will be 0.1.0.
shell: bash
- if: github.event_name != 'schedule'
+ if: github.event_name == 'push'
run: |
VERSION=${{ github.ref_name }}
echo "IMAGE_TAG=${VERSION:1}" >> $GITHUB_ENV
|
ci
|
correct data type for input and event check (#1752)
|
c1a9f84c7fdb740b0b85619babde2759fa8ccd00
|
2023-02-13 13:28:30
|
fys
|
feat: meta provides the ability to distribute lock (#961)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 6a2282ff2295..e0d2308d73b0 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2973,7 +2973,7 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"
[[package]]
name = "greptime-proto"
version = "0.1.0"
-source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=966161508646f575801bcf05f47ed283ec231d68#966161508646f575801bcf05f47ed283ec231d68"
+source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=3e6349be127b65a8b42a38cda9d527ec423ca77d#3e6349be127b65a8b42a38cda9d527ec423ca77d"
dependencies = [
"prost 0.11.6",
"tonic",
diff --git a/src/api/Cargo.toml b/src/api/Cargo.toml
index 7892210163bb..384f1d7ddfd0 100644
--- a/src/api/Cargo.toml
+++ b/src/api/Cargo.toml
@@ -10,7 +10,7 @@ common-base = { path = "../common/base" }
common-error = { path = "../common/error" }
common-time = { path = "../common/time" }
datatypes = { path = "../datatypes" }
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "966161508646f575801bcf05f47ed283ec231d68" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "3e6349be127b65a8b42a38cda9d527ec423ca77d" }
prost.workspace = true
snafu = { version = "0.7", features = ["backtraces"] }
tonic.workspace = true
diff --git a/src/meta-client/examples/lock.rs b/src/meta-client/examples/lock.rs
new file mode 100644
index 000000000000..2591b4da4bcb
--- /dev/null
+++ b/src/meta-client/examples/lock.rs
@@ -0,0 +1,125 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::time::Duration;
+
+use common_grpc::channel_manager::{ChannelConfig, ChannelManager};
+use meta_client::client::{MetaClient, MetaClientBuilder};
+use meta_client::rpc::lock::{LockRequest, UnlockRequest};
+use tracing::{info, subscriber};
+use tracing_subscriber::FmtSubscriber;
+
+fn main() {
+ subscriber::set_global_default(FmtSubscriber::builder().finish()).unwrap();
+ run();
+}
+
+#[tokio::main]
+async fn run() {
+ let id = (1000u64, 2000u64);
+ let config = ChannelConfig::new()
+ .timeout(Duration::from_secs(30))
+ .connect_timeout(Duration::from_secs(5))
+ .tcp_nodelay(true);
+ let channel_manager = ChannelManager::with_config(config);
+ let mut meta_client = MetaClientBuilder::new(id.0, id.1)
+ .enable_lock()
+ .channel_manager(channel_manager)
+ .build();
+ meta_client.start(&["127.0.0.1:3002"]).await.unwrap();
+
+ run_normal(meta_client.clone()).await;
+
+ run_multi_thread(meta_client.clone()).await;
+
+ run_multi_thread_with_one_timeout(meta_client).await;
+}
+
+async fn run_normal(meta_client: MetaClient) {
+ let name = "lock_name".as_bytes().to_vec();
+ let expire_secs = 60;
+
+ let lock_req = LockRequest { name, expire_secs };
+
+ let lock_result = meta_client.lock(lock_req).await.unwrap();
+ let key = lock_result.key;
+ info!(
+ "lock success! Returned key: {}",
+ String::from_utf8(key.clone()).unwrap()
+ );
+
+ // It is recommended that the time of holding the lock is less than the timeout of the gRPC channel
+ info!("do some work, take 3 seconds");
+ tokio::time::sleep(Duration::from_secs(3)).await;
+
+ let unlock_req = UnlockRequest { key };
+
+ meta_client.unlock(unlock_req).await.unwrap();
+ info!("unlock success!");
+}
+
+async fn run_multi_thread(meta_client: MetaClient) {
+ let meta_client_clone = meta_client.clone();
+ let join1 = tokio::spawn(async move {
+ run_normal(meta_client_clone.clone()).await;
+ });
+
+ tokio::time::sleep(Duration::from_secs(1)).await;
+
+ let join2 = tokio::spawn(async move {
+ run_normal(meta_client).await;
+ });
+
+ join1.await.unwrap();
+ join2.await.unwrap();
+}
+
+async fn run_multi_thread_with_one_timeout(meta_client: MetaClient) {
+ let meta_client_clone = meta_client.clone();
+ let join1 = tokio::spawn(async move {
+ run_with_timeout(meta_client_clone.clone()).await;
+ });
+
+ tokio::time::sleep(Duration::from_secs(1)).await;
+
+ let join2 = tokio::spawn(async move {
+ run_normal(meta_client).await;
+ });
+
+ join1.await.unwrap();
+ join2.await.unwrap();
+}
+
+async fn run_with_timeout(meta_client: MetaClient) {
+ let name = "lock_name".as_bytes().to_vec();
+ let expire_secs = 5;
+
+ let lock_req = LockRequest { name, expire_secs };
+
+ let lock_result = meta_client.lock(lock_req).await.unwrap();
+ let key = lock_result.key;
+ info!(
+ "lock success! Returned key: {}",
+ String::from_utf8(key.clone()).unwrap()
+ );
+
+ // It is recommended that the time of holding the lock is less than the timeout of the gRPC channel
+ info!("do some work, take 20 seconds");
+ tokio::time::sleep(Duration::from_secs(20)).await;
+
+ let unlock_req = UnlockRequest { key };
+
+ meta_client.unlock(unlock_req).await.unwrap();
+ info!("unlock success!");
+}
diff --git a/src/meta-client/src/client.rs b/src/meta-client/src/client.rs
index 8ae5682e9e2f..6b842405fc1c 100644
--- a/src/meta-client/src/client.rs
+++ b/src/meta-client/src/client.rs
@@ -14,12 +14,14 @@
mod heartbeat;
mod load_balance;
+mod lock;
mod router;
mod store;
use common_grpc::channel_manager::{ChannelConfig, ChannelManager};
use common_telemetry::info;
use heartbeat::Client as HeartbeatClient;
+use lock::Client as LockClient;
use router::Client as RouterClient;
use snafu::OptionExt;
use store::Client as StoreClient;
@@ -27,6 +29,7 @@ use store::Client as StoreClient;
pub use self::heartbeat::{HeartbeatSender, HeartbeatStream};
use crate::error;
use crate::error::Result;
+use crate::rpc::lock::{LockRequest, LockResponse, UnlockRequest};
use crate::rpc::router::DeleteRequest;
use crate::rpc::{
BatchPutRequest, BatchPutResponse, CompareAndPutRequest, CompareAndPutResponse, CreateRequest,
@@ -42,6 +45,7 @@ pub struct MetaClientBuilder {
enable_heartbeat: bool,
enable_router: bool,
enable_store: bool,
+ enable_lock: bool,
channel_manager: Option<ChannelManager>,
}
@@ -74,6 +78,13 @@ impl MetaClientBuilder {
}
}
+ pub fn enable_lock(self) -> Self {
+ Self {
+ enable_lock: true,
+ ..self
+ }
+ }
+
pub fn channel_manager(self, channel_manager: ChannelManager) -> Self {
Self {
channel_manager: Some(channel_manager),
@@ -88,9 +99,7 @@ impl MetaClientBuilder {
MetaClient::new(self.id)
};
- if let (false, false, false) =
- (self.enable_heartbeat, self.enable_router, self.enable_store)
- {
+ if !(self.enable_heartbeat || self.enable_router || self.enable_store || self.enable_lock) {
panic!("At least one client needs to be enabled.")
}
@@ -103,7 +112,10 @@ impl MetaClientBuilder {
client.router = Some(RouterClient::new(self.id, mgr.clone()));
}
if self.enable_store {
- client.store = Some(StoreClient::new(self.id, mgr));
+ client.store = Some(StoreClient::new(self.id, mgr.clone()));
+ }
+ if self.enable_lock {
+ client.lock = Some(LockClient::new(self.id, mgr));
}
client
@@ -117,6 +129,7 @@ pub struct MetaClient {
heartbeat: Option<HeartbeatClient>,
router: Option<RouterClient>,
store: Option<StoreClient>,
+ lock: Option<LockClient>,
}
impl MetaClient {
@@ -151,10 +164,15 @@ impl MetaClient {
info!("Router client started");
}
if let Some(client) = &mut self.store {
- client.start(urls).await?;
+ client.start(urls.clone()).await?;
info!("Store client started");
}
+ if let Some(client) = &mut self.lock {
+ client.start(urls).await?;
+ info!("Lock client started");
+ }
+
Ok(())
}
@@ -260,6 +278,15 @@ impl MetaClient {
.try_into()
}
+ pub async fn lock(&self, req: LockRequest) -> Result<LockResponse> {
+ self.lock_client()?.lock(req.into()).await.map(Into::into)
+ }
+
+ pub async fn unlock(&self, req: UnlockRequest) -> Result<()> {
+ self.lock_client()?.unlock(req.into()).await?;
+ Ok(())
+ }
+
#[inline]
pub fn heartbeat_client(&self) -> Result<HeartbeatClient> {
self.heartbeat.clone().context(error::NotStartedSnafu {
@@ -281,6 +308,13 @@ impl MetaClient {
})
}
+ #[inline]
+ pub fn lock_client(&self) -> Result<LockClient> {
+ self.lock.clone().context(error::NotStartedSnafu {
+ name: "lock_client",
+ })
+ }
+
#[inline]
pub fn channel_config(&self) -> &ChannelConfig {
self.channel_manager.config()
diff --git a/src/meta-client/src/client/lock.rs b/src/meta-client/src/client/lock.rs
new file mode 100644
index 000000000000..eddef25f3e46
--- /dev/null
+++ b/src/meta-client/src/client/lock.rs
@@ -0,0 +1,184 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::collections::HashSet;
+use std::sync::Arc;
+
+use api::v1::meta::lock_client::LockClient;
+use api::v1::meta::{LockRequest, LockResponse, UnlockRequest, UnlockResponse};
+use common_grpc::channel_manager::ChannelManager;
+use snafu::{ensure, OptionExt, ResultExt};
+use tokio::sync::RwLock;
+use tonic::transport::Channel;
+
+use crate::client::{load_balance, Id};
+use crate::error;
+use crate::error::Result;
+
+#[derive(Clone, Debug)]
+pub struct Client {
+ inner: Arc<RwLock<Inner>>,
+}
+
+impl Client {
+ pub fn new(id: Id, channel_manager: ChannelManager) -> Self {
+ let inner = Arc::new(RwLock::new(Inner {
+ id,
+ channel_manager,
+ peers: vec![],
+ }));
+
+ Self { inner }
+ }
+
+ pub async fn start<U, A>(&mut self, urls: A) -> Result<()>
+ where
+ U: AsRef<str>,
+ A: AsRef<[U]>,
+ {
+ let mut inner = self.inner.write().await;
+ inner.start(urls).await
+ }
+
+ pub async fn is_started(&self) -> bool {
+ let inner = self.inner.read().await;
+ inner.is_started()
+ }
+
+ pub async fn lock(&self, req: LockRequest) -> Result<LockResponse> {
+ let inner = self.inner.read().await;
+ inner.lock(req).await
+ }
+
+ pub async fn unlock(&self, req: UnlockRequest) -> Result<UnlockResponse> {
+ let inner = self.inner.read().await;
+ inner.unlock(req).await
+ }
+}
+
+#[derive(Debug)]
+struct Inner {
+ id: Id,
+ channel_manager: ChannelManager,
+ peers: Vec<String>,
+}
+
+impl Inner {
+ async fn start<U, A>(&mut self, urls: A) -> Result<()>
+ where
+ U: AsRef<str>,
+ A: AsRef<[U]>,
+ {
+ ensure!(
+ !self.is_started(),
+ error::IllegalGrpcClientStateSnafu {
+ err_msg: "Lock client already started",
+ }
+ );
+
+ self.peers = urls
+ .as_ref()
+ .iter()
+ .map(|url| url.as_ref().to_string())
+ .collect::<HashSet<_>>()
+ .drain()
+ .collect::<Vec<_>>();
+
+ Ok(())
+ }
+
+ fn random_client(&self) -> Result<LockClient<Channel>> {
+ let len = self.peers.len();
+ let peer = load_balance::random_get(len, |i| Some(&self.peers[i])).context(
+ error::IllegalGrpcClientStateSnafu {
+ err_msg: "Empty peers, lock client may not start yet",
+ },
+ )?;
+
+ self.make_client(peer)
+ }
+
+ fn make_client(&self, addr: impl AsRef<str>) -> Result<LockClient<Channel>> {
+ let channel = self
+ .channel_manager
+ .get(addr)
+ .context(error::CreateChannelSnafu)?;
+
+ Ok(LockClient::new(channel))
+ }
+
+ #[inline]
+ fn is_started(&self) -> bool {
+ !self.peers.is_empty()
+ }
+
+ async fn lock(&self, mut req: LockRequest) -> Result<LockResponse> {
+ let mut client = self.random_client()?;
+ req.set_header(self.id);
+ let res = client.lock(req).await.context(error::TonicStatusSnafu)?;
+
+ Ok(res.into_inner())
+ }
+
+ async fn unlock(&self, mut req: UnlockRequest) -> Result<UnlockResponse> {
+ let mut client = self.random_client()?;
+ req.set_header(self.id);
+ let res = client.unlock(req).await.context(error::TonicStatusSnafu)?;
+
+ Ok(res.into_inner())
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[tokio::test]
+ async fn test_start_client() {
+ let mut client = Client::new((0, 0), ChannelManager::default());
+ assert!(!client.is_started().await);
+ client
+ .start(&["127.0.0.1:1000", "127.0.0.1:1001"])
+ .await
+ .unwrap();
+ assert!(client.is_started().await);
+ }
+
+ #[tokio::test]
+ async fn test_already_start() {
+ let mut client = Client::new((0, 0), ChannelManager::default());
+ client
+ .start(&["127.0.0.1:1000", "127.0.0.1:1001"])
+ .await
+ .unwrap();
+ assert!(client.is_started().await);
+ let res = client.start(&["127.0.0.1:1002"]).await;
+ assert!(res.is_err());
+ assert!(matches!(
+ res.err(),
+ Some(error::Error::IllegalGrpcClientState { .. })
+ ));
+ }
+
+ #[tokio::test]
+ async fn test_start_with_duplicate_peers() {
+ let mut client = Client::new((0, 0), ChannelManager::default());
+ client
+ .start(&["127.0.0.1:1000", "127.0.0.1:1000", "127.0.0.1:1000"])
+ .await
+ .unwrap();
+
+ assert_eq!(1, client.inner.write().await.peers.len());
+ }
+}
diff --git a/src/meta-client/src/rpc.rs b/src/meta-client/src/rpc.rs
index 66844a00bbdb..2264f0033b65 100644
--- a/src/meta-client/src/rpc.rs
+++ b/src/meta-client/src/rpc.rs
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+pub mod lock;
pub mod router;
mod store;
pub mod util;
diff --git a/src/meta-client/src/rpc/lock.rs b/src/meta-client/src/rpc/lock.rs
new file mode 100644
index 000000000000..4b8e611f965d
--- /dev/null
+++ b/src/meta-client/src/rpc/lock.rs
@@ -0,0 +1,115 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use api::v1::meta::{
+ LockRequest as PbLockRequest, LockResponse as PbLockResponse, UnlockRequest as PbUnlockRequest,
+};
+
+#[derive(Debug)]
+pub struct LockRequest {
+ pub name: Vec<u8>,
+ pub expire_secs: i64,
+}
+
+impl From<LockRequest> for PbLockRequest {
+ fn from(req: LockRequest) -> Self {
+ Self {
+ header: None,
+ name: req.name,
+ expire_secs: req.expire_secs,
+ }
+ }
+}
+
+#[derive(Debug)]
+pub struct LockResponse {
+ pub key: Vec<u8>,
+}
+
+impl From<PbLockResponse> for LockResponse {
+ fn from(resp: PbLockResponse) -> Self {
+ Self { key: resp.key }
+ }
+}
+
+#[derive(Debug)]
+pub struct UnlockRequest {
+ pub key: Vec<u8>,
+}
+
+impl From<UnlockRequest> for PbUnlockRequest {
+ fn from(req: UnlockRequest) -> Self {
+ Self {
+ header: None,
+ key: req.key.to_vec(),
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use api::v1::meta::{
+ LockRequest as PbLockRequest, LockResponse as PbLockResponse,
+ UnlockRequest as PbUnlockRequest,
+ };
+
+ use super::LockRequest;
+ use crate::rpc::lock::{LockResponse, UnlockRequest};
+
+ #[test]
+ fn test_convert_lock_req() {
+ let lock_req = LockRequest {
+ name: "lock_1".as_bytes().to_vec(),
+ expire_secs: 1,
+ };
+ let pb_lock_req: PbLockRequest = lock_req.into();
+
+ let expected = PbLockRequest {
+ header: None,
+ name: "lock_1".as_bytes().to_vec(),
+ expire_secs: 1,
+ };
+
+ assert_eq!(expected, pb_lock_req);
+ }
+
+ #[test]
+ fn test_convert_unlock_req() {
+ let unlock_req = UnlockRequest {
+ key: "lock_1_12378123".as_bytes().to_vec(),
+ };
+ let pb_unlock_req: PbUnlockRequest = unlock_req.into();
+
+ let expected = PbUnlockRequest {
+ header: None,
+ key: "lock_1_12378123".as_bytes().to_vec(),
+ };
+
+ assert_eq!(expected, pb_unlock_req);
+ }
+
+ #[test]
+ fn test_convert_lock_response() {
+ let pb_lock_resp = PbLockResponse {
+ header: None,
+ key: "lock_1_12378123".as_bytes().to_vec(),
+ };
+
+ let lock_resp: LockResponse = pb_lock_resp.into();
+
+ let expected_key = "lock_1_12378123".as_bytes().to_vec();
+
+ assert_eq!(expected_key, lock_resp.key);
+ }
+}
diff --git a/src/meta-srv/src/bootstrap.rs b/src/meta-srv/src/bootstrap.rs
index 5ca0ac3809fb..243512e16874 100644
--- a/src/meta-srv/src/bootstrap.rs
+++ b/src/meta-srv/src/bootstrap.rs
@@ -16,8 +16,10 @@ use std::sync::Arc;
use api::v1::meta::cluster_server::ClusterServer;
use api::v1::meta::heartbeat_server::HeartbeatServer;
+use api::v1::meta::lock_server::LockServer;
use api::v1::meta::router_server::RouterServer;
use api::v1::meta::store_server::StoreServer;
+use etcd_client::Client;
use snafu::ResultExt;
use tokio::net::TcpListener;
use tokio_stream::wrappers::TcpListenerStream;
@@ -25,6 +27,7 @@ use tonic::transport::server::Router;
use crate::cluster::MetaPeerClient;
use crate::election::etcd::EtcdElection;
+use crate::lock::etcd::EtcdLock;
use crate::metasrv::builder::MetaSrvBuilder;
use crate::metasrv::{MetaSrv, MetaSrvOptions, SelectorRef};
use crate::selector::lease_based::LeaseBasedSelector;
@@ -65,16 +68,25 @@ pub fn router(meta_srv: MetaSrv) -> Router {
.add_service(RouterServer::new(meta_srv.clone()))
.add_service(StoreServer::new(meta_srv.clone()))
.add_service(ClusterServer::new(meta_srv.clone()))
+ .add_service(LockServer::new(meta_srv.clone()))
.add_service(admin::make_admin_service(meta_srv))
}
pub async fn make_meta_srv(opts: MetaSrvOptions) -> Result<MetaSrv> {
- let (kv_store, election) = if opts.use_memory_store {
- (Arc::new(MemStore::new()) as _, None)
+ let (kv_store, election, lock) = if opts.use_memory_store {
+ (Arc::new(MemStore::new()) as _, None, None)
} else {
+ let etcd_endpoints = [&opts.store_addr];
+ let etcd_client = Client::connect(etcd_endpoints, None)
+ .await
+ .context(error::ConnectEtcdSnafu)?;
(
- EtcdStore::with_endpoints([&opts.store_addr]).await?,
- Some(EtcdElection::with_endpoints(&opts.server_addr, [&opts.store_addr]).await?),
+ EtcdStore::with_etcd_client(etcd_client.clone())?,
+ Some(EtcdElection::with_etcd_client(
+ &opts.server_addr,
+ etcd_client.clone(),
+ )?),
+ Some(EtcdLock::with_etcd_client(etcd_client)?),
)
};
@@ -95,6 +107,7 @@ pub async fn make_meta_srv(opts: MetaSrvOptions) -> Result<MetaSrv> {
.selector(selector)
.election(election)
.meta_peer_client(meta_peer_client)
+ .lock(lock)
.build()
.await;
diff --git a/src/meta-srv/src/election/etcd.rs b/src/meta-srv/src/election/etcd.rs
index fc4ceef57857..ee19eede976b 100644
--- a/src/meta-srv/src/election/etcd.rs
+++ b/src/meta-srv/src/election/etcd.rs
@@ -38,11 +38,19 @@ impl EtcdElection {
E: AsRef<str>,
S: AsRef<[E]>,
{
- let leader_value = leader_value.as_ref().into();
let client = Client::connect(endpoints, None)
.await
.context(error::ConnectEtcdSnafu)?;
+ Self::with_etcd_client(leader_value, client)
+ }
+
+ pub fn with_etcd_client<E>(leader_value: E, client: Client) -> Result<ElectionRef>
+ where
+ E: AsRef<str>,
+ {
+ let leader_value = leader_value.as_ref().into();
+
Ok(Arc::new(Self {
leader_value,
client,
diff --git a/src/meta-srv/src/error.rs b/src/meta-srv/src/error.rs
index 2e6b49e6ffdd..b08fd2f2dcdb 100644
--- a/src/meta-srv/src/error.rs
+++ b/src/meta-srv/src/error.rs
@@ -218,6 +218,27 @@ pub enum Error {
#[snafu(backtrace)]
source: BoxedError,
},
+
+ #[snafu(display("Failed to lock based on etcd, source: {}", source))]
+ Lock {
+ source: etcd_client::Error,
+ backtrace: Backtrace,
+ },
+
+ #[snafu(display("Failed to unlock based on etcd, source: {}", source))]
+ Unlock {
+ source: etcd_client::Error,
+ backtrace: Backtrace,
+ },
+
+ #[snafu(display("Failed to grant lease, source: {}", source))]
+ LeaseGrant {
+ source: etcd_client::Error,
+ backtrace: Backtrace,
+ },
+
+ #[snafu(display("Distributed lock is not configured"))]
+ LockNotConfig { backtrace: Backtrace },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -254,6 +275,10 @@ impl ErrorExt for Error {
| Error::IsNotLeader { .. }
| Error::NoMetaPeerClient { .. }
| Error::InvalidHttpBody { .. }
+ | Error::Lock { .. }
+ | Error::Unlock { .. }
+ | Error::LeaseGrant { .. }
+ | Error::LockNotConfig { .. }
| Error::StartGrpc { .. } => StatusCode::Internal,
Error::EmptyKey { .. }
| Error::EmptyTableName { .. }
diff --git a/src/meta-srv/src/lib.rs b/src/meta-srv/src/lib.rs
index c0b00e446b98..8e95448ff50e 100644
--- a/src/meta-srv/src/lib.rs
+++ b/src/meta-srv/src/lib.rs
@@ -20,6 +20,7 @@ pub mod error;
pub mod handler;
pub mod keys;
pub mod lease;
+pub mod lock;
pub mod metasrv;
#[cfg(feature = "mock")]
pub mod mocks;
diff --git a/src/meta-srv/src/lock.rs b/src/meta-srv/src/lock.rs
new file mode 100644
index 000000000000..9923ed894502
--- /dev/null
+++ b/src/meta-srv/src/lock.rs
@@ -0,0 +1,41 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+pub mod etcd;
+
+use std::sync::Arc;
+
+use crate::error::Result;
+
+pub type Key = Vec<u8>;
+
+pub const DEFAULT_EXPIRE_TIME_SECS: u64 = 10;
+
+pub struct Opts {
+ // If the expiration time is exceeded while the lock is still held, the lock is
+ // automatically released.
+ pub expire_secs: Option<u64>,
+}
+
+#[async_trait::async_trait]
+pub trait DistLock: Send + Sync {
+ // Lock acquires a distributed shared lock on a given named lock. On success, it
+ // will return a unique key that exists so long as the lock is held by the caller.
+ async fn lock(&self, name: Vec<u8>, opts: Opts) -> Result<Key>;
+
+ // Unlock takes a key returned by Lock and releases the hold on lock.
+ async fn unlock(&self, key: Vec<u8>) -> Result<()>;
+}
+
+pub type DistLockRef = Arc<dyn DistLock>;
diff --git a/src/meta-srv/src/lock/etcd.rs b/src/meta-srv/src/lock/etcd.rs
new file mode 100644
index 000000000000..f02a8a974428
--- /dev/null
+++ b/src/meta-srv/src/lock/etcd.rs
@@ -0,0 +1,76 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::sync::Arc;
+
+use etcd_client::{Client, LockOptions};
+use snafu::ResultExt;
+
+use super::{DistLock, DistLockRef, Opts, DEFAULT_EXPIRE_TIME_SECS};
+use crate::error;
+use crate::error::Result;
+
+/// An implementation of a distributed lock based on etcd. Cloning `EtcdLock` is cheap.
+#[derive(Clone)]
+pub struct EtcdLock {
+ client: Client,
+}
+
+impl EtcdLock {
+ pub async fn with_endpoints<E, S>(endpoints: S) -> Result<DistLockRef>
+ where
+ E: AsRef<str>,
+ S: AsRef<[E]>,
+ {
+ let client = Client::connect(endpoints, None)
+ .await
+ .context(error::ConnectEtcdSnafu)?;
+
+ Self::with_etcd_client(client)
+ }
+
+ pub fn with_etcd_client(client: Client) -> Result<DistLockRef> {
+ Ok(Arc::new(EtcdLock { client }))
+ }
+}
+
+#[async_trait::async_trait]
+impl DistLock for EtcdLock {
+ async fn lock(&self, name: Vec<u8>, opts: Opts) -> Result<Vec<u8>> {
+ let expire = opts.expire_secs.unwrap_or(DEFAULT_EXPIRE_TIME_SECS) as i64;
+
+ let mut client = self.client.clone();
+
+ let resp = client
+ .lease_grant(expire, None)
+ .await
+ .context(error::LeaseGrantSnafu)?;
+
+ let lease_id = resp.id();
+ let lock_opts = LockOptions::new().with_lease(lease_id);
+
+ let resp = client
+ .lock(name, Some(lock_opts))
+ .await
+ .context(error::LockSnafu)?;
+
+ Ok(resp.key().to_vec())
+ }
+
+ async fn unlock(&self, key: Vec<u8>) -> Result<()> {
+ let mut client = self.client.clone();
+ let _ = client.unlock(key).await.context(error::UnlockSnafu)?;
+ Ok(())
+ }
+}
diff --git a/src/meta-srv/src/metasrv.rs b/src/meta-srv/src/metasrv.rs
index 62633cc950b8..985c2db6a2fa 100644
--- a/src/meta-srv/src/metasrv.rs
+++ b/src/meta-srv/src/metasrv.rs
@@ -24,6 +24,7 @@ use serde::{Deserialize, Serialize};
use crate::cluster::MetaPeerClient;
use crate::election::Election;
use crate::handler::HeartbeatHandlerGroup;
+use crate::lock::DistLockRef;
use crate::selector::{Selector, SelectorType};
use crate::sequence::SequenceRef;
use crate::service::store::kv::{KvStoreRef, ResetableKvStoreRef};
@@ -99,6 +100,7 @@ pub struct MetaSrv {
handler_group: HeartbeatHandlerGroup,
election: Option<ElectionRef>,
meta_peer_client: Option<MetaPeerClient>,
+ lock: Option<DistLockRef>,
}
impl MetaSrv {
@@ -174,6 +176,11 @@ impl MetaSrv {
self.meta_peer_client.clone()
}
+ #[inline]
+ pub fn lock(&self) -> Option<DistLockRef> {
+ self.lock.clone()
+ }
+
#[inline]
pub fn new_ctx(&self) -> Context {
let datanode_lease_secs = self.options().datanode_lease_secs;
diff --git a/src/meta-srv/src/metasrv/builder.rs b/src/meta-srv/src/metasrv/builder.rs
index 730dc3d54a76..aec93289a8cc 100644
--- a/src/meta-srv/src/metasrv/builder.rs
+++ b/src/meta-srv/src/metasrv/builder.rs
@@ -20,6 +20,7 @@ use crate::handler::{
CheckLeaderHandler, CollectStatsHandler, HeartbeatHandlerGroup, KeepLeaseHandler,
OnLeaderStartHandler, PersistStatsHandler, ResponseHeaderHandler,
};
+use crate::lock::DistLockRef;
use crate::metasrv::{ElectionRef, MetaSrv, MetaSrvOptions, SelectorRef, TABLE_ID_SEQ};
use crate::selector::lease_based::LeaseBasedSelector;
use crate::sequence::Sequence;
@@ -35,6 +36,7 @@ pub struct MetaSrvBuilder {
handler_group: Option<HeartbeatHandlerGroup>,
election: Option<ElectionRef>,
meta_peer_client: Option<MetaPeerClient>,
+ lock: Option<DistLockRef>,
}
impl MetaSrvBuilder {
@@ -47,6 +49,7 @@ impl MetaSrvBuilder {
meta_peer_client: None,
election: None,
options: None,
+ lock: None,
}
}
@@ -85,6 +88,11 @@ impl MetaSrvBuilder {
self
}
+ pub fn lock(mut self, lock: Option<DistLockRef>) -> Self {
+ self.lock = lock;
+ self
+ }
+
pub async fn build(self) -> MetaSrv {
let started = Arc::new(AtomicBool::new(false));
@@ -96,6 +104,7 @@ impl MetaSrvBuilder {
in_memory,
selector,
handler_group,
+ lock,
} = self;
let options = options.unwrap_or_default();
@@ -136,6 +145,7 @@ impl MetaSrvBuilder {
handler_group,
election,
meta_peer_client,
+ lock,
}
}
}
diff --git a/src/meta-srv/src/service.rs b/src/meta-srv/src/service.rs
index d8a5eaf4c98e..cb386f82ff8c 100644
--- a/src/meta-srv/src/service.rs
+++ b/src/meta-srv/src/service.rs
@@ -20,6 +20,7 @@ use tonic::{Response, Status};
pub mod admin;
pub mod cluster;
mod heartbeat;
+pub mod lock;
pub mod router;
pub mod store;
diff --git a/src/meta-srv/src/service/lock.rs b/src/meta-srv/src/service/lock.rs
new file mode 100644
index 000000000000..3be36152cfc9
--- /dev/null
+++ b/src/meta-srv/src/service/lock.rs
@@ -0,0 +1,55 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use api::v1::meta::{lock_server, LockRequest, LockResponse, UnlockRequest, UnlockResponse};
+use snafu::OptionExt;
+use tonic::{Request, Response};
+
+use super::GrpcResult;
+use crate::error;
+use crate::lock::Opts;
+use crate::metasrv::MetaSrv;
+
+#[async_trait::async_trait]
+impl lock_server::Lock for MetaSrv {
+ async fn lock(&self, request: Request<LockRequest>) -> GrpcResult<LockResponse> {
+ let LockRequest {
+ name, expire_secs, ..
+ } = request.into_inner();
+ let expire_secs = Some(expire_secs as u64);
+
+ let lock = self.lock().context(error::LockNotConfigSnafu)?;
+ let key = lock.lock(name, Opts { expire_secs }).await?;
+
+ let resp = LockResponse {
+ key,
+ ..Default::default()
+ };
+
+ Ok(Response::new(resp))
+ }
+
+ async fn unlock(&self, request: Request<UnlockRequest>) -> GrpcResult<UnlockResponse> {
+ let UnlockRequest { key, .. } = request.into_inner();
+
+ let lock = self.lock().context(error::LockNotConfigSnafu)?;
+ let _ = lock.unlock(key).await?;
+
+ let resp = UnlockResponse {
+ ..Default::default()
+ };
+
+ Ok(Response::new(resp))
+ }
+}
diff --git a/src/meta-srv/src/service/store/etcd.rs b/src/meta-srv/src/service/store/etcd.rs
index 38dd9220f4f3..91cd1f7c16ca 100644
--- a/src/meta-srv/src/service/store/etcd.rs
+++ b/src/meta-srv/src/service/store/etcd.rs
@@ -43,6 +43,10 @@ impl EtcdStore {
.await
.context(error::ConnectEtcdSnafu)?;
+ Self::with_etcd_client(client)
+ }
+
+ pub fn with_etcd_client(client: Client) -> Result<KvStoreRef> {
Ok(Arc::new(Self { client }))
}
}
|
feat
|
meta provides the ability to distribute lock (#961)
|
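Note: the commit above exposes `lock`/`unlock` on `MetaClient` but leaves releasing the lock entirely to the caller. Below is a minimal guard-style sketch built on the API shown in the diff; the `DistLockGuard` type, the helper names, and the exact crate paths are assumptions for illustration, not part of the commit.

// Hypothetical guard over the MetaClient lock API shown above; names and
// module paths are illustrative assumptions, not part of the commit.
use meta_client::client::MetaClient;
use meta_client::error::Result;
use meta_client::rpc::lock::{LockRequest, UnlockRequest};

/// Holds a distributed lock and releases it explicitly via `release`.
pub struct DistLockGuard {
    client: MetaClient,
    key: Vec<u8>,
}

impl DistLockGuard {
    /// Acquires the named lock. Keep `expire_secs` (and the work done while
    /// holding the lock) shorter than the gRPC channel timeout, as the
    /// example in the commit recommends.
    pub async fn acquire(client: MetaClient, name: &[u8], expire_secs: i64) -> Result<Self> {
        let resp = client
            .lock(LockRequest {
                name: name.to_vec(),
                expire_secs,
            })
            .await?;
        Ok(Self {
            client,
            key: resp.key,
        })
    }

    /// Releases the lock by handing back the key returned on acquisition.
    pub async fn release(self) -> Result<()> {
        self.client.unlock(UnlockRequest { key: self.key }).await
    }
}

A caller would wrap the critical section between `acquire` and `release`; an automatic release in `Drop` is deliberately avoided in this sketch because it would require an async runtime handle.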
b8354bbb5507de9485d27dc4a28d7d00fa53dbfc
|
2024-05-18 07:27:49
|
zyy17
|
docs: add toc for config docs (#3974)
| false
|
diff --git a/config/config-docs-template.md b/config/config-docs-template.md
index 8fe10780f599..b70c20184d31 100644
--- a/config/config-docs-template.md
+++ b/config/config-docs-template.md
@@ -1,10 +1,16 @@
# Configurations
+- [Standalone Mode](#standalone-mode)
+- [Distributed Mode](#distributed-mode)
+ - [Frontend](#frontend)
+ - [Metasrv](#metasrv)
+ - [Datanode](#datanode)
+
## Standalone Mode
{{ toml2docs "./standalone.example.toml" }}
-## Cluster Mode
+## Distributed Mode
### Frontend
diff --git a/config/config.md b/config/config.md
index ce5b57250236..fa58b074645b 100644
--- a/config/config.md
+++ b/config/config.md
@@ -1,5 +1,11 @@
# Configurations
+- [Standalone Mode](#standalone-mode)
+- [Distributed Mode](#distributed-mode)
+ - [Frontend](#frontend)
+ - [Metasrv](#metasrv)
+ - [Datanode](#datanode)
+
## Standalone Mode
| Key | Type | Default | Descriptions |
@@ -131,7 +137,7 @@
| `tracing.tokio_console_addr` | String | `None` | The tokio console address. |
-## Cluster Mode
+## Distributed Mode
### Frontend
|
docs
|
add toc for config docs (#3974)
|
4d5ecb54c5ac7e6a4f2e3c1245e648a009d3ec34
|
2023-07-31 15:34:22
|
Yingwen
|
feat(mito): Implement open for RegionManifestManager (#2036)
| false
|
diff --git a/src/mito2/src/error.rs b/src/mito2/src/error.rs
index 6c52e3d1e1b7..b44c5c3020cc 100644
--- a/src/mito2/src/error.rs
+++ b/src/mito2/src/error.rs
@@ -98,12 +98,6 @@ pub enum Error {
location: Location,
},
- #[snafu(display(
- "Expect initial region metadata on creating/opening a new region, location: {}",
- location
- ))]
- InitialMetadata { location: Location },
-
#[snafu(display("Invalid metadata, {}, location: {}", reason, location))]
InvalidMeta { reason: String, location: Location },
@@ -180,10 +174,9 @@ impl ErrorExt for Error {
| Utf8 { .. }
| RegionExists { .. }
| NewRecordBatch { .. } => StatusCode::Unexpected,
- InvalidScanIndex { .. }
- | InitialMetadata { .. }
- | InvalidMeta { .. }
- | InvalidSchema { .. } => StatusCode::InvalidArguments,
+ InvalidScanIndex { .. } | InvalidMeta { .. } | InvalidSchema { .. } => {
+ StatusCode::InvalidArguments
+ }
RegionMetadataNotFound { .. } | Join { .. } | WorkerStopped { .. } | Recv { .. } => {
StatusCode::Internal
}
diff --git a/src/mito2/src/manifest/action.rs b/src/mito2/src/manifest/action.rs
index 0cf5ac472780..a996ec0c2d98 100644
--- a/src/mito2/src/manifest/action.rs
+++ b/src/mito2/src/manifest/action.rs
@@ -14,7 +14,6 @@
use std::collections::HashMap;
-use common_telemetry::info;
use serde::{Deserialize, Serialize};
use snafu::{OptionExt, ResultExt};
use storage::metadata::VersionNumber;
@@ -24,7 +23,7 @@ use store_api::manifest::ManifestVersion;
use store_api::storage::{RegionId, SequenceNumber};
use crate::error::{RegionMetadataNotFoundSnafu, Result, SerdeJsonSnafu, Utf8Snafu};
-use crate::metadata::RegionMetadata;
+use crate::metadata::RegionMetadataRef;
/// Actions that can be applied to region manifest.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
@@ -42,7 +41,7 @@ pub enum RegionMetaAction {
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
pub struct RegionChange {
/// The metadata after changed.
- pub metadata: RegionMetadata,
+ pub metadata: RegionMetadataRef,
}
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
@@ -59,54 +58,50 @@ pub struct RegionRemove {
pub region_id: RegionId,
}
-/// The region manifest data
+/// The region manifest data.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
pub struct RegionManifest {
- pub metadata: RegionMetadata,
- pub version: RegionVersion,
+ /// Metadata of the region.
+ pub metadata: RegionMetadataRef,
+ /// SST files.
+ pub files: HashMap<FileId, FileMeta>,
+ /// Current manifest version.
+ pub manifest_version: ManifestVersion,
}
#[derive(Debug, Default)]
pub struct RegionManifestBuilder {
- metadata: Option<RegionMetadata>,
- version: Option<RegionVersion>,
+ metadata: Option<RegionMetadataRef>,
+ files: HashMap<FileId, FileMeta>,
+ manifest_version: ManifestVersion,
}
impl RegionManifestBuilder {
- /// Start with a checkpoint
+ /// Start with a checkpoint.
pub fn with_checkpoint(checkpoint: Option<RegionManifest>) -> Self {
if let Some(s) = checkpoint {
Self {
metadata: Some(s.metadata),
- version: Some(s.version),
+ files: s.files,
+ manifest_version: s.manifest_version,
}
} else {
Default::default()
}
}
- pub fn apply_change(&mut self, change: RegionChange) {
+ pub fn apply_change(&mut self, manifest_version: ManifestVersion, change: RegionChange) {
self.metadata = Some(change.metadata);
+ self.manifest_version = manifest_version;
}
pub fn apply_edit(&mut self, manifest_version: ManifestVersion, edit: RegionEdit) {
- if let Some(version) = &mut self.version {
- version.manifest_version = manifest_version;
- for file in edit.files_to_add {
- let _ = version.files.insert(file.file_id, file);
- }
- for file in edit.files_to_remove {
- let _ = version.files.remove(&file.file_id);
- }
- } else {
- self.version = Some(RegionVersion {
- manifest_version,
- files: edit
- .files_to_add
- .into_iter()
- .map(|f| (f.file_id, f))
- .collect(),
- });
+ self.manifest_version = manifest_version;
+ for file in edit.files_to_add {
+ self.files.insert(file.file_id, file);
+ }
+ for file in edit.files_to_remove {
+ self.files.remove(&file.file_id);
}
}
@@ -117,24 +112,14 @@ impl RegionManifestBuilder {
pub fn try_build(self) -> Result<RegionManifest> {
let metadata = self.metadata.context(RegionMetadataNotFoundSnafu)?;
- let version = self.version.unwrap_or_else(|| {
- info!(
- "Create new default region version for region {:?}",
- metadata.region_id
- );
- RegionVersion::default()
- });
- Ok(RegionManifest { metadata, version })
+ Ok(RegionManifest {
+ metadata,
+ files: self.files,
+ manifest_version: self.manifest_version,
+ })
}
}
-/// The region version checkpoint
-#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Default)]
-pub struct RegionVersion {
- pub manifest_version: ManifestVersion,
- pub files: HashMap<FileId, FileMeta>,
-}
-
// The checkpoint of region manifest, generated by checkpointer.
#[derive(Serialize, Deserialize, Debug, Clone, Eq, PartialEq)]
pub struct RegionCheckpoint {
@@ -170,22 +155,17 @@ impl RegionCheckpoint {
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
pub struct RegionMetaActionList {
pub actions: Vec<RegionMetaAction>,
- pub prev_version: ManifestVersion,
}
impl RegionMetaActionList {
pub fn with_action(action: RegionMetaAction) -> Self {
Self {
actions: vec![action],
- prev_version: 0,
}
}
pub fn new(actions: Vec<RegionMetaAction>) -> Self {
- Self {
- actions,
- prev_version: 0,
- }
+ Self { actions }
}
}
@@ -195,11 +175,7 @@ impl RegionMetaActionList {
self.actions.insert(0, RegionMetaAction::Protocol(action));
}
- pub fn set_prev_version(&mut self, version: ManifestVersion) {
- self.prev_version = version;
- }
-
- /// Encode self into json in the form of string lines, starts with prev_version and then action json list.
+ /// Encode self into json in the form of string lines.
pub fn encode(&self) -> Result<Vec<u8>> {
let json = serde_json::to_string(&self).context(SerdeJsonSnafu)?;
diff --git a/src/mito2/src/manifest/manager.rs b/src/mito2/src/manifest/manager.rs
index 7a82312e98e6..40d90ed0f417 100644
--- a/src/mito2/src/manifest/manager.rs
+++ b/src/mito2/src/manifest/manager.rs
@@ -12,22 +12,21 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::sync::atomic::Ordering;
use std::sync::Arc;
-use arc_swap::ArcSwap;
use common_telemetry::{debug, info};
-use snafu::OptionExt;
use store_api::manifest::action::{ProtocolAction, ProtocolVersion};
-use store_api::manifest::{AtomicManifestVersion, ManifestVersion, MAX_VERSION, MIN_VERSION};
+use store_api::manifest::{ManifestVersion, MAX_VERSION, MIN_VERSION};
+use tokio::sync::RwLock;
-use crate::error::{InitialMetadataSnafu, Result};
+use crate::error::Result;
use crate::manifest::action::{
RegionChange, RegionCheckpoint, RegionManifest, RegionManifestBuilder, RegionMetaAction,
RegionMetaActionIter, RegionMetaActionList,
};
use crate::manifest::options::RegionManifestOptions;
use crate::manifest::storage::ManifestObjectStore;
+use crate::metadata::RegionMetadataRef;
// rewrite note:
// trait Checkpoint -> struct RegionCheckpoint
@@ -36,32 +35,63 @@ use crate::manifest::storage::ManifestObjectStore;
/// Manage region's manifest. Provide APIs to access (create/modify/recover) region's persisted
/// metadata.
-#[derive(Clone, Debug)]
+#[derive(Debug)]
pub struct RegionManifestManager {
- inner: Arc<RegionManifestManagerInner>,
+ inner: RwLock<RegionManifestManagerInner>,
}
impl RegionManifestManager {
- /// Construct and recover a region's manifest from storage.
- pub async fn new(options: RegionManifestOptions) -> Result<Self> {
- let inner = RegionManifestManagerInner::new(options).await?;
+ /// Construct a region's manifest and persist it.
+ pub async fn new(metadata: RegionMetadataRef, options: RegionManifestOptions) -> Result<Self> {
+ let inner = RegionManifestManagerInner::new(metadata, options).await?;
Ok(Self {
- inner: Arc::new(inner),
+ inner: RwLock::new(inner),
})
}
+ /// Open an existing manifest.
+ pub async fn open(options: RegionManifestOptions) -> Result<Option<Self>> {
+ if let Some(inner) = RegionManifestManagerInner::open(options).await? {
+ Ok(Some(Self {
+ inner: RwLock::new(inner),
+ }))
+ } else {
+ Ok(None)
+ }
+ }
+
+ /// Stop background tasks gracefully.
pub async fn stop(&self) -> Result<()> {
- self.inner.stop().await
+ let mut inner = self.inner.write().await;
+ inner.stop().await
}
/// Update the manifest. Return the current manifest version number.
pub async fn update(&self, action_list: RegionMetaActionList) -> Result<ManifestVersion> {
- self.inner.update(action_list).await
+ let mut inner = self.inner.write().await;
+ inner.update(action_list).await
}
/// Retrieve the current [RegionManifest].
- pub fn manifest(&self) -> Arc<RegionManifest> {
- self.inner.manifest.load().clone()
+ pub async fn manifest(&self) -> Arc<RegionManifest> {
+ let inner = self.inner.read().await;
+ inner.manifest.clone()
+ }
+}
+
+#[cfg(test)]
+impl RegionManifestManager {
+ pub(crate) async fn validate_manifest(
+ &self,
+ expect: &RegionMetadataRef,
+ last_version: ManifestVersion,
+ ) {
+ let manifest = self.manifest().await;
+ assert_eq!(manifest.metadata, *expect);
+
+ let inner = self.inner.read().await;
+ assert_eq!(inner.manifest.manifest_version, inner.last_version);
+ assert_eq!(last_version, inner.last_version);
}
}
@@ -69,12 +99,60 @@ impl RegionManifestManager {
struct RegionManifestManagerInner {
store: ManifestObjectStore,
options: RegionManifestOptions,
- version: AtomicManifestVersion,
- manifest: ArcSwap<RegionManifest>,
+ last_version: ManifestVersion,
+ manifest: Arc<RegionManifest>,
}
impl RegionManifestManagerInner {
- pub async fn new(mut options: RegionManifestOptions) -> Result<Self> {
+ /// Creates a new manifest.
+ async fn new(metadata: RegionMetadataRef, options: RegionManifestOptions) -> Result<Self> {
+ // construct storage
+ let store = ManifestObjectStore::new(
+ &options.manifest_dir,
+ options.object_store.clone(),
+ options.compress_type,
+ );
+
+ info!(
+ "Creating region manifest in {} with metadata {:?}",
+ options.manifest_dir, metadata
+ );
+
+ let version = MIN_VERSION;
+ let mut manifest_builder = RegionManifestBuilder::default();
+ // set the initial metadata.
+ manifest_builder.apply_change(
+ version,
+ RegionChange {
+ metadata: metadata.clone(),
+ },
+ );
+ let manifest = manifest_builder.try_build()?;
+
+ debug!(
+ "Build region manifest in {}, manifest: {:?}",
+ options.manifest_dir, manifest
+ );
+
+ // Persist region change.
+ let action_list =
+ RegionMetaActionList::with_action(RegionMetaAction::Change(RegionChange { metadata }));
+ store.save(version, &action_list.encode()?).await?;
+
+ // todo: start gc task
+
+ Ok(Self {
+ store,
+ options,
+ last_version: version,
+ manifest: Arc::new(manifest),
+ })
+ }
+
+ /// Open an existing manifest.
+ ///
+ /// Returns `Ok(None)` if no such manifest exists.
+ async fn open(options: RegionManifestOptions) -> Result<Option<Self>> {
// construct storage
let store = ManifestObjectStore::new(
&options.manifest_dir,
@@ -91,13 +169,16 @@ impl RegionManifestManagerInner {
.transpose()?;
let mut manifest_builder = if let Some(checkpoint) = checkpoint {
info!(
- "Recover region manifest from checkpoint version {}",
- checkpoint.last_version
+ "Recover region manifest {} from checkpoint version {}",
+ options.manifest_dir, checkpoint.last_version
);
version = version.max(checkpoint.last_version + 1);
RegionManifestBuilder::with_checkpoint(checkpoint.checkpoint)
} else {
- info!("Checkpoint not found, build manifest from scratch");
+ info!(
+ "Checkpoint not found in {}, build manifest from scratch",
+ options.manifest_dir
+ );
RegionManifestBuilder::default()
};
@@ -108,13 +189,16 @@ impl RegionManifestManagerInner {
for action in action_list.actions {
match action {
RegionMetaAction::Change(action) => {
- manifest_builder.apply_change(action);
+ manifest_builder.apply_change(manifest_version, action);
}
RegionMetaAction::Edit(action) => {
manifest_builder.apply_edit(manifest_version, action);
}
RegionMetaAction::Remove(_) | RegionMetaAction::Protocol(_) => {
- debug!("Unhandled action: {:?}", action);
+ debug!(
+ "Unhandled action in {}, action: {:?}",
+ options.manifest_dir, action
+ );
}
}
}
@@ -122,63 +206,66 @@ impl RegionManifestManagerInner {
// set the initial metadata if necessary
if !manifest_builder.contains_metadata() {
- let metadata = options
- .initial_metadata
- .take()
- .context(InitialMetadataSnafu)?;
- info!("Creating region manifest with metadata {:?}", metadata);
- manifest_builder.apply_change(RegionChange { metadata });
+ debug!("No region manifest in {}", options.manifest_dir);
+ return Ok(None);
}
let manifest = manifest_builder.try_build()?;
- debug!("Recovered region manifest: {:?}", manifest);
- let version = manifest.version.manifest_version;
+ debug!(
+ "Recovered region manifest from {}, manifest: {:?}",
+ options.manifest_dir, manifest
+ );
+ let version = manifest.manifest_version;
// todo: start gc task
- Ok(Self {
+ Ok(Some(Self {
store,
options,
- version: AtomicManifestVersion::new(version),
- manifest: ArcSwap::new(Arc::new(manifest)),
- })
+ last_version: version,
+ manifest: Arc::new(manifest),
+ }))
}
- pub async fn stop(&self) -> Result<()> {
+ async fn stop(&mut self) -> Result<()> {
// todo: stop gc task
Ok(())
}
- pub async fn update(&self, action_list: RegionMetaActionList) -> Result<ManifestVersion> {
- let version = self.inc_version();
-
+ async fn update(&mut self, action_list: RegionMetaActionList) -> Result<ManifestVersion> {
+ let version = self.increase_version();
self.store.save(version, &action_list.encode()?).await?;
let mut manifest_builder =
- RegionManifestBuilder::with_checkpoint(Some(self.manifest.load().as_ref().clone()));
+ RegionManifestBuilder::with_checkpoint(Some(self.manifest.as_ref().clone()));
for action in action_list.actions {
match action {
RegionMetaAction::Change(action) => {
- manifest_builder.apply_change(action);
+ manifest_builder.apply_change(version, action);
}
RegionMetaAction::Edit(action) => {
manifest_builder.apply_edit(version, action);
}
RegionMetaAction::Remove(_) | RegionMetaAction::Protocol(_) => {
- debug!("Unhandled action: {:?}", action);
+ debug!(
+ "Unhandled action for region {}, action: {:?}",
+ self.manifest.metadata.region_id, action
+ );
}
}
}
let new_manifest = manifest_builder.try_build()?;
- self.manifest.store(Arc::new(new_manifest));
+ self.manifest = Arc::new(new_manifest);
Ok(version)
}
}
impl RegionManifestManagerInner {
- fn inc_version(&self) -> ManifestVersion {
- self.version.fetch_add(1, Ordering::Relaxed)
+ /// Increases the last version and returns the increased version.
+ fn increase_version(&mut self) -> ManifestVersion {
+ self.last_version += 1;
+ self.last_version
}
// pub (crate) fn checkpointer(&self) -> Checkpointer {
@@ -254,7 +341,6 @@ mod test {
use store_api::storage::RegionId;
use super::*;
- use crate::error::Error;
use crate::manifest::action::RegionChange;
use crate::metadata::{ColumnMetadata, RegionMetadata, RegionMetadataBuilder, SemanticType};
use crate::test_util::TestEnv;
@@ -289,57 +375,82 @@ mod test {
}
#[tokio::test]
- async fn create_region_without_initial_metadata() {
+ async fn create_manifest_manager() {
+ let metadata = Arc::new(basic_region_metadata());
let env = TestEnv::new("");
- let result = env
- .create_manifest_manager(CompressionType::Uncompressed, 10, None)
- .await;
- assert!(matches!(
- result.err().unwrap(),
- Error::InitialMetadata { .. }
- ))
+ let manager = env
+ .create_manifest_manager(CompressionType::Uncompressed, 10, Some(metadata.clone()))
+ .await
+ .unwrap()
+ .unwrap();
+
+ manager.validate_manifest(&metadata, 0).await;
}
#[tokio::test]
- async fn create_manifest_manager() {
- let metadata = basic_region_metadata();
+ async fn open_manifest_manager() {
let env = TestEnv::new("");
+ // Try to open an empty manifest.
+ assert!(env
+ .create_manifest_manager(CompressionType::Uncompressed, 10, None)
+ .await
+ .unwrap()
+ .is_none());
+
+ // Creates a manifest.
+ let metadata = Arc::new(basic_region_metadata());
let manager = env
.create_manifest_manager(CompressionType::Uncompressed, 10, Some(metadata.clone()))
.await
+ .unwrap()
.unwrap();
+ // Stops it.
+ manager.stop().await.unwrap();
- let manifest = manager.manifest();
- assert_eq!(manifest.metadata, metadata);
+ // Open it.
+ let manager = env
+ .create_manifest_manager(CompressionType::Uncompressed, 10, None)
+ .await
+ .unwrap()
+ .unwrap();
+
+ manager.validate_manifest(&metadata, 0).await;
}
#[tokio::test]
async fn region_change_add_column() {
- let metadata = basic_region_metadata();
+ let metadata = Arc::new(basic_region_metadata());
let env = TestEnv::new("");
let manager = env
.create_manifest_manager(CompressionType::Uncompressed, 10, Some(metadata.clone()))
.await
+ .unwrap()
.unwrap();
- let mut new_metadata_builder = RegionMetadataBuilder::from_existing(metadata, 1);
+ let mut new_metadata_builder = RegionMetadataBuilder::from_existing((*metadata).clone(), 1);
new_metadata_builder.push_column_metadata(ColumnMetadata {
column_schema: ColumnSchema::new("val2", ConcreteDataType::float64_datatype(), false),
semantic_type: SemanticType::Field,
column_id: 252,
});
- let new_metadata = new_metadata_builder.build().unwrap();
+ let new_metadata = Arc::new(new_metadata_builder.build().unwrap());
- let mut action_list =
+ let action_list =
RegionMetaActionList::with_action(RegionMetaAction::Change(RegionChange {
metadata: new_metadata.clone(),
}));
- action_list.set_prev_version(0);
- let prev_version = manager.update(action_list).await.unwrap();
- assert_eq!(prev_version, 0);
+ let current_version = manager.update(action_list).await.unwrap();
+ assert_eq!(current_version, 1);
+ manager.validate_manifest(&new_metadata, 1).await;
- let manifest = manager.manifest();
- assert_eq!(manifest.metadata, new_metadata);
+ // Reopen the manager.
+ manager.stop().await.unwrap();
+ let manager = env
+ .create_manifest_manager(CompressionType::Uncompressed, 10, None)
+ .await
+ .unwrap()
+ .unwrap();
+ manager.validate_manifest(&new_metadata, 1).await;
}
}
diff --git a/src/mito2/src/manifest/options.rs b/src/mito2/src/manifest/options.rs
index 6f6d64cfd144..d72ea5ff2333 100644
--- a/src/mito2/src/manifest/options.rs
+++ b/src/mito2/src/manifest/options.rs
@@ -17,17 +17,13 @@
use common_datasource::compression::CompressionType;
use object_store::ObjectStore;
-use crate::metadata::RegionMetadata;
-
+/// Options for manifest.
#[derive(Debug, Clone)]
pub struct RegionManifestOptions {
+ /// Directory to store manifest.
pub manifest_dir: String,
pub object_store: ObjectStore,
pub compress_type: CompressionType,
/// Interval of version ([ManifestVersion](store_api::manifest::ManifestVersion)) between two checkpoints.
pub checkpoint_interval: u64,
- /// Initial [RegionMetadata](crate::metadata::RegionMetadata) of this region.
- /// Only need to set when create a new region, otherwise it will be ignored.
- // TODO(yingwen): Could we pass RegionMetadataRef?
- pub initial_metadata: Option<RegionMetadata>,
}
diff --git a/src/mito2/src/region/opener.rs b/src/mito2/src/region/opener.rs
index 2ff78b696b5a..2b2f8e1f4f70 100644
--- a/src/mito2/src/region/opener.rs
+++ b/src/mito2/src/region/opener.rs
@@ -60,19 +60,18 @@ impl RegionOpener {
/// Writes region manifest and creates a new region.
pub(crate) async fn create(self, config: &MitoConfig) -> Result<MitoRegion> {
let region_id = self.metadata.region_id;
+ let metadata = Arc::new(self.metadata);
+
// Create a manifest manager for this region.
let options = RegionManifestOptions {
manifest_dir: new_manifest_dir(&self.region_dir),
object_store: self.object_store,
compress_type: config.manifest_compress_type,
checkpoint_interval: config.manifest_checkpoint_interval,
- // We are creating a new region, so we need to set this field.
- initial_metadata: Some(self.metadata.clone()),
};
// Writes regions to the manifest file.
- let manifest_manager = RegionManifestManager::new(options).await?;
+ let manifest_manager = RegionManifestManager::new(metadata.clone(), options).await?;
- let metadata = Arc::new(self.metadata);
let mutable = self.memtable_builder.build(&metadata);
let version = VersionBuilder::new(metadata, mutable).build();
diff --git a/src/mito2/src/sst.rs b/src/mito2/src/sst.rs
index bb31fdc3ddae..b12fa962da9d 100644
--- a/src/mito2/src/sst.rs
+++ b/src/mito2/src/sst.rs
@@ -15,6 +15,7 @@
//! Sorted strings tables.
pub mod file;
+pub mod file_purger;
pub mod parquet;
mod stream_writer;
pub(crate) mod version;
diff --git a/src/mito2/src/sst/file.rs b/src/mito2/src/sst/file.rs
index 8ebba3a2f9f8..8affb3a0d3d2 100644
--- a/src/mito2/src/sst/file.rs
+++ b/src/mito2/src/sst/file.rs
@@ -16,7 +16,7 @@
use std::fmt;
use std::str::FromStr;
-use std::sync::atomic::AtomicBool;
+use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use common_time::Timestamp;
@@ -26,6 +26,8 @@ use snafu::{ResultExt, Snafu};
use store_api::storage::RegionId;
use uuid::Uuid;
+use crate::sst::file_purger::{FilePurgerRef, PurgeRequest};
+
/// Type to store SST level.
pub type Level = u8;
/// Maximum level of SSTs.
@@ -99,8 +101,8 @@ pub struct FileHandle {
impl fmt::Debug for FileHandle {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("FileHandle")
- .field("file_id", &self.inner.meta.file_id)
.field("region_id", &self.inner.meta.region_id)
+ .field("file_id", &self.inner.meta.file_id)
.field("time_range", &self.inner.meta.time_range)
.field("size", &self.inner.meta.file_size)
.field("level", &self.inner.meta.level)
@@ -129,6 +131,18 @@ struct FileHandleInner {
meta: FileMeta,
compacting: AtomicBool,
deleted: AtomicBool,
+ file_purger: FilePurgerRef,
+}
+
+impl Drop for FileHandleInner {
+ fn drop(&mut self) {
+ if self.deleted.load(Ordering::Relaxed) {
+ self.file_purger.send_request(PurgeRequest {
+ region_id: self.meta.region_id,
+ file_id: self.meta.file_id,
+ });
+ }
+ }
}
#[cfg(test)]
diff --git a/src/mito2/src/sst/file_purger.rs b/src/mito2/src/sst/file_purger.rs
new file mode 100644
index 000000000000..010fc3ffc255
--- /dev/null
+++ b/src/mito2/src/sst/file_purger.rs
@@ -0,0 +1,45 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::sync::Arc;
+
+use store_api::storage::RegionId;
+
+use crate::sst::file::FileId;
+
+/// Request to remove a file.
+#[derive(Debug)]
+pub struct PurgeRequest {
+ /// Region id of the file.
+ pub region_id: RegionId,
+ /// Id of the file.
+ pub file_id: FileId,
+}
+
+/// A worker to delete files in background.
+pub trait FilePurger: Send + Sync {
+ /// Send a purge request to the background worker.
+ fn send_request(&self, request: PurgeRequest);
+}
+
+pub type FilePurgerRef = Arc<dyn FilePurger>;
+
+// TODO(yingwen): Remove this once we implement the real purger.
+/// A purger that does nothing.
+#[derive(Debug)]
+struct NoopPurger {}
+
+impl FilePurger for NoopPurger {
+ fn send_request(&self, _request: PurgeRequest) {}
+}
diff --git a/src/mito2/src/test_util.rs b/src/mito2/src/test_util.rs
index ba663bca4d01..71244f6a1945 100644
--- a/src/mito2/src/test_util.rs
+++ b/src/mito2/src/test_util.rs
@@ -32,7 +32,7 @@ use crate::engine::MitoEngine;
use crate::error::Result;
use crate::manifest::manager::RegionManifestManager;
use crate::manifest::options::RegionManifestOptions;
-use crate::metadata::{ColumnMetadata, RegionMetadata, SemanticType};
+use crate::metadata::{ColumnMetadata, RegionMetadataRef, SemanticType};
use crate::worker::request::{CreateRequest, RegionOptions};
use crate::worker::WorkerGroup;
@@ -77,17 +77,19 @@ impl TestEnv {
(log_store, object_store)
}
+ /// If `initial_metadata` is `Some`, creates a new manifest. If `initial_metadata`
+ /// is `None`, opens an existing manifest and returns `None` if no such manifest exists.
pub async fn create_manifest_manager(
&self,
compress_type: CompressionType,
checkpoint_interval: u64,
- initial_metadata: Option<RegionMetadata>,
- ) -> Result<RegionManifestManager> {
+ initial_metadata: Option<RegionMetadataRef>,
+ ) -> Result<Option<RegionManifestManager>> {
let data_home = self.data_home.path().to_str().unwrap();
let manifest_dir = join_dir(data_home, "manifest");
let mut builder = Fs::default();
- let _ = builder.root(&manifest_dir);
+ builder.root(&manifest_dir);
let object_store = ObjectStore::new(builder).unwrap().finish();
let manifest_opts = RegionManifestOptions {
@@ -95,10 +97,15 @@ impl TestEnv {
object_store,
compress_type,
checkpoint_interval,
- initial_metadata,
};
- RegionManifestManager::new(manifest_opts).await
+ if let Some(metadata) = initial_metadata {
+ RegionManifestManager::new(metadata, manifest_opts)
+ .await
+ .map(Some)
+ } else {
+ RegionManifestManager::open(manifest_opts).await
+ }
}
}
diff --git a/src/store-api/src/manifest.rs b/src/store-api/src/manifest.rs
index b43185ba4ba9..985339c694c2 100644
--- a/src/store-api/src/manifest.rs
+++ b/src/store-api/src/manifest.rs
@@ -16,8 +16,6 @@
pub mod action;
mod storage;
-use std::sync::atomic::AtomicU64;
-
use async_trait::async_trait;
use common_error::ext::ErrorExt;
use serde::de::DeserializeOwned;
@@ -27,7 +25,6 @@ use crate::manifest::action::{ProtocolAction, ProtocolVersion};
pub use crate::manifest::storage::*;
pub type ManifestVersion = u64;
-pub type AtomicManifestVersion = AtomicU64;
pub const MIN_VERSION: u64 = 0;
pub const MAX_VERSION: u64 = u64::MAX;
|
feat
|
Implement open for RegionManifestManager (#2036)
|
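Note: the commit above splits manifest construction into `RegionManifestManager::new` (create and persist the initial metadata) and `RegionManifestManager::open` (returns `Ok(None)` when nothing has been persisted yet). Below is a minimal open-or-create sketch under that API; the helper function and the public module paths are assumptions for illustration.

// Minimal open-or-create sketch over the API introduced above; the helper
// and the public module paths are assumptions, not part of the commit.
use mito2::error::Result;
use mito2::manifest::manager::RegionManifestManager;
use mito2::manifest::options::RegionManifestOptions;
use mito2::metadata::RegionMetadataRef;

/// Opens the manifest under `options.manifest_dir` if one was persisted
/// before, otherwise creates a fresh manifest from `metadata`.
async fn open_or_create(
    metadata: RegionMetadataRef,
    options: RegionManifestOptions,
) -> Result<RegionManifestManager> {
    // `open` returns `Ok(None)` when no manifest exists yet.
    if let Some(manager) = RegionManifestManager::open(options.clone()).await? {
        return Ok(manager);
    }
    // First start of this region: persist the initial metadata as the first version.
    RegionManifestManager::new(metadata, options).await
}

This mirrors the `open_manifest_manager` test in the diff, which first tries to open (getting `None`), creates the manifest with the initial metadata, and then reopens it.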
ddbc97befb0f94be54de1f5e5d1a48ea508e8484
|
2023-02-16 13:34:17
|
Yingwen
|
refactor: changes CreateTableRequest::schema to RawSchema (#1018)
| false
|
diff --git a/src/catalog/src/error.rs b/src/catalog/src/error.rs
index 6a053fc46915..811628c1ff56 100644
--- a/src/catalog/src/error.rs
+++ b/src/catalog/src/error.rs
@@ -19,7 +19,6 @@ use common_error::ext::{BoxedError, ErrorExt};
use common_error::prelude::{Snafu, StatusCode};
use datafusion::error::DataFusionError;
use datatypes::prelude::ConcreteDataType;
-use datatypes::schema::RawSchema;
use snafu::{Backtrace, ErrorCompat};
use crate::DeregisterTableRequest;
@@ -162,19 +161,6 @@ pub enum Error {
source: table::error::Error,
},
- #[snafu(display(
- "Invalid table schema in catalog entry, table:{}, schema: {:?}, source: {}",
- table_info,
- schema,
- source
- ))]
- InvalidTableSchema {
- table_info: String,
- schema: RawSchema,
- #[snafu(backtrace)]
- source: datatypes::error::Error,
- },
-
#[snafu(display("Failure during SchemaProvider operation, source: {}", source))]
SchemaProviderOperation {
#[snafu(backtrace)]
@@ -254,8 +240,7 @@ impl ErrorExt for Error {
Error::MetaSrv { source, .. } => source.status_code(),
Error::SystemCatalogTableScan { source } => source.status_code(),
Error::SystemCatalogTableScanExec { source } => source.status_code(),
- Error::InvalidTableSchema { source, .. }
- | Error::InvalidTableInfoInCatalog { source } => source.status_code(),
+ Error::InvalidTableInfoInCatalog { source } => source.status_code(),
Error::SchemaProviderOperation { source } | Error::Internal { source } => {
source.status_code()
}
diff --git a/src/catalog/src/remote/manager.rs b/src/catalog/src/remote/manager.rs
index ce7d24b621bc..1c571159c85d 100644
--- a/src/catalog/src/remote/manager.rs
+++ b/src/catalog/src/remote/manager.rs
@@ -32,8 +32,8 @@ use table::TableRef;
use tokio::sync::Mutex;
use crate::error::{
- CatalogNotFoundSnafu, CreateTableSnafu, InvalidCatalogValueSnafu, InvalidTableSchemaSnafu,
- OpenTableSnafu, Result, SchemaNotFoundSnafu, TableExistsSnafu, UnimplementedSnafu,
+ CatalogNotFoundSnafu, CreateTableSnafu, InvalidCatalogValueSnafu, OpenTableSnafu, Result,
+ SchemaNotFoundSnafu, TableExistsSnafu, UnimplementedSnafu,
};
use crate::helper::{
build_catalog_prefix, build_schema_prefix, build_table_global_prefix, CatalogKey, CatalogValue,
@@ -346,21 +346,13 @@ impl RemoteCatalogManager {
);
let meta = &table_info.meta;
- let schema = meta
- .schema
- .clone()
- .try_into()
- .context(InvalidTableSchemaSnafu {
- table_info: format!("{catalog_name}.{schema_name}.{table_name}"),
- schema: meta.schema.clone(),
- })?;
let req = CreateTableRequest {
id: table_id,
catalog_name: catalog_name.clone(),
schema_name: schema_name.clone(),
table_name: table_name.clone(),
desc: None,
- schema: Arc::new(schema),
+ schema: meta.schema.clone(),
region_numbers: region_numbers.clone(),
primary_key_indices: meta.primary_key_indices.clone(),
create_if_not_exists: true,
diff --git a/src/catalog/src/system.rs b/src/catalog/src/system.rs
index 2ac85407830f..23542d6be07a 100644
--- a/src/catalog/src/system.rs
+++ b/src/catalog/src/system.rs
@@ -26,7 +26,7 @@ use common_recordbatch::SendableRecordBatchStream;
use common_telemetry::debug;
use common_time::util;
use datatypes::prelude::{ConcreteDataType, ScalarVector, VectorRef};
-use datatypes::schema::{ColumnSchema, Schema, SchemaBuilder, SchemaRef};
+use datatypes::schema::{ColumnSchema, RawSchema, SchemaRef};
use datatypes::vectors::{BinaryVector, TimestampMillisecondVector, UInt8Vector};
use serde::{Deserialize, Serialize};
use snafu::{ensure, OptionExt, ResultExt};
@@ -88,7 +88,7 @@ impl SystemCatalogTable {
table_name: SYSTEM_CATALOG_TABLE_NAME.to_string(),
table_id: SYSTEM_CATALOG_TABLE_ID,
};
- let schema = Arc::new(build_system_catalog_schema());
+ let schema = build_system_catalog_schema();
let ctx = EngineContext::default();
if let Some(table) = engine
@@ -105,7 +105,7 @@ impl SystemCatalogTable {
schema_name: INFORMATION_SCHEMA_NAME.to_string(),
table_name: SYSTEM_CATALOG_TABLE_NAME.to_string(),
desc: Some("System catalog table".to_string()),
- schema: schema.clone(),
+ schema,
region_numbers: vec![0],
primary_key_indices: vec![ENTRY_TYPE_INDEX, KEY_INDEX],
create_if_not_exists: true,
@@ -143,7 +143,7 @@ impl SystemCatalogTable {
/// - value: JSON-encoded value of entry's metadata.
/// - gmt_created: create time of this metadata.
/// - gmt_modified: last updated time of this metadata.
-fn build_system_catalog_schema() -> Schema {
+fn build_system_catalog_schema() -> RawSchema {
let cols = vec![
ColumnSchema::new(
"entry_type".to_string(),
@@ -178,8 +178,7 @@ fn build_system_catalog_schema() -> Schema {
),
];
- // The schema of this table must be valid.
- SchemaBuilder::try_from(cols).unwrap().build().unwrap()
+ RawSchema::new(cols)
}
/// Formats key string for table entry in system catalog
diff --git a/src/catalog/tests/remote_catalog_tests.rs b/src/catalog/tests/remote_catalog_tests.rs
index 7df423111623..32e8a49e568b 100644
--- a/src/catalog/tests/remote_catalog_tests.rs
+++ b/src/catalog/tests/remote_catalog_tests.rs
@@ -28,7 +28,7 @@ mod tests {
};
use catalog::{CatalogList, CatalogManager, RegisterTableRequest};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
- use datatypes::schema::Schema;
+ use datatypes::schema::RawSchema;
use futures_util::StreamExt;
use table::engine::{EngineContext, TableEngineRef};
use table::requests::CreateTableRequest;
@@ -116,7 +116,7 @@ mod tests {
let schema_name = "nonexistent_schema".to_string();
let table_name = "fail_table".to_string();
// this schema has no effect
- let table_schema = Arc::new(Schema::new(vec![]));
+ let table_schema = RawSchema::new(vec![]);
let table = table_engine
.create_table(
&EngineContext {},
@@ -126,7 +126,7 @@ mod tests {
schema_name: schema_name.clone(),
table_name: table_name.clone(),
desc: None,
- schema: table_schema.clone(),
+ schema: table_schema,
region_numbers: vec![0],
primary_key_indices: vec![],
create_if_not_exists: false,
@@ -176,7 +176,7 @@ mod tests {
let table_name = "test_table".to_string();
let table_id = 1;
// this schema has no effect
- let table_schema = Arc::new(Schema::new(vec![]));
+ let table_schema = RawSchema::new(vec![]);
let table = table_engine
.create_table(
&EngineContext {},
@@ -186,7 +186,7 @@ mod tests {
schema_name: schema_name.clone(),
table_name: table_name.clone(),
desc: None,
- schema: table_schema.clone(),
+ schema: table_schema,
region_numbers: vec![0],
primary_key_indices: vec![],
create_if_not_exists: false,
@@ -246,7 +246,7 @@ mod tests {
schema_name: schema_name.clone(),
table_name: "".to_string(),
desc: None,
- schema: Arc::new(Schema::new(vec![])),
+ schema: RawSchema::new(vec![]),
region_numbers: vec![0],
primary_key_indices: vec![],
create_if_not_exists: false,
diff --git a/src/common/grpc-expr/src/alter.rs b/src/common/grpc-expr/src/alter.rs
index 5c450253d57b..1ebc38bf0d97 100644
--- a/src/common/grpc-expr/src/alter.rs
+++ b/src/common/grpc-expr/src/alter.rs
@@ -12,19 +12,17 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::sync::Arc;
-
use api::v1::alter_expr::Kind;
use api::v1::{column_def, AlterExpr, CreateTableExpr, DropColumns, RenameTable};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
-use datatypes::schema::{ColumnSchema, SchemaBuilder, SchemaRef};
+use datatypes::schema::{ColumnSchema, RawSchema};
use snafu::{ensure, OptionExt, ResultExt};
use table::metadata::TableId;
use table::requests::{AddColumnRequest, AlterKind, AlterTableRequest, CreateTableRequest};
use crate::error::{
- ColumnNotFoundSnafu, CreateSchemaSnafu, InvalidColumnDefSnafu, MissingFieldSnafu,
- MissingTimestampColumnSnafu, Result,
+ ColumnNotFoundSnafu, InvalidColumnDefSnafu, MissingFieldSnafu, MissingTimestampColumnSnafu,
+ Result,
};
/// Convert an [`AlterExpr`] to an [`AlterTableRequest`]
@@ -92,7 +90,7 @@ pub fn alter_expr_to_request(expr: AlterExpr) -> Result<AlterTableRequest> {
}
}
-pub fn create_table_schema(expr: &CreateTableExpr) -> Result<SchemaRef> {
+pub fn create_table_schema(expr: &CreateTableExpr) -> Result<RawSchema> {
let column_schemas = expr
.column_defs
.iter()
@@ -121,12 +119,7 @@ pub fn create_table_schema(expr: &CreateTableExpr) -> Result<SchemaRef> {
})
.collect::<Vec<_>>();
- Ok(Arc::new(
- SchemaBuilder::try_from(column_schemas)
- .context(CreateSchemaSnafu)?
- .build()
- .context(CreateSchemaSnafu)?,
- ))
+ Ok(RawSchema::new(column_schemas))
}
pub fn create_expr_to_request(
@@ -138,8 +131,11 @@ pub fn create_expr_to_request(
.primary_keys
.iter()
.map(|key| {
+ // We do a linear search here.
schema
- .column_index_by_name(key)
+ .column_schemas
+ .iter()
+ .position(|column_schema| column_schema.name == *key)
.context(ColumnNotFoundSnafu {
column_name: key,
table_name: &expr.table_name,
diff --git a/src/common/grpc-expr/src/error.rs b/src/common/grpc-expr/src/error.rs
index 5dbf9a2c12e8..51a5be55a2d1 100644
--- a/src/common/grpc-expr/src/error.rs
+++ b/src/common/grpc-expr/src/error.rs
@@ -40,12 +40,6 @@ pub enum Error {
source: api::error::Error,
},
- #[snafu(display("Failed to create schema when creating table, source: {}", source))]
- CreateSchema {
- #[snafu(backtrace)]
- source: datatypes::error::Error,
- },
-
#[snafu(display(
"Duplicated timestamp column in gRPC requests, exists {}, duplicated: {}",
exists,
@@ -102,9 +96,9 @@ impl ErrorExt for Error {
StatusCode::InvalidArguments
}
Error::ColumnDataType { .. } => StatusCode::Internal,
- Error::CreateSchema { .. }
- | Error::DuplicatedTimestampColumn { .. }
- | Error::MissingTimestampColumn { .. } => StatusCode::InvalidArguments,
+ Error::DuplicatedTimestampColumn { .. } | Error::MissingTimestampColumn { .. } => {
+ StatusCode::InvalidArguments
+ }
Error::InvalidColumnProto { .. } => StatusCode::InvalidArguments,
Error::CreateVector { .. } => StatusCode::InvalidArguments,
Error::MissingField { .. } => StatusCode::InvalidArguments,
diff --git a/src/common/substrait/src/df_logical.rs b/src/common/substrait/src/df_logical.rs
index e605af151f85..3357bc207554 100644
--- a/src/common/substrait/src/df_logical.rs
+++ b/src/common/substrait/src/df_logical.rs
@@ -522,7 +522,7 @@ mod test {
use catalog::{CatalogList, CatalogProvider, RegisterTableRequest};
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use datafusion::common::{DFSchema, ToDFSchema};
- use datatypes::schema::Schema;
+ use datatypes::schema::RawSchema;
use table::requests::CreateTableRequest;
use table::test_util::{EmptyTable, MockTableEngine};
@@ -558,7 +558,7 @@ mod test {
schema_name: DEFAULT_SCHEMA_NAME.to_string(),
table_name: table_name.to_string(),
desc: None,
- schema: Arc::new(Schema::new(supported_types())),
+ schema: RawSchema::new(supported_types()),
region_numbers: vec![0],
primary_key_indices: vec![],
create_if_not_exists: true,
diff --git a/src/datanode/src/error.rs b/src/datanode/src/error.rs
index c38c6982f25e..208d9eca251c 100644
--- a/src/datanode/src/error.rs
+++ b/src/datanode/src/error.rs
@@ -177,12 +177,6 @@ pub enum Error {
#[snafu(display("Not support SQL, error: {}", msg))]
NotSupportSql { msg: String },
- #[snafu(display("Failed to create schema when creating table, source: {}", source))]
- CreateSchema {
- #[snafu(backtrace)]
- source: datatypes::error::Error,
- },
-
#[snafu(display("Failed to convert datafusion schema, source: {}", source))]
ConvertSchema {
#[snafu(backtrace)]
@@ -370,9 +364,9 @@ impl ErrorExt for Error {
| Error::CreateExprToRequest { source }
| Error::InsertData { source } => source.status_code(),
- Error::CreateSchema { source, .. }
- | Error::ConvertSchema { source, .. }
- | Error::VectorComputation { source } => source.status_code(),
+ Error::ConvertSchema { source, .. } | Error::VectorComputation { source } => {
+ source.status_code()
+ }
Error::ColumnValuesNumberMismatch { .. }
| Error::InvalidSql { .. }
diff --git a/src/datanode/src/server/grpc.rs b/src/datanode/src/server/grpc.rs
index 053b673052bd..c541912de6ab 100644
--- a/src/datanode/src/server/grpc.rs
+++ b/src/datanode/src/server/grpc.rs
@@ -86,13 +86,11 @@ impl Instance {
#[cfg(test)]
mod tests {
- use std::sync::Arc;
-
use api::v1::{column_def, ColumnDataType, ColumnDef, TableId};
use common_catalog::consts::MIN_USER_TABLE_ID;
use common_grpc_expr::create_table_schema;
use datatypes::prelude::ConcreteDataType;
- use datatypes::schema::{ColumnDefaultConstraint, ColumnSchema, SchemaBuilder, SchemaRef};
+ use datatypes::schema::{ColumnDefaultConstraint, ColumnSchema, RawSchema};
use datatypes::value::Value;
use super::*;
@@ -224,7 +222,7 @@ mod tests {
}
}
- fn expected_table_schema() -> SchemaRef {
+ fn expected_table_schema() -> RawSchema {
let column_schemas = vec![
ColumnSchema::new("host", ConcreteDataType::string_datatype(), false),
ColumnSchema::new(
@@ -236,11 +234,7 @@ mod tests {
ColumnSchema::new("cpu", ConcreteDataType::float32_datatype(), true),
ColumnSchema::new("memory", ConcreteDataType::float64_datatype(), true),
];
- Arc::new(
- SchemaBuilder::try_from(column_schemas)
- .unwrap()
- .build()
- .unwrap(),
- )
+
+ RawSchema::new(column_schemas)
}
}
diff --git a/src/datanode/src/sql/create.rs b/src/datanode/src/sql/create.rs
index a983d42f3c27..299aa196063d 100644
--- a/src/datanode/src/sql/create.rs
+++ b/src/datanode/src/sql/create.rs
@@ -13,13 +13,12 @@
// limitations under the License.
use std::collections::HashMap;
-use std::sync::Arc;
use catalog::{RegisterSchemaRequest, RegisterTableRequest};
use common_query::Output;
use common_telemetry::tracing::info;
use common_telemetry::tracing::log::error;
-use datatypes::schema::SchemaBuilder;
+use datatypes::schema::RawSchema;
use session::context::QueryContextRef;
use snafu::{ensure, OptionExt, ResultExt};
use sql::ast::{ColumnOption, TableConstraint};
@@ -31,8 +30,8 @@ use table::metadata::TableId;
use table::requests::*;
use crate::error::{
- self, CatalogNotFoundSnafu, CatalogSnafu, ConstraintNotSupportedSnafu, CreateSchemaSnafu,
- CreateTableSnafu, IllegalPrimaryKeysDefSnafu, InsertSystemCatalogSnafu, KeyColumnNotFoundSnafu,
+ self, CatalogNotFoundSnafu, CatalogSnafu, ConstraintNotSupportedSnafu, CreateTableSnafu,
+ IllegalPrimaryKeysDefSnafu, InsertSystemCatalogSnafu, KeyColumnNotFoundSnafu,
RegisterSchemaSnafu, Result, SchemaExistsSnafu, SchemaNotFoundSnafu,
};
use crate::sql::SqlHandler;
@@ -239,13 +238,7 @@ impl SqlHandler {
})
.collect::<Result<Vec<_>>>()?;
- let schema = Arc::new(
- SchemaBuilder::try_from(columns_schemas)
- .context(CreateSchemaSnafu)?
- .build()
- .context(CreateSchemaSnafu)?,
- );
-
+ let schema = RawSchema::new(columns_schemas);
let request = CreateTableRequest {
id: table_id,
catalog_name: table_ref.catalog.to_string(),
@@ -267,6 +260,7 @@ mod tests {
use std::assert_matches::assert_matches;
use datatypes::prelude::ConcreteDataType;
+ use datatypes::schema::Schema;
use sql::dialect::GenericDialect;
use sql::parser::ParserContext;
use sql::statements::statement::Statement;
@@ -320,8 +314,8 @@ mod tests {
assert_eq!(42, c.id);
assert!(!c.create_if_not_exists);
assert_eq!(vec![0], c.primary_key_indices);
- assert_eq!(1, c.schema.timestamp_index().unwrap());
- assert_eq!(4, c.schema.column_schemas().len());
+ assert_eq!(1, c.schema.timestamp_index.unwrap());
+ assert_eq!(4, c.schema.column_schemas.len());
}
#[tokio::test]
@@ -371,7 +365,7 @@ mod tests {
.create_to_request(42, parsed_stmt, &TableReference::bare("demo_table"))
.unwrap();
assert!(c.primary_key_indices.is_empty());
- assert_eq!(c.schema.timestamp_index(), Some(1));
+ assert_eq!(c.schema.timestamp_index, Some(1));
}
/// Constraints specified, not column cannot be found.
@@ -438,40 +432,25 @@ mod tests {
assert_eq!("s".to_string(), request.schema_name);
assert_eq!("demo".to_string(), request.table_name);
assert!(!request.create_if_not_exists);
- assert_eq!(4, request.schema.column_schemas().len());
+ assert_eq!(4, request.schema.column_schemas.len());
assert_eq!(vec![0], request.primary_key_indices);
+ let schema = Schema::try_from(request.schema).unwrap();
assert_eq!(
ConcreteDataType::string_datatype(),
- request
- .schema
- .column_schema_by_name("host")
- .unwrap()
- .data_type
+ schema.column_schema_by_name("host").unwrap().data_type
);
assert_eq!(
ConcreteDataType::timestamp_millisecond_datatype(),
- request
- .schema
- .column_schema_by_name("ts")
- .unwrap()
- .data_type
+ schema.column_schema_by_name("ts").unwrap().data_type
);
assert_eq!(
ConcreteDataType::float64_datatype(),
- request
- .schema
- .column_schema_by_name("cpu")
- .unwrap()
- .data_type
+ schema.column_schema_by_name("cpu").unwrap().data_type
);
assert_eq!(
ConcreteDataType::float64_datatype(),
- request
- .schema
- .column_schema_by_name("memory")
- .unwrap()
- .data_type
+ schema.column_schema_by_name("memory").unwrap().data_type
);
}
}
diff --git a/src/datanode/src/tests/test_util.rs b/src/datanode/src/tests/test_util.rs
index fef0e5df0919..5365ad103ccb 100644
--- a/src/datanode/src/tests/test_util.rs
+++ b/src/datanode/src/tests/test_util.rs
@@ -19,7 +19,7 @@ use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MIN_USER
use common_query::Output;
use common_recordbatch::util;
use datatypes::data_type::ConcreteDataType;
-use datatypes::schema::{ColumnSchema, SchemaBuilder};
+use datatypes::schema::{ColumnSchema, RawSchema};
use mito::config::EngineConfig;
use mito::table::test_util::{new_test_object_store, MockEngine, MockMitoEngine};
use query::QueryEngineFactory;
@@ -104,12 +104,7 @@ pub(crate) async fn create_test_table(
schema_name: "public".to_string(),
table_name: table_name.to_string(),
desc: Some(" a test table".to_string()),
- schema: Arc::new(
- SchemaBuilder::try_from(column_schemas)
- .unwrap()
- .build()
- .expect("ts is expected to be timestamp column"),
- ),
+ schema: RawSchema::new(column_schemas),
create_if_not_exists: true,
primary_key_indices: vec![0], // "host" is in primary keys
table_options: HashMap::new(),
diff --git a/src/datatypes/src/schema/raw.rs b/src/datatypes/src/schema/raw.rs
index ab94e9ad8fcd..d952bb9f3c73 100644
--- a/src/datatypes/src/schema/raw.rs
+++ b/src/datatypes/src/schema/raw.rs
@@ -22,15 +22,38 @@ use crate::schema::{ColumnSchema, Schema, SchemaBuilder};
/// This struct only contains necessary data to recover the Schema.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct RawSchema {
+ /// Schema of columns.
pub column_schemas: Vec<ColumnSchema>,
+ /// Index of the timestamp column.
pub timestamp_index: Option<usize>,
+ /// Schema version.
pub version: u32,
}
+impl RawSchema {
+ /// Creates a new [RawSchema] from specific `column_schemas`.
+ ///
+ /// Sets [RawSchema::timestamp_index] to the first index of the timestamp
+ /// column. It doesn't check whether time index column is duplicate.
+ pub fn new(column_schemas: Vec<ColumnSchema>) -> RawSchema {
+ let timestamp_index = column_schemas
+ .iter()
+ .position(|column_schema| column_schema.is_time_index());
+
+ RawSchema {
+ column_schemas,
+ timestamp_index,
+ version: 0,
+ }
+ }
+}
+
impl TryFrom<RawSchema> for Schema {
type Error = Error;
fn try_from(raw: RawSchema) -> Result<Schema> {
+ // While building Schema, we don't trust the fields, such as timestamp_index,
+ // in RawSchema. We use SchemaBuilder to perform the validation.
SchemaBuilder::try_from(raw.column_schemas)?
.version(raw.version)
.build()
@@ -74,4 +97,33 @@ mod tests {
assert_eq!(schema, schema_new);
}
+
+ #[test]
+ fn test_new_raw_schema_with_time_index() {
+ let column_schemas = vec![
+ ColumnSchema::new("col1", ConcreteDataType::int32_datatype(), true),
+ ColumnSchema::new(
+ "ts",
+ ConcreteDataType::timestamp_millisecond_datatype(),
+ false,
+ )
+ .with_time_index(true),
+ ];
+ let schema = RawSchema::new(column_schemas);
+ assert_eq!(1, schema.timestamp_index.unwrap());
+ }
+
+ #[test]
+ fn test_new_raw_schema_without_time_index() {
+ let column_schemas = vec![
+ ColumnSchema::new("col1", ConcreteDataType::int32_datatype(), true),
+ ColumnSchema::new(
+ "ts",
+ ConcreteDataType::timestamp_millisecond_datatype(),
+ false,
+ ),
+ ];
+ let schema = RawSchema::new(column_schemas);
+ assert!(schema.timestamp_index.is_none());
+ }
}
diff --git a/src/mito/src/engine.rs b/src/mito/src/engine.rs
index 73cb27a17efa..91278eec6ce4 100644
--- a/src/mito/src/engine.rs
+++ b/src/mito/src/engine.rs
@@ -19,7 +19,7 @@ use async_trait::async_trait;
use common_error::ext::BoxedError;
use common_telemetry::tracing::log::info;
use common_telemetry::{debug, logging};
-use datatypes::schema::SchemaRef;
+use datatypes::schema::{Schema, SchemaRef};
use object_store::ObjectStore;
use snafu::{ensure, OptionExt, ResultExt};
use store_api::storage::{
@@ -42,8 +42,8 @@ use tokio::sync::Mutex;
use crate::config::EngineConfig;
use crate::error::{
self, BuildColumnDescriptorSnafu, BuildColumnFamilyDescriptorSnafu, BuildRegionDescriptorSnafu,
- BuildRowKeyDescriptorSnafu, InvalidPrimaryKeySnafu, MissingTimestampIndexSnafu,
- RegionNotFoundSnafu, Result, TableExistsSnafu,
+ BuildRowKeyDescriptorSnafu, InvalidPrimaryKeySnafu, InvalidRawSchemaSnafu,
+ MissingTimestampIndexSnafu, RegionNotFoundSnafu, Result, TableExistsSnafu,
};
use crate::manifest::TableManifest;
use crate::table::MitoTable;
@@ -274,7 +274,7 @@ fn build_column_family(
fn validate_create_table_request(request: &CreateTableRequest) -> Result<()> {
let ts_index = request
.schema
- .timestamp_index()
+ .timestamp_index
.context(MissingTimestampIndexSnafu {
table_name: &request.table_name,
})?;
@@ -320,18 +320,19 @@ impl<S: StorageEngine> MitoEngineInner<S> {
}
}
- let table_schema = &request.schema;
+ let table_schema =
+ Arc::new(Schema::try_from(request.schema).context(InvalidRawSchemaSnafu)?);
let primary_key_indices = &request.primary_key_indices;
let (next_column_id, default_cf) = build_column_family(
INIT_COLUMN_ID,
table_name,
- table_schema,
+ &table_schema,
primary_key_indices,
)?;
let (next_column_id, row_key) = build_row_key_desc(
next_column_id,
table_name,
- table_schema,
+ &table_schema,
primary_key_indices,
)?;
@@ -378,7 +379,7 @@ impl<S: StorageEngine> MitoEngineInner<S> {
}
let table_meta = TableMetaBuilder::default()
- .schema(request.schema)
+ .schema(table_schema)
.engine(MITO_ENGINE)
.next_column_id(next_column_id)
.primary_key_indices(request.primary_key_indices.clone())
@@ -599,7 +600,7 @@ mod tests {
use common_query::physical_plan::SessionContext;
use common_recordbatch::util;
use datatypes::prelude::ConcreteDataType;
- use datatypes::schema::{ColumnDefaultConstraint, ColumnSchema, SchemaBuilder};
+ use datatypes::schema::{ColumnDefaultConstraint, ColumnSchema, RawSchema};
use datatypes::value::Value;
use datatypes::vectors::{
Float64Vector, Int32Vector, StringVector, TimestampMillisecondVector, VectorRef,
@@ -635,12 +636,7 @@ mod tests {
.with_time_index(true),
];
- let schema = Arc::new(
- SchemaBuilder::try_from(column_schemas)
- .unwrap()
- .build()
- .expect("ts must be timestamp column"),
- );
+ let schema = RawSchema::new(column_schemas);
let (dir, object_store) =
test_util::new_test_object_store("test_insert_with_column_default_constraint").await;
@@ -665,7 +661,7 @@ mod tests {
schema_name: "public".to_string(),
table_name: table_name.to_string(),
desc: Some("a test table".to_string()),
- schema: schema.clone(),
+ schema,
create_if_not_exists: true,
primary_key_indices: Vec::default(),
table_options: HashMap::new(),
@@ -770,12 +766,7 @@ mod tests {
.with_time_index(true),
];
- let schema = Arc::new(
- SchemaBuilder::try_from(column_schemas)
- .unwrap()
- .build()
- .expect("ts must be timestamp column"),
- );
+ let schema = RawSchema::new(column_schemas);
let mut request = CreateTableRequest {
id: 1,
@@ -944,7 +935,7 @@ mod tests {
catalog_name: "greptime".to_string(),
schema_name: "public".to_string(),
table_name: table_info.name.to_string(),
- schema: table_info.meta.schema.clone(),
+ schema: RawSchema::from(&*table_info.meta.schema),
create_if_not_exists: true,
desc: None,
primary_key_indices: Vec::default(),
@@ -961,7 +952,7 @@ mod tests {
catalog_name: "greptime".to_string(),
schema_name: "public".to_string(),
table_name: table_info.name.to_string(),
- schema: table_info.meta.schema.clone(),
+ schema: RawSchema::from(&*table_info.meta.schema),
create_if_not_exists: false,
desc: None,
primary_key_indices: Vec::default(),
@@ -1170,7 +1161,7 @@ mod tests {
schema_name: DEFAULT_SCHEMA_NAME.to_string(),
table_name: another_name.to_string(),
desc: Some("another test table".to_string()),
- schema: Arc::new(schema_for_test()),
+ schema: RawSchema::from(&schema_for_test()),
region_numbers: vec![0],
primary_key_indices: vec![0],
create_if_not_exists: true,
@@ -1253,7 +1244,7 @@ mod tests {
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
schema_name: DEFAULT_SCHEMA_NAME.to_string(),
table_name: table_info.name.to_string(),
- schema: table_info.meta.schema.clone(),
+ schema: RawSchema::from(&*table_info.meta.schema),
create_if_not_exists: true,
desc: None,
primary_key_indices: Vec::default(),
@@ -1286,7 +1277,7 @@ mod tests {
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
schema_name: DEFAULT_SCHEMA_NAME.to_string(),
table_name: table_info.name.to_string(),
- schema: table_info.meta.schema.clone(),
+ schema: RawSchema::from(&*table_info.meta.schema),
create_if_not_exists: false,
desc: None,
primary_key_indices: Vec::default(),
diff --git a/src/mito/src/error.rs b/src/mito/src/error.rs
index ee1aa15265e4..03b7266cecf8 100644
--- a/src/mito/src/error.rs
+++ b/src/mito/src/error.rs
@@ -184,6 +184,9 @@ pub enum Error {
region_name: String,
backtrace: Backtrace,
},
+
+ #[snafu(display("Invalid schema, source: {}", source))]
+ InvalidRawSchema { source: datatypes::error::Error },
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -207,7 +210,8 @@ impl ErrorExt for Error {
| ProjectedColumnNotFound { .. }
| InvalidPrimaryKey { .. }
| MissingTimestampIndex { .. }
- | TableNotFound { .. } => StatusCode::InvalidArguments,
+ | TableNotFound { .. }
+ | InvalidRawSchema { .. } => StatusCode::InvalidArguments,
TableInfoNotFound { .. } | ConvertRaw { .. } => StatusCode::Unexpected,
diff --git a/src/mito/src/table/test_util.rs b/src/mito/src/table/test_util.rs
index 4b4681f15cf5..3a31f71965b3 100644
--- a/src/mito/src/table/test_util.rs
+++ b/src/mito/src/table/test_util.rs
@@ -18,7 +18,7 @@ use std::sync::Arc;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use datatypes::prelude::ConcreteDataType;
-use datatypes::schema::{ColumnSchema, Schema, SchemaBuilder, SchemaRef};
+use datatypes::schema::{ColumnSchema, RawSchema, Schema, SchemaBuilder, SchemaRef};
use datatypes::vectors::VectorRef;
use log_store::NoopLogStore;
use object_store::services::fs::Builder;
@@ -109,7 +109,7 @@ fn new_create_request(schema: SchemaRef) -> CreateTableRequest {
schema_name: "public".to_string(),
table_name: TABLE_NAME.to_string(),
desc: Some("a test table".to_string()),
- schema,
+ schema: RawSchema::from(&*schema),
region_numbers: vec![0],
create_if_not_exists: true,
primary_key_indices: vec![0],
diff --git a/src/script/src/table.rs b/src/script/src/table.rs
index 7fe267eeab06..0f66fc1a234c 100644
--- a/src/script/src/table.rs
+++ b/src/script/src/table.rs
@@ -24,7 +24,7 @@ use common_recordbatch::util as record_util;
use common_telemetry::logging;
use common_time::util;
use datatypes::prelude::{ConcreteDataType, ScalarVector};
-use datatypes::schema::{ColumnSchema, Schema, SchemaBuilder};
+use datatypes::schema::{ColumnSchema, RawSchema};
use datatypes::vectors::{StringVector, TimestampMillisecondVector, Vector, VectorRef};
use query::parser::QueryLanguageParser;
use query::QueryEngineRef;
@@ -50,7 +50,7 @@ impl ScriptsTable {
catalog_manager: CatalogManagerRef,
query_engine: QueryEngineRef,
) -> Result<Self> {
- let schema = Arc::new(build_scripts_schema());
+ let schema = build_scripts_schema();
// TODO(dennis): we put scripts table into default catalog and schema.
// maybe put into system catalog?
let request = CreateTableRequest {
@@ -202,7 +202,7 @@ impl ScriptsTable {
}
/// Build scripts table
-fn build_scripts_schema() -> Schema {
+fn build_scripts_schema() -> RawSchema {
let cols = vec![
ColumnSchema::new(
"schema".to_string(),
@@ -242,6 +242,5 @@ fn build_scripts_schema() -> Schema {
),
];
- // Schema is always valid here
- SchemaBuilder::try_from(cols).unwrap().build().unwrap()
+ RawSchema::new(cols)
}
diff --git a/src/table/src/requests.rs b/src/table/src/requests.rs
index 27ff4375ed86..f505f65c632b 100644
--- a/src/table/src/requests.rs
+++ b/src/table/src/requests.rs
@@ -16,7 +16,7 @@
use std::collections::HashMap;
use datatypes::prelude::VectorRef;
-use datatypes::schema::{ColumnSchema, SchemaRef};
+use datatypes::schema::{ColumnSchema, RawSchema};
use store_api::storage::RegionNumber;
use crate::metadata::TableId;
@@ -45,7 +45,7 @@ pub struct CreateTableRequest {
pub schema_name: String,
pub table_name: String,
pub desc: Option<String>,
- pub schema: SchemaRef,
+ pub schema: RawSchema,
pub region_numbers: Vec<u32>,
pub primary_key_indices: Vec<usize>,
pub create_if_not_exists: bool,
diff --git a/src/table/src/test_util/empty_table.rs b/src/table/src/test_util/empty_table.rs
index c0dfcb582199..7fd81682355a 100644
--- a/src/table/src/test_util/empty_table.rs
+++ b/src/table/src/test_util/empty_table.rs
@@ -29,8 +29,9 @@ pub struct EmptyTable {
impl EmptyTable {
pub fn new(req: CreateTableRequest) -> Self {
+ let schema = Arc::new(req.schema.try_into().unwrap());
let table_meta = TableMetaBuilder::default()
- .schema(req.schema)
+ .schema(schema)
.primary_key_indices(req.primary_key_indices)
.next_column_id(0)
.options(req.table_options)
diff --git a/tests-integration/src/test_util.rs b/tests-integration/src/test_util.rs
index 069a87b589a2..4679373cf9a9 100644
--- a/tests-integration/src/test_util.rs
+++ b/tests-integration/src/test_util.rs
@@ -30,7 +30,7 @@ use datanode::error::{CreateTableSnafu, Result};
use datanode::instance::{Instance, InstanceRef};
use datanode::sql::SqlHandler;
use datatypes::data_type::ConcreteDataType;
-use datatypes::schema::{ColumnSchema, SchemaBuilder};
+use datatypes::schema::{ColumnSchema, RawSchema};
use frontend::instance::Instance as FeInstance;
use object_store::backend::s3;
use object_store::services::oss;
@@ -236,12 +236,7 @@ pub async fn create_test_table(
schema_name: "public".to_string(),
table_name: table_name.to_string(),
desc: Some(" a test table".to_string()),
- schema: Arc::new(
- SchemaBuilder::try_from(column_schemas)
- .unwrap()
- .build()
- .expect("ts is expected to be timestamp column"),
- ),
+ schema: RawSchema::new(column_schemas),
create_if_not_exists: true,
primary_key_indices: vec![0], // "host" is in primary keys
table_options: HashMap::new(),
|
refactor
|
changes CreateTableRequest::schema to RawSchema (#1018)
|
6f4779b4744289ce3646076464c7e65edcc51b23
|
2023-09-13 14:40:10
|
JeremyHi
|
feat: engine name in heartbeat (#2377)
| false
|
diff --git a/src/datanode/src/datanode.rs b/src/datanode/src/datanode.rs
index 927510408703..07c42d6c04de 100644
--- a/src/datanode/src/datanode.rs
+++ b/src/datanode/src/datanode.rs
@@ -438,9 +438,7 @@ impl Datanode {
Mode::Standalone => None,
};
let heartbeat_task = match opts.mode {
- Mode::Distributed => {
- Some(HeartbeatTask::try_new(&opts, Some(region_server.clone())).await?)
- }
+ Mode::Distributed => Some(HeartbeatTask::try_new(&opts, region_server.clone()).await?),
Mode::Standalone => None,
};
let greptimedb_telemetry_task = get_greptimedb_telemetry_task(
diff --git a/src/datanode/src/heartbeat.rs b/src/datanode/src/heartbeat.rs
index 72c00192d857..92843e7630aa 100644
--- a/src/datanode/src/heartbeat.rs
+++ b/src/datanode/src/heartbeat.rs
@@ -62,11 +62,7 @@ impl Drop for HeartbeatTask {
impl HeartbeatTask {
/// Create a new heartbeat task instance.
- pub async fn try_new(
- opts: &DatanodeOptions,
- // TODO: remove optional
- region_server: Option<RegionServer>,
- ) -> Result<Self> {
+ pub async fn try_new(opts: &DatanodeOptions, region_server: RegionServer) -> Result<Self> {
let meta_client = new_metasrv_client(
opts.node_id.context(MissingNodeIdSnafu)?,
opts.meta_client_options
@@ -75,8 +71,6 @@ impl HeartbeatTask {
)
.await?;
- let region_server = region_server.unwrap();
-
let region_alive_keeper = Arc::new(RegionAliveKeeper::new(
region_server.clone(),
opts.heartbeat.interval_millis,
@@ -258,13 +252,13 @@ impl HeartbeatTask {
}
async fn load_region_stats(region_server: &RegionServer) -> Vec<RegionStat> {
- let region_ids = region_server.opened_region_ids();
- region_ids
+ let regions = region_server.opened_regions();
+ regions
.into_iter()
- .map(|region_id| RegionStat {
- // TODO(ruihang): scratch more info
+ .map(|(region_id, engine)| RegionStat {
region_id: region_id.as_u64(),
- engine: "MitoEngine".to_string(),
+ engine,
+ // TODO(ruihang): scratch more info
..Default::default()
})
.collect::<Vec<_>>()
diff --git a/src/datanode/src/region_server.rs b/src/datanode/src/region_server.rs
index 7580a8307ffc..15c295933735 100644
--- a/src/datanode/src/region_server.rs
+++ b/src/datanode/src/region_server.rs
@@ -88,8 +88,12 @@ impl RegionServer {
self.inner.handle_read(request).await
}
- pub fn opened_region_ids(&self) -> Vec<RegionId> {
- self.inner.region_map.iter().map(|e| *e.key()).collect()
+ pub fn opened_regions(&self) -> Vec<(RegionId, String)> {
+ self.inner
+ .region_map
+ .iter()
+ .map(|e| (*e.key(), e.value().name().to_string()))
+ .collect()
}
pub fn runtime(&self) -> Arc<Runtime> {
|
feat
|
engine name in heartbeat (#2377)
|
aeca0d8e8a9241db1e807aeb75d15de77e999a90
|
2024-03-08 13:47:57
|
Eugene Tolbakov
|
feat(influxdb): add db query param support for v2 write api (#3445)
| false
|
diff --git a/src/servers/src/grpc/authorize.rs b/src/servers/src/grpc/authorize.rs
index 878a45f1693c..84e203d3730e 100644
--- a/src/servers/src/grpc/authorize.rs
+++ b/src/servers/src/grpc/authorize.rs
@@ -112,7 +112,7 @@ async fn do_auth<T>(
return Ok(());
};
- let (username, password) = extract_username_and_password(false, req)
+ let (username, password) = extract_username_and_password(req)
.map_err(|e| tonic::Status::invalid_argument(e.to_string()))?;
let id = auth::Identity::UserId(&username, None);
diff --git a/src/servers/src/http/authorize.rs b/src/servers/src/http/authorize.rs
index 843cdae183d3..135d1027308b 100644
--- a/src/servers/src/http/authorize.rs
+++ b/src/servers/src/http/authorize.rs
@@ -40,6 +40,7 @@ use crate::error::{
};
use crate::http::error_result::ErrorResponse;
use crate::http::HTTP_API_PREFIX;
+use crate::influxdb::{is_influxdb_request, is_influxdb_v2_request};
/// AuthState is a holder state for [`UserProviderRef`]
/// during [`check_http_auth`] function in axum's middleware
@@ -69,7 +70,6 @@ pub async fn inner_auth<B>(
let query_ctx = query_ctx_builder.build();
let need_auth = need_auth(&req);
- let is_influxdb = req.uri().path().contains("influxdb");
// 2. check if auth is needed
let user_provider = if let Some(user_provider) = user_provider.filter(|_| need_auth) {
@@ -81,14 +81,14 @@ pub async fn inner_auth<B>(
};
// 3. get username and pwd
- let (username, password) = match extract_username_and_password(is_influxdb, &req) {
+ let (username, password) = match extract_username_and_password(&req) {
Ok((username, password)) => (username, password),
Err(e) => {
warn!("extract username and password failed: {}", e);
crate::metrics::METRIC_AUTH_FAILURE
.with_label_values(&[e.status_code().as_ref()])
.inc();
- return Err(err_response(is_influxdb, e).into_response());
+ return Err(err_response(is_influxdb_request(&req), e).into_response());
}
};
@@ -112,7 +112,7 @@ pub async fn inner_auth<B>(
crate::metrics::METRIC_AUTH_FAILURE
.with_label_values(&[e.status_code().as_ref()])
.inc();
- Err(err_response(is_influxdb, e).into_response())
+ Err(err_response(is_influxdb_request(&req), e).into_response())
}
}
}
@@ -146,7 +146,11 @@ pub fn extract_catalog_and_schema<B>(request: &Request<B>) -> (&str, &str) {
.and_then(|header| header.to_str().ok())
.or_else(|| {
let query = request.uri().query().unwrap_or_default();
- extract_db_from_query(query)
+ if is_influxdb_v2_request(request) {
+ extract_db_from_query(query).or_else(|| extract_bucket_from_query(query))
+ } else {
+ extract_db_from_query(query)
+ }
})
.unwrap_or(DEFAULT_SCHEMA_NAME);
@@ -208,11 +212,8 @@ fn get_influxdb_credentials<B>(request: &Request<B>) -> Result<Option<(Username,
}
}
-pub fn extract_username_and_password<B>(
- is_influxdb: bool,
- request: &Request<B>,
-) -> Result<(Username, Password)> {
- Ok(if is_influxdb {
+pub fn extract_username_and_password<B>(request: &Request<B>) -> Result<(Username, Password)> {
+ Ok(if is_influxdb_request(request) {
// compatible with influxdb auth
get_influxdb_credentials(request)?.context(NotFoundInfluxAuthSnafu)?
} else {
@@ -290,15 +291,26 @@ fn need_auth<B>(req: &Request<B>) -> bool {
path.starts_with(HTTP_API_PREFIX)
}
-fn extract_db_from_query(query: &str) -> Option<&str> {
+fn extract_param_from_query<'a>(query: &'a str, param: &'a str) -> Option<&'a str> {
+ let prefix = format!("{}=", param);
for pair in query.split('&') {
- if let Some(db) = pair.strip_prefix("db=") {
- return if db.is_empty() { None } else { Some(db) };
+ if let Some(param) = pair.strip_prefix(&prefix) {
+ return if param.is_empty() { None } else { Some(param) };
}
}
None
}
+fn extract_db_from_query(query: &str) -> Option<&str> {
+ extract_param_from_query(query, "db")
+}
+
+/// InfluxDB v2 uses "bucket" instead of "db"
+/// https://docs.influxdata.com/influxdb/v1/tools/api/#apiv2write-http-endpoint
+fn extract_bucket_from_query(query: &str) -> Option<&str> {
+ extract_param_from_query(query, "bucket")
+}
+
fn extract_influxdb_user_from_query(query: &str) -> (Option<&str>, Option<&str>) {
let mut username = None;
let mut password = None;
@@ -422,10 +434,14 @@ mod tests {
assert_matches!(extract_db_from_query(""), None);
assert_matches!(extract_db_from_query("&"), None);
assert_matches!(extract_db_from_query("db="), None);
+ assert_matches!(extract_bucket_from_query("bucket="), None);
+ assert_matches!(extract_bucket_from_query("db=foo"), None);
assert_matches!(extract_db_from_query("db=foo"), Some("foo"));
+ assert_matches!(extract_bucket_from_query("bucket=foo"), Some("foo"));
assert_matches!(extract_db_from_query("name=bar"), None);
assert_matches!(extract_db_from_query("db=&name=bar"), None);
assert_matches!(extract_db_from_query("db=foo&name=bar"), Some("foo"));
+ assert_matches!(extract_bucket_from_query("db=foo&bucket=bar"), Some("bar"));
assert_matches!(extract_db_from_query("name=bar&db="), None);
assert_matches!(extract_db_from_query("name=bar&db=foo"), Some("foo"));
assert_matches!(extract_db_from_query("name=bar&db=&name=bar"), None);
diff --git a/src/servers/src/http/influxdb.rs b/src/servers/src/http/influxdb.rs
index 9ced1557bf0c..4b8a606b6835 100644
--- a/src/servers/src/http/influxdb.rs
+++ b/src/servers/src/http/influxdb.rs
@@ -67,9 +67,11 @@ pub async fn influxdb_write_v2(
Extension(query_ctx): Extension<QueryContextRef>,
lines: String,
) -> Result<impl IntoResponse> {
- let db = params
- .remove("bucket")
- .unwrap_or_else(|| DEFAULT_SCHEMA_NAME.to_string());
+ let db = match (params.remove("db"), params.remove("bucket")) {
+ (_, Some(bucket)) => bucket.clone(),
+ (Some(db), None) => db.clone(),
+ _ => DEFAULT_SCHEMA_NAME.to_string(),
+ };
let precision = params
.get("precision")
diff --git a/src/servers/src/influxdb.rs b/src/servers/src/influxdb.rs
index 870573679370..36a3fce1faa1 100644
--- a/src/servers/src/influxdb.rs
+++ b/src/servers/src/influxdb.rs
@@ -15,14 +15,27 @@
use api::v1::value::ValueData;
use api::v1::{ColumnDataType, RowInsertRequests};
use common_grpc::writer::Precision;
+use hyper::Request;
use influxdb_line_protocol::{parse_lines, FieldValue};
use snafu::ResultExt;
use crate::error::{Error, InfluxdbLineProtocolSnafu};
use crate::row_writer::{self, MultiTableData};
-pub const INFLUXDB_TIMESTAMP_COLUMN_NAME: &str = "ts";
-pub const DEFAULT_TIME_PRECISION: Precision = Precision::Nanosecond;
+const INFLUXDB_API_PATH_NAME: &str = "influxdb";
+const INFLUXDB_API_V2_PATH_NAME: &str = "influxdb/api/v2";
+const INFLUXDB_TIMESTAMP_COLUMN_NAME: &str = "ts";
+const DEFAULT_TIME_PRECISION: Precision = Precision::Nanosecond;
+
+#[inline]
+pub(crate) fn is_influxdb_request<T>(req: &Request<T>) -> bool {
+ req.uri().path().contains(INFLUXDB_API_PATH_NAME)
+}
+
+#[inline]
+pub(crate) fn is_influxdb_v2_request<T>(req: &Request<T>) -> bool {
+ req.uri().path().contains(INFLUXDB_API_V2_PATH_NAME)
+}
#[derive(Debug)]
pub struct InfluxdbRequest {
diff --git a/src/servers/tests/http/influxdb_test.rs b/src/servers/tests/http/influxdb_test.rs
index 5779e377e591..ff5f4c85afbd 100644
--- a/src/servers/tests/http/influxdb_test.rs
+++ b/src/servers/tests/http/influxdb_test.rs
@@ -26,6 +26,7 @@ use query::parser::PromQuery;
use query::plan::LogicalPlan;
use query::query_engine::DescribeResult;
use servers::error::{Error, Result};
+use servers::http::header::constants::GREPTIME_DB_HEADER_NAME;
use servers::http::header::GREPTIME_DB_HEADER_FORMAT;
use servers::http::{HttpOptions, HttpServerBuilder};
use servers::influxdb::InfluxdbRequest;
@@ -244,3 +245,122 @@ async fn test_influxdb_write() {
]
);
}
+
+#[tokio::test]
+async fn test_influxdb_write_v2() {
+ let (tx, mut rx) = mpsc::channel(100);
+ let tx = Arc::new(tx);
+
+ let public_db_app = make_test_app(tx.clone(), None);
+ let public_db_client = TestClient::new(public_db_app);
+
+ let result = public_db_client.get("/v1/influxdb/health").send().await;
+ assert_eq!(result.status(), 200);
+
+ let result = public_db_client.get("/v1/influxdb/ping").send().await;
+ assert_eq!(result.status(), 204);
+
+ // right request with no query string
+ let result = public_db_client
+ .post("/v1/influxdb/api/v2/write")
+ .body("monitor,host=host1 cpu=1.2 1664370459457010101")
+ .header(http::header::AUTHORIZATION, "token greptime:greptime")
+ .send()
+ .await;
+ assert_eq!(result.status(), 204);
+ assert!(result.text().await.is_empty());
+
+ // right request with `bucket` query string
+ let result = public_db_client
+ .post("/v1/influxdb/api/v2/write?bucket=public")
+ .body("monitor,host=host1 cpu=1.2 1664370459457010101")
+ .header(http::header::AUTHORIZATION, "token greptime:greptime")
+ .send()
+ .await;
+ assert_eq!(result.status(), 204);
+ assert!(result.text().await.is_empty());
+
+ // right request with `db` query string
+ let result = public_db_client
+ .post("/v1/influxdb/api/v2/write?db=public")
+ .body("monitor,host=host1 cpu=1.2 1664370459457010101")
+ .header(http::header::AUTHORIZATION, "token greptime:greptime")
+ .send()
+ .await;
+ assert_eq!(result.status(), 204);
+ assert!(result.text().await.is_empty());
+
+ // make new app for 'influxdb' database
+ let app = make_test_app(tx, Some("influxdb"));
+ let client = TestClient::new(app);
+
+ // right request with `bucket` query string
+ let result = client
+ .post("/v1/influxdb/api/v2/write?bucket=influxdb")
+ .body("monitor,host=host1 cpu=1.2 1664370459457010101")
+ .header(http::header::AUTHORIZATION, "token greptime:greptime")
+ .send()
+ .await;
+ assert_eq!(result.status(), 204);
+ assert!(result.text().await.is_empty());
+
+ // right request with `db` query string
+ let result = client
+ .post("/v1/influxdb/api/v2/write?db=influxdb")
+ .body("monitor,host=host1 cpu=1.2 1664370459457010101")
+ .header(http::header::AUTHORIZATION, "token greptime:greptime")
+ .send()
+ .await;
+ assert_eq!(result.status(), 204);
+ assert!(result.text().await.is_empty());
+
+ // right request with no query string, `public_db_client` is used otherwise the auth will fail
+ let result = public_db_client
+ .post("/v1/influxdb/api/v2/write")
+ .body("monitor,host=host1 cpu=1.2 1664370459457010101")
+ .header(http::header::AUTHORIZATION, "token greptime:greptime")
+ .send()
+ .await;
+ assert_eq!(result.status(), 204);
+ assert!(result.text().await.is_empty());
+
+ // right request with the 'greptime' header and 'db' query string
+ let result = client
+ .post("/v1/influxdb/api/v2/write?db=influxdbv2")
+ .body("monitor,host=host1 cpu=1.2 1664370459457010101")
+ .header(http::header::AUTHORIZATION, "token greptime:greptime")
+ .header(GREPTIME_DB_HEADER_NAME, "influxdb")
+ .send()
+ .await;
+ assert_eq!(result.status(), 204);
+ assert!(result.text().await.is_empty());
+
+ // right request with the 'greptime' header and 'bucket' query string
+ let result = client
+ .post("/v1/influxdb/api/v2/write?bucket=influxdbv2")
+ .body("monitor,host=host1 cpu=1.2 1664370459457010101")
+ .header(http::header::AUTHORIZATION, "token greptime:greptime")
+ .header(GREPTIME_DB_HEADER_NAME, "influxdb")
+ .send()
+ .await;
+ assert_eq!(result.status(), 204);
+ assert!(result.text().await.is_empty());
+
+ let mut metrics = vec![];
+ while let Ok(s) = rx.try_recv() {
+ metrics.push(s);
+ }
+ assert_eq!(
+ metrics,
+ vec![
+ ("public".to_string(), "monitor".to_string()),
+ ("public".to_string(), "monitor".to_string()),
+ ("public".to_string(), "monitor".to_string()),
+ ("influxdb".to_string(), "monitor".to_string()),
+ ("influxdb".to_string(), "monitor".to_string()),
+ ("public".to_string(), "monitor".to_string()),
+ ("influxdb".to_string(), "monitor".to_string()),
+ ("influxdb".to_string(), "monitor".to_string()),
+ ]
+ );
+}
|
feat
|
add db query param support for v2 write api (#3445)
|
8ffc078f88f5e265eb0821820593a1c4bd309c17
|
2023-01-03 12:39:49
|
Lei, HUANG
|
fix: license header (#815)
| false
|
diff --git a/benchmarks/src/bin/nyc-taxi.rs b/benchmarks/src/bin/nyc-taxi.rs
index 9ee82af92ec3..4d857aa56125 100644
--- a/benchmarks/src/bin/nyc-taxi.rs
+++ b/benchmarks/src/bin/nyc-taxi.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/api/build.rs b/src/api/build.rs
index 9187207a425b..b913d685b2c7 100644
--- a/src/api/build.rs
+++ b/src/api/build.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/api/src/error.rs b/src/api/src/error.rs
index 562ea5a81841..034f8561586a 100644
--- a/src/api/src/error.rs
+++ b/src/api/src/error.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/api/src/helper.rs b/src/api/src/helper.rs
index 72fb0c507bcd..8bd50bcad9cd 100644
--- a/src/api/src/helper.rs
+++ b/src/api/src/helper.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/api/src/lib.rs b/src/api/src/lib.rs
index d6c415d8cf9f..9624b5b96aec 100644
--- a/src/api/src/lib.rs
+++ b/src/api/src/lib.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/api/src/prometheus.rs b/src/api/src/prometheus.rs
index 1ce8940a8821..01f60199455d 100644
--- a/src/api/src/prometheus.rs
+++ b/src/api/src/prometheus.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/api/src/result.rs b/src/api/src/result.rs
index e93e94945a3f..7b6c311bc3bb 100644
--- a/src/api/src/result.rs
+++ b/src/api/src/result.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/api/src/serde.rs b/src/api/src/serde.rs
index 1c1bc8beb7a9..ce7967cf98ed 100644
--- a/src/api/src/serde.rs
+++ b/src/api/src/serde.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/api/src/v1.rs b/src/api/src/v1.rs
index 636b6f2cf013..dcf7927edd89 100644
--- a/src/api/src/v1.rs
+++ b/src/api/src/v1.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/api/src/v1/column_def.rs b/src/api/src/v1/column_def.rs
index 828adfccf8c4..1c7a4fae8b44 100644
--- a/src/api/src/v1/column_def.rs
+++ b/src/api/src/v1/column_def.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/api/src/v1/meta.rs b/src/api/src/v1/meta.rs
index d2db34c5fc6d..dafe6f69c326 100644
--- a/src/api/src/v1/meta.rs
+++ b/src/api/src/v1/meta.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/catalog/src/error.rs b/src/catalog/src/error.rs
index f344ae3bb8a4..fa828b9f3356 100644
--- a/src/catalog/src/error.rs
+++ b/src/catalog/src/error.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/catalog/src/helper.rs b/src/catalog/src/helper.rs
index 0993e865f69f..3cccb11c0e36 100644
--- a/src/catalog/src/helper.rs
+++ b/src/catalog/src/helper.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/catalog/src/lib.rs b/src/catalog/src/lib.rs
index 21d8efe65f1b..a7c53ffe9e77 100644
--- a/src/catalog/src/lib.rs
+++ b/src/catalog/src/lib.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/catalog/src/local.rs b/src/catalog/src/local.rs
index 42c24c152e93..11d7b47a945d 100644
--- a/src/catalog/src/local.rs
+++ b/src/catalog/src/local.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/catalog/src/local/manager.rs b/src/catalog/src/local/manager.rs
index 1a1f1a995a29..a7455dd516cf 100644
--- a/src/catalog/src/local/manager.rs
+++ b/src/catalog/src/local/manager.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/catalog/src/local/memory.rs b/src/catalog/src/local/memory.rs
index fb41058ad0aa..89c547b467d9 100644
--- a/src/catalog/src/local/memory.rs
+++ b/src/catalog/src/local/memory.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/catalog/src/remote.rs b/src/catalog/src/remote.rs
index 36a382b736c3..f66cc409635c 100644
--- a/src/catalog/src/remote.rs
+++ b/src/catalog/src/remote.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/catalog/src/remote/client.rs b/src/catalog/src/remote/client.rs
index ad41ef6f2ea6..be855a92aca4 100644
--- a/src/catalog/src/remote/client.rs
+++ b/src/catalog/src/remote/client.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/catalog/src/remote/manager.rs b/src/catalog/src/remote/manager.rs
index c18a079c84bd..5e963045b04c 100644
--- a/src/catalog/src/remote/manager.rs
+++ b/src/catalog/src/remote/manager.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/catalog/src/schema.rs b/src/catalog/src/schema.rs
index eae813d2f26e..3f91f8d81b12 100644
--- a/src/catalog/src/schema.rs
+++ b/src/catalog/src/schema.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/catalog/src/system.rs b/src/catalog/src/system.rs
index df39d3a5ab13..e1d4bbae0a5a 100644
--- a/src/catalog/src/system.rs
+++ b/src/catalog/src/system.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/catalog/src/tables.rs b/src/catalog/src/tables.rs
index 09e7779a166a..1485c51e1d7b 100644
--- a/src/catalog/src/tables.rs
+++ b/src/catalog/src/tables.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/catalog/tests/local_catalog_tests.rs b/src/catalog/tests/local_catalog_tests.rs
index e58722bac4c5..f40639ed4bc6 100644
--- a/src/catalog/tests/local_catalog_tests.rs
+++ b/src/catalog/tests/local_catalog_tests.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/catalog/tests/mock.rs b/src/catalog/tests/mock.rs
index 336f41ba0a2a..52c9bb97cb1c 100644
--- a/src/catalog/tests/mock.rs
+++ b/src/catalog/tests/mock.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/catalog/tests/remote_catalog_tests.rs b/src/catalog/tests/remote_catalog_tests.rs
index 9903b8ff8570..7df423111623 100644
--- a/src/catalog/tests/remote_catalog_tests.rs
+++ b/src/catalog/tests/remote_catalog_tests.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/client/examples/logical.rs b/src/client/examples/logical.rs
index e00c8c09845a..368104d52ac0 100644
--- a/src/client/examples/logical.rs
+++ b/src/client/examples/logical.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/client/src/client.rs b/src/client/src/client.rs
index ddc145b1beae..9cd053c746c2 100644
--- a/src/client/src/client.rs
+++ b/src/client/src/client.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/client/src/database.rs b/src/client/src/database.rs
index a5b024d344b7..e48041e0024c 100644
--- a/src/client/src/database.rs
+++ b/src/client/src/database.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/client/src/error.rs b/src/client/src/error.rs
index fa607258bf0a..773862624eba 100644
--- a/src/client/src/error.rs
+++ b/src/client/src/error.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/client/src/lib.rs b/src/client/src/lib.rs
index 09080c640def..3e3246fdcbe5 100644
--- a/src/client/src/lib.rs
+++ b/src/client/src/lib.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/client/src/load_balance.rs b/src/client/src/load_balance.rs
index 90b2fae33979..d2837883715a 100644
--- a/src/client/src/load_balance.rs
+++ b/src/client/src/load_balance.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/cmd/build.rs b/src/cmd/build.rs
index 8547bbd1d965..1a32b5376b0a 100644
--- a/src/cmd/build.rs
+++ b/src/cmd/build.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/cmd/src/bin/greptime.rs b/src/cmd/src/bin/greptime.rs
index 517459114253..6ee20faf4440 100644
--- a/src/cmd/src/bin/greptime.rs
+++ b/src/cmd/src/bin/greptime.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/cmd/src/datanode.rs b/src/cmd/src/datanode.rs
index 4afc1ea619eb..00fa25d83c25 100644
--- a/src/cmd/src/datanode.rs
+++ b/src/cmd/src/datanode.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/cmd/src/error.rs b/src/cmd/src/error.rs
index 14fe0a9c270e..1b996f1ff980 100644
--- a/src/cmd/src/error.rs
+++ b/src/cmd/src/error.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/cmd/src/frontend.rs b/src/cmd/src/frontend.rs
index 0563629ab28e..a8da249cca87 100644
--- a/src/cmd/src/frontend.rs
+++ b/src/cmd/src/frontend.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/cmd/src/lib.rs b/src/cmd/src/lib.rs
index 8b5dc10641ad..61d694e4ae48 100644
--- a/src/cmd/src/lib.rs
+++ b/src/cmd/src/lib.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/cmd/src/metasrv.rs b/src/cmd/src/metasrv.rs
index 3d9018527d06..22d4729f9017 100644
--- a/src/cmd/src/metasrv.rs
+++ b/src/cmd/src/metasrv.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/cmd/src/standalone.rs b/src/cmd/src/standalone.rs
index 83809f0b4396..f8c17a2cbaf1 100644
--- a/src/cmd/src/standalone.rs
+++ b/src/cmd/src/standalone.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/cmd/src/toml_loader.rs b/src/cmd/src/toml_loader.rs
index 8eb1b080b983..011171104a5e 100644
--- a/src/cmd/src/toml_loader.rs
+++ b/src/cmd/src/toml_loader.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/base/src/bit_vec.rs b/src/common/base/src/bit_vec.rs
index bdc1e4020eda..b2634da8f583 100644
--- a/src/common/base/src/bit_vec.rs
+++ b/src/common/base/src/bit_vec.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/base/src/buffer.rs b/src/common/base/src/buffer.rs
index 26303c6f8853..3d4de246b6e0 100644
--- a/src/common/base/src/buffer.rs
+++ b/src/common/base/src/buffer.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/base/src/bytes.rs b/src/common/base/src/bytes.rs
index 9f5cff5dd91f..b65f4b3443ac 100644
--- a/src/common/base/src/bytes.rs
+++ b/src/common/base/src/bytes.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/base/src/lib.rs b/src/common/base/src/lib.rs
index c3d8a047295b..c86c2bd472cd 100644
--- a/src/common/base/src/lib.rs
+++ b/src/common/base/src/lib.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/base/tests/buffer_tests.rs b/src/common/base/tests/buffer_tests.rs
index 76d577618a60..2d03a4e36b25 100644
--- a/src/common/base/tests/buffer_tests.rs
+++ b/src/common/base/tests/buffer_tests.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/catalog/src/consts.rs b/src/common/catalog/src/consts.rs
index 118c53930b3f..a33b44f9389f 100644
--- a/src/common/catalog/src/consts.rs
+++ b/src/common/catalog/src/consts.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/catalog/src/error.rs b/src/common/catalog/src/error.rs
index 0a4613b89b9a..a3fc0133a592 100644
--- a/src/common/catalog/src/error.rs
+++ b/src/common/catalog/src/error.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/catalog/src/lib.rs b/src/common/catalog/src/lib.rs
index 841420c21992..2fb83abedcbb 100644
--- a/src/common/catalog/src/lib.rs
+++ b/src/common/catalog/src/lib.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/error/src/ext.rs b/src/common/error/src/ext.rs
index 9d2b3fa0ae64..aca7f9e82114 100644
--- a/src/common/error/src/ext.rs
+++ b/src/common/error/src/ext.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/error/src/format.rs b/src/common/error/src/format.rs
index 40c077ca3d5c..96816a2b3e3f 100644
--- a/src/common/error/src/format.rs
+++ b/src/common/error/src/format.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/error/src/lib.rs b/src/common/error/src/lib.rs
index 0b8542137af0..4d0d3a8fde46 100644
--- a/src/common/error/src/lib.rs
+++ b/src/common/error/src/lib.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/error/src/mock.rs b/src/common/error/src/mock.rs
index e3165aefe072..281c386f8477 100644
--- a/src/common/error/src/mock.rs
+++ b/src/common/error/src/mock.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/error/src/status_code.rs b/src/common/error/src/status_code.rs
index 3e5fec37aca3..d26660cb25bc 100644
--- a/src/common/error/src/status_code.rs
+++ b/src/common/error/src/status_code.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/function-macro/src/lib.rs b/src/common/function-macro/src/lib.rs
index da254c3c1633..5eb2c93b9cfe 100644
--- a/src/common/function-macro/src/lib.rs
+++ b/src/common/function-macro/src/lib.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/function-macro/tests/test_derive.rs b/src/common/function-macro/tests/test_derive.rs
index fb4e9bf1db17..f304edd4f280 100644
--- a/src/common/function-macro/tests/test_derive.rs
+++ b/src/common/function-macro/tests/test_derive.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/function/src/lib.rs b/src/common/function/src/lib.rs
index 8d15fe0b25c2..a971d5cceb0d 100644
--- a/src/common/function/src/lib.rs
+++ b/src/common/function/src/lib.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/function/src/scalars.rs b/src/common/function/src/scalars.rs
index e9499b215140..e6f7ed766c95 100644
--- a/src/common/function/src/scalars.rs
+++ b/src/common/function/src/scalars.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/function/src/scalars/aggregate.rs b/src/common/function/src/scalars/aggregate.rs
index f605fff2f2f6..829296498078 100644
--- a/src/common/function/src/scalars/aggregate.rs
+++ b/src/common/function/src/scalars/aggregate.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/function/src/scalars/aggregate/argmax.rs b/src/common/function/src/scalars/aggregate/argmax.rs
index d42d4550c65a..19c43ea8e0ef 100644
--- a/src/common/function/src/scalars/aggregate/argmax.rs
+++ b/src/common/function/src/scalars/aggregate/argmax.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/function/src/scalars/aggregate/argmin.rs b/src/common/function/src/scalars/aggregate/argmin.rs
index 5b9356128613..cba7a82b16f4 100644
--- a/src/common/function/src/scalars/aggregate/argmin.rs
+++ b/src/common/function/src/scalars/aggregate/argmin.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/function/src/scalars/aggregate/diff.rs b/src/common/function/src/scalars/aggregate/diff.rs
index 3f7ecc24004f..bee94add778e 100644
--- a/src/common/function/src/scalars/aggregate/diff.rs
+++ b/src/common/function/src/scalars/aggregate/diff.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/function/src/scalars/aggregate/mean.rs b/src/common/function/src/scalars/aggregate/mean.rs
index ce619bb2532a..35f6ae6d2655 100644
--- a/src/common/function/src/scalars/aggregate/mean.rs
+++ b/src/common/function/src/scalars/aggregate/mean.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/function/src/scalars/aggregate/percentile.rs b/src/common/function/src/scalars/aggregate/percentile.rs
index 1517f90e6282..5b10a4fcaac1 100644
--- a/src/common/function/src/scalars/aggregate/percentile.rs
+++ b/src/common/function/src/scalars/aggregate/percentile.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/function/src/scalars/aggregate/polyval.rs b/src/common/function/src/scalars/aggregate/polyval.rs
index 0a8fc818c5da..c5cbfbf51e0a 100644
--- a/src/common/function/src/scalars/aggregate/polyval.rs
+++ b/src/common/function/src/scalars/aggregate/polyval.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/function/src/scalars/aggregate/scipy_stats_norm_cdf.rs b/src/common/function/src/scalars/aggregate/scipy_stats_norm_cdf.rs
index caa07248a33b..c9a04b566585 100644
--- a/src/common/function/src/scalars/aggregate/scipy_stats_norm_cdf.rs
+++ b/src/common/function/src/scalars/aggregate/scipy_stats_norm_cdf.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/function/src/scalars/aggregate/scipy_stats_norm_pdf.rs b/src/common/function/src/scalars/aggregate/scipy_stats_norm_pdf.rs
index 186d59a89084..4efd59d42f1d 100644
--- a/src/common/function/src/scalars/aggregate/scipy_stats_norm_pdf.rs
+++ b/src/common/function/src/scalars/aggregate/scipy_stats_norm_pdf.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/function/src/scalars/expression.rs b/src/common/function/src/scalars/expression.rs
index a66b0d04e573..363d9acb6475 100644
--- a/src/common/function/src/scalars/expression.rs
+++ b/src/common/function/src/scalars/expression.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/function/src/scalars/expression/binary.rs b/src/common/function/src/scalars/expression/binary.rs
index d1a9db8eb997..9ef67a2b0f3e 100644
--- a/src/common/function/src/scalars/expression/binary.rs
+++ b/src/common/function/src/scalars/expression/binary.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/function/src/scalars/expression/ctx.rs b/src/common/function/src/scalars/expression/ctx.rs
index c6735bd1d0d5..65844548a297 100644
--- a/src/common/function/src/scalars/expression/ctx.rs
+++ b/src/common/function/src/scalars/expression/ctx.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/function/src/scalars/expression/unary.rs b/src/common/function/src/scalars/expression/unary.rs
index 0862f711e1f4..3927adb327a9 100644
--- a/src/common/function/src/scalars/expression/unary.rs
+++ b/src/common/function/src/scalars/expression/unary.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/function/src/scalars/function.rs b/src/common/function/src/scalars/function.rs
index 6f70bca4a0d5..3225183bd47f 100644
--- a/src/common/function/src/scalars/function.rs
+++ b/src/common/function/src/scalars/function.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/function/src/scalars/function_registry.rs b/src/common/function/src/scalars/function_registry.rs
index ff8dc2338bae..d25341c7ab23 100644
--- a/src/common/function/src/scalars/function_registry.rs
+++ b/src/common/function/src/scalars/function_registry.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/function/src/scalars/math.rs b/src/common/function/src/scalars/math.rs
index 3934a7f3a244..f15d782bf79a 100644
--- a/src/common/function/src/scalars/math.rs
+++ b/src/common/function/src/scalars/math.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/function/src/scalars/math/pow.rs b/src/common/function/src/scalars/math/pow.rs
index 6a4e1937ddc3..c6a554d5964f 100644
--- a/src/common/function/src/scalars/math/pow.rs
+++ b/src/common/function/src/scalars/math/pow.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/function/src/scalars/math/rate.rs b/src/common/function/src/scalars/math/rate.rs
index ad03485a36aa..9d668d39d5d9 100644
--- a/src/common/function/src/scalars/math/rate.rs
+++ b/src/common/function/src/scalars/math/rate.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/function/src/scalars/numpy.rs b/src/common/function/src/scalars/numpy.rs
index ed8d9b6f3009..67c42ffb56c6 100644
--- a/src/common/function/src/scalars/numpy.rs
+++ b/src/common/function/src/scalars/numpy.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/function/src/scalars/numpy/clip.rs b/src/common/function/src/scalars/numpy/clip.rs
index 888a080f3fcf..a8d098e5cd61 100644
--- a/src/common/function/src/scalars/numpy/clip.rs
+++ b/src/common/function/src/scalars/numpy/clip.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/function/src/scalars/numpy/interp.rs b/src/common/function/src/scalars/numpy/interp.rs
index c4dbadc99914..9d20fc3f9c33 100644
--- a/src/common/function/src/scalars/numpy/interp.rs
+++ b/src/common/function/src/scalars/numpy/interp.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/function/src/scalars/test.rs b/src/common/function/src/scalars/test.rs
index 8e81d1f025ac..1ffef23be545 100644
--- a/src/common/function/src/scalars/test.rs
+++ b/src/common/function/src/scalars/test.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/function/src/scalars/timestamp.rs b/src/common/function/src/scalars/timestamp.rs
index f326aec283db..bc4662f08e91 100644
--- a/src/common/function/src/scalars/timestamp.rs
+++ b/src/common/function/src/scalars/timestamp.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/function/src/scalars/timestamp/from_unixtime.rs b/src/common/function/src/scalars/timestamp/from_unixtime.rs
index c8adc01f8c6e..b38fbb6749d8 100644
--- a/src/common/function/src/scalars/timestamp/from_unixtime.rs
+++ b/src/common/function/src/scalars/timestamp/from_unixtime.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/function/src/scalars/udf.rs b/src/common/function/src/scalars/udf.rs
index 38812f695e70..75a61975db61 100644
--- a/src/common/function/src/scalars/udf.rs
+++ b/src/common/function/src/scalars/udf.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/grpc-expr/src/alter.rs b/src/common/grpc-expr/src/alter.rs
index 45c9e710d1d7..68ebe3f97bf1 100644
--- a/src/common/grpc-expr/src/alter.rs
+++ b/src/common/grpc-expr/src/alter.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/grpc-expr/src/error.rs b/src/common/grpc-expr/src/error.rs
index dc0df10c464a..5dbf9a2c12e8 100644
--- a/src/common/grpc-expr/src/error.rs
+++ b/src/common/grpc-expr/src/error.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/grpc-expr/src/insert.rs b/src/common/grpc-expr/src/insert.rs
index 23755fec2618..d383740e6ae3 100644
--- a/src/common/grpc-expr/src/insert.rs
+++ b/src/common/grpc-expr/src/insert.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/grpc-expr/src/lib.rs b/src/common/grpc-expr/src/lib.rs
index ab03627b254b..b237290fa335 100644
--- a/src/common/grpc-expr/src/lib.rs
+++ b/src/common/grpc-expr/src/lib.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/grpc/benches/bench_main.rs b/src/common/grpc/benches/bench_main.rs
index 3d29e5ee2e92..b67e1817a58e 100644
--- a/src/common/grpc/benches/bench_main.rs
+++ b/src/common/grpc/benches/bench_main.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/grpc/benches/channel_manager.rs b/src/common/grpc/benches/channel_manager.rs
index 1b2a917b5693..3ba2269b2fc2 100644
--- a/src/common/grpc/benches/channel_manager.rs
+++ b/src/common/grpc/benches/channel_manager.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/grpc/src/channel_manager.rs b/src/common/grpc/src/channel_manager.rs
index 42f5dea47df4..a78bf608b664 100644
--- a/src/common/grpc/src/channel_manager.rs
+++ b/src/common/grpc/src/channel_manager.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/grpc/src/error.rs b/src/common/grpc/src/error.rs
index 55594627fa96..c26e44db3f66 100644
--- a/src/common/grpc/src/error.rs
+++ b/src/common/grpc/src/error.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/grpc/src/flight.rs b/src/common/grpc/src/flight.rs
index 76b274dc0427..29d74174b702 100644
--- a/src/common/grpc/src/flight.rs
+++ b/src/common/grpc/src/flight.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/grpc/src/lib.rs b/src/common/grpc/src/lib.rs
index 944f999df166..948ff43c2905 100644
--- a/src/common/grpc/src/lib.rs
+++ b/src/common/grpc/src/lib.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/grpc/src/select.rs b/src/common/grpc/src/select.rs
index 8b6730442b4d..010630b6357d 100644
--- a/src/common/grpc/src/select.rs
+++ b/src/common/grpc/src/select.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/grpc/src/writer.rs b/src/common/grpc/src/writer.rs
index d05a2908e179..c38ab72af763 100644
--- a/src/common/grpc/src/writer.rs
+++ b/src/common/grpc/src/writer.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/query/src/columnar_value.rs b/src/common/query/src/columnar_value.rs
index 12f3815bd03d..4be676cc9259 100644
--- a/src/common/query/src/columnar_value.rs
+++ b/src/common/query/src/columnar_value.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/query/src/error.rs b/src/common/query/src/error.rs
index 25c169baa5b2..553ccd691995 100644
--- a/src/common/query/src/error.rs
+++ b/src/common/query/src/error.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/query/src/function.rs b/src/common/query/src/function.rs
index cde2e25f1490..6eb683c797ae 100644
--- a/src/common/query/src/function.rs
+++ b/src/common/query/src/function.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/query/src/lib.rs b/src/common/query/src/lib.rs
index 06b88fcbf8d1..65ec955e8a21 100644
--- a/src/common/query/src/lib.rs
+++ b/src/common/query/src/lib.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/query/src/logical_plan.rs b/src/common/query/src/logical_plan.rs
index a0df518ce7f2..809d033bafb7 100644
--- a/src/common/query/src/logical_plan.rs
+++ b/src/common/query/src/logical_plan.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/query/src/logical_plan/accumulator.rs b/src/common/query/src/logical_plan/accumulator.rs
index 773b5192be18..694d46074de9 100644
--- a/src/common/query/src/logical_plan/accumulator.rs
+++ b/src/common/query/src/logical_plan/accumulator.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/query/src/logical_plan/expr.rs b/src/common/query/src/logical_plan/expr.rs
index cc8aa1bea330..0af6aa39229c 100644
--- a/src/common/query/src/logical_plan/expr.rs
+++ b/src/common/query/src/logical_plan/expr.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/query/src/logical_plan/udaf.rs b/src/common/query/src/logical_plan/udaf.rs
index 1f3fb26a9824..3fe2ddc62b5a 100644
--- a/src/common/query/src/logical_plan/udaf.rs
+++ b/src/common/query/src/logical_plan/udaf.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/query/src/logical_plan/udf.rs b/src/common/query/src/logical_plan/udf.rs
index 61a3653f8127..6a48a7b97ff9 100644
--- a/src/common/query/src/logical_plan/udf.rs
+++ b/src/common/query/src/logical_plan/udf.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/query/src/physical_plan.rs b/src/common/query/src/physical_plan.rs
index 35b73be4d99d..22e2b48bec24 100644
--- a/src/common/query/src/physical_plan.rs
+++ b/src/common/query/src/physical_plan.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/query/src/prelude.rs b/src/common/query/src/prelude.rs
index e1af6f0c418b..e65586e669e5 100644
--- a/src/common/query/src/prelude.rs
+++ b/src/common/query/src/prelude.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/query/src/signature.rs b/src/common/query/src/signature.rs
index 1d57ee7992cd..9e92a10e1730 100644
--- a/src/common/query/src/signature.rs
+++ b/src/common/query/src/signature.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/recordbatch/src/adapter.rs b/src/common/recordbatch/src/adapter.rs
index 14b8fba0dd10..0aa6f2a211ee 100644
--- a/src/common/recordbatch/src/adapter.rs
+++ b/src/common/recordbatch/src/adapter.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/recordbatch/src/error.rs b/src/common/recordbatch/src/error.rs
index c77e2f3f48d6..07fd5a1795e1 100644
--- a/src/common/recordbatch/src/error.rs
+++ b/src/common/recordbatch/src/error.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/recordbatch/src/lib.rs b/src/common/recordbatch/src/lib.rs
index 4fcb69656d70..656f265b2b0c 100644
--- a/src/common/recordbatch/src/lib.rs
+++ b/src/common/recordbatch/src/lib.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/recordbatch/src/recordbatch.rs b/src/common/recordbatch/src/recordbatch.rs
index 863f1107ce8c..16a51dbbc634 100644
--- a/src/common/recordbatch/src/recordbatch.rs
+++ b/src/common/recordbatch/src/recordbatch.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/recordbatch/src/util.rs b/src/common/recordbatch/src/util.rs
index 4b2f1a67c84d..8dcb31c8e132 100644
--- a/src/common/recordbatch/src/util.rs
+++ b/src/common/recordbatch/src/util.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/runtime/src/error.rs b/src/common/runtime/src/error.rs
index cbb561c3bb92..0a4c4d07307a 100644
--- a/src/common/runtime/src/error.rs
+++ b/src/common/runtime/src/error.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/runtime/src/global.rs b/src/common/runtime/src/global.rs
index 10fe081e9cd8..98f9e9917b73 100644
--- a/src/common/runtime/src/global.rs
+++ b/src/common/runtime/src/global.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/runtime/src/lib.rs b/src/common/runtime/src/lib.rs
index 539705f40114..b7bed53c7754 100644
--- a/src/common/runtime/src/lib.rs
+++ b/src/common/runtime/src/lib.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/runtime/src/metric.rs b/src/common/runtime/src/metric.rs
index 88ba1e303b79..c36da98162e5 100644
--- a/src/common/runtime/src/metric.rs
+++ b/src/common/runtime/src/metric.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/runtime/src/runtime.rs b/src/common/runtime/src/runtime.rs
index 4a306de6490c..a16c4c4c3c53 100644
--- a/src/common/runtime/src/runtime.rs
+++ b/src/common/runtime/src/runtime.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/substrait/src/context.rs b/src/common/substrait/src/context.rs
index af4a07b788d9..fb7e90cfffd9 100644
--- a/src/common/substrait/src/context.rs
+++ b/src/common/substrait/src/context.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/substrait/src/df_expr.rs b/src/common/substrait/src/df_expr.rs
index c0b5e260d161..148bafc3e1bb 100644
--- a/src/common/substrait/src/df_expr.rs
+++ b/src/common/substrait/src/df_expr.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/substrait/src/df_logical.rs b/src/common/substrait/src/df_logical.rs
index dfcc3a33f418..e605af151f85 100644
--- a/src/common/substrait/src/df_logical.rs
+++ b/src/common/substrait/src/df_logical.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/substrait/src/error.rs b/src/common/substrait/src/error.rs
index 4455e9231c17..f6875812bcb6 100644
--- a/src/common/substrait/src/error.rs
+++ b/src/common/substrait/src/error.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/substrait/src/lib.rs b/src/common/substrait/src/lib.rs
index 04c5e8277199..e340d4da6f7f 100644
--- a/src/common/substrait/src/lib.rs
+++ b/src/common/substrait/src/lib.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/substrait/src/schema.rs b/src/common/substrait/src/schema.rs
index 623a4e74164f..0235263507b8 100644
--- a/src/common/substrait/src/schema.rs
+++ b/src/common/substrait/src/schema.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/substrait/src/types.rs b/src/common/substrait/src/types.rs
index 31de2a16a19a..8834acac36b3 100644
--- a/src/common/substrait/src/types.rs
+++ b/src/common/substrait/src/types.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/telemetry/src/lib.rs b/src/common/telemetry/src/lib.rs
index e8e92cfe6fd1..e91c77175145 100644
--- a/src/common/telemetry/src/lib.rs
+++ b/src/common/telemetry/src/lib.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/telemetry/src/logging.rs b/src/common/telemetry/src/logging.rs
index 6407e04dfa1f..ad2d1957bdaf 100644
--- a/src/common/telemetry/src/logging.rs
+++ b/src/common/telemetry/src/logging.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/telemetry/src/macros.rs b/src/common/telemetry/src/macros.rs
index d2be6b9e0275..dcae8bd14ded 100644
--- a/src/common/telemetry/src/macros.rs
+++ b/src/common/telemetry/src/macros.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/telemetry/src/metric.rs b/src/common/telemetry/src/metric.rs
index d0cbd537eefa..f401e2ab3b13 100644
--- a/src/common/telemetry/src/metric.rs
+++ b/src/common/telemetry/src/metric.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/telemetry/src/panic_hook.rs b/src/common/telemetry/src/panic_hook.rs
index ef2c2e639b12..bd665c73e2e6 100644
--- a/src/common/telemetry/src/panic_hook.rs
+++ b/src/common/telemetry/src/panic_hook.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/time/src/date.rs b/src/common/time/src/date.rs
index b12eb9f50da7..e6057c81b04e 100644
--- a/src/common/time/src/date.rs
+++ b/src/common/time/src/date.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/time/src/datetime.rs b/src/common/time/src/datetime.rs
index 73d465babed5..fba96b233ad1 100644
--- a/src/common/time/src/datetime.rs
+++ b/src/common/time/src/datetime.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/time/src/error.rs b/src/common/time/src/error.rs
index 73e3af577324..a6d722c480cb 100644
--- a/src/common/time/src/error.rs
+++ b/src/common/time/src/error.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/time/src/lib.rs b/src/common/time/src/lib.rs
index 98788a35c37d..fdc9033bed98 100644
--- a/src/common/time/src/lib.rs
+++ b/src/common/time/src/lib.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/time/src/range.rs b/src/common/time/src/range.rs
index 6c72fdf5b675..ef1935caf5e9 100644
--- a/src/common/time/src/range.rs
+++ b/src/common/time/src/range.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/time/src/timestamp.rs b/src/common/time/src/timestamp.rs
index b3de23d01d70..2066dcb31642 100644
--- a/src/common/time/src/timestamp.rs
+++ b/src/common/time/src/timestamp.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/time/src/timestamp_millis.rs b/src/common/time/src/timestamp_millis.rs
index d3da8bd58eca..1968e2138461 100644
--- a/src/common/time/src/timestamp_millis.rs
+++ b/src/common/time/src/timestamp_millis.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/common/time/src/util.rs b/src/common/time/src/util.rs
index 1917ce3456a2..95bc6a2a64ad 100644
--- a/src/common/time/src/util.rs
+++ b/src/common/time/src/util.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datanode/src/datanode.rs b/src/datanode/src/datanode.rs
index ccc8b0d3c6f1..b5f15592da82 100644
--- a/src/datanode/src/datanode.rs
+++ b/src/datanode/src/datanode.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datanode/src/error.rs b/src/datanode/src/error.rs
index 4e1139a67837..72c70f085d0b 100644
--- a/src/datanode/src/error.rs
+++ b/src/datanode/src/error.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datanode/src/heartbeat.rs b/src/datanode/src/heartbeat.rs
index 37431574ebdb..bf17ef67ed43 100644
--- a/src/datanode/src/heartbeat.rs
+++ b/src/datanode/src/heartbeat.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datanode/src/instance.rs b/src/datanode/src/instance.rs
index c6d79457c214..e12b1c8bee58 100644
--- a/src/datanode/src/instance.rs
+++ b/src/datanode/src/instance.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datanode/src/instance/flight.rs b/src/datanode/src/instance/flight.rs
index a6bcd6fb9236..59158a161ad0 100644
--- a/src/datanode/src/instance/flight.rs
+++ b/src/datanode/src/instance/flight.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datanode/src/instance/flight/stream.rs b/src/datanode/src/instance/flight/stream.rs
index 5e86ca1d2c0e..d0a04a3962b2 100644
--- a/src/datanode/src/instance/flight/stream.rs
+++ b/src/datanode/src/instance/flight/stream.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datanode/src/instance/grpc.rs b/src/datanode/src/instance/grpc.rs
index 5bf2726823ac..1d88d8a01a11 100644
--- a/src/datanode/src/instance/grpc.rs
+++ b/src/datanode/src/instance/grpc.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datanode/src/instance/script.rs b/src/datanode/src/instance/script.rs
index c60a61567795..140f1bcca577 100644
--- a/src/datanode/src/instance/script.rs
+++ b/src/datanode/src/instance/script.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datanode/src/instance/sql.rs b/src/datanode/src/instance/sql.rs
index f238a0374557..c7d49d8d56ab 100644
--- a/src/datanode/src/instance/sql.rs
+++ b/src/datanode/src/instance/sql.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datanode/src/lib.rs b/src/datanode/src/lib.rs
index 3e1aa92a76c5..b88b240325c6 100644
--- a/src/datanode/src/lib.rs
+++ b/src/datanode/src/lib.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datanode/src/metric.rs b/src/datanode/src/metric.rs
index 97c7197bcfc6..22cb05b2c5ff 100644
--- a/src/datanode/src/metric.rs
+++ b/src/datanode/src/metric.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datanode/src/mock.rs b/src/datanode/src/mock.rs
index cff79afad118..2417150e5e01 100644
--- a/src/datanode/src/mock.rs
+++ b/src/datanode/src/mock.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datanode/src/script.rs b/src/datanode/src/script.rs
index f8ebea4e1eb6..1a284da16d66 100644
--- a/src/datanode/src/script.rs
+++ b/src/datanode/src/script.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datanode/src/server.rs b/src/datanode/src/server.rs
index 6cf285110d18..cdfec1f4acfc 100644
--- a/src/datanode/src/server.rs
+++ b/src/datanode/src/server.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datanode/src/server/grpc.rs b/src/datanode/src/server/grpc.rs
index 6de572d1f0e5..73ebb4f7ee6e 100644
--- a/src/datanode/src/server/grpc.rs
+++ b/src/datanode/src/server/grpc.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datanode/src/sql.rs b/src/datanode/src/sql.rs
index aa41e8a25536..52ccc406a0ef 100644
--- a/src/datanode/src/sql.rs
+++ b/src/datanode/src/sql.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datanode/src/sql/alter.rs b/src/datanode/src/sql/alter.rs
index 38b3a24b0061..92e41894685d 100644
--- a/src/datanode/src/sql/alter.rs
+++ b/src/datanode/src/sql/alter.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datanode/src/sql/create.rs b/src/datanode/src/sql/create.rs
index 7e83907e1b9b..873ba1f0b54c 100644
--- a/src/datanode/src/sql/create.rs
+++ b/src/datanode/src/sql/create.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datanode/src/sql/drop_table.rs b/src/datanode/src/sql/drop_table.rs
index 4a56b669c9d4..93b7548d5f28 100644
--- a/src/datanode/src/sql/drop_table.rs
+++ b/src/datanode/src/sql/drop_table.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datanode/src/sql/insert.rs b/src/datanode/src/sql/insert.rs
index 6c99b7172921..20b913ebaaf2 100644
--- a/src/datanode/src/sql/insert.rs
+++ b/src/datanode/src/sql/insert.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datanode/src/tests.rs b/src/datanode/src/tests.rs
index 8c460a53fd89..030ce4dbaadd 100644
--- a/src/datanode/src/tests.rs
+++ b/src/datanode/src/tests.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datanode/src/tests/instance_test.rs b/src/datanode/src/tests/instance_test.rs
index daf21216d9ce..e1d0cc367179 100644
--- a/src/datanode/src/tests/instance_test.rs
+++ b/src/datanode/src/tests/instance_test.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datanode/src/tests/test_util.rs b/src/datanode/src/tests/test_util.rs
index 292402cf1f9c..90233d43d2e4 100644
--- a/src/datanode/src/tests/test_util.rs
+++ b/src/datanode/src/tests/test_util.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datatypes/src/arrow_array.rs b/src/datatypes/src/arrow_array.rs
index 72de42214200..d9b231bdb41e 100644
--- a/src/datatypes/src/arrow_array.rs
+++ b/src/datatypes/src/arrow_array.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datatypes/src/data_type.rs b/src/datatypes/src/data_type.rs
index 9e4641defa30..0f0f971894a8 100644
--- a/src/datatypes/src/data_type.rs
+++ b/src/datatypes/src/data_type.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datatypes/src/error.rs b/src/datatypes/src/error.rs
index ddb390a8a58a..1b4c034c9d9e 100644
--- a/src/datatypes/src/error.rs
+++ b/src/datatypes/src/error.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datatypes/src/lib.rs b/src/datatypes/src/lib.rs
index ff15b33c8965..60f3b853159f 100644
--- a/src/datatypes/src/lib.rs
+++ b/src/datatypes/src/lib.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datatypes/src/macros.rs b/src/datatypes/src/macros.rs
index 37c0a42e3f55..19b7dad4ec6d 100644
--- a/src/datatypes/src/macros.rs
+++ b/src/datatypes/src/macros.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datatypes/src/prelude.rs b/src/datatypes/src/prelude.rs
index b1afe93042f4..2f67d2113a6e 100644
--- a/src/datatypes/src/prelude.rs
+++ b/src/datatypes/src/prelude.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datatypes/src/scalars.rs b/src/datatypes/src/scalars.rs
index 327ebaa629a2..78b8aa46876d 100644
--- a/src/datatypes/src/scalars.rs
+++ b/src/datatypes/src/scalars.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datatypes/src/schema.rs b/src/datatypes/src/schema.rs
index cd738448f0c9..aeb37d2afc2a 100644
--- a/src/datatypes/src/schema.rs
+++ b/src/datatypes/src/schema.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datatypes/src/schema/column_schema.rs b/src/datatypes/src/schema/column_schema.rs
index 7fc76fdda95a..5718fff6a81d 100644
--- a/src/datatypes/src/schema/column_schema.rs
+++ b/src/datatypes/src/schema/column_schema.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datatypes/src/schema/constraint.rs b/src/datatypes/src/schema/constraint.rs
index f512f3190d63..9c9557a73941 100644
--- a/src/datatypes/src/schema/constraint.rs
+++ b/src/datatypes/src/schema/constraint.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datatypes/src/schema/raw.rs b/src/datatypes/src/schema/raw.rs
index 75f0853b4b74..ab94e9ad8fcd 100644
--- a/src/datatypes/src/schema/raw.rs
+++ b/src/datatypes/src/schema/raw.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datatypes/src/serialize.rs b/src/datatypes/src/serialize.rs
index 1cbf04cedd79..1e7ad0a1230a 100644
--- a/src/datatypes/src/serialize.rs
+++ b/src/datatypes/src/serialize.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datatypes/src/timestamp.rs b/src/datatypes/src/timestamp.rs
index 82d68ae66234..83c749048fe2 100644
--- a/src/datatypes/src/timestamp.rs
+++ b/src/datatypes/src/timestamp.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datatypes/src/type_id.rs b/src/datatypes/src/type_id.rs
index bcb7ea52b129..5548d95147ab 100644
--- a/src/datatypes/src/type_id.rs
+++ b/src/datatypes/src/type_id.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datatypes/src/types.rs b/src/datatypes/src/types.rs
index 8f40c563defe..953baa617ae2 100644
--- a/src/datatypes/src/types.rs
+++ b/src/datatypes/src/types.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datatypes/src/types/binary_type.rs b/src/datatypes/src/types/binary_type.rs
index 0d06724fffb4..a465653c4bcf 100644
--- a/src/datatypes/src/types/binary_type.rs
+++ b/src/datatypes/src/types/binary_type.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datatypes/src/types/boolean_type.rs b/src/datatypes/src/types/boolean_type.rs
index 36d92169eb01..23896994f0bd 100644
--- a/src/datatypes/src/types/boolean_type.rs
+++ b/src/datatypes/src/types/boolean_type.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datatypes/src/types/date_type.rs b/src/datatypes/src/types/date_type.rs
index 6b7bb788a3dd..9be87096ba10 100644
--- a/src/datatypes/src/types/date_type.rs
+++ b/src/datatypes/src/types/date_type.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datatypes/src/types/datetime_type.rs b/src/datatypes/src/types/datetime_type.rs
index 47e1183a3a4e..8af826ab7939 100644
--- a/src/datatypes/src/types/datetime_type.rs
+++ b/src/datatypes/src/types/datetime_type.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datatypes/src/types/list_type.rs b/src/datatypes/src/types/list_type.rs
index 3c8535810d6c..08ac3439f4ec 100644
--- a/src/datatypes/src/types/list_type.rs
+++ b/src/datatypes/src/types/list_type.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datatypes/src/types/null_type.rs b/src/datatypes/src/types/null_type.rs
index 21a008e1da2a..f4bed2603c69 100644
--- a/src/datatypes/src/types/null_type.rs
+++ b/src/datatypes/src/types/null_type.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datatypes/src/types/primitive_type.rs b/src/datatypes/src/types/primitive_type.rs
index ea752cf8debb..4a9df85b3099 100644
--- a/src/datatypes/src/types/primitive_type.rs
+++ b/src/datatypes/src/types/primitive_type.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datatypes/src/types/string_type.rs b/src/datatypes/src/types/string_type.rs
index 799cbbbdd345..4252d5f55f3f 100644
--- a/src/datatypes/src/types/string_type.rs
+++ b/src/datatypes/src/types/string_type.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datatypes/src/types/timestamp_type.rs b/src/datatypes/src/types/timestamp_type.rs
index 629d901cc835..15c555c2727a 100644
--- a/src/datatypes/src/types/timestamp_type.rs
+++ b/src/datatypes/src/types/timestamp_type.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datatypes/src/value.rs b/src/datatypes/src/value.rs
index 3adbd2c6f342..30364ab7c5bf 100644
--- a/src/datatypes/src/value.rs
+++ b/src/datatypes/src/value.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datatypes/src/vectors.rs b/src/datatypes/src/vectors.rs
index 09ac4dc1eeaf..98dbcc8af965 100644
--- a/src/datatypes/src/vectors.rs
+++ b/src/datatypes/src/vectors.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datatypes/src/vectors/binary.rs b/src/datatypes/src/vectors/binary.rs
index b2756294c375..53d5b1d3e877 100644
--- a/src/datatypes/src/vectors/binary.rs
+++ b/src/datatypes/src/vectors/binary.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datatypes/src/vectors/boolean.rs b/src/datatypes/src/vectors/boolean.rs
index facbc2cfc624..788b4de4207a 100644
--- a/src/datatypes/src/vectors/boolean.rs
+++ b/src/datatypes/src/vectors/boolean.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datatypes/src/vectors/constant.rs b/src/datatypes/src/vectors/constant.rs
index 825d1dc7bd82..da5ac16f2413 100644
--- a/src/datatypes/src/vectors/constant.rs
+++ b/src/datatypes/src/vectors/constant.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datatypes/src/vectors/date.rs b/src/datatypes/src/vectors/date.rs
index d0a66b80fb63..22c60fbae99a 100644
--- a/src/datatypes/src/vectors/date.rs
+++ b/src/datatypes/src/vectors/date.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datatypes/src/vectors/datetime.rs b/src/datatypes/src/vectors/datetime.rs
index 524ada886981..dae8557096d4 100644
--- a/src/datatypes/src/vectors/datetime.rs
+++ b/src/datatypes/src/vectors/datetime.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datatypes/src/vectors/eq.rs b/src/datatypes/src/vectors/eq.rs
index 55359026d479..5f532622a30b 100644
--- a/src/datatypes/src/vectors/eq.rs
+++ b/src/datatypes/src/vectors/eq.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datatypes/src/vectors/helper.rs b/src/datatypes/src/vectors/helper.rs
index cd04eae6438c..c2cadc6dcdce 100644
--- a/src/datatypes/src/vectors/helper.rs
+++ b/src/datatypes/src/vectors/helper.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datatypes/src/vectors/list.rs b/src/datatypes/src/vectors/list.rs
index 3e9b3637b8ad..8f9c6ef8e5f6 100644
--- a/src/datatypes/src/vectors/list.rs
+++ b/src/datatypes/src/vectors/list.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datatypes/src/vectors/null.rs b/src/datatypes/src/vectors/null.rs
index 7f6d3fbeb1fd..e754335bdc2c 100644
--- a/src/datatypes/src/vectors/null.rs
+++ b/src/datatypes/src/vectors/null.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datatypes/src/vectors/operations.rs b/src/datatypes/src/vectors/operations.rs
index 70ddb4a0317a..adb430c96a2d 100644
--- a/src/datatypes/src/vectors/operations.rs
+++ b/src/datatypes/src/vectors/operations.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datatypes/src/vectors/operations/filter.rs b/src/datatypes/src/vectors/operations/filter.rs
index d921a67bb20a..f9ad6b2c3377 100644
--- a/src/datatypes/src/vectors/operations/filter.rs
+++ b/src/datatypes/src/vectors/operations/filter.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datatypes/src/vectors/operations/find_unique.rs b/src/datatypes/src/vectors/operations/find_unique.rs
index 7116a9e90d53..b76a975f8477 100644
--- a/src/datatypes/src/vectors/operations/find_unique.rs
+++ b/src/datatypes/src/vectors/operations/find_unique.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datatypes/src/vectors/operations/replicate.rs b/src/datatypes/src/vectors/operations/replicate.rs
index 8216517fc62d..e10923f34919 100644
--- a/src/datatypes/src/vectors/operations/replicate.rs
+++ b/src/datatypes/src/vectors/operations/replicate.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datatypes/src/vectors/primitive.rs b/src/datatypes/src/vectors/primitive.rs
index 42bee1e33921..d797cf2d2b17 100644
--- a/src/datatypes/src/vectors/primitive.rs
+++ b/src/datatypes/src/vectors/primitive.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datatypes/src/vectors/string.rs b/src/datatypes/src/vectors/string.rs
index 252116b3b2dd..3a7bf7cb11a4 100644
--- a/src/datatypes/src/vectors/string.rs
+++ b/src/datatypes/src/vectors/string.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datatypes/src/vectors/timestamp.rs b/src/datatypes/src/vectors/timestamp.rs
index 5d9f7f2ed1fc..8248811ea35b 100644
--- a/src/datatypes/src/vectors/timestamp.rs
+++ b/src/datatypes/src/vectors/timestamp.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/datatypes/src/vectors/validity.rs b/src/datatypes/src/vectors/validity.rs
index 01c7faa7895b..8b3722a511b8 100644
--- a/src/datatypes/src/vectors/validity.rs
+++ b/src/datatypes/src/vectors/validity.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/frontend/src/catalog.rs b/src/frontend/src/catalog.rs
index 0cfd453f7e26..15fb7f44fdf1 100644
--- a/src/frontend/src/catalog.rs
+++ b/src/frontend/src/catalog.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/frontend/src/datanode.rs b/src/frontend/src/datanode.rs
index c365ff2c33d3..c6b8efdbf290 100644
--- a/src/frontend/src/datanode.rs
+++ b/src/frontend/src/datanode.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/frontend/src/error.rs b/src/frontend/src/error.rs
index 92df4144765e..b6d116d56b26 100644
--- a/src/frontend/src/error.rs
+++ b/src/frontend/src/error.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/frontend/src/expr_factory.rs b/src/frontend/src/expr_factory.rs
index dff5a768f30b..f6bf91847851 100644
--- a/src/frontend/src/expr_factory.rs
+++ b/src/frontend/src/expr_factory.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/frontend/src/frontend.rs b/src/frontend/src/frontend.rs
index 2c943de82db4..563d3924346d 100644
--- a/src/frontend/src/frontend.rs
+++ b/src/frontend/src/frontend.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/frontend/src/grpc.rs b/src/frontend/src/grpc.rs
index 49044dfc4f45..92d6ea771710 100644
--- a/src/frontend/src/grpc.rs
+++ b/src/frontend/src/grpc.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/frontend/src/influxdb.rs b/src/frontend/src/influxdb.rs
index dc5dcec4ea18..8a113b53813e 100644
--- a/src/frontend/src/influxdb.rs
+++ b/src/frontend/src/influxdb.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/frontend/src/instance.rs b/src/frontend/src/instance.rs
index b2b7bab0cf64..d4b7c030fd65 100644
--- a/src/frontend/src/instance.rs
+++ b/src/frontend/src/instance.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/frontend/src/instance/distributed.rs b/src/frontend/src/instance/distributed.rs
index 63fa90c85710..7d0f66a86c33 100644
--- a/src/frontend/src/instance/distributed.rs
+++ b/src/frontend/src/instance/distributed.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/frontend/src/instance/influxdb.rs b/src/frontend/src/instance/influxdb.rs
index 014b131ffc35..0cd69b7432a2 100644
--- a/src/frontend/src/instance/influxdb.rs
+++ b/src/frontend/src/instance/influxdb.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/frontend/src/instance/opentsdb.rs b/src/frontend/src/instance/opentsdb.rs
index b52f37aabb2e..941cdc257a8a 100644
--- a/src/frontend/src/instance/opentsdb.rs
+++ b/src/frontend/src/instance/opentsdb.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/frontend/src/instance/prometheus.rs b/src/frontend/src/instance/prometheus.rs
index 393a9488e01a..a899f64baed3 100644
--- a/src/frontend/src/instance/prometheus.rs
+++ b/src/frontend/src/instance/prometheus.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/frontend/src/lib.rs b/src/frontend/src/lib.rs
index 0c5bf3381692..14dd7328b32e 100644
--- a/src/frontend/src/lib.rs
+++ b/src/frontend/src/lib.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/frontend/src/mysql.rs b/src/frontend/src/mysql.rs
index 87888b147b2b..2d0ebcfd6a0e 100644
--- a/src/frontend/src/mysql.rs
+++ b/src/frontend/src/mysql.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/frontend/src/opentsdb.rs b/src/frontend/src/opentsdb.rs
index 16cc5c5fa8a3..fb3af5afa4cc 100644
--- a/src/frontend/src/opentsdb.rs
+++ b/src/frontend/src/opentsdb.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/frontend/src/partitioning.rs b/src/frontend/src/partitioning.rs
index e83e3bc45406..3f47cc426184 100644
--- a/src/frontend/src/partitioning.rs
+++ b/src/frontend/src/partitioning.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/frontend/src/partitioning/columns.rs b/src/frontend/src/partitioning/columns.rs
index 3d8704906ef4..6e8e739c1384 100644
--- a/src/frontend/src/partitioning/columns.rs
+++ b/src/frontend/src/partitioning/columns.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/frontend/src/partitioning/range.rs b/src/frontend/src/partitioning/range.rs
index dc02a02d2f20..a608a51542f3 100644
--- a/src/frontend/src/partitioning/range.rs
+++ b/src/frontend/src/partitioning/range.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/frontend/src/postgres.rs b/src/frontend/src/postgres.rs
index 144758f315e3..60f8ae22dbd5 100644
--- a/src/frontend/src/postgres.rs
+++ b/src/frontend/src/postgres.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/frontend/src/prometheus.rs b/src/frontend/src/prometheus.rs
index 3a56e1d8fb4e..6dac05419acc 100644
--- a/src/frontend/src/prometheus.rs
+++ b/src/frontend/src/prometheus.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/frontend/src/server.rs b/src/frontend/src/server.rs
index 9636a5318f38..a8414594d079 100644
--- a/src/frontend/src/server.rs
+++ b/src/frontend/src/server.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/frontend/src/spliter.rs b/src/frontend/src/spliter.rs
index d7753d434c20..5c64722497c6 100644
--- a/src/frontend/src/spliter.rs
+++ b/src/frontend/src/spliter.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/frontend/src/sql.rs b/src/frontend/src/sql.rs
index 8814ef2bf5dd..0e85d8f0a7af 100644
--- a/src/frontend/src/sql.rs
+++ b/src/frontend/src/sql.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/frontend/src/table.rs b/src/frontend/src/table.rs
index 25a78a69792f..c50bd2395d18 100644
--- a/src/frontend/src/table.rs
+++ b/src/frontend/src/table.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/frontend/src/table/insert.rs b/src/frontend/src/table/insert.rs
index e5c02a86962d..bcb4539675b8 100644
--- a/src/frontend/src/table/insert.rs
+++ b/src/frontend/src/table/insert.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/frontend/src/table/route.rs b/src/frontend/src/table/route.rs
index 8099d43a30da..68f0c7225df2 100644
--- a/src/frontend/src/table/route.rs
+++ b/src/frontend/src/table/route.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/frontend/src/table/scan.rs b/src/frontend/src/table/scan.rs
index c69478a8b95d..ae69011f4dad 100644
--- a/src/frontend/src/table/scan.rs
+++ b/src/frontend/src/table/scan.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/frontend/src/tests.rs b/src/frontend/src/tests.rs
index 4481a809fd74..c576c15c4f29 100644
--- a/src/frontend/src/tests.rs
+++ b/src/frontend/src/tests.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/log-store/src/error.rs b/src/log-store/src/error.rs
index e510038ddb49..d767987a43c3 100644
--- a/src/log-store/src/error.rs
+++ b/src/log-store/src/error.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/log-store/src/fs.rs b/src/log-store/src/fs.rs
index e8f334be4945..d48c2e78296d 100644
--- a/src/log-store/src/fs.rs
+++ b/src/log-store/src/fs.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/log-store/src/fs/chunk.rs b/src/log-store/src/fs/chunk.rs
index a59b34e55762..0a0fcb65cd77 100644
--- a/src/log-store/src/fs/chunk.rs
+++ b/src/log-store/src/fs/chunk.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/log-store/src/fs/config.rs b/src/log-store/src/fs/config.rs
index 3a6c84a2ee54..50d6d761255d 100644
--- a/src/log-store/src/fs/config.rs
+++ b/src/log-store/src/fs/config.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/log-store/src/fs/crc.rs b/src/log-store/src/fs/crc.rs
index 7410d2161603..91704f2f0000 100644
--- a/src/log-store/src/fs/crc.rs
+++ b/src/log-store/src/fs/crc.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/log-store/src/fs/entry.rs b/src/log-store/src/fs/entry.rs
index 5bc92cadbb9e..196706d689b1 100644
--- a/src/log-store/src/fs/entry.rs
+++ b/src/log-store/src/fs/entry.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/log-store/src/fs/file.rs b/src/log-store/src/fs/file.rs
index 132fbd337e79..38cae26a29d1 100644
--- a/src/log-store/src/fs/file.rs
+++ b/src/log-store/src/fs/file.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/log-store/src/fs/file_name.rs b/src/log-store/src/fs/file_name.rs
index 555513803948..9bd9abf86c3b 100644
--- a/src/log-store/src/fs/file_name.rs
+++ b/src/log-store/src/fs/file_name.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/log-store/src/fs/index.rs b/src/log-store/src/fs/index.rs
index 2d1a0da58068..5f365de9d7ab 100644
--- a/src/log-store/src/fs/index.rs
+++ b/src/log-store/src/fs/index.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/log-store/src/fs/io.rs b/src/log-store/src/fs/io.rs
index 0d4bb2dd556d..a6a60e76985c 100644
--- a/src/log-store/src/fs/io.rs
+++ b/src/log-store/src/fs/io.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/log-store/src/fs/io/fallback.rs b/src/log-store/src/fs/io/fallback.rs
index a56ca4538a40..4fa4a6bcff10 100644
--- a/src/log-store/src/fs/io/fallback.rs
+++ b/src/log-store/src/fs/io/fallback.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/log-store/src/fs/io/unix.rs b/src/log-store/src/fs/io/unix.rs
index f0936ada2cf2..e4be97767d4a 100644
--- a/src/log-store/src/fs/io/unix.rs
+++ b/src/log-store/src/fs/io/unix.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/log-store/src/fs/log.rs b/src/log-store/src/fs/log.rs
index 96576b330d62..776e0f17428e 100644
--- a/src/log-store/src/fs/log.rs
+++ b/src/log-store/src/fs/log.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/log-store/src/fs/namespace.rs b/src/log-store/src/fs/namespace.rs
index 05203903b75b..66655a2f7922 100644
--- a/src/log-store/src/fs/namespace.rs
+++ b/src/log-store/src/fs/namespace.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/log-store/src/fs/noop.rs b/src/log-store/src/fs/noop.rs
index 099d3a9ce9ee..d3baecf092d8 100644
--- a/src/log-store/src/fs/noop.rs
+++ b/src/log-store/src/fs/noop.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/log-store/src/lib.rs b/src/log-store/src/lib.rs
index 0cd3815d074e..8ee1155631a1 100644
--- a/src/log-store/src/lib.rs
+++ b/src/log-store/src/lib.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/log-store/src/test_util.rs b/src/log-store/src/test_util.rs
index 4c04334accc9..973d6d3f9720 100644
--- a/src/log-store/src/test_util.rs
+++ b/src/log-store/src/test_util.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/log-store/src/test_util/log_store_util.rs b/src/log-store/src/test_util/log_store_util.rs
index a8d4d24f8860..795060a8fdda 100644
--- a/src/log-store/src/test_util/log_store_util.rs
+++ b/src/log-store/src/test_util/log_store_util.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/meta-client/examples/meta_client.rs b/src/meta-client/examples/meta_client.rs
index 6bf23a488d5a..7558a4c8f225 100644
--- a/src/meta-client/examples/meta_client.rs
+++ b/src/meta-client/examples/meta_client.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/meta-client/src/client.rs b/src/meta-client/src/client.rs
index f85fec8b4cf1..ffaf40c80993 100644
--- a/src/meta-client/src/client.rs
+++ b/src/meta-client/src/client.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/meta-client/src/client/heartbeat.rs b/src/meta-client/src/client/heartbeat.rs
index 3c9f5fb44abc..2b2e64843489 100644
--- a/src/meta-client/src/client/heartbeat.rs
+++ b/src/meta-client/src/client/heartbeat.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/meta-client/src/client/load_balance.rs b/src/meta-client/src/client/load_balance.rs
index f770056a9891..1c8df613c3a8 100644
--- a/src/meta-client/src/client/load_balance.rs
+++ b/src/meta-client/src/client/load_balance.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/meta-client/src/client/router.rs b/src/meta-client/src/client/router.rs
index ab6f3b459f53..486edfa11fdf 100644
--- a/src/meta-client/src/client/router.rs
+++ b/src/meta-client/src/client/router.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/meta-client/src/client/store.rs b/src/meta-client/src/client/store.rs
index be860419f967..b7b890ec28f7 100644
--- a/src/meta-client/src/client/store.rs
+++ b/src/meta-client/src/client/store.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/meta-client/src/error.rs b/src/meta-client/src/error.rs
index 48252c9d56f8..51ad1732787a 100644
--- a/src/meta-client/src/error.rs
+++ b/src/meta-client/src/error.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/meta-client/src/lib.rs b/src/meta-client/src/lib.rs
index 76e017396837..153161cc3b9a 100644
--- a/src/meta-client/src/lib.rs
+++ b/src/meta-client/src/lib.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/meta-client/src/mocks.rs b/src/meta-client/src/mocks.rs
index e0aa7671dddb..0e9eb5223215 100644
--- a/src/meta-client/src/mocks.rs
+++ b/src/meta-client/src/mocks.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/meta-client/src/rpc.rs b/src/meta-client/src/rpc.rs
index 23c4f2ac58b3..66844a00bbdb 100644
--- a/src/meta-client/src/rpc.rs
+++ b/src/meta-client/src/rpc.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/meta-client/src/rpc/router.rs b/src/meta-client/src/rpc/router.rs
index 9cc63acb70c6..2dfd2e9a389a 100644
--- a/src/meta-client/src/rpc/router.rs
+++ b/src/meta-client/src/rpc/router.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/meta-client/src/rpc/store.rs b/src/meta-client/src/rpc/store.rs
index 9c7f53dc6e4f..50e126148d61 100644
--- a/src/meta-client/src/rpc/store.rs
+++ b/src/meta-client/src/rpc/store.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/meta-client/src/rpc/util.rs b/src/meta-client/src/rpc/util.rs
index 58e6bebacb86..59fde8648118 100644
--- a/src/meta-client/src/rpc/util.rs
+++ b/src/meta-client/src/rpc/util.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/meta-srv/examples/kv_store.rs b/src/meta-srv/examples/kv_store.rs
index 3ff381265033..bae7a1a996e1 100644
--- a/src/meta-srv/examples/kv_store.rs
+++ b/src/meta-srv/examples/kv_store.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/meta-srv/src/bootstrap.rs b/src/meta-srv/src/bootstrap.rs
index a92cbbb1c3e8..9d5575c9f729 100644
--- a/src/meta-srv/src/bootstrap.rs
+++ b/src/meta-srv/src/bootstrap.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/meta-srv/src/election.rs b/src/meta-srv/src/election.rs
index 32e154bb5fd8..b91ca9b22c4c 100644
--- a/src/meta-srv/src/election.rs
+++ b/src/meta-srv/src/election.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/meta-srv/src/election/etcd.rs b/src/meta-srv/src/election/etcd.rs
index d2d3e362c4fb..59fca5e3134b 100644
--- a/src/meta-srv/src/election/etcd.rs
+++ b/src/meta-srv/src/election/etcd.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/meta-srv/src/error.rs b/src/meta-srv/src/error.rs
index f010303a9840..703707818432 100644
--- a/src/meta-srv/src/error.rs
+++ b/src/meta-srv/src/error.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/meta-srv/src/handler.rs b/src/meta-srv/src/handler.rs
index 07d36df6f5c0..7fe0892d29c1 100644
--- a/src/meta-srv/src/handler.rs
+++ b/src/meta-srv/src/handler.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/meta-srv/src/handler/check_leader.rs b/src/meta-srv/src/handler/check_leader.rs
index da93c358ea1d..5efca6208f91 100644
--- a/src/meta-srv/src/handler/check_leader.rs
+++ b/src/meta-srv/src/handler/check_leader.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/meta-srv/src/handler/datanode_lease.rs b/src/meta-srv/src/handler/datanode_lease.rs
index 66f94eeb31b0..65089bba5c9a 100644
--- a/src/meta-srv/src/handler/datanode_lease.rs
+++ b/src/meta-srv/src/handler/datanode_lease.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/meta-srv/src/handler/response_header.rs b/src/meta-srv/src/handler/response_header.rs
index 509d3e9aefdb..598e10305896 100644
--- a/src/meta-srv/src/handler/response_header.rs
+++ b/src/meta-srv/src/handler/response_header.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/meta-srv/src/keys.rs b/src/meta-srv/src/keys.rs
index 0f6bf7a22d6b..1efec0c187f4 100644
--- a/src/meta-srv/src/keys.rs
+++ b/src/meta-srv/src/keys.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/meta-srv/src/lease.rs b/src/meta-srv/src/lease.rs
index 570749a765fc..45e974c7f5b4 100644
--- a/src/meta-srv/src/lease.rs
+++ b/src/meta-srv/src/lease.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/meta-srv/src/lib.rs b/src/meta-srv/src/lib.rs
index 25cdc1d61b5a..b0424b4ae2d6 100644
--- a/src/meta-srv/src/lib.rs
+++ b/src/meta-srv/src/lib.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/meta-srv/src/metasrv.rs b/src/meta-srv/src/metasrv.rs
index e6a8373f18c2..5e6245ac5c9e 100644
--- a/src/meta-srv/src/metasrv.rs
+++ b/src/meta-srv/src/metasrv.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/meta-srv/src/mocks.rs b/src/meta-srv/src/mocks.rs
index 05e901ec268d..6d7203c74002 100644
--- a/src/meta-srv/src/mocks.rs
+++ b/src/meta-srv/src/mocks.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/meta-srv/src/selector.rs b/src/meta-srv/src/selector.rs
index 34af33808e5b..24045651463d 100644
--- a/src/meta-srv/src/selector.rs
+++ b/src/meta-srv/src/selector.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/meta-srv/src/selector/lease_based.rs b/src/meta-srv/src/selector/lease_based.rs
index 5cc218d1b8a5..061bcd9f02d5 100644
--- a/src/meta-srv/src/selector/lease_based.rs
+++ b/src/meta-srv/src/selector/lease_based.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/meta-srv/src/sequence.rs b/src/meta-srv/src/sequence.rs
index ffbf250b1754..4b87f6b032ec 100644
--- a/src/meta-srv/src/sequence.rs
+++ b/src/meta-srv/src/sequence.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/meta-srv/src/service.rs b/src/meta-srv/src/service.rs
index 6061d4297a2f..ed52d3bab84b 100644
--- a/src/meta-srv/src/service.rs
+++ b/src/meta-srv/src/service.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/meta-srv/src/service/admin.rs b/src/meta-srv/src/service/admin.rs
index abacfd8872b3..05986b7873c2 100644
--- a/src/meta-srv/src/service/admin.rs
+++ b/src/meta-srv/src/service/admin.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/meta-srv/src/service/admin/health.rs b/src/meta-srv/src/service/admin/health.rs
index 7a4649c0e42e..1b14172cd2d7 100644
--- a/src/meta-srv/src/service/admin/health.rs
+++ b/src/meta-srv/src/service/admin/health.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/meta-srv/src/service/heartbeat.rs b/src/meta-srv/src/service/heartbeat.rs
index 331b6b3b1059..81fadbb6a5ce 100644
--- a/src/meta-srv/src/service/heartbeat.rs
+++ b/src/meta-srv/src/service/heartbeat.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/meta-srv/src/service/router.rs b/src/meta-srv/src/service/router.rs
index 18cf40df43fb..29c94b8a4ebb 100644
--- a/src/meta-srv/src/service/router.rs
+++ b/src/meta-srv/src/service/router.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/meta-srv/src/service/store.rs b/src/meta-srv/src/service/store.rs
index fc1190082794..4517d6590ad5 100644
--- a/src/meta-srv/src/service/store.rs
+++ b/src/meta-srv/src/service/store.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/meta-srv/src/service/store/etcd.rs b/src/meta-srv/src/service/store/etcd.rs
index 19b8f8da107a..fb1d6edeb4f5 100644
--- a/src/meta-srv/src/service/store/etcd.rs
+++ b/src/meta-srv/src/service/store/etcd.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/meta-srv/src/service/store/kv.rs b/src/meta-srv/src/service/store/kv.rs
index 7b5b43f9db6c..17962abd7b5e 100644
--- a/src/meta-srv/src/service/store/kv.rs
+++ b/src/meta-srv/src/service/store/kv.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/meta-srv/src/service/store/memory.rs b/src/meta-srv/src/service/store/memory.rs
index 03efc74292e5..f6a6fe470130 100644
--- a/src/meta-srv/src/service/store/memory.rs
+++ b/src/meta-srv/src/service/store/memory.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/meta-srv/src/util.rs b/src/meta-srv/src/util.rs
index 8fcdfc7e9e8c..8b8c54c1e72f 100644
--- a/src/meta-srv/src/util.rs
+++ b/src/meta-srv/src/util.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/mito/src/config.rs b/src/mito/src/config.rs
index 4bac9a52abbb..669ce741172f 100644
--- a/src/mito/src/config.rs
+++ b/src/mito/src/config.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/mito/src/engine.rs b/src/mito/src/engine.rs
index cab3d527dc86..8f0b3a6f2587 100644
--- a/src/mito/src/engine.rs
+++ b/src/mito/src/engine.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/mito/src/error.rs b/src/mito/src/error.rs
index ff92512028ad..dc65d095078c 100644
--- a/src/mito/src/error.rs
+++ b/src/mito/src/error.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/mito/src/lib.rs b/src/mito/src/lib.rs
index ec2fb53785fe..2a856ce7c96f 100644
--- a/src/mito/src/lib.rs
+++ b/src/mito/src/lib.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/mito/src/manifest.rs b/src/mito/src/manifest.rs
index 1de7148c23e7..bdb31ce38d06 100644
--- a/src/mito/src/manifest.rs
+++ b/src/mito/src/manifest.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/mito/src/manifest/action.rs b/src/mito/src/manifest/action.rs
index 4e2ba43db44a..34adae2eb86c 100644
--- a/src/mito/src/manifest/action.rs
+++ b/src/mito/src/manifest/action.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/mito/src/table.rs b/src/mito/src/table.rs
index e0dd91055b65..548929722801 100644
--- a/src/mito/src/table.rs
+++ b/src/mito/src/table.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/mito/src/table/test_util.rs b/src/mito/src/table/test_util.rs
index 80f736c26dbe..bc3b7bab8bea 100644
--- a/src/mito/src/table/test_util.rs
+++ b/src/mito/src/table/test_util.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/mito/src/table/test_util/mock_engine.rs b/src/mito/src/table/test_util/mock_engine.rs
index c659bc08e73c..bbed1ccd0bdd 100644
--- a/src/mito/src/table/test_util/mock_engine.rs
+++ b/src/mito/src/table/test_util/mock_engine.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/object-store/src/backend.rs b/src/object-store/src/backend.rs
index d469c0bb7beb..c4689d79d814 100644
--- a/src/object-store/src/backend.rs
+++ b/src/object-store/src/backend.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/object-store/src/backend/azblob.rs b/src/object-store/src/backend/azblob.rs
index 2ee88a262873..755c77d60a4e 100644
--- a/src/object-store/src/backend/azblob.rs
+++ b/src/object-store/src/backend/azblob.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/object-store/src/backend/fs.rs b/src/object-store/src/backend/fs.rs
index 4e46c523562e..bc1cebe5b2e4 100644
--- a/src/object-store/src/backend/fs.rs
+++ b/src/object-store/src/backend/fs.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/object-store/src/backend/memory.rs b/src/object-store/src/backend/memory.rs
index 5ec43d5c2941..22fd16186a9f 100644
--- a/src/object-store/src/backend/memory.rs
+++ b/src/object-store/src/backend/memory.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/object-store/src/backend/s3.rs b/src/object-store/src/backend/s3.rs
index d419967a567b..46faa2659d0c 100644
--- a/src/object-store/src/backend/s3.rs
+++ b/src/object-store/src/backend/s3.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/object-store/src/lib.rs b/src/object-store/src/lib.rs
index 7d6673d64735..9ac89f0c7ca2 100644
--- a/src/object-store/src/lib.rs
+++ b/src/object-store/src/lib.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/object-store/src/test_util.rs b/src/object-store/src/test_util.rs
index d443aaf005b1..d1a22df206cd 100644
--- a/src/object-store/src/test_util.rs
+++ b/src/object-store/src/test_util.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/object-store/src/util.rs b/src/object-store/src/util.rs
index 298069ab3bf3..de9f25f8c8ec 100644
--- a/src/object-store/src/util.rs
+++ b/src/object-store/src/util.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/object-store/tests/object_store_test.rs b/src/object-store/tests/object_store_test.rs
index 33cba429fd12..c3173fbf99c5 100644
--- a/src/object-store/tests/object_store_test.rs
+++ b/src/object-store/tests/object_store_test.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/promql/src/engine.rs b/src/promql/src/engine.rs
index 910d75d747da..d21a421e2207 100644
--- a/src/promql/src/engine.rs
+++ b/src/promql/src/engine.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/promql/src/engine/evaluator.rs b/src/promql/src/engine/evaluator.rs
index cb8d3c9f385e..5352e6d9c51b 100644
--- a/src/promql/src/engine/evaluator.rs
+++ b/src/promql/src/engine/evaluator.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/promql/src/engine/functions.rs b/src/promql/src/engine/functions.rs
index 9e0fcc186d8e..99a4a29db140 100644
--- a/src/promql/src/engine/functions.rs
+++ b/src/promql/src/engine/functions.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/promql/src/error.rs b/src/promql/src/error.rs
index 14eefab56a15..5d98d29c7aff 100644
--- a/src/promql/src/error.rs
+++ b/src/promql/src/error.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/promql/src/extension_plan.rs b/src/promql/src/extension_plan.rs
index c314ce041cec..0878ab2ad4d2 100644
--- a/src/promql/src/extension_plan.rs
+++ b/src/promql/src/extension_plan.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/promql/src/extension_plan/normalize.rs b/src/promql/src/extension_plan/normalize.rs
index ae08dc62c4e7..20bea1f5e4e9 100644
--- a/src/promql/src/extension_plan/normalize.rs
+++ b/src/promql/src/extension_plan/normalize.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/promql/src/lib.rs b/src/promql/src/lib.rs
index 552b4fd0ff60..83fb3293f61b 100644
--- a/src/promql/src/lib.rs
+++ b/src/promql/src/lib.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/query/src/datafusion.rs b/src/query/src/datafusion.rs
index 21c4cc767e35..932cbe51490c 100644
--- a/src/query/src/datafusion.rs
+++ b/src/query/src/datafusion.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/query/src/datafusion/catalog_adapter.rs b/src/query/src/datafusion/catalog_adapter.rs
index f21cf4406e0a..9957bca99a91 100644
--- a/src/query/src/datafusion/catalog_adapter.rs
+++ b/src/query/src/datafusion/catalog_adapter.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/query/src/datafusion/error.rs b/src/query/src/datafusion/error.rs
index 95ffc8d84321..526973d228e5 100644
--- a/src/query/src/datafusion/error.rs
+++ b/src/query/src/datafusion/error.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/query/src/datafusion/planner.rs b/src/query/src/datafusion/planner.rs
index f39b4ef9fcaf..6ca3223ab8fb 100644
--- a/src/query/src/datafusion/planner.rs
+++ b/src/query/src/datafusion/planner.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/query/src/error.rs b/src/query/src/error.rs
index a24fee0240de..4ca16f10f889 100644
--- a/src/query/src/error.rs
+++ b/src/query/src/error.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/query/src/executor.rs b/src/query/src/executor.rs
index 52664940fb1e..842c24155138 100644
--- a/src/query/src/executor.rs
+++ b/src/query/src/executor.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/query/src/function.rs b/src/query/src/function.rs
index 017106127743..1f142ee547b0 100644
--- a/src/query/src/function.rs
+++ b/src/query/src/function.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/query/src/lib.rs b/src/query/src/lib.rs
index 5b25707dc7e0..76cde6932c3f 100644
--- a/src/query/src/lib.rs
+++ b/src/query/src/lib.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/query/src/logical_optimizer.rs b/src/query/src/logical_optimizer.rs
index 266a1a4233bc..5e3db6d8fe39 100644
--- a/src/query/src/logical_optimizer.rs
+++ b/src/query/src/logical_optimizer.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/query/src/metric.rs b/src/query/src/metric.rs
index 7f927098f60f..d2427ff95dfd 100644
--- a/src/query/src/metric.rs
+++ b/src/query/src/metric.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/query/src/optimizer.rs b/src/query/src/optimizer.rs
index 2dbddd1fb3ba..637fdefe8e2b 100644
--- a/src/query/src/optimizer.rs
+++ b/src/query/src/optimizer.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/query/src/physical_optimizer.rs b/src/query/src/physical_optimizer.rs
index a75c629057a9..479e6164a554 100644
--- a/src/query/src/physical_optimizer.rs
+++ b/src/query/src/physical_optimizer.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/query/src/physical_planner.rs b/src/query/src/physical_planner.rs
index 40213a1346fa..c282cd667942 100644
--- a/src/query/src/physical_planner.rs
+++ b/src/query/src/physical_planner.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/query/src/plan.rs b/src/query/src/plan.rs
index 5182db4f6aea..823ea7286e00 100644
--- a/src/query/src/plan.rs
+++ b/src/query/src/plan.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/query/src/planner.rs b/src/query/src/planner.rs
index 8bf3468e0401..8f59912aab37 100644
--- a/src/query/src/planner.rs
+++ b/src/query/src/planner.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/query/src/query_engine.rs b/src/query/src/query_engine.rs
index 110f78e6f3d1..d1f55667dd70 100644
--- a/src/query/src/query_engine.rs
+++ b/src/query/src/query_engine.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/query/src/query_engine/context.rs b/src/query/src/query_engine/context.rs
index c54cb8b59597..5641f9198626 100644
--- a/src/query/src/query_engine/context.rs
+++ b/src/query/src/query_engine/context.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/query/src/query_engine/state.rs b/src/query/src/query_engine/state.rs
index f748d429c70f..1c102ed2f028 100644
--- a/src/query/src/query_engine/state.rs
+++ b/src/query/src/query_engine/state.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/query/src/sql.rs b/src/query/src/sql.rs
index 327394416eb0..7a68b52b6926 100644
--- a/src/query/src/sql.rs
+++ b/src/query/src/sql.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/query/tests/argmax_test.rs b/src/query/tests/argmax_test.rs
index 88dbde83e416..8a4bc61b17c4 100644
--- a/src/query/tests/argmax_test.rs
+++ b/src/query/tests/argmax_test.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/query/tests/argmin_test.rs b/src/query/tests/argmin_test.rs
index 2655a8db17e6..68313ea808b0 100644
--- a/src/query/tests/argmin_test.rs
+++ b/src/query/tests/argmin_test.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/query/tests/function.rs b/src/query/tests/function.rs
index 3a0bad335168..ef9a198a1cf3 100644
--- a/src/query/tests/function.rs
+++ b/src/query/tests/function.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/query/tests/mean_test.rs b/src/query/tests/mean_test.rs
index 56bf327339d1..f44c498e1f9d 100644
--- a/src/query/tests/mean_test.rs
+++ b/src/query/tests/mean_test.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/query/tests/my_sum_udaf_example.rs b/src/query/tests/my_sum_udaf_example.rs
index 06adca86c1fc..f4323ef3b26c 100644
--- a/src/query/tests/my_sum_udaf_example.rs
+++ b/src/query/tests/my_sum_udaf_example.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/query/tests/percentile_test.rs b/src/query/tests/percentile_test.rs
index 724d80c663df..38806311ea42 100644
--- a/src/query/tests/percentile_test.rs
+++ b/src/query/tests/percentile_test.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/query/tests/polyval_test.rs b/src/query/tests/polyval_test.rs
index d174d20ec0c3..299bd74e3324 100644
--- a/src/query/tests/polyval_test.rs
+++ b/src/query/tests/polyval_test.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/query/tests/pow.rs b/src/query/tests/pow.rs
index d48c28b22051..ffb0e85e02c7 100644
--- a/src/query/tests/pow.rs
+++ b/src/query/tests/pow.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/query/tests/query_engine_test.rs b/src/query/tests/query_engine_test.rs
index 05bb32a2c415..010bee1176b8 100644
--- a/src/query/tests/query_engine_test.rs
+++ b/src/query/tests/query_engine_test.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/query/tests/scipy_stats_norm_cdf_test.rs b/src/query/tests/scipy_stats_norm_cdf_test.rs
index 0c8f50251880..2a48e704ee33 100644
--- a/src/query/tests/scipy_stats_norm_cdf_test.rs
+++ b/src/query/tests/scipy_stats_norm_cdf_test.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/query/tests/scipy_stats_norm_pdf.rs b/src/query/tests/scipy_stats_norm_pdf.rs
index 1142db436483..da3c6b704125 100644
--- a/src/query/tests/scipy_stats_norm_pdf.rs
+++ b/src/query/tests/scipy_stats_norm_pdf.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/script/src/engine.rs b/src/script/src/engine.rs
index 8740dd0dcc1d..004ce351bb91 100644
--- a/src/script/src/engine.rs
+++ b/src/script/src/engine.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/script/src/error.rs b/src/script/src/error.rs
index 60e06647a1dc..2734f2c47304 100644
--- a/src/script/src/error.rs
+++ b/src/script/src/error.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/script/src/lib.rs b/src/script/src/lib.rs
index adb949918c20..3dafa5469721 100644
--- a/src/script/src/lib.rs
+++ b/src/script/src/lib.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/script/src/manager.rs b/src/script/src/manager.rs
index 1949c7723359..9485fb91aa81 100644
--- a/src/script/src/manager.rs
+++ b/src/script/src/manager.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/script/src/python.rs b/src/script/src/python.rs
index 5fb65828be75..30e402ef294a 100644
--- a/src/script/src/python.rs
+++ b/src/script/src/python.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/script/src/python/builtins.rs b/src/script/src/python/builtins.rs
index f23cd63ad07b..db1f62208d33 100644
--- a/src/script/src/python/builtins.rs
+++ b/src/script/src/python/builtins.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/script/src/python/builtins/test.rs b/src/script/src/python/builtins/test.rs
index d9ab067811ac..6738f9e7229f 100644
--- a/src/script/src/python/builtins/test.rs
+++ b/src/script/src/python/builtins/test.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/script/src/python/coprocessor.rs b/src/script/src/python/coprocessor.rs
index b9a77ef34043..165691912fee 100644
--- a/src/script/src/python/coprocessor.rs
+++ b/src/script/src/python/coprocessor.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/script/src/python/coprocessor/compile.rs b/src/script/src/python/coprocessor/compile.rs
index ad991c78f12b..9f5f0cd82ec9 100644
--- a/src/script/src/python/coprocessor/compile.rs
+++ b/src/script/src/python/coprocessor/compile.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/script/src/python/coprocessor/parse.rs b/src/script/src/python/coprocessor/parse.rs
index b8280a8db11f..57b348552abf 100644
--- a/src/script/src/python/coprocessor/parse.rs
+++ b/src/script/src/python/coprocessor/parse.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/script/src/python/engine.rs b/src/script/src/python/engine.rs
index 848bf71d8b12..cfaaf18bed2a 100644
--- a/src/script/src/python/engine.rs
+++ b/src/script/src/python/engine.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/script/src/python/error.rs b/src/script/src/python/error.rs
index 9564e373f726..05ecdc2ac2f1 100644
--- a/src/script/src/python/error.rs
+++ b/src/script/src/python/error.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/script/src/python/test.rs b/src/script/src/python/test.rs
index 901e8391ba6a..59502fe8c361 100644
--- a/src/script/src/python/test.rs
+++ b/src/script/src/python/test.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/script/src/python/utils.rs b/src/script/src/python/utils.rs
index 56c73ccb3ef3..a0f1df2a8bd6 100644
--- a/src/script/src/python/utils.rs
+++ b/src/script/src/python/utils.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/script/src/python/vector.rs b/src/script/src/python/vector.rs
index 484d826ef193..0d8294772c95 100644
--- a/src/script/src/python/vector.rs
+++ b/src/script/src/python/vector.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/script/src/table.rs b/src/script/src/table.rs
index 7c1570d8d1c9..c88b66fe6780 100644
--- a/src/script/src/table.rs
+++ b/src/script/src/table.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/servers/src/auth.rs b/src/servers/src/auth.rs
index 0ad3cd021316..a029a83f5a20 100644
--- a/src/servers/src/auth.rs
+++ b/src/servers/src/auth.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/servers/src/auth/user_provider.rs b/src/servers/src/auth/user_provider.rs
index 84edb725c736..c25855e25d9b 100644
--- a/src/servers/src/auth/user_provider.rs
+++ b/src/servers/src/auth/user_provider.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/servers/src/error.rs b/src/servers/src/error.rs
index 2208228949c7..fa790739a0ea 100644
--- a/src/servers/src/error.rs
+++ b/src/servers/src/error.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/servers/src/grpc.rs b/src/servers/src/grpc.rs
index cda3dc65c1e7..14a1f21ac9b4 100644
--- a/src/servers/src/grpc.rs
+++ b/src/servers/src/grpc.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/servers/src/grpc/handler.rs b/src/servers/src/grpc/handler.rs
index bd624c4cae3c..169782878854 100644
--- a/src/servers/src/grpc/handler.rs
+++ b/src/servers/src/grpc/handler.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/servers/src/http.rs b/src/servers/src/http.rs
index d054d756e366..559ef2f8f943 100644
--- a/src/servers/src/http.rs
+++ b/src/servers/src/http.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/servers/src/http/authorize.rs b/src/servers/src/http/authorize.rs
index b9dadaccdfb3..08353d280770 100644
--- a/src/servers/src/http/authorize.rs
+++ b/src/servers/src/http/authorize.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/servers/src/http/handler.rs b/src/servers/src/http/handler.rs
index 623d9cbe02c5..14ecf802844d 100644
--- a/src/servers/src/http/handler.rs
+++ b/src/servers/src/http/handler.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/servers/src/http/influxdb.rs b/src/servers/src/http/influxdb.rs
index b68cb3616f14..65d9dae55f43 100644
--- a/src/servers/src/http/influxdb.rs
+++ b/src/servers/src/http/influxdb.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/servers/src/http/opentsdb.rs b/src/servers/src/http/opentsdb.rs
index 62a20609909d..5bda9047d9ff 100644
--- a/src/servers/src/http/opentsdb.rs
+++ b/src/servers/src/http/opentsdb.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/servers/src/http/prometheus.rs b/src/servers/src/http/prometheus.rs
index 6b37d23fc212..90431f2707cc 100644
--- a/src/servers/src/http/prometheus.rs
+++ b/src/servers/src/http/prometheus.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/servers/src/http/script.rs b/src/servers/src/http/script.rs
index 3683f8aeb0ad..97b6341f09e3 100644
--- a/src/servers/src/http/script.rs
+++ b/src/servers/src/http/script.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/servers/src/influxdb.rs b/src/servers/src/influxdb.rs
index 722bf65a948c..d90a902894e0 100644
--- a/src/servers/src/influxdb.rs
+++ b/src/servers/src/influxdb.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/servers/src/interceptor.rs b/src/servers/src/interceptor.rs
index 3f105e7dd833..ccc4f468f708 100644
--- a/src/servers/src/interceptor.rs
+++ b/src/servers/src/interceptor.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/servers/src/lib.rs b/src/servers/src/lib.rs
index 7e80333a1f5a..9efba81a2974 100644
--- a/src/servers/src/lib.rs
+++ b/src/servers/src/lib.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/servers/src/line_writer.rs b/src/servers/src/line_writer.rs
index 211e720399f8..0c34d78a8abc 100644
--- a/src/servers/src/line_writer.rs
+++ b/src/servers/src/line_writer.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/servers/src/mysql.rs b/src/servers/src/mysql.rs
index f22f169a1c54..04059124f7d2 100644
--- a/src/servers/src/mysql.rs
+++ b/src/servers/src/mysql.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/servers/src/mysql/federated.rs b/src/servers/src/mysql/federated.rs
index d9e5d635af36..0cb225b97d50 100644
--- a/src/servers/src/mysql/federated.rs
+++ b/src/servers/src/mysql/federated.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/servers/src/mysql/handler.rs b/src/servers/src/mysql/handler.rs
index ae7de92ba134..c237ddc5d687 100644
--- a/src/servers/src/mysql/handler.rs
+++ b/src/servers/src/mysql/handler.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/servers/src/mysql/server.rs b/src/servers/src/mysql/server.rs
index 2a205094c7a4..e8df8583aec2 100644
--- a/src/servers/src/mysql/server.rs
+++ b/src/servers/src/mysql/server.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/servers/src/mysql/writer.rs b/src/servers/src/mysql/writer.rs
index 13bebfc91ec8..901e98d9e195 100644
--- a/src/servers/src/mysql/writer.rs
+++ b/src/servers/src/mysql/writer.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/servers/src/opentsdb.rs b/src/servers/src/opentsdb.rs
index a50c87907ecf..99504bb255d4 100644
--- a/src/servers/src/opentsdb.rs
+++ b/src/servers/src/opentsdb.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/servers/src/opentsdb/codec.rs b/src/servers/src/opentsdb/codec.rs
index a0b7a38b0d82..b7191a28c289 100644
--- a/src/servers/src/opentsdb/codec.rs
+++ b/src/servers/src/opentsdb/codec.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/servers/src/opentsdb/connection.rs b/src/servers/src/opentsdb/connection.rs
index a1657259fc59..1f3f0012fe74 100644
--- a/src/servers/src/opentsdb/connection.rs
+++ b/src/servers/src/opentsdb/connection.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/servers/src/opentsdb/handler.rs b/src/servers/src/opentsdb/handler.rs
index 471b784edc7e..f729b732122c 100644
--- a/src/servers/src/opentsdb/handler.rs
+++ b/src/servers/src/opentsdb/handler.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/servers/src/postgres.rs b/src/servers/src/postgres.rs
index 5b325ec3743c..4679b41da3b2 100644
--- a/src/servers/src/postgres.rs
+++ b/src/servers/src/postgres.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/servers/src/postgres/auth_handler.rs b/src/servers/src/postgres/auth_handler.rs
index 4fd7aad9e263..ba5d316013aa 100644
--- a/src/servers/src/postgres/auth_handler.rs
+++ b/src/servers/src/postgres/auth_handler.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/servers/src/postgres/handler.rs b/src/servers/src/postgres/handler.rs
index eda85452ba30..c09b96764422 100644
--- a/src/servers/src/postgres/handler.rs
+++ b/src/servers/src/postgres/handler.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/servers/src/postgres/server.rs b/src/servers/src/postgres/server.rs
index 6decbe9da654..1859c5d89697 100644
--- a/src/servers/src/postgres/server.rs
+++ b/src/servers/src/postgres/server.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/servers/src/prometheus.rs b/src/servers/src/prometheus.rs
index c82019e1b6e6..0ee66c3bd8fc 100644
--- a/src/servers/src/prometheus.rs
+++ b/src/servers/src/prometheus.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/servers/src/query_handler.rs b/src/servers/src/query_handler.rs
index 0e2e8d2cf806..13603b0eb197 100644
--- a/src/servers/src/query_handler.rs
+++ b/src/servers/src/query_handler.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/servers/src/server.rs b/src/servers/src/server.rs
index bce62845f923..e2a264c96e6e 100644
--- a/src/servers/src/server.rs
+++ b/src/servers/src/server.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/servers/src/shutdown.rs b/src/servers/src/shutdown.rs
index f31810ef0486..b69b3e4da415 100644
--- a/src/servers/src/shutdown.rs
+++ b/src/servers/src/shutdown.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/servers/src/tls.rs b/src/servers/src/tls.rs
index cee4b1fa9c22..906d685027ff 100644
--- a/src/servers/src/tls.rs
+++ b/src/servers/src/tls.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/servers/tests/http/http_handler_test.rs b/src/servers/tests/http/http_handler_test.rs
index 05ed54cfb178..09b80ef9d600 100644
--- a/src/servers/tests/http/http_handler_test.rs
+++ b/src/servers/tests/http/http_handler_test.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/servers/tests/http/influxdb_test.rs b/src/servers/tests/http/influxdb_test.rs
index aaac972a09da..e795eebcaf35 100644
--- a/src/servers/tests/http/influxdb_test.rs
+++ b/src/servers/tests/http/influxdb_test.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/servers/tests/http/mod.rs b/src/servers/tests/http/mod.rs
index 3ffba21daadc..4dea49b9fdf5 100644
--- a/src/servers/tests/http/mod.rs
+++ b/src/servers/tests/http/mod.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/servers/tests/http/opentsdb_test.rs b/src/servers/tests/http/opentsdb_test.rs
index 227093e56328..84e212341ea2 100644
--- a/src/servers/tests/http/opentsdb_test.rs
+++ b/src/servers/tests/http/opentsdb_test.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/servers/tests/http/prometheus_test.rs b/src/servers/tests/http/prometheus_test.rs
index 0d8e72f3c8c3..0a2fe1c8733e 100644
--- a/src/servers/tests/http/prometheus_test.rs
+++ b/src/servers/tests/http/prometheus_test.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/servers/tests/interceptor.rs b/src/servers/tests/interceptor.rs
index c1acd7c80854..3a072568a4f8 100644
--- a/src/servers/tests/interceptor.rs
+++ b/src/servers/tests/interceptor.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/servers/tests/mod.rs b/src/servers/tests/mod.rs
index 32a76385783f..7a2f6c5adc5c 100644
--- a/src/servers/tests/mod.rs
+++ b/src/servers/tests/mod.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/servers/tests/mysql/mod.rs b/src/servers/tests/mysql/mod.rs
index aba8699087a9..8da117cedcd7 100644
--- a/src/servers/tests/mysql/mod.rs
+++ b/src/servers/tests/mysql/mod.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/servers/tests/mysql/mysql_server_test.rs b/src/servers/tests/mysql/mysql_server_test.rs
index eca4f05b2d6e..27dfa27f2533 100644
--- a/src/servers/tests/mysql/mysql_server_test.rs
+++ b/src/servers/tests/mysql/mysql_server_test.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/servers/tests/mysql/mysql_writer_test.rs b/src/servers/tests/mysql/mysql_writer_test.rs
index 9d9d1060c8a7..9bedf72a3f89 100644
--- a/src/servers/tests/mysql/mysql_writer_test.rs
+++ b/src/servers/tests/mysql/mysql_writer_test.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/servers/tests/opentsdb.rs b/src/servers/tests/opentsdb.rs
index e9611703cd5c..aa3d9a5e7c27 100644
--- a/src/servers/tests/opentsdb.rs
+++ b/src/servers/tests/opentsdb.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/servers/tests/postgres/mod.rs b/src/servers/tests/postgres/mod.rs
index a6b4ab977a78..87594523845c 100644
--- a/src/servers/tests/postgres/mod.rs
+++ b/src/servers/tests/postgres/mod.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/session/src/context.rs b/src/session/src/context.rs
index 92d2cd894248..a1c2086d47be 100644
--- a/src/session/src/context.rs
+++ b/src/session/src/context.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/session/src/lib.rs b/src/session/src/lib.rs
index 2e6d4de73642..875c6769888b 100644
--- a/src/session/src/lib.rs
+++ b/src/session/src/lib.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/sql/src/ast.rs b/src/sql/src/ast.rs
index 7388b9453cc4..8f8acd597880 100644
--- a/src/sql/src/ast.rs
+++ b/src/sql/src/ast.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/sql/src/dialect.rs b/src/sql/src/dialect.rs
index 534f438c99a0..078b25d849e5 100644
--- a/src/sql/src/dialect.rs
+++ b/src/sql/src/dialect.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/sql/src/error.rs b/src/sql/src/error.rs
index a607a4273857..2e2508eadeaf 100644
--- a/src/sql/src/error.rs
+++ b/src/sql/src/error.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/sql/src/lib.rs b/src/sql/src/lib.rs
index 331496586a2d..2be9b3d31b60 100644
--- a/src/sql/src/lib.rs
+++ b/src/sql/src/lib.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/sql/src/parser.rs b/src/sql/src/parser.rs
index 63c28954c01f..7e0227283213 100644
--- a/src/sql/src/parser.rs
+++ b/src/sql/src/parser.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/sql/src/parsers.rs b/src/sql/src/parsers.rs
index 8feaa5020492..1b874bc6169a 100644
--- a/src/sql/src/parsers.rs
+++ b/src/sql/src/parsers.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/sql/src/parsers/alter_parser.rs b/src/sql/src/parsers/alter_parser.rs
index 7d614ee3ae10..06e97394c831 100644
--- a/src/sql/src/parsers/alter_parser.rs
+++ b/src/sql/src/parsers/alter_parser.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/sql/src/parsers/create_parser.rs b/src/sql/src/parsers/create_parser.rs
index 362963a4ddf1..8c83e47c5a8c 100644
--- a/src/sql/src/parsers/create_parser.rs
+++ b/src/sql/src/parsers/create_parser.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/sql/src/parsers/insert_parser.rs b/src/sql/src/parsers/insert_parser.rs
index 0f8c0b0aa7d6..3c40389b0cfc 100644
--- a/src/sql/src/parsers/insert_parser.rs
+++ b/src/sql/src/parsers/insert_parser.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/sql/src/parsers/query_parser.rs b/src/sql/src/parsers/query_parser.rs
index e75642c79a08..65b61c5638b8 100644
--- a/src/sql/src/parsers/query_parser.rs
+++ b/src/sql/src/parsers/query_parser.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/sql/src/statements.rs b/src/sql/src/statements.rs
index bbdc6fc5c60d..7ba078e5f1cb 100644
--- a/src/sql/src/statements.rs
+++ b/src/sql/src/statements.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/sql/src/statements/alter.rs b/src/sql/src/statements/alter.rs
index 372170bf7b11..059d2a41d1ff 100644
--- a/src/sql/src/statements/alter.rs
+++ b/src/sql/src/statements/alter.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/sql/src/statements/create.rs b/src/sql/src/statements/create.rs
index 01a7cb28cbbc..b2cb665e92cc 100644
--- a/src/sql/src/statements/create.rs
+++ b/src/sql/src/statements/create.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/sql/src/statements/describe.rs b/src/sql/src/statements/describe.rs
index 2695b6520518..a3fd0197a30a 100644
--- a/src/sql/src/statements/describe.rs
+++ b/src/sql/src/statements/describe.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/sql/src/statements/drop.rs b/src/sql/src/statements/drop.rs
index 5ea56d6e4cc2..72092e08b66f 100644
--- a/src/sql/src/statements/drop.rs
+++ b/src/sql/src/statements/drop.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/sql/src/statements/explain.rs b/src/sql/src/statements/explain.rs
index 01f9330ef3e5..82b757d193fa 100644
--- a/src/sql/src/statements/explain.rs
+++ b/src/sql/src/statements/explain.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/sql/src/statements/insert.rs b/src/sql/src/statements/insert.rs
index 56fa98aaba4d..50b8518e381c 100644
--- a/src/sql/src/statements/insert.rs
+++ b/src/sql/src/statements/insert.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/sql/src/statements/query.rs b/src/sql/src/statements/query.rs
index 64e115f1353e..640b16324ee0 100644
--- a/src/sql/src/statements/query.rs
+++ b/src/sql/src/statements/query.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/sql/src/statements/show.rs b/src/sql/src/statements/show.rs
index a137c3e7b144..0aca303cbb15 100644
--- a/src/sql/src/statements/show.rs
+++ b/src/sql/src/statements/show.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/sql/src/statements/statement.rs b/src/sql/src/statements/statement.rs
index e1c8d731bb90..325c67d3f858 100644
--- a/src/sql/src/statements/statement.rs
+++ b/src/sql/src/statements/statement.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/sql/src/statements/statement_query.rs b/src/sql/src/statements/statement_query.rs
index 2d530f10d359..b54d4df2704a 100644
--- a/src/sql/src/statements/statement_query.rs
+++ b/src/sql/src/statements/statement_query.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/benches/bench_main.rs b/src/storage/benches/bench_main.rs
index 3590d8de032c..6d89eea3a6e6 100644
--- a/src/storage/benches/bench_main.rs
+++ b/src/storage/benches/bench_main.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/benches/memtable/bench_memtable_read.rs b/src/storage/benches/memtable/bench_memtable_read.rs
index 4ca030f50e29..ad73708afe1d 100644
--- a/src/storage/benches/memtable/bench_memtable_read.rs
+++ b/src/storage/benches/memtable/bench_memtable_read.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/benches/memtable/bench_memtable_read_write_ratio.rs b/src/storage/benches/memtable/bench_memtable_read_write_ratio.rs
index ec3f41774166..eda89e13e51b 100644
--- a/src/storage/benches/memtable/bench_memtable_read_write_ratio.rs
+++ b/src/storage/benches/memtable/bench_memtable_read_write_ratio.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/benches/memtable/bench_memtable_write.rs b/src/storage/benches/memtable/bench_memtable_write.rs
index 39fc1bbde782..114cb9677dab 100644
--- a/src/storage/benches/memtable/bench_memtable_write.rs
+++ b/src/storage/benches/memtable/bench_memtable_write.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/benches/memtable/mod.rs b/src/storage/benches/memtable/mod.rs
index eb12b11ab079..a53f26cd4add 100644
--- a/src/storage/benches/memtable/mod.rs
+++ b/src/storage/benches/memtable/mod.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/benches/memtable/util/bench_context.rs b/src/storage/benches/memtable/util/bench_context.rs
index 6d36f5abdd44..9ecf1dd871e6 100644
--- a/src/storage/benches/memtable/util/bench_context.rs
+++ b/src/storage/benches/memtable/util/bench_context.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/benches/memtable/util/mod.rs b/src/storage/benches/memtable/util/mod.rs
index 62b3ee8c3d96..3b55a6fd8bb9 100644
--- a/src/storage/benches/memtable/util/mod.rs
+++ b/src/storage/benches/memtable/util/mod.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/benches/memtable/util/regiondesc_util.rs b/src/storage/benches/memtable/util/regiondesc_util.rs
index e8f71c71bd34..b5c4a218e71c 100644
--- a/src/storage/benches/memtable/util/regiondesc_util.rs
+++ b/src/storage/benches/memtable/util/regiondesc_util.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/benches/memtable/util/schema_util.rs b/src/storage/benches/memtable/util/schema_util.rs
index 0522bae825cc..f20e74d966b3 100644
--- a/src/storage/benches/memtable/util/schema_util.rs
+++ b/src/storage/benches/memtable/util/schema_util.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/benches/wal/bench_decode.rs b/src/storage/benches/wal/bench_decode.rs
index 911836061243..9af1e624efcd 100644
--- a/src/storage/benches/wal/bench_decode.rs
+++ b/src/storage/benches/wal/bench_decode.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/benches/wal/bench_encode.rs b/src/storage/benches/wal/bench_encode.rs
index b1897e85e9f5..4cc8ca5f2554 100644
--- a/src/storage/benches/wal/bench_encode.rs
+++ b/src/storage/benches/wal/bench_encode.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/benches/wal/bench_wal.rs b/src/storage/benches/wal/bench_wal.rs
index 0499f6ae4bab..a7762c6c2cae 100644
--- a/src/storage/benches/wal/bench_wal.rs
+++ b/src/storage/benches/wal/bench_wal.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/benches/wal/mod.rs b/src/storage/benches/wal/mod.rs
index 7966e66e0fcf..55f04ce477f6 100644
--- a/src/storage/benches/wal/mod.rs
+++ b/src/storage/benches/wal/mod.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/benches/wal/util/mod.rs b/src/storage/benches/wal/util/mod.rs
index d3b33f440152..36c8be432c02 100644
--- a/src/storage/benches/wal/util/mod.rs
+++ b/src/storage/benches/wal/util/mod.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/benches/wal/util/write_batch_util.rs b/src/storage/benches/wal/util/write_batch_util.rs
index 22f183511ff7..640138c3e51a 100644
--- a/src/storage/benches/wal/util/write_batch_util.rs
+++ b/src/storage/benches/wal/util/write_batch_util.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/build.rs b/src/storage/build.rs
index b8140ee3955c..1e5230ecc4be 100644
--- a/src/storage/build.rs
+++ b/src/storage/build.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/src/background.rs b/src/storage/src/background.rs
index 015c41382471..2c04fd5c8366 100644
--- a/src/storage/src/background.rs
+++ b/src/storage/src/background.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/src/chunk.rs b/src/storage/src/chunk.rs
index f39f2db9c6ff..06b6ab402555 100644
--- a/src/storage/src/chunk.rs
+++ b/src/storage/src/chunk.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/src/codec.rs b/src/storage/src/codec.rs
index 8dfe29482aa6..b0c34605cd70 100644
--- a/src/storage/src/codec.rs
+++ b/src/storage/src/codec.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/src/config.rs b/src/storage/src/config.rs
index 3522790c3ec8..1f6bf31efc3f 100644
--- a/src/storage/src/config.rs
+++ b/src/storage/src/config.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/src/engine.rs b/src/storage/src/engine.rs
index ca31701342b8..8664facc9bdf 100644
--- a/src/storage/src/engine.rs
+++ b/src/storage/src/engine.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/src/error.rs b/src/storage/src/error.rs
index bac2851052e4..197d5fc1d9fe 100644
--- a/src/storage/src/error.rs
+++ b/src/storage/src/error.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/src/flush.rs b/src/storage/src/flush.rs
index bc8fa292af40..e0a2c45feda4 100644
--- a/src/storage/src/flush.rs
+++ b/src/storage/src/flush.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/src/lib.rs b/src/storage/src/lib.rs
index 8e58817490bb..2d6d2bc51461 100644
--- a/src/storage/src/lib.rs
+++ b/src/storage/src/lib.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/src/manifest.rs b/src/storage/src/manifest.rs
index c9c740e18420..76d57c9948c7 100644
--- a/src/storage/src/manifest.rs
+++ b/src/storage/src/manifest.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/src/manifest/action.rs b/src/storage/src/manifest/action.rs
index 690fe679c44f..1d56464e9c37 100644
--- a/src/storage/src/manifest/action.rs
+++ b/src/storage/src/manifest/action.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/src/manifest/helper.rs b/src/storage/src/manifest/helper.rs
index 1c68d1086299..8f7943f0b951 100644
--- a/src/storage/src/manifest/helper.rs
+++ b/src/storage/src/manifest/helper.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/src/manifest/impl_.rs b/src/storage/src/manifest/impl_.rs
index 27947964336d..c69015cda39f 100644
--- a/src/storage/src/manifest/impl_.rs
+++ b/src/storage/src/manifest/impl_.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/src/manifest/region.rs b/src/storage/src/manifest/region.rs
index b56cd6aae87b..5831ac6f2f4f 100644
--- a/src/storage/src/manifest/region.rs
+++ b/src/storage/src/manifest/region.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/src/manifest/storage.rs b/src/storage/src/manifest/storage.rs
index 8e217e95ced1..04634bcd574f 100644
--- a/src/storage/src/manifest/storage.rs
+++ b/src/storage/src/manifest/storage.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/src/manifest/test_utils.rs b/src/storage/src/manifest/test_utils.rs
index ab3fc7bb4994..7e7d1e180989 100644
--- a/src/storage/src/manifest/test_utils.rs
+++ b/src/storage/src/manifest/test_utils.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/src/memtable.rs b/src/storage/src/memtable.rs
index 101e10544f4d..b6debb1f21d4 100644
--- a/src/storage/src/memtable.rs
+++ b/src/storage/src/memtable.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/src/memtable/btree.rs b/src/storage/src/memtable/btree.rs
index e1da00a33d73..bb7a57a2df0e 100644
--- a/src/storage/src/memtable/btree.rs
+++ b/src/storage/src/memtable/btree.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/src/memtable/inserter.rs b/src/storage/src/memtable/inserter.rs
index 8fbbd504e12f..100a84ff5797 100644
--- a/src/storage/src/memtable/inserter.rs
+++ b/src/storage/src/memtable/inserter.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/src/memtable/tests.rs b/src/storage/src/memtable/tests.rs
index f4eef66c173d..c46e864bacfa 100644
--- a/src/storage/src/memtable/tests.rs
+++ b/src/storage/src/memtable/tests.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/src/memtable/version.rs b/src/storage/src/memtable/version.rs
index cce626677c66..895566f5e176 100644
--- a/src/storage/src/memtable/version.rs
+++ b/src/storage/src/memtable/version.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/src/metadata.rs b/src/storage/src/metadata.rs
index af31388a2275..98154e2cca23 100644
--- a/src/storage/src/metadata.rs
+++ b/src/storage/src/metadata.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/src/proto.rs b/src/storage/src/proto.rs
index 72e922055412..7f85132d1835 100644
--- a/src/storage/src/proto.rs
+++ b/src/storage/src/proto.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/src/proto/wal.rs b/src/storage/src/proto/wal.rs
index 2a7faebb1082..56a0cd4f7e9e 100644
--- a/src/storage/src/proto/wal.rs
+++ b/src/storage/src/proto/wal.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/src/read.rs b/src/storage/src/read.rs
index f625b53be267..fad5b613d6ff 100644
--- a/src/storage/src/read.rs
+++ b/src/storage/src/read.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/src/read/dedup.rs b/src/storage/src/read/dedup.rs
index ab8485a9102f..e26da6560945 100644
--- a/src/storage/src/read/dedup.rs
+++ b/src/storage/src/read/dedup.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/src/read/merge.rs b/src/storage/src/read/merge.rs
index ab973e9fbc53..205e71cef192 100644
--- a/src/storage/src/read/merge.rs
+++ b/src/storage/src/read/merge.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/src/region.rs b/src/storage/src/region.rs
index 5c8f2cd85123..f471db9d70e7 100644
--- a/src/storage/src/region.rs
+++ b/src/storage/src/region.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/src/region/tests.rs b/src/storage/src/region/tests.rs
index df8ef81ee6ba..54b584f795d7 100644
--- a/src/storage/src/region/tests.rs
+++ b/src/storage/src/region/tests.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/src/region/tests/alter.rs b/src/storage/src/region/tests/alter.rs
index 238f2bd09471..175fd2f56820 100644
--- a/src/storage/src/region/tests/alter.rs
+++ b/src/storage/src/region/tests/alter.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/src/region/tests/basic.rs b/src/storage/src/region/tests/basic.rs
index c2ccc830c460..18b6bd19d4b3 100644
--- a/src/storage/src/region/tests/basic.rs
+++ b/src/storage/src/region/tests/basic.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/src/region/tests/flush.rs b/src/storage/src/region/tests/flush.rs
index eadb4d5b5b4d..2f152c4c3f03 100644
--- a/src/storage/src/region/tests/flush.rs
+++ b/src/storage/src/region/tests/flush.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/src/region/tests/projection.rs b/src/storage/src/region/tests/projection.rs
index 837f418cdf8d..1c3c331aa30d 100644
--- a/src/storage/src/region/tests/projection.rs
+++ b/src/storage/src/region/tests/projection.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/src/region/writer.rs b/src/storage/src/region/writer.rs
index f40f53fa325b..3b53ed421af9 100644
--- a/src/storage/src/region/writer.rs
+++ b/src/storage/src/region/writer.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/src/schema.rs b/src/storage/src/schema.rs
index dcec7ef1d19c..4d7aabc2f64a 100644
--- a/src/storage/src/schema.rs
+++ b/src/storage/src/schema.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/src/schema/compat.rs b/src/storage/src/schema/compat.rs
index ae3f6e4ce139..5a4a3fa2cc2e 100644
--- a/src/storage/src/schema/compat.rs
+++ b/src/storage/src/schema/compat.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/src/schema/projected.rs b/src/storage/src/schema/projected.rs
index 26bf8632a888..9878a90f30bf 100644
--- a/src/storage/src/schema/projected.rs
+++ b/src/storage/src/schema/projected.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/src/schema/region.rs b/src/storage/src/schema/region.rs
index 3b1d51203514..0b3625ff0aed 100644
--- a/src/storage/src/schema/region.rs
+++ b/src/storage/src/schema/region.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/src/schema/store.rs b/src/storage/src/schema/store.rs
index c91858d0926a..d8671602bf1e 100644
--- a/src/storage/src/schema/store.rs
+++ b/src/storage/src/schema/store.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/src/snapshot.rs b/src/storage/src/snapshot.rs
index 32d030ba8e12..2d8323531393 100644
--- a/src/storage/src/snapshot.rs
+++ b/src/storage/src/snapshot.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/src/sst.rs b/src/storage/src/sst.rs
index afe1c10fdff7..3f317b2eb1e2 100644
--- a/src/storage/src/sst.rs
+++ b/src/storage/src/sst.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/src/sst/parquet.rs b/src/storage/src/sst/parquet.rs
index 5d1531ab4190..d48306d97fdd 100644
--- a/src/storage/src/sst/parquet.rs
+++ b/src/storage/src/sst/parquet.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/src/sync.rs b/src/storage/src/sync.rs
index 5f1164c1468c..6e71b616fe9f 100644
--- a/src/storage/src/sync.rs
+++ b/src/storage/src/sync.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/src/test_util.rs b/src/storage/src/test_util.rs
index a24f7b2430f2..8952408fdf5b 100644
--- a/src/storage/src/test_util.rs
+++ b/src/storage/src/test_util.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/src/test_util/config_util.rs b/src/storage/src/test_util/config_util.rs
index 41d994a5b870..ed8b0f11ab77 100644
--- a/src/storage/src/test_util/config_util.rs
+++ b/src/storage/src/test_util/config_util.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/src/test_util/descriptor_util.rs b/src/storage/src/test_util/descriptor_util.rs
index 21e5a966f72e..244050165775 100644
--- a/src/storage/src/test_util/descriptor_util.rs
+++ b/src/storage/src/test_util/descriptor_util.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/src/test_util/read_util.rs b/src/storage/src/test_util/read_util.rs
index fe231de8ae93..595bc3c77fbe 100644
--- a/src/storage/src/test_util/read_util.rs
+++ b/src/storage/src/test_util/read_util.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/src/test_util/schema_util.rs b/src/storage/src/test_util/schema_util.rs
index 61b4621f14a3..ee3b2d2ec99b 100644
--- a/src/storage/src/test_util/schema_util.rs
+++ b/src/storage/src/test_util/schema_util.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/src/test_util/write_batch_util.rs b/src/storage/src/test_util/write_batch_util.rs
index 41600612efd8..596e4228f193 100644
--- a/src/storage/src/test_util/write_batch_util.rs
+++ b/src/storage/src/test_util/write_batch_util.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/src/version.rs b/src/storage/src/version.rs
index 67b43fef485c..f0660ed7d383 100644
--- a/src/storage/src/version.rs
+++ b/src/storage/src/version.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/src/wal.rs b/src/storage/src/wal.rs
index 83f54e293b22..6b46317ad9f6 100644
--- a/src/storage/src/wal.rs
+++ b/src/storage/src/wal.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/src/write_batch.rs b/src/storage/src/write_batch.rs
index e43051470fe6..ec0a0b0aa805 100644
--- a/src/storage/src/write_batch.rs
+++ b/src/storage/src/write_batch.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/src/write_batch/codec.rs b/src/storage/src/write_batch/codec.rs
index 74ea9a50e241..1db604d7f628 100644
--- a/src/storage/src/write_batch/codec.rs
+++ b/src/storage/src/write_batch/codec.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/storage/src/write_batch/compat.rs b/src/storage/src/write_batch/compat.rs
index dbb2bff4de95..b339076d2589 100644
--- a/src/storage/src/write_batch/compat.rs
+++ b/src/storage/src/write_batch/compat.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/store-api/src/lib.rs b/src/store-api/src/lib.rs
index 080e87a83ae9..334ee96c4091 100644
--- a/src/store-api/src/lib.rs
+++ b/src/store-api/src/lib.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/store-api/src/logstore.rs b/src/store-api/src/logstore.rs
index 80be436ea0fc..918d8cf3bdc6 100644
--- a/src/store-api/src/logstore.rs
+++ b/src/store-api/src/logstore.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/store-api/src/logstore/entry.rs b/src/store-api/src/logstore/entry.rs
index 3fa7aad06d22..390932980d18 100644
--- a/src/store-api/src/logstore/entry.rs
+++ b/src/store-api/src/logstore/entry.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/store-api/src/logstore/entry_stream.rs b/src/store-api/src/logstore/entry_stream.rs
index ac8c574bb1a2..161dd10a015f 100644
--- a/src/store-api/src/logstore/entry_stream.rs
+++ b/src/store-api/src/logstore/entry_stream.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/store-api/src/logstore/namespace.rs b/src/store-api/src/logstore/namespace.rs
index d056939c95ac..165006fbbd6b 100644
--- a/src/store-api/src/logstore/namespace.rs
+++ b/src/store-api/src/logstore/namespace.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/store-api/src/manifest.rs b/src/store-api/src/manifest.rs
index 105c1ada95b3..e6b453df5134 100644
--- a/src/store-api/src/manifest.rs
+++ b/src/store-api/src/manifest.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/store-api/src/manifest/action.rs b/src/store-api/src/manifest/action.rs
index a777004dfa2f..bbcce9091dec 100644
--- a/src/store-api/src/manifest/action.rs
+++ b/src/store-api/src/manifest/action.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/store-api/src/manifest/storage.rs b/src/store-api/src/manifest/storage.rs
index 5150fc6d6adb..93ee056c03a1 100644
--- a/src/store-api/src/manifest/storage.rs
+++ b/src/store-api/src/manifest/storage.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/store-api/src/storage.rs b/src/store-api/src/storage.rs
index bea711ea6552..10b14d37a97a 100644
--- a/src/store-api/src/storage.rs
+++ b/src/store-api/src/storage.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/store-api/src/storage/chunk.rs b/src/store-api/src/storage/chunk.rs
index 32fedc2df1dc..db6b46ad94b7 100644
--- a/src/store-api/src/storage/chunk.rs
+++ b/src/store-api/src/storage/chunk.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/store-api/src/storage/consts.rs b/src/store-api/src/storage/consts.rs
index 68352b14ca50..0844a2a951c4 100644
--- a/src/store-api/src/storage/consts.rs
+++ b/src/store-api/src/storage/consts.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/store-api/src/storage/descriptors.rs b/src/store-api/src/storage/descriptors.rs
index b31159b1bddd..86427a22e0dc 100644
--- a/src/store-api/src/storage/descriptors.rs
+++ b/src/store-api/src/storage/descriptors.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/store-api/src/storage/engine.rs b/src/store-api/src/storage/engine.rs
index 651c422cb5fd..01c0cb8cbeaf 100644
--- a/src/store-api/src/storage/engine.rs
+++ b/src/store-api/src/storage/engine.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/store-api/src/storage/metadata.rs b/src/store-api/src/storage/metadata.rs
index 67dfb30f5b25..8221ca3458ae 100644
--- a/src/store-api/src/storage/metadata.rs
+++ b/src/store-api/src/storage/metadata.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/store-api/src/storage/region.rs b/src/store-api/src/storage/region.rs
index 236b6681d46f..a8a732469b23 100644
--- a/src/store-api/src/storage/region.rs
+++ b/src/store-api/src/storage/region.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/store-api/src/storage/requests.rs b/src/store-api/src/storage/requests.rs
index 7df6c88be00b..30260aac760b 100644
--- a/src/store-api/src/storage/requests.rs
+++ b/src/store-api/src/storage/requests.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/store-api/src/storage/responses.rs b/src/store-api/src/storage/responses.rs
index d5fe32669db4..7a226670ccc6 100644
--- a/src/store-api/src/storage/responses.rs
+++ b/src/store-api/src/storage/responses.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/store-api/src/storage/snapshot.rs b/src/store-api/src/storage/snapshot.rs
index 0532c9948395..9bf5692b5af9 100644
--- a/src/store-api/src/storage/snapshot.rs
+++ b/src/store-api/src/storage/snapshot.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/store-api/src/storage/types.rs b/src/store-api/src/storage/types.rs
index ed8d3c42229d..f8336edc4295 100644
--- a/src/store-api/src/storage/types.rs
+++ b/src/store-api/src/storage/types.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/table/src/engine.rs b/src/table/src/engine.rs
index 23e2e4e7ba4b..75980e7d2a7f 100644
--- a/src/table/src/engine.rs
+++ b/src/table/src/engine.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/table/src/error.rs b/src/table/src/error.rs
index eba0a5269338..0b1b424e86c0 100644
--- a/src/table/src/error.rs
+++ b/src/table/src/error.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/table/src/lib.rs b/src/table/src/lib.rs
index 9c86665ff69f..eb49686c9c16 100644
--- a/src/table/src/lib.rs
+++ b/src/table/src/lib.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/table/src/metadata.rs b/src/table/src/metadata.rs
index 3ae85784dc33..2c665ecc2aa3 100644
--- a/src/table/src/metadata.rs
+++ b/src/table/src/metadata.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/table/src/predicate.rs b/src/table/src/predicate.rs
index 6e61415cbe9f..d4537e775b29 100644
--- a/src/table/src/predicate.rs
+++ b/src/table/src/predicate.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/table/src/predicate/stats.rs b/src/table/src/predicate/stats.rs
index f092cd5418fc..4d707a82e9cf 100644
--- a/src/table/src/predicate/stats.rs
+++ b/src/table/src/predicate/stats.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/table/src/requests.rs b/src/table/src/requests.rs
index 9d5e877aad61..ed72346f1f3b 100644
--- a/src/table/src/requests.rs
+++ b/src/table/src/requests.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/table/src/table.rs b/src/table/src/table.rs
index d5efd33b2e04..2ee2d45cbf1d 100644
--- a/src/table/src/table.rs
+++ b/src/table/src/table.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/table/src/table/adapter.rs b/src/table/src/table/adapter.rs
index d9eb5a41e1ca..5ed7ffd36af4 100644
--- a/src/table/src/table/adapter.rs
+++ b/src/table/src/table/adapter.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/table/src/table/numbers.rs b/src/table/src/table/numbers.rs
index 455199023ac3..d57e10cc2978 100644
--- a/src/table/src/table/numbers.rs
+++ b/src/table/src/table/numbers.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/table/src/table/scan.rs b/src/table/src/table/scan.rs
index b9078befa8aa..fb6f312cc486 100644
--- a/src/table/src/table/scan.rs
+++ b/src/table/src/table/scan.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/table/src/test_util.rs b/src/table/src/test_util.rs
index 0cadbf689101..59737db02f7d 100644
--- a/src/table/src/test_util.rs
+++ b/src/table/src/test_util.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/table/src/test_util/empty_table.rs b/src/table/src/test_util/empty_table.rs
index e9f085000e04..e194b0157b2d 100644
--- a/src/table/src/test_util/empty_table.rs
+++ b/src/table/src/test_util/empty_table.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/table/src/test_util/memtable.rs b/src/table/src/test_util/memtable.rs
index af92f40eb941..c1fb41d5d803 100644
--- a/src/table/src/test_util/memtable.rs
+++ b/src/table/src/test_util/memtable.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/src/table/src/test_util/mock_engine.rs b/src/table/src/test_util/mock_engine.rs
index 916b46060574..ddd5b32b6d74 100644
--- a/src/table/src/test_util/mock_engine.rs
+++ b/src/table/src/test_util/mock_engine.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/tests-integration/src/lib.rs b/tests-integration/src/lib.rs
index 1bfde512a8b3..423bc7f30040 100644
--- a/tests-integration/src/lib.rs
+++ b/tests-integration/src/lib.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/tests-integration/src/test_util.rs b/tests-integration/src/test_util.rs
index 4677305bfd7e..bf244f0162e6 100644
--- a/tests-integration/src/test_util.rs
+++ b/tests-integration/src/test_util.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/tests-integration/tests/grpc.rs b/tests-integration/tests/grpc.rs
index 19987d2be258..d7cd59e853c6 100644
--- a/tests-integration/tests/grpc.rs
+++ b/tests-integration/tests/grpc.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/tests-integration/tests/http.rs b/tests-integration/tests/http.rs
index e85ed738ef03..f508863df404 100644
--- a/tests-integration/tests/http.rs
+++ b/tests-integration/tests/http.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/tests-integration/tests/main.rs b/tests-integration/tests/main.rs
index b664e03ad913..4f76d51aab02 100644
--- a/tests-integration/tests/main.rs
+++ b/tests-integration/tests/main.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/tests/runner/src/env.rs b/tests/runner/src/env.rs
index 38bb23ed527e..652568840b80 100644
--- a/tests/runner/src/env.rs
+++ b/tests/runner/src/env.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/tests/runner/src/main.rs b/tests/runner/src/main.rs
index 696cbf1d7c65..0cb01a122a69 100644
--- a/tests/runner/src/main.rs
+++ b/tests/runner/src/main.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/tests/runner/src/util.rs b/tests/runner/src/util.rs
index e3c1ff00d502..47cb84eb3521 100644
--- a/tests/runner/src/util.rs
+++ b/tests/runner/src/util.rs
@@ -1,10 +1,10 @@
-// Copyright 2022 Greptime Team
+// Copyright 2023 Greptime Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
|
fix
|
license header (#815)
|
7942b8fae967386e5a06a416a023af5689511b1d
|
2024-02-28 09:07:19
|
Lei, HUANG
|
chore: add metrics for memtable read path (#3397)
| false
|
diff --git a/src/mito2/src/memtable/merge_tree/data.rs b/src/mito2/src/memtable/merge_tree/data.rs
index 82bf5c8440d0..e3214ee71abc 100644
--- a/src/mito2/src/memtable/merge_tree/data.rs
+++ b/src/mito2/src/memtable/merge_tree/data.rs
@@ -18,6 +18,7 @@ use std::cmp::{Ordering, Reverse};
use std::fmt::{Debug, Formatter};
use std::ops::Range;
use std::sync::Arc;
+use std::time::{Duration, Instant};
use bytes::Bytes;
use datatypes::arrow;
@@ -46,6 +47,7 @@ use crate::error::Result;
use crate::memtable::key_values::KeyValue;
use crate::memtable::merge_tree::merger::{DataBatchKey, DataNode, DataSource, Merger};
use crate::memtable::merge_tree::PkIndex;
+use crate::metrics::{MERGE_TREE_DATA_BUFFER_FREEZE_STAGE_ELAPSED, MERGE_TREE_READ_STAGE_ELAPSED};
const PK_INDEX_COLUMN_NAME: &str = "__pk_index";
@@ -255,16 +257,21 @@ impl DataBuffer {
/// If pk_weights is present, yielded rows are sorted according to weights,
/// otherwise rows are sorted by "pk_weights" values as they are actually weights.
pub fn read(&self, pk_weights: Option<&[u16]>) -> Result<DataBufferReader> {
- let batch = read_data_buffer_to_record_batches(
- self.data_part_schema.clone(),
- self,
- pk_weights,
- self.dedup,
- // replace_pk_index is always set to false since:
- // - for DataBuffer in ShardBuilder, pk dict is not frozen
- // - for DataBuffer in Shard, values in pk_index column has already been replaced during `freeze`.
- false,
- )?;
+ let batch = {
+ let _timer = MERGE_TREE_READ_STAGE_ELAPSED
+ .with_label_values(&["read_data_buffer_to_batch"])
+ .start_timer();
+ read_data_buffer_to_record_batches(
+ self.data_part_schema.clone(),
+ self,
+ pk_weights,
+ self.dedup,
+ // replace_pk_index is always set to false since:
+ // - for DataBuffer in ShardBuilder, pk dict is not frozen
+ // - for DataBuffer in Shard, values in pk_index column has already been replaced during `freeze`.
+ false,
+ )?
+ };
DataBufferReader::new(batch)
}
@@ -493,6 +500,15 @@ pub(crate) struct DataBufferReader {
batch: RecordBatch,
offset: usize,
current_range: Option<DataBatchRange>,
+ elapsed_time: Duration,
+}
+
+impl Drop for DataBufferReader {
+ fn drop(&mut self) {
+ MERGE_TREE_READ_STAGE_ELAPSED
+ .with_label_values(&["read_data_buffer"])
+ .observe(self.elapsed_time.as_secs_f64())
+ }
}
impl DataBufferReader {
@@ -501,6 +517,7 @@ impl DataBufferReader {
batch,
offset: 0,
current_range: None,
+ elapsed_time: Duration::default(),
};
reader.next()?; // fill data batch for comparison and merge.
Ok(reader)
@@ -527,6 +544,7 @@ impl DataBufferReader {
self.current_range = None;
return Ok(());
}
+ let start = Instant::now();
let pk_index_array = pk_index_array(&self.batch);
if let Some((next_pk, range)) = search_next_pk_range(pk_index_array, self.offset) {
self.offset = range.end;
@@ -538,6 +556,7 @@ impl DataBufferReader {
} else {
self.current_range = None;
}
+ self.elapsed_time += start.elapsed();
Ok(())
}
}
@@ -741,18 +760,30 @@ impl<'a> DataPartEncoder<'a> {
pub fn write(self, source: &mut DataBuffer) -> Result<DataPart> {
let mut bytes = Vec::with_capacity(1024);
- let rb = drain_data_buffer_to_record_batches(
- self.schema.clone(),
- source,
- self.pk_weights,
- self.dedup,
- self.replace_pk_index,
- )?;
- let mut writer =
- ArrowWriter::try_new(&mut bytes, self.schema.clone(), Some(self.writer_props()))
- .context(error::EncodeMemtableSnafu)?;
- writer.write(&rb).context(error::EncodeMemtableSnafu)?;
- let _metadata = writer.close().context(error::EncodeMemtableSnafu)?;
+
+ let rb = {
+ let _timer = MERGE_TREE_DATA_BUFFER_FREEZE_STAGE_ELAPSED
+ .with_label_values(&["drain_data_buffer_to_batch"])
+ .start_timer();
+ drain_data_buffer_to_record_batches(
+ self.schema.clone(),
+ source,
+ self.pk_weights,
+ self.dedup,
+ self.replace_pk_index,
+ )?
+ };
+
+ {
+ let _timer = MERGE_TREE_DATA_BUFFER_FREEZE_STAGE_ELAPSED
+ .with_label_values(&["encode"])
+ .start_timer();
+ let mut writer =
+ ArrowWriter::try_new(&mut bytes, self.schema.clone(), Some(self.writer_props()))
+ .context(error::EncodeMemtableSnafu)?;
+ writer.write(&rb).context(error::EncodeMemtableSnafu)?;
+ let _metadata = writer.close().context(error::EncodeMemtableSnafu)?;
+ }
Ok(DataPart::Parquet(ParquetPart {
data: Bytes::from(bytes),
}))
@@ -783,6 +814,15 @@ pub struct DataPartReader {
inner: ParquetRecordBatchReader,
current_batch: Option<RecordBatch>,
current_range: Option<DataBatchRange>,
+ elapsed: Duration,
+}
+
+impl Drop for DataPartReader {
+ fn drop(&mut self) {
+ MERGE_TREE_READ_STAGE_ELAPSED
+ .with_label_values(&["read_data_part"])
+ .observe(self.elapsed.as_secs_f64());
+ }
}
impl Debug for DataPartReader {
@@ -805,6 +845,7 @@ impl DataPartReader {
inner: parquet_reader,
current_batch: None,
current_range: None,
+ elapsed: Default::default(),
};
reader.next()?;
Ok(reader)
@@ -827,6 +868,7 @@ impl DataPartReader {
}
pub(crate) fn next(&mut self) -> Result<()> {
+ let start = Instant::now();
if let Some((next_pk, range)) = self.search_next_pk_range() {
// first try to search next pk in current record batch.
self.current_range = Some(DataBatchRange {
@@ -847,7 +889,7 @@ impl DataPartReader {
self.current_range = None;
}
}
-
+ self.elapsed += start.elapsed();
Ok(())
}
@@ -901,6 +943,10 @@ impl DataParts {
/// The returned iterator yields a record batch of one primary key at a time.
/// The order of yielding primary keys is determined by provided weights.
pub fn read(&self) -> Result<DataPartsReader> {
+ let _timer = MERGE_TREE_READ_STAGE_ELAPSED
+ .with_label_values(&["build_data_parts_reader"])
+ .start_timer();
+
let mut nodes = Vec::with_capacity(self.frozen.len() + 1);
nodes.push(DataNode::new(DataSource::Buffer(
// `DataPars::read` ensures that all pk_index inside `DataBuffer` are replaced by weights.
@@ -911,7 +957,10 @@ impl DataParts {
nodes.push(DataNode::new(DataSource::Part(p.read()?)));
}
let merger = Merger::try_new(nodes)?;
- Ok(DataPartsReader { merger })
+ Ok(DataPartsReader {
+ merger,
+ elapsed: Default::default(),
+ })
}
pub(crate) fn is_empty(&self) -> bool {
@@ -922,6 +971,15 @@ impl DataParts {
/// Reader for all parts inside a `DataParts`.
pub struct DataPartsReader {
merger: Merger<DataNode>,
+ elapsed: Duration,
+}
+
+impl Drop for DataPartsReader {
+ fn drop(&mut self) {
+ MERGE_TREE_READ_STAGE_ELAPSED
+ .with_label_values(&["read_data_parts"])
+ .observe(self.elapsed.as_secs_f64())
+ }
}
impl DataPartsReader {
@@ -931,7 +989,10 @@ impl DataPartsReader {
}
pub(crate) fn next(&mut self) -> Result<()> {
- self.merger.next()
+ let start = Instant::now();
+ let result = self.merger.next();
+ self.elapsed += start.elapsed();
+ result
}
pub(crate) fn is_valid(&self) -> bool {
diff --git a/src/mito2/src/memtable/merge_tree/partition.rs b/src/mito2/src/memtable/merge_tree/partition.rs
index e32381c4f5a7..28df85ba7aa0 100644
--- a/src/mito2/src/memtable/merge_tree/partition.rs
+++ b/src/mito2/src/memtable/merge_tree/partition.rs
@@ -18,9 +18,11 @@
use std::collections::{HashMap, HashSet};
use std::sync::{Arc, RwLock};
+use std::time::{Duration, Instant};
use api::v1::SemanticType;
use common_recordbatch::filter::SimpleFilterEvaluator;
+use common_telemetry::tracing::log;
use store_api::metadata::RegionMetadataRef;
use store_api::metric_engine_consts::DATA_SCHEMA_TABLE_ID_COLUMN_NAME;
use store_api::storage::ColumnId;
@@ -35,6 +37,7 @@ use crate::memtable::merge_tree::shard::{
};
use crate::memtable::merge_tree::shard_builder::ShardBuilder;
use crate::memtable::merge_tree::{MergeTreeConfig, PkId};
+use crate::metrics::MERGE_TREE_READ_STAGE_ELAPSED;
use crate::read::{Batch, BatchBuilder};
use crate::row_converter::{McmpRowCodec, RowCodec};
@@ -110,8 +113,8 @@ impl Partition {
let inner = self.inner.read().unwrap();
let mut nodes = Vec::with_capacity(inner.shards.len() + 1);
if !inner.shard_builder.is_empty() {
- let bulder_reader = inner.shard_builder.read(&mut context.pk_weights)?;
- nodes.push(ShardNode::new(ShardSource::Builder(bulder_reader)));
+ let builder_reader = inner.shard_builder.read(&mut context.pk_weights)?;
+ nodes.push(ShardNode::new(ShardSource::Builder(builder_reader)));
}
for shard in &inner.shards {
if !shard.is_empty() {
@@ -122,7 +125,7 @@ impl Partition {
nodes
};
- // Creating a shard merger will invoke next so we do it outside of the lock.
+ // Creating a shard merger will invoke next so we do it outside the lock.
let merger = ShardMerger::try_new(nodes)?;
if self.dedup {
let source = DedupReader::try_new(merger)?;
@@ -234,6 +237,15 @@ pub(crate) struct PartitionStats {
pub(crate) shared_memory_size: usize,
}
+#[derive(Default)]
+struct PartitionReaderMetrics {
+ prune_pk: Duration,
+ read_source: Duration,
+ data_batch_to_batch: Duration,
+ keys_before_pruning: usize,
+ keys_after_pruning: usize,
+}
+
/// Reader to scan rows in a partition.
///
/// It can merge rows from multiple shards.
@@ -266,8 +278,9 @@ impl PartitionReader {
/// # Panics
/// Panics if the reader is invalid.
pub fn next(&mut self) -> Result<()> {
+ let read_source = Instant::now();
self.source.next()?;
-
+ self.context.metrics.read_source += read_source.elapsed();
self.prune_batch_by_key()
}
@@ -275,14 +288,17 @@ impl PartitionReader {
///
/// # Panics
/// Panics if the reader is invalid.
- pub fn convert_current_batch(&self) -> Result<Batch> {
+ pub fn convert_current_batch(&mut self) -> Result<Batch> {
+ let start = Instant::now();
let data_batch = self.source.current_data_batch();
- data_batch_to_batch(
+ let batch = data_batch_to_batch(
&self.context.metadata,
&self.context.projection,
self.source.current_key(),
data_batch,
- )
+ )?;
+ self.context.metrics.data_batch_to_batch += start.elapsed();
+ Ok(batch)
}
pub(crate) fn into_context(self) -> ReadPartitionContext {
@@ -290,6 +306,7 @@ impl PartitionReader {
}
fn prune_batch_by_key(&mut self) -> Result<()> {
+ let start = Instant::now();
if self.context.metadata.primary_key.is_empty() || !self.context.need_prune_key {
// Nothing to prune.
return Ok(());
@@ -305,6 +322,7 @@ impl PartitionReader {
}
}
let key = self.source.current_key().unwrap();
+ self.context.metrics.keys_before_pruning += 1;
// Prune batch by primary key.
if prune_primary_key(
&self.context.metadata,
@@ -314,11 +332,12 @@ impl PartitionReader {
) {
// We need this key.
self.last_yield_pk_id = Some(pk_id);
+ self.context.metrics.keys_after_pruning += 1;
break;
}
self.source.next()?;
}
-
+ self.context.metrics.prune_pk += start.elapsed();
Ok(())
}
}
@@ -384,6 +403,26 @@ pub(crate) struct ReadPartitionContext {
/// Buffer to store pk weights.
pk_weights: Vec<u16>,
need_prune_key: bool,
+ metrics: PartitionReaderMetrics,
+}
+
+impl Drop for ReadPartitionContext {
+ fn drop(&mut self) {
+ MERGE_TREE_READ_STAGE_ELAPSED
+ .with_label_values(&["partition_prune_pk"])
+ .observe(self.metrics.prune_pk.as_secs_f64());
+ MERGE_TREE_READ_STAGE_ELAPSED
+ .with_label_values(&["partition_read_source"])
+ .observe(self.metrics.read_source.as_secs_f64());
+ MERGE_TREE_READ_STAGE_ELAPSED
+ .with_label_values(&["partition_data_batch_to_batch"])
+ .observe(self.metrics.data_batch_to_batch.as_secs_f64());
+ log::debug!(
+ "TreeIter pruning, before: {}, after: {}",
+ self.metrics.keys_before_pruning,
+ self.metrics.keys_after_pruning
+ );
+ }
}
impl ReadPartitionContext {
@@ -401,10 +440,11 @@ impl ReadPartitionContext {
filters,
pk_weights: Vec::new(),
need_prune_key,
+ metrics: Default::default(),
}
}
- /// Does filters contains predicate on primary key columns after pruning the
+ /// Does filter contain predicate on primary key columns after pruning the
/// partition column.
fn need_prune_key(metadata: &RegionMetadataRef, filters: &[SimpleFilterEvaluator]) -> bool {
for filter in filters {
diff --git a/src/mito2/src/memtable/merge_tree/shard_builder.rs b/src/mito2/src/memtable/merge_tree/shard_builder.rs
index f20a38fe672d..c2185f2d3517 100644
--- a/src/mito2/src/memtable/merge_tree/shard_builder.rs
+++ b/src/mito2/src/memtable/merge_tree/shard_builder.rs
@@ -28,6 +28,7 @@ use crate::memtable::merge_tree::dict::{DictBuilderReader, KeyDictBuilder};
use crate::memtable::merge_tree::metrics::WriteMetrics;
use crate::memtable::merge_tree::shard::Shard;
use crate::memtable::merge_tree::{MergeTreeConfig, PkId, ShardId};
+use crate::metrics::MERGE_TREE_READ_STAGE_ELAPSED;
/// Builder to write keys and data to a shard that the key dictionary
/// is still active.
@@ -125,10 +126,21 @@ impl ShardBuilder {
/// Scans the shard builder.
pub fn read(&self, pk_weights_buffer: &mut Vec<u16>) -> Result<ShardBuilderReader> {
- let dict_reader = self.dict_builder.read();
- dict_reader.pk_weights_to_sort_data(pk_weights_buffer);
- let data_reader = self.data_buffer.read(Some(pk_weights_buffer))?;
+ let dict_reader = {
+ let _timer = MERGE_TREE_READ_STAGE_ELAPSED
+ .with_label_values(&["shard_builder_read_pk"])
+ .start_timer();
+ self.dict_builder.read()
+ };
+ {
+ let _timer = MERGE_TREE_READ_STAGE_ELAPSED
+ .with_label_values(&["sort_pk"])
+ .start_timer();
+ dict_reader.pk_weights_to_sort_data(pk_weights_buffer);
+ }
+
+ let data_reader = self.data_buffer.read(Some(pk_weights_buffer))?;
Ok(ShardBuilderReader {
shard_id: self.current_shard_id,
dict_reader,
diff --git a/src/mito2/src/memtable/merge_tree/tree.rs b/src/mito2/src/memtable/merge_tree/tree.rs
index 6472af49973a..84c5fb09dfb3 100644
--- a/src/mito2/src/memtable/merge_tree/tree.rs
+++ b/src/mito2/src/memtable/merge_tree/tree.rs
@@ -16,9 +16,11 @@
use std::collections::{BTreeMap, HashSet, VecDeque};
use std::sync::{Arc, RwLock};
+use std::time::{Duration, Instant};
use api::v1::OpType;
use common_recordbatch::filter::SimpleFilterEvaluator;
+use common_telemetry::tracing::log;
use common_time::Timestamp;
use datafusion_common::ScalarValue;
use snafu::ensure;
@@ -35,6 +37,7 @@ use crate::memtable::merge_tree::partition::{
};
use crate::memtable::merge_tree::MergeTreeConfig;
use crate::memtable::{BoxedBatchIterator, KeyValues};
+use crate::metrics::MERGE_TREE_READ_STAGE_ELAPSED;
use crate::read::Batch;
use crate::row_converter::{McmpRowCodec, RowCodec, SortField};
@@ -153,6 +156,7 @@ impl MergeTree {
let mut iter = TreeIter {
partitions,
current_reader: None,
+ metrics: Default::default(),
};
let context = ReadPartitionContext::new(
self.metadata.clone(),
@@ -313,9 +317,30 @@ impl MergeTree {
}
}
+#[derive(Default)]
+struct TreeIterMetrics {
+ fetch_partition_elapsed: Duration,
+ rows_fetched: usize,
+ batches_fetched: usize,
+}
+
struct TreeIter {
partitions: VecDeque<PartitionRef>,
current_reader: Option<PartitionReader>,
+ metrics: TreeIterMetrics,
+}
+
+impl Drop for TreeIter {
+ fn drop(&mut self) {
+ MERGE_TREE_READ_STAGE_ELAPSED
+ .with_label_values(&["fetch_next_partition"])
+ .observe(self.metrics.fetch_partition_elapsed.as_secs_f64());
+ log::debug!(
+ "TreeIter rows fetched: {}, batches fetched: {}",
+ self.metrics.rows_fetched,
+ self.metrics.batches_fetched
+ );
+ }
}
impl Iterator for TreeIter {
@@ -329,6 +354,7 @@ impl Iterator for TreeIter {
impl TreeIter {
/// Fetch next partition.
fn fetch_next_partition(&mut self, mut context: ReadPartitionContext) -> Result<()> {
+ let start = Instant::now();
while let Some(partition) = self.partitions.pop_front() {
let part_reader = partition.read(context)?;
if !part_reader.is_valid() {
@@ -338,7 +364,7 @@ impl TreeIter {
self.current_reader = Some(part_reader);
break;
}
-
+ self.metrics.fetch_partition_elapsed += start.elapsed();
Ok(())
}
@@ -360,6 +386,8 @@ impl TreeIter {
let context = part_reader.into_context();
self.fetch_next_partition(context)?;
+ self.metrics.rows_fetched += batch.num_rows();
+ self.metrics.batches_fetched += 1;
Ok(Some(batch))
}
}
diff --git a/src/mito2/src/metrics.rs b/src/mito2/src/metrics.rs
index b25fd393bdb4..83d8f87016f9 100644
--- a/src/mito2/src/metrics.rs
+++ b/src/mito2/src/metrics.rs
@@ -254,4 +254,24 @@ lazy_static! {
pub static ref INDEX_INTERMEDIATE_FLUSH_OP_TOTAL: IntCounter = INDEX_IO_OP_TOTAL
.with_label_values(&["flush", "intermediate"]);
// ------- End of index metrics.
+
+ /// Merge tree memtable data buffer freeze metrics
+ pub static ref MERGE_TREE_DATA_BUFFER_FREEZE_STAGE_ELAPSED: HistogramVec = register_histogram_vec!(
+ "greptime_merge_tree_buffer_freeze_stage_elapsed",
+ "mito merge tree data buffer freeze stage elapsed",
+ &[STAGE_LABEL],
+ vec![0.005, 0.01, 0.05, 0.1, 0.5, 1.0, 5.0, 10.0, 60.0]
+ )
+ .unwrap();
+
+ /// Merge tree memtable read path metrics
+ pub static ref MERGE_TREE_READ_STAGE_ELAPSED: HistogramVec = register_histogram_vec!(
+ "greptime_merge_tree_read_stage_elapsed",
+ "mito merge tree read stage elapsed",
+ &[STAGE_LABEL],
+ vec![0.005, 0.01, 0.05, 0.1, 0.5, 1.0, 5.0, 10.0, 60.0]
+ )
+ .unwrap();
+
+ // ------- End of merge tree memtable metrics.
}
|
chore
|
add metrics for memtable read path (#3397)
|
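The instrumentation added in the diff above leans on the prometheus scope-guard idiom: a `HistogramVec` labelled by stage, where the value returned by `start_timer()` observes the elapsed time when it is dropped. A minimal standalone sketch, assuming the `prometheus` and `lazy_static` crates and a made-up metric name rather than the project's own wrappers:

use lazy_static::lazy_static;
use prometheus::{register_histogram_vec, HistogramVec};

lazy_static! {
    // Hypothetical metric; the real definitions live in src/mito2/src/metrics.rs.
    static ref READ_STAGE_ELAPSED: HistogramVec = register_histogram_vec!(
        "example_read_stage_elapsed",
        "elapsed seconds per read stage",
        &["stage"]
    )
    .unwrap();
}

fn read_data_buffer() {
    // The guard records the elapsed seconds under the "read_data_buffer" label when
    // it is dropped at the end of this scope.
    let _timer = READ_STAGE_ELAPSED
        .with_label_values(&["read_data_buffer"])
        .start_timer();
    // ... work being measured ...
}

The `Drop` implementations on the readers in the diff follow the same idea by hand: they accumulate a `Duration` across calls and observe it once when the reader is dropped.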
e714f7df6c188fed9b05006eb5ae93e4cc3477d9
|
2025-03-03 15:23:14
|
Zhenchi
|
fix: out of bound during bloom search (#5625)
| false
|
diff --git a/src/index/src/bloom_filter/applier.rs b/src/index/src/bloom_filter/applier.rs
index e87a94cd1bb2..8829f4e0eeaf 100644
--- a/src/index/src/bloom_filter/applier.rs
+++ b/src/index/src/bloom_filter/applier.rs
@@ -42,7 +42,16 @@ impl BloomFilterApplier {
) -> Result<Vec<Range<usize>>> {
let rows_per_segment = self.meta.rows_per_segment as usize;
let start_seg = search_range.start / rows_per_segment;
- let end_seg = search_range.end.div_ceil(rows_per_segment);
+ let mut end_seg = search_range.end.div_ceil(rows_per_segment);
+
+ if end_seg == self.meta.segment_loc_indices.len() + 1 {
+ // In a previous version, there was a bug where if the last segment was all null,
+ // this segment would not be written into the index. This caused the slice
+ // `self.meta.segment_loc_indices[start_seg..end_seg]` to go out of bounds due to
+ // the missing segment. Since the `search` function does not search for nulls,
+ // we can simply ignore the last segment in this buggy scenario.
+ end_seg -= 1;
+ }
let locs = &self.meta.segment_loc_indices[start_seg..end_seg];
diff --git a/src/index/src/bloom_filter/creator.rs b/src/index/src/bloom_filter/creator.rs
index 0b6810a688c3..66e892e29fd0 100644
--- a/src/index/src/bloom_filter/creator.rs
+++ b/src/index/src/bloom_filter/creator.rs
@@ -64,6 +64,9 @@ pub struct BloomFilterCreator {
/// Storage for finalized Bloom filters.
finalized_bloom_filters: FinalizedBloomFilterStorage,
+ /// Number of rows finalized so far.
+ finalized_row_count: usize,
+
/// Global memory usage of the bloom filter creator.
global_memory_usage: Arc<AtomicUsize>,
}
@@ -96,6 +99,7 @@ impl BloomFilterCreator {
global_memory_usage,
global_memory_usage_threshold,
),
+ finalized_row_count: 0,
}
}
@@ -136,6 +140,7 @@ impl BloomFilterCreator {
if self.accumulated_row_count % self.rows_per_segment == 0 {
self.finalize_segment().await?;
+ self.finalized_row_count = self.accumulated_row_count;
}
}
@@ -161,6 +166,7 @@ impl BloomFilterCreator {
if self.accumulated_row_count % self.rows_per_segment == 0 {
self.finalize_segment().await?;
+ self.finalized_row_count = self.accumulated_row_count;
}
Ok(())
@@ -168,7 +174,7 @@ impl BloomFilterCreator {
/// Finalizes any remaining segments and writes the bloom filters and metadata to the provided writer.
pub async fn finish(&mut self, mut writer: impl AsyncWrite + Unpin) -> Result<()> {
- if !self.cur_seg_distinct_elems.is_empty() {
+ if self.accumulated_row_count > self.finalized_row_count {
self.finalize_segment().await?;
}
@@ -406,4 +412,35 @@ mod tests {
assert!(bf.contains(&b"f"));
}
}
+
+ #[tokio::test]
+ async fn test_final_seg_all_null() {
+ let mut writer = Cursor::new(Vec::new());
+ let mut creator = BloomFilterCreator::new(
+ 2,
+ Arc::new(MockExternalTempFileProvider::new()),
+ Arc::new(AtomicUsize::new(0)),
+ None,
+ );
+
+ creator
+ .push_n_row_elems(4, vec![b"a".to_vec(), b"b".to_vec()])
+ .await
+ .unwrap();
+ creator.push_row_elems(Vec::new()).await.unwrap();
+
+ creator.finish(&mut writer).await.unwrap();
+
+ let bytes = writer.into_inner();
+ let total_size = bytes.len();
+ let meta_size_offset = total_size - 4;
+ let meta_size = u32::from_le_bytes((&bytes[meta_size_offset..]).try_into().unwrap());
+
+ let meta_bytes = &bytes[total_size - meta_size as usize - 4..total_size - 4];
+ let meta = BloomFilterMeta::decode(meta_bytes).unwrap();
+
+ assert_eq!(meta.rows_per_segment, 2);
+ assert_eq!(meta.segment_count, 3);
+ assert_eq!(meta.row_count, 5);
+ }
}
|
fix
|
out of bound during bloom search (#5625)
|
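To make the off-by-one concrete, here is a hedged sketch of the segment-range arithmetic the fix guards; the function and the numbers are illustrative, not taken from the real index metadata:

use std::ops::Range;

// `segments` is the number of segment location indices actually recorded in the meta.
fn segment_range(search: Range<usize>, rows_per_segment: usize, segments: usize) -> Range<usize> {
    let start_seg = search.start / rows_per_segment;
    let mut end_seg = search.end.div_ceil(rows_per_segment);
    // An older writer could drop a trailing all-null segment, so the row count may imply
    // one more segment than was recorded; clamp to keep the later slice in bounds.
    if end_seg == segments + 1 {
        end_seg -= 1;
    }
    start_seg..end_seg
}

fn main() {
    // 5 rows with rows_per_segment = 2, but only 2 segments recorded by the buggy writer:
    // div_ceil(5, 2) = 3 == 2 + 1, so the range is clamped to 0..2.
    assert_eq!(segment_range(0..5, 2, 2), 0..2);
}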
33d894c1f0ae2840a7170735c4ac049783397a86
|
2024-02-28 11:45:23
|
tison
|
build: do not retry for connrefused (#3402)
| false
|
diff --git a/scripts/fetch-dashboard-assets.sh b/scripts/fetch-dashboard-assets.sh
index 9ffc10b7df82..893b5cb578e3 100755
--- a/scripts/fetch-dashboard-assets.sh
+++ b/scripts/fetch-dashboard-assets.sh
@@ -23,12 +23,7 @@ function retry_fetch() {
local url=$1
local filename=$2
- curl \
- --connect-timeout 10 \
- --retry 3 \
- --retry-connrefused \
- -fsSL $url --output $filename || \
- {
+ curl --connect-timeout 10 --retry 3 -fsSL $url --output $filename || {
echo "Failed to download $url"
echo "You may try to set http_proxy and https_proxy environment variables."
if [[ -z "$GITHUB_PROXY_URL" ]]; then
|
build
|
do not retry for connrefused (#3402)
|
815ce59a3a4d466ecfd44f7322f8238bfcdd14cd
|
2025-01-08 18:11:04
|
Ning Sun
|
ci: use mold for tests (#5319)
| false
|
diff --git a/.github/workflows/develop.yml b/.github/workflows/develop.yml
index 94528e9b8194..fc60d8071219 100644
--- a/.github/workflows/develop.yml
+++ b/.github/workflows/develop.yml
@@ -659,19 +659,18 @@ jobs:
- uses: arduino/setup-protoc@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- - uses: KyleMayes/install-llvm-action@v1
- with:
- version: "14.0"
+ - uses: rui314/setup-mold@v1
- name: Install toolchain
uses: actions-rust-lang/setup-rust-toolchain@v1
with:
- components: llvm-tools-preview
+ components: llvm-tools
cache: false
- # - name: Rust Cache
- # uses: Swatinem/rust-cache@v2
- # with:
- # # Shares cross multiple jobs
- # shared-key: "coverage-test"
+ - name: Rust Cache
+ uses: Swatinem/rust-cache@v2
+ with:
+ # Shares cross multiple jobs
+ shared-key: "coverage-test"
+ save-if: ${{ github.event_name == 'merge_group' }}
# Disabled temporarily to see performance
# - name: Docker Cache
# uses: ScribeMD/[email protected]
@@ -687,7 +686,7 @@ jobs:
- name: Run nextest cases
run: cargo llvm-cov nextest --workspace --lcov --output-path lcov.info -F dashboard -F pg_kvbackend
env:
- CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=lld"
+ CARGO_BUILD_RUSTFLAGS: "-C link-arg=-fuse-ld=mold"
RUST_BACKTRACE: 1
CARGO_INCREMENTAL: 0
GT_S3_BUCKET: ${{ vars.AWS_CI_TEST_BUCKET }}
|
ci
|
use mold for tests (#5319)
|
89ebe47cd9aa3ca9a68c33decba1405629019e2b
|
2023-10-20 15:13:45
|
localhost
|
feat: RepeatedTask adds execute-first-wait-later behavior. (#2625)
| false
|
diff --git a/src/common/greptimedb-telemetry/src/lib.rs b/src/common/greptimedb-telemetry/src/lib.rs
index 37b86c642544..42e367652153 100644
--- a/src/common/greptimedb-telemetry/src/lib.rs
+++ b/src/common/greptimedb-telemetry/src/lib.rs
@@ -57,7 +57,10 @@ impl GreptimeDBTelemetryTask {
task_fn: BoxedTaskFunction<Error>,
should_report: Arc<AtomicBool>,
) -> Self {
- GreptimeDBTelemetryTask::Enable((RepeatedTask::new(interval, task_fn), should_report))
+ GreptimeDBTelemetryTask::Enable((
+ RepeatedTask::new(interval, task_fn).with_initial_delay(Some(Duration::ZERO)),
+ should_report,
+ ))
}
pub fn disable() -> Self {
diff --git a/src/common/runtime/src/repeated_task.rs b/src/common/runtime/src/repeated_task.rs
index b3dcc781f1bd..a4f2bde8b00a 100644
--- a/src/common/runtime/src/repeated_task.rs
+++ b/src/common/runtime/src/repeated_task.rs
@@ -40,6 +40,7 @@ pub type BoxedTaskFunction<E> = Box<dyn TaskFunction<E> + Send + Sync + 'static>
struct TaskInner<E> {
/// The repeated task handle. This handle is Some if the task is started.
task_handle: Option<JoinHandle<()>>,
+
/// The task_fn to run. This is Some if the task is not started.
task_fn: Option<BoxedTaskFunction<E>>,
}
@@ -50,6 +51,7 @@ pub struct RepeatedTask<E> {
inner: Mutex<TaskInner<E>>,
started: AtomicBool,
interval: Duration,
+ initial_delay: Option<Duration>,
}
impl<E> std::fmt::Display for RepeatedTask<E> {
@@ -75,6 +77,9 @@ impl<E> Drop for RepeatedTask<E> {
}
impl<E: ErrorExt + 'static> RepeatedTask<E> {
+ /// Creates a new repeated task. The `initial_delay` is the delay before the first execution.
+ /// `initial_delay` defaults to `None`, in which case the first wait uses `interval`.
+ /// You can use `with_initial_delay` to set the `initial_delay`.
pub fn new(interval: Duration, task_fn: BoxedTaskFunction<E>) -> Self {
Self {
name: task_fn.name().to_string(),
@@ -85,9 +90,15 @@ impl<E: ErrorExt + 'static> RepeatedTask<E> {
}),
started: AtomicBool::new(false),
interval,
+ initial_delay: None,
}
}
+ pub fn with_initial_delay(mut self, initial_delay: Option<Duration>) -> Self {
+ self.initial_delay = initial_delay;
+ self
+ }
+
pub fn started(&self) -> bool {
self.started.load(Ordering::Relaxed)
}
@@ -99,17 +110,21 @@ impl<E: ErrorExt + 'static> RepeatedTask<E> {
IllegalStateSnafu { name: &self.name }
);
- let interval = self.interval;
let child = self.cancel_token.child_token();
// Safety: The task is not started.
let mut task_fn = inner.task_fn.take().unwrap();
+ let interval = self.interval;
+ let mut initial_delay = self.initial_delay;
// TODO(hl): Maybe spawn to a blocking runtime.
let handle = runtime.spawn(async move {
loop {
- tokio::select! {
- _ = tokio::time::sleep(interval) => {}
- _ = child.cancelled() => {
- return;
+ let sleep_time = initial_delay.take().unwrap_or(interval);
+ if sleep_time > Duration::ZERO {
+ tokio::select! {
+ _ = tokio::time::sleep(sleep_time) => {}
+ _ = child.cancelled() => {
+ return;
+ }
}
}
if let Err(e) = task_fn.call().await {
@@ -192,4 +207,21 @@ mod tests {
assert_eq!(n.load(Ordering::Relaxed), 5);
}
+
+ #[tokio::test]
+ async fn test_repeated_task_prior_exec() {
+ common_telemetry::init_default_ut_logging();
+
+ let n = Arc::new(AtomicI32::new(0));
+ let task_fn = TickTask { n: n.clone() };
+
+ let task = RepeatedTask::new(Duration::from_millis(100), Box::new(task_fn))
+ .with_initial_delay(Some(Duration::ZERO));
+
+ task.start(crate::bg_runtime()).unwrap();
+ tokio::time::sleep(Duration::from_millis(550)).await;
+ task.stop().await.unwrap();
+
+ assert_eq!(n.load(Ordering::Relaxed), 6);
+ }
}
|
feat
|
RepeatedTask adds execute-first-wait-later behavior. (#2625)
|
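A minimal sketch, with a hypothetical helper name, of the delay selection behind the execute-first behavior above: the optional initial delay is consumed exactly once via `take()`, so `Some(Duration::ZERO)` makes the first tick fire immediately while later ticks wait the full interval.

use std::time::Duration;

fn next_sleep(initial_delay: &mut Option<Duration>, interval: Duration) -> Duration {
    // Consume the initial delay on the first call; fall back to the interval afterwards.
    initial_delay.take().unwrap_or(interval)
}

fn main() {
    let interval = Duration::from_millis(100);
    let mut delay = Some(Duration::ZERO);
    assert_eq!(next_sleep(&mut delay, interval), Duration::ZERO); // first tick runs at once
    assert_eq!(next_sleep(&mut delay, interval), interval); // subsequent ticks wait
}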
3504d8254e459f4e01d44255322524d43f7f1937
|
2023-09-12 18:27:15
|
Niwaka
|
fix: unused table options (#2267)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index cac1497afaa7..7314b8a95e6b 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -8980,6 +8980,7 @@ dependencies = [
"regex",
"snafu",
"sqlparser 0.34.0",
+ "table",
]
[[package]]
@@ -9458,6 +9459,7 @@ dependencies = [
"chrono",
"common-base",
"common-catalog",
+ "common-datasource",
"common-error",
"common-procedure",
"common-query",
diff --git a/src/common/datasource/src/object_store/s3.rs b/src/common/datasource/src/object_store/s3.rs
index 2b7fedf041a6..87975280879d 100644
--- a/src/common/datasource/src/object_store/s3.rs
+++ b/src/common/datasource/src/object_store/s3.rs
@@ -27,6 +27,15 @@ const SESSION_TOKEN: &str = "session_token";
const REGION: &str = "region";
const ENABLE_VIRTUAL_HOST_STYLE: &str = "enable_virtual_host_style";
+pub fn is_supported_in_s3(key: &str) -> bool {
+ key == ENDPOINT
+ || key == ACCESS_KEY_ID
+ || key == SECRET_ACCESS_KEY
+ || key == SESSION_TOKEN
+ || key == REGION
+ || key == ENABLE_VIRTUAL_HOST_STYLE
+}
+
pub fn build_s3_backend(
host: &str,
path: &str,
@@ -75,3 +84,18 @@ pub fn build_s3_backend(
.context(error::BuildBackendSnafu)?
.finish())
}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ #[test]
+ fn test_is_supported_in_s3() {
+ assert!(is_supported_in_s3(ENDPOINT));
+ assert!(is_supported_in_s3(ACCESS_KEY_ID));
+ assert!(is_supported_in_s3(SECRET_ACCESS_KEY));
+ assert!(is_supported_in_s3(SESSION_TOKEN));
+ assert!(is_supported_in_s3(REGION));
+ assert!(is_supported_in_s3(ENABLE_VIRTUAL_HOST_STYLE));
+ assert!(!is_supported_in_s3("foo"))
+ }
+}
diff --git a/src/datanode/src/sql/create.rs b/src/datanode/src/sql/create.rs
index 2665dcc87fa6..573ed6bdedc8 100644
--- a/src/datanode/src/sql/create.rs
+++ b/src/datanode/src/sql/create.rs
@@ -279,7 +279,7 @@ mod tests {
"timestamp" timestamp TIME INDEX,
"value" DOUBLE,
host STRING PRIMARY KEY
- ) engine=mito with(regions=1, ttl='7days',write_buffer_size='32MB',some='other');"#;
+ ) engine=mito with(regions=1, ttl='7days',write_buffer_size='32MB');"#;
let parsed_stmt = sql_to_statement(sql);
let c = SqlHandler::create_to_request(42, parsed_stmt, &TableReference::bare("demo_table"))
.unwrap();
@@ -289,7 +289,6 @@ mod tests {
Some(ReadableSize::mb(32)),
c.table_options.write_buffer_size
);
- assert_eq!("other", c.table_options.extra_options.get("some").unwrap());
}
#[tokio::test]
diff --git a/src/frontend/src/statement.rs b/src/frontend/src/statement.rs
index 3deadcf8649e..1105ac9299ea 100644
--- a/src/frontend/src/statement.rs
+++ b/src/frontend/src/statement.rs
@@ -120,10 +120,9 @@ impl StatementExecutor {
self.copy_database(to_copy_database_request(arg, &query_ctx)?)
.await
}
-
- Statement::CreateDatabase(_)
- | Statement::CreateTable(_)
+ Statement::CreateTable(_)
| Statement::CreateExternalTable(_)
+ | Statement::CreateDatabase(_)
| Statement::Alter(_)
| Statement::DropTable(_)
| Statement::TruncateTable(_)
diff --git a/src/sql/Cargo.toml b/src/sql/Cargo.toml
index ee85f93616dd..5d271470aa12 100644
--- a/src/sql/Cargo.toml
+++ b/src/sql/Cargo.toml
@@ -19,6 +19,7 @@ once_cell.workspace = true
regex.workspace = true
snafu = { version = "0.7", features = ["backtraces"] }
sqlparser.workspace = true
+table = { workspace = true }
[dev-dependencies]
common-datasource = { workspace = true }
diff --git a/src/sql/src/error.rs b/src/sql/src/error.rs
index 1ced9be3cef5..f5c375ea8f44 100644
--- a/src/sql/src/error.rs
+++ b/src/sql/src/error.rs
@@ -103,6 +103,9 @@ pub enum Error {
source: datatypes::error::Error,
},
+ #[snafu(display("Invalid table option key: {}", key))]
+ InvalidTableOption { key: String, location: Location },
+
#[snafu(display("Failed to serialize column default constraint, source: {}", source))]
SerializeColumnDefaultConstraint {
location: Location,
@@ -168,7 +171,8 @@ impl ErrorExt for Error {
| ColumnTypeMismatch { .. }
| InvalidTableName { .. }
| InvalidSqlValue { .. }
- | TimestampOverflow { .. } => StatusCode::InvalidArguments,
+ | TimestampOverflow { .. }
+ | InvalidTableOption { .. } => StatusCode::InvalidArguments,
SerializeColumnDefaultConstraint { source, .. } => source.status_code(),
ConvertToGrpcDataType { source, .. } => source.status_code(),
diff --git a/src/sql/src/parsers/create_parser.rs b/src/sql/src/parsers/create_parser.rs
index 84ae1c87d6ae..71b0c3786918 100644
--- a/src/sql/src/parsers/create_parser.rs
+++ b/src/sql/src/parsers/create_parser.rs
@@ -13,6 +13,7 @@
// limitations under the License.
use std::cmp::Ordering;
+use std::collections::HashMap;
use common_catalog::consts::default_engine;
use itertools::Itertools;
@@ -24,11 +25,12 @@ use sqlparser::keywords::ALL_KEYWORDS;
use sqlparser::parser::IsOptional::Mandatory;
use sqlparser::parser::{Parser, ParserError};
use sqlparser::tokenizer::{Token, TokenWithLocation, Word};
+use table::requests::valid_table_option;
use crate::ast::{ColumnDef, Ident, TableConstraint, Value as SqlValue};
use crate::error::{
- self, InvalidColumnOptionSnafu, InvalidTimeIndexSnafu, MissingTimeIndexSnafu, Result,
- SyntaxSnafu,
+ self, InvalidColumnOptionSnafu, InvalidTableOptionSnafu, InvalidTimeIndexSnafu,
+ MissingTimeIndexSnafu, Result, SyntaxSnafu,
};
use crate::parser::ParserContext;
use crate::statements::create::{
@@ -91,8 +93,15 @@ impl<'a> ParserContext<'a> {
None
}
})
- .collect();
-
+ .collect::<HashMap<String, String>>();
+ for key in options.keys() {
+ ensure!(
+ valid_table_option(key),
+ InvalidTableOptionSnafu {
+ key: key.to_string()
+ }
+ );
+ }
Ok(Statement::CreateExternalTable(CreateExternalTable {
name: table_name,
columns,
@@ -149,7 +158,14 @@ impl<'a> ParserContext<'a> {
.parser
.parse_options(Keyword::WITH)
.context(error::SyntaxSnafu { sql: self.sql })?;
-
+ for option in options.iter() {
+ ensure!(
+ valid_table_option(&option.name.value),
+ InvalidTableOptionSnafu {
+ key: option.name.value.to_string()
+ }
+ );
+ }
let create_table = CreateTable {
if_not_exists,
name: table_name,
@@ -807,6 +823,24 @@ mod tests {
use super::*;
use crate::dialect::GreptimeDbDialect;
+ #[test]
+ fn test_validate_external_table_options() {
+ let sql = "CREATE EXTERNAL TABLE city (
+ host string,
+ ts int64,
+ cpu float64 default 0,
+ memory float64,
+ TIME INDEX (ts),
+ PRIMARY KEY(ts, host)
+ ) with(location='/var/data/city.csv',format='csv',foo='bar');";
+
+ let result = ParserContext::create_with_dialect(sql, &GreptimeDbDialect {});
+ assert!(matches!(
+ result,
+ Err(error::Error::InvalidTableOption { .. })
+ ));
+ }
+
#[test]
fn test_parse_create_external_table() {
struct Test<'a> {
diff --git a/src/sql/src/statements/create.rs b/src/sql/src/statements/create.rs
index 06bf185371c8..3bf1fe260b8f 100644
--- a/src/sql/src/statements/create.rs
+++ b/src/sql/src/statements/create.rs
@@ -222,6 +222,7 @@ pub struct CreateExternalTable {
#[cfg(test)]
mod tests {
use crate::dialect::GreptimeDbDialect;
+ use crate::error::Error::InvalidTableOption;
use crate::parser::ParserContext;
use crate::statements::statement::Statement;
@@ -319,4 +320,26 @@ ENGINE=mito
_ => unreachable!(),
}
}
+
+ #[test]
+ fn test_validate_table_options() {
+ let sql = r"create table if not exists demo(
+ host string,
+ ts bigint,
+ cpu double default 0,
+ memory double,
+ TIME INDEX (ts),
+ PRIMARY KEY(ts, host)
+ )
+ PARTITION BY RANGE COLUMNS (ts) (
+ PARTITION r0 VALUES LESS THAN (5),
+ PARTITION r1 VALUES LESS THAN (9),
+ PARTITION r2 VALUES LESS THAN (MAXVALUE),
+ )
+ engine=mito
+ with(regions=1, ttl='7d', hello='world');
+";
+ let result = ParserContext::create_with_dialect(sql, &GreptimeDbDialect {});
+ assert!(matches!(result, Err(InvalidTableOption { .. })))
+ }
}
diff --git a/src/table/Cargo.toml b/src/table/Cargo.toml
index 168548b9ad92..245ef06e25dd 100644
--- a/src/table/Cargo.toml
+++ b/src/table/Cargo.toml
@@ -13,6 +13,7 @@ async-trait = "0.1"
chrono.workspace = true
common-base = { workspace = true }
common-catalog = { workspace = true }
+common-datasource = { workspace = true }
common-error = { workspace = true }
common-procedure = { workspace = true }
common-query = { workspace = true }
diff --git a/src/table/src/requests.rs b/src/table/src/requests.rs
index 7ca269c68d82..5407cbac3959 100644
--- a/src/table/src/requests.rs
+++ b/src/table/src/requests.rs
@@ -19,6 +19,7 @@ use std::str::FromStr;
use std::time::Duration;
use common_base::readable_size::ReadableSize;
+use common_datasource::object_store::s3::is_supported_in_s3;
use common_query::AddColumnLocation;
use common_time::range::TimestampRange;
use datatypes::prelude::VectorRef;
@@ -332,6 +333,18 @@ macro_rules! meter_insert_request {
};
}
+pub fn valid_table_option(key: &str) -> bool {
+ matches!(
+ key,
+ IMMUTABLE_TABLE_LOCATION_KEY
+ | IMMUTABLE_TABLE_FORMAT_KEY
+ | IMMUTABLE_TABLE_PATTERN_KEY
+ | WRITE_BUFFER_SIZE_KEY
+ | TTL_KEY
+ | REGIONS_KEY
+ ) | is_supported_in_s3(key)
+}
+
#[derive(Debug, Clone, Default, Deserialize, Serialize)]
pub struct CopyDatabaseRequest {
pub catalog_name: String,
@@ -346,6 +359,17 @@ pub struct CopyDatabaseRequest {
mod tests {
use super::*;
+ #[test]
+ fn test_validate_table_option() {
+ assert!(valid_table_option(IMMUTABLE_TABLE_LOCATION_KEY));
+ assert!(valid_table_option(IMMUTABLE_TABLE_FORMAT_KEY));
+ assert!(valid_table_option(IMMUTABLE_TABLE_PATTERN_KEY));
+ assert!(valid_table_option(TTL_KEY));
+ assert!(valid_table_option(REGIONS_KEY));
+ assert!(valid_table_option(WRITE_BUFFER_SIZE_KEY));
+ assert!(!valid_table_option("foo"));
+ }
+
#[test]
fn test_serialize_table_options() {
let options = TableOptions {
diff --git a/tests-integration/src/tests/instance_test.rs b/tests-integration/src/tests/instance_test.rs
index cc42e82fbe6f..c4bceefaf25f 100644
--- a/tests-integration/src/tests/instance_test.rs
+++ b/tests-integration/src/tests/instance_test.rs
@@ -153,6 +153,94 @@ PARTITION BY RANGE COLUMNS (ts) (
check_output_stream(output, expected).await;
}
+#[apply(both_instances_cases)]
+async fn test_validate_external_table_options(instance: Arc<dyn MockInstance>) {
+ let frontend = instance.frontend();
+ let format = "json";
+ let location = find_testing_resource("/tests/data/json/various_type.json");
+ let table_name = "various_type_json_with_schema";
+ let sql = &format!(
+ r#"CREATE EXTERNAL TABLE {table_name} (
+ a BIGINT NULL,
+ b DOUBLE NULL,
+ c BOOLEAN NULL,
+ d STRING NULL,
+ e TIMESTAMP(0) NULL,
+ f DOUBLE NULL,
+ g TIMESTAMP(0) NULL,
+ ) WITH (foo='bar', location='{location}', format='{format}');"#,
+ );
+
+ let result = try_execute_sql(&frontend, sql).await;
+ assert!(matches!(result, Err(Error::ParseSql { .. })));
+}
+
+#[apply(both_instances_cases)]
+async fn test_show_create_external_table(instance: Arc<dyn MockInstance>) {
+ let fe_instance = instance.frontend();
+ let format = "csv";
+ let location = find_testing_resource("/tests/data/csv/various_type.csv");
+ let table_name = "various_type_csv";
+
+ let output = execute_sql(
+ &fe_instance,
+ &format!(
+ r#"create external table {table_name} with (location='{location}', format='{format}');"#,
+ ),
+ )
+ .await;
+ assert!(matches!(output, Output::AffectedRows(0)));
+
+ let output = execute_sql(&fe_instance, &format!("show create table {table_name};")).await;
+
+ let Output::RecordBatches(record_batches) = output else {
+ unreachable!()
+ };
+
+ // We can't directly test `show create table` by check_output_stream because the location name length depends on the current filesystem.
+ let record_batches = record_batches.iter().collect::<Vec<_>>();
+ let column = record_batches[0].column_by_name("Create Table").unwrap();
+ let actual = column.get(0);
+ let expect = if instance.is_distributed_mode() {
+ format!(
+ r#"CREATE EXTERNAL TABLE IF NOT EXISTS "various_type_csv" (
+ "c_int" BIGINT NULL,
+ "c_float" DOUBLE NULL,
+ "c_string" DOUBLE NULL,
+ "c_bool" BOOLEAN NULL,
+ "c_date" DATE NULL,
+ "c_datetime" TIMESTAMP(0) NULL,
+
+)
+
+ENGINE=file
+WITH(
+ format = 'csv',
+ location = '{location}',
+ regions = 1
+)"#
+ )
+ } else {
+ format!(
+ r#"CREATE EXTERNAL TABLE IF NOT EXISTS "various_type_csv" (
+ "c_int" BIGINT NULL,
+ "c_float" DOUBLE NULL,
+ "c_string" DOUBLE NULL,
+ "c_bool" BOOLEAN NULL,
+ "c_date" DATE NULL,
+ "c_datetime" TIMESTAMP(0) NULL,
+
+)
+ENGINE=file
+WITH(
+ format = 'csv',
+ location = '{location}'
+)"#
+ )
+ };
+ assert_eq!(actual.to_string(), expect);
+}
+
#[apply(both_instances_cases)]
async fn test_issue477_same_table_name_in_different_databases(instance: Arc<dyn MockInstance>) {
let instance = instance.frontend();
diff --git a/tests/cases/distributed/show/show_create.result b/tests/cases/distributed/show/show_create.result
index 2789ae27cc2b..a90dbd0914d8 100644
--- a/tests/cases/distributed/show/show_create.result
+++ b/tests/cases/distributed/show/show_create.result
@@ -13,7 +13,11 @@ PARTITION BY RANGE COLUMNS (n) (
PARTITION r1 VALUES LESS THAN (9),
PARTITION r2 VALUES LESS THAN (MAXVALUE),
)
-ENGINE=mito;
+ENGINE=mito
+WITH(
+ ttl = '7d',
+ write_buffer_size = 1024
+);
Affected Rows: 0
@@ -39,7 +43,9 @@ SHOW CREATE TABLE system_metrics;
| | ) |
| | ENGINE=mito |
| | WITH( |
-| | regions = 3 |
+| | regions = 3, |
+| | ttl = '7days', |
+| | write_buffer_size = '1.0KiB' |
| | ) |
+----------------+-----------------------------------------------------------+
@@ -73,3 +79,27 @@ drop table table_without_partition;
Affected Rows: 1
+CREATE TABLE not_supported_table_options_keys (
+ id INT UNSIGNED,
+ host STRING,
+ cpu DOUBLE,
+ disk FLOAT,
+ n INT COMMENT 'range key',
+ ts TIMESTAMP NOT NULL DEFAULT current_timestamp(),
+ TIME INDEX (ts),
+ PRIMARY KEY (id, host)
+)
+PARTITION BY RANGE COLUMNS (n) (
+ PARTITION r0 VALUES LESS THAN (5),
+ PARTITION r1 VALUES LESS THAN (9),
+ PARTITION r2 VALUES LESS THAN (MAXVALUE),
+)
+ENGINE=mito
+WITH(
+ foo = 123,
+ ttl = '7d',
+ write_buffer_size = 1024
+);
+
+Error: 1004(InvalidArguments), Invalid table option key: foo
+
diff --git a/tests/cases/distributed/show/show_create.sql b/tests/cases/distributed/show/show_create.sql
index cdb6444eeefe..9435f8729653 100644
--- a/tests/cases/distributed/show/show_create.sql
+++ b/tests/cases/distributed/show/show_create.sql
@@ -13,7 +13,11 @@ PARTITION BY RANGE COLUMNS (n) (
PARTITION r1 VALUES LESS THAN (9),
PARTITION r2 VALUES LESS THAN (MAXVALUE),
)
-ENGINE=mito;
+ENGINE=mito
+WITH(
+ ttl = '7d',
+ write_buffer_size = 1024
+);
SHOW CREATE TABLE system_metrics;
@@ -26,3 +30,25 @@ create table table_without_partition (
show create table table_without_partition;
drop table table_without_partition;
+
+CREATE TABLE not_supported_table_options_keys (
+ id INT UNSIGNED,
+ host STRING,
+ cpu DOUBLE,
+ disk FLOAT,
+ n INT COMMENT 'range key',
+ ts TIMESTAMP NOT NULL DEFAULT current_timestamp(),
+ TIME INDEX (ts),
+ PRIMARY KEY (id, host)
+)
+PARTITION BY RANGE COLUMNS (n) (
+ PARTITION r0 VALUES LESS THAN (5),
+ PARTITION r1 VALUES LESS THAN (9),
+ PARTITION r2 VALUES LESS THAN (MAXVALUE),
+)
+ENGINE=mito
+WITH(
+ foo = 123,
+ ttl = '7d',
+ write_buffer_size = 1024
+);
diff --git a/tests/cases/standalone/show/show_create.result b/tests/cases/standalone/show/show_create.result
index d370d2646da4..9c764675fbc2 100644
--- a/tests/cases/standalone/show/show_create.result
+++ b/tests/cases/standalone/show/show_create.result
@@ -41,3 +41,22 @@ DROP TABLE system_metrics;
Affected Rows: 1
+CREATE TABLE not_supported_table_options_keys (
+ id INT UNSIGNED,
+ host STRING,
+ cpu DOUBLE,
+ disk FLOAT,
+ n INT COMMENT 'range key',
+ ts TIMESTAMP NOT NULL DEFAULT current_timestamp(),
+ TIME INDEX (ts),
+ PRIMARY KEY (id, host)
+)
+ENGINE=mito
+WITH(
+ foo = 123,
+ ttl = '7d',
+ write_buffer_size = 1024
+);
+
+Error: 1004(InvalidArguments), Invalid table option key: foo
+
diff --git a/tests/cases/standalone/show/show_create.sql b/tests/cases/standalone/show/show_create.sql
index bebbd46b752b..86faf1a604aa 100644
--- a/tests/cases/standalone/show/show_create.sql
+++ b/tests/cases/standalone/show/show_create.sql
@@ -16,3 +16,20 @@ WITH(
SHOW CREATE TABLE system_metrics;
DROP TABLE system_metrics;
+
+CREATE TABLE not_supported_table_options_keys (
+ id INT UNSIGNED,
+ host STRING,
+ cpu DOUBLE,
+ disk FLOAT,
+ n INT COMMENT 'range key',
+ ts TIMESTAMP NOT NULL DEFAULT current_timestamp(),
+ TIME INDEX (ts),
+ PRIMARY KEY (id, host)
+)
+ENGINE=mito
+WITH(
+ foo = 123,
+ ttl = '7d',
+ write_buffer_size = 1024
+);
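
A minimal, self-contained sketch of the allow-list check this commit adds (`valid_table_option` plus `is_supported_in_s3`). The literal key strings are inferred from the SQL cases in this diff ("ttl", "regions", "write_buffer_size", "location", "format") and the constants shown at the top; "pattern" and the S3 credential keys are assumptions, not the crate's real constant values, which live in `table::requests` and `common-datasource`.

```rust
// Hedged sketch: mirrors the option-key allow-list with plain string
// literals instead of the crate constants. Keys marked "assumed" may not
// match the real constant values.
fn is_supported_in_s3(key: &str) -> bool {
    matches!(
        key,
        "endpoint"                // assumed value of ENDPOINT
            | "access_key_id"     // assumed value of ACCESS_KEY_ID
            | "secret_access_key" // assumed value of SECRET_ACCESS_KEY
            | "session_token"     // assumed value of SESSION_TOKEN
            | "region"            // REGION, shown in the diff
            | "enable_virtual_host_style" // ENABLE_VIRTUAL_HOST_STYLE, shown in the diff
    )
}

fn valid_table_option(key: &str) -> bool {
    // Anything outside this allow-list makes the parser fail with
    // "Invalid table option key: <key>", as the sqlness results above show.
    matches!(
        key,
        "location" | "format" | "pattern" | "write_buffer_size" | "ttl" | "regions"
    ) || is_supported_in_s3(key)
}

fn main() {
    assert!(valid_table_option("ttl"));
    assert!(valid_table_option("region"));
    assert!(!valid_table_option("foo")); // `foo = 123` in the tests above is rejected
    println!("ok");
}
```

With this guard in place, an unknown key such as `foo` is rejected at parse time, which is exactly what the `Invalid table option key: foo` results in the sqlness cases above exercise.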
|
fix
|
unused table options (#2267)
|
685aa7dd8f019f36ab9ab368c6f9b1b85176452a
|
2024-08-06 17:22:34
|
LFC
|
ci: squeeze some disk space for complex fuzz tests (#4519)
| false
|
diff --git a/.github/workflows/develop.yml b/.github/workflows/develop.yml
index d2c069100db8..0685def6b4e0 100644
--- a/.github/workflows/develop.yml
+++ b/.github/workflows/develop.yml
@@ -291,6 +291,18 @@ jobs:
kafka: true
values: "with-remote-wal.yaml"
steps:
+ - name: Remove unused software
+ run: |
+ echo "Disk space before:"
+ df -h
+ [[ -d /usr/share/dotnet ]] && sudo rm -rf /usr/share/dotnet
+ [[ -d /usr/local/lib/android ]] && sudo rm -rf /usr/local/lib/android
+ [[ -d /opt/ghc ]] && sudo rm -rf /opt/ghc
+ [[ -d /opt/hostedtoolcache/CodeQL ]] && sudo rm -rf /opt/hostedtoolcache/CodeQL
+ sudo docker image prune --all --force
+ sudo docker builder prune -a
+ echo "Disk space after:"
+ df -h
- uses: actions/checkout@v4
- name: Setup Kind
uses: ./.github/actions/setup-kind
|
ci
|
squeeze some disk space for complex fuzz tests (#4519)
|
6fd04e38a39d7d2d0ed744df956057934a517530
|
2023-11-06 09:18:26
|
Ruihang Xia
|
feat: implement create region request for metric engine (#2694)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 89639ff11702..cf2c39426316 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4765,6 +4765,7 @@ dependencies = [
"common-telemetry",
"common-test-util",
"common-time",
+ "datafusion",
"datatypes",
"mito2",
"object-store",
diff --git a/src/common/recordbatch/src/lib.rs b/src/common/recordbatch/src/lib.rs
index 9730b4041f73..0e34dc4cff04 100644
--- a/src/common/recordbatch/src/lib.rs
+++ b/src/common/recordbatch/src/lib.rs
@@ -43,7 +43,7 @@ pub trait RecordBatchStream: Stream<Item = Result<RecordBatch>> {
pub type SendableRecordBatchStream = Pin<Box<dyn RecordBatchStream + Send>>;
-#[derive(Debug, Clone)]
+#[derive(Debug, Clone, PartialEq, Eq)]
pub struct OrderOption {
pub name: String,
pub options: SortOptions,
diff --git a/src/metric-engine/Cargo.toml b/src/metric-engine/Cargo.toml
index 46c76fd79b3a..65b1bdd786a9 100644
--- a/src/metric-engine/Cargo.toml
+++ b/src/metric-engine/Cargo.toml
@@ -14,6 +14,7 @@ common-query.workspace = true
common-recordbatch.workspace = true
common-telemetry.workspace = true
common-time.workspace = true
+datafusion.workspace = true
datatypes.workspace = true
mito2.workspace = true
object-store.workspace = true
diff --git a/src/metric-engine/src/engine.rs b/src/metric-engine/src/engine.rs
index 0262c3255843..86c41aecedd5 100644
--- a/src/metric-engine/src/engine.rs
+++ b/src/metric-engine/src/engine.rs
@@ -26,13 +26,14 @@ use datatypes::schema::ColumnSchema;
use datatypes::value::Value;
use mito2::engine::{MitoEngine, MITO_ENGINE_NAME};
use object_store::util::join_dir;
-use snafu::ResultExt;
+use snafu::{ensure, ResultExt};
use store_api::metadata::{ColumnMetadata, RegionMetadataRef};
use store_api::region_engine::{RegionEngine, RegionRole};
use store_api::region_request::{RegionCreateRequest, RegionRequest};
+use store_api::storage::consts::ReservedColumnId;
use store_api::storage::{RegionGroup, RegionId, ScanRequest};
-use crate::error::{CreateMitoRegionSnafu, Result};
+use crate::error::{CreateMitoRegionSnafu, InternalColumnOccupiedSnafu, Result};
use crate::utils;
/// region group value for data region inside a metric region
@@ -41,12 +42,20 @@ pub const METRIC_DATA_REGION_GROUP: RegionGroup = 0;
/// region group value for metadata region inside a metric region
pub const METRIC_METADATA_REGION_GROUP: RegionGroup = 1;
-const METADATA_SCHEMA_TIMESTAMP_COLUMN_NAME: &str = "ts";
-const METADATA_SCHEMA_KEY_COLUMN_NAME: &str = "k";
-const METADATA_SCHEMA_VALUE_COLUMN_NAME: &str = "val";
+pub const METADATA_SCHEMA_TIMESTAMP_COLUMN_NAME: &str = "ts";
+pub const METADATA_SCHEMA_KEY_COLUMN_NAME: &str = "k";
+pub const METADATA_SCHEMA_VALUE_COLUMN_NAME: &str = "v";
-const METADATA_REGION_SUBDIR: &str = "metadata";
-const DATA_REGION_SUBDIR: &str = "data";
+pub const METADATA_SCHEMA_TIMESTAMP_COLUMN_INDEX: usize = 0;
+pub const METADATA_SCHEMA_KEY_COLUMN_INDEX: usize = 1;
+pub const METADATA_SCHEMA_VALUE_COLUMN_INDEX: usize = 2;
+
+/// Column name of internal column `__metric` that stores the original metric name
+pub const DATA_SCHEMA_METRIC_NAME_COLUMN_NAME: &str = "__metric";
+pub const DATA_SCHEMA_TSID_COLUMN_NAME: &str = "__tsid";
+
+pub const METADATA_REGION_SUBDIR: &str = "metadata";
+pub const DATA_REGION_SUBDIR: &str = "data";
pub const METRIC_ENGINE_NAME: &str = "metric";
@@ -129,32 +138,32 @@ impl RegionEngine for MetricEngine {
}
}
+impl MetricEngine {
+ pub fn new(mito: MitoEngine) -> Self {
+ Self {
+ inner: Arc::new(MetricEngineInner { mito }),
+ }
+ }
+}
+
struct MetricEngineInner {
mito: MitoEngine,
}
impl MetricEngineInner {
+ /// Initialize a metric region at given region id.
pub async fn create_region(
&self,
region_id: RegionId,
request: RegionCreateRequest,
) -> Result<()> {
- self.verify_region_create_request(&request)?;
+ Self::verify_region_create_request(&request)?;
let (data_region_id, metadata_region_id) = Self::transform_region_id(region_id);
- let create_data_region_request = self.create_request_for_data_region(&request);
+
+ // create metadata region
let create_metadata_region_request =
self.create_request_for_metadata_region(&request.region_dir);
-
- // self.mito
- // .handle_request(
- // data_region_id,
- // RegionRequest::Create(create_data_region_request),
- // )
- // .await
- // .with_context(|_| CreateMitoRegionSnafu {
- // region_type: DATA_REGION_SUBDIR,
- // })?;
self.mito
.handle_request(
metadata_region_id,
@@ -165,12 +174,24 @@ impl MetricEngineInner {
region_type: METADATA_REGION_SUBDIR,
})?;
+ // create data region
+ let create_data_region_request = self.create_request_for_data_region(&request);
+ self.mito
+ .handle_request(
+ data_region_id,
+ RegionRequest::Create(create_data_region_request),
+ )
+ .await
+ .with_context(|_| CreateMitoRegionSnafu {
+ region_type: DATA_REGION_SUBDIR,
+ })?;
+
Ok(())
}
/// Check if
- /// - internal columns are present
- fn verify_region_create_request(&self, request: &RegionCreateRequest) -> Result<()> {
+ /// - internal columns are not occupied
+ fn verify_region_create_request(request: &RegionCreateRequest) -> Result<()> {
let name_to_index = request
.column_metadatas
.iter()
@@ -178,6 +199,20 @@ impl MetricEngineInner {
.map(|(idx, metadata)| (metadata.column_schema.name.clone(), idx))
.collect::<HashMap<String, usize>>();
+ // check if internal columns are not occupied
+ ensure!(
+ !name_to_index.contains_key(DATA_SCHEMA_METRIC_NAME_COLUMN_NAME),
+ InternalColumnOccupiedSnafu {
+ column: DATA_SCHEMA_METRIC_NAME_COLUMN_NAME,
+ }
+ );
+ ensure!(
+ !name_to_index.contains_key(DATA_SCHEMA_TSID_COLUMN_NAME),
+ InternalColumnOccupiedSnafu {
+ column: DATA_SCHEMA_TSID_COLUMN_NAME,
+ }
+ );
+
Ok(())
}
@@ -197,7 +232,7 @@ impl MetricEngineInner {
pub fn create_request_for_metadata_region(&self, region_dir: &str) -> RegionCreateRequest {
// ts TIME INDEX DEFAULT 0
let timestamp_column_metadata = ColumnMetadata {
- column_id: 0,
+ column_id: METADATA_SCHEMA_TIMESTAMP_COLUMN_INDEX as _,
semantic_type: SemanticType::Timestamp,
column_schema: ColumnSchema::new(
METADATA_SCHEMA_TIMESTAMP_COLUMN_NAME,
@@ -211,7 +246,7 @@ impl MetricEngineInner {
};
// key STRING PRIMARY KEY
let key_column_metadata = ColumnMetadata {
- column_id: 1,
+ column_id: METADATA_SCHEMA_KEY_COLUMN_INDEX as _,
semantic_type: SemanticType::Tag,
column_schema: ColumnSchema::new(
METADATA_SCHEMA_KEY_COLUMN_NAME,
@@ -221,7 +256,7 @@ impl MetricEngineInner {
};
// val STRING
let value_column_metadata = ColumnMetadata {
- column_id: 2,
+ column_id: METADATA_SCHEMA_VALUE_COLUMN_INDEX as _,
semantic_type: SemanticType::Field,
column_schema: ColumnSchema::new(
METADATA_SCHEMA_VALUE_COLUMN_NAME,
@@ -230,6 +265,7 @@ impl MetricEngineInner {
),
};
+ // concat region dir
let metadata_region_dir = join_dir(region_dir, METADATA_REGION_SUBDIR);
RegionCreateRequest {
@@ -239,13 +275,18 @@ impl MetricEngineInner {
key_column_metadata,
value_column_metadata,
],
- primary_key: vec![1],
+ primary_key: vec![METADATA_SCHEMA_KEY_COLUMN_INDEX as _],
options: HashMap::new(),
region_dir: metadata_region_dir,
}
}
- // todo: register "tag columns" to metadata
+ /// Convert [RegionCreateRequest] for data region.
+ ///
+ /// All tag columns in the original request will be converted to value columns.
+    /// Those columns' real semantic types are stored in the metadata region.
+ ///
+ /// This will also add internal columns to the request.
pub fn create_request_for_data_region(
&self,
request: &RegionCreateRequest,
@@ -255,57 +296,157 @@ impl MetricEngineInner {
// concat region dir
data_region_request.region_dir = join_dir(&request.region_dir, DATA_REGION_SUBDIR);
- // todo: change semantic type and primary key
-
- // todo: add internal column
+ // convert semantic type
+ data_region_request
+ .column_metadatas
+ .iter_mut()
+ .for_each(|metadata| {
+ if metadata.semantic_type == SemanticType::Tag {
+ metadata.semantic_type = SemanticType::Field;
+ }
+ });
+
+ // add internal columns
+ let metric_name_col = ColumnMetadata {
+ column_id: ReservedColumnId::metric_name(),
+ semantic_type: SemanticType::Tag,
+ column_schema: ColumnSchema::new(
+ DATA_SCHEMA_METRIC_NAME_COLUMN_NAME,
+ ConcreteDataType::string_datatype(),
+ false,
+ ),
+ };
+ let tsid_col = ColumnMetadata {
+ column_id: ReservedColumnId::tsid(),
+ semantic_type: SemanticType::Tag,
+ column_schema: ColumnSchema::new(
+ DATA_SCHEMA_TSID_COLUMN_NAME,
+ ConcreteDataType::int64_datatype(),
+ false,
+ ),
+ };
+ data_region_request.column_metadatas.push(metric_name_col);
+ data_region_request.column_metadatas.push(tsid_col);
+ data_region_request.primary_key =
+ vec![ReservedColumnId::metric_name(), ReservedColumnId::tsid()];
data_region_request
}
}
#[cfg(test)]
-mod test {
- use std::time::Duration;
-
- use common_telemetry::info;
-
+mod tests {
use super::*;
use crate::test_util::TestEnv;
- #[tokio::test]
- async fn create_metadata_region() {
- common_telemetry::init_default_ut_logging();
-
- let env = TestEnv::new().await;
- let mito = env.mito();
- let engine = MetricEngine {
- inner: Arc::new(MetricEngineInner { mito }),
+ #[test]
+ fn test_verify_region_create_request() {
+ // internal column is occupied
+ let request = RegionCreateRequest {
+ column_metadatas: vec![
+ ColumnMetadata {
+ column_id: 0,
+ semantic_type: SemanticType::Timestamp,
+ column_schema: ColumnSchema::new(
+ METADATA_SCHEMA_TIMESTAMP_COLUMN_NAME,
+ ConcreteDataType::timestamp_millisecond_datatype(),
+ false,
+ ),
+ },
+ ColumnMetadata {
+ column_id: 1,
+ semantic_type: SemanticType::Tag,
+ column_schema: ColumnSchema::new(
+ DATA_SCHEMA_METRIC_NAME_COLUMN_NAME,
+ ConcreteDataType::string_datatype(),
+ false,
+ ),
+ },
+ ],
+ region_dir: "test_dir".to_string(),
+ engine: METRIC_ENGINE_NAME.to_string(),
+ primary_key: vec![],
+ options: HashMap::new(),
};
- let engine_dir = env.data_home();
- let region_dir = join_dir(&engine_dir, "test_metric_region");
-
- let region_id = RegionId::new(1, 2);
- let region_create_request = RegionCreateRequest {
+ let result = MetricEngineInner::verify_region_create_request(&request);
+ assert!(result.is_err());
+ assert_eq!(
+ result.unwrap_err().to_string(),
+ "Internal column __metric is reserved".to_string()
+ );
+
+ // valid request
+ let request = RegionCreateRequest {
+ column_metadatas: vec![
+ ColumnMetadata {
+ column_id: 0,
+ semantic_type: SemanticType::Timestamp,
+ column_schema: ColumnSchema::new(
+ METADATA_SCHEMA_TIMESTAMP_COLUMN_NAME,
+ ConcreteDataType::timestamp_millisecond_datatype(),
+ false,
+ ),
+ },
+ ColumnMetadata {
+ column_id: 1,
+ semantic_type: SemanticType::Tag,
+ column_schema: ColumnSchema::new(
+ "column1".to_string(),
+ ConcreteDataType::string_datatype(),
+ false,
+ ),
+ },
+ ],
+ region_dir: "test_dir".to_string(),
engine: METRIC_ENGINE_NAME.to_string(),
- column_metadatas: vec![],
primary_key: vec![],
options: HashMap::new(),
- region_dir: "test_metric_region".to_string(),
};
+ let result = MetricEngineInner::verify_region_create_request(&request);
+ assert!(result.is_ok());
+ }
- // create the region
- engine
- .handle_request(region_id, RegionRequest::Create(region_create_request))
- .await
- .unwrap();
-
- // assert metadata region's dir
-        let metadata_region_dir = join_dir(&region_dir, METADATA_REGION_SUBDIR);
- let exist = tokio::fs::try_exists(region_dir).await.unwrap();
- assert!(exist);
+ #[tokio::test]
+ async fn test_create_request_for_data_region() {
+ let request = RegionCreateRequest {
+ engine: METRIC_ENGINE_NAME.to_string(),
+ column_metadatas: vec![
+ ColumnMetadata {
+ column_id: 0,
+ semantic_type: SemanticType::Timestamp,
+ column_schema: ColumnSchema::new(
+ "timestamp",
+ ConcreteDataType::timestamp_millisecond_datatype(),
+ false,
+ ),
+ },
+ ColumnMetadata {
+ column_id: 1,
+ semantic_type: SemanticType::Tag,
+ column_schema: ColumnSchema::new(
+ "tag",
+ ConcreteDataType::string_datatype(),
+ false,
+ ),
+ },
+ ],
+ primary_key: vec![0],
+ options: HashMap::new(),
+ region_dir: "test_dir".to_string(),
+ };
- // check mito engine
- let metadata_region_id = utils::to_metadata_region_id(region_id);
- let result = env.mito().get_metadata(metadata_region_id).await.unwrap();
+ let env = TestEnv::new().await;
+ let engine = MetricEngineInner { mito: env.mito() };
+ let data_region_request = engine.create_request_for_data_region(&request);
+
+ assert_eq!(
+ data_region_request.region_dir,
+ "/test_dir/data/".to_string()
+ );
+ assert_eq!(data_region_request.column_metadatas.len(), 4);
+ assert_eq!(
+ data_region_request.primary_key,
+ vec![ReservedColumnId::metric_name(), ReservedColumnId::tsid()]
+ );
}
}
diff --git a/src/metric-engine/src/error.rs b/src/metric-engine/src/error.rs
index 7bbb38afac1a..6ec8c95b53bd 100644
--- a/src/metric-engine/src/error.rs
+++ b/src/metric-engine/src/error.rs
@@ -53,6 +53,27 @@ pub enum Error {
error: base64::DecodeError,
location: Location,
},
+
+ #[snafu(display("Mito read operation fails"))]
+ MitoReadOperation {
+ source: BoxedError,
+ location: Location,
+ },
+
+ #[snafu(display("Mito write operation fails"))]
+ MitoWriteOperation {
+ source: BoxedError,
+ location: Location,
+ },
+
+ #[snafu(display("Failed to collect record batch stream"))]
+ CollectRecordBatchStream {
+ source: common_recordbatch::error::Error,
+ location: Location,
+ },
+
+ #[snafu(display("Internal column {} is reserved", column))]
+ InternalColumnOccupied { column: String, location: Location },
}
pub type Result<T, E = Error> = std::result::Result<T, E>;
@@ -62,11 +83,17 @@ impl ErrorExt for Error {
use Error::*;
match self {
+ InternalColumnOccupied { .. } => StatusCode::InvalidArguments,
+
MissingInternalColumn { .. }
| DeserializeSemanticType { .. }
| DecodeColumnValue { .. } => StatusCode::Unexpected,
- CreateMitoRegion { source, .. } => source.status_code(),
+ CreateMitoRegion { source, .. }
+ | MitoReadOperation { source, .. }
+ | MitoWriteOperation { source, .. } => source.status_code(),
+
+ CollectRecordBatchStream { source, .. } => source.status_code(),
TableAlreadyExists { .. } => StatusCode::TableAlreadyExists,
}
diff --git a/src/metric-engine/src/metadata_region.rs b/src/metric-engine/src/metadata_region.rs
index 09bdc7cd192f..0493553ed837 100644
--- a/src/metric-engine/src/metadata_region.rs
+++ b/src/metric-engine/src/metadata_region.rs
@@ -12,15 +12,26 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use api::v1::SemanticType;
+use api::v1::value::ValueData;
+use api::v1::{self, ColumnDataType, ColumnSchema, Row, Rows, SemanticType, Value};
use base64::engine::general_purpose::STANDARD_NO_PAD;
use base64::Engine;
+use common_recordbatch::util::collect;
+use datafusion::prelude::{col, lit, Expr};
+use datatypes::vectors::StringVector;
use mito2::engine::MitoEngine;
use snafu::ResultExt;
-use store_api::storage::RegionId;
+use store_api::region_engine::RegionEngine;
+use store_api::region_request::{RegionPutRequest, RegionReadRequest};
+use store_api::storage::{RegionId, ScanRequest};
+use crate::engine::{
+ METADATA_SCHEMA_KEY_COLUMN_NAME, METADATA_SCHEMA_TIMESTAMP_COLUMN_NAME,
+ METADATA_SCHEMA_VALUE_COLUMN_INDEX, METADATA_SCHEMA_VALUE_COLUMN_NAME,
+};
use crate::error::{
- DecodeColumnValueSnafu, DeserializeSemanticTypeSnafu, Result, TableAlreadyExistsSnafu,
+ CollectRecordBatchStreamSnafu, DecodeColumnValueSnafu, DeserializeSemanticTypeSnafu,
+ MitoReadOperationSnafu, MitoWriteOperationSnafu, Result, TableAlreadyExistsSnafu,
};
use crate::utils;
@@ -41,15 +52,21 @@ pub struct MetadataRegion {
}
impl MetadataRegion {
+ pub fn new(mito: MitoEngine) -> Self {
+ Self { mito }
+ }
+
/// Add a new table key to metadata.
///
/// This method will check if the table key already exists, if so, it will return
/// a [TableAlreadyExistsSnafu] error.
- pub fn add_table(&self, region_id: RegionId, table_name: &str) -> Result<()> {
+ pub async fn add_table(&self, region_id: RegionId, table_name: &str) -> Result<()> {
let region_id = utils::to_metadata_region_id(region_id);
let table_key = Self::concat_table_key(table_name);
- let put_success = self.put_conditionally(region_id, table_key, String::new())?;
+ let put_success = self
+ .put_conditionally(region_id, table_key, String::new())
+ .await?;
if !put_success {
TableAlreadyExistsSnafu { table_name }.fail()
@@ -60,14 +77,15 @@ impl MetadataRegion {
/// Add a new column key to metadata.
///
- /// This method won't check if the column already exists.
- pub fn add_column(
+ /// This method won't check if the column already exists. But
+    /// will return whether the column was successfully added.
+ pub async fn add_column(
&self,
region_id: RegionId,
table_name: &str,
column_name: &str,
semantic_type: SemanticType,
- ) -> Result<()> {
+ ) -> Result<bool> {
let region_id = utils::to_metadata_region_id(region_id);
let column_key = Self::concat_column_key(table_name, column_name);
@@ -75,8 +93,30 @@ impl MetadataRegion {
region_id,
column_key,
Self::serialize_semantic_type(semantic_type),
- )?;
- Ok(())
+ )
+ .await
+ }
+
+ /// Check if the given table exists.
+ pub async fn is_table_exist(&self, region_id: RegionId, table_name: &str) -> Result<bool> {
+ let region_id = utils::to_metadata_region_id(region_id);
+ let table_key = Self::concat_table_key(table_name);
+ self.exist(region_id, &table_key).await
+ }
+
+ /// Check if the given column exists. Return the semantic type if exists.
+ pub async fn column_semantic_type(
+ &self,
+ region_id: RegionId,
+ table_name: &str,
+ column_name: &str,
+ ) -> Result<Option<SemanticType>> {
+ let region_id = utils::to_metadata_region_id(region_id);
+ let column_key = Self::concat_column_key(table_name, column_name);
+ let semantic_type = self.get(region_id, &column_key).await?;
+ semantic_type
+ .map(|s| Self::deserialize_semantic_type(&s))
+ .transpose()
}
}
@@ -136,24 +176,134 @@ impl MetadataRegion {
impl MetadataRegion {
/// Put if not exist, return if this put operation is successful (error other
/// than "key already exist" will be wrapped in [Err]).
- pub fn put_conditionally(
+ pub async fn put_conditionally(
&self,
region_id: RegionId,
key: String,
value: String,
) -> Result<bool> {
- todo!()
+ if self.exist(region_id, &key).await? {
+ return Ok(false);
+ }
+
+ let put_request = Self::build_put_request(&key, &value);
+ self.mito
+ .handle_request(
+ region_id,
+ store_api::region_request::RegionRequest::Put(put_request),
+ )
+ .await
+ .context(MitoWriteOperationSnafu)?;
+ Ok(true)
}
/// Check if the given key exists.
- pub fn exist(&self, region_id: RegionId, key: &str) -> Result<bool> {
- todo!()
+ ///
+    /// Notice that since mito doesn't support transactions, TOCTTOU is possible.
+ pub async fn exist(&self, region_id: RegionId, key: &str) -> Result<bool> {
+ let scan_req = Self::build_read_request(key);
+ let record_batch_stream = self
+ .mito
+ .handle_query(region_id, scan_req)
+ .await
+ .context(MitoReadOperationSnafu)?;
+ let scan_result = collect(record_batch_stream)
+ .await
+ .context(CollectRecordBatchStreamSnafu)?;
+
+ let exist = !scan_result.is_empty() && scan_result.first().unwrap().num_rows() != 0;
+ Ok(exist)
+ }
+
+ /// Retrieves the value associated with the given key in the specified region.
+ /// Returns `Ok(None)` if the key is not found.
+ pub async fn get(&self, region_id: RegionId, key: &str) -> Result<Option<String>> {
+ let scan_req = Self::build_read_request(key);
+ let record_batch_stream = self
+ .mito
+ .handle_query(region_id, scan_req)
+ .await
+ .context(MitoReadOperationSnafu)?;
+ let mut scan_result = collect(record_batch_stream)
+ .await
+ .context(CollectRecordBatchStreamSnafu)?;
+
+ let Some(first_batch) = scan_result.first() else {
+ return Ok(None);
+ };
+
+ let val = first_batch
+ .column(0)
+ .get_ref(0)
+ .as_string()
+ .unwrap()
+ .map(|s| s.to_string());
+
+ Ok(val)
+ }
+
+ /// Builds a [ScanRequest] to read metadata for a given key.
+ /// The request will contains a EQ filter on the key column.
+ ///
+ /// Only the value column is projected.
+ fn build_read_request(key: &str) -> ScanRequest {
+ let filter_expr = col(METADATA_SCHEMA_KEY_COLUMN_NAME).eq(lit(key));
+
+ ScanRequest {
+ sequence: None,
+ projection: Some(vec![METADATA_SCHEMA_VALUE_COLUMN_INDEX]),
+ filters: vec![filter_expr.into()],
+ output_ordering: None,
+ limit: None,
+ }
+ }
+
+ fn build_put_request(key: &str, value: &str) -> RegionPutRequest {
+ let cols = vec![
+ ColumnSchema {
+ column_name: METADATA_SCHEMA_TIMESTAMP_COLUMN_NAME.to_string(),
+ datatype: ColumnDataType::TimestampMillisecond as _,
+ semantic_type: SemanticType::Timestamp as _,
+ },
+ ColumnSchema {
+ column_name: METADATA_SCHEMA_KEY_COLUMN_NAME.to_string(),
+ datatype: ColumnDataType::String as _,
+ semantic_type: SemanticType::Tag as _,
+ },
+ ColumnSchema {
+ column_name: METADATA_SCHEMA_VALUE_COLUMN_NAME.to_string(),
+ datatype: ColumnDataType::String as _,
+ semantic_type: SemanticType::Field as _,
+ },
+ ];
+ let rows = Rows {
+ schema: cols,
+ rows: vec![Row {
+ values: vec![
+ Value {
+ value_data: Some(ValueData::TimestampMillisecondValue(0)),
+ },
+ Value {
+ value_data: Some(ValueData::StringValue(key.to_string())),
+ },
+ Value {
+ value_data: Some(ValueData::StringValue(value.to_string())),
+ },
+ ],
+ }],
+ };
+
+ RegionPutRequest { rows }
}
}
#[cfg(test)]
mod test {
+ use store_api::region_request::RegionRequest;
+
use super::*;
+ use crate::test_util::TestEnv;
+ use crate::utils::to_metadata_region_id;
#[test]
fn test_concat_table_key() {
@@ -226,4 +376,163 @@ mod test {
let semantic_type = "\"InvalidType\"";
assert!(MetadataRegion::deserialize_semantic_type(semantic_type).is_err());
}
+
+ #[test]
+ fn test_build_read_request() {
+ let key = "test_key";
+ let expected_filter_expr = col(METADATA_SCHEMA_KEY_COLUMN_NAME).eq(lit(key));
+ let expected_scan_request = ScanRequest {
+ sequence: None,
+ projection: Some(vec![METADATA_SCHEMA_VALUE_COLUMN_INDEX]),
+ filters: vec![expected_filter_expr.into()],
+ output_ordering: None,
+ limit: None,
+ };
+ let actual_scan_request = MetadataRegion::build_read_request(key);
+ assert_eq!(actual_scan_request, expected_scan_request);
+ }
+
+ #[tokio::test]
+ async fn test_put_conditionally() {
+ let env = TestEnv::new().await;
+ env.init_metric_region().await;
+ let metadata_region = env.metadata_region();
+ let region_id = to_metadata_region_id(env.default_region_id());
+
+ // Test inserting a new key-value pair
+ let key = "test_key".to_string();
+ let value = "test_value".to_string();
+ let result = metadata_region
+ .put_conditionally(region_id, key.clone(), value.clone())
+ .await;
+ assert!(result.is_ok());
+ assert!(result.unwrap());
+
+ // Verify that the key-value pair was actually inserted
+ let scan_req = MetadataRegion::build_read_request("test_key");
+ let record_batch_stream = metadata_region
+ .mito
+ .handle_query(region_id, scan_req)
+ .await
+ .unwrap();
+ let scan_result = collect(record_batch_stream).await.unwrap();
+ assert_eq!(scan_result.len(), 1);
+
+ // Test inserting the same key-value pair again
+ let result = metadata_region
+ .put_conditionally(region_id, key.clone(), value.clone())
+ .await;
+ assert!(result.is_ok());
+ assert!(!result.unwrap(),);
+ }
+
+ #[tokio::test]
+ async fn test_exist() {
+ let env = TestEnv::new().await;
+ env.init_metric_region().await;
+ let metadata_region = env.metadata_region();
+ let region_id = to_metadata_region_id(env.default_region_id());
+
+ // Test checking for a non-existent key
+ let key = "test_key".to_string();
+ let result = metadata_region.exist(region_id, &key).await;
+ assert!(result.is_ok());
+ assert!(!result.unwrap());
+
+ // Test inserting a key and then checking for its existence
+ let value = "test_value".to_string();
+ let put_request = MetadataRegion::build_put_request(&key, &value);
+ metadata_region
+ .mito
+ .handle_request(region_id, RegionRequest::Put(put_request))
+ .await
+ .unwrap();
+ let result = metadata_region.exist(region_id, &key).await;
+ assert!(result.is_ok());
+ assert!(result.unwrap(),);
+ }
+
+ #[tokio::test]
+ async fn test_get() {
+ let env = TestEnv::new().await;
+ env.init_metric_region().await;
+ let metadata_region = env.metadata_region();
+ let region_id = to_metadata_region_id(env.default_region_id());
+
+ // Test getting a non-existent key
+ let key = "test_key".to_string();
+ let result = metadata_region.get(region_id, &key).await;
+ assert!(result.is_ok());
+ assert_eq!(result.unwrap(), None);
+
+ // Test inserting a key and then getting its value
+ let value = "test_value".to_string();
+ let put_request = MetadataRegion::build_put_request(&key, &value);
+ metadata_region
+ .mito
+ .handle_request(region_id, RegionRequest::Put(put_request))
+ .await
+ .unwrap();
+ let result = metadata_region.get(region_id, &key).await;
+ assert!(result.is_ok());
+ assert_eq!(result.unwrap(), Some(value));
+ }
+
+ #[tokio::test]
+ async fn test_add_table() {
+ let env = TestEnv::new().await;
+ env.init_metric_region().await;
+ let metadata_region = env.metadata_region();
+ let region_id = to_metadata_region_id(env.default_region_id());
+
+ // add one table
+ let table_name = "table1";
+ metadata_region
+ .add_table(region_id, table_name)
+ .await
+ .unwrap();
+ assert!(metadata_region
+ .is_table_exist(region_id, table_name)
+ .await
+ .unwrap());
+
+ // add it again
+ assert!(metadata_region
+ .add_table(region_id, table_name)
+ .await
+ .is_err());
+ }
+
+ #[tokio::test]
+ async fn test_add_column() {
+ let env = TestEnv::new().await;
+ env.init_metric_region().await;
+ let metadata_region = env.metadata_region();
+ let region_id = to_metadata_region_id(env.default_region_id());
+
+ let table_name = "table1";
+ let column_name = "column1";
+ let semantic_type = SemanticType::Tag;
+ metadata_region
+ .add_column(region_id, table_name, column_name, semantic_type)
+ .await
+ .unwrap();
+ let actual_semantic_type = metadata_region
+ .column_semantic_type(region_id, table_name, column_name)
+ .await
+ .unwrap();
+ assert_eq!(actual_semantic_type, Some(semantic_type));
+
+ // duplicate column won't be updated
+ let is_updated = metadata_region
+ .add_column(region_id, table_name, column_name, SemanticType::Field)
+ .await
+ .unwrap();
+ assert!(!is_updated);
+ let actual_semantic_type = metadata_region
+ .column_semantic_type(region_id, table_name, column_name)
+ .await
+ .unwrap();
+ assert_eq!(actual_semantic_type, Some(semantic_type));
+ }
}
diff --git a/src/metric-engine/src/test_util.rs b/src/metric-engine/src/test_util.rs
index 313a2ceba881..7426aadb49fd 100644
--- a/src/metric-engine/src/test_util.rs
+++ b/src/metric-engine/src/test_util.rs
@@ -14,10 +14,22 @@
//! Utilities for testing.
+use std::collections::HashMap;
+
+use api::v1::SemanticType;
+use datatypes::prelude::ConcreteDataType;
+use datatypes::schema::ColumnSchema;
use mito2::config::MitoConfig;
use mito2::engine::MitoEngine;
use mito2::test_util::TestEnv as MitoTestEnv;
use object_store::util::join_dir;
+use store_api::metadata::ColumnMetadata;
+use store_api::region_engine::RegionEngine;
+use store_api::region_request::{RegionCreateRequest, RegionRequest};
+use store_api::storage::RegionId;
+
+use crate::engine::{MetricEngine, METRIC_ENGINE_NAME};
+use crate::metadata_region::MetadataRegion;
/// Env to test metric engine.
pub struct TestEnv {
@@ -47,4 +59,78 @@ impl TestEnv {
pub fn mito(&self) -> MitoEngine {
self.mito.clone()
}
+
+ pub fn metric(&self) -> MetricEngine {
+ MetricEngine::new(self.mito())
+ }
+
+ /// Create regions in [MetricEngine] under [`default_region_id`](TestEnv::default_region_id)
+ /// and region dir `"test_metric_region"`.
+ pub async fn init_metric_region(&self) {
+ let region_id = self.default_region_id();
+ let region_create_request = RegionCreateRequest {
+ engine: METRIC_ENGINE_NAME.to_string(),
+ column_metadatas: vec![ColumnMetadata {
+ column_id: 0,
+ semantic_type: SemanticType::Timestamp,
+ column_schema: ColumnSchema::new(
+ "greptime_timestamp",
+ ConcreteDataType::timestamp_millisecond_datatype(),
+ false,
+ ),
+ }],
+ primary_key: vec![],
+ options: HashMap::new(),
+ region_dir: "test_metric_region".to_string(),
+ };
+
+ // create regions
+ self.metric()
+ .handle_request(region_id, RegionRequest::Create(region_create_request))
+ .await
+ .unwrap();
+ }
+
+ pub fn metadata_region(&self) -> MetadataRegion {
+ MetadataRegion::new(self.mito())
+ }
+
+ /// `RegionId::new(1, 2)`
+ pub fn default_region_id(&self) -> RegionId {
+ RegionId::new(1, 2)
+ }
+}
+
+#[cfg(test)]
+mod test {
+
+ use super::*;
+ use crate::engine::{DATA_REGION_SUBDIR, METADATA_REGION_SUBDIR};
+ use crate::utils::{self, to_metadata_region_id};
+
+ #[tokio::test]
+ async fn create_metadata_region() {
+ common_telemetry::init_default_ut_logging();
+
+ let env = TestEnv::new().await;
+ env.init_metric_region().await;
+ let region_id = to_metadata_region_id(env.default_region_id());
+ let region_dir = join_dir(&env.data_home(), "test_metric_region");
+
+ // assert metadata region's dir
+        let metadata_region_dir = join_dir(&region_dir, METADATA_REGION_SUBDIR);
+ let exist = tokio::fs::try_exists(metadata_region_dir).await.unwrap();
+ assert!(exist);
+
+ // assert data region's dir
+        let data_region_dir = join_dir(&region_dir, DATA_REGION_SUBDIR);
+ let exist = tokio::fs::try_exists(data_region_dir).await.unwrap();
+ assert!(exist);
+
+ // check mito engine
+ let metadata_region_id = utils::to_metadata_region_id(region_id);
+ let _ = env.mito().get_metadata(metadata_region_id).await.unwrap();
+ let data_region_id = utils::to_data_region_id(region_id);
+ let _ = env.mito().get_metadata(data_region_id).await.unwrap();
+ }
}
diff --git a/src/store-api/src/storage/consts.rs b/src/store-api/src/storage/consts.rs
index 284d98177a46..67435003532a 100644
--- a/src/store-api/src/storage/consts.rs
+++ b/src/store-api/src/storage/consts.rs
@@ -37,6 +37,8 @@ enum ReservedColumnType {
Version = 0,
Sequence,
OpType,
+ Tsid,
+ MetricName,
}
/// Column id reserved by the engine.
@@ -66,6 +68,20 @@ impl ReservedColumnId {
pub const fn op_type() -> ColumnId {
Self::BASE | ReservedColumnType::OpType as ColumnId
}
+
+ /// Id for storing TSID column.
+ ///
+ /// Used by: metric engine
+ pub const fn tsid() -> ColumnId {
+ Self::BASE | ReservedColumnType::Tsid as ColumnId
+ }
+
+ /// Id for storing metric name column.
+ ///
+ /// Used by: metric engine
+ pub const fn metric_name() -> ColumnId {
+ Self::BASE | ReservedColumnType::MetricName as ColumnId
+ }
}
// -----------------------------------------------------------------------------
diff --git a/src/store-api/src/storage/requests.rs b/src/store-api/src/storage/requests.rs
index 2597b7661479..89e68687aacb 100644
--- a/src/store-api/src/storage/requests.rs
+++ b/src/store-api/src/storage/requests.rs
@@ -46,7 +46,7 @@ pub trait WriteRequest: Send {
fn delete(&mut self, keys: HashMap<String, VectorRef>) -> Result<(), Self::Error>;
}
-#[derive(Default, Clone, Debug)]
+#[derive(Default, Clone, Debug, PartialEq, Eq)]
pub struct ScanRequest {
/// Max sequence number to read, None for latest sequence.
///
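
One detail worth noting from the `consts.rs` hunk above: each reserved column id is the engine's base bit OR-ed with the `ReservedColumnType` discriminant, so appending `Tsid` and `MetricName` to the enum yields fresh ids without disturbing the existing reserved columns. A minimal sketch, assuming a placeholder base of `1 << 31` (the real `BASE` constant is not shown in this diff):

```rust
// Hedged sketch of the reserved-column-id derivation. `BASE` below is an
// assumed placeholder; only the bit-OR pattern mirrors the diff above.
type ColumnId = u32;

#[allow(dead_code)]
enum ReservedColumnType {
    Version = 0,
    Sequence,
    OpType,
    Tsid,       // new in this commit, used by the metric engine
    MetricName, // new in this commit, used by the metric engine
}

struct ReservedColumnId;

impl ReservedColumnId {
    // Assumption: a high bit keeps reserved ids disjoint from user column ids.
    const BASE: ColumnId = 1 << 31;

    const fn tsid() -> ColumnId {
        Self::BASE | ReservedColumnType::Tsid as ColumnId
    }

    const fn metric_name() -> ColumnId {
        Self::BASE | ReservedColumnType::MetricName as ColumnId
    }
}

fn main() {
    // Discriminants 3 and 4 follow Version/Sequence/OpType, so the two new
    // ids never collide with the previously reserved columns.
    assert_eq!(ReservedColumnId::tsid(), (1 << 31) | 3);
    assert_eq!(ReservedColumnId::metric_name(), (1 << 31) | 4);
    println!("tsid = {:#010x}", ReservedColumnId::tsid());
}
```

Because the discriminants only grow, further reserved columns can keep being appended the same way.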
|
feat
|
implement create region request for metric engine (#2694)
|
96f32a166a54a59f8180692871e71c24f8e9d0a4
|
2024-02-05 15:00:22
|
Weny Xu
|
chore: share cache across jobs (#3284)
| false
|
diff --git a/.github/workflows/develop.yml b/.github/workflows/develop.yml
index 0d55c6d2a8c4..e8dfe23f7c1b 100644
--- a/.github/workflows/develop.yml
+++ b/.github/workflows/develop.yml
@@ -58,6 +58,10 @@ jobs:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- name: Rust Cache
uses: Swatinem/rust-cache@v2
+ with:
+ # Shares across multiple jobs
+ # Shares with `Clippy` job
+ shared-key: "check-lint"
- name: Run cargo check
run: cargo check --locked --workspace --all-targets
@@ -73,6 +77,9 @@ jobs:
toolchain: stable
- name: Rust Cache
uses: Swatinem/rust-cache@v2
+ with:
+ # Shares across multiple jobs
+ shared-key: "check-toml"
- name: Install taplo
run: cargo +stable install taplo-cli --version ^0.8 --locked
- name: Run taplo
@@ -93,6 +100,9 @@ jobs:
with:
toolchain: ${{ env.RUST_TOOLCHAIN }}
- uses: Swatinem/rust-cache@v2
+ with:
+ # Shares across multiple jobs
+ shared-key: "build-binaries"
- name: Build greptime binaries
shell: bash
run: cargo build
@@ -185,6 +195,9 @@ jobs:
components: rustfmt
- name: Rust Cache
uses: Swatinem/rust-cache@v2
+ with:
+ # Shares across multiple jobs
+ shared-key: "check-rust-fmt"
- name: Run cargo fmt
run: cargo fmt --all -- --check
@@ -204,6 +217,10 @@ jobs:
components: clippy
- name: Rust Cache
uses: Swatinem/rust-cache@v2
+ with:
+ # Shares across multiple jobs
+ # Shares with `Check` job
+ shared-key: "check-lint"
- name: Run cargo clippy
run: cargo clippy --workspace --all-targets -- -D warnings
@@ -226,6 +243,9 @@ jobs:
components: llvm-tools-preview
- name: Rust Cache
uses: Swatinem/rust-cache@v2
+ with:
+          # Shares across multiple jobs
+ shared-key: "coverage-test"
- name: Install latest nextest release
uses: taiki-e/install-action@nextest
- name: Install cargo-llvm-cov
|
chore
|
share cache across jobs (#3284)
|
5ec1a7027bd78a95025376e8a09072ff66bbda1e
|
2023-02-16 13:41:26
|
dennis zhuang
|
feat: supports passing user params into coprocessor (#962)
| false
|
diff --git a/src/datanode/src/instance/script.rs b/src/datanode/src/instance/script.rs
index 8dd878546ba4..fc7757a365de 100644
--- a/src/datanode/src/instance/script.rs
+++ b/src/datanode/src/instance/script.rs
@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::collections::HashMap;
+
use async_trait::async_trait;
use common_query::Output;
use common_telemetry::timer;
@@ -34,8 +36,15 @@ impl ScriptHandler for Instance {
.await
}
- async fn execute_script(&self, schema: &str, name: &str) -> servers::error::Result<Output> {
+ async fn execute_script(
+ &self,
+ schema: &str,
+ name: &str,
+ params: HashMap<String, String>,
+ ) -> servers::error::Result<Output> {
let _timer = timer!(metric::METRIC_RUN_SCRIPT_ELAPSED);
- self.script_executor.execute_script(schema, name).await
+ self.script_executor
+ .execute_script(schema, name, params)
+ .await
}
}
diff --git a/src/datanode/src/script.rs b/src/datanode/src/script.rs
index b7cc622c95da..d63efccc6e28 100644
--- a/src/datanode/src/script.rs
+++ b/src/datanode/src/script.rs
@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::collections::HashMap;
+
use catalog::CatalogManagerRef;
use common_query::Output;
use query::QueryEngineRef;
@@ -34,13 +36,19 @@ mod dummy {
pub async fn insert_script(
&self,
+ _schema: &str,
_name: &str,
_script: &str,
) -> servers::error::Result<()> {
servers::error::NotSupportedSnafu { feat: "script" }.fail()
}
- pub async fn execute_script(&self, _script: &str) -> servers::error::Result<Output> {
+ pub async fn execute_script(
+ &self,
+ _schema: &str,
+ _name: &str,
+ _params: HashMap<String, String>,
+ ) -> servers::error::Result<Output> {
servers::error::NotSupportedSnafu { feat: "script" }.fail()
}
}
@@ -94,9 +102,10 @@ mod python {
&self,
schema: &str,
name: &str,
+ params: HashMap<String, String>,
) -> servers::error::Result<Output> {
self.script_manager
- .execute(schema, name)
+ .execute(schema, name, params)
.await
.map_err(|e| {
error!(e; "Instance failed to execute script");
diff --git a/src/frontend/src/instance.rs b/src/frontend/src/instance.rs
index 11becc4a7e83..127597bc7d92 100644
--- a/src/frontend/src/instance.rs
+++ b/src/frontend/src/instance.rs
@@ -19,6 +19,7 @@ mod opentsdb;
mod prometheus;
mod standalone;
+use std::collections::HashMap;
use std::sync::Arc;
use std::time::Duration;
@@ -512,9 +513,14 @@ impl ScriptHandler for Instance {
}
}
- async fn execute_script(&self, schema: &str, script: &str) -> server_error::Result<Output> {
+ async fn execute_script(
+ &self,
+ schema: &str,
+ script: &str,
+ params: HashMap<String, String>,
+ ) -> server_error::Result<Output> {
if let Some(handler) = &self.script_handler {
- handler.execute_script(schema, script).await
+ handler.execute_script(schema, script, params).await
} else {
server_error::NotSupportedSnafu {
feat: "Script execution in Frontend",
diff --git a/src/script/src/engine.rs b/src/script/src/engine.rs
index 004ce351bb91..78ddfcf283c1 100644
--- a/src/script/src/engine.rs
+++ b/src/script/src/engine.rs
@@ -15,6 +15,7 @@
//! Script engine
use std::any::Any;
+use std::collections::HashMap;
use async_trait::async_trait;
use common_error::ext::ErrorExt;
@@ -30,7 +31,11 @@ pub trait Script {
fn as_any(&self) -> &dyn Any;
/// Execute the script and returns the output.
- async fn execute(&self, ctx: EvalContext) -> std::result::Result<Output, Self::Error>;
+ async fn execute(
+ &self,
+ params: HashMap<String, String>,
+ ctx: EvalContext,
+ ) -> std::result::Result<Output, Self::Error>;
}
#[async_trait]
diff --git a/src/script/src/manager.rs b/src/script/src/manager.rs
index 637e1194a742..ba79a361c81b 100644
--- a/src/script/src/manager.rs
+++ b/src/script/src/manager.rs
@@ -76,7 +76,12 @@ impl ScriptManager {
Ok(compiled_script)
}
- pub async fn execute(&self, schema: &str, name: &str) -> Result<Output> {
+ pub async fn execute(
+ &self,
+ schema: &str,
+ name: &str,
+ params: HashMap<String, String>,
+ ) -> Result<Output> {
let script = {
let s = self.compiled.read().unwrap().get(name).cloned();
@@ -90,7 +95,7 @@ impl ScriptManager {
let script = script.context(ScriptNotFoundSnafu { name })?;
script
- .execute(EvalContext::default())
+ .execute(params, EvalContext::default())
.await
.context(ExecutePythonSnafu { name })
}
diff --git a/src/script/src/python/builtins.rs b/src/script/src/python/builtins.rs
index 71815f175038..e37bd6b60ca3 100644
--- a/src/script/src/python/builtins.rs
+++ b/src/script/src/python/builtins.rs
@@ -97,7 +97,7 @@ pub fn try_into_columnar_value(obj: PyObjectRef, vm: &VirtualMachine) -> PyResul
.borrow_vec()
.iter()
.map(|obj| -> PyResult<ScalarValue> {
- let col = try_into_columnar_value(obj.to_owned(), vm)?;
+ let col = try_into_columnar_value(obj.clone(), vm)?;
match col {
DFColValue::Array(arr) => Err(vm.new_type_error(format!(
"Expect only scalar value in a list, found a vector of type {:?} nested in list", arr.data_type()
@@ -243,7 +243,7 @@ macro_rules! bind_aggr_fn {
$(
Arc::new(expressions::Column::new(stringify!($EXPR_ARGS), 0)) as _,
)*
- stringify!($AGGR_FUNC), $DATA_TYPE.to_owned()),
+ stringify!($AGGR_FUNC), $DATA_TYPE.clone()),
$ARGS, $VM)
};
}
@@ -597,7 +597,7 @@ pub(crate) mod greptime_builtin {
Arc::new(percent) as _,
],
"ApproxPercentileCont",
- (values.to_arrow_array().data_type()).to_owned(),
+ (values.to_arrow_array().data_type()).clone(),
)
.map_err(|err| from_df_err(err, vm))?,
&[values.to_arrow_array()],
@@ -839,7 +839,7 @@ pub(crate) mod greptime_builtin {
return Ok(ret.into());
}
let cur = cur.slice(0, cur.len() - 1); // except the last one that is
- let fill = gen_none_array(cur.data_type().to_owned(), 1, vm)?;
+ let fill = gen_none_array(cur.data_type().clone(), 1, vm)?;
let ret = compute::concat(&[&*fill, &*cur]).map_err(|err| {
vm.new_runtime_error(format!("Can't concat array[0] with array[0:-1]!{err:#?}"))
})?;
@@ -864,7 +864,7 @@ pub(crate) mod greptime_builtin {
return Ok(ret.into());
}
let cur = cur.slice(1, cur.len() - 1); // except the last one that is
- let fill = gen_none_array(cur.data_type().to_owned(), 1, vm)?;
+ let fill = gen_none_array(cur.data_type().clone(), 1, vm)?;
let ret = compute::concat(&[&*cur, &*fill]).map_err(|err| {
vm.new_runtime_error(format!("Can't concat array[0] with array[0:-1]!{err:#?}"))
})?;
@@ -1048,7 +1048,7 @@ pub(crate) mod greptime_builtin {
match (ch.is_ascii_digit(), &state) {
(true, State::Separator(_)) => {
let res = &input[prev..idx];
- let res = State::Separator(res.to_owned());
+ let res = State::Separator(res.to_string());
parsed.push(res);
prev = idx;
state = State::Num(Default::default());
@@ -1073,7 +1073,7 @@ pub(crate) mod greptime_builtin {
}
State::Separator(_) => {
let res = &input[prev..];
- State::Separator(res.to_owned())
+ State::Separator(res.to_string())
}
};
parsed.push(last);
diff --git a/src/script/src/python/builtins/test.rs b/src/script/src/python/builtins/test.rs
index 6738f9e7229f..46cafb5ee32f 100644
--- a/src/script/src/python/builtins/test.rs
+++ b/src/script/src/python/builtins/test.rs
@@ -240,10 +240,7 @@ impl PyValue {
.as_any()
.downcast_ref::<Float64Array>()
.ok_or(format!("Can't cast {vec_f64:#?} to Float64Array!"))?;
- let ret = vec_f64
- .into_iter()
- .map(|v| v.map(|inner| inner.to_owned()))
- .collect::<Vec<_>>();
+ let ret = vec_f64.into_iter().collect::<Vec<_>>();
if ret.iter().all(|x| x.is_some()) {
Ok(Self::FloatVec(
ret.into_iter().map(|i| i.unwrap()).collect(),
@@ -266,7 +263,6 @@ impl PyValue {
v.ok_or(format!(
"No null element expected, found one in {idx} position"
))
- .map(|v| v.to_owned())
})
.collect::<Result<_, String>>()?;
Ok(Self::IntVec(ret))
@@ -275,13 +271,13 @@ impl PyValue {
}
} else if is_instance::<PyInt>(obj, vm) {
let res = obj
- .to_owned()
+ .clone()
.try_into_value::<i64>(vm)
.map_err(|err| format_py_error(err, vm).to_string())?;
Ok(Self::Int(res))
} else if is_instance::<PyFloat>(obj, vm) {
let res = obj
- .to_owned()
+ .clone()
.try_into_value::<f64>(vm)
.map_err(|err| format_py_error(err, vm).to_string())?;
Ok(Self::Float(res))
@@ -338,7 +334,7 @@ fn run_builtin_fn_testcases() {
.compile(
&case.script,
rustpython_compiler_core::Mode::BlockExpr,
- "<embedded>".to_owned(),
+ "<embedded>".to_string(),
)
.map_err(|err| vm.new_syntax_error(&err))
.unwrap();
@@ -389,7 +385,7 @@ fn set_item_into_scope(
scope
.locals
.as_object()
- .set_item(&name.to_owned(), vm.new_pyobj(value), vm)
+ .set_item(&name.to_string(), vm.new_pyobj(value), vm)
.map_err(|err| {
format!(
"Error in setting var {name} in scope: \n{}",
@@ -408,7 +404,7 @@ fn set_lst_of_vecs_in_scope(
scope
.locals
.as_object()
- .set_item(name.to_owned(), vm.new_pyobj(vector), vm)
+ .set_item(&name.to_string(), vm.new_pyobj(vector), vm)
.map_err(|err| {
format!(
"Error in setting var {name} in scope: \n{}",
@@ -447,7 +443,7 @@ fn test_vm() {
from udf_builtins import *
sin(values)"#,
rustpython_compiler_core::Mode::BlockExpr,
- "<embedded>".to_owned(),
+ "<embedded>".to_string(),
)
.map_err(|err| vm.new_syntax_error(&err))
.unwrap();
diff --git a/src/script/src/python/coprocessor.rs b/src/script/src/python/coprocessor.rs
index 9e8bf8e50b13..9f793887e4bd 100644
--- a/src/script/src/python/coprocessor.rs
+++ b/src/script/src/python/coprocessor.rs
@@ -16,7 +16,7 @@ pub mod compile;
pub mod parse;
use std::cell::RefCell;
-use std::collections::HashSet;
+use std::collections::{HashMap, HashSet};
use std::result::Result as StdResult;
use std::sync::{Arc, Weak};
@@ -36,7 +36,7 @@ use rustpython_vm::AsObject;
#[cfg(test)]
use serde::Deserialize;
use snafu::{OptionExt, ResultExt};
-use vm::builtins::{PyBaseExceptionRef, PyList, PyListRef, PyTuple};
+use vm::builtins::{PyBaseExceptionRef, PyDict, PyList, PyListRef, PyStr, PyTuple};
use vm::convert::ToPyObject;
use vm::scope::Scope;
use vm::{pyclass, Interpreter, PyObjectRef, PyPayload, PyResult, VirtualMachine};
@@ -73,6 +73,8 @@ pub struct Coprocessor {
pub arg_types: Vec<Option<AnnotationInfo>>,
/// get from python function returns' annotation, first is type, second is is_nullable
pub return_types: Vec<Option<AnnotationInfo>>,
+ /// kwargs in coprocessor function's signature
+ pub kwarg: Option<String>,
/// store its corresponding script, also skip serde when in `cfg(test)` to reduce work in compare
#[cfg_attr(test, serde(skip))]
pub script: String,
@@ -103,7 +105,7 @@ impl From<&Arc<dyn QueryEngine>> for QueryEngineWeakRef {
impl std::fmt::Debug for QueryEngineWeakRef {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_tuple("QueryEngineWeakRef")
- .field(&self.0.upgrade().map(|f| f.name().to_owned()))
+ .field(&self.0.upgrade().map(|f| f.name().to_string()))
.finish()
}
}
@@ -147,7 +149,7 @@ impl Coprocessor {
let AnnotationInfo {
datatype: ty,
is_nullable,
- } = anno[idx].to_owned().unwrap_or_else(|| {
+ } = anno[idx].clone().unwrap_or_else(|| {
// default to be not nullable and use DataType inferred by PyVector itself
AnnotationInfo {
datatype: Some(real_ty.clone()),
@@ -248,20 +250,23 @@ fn check_args_anno_real_type(
rb: &RecordBatch,
) -> Result<()> {
for (idx, arg) in args.iter().enumerate() {
- let anno_ty = copr.arg_types[idx].to_owned();
- let real_ty = arg.to_arrow_array().data_type().to_owned();
+ let anno_ty = copr.arg_types[idx].clone();
+ let real_ty = arg.to_arrow_array().data_type().clone();
let real_ty = ConcreteDataType::from_arrow_type(&real_ty);
let is_nullable: bool = rb.schema.column_schemas()[idx].is_nullable();
ensure!(
anno_ty
- .to_owned()
+ .clone()
.map(|v| v.datatype.is_none() // like a vector[_]
- || v.datatype == Some(real_ty.to_owned()) && v.is_nullable == is_nullable)
+ || v.datatype == Some(real_ty.clone()) && v.is_nullable == is_nullable)
.unwrap_or(true),
OtherSnafu {
reason: format!(
"column {}'s Type annotation is {:?}, but actual type is {:?}",
- copr.deco_args.arg_names[idx], anno_ty, real_ty
+ // It's safe to unwrap here, we already ensure the args and types number is the same when parsing
+ copr.deco_args.arg_names.as_ref().unwrap()[idx],
+ anno_ty,
+ real_ty
)
}
)
@@ -343,12 +348,12 @@ fn set_items_in_scope(
/// You can return constant in python code like `return 1, 1.0, True`
/// which create a constant array(with same value)(currently support int, float and bool) as column on return
#[cfg(test)]
-pub fn exec_coprocessor(script: &str, rb: &RecordBatch) -> Result<RecordBatch> {
+pub fn exec_coprocessor(script: &str, rb: &Option<RecordBatch>) -> Result<RecordBatch> {
// 1. parse the script and check if it's only a function with `@coprocessor` decorator, and get `args` and `returns`,
// 2. also check for exist of `args` in `rb`, if not found, return error
// TODO(discord9): cache the result of parse_copr
let copr = parse::parse_and_compile_copr(script, None)?;
- exec_parsed(&copr, rb)
+ exec_parsed(&copr, rb, &HashMap::new())
}
#[pyclass(module = false, name = "query_engine")]
@@ -412,7 +417,7 @@ impl PyQueryEngine {
for rb in rbs.iter() {
let mut vec_of_vec = Vec::with_capacity(rb.columns().len());
for v in rb.columns() {
- let v = PyVector::from(v.to_owned());
+ let v = PyVector::from(v.clone());
vec_of_vec.push(v.to_pyobject(vm));
}
let vec_of_vec = PyList::new_ref(vec_of_vec, vm.as_ref()).to_pyobject(vm);
@@ -440,18 +445,25 @@ fn set_query_engine_in_scope(
.map_err(|e| format_py_error(e, vm))
}
-pub(crate) fn exec_with_cached_vm(
+fn exec_with_cached_vm(
copr: &Coprocessor,
- rb: &RecordBatch,
+ rb: &Option<RecordBatch>,
args: Vec<PyVector>,
+ params: &HashMap<String, String>,
vm: &Arc<Interpreter>,
) -> Result<RecordBatch> {
vm.enter(|vm| -> Result<RecordBatch> {
PyVector::make_class(&vm.ctx);
// set arguments with given name and values
let scope = vm.new_scope_with_builtins();
- set_items_in_scope(&scope, vm, &copr.deco_args.arg_names, args)?;
- set_dataframe_in_scope(&scope, vm, "dataframe", rb)?;
+ if let Some(rb) = rb {
+ set_dataframe_in_scope(&scope, vm, "dataframe", rb)?;
+ }
+
+ if let Some(arg_names) = &copr.deco_args.arg_names {
+ assert_eq!(arg_names.len(), args.len());
+ set_items_in_scope(&scope, vm, arg_names, args)?;
+ }
if let Some(engine) = &copr.query_engine {
let query_engine = PyQueryEngine {
@@ -463,6 +475,19 @@ pub(crate) fn exec_with_cached_vm(
set_query_engine_in_scope(&scope, vm, query_engine)?;
}
+ if let Some(kwarg) = &copr.kwarg {
+ let dict = PyDict::new_ref(&vm.ctx);
+ for (k, v) in params {
+ dict.set_item(k, PyStr::from(v.clone()).into_pyobject(vm), vm)
+ .map_err(|e| format_py_error(e, vm))?;
+ }
+ scope
+ .locals
+ .as_object()
+ .set_item(kwarg, vm.new_pyobj(dict), vm)
+ .map_err(|e| format_py_error(e, vm))?;
+ }
+
// It's safe to unwrap code_object, it's already compiled before.
let code_obj = vm.ctx.new_code(copr.code_obj.clone().unwrap());
let ret = vm
@@ -470,7 +495,7 @@ pub(crate) fn exec_with_cached_vm(
.map_err(|e| format_py_error(e, vm))?;
// 5. get returns as either a PyVector or a PyTuple, and naming schema them according to `returns`
- let col_len = rb.num_rows();
+ let col_len = rb.as_ref().map(|rb| rb.num_rows()).unwrap_or(1);
let mut cols = try_into_columns(&ret, vm, col_len)?;
ensure!(
cols.len() == copr.deco_args.ret_names.len(),
@@ -485,6 +510,7 @@ pub(crate) fn exec_with_cached_vm(
// if cols and schema's data types is not match, try coerce it to given type(if annotated)(if error occur, return relevant error with question mark)
copr.check_and_cast_type(&mut cols)?;
+
// 6. return a assembled DfRecordBatch
let schema = copr.gen_schema(&cols)?;
RecordBatch::new(schema, cols).context(NewRecordBatchSnafu)
@@ -533,13 +559,23 @@ pub(crate) fn init_interpreter() -> Arc<Interpreter> {
}
/// using a parsed `Coprocessor` struct as input to execute python code
-pub(crate) fn exec_parsed(copr: &Coprocessor, rb: &RecordBatch) -> Result<RecordBatch> {
+pub(crate) fn exec_parsed(
+ copr: &Coprocessor,
+ rb: &Option<RecordBatch>,
+ params: &HashMap<String, String>,
+) -> Result<RecordBatch> {
// 3. get args from `rb`, and cast them into PyVector
- let args: Vec<PyVector> = select_from_rb(rb, &copr.deco_args.arg_names)?;
- check_args_anno_real_type(&args, copr, rb)?;
+ let args: Vec<PyVector> = if let Some(rb) = rb {
+ let args = select_from_rb(rb, copr.deco_args.arg_names.as_ref().unwrap_or(&vec![]))?;
+ check_args_anno_real_type(&args, copr, rb)?;
+ args
+ } else {
+ vec![]
+ };
+
let interpreter = init_interpreter();
// 4. then set args in scope and compile then run `CodeObject` which already append a new `Call` node
- exec_with_cached_vm(copr, rb, args, &interpreter)
+ exec_with_cached_vm(copr, rb, args, params, &interpreter)
}
/// execute script just like [`exec_coprocessor`] do,
@@ -551,7 +587,7 @@ pub(crate) fn exec_parsed(copr: &Coprocessor, rb: &RecordBatch) -> Result<Record
#[allow(dead_code)]
pub fn exec_copr_print(
script: &str,
- rb: &RecordBatch,
+ rb: &Option<RecordBatch>,
ln_offset: usize,
filename: &str,
) -> StdResult<RecordBatch, String> {
@@ -572,7 +608,7 @@ def add(a, b):
return a + b
@copr(args=["a", "b", "c"], returns = ["r"], sql="select number as a,number as b,number as c from numbers limit 100")
-def test(a, b, c):
+def test(a, b, c, **params):
import greptime as g
return add(a, b) / g.sqrt(c)
"#;
@@ -585,9 +621,10 @@ def test(a, b, c):
"select number as a,number as b,number as c from numbers limit 100"
);
assert_eq!(deco_args.ret_names, vec!["r"]);
- assert_eq!(deco_args.arg_names, vec!["a", "b", "c"]);
+ assert_eq!(deco_args.arg_names.unwrap(), vec!["a", "b", "c"]);
assert_eq!(copr.arg_types, vec![None, None, None]);
assert_eq!(copr.return_types, vec![None]);
+ assert_eq!(copr.kwarg, Some("params".to_string()));
assert_eq!(copr.script, script);
assert!(copr.code_obj.is_some());
}
diff --git a/src/script/src/python/coprocessor/compile.rs b/src/script/src/python/coprocessor/compile.rs
index 9f5f0cd82ec9..2f73f8f6c7d9 100644
--- a/src/script/src/python/coprocessor/compile.rs
+++ b/src/script/src/python/coprocessor/compile.rs
@@ -16,7 +16,7 @@
use rustpython_codegen::compile::compile_top;
use rustpython_compiler::{CompileOpts, Mode};
use rustpython_compiler_core::CodeObject;
-use rustpython_parser::ast::{Located, Location};
+use rustpython_parser::ast::{ArgData, Located, Location};
use rustpython_parser::{ast, parser};
use snafu::ResultExt;
@@ -31,23 +31,40 @@ fn create_located<T>(node: T, loc: Location) -> Located<T> {
/// generate a call to the coprocessor function
/// with arguments given in decorator's `args` list
/// also set in location in source code to `loc`
-fn gen_call(name: &str, deco_args: &DecoratorArgs, loc: &Location) -> ast::Stmt<()> {
- let mut loc = loc.to_owned();
+fn gen_call(
+ name: &str,
+ deco_args: &DecoratorArgs,
+ kwarg: &Option<String>,
+ loc: &Location,
+) -> ast::Stmt<()> {
+ let mut loc = *loc;
// adding a line to avoid confusing if any error occurs when calling the function
// then the pretty print will point to the last line in code
// instead of point to any of existing code written by user.
loc.newline();
- let args: Vec<Located<ast::ExprKind>> = deco_args
- .arg_names
- .iter()
- .map(|v| {
- let node = ast::ExprKind::Name {
- id: v.to_owned(),
- ctx: ast::ExprContext::Load,
- };
- create_located(node, loc)
- })
- .collect();
+ let mut args: Vec<Located<ast::ExprKind>> = if let Some(arg_names) = &deco_args.arg_names {
+ arg_names
+ .iter()
+ .map(|v| {
+ let node = ast::ExprKind::Name {
+ id: v.clone(),
+ ctx: ast::ExprContext::Load,
+ };
+ create_located(node, loc)
+ })
+ .collect()
+ } else {
+ vec![]
+ };
+
+ if let Some(kwarg) = kwarg {
+ let node = ast::ExprKind::Name {
+ id: kwarg.clone(),
+ ctx: ast::ExprContext::Load,
+ };
+ args.push(create_located(node, loc));
+ }
+
let func = ast::ExprKind::Call {
func: Box::new(create_located(
ast::ExprKind::Name {
@@ -71,7 +88,12 @@ fn gen_call(name: &str, deco_args: &DecoratorArgs, loc: &Location) -> ast::Stmt<
/// So we should avoid running too much Python Bytecode, hence in this function we delete `@` decorator(instead of actually write a decorator in python)
/// And add a function call in the end and also
/// strip type annotation
-pub fn compile_script(name: &str, deco_args: &DecoratorArgs, script: &str) -> Result<CodeObject> {
+pub fn compile_script(
+ name: &str,
+ deco_args: &DecoratorArgs,
+ kwarg: &Option<String>,
+ script: &str,
+) -> Result<CodeObject> {
// note that it's important to use `parser::Mode::Interactive` so the ast can be compile to return a result instead of return None in eval mode
let mut top =
parser::parse(script, parser::Mode::Interactive, "<embedded>").context(PyParseSnafu)?;
@@ -89,6 +111,20 @@ pub fn compile_script(name: &str, deco_args: &DecoratorArgs, script: &str) -> Re
type_comment: __main__,
} = &mut stmt.node
{
+ // Rewrite kwargs in coprocessor, make it as a positional argument
+ if !decorator_list.is_empty() {
+ if let Some(kwarg) = kwarg {
+ args.kwarg = None;
+ let node = ArgData {
+ arg: kwarg.clone(),
+ annotation: None,
+ type_comment: Some("kwargs".to_string()),
+ };
+ let kwarg = create_located(node, stmt.location);
+ args.args.push(kwarg);
+ }
+ }
+
*decorator_list = Vec::new();
// strip type annotation
// def a(b: int, c:int) -> int
@@ -115,14 +151,14 @@ pub fn compile_script(name: &str, deco_args: &DecoratorArgs, script: &str) -> Re
}
// Append statement which calling coprocessor function.
// It's safe to unwrap loc, it is always exists.
- stmts.push(gen_call(name, deco_args, &loc.unwrap()));
+ stmts.push(gen_call(name, deco_args, kwarg, &loc.unwrap()));
} else {
return fail_parse_error!(format!("Expect statement in script, found: {top:?}"), None);
}
// use `compile::Mode::BlockExpr` so it return the result of statement
compile_top(
&top,
- "<embedded>".to_owned(),
+ "<embedded>".to_string(),
Mode::BlockExpr,
CompileOpts { optimize: 0 },
)
diff --git a/src/script/src/python/coprocessor/parse.rs b/src/script/src/python/coprocessor/parse.rs
index 18bcdca8c8a2..a616680352ed 100644
--- a/src/script/src/python/coprocessor/parse.rs
+++ b/src/script/src/python/coprocessor/parse.rs
@@ -29,7 +29,7 @@ use crate::python::error::{ensure, CoprParseSnafu, PyParseSnafu, Result};
#[cfg_attr(test, derive(Deserialize))]
#[derive(Default, Debug, Clone, PartialEq, Eq)]
pub struct DecoratorArgs {
- pub arg_names: Vec<String>,
+ pub arg_names: Option<Vec<String>>,
pub ret_names: Vec<String>,
pub sql: Option<String>,
// maybe add a URL for connecting or what?
@@ -58,7 +58,7 @@ fn py_str_to_string(s: &ast::Expr<()>) -> Result<String> {
kind: _,
} = &s.node
{
- Ok(v.to_owned())
+ Ok(v.clone())
} else {
fail_parse_error!(
format!(
@@ -100,10 +100,7 @@ fn try_into_datatype(ty: &str, loc: &Location) -> Result<Option<ConcreteDataType
// for any datatype
"_" => Ok(None),
// note the different between "_" and _
- _ => fail_parse_error!(
- format!("Unknown datatype: {ty} at {loc:?}"),
- Some(loc.to_owned())
- ),
+ _ => fail_parse_error!(format!("Unknown datatype: {ty} at {loc:?}"), Some(*loc)),
}
}
@@ -263,7 +260,7 @@ fn parse_annotation(sub: &ast::Expr<()>) -> Result<AnnotationInfo> {
fn parse_keywords(keywords: &Vec<ast::Keyword<()>>) -> Result<DecoratorArgs> {
// more keys maybe add to this list of `avail_key`(like `sql` for querying and maybe config for connecting to database?), for better extension using a `HashSet` in here
let avail_key = HashSet::from(["args", "returns", "sql"]);
- let opt_keys = HashSet::from(["sql"]);
+ let opt_keys = HashSet::from(["sql", "args"]);
let mut visited_key = HashSet::new();
let len_min = avail_key.len() - opt_keys.len();
let len_max = avail_key.len();
@@ -298,7 +295,7 @@ fn parse_keywords(keywords: &Vec<ast::Keyword<()>>) -> Result<DecoratorArgs> {
visited_key.insert(s);
}
match s {
- "args" => ret_args.arg_names = pylist_to_vec(&kw.node.value)?,
+ "args" => ret_args.arg_names = Some(pylist_to_vec(&kw.node.value)?),
"returns" => ret_args.ret_names = pylist_to_vec(&kw.node.value)?,
"sql" => ret_args.sql = Some(py_str_to_string(&kw.node.value)?),
_ => unreachable!(),
@@ -476,17 +473,19 @@ pub fn parse_and_compile_copr(
// make sure both arguments&returns in function
// and in decorator have same length
- ensure!(
- deco_args.arg_names.len() == arg_types.len(),
- CoprParseSnafu {
- reason: format!(
- "args number in decorator({}) and function({}) doesn't match",
- deco_args.arg_names.len(),
- arg_types.len()
- ),
- loc: None
- }
- );
+ if let Some(arg_names) = &deco_args.arg_names {
+ ensure!(
+ arg_names.len() == arg_types.len(),
+ CoprParseSnafu {
+ reason: format!(
+ "args number in decorator({}) and function({}) doesn't match",
+ arg_names.len(),
+ arg_types.len()
+ ),
+ loc: None
+ }
+ );
+ }
ensure!(
deco_args.ret_names.len() == return_types.len(),
CoprParseSnafu {
@@ -498,13 +497,15 @@ pub fn parse_and_compile_copr(
loc: None
}
);
+ let kwarg = fn_args.kwarg.as_ref().map(|arg| arg.node.arg.clone());
coprocessor = Some(Coprocessor {
- code_obj: Some(compile::compile_script(name, &deco_args, script)?),
+ code_obj: Some(compile::compile_script(name, &deco_args, &kwarg, script)?),
name: name.to_string(),
deco_args,
arg_types,
return_types,
- script: script.to_owned(),
+ kwarg,
+ script: script.to_string(),
query_engine: query_engine.as_ref().map(|e| Arc::downgrade(e).into()),
});
}
diff --git a/src/script/src/python/engine.rs b/src/script/src/python/engine.rs
index fd89ee74be9f..25af2c14ba1e 100644
--- a/src/script/src/python/engine.rs
+++ b/src/script/src/python/engine.rs
@@ -14,6 +14,7 @@
//! Python script engine
use std::any::Any;
+use std::collections::HashMap;
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};
@@ -25,7 +26,9 @@ use common_query::error::{PyUdfSnafu, UdfTempRecordBatchSnafu};
use common_query::prelude::Signature;
use common_query::Output;
use common_recordbatch::error::{ExternalSnafu, Result as RecordBatchResult};
-use common_recordbatch::{RecordBatch, RecordBatchStream, SendableRecordBatchStream};
+use common_recordbatch::{
+ RecordBatch, RecordBatchStream, RecordBatches, SendableRecordBatchStream,
+};
use datafusion_expr::Volatility;
use datatypes::schema::{ColumnSchema, SchemaRef};
use datatypes::vectors::VectorRef;
@@ -53,7 +56,12 @@ impl std::fmt::Display for PyUDF {
f,
"{}({})->",
&self.copr.name,
- &self.copr.deco_args.arg_names.join(",")
+ self.copr
+ .deco_args
+ .arg_names
+ .as_ref()
+ .unwrap_or(&vec![])
+ .join(",")
)
}
}
@@ -73,11 +81,17 @@ impl PyUDF {
/// Fake a schema, should only be used with dynamically eval a Python Udf
fn fake_schema(&self, columns: &[VectorRef]) -> SchemaRef {
- let arg_names = &self.copr.deco_args.arg_names;
+ let empty_args = vec![];
+ let arg_names = self
+ .copr
+ .deco_args
+ .arg_names
+ .as_ref()
+ .unwrap_or(&empty_args);
let col_sch: Vec<_> = columns
.iter()
.enumerate()
- .map(|(i, col)| ColumnSchema::new(arg_names[i].to_owned(), col.data_type(), true))
+ .map(|(i, col)| ColumnSchema::new(arg_names[i].clone(), col.data_type(), true))
.collect();
let schema = datatypes::schema::Schema::new(col_sch);
Arc::new(schema)
@@ -97,7 +111,7 @@ impl Function for PyUDF {
match self.copr.return_types.get(0) {
Some(Some(AnnotationInfo {
datatype: Some(ty), ..
- })) => Ok(ty.to_owned()),
+ })) => Ok(ty.clone()),
_ => PyUdfSnafu {
msg: "Can't found return type for python UDF {self}",
}
@@ -113,7 +127,7 @@ impl Function for PyUDF {
match ty {
Some(AnnotationInfo {
datatype: Some(ty), ..
- }) => arg_types.push(ty.to_owned()),
+ }) => arg_types.push(ty.clone()),
_ => {
know_all_types = false;
break;
@@ -135,9 +149,8 @@ impl Function for PyUDF {
// FIXME(discord9): exec_parsed require a RecordBatch(basically a Vector+Schema), where schema can't pop out from nowhere, right?
let schema = self.fake_schema(columns);
let columns = columns.to_vec();
- // TODO(discord9): remove unwrap
- let rb = RecordBatch::new(schema, columns).context(UdfTempRecordBatchSnafu)?;
- let res = exec_parsed(&self.copr, &rb).map_err(|err| {
+ let rb = Some(RecordBatch::new(schema, columns).context(UdfTempRecordBatchSnafu)?);
+ let res = exec_parsed(&self.copr, &rb, &HashMap::new()).map_err(|err| {
PyUdfSnafu {
msg: format!("{err:#?}"),
}
@@ -153,7 +166,7 @@ impl Function for PyUDF {
// TODO(discord9): more error handling
let res0 = res.column(0);
- Ok(res0.to_owned())
+ Ok(res0.clone())
}
}
@@ -168,13 +181,14 @@ impl PyScript {
pub fn register_udf(&self) {
let udf = PyUDF::from_copr(self.copr.clone());
PyUDF::register_as_udf(udf.clone());
- PyUDF::register_to_query_engine(udf, self.query_engine.to_owned());
+ PyUDF::register_to_query_engine(udf, self.query_engine.clone());
}
}
pub struct CoprStream {
stream: SendableRecordBatchStream,
copr: CoprocessorRef,
+ params: HashMap<String, String>,
}
impl RecordBatchStream for CoprStream {
@@ -190,7 +204,7 @@ impl Stream for CoprStream {
match Pin::new(&mut self.stream).poll_next(cx) {
Poll::Pending => Poll::Pending,
Poll::Ready(Some(Ok(recordbatch))) => {
- let batch = exec_parsed(&self.copr, &recordbatch)
+ let batch = exec_parsed(&self.copr, &Some(recordbatch), &self.params)
.map_err(BoxedError::new)
.context(ExternalSnafu)?;
@@ -218,7 +232,7 @@ impl Script for PyScript {
self
}
- async fn execute(&self, _ctx: EvalContext) -> Result<Output> {
+ async fn execute(&self, params: HashMap<String, String>, _ctx: EvalContext) -> Result<Output> {
if let Some(sql) = &self.copr.deco_args.sql {
let stmt = QueryLanguageParser::parse_sql(sql).unwrap();
ensure!(
@@ -231,12 +245,17 @@ impl Script for PyScript {
let res = self.query_engine.execute(&plan).await?;
let copr = self.copr.clone();
match res {
- Output::Stream(stream) => Ok(Output::Stream(Box::pin(CoprStream { copr, stream }))),
+ Output::Stream(stream) => Ok(Output::Stream(Box::pin(CoprStream {
+ params,
+ copr,
+ stream,
+ }))),
_ => unreachable!(),
}
} else {
- // TODO(boyan): try to retrieve sql from user request
- error::MissingSqlSnafu {}.fail()
+            let batch = exec_parsed(&self.copr, &None, ¶ms)?;
+ let batches = RecordBatches::try_new(batch.schema.clone(), vec![batch]).unwrap();
+ Ok(Output::RecordBatches(batches))
}
}
}
@@ -284,6 +303,7 @@ mod tests {
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
use common_recordbatch::util;
use datatypes::prelude::ScalarVector;
+ use datatypes::value::Value;
use datatypes::vectors::{Float64Vector, Int64Vector};
use query::QueryEngineFactory;
use table::table::numbers::NumbersTable;
@@ -326,7 +346,10 @@ def test(number)->vector[u32]:
.compile(script, CompileContext::default())
.await
.unwrap();
- let output = script.execute(EvalContext::default()).await.unwrap();
+ let output = script
+ .execute(HashMap::default(), EvalContext::default())
+ .await
+ .unwrap();
let res = common_recordbatch::util::collect_batches(match output {
Output::Stream(s) => s,
_ => unreachable!(),
@@ -337,6 +360,36 @@ def test(number)->vector[u32]:
assert_eq!(rb.column(0).len(), 100);
}
+ #[tokio::test]
+ async fn test_user_params_in_py() {
+ let script_engine = sample_script_engine();
+
+ let script = r#"
+@copr(returns = ["number"])
+def test(**params)->vector[i64]:
+ return int(params['a']) + int(params['b'])
+"#;
+ let script = script_engine
+ .compile(script, CompileContext::default())
+ .await
+ .unwrap();
+ let mut params = HashMap::new();
+ params.insert("a".to_string(), "30".to_string());
+ params.insert("b".to_string(), "12".to_string());
+ let _output = script
+ .execute(params, EvalContext::default())
+ .await
+ .unwrap();
+ let res = match _output {
+ Output::RecordBatches(s) => s,
+ _ => todo!(),
+ };
+ let rb = res.iter().next().expect("One and only one recordbatch");
+ assert_eq!(rb.column(0).len(), 1);
+ let result = rb.column(0).get(0);
+ assert!(matches!(result, Value::Int64(42)));
+ }
+
#[tokio::test]
async fn test_data_frame_in_py() {
let script_engine = sample_script_engine();
@@ -353,7 +406,10 @@ def test(number)->vector[u32]:
.compile(script, CompileContext::default())
.await
.unwrap();
- let _output = script.execute(EvalContext::default()).await.unwrap();
+ let _output = script
+ .execute(HashMap::new(), EvalContext::default())
+ .await
+ .unwrap();
let res = common_recordbatch::util::collect_batches(match _output {
Output::Stream(s) => s,
_ => todo!(),
@@ -382,7 +438,10 @@ def test(a, b, c):
.compile(script, CompileContext::default())
.await
.unwrap();
- let output = script.execute(EvalContext::default()).await.unwrap();
+ let output = script
+ .execute(HashMap::new(), EvalContext::default())
+ .await
+ .unwrap();
match output {
Output::Stream(stream) => {
let numbers = util::collect(stream).await.unwrap();
@@ -417,7 +476,10 @@ def test(a):
.compile(script, CompileContext::default())
.await
.unwrap();
- let output = script.execute(EvalContext::default()).await.unwrap();
+ let output = script
+ .execute(HashMap::new(), EvalContext::default())
+ .await
+ .unwrap();
match output {
Output::Stream(stream) => {
let numbers = util::collect(stream).await.unwrap();
diff --git a/src/script/src/python/error.rs b/src/script/src/python/error.rs
index 70e3b89ab32b..5fa21a323095 100644
--- a/src/script/src/python/error.rs
+++ b/src/script/src/python/error.rs
@@ -216,7 +216,7 @@ pub fn visualize_loc(
/// extract a reason for [`Error`] in string format, also return a location if possible
pub fn get_error_reason_loc(err: &Error) -> (String, Option<Location>) {
match err {
- Error::CoprParse { reason, loc, .. } => (reason.clone(), loc.to_owned()),
+ Error::CoprParse { reason, loc, .. } => (reason.clone(), *loc),
Error::Other { reason, .. } => (reason.clone(), None),
Error::PyRuntime { msg, .. } => (msg.clone(), None),
Error::PyParse { source, .. } => (source.error.to_string(), Some(source.location)),
diff --git a/src/script/src/python/test.rs b/src/script/src/python/test.rs
index e0778c32adb2..a935f6ca418e 100644
--- a/src/script/src/python/test.rs
+++ b/src/script/src/python/test.rs
@@ -126,7 +126,7 @@ fn run_ron_testcases() {
}
Predicate::ExecIsOk { fields, columns } => {
let rb = create_sample_recordbatch();
- let res = coprocessor::exec_coprocessor(&testcase.code, &rb).unwrap();
+ let res = coprocessor::exec_coprocessor(&testcase.code, &Some(rb)).unwrap();
fields
.iter()
.zip(res.schema.column_schemas())
@@ -152,7 +152,7 @@ fn run_ron_testcases() {
reason: part_reason,
} => {
let rb = create_sample_recordbatch();
- let res = coprocessor::exec_coprocessor(&testcase.code, &rb);
+ let res = coprocessor::exec_coprocessor(&testcase.code, &Some(rb));
assert!(res.is_err(), "{res:#?}\nExpect Err(...), actual Ok(...)");
if let Err(res) = res {
error!(
@@ -254,7 +254,7 @@ def calc_rvs(open_time, close):
],
)
.unwrap();
- let ret = coprocessor::exec_coprocessor(python_source, &rb);
+ let ret = coprocessor::exec_coprocessor(python_source, &Some(rb));
if let Err(Error::PyParse {
backtrace: _,
source,
@@ -304,7 +304,7 @@ def a(cpu, mem):
],
)
.unwrap();
- let ret = coprocessor::exec_coprocessor(python_source, &rb);
+ let ret = coprocessor::exec_coprocessor(python_source, &Some(rb));
if let Err(Error::PyParse {
backtrace: _,
source,
diff --git a/src/script/src/python/testcases.ron b/src/script/src/python/testcases.ron
index 3ebd2d5e4ce5..23f819a241b8 100644
--- a/src/script/src/python/testcases.ron
+++ b/src/script/src/python/testcases.ron
@@ -19,7 +19,7 @@ def a(cpu: vector[f32], mem: vector[f64])->(vector[f64], vector[f64|None], vecto
result: (
name: "a",
deco_args: (
- arg_names: ["cpu", "mem"],
+ arg_names: Some(["cpu", "mem"]),
ret_names: ["perf", "what", "how", "why"],
),
arg_types: [
@@ -49,7 +49,62 @@ def a(cpu: vector[f32], mem: vector[f64])->(vector[f64], vector[f64|None], vecto
datatype: None,
is_nullable: true
)),
- ]
+ ],
+ kwarg: None,
+ )
+ )
+ ),
+ (
+ name: "correct_parse_params",
+ code: r#"
+import greptime as gt
+from greptime import pow
+def add(a, b):
+ return a + b
+def sub(a, b):
+ return a - b
+@copr(args=["cpu", "mem"], returns=["perf", "what", "how", "why"])
+def a(cpu: vector[f32], mem: vector[f64], **params) -> (vector[f64], vector[f64|None], vector[_], vector[_ | None]):
+ for key, value in params.items():
+ print("%s == %s" % (key, value))
+ return add(cpu, mem), sub(cpu, mem), cpu * mem, cpu / mem
+ "#,
+ predicate: ParseIsOk(
+ result: (
+ name: "a",
+ deco_args: (
+ arg_names: Some(["cpu", "mem"]),
+ ret_names: ["perf", "what", "how", "why"],
+ ),
+ arg_types: [
+ Some((
+ datatype: Some(Float32(())),
+ is_nullable: false
+ )),
+ Some((
+ datatype: Some(Float64(())),
+ is_nullable: false
+ )),
+ ],
+ return_types: [
+ Some((
+ datatype: Some(Float64(())),
+ is_nullable: false
+ )),
+ Some((
+ datatype: Some(Float64(())),
+ is_nullable: true
+ )),
+ Some((
+ datatype: None,
+ is_nullable: false
+ )),
+ Some((
+ datatype: None,
+ is_nullable: true
+ )),
+ ],
+ kwarg: Some("params"),
)
)
),
@@ -231,7 +286,7 @@ def a(cpu: vector[f64], mem: vector[f64])->(vector[f64|None], vector[into(f64)],
"#,
predicate: ParseIsErr(
reason:
- " keyword argument, found "
+ "Expect `returns` keyword"
)
),
(
diff --git a/src/script/src/python/vector.rs b/src/script/src/python/vector.rs
index c776f140b34f..24c7aa697670 100644
--- a/src/script/src/python/vector.rs
+++ b/src/script/src/python/vector.rs
@@ -490,6 +490,21 @@ impl PyVector {
self.as_vector_ref().len()
}
+ #[pymethod(name = "concat")]
+ fn concat(&self, other: PyVectorRef, vm: &VirtualMachine) -> PyResult<PyVector> {
+ let left = self.to_arrow_array();
+ let right = other.to_arrow_array();
+
+ let res = compute::concat(&[left.as_ref(), right.as_ref()]);
+ let res = res.map_err(|err| vm.new_runtime_error(format!("Arrow Error: {err:#?}")))?;
+ let ret = Helper::try_into_vector(res.clone()).map_err(|e| {
+ vm.new_type_error(format!(
+ "Can't cast result into vector, result: {res:?}, err: {e:?}",
+ ))
+ })?;
+ Ok(ret.into())
+ }
+
/// take a boolean array and filters the Array, returning elements matching the filter (i.e. where the values are true).
#[pymethod(name = "filter")]
fn filter(&self, other: PyVectorRef, vm: &VirtualMachine) -> PyResult<PyVector> {
@@ -549,7 +564,7 @@ impl PyVector {
// in the newest version of rustpython_vm, wrapped_at for isize is replace by wrap_index(i, len)
let i = i
.wrapped_at(self.len())
- .ok_or_else(|| vm.new_index_error("PyVector index out of range".to_owned()))?;
+ .ok_or_else(|| vm.new_index_error("PyVector index out of range".to_string()))?;
Ok(val_to_pyobj(self.as_vector_ref().get(i), vm))
}
@@ -912,7 +927,7 @@ impl AsSequence for PyVector {
zelf.getitem_by_index(i, vm)
}),
ass_item: atomic_func!(|_seq, _i, _value, vm| {
- Err(vm.new_type_error("PyVector object doesn't support item assigns".to_owned()))
+ Err(vm.new_type_error("PyVector object doesn't support item assigns".to_string()))
}),
..PySequenceMethods::NOT_IMPLEMENTED
});
@@ -1080,7 +1095,7 @@ pub mod tests {
.compile(
script,
rustpython_compiler_core::Mode::BlockExpr,
- "<embedded>".to_owned(),
+ "<embedded>".to_string(),
)
.map_err(|err| vm.new_syntax_error(&err))?;
let ret = vm.run_code_obj(code_obj, scope);
diff --git a/src/servers/src/http/script.rs b/src/servers/src/http/script.rs
index 881a4d555165..7cf59862aeba 100644
--- a/src/servers/src/http/script.rs
+++ b/src/servers/src/http/script.rs
@@ -11,7 +11,7 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
-
+use std::collections::HashMap;
use std::time::Instant;
use axum::extract::{Json, Query, RawBody, State};
@@ -81,10 +81,12 @@ pub async fn scripts(
}
}
-#[derive(Debug, Serialize, Deserialize, JsonSchema)]
+#[derive(Debug, Serialize, Deserialize, JsonSchema, Default)]
pub struct ScriptQuery {
pub db: Option<String>,
pub name: Option<String>,
+ #[serde(flatten)]
+ pub params: HashMap<String, String>,
}
/// Handler to execute script
@@ -110,7 +112,7 @@ pub async fn run_script(
// TODO(sunng87): query_context and db name resolution
let output = script_handler
- .execute_script(schema.unwrap(), name.unwrap())
+ .execute_script(schema.unwrap(), name.unwrap(), params.params)
.await;
let resp = JsonResponse::from_output(vec![output]).await;
diff --git a/src/servers/src/query_handler.rs b/src/servers/src/query_handler.rs
index edaad6f0ad6b..d94cdb1c0a20 100644
--- a/src/servers/src/query_handler.rs
+++ b/src/servers/src/query_handler.rs
@@ -25,6 +25,7 @@
pub mod grpc;
pub mod sql;
+use std::collections::HashMap;
use std::sync::Arc;
use api::prometheus::remote::{ReadRequest, WriteRequest};
@@ -45,7 +46,12 @@ pub type ScriptHandlerRef = Arc<dyn ScriptHandler + Send + Sync>;
#[async_trait]
pub trait ScriptHandler {
async fn insert_script(&self, schema: &str, name: &str, script: &str) -> Result<()>;
- async fn execute_script(&self, schema: &str, name: &str) -> Result<Output>;
+ async fn execute_script(
+ &self,
+ schema: &str,
+ name: &str,
+ params: HashMap<String, String>,
+ ) -> Result<Output>;
}
#[async_trait]
diff --git a/src/servers/tests/http/http_handler_test.rs b/src/servers/tests/http/http_handler_test.rs
index 3206c33abd09..6ad18a9ac899 100644
--- a/src/servers/tests/http/http_handler_test.rs
+++ b/src/servers/tests/http/http_handler_test.rs
@@ -23,7 +23,10 @@ use servers::http::{handler as http_handler, script as script_handler, ApiState,
use session::context::UserInfo;
use table::test_util::MemTable;
-use crate::{create_testing_script_handler, create_testing_sql_query_handler};
+use crate::{
+ create_testing_script_handler, create_testing_sql_query_handler, ScriptHandlerRef,
+ ServerSqlQueryHandlerRef,
+};
#[tokio::test]
async fn test_sql_not_provided() {
@@ -68,6 +71,25 @@ async fn test_sql_output_rows() {
match &json.output().expect("assertion failed")[0] {
JsonOutput::Records(records) => {
assert_eq!(1, records.num_rows());
+ let json = serde_json::to_string_pretty(&records).unwrap();
+ assert_eq!(
+ json,
+ r#"{
+ "schema": {
+ "column_schemas": [
+ {
+ "name": "SUM(numbers.uint32s)",
+ "data_type": "UInt64"
+ }
+ ]
+ },
+ "rows": [
+ [
+ 4950
+ ]
+ ]
+}"#
+ );
}
_ => unreachable!(),
}
@@ -95,6 +117,25 @@ async fn test_sql_form() {
match &json.output().expect("assertion failed")[0] {
JsonOutput::Records(records) => {
assert_eq!(1, records.num_rows());
+ let json = serde_json::to_string_pretty(&records).unwrap();
+ assert_eq!(
+ json,
+ r#"{
+ "schema": {
+ "column_schemas": [
+ {
+ "name": "SUM(numbers.uint32s)",
+ "data_type": "UInt64"
+ }
+ ]
+ },
+ "rows": [
+ [
+ 4950
+ ]
+ ]
+}"#
+ );
}
_ => unreachable!(),
}
@@ -110,18 +151,11 @@ async fn test_metrics() {
assert!(text.contains("test_metrics counter"));
}
-#[tokio::test]
-async fn test_scripts() {
- common_telemetry::init_default_ut_logging();
-
- let script = r#"
-@copr(sql='select uint32s as number from numbers', args=['number'], returns=['n'])
-def test(n):
- return n;
-"#
- .to_string();
- let sql_handler = create_testing_sql_query_handler(MemTable::default_numbers_table());
- let script_handler = create_testing_script_handler(MemTable::default_numbers_table());
+async fn insert_script(
+ script: String,
+ script_handler: ScriptHandlerRef,
+ sql_handler: ServerSqlQueryHandlerRef,
+) {
let body = RawBody(Body::from(script.clone()));
let invalid_query = create_invalid_script_query();
let Json(json) = script_handler::scripts(
@@ -136,12 +170,13 @@ def test(n):
assert!(!json.success(), "{json:?}");
assert_eq!(json.error().unwrap(), "Invalid argument: invalid schema");
- let body = RawBody(Body::from(script));
+ let body = RawBody(Body::from(script.clone()));
let exec = create_script_query();
+ // Insert the script
let Json(json) = script_handler::scripts(
State(ApiState {
- sql_handler,
- script_handler: Some(script_handler),
+ sql_handler: sql_handler.clone(),
+ script_handler: Some(script_handler.clone()),
}),
exec,
body,
@@ -152,10 +187,144 @@ def test(n):
assert!(json.output().is_none());
}
+#[tokio::test]
+async fn test_scripts() {
+ common_telemetry::init_default_ut_logging();
+
+ let script = r#"
+@copr(sql='select uint32s as number from numbers limit 5', args=['number'], returns=['n'])
+def test(n) -> vector[i64]:
+ return n;
+"#
+ .to_string();
+ let sql_handler = create_testing_sql_query_handler(MemTable::default_numbers_table());
+ let script_handler = create_testing_script_handler(MemTable::default_numbers_table());
+
+ insert_script(script.clone(), script_handler.clone(), sql_handler.clone()).await;
+ // Run the script
+ let exec = create_script_query();
+ let Json(json) = script_handler::run_script(
+ State(ApiState {
+ sql_handler,
+ script_handler: Some(script_handler),
+ }),
+ exec,
+ )
+ .await;
+ assert!(json.success(), "{json:?}");
+ assert!(json.error().is_none());
+
+ match &json.output().unwrap()[0] {
+ JsonOutput::Records(records) => {
+ let json = serde_json::to_string_pretty(&records).unwrap();
+ assert_eq!(5, records.num_rows());
+ assert_eq!(
+ json,
+ r#"{
+ "schema": {
+ "column_schemas": [
+ {
+ "name": "n",
+ "data_type": "Int64"
+ }
+ ]
+ },
+ "rows": [
+ [
+ 0
+ ],
+ [
+ 1
+ ],
+ [
+ 2
+ ],
+ [
+ 3
+ ],
+ [
+ 4
+ ]
+ ]
+}"#
+ );
+ }
+ _ => unreachable!(),
+ }
+}
+
+#[tokio::test]
+async fn test_scripts_with_params() {
+ common_telemetry::init_default_ut_logging();
+
+ let script = r#"
+@copr(sql='select uint32s as number from numbers limit 5', args=['number'], returns=['n'])
+def test(n, **params) -> vector[i64]:
+ return n + int(params['a'])
+"#
+ .to_string();
+ let sql_handler = create_testing_sql_query_handler(MemTable::default_numbers_table());
+ let script_handler = create_testing_script_handler(MemTable::default_numbers_table());
+
+ insert_script(script.clone(), script_handler.clone(), sql_handler.clone()).await;
+ // Run the script
+ let mut exec = create_script_query();
+ exec.0.params.insert("a".to_string(), "42".to_string());
+ let Json(json) = script_handler::run_script(
+ State(ApiState {
+ sql_handler,
+ script_handler: Some(script_handler),
+ }),
+ exec,
+ )
+ .await;
+ assert!(json.success(), "{json:?}");
+ assert!(json.error().is_none());
+
+ match &json.output().unwrap()[0] {
+ JsonOutput::Records(records) => {
+ let json = serde_json::to_string_pretty(&records).unwrap();
+ assert_eq!(5, records.num_rows());
+ assert_eq!(
+ json,
+ r#"{
+ "schema": {
+ "column_schemas": [
+ {
+ "name": "n",
+ "data_type": "Int64"
+ }
+ ]
+ },
+ "rows": [
+ [
+ 42
+ ],
+ [
+ 43
+ ],
+ [
+ 44
+ ],
+ [
+ 45
+ ],
+ [
+ 46
+ ]
+ ]
+}"#
+ );
+ }
+ _ => unreachable!(),
+ }
+}
+
fn create_script_query() -> Query<script_handler::ScriptQuery> {
Query(script_handler::ScriptQuery {
db: Some("test".to_string()),
name: Some("test".to_string()),
+ ..Default::default()
})
}
@@ -163,6 +332,7 @@ fn create_invalid_script_query() -> Query<script_handler::ScriptQuery> {
Query(script_handler::ScriptQuery {
db: None,
name: None,
+ ..Default::default()
})
}
diff --git a/src/servers/tests/mod.rs b/src/servers/tests/mod.rs
index fc0818902a35..055f95ff2fd8 100644
--- a/src/servers/tests/mod.rs
+++ b/src/servers/tests/mod.rs
@@ -120,12 +120,20 @@ impl ScriptHandler for DummyInstance {
Ok(())
}
- async fn execute_script(&self, schema: &str, name: &str) -> Result<Output> {
+ async fn execute_script(
+ &self,
+ schema: &str,
+ name: &str,
+ params: HashMap<String, String>,
+ ) -> Result<Output> {
let key = format!("{schema}_{name}");
let py_script = self.scripts.read().unwrap().get(&key).unwrap().clone();
- Ok(py_script.execute(EvalContext::default()).await.unwrap())
+ Ok(py_script
+ .execute(params, EvalContext::default())
+ .await
+ .unwrap())
}
}
|
feat
|
supports passing user params into coprocessor (#962)
|
1703e93e151559591882e1292dfc3ce30bdd6db8
|
2023-06-20 08:55:13
|
JeremyHi
|
feat: add handler execution timer (#1791)
| false
|
diff --git a/src/meta-srv/src/handler.rs b/src/meta-srv/src/handler.rs
index 84acb376c463..b2727a8ef226 100644
--- a/src/meta-srv/src/handler.rs
+++ b/src/meta-srv/src/handler.rs
@@ -25,12 +25,12 @@ use api::v1::meta::{
pub use check_leader_handler::CheckLeaderHandler;
pub use collect_stats_handler::CollectStatsHandler;
use common_meta::instruction::{Instruction, InstructionReply};
-use common_telemetry::{debug, info, warn};
+use common_telemetry::{debug, info, timer, warn};
use dashmap::DashMap;
pub use failure_handler::RegionFailureHandler;
pub use keep_lease_handler::KeepLeaseHandler;
use metrics::{decrement_gauge, increment_gauge};
-pub use on_leader_start::OnLeaderStartHandler;
+pub use on_leader_start_handler::OnLeaderStartHandler;
pub use persist_stats_handler::PersistStatsHandler;
pub use response_header_handler::ResponseHeaderHandler;
use snafu::{OptionExt, ResultExt};
@@ -40,7 +40,7 @@ use tokio::sync::{oneshot, Notify, RwLock};
use self::node_stat::Stat;
use crate::error::{self, DeserializeFromJsonSnafu, Result, UnexpectedInstructionReplySnafu};
use crate::metasrv::Context;
-use crate::metrics::METRIC_META_HEARTBEAT_CONNECTION_NUM;
+use crate::metrics::{METRIC_META_HANDLER_EXECUTE, METRIC_META_HEARTBEAT_CONNECTION_NUM};
use crate::sequence::Sequence;
use crate::service::mailbox::{
BroadcastChannel, Channel, Mailbox, MailboxReceiver, MailboxRef, MessageId,
@@ -52,7 +52,7 @@ pub(crate) mod failure_handler;
mod keep_lease_handler;
pub mod mailbox_handler;
pub mod node_stat;
-mod on_leader_start;
+mod on_leader_start_handler;
mod persist_stats_handler;
pub(crate) mod region_lease_handler;
mod response_header_handler;
@@ -61,6 +61,12 @@ mod response_header_handler;
pub trait HeartbeatHandler: Send + Sync {
fn is_acceptable(&self, role: Role) -> bool;
+ fn name(&self) -> &'static str {
+ let type_name = std::any::type_name::<Self>();
+ // short name
+ type_name.split("::").last().unwrap_or(type_name)
+ }
+
async fn handle(
&self,
req: &HeartbeatRequest,
@@ -171,9 +177,22 @@ impl Pushers {
}
}
+struct NameCachedHandler {
+ name: &'static str,
+ handler: Box<dyn HeartbeatHandler>,
+}
+
+impl NameCachedHandler {
+ fn new(handler: impl HeartbeatHandler + 'static) -> Self {
+ let name = handler.name();
+ let handler = Box::new(handler);
+ Self { name, handler }
+ }
+}
+
#[derive(Clone, Default)]
pub struct HeartbeatHandlerGroup {
- handlers: Arc<RwLock<Vec<Box<dyn HeartbeatHandler>>>>,
+ handlers: Arc<RwLock<Vec<NameCachedHandler>>>,
pushers: Pushers,
}
@@ -187,7 +206,7 @@ impl HeartbeatHandlerGroup {
pub async fn add_handler(&self, handler: impl HeartbeatHandler + 'static) {
let mut handlers = self.handlers.write().await;
- handlers.push(Box::new(handler));
+ handlers.push(NameCachedHandler::new(handler));
}
pub async fn register(&self, key: impl AsRef<str>, pusher: Pusher) {
@@ -223,13 +242,14 @@ impl HeartbeatHandlerGroup {
err_msg: format!("invalid role: {:?}", req.header),
})?;
- for h in handlers.iter() {
+ for NameCachedHandler { name, handler } in handlers.iter() {
if ctx.is_skip_all() {
break;
}
- if h.is_acceptable(role) {
- h.handle(&req, &mut ctx, &mut acc).await?;
+ if handler.is_acceptable(role) {
+ let _timer = timer!(METRIC_META_HANDLER_EXECUTE, &[("name", *name)]);
+ handler.handle(&req, &mut ctx, &mut acc).await?;
}
}
let header = std::mem::take(&mut acc.header);
@@ -383,7 +403,11 @@ mod tests {
use api::v1::meta::{MailboxMessage, RequestHeader, Role, PROTOCOL_VERSION};
use tokio::sync::mpsc;
- use crate::handler::{HeartbeatHandlerGroup, HeartbeatMailbox, Pusher};
+ use crate::handler::mailbox_handler::MailboxHandler;
+ use crate::handler::{
+ CheckLeaderHandler, CollectStatsHandler, HeartbeatHandlerGroup, HeartbeatMailbox,
+ OnLeaderStartHandler, PersistStatsHandler, Pusher, ResponseHeaderHandler,
+ };
use crate::sequence::Sequence;
use crate::service::mailbox::{Channel, MailboxReceiver, MailboxRef};
use crate::service::store::memory::MemStore;
@@ -452,4 +476,25 @@ mod tests {
(mailbox, receiver)
}
+
+ #[tokio::test]
+ async fn test_handler_name() {
+ let group = HeartbeatHandlerGroup::default();
+ group.add_handler(ResponseHeaderHandler::default()).await;
+ group.add_handler(CheckLeaderHandler::default()).await;
+ group.add_handler(OnLeaderStartHandler::default()).await;
+ group.add_handler(CollectStatsHandler::default()).await;
+ group.add_handler(MailboxHandler::default()).await;
+ group.add_handler(PersistStatsHandler::default()).await;
+
+ let handlers = group.handlers.read().await;
+
+ assert_eq!(6, handlers.len());
+ assert_eq!("ResponseHeaderHandler", handlers[0].handler.name());
+ assert_eq!("CheckLeaderHandler", handlers[1].handler.name());
+ assert_eq!("OnLeaderStartHandler", handlers[2].handler.name());
+ assert_eq!("CollectStatsHandler", handlers[3].handler.name());
+ assert_eq!("MailboxHandler", handlers[4].handler.name());
+ assert_eq!("PersistStatsHandler", handlers[5].handler.name());
+ }
}
diff --git a/src/meta-srv/src/handler/on_leader_start.rs b/src/meta-srv/src/handler/on_leader_start_handler.rs
similarity index 100%
rename from src/meta-srv/src/handler/on_leader_start.rs
rename to src/meta-srv/src/handler/on_leader_start_handler.rs
diff --git a/src/meta-srv/src/metrics.rs b/src/meta-srv/src/metrics.rs
index f468c4fef55a..cac65989917a 100644
--- a/src/meta-srv/src/metrics.rs
+++ b/src/meta-srv/src/metrics.rs
@@ -17,3 +17,4 @@ pub(crate) const METRIC_META_CREATE_SCHEMA: &str = "meta.create_schema";
pub(crate) const METRIC_META_KV_REQUEST: &str = "meta.kv_request";
pub(crate) const METRIC_META_ROUTE_REQUEST: &str = "meta.route_request";
pub(crate) const METRIC_META_HEARTBEAT_CONNECTION_NUM: &str = "meta.heartbeat_connection_num";
+pub(crate) const METRIC_META_HANDLER_EXECUTE: &str = "meta.handler_execute";
|
feat
|
add handler execution timer (#1791)
|
b62e643e929b53796323ea63d304487e7bfad0a1
|
2023-08-12 09:01:51
|
谢政
|
build: update protobuf-build to support apple silicon (#2143)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 003457d582aa..9e2374df8e25 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -684,6 +684,15 @@ version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
+[[package]]
+name = "autotools"
+version = "0.2.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "aef8da1805e028a172334c3b680f93e71126f2327622faef2ec3d893c0a4ad77"
+dependencies = [
+ "cc",
+]
+
[[package]]
name = "axum"
version = "0.6.19"
@@ -7074,13 +7083,14 @@ dependencies = [
[[package]]
name = "protobuf-build"
-version = "0.14.1"
+version = "0.15.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2df9942df2981178a930a72d442de47e2f0df18ad68e50a30f816f1848215ad0"
+checksum = "c852d9625b912c3e50480cdc701f60f49890b5d7ad46198dd583600f15e7c6ec"
dependencies = [
"bitflags 1.3.2",
"protobuf",
"protobuf-codegen",
+ "protobuf-src",
"regex",
]
@@ -7093,6 +7103,15 @@ dependencies = [
"protobuf",
]
+[[package]]
+name = "protobuf-src"
+version = "1.1.0+21.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c7ac8852baeb3cc6fb83b93646fb93c0ffe5d14bf138c945ceb4b9948ee0e3c1"
+dependencies = [
+ "autotools",
+]
+
[[package]]
name = "ptr_meta"
version = "0.1.4"
diff --git a/src/log-store/Cargo.toml b/src/log-store/Cargo.toml
index 340d201eab17..f4825db2fde8 100644
--- a/src/log-store/Cargo.toml
+++ b/src/log-store/Cargo.toml
@@ -5,7 +5,7 @@ edition.workspace = true
license.workspace = true
[build-dependencies]
-protobuf-build = { version = "0.14", default-features = false, features = [
+protobuf-build = { version = "0.15", default-features = false, features = [
"protobuf-codec",
] }
|
build
|
update protobuf-build to support apple silicon (#2143)
|
a0d15b489a781e526e4f9087d6695c3b97f258ef
|
2023-08-31 19:35:00
|
ZonaHe
|
feat: update dashboard to v0.3.2 (#2295)
| false
|
diff --git a/src/servers/dashboard/VERSION b/src/servers/dashboard/VERSION
index 937cd784624a..7becae11d174 100644
--- a/src/servers/dashboard/VERSION
+++ b/src/servers/dashboard/VERSION
@@ -1 +1 @@
-v0.3.1
+v0.3.2
|
feat
|
update dashboard to v0.3.2 (#2295)
|
40e7b58c803ba58a7c677633aae3285b5a37965b
|
2024-08-23 08:52:00
|
ozewr
|
feat: refactoring LruCacheLayer with list_with_metakey and concurrent_stat_in_list (#4596)
| false
|
diff --git a/src/object-store/src/layers/lru_cache/read_cache.rs b/src/object-store/src/layers/lru_cache/read_cache.rs
index 6519adf766f9..f88b36784d15 100644
--- a/src/object-store/src/layers/lru_cache/read_cache.rs
+++ b/src/object-store/src/layers/lru_cache/read_cache.rs
@@ -18,15 +18,17 @@ use common_telemetry::debug;
use futures::FutureExt;
use moka::future::Cache;
use moka::notification::ListenerFuture;
-use opendal::raw::oio::{List, Read, Reader, Write};
-use opendal::raw::{Access, OpDelete, OpList, OpRead, OpStat, OpWrite, RpRead};
-use opendal::{Error as OpendalError, ErrorKind, Result};
+use opendal::raw::oio::{Read, Reader, Write};
+use opendal::raw::{Access, OpDelete, OpRead, OpStat, OpWrite, RpRead};
+use opendal::{Error as OpendalError, ErrorKind, Metakey, OperatorBuilder, Result};
use crate::metrics::{
OBJECT_STORE_LRU_CACHE_BYTES, OBJECT_STORE_LRU_CACHE_ENTRIES, OBJECT_STORE_LRU_CACHE_HIT,
OBJECT_STORE_LRU_CACHE_MISS, OBJECT_STORE_READ_ERROR,
};
+const RECOVER_CACHE_LIST_CONCURRENT: usize = 8;
+
/// Cache value for read file
#[derive(Debug, Clone, PartialEq, Eq, Copy)]
enum ReadResult {
@@ -142,19 +144,16 @@ impl<C: Access> ReadCache<C> {
/// Recover existing cache items from `file_cache` to `mem_cache`.
/// Return entry count and total approximate entry size in bytes.
pub(crate) async fn recover_cache(&self) -> Result<(u64, u64)> {
- let (_, mut pager) = self.file_cache.list("/", OpList::default()).await?;
-
- while let Some(entry) = pager.next().await? {
+ let op = OperatorBuilder::new(self.file_cache.clone()).finish();
+ let mut entries = op
+ .list_with("/")
+ .metakey(Metakey::ContentLength | Metakey::ContentType)
+ .concurrent(RECOVER_CACHE_LIST_CONCURRENT)
+ .await?;
+
+ while let Some(entry) = entries.pop() {
let read_key = entry.path();
-
- // We can't retrieve the metadata from `[opendal::raw::oio::Entry]` directly,
- // because it's private field.
- let size = {
- let stat = self.file_cache.stat(read_key, OpStat::default()).await?;
-
- stat.into_metadata().content_length()
- };
-
+ let size = entry.metadata().content_length();
OBJECT_STORE_LRU_CACHE_ENTRIES.inc();
OBJECT_STORE_LRU_CACHE_BYTES.add(size as i64);
self.mem_cache
|
feat
|
refactoring LruCacheLayer with list_with_metakey and concurrent_stat_in_list (#4596)
|
15d7b9755e9b0912dd33d5e16d37300812287cb9
|
2024-05-14 21:39:25
|
discord9
|
feat(flow): flow worker (#3934)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index fc7c994908f4..ec27a07d0510 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3528,6 +3528,18 @@ version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c34f04666d835ff5d62e058c3995147c06f42fe86ff053337632bca83e42702d"
+[[package]]
+name = "enum-as-inner"
+version = "0.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5ffccbb6966c05b32ef8fbac435df276c4ae4d3dc55a8cd0eb9745e6c12f546a"
+dependencies = [
+ "heck 0.4.1",
+ "proc-macro2",
+ "quote",
+ "syn 2.0.61",
+]
+
[[package]]
name = "enum-iterator"
version = "1.5.0"
@@ -3816,6 +3828,7 @@ dependencies = [
"datafusion-common 37.0.0",
"datafusion-expr 37.0.0",
"datatypes",
+ "enum-as-inner",
"enum_dispatch",
"futures",
"greptime-proto",
diff --git a/src/flow/Cargo.toml b/src/flow/Cargo.toml
index 270e2edbf80c..3d674bbb12b8 100644
--- a/src/flow/Cargo.toml
+++ b/src/flow/Cargo.toml
@@ -27,6 +27,7 @@ futures = "0.3"
# it is the same with upstream repo
async-trait.workspace = true
common-meta.workspace = true
+enum-as-inner = "0.6.0"
greptime-proto.workspace = true
hydroflow = { git = "https://github.com/GreptimeTeam/hydroflow.git", branch = "main" }
itertools.workspace = true
diff --git a/src/flow/src/adapter.rs b/src/flow/src/adapter.rs
index 9eb68a02c5f4..8179ca5807f9 100644
--- a/src/flow/src/adapter.rs
+++ b/src/flow/src/adapter.rs
@@ -18,4 +18,8 @@
pub(crate) mod error;
pub(crate) mod node_context;
-pub(crate) use node_context::FlownodeContext;
+pub(crate) use node_context::{FlowId, FlownodeContext, TableName};
+
+mod worker;
+
+pub const PER_REQ_MAX_ROW_CNT: usize = 8192;
diff --git a/src/flow/src/adapter/error.rs b/src/flow/src/adapter/error.rs
index 3cc74b900d41..2406dc5ea79d 100644
--- a/src/flow/src/adapter/error.rs
+++ b/src/flow/src/adapter/error.rs
@@ -24,6 +24,7 @@ use datatypes::value::Value;
use servers::define_into_tonic_status;
use snafu::{Location, Snafu};
+use crate::adapter::FlowId;
use crate::expr::EvalError;
/// This error is used to represent all possible errors that can occur in the flow module.
@@ -39,7 +40,11 @@ pub enum Error {
},
#[snafu(display("Internal error"))]
- Internal { location: Location, reason: String },
+ Internal {
+ reason: String,
+ #[snafu(implicit)]
+ location: Location,
+ },
/// TODO(discord9): add detailed location of column
#[snafu(display("Failed to eval stream"))]
@@ -71,6 +76,20 @@ pub enum Error {
location: Location,
},
+ #[snafu(display("Flow not found, id={id}"))]
+ FlowNotFound {
+ id: FlowId,
+ #[snafu(implicit)]
+ location: Location,
+ },
+
+ #[snafu(display("Flow already exist, id={id}"))]
+ FlowAlreadyExist {
+ id: FlowId,
+ #[snafu(implicit)]
+ location: Location,
+ },
+
#[snafu(display("Failed to join task"))]
JoinTask {
#[snafu(source)]
@@ -168,10 +187,12 @@ impl ErrorExt for Error {
Self::Eval { .. } | &Self::JoinTask { .. } | &Self::Datafusion { .. } => {
StatusCode::Internal
}
- &Self::TableAlreadyExist { .. } => StatusCode::TableAlreadyExists,
- Self::TableNotFound { .. } | Self::TableNotFoundMeta { .. } => {
- StatusCode::TableNotFound
+ &Self::TableAlreadyExist { .. } | Self::FlowAlreadyExist { .. } => {
+ StatusCode::TableAlreadyExists
}
+ Self::TableNotFound { .. }
+ | Self::TableNotFoundMeta { .. }
+ | Self::FlowNotFound { .. } => StatusCode::TableNotFound,
Self::InvalidQueryPlan { .. }
| Self::InvalidQuerySubstrait { .. }
| Self::InvalidQueryProst { .. }
diff --git a/src/flow/src/adapter/worker.rs b/src/flow/src/adapter/worker.rs
new file mode 100644
index 000000000000..42da2e3d111d
--- /dev/null
+++ b/src/flow/src/adapter/worker.rs
@@ -0,0 +1,508 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! For single-thread flow worker
+
+use std::collections::{BTreeMap, VecDeque};
+use std::sync::atomic::{AtomicUsize, Ordering};
+use std::sync::Arc;
+
+use enum_as_inner::EnumAsInner;
+use hydroflow::scheduled::graph::Hydroflow;
+use snafu::{ensure, OptionExt, ResultExt};
+use tokio::sync::{broadcast, mpsc, Mutex};
+
+use crate::adapter::error::{Error, EvalSnafu, FlowAlreadyExistSnafu, InternalSnafu};
+use crate::adapter::FlowId;
+use crate::compute::{Context, DataflowState, ErrCollector};
+use crate::expr::GlobalId;
+use crate::plan::TypedPlan;
+use crate::repr::{self, DiffRow};
+
+pub type SharedBuf = Arc<Mutex<VecDeque<DiffRow>>>;
+
+type ReqId = usize;
+
+/// Create both worker(`!Send`) and worker handle(`Send + Sync`)
+pub fn create_worker<'a>() -> (WorkerHandle, Worker<'a>) {
+ let (itc_client, itc_server) = create_inter_thread_call();
+ let worker_handle = WorkerHandle {
+ itc_client: Mutex::new(itc_client),
+ };
+ let worker = Worker {
+ task_states: BTreeMap::new(),
+ itc_server: Arc::new(Mutex::new(itc_server)),
+ };
+ (worker_handle, worker)
+}
+
+/// ActiveDataflowState is a wrapper around `Hydroflow` and `DataflowState`
+pub(crate) struct ActiveDataflowState<'subgraph> {
+ df: Hydroflow<'subgraph>,
+ state: DataflowState,
+ err_collector: ErrCollector,
+}
+
+impl std::fmt::Debug for ActiveDataflowState<'_> {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ f.debug_struct("ActiveDataflowState")
+ .field("df", &"<Hydroflow>")
+ .field("state", &self.state)
+ .field("err_collector", &self.err_collector)
+ .finish()
+ }
+}
+
+impl Default for ActiveDataflowState<'_> {
+ fn default() -> Self {
+ ActiveDataflowState {
+ df: Hydroflow::new(),
+ state: DataflowState::default(),
+ err_collector: ErrCollector::default(),
+ }
+ }
+}
+
+impl<'subgraph> ActiveDataflowState<'subgraph> {
+ /// Create a new render context, assigned with given global id
+ pub fn new_ctx<'ctx>(&'ctx mut self, global_id: GlobalId) -> Context<'ctx, 'subgraph>
+ where
+ 'subgraph: 'ctx,
+ {
+ Context {
+ id: global_id,
+ df: &mut self.df,
+ compute_state: &mut self.state,
+ err_collector: self.err_collector.clone(),
+ input_collection: Default::default(),
+ local_scope: Default::default(),
+ }
+ }
+
+ pub fn set_current_ts(&mut self, ts: repr::Timestamp) {
+ self.state.set_current_ts(ts);
+ }
+
+ /// Run all available subgraph
+ ///
+ /// return true if any subgraph actually executed
+ pub fn run_available(&mut self) -> bool {
+ self.state.run_available_with_schedule(&mut self.df)
+ }
+}
+
+#[derive(Debug)]
+pub struct WorkerHandle {
+ itc_client: Mutex<InterThreadCallClient>,
+}
+
+impl WorkerHandle {
+ /// create task, return task id
+ pub async fn create_flow(&self, create_reqs: Request) -> Result<Option<FlowId>, Error> {
+ ensure!(
+ matches!(create_reqs, Request::Create { .. }),
+ InternalSnafu {
+ reason: format!(
+ "Flow Node/Worker itc failed, expect Request::Create, found {create_reqs:?}"
+ ),
+ }
+ );
+
+ let ret = self
+ .itc_client
+ .lock()
+ .await
+ .call_blocking(create_reqs)
+ .await?;
+ ret.into_create().map_err(|ret| {
+ InternalSnafu {
+ reason: format!(
+ "Flow Node/Worker itc failed, expect Response::Create, found {ret:?}"
+ ),
+ }
+ .build()
+ })?
+ }
+
+ /// remove task, return task id
+ pub async fn remove_flow(&self, flow_id: FlowId) -> Result<bool, Error> {
+ let req = Request::Remove { flow_id };
+ let ret = self.itc_client.lock().await.call_blocking(req).await?;
+
+ ret.into_remove().map_err(|ret| {
+ InternalSnafu {
+ reason: format!("Flow Node/Worker failed, expect Response::Remove, found {ret:?}"),
+ }
+ .build()
+ })
+ }
+
+ /// trigger running the worker, will not block, and will run the worker parallelly
+ ///
+ /// will set the current timestamp to `now` for all dataflows before running them
+ pub async fn run_available(&self, now: repr::Timestamp) -> Result<(), Error> {
+ self.itc_client
+ .lock()
+ .await
+ .call_non_blocking(Request::RunAvail { now })
+ .await
+ }
+
+ pub async fn contains_flow(&self, flow_id: FlowId) -> Result<bool, Error> {
+ let req = Request::ContainTask { flow_id };
+ let ret = self.itc_client.lock().await.call_blocking(req).await?;
+
+ ret.into_contain_task().map_err(|ret| {
+ InternalSnafu {
+ reason: format!(
+ "Flow Node/Worker itc failed, expect Response::ContainTask, found {ret:?}"
+ ),
+ }
+ .build()
+ })
+ }
+
+ /// shutdown the worker
+ pub async fn shutdown(&self) -> Result<(), Error> {
+ self.itc_client
+ .lock()
+ .await
+ .call_non_blocking(Request::Shutdown)
+ .await
+ }
+}
+
+/// The actual worker that does the work and contain active state
+#[derive(Debug)]
+pub struct Worker<'subgraph> {
+ /// Task states
+ pub(crate) task_states: BTreeMap<FlowId, ActiveDataflowState<'subgraph>>,
+ itc_server: Arc<Mutex<InterThreadCallServer>>,
+}
+
+impl<'s> Worker<'s> {
+ #[allow(clippy::too_many_arguments)]
+ pub fn create_flow(
+ &mut self,
+ flow_id: FlowId,
+ plan: TypedPlan,
+ sink_id: GlobalId,
+ sink_sender: mpsc::UnboundedSender<DiffRow>,
+ source_ids: &[GlobalId],
+ src_recvs: Vec<broadcast::Receiver<DiffRow>>,
+ // TODO(discord9): set expire duration for all arrangement and compare to sys timestamp instead
+ expire_when: Option<repr::Duration>,
+ create_if_not_exist: bool,
+ err_collector: ErrCollector,
+ ) -> Result<Option<FlowId>, Error> {
+ let _ = expire_when;
+ let already_exist = self.task_states.contains_key(&flow_id);
+ match (already_exist, create_if_not_exist) {
+ (true, true) => return Ok(None),
+ (true, false) => FlowAlreadyExistSnafu { id: flow_id }.fail()?,
+ (false, _) => (),
+ };
+
+ let mut cur_task_state = ActiveDataflowState::<'s> {
+ err_collector,
+ ..Default::default()
+ };
+
+ {
+ let mut ctx = cur_task_state.new_ctx(sink_id);
+ for (source_id, src_recv) in source_ids.iter().zip(src_recvs) {
+ let bundle = ctx.render_source(src_recv)?;
+ ctx.insert_global(*source_id, bundle);
+ }
+
+ let rendered = ctx.render_plan(plan)?;
+ ctx.render_unbounded_sink(rendered, sink_sender);
+ }
+ self.task_states.insert(flow_id, cur_task_state);
+ Ok(Some(flow_id))
+ }
+
+ /// Remove a flow, returning true if a flow was removed.
+ pub fn remove_flow(&mut self, flow_id: FlowId) -> bool {
+ self.task_states.remove(&flow_id).is_some()
+ }
+
+ /// Run the worker, blocking, until shutdown signal is received
+ pub fn run(&mut self) {
+ loop {
+ let (req_id, req) = if let Some(ret) = self.itc_server.blocking_lock().blocking_recv() {
+ ret
+ } else {
+ common_telemetry::error!(
+ "Worker's itc server has been closed unexpectedly, shutting down worker now."
+ );
+ break;
+ };
+
+ let ret = self.handle_req(req_id, req);
+ match ret {
+ Ok(Some((id, resp))) => {
+ if let Err(err) = self.itc_server.blocking_lock().resp(id, resp) {
+ common_telemetry::error!(
+ "Worker's itc server has been closed unexpectedly, shutting down worker: {}",
+ err
+ );
+ break;
+ };
+ }
+ Ok(None) => continue,
+ Err(()) => {
+ break;
+ }
+ }
+ }
+ }
+
+ /// Run with the tick acquired from the tick manager (usually the system time).
+ /// TODO(discord9): better tick management
+ pub fn run_tick(&mut self, now: repr::Timestamp) {
+ for (_flow_id, task_state) in self.task_states.iter_mut() {
+ task_state.set_current_ts(now);
+ task_state.run_available();
+ }
+ }
+
+ /// Handle a request, returning a response if there is one.
+ ///
+ /// Returns `Err(())` if a shutdown request is received.
+ fn handle_req(&mut self, req_id: ReqId, req: Request) -> Result<Option<(ReqId, Response)>, ()> {
+ let ret = match req {
+ Request::Create {
+ flow_id,
+ plan,
+ sink_id,
+ sink_sender,
+ source_ids,
+ src_recvs,
+ expire_when,
+ create_if_not_exist,
+ err_collector,
+ } => {
+ let task_create_result = self.create_flow(
+ flow_id,
+ plan,
+ sink_id,
+ sink_sender,
+ &source_ids,
+ src_recvs,
+ expire_when,
+ create_if_not_exist,
+ err_collector,
+ );
+ Some((
+ req_id,
+ Response::Create {
+ result: task_create_result,
+ },
+ ))
+ }
+ Request::Remove { flow_id } => {
+ let ret = self.remove_flow(flow_id);
+ Some((req_id, Response::Remove { result: ret }))
+ }
+ Request::RunAvail { now } => {
+ self.run_tick(now);
+ None
+ }
+ Request::ContainTask { flow_id } => {
+ let ret = self.task_states.contains_key(&flow_id);
+ Some((req_id, Response::ContainTask { result: ret }))
+ }
+ Request::Shutdown => return Err(()),
+ };
+ Ok(ret)
+ }
+}
+
+#[derive(Debug, EnumAsInner)]
+pub enum Request {
+ Create {
+ flow_id: FlowId,
+ plan: TypedPlan,
+ sink_id: GlobalId,
+ sink_sender: mpsc::UnboundedSender<DiffRow>,
+ source_ids: Vec<GlobalId>,
+ src_recvs: Vec<broadcast::Receiver<DiffRow>>,
+ expire_when: Option<repr::Duration>,
+ create_if_not_exist: bool,
+ err_collector: ErrCollector,
+ },
+ Remove {
+ flow_id: FlowId,
+ },
+ /// Trigger the worker to run; useful after the input buffer is full.
+ RunAvail {
+ now: repr::Timestamp,
+ },
+ ContainTask {
+ flow_id: FlowId,
+ },
+ Shutdown,
+}
+
+#[derive(Debug, EnumAsInner)]
+enum Response {
+ Create {
+ result: Result<Option<FlowId>, Error>,
+ // TODO(discord9): add flow err_collector
+ },
+ Remove {
+ result: bool,
+ },
+ ContainTask {
+ result: bool,
+ },
+}
+
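+/// Creates the client/server pair backing `WorkerHandle` <-> `Worker` communication: one
+/// unbounded channel carries `(ReqId, Request)` and another carries `(ReqId, Response)`.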
+fn create_inter_thread_call() -> (InterThreadCallClient, InterThreadCallServer) {
+ let (arg_send, arg_recv) = mpsc::unbounded_channel();
+ let (ret_send, ret_recv) = mpsc::unbounded_channel();
+ let client = InterThreadCallClient {
+ call_id: AtomicUsize::new(0),
+ arg_sender: arg_send,
+ ret_recv,
+ };
+ let server = InterThreadCallServer {
+ arg_recv,
+ ret_sender: ret_send,
+ };
+ (client, server)
+}
+
+#[derive(Debug)]
+struct InterThreadCallClient {
+ call_id: AtomicUsize,
+ arg_sender: mpsc::UnboundedSender<(ReqId, Request)>,
+ ret_recv: mpsc::UnboundedReceiver<(ReqId, Response)>,
+}
+
+impl InterThreadCallClient {
+ /// Send a request without blocking or expecting a response.
+ async fn call_non_blocking(&self, req: Request) -> Result<(), Error> {
+ // TODO(discord9): relax memory order later
+ let call_id = self.call_id.fetch_add(1, Ordering::SeqCst);
+ self.arg_sender
+ .send((call_id, req))
+ .map_err(from_send_error)
+ }
+
+ /// Send a request and block until the response arrives.
+ async fn call_blocking(&mut self, req: Request) -> Result<Response, Error> {
+ // TODO(discord9): relax memory order later
+ let call_id = self.call_id.fetch_add(1, Ordering::SeqCst);
+ self.arg_sender
+ .send((call_id, req))
+ .map_err(from_send_error)?;
+
+ // TODO(discord9): better inter-thread call impl, i.e. support multiple clients (also consider whether that's necessary),
+ // since one node manager might manage multiple workers, but one worker should only belong to one node manager
+ let (ret_call_id, ret) = self
+ .ret_recv
+ .recv()
+ .await
+ .context(InternalSnafu { reason: "InterThreadCallClient call_blocking failed, ret_recv has been closed and there are no remaining messages in the channel's buffer" })?;
+
+ ensure!(
+ ret_call_id == call_id,
+ InternalSnafu {
+ reason: "call id mismatch, worker/worker handler should be in sync",
+ }
+ );
+ Ok(ret)
+ }
+}
+
+#[derive(Debug)]
+struct InterThreadCallServer {
+ pub arg_recv: mpsc::UnboundedReceiver<(ReqId, Request)>,
+ pub ret_sender: mpsc::UnboundedSender<(ReqId, Response)>,
+}
+
+impl InterThreadCallServer {
+ pub async fn recv(&mut self) -> Option<(usize, Request)> {
+ self.arg_recv.recv().await
+ }
+
+ pub fn blocking_recv(&mut self) -> Option<(usize, Request)> {
+ self.arg_recv.blocking_recv()
+ }
+
+ /// Send response back to the client
+ pub fn resp(&self, call_id: ReqId, resp: Response) -> Result<(), Error> {
+ self.ret_sender
+ .send((call_id, resp))
+ .map_err(from_send_error)
+ }
+}
+
+fn from_send_error<T>(err: mpsc::error::SendError<T>) -> Error {
+ InternalSnafu {
+ reason: format!("InterThreadCallServer resp failed: {}", err),
+ }
+ .build()
+}
+
+#[cfg(test)]
+mod test {
+ use tokio::sync::oneshot;
+
+ use super::*;
+ use crate::expr::Id;
+ use crate::plan::Plan;
+ use crate::repr::{RelationType, Row};
+
+ #[tokio::test]
+ pub async fn test_simple_get_with_worker_and_handle() {
+ let (tx, rx) = oneshot::channel();
+ let worker_thread_handle = std::thread::spawn(move || {
+ let (handle, mut worker) = create_worker();
+ tx.send(handle).unwrap();
+ worker.run();
+ });
+ let handle = rx.await.unwrap();
+ let src_ids = vec![GlobalId::User(1)];
+ let (tx, rx) = broadcast::channel::<DiffRow>(1024);
+ let (sink_tx, mut sink_rx) = mpsc::unbounded_channel::<DiffRow>();
+ let (flow_id, plan) = (
+ 1,
+ TypedPlan {
+ plan: Plan::Get {
+ id: Id::Global(GlobalId::User(1)),
+ },
+ typ: RelationType::new(vec![]),
+ },
+ );
+ let create_reqs = Request::Create {
+ flow_id,
+ plan,
+ sink_id: GlobalId::User(1),
+ sink_sender: sink_tx,
+ source_ids: src_ids,
+ src_recvs: vec![rx],
+ expire_when: None,
+ create_if_not_exist: true,
+ err_collector: ErrCollector::default(),
+ };
+ handle.create_flow(create_reqs).await.unwrap();
+ tx.send((Row::empty(), 0, 0)).unwrap();
+ handle.run_available(0).await.unwrap();
+ assert_eq!(sink_rx.recv().await.unwrap().0, Row::empty());
+ handle.shutdown().await.unwrap();
+ worker_thread_handle.join().unwrap();
+ }
+}
diff --git a/src/flow/src/compute/render.rs b/src/flow/src/compute/render.rs
index c1c4b37cbb4e..0476c8a6e5ac 100644
--- a/src/flow/src/compute/render.rs
+++ b/src/flow/src/compute/render.rs
@@ -113,11 +113,11 @@ impl<'referred, 'df> Context<'referred, 'df> {
reduce_plan,
} => self.render_reduce(input, key_val_plan, reduce_plan),
Plan::Join { .. } => NotImplementedSnafu {
- reason: "Join is still WIP".to_string(),
+ reason: "Join is still WIP",
}
.fail(),
Plan::Union { .. } => NotImplementedSnafu {
- reason: "Union is still WIP".to_string(),
+ reason: "Union is still WIP",
}
.fail(),
}
|
feat
|
flow worker (#3934)
|
519cbc832ad2632c69124751eb4424bf23533d06
|
2022-05-18 12:19:36
|
Lei, Huang
|
feat: add StringVector datatype (#28)
| false
|
diff --git a/src/datatypes/src/lib.rs b/src/datatypes/src/lib.rs
index 10fb9ed36f30..f81267247b92 100644
--- a/src/datatypes/src/lib.rs
+++ b/src/datatypes/src/lib.rs
@@ -1,6 +1,7 @@
#![feature(generic_associated_types)]
-use arrow::array::{BinaryArray, MutableBinaryArray};
+use arrow::array;
+use arrow::array::{BinaryArray, MutableBinaryArray, Utf8Array};
mod data_type;
pub mod prelude;
@@ -12,6 +13,10 @@ pub mod vectors;
pub type LargeBinaryArray = BinaryArray<i64>;
pub type MutableLargeBinaryArray = MutableBinaryArray<i64>;
+
+pub type StringArray = Utf8Array<i32>;
+pub type MutableStringArray = array::MutableUtf8Array<i32>;
+
pub mod schema;
pub mod deserialize;
diff --git a/src/datatypes/src/types.rs b/src/datatypes/src/types.rs
index 61982e99f970..a8a97a3911c8 100644
--- a/src/datatypes/src/types.rs
+++ b/src/datatypes/src/types.rs
@@ -1,7 +1,9 @@
mod binary_type;
mod primitive_traits;
mod primitive_type;
+mod string_type;
pub use binary_type::BinaryType;
pub use primitive_traits::Primitive;
pub use primitive_type::{DataTypeBuilder, PrimitiveType};
+pub use string_type::StringType;
diff --git a/src/datatypes/src/types/string_type.rs b/src/datatypes/src/types/string_type.rs
new file mode 100644
index 000000000000..d5a7e5dae882
--- /dev/null
+++ b/src/datatypes/src/types/string_type.rs
@@ -0,0 +1,34 @@
+use std::sync::Arc;
+
+use arrow::datatypes::DataType as ArrowDataType;
+use common_base::bytes::StringBytes;
+
+use crate::data_type::DataType;
+use crate::prelude::{DataTypeRef, LogicalTypeId, Value};
+
+#[derive(Debug, Default)]
+pub struct StringType;
+
+impl StringType {
+ pub fn arc() -> DataTypeRef {
+ Arc::new(Self)
+ }
+}
+
+impl DataType for StringType {
+ fn name(&self) -> &str {
+ "String"
+ }
+
+ fn logical_type_id(&self) -> LogicalTypeId {
+ LogicalTypeId::String
+ }
+
+ fn default_value(&self) -> Value {
+ StringBytes::default().into()
+ }
+
+ fn as_arrow_type(&self) -> ArrowDataType {
+ ArrowDataType::Utf8
+ }
+}
diff --git a/src/datatypes/src/vectors.rs b/src/datatypes/src/vectors.rs
index ce1837e71a16..17dd2a901b4f 100644
--- a/src/datatypes/src/vectors.rs
+++ b/src/datatypes/src/vectors.rs
@@ -1,5 +1,6 @@
pub mod binary;
pub mod primitive;
+mod string;
use std::any::Any;
use std::sync::Arc;
diff --git a/src/datatypes/src/vectors/string.rs b/src/datatypes/src/vectors/string.rs
new file mode 100644
index 000000000000..7f00f8d7b5fb
--- /dev/null
+++ b/src/datatypes/src/vectors/string.rs
@@ -0,0 +1,119 @@
+use std::any::Any;
+use std::sync::Arc;
+
+use arrow::array::{ArrayRef, Utf8ValuesIter};
+use arrow::bitmap::utils::ZipValidity;
+use serde_json::Value;
+use snafu::ResultExt;
+
+use crate::data_type::DataTypeRef;
+use crate::error::SerializeSnafu;
+use crate::prelude::{ScalarVectorBuilder, Vector};
+use crate::scalars::ScalarVector;
+use crate::serialize::Serializable;
+use crate::types::StringType;
+use crate::{MutableStringArray, StringArray};
+
+/// String array wrapper
+#[derive(Clone)]
+pub struct StringVector {
+ array: StringArray,
+}
+
+impl Vector for StringVector {
+ fn data_type(&self) -> DataTypeRef {
+ StringType::arc()
+ }
+
+ fn as_any(&self) -> &dyn Any {
+ self
+ }
+
+ fn len(&self) -> usize {
+ self.array.len()
+ }
+
+ fn to_arrow_array(&self) -> ArrayRef {
+ Arc::new(self.array.clone())
+ }
+}
+
+impl ScalarVector for StringVector {
+ type RefItem<'a> = &'a str;
+ type Iter<'a> = ZipValidity<'a, &'a str, Utf8ValuesIter<'a, i32>>;
+ type Builder = StringVectorBuilder;
+
+ fn get_data(&self, idx: usize) -> Option<Self::RefItem<'_>> {
+ match idx < self.array.len() {
+ true => Some(self.array.value(idx)),
+ false => None,
+ }
+ }
+
+ fn iter_data(&self) -> Self::Iter<'_> {
+ self.array.iter()
+ }
+}
+
+pub struct StringVectorBuilder {
+ buffer: MutableStringArray,
+}
+
+impl ScalarVectorBuilder for StringVectorBuilder {
+ type VectorType = StringVector;
+
+ fn with_capacity(capacity: usize) -> Self {
+ Self {
+ buffer: MutableStringArray::with_capacity(capacity),
+ }
+ }
+
+ fn push(&mut self, value: Option<<Self::VectorType as ScalarVector>::RefItem<'_>>) {
+ self.buffer.push(value)
+ }
+
+ fn finish(self) -> Self::VectorType {
+ Self::VectorType {
+ array: self.buffer.into(),
+ }
+ }
+}
+
+impl Serializable for StringVector {
+ fn serialize_to_json(&self) -> crate::error::Result<Vec<Value>> {
+ self.array
+ .iter()
+ .map(|v| match v {
+ None => Ok(serde_json::Value::Null),
+ Some(s) => serde_json::to_value(s),
+ })
+ .collect::<serde_json::Result<_>>()
+ .context(SerializeSnafu)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ pub fn test_serialize_string_vector() {
+ let mut builder = StringVectorBuilder::with_capacity(3);
+ builder.push(Some("hello"));
+ builder.push(None);
+ builder.push(Some("world"));
+ let string_vector = builder.finish();
+ let serialized = serialize_to_json_string(string_vector.serialize_to_json().unwrap());
+ assert_eq!(r#"["hello",null,"world"]"#, serialized);
+ }
+
+ pub fn serialize_to_json_string<T>(val: T) -> String
+ where
+ T: serde::Serialize,
+ {
+ let mut output = vec![];
+ let mut serializer = serde_json::Serializer::new(&mut output);
+ val.serialize(&mut serializer).unwrap();
+ String::from_utf8_lossy(&output).into()
+ }
+}
|
feat
|
add StringVector datatype (#28)
|
aad2afd3f29a55c89376545889720fca43f3ea23
|
2024-01-12 11:55:14
|
Weny Xu
|
chore: bump version to 0.6.0 (#3154)
| false
|
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index ead3521eb3c8..3d5fc07a98e5 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -91,7 +91,7 @@ env:
# The scheduled version is '${{ env.NEXT_RELEASE_VERSION }}-nightly-YYYYMMDD', like v0.2.0-nigthly-20230313;
NIGHTLY_RELEASE_PREFIX: nightly
# Note: The NEXT_RELEASE_VERSION should be modified manually by every formal release.
- NEXT_RELEASE_VERSION: v0.6.0
+ NEXT_RELEASE_VERSION: v0.7.0
jobs:
allocate-runners:
diff --git a/Cargo.lock b/Cargo.lock
index 986adf93749a..84e879bf8639 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -196,7 +196,7 @@ checksum = "8f1f8f5a6f3d50d89e3797d7593a50f96bb2aaa20ca0cc7be1fb673232c91d72"
[[package]]
name = "api"
-version = "0.5.1"
+version = "0.6.0"
dependencies = [
"common-base",
"common-decimal",
@@ -674,7 +674,7 @@ dependencies = [
[[package]]
name = "auth"
-version = "0.5.1"
+version = "0.6.0"
dependencies = [
"api",
"async-trait",
@@ -847,7 +847,7 @@ dependencies = [
[[package]]
name = "benchmarks"
-version = "0.5.1"
+version = "0.6.0"
dependencies = [
"arrow",
"chrono",
@@ -1179,7 +1179,7 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
[[package]]
name = "catalog"
-version = "0.5.1"
+version = "0.6.0"
dependencies = [
"api",
"arc-swap",
@@ -1452,7 +1452,7 @@ checksum = "702fc72eb24e5a1e48ce58027a675bc24edd52096d5397d4aea7c6dd9eca0bd1"
[[package]]
name = "client"
-version = "0.5.1"
+version = "0.6.0"
dependencies = [
"api",
"arrow-flight",
@@ -1485,7 +1485,7 @@ dependencies = [
"session",
"snafu",
"substrait 0.17.1",
- "substrait 0.5.1",
+ "substrait 0.6.0",
"tokio",
"tokio-stream",
"tonic 0.10.2",
@@ -1515,7 +1515,7 @@ dependencies = [
[[package]]
name = "cmd"
-version = "0.5.1"
+version = "0.6.0"
dependencies = [
"anymap",
"async-trait",
@@ -1566,7 +1566,7 @@ dependencies = [
"session",
"snafu",
"store-api",
- "substrait 0.5.1",
+ "substrait 0.6.0",
"table",
"temp-env",
"tikv-jemallocator",
@@ -1599,7 +1599,7 @@ checksum = "55b672471b4e9f9e95499ea597ff64941a309b2cdbffcc46f2cc5e2d971fd335"
[[package]]
name = "common-base"
-version = "0.5.1"
+version = "0.6.0"
dependencies = [
"anymap",
"bitvec",
@@ -1614,7 +1614,7 @@ dependencies = [
[[package]]
name = "common-catalog"
-version = "0.5.1"
+version = "0.6.0"
dependencies = [
"chrono",
"common-error",
@@ -1625,7 +1625,7 @@ dependencies = [
[[package]]
name = "common-config"
-version = "0.5.1"
+version = "0.6.0"
dependencies = [
"common-base",
"humantime-serde",
@@ -1638,7 +1638,7 @@ dependencies = [
[[package]]
name = "common-datasource"
-version = "0.5.1"
+version = "0.6.0"
dependencies = [
"arrow",
"arrow-schema",
@@ -1669,7 +1669,7 @@ dependencies = [
[[package]]
name = "common-decimal"
-version = "0.5.1"
+version = "0.6.0"
dependencies = [
"arrow",
"bigdecimal",
@@ -1683,7 +1683,7 @@ dependencies = [
[[package]]
name = "common-error"
-version = "0.5.1"
+version = "0.6.0"
dependencies = [
"snafu",
"strum 0.25.0",
@@ -1691,7 +1691,7 @@ dependencies = [
[[package]]
name = "common-function"
-version = "0.5.1"
+version = "0.6.0"
dependencies = [
"arc-swap",
"build-data",
@@ -1715,7 +1715,7 @@ dependencies = [
[[package]]
name = "common-greptimedb-telemetry"
-version = "0.5.1"
+version = "0.6.0"
dependencies = [
"async-trait",
"common-error",
@@ -1734,7 +1734,7 @@ dependencies = [
[[package]]
name = "common-grpc"
-version = "0.5.1"
+version = "0.6.0"
dependencies = [
"api",
"arrow-flight",
@@ -1764,7 +1764,7 @@ dependencies = [
[[package]]
name = "common-grpc-expr"
-version = "0.5.1"
+version = "0.6.0"
dependencies = [
"api",
"async-trait",
@@ -1783,7 +1783,7 @@ dependencies = [
[[package]]
name = "common-macro"
-version = "0.5.1"
+version = "0.6.0"
dependencies = [
"arc-swap",
"common-query",
@@ -1798,7 +1798,7 @@ dependencies = [
[[package]]
name = "common-mem-prof"
-version = "0.5.1"
+version = "0.6.0"
dependencies = [
"common-error",
"common-macro",
@@ -1811,7 +1811,7 @@ dependencies = [
[[package]]
name = "common-meta"
-version = "0.5.1"
+version = "0.6.0"
dependencies = [
"api",
"async-recursion",
@@ -1858,7 +1858,7 @@ dependencies = [
[[package]]
name = "common-procedure"
-version = "0.5.1"
+version = "0.6.0"
dependencies = [
"async-stream",
"async-trait",
@@ -1882,7 +1882,7 @@ dependencies = [
[[package]]
name = "common-procedure-test"
-version = "0.5.1"
+version = "0.6.0"
dependencies = [
"async-trait",
"common-procedure",
@@ -1890,7 +1890,7 @@ dependencies = [
[[package]]
name = "common-query"
-version = "0.5.1"
+version = "0.6.0"
dependencies = [
"api",
"async-trait",
@@ -1913,7 +1913,7 @@ dependencies = [
[[package]]
name = "common-recordbatch"
-version = "0.5.1"
+version = "0.6.0"
dependencies = [
"common-error",
"common-macro",
@@ -1930,7 +1930,7 @@ dependencies = [
[[package]]
name = "common-runtime"
-version = "0.5.1"
+version = "0.6.0"
dependencies = [
"async-trait",
"common-error",
@@ -1950,7 +1950,7 @@ dependencies = [
[[package]]
name = "common-telemetry"
-version = "0.5.1"
+version = "0.6.0"
dependencies = [
"backtrace",
"common-error",
@@ -1976,7 +1976,7 @@ dependencies = [
[[package]]
name = "common-test-util"
-version = "0.5.1"
+version = "0.6.0"
dependencies = [
"client",
"common-query",
@@ -1988,7 +1988,7 @@ dependencies = [
[[package]]
name = "common-time"
-version = "0.5.1"
+version = "0.6.0"
dependencies = [
"arrow",
"chrono",
@@ -2004,7 +2004,7 @@ dependencies = [
[[package]]
name = "common-version"
-version = "0.5.1"
+version = "0.6.0"
dependencies = [
"build-data",
]
@@ -2634,7 +2634,7 @@ dependencies = [
[[package]]
name = "datanode"
-version = "0.5.1"
+version = "0.6.0"
dependencies = [
"api",
"arrow-flight",
@@ -2694,7 +2694,7 @@ dependencies = [
"snafu",
"sql",
"store-api",
- "substrait 0.5.1",
+ "substrait 0.6.0",
"table",
"tokio",
"tokio-stream",
@@ -2708,7 +2708,7 @@ dependencies = [
[[package]]
name = "datatypes"
-version = "0.5.1"
+version = "0.6.0"
dependencies = [
"arrow",
"arrow-array",
@@ -3169,7 +3169,7 @@ dependencies = [
[[package]]
name = "file-engine"
-version = "0.5.1"
+version = "0.6.0"
dependencies = [
"api",
"async-trait",
@@ -3300,7 +3300,7 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa"
[[package]]
name = "frontend"
-version = "0.5.1"
+version = "0.6.0"
dependencies = [
"api",
"arc-swap",
@@ -3364,7 +3364,7 @@ dependencies = [
"sqlparser 0.38.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=6a93567ae38d42be5c8d08b13c8ff4dde26502ef)",
"store-api",
"strfmt",
- "substrait 0.5.1",
+ "substrait 0.6.0",
"table",
"tokio",
"toml 0.8.8",
@@ -4018,7 +4018,7 @@ dependencies = [
[[package]]
name = "index"
-version = "0.5.1"
+version = "0.6.0"
dependencies = [
"async-trait",
"asynchronous-codec",
@@ -4498,7 +4498,7 @@ checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f"
[[package]]
name = "log-store"
-version = "0.5.1"
+version = "0.6.0"
dependencies = [
"async-stream",
"async-trait",
@@ -4777,7 +4777,7 @@ dependencies = [
[[package]]
name = "meta-client"
-version = "0.5.1"
+version = "0.6.0"
dependencies = [
"api",
"async-trait",
@@ -4807,7 +4807,7 @@ dependencies = [
[[package]]
name = "meta-srv"
-version = "0.5.1"
+version = "0.6.0"
dependencies = [
"anymap",
"api",
@@ -4886,7 +4886,7 @@ dependencies = [
[[package]]
name = "metric-engine"
-version = "0.5.1"
+version = "0.6.0"
dependencies = [
"api",
"aquamarine",
@@ -4957,7 +4957,7 @@ dependencies = [
[[package]]
name = "mito2"
-version = "0.5.1"
+version = "0.6.0"
dependencies = [
"anymap",
"api",
@@ -5458,7 +5458,7 @@ dependencies = [
[[package]]
name = "object-store"
-version = "0.5.1"
+version = "0.6.0"
dependencies = [
"anyhow",
"async-trait",
@@ -5703,7 +5703,7 @@ dependencies = [
[[package]]
name = "operator"
-version = "0.5.1"
+version = "0.6.0"
dependencies = [
"api",
"async-trait",
@@ -5747,7 +5747,7 @@ dependencies = [
"sql",
"sqlparser 0.38.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=6a93567ae38d42be5c8d08b13c8ff4dde26502ef)",
"store-api",
- "substrait 0.5.1",
+ "substrait 0.6.0",
"table",
"tokio",
"tonic 0.10.2",
@@ -5978,7 +5978,7 @@ dependencies = [
[[package]]
name = "partition"
-version = "0.5.1"
+version = "0.6.0"
dependencies = [
"api",
"async-trait",
@@ -6297,7 +6297,7 @@ dependencies = [
[[package]]
name = "plugins"
-version = "0.5.1"
+version = "0.6.0"
dependencies = [
"auth",
"common-base",
@@ -6555,7 +6555,7 @@ dependencies = [
[[package]]
name = "promql"
-version = "0.5.1"
+version = "0.6.0"
dependencies = [
"ahash 0.8.6",
"async-recursion",
@@ -6766,7 +6766,7 @@ dependencies = [
[[package]]
name = "puffin"
-version = "0.5.1"
+version = "0.6.0"
dependencies = [
"async-trait",
"bitflags 2.4.1",
@@ -6877,7 +6877,7 @@ dependencies = [
[[package]]
name = "query"
-version = "0.5.1"
+version = "0.6.0"
dependencies = [
"ahash 0.8.6",
"api",
@@ -6935,7 +6935,7 @@ dependencies = [
"stats-cli",
"store-api",
"streaming-stats",
- "substrait 0.5.1",
+ "substrait 0.6.0",
"table",
"tokio",
"tokio-stream",
@@ -8205,7 +8205,7 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
[[package]]
name = "script"
-version = "0.5.1"
+version = "0.6.0"
dependencies = [
"api",
"arc-swap",
@@ -8465,7 +8465,7 @@ dependencies = [
[[package]]
name = "servers"
-version = "0.5.1"
+version = "0.6.0"
dependencies = [
"aide",
"api",
@@ -8561,7 +8561,7 @@ dependencies = [
[[package]]
name = "session"
-version = "0.5.1"
+version = "0.6.0"
dependencies = [
"api",
"arc-swap",
@@ -8825,7 +8825,7 @@ dependencies = [
[[package]]
name = "sql"
-version = "0.5.1"
+version = "0.6.0"
dependencies = [
"api",
"common-base",
@@ -8877,7 +8877,7 @@ dependencies = [
[[package]]
name = "sqlness-runner"
-version = "0.5.1"
+version = "0.6.0"
dependencies = [
"async-trait",
"clap 4.4.11",
@@ -9084,7 +9084,7 @@ dependencies = [
[[package]]
name = "store-api"
-version = "0.5.1"
+version = "0.6.0"
dependencies = [
"api",
"aquamarine",
@@ -9224,7 +9224,7 @@ dependencies = [
[[package]]
name = "substrait"
-version = "0.5.1"
+version = "0.6.0"
dependencies = [
"async-recursion",
"async-trait",
@@ -9372,7 +9372,7 @@ dependencies = [
[[package]]
name = "table"
-version = "0.5.1"
+version = "0.6.0"
dependencies = [
"anymap",
"async-trait",
@@ -9484,7 +9484,7 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76"
[[package]]
name = "tests-integration"
-version = "0.5.1"
+version = "0.6.0"
dependencies = [
"api",
"async-trait",
@@ -9540,7 +9540,7 @@ dependencies = [
"sql",
"sqlx",
"store-api",
- "substrait 0.5.1",
+ "substrait 0.6.0",
"table",
"tempfile",
"time",
diff --git a/Cargo.toml b/Cargo.toml
index 3d491f13c315..e8d08b7a3549 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -58,7 +58,7 @@ members = [
resolver = "2"
[workspace.package]
-version = "0.5.1"
+version = "0.6.0"
edition = "2021"
license = "Apache-2.0"
|
chore
|
bump version to 0.6.0 (#3154)
|
b062d8515d8c8745f337de90ea7fedf85003e414
|
2024-08-29 11:47:51
|
jeremyhi
|
feat: copy database ignores view and temporary tables (#4640)
| false
|
diff --git a/src/operator/src/statement/copy_database.rs b/src/operator/src/statement/copy_database.rs
index a4b1b9267b2c..134dd2355926 100644
--- a/src/operator/src/statement/copy_database.rs
+++ b/src/operator/src/statement/copy_database.rs
@@ -67,11 +67,6 @@ impl StatementExecutor {
let mut exported_rows = 0;
for table_name in table_names {
- // TODO(hl): remove this hardcode once we've removed numbers table.
- if table_name == "numbers" {
- continue;
- }
-
let table = self
.get_table(&TableReference {
catalog: &req.catalog_name,
@@ -79,6 +74,10 @@ impl StatementExecutor {
table: &table_name,
})
.await?;
+ // Only export base tables; ignore views and temporary tables.
+ if table.table_type() != table::metadata::TableType::Base {
+ continue;
+ }
// Ignores physical tables of metric engine.
if table.table_info().meta.engine == METRIC_ENGINE_NAME
&& !table
|
feat
|
copy database ignores view and temporary tables (#4640)
|
0badb3715edb7cb81030e19da875feb43026d5a9
|
2023-11-27 12:16:46
|
WU Jingdi
|
feat: support sample ratio in trace (#2809)
| false
|
diff --git a/config/standalone.example.toml b/config/standalone.example.toml
index 629c9bdb9a73..9055b7eca088 100644
--- a/config/standalone.example.toml
+++ b/config/standalone.example.toml
@@ -134,7 +134,7 @@ worker_request_batch_size = 64
# Number of meta action updated to trigger a new checkpoint for the manifest
manifest_checkpoint_distance = 10
# Manifest compression type
-manifest_compress_type = "Uncompressed"
+manifest_compress_type = "uncompressed"
# Max number of running background jobs
max_background_jobs = 4
# Interval to auto flush a region if it has not flushed yet.
@@ -162,3 +162,5 @@ sst_write_buffer_size = "8MB"
# enable_otlp_tracing = false
# tracing exporter endpoint with format `ip:port`, we use grpc oltp as exporter, default endpoint is `localhost:4317`
# otlp_endpoint = "localhost:4317"
+# The ratio of traces that will be sampled and exported. Valid range `[0, 1]`: 1 means all traces are sampled, 0 means no traces are sampled; the default is 1. Ratios > 1 are treated as 1 and ratios < 0 are treated as 0.
+# tracing_sample_ratio = 1.0
diff --git a/src/common/telemetry/src/logging.rs b/src/common/telemetry/src/logging.rs
index 9ff9306973ad..f825d8f3835b 100644
--- a/src/common/telemetry/src/logging.rs
+++ b/src/common/telemetry/src/logging.rs
@@ -20,6 +20,7 @@ use once_cell::sync::Lazy;
use opentelemetry::{global, KeyValue};
use opentelemetry_otlp::WithExportConfig;
use opentelemetry_sdk::propagation::TraceContextPropagator;
+use opentelemetry_sdk::trace::Sampler;
use opentelemetry_semantic_conventions::resource;
use serde::{Deserialize, Serialize};
use tracing_appender::non_blocking::WorkerGuard;
@@ -34,15 +35,28 @@ pub use crate::{debug, error, info, trace, warn};
const DEFAULT_OTLP_ENDPOINT: &str = "http://localhost:4317";
-#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
+#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(default)]
pub struct LoggingOptions {
pub dir: String,
pub level: Option<String>,
pub enable_otlp_tracing: bool,
pub otlp_endpoint: Option<String>,
+ pub tracing_sample_ratio: Option<f64>,
}
+impl PartialEq for LoggingOptions {
+ fn eq(&self, other: &Self) -> bool {
+ self.dir == other.dir
+ && self.level == other.level
+ && self.enable_otlp_tracing == other.enable_otlp_tracing
+ && self.otlp_endpoint == other.otlp_endpoint
+ && self.tracing_sample_ratio == other.tracing_sample_ratio
+ }
+}
+
+impl Eq for LoggingOptions {}
+
impl Default for LoggingOptions {
fn default() -> Self {
Self {
@@ -50,6 +64,7 @@ impl Default for LoggingOptions {
level: None,
enable_otlp_tracing: false,
otlp_endpoint: None,
+ tracing_sample_ratio: None,
}
}
}
@@ -145,7 +160,10 @@ pub fn init_global_logging(
let filter = targets_string
.parse::<filter::Targets>()
.expect("error parsing log level string");
-
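+ // Default to sampling every trace when no sample ratio is configured.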
+ let sampler = opts
+ .tracing_sample_ratio
+ .map(Sampler::TraceIdRatioBased)
+ .unwrap_or(Sampler::AlwaysOn);
// Must enable 'tokio_unstable' cfg to use this feature.
// For example: `RUSTFLAGS="--cfg tokio_unstable" cargo run -F common-telemetry/console -- standalone start`
#[cfg(feature = "tokio-console")]
@@ -200,17 +218,19 @@ pub fn init_global_logging(
.unwrap_or(DEFAULT_OTLP_ENDPOINT.to_string()),
),
)
- .with_trace_config(opentelemetry_sdk::trace::config().with_resource(
- opentelemetry_sdk::Resource::new(vec![
- KeyValue::new(resource::SERVICE_NAME, app_name.to_string()),
- KeyValue::new(
- resource::SERVICE_INSTANCE_ID,
- node_id.unwrap_or("none".to_string()),
- ),
- KeyValue::new(resource::SERVICE_VERSION, env!("CARGO_PKG_VERSION")),
- KeyValue::new(resource::PROCESS_PID, std::process::id().to_string()),
- ]),
- ))
+ .with_trace_config(
+ opentelemetry_sdk::trace::config()
+ .with_sampler(sampler)
+ .with_resource(opentelemetry_sdk::Resource::new(vec![
+ KeyValue::new(resource::SERVICE_NAME, app_name.to_string()),
+ KeyValue::new(
+ resource::SERVICE_INSTANCE_ID,
+ node_id.unwrap_or("none".to_string()),
+ ),
+ KeyValue::new(resource::SERVICE_VERSION, env!("CARGO_PKG_VERSION")),
+ KeyValue::new(resource::PROCESS_PID, std::process::id().to_string()),
+ ])),
+ )
.install_batch(opentelemetry_sdk::runtime::Tokio)
.expect("otlp tracer install failed");
let tracing_layer = Some(tracing_opentelemetry::layer().with_tracer(tracer));
|
feat
|
support sample ratio in trace (#2809)
|
91026a68204b8da69e9fa390c8e97605640c59ab
|
2023-06-06 14:55:04
|
LFC
|
chore: clean up some of my todos (#1723)
| false
|
diff --git a/src/client/src/client_manager.rs b/src/client/src/client_manager.rs
index d8e85785ad9d..3acb8b3d8fed 100644
--- a/src/client/src/client_manager.rs
+++ b/src/client/src/client_manager.rs
@@ -31,7 +31,6 @@ pub struct DatanodeClients {
impl Default for DatanodeClients {
fn default() -> Self {
- // TODO(LFC): Make this channel config configurable.
let config = ChannelConfig::new().timeout(Duration::from_secs(8));
Self {
diff --git a/src/client/src/database.rs b/src/client/src/database.rs
index 5c28efaf869a..524c406abbf7 100644
--- a/src/client/src/database.rs
+++ b/src/client/src/database.rs
@@ -254,7 +254,6 @@ impl Database {
let mut client = self.client.make_flight_client()?;
- // TODO(LFC): Streaming get flight data.
let flight_data: Vec<FlightData> = client
.mut_inner()
.do_get(request)
diff --git a/src/common/query/src/logical_plan/accumulator.rs b/src/common/query/src/logical_plan/accumulator.rs
index 694d46074de9..f0c272ada1f5 100644
--- a/src/common/query/src/logical_plan/accumulator.rs
+++ b/src/common/query/src/logical_plan/accumulator.rs
@@ -172,7 +172,6 @@ impl DfAccumulator for DfAccumulatorAdaptor {
}
fn size(&self) -> usize {
- // TODO(LFC): Implement new "size" method for Accumulator.
0
}
}
diff --git a/src/common/query/src/physical_plan.rs b/src/common/query/src/physical_plan.rs
index 808bf2723621..144e1bcd0b6c 100644
--- a/src/common/query/src/physical_plan.rs
+++ b/src/common/query/src/physical_plan.rs
@@ -194,7 +194,6 @@ impl DfPhysicalPlan for DfPhysicalPlanAdapter {
}
fn statistics(&self) -> Statistics {
- // TODO(LFC): impl statistics
Statistics::default()
}
}
diff --git a/src/frontend/src/instance.rs b/src/frontend/src/instance.rs
index bea4737fe7bc..f76a44c00a87 100644
--- a/src/frontend/src/instance.rs
+++ b/src/frontend/src/instance.rs
@@ -285,9 +285,6 @@ impl Instance {
requests: InsertRequests,
ctx: QueryContextRef,
) -> Result<Output> {
- // TODO(LFC): Optimize concurrent table creation and table alteration.
- // Currently table creation is guarded by a distributed lock in Metasrv. However, table
- // alteration is not. We should all switch to procedures in Metasrv.
let _ = future::join_all(
requests
.inserts
diff --git a/src/frontend/src/instance/distributed.rs b/src/frontend/src/instance/distributed.rs
index ce357d5b6d00..f20d0596b01b 100644
--- a/src/frontend/src/instance/distributed.rs
+++ b/src/frontend/src/instance/distributed.rs
@@ -598,7 +598,6 @@ impl DistInstance {
Ok(Output::AffectedRows(affected_rows as usize))
}
- // TODO(LFC): Like insertions above, refactor GRPC deletion impl here.
async fn handle_dist_delete(
&self,
request: DeleteRequest,
@@ -662,8 +661,6 @@ impl GrpcQueryHandler for DistInstance {
match expr {
DdlExpr::CreateDatabase(expr) => self.handle_create_database(expr, ctx).await,
DdlExpr::CreateTable(mut expr) => {
- // TODO(LFC): Support creating distributed table through GRPC interface.
- // Currently only SQL supports it; how to design the fields in CreateTableExpr?
let _ = self.create_table(&mut expr, None).await;
Ok(Output::AffectedRows(0))
}
diff --git a/src/meta-srv/src/handler/failure_handler.rs b/src/meta-srv/src/handler/failure_handler.rs
index cf848c1d2096..68f772959007 100644
--- a/src/meta-srv/src/handler/failure_handler.rs
+++ b/src/meta-srv/src/handler/failure_handler.rs
@@ -74,12 +74,6 @@ impl HeartbeatHandler for RegionFailureHandler {
let Some(stat) = acc.stat.as_ref() else { return Ok(()) };
- // TODO(LFC): Filter out the stalled heartbeats:
- // After the region failover is done, the distribution of region is changed.
- // We can compare the heartbeat info here with the global region placement metadata,
- // and remove the incorrect region ident keys in failure detect runner
- // (by sending a control message).
-
let heartbeat = DatanodeHeartbeat {
region_idents: stat
.region_stats
diff --git a/src/meta-srv/src/metasrv.rs b/src/meta-srv/src/metasrv.rs
index 8d664253ab22..6cb07f36bf45 100644
--- a/src/meta-srv/src/metasrv.rs
+++ b/src/meta-srv/src/metasrv.rs
@@ -146,25 +146,21 @@ impl MetaSrv {
common_runtime::spawn_bg(async move {
loop {
match rx.recv().await {
- Ok(msg) => {
- match msg {
- LeaderChangeMessage::Elected(_) => {
- if let Err(e) = procedure_manager.recover().await {
- error!("Failed to recover procedures, error: {e}");
- }
- }
- LeaderChangeMessage::StepDown(leader) => {
- // TODO(LFC): TBC
- error!("Leader :{:?} step down", leader);
+ Ok(msg) => match msg {
+ LeaderChangeMessage::Elected(_) => {
+ if let Err(e) = procedure_manager.recover().await {
+ error!("Failed to recover procedures, error: {e}");
}
}
- }
+ LeaderChangeMessage::StepDown(leader) => {
+ error!("Leader :{:?} step down", leader);
+ }
+ },
Err(RecvError::Closed) => {
error!("Not expected, is leader election loop still running?");
break;
}
Err(RecvError::Lagged(_)) => {
- // TODO(LFC): TBC
break;
}
}
diff --git a/src/meta-srv/src/procedure/region_failover/update_metadata.rs b/src/meta-srv/src/procedure/region_failover/update_metadata.rs
index 6c28de3efaa7..e8350f6c65b9 100644
--- a/src/meta-srv/src/procedure/region_failover/update_metadata.rs
+++ b/src/meta-srv/src/procedure/region_failover/update_metadata.rs
@@ -43,16 +43,6 @@ impl UpdateRegionMetadata {
Self { candidate }
}
- // TODO(LFC): Update the two table metadata values in a batch atomically.
- //
- // Though the updating of the two metadata values is guarded by a distributed lock,
- // it does not robust enough. For example, the lock lease could be expired in the middle of
- // one's updating, letting others to start updating concurrently. For now, we set the lease of
- // the distributed lock to 10 seconds, which is long enough here to get the job done.
- //
- // Maybe we should introduce "version" companion values to these two metadata values, and
- // use ETCD transaction request to update them?
-
/// Updates the metadata of the table. Specifically, the [TableGlobalValue] and [TableRouteValue].
async fn update_metadata(
&self,
diff --git a/src/query/src/query_engine/state.rs b/src/query/src/query_engine/state.rs
index edf9ed1008cd..c19e0ed8b63a 100644
--- a/src/query/src/query_engine/state.rs
+++ b/src/query/src/query_engine/state.rs
@@ -116,13 +116,25 @@ impl QueryEngineState {
.cloned()
}
+ /// Register an aggregate function.
+ ///
+ /// # Panics
+ /// Will panic if a function with the same name is already registered.
+ ///
+ /// Panicking consideration: currently the aggregate functions are all statically registered;
+ /// users cannot define their own aggregate functions on the fly. So we can panic here. If that
+ /// invariant is broken in the future, we should return an error instead of panicking.
pub fn register_aggregate_function(&self, func: AggregateFunctionMetaRef) {
- // TODO(LFC): Return some error if there exists an aggregate function with the same name.
- // Simply overwrite the old value for now.
- self.aggregate_functions
+ let name = func.name();
+ let x = self
+ .aggregate_functions
.write()
.unwrap()
- .insert(func.name(), func);
+ .insert(name.clone(), func);
+ assert!(
+ x.is_none(),
+ "Already registered aggregate function '{name}'"
+ );
}
#[inline]
diff --git a/src/query/src/sql.rs b/src/query/src/sql.rs
index 17f39264263a..73e0f800ba17 100644
--- a/src/query/src/sql.rs
+++ b/src/query/src/sql.rs
@@ -96,7 +96,6 @@ pub async fn show_databases(
stmt: ShowDatabases,
catalog_manager: CatalogManagerRef,
) -> Result<Output> {
- // TODO(LFC): supports WHERE
ensure!(
matches!(stmt.kind, ShowKind::All | ShowKind::Like(_)),
error::UnsupportedExprSnafu {
@@ -136,7 +135,6 @@ pub async fn show_tables(
catalog_manager: CatalogManagerRef,
query_ctx: QueryContextRef,
) -> Result<Output> {
- // TODO(LFC): supports WHERE
ensure!(
matches!(stmt.kind, ShowKind::All | ShowKind::Like(_)),
error::UnsupportedExprSnafu {
diff --git a/src/servers/src/mysql/federated.rs b/src/servers/src/mysql/federated.rs
index b01033c68a35..18ff814433b3 100644
--- a/src/servers/src/mysql/federated.rs
+++ b/src/servers/src/mysql/federated.rs
@@ -30,7 +30,6 @@ use regex::bytes::RegexSet;
use regex::Regex;
use session::context::QueryContextRef;
-// TODO(LFC): Include GreptimeDB's version and git commit tag etc.
const MYSQL_VERSION: &str = "8.0.26";
static SELECT_VAR_PATTERN: Lazy<Regex> = Lazy::new(|| Regex::new("(?i)^(SELECT @@(.*))").unwrap());
diff --git a/src/servers/src/mysql/handler.rs b/src/servers/src/mysql/handler.rs
index 5b0e1a024d0e..d917db5597ea 100644
--- a/src/servers/src/mysql/handler.rs
+++ b/src/servers/src/mysql/handler.rs
@@ -88,9 +88,6 @@ impl MysqlInstanceShim {
trace!("Start executing query: '{}'", query);
let start = Instant::now();
- // TODO(LFC): Find a better way to deal with these special federated queries:
- // `check` uses regex to filter out unsupported statements emitted by MySQL's federated
- // components, this is quick and dirty, there must be a better way to do it.
let output =
if let Some(output) = crate::mysql::federated::check(query, self.session.context()) {
vec![Ok(output)]
diff --git a/src/servers/src/mysql/server.rs b/src/servers/src/mysql/server.rs
index 5c5e593eb508..29f3e550d22a 100644
--- a/src/servers/src/mysql/server.rs
+++ b/src/servers/src/mysql/server.rs
@@ -157,7 +157,6 @@ impl MysqlServer {
info!("MySQL connection coming from: {}", stream.peer_addr()?);
io_runtime.spawn(async move {
increment_gauge!(crate::metrics::METRIC_MYSQL_CONNECTIONS, 1.0);
- // TODO(LFC): Use `output_stream` to write large MySQL ResultSet to client.
if let Err(e) = Self::do_handle(stream, spawn_ref, spawn_config).await {
// TODO(LFC): Write this error to client as well, in MySQL text protocol.
// Looks like we have to expose opensrv-mysql's `PacketWriter`?
diff --git a/src/servers/src/query_handler/sql.rs b/src/servers/src/query_handler/sql.rs
index dd621003934d..af8bbed5c2b9 100644
--- a/src/servers/src/query_handler/sql.rs
+++ b/src/servers/src/query_handler/sql.rs
@@ -43,7 +43,6 @@ pub trait SqlQueryHandler {
query_ctx: QueryContextRef,
) -> Vec<std::result::Result<Output, Self::Error>>;
- // TODO(LFC): revisit this for mysql prepared statement
async fn do_describe(
&self,
stmt: Statement,
|
chore
|
clean up some of my todos (#1723)
|
450dfe324db20970ed88d70e5c047ae13f064be9
|
2024-02-20 14:52:45
|
Lei, HUANG
|
feat: data buffer and related structs (#3329)
| false
|
diff --git a/src/mito2/src/error.rs b/src/mito2/src/error.rs
index 468c1f8ed921..f141857a5322 100644
--- a/src/mito2/src/error.rs
+++ b/src/mito2/src/error.rs
@@ -37,8 +37,8 @@ use crate::worker::WorkerId;
#[stack_trace_debug]
pub enum Error {
#[snafu(display(
- "Failed to set region {} to writable, it was expected to replayed to {}, but actually replayed to {}",
- region_id, expected_last_entry_id, replayed_last_entry_id
+ "Failed to set region {} to writable, it was expected to replayed to {}, but actually replayed to {}",
+ region_id, expected_last_entry_id, replayed_last_entry_id
))]
UnexpectedReplay {
location: Location,
@@ -559,6 +559,13 @@ pub enum Error {
#[snafu(display("Encode null value"))]
IndexEncodeNull { location: Location },
+
+ #[snafu(display("Failed to encode memtable to Parquet bytes"))]
+ EncodeMemtable {
+ #[snafu(source)]
+ error: parquet::errors::ParquetError,
+ location: Location,
+ },
}
pub type Result<T, E = Error> = std::result::Result<T, E>;
@@ -662,6 +669,7 @@ impl ErrorExt for Error {
FilterRecordBatch { source, .. } => source.status_code(),
Upload { .. } => StatusCode::StorageUnavailable,
BiError { .. } => StatusCode::Internal,
+ EncodeMemtable { .. } => StatusCode::Internal,
}
}
diff --git a/src/mito2/src/memtable/merge_tree/data.rs b/src/mito2/src/memtable/merge_tree/data.rs
index 3f2627e9d46c..2e903519e618 100644
--- a/src/mito2/src/memtable/merge_tree/data.rs
+++ b/src/mito2/src/memtable/merge_tree/data.rs
@@ -14,8 +14,825 @@
//! Data part of a shard.
-/// Buffer to store columns not in the primary key.
-pub struct DataBuffer {}
+use std::cmp::{Ordering, Reverse};
+use std::ops::Range;
+use std::sync::Arc;
+
+use bytes::Bytes;
+use datatypes::arrow;
+use datatypes::arrow::array::{RecordBatch, UInt16Array, UInt32Array};
+use datatypes::arrow::datatypes::{Field, Schema, SchemaRef};
+use datatypes::data_type::DataType;
+use datatypes::prelude::{ConcreteDataType, MutableVector, ScalarVectorBuilder, VectorRef};
+use datatypes::schema::ColumnSchema;
+use datatypes::types::TimestampType;
+use datatypes::vectors::{
+ TimestampMicrosecondVector, TimestampMillisecondVector, TimestampNanosecondVector,
+ TimestampSecondVector, UInt16Vector, UInt16VectorBuilder, UInt64Vector, UInt64VectorBuilder,
+ UInt8VectorBuilder,
+};
+use parquet::arrow::ArrowWriter;
+use parquet::file::properties::WriterProperties;
+use snafu::ResultExt;
+use store_api::metadata::RegionMetadataRef;
+use store_api::storage::consts::{OP_TYPE_COLUMN_NAME, SEQUENCE_COLUMN_NAME};
+
+use crate::error;
+use crate::error::Result;
+use crate::memtable::key_values::KeyValue;
+use crate::memtable::merge_tree::{PkId, PkIndex};
+
+const PK_INDEX_COLUMN_NAME: &str = "__pk_index";
+
+/// Data part batches returns by `DataParts::read`.
+#[derive(Debug, Clone)]
+pub struct DataBatch<'a> {
+ /// Primary key index of this batch.
+ pk_index: PkIndex,
+ /// Record batch of data.
+ rb: &'a RecordBatch,
+ /// Range of current primary key inside record batch
+ range: Range<usize>,
+}
+
+impl<'a> DataBatch<'a> {
+ pub(crate) fn pk_index(&self) -> PkIndex {
+ self.pk_index
+ }
+
+ pub(crate) fn record_batch(&self) -> &RecordBatch {
+ self.rb
+ }
+
+ pub(crate) fn range(&self) -> Range<usize> {
+ self.range.clone()
+ }
+
+ pub(crate) fn slice_record_batch(&self) -> RecordBatch {
+ self.rb.slice(self.range.start, self.range.len())
+ }
+}
+
+/// Buffer for the value part (pk_index, ts, sequence, op_type, field columns) in a shard.
+pub struct DataBuffer {
+ metadata: RegionMetadataRef,
+ /// Schema for data part (primary keys are replaced with pk_index)
+ data_part_schema: SchemaRef,
+ /// Builder for primary key index.
+ pk_index_builder: UInt16VectorBuilder,
+ /// Builder for timestamp column.
+ ts_builder: Box<dyn MutableVector>,
+ /// Builder for sequence column.
+ sequence_builder: UInt64VectorBuilder,
+ /// Builder for op_type column.
+ op_type_builder: UInt8VectorBuilder,
+ /// Builders for field columns.
+ field_builders: Vec<LazyMutableVectorBuilder>,
+}
+
+impl DataBuffer {
+ /// Creates a `DataBuffer` instance with the given region metadata and capacity.
+ pub fn with_capacity(metadata: RegionMetadataRef, init_capacity: usize) -> Self {
+ let ts_builder = metadata
+ .time_index_column()
+ .column_schema
+ .data_type
+ .create_mutable_vector(init_capacity);
+
+ let pk_id_builder = UInt16VectorBuilder::with_capacity(init_capacity);
+ let sequence_builder = UInt64VectorBuilder::with_capacity(init_capacity);
+ let op_type_builder = UInt8VectorBuilder::with_capacity(init_capacity);
+
+ let field_builders = metadata
+ .field_columns()
+ .map(|c| LazyMutableVectorBuilder::new(c.column_schema.data_type.clone()))
+ .collect::<Vec<_>>();
+
+ let data_part_schema = memtable_schema_to_encoded_schema(&metadata);
+ Self {
+ metadata,
+ data_part_schema,
+ pk_index_builder: pk_id_builder,
+ ts_builder,
+ sequence_builder,
+ op_type_builder,
+ field_builders,
+ }
+ }
+
+ /// Writes a row to the data buffer.
+ pub fn write_row(&mut self, pk_id: PkId, kv: KeyValue) {
+ self.ts_builder.push_value_ref(kv.timestamp());
+ self.pk_index_builder.push(Some(pk_id.pk_index));
+ self.sequence_builder.push(Some(kv.sequence()));
+ self.op_type_builder.push(Some(kv.op_type() as u8));
+
+ debug_assert_eq!(self.field_builders.len(), kv.num_fields());
+
+ for (idx, field) in kv.fields().enumerate() {
+ self.field_builders[idx]
+ .get_or_create_builder(self.ts_builder.len())
+ .push_value_ref(field);
+ }
+ }
+
+ /// Freezes `DataBuffer` to bytes. Use `pk_weights` to convert pk_id to pk sort order.
+ /// `freeze` clears the buffers of builders.
+ pub fn freeze(&mut self, _pk_weights: &[u16]) -> Result<DataPart> {
+ // we need to distinguish between `freeze` in `ShardWriter` and `Shard`.
+ todo!()
+ }
+
+ /// Reads batches from the data buffer without resetting the builders' buffers.
+ pub fn iter(&mut self, pk_weights: &[u16]) -> Result<DataBufferIter> {
+ // todo(hl): control whether to dedup while invoking `iter`.
+ let batch = data_buffer_to_record_batches(
+ self.data_part_schema.clone(),
+ self,
+ pk_weights,
+ true,
+ true,
+ )?;
+ DataBufferIter::new(batch)
+ }
+
+ /// Returns the number of rows in the data buffer.
+ pub fn num_rows(&self) -> usize {
+ self.ts_builder.len()
+ }
+
+ /// Returns whether the buffer is empty.
+ pub fn is_empty(&self) -> bool {
+ self.num_rows() == 0
+ }
+}
+
+enum LazyMutableVectorBuilder {
+ Type(ConcreteDataType),
+ Builder(Box<dyn MutableVector>),
+}
+
+impl LazyMutableVectorBuilder {
+ fn new(ty: ConcreteDataType) -> Self {
+ Self::Type(ty)
+ }
+
+ fn get_or_create_builder(&mut self, init_capacity: usize) -> &mut Box<dyn MutableVector> {
+ match self {
+ LazyMutableVectorBuilder::Type(ty) => {
+ let builder = ty.create_mutable_vector(init_capacity);
+ *self = LazyMutableVectorBuilder::Builder(builder);
+ self.get_or_create_builder(init_capacity)
+ }
+ LazyMutableVectorBuilder::Builder(builder) => builder,
+ }
+ }
+}
+
+/// Converts `DataBuffer` to record batches, with rows sorted according to pk_weights.
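+///
+/// `keep_data` controls whether the builders are cloned (contents preserved) or drained,
+/// and `dedup` keeps only the row with the largest sequence for each (pk, timestamp) pair.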
+fn data_buffer_to_record_batches(
+ schema: SchemaRef,
+ buffer: &mut DataBuffer,
+ pk_weights: &[u16],
+ keep_data: bool,
+ dedup: bool,
+) -> Result<RecordBatch> {
+ let num_rows = buffer.ts_builder.len();
+
+ let (pk_index_v, ts_v, sequence_v, op_type_v) = if keep_data {
+ (
+ buffer.pk_index_builder.finish_cloned(),
+ buffer.ts_builder.to_vector_cloned(),
+ buffer.sequence_builder.finish_cloned(),
+ buffer.op_type_builder.finish_cloned(),
+ )
+ } else {
+ (
+ buffer.pk_index_builder.finish(),
+ buffer.ts_builder.to_vector(),
+ buffer.sequence_builder.finish(),
+ buffer.op_type_builder.finish(),
+ )
+ };
+
+ let mut rows = build_rows_to_sort(pk_weights, &pk_index_v, &ts_v, &sequence_v);
+
+ // sort and dedup
+ rows.sort_unstable_by(|l, r| l.1.cmp(&r.1));
+ if dedup {
+ rows.dedup_by(|l, r| l.1.pk_weight == r.1.pk_weight && l.1.timestamp == r.1.timestamp);
+ }
+ let indices_to_take = UInt32Array::from_iter_values(rows.into_iter().map(|v| v.0 as u32));
+
+ let mut columns = Vec::with_capacity(4 + buffer.field_builders.len());
+
+ columns.push(
+ arrow::compute::take(&pk_index_v.as_arrow(), &indices_to_take, None)
+ .context(error::ComputeArrowSnafu)?,
+ );
+
+ columns.push(
+ arrow::compute::take(&ts_v.to_arrow_array(), &indices_to_take, None)
+ .context(error::ComputeArrowSnafu)?,
+ );
+
+ columns.push(
+ arrow::compute::take(&sequence_v.as_arrow(), &indices_to_take, None)
+ .context(error::ComputeArrowSnafu)?,
+ );
+
+ columns.push(
+ arrow::compute::take(&op_type_v.as_arrow(), &indices_to_take, None)
+ .context(error::ComputeArrowSnafu)?,
+ );
+
+ for b in buffer.field_builders.iter_mut() {
+ let array = match b {
+ LazyMutableVectorBuilder::Type(ty) => {
+ let mut single_null = ty.create_mutable_vector(num_rows);
+ single_null.push_nulls(num_rows);
+ single_null.to_vector().to_arrow_array()
+ }
+ LazyMutableVectorBuilder::Builder(builder) => {
+ if keep_data {
+ builder.to_vector_cloned().to_arrow_array()
+ } else {
+ builder.to_vector().to_arrow_array()
+ }
+ }
+ };
+ columns.push(
+ arrow::compute::take(&array, &indices_to_take, None)
+ .context(error::ComputeArrowSnafu)?,
+ );
+ }
+
+ RecordBatch::try_new(schema, columns).context(error::NewRecordBatchSnafu)
+}
+
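+/// Iterator over the sorted record batch produced from a `DataBuffer`, yielding one
+/// primary-key index and its row range at a time.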
+#[derive(Debug)]
+pub(crate) struct DataBufferIter {
+ batch: RecordBatch,
+ offset: usize,
+ current_batch: Option<(PkIndex, Range<usize>)>,
+}
+
+impl DataBufferIter {
+ pub(crate) fn new(batch: RecordBatch) -> Result<Self> {
+ let mut iter = Self {
+ batch,
+ offset: 0,
+ current_batch: None,
+ };
+ iter.next()?; // fill data batch for comparison and merge.
+ Ok(iter)
+ }
+
+ pub(crate) fn is_valid(&self) -> bool {
+ self.current_batch.is_some()
+ }
+
+ /// # Panics
+ /// If the current iterator is exhausted.
+ pub(crate) fn current_data_batch(&self) -> DataBatch {
+ let (pk_index, range) = self.current_batch.as_ref().unwrap();
+ DataBatch {
+ pk_index: *pk_index,
+ rb: &self.batch,
+ range: range.clone(),
+ }
+ }
+
+ /// # Panics
+ /// If the current iterator is exhausted.
+ pub(crate) fn current_pk_index(&self) -> PkIndex {
+ let (pk_index, _) = self.current_batch.as_ref().unwrap();
+ *pk_index
+ }
+
+ /// Advances the iterator to the next data batch.
+ pub(crate) fn next(&mut self) -> Result<()> {
+ if self.offset >= self.batch.num_rows() {
+ self.current_batch = None;
+ return Ok(());
+ }
+ let pk_index_array = pk_index_array(&self.batch);
+ if let Some((next_pk, range)) = search_next_pk_range(pk_index_array, self.offset) {
+ self.offset = range.end;
+ self.current_batch = Some((next_pk, range))
+ } else {
+ self.current_batch = None;
+ }
+ Ok(())
+ }
+}
+
+/// Gets `pk_index` array from record batch.
+/// # Panics
+/// If pk index column is not the first column or the type is not `UInt16Array`.
+fn pk_index_array(batch: &RecordBatch) -> &UInt16Array {
+ batch
+ .column(0)
+ .as_any()
+ .downcast_ref::<UInt16Array>()
+ .unwrap()
+}
+
+/// Searches for the next pk index and its offset range in a sorted `UInt16Array`.
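+///
+/// For example, given the sorted array `[0, 0, 1, 1, 1]`, `start = 0` yields `(0, 0..2)`
+/// and `start = 2` yields `(1, 2..5)`.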
+fn search_next_pk_range(array: &UInt16Array, start: usize) -> Option<(PkIndex, Range<usize>)> {
+ let num_rows = array.len();
+ if start >= num_rows {
+ return None;
+ }
+
+ let values = array.values();
+ let next_pk = values[start];
+
+ for idx in start..num_rows {
+ if values[idx] != next_pk {
+ return Some((next_pk, start..idx));
+ }
+ }
+ Some((next_pk, start..num_rows))
+}
+
+#[derive(Eq, PartialEq)]
+struct InnerKey {
+ pk_weight: u16,
+ timestamp: i64,
+ sequence: u64,
+}
+
+impl PartialOrd for InnerKey {
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ Some(self.cmp(other))
+ }
+}
+
+impl Ord for InnerKey {
+ fn cmp(&self, other: &Self) -> Ordering {
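+ // Sort by (pk_weight asc, timestamp asc, sequence desc) so that, after sorting,
+ // dedup keeps the row with the largest sequence for each (pk, timestamp) pair.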
+ (self.pk_weight, self.timestamp, Reverse(self.sequence)).cmp(&(
+ other.pk_weight,
+ other.timestamp,
+ Reverse(other.sequence),
+ ))
+ }
+}
+
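+/// Builds the `(row index, sort key)` pairs used to sort the buffer, mapping each row's
+/// pk index to its weight via `pk_weights`.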
+fn build_rows_to_sort(
+ pk_weights: &[u16],
+ pk_index: &UInt16Vector,
+ ts: &VectorRef,
+ sequence: &UInt64Vector,
+) -> Vec<(usize, InnerKey)> {
+ let ts_values = match ts.data_type() {
+ ConcreteDataType::Timestamp(t) => match t {
+ TimestampType::Second(_) => ts
+ .as_any()
+ .downcast_ref::<TimestampSecondVector>()
+ .unwrap()
+ .as_arrow()
+ .values(),
+ TimestampType::Millisecond(_) => ts
+ .as_any()
+ .downcast_ref::<TimestampMillisecondVector>()
+ .unwrap()
+ .as_arrow()
+ .values(),
+ TimestampType::Microsecond(_) => ts
+ .as_any()
+ .downcast_ref::<TimestampMicrosecondVector>()
+ .unwrap()
+ .as_arrow()
+ .values(),
+ TimestampType::Nanosecond(_) => ts
+ .as_any()
+ .downcast_ref::<TimestampNanosecondVector>()
+ .unwrap()
+ .as_arrow()
+ .values(),
+ },
+ other => unreachable!("Unexpected type {:?}", other),
+ };
+ let pk_index_values = pk_index.as_arrow().values();
+ let sequence_values = sequence.as_arrow().values();
+ debug_assert_eq!(ts_values.len(), pk_index_values.len());
+ debug_assert_eq!(ts_values.len(), sequence_values.len());
+
+ ts_values
+ .iter()
+ .zip(pk_index_values.iter())
+ .zip(sequence_values.iter())
+ .enumerate()
+ .map(|(idx, ((timestamp, pk_index), sequence))| {
+ (
+ idx,
+ InnerKey {
+ timestamp: *timestamp,
+ pk_weight: pk_weights[*pk_index as usize],
+ sequence: *sequence,
+ },
+ )
+ })
+ .collect()
+}
+
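+/// Builds the Arrow schema of an encoded data part: the pk index column, the time index
+/// column, the sequence and op type columns, followed by all field columns.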
+fn memtable_schema_to_encoded_schema(schema: &RegionMetadataRef) -> SchemaRef {
+ use datatypes::arrow::datatypes::DataType;
+ let ColumnSchema {
+ name: ts_name,
+ data_type: ts_type,
+ ..
+ } = &schema.time_index_column().column_schema;
+
+ let mut fields = vec![
+ Field::new(PK_INDEX_COLUMN_NAME, DataType::UInt16, false),
+ Field::new(ts_name, ts_type.as_arrow_type(), false),
+ Field::new(SEQUENCE_COLUMN_NAME, DataType::UInt64, false),
+ Field::new(OP_TYPE_COLUMN_NAME, DataType::UInt8, false),
+ ];
+
+ fields.extend(schema.field_columns().map(|c| {
+ Field::new(
+ &c.column_schema.name,
+ c.column_schema.data_type.as_arrow_type(),
+ c.column_schema.is_nullable(),
+ )
+ }));
+
+ Arc::new(Schema::new(fields))
+}
+
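+/// Encodes a `DataBuffer` into Parquet bytes, sorting and de-duplicating its rows
+/// according to the given pk weights.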
+struct DataPartEncoder<'a> {
+ schema: SchemaRef,
+ pk_weights: &'a [u16],
+ row_group_size: Option<usize>,
+}
+
+impl<'a> DataPartEncoder<'a> {
+ pub fn new(
+ metadata: &RegionMetadataRef,
+ pk_weights: &'a [u16],
+ row_group_size: Option<usize>,
+ ) -> DataPartEncoder<'a> {
+ let schema = memtable_schema_to_encoded_schema(metadata);
+ Self {
+ schema,
+ pk_weights,
+ row_group_size,
+ }
+ }
+
+ fn writer_props(&self) -> Option<WriterProperties> {
+ self.row_group_size.map(|size| {
+ WriterProperties::builder()
+ .set_max_row_group_size(size)
+ .build()
+ })
+ }
+ pub fn write(&self, source: &mut DataBuffer) -> Result<Bytes> {
+ let mut bytes = Vec::with_capacity(1024);
+ let mut writer = ArrowWriter::try_new(&mut bytes, self.schema.clone(), self.writer_props())
+ .context(error::EncodeMemtableSnafu)?;
+ let rb = data_buffer_to_record_batches(
+ self.schema.clone(),
+ source,
+ self.pk_weights,
+ false,
+ true,
+ )?;
+ writer.write(&rb).context(error::EncodeMemtableSnafu)?;
+ let _file_meta = writer.close().context(error::EncodeMemtableSnafu)?;
+ Ok(Bytes::from(bytes))
+ }
+}
+
+/// Format of immutable data part.
+pub enum DataPart {
+ Parquet(Bytes),
+}
+
+impl DataPart {
+ fn is_empty(&self) -> bool {
+ match self {
+ DataPart::Parquet(data) => data.is_empty(),
+ }
+ }
+}
/// Data parts under a shard.
pub struct DataParts {}
+
+#[cfg(test)]
+mod tests {
+ use datafusion::arrow::array::Float64Array;
+ use datatypes::arrow::array::{TimestampMillisecondArray, UInt16Array, UInt64Array};
+ use parquet::arrow::arrow_reader::ParquetRecordBatchReaderBuilder;
+ use parquet::data_type::AsBytes;
+
+ use super::*;
+ use crate::test_util::memtable_util::{build_key_values_with_ts_seq_values, metadata_for_test};
+
+ #[test]
+ fn test_lazy_mutable_vector_builder() {
+ let mut builder = LazyMutableVectorBuilder::new(ConcreteDataType::boolean_datatype());
+ match builder {
+ LazyMutableVectorBuilder::Type(ref t) => {
+ assert_eq!(&ConcreteDataType::boolean_datatype(), t);
+ }
+ LazyMutableVectorBuilder::Builder(_) => {
+ unreachable!()
+ }
+ }
+ builder.get_or_create_builder(1);
+ match builder {
+ LazyMutableVectorBuilder::Type(_) => {
+ unreachable!()
+ }
+ LazyMutableVectorBuilder::Builder(_) => {}
+ }
+ }
+
+ fn check_test_data_buffer_to_record_batches(keep_data: bool) {
+ let meta = metadata_for_test();
+ let mut buffer = DataBuffer::with_capacity(meta.clone(), 10);
+
+ write_rows_to_buffer(&mut buffer, &meta, 0, vec![1, 2], vec![Some(0.1), None], 1);
+ write_rows_to_buffer(&mut buffer, &meta, 1, vec![1, 2], vec![Some(1.1), None], 2);
+ write_rows_to_buffer(&mut buffer, &meta, 0, vec![2], vec![Some(1.1)], 3);
+ assert_eq!(5, buffer.num_rows());
+ let schema = memtable_schema_to_encoded_schema(&meta);
+ let batch =
+ data_buffer_to_record_batches(schema, &mut buffer, &[3, 1], keep_data, true).unwrap();
+
+ assert_eq!(
+ vec![1, 2, 1, 2],
+ batch
+ .column_by_name("ts")
+ .unwrap()
+ .as_any()
+ .downcast_ref::<TimestampMillisecondArray>()
+ .unwrap()
+ .iter()
+ .map(|v| v.unwrap())
+ .collect::<Vec<_>>()
+ );
+
+ assert_eq!(
+ vec![1, 1, 0, 0],
+ batch
+ .column_by_name(PK_INDEX_COLUMN_NAME)
+ .unwrap()
+ .as_any()
+ .downcast_ref::<UInt16Array>()
+ .unwrap()
+ .iter()
+ .map(|v| v.unwrap())
+ .collect::<Vec<_>>()
+ );
+
+ assert_eq!(
+ vec![Some(1.1), None, Some(0.1), Some(1.1)],
+ batch
+ .column_by_name("v1")
+ .unwrap()
+ .as_any()
+ .downcast_ref::<Float64Array>()
+ .unwrap()
+ .iter()
+ .collect::<Vec<_>>()
+ );
+
+ if keep_data {
+ assert_eq!(5, buffer.num_rows());
+ } else {
+ assert_eq!(0, buffer.num_rows());
+ }
+ }
+
+ #[test]
+ fn test_data_buffer_to_record_batches() {
+ check_test_data_buffer_to_record_batches(true);
+ check_test_data_buffer_to_record_batches(false);
+ }
+
+ #[test]
+ fn test_data_buffer_to_record_batches_with_dedup() {
+ let meta = metadata_for_test();
+ let mut buffer = DataBuffer::with_capacity(meta.clone(), 10);
+
+ write_rows_to_buffer(&mut buffer, &meta, 0, vec![1, 2], vec![Some(0.1), None], 1);
+ write_rows_to_buffer(&mut buffer, &meta, 1, vec![2], vec![Some(1.1)], 2);
+ write_rows_to_buffer(&mut buffer, &meta, 0, vec![2], vec![Some(1.1)], 3);
+ assert_eq!(4, buffer.num_rows());
+ let schema = memtable_schema_to_encoded_schema(&meta);
+ let batch =
+ data_buffer_to_record_batches(schema, &mut buffer, &[0, 1], true, true).unwrap();
+
+ assert_eq!(3, batch.num_rows());
+ assert_eq!(
+ vec![0, 0, 1],
+ batch
+ .column_by_name(PK_INDEX_COLUMN_NAME)
+ .unwrap()
+ .as_any()
+ .downcast_ref::<UInt16Array>()
+ .unwrap()
+ .iter()
+ .map(|v| v.unwrap())
+ .collect::<Vec<_>>()
+ );
+
+ assert_eq!(
+ vec![1, 2, 2],
+ batch
+ .column_by_name("ts")
+ .unwrap()
+ .as_any()
+ .downcast_ref::<TimestampMillisecondArray>()
+ .unwrap()
+ .iter()
+ .map(|v| v.unwrap())
+ .collect::<Vec<_>>()
+ );
+
+ assert_eq!(
+ vec![1, 3, 2],
+ batch
+ .column_by_name(SEQUENCE_COLUMN_NAME)
+ .unwrap()
+ .as_any()
+ .downcast_ref::<UInt64Array>()
+ .unwrap()
+ .iter()
+ .map(|v| v.unwrap())
+ .collect::<Vec<_>>()
+ );
+ }
+
+ #[test]
+ fn test_data_buffer_to_record_batches_without_dedup() {
+ let meta = metadata_for_test();
+ let mut buffer = DataBuffer::with_capacity(meta.clone(), 10);
+
+ write_rows_to_buffer(&mut buffer, &meta, 0, vec![1, 2], vec![Some(0.1), None], 1);
+ write_rows_to_buffer(&mut buffer, &meta, 1, vec![1, 2], vec![Some(1.1), None], 2);
+ write_rows_to_buffer(&mut buffer, &meta, 0, vec![2], vec![Some(1.1)], 3);
+ assert_eq!(5, buffer.num_rows());
+ let schema = memtable_schema_to_encoded_schema(&meta);
+ let batch =
+ data_buffer_to_record_batches(schema, &mut buffer, &[3, 1], true, false).unwrap();
+
+ assert_eq!(
+ vec![1, 1, 0, 0, 0],
+ batch
+ .column_by_name(PK_INDEX_COLUMN_NAME)
+ .unwrap()
+ .as_any()
+ .downcast_ref::<UInt16Array>()
+ .unwrap()
+ .iter()
+ .map(|v| v.unwrap())
+ .collect::<Vec<_>>()
+ );
+
+ assert_eq!(
+ vec![1, 2, 1, 2, 2],
+ batch
+ .column_by_name("ts")
+ .unwrap()
+ .as_any()
+ .downcast_ref::<TimestampMillisecondArray>()
+ .unwrap()
+ .iter()
+ .map(|v| v.unwrap())
+ .collect::<Vec<_>>()
+ );
+ }
+
+ fn write_rows_to_buffer(
+ buffer: &mut DataBuffer,
+ schema: &RegionMetadataRef,
+ pk_index: u16,
+ ts: Vec<i64>,
+ v0: Vec<Option<f64>>,
+ sequence: u64,
+ ) {
+ let kvs = build_key_values_with_ts_seq_values(
+ schema,
+ "whatever".to_string(),
+ 1,
+ ts.into_iter(),
+ v0.into_iter(),
+ sequence,
+ );
+
+ for kv in kvs.iter() {
+ buffer.write_row(
+ PkId {
+ shard_id: 0,
+ pk_index,
+ },
+ kv,
+ );
+ }
+ }
+
+ #[test]
+ fn test_encode_data_buffer() {
+ let meta = metadata_for_test();
+ let mut buffer = DataBuffer::with_capacity(meta.clone(), 10);
+
+ // write rows with null values.
+ write_rows_to_buffer(
+ &mut buffer,
+ &meta,
+ 2,
+ vec![0, 1, 2],
+ vec![Some(1.0), None, Some(3.0)],
+ 2,
+ );
+
+ assert_eq!(3, buffer.num_rows());
+
+ write_rows_to_buffer(&mut buffer, &meta, 2, vec![1], vec![Some(2.0)], 3);
+
+ assert_eq!(4, buffer.num_rows());
+
+ let encoder = DataPartEncoder::new(&meta, &[0, 1, 2], None);
+ let encoded = encoder.write(&mut buffer).unwrap();
+ let s = String::from_utf8_lossy(encoded.as_bytes());
+ assert!(s.starts_with("PAR1"));
+ assert!(s.ends_with("PAR1"));
+
+ let builder = ParquetRecordBatchReaderBuilder::try_new(encoded).unwrap();
+ let mut reader = builder.build().unwrap();
+ let batch = reader.next().unwrap().unwrap();
+ assert_eq!(3, batch.num_rows());
+ }
+
+ fn check_buffer_values_equal(iter: &mut DataBufferIter, expected_values: &[Vec<f64>]) {
+ let mut output = Vec::with_capacity(expected_values.len());
+ while iter.is_valid() {
+ let batch = iter.current_data_batch().slice_record_batch();
+ let values = batch
+ .column_by_name("v1")
+ .unwrap()
+ .as_any()
+ .downcast_ref::<Float64Array>()
+ .unwrap()
+ .iter()
+ .map(|v| v.unwrap())
+ .collect::<Vec<_>>();
+ output.push(values);
+ iter.next().unwrap();
+ }
+ assert_eq!(expected_values, output);
+ }
+
+ #[test]
+ fn test_search_next_pk_range() {
+ let a = UInt16Array::from_iter_values([1, 1, 3, 3, 4, 6]);
+ assert_eq!((1, 0..2), search_next_pk_range(&a, 0).unwrap());
+ assert_eq!((3, 2..4), search_next_pk_range(&a, 2).unwrap());
+ assert_eq!((4, 4..5), search_next_pk_range(&a, 4).unwrap());
+ assert_eq!((6, 5..6), search_next_pk_range(&a, 5).unwrap());
+
+ assert_eq!(None, search_next_pk_range(&a, 6));
+ }
+
+ #[test]
+ fn test_iter_data_buffer() {
+ let meta = metadata_for_test();
+ let mut buffer = DataBuffer::with_capacity(meta.clone(), 10);
+
+ write_rows_to_buffer(
+ &mut buffer,
+ &meta,
+ 3,
+ vec![1, 2, 3],
+ vec![Some(1.1), Some(2.1), Some(3.1)],
+ 3,
+ );
+
+ write_rows_to_buffer(
+ &mut buffer,
+ &meta,
+ 2,
+ vec![0, 1, 2],
+ vec![Some(1.0), Some(2.0), Some(3.0)],
+ 2,
+ );
+
+ let mut iter = buffer.iter(&[0, 1, 3, 2]).unwrap();
+ check_buffer_values_equal(&mut iter, &[vec![1.1, 2.1, 3.1], vec![1.0, 2.0, 3.0]]);
+ }
+
+ #[test]
+ fn test_iter_empty_data_buffer() {
+ let meta = metadata_for_test();
+ let mut buffer = DataBuffer::with_capacity(meta.clone(), 10);
+ let mut iter = buffer.iter(&[0, 1, 3, 2]).unwrap();
+ check_buffer_values_equal(&mut iter, &[]);
+ }
+}
diff --git a/src/mito2/src/test_util/memtable_util.rs b/src/mito2/src/test_util/memtable_util.rs
index 22dca01156e7..7e761cad771a 100644
--- a/src/mito2/src/test_util/memtable_util.rs
+++ b/src/mito2/src/test_util/memtable_util.rs
@@ -17,8 +17,13 @@
use std::sync::atomic::{AtomicU32, Ordering};
use std::sync::Arc;
-use store_api::metadata::RegionMetadataRef;
-use store_api::storage::ColumnId;
+use api::helper::ColumnDataTypeWrapper;
+use api::v1::value::ValueData;
+use api::v1::{Row, Rows, SemanticType};
+use datatypes::data_type::ConcreteDataType;
+use datatypes::schema::ColumnSchema;
+use store_api::metadata::{ColumnMetadata, RegionMetadataBuilder, RegionMetadataRef};
+use store_api::storage::{ColumnId, RegionId, SequenceNumber};
use table::predicate::Predicate;
use crate::error::Result;
@@ -83,3 +88,113 @@ impl MemtableBuilder for EmptyMemtableBuilder {
))
}
}
+
+/// Creates a region metadata to test memtable with default pk.
+///
+/// The schema is `k0, k1, ts, v0, v1` and pk is `k0, k1`.
+pub(crate) fn metadata_for_test() -> RegionMetadataRef {
+ metadata_with_primary_key(vec![0, 1])
+}
+
+/// Creates a region metadata to test memtable and specific primary key.
+///
+/// The schema is `k0, k1, ts, v0, v1`.
+pub(crate) fn metadata_with_primary_key(primary_key: Vec<ColumnId>) -> RegionMetadataRef {
+ let mut builder = RegionMetadataBuilder::new(RegionId::new(123, 456));
+ builder
+ .push_column_metadata(ColumnMetadata {
+ column_schema: ColumnSchema::new("k0", ConcreteDataType::string_datatype(), false),
+ semantic_type: semantic_type_of_column(0, &primary_key),
+ column_id: 0,
+ })
+ .push_column_metadata(ColumnMetadata {
+ column_schema: ColumnSchema::new("k1", ConcreteDataType::int64_datatype(), false),
+ semantic_type: semantic_type_of_column(1, &primary_key),
+ column_id: 1,
+ })
+ .push_column_metadata(ColumnMetadata {
+ column_schema: ColumnSchema::new(
+ "ts",
+ ConcreteDataType::timestamp_millisecond_datatype(),
+ false,
+ ),
+ semantic_type: SemanticType::Timestamp,
+ column_id: 2,
+ })
+ .push_column_metadata(ColumnMetadata {
+ column_schema: ColumnSchema::new("v0", ConcreteDataType::int64_datatype(), true),
+ semantic_type: semantic_type_of_column(3, &primary_key),
+ column_id: 3,
+ })
+ .push_column_metadata(ColumnMetadata {
+ column_schema: ColumnSchema::new("v1", ConcreteDataType::float64_datatype(), true),
+ semantic_type: semantic_type_of_column(4, &primary_key),
+ column_id: 4,
+ })
+ .primary_key(primary_key);
+ let region_metadata = builder.build().unwrap();
+ Arc::new(region_metadata)
+}
+
+fn semantic_type_of_column(column_id: ColumnId, primary_key: &[ColumnId]) -> SemanticType {
+ if primary_key.contains(&column_id) {
+ SemanticType::Tag
+ } else {
+ SemanticType::Field
+ }
+}
+
+/// Builds key values with timestamps (ms) and sequences for test.
+pub(crate) fn build_key_values_with_ts_seq_values(
+ schema: &RegionMetadataRef,
+ k0: String,
+ k1: i64,
+ timestamps: impl Iterator<Item = i64>,
+ values: impl Iterator<Item = Option<f64>>,
+ sequence: SequenceNumber,
+) -> KeyValues {
+ let column_schema = schema
+ .column_metadatas
+ .iter()
+ .map(|c| api::v1::ColumnSchema {
+ column_name: c.column_schema.name.clone(),
+ datatype: ColumnDataTypeWrapper::try_from(c.column_schema.data_type.clone())
+ .unwrap()
+ .datatype() as i32,
+ semantic_type: c.semantic_type as i32,
+ ..Default::default()
+ })
+ .collect();
+
+ let rows = timestamps
+ .zip(values)
+ .map(|(ts, v)| Row {
+ values: vec![
+ api::v1::Value {
+ value_data: Some(ValueData::StringValue(k0.clone())),
+ },
+ api::v1::Value {
+ value_data: Some(ValueData::I64Value(k1)),
+ },
+ api::v1::Value {
+ value_data: Some(ValueData::TimestampMillisecondValue(ts)),
+ },
+ api::v1::Value {
+ value_data: Some(ValueData::I64Value(ts)),
+ },
+ api::v1::Value {
+ value_data: v.map(ValueData::F64Value),
+ },
+ ],
+ })
+ .collect();
+ let mutation = api::v1::Mutation {
+ op_type: 1,
+ sequence,
+ rows: Some(Rows {
+ schema: column_schema,
+ rows,
+ }),
+ };
+ KeyValues::new(schema.as_ref(), mutation).unwrap()
+}
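Taken together, the pieces in this diff form a small round trip: rows are built with the test helper, buffered per primary-key index in a DataBuffer, encoded to Parquet by a DataPartEncoder, and read back with an ordinary Parquet reader. A minimal usage sketch in the spirit of test_encode_data_buffer above (the host name, values and pk weights are illustrative only):

use parquet::arrow::arrow_reader::ParquetRecordBatchReaderBuilder;

// Sketch only: relies on the crate-private helpers introduced in this change.
let meta = metadata_for_test();
let mut buffer = DataBuffer::with_capacity(meta.clone(), 10);

// Three rows for pk_index 2, all written under sequence 2.
let kvs = build_key_values_with_ts_seq_values(
    &meta,
    "host-0".to_string(),
    1,
    vec![0i64, 1, 2].into_iter(),
    vec![Some(1.0), None, Some(3.0)].into_iter(),
    2,
);
for kv in kvs.iter() {
    buffer.write_row(PkId { shard_id: 0, pk_index: 2 }, kv);
}

// Encode the buffer into Parquet bytes; the pk weight slice is illustrative.
let encoder = DataPartEncoder::new(&meta, &[0, 1, 2], None);
let encoded = encoder.write(&mut buffer).unwrap();

// Read the part back with the standard Parquet reader.
let mut reader = ParquetRecordBatchReaderBuilder::try_new(encoded)
    .unwrap()
    .build()
    .unwrap();
let batch = reader.next().unwrap().unwrap();
assert_eq!(3, batch.num_rows());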
|
feat
|
data buffer and related structs (#3329)
|
cd42f308a8028c1db0ec7017bbbcc3394993b3eb
|
2022-08-02 13:55:03
|
Ning Sun
|
refactor: remove constructors from trait (#121)
| false
|
diff --git a/src/log-store/src/fs/entry.rs b/src/log-store/src/fs/entry.rs
index 2ec6f20e8764..45f3d88fc506 100644
--- a/src/log-store/src/fs/entry.rs
+++ b/src/log-store/src/fs/entry.rs
@@ -106,17 +106,19 @@ impl Encode for EntryImpl {
}
}
-impl Entry for EntryImpl {
- type Error = Error;
-
- fn new(data: impl AsRef<[u8]>) -> Self {
- Self {
+impl EntryImpl {
+ pub(crate) fn new(data: impl AsRef<[u8]>) -> EntryImpl {
+ EntryImpl {
id: 0,
data: data.as_ref().to_vec(),
offset: 0,
epoch: 0,
}
}
+}
+
+impl Entry for EntryImpl {
+ type Error = Error;
fn data(&self) -> &[u8] {
&self.data
diff --git a/src/log-store/src/fs/log.rs b/src/log-store/src/fs/log.rs
index 6bee0f80a088..203d72b2fca5 100644
--- a/src/log-store/src/fs/log.rs
+++ b/src/log-store/src/fs/log.rs
@@ -244,6 +244,14 @@ impl LogStore for LocalFileLogStore {
async fn list_namespaces(&self) -> Result<Vec<Self::Namespace>> {
todo!()
}
+
+ fn entry<D: AsRef<[u8]>>(&self, data: D) -> Self::Entry {
+ EntryImpl::new(data)
+ }
+
+ fn namespace(&self, name: &str) -> Self::Namespace {
+ LocalNamespace::new(name)
+ }
}
#[cfg(test)]
diff --git a/src/log-store/src/fs/namespace.rs b/src/log-store/src/fs/namespace.rs
index ac29167413c8..75da1e38170b 100644
--- a/src/log-store/src/fs/namespace.rs
+++ b/src/log-store/src/fs/namespace.rs
@@ -18,14 +18,16 @@ struct LocalNamespaceInner {
name: String,
}
-impl Namespace for LocalNamespace {
- fn new(name: &str) -> Self {
+impl LocalNamespace {
+ pub(crate) fn new(name: &str) -> Self {
let inner = Arc::new(LocalNamespaceInner {
name: name.to_string(),
});
Self { inner }
}
+}
+impl Namespace for LocalNamespace {
fn name(&self) -> &str {
self.inner.name.as_str()
}
diff --git a/src/log-store/src/fs/noop.rs b/src/log-store/src/fs/noop.rs
index bcacbc87ccae..25975538a68d 100644
--- a/src/log-store/src/fs/noop.rs
+++ b/src/log-store/src/fs/noop.rs
@@ -50,4 +50,12 @@ impl LogStore for NoopLogStore {
async fn list_namespaces(&self) -> Result<Vec<Self::Namespace>> {
todo!()
}
+
+ fn entry<D: AsRef<[u8]>>(&self, data: D) -> Self::Entry {
+ EntryImpl::new(data)
+ }
+
+ fn namespace(&self, name: &str) -> Self::Namespace {
+ LocalNamespace::new(name)
+ }
}
diff --git a/src/storage/src/manifest/region.rs b/src/storage/src/manifest/region.rs
index aeffbccb362e..559f2077c162 100644
--- a/src/storage/src/manifest/region.rs
+++ b/src/storage/src/manifest/region.rs
@@ -22,18 +22,20 @@ pub struct RegionManifest {
inner: Arc<RegionManifestInner>,
}
+impl RegionManifest {
+ pub fn new(manifest_dir: &str, object_store: ObjectStore) -> Self {
+ RegionManifest {
+ inner: Arc::new(RegionManifestInner::new(manifest_dir, object_store)),
+ }
+ }
+}
+
#[async_trait]
impl Manifest for RegionManifest {
type Error = Error;
type MetaAction = RegionMetaActionList;
type Metadata = RegionManifestData;
- fn new(manifest_dir: &str, object_store: ObjectStore) -> Self {
- RegionManifest {
- inner: Arc::new(RegionManifestInner::new(manifest_dir, object_store)),
- }
- }
-
async fn update(&self, action_list: RegionMetaActionList) -> Result<ManifestVersion> {
self.inner.save(action_list).await
}
diff --git a/src/storage/src/region.rs b/src/storage/src/region.rs
index dd4337c491d4..dee349cec8d5 100644
--- a/src/storage/src/region.rs
+++ b/src/storage/src/region.rs
@@ -5,6 +5,7 @@ mod writer;
use std::sync::Arc;
use async_trait::async_trait;
+use datatypes::schema::SchemaRef;
use snafu::ensure;
use store_api::logstore::LogStore;
use store_api::manifest::Manifest;
@@ -60,6 +61,10 @@ impl<S: LogStore> Region for RegionImpl<S> {
fn snapshot(&self, _ctx: &ReadContext) -> Result<SnapshotImpl> {
Ok(self.inner.create_snapshot())
}
+
+ fn write_request(&self, schema: SchemaRef) -> Self::WriteRequest {
+ WriteBatch::new(schema)
+ }
}
/// Storage related config for region.
diff --git a/src/storage/src/test_util/config_util.rs b/src/storage/src/test_util/config_util.rs
index 42793af10842..98f488538a45 100644
--- a/src/storage/src/test_util/config_util.rs
+++ b/src/storage/src/test_util/config_util.rs
@@ -2,7 +2,6 @@ use std::sync::Arc;
use log_store::fs::noop::NoopLogStore;
use object_store::{backend::fs::Backend, ObjectStore};
-use store_api::manifest::Manifest;
use crate::background::JobPoolImpl;
use crate::engine;
diff --git a/src/storage/src/test_util/write_batch_util.rs b/src/storage/src/test_util/write_batch_util.rs
index a594d1138287..5d56b3080e82 100644
--- a/src/storage/src/test_util/write_batch_util.rs
+++ b/src/storage/src/test_util/write_batch_util.rs
@@ -1,5 +1,3 @@
-use store_api::storage::WriteRequest;
-
use crate::test_util::schema_util::{self, ColumnDef};
use crate::write_batch::WriteBatch;
diff --git a/src/storage/src/wal.rs b/src/storage/src/wal.rs
index e9cb8d6c5814..e4184cffa76c 100644
--- a/src/storage/src/wal.rs
+++ b/src/storage/src/wal.rs
@@ -39,7 +39,7 @@ impl<S: LogStore> Clone for Wal<S> {
impl<S: LogStore> Wal<S> {
pub fn new(region_name: impl Into<String>, store: Arc<S>) -> Self {
let region_name = region_name.into();
- let namespace = S::Namespace::new(&region_name);
+ let namespace = store.namespace(&region_name);
Self { namespace, store }
}
@@ -122,7 +122,7 @@ impl<S: LogStore> Wal<S> {
async fn write(&self, seq: SequenceNumber, bytes: &[u8]) -> Result<(u64, usize)> {
let ns = self.namespace.clone();
- let mut e = S::Entry::new(bytes);
+ let mut e = self.store.entry(bytes);
e.set_id(seq);
let res = self
diff --git a/src/storage/src/write_batch.rs b/src/storage/src/write_batch.rs
index 69a9aa2781d8..10ea62e38bdb 100644
--- a/src/storage/src/write_batch.rs
+++ b/src/storage/src/write_batch.rs
@@ -130,17 +130,19 @@ pub struct WriteBatch {
num_rows: usize,
}
-impl WriteRequest for WriteBatch {
- type Error = Error;
- type PutOp = PutData;
-
- fn new(schema: SchemaRef) -> Self {
+impl WriteBatch {
+ pub fn new(schema: SchemaRef) -> Self {
Self {
schema,
mutations: Vec::new(),
num_rows: 0,
}
}
+}
+
+impl WriteRequest for WriteBatch {
+ type Error = Error;
+ type PutOp = PutData;
fn put(&mut self, data: PutData) -> Result<()> {
if data.is_empty() {
@@ -193,6 +195,14 @@ impl WriteRequest for WriteBatch {
Ok(ranges)
}
+
+ fn put_op(&self) -> Self::PutOp {
+ PutData::new()
+ }
+
+ fn put_op_with_columns(num_columns: usize) -> Self::PutOp {
+ PutData::with_num_columns(num_columns)
+ }
}
/// Aligns timestamp to nearest time interval.
@@ -231,18 +241,20 @@ pub struct PutData {
columns: HashMap<String, VectorRef>,
}
-impl PutOperation for PutData {
- type Error = Error;
-
- fn new() -> PutData {
+impl PutData {
+ pub(crate) fn new() -> PutData {
PutData::default()
}
- fn with_num_columns(num_columns: usize) -> PutData {
+ pub(crate) fn with_num_columns(num_columns: usize) -> PutData {
PutData {
columns: HashMap::with_capacity(num_columns),
}
}
+}
+
+impl PutOperation for PutData {
+ type Error = Error;
fn add_key_column(&mut self, name: &str, vector: VectorRef) -> Result<()> {
self.add_column_by_name(name, vector)
@@ -407,7 +419,7 @@ pub mod codec {
vectors::Helper,
};
use snafu::ensure;
- use store_api::storage::{PutOperation, WriteRequest};
+ use store_api::storage::WriteRequest;
use super::{
DataCorruptionSnafu, DecodeArrowSnafu, DecodeVectorSnafu, EncodeArrowSnafu,
diff --git a/src/store-api/src/logstore.rs b/src/store-api/src/logstore.rs
index 255220b894fd..f9d85ea2921b 100644
--- a/src/store-api/src/logstore.rs
+++ b/src/store-api/src/logstore.rs
@@ -25,28 +25,36 @@ pub trait LogStore: Send + Sync + 'static + std::fmt::Debug {
mut e: Self::Entry,
) -> Result<Self::AppendResponse, Self::Error>;
- // Append a batch of entries atomically and return the offset of first entry.
+ /// Append a batch of entries atomically and return the offset of first entry.
async fn append_batch(
&self,
ns: &Self::Namespace,
e: Vec<Self::Entry>,
) -> Result<Id, Self::Error>;
- // Create a new `EntryStream` to asynchronously generates `Entry` with ids starting from `id`.
+ /// Create a new `EntryStream` to asynchronously generate `Entry` with ids
+ /// starting from `id`.
async fn read(
&self,
ns: &Self::Namespace,
id: Id,
) -> Result<SendableEntryStream<Self::Entry, Self::Error>, Self::Error>;
- // Create a new `Namespace`.
+ /// Create a new `Namespace`.
async fn create_namespace(&mut self, ns: &Self::Namespace) -> Result<(), Self::Error>;
- // Delete an existing `Namespace` with given ref.
+ /// Delete an existing `Namespace` with given ref.
async fn delete_namespace(&mut self, ns: &Self::Namespace) -> Result<(), Self::Error>;
- // List all existing namespaces.
+ /// List all existing namespaces.
async fn list_namespaces(&self) -> Result<Vec<Self::Namespace>, Self::Error>;
+
+ /// Create an entry of the associated Entry type
+ fn entry<D: AsRef<[u8]>>(&self, data: D) -> Self::Entry;
+
+ /// Create a namespace of the associated Namespace type
+ // TODO(sunng87): confusion with `create_namespace`
+ fn namespace(&self, name: &str) -> Self::Namespace;
}
pub trait AppendResponse: Send + Sync {
diff --git a/src/store-api/src/logstore/entry.rs b/src/store-api/src/logstore/entry.rs
index 194f3d038892..25de0d83e788 100644
--- a/src/store-api/src/logstore/entry.rs
+++ b/src/store-api/src/logstore/entry.rs
@@ -8,9 +8,6 @@ pub type Id = u64;
/// Entry is the minimal data storage unit in `LogStore`.
pub trait Entry: Encode + Send + Sync {
type Error: ErrorExt + Send + Sync;
-
- fn new(data: impl AsRef<[u8]>) -> Self;
-
/// Return contained data of entry.
fn data(&self) -> &[u8];
diff --git a/src/store-api/src/logstore/entry_stream.rs b/src/store-api/src/logstore/entry_stream.rs
index b5d8745b753d..531588a9d2f5 100644
--- a/src/store-api/src/logstore/entry_stream.rs
+++ b/src/store-api/src/logstore/entry_stream.rs
@@ -71,14 +71,6 @@ mod tests {
impl Entry for SimpleEntry {
type Error = Error;
- fn new(data: impl AsRef<[u8]>) -> Self {
- Self {
- data: data.as_ref().to_vec(),
- offset: 0,
- epoch: 0,
- }
- }
-
fn data(&self) -> &[u8] {
&self.data
}
diff --git a/src/store-api/src/logstore/namespace.rs b/src/store-api/src/logstore/namespace.rs
index 6b238aaf5657..8464c1a5e39f 100644
--- a/src/store-api/src/logstore/namespace.rs
+++ b/src/store-api/src/logstore/namespace.rs
@@ -1,5 +1,3 @@
pub trait Namespace: Send + Sync + Clone + std::fmt::Debug {
- fn new(name: &str) -> Self;
-
fn name(&self) -> &str;
}
diff --git a/src/store-api/src/manifest.rs b/src/store-api/src/manifest.rs
index fbd562a338df..4ca31d2c9c95 100644
--- a/src/store-api/src/manifest.rs
+++ b/src/store-api/src/manifest.rs
@@ -4,7 +4,6 @@ mod storage;
use async_trait::async_trait;
use common_error::ext::ErrorExt;
-use object_store::ObjectStore;
use serde::{de::DeserializeOwned, Serialize};
pub use crate::manifest::storage::*;
@@ -26,8 +25,6 @@ pub trait Manifest: Send + Sync + Clone + 'static {
type MetaAction: MetaAction;
type Metadata: Metadata;
- fn new(manifest_dir: &str, object_store: ObjectStore) -> Self;
-
/// Update metadata by the action
async fn update(&self, action: Self::MetaAction) -> Result<ManifestVersion, Self::Error>;
diff --git a/src/store-api/src/storage/region.rs b/src/store-api/src/storage/region.rs
index 1497eb8f14f8..dc4a3e0f09f0 100644
--- a/src/store-api/src/storage/region.rs
+++ b/src/store-api/src/storage/region.rs
@@ -20,6 +20,7 @@
use async_trait::async_trait;
use common_error::ext::ErrorExt;
+use datatypes::schema::SchemaRef;
use crate::storage::engine::OpenOptions;
use crate::storage::metadata::RegionMeta;
@@ -50,6 +51,9 @@ pub trait Region: Send + Sync + Clone + std::fmt::Debug + 'static {
/// Create a snapshot for read.
fn snapshot(&self, ctx: &ReadContext) -> Result<Self::Snapshot, Self::Error>;
+
+ /// Create write request
+ fn write_request(&self, schema: SchemaRef) -> Self::WriteRequest;
}
/// Context for write operations.
diff --git a/src/store-api/src/storage/requests.rs b/src/store-api/src/storage/requests.rs
index e328906a43d0..6e9325cf6f62 100644
--- a/src/store-api/src/storage/requests.rs
+++ b/src/store-api/src/storage/requests.rs
@@ -2,7 +2,6 @@ use std::time::Duration;
use common_error::ext::ErrorExt;
use common_time::RangeMillis;
-use datatypes::schema::SchemaRef;
use datatypes::vectors::VectorRef;
use crate::storage::SequenceNumber;
@@ -12,24 +11,22 @@ pub trait WriteRequest: Send {
type Error: ErrorExt + Send + Sync;
type PutOp: PutOperation;
- fn new(schema: SchemaRef) -> Self;
-
fn put(&mut self, put: Self::PutOp) -> Result<(), Self::Error>;
/// Returns all possible time ranges that contain the timestamp in this batch.
///
/// Each time range is aligned to given `duration`.
fn time_ranges(&self, duration: Duration) -> Result<Vec<RangeMillis>, Self::Error>;
+
+ fn put_op(&self) -> Self::PutOp;
+
+ fn put_op_with_columns(num_columns: usize) -> Self::PutOp;
}
/// Put multiple rows.
pub trait PutOperation: Send {
type Error: ErrorExt + Send + Sync;
- fn new() -> Self;
-
- fn with_num_columns(num_columns: usize) -> Self;
-
fn add_key_column(&mut self, name: &str, vector: VectorRef) -> Result<(), Self::Error>;
fn add_version_column(&mut self, vector: VectorRef) -> Result<(), Self::Error>;
diff --git a/src/table-engine/src/table.rs b/src/table-engine/src/table.rs
index a09246498f91..4146edbb1070 100644
--- a/src/table-engine/src/table.rs
+++ b/src/table-engine/src/table.rs
@@ -44,10 +44,10 @@ impl<R: Region> Table for MitoTable<R> {
return Ok(0);
}
- let mut write_request = R::WriteRequest::new(self.schema());
+ let mut write_request = self.region.write_request(self.schema());
//FIXME(dennis): we can only insert to demo table right now
- let mut put_op = <<R as Region>::WriteRequest as WriteRequest>::PutOp::new();
+ let mut put_op = write_request.put_op();
let mut columns_values = request.columns_values;
let key_columns = vec!["ts", "host"];
let value_columns = vec!["cpu", "memory"];
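The common thread of this refactor is removing `fn new(...)` from traits (Entry, Namespace, Manifest, WriteRequest, PutOperation) and exposing factory methods on the object that owns them instead (LogStore::entry, LogStore::namespace, Region::write_request, WriteRequest::put_op), so generic callers construct values through an instance they already hold while the concrete constructors become inherent, often crate-private, methods. A self-contained illustration of the pattern with toy names, not GreptimeDB's actual types:

// Before: the trait forced a constructor, so generic code had to know how to
// build the concrete type, e.g. `S::Entry::new(bytes)`.
//
// After: construction moves to a factory method on the owning store.
trait Entry {
    fn data(&self) -> &[u8];
}

trait LogStore {
    type Entry: Entry;
    /// Create an entry of the associated Entry type.
    fn entry<D: AsRef<[u8]>>(&self, data: D) -> Self::Entry;
}

struct SimpleEntry(Vec<u8>);
impl Entry for SimpleEntry {
    fn data(&self) -> &[u8] {
        &self.0
    }
}

struct SimpleStore;
impl LogStore for SimpleStore {
    type Entry = SimpleEntry;
    fn entry<D: AsRef<[u8]>>(&self, data: D) -> SimpleEntry {
        SimpleEntry(data.as_ref().to_vec())
    }
}

// Generic code now builds entries through the store it already holds,
// mirroring `self.store.entry(bytes)` in wal.rs above.
fn append_bytes<S: LogStore>(store: &S, bytes: &[u8]) -> S::Entry {
    store.entry(bytes)
}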
|
refactor
|
remove constructors from trait (#121)
|
11bdb33d37a35489d89bea39b36e08dbfb29dcd5
|
2022-12-23 12:52:12
|
Ning Sun
|
feat: sql query interceptor and plugin refactoring (#773)
| false
|
diff --git a/src/cmd/src/frontend.rs b/src/cmd/src/frontend.rs
index 6bea05ce676d..0563629ab28e 100644
--- a/src/cmd/src/frontend.rs
+++ b/src/cmd/src/frontend.rs
@@ -12,7 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use anymap::AnyMap;
+use std::sync::Arc;
+
use clap::Parser;
use frontend::frontend::{Frontend, FrontendOptions};
use frontend::grpc::GrpcOptions;
@@ -21,6 +22,7 @@ use frontend::instance::Instance;
use frontend::mysql::MysqlOptions;
use frontend::opentsdb::OpentsdbOptions;
use frontend::postgres::PostgresOptions;
+use frontend::Plugins;
use meta_client::MetaClientOpts;
use servers::auth::UserProviderRef;
use servers::http::HttpOptions;
@@ -86,21 +88,21 @@ pub struct StartCommand {
impl StartCommand {
async fn run(self) -> Result<()> {
- let plugins = load_frontend_plugins(&self.user_provider)?;
+ let plugins = Arc::new(load_frontend_plugins(&self.user_provider)?);
let opts: FrontendOptions = self.try_into()?;
- let mut frontend = Frontend::new(
- opts.clone(),
- Instance::try_new_distributed(&opts)
- .await
- .context(error::StartFrontendSnafu)?,
- plugins,
- );
+
+ let mut instance = Instance::try_new_distributed(&opts)
+ .await
+ .context(error::StartFrontendSnafu)?;
+ instance.set_plugins(plugins.clone());
+
+ let mut frontend = Frontend::new(opts, instance, plugins);
frontend.start().await.context(error::StartFrontendSnafu)
}
}
-pub fn load_frontend_plugins(user_provider: &Option<String>) -> Result<AnyMap> {
- let mut plugins = AnyMap::new();
+pub fn load_frontend_plugins(user_provider: &Option<String>) -> Result<Plugins> {
+ let mut plugins = Plugins::new();
if let Some(provider) = user_provider {
let provider = auth::user_provider_from_option(provider).context(IllegalAuthConfigSnafu)?;
diff --git a/src/cmd/src/standalone.rs b/src/cmd/src/standalone.rs
index e14f6f6e0a14..83809f0b4396 100644
--- a/src/cmd/src/standalone.rs
+++ b/src/cmd/src/standalone.rs
@@ -12,7 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use anymap::AnyMap;
+use std::sync::Arc;
+
use clap::Parser;
use common_telemetry::info;
use datanode::datanode::{Datanode, DatanodeOptions, ObjectStoreConfig};
@@ -25,6 +26,7 @@ use frontend::mysql::MysqlOptions;
use frontend::opentsdb::OpentsdbOptions;
use frontend::postgres::PostgresOptions;
use frontend::prometheus::PrometheusOptions;
+use frontend::Plugins;
use serde::{Deserialize, Serialize};
use servers::http::HttpOptions;
use servers::tls::{TlsMode, TlsOption};
@@ -150,7 +152,7 @@ impl StartCommand {
async fn run(self) -> Result<()> {
let enable_memory_catalog = self.enable_memory_catalog;
let config_file = self.config_file.clone();
- let plugins = load_frontend_plugins(&self.user_provider)?;
+ let plugins = Arc::new(load_frontend_plugins(&self.user_provider)?);
let fe_opts = FrontendOptions::try_from(self)?;
let dn_opts: DatanodeOptions = {
let mut opts: StandaloneOptions = if let Some(path) = config_file {
@@ -187,11 +189,12 @@ impl StartCommand {
/// Build frontend instance in standalone mode
async fn build_frontend(
fe_opts: FrontendOptions,
- plugins: AnyMap,
+ plugins: Arc<Plugins>,
datanode_instance: InstanceRef,
) -> Result<Frontend<FeInstance>> {
let mut frontend_instance = FeInstance::new_standalone(datanode_instance.clone());
frontend_instance.set_script_handler(datanode_instance);
+ frontend_instance.set_plugins(plugins.clone());
Ok(Frontend::new(fe_opts, frontend_instance, plugins))
}
diff --git a/src/frontend/src/frontend.rs b/src/frontend/src/frontend.rs
index c73d229e1b11..2c943de82db4 100644
--- a/src/frontend/src/frontend.rs
+++ b/src/frontend/src/frontend.rs
@@ -14,7 +14,6 @@
use std::sync::Arc;
-use anymap::AnyMap;
use meta_client::MetaClientOpts;
use serde::{Deserialize, Serialize};
use servers::auth::UserProviderRef;
@@ -31,6 +30,7 @@ use crate::opentsdb::OpentsdbOptions;
use crate::postgres::PostgresOptions;
use crate::prometheus::PrometheusOptions;
use crate::server::Services;
+use crate::Plugins;
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct FrontendOptions {
@@ -67,11 +67,11 @@ where
{
opts: FrontendOptions,
instance: Option<T>,
- plugins: AnyMap,
+ plugins: Arc<Plugins>,
}
impl<T: FrontendInstance> Frontend<T> {
- pub fn new(opts: FrontendOptions, instance: T, plugins: AnyMap) -> Self {
+ pub fn new(opts: FrontendOptions, instance: T, plugins: Arc<Plugins>) -> Self {
Self {
opts,
instance: Some(instance),
@@ -90,6 +90,7 @@ impl<T: FrontendInstance> Frontend<T> {
let instance = Arc::new(instance);
+ // TODO(sunng87): merge this into instance
let provider = self.plugins.get::<UserProviderRef>().cloned();
Services::start(&self.opts, instance, provider).await
diff --git a/src/frontend/src/instance.rs b/src/frontend/src/instance.rs
index b14bb136eb48..8550ce456fb2 100644
--- a/src/frontend/src/instance.rs
+++ b/src/frontend/src/instance.rs
@@ -43,6 +43,7 @@ use datanode::instance::InstanceRef as DnInstanceRef;
use distributed::DistInstance;
use meta_client::client::{MetaClient, MetaClientBuilder};
use meta_client::MetaClientOpts;
+use servers::interceptor::{SqlQueryInterceptor, SqlQueryInterceptorRef};
use servers::query_handler::{
GrpcAdminHandler, GrpcAdminHandlerRef, GrpcQueryHandler, GrpcQueryHandlerRef,
InfluxdbLineProtocolHandler, OpentsdbProtocolHandler, PrometheusProtocolHandler, ScriptHandler,
@@ -69,6 +70,7 @@ use crate::frontend::FrontendOptions;
use crate::sql::insert_to_request;
use crate::table::insert::insert_request_to_insert_batch;
use crate::table::route::TableRoutes;
+use crate::Plugins;
#[async_trait]
pub trait FrontendInstance:
@@ -105,6 +107,10 @@ pub struct Instance {
sql_handler: SqlQueryHandlerRef,
grpc_query_handler: GrpcQueryHandlerRef,
grpc_admin_handler: GrpcAdminHandlerRef,
+
+ /// plugins: this map holds extensions to customize query or auth
+ /// behaviours.
+ plugins: Arc<Plugins>,
}
impl Instance {
@@ -135,6 +141,7 @@ impl Instance {
sql_handler: dist_instance_ref.clone(),
grpc_query_handler: dist_instance_ref.clone(),
grpc_admin_handler: dist_instance_ref,
+ plugins: Default::default(),
})
}
@@ -178,6 +185,7 @@ impl Instance {
sql_handler: dn_instance.clone(),
grpc_query_handler: dn_instance.clone(),
grpc_admin_handler: dn_instance,
+ plugins: Default::default(),
}
}
@@ -451,6 +459,14 @@ impl Instance {
Ok(Output::RecordBatches(RecordBatches::empty()))
}
+
+ pub fn set_plugins(&mut self, map: Arc<Plugins>) {
+ self.plugins = map;
+ }
+
+ pub fn plugins(&self) -> Arc<Plugins> {
+ self.plugins.clone()
+ }
}
#[async_trait]
@@ -563,15 +579,33 @@ impl SqlQueryHandler for Instance {
query: &str,
query_ctx: QueryContextRef,
) -> Vec<server_error::Result<Output>> {
- match parse_stmt(query)
+ let query_interceptor = self.plugins.get::<SqlQueryInterceptorRef>();
+ let query = match query_interceptor.pre_parsing(query, query_ctx.clone()) {
+ Ok(q) => q,
+ Err(e) => return vec![Err(e)],
+ };
+
+ match parse_stmt(query.as_ref())
.map_err(BoxedError::new)
.context(server_error::ExecuteQuerySnafu { query })
+ .and_then(|stmts| query_interceptor.post_parsing(stmts, query_ctx.clone()))
{
Ok(stmts) => {
let mut results = Vec::with_capacity(stmts.len());
for stmt in stmts {
+ // TODO(sunng87): figure out at which stage we can call
+ // this hook after ArrowFlight adoption. We need to provide
+ // the LogicalPlan to this hook.
+ if let Err(e) = query_interceptor.pre_execute(&stmt, None, query_ctx.clone()) {
+ results.push(Err(e));
+ break;
+ }
match self.query_statement(stmt, query_ctx.clone()).await {
- Ok(output) => results.push(Ok(output)),
+ Ok(output) => {
+ let output_result =
+ query_interceptor.post_execute(output, query_ctx.clone());
+ results.push(output_result);
+ }
Err(e) => {
results.push(Err(e));
break;
@@ -591,7 +625,15 @@ impl SqlQueryHandler for Instance {
stmt: Statement,
query_ctx: QueryContextRef,
) -> server_error::Result<Output> {
- self.query_statement(stmt, query_ctx).await
+ let query_interceptor = self.plugins.get::<SqlQueryInterceptorRef>();
+
+ // TODO(sunng87): figure out at which stage we can call
+ // this hook after ArrowFlight adoption. We need to provide
+ // the LogicalPlan to this hook.
+ query_interceptor.pre_execute(&stmt, None, query_ctx.clone())?;
+ self.query_statement(stmt, query_ctx.clone())
+ .await
+ .and_then(|output| query_interceptor.post_execute(output, query_ctx.clone()))
}
fn is_valid_schema(&self, catalog: &str, schema: &str) -> server_error::Result<bool> {
@@ -673,6 +715,8 @@ impl GrpcAdminHandler for Instance {
#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
+ use std::borrow::Cow;
+ use std::sync::atomic::AtomicU32;
use api::v1::codec::SelectResult;
use api::v1::column::SemanticType;
@@ -972,4 +1016,164 @@ mod tests {
region_ids: vec![0],
}
}
+
+ #[tokio::test(flavor = "multi_thread")]
+ async fn test_sql_interceptor_plugin() {
+ #[derive(Default)]
+ struct AssertionHook {
+ pub(crate) c: AtomicU32,
+ }
+
+ impl SqlQueryInterceptor for AssertionHook {
+ fn pre_parsing<'a>(
+ &self,
+ query: &'a str,
+ _query_ctx: QueryContextRef,
+ ) -> server_error::Result<std::borrow::Cow<'a, str>> {
+ self.c.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
+ assert!(query.starts_with("CREATE TABLE demo"));
+ Ok(Cow::Borrowed(query))
+ }
+
+ fn post_parsing(
+ &self,
+ statements: Vec<Statement>,
+ _query_ctx: QueryContextRef,
+ ) -> server_error::Result<Vec<Statement>> {
+ self.c.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
+ assert!(matches!(statements[0], Statement::CreateTable(_)));
+ Ok(statements)
+ }
+
+ fn pre_execute(
+ &self,
+ _statement: &Statement,
+ _plan: Option<&query::plan::LogicalPlan>,
+ _query_ctx: QueryContextRef,
+ ) -> server_error::Result<()> {
+ self.c.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
+ Ok(())
+ }
+
+ fn post_execute(
+ &self,
+ mut output: Output,
+ _query_ctx: QueryContextRef,
+ ) -> server_error::Result<Output> {
+ self.c.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
+ match &mut output {
+ Output::AffectedRows(rows) => {
+ assert_eq!(*rows, 1);
+ // update output result
+ *rows = 10;
+ }
+ _ => unreachable!(),
+ }
+ Ok(output)
+ }
+ }
+
+ let query_ctx = Arc::new(QueryContext::new());
+ let (mut instance, _guard) = tests::create_frontend_instance("test_hook").await;
+
+ let mut plugins = Plugins::new();
+ let counter_hook = Arc::new(AssertionHook::default());
+ plugins.insert::<SqlQueryInterceptorRef>(counter_hook.clone());
+ Arc::make_mut(&mut instance).set_plugins(Arc::new(plugins));
+
+ let sql = r#"CREATE TABLE demo(
+ host STRING,
+ ts TIMESTAMP,
+ cpu DOUBLE NULL,
+ memory DOUBLE NULL,
+ disk_util DOUBLE DEFAULT 9.9,
+ TIME INDEX (ts),
+ PRIMARY KEY(host)
+ ) engine=mito with(regions=1);"#;
+ let output = SqlQueryHandler::do_query(&*instance, sql, query_ctx.clone())
+ .await
+ .remove(0)
+ .unwrap();
+
+ // assert that the hook is called 4 times
+ assert_eq!(4, counter_hook.c.load(std::sync::atomic::Ordering::Relaxed));
+ match output {
+ Output::AffectedRows(rows) => assert_eq!(rows, 10),
+ _ => unreachable!(),
+ }
+ }
+
+ #[tokio::test(flavor = "multi_thread")]
+ async fn test_disable_db_operation_plugin() {
+ #[derive(Default)]
+ struct DisableDBOpHook;
+
+ impl SqlQueryInterceptor for DisableDBOpHook {
+ fn post_parsing(
+ &self,
+ statements: Vec<Statement>,
+ _query_ctx: QueryContextRef,
+ ) -> server_error::Result<Vec<Statement>> {
+ for s in &statements {
+ match s {
+ Statement::CreateDatabase(_) | Statement::ShowDatabases(_) => {
+ return Err(server_error::Error::NotSupported {
+ feat: "Database operations".to_owned(),
+ })
+ }
+ _ => {}
+ }
+ }
+
+ Ok(statements)
+ }
+ }
+
+ let query_ctx = Arc::new(QueryContext::new());
+ let (mut instance, _guard) = tests::create_frontend_instance("test_db_hook").await;
+
+ let mut plugins = Plugins::new();
+ let hook = Arc::new(DisableDBOpHook::default());
+ plugins.insert::<SqlQueryInterceptorRef>(hook.clone());
+ Arc::make_mut(&mut instance).set_plugins(Arc::new(plugins));
+
+ let sql = r#"CREATE TABLE demo(
+ host STRING,
+ ts TIMESTAMP,
+ cpu DOUBLE NULL,
+ memory DOUBLE NULL,
+ disk_util DOUBLE DEFAULT 9.9,
+ TIME INDEX (ts),
+ PRIMARY KEY(host)
+ ) engine=mito with(regions=1);"#;
+ let output = SqlQueryHandler::do_query(&*instance, sql, query_ctx.clone())
+ .await
+ .remove(0)
+ .unwrap();
+
+ match output {
+ Output::AffectedRows(rows) => assert_eq!(rows, 1),
+ _ => unreachable!(),
+ }
+
+ let sql = r#"CREATE DATABASE tomcat"#;
+ if let Err(e) = SqlQueryHandler::do_query(&*instance, sql, query_ctx.clone())
+ .await
+ .remove(0)
+ {
+ assert!(matches!(e, server_error::Error::NotSupported { .. }));
+ } else {
+ unreachable!();
+ }
+
+ let sql = r#"SELECT 1; SHOW DATABASES"#;
+ if let Err(e) = SqlQueryHandler::do_query(&*instance, sql, query_ctx.clone())
+ .await
+ .remove(0)
+ {
+ assert!(matches!(e, server_error::Error::NotSupported { .. }));
+ } else {
+ unreachable!();
+ }
+ }
}
diff --git a/src/frontend/src/lib.rs b/src/frontend/src/lib.rs
index 82807d058221..0c5bf3381692 100644
--- a/src/frontend/src/lib.rs
+++ b/src/frontend/src/lib.rs
@@ -14,6 +14,8 @@
#![feature(assert_matches)]
+pub type Plugins = anymap::Map<dyn core::any::Any + Send + Sync>;
+
mod catalog;
mod datanode;
pub mod error;
diff --git a/src/servers/Cargo.toml b/src/servers/Cargo.toml
index 6cdd0a83cf04..3d3978cd20ac 100644
--- a/src/servers/Cargo.toml
+++ b/src/servers/Cargo.toml
@@ -37,6 +37,7 @@ openmetrics-parser = "0.4"
opensrv-mysql = "0.3"
pgwire = "0.6.3"
prost = "0.11"
+query = { path = "../query" }
rand = "0.8"
regex = "1.6"
rustls = "0.20"
@@ -65,7 +66,6 @@ common-base = { path = "../common/base" }
mysql_async = { version = "0.31", default-features = false, features = [
"default-rustls",
] }
-query = { path = "../query" }
rand = "0.8"
script = { path = "../script", features = ["python"] }
serde_json = "1.0"
diff --git a/src/servers/src/interceptor.rs b/src/servers/src/interceptor.rs
new file mode 100644
index 000000000000..3f105e7dd833
--- /dev/null
+++ b/src/servers/src/interceptor.rs
@@ -0,0 +1,105 @@
+// Copyright 2022 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::borrow::Cow;
+use std::sync::Arc;
+
+use common_query::Output;
+use query::plan::LogicalPlan;
+use session::context::QueryContextRef;
+use sql::statements::statement::Statement;
+
+use crate::error::Result;
+
+/// SqlQueryInterceptor can track the life cycle of a SQL query and customize or
+/// abort its execution at a given point.
+pub trait SqlQueryInterceptor {
+ /// Called before a query string is parsed into sql statements.
+ /// The implementation is allowed to change the sql string if needed.
+ fn pre_parsing<'a>(&self, query: &'a str, _query_ctx: QueryContextRef) -> Result<Cow<'a, str>> {
+ Ok(Cow::Borrowed(query))
+ }
+
+ /// Called after sql is parsed into statements. This interceptor is called
+ /// on each statement and the implementation can alter the statement or
+ /// abort execution by raising an error.
+ fn post_parsing(
+ &self,
+ statements: Vec<Statement>,
+ _query_ctx: QueryContextRef,
+ ) -> Result<Vec<Statement>> {
+ Ok(statements)
+ }
+
+ /// Called before sql is actually executed. This hook is not called at the moment.
+ fn pre_execute(
+ &self,
+ _statement: &Statement,
+ _plan: Option<&LogicalPlan>,
+ _query_ctx: QueryContextRef,
+ ) -> Result<()> {
+ Ok(())
+ }
+
+ /// Called after execution finished. The implementation can modify the
+ /// output if needed.
+ fn post_execute(&self, output: Output, _query_ctx: QueryContextRef) -> Result<Output> {
+ Ok(output)
+ }
+}
+
+pub type SqlQueryInterceptorRef = Arc<dyn SqlQueryInterceptor + Send + Sync + 'static>;
+
+impl SqlQueryInterceptor for Option<&SqlQueryInterceptorRef> {
+ fn pre_parsing<'a>(&self, query: &'a str, query_ctx: QueryContextRef) -> Result<Cow<'a, str>> {
+ if let Some(this) = self {
+ this.pre_parsing(query, query_ctx)
+ } else {
+ Ok(Cow::Borrowed(query))
+ }
+ }
+
+ fn post_parsing(
+ &self,
+ statements: Vec<Statement>,
+ query_ctx: QueryContextRef,
+ ) -> Result<Vec<Statement>> {
+ if let Some(this) = self {
+ this.post_parsing(statements, query_ctx)
+ } else {
+ Ok(statements)
+ }
+ }
+
+ fn pre_execute(
+ &self,
+ statement: &Statement,
+ plan: Option<&LogicalPlan>,
+ query_ctx: QueryContextRef,
+ ) -> Result<()> {
+ if let Some(this) = self {
+ this.pre_execute(statement, plan, query_ctx)
+ } else {
+ Ok(())
+ }
+ }
+
+ fn post_execute(&self, output: Output, query_ctx: QueryContextRef) -> Result<Output> {
+ if let Some(this) = self {
+ this.post_execute(output, query_ctx)
+ } else {
+ Ok(output)
+ }
+ }
+}
diff --git a/src/servers/src/lib.rs b/src/servers/src/lib.rs
index e58e4363a7c1..e18caf7fa3dc 100644
--- a/src/servers/src/lib.rs
+++ b/src/servers/src/lib.rs
@@ -22,6 +22,7 @@ pub mod error;
pub mod grpc;
pub mod http;
pub mod influxdb;
+pub mod interceptor;
pub mod line_writer;
pub mod mysql;
pub mod opentsdb;
diff --git a/src/servers/tests/interceptor.rs b/src/servers/tests/interceptor.rs
new file mode 100644
index 000000000000..c1acd7c80854
--- /dev/null
+++ b/src/servers/tests/interceptor.rs
@@ -0,0 +1,38 @@
+// Copyright 2022 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::borrow::Cow;
+use std::sync::Arc;
+
+use servers::error::Result;
+use servers::interceptor::SqlQueryInterceptor;
+use session::context::{QueryContext, QueryContextRef};
+
+pub struct NoopInterceptor;
+
+impl SqlQueryInterceptor for NoopInterceptor {
+ fn pre_parsing<'a>(&self, query: &'a str, _query_ctx: QueryContextRef) -> Result<Cow<'a, str>> {
+ let modified_query = format!("{query};");
+ Ok(Cow::Owned(modified_query))
+ }
+}
+
+#[test]
+fn test_default_interceptor_behaviour() {
+ let di = NoopInterceptor;
+ let ctx = Arc::new(QueryContext::new());
+
+ let query = "SELECT 1";
+ assert_eq!("SELECT 1;", di.pre_parsing(query, ctx).unwrap());
+}
diff --git a/src/servers/tests/mod.rs b/src/servers/tests/mod.rs
index 0105f7d3f1b3..32a76385783f 100644
--- a/src/servers/tests/mod.rs
+++ b/src/servers/tests/mod.rs
@@ -33,6 +33,7 @@ use script::engine::{CompileContext, EvalContext, Script, ScriptEngine};
use script::python::{PyEngine, PyScript};
use session::context::QueryContextRef;
+mod interceptor;
mod opentsdb;
mod postgres;
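As the do_query changes above show, a registered SqlQueryInterceptorRef is consulted at four points: pre_parsing on the raw query string, post_parsing on the parsed statements, pre_execute per statement, and post_execute on each output; the blanket impl for Option<&SqlQueryInterceptorRef> turns all of them into no-ops when no hook is installed. Hooks are wired in through the frontend Plugins map; a minimal sketch of the registration (the hook type is illustrative, and set_plugins is called the same way as in cmd/standalone.rs above):

use std::sync::Arc;

use servers::interceptor::{SqlQueryInterceptor, SqlQueryInterceptorRef};

// Illustrative hook: accepts everything by relying on the trait's default methods.
struct PassThroughHook;
impl SqlQueryInterceptor for PassThroughHook {}

let mut plugins = frontend::Plugins::new();
plugins.insert::<SqlQueryInterceptorRef>(Arc::new(PassThroughHook));
// instance.set_plugins(Arc::new(plugins));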
|
feat
|
sql query interceptor and plugin refactoring (#773)
|
8a5ef826b9793407c605b2de1245554a74b8323e
|
2023-10-10 12:25:57
|
Yingwen
|
fix(mito): Do not write to memtables if writing wal is failed (#2561)
| false
|
diff --git a/src/mito2/src/metrics.rs b/src/mito2/src/metrics.rs
index bd5920b3cbf5..855ba9ddf642 100644
--- a/src/mito2/src/metrics.rs
+++ b/src/mito2/src/metrics.rs
@@ -44,4 +44,6 @@ pub const WRITE_REJECT_TOTAL: &str = "mito.write.reject_total";
pub const WRITE_STAGE_ELAPSED: &str = "mito.write.stage_elapsed";
/// Stage label.
pub const STAGE_LABEL: &str = "stage";
+/// Counter of rows to write.
+pub const WRITE_ROWS_TOTAL: &str = "mito.write.rows_total";
// ------ End of write related metrics
diff --git a/src/mito2/src/region_write_ctx.rs b/src/mito2/src/region_write_ctx.rs
index 8e83a6442a1f..5270ceec326d 100644
--- a/src/mito2/src/region_write_ctx.rs
+++ b/src/mito2/src/region_write_ctx.rs
@@ -15,7 +15,7 @@
use std::mem;
use std::sync::Arc;
-use api::v1::{Mutation, Rows, WalEntry};
+use api::v1::{Mutation, OpType, Rows, WalEntry};
use common_query::Output;
use snafu::ResultExt;
use store_api::logstore::LogStore;
@@ -92,6 +92,14 @@ pub(crate) struct RegionWriteCtx {
///
/// The i-th notify is for i-th mutation.
notifiers: Vec<WriteNotify>,
+ /// The write operation has failed and we should not write to the mutable memtable.
+ failed: bool,
+
+ // Metrics:
+ /// Rows to put.
+ pub(crate) put_num: usize,
+ /// Rows to delete.
+ pub(crate) delete_num: usize,
}
impl RegionWriteCtx {
@@ -112,6 +120,9 @@ impl RegionWriteCtx {
next_entry_id: last_entry_id + 1,
wal_entry: WalEntry::default(),
notifiers: Vec::new(),
+ failed: false,
+ put_num: 0,
+ delete_num: 0,
}
}
@@ -130,6 +141,13 @@ impl RegionWriteCtx {
// Increase sequence number.
self.next_sequence += num_rows as u64;
+
+ // Update metrics.
+ match OpType::from_i32(op_type) {
+ Some(OpType::Delete) => self.delete_num += num_rows,
+ Some(OpType::Put) => self.put_num += num_rows,
+ None => (),
+ }
}
/// Encode and add WAL entry to the writer.
@@ -153,6 +171,9 @@ impl RegionWriteCtx {
for notify in &mut self.notifiers {
notify.err = Some(err.clone());
}
+
+ // Fail the whole write operation.
+ self.failed = true;
}
/// Updates next entry id.
@@ -164,6 +185,10 @@ impl RegionWriteCtx {
pub(crate) fn write_memtable(&mut self) {
debug_assert_eq!(self.notifiers.len(), self.wal_entry.mutations.len());
+ if self.failed {
+ return;
+ }
+
let mutable = &self.version.memtables.mutable;
// Takes mutations from the wal entry.
let mutations = mem::take(&mut self.wal_entry.mutations);
diff --git a/src/mito2/src/worker/handle_write.rs b/src/mito2/src/worker/handle_write.rs
index de6b6a5c5ea0..873ef726fb99 100644
--- a/src/mito2/src/worker/handle_write.rs
+++ b/src/mito2/src/worker/handle_write.rs
@@ -24,7 +24,10 @@ use store_api::metadata::RegionMetadata;
use store_api::storage::RegionId;
use crate::error::{RejectWriteSnafu, Result};
-use crate::metrics::{STAGE_LABEL, WRITE_REJECT_TOTAL, WRITE_STAGE_ELAPSED, WRITE_STALL_TOTAL};
+use crate::metrics::{
+ STAGE_LABEL, TYPE_LABEL, WRITE_REJECT_TOTAL, WRITE_ROWS_TOTAL, WRITE_STAGE_ELAPSED,
+ WRITE_STALL_TOTAL,
+};
use crate::region_write_ctx::RegionWriteCtx;
use crate::request::{SenderWriteRequest, WriteRequest};
use crate::worker::RegionWorkerLoop;
@@ -80,13 +83,19 @@ impl<S: LogStore> RegionWorkerLoop<S> {
}
}
+ let (mut put_rows, mut delete_rows) = (0, 0);
// Write to memtables.
{
let _timer = timer!(WRITE_STAGE_ELAPSED, &[(STAGE_LABEL, "write_memtable")]);
for mut region_ctx in region_ctxs.into_values() {
region_ctx.write_memtable();
+ put_rows += region_ctx.put_num;
+ delete_rows += region_ctx.delete_num;
}
}
+
+ counter!(WRITE_ROWS_TOTAL, put_rows as u64, TYPE_LABEL => "put");
+ counter!(WRITE_ROWS_TOTAL, delete_rows as u64, TYPE_LABEL => "delete");
}
}
diff --git a/src/store-api/src/region_request.rs b/src/store-api/src/region_request.rs
index 9d608f2077da..04251b81aeed 100644
--- a/src/store-api/src/region_request.rs
+++ b/src/store-api/src/region_request.rs
@@ -30,7 +30,6 @@ use crate::storage::{ColumnId, RegionId, ScanRequest};
#[derive(Debug, IntoStaticStr)]
pub enum RegionRequest {
- // TODO: rename to InsertRequest
Put(RegionPutRequest),
Delete(RegionDeleteRequest),
Create(RegionCreateRequest),
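The fix keeps the memtables consistent with the WAL: mutations reach the mutable memtable only after their WAL entry has been persisted, so a failed WAL write records the error on every pending notifier, marks the context as failed, and turns write_memtable into a no-op. Condensed view of the guard (signature and flag taken from the hunks above, body elided):

pub(crate) fn write_memtable(&mut self) {
    // `failed` is set when persisting the WAL entry returned an error; the
    // notifiers already carry that error, so nothing must reach the memtable.
    if self.failed {
        return;
    }
    // ... take self.wal_entry.mutations and write them to the mutable memtable ...
}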
|
fix
|
Do not write to memtables if writing wal is failed (#2561)
|
ca7ed67dc59d89c3477bcf36569eeb6b5b0333e3
|
2023-01-06 09:00:23
|
Jiachun Feng
|
feat: collect stats from heartbeats (#833)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 591f6b846fea..191ae2dd0fea 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3823,6 +3823,7 @@ dependencies = [
"common-runtime",
"common-telemetry",
"common-time",
+ "dashmap",
"etcd-client",
"futures",
"h2",
diff --git a/src/api/greptime/v1/meta/heartbeat.proto b/src/api/greptime/v1/meta/heartbeat.proto
index b4654efc6d2b..6d37806a7a0d 100644
--- a/src/api/greptime/v1/meta/heartbeat.proto
+++ b/src/api/greptime/v1/meta/heartbeat.proto
@@ -26,7 +26,7 @@ message HeartbeatRequest {
TimeInterval report_interval = 4;
// Node stat
NodeStat node_stat = 5;
- // Region stats in this node
+ // Region stats on this node
repeated RegionStat region_stats = 6;
// Follower nodes and stats, empty on follower nodes
repeated ReplicaStat replica_stats = 7;
@@ -37,16 +37,16 @@ message NodeStat {
uint64 rcus = 1;
// The write capacity units during this period
uint64 wcus = 2;
- // Table number in this node
+ // How many tables on this node
uint64 table_num = 3;
- // Region number in this node
+ // How many regions on this node
uint64 region_num = 4;
double cpu_usage = 5;
double load = 6;
- // Read disk I/O in the node
+ // Read disk IO on this node
double read_io_rate = 7;
- // Write disk I/O in the node
+ // Write disk IO on this node
double write_io_rate = 8;
// Others
@@ -60,9 +60,9 @@ message RegionStat {
uint64 rcus = 3;
// The write capacity units during this period
uint64 wcus = 4;
- // Approximate region size
- uint64 approximate_size = 5;
- // Approximate number of rows
+ // Approximate bytes of this region
+ uint64 approximate_bytes = 5;
+ // Approximate number of rows in this region
uint64 approximate_rows = 6;
// Others
diff --git a/src/meta-srv/Cargo.toml b/src/meta-srv/Cargo.toml
index 2abf690816dc..43b26ffbcb88 100644
--- a/src/meta-srv/Cargo.toml
+++ b/src/meta-srv/Cargo.toml
@@ -18,6 +18,7 @@ common-grpc = { path = "../common/grpc" }
common-runtime = { path = "../common/runtime" }
common-telemetry = { path = "../common/telemetry" }
common-time = { path = "../common/time" }
+dashmap = "5.4"
etcd-client = "0.10"
futures.workspace = true
h2 = "0.3"
diff --git a/src/meta-srv/src/bootstrap.rs b/src/meta-srv/src/bootstrap.rs
index 9d5575c9f729..c03582d693ef 100644
--- a/src/meta-srv/src/bootstrap.rs
+++ b/src/meta-srv/src/bootstrap.rs
@@ -60,7 +60,7 @@ pub fn router(meta_srv: MetaSrv) -> Router {
pub async fn make_meta_srv(opts: MetaSrvOptions) -> Result<MetaSrv> {
let kv_store = EtcdStore::with_endpoints([&opts.store_addr]).await?;
let election = EtcdElection::with_endpoints(&opts.server_addr, [&opts.store_addr]).await?;
- let meta_srv = MetaSrv::new(opts, kv_store, None, Some(election)).await;
+ let meta_srv = MetaSrv::new(opts, kv_store, None, Some(election), None).await;
meta_srv.start().await;
Ok(meta_srv)
}
diff --git a/src/meta-srv/src/handler.rs b/src/meta-srv/src/handler.rs
index 7fe0892d29c1..8f98913ee09d 100644
--- a/src/meta-srv/src/handler.rs
+++ b/src/meta-srv/src/handler.rs
@@ -12,9 +12,19 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-pub(crate) mod check_leader;
-pub(crate) mod datanode_lease;
-pub(crate) mod response_header;
+pub use check_leader_handler::CheckLeaderHandler;
+pub use collect_stats_handler::CollectStatsHandler;
+pub use keep_lease_handler::KeepLeaseHandler;
+pub use persist_stats_handler::PersistStatsHandler;
+pub use response_header_handler::ResponseHeaderHandler;
+
+mod check_leader_handler;
+mod collect_stats_handler;
+mod instruction;
+mod keep_lease_handler;
+mod node_stat;
+mod persist_stats_handler;
+mod response_header_handler;
use std::collections::BTreeMap;
use std::sync::Arc;
@@ -24,6 +34,8 @@ use common_telemetry::info;
use tokio::sync::mpsc::Sender;
use tokio::sync::RwLock;
+use self::instruction::Instruction;
+use self::node_stat::Stat;
use crate::error::Result;
use crate::metasrv::Context;
@@ -40,7 +52,7 @@ pub trait HeartbeatHandler: Send + Sync {
#[derive(Debug, Default)]
pub struct HeartbeatAccumulator {
pub header: Option<ResponseHeader>,
- pub states: Vec<State>,
+ pub stats: Vec<Stat>,
pub instructions: Vec<Instruction>,
}
@@ -51,12 +63,6 @@ impl HeartbeatAccumulator {
}
}
-#[derive(Debug)]
-pub enum State {}
-
-#[derive(Debug)]
-pub enum Instruction {}
-
pub type Pusher = Sender<std::result::Result<HeartbeatResponse, tonic::Status>>;
#[derive(Clone, Default)]
diff --git a/src/meta-srv/src/handler/check_leader.rs b/src/meta-srv/src/handler/check_leader_handler.rs
similarity index 98%
rename from src/meta-srv/src/handler/check_leader.rs
rename to src/meta-srv/src/handler/check_leader_handler.rs
index 5efca6208f91..c35c7f822c45 100644
--- a/src/meta-srv/src/handler/check_leader.rs
+++ b/src/meta-srv/src/handler/check_leader_handler.rs
@@ -18,6 +18,7 @@ use crate::error::Result;
use crate::handler::{HeartbeatAccumulator, HeartbeatHandler};
use crate::metasrv::Context;
+#[derive(Default)]
pub struct CheckLeaderHandler;
#[async_trait::async_trait]
diff --git a/src/meta-srv/src/handler/collect_stats_handler.rs b/src/meta-srv/src/handler/collect_stats_handler.rs
new file mode 100644
index 000000000000..73744158462c
--- /dev/null
+++ b/src/meta-srv/src/handler/collect_stats_handler.rs
@@ -0,0 +1,86 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::collections::VecDeque;
+
+use api::v1::meta::HeartbeatRequest;
+use common_telemetry::debug;
+use dashmap::mapref::entry::Entry;
+use dashmap::DashMap;
+
+use super::node_stat::Stat;
+use crate::error::Result;
+use crate::handler::{HeartbeatAccumulator, HeartbeatHandler};
+use crate::metasrv::Context;
+
+type StatKey = (u64, u64);
+
+pub struct CollectStatsHandler {
+ max_cached_stats_per_key: usize,
+ cache: DashMap<StatKey, VecDeque<Stat>>,
+}
+
+impl Default for CollectStatsHandler {
+ fn default() -> Self {
+ Self::new(10)
+ }
+}
+
+impl CollectStatsHandler {
+ pub fn new(max_cached_stats_per_key: usize) -> Self {
+ Self {
+ max_cached_stats_per_key,
+ cache: DashMap::new(),
+ }
+ }
+}
+
+#[async_trait::async_trait]
+impl HeartbeatHandler for CollectStatsHandler {
+ async fn handle(
+ &self,
+ req: &HeartbeatRequest,
+ ctx: &Context,
+ acc: &mut HeartbeatAccumulator,
+ ) -> Result<()> {
+ if ctx.is_skip_all() {
+ return Ok(());
+ }
+
+ match Stat::try_from(req) {
+ Ok(stat) => {
+ let key = (stat.cluster_id, stat.id);
+ match self.cache.entry(key) {
+ Entry::Occupied(mut e) => {
+ let deque = e.get_mut();
+ deque.push_front(stat);
+ if deque.len() >= self.max_cached_stats_per_key {
+ acc.stats = deque.drain(..).collect();
+ }
+ }
+ Entry::Vacant(e) => {
+ let mut stat_vec = VecDeque::with_capacity(self.max_cached_stats_per_key);
+ stat_vec.push_front(stat);
+ e.insert(stat_vec);
+ }
+ }
+ }
+ Err(_) => {
+ debug!("Incomplete heartbeat data: {:?}", req);
+ }
+ };
+
+ Ok(())
+ }
+}
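Note that the collect handler batches rather than forwards: each heartbeat's Stat is pushed onto a per-(cluster_id, node_id) VecDeque inside a DashMap, and only once a key has accumulated max_cached_stats_per_key entries is the whole deque drained into acc.stats for the handlers further down the chain. A standalone toy version of that drain-when-full behaviour (not the meta-srv types):

use std::collections::VecDeque;

/// Buffer values and hand back a full batch only when the threshold is reached,
/// mirroring the drain-when-full logic in CollectStatsHandler above.
fn push_and_maybe_drain(
    deque: &mut VecDeque<u64>,
    value: u64,
    max_per_key: usize,
) -> Option<Vec<u64>> {
    deque.push_front(value);
    if deque.len() >= max_per_key {
        Some(deque.drain(..).collect())
    } else {
        None
    }
}

fn main() {
    let mut deque = VecDeque::new();
    assert_eq!(None, push_and_maybe_drain(&mut deque, 1, 3));
    assert_eq!(None, push_and_maybe_drain(&mut deque, 2, 3));
    // The third push reaches the threshold and returns all three values.
    assert_eq!(Some(vec![3, 2, 1]), push_and_maybe_drain(&mut deque, 3, 3));
    assert!(deque.is_empty());
}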
diff --git a/src/meta-srv/src/handler/datanode_lease.rs b/src/meta-srv/src/handler/datanode_lease.rs
deleted file mode 100644
index 65089bba5c9a..000000000000
--- a/src/meta-srv/src/handler/datanode_lease.rs
+++ /dev/null
@@ -1,114 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use api::v1::meta::{HeartbeatRequest, PutRequest};
-use common_telemetry::info;
-use common_time::util as time_util;
-
-use crate::error::Result;
-use crate::handler::{HeartbeatAccumulator, HeartbeatHandler};
-use crate::keys::{LeaseKey, LeaseValue};
-use crate::metasrv::Context;
-
-pub struct DatanodeLeaseHandler;
-
-#[async_trait::async_trait]
-impl HeartbeatHandler for DatanodeLeaseHandler {
- async fn handle(
- &self,
- req: &HeartbeatRequest,
- ctx: &Context,
- _acc: &mut HeartbeatAccumulator,
- ) -> Result<()> {
- if ctx.is_skip_all() {
- return Ok(());
- }
-
- let HeartbeatRequest { header, peer, .. } = req;
- if let Some(peer) = &peer {
- let key = LeaseKey {
- cluster_id: header.as_ref().map_or(0, |h| h.cluster_id),
- node_id: peer.id,
- };
- let value = LeaseValue {
- timestamp_millis: time_util::current_time_millis(),
- node_addr: peer.addr.clone(),
- };
-
- info!("Receive a heartbeat: {:?}, {:?}", key, value);
-
- let key = key.try_into()?;
- let value = value.try_into()?;
- let put = PutRequest {
- key,
- value,
- ..Default::default()
- };
-
- ctx.kv_store.put(put).await?;
- }
-
- Ok(())
- }
-}
-
-#[cfg(test)]
-mod tests {
- use std::sync::atomic::AtomicBool;
- use std::sync::Arc;
-
- use api::v1::meta::{Peer, RangeRequest, RequestHeader};
-
- use super::*;
- use crate::service::store::memory::MemStore;
-
- #[tokio::test]
- async fn test_handle_datanode_lease() {
- let kv_store = Arc::new(MemStore::new());
- let ctx = Context {
- datanode_lease_secs: 30,
- server_addr: "127.0.0.1:0000".to_string(),
- kv_store,
- election: None,
- skip_all: Arc::new(AtomicBool::new(false)),
- };
-
- let req = HeartbeatRequest {
- header: Some(RequestHeader::new((1, 2))),
- peer: Some(Peer {
- id: 3,
- addr: "127.0.0.1:1111".to_string(),
- }),
- ..Default::default()
- };
- let mut acc = HeartbeatAccumulator::default();
-
- let lease_handler = DatanodeLeaseHandler {};
- lease_handler.handle(&req, &ctx, &mut acc).await.unwrap();
-
- let key = LeaseKey {
- cluster_id: 1,
- node_id: 3,
- };
-
- let req = RangeRequest {
- key: key.try_into().unwrap(),
- ..Default::default()
- };
-
- let res = ctx.kv_store.range(req).await.unwrap();
-
- assert_eq!(1, res.kvs.len());
- }
-}
diff --git a/src/meta-srv/src/handler/instruction.rs b/src/meta-srv/src/handler/instruction.rs
new file mode 100644
index 000000000000..93620708bda6
--- /dev/null
+++ b/src/meta-srv/src/handler/instruction.rs
@@ -0,0 +1,16 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#[derive(Debug)]
+pub enum Instruction {}
diff --git a/src/meta-srv/src/handler/keep_lease_handler.rs b/src/meta-srv/src/handler/keep_lease_handler.rs
new file mode 100644
index 000000000000..e3bfcd056ab3
--- /dev/null
+++ b/src/meta-srv/src/handler/keep_lease_handler.rs
@@ -0,0 +1,91 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use api::v1::meta::{BatchPutRequest, HeartbeatRequest, KeyValue};
+use common_telemetry::{info, warn};
+use common_time::util as time_util;
+use tokio::sync::mpsc::{self, Sender};
+
+use crate::error::Result;
+use crate::handler::{HeartbeatAccumulator, HeartbeatHandler};
+use crate::keys::{LeaseKey, LeaseValue};
+use crate::metasrv::Context;
+use crate::service::store::kv::KvStoreRef;
+
+pub struct KeepLeaseHandler {
+ tx: Sender<KeyValue>,
+}
+
+impl KeepLeaseHandler {
+ pub fn new(kv_store: KvStoreRef) -> Self {
+ let (tx, mut rx) = mpsc::channel(1024);
+ common_runtime::spawn_bg(async move {
+ while let Some(kv) = rx.recv().await {
+ let mut kvs = vec![kv];
+
+ while let Ok(kv) = rx.try_recv() {
+ kvs.push(kv);
+ }
+
+ let batch_put = BatchPutRequest {
+ kvs,
+ ..Default::default()
+ };
+
+ if let Err(err) = kv_store.batch_put(batch_put).await {
+ warn!("Failed to write lease KVs, {err}");
+ }
+ }
+ });
+
+ Self { tx }
+ }
+}
+
+#[async_trait::async_trait]
+impl HeartbeatHandler for KeepLeaseHandler {
+ async fn handle(
+ &self,
+ req: &HeartbeatRequest,
+ ctx: &Context,
+ _acc: &mut HeartbeatAccumulator,
+ ) -> Result<()> {
+ if ctx.is_skip_all() {
+ return Ok(());
+ }
+
+ let HeartbeatRequest { header, peer, .. } = req;
+ if let Some(peer) = &peer {
+ let key = LeaseKey {
+ cluster_id: header.as_ref().map_or(0, |h| h.cluster_id),
+ node_id: peer.id,
+ };
+ let value = LeaseValue {
+ timestamp_millis: time_util::current_time_millis(),
+ node_addr: peer.addr.clone(),
+ };
+
+ info!("Receive a heartbeat: {key:?}, {value:?}");
+
+ let key = key.try_into()?;
+ let value = value.try_into()?;
+
+ if let Err(err) = self.tx.send(KeyValue { key, value }).await {
+ warn!("Failed to send lease KV to writer, peer: {peer:?}, {err}");
+ }
+ }
+
+ Ok(())
+ }
+}
diff --git a/src/meta-srv/src/handler/node_stat.rs b/src/meta-srv/src/handler/node_stat.rs
new file mode 100644
index 000000000000..e530469ace25
--- /dev/null
+++ b/src/meta-srv/src/handler/node_stat.rs
@@ -0,0 +1,109 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use api::v1::meta::HeartbeatRequest;
+use common_time::util as time_util;
+
+#[derive(Debug)]
+pub struct Stat {
+ pub timestamp_millis: i64,
+ pub cluster_id: u64,
+ pub id: u64,
+ pub addr: String,
+ /// Leader node
+ pub is_leader: bool,
+ /// The read capacity units during this period
+ pub rcus: u64,
+ /// The write capacity units during this period
+ pub wcus: u64,
+ /// How many tables on this node
+ pub table_num: u64,
+ /// How many regions on this node
+ pub region_num: u64,
+ pub cpu_usage: f64,
+ pub load: f64,
+ /// Read disk IO on this node
+ pub read_io_rate: f64,
+ /// Write disk IO on this node
+ pub write_io_rate: f64,
+ /// Region stats on this node
+ pub region_stats: Vec<RegionStat>,
+}
+
+#[derive(Debug)]
+pub struct RegionStat {
+ pub id: u64,
+ pub catalog: String,
+ pub schema: String,
+ pub table: String,
+ /// The read capacity units during this period
+ pub rcus: u64,
+ /// The write capacity units during this period
+ pub wcus: u64,
+ /// Approximate bytes of this region
+ pub approximate_bytes: u64,
+ /// Approximate number of rows in this region
+ pub approximate_rows: u64,
+}
+
+impl TryFrom<&HeartbeatRequest> for Stat {
+ type Error = ();
+
+ fn try_from(value: &HeartbeatRequest) -> Result<Self, Self::Error> {
+ let HeartbeatRequest {
+ header,
+ peer,
+ is_leader,
+ node_stat,
+ region_stats,
+ ..
+ } = value;
+
+ match (header, peer, node_stat) {
+ (Some(header), Some(peer), Some(node_stat)) => Ok(Self {
+ timestamp_millis: time_util::current_time_millis(),
+ cluster_id: header.cluster_id,
+ id: peer.id,
+ addr: peer.addr.clone(),
+ is_leader: *is_leader,
+ rcus: node_stat.rcus,
+ wcus: node_stat.wcus,
+ table_num: node_stat.table_num,
+ region_num: node_stat.region_num,
+ cpu_usage: node_stat.cpu_usage,
+ load: node_stat.load,
+ read_io_rate: node_stat.read_io_rate,
+ write_io_rate: node_stat.write_io_rate,
+ region_stats: region_stats.iter().map(RegionStat::from).collect(),
+ }),
+ _ => Err(()),
+ }
+ }
+}
+
+impl From<&api::v1::meta::RegionStat> for RegionStat {
+ fn from(value: &api::v1::meta::RegionStat) -> Self {
+ let table = value.table_name.as_ref();
+ Self {
+ id: value.region_id,
+ catalog: table.map_or("", |t| &t.catalog_name).to_string(),
+ schema: table.map_or("", |t| &t.schema_name).to_string(),
+ table: table.map_or("", |t| &t.table_name).to_string(),
+ rcus: value.rcus,
+ wcus: value.wcus,
+ approximate_bytes: value.approximate_bytes,
+ approximate_rows: value.approximate_rows,
+ }
+ }
+}
diff --git a/src/meta-srv/src/handler/persist_stats_handler.rs b/src/meta-srv/src/handler/persist_stats_handler.rs
new file mode 100644
index 000000000000..df3010a80f2e
--- /dev/null
+++ b/src/meta-srv/src/handler/persist_stats_handler.rs
@@ -0,0 +1,40 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use api::v1::meta::HeartbeatRequest;
+
+use crate::error::Result;
+use crate::handler::{HeartbeatAccumulator, HeartbeatHandler};
+use crate::metasrv::Context;
+
+#[derive(Default)]
+pub struct PersistStatsHandler;
+
+#[async_trait::async_trait]
+impl HeartbeatHandler for PersistStatsHandler {
+ async fn handle(
+ &self,
+ _req: &HeartbeatRequest,
+ ctx: &Context,
+ acc: &mut HeartbeatAccumulator,
+ ) -> Result<()> {
+ if ctx.is_skip_all() || acc.stats.is_empty() {
+ return Ok(());
+ }
+
+ // TODO(jiachun): remove stats from `acc` and persist to store
+
+ Ok(())
+ }
+}
diff --git a/src/meta-srv/src/handler/response_header.rs b/src/meta-srv/src/handler/response_header_handler.rs
similarity index 99%
rename from src/meta-srv/src/handler/response_header.rs
rename to src/meta-srv/src/handler/response_header_handler.rs
index 598e10305896..c81f3e013c65 100644
--- a/src/meta-srv/src/handler/response_header.rs
+++ b/src/meta-srv/src/handler/response_header_handler.rs
@@ -18,6 +18,7 @@ use crate::error::Result;
use crate::handler::{HeartbeatAccumulator, HeartbeatHandler};
use crate::metasrv::Context;
+#[derive(Default)]
pub struct ResponseHeaderHandler;
#[async_trait::async_trait]
diff --git a/src/meta-srv/src/metasrv.rs b/src/meta-srv/src/metasrv.rs
index 4359b04d1d09..0a042ed58207 100644
--- a/src/meta-srv/src/metasrv.rs
+++ b/src/meta-srv/src/metasrv.rs
@@ -20,10 +20,10 @@ use common_telemetry::{info, warn};
use serde::{Deserialize, Serialize};
use crate::election::Election;
-use crate::handler::check_leader::CheckLeaderHandler;
-use crate::handler::datanode_lease::DatanodeLeaseHandler;
-use crate::handler::response_header::ResponseHeaderHandler;
-use crate::handler::HeartbeatHandlerGroup;
+use crate::handler::{
+ CheckLeaderHandler, CollectStatsHandler, HeartbeatHandlerGroup, KeepLeaseHandler,
+ PersistStatsHandler, ResponseHeaderHandler,
+};
use crate::selector::lease_based::LeaseBasedSelector;
use crate::selector::Selector;
use crate::sequence::{Sequence, SequenceRef};
@@ -92,14 +92,27 @@ impl MetaSrv {
kv_store: KvStoreRef,
selector: Option<SelectorRef>,
election: Option<ElectionRef>,
+ handler_group: Option<HeartbeatHandlerGroup>,
) -> Self {
let started = Arc::new(AtomicBool::new(false));
let table_id_sequence = Arc::new(Sequence::new(TABLE_ID_SEQ, 1024, 10, kv_store.clone()));
let selector = selector.unwrap_or_else(|| Arc::new(LeaseBasedSelector {}));
- let handler_group = HeartbeatHandlerGroup::default();
- handler_group.add_handler(ResponseHeaderHandler).await;
- handler_group.add_handler(CheckLeaderHandler).await;
- handler_group.add_handler(DatanodeLeaseHandler).await;
+ let handler_group = match handler_group {
+ Some(hg) => hg,
+ None => {
+ let hg = HeartbeatHandlerGroup::default();
+ let kv_store = kv_store.clone();
+ hg.add_handler(ResponseHeaderHandler::default()).await;
+ // `KeepLeaseHandler` should preferably be in front of `CheckLeaderHandler`,
+ // because even if the current meta-server node is no longer the leader it can
+ // still help the datanode to keep lease.
+ hg.add_handler(KeepLeaseHandler::new(kv_store)).await;
+ hg.add_handler(CheckLeaderHandler::default()).await;
+ hg.add_handler(CollectStatsHandler::default()).await;
+ hg.add_handler(PersistStatsHandler::default()).await;
+ hg
+ }
+ };
Self {
started,
diff --git a/src/meta-srv/src/mocks.rs b/src/meta-srv/src/mocks.rs
index 6d7203c74002..a59c7d608d07 100644
--- a/src/meta-srv/src/mocks.rs
+++ b/src/meta-srv/src/mocks.rs
@@ -52,7 +52,7 @@ pub async fn mock(
selector: Option<SelectorRef>,
) -> MockInfo {
let server_addr = opts.server_addr.clone();
- let meta_srv = MetaSrv::new(opts, kv_store, selector, None).await;
+ let meta_srv = MetaSrv::new(opts, kv_store, selector, None, None).await;
let (client, server) = tokio::io::duplex(1024);
tokio::spawn(async move {
tonic::transport::Server::builder()
diff --git a/src/meta-srv/src/service/heartbeat.rs b/src/meta-srv/src/service/heartbeat.rs
index 81fadbb6a5ce..25216e1546d7 100644
--- a/src/meta-srv/src/service/heartbeat.rs
+++ b/src/meta-srv/src/service/heartbeat.rs
@@ -156,7 +156,7 @@ mod tests {
#[tokio::test]
async fn test_ask_leader() {
let kv_store = Arc::new(MemStore::new());
- let meta_srv = MetaSrv::new(MetaSrvOptions::default(), kv_store, None, None).await;
+ let meta_srv = MetaSrv::new(MetaSrvOptions::default(), kv_store, None, None, None).await;
let req = AskLeaderRequest {
header: Some(RequestHeader::new((1, 1))),
diff --git a/src/meta-srv/src/service/store.rs b/src/meta-srv/src/service/store.rs
index 4517d6590ad5..4f0f355c6cdb 100644
--- a/src/meta-srv/src/service/store.rs
+++ b/src/meta-srv/src/service/store.rs
@@ -92,7 +92,7 @@ mod tests {
#[tokio::test]
async fn test_range() {
let kv_store = Arc::new(MemStore::new());
- let meta_srv = MetaSrv::new(MetaSrvOptions::default(), kv_store, None, None).await;
+ let meta_srv = MetaSrv::new(MetaSrvOptions::default(), kv_store, None, None, None).await;
let req = RangeRequest::default();
let res = meta_srv.range(req.into_request()).await;
@@ -102,7 +102,7 @@ mod tests {
#[tokio::test]
async fn test_put() {
let kv_store = Arc::new(MemStore::new());
- let meta_srv = MetaSrv::new(MetaSrvOptions::default(), kv_store, None, None).await;
+ let meta_srv = MetaSrv::new(MetaSrvOptions::default(), kv_store, None, None, None).await;
let req = PutRequest::default();
let res = meta_srv.put(req.into_request()).await;
@@ -112,7 +112,7 @@ mod tests {
#[tokio::test]
async fn test_batch_put() {
let kv_store = Arc::new(MemStore::new());
- let meta_srv = MetaSrv::new(MetaSrvOptions::default(), kv_store, None, None).await;
+ let meta_srv = MetaSrv::new(MetaSrvOptions::default(), kv_store, None, None, None).await;
let req = BatchPutRequest::default();
let res = meta_srv.batch_put(req.into_request()).await;
@@ -122,7 +122,7 @@ mod tests {
#[tokio::test]
async fn test_compare_and_put() {
let kv_store = Arc::new(MemStore::new());
- let meta_srv = MetaSrv::new(MetaSrvOptions::default(), kv_store, None, None).await;
+ let meta_srv = MetaSrv::new(MetaSrvOptions::default(), kv_store, None, None, None).await;
let req = CompareAndPutRequest::default();
let res = meta_srv.compare_and_put(req.into_request()).await;
@@ -132,7 +132,7 @@ mod tests {
#[tokio::test]
async fn test_delete_range() {
let kv_store = Arc::new(MemStore::new());
- let meta_srv = MetaSrv::new(MetaSrvOptions::default(), kv_store, None, None).await;
+ let meta_srv = MetaSrv::new(MetaSrvOptions::default(), kv_store, None, None, None).await;
let req = DeleteRangeRequest::default();
let res = meta_srv.delete_range(req.into_request()).await;
@@ -142,7 +142,7 @@ mod tests {
#[tokio::test]
async fn test_move_value() {
let kv_store = Arc::new(MemStore::new());
- let meta_srv = MetaSrv::new(MetaSrvOptions::default(), kv_store, None, None).await;
+ let meta_srv = MetaSrv::new(MetaSrvOptions::default(), kv_store, None, None, None).await;
let req = MoveValueRequest::default();
let res = meta_srv.move_value(req.into_request()).await;
|
feat
|
collect stats from heartbeats (#833)
|
cb74f1ac34ba9586c8cdce8ca3fbb2f8a39aa831
|
2022-04-25 14:31:55
|
Lei, Huang
|
feat: Add sql parser definition and trivial SHOW DATABASE implementation (#8)
| false
|
diff --git a/.gitignore b/.gitignore
index 9aa15774b47f..65f4835c0e21 100644
--- a/.gitignore
+++ b/.gitignore
@@ -7,3 +7,11 @@
# These are backup files generated by rustfmt
**/*.rs.bk
+
+debug/
+
+# MSVC Windows builds of rustc generate these, which store debugging information
+*.pdb
+
+# JetBrains IDE config directory
+.idea/
\ No newline at end of file
diff --git a/Cargo.lock b/Cargo.lock
index 64e3fa91ca44..9cade898fee4 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -118,6 +118,8 @@ dependencies = [
name = "sql"
version = "0.1.0"
dependencies = [
+ "query",
+ "snafu",
"sqlparser",
]
diff --git a/src/query/src/lib.rs b/src/query/src/lib.rs
index c7de888be0fb..7af252fa232f 100644
--- a/src/query/src/lib.rs
+++ b/src/query/src/lib.rs
@@ -1,4 +1,4 @@
-mod executor;
-mod logical_optimizer;
-mod physical_optimizer;
-mod physical_planner;
+pub mod executor;
+pub mod logical_optimizer;
+pub mod physical_optimizer;
+pub mod physical_planner;
diff --git a/src/sql/Cargo.toml b/src/sql/Cargo.toml
index 01cdc0da6dea..70bd188d093e 100644
--- a/src/sql/Cargo.toml
+++ b/src/sql/Cargo.toml
@@ -6,4 +6,6 @@ edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
-sqlparser = "0.16.0"
+query = { path = "../query" }
+snafu = "0.7.0"
+sqlparser = "0.16.0"
\ No newline at end of file
diff --git a/src/sql/src/dialect.rs b/src/sql/src/dialect.rs
new file mode 100644
index 000000000000..7bfbbf52b9db
--- /dev/null
+++ b/src/sql/src/dialect.rs
@@ -0,0 +1 @@
+// todo(hl) wrap sqlparser dialects
diff --git a/src/sql/src/errors.rs b/src/sql/src/errors.rs
new file mode 100644
index 000000000000..621f9efdc5d7
--- /dev/null
+++ b/src/sql/src/errors.rs
@@ -0,0 +1,20 @@
+use snafu::prelude::*;
+use sqlparser::parser::ParserError as SpParserError;
+
+/// SQL parser errors.
+#[derive(Debug, Snafu)]
+#[snafu(visibility(pub(crate)))]
+pub enum ParserError {
+ #[snafu(display("SQL statement is not supported: {sql}"))]
+ Unsupported { sql: String },
+
+ #[snafu(display(
+ "Unexpected token while parsing SQL statement: {sql}, expected: {expected}, found: {actual}"
+ ))]
+ Unexpected {
+ sql: String,
+ expected: String,
+ actual: String,
+ source: SpParserError,
+ },
+}
diff --git a/src/sql/src/lib.rs b/src/sql/src/lib.rs
index c289ce327fe3..8d9671e9e213 100644
--- a/src/sql/src/lib.rs
+++ b/src/sql/src/lib.rs
@@ -1,3 +1,10 @@
+#![feature(assert_matches)]
+
+extern crate core;
+
mod ast;
-mod parser;
+mod dialect;
+mod errors;
+pub mod parser;
mod planner;
+mod statements;
diff --git a/src/sql/src/parser.rs b/src/sql/src/parser.rs
index 8b137891791f..963e21d3c0fa 100644
--- a/src/sql/src/parser.rs
+++ b/src/sql/src/parser.rs
@@ -1 +1,224 @@
+use snafu::ResultExt;
+use sqlparser::dialect::Dialect;
+use sqlparser::keywords::Keyword;
+use sqlparser::parser::Parser;
+use sqlparser::tokenizer::{Token, Tokenizer};
+use crate::errors;
+use crate::statements::show_kind::ShowKind;
+use crate::statements::statement::Statement;
+use crate::statements::statement_show_database::SqlShowDatabase;
+
+/// Greptime SQL parser context, a simple wrapper for the DataFusion SQL parser.
+pub struct ParserContext<'a> {
+ pub(crate) parser: Parser<'a>,
+ pub(crate) sql: &'a str,
+}
+
+impl<'a> ParserContext<'a> {
+ /// Parses SQL with given dialect
+ pub fn create_with_dialect(
+ sql: &'a str,
+ dialect: &dyn Dialect,
+ ) -> Result<Vec<Statement>, errors::ParserError> {
+ let mut stmts: Vec<Statement> = Vec::new();
+ let mut tokenizer = Tokenizer::new(dialect, sql);
+
+ let tokens: Vec<Token> = tokenizer.tokenize().unwrap();
+
+ let mut parser_ctx = ParserContext {
+ sql,
+ parser: Parser::new(tokens, dialect),
+ };
+
+ let mut expecting_statement_delimiter = false;
+ loop {
+ // ignore empty statements (between successive statement delimiters)
+ while parser_ctx.parser.consume_token(&Token::SemiColon) {
+ expecting_statement_delimiter = false;
+ }
+
+ if parser_ctx.parser.peek_token() == Token::EOF {
+ break;
+ }
+ if expecting_statement_delimiter {
+ return parser_ctx.unsupported();
+ }
+
+ let statement = parser_ctx.parse_statement()?;
+ stmts.push(statement);
+ expecting_statement_delimiter = true;
+ }
+
+ Ok(stmts)
+ }
+
+ /// Parses parser context to a set of statements.
+ pub fn parse_statement(&mut self) -> Result<Statement, errors::ParserError> {
+ match self.parser.peek_token() {
+ Token::Word(w) => {
+ match w.keyword {
+ Keyword::CREATE => {
+ self.parser.next_token();
+ self.parse_create()
+ }
+
+ Keyword::EXPLAIN => {
+ self.parser.next_token();
+ self.parse_explain()
+ }
+
+ Keyword::SHOW => {
+ self.parser.next_token();
+ self.parse_show()
+ }
+
+ Keyword::INSERT => self.parse_insert(),
+
+ Keyword::SELECT | Keyword::WITH | Keyword::VALUES => self.parse_query(),
+
+ // todo(hl) support more statements.
+ _ => self.unsupported(),
+ }
+ }
+ Token::LParen => self.parse_query(),
+ _ => self.unsupported(),
+ }
+ }
+
+ /// Raises an "unsupported statement" error.
+ pub fn unsupported<T>(&self) -> Result<T, errors::ParserError> {
+ Err(errors::ParserError::Unsupported {
+ sql: self.sql.to_string(),
+ })
+ }
+
+ /// Parses SHOW statements
+    /// todo(hl) support `show table`/`show settings`/`show create`/`show users` etc.
+ fn parse_show(&mut self) -> Result<Statement, errors::ParserError> {
+ if self.consume_token("DATABASES") || self.consume_token("SCHEMAS") {
+ Ok(self.parse_show_databases()?)
+ } else {
+ self.unsupported()
+ }
+ }
+
+ fn parse_explain(&mut self) -> Result<Statement, errors::ParserError> {
+ todo!()
+ }
+
+ fn parse_insert(&mut self) -> Result<Statement, errors::ParserError> {
+ todo!()
+ }
+
+ fn parse_query(&mut self) -> Result<Statement, errors::ParserError> {
+ todo!()
+ }
+
+ fn parse_create(&mut self) -> Result<Statement, errors::ParserError> {
+ todo!()
+ }
+
+ pub fn consume_token(&mut self, expected: &str) -> bool {
+ if self.parser.peek_token().to_string().to_uppercase() == *expected.to_uppercase() {
+ self.parser.next_token();
+ true
+ } else {
+ false
+ }
+ }
+
+ /// Parses `SHOW DATABASES` statement.
+ pub fn parse_show_databases(&mut self) -> Result<Statement, errors::ParserError> {
+ let tok = self.parser.next_token();
+ match &tok {
+ Token::EOF | Token::SemiColon => Ok(Statement::ShowDatabases(SqlShowDatabase::new(
+ ShowKind::All,
+ ))),
+ Token::Word(w) => match w.keyword {
+ Keyword::LIKE => Ok(Statement::ShowDatabases(SqlShowDatabase::new(
+ ShowKind::Like(
+ self.parser
+ .parse_identifier()
+ .context(errors::UnexpectedSnafu {
+ sql: self.sql,
+ expected: "LIKE",
+ actual: tok.to_string(),
+ })
+ .unwrap(),
+ ),
+ ))),
+ Keyword::WHERE => Ok(Statement::ShowDatabases(SqlShowDatabase::new(
+ ShowKind::Where(self.parser.parse_expr().context(errors::UnexpectedSnafu {
+ sql: self.sql.to_string(),
+ expected: "some valid expression".to_string(),
+ actual: self.parser.peek_token().to_string(),
+ })?),
+ ))),
+ _ => self.unsupported(),
+ },
+ _ => self.unsupported(),
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::assert_matches::assert_matches;
+
+ use sqlparser::dialect::GenericDialect;
+
+ use super::*;
+
+ #[test]
+ pub fn test_show_database_all() {
+ let sql = "SHOW DATABASES";
+ let result = ParserContext::create_with_dialect(sql, &GenericDialect {});
+ let stmts = result.unwrap();
+ assert_eq!(1, stmts.len());
+
+ assert_matches!(
+ &stmts[0],
+ Statement::ShowDatabases(SqlShowDatabase {
+ kind: ShowKind::All
+ })
+ );
+ }
+
+ #[test]
+ pub fn test_show_database_like() {
+ let sql = "SHOW DATABASES LIKE test_database";
+ let result = ParserContext::create_with_dialect(sql, &GenericDialect {});
+ let stmts = result.unwrap();
+ assert_eq!(1, stmts.len());
+
+ assert_matches!(
+ &stmts[0],
+ Statement::ShowDatabases(SqlShowDatabase {
+ kind: ShowKind::Like(sqlparser::ast::Ident {
+ value: _,
+ quote_style: None,
+ })
+ })
+ );
+ }
+
+ #[test]
+ pub fn test_show_database_where() {
+ let sql = "SHOW DATABASES WHERE Database LIKE '%whatever1%' OR Database LIKE '%whatever2%'";
+ let result = ParserContext::create_with_dialect(sql, &GenericDialect {});
+ let stmts = result.unwrap();
+ assert_eq!(1, stmts.len());
+
+ assert_matches!(
+ &stmts[0],
+ Statement::ShowDatabases(SqlShowDatabase {
+ kind: ShowKind::Where(sqlparser::ast::Expr::BinaryOp {
+ left: _,
+ right: _,
+ op: sqlparser::ast::BinaryOperator::Or,
+ })
+ })
+ );
+ }
+}
diff --git a/src/sql/src/statements/mod.rs b/src/sql/src/statements/mod.rs
new file mode 100644
index 000000000000..2b2ac6579ab5
--- /dev/null
+++ b/src/sql/src/statements/mod.rs
@@ -0,0 +1,3 @@
+pub mod show_kind;
+pub mod statement;
+pub mod statement_show_database;
diff --git a/src/sql/src/statements/show_kind.rs b/src/sql/src/statements/show_kind.rs
new file mode 100644
index 000000000000..228c9e83cfde
--- /dev/null
+++ b/src/sql/src/statements/show_kind.rs
@@ -0,0 +1,10 @@
+use sqlparser::ast::Expr;
+use sqlparser::ast::Ident;
+
+/// Show kind for SQL expressions like `SHOW DATABASE` or `SHOW TABLE`
+#[derive(Debug, Clone, PartialEq)]
+pub enum ShowKind {
+ All,
+ Like(Ident),
+ Where(Expr),
+}
diff --git a/src/sql/src/statements/statement.rs b/src/sql/src/statements/statement.rs
new file mode 100644
index 000000000000..4227e6f6269f
--- /dev/null
+++ b/src/sql/src/statements/statement.rs
@@ -0,0 +1,18 @@
+use crate::statements::statement_show_database::SqlShowDatabase;
+
+/// Tokens parsed by `DFParser` are converted into these values.
+#[derive(Debug, Clone, PartialEq)]
+pub enum Statement {
+ // Databases.
+ ShowDatabases(SqlShowDatabase),
+}
+
+/// Comment hints from SQL.
+/// It'll be enabled when using `--comment` in mysql client.
+/// Eg: `SELECT * FROM system.number LIMIT 1; -- { ErrorCode 25 }`
+#[derive(Debug, Clone, PartialEq)]
+pub struct Hint {
+ pub error_code: Option<u16>,
+ pub comment: String,
+ pub prefix: String,
+}
diff --git a/src/sql/src/statements/statement_show_database.rs b/src/sql/src/statements/statement_show_database.rs
new file mode 100644
index 000000000000..b792507e6da4
--- /dev/null
+++ b/src/sql/src/statements/statement_show_database.rs
@@ -0,0 +1,14 @@
+use crate::statements::show_kind::ShowKind;
+
+/// SQL structure for `SHOW DATABASES`.
+#[derive(Debug, Clone, PartialEq)]
+pub struct SqlShowDatabase {
+ pub kind: ShowKind,
+}
+
+impl SqlShowDatabase {
+ /// Creates a statement for `SHOW DATABASES`
+ pub fn new(kind: ShowKind) -> Self {
+ SqlShowDatabase { kind }
+ }
+}
|
feat
|
Add sql parser definition and trivial SHOW DATABASE implementation (#8)
|
f382a7695f6a6b18f98dd0c45faf6d943e60e92b
|
2024-07-31 09:37:34
|
Yingwen
|
perf: reduce lock scope and improve log (#4453)
| false
|
diff --git a/src/mito2/src/read.rs b/src/mito2/src/read.rs
index 6496c80f852b..04a34fc9ac56 100644
--- a/src/mito2/src/read.rs
+++ b/src/mito2/src/read.rs
@@ -738,10 +738,14 @@ pub(crate) struct ScannerMetrics {
prepare_scan_cost: Duration,
/// Duration to build parts.
build_parts_cost: Duration,
+ /// Duration to build the (merge) reader.
+ build_reader_cost: Duration,
/// Duration to scan data.
scan_cost: Duration,
/// Duration to convert batches.
convert_cost: Duration,
+ /// Duration while waiting for `yield`.
+ yield_cost: Duration,
/// Duration of the scan.
total_cost: Duration,
/// Number of batches returned.
@@ -766,12 +770,18 @@ impl ScannerMetrics {
/// Observes metrics on scanner finish.
fn observe_metrics_on_finish(&self) {
+ READ_STAGE_ELAPSED
+ .with_label_values(&["build_reader"])
+ .observe(self.build_reader_cost.as_secs_f64());
READ_STAGE_ELAPSED
.with_label_values(&["convert_rb"])
.observe(self.convert_cost.as_secs_f64());
READ_STAGE_ELAPSED
.with_label_values(&["scan"])
.observe(self.scan_cost.as_secs_f64());
+ READ_STAGE_ELAPSED
+ .with_label_values(&["yield"])
+ .observe(self.yield_cost.as_secs_f64());
READ_STAGE_ELAPSED
.with_label_values(&["total"])
.observe(self.total_cost.as_secs_f64());
diff --git a/src/mito2/src/read/scan_region.rs b/src/mito2/src/read/scan_region.rs
index dfe78795cae1..a4c1f0c1b943 100644
--- a/src/mito2/src/read/scan_region.rs
+++ b/src/mito2/src/read/scan_region.rs
@@ -16,7 +16,7 @@
use std::fmt;
use std::sync::Arc;
-use std::time::Instant;
+use std::time::{Duration, Instant};
use common_error::ext::BoxedError;
use common_recordbatch::SendableRecordBatchStream;
@@ -607,7 +607,9 @@ impl ScanInput {
&self,
collector: &mut impl FileRangeCollector,
) -> Result<()> {
+ let mut file_prune_cost = Duration::ZERO;
for file in &self.files {
+ let prune_start = Instant::now();
let res = self
.access_layer
.read_sst(file.clone())
@@ -620,6 +622,7 @@ impl ScanInput {
.expected_metadata(Some(self.mapper.metadata().clone()))
.build_reader_input()
.await;
+ file_prune_cost += prune_start.elapsed();
let (mut file_range_ctx, row_groups) = match res {
Ok(x) => x,
Err(e) => {
@@ -655,6 +658,13 @@ impl ScanInput {
READ_SST_COUNT.observe(self.files.len() as f64);
+ common_telemetry::debug!(
+ "Region {} prune {} files, cost is {:?}",
+ self.mapper.metadata().region_id,
+ self.files.len(),
+ file_prune_cost
+ );
+
Ok(())
}
@@ -713,7 +723,7 @@ pub(crate) type FileRangesGroup = SmallVec<[Vec<FileRange>; 4]>;
/// A partition of a scanner to read.
/// It contains memtables and file ranges to scan.
-#[derive(Default)]
+#[derive(Clone, Default)]
pub(crate) struct ScanPart {
/// Memtable ranges to scan.
pub(crate) memtable_ranges: Vec<MemtableRange>,
@@ -845,10 +855,10 @@ impl ScanPartList {
pub(crate) struct StreamContext {
/// Input memtables and files.
pub(crate) input: ScanInput,
- /// Parts to scan.
+ /// Parts to scan and the cost to build parts.
/// The scanner builds parts to scan from the input lazily.
/// The mutex is used to ensure the parts are only built once.
- pub(crate) parts: Mutex<ScanPartList>,
+ pub(crate) parts: Mutex<(ScanPartList, Duration)>,
// Metrics:
/// The start time of the query.
@@ -862,7 +872,7 @@ impl StreamContext {
Self {
input,
- parts: Mutex::new(ScanPartList::default()),
+ parts: Mutex::new((ScanPartList::default(), Duration::default())),
query_start,
}
}
@@ -878,11 +888,11 @@ impl StreamContext {
DisplayFormatType::Default => write!(
f,
"partition_count={} ({} memtable ranges, {} file ranges)",
- inner.len(),
- inner.num_mem_ranges(),
- inner.num_file_ranges()
+ inner.0.len(),
+ inner.0.num_mem_ranges(),
+ inner.0.num_file_ranges()
)?,
- DisplayFormatType::Verbose => write!(f, "{:?}", &*inner)?,
+ DisplayFormatType::Verbose => write!(f, "{:?}", inner.0)?,
},
Err(_) => write!(f, "<locked>")?,
}
diff --git a/src/mito2/src/read/seq_scan.rs b/src/mito2/src/read/seq_scan.rs
index 2e42c388c458..27dc6cdbdcd1 100644
--- a/src/mito2/src/read/seq_scan.rs
+++ b/src/mito2/src/read/seq_scan.rs
@@ -16,7 +16,7 @@
use std::fmt;
use std::sync::Arc;
-use std::time::Instant;
+use std::time::{Duration, Instant};
use async_stream::try_stream;
use common_error::ext::BoxedError;
@@ -162,11 +162,11 @@ impl SeqScan {
// initialize parts list
let mut parts = stream_ctx.parts.lock().await;
Self::maybe_init_parts(&stream_ctx.input, &mut parts, metrics).await?;
- let parts_len = parts.len();
+ let parts_len = parts.0.len();
let mut sources = Vec::with_capacity(parts_len);
for id in 0..parts_len {
- let Some(part) = parts.get_part(id) else {
+ let Some(part) = parts.0.get_part(id) else {
return Ok(None);
};
@@ -185,17 +185,32 @@ impl SeqScan {
semaphore: Arc<Semaphore>,
metrics: &mut ScannerMetrics,
) -> Result<Option<BoxedBatchReader>> {
- let mut parts = stream_ctx.parts.lock().await;
- Self::maybe_init_parts(&stream_ctx.input, &mut parts, metrics).await?;
-
let mut sources = Vec::new();
- let Some(part) = parts.get_part(range_id) else {
- return Ok(None);
+ let build_start = {
+ let mut parts = stream_ctx.parts.lock().await;
+ Self::maybe_init_parts(&stream_ctx.input, &mut parts, metrics).await?;
+
+ let Some(part) = parts.0.get_part(range_id) else {
+ return Ok(None);
+ };
+
+ let build_start = Instant::now();
+ Self::build_part_sources(part, &mut sources, stream_ctx.input.series_row_selector)?;
+
+ build_start
};
- Self::build_part_sources(part, &mut sources, stream_ctx.input.series_row_selector)?;
+ let maybe_reader = Self::build_reader_from_sources(stream_ctx, sources, semaphore).await;
+ let build_reader_cost = build_start.elapsed();
+ metrics.build_reader_cost += build_reader_cost;
+ common_telemetry::debug!(
+ "Build reader region: {}, range_id: {}, from sources, build_reader_cost: {:?}",
+ stream_ctx.input.mapper.metadata().region_id,
+ range_id,
+ build_reader_cost
+ );
- Self::build_reader_from_sources(stream_ctx, sources, semaphore).await
+ maybe_reader
}
async fn build_reader_from_sources(
@@ -290,7 +305,9 @@ impl SeqScan {
let convert_start = Instant::now();
let record_batch = stream_ctx.input.mapper.convert(&batch, cache)?;
metrics.convert_cost += convert_start.elapsed();
+ let yield_start = Instant::now();
yield record_batch;
+ metrics.yield_cost += yield_start.elapsed();
fetch_start = Instant::now();
}
@@ -350,7 +367,7 @@ impl SeqScan {
Self::maybe_init_parts(&stream_ctx.input, &mut parts, &mut metrics).await
.map_err(BoxedError::new)
.context(ExternalSnafu)?;
- parts.len()
+ parts.0.len()
};
for id in (0..parts_len).skip(partition).step_by(num_partitions) {
@@ -381,7 +398,9 @@ impl SeqScan {
let convert_start = Instant::now();
let record_batch = stream_ctx.input.mapper.convert(&batch, cache)?;
metrics.convert_cost += convert_start.elapsed();
+ let yield_start = Instant::now();
yield record_batch;
+ metrics.yield_cost += yield_start.elapsed();
fetch_start = Instant::now();
}
@@ -389,12 +408,13 @@ impl SeqScan {
metrics.total_cost = stream_ctx.query_start.elapsed();
metrics.observe_metrics_on_finish();
- debug!(
- "Seq scan finished, region_id: {:?}, partition: {}, metrics: {:?}, first_poll: {:?}",
+ common_telemetry::debug!(
+ "Seq scan finished, region_id: {}, partition: {}, id: {}, metrics: {:?}, first_poll: {:?}",
stream_ctx.input.mapper.metadata().region_id,
partition,
+ id,
metrics,
- first_poll
+ first_poll,
);
}
};
@@ -410,10 +430,10 @@ impl SeqScan {
/// Initializes parts if they are not built yet.
async fn maybe_init_parts(
input: &ScanInput,
- part_list: &mut ScanPartList,
+ part_list: &mut (ScanPartList, Duration),
metrics: &mut ScannerMetrics,
) -> Result<()> {
- if part_list.is_none() {
+ if part_list.0.is_none() {
let now = Instant::now();
let mut distributor = SeqDistributor::default();
input.prune_file_ranges(&mut distributor).await?;
@@ -422,9 +442,16 @@ impl SeqScan {
Some(input.mapper.column_ids()),
input.predicate.clone(),
);
- part_list.set_parts(distributor.build_parts(input.parallelism.parallelism));
+ part_list
+ .0
+ .set_parts(distributor.build_parts(input.parallelism.parallelism));
+ let build_part_cost = now.elapsed();
+ part_list.1 = build_part_cost;
- metrics.observe_init_part(now.elapsed());
+ metrics.observe_init_part(build_part_cost);
+ } else {
+ // Updates the cost of building parts.
+ metrics.build_parts_cost = part_list.1;
}
Ok(())
}
@@ -451,7 +478,11 @@ impl RegionScanner for SeqScan {
impl DisplayAs for SeqScan {
fn fmt_as(&self, t: DisplayFormatType, f: &mut fmt::Formatter) -> fmt::Result {
- write!(f, "SeqScan: ")?;
+ write!(
+ f,
+ "SeqScan: region={}, ",
+ self.stream_ctx.input.mapper.metadata().region_id
+ )?;
self.stream_ctx.format_for_explain(t, f)
}
}
diff --git a/src/mito2/src/read/unordered_scan.rs b/src/mito2/src/read/unordered_scan.rs
index 1de53b40a5e3..1f5eee36d4e0 100644
--- a/src/mito2/src/read/unordered_scan.rs
+++ b/src/mito2/src/read/unordered_scan.rs
@@ -16,7 +16,7 @@
use std::fmt;
use std::sync::Arc;
-use std::time::Instant;
+use std::time::{Duration, Instant};
use async_stream::{stream, try_stream};
use common_error::ext::BoxedError;
@@ -115,11 +115,11 @@ impl UnorderedScan {
.map_err(BoxedError::new)
.context(ExternalSnafu)?;
}
+ metrics.scan_cost += start.elapsed();
let convert_start = Instant::now();
let record_batch = mapper.convert(&batch, cache)?;
metrics.convert_cost += convert_start.elapsed();
- metrics.scan_cost += start.elapsed();
Ok(Some(record_batch))
}
@@ -148,15 +148,21 @@ impl RegionScanner for UnorderedScan {
let stream = try_stream! {
let first_poll = stream_ctx.query_start.elapsed();
- let mut parts = stream_ctx.parts.lock().await;
- maybe_init_parts(&mut parts, &stream_ctx.input, &mut metrics)
- .await
- .map_err(BoxedError::new)
- .context(ExternalSnafu)?;
- let Some(part) = parts.get_part(partition) else {
- return;
+ let part = {
+ let mut parts = stream_ctx.parts.lock().await;
+ maybe_init_parts(&stream_ctx.input, &mut parts, &mut metrics)
+ .await
+ .map_err(BoxedError::new)
+ .context(ExternalSnafu)?;
+                // Clone the part and release the lock.
+ // TODO(yingwen): We might wrap the part in an Arc in the future if cloning is too expensive.
+ let Some(part) = parts.0.get_part(partition).cloned() else {
+ return;
+ };
+ part
};
+ let build_reader_start = Instant::now();
let mapper = &stream_ctx.input.mapper;
let memtable_sources = part
.memtable_ranges
@@ -168,6 +174,7 @@ impl RegionScanner for UnorderedScan {
.collect::<Result<Vec<_>>>()
.map_err(BoxedError::new)
.context(ExternalSnafu)?;
+ metrics.build_reader_cost = build_reader_start.elapsed();
let query_start = stream_ctx.query_start;
let cache = stream_ctx.input.cache_manager.as_deref();
// Scans memtables first.
@@ -175,20 +182,26 @@ impl RegionScanner for UnorderedScan {
while let Some(batch) = Self::fetch_from_source(&mut source, mapper, cache, None, &mut metrics).await? {
metrics.num_batches += 1;
metrics.num_rows += batch.num_rows();
+ let yield_start = Instant::now();
yield batch;
+ metrics.yield_cost += yield_start.elapsed();
}
}
// Then scans file ranges.
let mut reader_metrics = ReaderMetrics::default();
// Safety: UnorderedDistributor::build_parts() ensures this.
for file_range in &part.file_ranges[0] {
+ let build_reader_start = Instant::now();
let reader = file_range.reader(None).await.map_err(BoxedError::new).context(ExternalSnafu)?;
+ metrics.build_reader_cost += build_reader_start.elapsed();
let compat_batch = file_range.compat_batch();
let mut source = Source::PruneReader(reader);
while let Some(batch) = Self::fetch_from_source(&mut source, mapper, cache, compat_batch, &mut metrics).await? {
metrics.num_batches += 1;
metrics.num_rows += batch.num_rows();
+ let yield_start = Instant::now();
yield batch;
+ metrics.yield_cost += yield_start.elapsed();
}
if let Source::PruneReader(mut reader) = source {
reader_metrics.merge_from(reader.metrics());
@@ -213,7 +226,11 @@ impl RegionScanner for UnorderedScan {
impl DisplayAs for UnorderedScan {
fn fmt_as(&self, t: DisplayFormatType, f: &mut fmt::Formatter) -> fmt::Result {
- write!(f, "UnorderedScan: ")?;
+ write!(
+ f,
+ "UnorderedScan: region={}, ",
+ self.stream_ctx.input.mapper.metadata().region_id
+ )?;
self.stream_ctx.format_for_explain(t, f)
}
}
@@ -236,11 +253,11 @@ impl UnorderedScan {
/// Initializes parts if they are not built yet.
async fn maybe_init_parts(
- part_list: &mut ScanPartList,
input: &ScanInput,
+ part_list: &mut (ScanPartList, Duration),
metrics: &mut ScannerMetrics,
) -> Result<()> {
- if part_list.is_none() {
+ if part_list.0.is_none() {
let now = Instant::now();
let mut distributor = UnorderedDistributor::default();
input.prune_file_ranges(&mut distributor).await?;
@@ -249,9 +266,16 @@ async fn maybe_init_parts(
Some(input.mapper.column_ids()),
input.predicate.clone(),
);
- part_list.set_parts(distributor.build_parts(input.parallelism.parallelism));
-
- metrics.observe_init_part(now.elapsed());
+ part_list
+ .0
+ .set_parts(distributor.build_parts(input.parallelism.parallelism));
+ let build_part_cost = now.elapsed();
+ part_list.1 = build_part_cost;
+
+ metrics.observe_init_part(build_part_cost);
+ } else {
+ // Updates the cost of building parts.
+ metrics.build_parts_cost = part_list.1;
}
Ok(())
}
diff --git a/src/query/src/dist_plan/merge_scan.rs b/src/query/src/dist_plan/merge_scan.rs
index 3b17a531db38..8f209a74f7e7 100644
--- a/src/query/src/dist_plan/merge_scan.rs
+++ b/src/query/src/dist_plan/merge_scan.rs
@@ -198,7 +198,7 @@ impl MergeScanExec {
let extensions = self.query_ctx.extensions();
let target_partition = self.target_partition;
- let sub_sgate_metrics_moved = self.sub_stage_metrics.clone();
+ let sub_stage_metrics_moved = self.sub_stage_metrics.clone();
let plan = self.plan.clone();
let stream = Box::pin(stream!({
MERGE_SCAN_REGIONS.observe(regions.len() as f64);
@@ -226,6 +226,7 @@ impl MergeScanExec {
region_id,
plan: plan.clone(),
};
+ let do_get_start = Instant::now();
let mut stream = region_query_handler
.do_get(request)
.await
@@ -234,11 +235,11 @@ impl MergeScanExec {
BoxedError::new(e)
})
.context(ExternalSnafu)?;
+ let do_get_cost = do_get_start.elapsed();
ready_timer.stop();
- let mut poll_duration = Duration::new(0, 0);
-
+ let mut poll_duration = Duration::ZERO;
let mut poll_timer = Instant::now();
while let Some(batch) = stream.next().await {
let poll_elapsed = poll_timer.elapsed();
@@ -249,13 +250,17 @@ impl MergeScanExec {
// to remove metadata and correct column name
let batch = RecordBatch::new(schema.clone(), batch.columns().iter().cloned())?;
metric.record_output_batch_rows(batch.num_rows());
- if let Some(first_consume_timer) = first_consume_timer.as_mut().take() {
+ if let Some(mut first_consume_timer) = first_consume_timer.take() {
first_consume_timer.stop();
}
yield Ok(batch);
// reset poll timer
poll_timer = Instant::now();
}
+ common_telemetry::debug!(
+ "Merge scan stop poll stream, partition: {}, region_id: {}, poll_duration: {:?}, first_consume: {}, do_get_cost: {:?}",
+ partition, region_id, poll_duration, metric.first_consume_time(), do_get_cost
+ );
// process metrics after all data is drained.
if let Some(metrics) = stream.metrics() {
@@ -271,7 +276,7 @@ impl MergeScanExec {
metric.record_greptime_exec_cost(value as usize);
// record metrics from sub sgates
- sub_sgate_metrics_moved.lock().unwrap().push(metrics);
+ sub_stage_metrics_moved.lock().unwrap().push(metrics);
}
MERGE_SCAN_POLL_ELAPSED.observe(poll_duration.as_secs_f64());
diff --git a/tests/cases/distributed/explain/analyze.result b/tests/cases/distributed/explain/analyze.result
index b96883df984f..2f3955c16356 100644
--- a/tests/cases/distributed/explain/analyze.result
+++ b/tests/cases/distributed/explain/analyze.result
@@ -24,6 +24,7 @@ Affected Rows: 3
-- SQLNESS REPLACE (-+) -
-- SQLNESS REPLACE (\s\s+) _
-- SQLNESS REPLACE (peers.*) REDACTED
+-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED
explain analyze SELECT count(*) FROM system_metrics;
+-+-+-+
@@ -35,7 +36,7 @@ explain analyze SELECT count(*) FROM system_metrics;
|_|_|_CoalescePartitionsExec REDACTED
|_|_|_AggregateExec: mode=Partial, gby=[], aggr=[COUNT(system_REDACTED
|_|_|_RepartitionExec: partitioning=REDACTED
-|_|_|_SeqScan: partition_count=1 (1 memtable ranges, 0 file ranges) REDACTED
+|_|_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file ranges) REDACTED
|_|_|_|
|_|_| Total rows: 1_|
+-+-+-+
diff --git a/tests/cases/distributed/explain/analyze.sql b/tests/cases/distributed/explain/analyze.sql
index 1dfbc7166e97..c068e1dc6af8 100644
--- a/tests/cases/distributed/explain/analyze.sql
+++ b/tests/cases/distributed/explain/analyze.sql
@@ -20,6 +20,7 @@ VALUES
-- SQLNESS REPLACE (-+) -
-- SQLNESS REPLACE (\s\s+) _
-- SQLNESS REPLACE (peers.*) REDACTED
+-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED
explain analyze SELECT count(*) FROM system_metrics;
drop table system_metrics;
diff --git a/tests/cases/standalone/common/aggregate/multi_regions.result b/tests/cases/standalone/common/aggregate/multi_regions.result
index de32878d2023..66dcf01f405f 100644
--- a/tests/cases/standalone/common/aggregate/multi_regions.result
+++ b/tests/cases/standalone/common/aggregate/multi_regions.result
@@ -18,6 +18,7 @@ Affected Rows: 0
-- SQLNESS REPLACE (-+) -
-- SQLNESS REPLACE (\s\s+) _
-- SQLNESS REPLACE (peers.*) REDACTED
+-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED
explain analyze
select sum(val) from t group by host;
@@ -33,7 +34,7 @@ select sum(val) from t group by host;
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|_|_|_AggregateExec: mode=Partial, gby=[host@1 as host], aggr=[SUM(t.val)] REDACTED
|_|_|_RepartitionExec: partitioning=REDACTED
-|_|_|_SeqScan: partition_count=0 (0 memtable ranges, 0 file ranges) REDACTED
+|_|_|_SeqScan: region=REDACTED, partition_count=0 (0 memtable ranges, 0 file ranges) REDACTED
|_|_|_|
| 1_| 1_|_ProjectionExec: expr=[SUM(t.val)@1 as SUM(t.val)] REDACTED
|_|_|_AggregateExec: mode=FinalPartitioned, gby=[host@0 as host], aggr=[SUM(t.val)] REDACTED
@@ -42,7 +43,7 @@ select sum(val) from t group by host;
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|_|_|_AggregateExec: mode=Partial, gby=[host@1 as host], aggr=[SUM(t.val)] REDACTED
|_|_|_RepartitionExec: partitioning=REDACTED
-|_|_|_SeqScan: partition_count=0 (0 memtable ranges, 0 file ranges) REDACTED
+|_|_|_SeqScan: region=REDACTED, partition_count=0 (0 memtable ranges, 0 file ranges) REDACTED
|_|_|_|
|_|_| Total rows: 0_|
+-+-+-+
@@ -52,6 +53,7 @@ select sum(val) from t group by host;
-- SQLNESS REPLACE (-+) -
-- SQLNESS REPLACE (\s\s+) _
-- SQLNESS REPLACE (peers.*) REDACTED
+-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED
explain analyze
select sum(val) from t;
@@ -64,9 +66,9 @@ select sum(val) from t;
|_|_|_ProjectionExec: expr=[val@1 as val] REDACTED
|_|_|_MergeScanExec: REDACTED
|_|_|_|
-| 1_| 0_|_SeqScan: partition_count=0 (0 memtable ranges, 0 file ranges) REDACTED
+| 1_| 0_|_SeqScan: region=REDACTED, partition_count=0 (0 memtable ranges, 0 file ranges) REDACTED
|_|_|_|
-| 1_| 1_|_SeqScan: partition_count=0 (0 memtable ranges, 0 file ranges) REDACTED
+| 1_| 1_|_SeqScan: region=REDACTED, partition_count=0 (0 memtable ranges, 0 file ranges) REDACTED
|_|_|_|
|_|_| Total rows: 1_|
+-+-+-+
@@ -77,6 +79,7 @@ select sum(val) from t;
-- SQLNESS REPLACE (-+) -
-- SQLNESS REPLACE (\s\s+) _
-- SQLNESS REPLACE (peers.*) REDACTED
+-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED
explain analyze
select sum(val) from t group by idc;
@@ -92,9 +95,9 @@ select sum(val) from t group by idc;
|_|_|_ProjectionExec: expr=[val@1 as val, idc@3 as idc] REDACTED
|_|_|_MergeScanExec: REDACTED
|_|_|_|
-| 1_| 0_|_SeqScan: partition_count=0 (0 memtable ranges, 0 file ranges) REDACTED
+| 1_| 0_|_SeqScan: region=REDACTED, partition_count=0 (0 memtable ranges, 0 file ranges) REDACTED
|_|_|_|
-| 1_| 1_|_SeqScan: partition_count=0 (0 memtable ranges, 0 file ranges) REDACTED
+| 1_| 1_|_SeqScan: region=REDACTED, partition_count=0 (0 memtable ranges, 0 file ranges) REDACTED
|_|_|_|
|_|_| Total rows: 0_|
+-+-+-+
diff --git a/tests/cases/standalone/common/aggregate/multi_regions.sql b/tests/cases/standalone/common/aggregate/multi_regions.sql
index a924a58f2431..3238cf64d6a9 100644
--- a/tests/cases/standalone/common/aggregate/multi_regions.sql
+++ b/tests/cases/standalone/common/aggregate/multi_regions.sql
@@ -16,6 +16,7 @@ partition on columns (host) (
-- SQLNESS REPLACE (-+) -
-- SQLNESS REPLACE (\s\s+) _
-- SQLNESS REPLACE (peers.*) REDACTED
+-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED
explain analyze
select sum(val) from t group by host;
@@ -24,6 +25,7 @@ select sum(val) from t group by host;
-- SQLNESS REPLACE (-+) -
-- SQLNESS REPLACE (\s\s+) _
-- SQLNESS REPLACE (peers.*) REDACTED
+-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED
explain analyze
select sum(val) from t;
@@ -33,6 +35,7 @@ select sum(val) from t;
-- SQLNESS REPLACE (-+) -
-- SQLNESS REPLACE (\s\s+) _
-- SQLNESS REPLACE (peers.*) REDACTED
+-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED
explain analyze
select sum(val) from t group by idc;
diff --git a/tests/cases/standalone/common/range/nest.result b/tests/cases/standalone/common/range/nest.result
index 0beecbdf2de8..952b9fd8b476 100644
--- a/tests/cases/standalone/common/range/nest.result
+++ b/tests/cases/standalone/common/range/nest.result
@@ -67,6 +67,7 @@ EXPLAIN SELECT ts, host, min(val) RANGE '5s' FROM host ALIGN '5s';
-- SQLNESS REPLACE (\s\s+) _
-- SQLNESS REPLACE (peers.*) REDACTED
-- SQLNESS REPLACE (metrics.*) REDACTED
+-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED
EXPLAIN ANALYZE SELECT ts, host, min(val) RANGE '5s' FROM host ALIGN '5s';
+-+-+-+
@@ -76,7 +77,7 @@ EXPLAIN ANALYZE SELECT ts, host, min(val) RANGE '5s' FROM host ALIGN '5s';
|_|_|_CoalescePartitionsExec REDACTED
|_|_|_MergeScanExec: REDACTED
|_|_|_|
-| 1_| 0_|_SeqScan: partition_count=1 (1 memtable ranges, 0 file ranges) REDACTED
+| 1_| 0_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file ranges) REDACTED
|_|_|_|
|_|_| Total rows: 10_|
+-+-+-+
diff --git a/tests/cases/standalone/common/range/nest.sql b/tests/cases/standalone/common/range/nest.sql
index 50fe3f9305c5..4ee447cfbdef 100644
--- a/tests/cases/standalone/common/range/nest.sql
+++ b/tests/cases/standalone/common/range/nest.sql
@@ -35,6 +35,7 @@ EXPLAIN SELECT ts, host, min(val) RANGE '5s' FROM host ALIGN '5s';
-- SQLNESS REPLACE (\s\s+) _
-- SQLNESS REPLACE (peers.*) REDACTED
-- SQLNESS REPLACE (metrics.*) REDACTED
+-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED
EXPLAIN ANALYZE SELECT ts, host, min(val) RANGE '5s' FROM host ALIGN '5s';
DROP TABLE host;
diff --git a/tests/cases/standalone/common/tql-explain-analyze/analyze.result b/tests/cases/standalone/common/tql-explain-analyze/analyze.result
index ee989101deac..fe8af2e75322 100644
--- a/tests/cases/standalone/common/tql-explain-analyze/analyze.result
+++ b/tests/cases/standalone/common/tql-explain-analyze/analyze.result
@@ -13,6 +13,7 @@ Affected Rows: 3
-- SQLNESS REPLACE (-+) -
-- SQLNESS REPLACE (\s\s+) _
-- SQLNESS REPLACE (peers.*) REDACTED
+-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED
TQL ANALYZE (0, 10, '5s') test;
+-+-+-+
@@ -31,7 +32,7 @@ TQL ANALYZE (0, 10, '5s') test;
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|_|_|_FilterExec: j@1 >= -300000 AND j@1 <= 310000 REDACTED
|_|_|_RepartitionExec: partitioning=REDACTED
-|_|_|_SeqScan: partition_count=1 (1 memtable ranges, 0 file ranges) REDACTED
+|_|_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file ranges) REDACTED
|_|_|_|
|_|_| Total rows: 4_|
+-+-+-+
@@ -43,6 +44,7 @@ TQL ANALYZE (0, 10, '5s') test;
-- SQLNESS REPLACE (-+) -
-- SQLNESS REPLACE (\s\s+) _
-- SQLNESS REPLACE (peers.*) REDACTED
+-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED
TQL ANALYZE (0, 10, '1s', '2s') test;
+-+-+-+
@@ -61,7 +63,7 @@ TQL ANALYZE (0, 10, '1s', '2s') test;
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|_|_|_FilterExec: j@1 >= -2000 AND j@1 <= 12000 REDACTED
|_|_|_RepartitionExec: partitioning=REDACTED
-|_|_|_SeqScan: partition_count=1 (1 memtable ranges, 0 file ranges) REDACTED
+|_|_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file ranges) REDACTED
|_|_|_|
|_|_| Total rows: 4_|
+-+-+-+
@@ -72,6 +74,7 @@ TQL ANALYZE (0, 10, '1s', '2s') test;
-- SQLNESS REPLACE (-+) -
-- SQLNESS REPLACE (\s\s+) _
-- SQLNESS REPLACE (peers.*) REDACTED
+-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED
TQL ANALYZE ('1970-01-01T00:00:00'::timestamp, '1970-01-01T00:00:00'::timestamp + '10 seconds'::interval, '5s') test;
+-+-+-+
@@ -90,7 +93,7 @@ TQL ANALYZE ('1970-01-01T00:00:00'::timestamp, '1970-01-01T00:00:00'::timestamp
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|_|_|_FilterExec: j@1 >= -300000 AND j@1 <= 310000 REDACTED
|_|_|_RepartitionExec: partitioning=REDACTED
-|_|_|_SeqScan: partition_count=1 (1 memtable ranges, 0 file ranges) REDACTED
+|_|_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file ranges) REDACTED
|_|_|_|
|_|_| Total rows: 4_|
+-+-+-+
@@ -103,6 +106,7 @@ TQL ANALYZE ('1970-01-01T00:00:00'::timestamp, '1970-01-01T00:00:00'::timestamp
-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED
-- SQLNESS REPLACE (metrics.*) REDACTED
-- SQLNESS REPLACE (Duration.*) REDACTED
+-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED
TQL ANALYZE VERBOSE (0, 10, '5s') test;
+-+-+-+
@@ -121,7 +125,7 @@ TQL ANALYZE VERBOSE (0, 10, '5s') test;
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|_|_|_FilterExec: j@1 >= -300000 AND j@1 <= 310000 REDACTED
|_|_|_RepartitionExec: partitioning=REDACTED
-|_|_|_SeqScan: partition_count=1 (1 memtable ranges, 0 file ranges) REDACTED
+|_|_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file ranges) REDACTED
|_|_|_|
|_|_| Total rows: 4_|
+-+-+-+
diff --git a/tests/cases/standalone/common/tql-explain-analyze/analyze.sql b/tests/cases/standalone/common/tql-explain-analyze/analyze.sql
index e888ba8d51ad..639e1e8597d9 100644
--- a/tests/cases/standalone/common/tql-explain-analyze/analyze.sql
+++ b/tests/cases/standalone/common/tql-explain-analyze/analyze.sql
@@ -9,6 +9,7 @@ INSERT INTO test VALUES (1, 1, "a"), (1, 1, "b"), (2, 2, "a");
-- SQLNESS REPLACE (-+) -
-- SQLNESS REPLACE (\s\s+) _
-- SQLNESS REPLACE (peers.*) REDACTED
+-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED
TQL ANALYZE (0, 10, '5s') test;
-- 'lookback' parameter is not fully supported, the test has to be updated
@@ -18,6 +19,7 @@ TQL ANALYZE (0, 10, '5s') test;
-- SQLNESS REPLACE (-+) -
-- SQLNESS REPLACE (\s\s+) _
-- SQLNESS REPLACE (peers.*) REDACTED
+-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED
TQL ANALYZE (0, 10, '1s', '2s') test;
-- analyze at 0s, 5s and 10s. No point at 0s.
@@ -26,6 +28,7 @@ TQL ANALYZE (0, 10, '1s', '2s') test;
-- SQLNESS REPLACE (-+) -
-- SQLNESS REPLACE (\s\s+) _
-- SQLNESS REPLACE (peers.*) REDACTED
+-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED
TQL ANALYZE ('1970-01-01T00:00:00'::timestamp, '1970-01-01T00:00:00'::timestamp + '10 seconds'::interval, '5s') test;
-- analyze verbose at 0s, 5s and 10s. No point at 0s.
@@ -36,6 +39,7 @@ TQL ANALYZE ('1970-01-01T00:00:00'::timestamp, '1970-01-01T00:00:00'::timestamp
-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED
-- SQLNESS REPLACE (metrics.*) REDACTED
-- SQLNESS REPLACE (Duration.*) REDACTED
+-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED
TQL ANALYZE VERBOSE (0, 10, '5s') test;
DROP TABLE test;
diff --git a/tests/cases/standalone/optimizer/last_value.result b/tests/cases/standalone/optimizer/last_value.result
index aa9163f1d01d..ab3f12bce126 100644
--- a/tests/cases/standalone/optimizer/last_value.result
+++ b/tests/cases/standalone/optimizer/last_value.result
@@ -27,6 +27,7 @@ Affected Rows: 9
-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED
-- SQLNESS REPLACE (metrics.*) REDACTED
-- SQLNESS REPLACE (partitioning.*) REDACTED
+-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED
explain analyze
select
last_value(host order by ts),
@@ -47,7 +48,7 @@ explain analyze
|_|_|_CoalesceBatchesExec: target_batch_size=8192 REDACTED
|_|_|_AggregateExec: mode=Partial, gby=[host@1 as host], aggr=[last_value(t.host) ORDER BY [t.ts ASC NULLS LAST], last_value(t.not_pk) ORDER BY [t.ts ASC NULLS LAST], last_value(t.val) ORDER BY [t.ts ASC NULLS LAST]] REDACTED
|_|_|_RepartitionExec: REDACTED
-|_|_|_SeqScan: partition_count=1 (1 memtable ranges, 0 file ranges), selector=LastRow REDACTED
+|_|_|_SeqScan: region=REDACTED, partition_count=1 (1 memtable ranges, 0 file ranges), selector=LastRow REDACTED
|_|_|_|
|_|_| Total rows: 4_|
+-+-+-+
diff --git a/tests/cases/standalone/optimizer/last_value.sql b/tests/cases/standalone/optimizer/last_value.sql
index 903551d3b967..c0dd3fb6c9a2 100644
--- a/tests/cases/standalone/optimizer/last_value.sql
+++ b/tests/cases/standalone/optimizer/last_value.sql
@@ -23,6 +23,7 @@ insert into t values
-- SQLNESS REPLACE (RoundRobinBatch.*) REDACTED
-- SQLNESS REPLACE (metrics.*) REDACTED
-- SQLNESS REPLACE (partitioning.*) REDACTED
+-- SQLNESS REPLACE region=\d+\(\d+,\s+\d+\) region=REDACTED
explain analyze
select
last_value(host order by ts),
|
perf
|
reduce lock scope and improve log (#4453)
|
5a5e88353c3e26aac1ab180df8da79be8707dc37
|
2023-06-01 17:47:18
|
Ruihang Xia
|
fix: do not change timestamp index column while planning aggr (#1688)
| false
|
diff --git a/src/promql/src/planner.rs b/src/promql/src/planner.rs
index 3af4197bbdd7..27b0ed54020d 100644
--- a/src/promql/src/planner.rs
+++ b/src/promql/src/planner.rs
@@ -141,9 +141,6 @@ impl PromPlanner {
// convert op and value columns to aggregate exprs
let aggr_exprs = self.create_aggregate_exprs(*op, &input)?;
- // remove time index column from context
- self.ctx.time_index_column = None;
-
// create plan
let group_sort_expr = group_exprs
.clone()
diff --git a/tests-integration/src/tests/promql_test.rs b/tests-integration/src/tests/promql_test.rs
index 61b1f177f010..8cb798d0671c 100644
--- a/tests-integration/src/tests/promql_test.rs
+++ b/tests-integration/src/tests/promql_test.rs
@@ -414,12 +414,12 @@ async fn aggregators_complex_combined_aggrs(instance: Arc<dyn MockInstance>) {
unix_epoch_plus_100s(),
Duration::from_secs(60),
Duration::from_secs(0),
- "+------------+-----------------------------------------------------------------------------------------------------------+\
- \n| job | SUM(http_requests.value) + MIN(http_requests.value) + MAX(http_requests.value) + AVG(http_requests.value) |\
- \n+------------+-----------------------------------------------------------------------------------------------------------+\
- \n| api-server | 1750.0 |\
- \n| app-server | 4550.0 |\
- \n+------------+-----------------------------------------------------------------------------------------------------------+",
+ "+------------+---------------------+-----------------------------------------------------------------------------------------------------------+\
+ \n| job | ts | SUM(http_requests.value) + MIN(http_requests.value) + MAX(http_requests.value) + AVG(http_requests.value) |\
+ \n+------------+---------------------+-----------------------------------------------------------------------------------------------------------+\
+ \n| api-server | 1970-01-01T00:00:00 | 1750.0 |\
+ \n| app-server | 1970-01-01T00:00:00 | 4550.0 |\
+ \n+------------+---------------------+-----------------------------------------------------------------------------------------------------------+",
)
.await;
}
@@ -439,12 +439,12 @@ async fn two_aggregators_combined_aggrs(instance: Arc<dyn MockInstance>) {
unix_epoch_plus_100s(),
Duration::from_secs(60),
Duration::from_secs(0),
- "+------------+-----------------------------------------------------+\
- \n| job | SUM(http_requests.value) + MIN(http_requests.value) |\
- \n+------------+-----------------------------------------------------+\
- \n| api-server | 1100.0 |\
- \n| app-server | 3100.0 |\
- \n+------------+-----------------------------------------------------+",
+ "+------------+---------------------+-----------------------------------------------------+\
+ \n| job | ts | SUM(http_requests.value) + MIN(http_requests.value) |\
+ \n+------------+---------------------+-----------------------------------------------------+\
+ \n| api-server | 1970-01-01T00:00:00 | 1100.0 |\
+ \n| app-server | 1970-01-01T00:00:00 | 3100.0 |\
+ \n+------------+---------------------+-----------------------------------------------------+",
)
.await;
}
|
fix
|
do not change timestamp index column while planning aggr (#1688)
|
1de17aec745659d4de097b8681b9c4f5d4e22c50
|
2024-05-27 09:35:55
|
Ruihang Xia
|
feat: change EXPIRE WHEN to EXPIRE AFTER (#4002)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index d230f05a0ff0..16bf0e911aad 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4195,7 +4195,7 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"
[[package]]
name = "greptime-proto"
version = "0.1.0"
-source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=3cd71167ee067c5679a7fb17cf58bdfbb5487a0d#3cd71167ee067c5679a7fb17cf58bdfbb5487a0d"
+source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=902f75fdd170c572e90b1f640161d90995f20218#902f75fdd170c572e90b1f640161d90995f20218"
dependencies = [
"prost 0.12.4",
"serde",
diff --git a/Cargo.toml b/Cargo.toml
index 17ccf1fd93f0..f135177a14b4 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -120,7 +120,7 @@ etcd-client = { git = "https://github.com/MichaelScofield/etcd-client.git", rev
fst = "0.4.7"
futures = "0.3"
futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "3cd71167ee067c5679a7fb17cf58bdfbb5487a0d" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "902f75fdd170c572e90b1f640161d90995f20218" }
humantime = "2.1"
humantime-serde = "1.1"
itertools = "0.10"
diff --git a/src/common/meta/src/cache/flow/table_flownode.rs b/src/common/meta/src/cache/flow/table_flownode.rs
index a15578a1b8f9..eeaa88128628 100644
--- a/src/common/meta/src/cache/flow/table_flownode.rs
+++ b/src/common/meta/src/cache/flow/table_flownode.rs
@@ -180,7 +180,7 @@ mod tests {
catalog_name: DEFAULT_CATALOG_NAME.to_string(),
flow_name: "my_flow".to_string(),
raw_sql: "sql".to_string(),
- expire_when: "expire".to_string(),
+ expire_after: Some(300),
comment: "comment".to_string(),
options: Default::default(),
},
diff --git a/src/common/meta/src/ddl/create_flow.rs b/src/common/meta/src/ddl/create_flow.rs
index a7fb59b05623..afa437ed6ca4 100644
--- a/src/common/meta/src/ddl/create_flow.rs
+++ b/src/common/meta/src/ddl/create_flow.rs
@@ -18,6 +18,7 @@ use std::collections::BTreeMap;
use api::v1::flow::flow_request::Body as PbFlowRequest;
use api::v1::flow::{CreateRequest, FlowRequest, FlowRequestHeader};
+use api::v1::ExpireAfter;
use async_trait::async_trait;
use common_catalog::format_full_flow_name;
use common_procedure::error::{FromJsonSnafu, ToJsonSnafu};
@@ -283,7 +284,7 @@ impl From<&CreateFlowData> for CreateRequest {
sink_table_name: Some(value.task.sink_table_name.clone().into()),
// Always be true
create_if_not_exists: true,
- expire_when: value.task.expire_when.clone(),
+ expire_after: value.task.expire_after.map(|value| ExpireAfter { value }),
comment: value.task.comment.clone(),
sql: value.task.sql.clone(),
flow_options: value.task.flow_options.clone(),
@@ -297,7 +298,7 @@ impl From<&CreateFlowData> for FlowInfoValue {
catalog_name,
flow_name,
sink_table_name,
- expire_when,
+ expire_after,
comment,
sql,
flow_options: options,
@@ -318,7 +319,7 @@ impl From<&CreateFlowData> for FlowInfoValue {
catalog_name,
flow_name,
raw_sql: sql,
- expire_when,
+ expire_after,
comment,
options,
}
diff --git a/src/common/meta/src/ddl/tests/create_flow.rs b/src/common/meta/src/ddl/tests/create_flow.rs
index 415fc12f62a5..e79fe27b848f 100644
--- a/src/common/meta/src/ddl/tests/create_flow.rs
+++ b/src/common/meta/src/ddl/tests/create_flow.rs
@@ -44,7 +44,7 @@ pub(crate) fn test_create_flow_task(
sink_table_name,
or_replace: false,
create_if_not_exists,
- expire_when: "".to_string(),
+ expire_after: Some(300),
comment: "".to_string(),
sql: "raw_sql".to_string(),
flow_options: Default::default(),
diff --git a/src/common/meta/src/key/flow.rs b/src/common/meta/src/key/flow.rs
index 1682922ab7ce..1f8db5585433 100644
--- a/src/common/meta/src/key/flow.rs
+++ b/src/common/meta/src/key/flow.rs
@@ -328,7 +328,7 @@ mod tests {
sink_table_name,
flownode_ids,
raw_sql: "raw".to_string(),
- expire_when: "expr".to_string(),
+ expire_after: Some(300),
comment: "hi".to_string(),
options: Default::default(),
}
@@ -420,7 +420,7 @@ mod tests {
sink_table_name: another_sink_table_name,
flownode_ids: [(0, 1u64)].into(),
raw_sql: "raw".to_string(),
- expire_when: "expr".to_string(),
+ expire_after: Some(300),
comment: "hi".to_string(),
options: Default::default(),
};
diff --git a/src/common/meta/src/key/flow/flow_info.rs b/src/common/meta/src/key/flow/flow_info.rs
index 0a2be4dea1a2..f08e7c5def56 100644
--- a/src/common/meta/src/key/flow/flow_info.rs
+++ b/src/common/meta/src/key/flow/flow_info.rs
@@ -123,7 +123,8 @@ pub struct FlowInfoValue {
/// The raw sql.
pub(crate) raw_sql: String,
/// The expr of expire.
- pub(crate) expire_when: String,
+ /// Duration in seconds as `i64`.
+ pub(crate) expire_after: Option<i64>,
/// The comment.
pub(crate) comment: String,
/// The options.
diff --git a/src/common/meta/src/rpc/ddl.rs b/src/common/meta/src/rpc/ddl.rs
index e42639c381a7..8e977f1ca5bc 100644
--- a/src/common/meta/src/rpc/ddl.rs
+++ b/src/common/meta/src/rpc/ddl.rs
@@ -28,8 +28,8 @@ use api::v1::meta::{
};
use api::v1::{
AlterExpr, CreateDatabaseExpr, CreateFlowExpr, CreateTableExpr, CreateViewExpr,
- DropDatabaseExpr, DropFlowExpr, DropTableExpr, DropViewExpr, QueryContext as PbQueryContext,
- TruncateTableExpr,
+ DropDatabaseExpr, DropFlowExpr, DropTableExpr, DropViewExpr, ExpireAfter,
+ QueryContext as PbQueryContext, TruncateTableExpr,
};
use base64::engine::general_purpose;
use base64::Engine as _;
@@ -898,7 +898,8 @@ pub struct CreateFlowTask {
pub sink_table_name: TableName,
pub or_replace: bool,
pub create_if_not_exists: bool,
- pub expire_when: String,
+ /// Duration in seconds. Data older than this duration will not be used.
+ pub expire_after: Option<i64>,
pub comment: String,
pub sql: String,
pub flow_options: HashMap<String, String>,
@@ -915,7 +916,7 @@ impl TryFrom<PbCreateFlowTask> for CreateFlowTask {
sink_table_name,
or_replace,
create_if_not_exists,
- expire_when,
+ expire_after,
comment,
sql,
flow_options,
@@ -934,7 +935,7 @@ impl TryFrom<PbCreateFlowTask> for CreateFlowTask {
.into(),
or_replace,
create_if_not_exists,
- expire_when,
+ expire_after: expire_after.map(|e| e.value),
comment,
sql,
flow_options,
@@ -951,7 +952,7 @@ impl From<CreateFlowTask> for PbCreateFlowTask {
sink_table_name,
or_replace,
create_if_not_exists,
- expire_when,
+ expire_after,
comment,
sql,
flow_options,
@@ -965,7 +966,7 @@ impl From<CreateFlowTask> for PbCreateFlowTask {
sink_table_name: Some(sink_table_name.into()),
or_replace,
create_if_not_exists,
- expire_when,
+ expire_after: expire_after.map(|value| ExpireAfter { value }),
comment,
sql,
flow_options,
diff --git a/src/flow/src/adapter.rs b/src/flow/src/adapter.rs
index f75288831041..25bb3cb2bf2c 100644
--- a/src/flow/src/adapter.rs
+++ b/src/flow/src/adapter.rs
@@ -42,7 +42,6 @@ use tokio::sync::{oneshot, watch, Mutex, RwLock};
use crate::adapter::error::{ExternalSnafu, TableNotFoundSnafu, UnexpectedSnafu};
pub(crate) use crate::adapter::node_context::FlownodeContext;
-use crate::adapter::parse_expr::parse_fixed;
use crate::adapter::table_source::TableSource;
use crate::adapter::util::column_schemas_to_proto;
use crate::adapter::worker::{create_worker, Worker, WorkerHandle};
@@ -565,7 +564,7 @@ impl FlownodeManager {
/// Return task id if a new task is created, otherwise return None
///
/// steps to create task:
- /// 1. parse query into typed plan(and optional parse expire_when expr)
+ /// 1. parse query into typed plan (and optionally parse the expire_after expr)
/// 2. render source/sink with output table id and used input table id
#[allow(clippy::too_many_arguments)]
pub async fn create_flow(
@@ -573,14 +572,14 @@ impl FlownodeManager {
flow_id: FlowId,
sink_table_name: TableName,
source_table_ids: &[TableId],
- create_if_not_exist: bool,
- expire_when: Option<String>,
+ create_if_not_exists: bool,
+ expire_after: Option<i64>,
comment: Option<String>,
sql: String,
flow_options: HashMap<String, String>,
query_ctx: Option<QueryContext>,
) -> Result<Option<FlowId>, Error> {
- if create_if_not_exist {
+ if create_if_not_exists {
// check if the task already exists
for handle in self.worker_handles.iter() {
if handle.lock().await.contains_flow(flow_id).await? {
@@ -608,22 +607,6 @@ impl FlownodeManager {
debug!("Flow {:?}'s Plan is {:?}", flow_id, flow_plan);
node_ctx.assign_table_schema(&sink_table_name, flow_plan.typ.clone())?;
- let expire_when = expire_when
- .and_then(|s| {
- if s.is_empty() || s.split_whitespace().join("").is_empty() {
- None
- } else {
- Some(s)
- }
- })
- .map(|d| {
- let d = d.as_ref();
- parse_fixed(d)
- .map(|(_, n)| n)
- .map_err(|err| err.to_string())
- })
- .transpose()
- .map_err(|err| UnexpectedSnafu { reason: err }.build())?;
let _ = comment;
let _ = flow_options;
@@ -656,8 +639,8 @@ impl FlownodeManager {
sink_sender,
source_ids,
src_recvs: source_receivers,
- expire_when,
- create_if_not_exist,
+ expire_after,
+ create_if_not_exists,
err_collector,
};
handle.create_flow(create_request).await?;
diff --git a/src/flow/src/adapter/flownode_impl.rs b/src/flow/src/adapter/flownode_impl.rs
index 057d8f932ed3..e770bb5e4cf1 100644
--- a/src/flow/src/adapter/flownode_impl.rs
+++ b/src/flow/src/adapter/flownode_impl.rs
@@ -45,7 +45,7 @@ impl Flownode for FlownodeManager {
source_table_ids,
sink_table_name: Some(sink_table_name),
create_if_not_exists,
- expire_when,
+ expire_after,
comment,
sql,
flow_options,
@@ -56,13 +56,14 @@ impl Flownode for FlownodeManager {
sink_table_name.schema_name,
sink_table_name.table_name,
];
+ let expire_after = expire_after.map(|e| e.value);
let ret = self
.create_flow(
task_id.id as u64,
sink_table_name,
&source_table_ids,
create_if_not_exists,
- Some(expire_when),
+ expire_after,
Some(comment),
sql,
flow_options,
diff --git a/src/flow/src/adapter/worker.rs b/src/flow/src/adapter/worker.rs
index 659c6fedf8ae..9df68c6e5d1d 100644
--- a/src/flow/src/adapter/worker.rs
+++ b/src/flow/src/adapter/worker.rs
@@ -232,12 +232,12 @@ impl<'s> Worker<'s> {
source_ids: &[GlobalId],
src_recvs: Vec<broadcast::Receiver<DiffRow>>,
// TODO(discord9): set expire duration for all arrangement and compare to sys timestamp instead
- expire_when: Option<repr::Duration>,
- create_if_not_exist: bool,
+ expire_after: Option<repr::Duration>,
+ create_if_not_exists: bool,
err_collector: ErrCollector,
) -> Result<Option<FlowId>, Error> {
- let already_exist = self.task_states.contains_key(&flow_id);
- match (already_exist, create_if_not_exist) {
+ let already_exists = self.task_states.contains_key(&flow_id);
+ match (already_exists, create_if_not_exists) {
(true, true) => return Ok(None),
(true, false) => FlowAlreadyExistSnafu { id: flow_id }.fail()?,
(false, _) => (),
@@ -247,7 +247,7 @@ impl<'s> Worker<'s> {
err_collector,
..Default::default()
};
- cur_task_state.state.set_expire_after(expire_when);
+ cur_task_state.state.set_expire_after(expire_after);
{
let mut ctx = cur_task_state.new_ctx(sink_id);
@@ -319,8 +319,8 @@ impl<'s> Worker<'s> {
sink_sender,
source_ids,
src_recvs,
- expire_when,
- create_if_not_exist,
+ expire_after,
+ create_if_not_exists,
err_collector,
} => {
let task_create_result = self.create_flow(
@@ -330,8 +330,8 @@ impl<'s> Worker<'s> {
sink_sender,
&source_ids,
src_recvs,
- expire_when,
- create_if_not_exist,
+ expire_after,
+ create_if_not_exists,
err_collector,
);
Some((
@@ -368,8 +368,8 @@ pub enum Request {
sink_sender: mpsc::UnboundedSender<DiffRow>,
source_ids: Vec<GlobalId>,
src_recvs: Vec<broadcast::Receiver<DiffRow>>,
- expire_when: Option<repr::Duration>,
- create_if_not_exist: bool,
+ expire_after: Option<repr::Duration>,
+ create_if_not_exists: bool,
err_collector: ErrCollector,
},
Remove {
@@ -524,8 +524,8 @@ mod test {
sink_sender: sink_tx,
source_ids: src_ids,
src_recvs: vec![rx],
- expire_when: None,
- create_if_not_exist: true,
+ expire_after: None,
+ create_if_not_exists: true,
err_collector: ErrCollector::default(),
};
handle.create_flow(create_reqs).await.unwrap();
diff --git a/src/operator/src/expr_factory.rs b/src/operator/src/expr_factory.rs
index b34bedc7c90f..6715a06e0363 100644
--- a/src/operator/src/expr_factory.rs
+++ b/src/operator/src/expr_factory.rs
@@ -19,7 +19,7 @@ use api::v1::alter_expr::Kind;
use api::v1::{
AddColumn, AddColumns, AlterExpr, ChangeColumnType, ChangeColumnTypes, Column, ColumnDataType,
ColumnDataTypeExtension, CreateFlowExpr, CreateTableExpr, CreateViewExpr, DropColumn,
- DropColumns, RenameTable, SemanticType, TableName,
+ DropColumns, ExpireAfter, RenameTable, SemanticType, TableName,
};
use common_error::ext::BoxedError;
use common_grpc_expr::util::ColumnExpr;
@@ -591,10 +591,7 @@ pub fn to_create_flow_task_expr(
sink_table_name: Some(sink_table_name),
or_replace: create_flow.or_replace,
create_if_not_exists: create_flow.if_not_exists,
- expire_when: create_flow
- .expire_when
- .map(|e| e.to_string())
- .unwrap_or_default(),
+ expire_after: create_flow.expire_after.map(|value| ExpireAfter { value }),
comment: create_flow.comment.unwrap_or_default(),
sql: create_flow.query.to_string(),
flow_options: HashMap::new(),
diff --git a/src/sql/src/error.rs b/src/sql/src/error.rs
index 8f815c79d722..c865e12a8617 100644
--- a/src/sql/src/error.rs
+++ b/src/sql/src/error.rs
@@ -19,6 +19,7 @@ use common_error::status_code::StatusCode;
use common_macro::stack_trace_debug;
use common_time::timestamp::TimeUnit;
use common_time::Timestamp;
+use datafusion_common::DataFusionError;
use datatypes::prelude::{ConcreteDataType, Value};
use snafu::{Location, Snafu};
use sqlparser::ast::Ident;
@@ -123,6 +124,13 @@ pub enum Error {
#[snafu(display("Invalid database name: {}", name))]
InvalidDatabaseName { name: String },
+ #[snafu(display("Invalid interval provided: {}", reason))]
+ InvalidInterval {
+ reason: String,
+ #[snafu(implicit)]
+ location: Location,
+ },
+
#[snafu(display("Unrecognized database option key: {}", key))]
InvalidDatabaseOption {
key: String,
@@ -214,6 +222,22 @@ pub enum Error {
location: Location,
},
+ #[snafu(display("Failed to convert to logical TQL expression"))]
+ ConvertToLogicalExpression {
+ #[snafu(source)]
+ error: DataFusionError,
+ #[snafu(implicit)]
+ location: Location,
+ },
+
+ #[snafu(display("Failed to simplify TQL expression"))]
+ Simplification {
+ #[snafu(source)]
+ error: DataFusionError,
+ #[snafu(implicit)]
+ location: Location,
+ },
+
#[snafu(display(
"Permission denied while operating catalog {} from current catalog {}",
target,
@@ -254,6 +278,9 @@ impl ErrorExt for Error {
| TimestampOverflow { .. }
| InvalidTableOption { .. }
| InvalidCast { .. }
+ | ConvertToLogicalExpression { .. }
+ | Simplification { .. }
+ | InvalidInterval { .. }
| PermissionDenied { .. } => StatusCode::InvalidArguments,
SerializeColumnDefaultConstraint { source, .. } => source.status_code(),
diff --git a/src/sql/src/parsers.rs b/src/sql/src/parsers.rs
index b7e5c8c44e84..721f41367784 100644
--- a/src/sql/src/parsers.rs
+++ b/src/sql/src/parsers.rs
@@ -26,3 +26,4 @@ pub(crate) mod set_var_parser;
pub(crate) mod show_parser;
pub(crate) mod tql_parser;
pub(crate) mod truncate_parser;
+pub(crate) mod utils;
diff --git a/src/sql/src/parsers/create_parser.rs b/src/sql/src/parsers/create_parser.rs
index 152e6a81f7ab..8dc3f0c662c0 100644
--- a/src/sql/src/parsers/create_parser.rs
+++ b/src/sql/src/parsers/create_parser.rs
@@ -15,6 +15,8 @@
use std::collections::HashMap;
use common_catalog::consts::default_engine;
+use datafusion_common::ScalarValue;
+use datatypes::arrow::datatypes::{DataType as ArrowDataType, IntervalUnit};
use itertools::Itertools;
use snafu::{ensure, OptionExt, ResultExt};
use sqlparser::ast::{ColumnOption, ColumnOptionDef, DataType, Expr};
@@ -25,11 +27,12 @@ use sqlparser::parser::{Parser, ParserError};
use sqlparser::tokenizer::{Token, TokenWithLocation, Word};
use table::requests::validate_table_option;
+use super::utils;
use crate::ast::{ColumnDef, Ident, TableConstraint};
use crate::error::{
- self, InvalidColumnOptionSnafu, InvalidDatabaseOptionSnafu, InvalidTableOptionSnafu,
- InvalidTimeIndexSnafu, MissingTimeIndexSnafu, Result, SyntaxSnafu, UnexpectedSnafu,
- UnsupportedSnafu,
+ self, InvalidColumnOptionSnafu, InvalidDatabaseOptionSnafu, InvalidIntervalSnafu,
+ InvalidTableOptionSnafu, InvalidTimeIndexSnafu, MissingTimeIndexSnafu, Result, SyntaxSnafu,
+ UnexpectedSnafu, UnsupportedSnafu,
};
use crate::parser::{ParserContext, FLOW};
use crate::statements::create::{
@@ -44,7 +47,7 @@ pub const ENGINE: &str = "ENGINE";
pub const MAXVALUE: &str = "MAXVALUE";
pub const SINK: &str = "SINK";
pub const EXPIRE: &str = "EXPIRE";
-pub const WHEN: &str = "WHEN";
+pub const AFTER: &str = "AFTER";
const DB_OPT_KEY_TTL: &str = "ttl";
@@ -235,11 +238,28 @@ impl<'a> ParserContext<'a> {
let output_table_name = self.intern_parse_table_name()?;
- let expire_when = if self
+ let expire_after = if self
.parser
- .consume_tokens(&[Token::make_keyword(EXPIRE), Token::make_keyword(WHEN)])
+ .consume_tokens(&[Token::make_keyword(EXPIRE), Token::make_keyword(AFTER)])
{
- Some(self.parser.parse_expr().context(error::SyntaxSnafu)?)
+ let expire_after_expr = self.parser.parse_expr().context(error::SyntaxSnafu)?;
+ let expire_after_lit = utils::parser_expr_to_scalar_value(expire_after_expr.clone())?
+ .cast_to(&ArrowDataType::Interval(IntervalUnit::MonthDayNano))
+ .ok()
+ .with_context(|| InvalidIntervalSnafu {
+ reason: format!("cannot cast {} to interval type", expire_after_expr),
+ })?;
+ if let ScalarValue::IntervalMonthDayNano(Some(nanoseconds)) = expire_after_lit {
+ Some(
+ i64::try_from(nanoseconds / 1_000_000_000)
+ .ok()
+ .with_context(|| InvalidIntervalSnafu {
+ reason: format!("interval {} overflows", nanoseconds),
+ })?,
+ )
+ } else {
+ unreachable!()
+ }
} else {
None
};
@@ -272,7 +292,7 @@ impl<'a> ParserContext<'a> {
sink_table_name: output_table_name,
or_replace,
if_not_exists,
- expire_when,
+ expire_after,
comment,
query,
}))
@@ -877,7 +897,7 @@ mod tests {
use common_catalog::consts::FILE_ENGINE;
use common_error::ext::ErrorExt;
use sqlparser::ast::ColumnOption::NotNull;
- use sqlparser::ast::{BinaryOperator, Expr, Function, Interval, ObjectName, Value};
+ use sqlparser::ast::{BinaryOperator, Expr, ObjectName, Value};
use super::*;
use crate::dialect::GreptimeDbDialect;
@@ -1103,7 +1123,7 @@ mod tests {
let sql = r"
CREATE OR REPLACE FLOW IF NOT EXISTS task_1
SINK TO schema_1.table_1
-EXPIRE WHEN timestamp < now() - INTERVAL '5m'
+EXPIRE AFTER INTERVAL '5 minutes'
COMMENT 'test comment'
AS
SELECT max(c1), min(c2) FROM schema_2.table_2;";
@@ -1133,43 +1153,14 @@ SELECT max(c1), min(c2) FROM schema_2.table_2;";
]),
or_replace: true,
if_not_exists: true,
- expire_when: Some(Expr::BinaryOp {
- left: Box::new(Expr::Identifier(Ident {
- value: "timestamp".to_string(),
- quote_style: None,
- })),
- op: BinaryOperator::Lt,
- right: Box::new(Expr::BinaryOp {
- left: Box::new(Expr::Function(Function {
- name: ObjectName(vec![Ident {
- value: "now".to_string(),
- quote_style: None,
- }]),
- args: vec![],
- filter: None,
- null_treatment: None,
- over: None,
- distinct: false,
- special: false,
- order_by: vec![],
- })),
- op: BinaryOperator::Minus,
- right: Box::new(Expr::Interval(Interval {
- value: Box::new(Expr::Value(Value::SingleQuotedString("5m".to_string()))),
- leading_field: None,
- leading_precision: None,
- last_field: None,
- fractional_seconds_precision: None,
- })),
- }),
- }),
+ expire_after: Some(300),
comment: Some("test comment".to_string()),
// ignore query parse result
query: create_task.query.clone(),
};
assert_eq!(create_task, &expected);
- // create flow without `OR REPLACE`, `IF NOT EXISTS`, `EXPIRE WHEN` and `COMMENT`
+ // create flow without `OR REPLACE`, `IF NOT EXISTS`, `EXPIRE AFTER` and `COMMENT`
let sql = r"
CREATE FLOW task_2
SINK TO schema_1.table_1
@@ -1185,7 +1176,7 @@ SELECT max(c1), min(c2) FROM schema_2.table_2;";
};
assert!(!create_task.or_replace);
assert!(!create_task.if_not_exists);
- assert!(create_task.expire_when.is_none());
+ assert!(create_task.expire_after.is_none());
assert!(create_task.comment.is_none());
}
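The EXPIRE AFTER parsing above folds the interval expression into an IntervalMonthDayNano scalar and converts its nanosecond component into whole seconds, rejecting values that overflow i64. A self-contained sketch of that final conversion step; the function name and the plain i128 input are illustrative stand-ins for the ScalarValue handling in the diff:

/// Convert an interval's nanosecond component into whole seconds,
/// mirroring the overflow check performed by the parser above.
fn interval_nanos_to_seconds(nanoseconds: i128) -> Result<i64, String> {
    i64::try_from(nanoseconds / 1_000_000_000)
        .map_err(|_| format!("interval {} overflows", nanoseconds))
}

fn main() {
    // INTERVAL '5 minutes' carries 300 * 10^9 nanoseconds, i.e. 300 seconds.
    assert_eq!(interval_nanos_to_seconds(300_000_000_000).unwrap(), 300);
    assert!(interval_nanos_to_seconds(i128::MAX).is_err());
}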
diff --git a/src/sql/src/parsers/error.rs b/src/sql/src/parsers/error.rs
index 988aa27f5493..8feb80b988e3 100644
--- a/src/sql/src/parsers/error.rs
+++ b/src/sql/src/parsers/error.rs
@@ -13,7 +13,6 @@
// limitations under the License.
use common_macro::stack_trace_debug;
-use datafusion_common::DataFusionError;
use snafu::{Location, Snafu};
use sqlparser::parser::ParserError;
@@ -30,22 +29,6 @@ pub enum TQLError {
location: Location,
},
- #[snafu(display("Failed to convert to logical TQL expression"))]
- ConvertToLogicalExpression {
- #[snafu(source)]
- error: DataFusionError,
- #[snafu(implicit)]
- location: Location,
- },
-
- #[snafu(display("Failed to simplify TQL expression"))]
- Simplification {
- #[snafu(source)]
- error: DataFusionError,
- #[snafu(implicit)]
- location: Location,
- },
-
#[snafu(display("Failed to evaluate TQL expression: {}", msg))]
Evaluation { msg: String },
}
diff --git a/src/sql/src/parsers/tql_parser.rs b/src/sql/src/parsers/tql_parser.rs
index 13a754a9ca81..985f66720bed 100644
--- a/src/sql/src/parsers/tql_parser.rs
+++ b/src/sql/src/parsers/tql_parser.rs
@@ -12,16 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::sync::Arc;
-
-use chrono::Utc;
-use datafusion::execution::context::SessionState;
-use datafusion::optimizer::simplify_expressions::{ExprSimplifier, SimplifyContext};
-use datafusion_common::config::ConfigOptions;
-use datafusion_common::{DFSchema, Result as DFResult, ScalarValue, TableReference};
-use datafusion_expr::{AggregateUDF, Expr, ScalarUDF, TableSource, WindowUDF};
-use datafusion_physical_expr::execution_props::ExecutionProps;
-use datafusion_sql::planner::{ContextProvider, SqlToRel};
+use datafusion_common::ScalarValue;
use snafu::{OptionExt, ResultExt};
use sqlparser::keywords::Keyword;
use sqlparser::parser::ParserError;
@@ -29,6 +20,7 @@ use sqlparser::tokenizer::Token;
use crate::error::{self, Result};
use crate::parser::ParserContext;
+use crate::parsers::utils;
use crate::statements::statement::Statement;
use crate::statements::tql::{Tql, TqlAnalyze, TqlEval, TqlExplain, TqlParameters};
@@ -37,13 +29,10 @@ const EVAL: &str = "EVAL";
const EVALUATE: &str = "EVALUATE";
const VERBOSE: &str = "VERBOSE";
-use datatypes::arrow::datatypes::DataType;
use sqlparser::parser::Parser;
use crate::dialect::GreptimeDbDialect;
-use crate::parsers::error::{
- ConvertToLogicalExpressionSnafu, EvaluationSnafu, ParserSnafu, SimplificationSnafu, TQLError,
-};
+use crate::parsers::error::{EvaluationSnafu, ParserSnafu, TQLError};
/// TQL extension parser, including:
/// - `TQL EVAL <query>`
@@ -192,10 +181,25 @@ impl<'a> ParserContext<'a> {
}
fn parse_tokens(tokens: Vec<Token>) -> std::result::Result<String, TQLError> {
- Self::parse_to_expr(tokens)
- .and_then(Self::parse_to_logical_expr)
- .and_then(Self::simplify_expr)
- .and_then(Self::evaluate_expr)
+ let parser_expr = Self::parse_to_expr(tokens)?;
+ let lit = utils::parser_expr_to_scalar_value(parser_expr).unwrap();
+
+ let second = match lit {
+ ScalarValue::TimestampNanosecond(ts_nanos, _)
+ | ScalarValue::DurationNanosecond(ts_nanos) => ts_nanos.map(|v| v / 1_000_000_000),
+ ScalarValue::TimestampMicrosecond(ts_micros, _)
+ | ScalarValue::DurationMicrosecond(ts_micros) => ts_micros.map(|v| v / 1_000_000),
+ ScalarValue::TimestampMillisecond(ts_millis, _)
+ | ScalarValue::DurationMillisecond(ts_millis) => ts_millis.map(|v| v / 1_000),
+ ScalarValue::TimestampSecond(ts_secs, _) | ScalarValue::DurationSecond(ts_secs) => {
+ ts_secs
+ }
+ _ => None,
+ };
+
+ second.map(|ts| ts.to_string()).context(EvaluationSnafu {
+ msg: format!("Failed to extract a timestamp value {lit:?}"),
+ })
}
fn parse_to_expr(tokens: Vec<Token>) -> std::result::Result<sqlparser::ast::Expr, TQLError> {
@@ -205,46 +209,6 @@ impl<'a> ParserContext<'a> {
.context(ParserSnafu)
}
- fn parse_to_logical_expr(expr: sqlparser::ast::Expr) -> std::result::Result<Expr, TQLError> {
- let empty_df_schema = DFSchema::empty();
- SqlToRel::new(&StubContextProvider::default())
- .sql_to_expr(expr.into(), &empty_df_schema, &mut Default::default())
- .context(ConvertToLogicalExpressionSnafu)
- }
-
- fn simplify_expr(logical_expr: Expr) -> std::result::Result<Expr, TQLError> {
- let empty_df_schema = DFSchema::empty();
- let execution_props = ExecutionProps::new().with_query_execution_start_time(Utc::now());
- let info = SimplifyContext::new(&execution_props).with_schema(Arc::new(empty_df_schema));
- ExprSimplifier::new(info)
- .simplify(logical_expr)
- .context(SimplificationSnafu)
- }
-
- fn evaluate_expr(simplified_expr: Expr) -> std::result::Result<String, TQLError> {
- match simplified_expr {
- Expr::Literal(ScalarValue::TimestampNanosecond(ts_nanos, _))
- | Expr::Literal(ScalarValue::DurationNanosecond(ts_nanos)) => {
- ts_nanos.map(|v| v / 1_000_000_000)
- }
- Expr::Literal(ScalarValue::TimestampMicrosecond(ts_micros, _))
- | Expr::Literal(ScalarValue::DurationMicrosecond(ts_micros)) => {
- ts_micros.map(|v| v / 1_000_000)
- }
- Expr::Literal(ScalarValue::TimestampMillisecond(ts_millis, _))
- | Expr::Literal(ScalarValue::DurationMillisecond(ts_millis)) => {
- ts_millis.map(|v| v / 1_000)
- }
- Expr::Literal(ScalarValue::TimestampSecond(ts_secs, _))
- | Expr::Literal(ScalarValue::DurationSecond(ts_secs)) => ts_secs,
- _ => None,
- }
- .map(|ts| ts.to_string())
- .context(EvaluationSnafu {
- msg: format!("Failed to extract a timestamp value {simplified_expr:?}"),
- })
- }
-
fn parse_tql_query(parser: &mut Parser, sql: &str) -> std::result::Result<String, ParserError> {
while matches!(parser.peek_token().token, Token::Comma) {
let _skip_token = parser.next_token();
@@ -264,56 +228,6 @@ impl<'a> ParserContext<'a> {
}
}
-struct StubContextProvider {
- state: SessionState,
-}
-
-impl Default for StubContextProvider {
- fn default() -> Self {
- Self {
- state: SessionState::new_with_config_rt(Default::default(), Default::default()),
- }
- }
-}
-
-impl ContextProvider for StubContextProvider {
- fn get_table_source(&self, _name: TableReference) -> DFResult<Arc<dyn TableSource>> {
- unimplemented!()
- }
-
- fn get_function_meta(&self, name: &str) -> Option<Arc<ScalarUDF>> {
- self.state.scalar_functions().get(name).cloned()
- }
-
- fn get_aggregate_meta(&self, name: &str) -> Option<Arc<AggregateUDF>> {
- self.state.aggregate_functions().get(name).cloned()
- }
-
- fn get_window_meta(&self, _name: &str) -> Option<Arc<WindowUDF>> {
- unimplemented!()
- }
-
- fn get_variable_type(&self, _variable_names: &[String]) -> Option<DataType> {
- unimplemented!()
- }
-
- fn options(&self) -> &ConfigOptions {
- unimplemented!()
- }
-
- fn udfs_names(&self) -> Vec<String> {
- self.state.scalar_functions().keys().cloned().collect()
- }
-
- fn udafs_names(&self) -> Vec<String> {
- self.state.aggregate_functions().keys().cloned().collect()
- }
-
- fn udwfs_names(&self) -> Vec<String> {
- self.state.window_functions().keys().cloned().collect()
- }
-}
-
#[cfg(test)]
mod tests {
use common_error::ext::ErrorExt;
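The rewritten parse_tokens above folds the TQL argument to a single scalar and then normalizes timestamps or durations of any precision down to seconds. A small sketch of that normalization over a hypothetical enum that mirrors the ScalarValue variants involved (the enum is an assumption made to keep the example self-contained):

/// Stand-in for the timestamp/duration ScalarValue variants handled above.
enum TimeScalar {
    Nanoseconds(Option<i64>),
    Microseconds(Option<i64>),
    Milliseconds(Option<i64>),
    Seconds(Option<i64>),
}

/// Normalize any supported precision to whole seconds; None means the
/// value could not be interpreted as a timestamp or duration.
fn to_seconds(value: TimeScalar) -> Option<i64> {
    match value {
        TimeScalar::Nanoseconds(v) => v.map(|n| n / 1_000_000_000),
        TimeScalar::Microseconds(v) => v.map(|n| n / 1_000_000),
        TimeScalar::Milliseconds(v) => v.map(|n| n / 1_000),
        TimeScalar::Seconds(v) => v,
    }
}

fn main() {
    assert_eq!(to_seconds(TimeScalar::Milliseconds(Some(5_000))), Some(5));
    assert_eq!(to_seconds(TimeScalar::Seconds(None)), None);
}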
diff --git a/src/sql/src/parsers/utils.rs b/src/sql/src/parsers/utils.rs
new file mode 100644
index 000000000000..97b317dd369a
--- /dev/null
+++ b/src/sql/src/parsers/utils.rs
@@ -0,0 +1,112 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::sync::Arc;
+
+use chrono::Utc;
+use datafusion::config::ConfigOptions;
+use datafusion::error::Result as DfResult;
+use datafusion::execution::context::SessionState;
+use datafusion::optimizer::simplify_expressions::ExprSimplifier;
+use datafusion_common::{DFSchema, ScalarValue};
+use datafusion_expr::execution_props::ExecutionProps;
+use datafusion_expr::simplify::SimplifyContext;
+use datafusion_expr::{AggregateUDF, ScalarUDF, TableSource, WindowUDF};
+use datafusion_sql::planner::{ContextProvider, SqlToRel};
+use datafusion_sql::TableReference;
+use datatypes::arrow::datatypes::DataType;
+use snafu::ResultExt;
+
+use crate::error::{
+ ConvertToLogicalExpressionSnafu, ParseSqlValueSnafu, Result, SimplificationSnafu,
+};
+
+/// Convert a parser expression to a scalar value. This function will try its
+/// best to resolve and reduce constants. Exprs like `1 + 1` or `now()` can be
+/// handled properly.
+pub fn parser_expr_to_scalar_value(expr: sqlparser::ast::Expr) -> Result<ScalarValue> {
+ // 1. convert parser expr to logical expr
+ let empty_df_schema = DFSchema::empty();
+ let logical_expr = SqlToRel::new(&StubContextProvider::default())
+ .sql_to_expr(expr.into(), &empty_df_schema, &mut Default::default())
+ .context(ConvertToLogicalExpressionSnafu)?;
+
+ // 2. simplify logical expr
+ let execution_props = ExecutionProps::new().with_query_execution_start_time(Utc::now());
+ let info = SimplifyContext::new(&execution_props).with_schema(Arc::new(empty_df_schema));
+ let simplified_expr = ExprSimplifier::new(info)
+ .simplify(logical_expr)
+ .context(SimplificationSnafu)?;
+
+ if let datafusion::logical_expr::Expr::Literal(lit) = simplified_expr {
+ Ok(lit)
+ } else {
+ // The expression did not fold to a single literal; report it as a parse error.
+ ParseSqlValueSnafu {
+ msg: format!("expected literal value, but found {:?}", simplified_expr),
+ }
+ .fail()
+ }
+}
+
+/// Helper struct for [`parser_expr_to_scalar_value`].
+struct StubContextProvider {
+ state: SessionState,
+}
+
+impl Default for StubContextProvider {
+ fn default() -> Self {
+ Self {
+ state: SessionState::new_with_config_rt(Default::default(), Default::default()),
+ }
+ }
+}
+
+impl ContextProvider for StubContextProvider {
+ fn get_table_source(&self, _name: TableReference) -> DfResult<Arc<dyn TableSource>> {
+ unimplemented!()
+ }
+
+ fn get_function_meta(&self, name: &str) -> Option<Arc<ScalarUDF>> {
+ self.state.scalar_functions().get(name).cloned()
+ }
+
+ fn get_aggregate_meta(&self, name: &str) -> Option<Arc<AggregateUDF>> {
+ self.state.aggregate_functions().get(name).cloned()
+ }
+
+ fn get_window_meta(&self, _name: &str) -> Option<Arc<WindowUDF>> {
+ unimplemented!()
+ }
+
+ fn get_variable_type(&self, _variable_names: &[String]) -> Option<DataType> {
+ unimplemented!()
+ }
+
+ fn options(&self) -> &ConfigOptions {
+ unimplemented!()
+ }
+
+ fn udfs_names(&self) -> Vec<String> {
+ self.state.scalar_functions().keys().cloned().collect()
+ }
+
+ fn udafs_names(&self) -> Vec<String> {
+ self.state.aggregate_functions().keys().cloned().collect()
+ }
+
+ fn udwfs_names(&self) -> Vec<String> {
+ self.state.window_functions().keys().cloned().collect()
+ }
+}
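parser_expr_to_scalar_value above leans on DataFusion's ExprSimplifier to reduce expressions such as 1 + 1 or now() to a single literal, and fails when anything non-constant survives. The same shape, sketched over a toy expression type so it runs on its own; the Expr enum and fold_to_literal function are illustrative stand-ins, not the DataFusion API:

#[derive(Debug)]
enum Expr {
    Literal(i64),
    Add(Box<Expr>, Box<Expr>),
    Column(String), // cannot be folded without input data
}

/// Reduce constant sub-expressions; error out if a non-constant part survives,
/// mirroring the "expected literal value, but found ..." failure above.
fn fold_to_literal(expr: Expr) -> Result<i64, String> {
    match expr {
        Expr::Literal(v) => Ok(v),
        Expr::Add(l, r) => Ok(fold_to_literal(*l)? + fold_to_literal(*r)?),
        other => Err(format!("expected literal value, but found {:?}", other)),
    }
}

fn main() {
    let e = Expr::Add(Box::new(Expr::Literal(1)), Box::new(Expr::Literal(1)));
    assert_eq!(fold_to_literal(e), Ok(2));
    assert!(fold_to_literal(Expr::Column("ts".into())).is_err());
}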
diff --git a/src/sql/src/statements/create.rs b/src/sql/src/statements/create.rs
index b4903a666c6d..4259d61cc501 100644
--- a/src/sql/src/statements/create.rs
+++ b/src/sql/src/statements/create.rs
@@ -255,8 +255,9 @@ pub struct CreateFlow {
pub or_replace: bool,
/// Create if not exist
pub if_not_exists: bool,
- /// `EXPIRE_WHEN`
- pub expire_when: Option<Expr>,
+ /// `EXPIRE AFTER`
+ /// Duration in seconds as `i64`
+ pub expire_after: Option<i64>,
/// Comment string
pub comment: Option<String>,
/// SQL statement
@@ -275,8 +276,8 @@ impl Display for CreateFlow {
}
write!(f, "{} ", &self.flow_name)?;
write!(f, "OUTPUT AS {} ", &self.sink_table_name)?;
- if let Some(expire_when) = &self.expire_when {
- write!(f, "EXPIRE WHEN {} ", expire_when)?;
+ if let Some(expire_after) = &self.expire_after {
+ write!(f, "EXPIRE AFTER {} ", expire_after)?;
}
if let Some(comment) = &self.comment {
write!(f, "COMMENT '{}' ", comment)?;
|
feat
|
change EXPIRE WHEN to EXPIRE AFTER (#4002)
|
83de399bef99361a68ab315f714ac16334c3d8e0
|
2023-12-19 13:44:37
|
Zhenchi
|
feat(inverted_index.create): add external sorter (#2950)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index 5c4a03af734a..97a509658e09 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3980,11 +3980,14 @@ dependencies = [
"common-base",
"common-error",
"common-macro",
+ "common-telemetry",
"fst",
"futures",
"greptime-proto",
"mockall",
+ "pin-project",
"prost 0.12.2",
+ "rand",
"regex",
"regex-automata 0.1.10",
"snafu",
diff --git a/src/index/Cargo.toml b/src/index/Cargo.toml
index bd5b560ce854..0835da45d003 100644
--- a/src/index/Cargo.toml
+++ b/src/index/Cargo.toml
@@ -12,15 +12,18 @@ bytes.workspace = true
common-base.workspace = true
common-error.workspace = true
common-macro.workspace = true
+common-telemetry.workspace = true
fst.workspace = true
futures.workspace = true
greptime-proto.workspace = true
mockall.workspace = true
+pin-project.workspace = true
prost.workspace = true
regex-automata.workspace = true
regex.workspace = true
snafu.workspace = true
[dev-dependencies]
+rand.workspace = true
tokio-util.workspace = true
tokio.workspace = true
diff --git a/src/index/src/inverted_index.rs b/src/index/src/inverted_index.rs
index a793d1a25238..7a34bae21381 100644
--- a/src/index/src/inverted_index.rs
+++ b/src/index/src/inverted_index.rs
@@ -19,3 +19,4 @@ pub mod search;
pub type FstMap = fst::Map<Vec<u8>>;
pub type Bytes = Vec<u8>;
+pub type BytesRef<'a> = &'a [u8];
diff --git a/src/index/src/inverted_index/create/sort.rs b/src/index/src/inverted_index/create/sort.rs
index 2331ed8dcb81..53a70fc7b5c0 100644
--- a/src/index/src/inverted_index/create/sort.rs
+++ b/src/index/src/inverted_index/create/sort.rs
@@ -12,13 +12,46 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+mod external_provider;
+mod external_sort;
+mod intermediate_rw;
+mod merge_stream;
+
+use async_trait::async_trait;
use common_base::BitVec;
use futures::Stream;
use crate::inverted_index::error::Result;
-use crate::inverted_index::Bytes;
-
-mod intermediate_rw;
+use crate::inverted_index::{Bytes, BytesRef};
/// A stream of sorted values along with their associated bitmap
pub type SortedStream = Box<dyn Stream<Item = Result<(Bytes, BitVec)>> + Send + Unpin>;
+
+/// Output of a sorting operation, encapsulating a bitmap for null values and a stream of sorted items
+pub struct SortOutput {
+ /// Bitmap indicating which segments have null values
+ pub segment_null_bitmap: BitVec,
+
+ /// Stream of sorted items
+ pub sorted_stream: SortedStream,
+
+ /// Total number of rows in the sorted data
+ pub total_row_count: usize,
+}
+
+/// Handles data sorting, supporting incremental input and retrieval of sorted output
+#[async_trait]
+pub trait Sorter: Send {
+ /// Inputs a non-null or null value into the sorter.
+ /// Should be equivalent to calling `push_n` with n = 1
+ async fn push(&mut self, value: Option<BytesRef<'_>>) -> Result<()> {
+ self.push_n(value, 1).await
+ }
+
+ /// Pushes n identical non-null or null values into the sorter.
+ /// Should be equivalent to calling `push` n times
+ async fn push_n(&mut self, value: Option<BytesRef<'_>>, n: usize) -> Result<()>;
+
+ /// Completes the sorting process and returns the sorted data
+ async fn output(&mut self) -> Result<SortOutput>;
+}
diff --git a/src/index/src/inverted_index/create/sort/external_provider.rs b/src/index/src/inverted_index/create/sort/external_provider.rs
new file mode 100644
index 000000000000..a86f3e06aad4
--- /dev/null
+++ b/src/index/src/inverted_index/create/sort/external_provider.rs
@@ -0,0 +1,39 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use async_trait::async_trait;
+use futures::{AsyncRead, AsyncWrite};
+
+use crate::inverted_index::error::Result;
+
+/// Trait for managing intermediate files during external sorting for a particular index.
+#[mockall::automock]
+#[async_trait]
+pub trait ExternalTempFileProvider: Send + Sync {
+ /// Creates and opens a new intermediate file associated with a specific index for writing.
+ /// The implementation should ensure that the file does not already exist.
+ ///
+ /// - `index_name`: the name of the index for which the file will be associated
+ /// - `file_id`: a unique identifier for the new file
+ async fn create(
+ &self,
+ index_name: &str,
+ file_id: &str,
+ ) -> Result<Box<dyn AsyncWrite + Unpin + Send>>;
+
+ /// Retrieves all intermediate files associated with a specific index for an external sorting operation.
+ ///
+ /// `index_name`: the name of the index to retrieve intermediate files for
+ async fn read_all(&self, index_name: &str) -> Result<Vec<Box<dyn AsyncRead + Unpin + Send>>>;
+}
diff --git a/src/index/src/inverted_index/create/sort/external_sort.rs b/src/index/src/inverted_index/create/sort/external_sort.rs
new file mode 100644
index 000000000000..2e530f3e45e4
--- /dev/null
+++ b/src/index/src/inverted_index/create/sort/external_sort.rs
@@ -0,0 +1,437 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::collections::{BTreeMap, VecDeque};
+use std::mem;
+use std::num::NonZeroUsize;
+use std::ops::RangeInclusive;
+use std::sync::Arc;
+
+use async_trait::async_trait;
+use common_base::BitVec;
+use common_telemetry::logging;
+use futures::stream;
+
+use crate::inverted_index::create::sort::external_provider::ExternalTempFileProvider;
+use crate::inverted_index::create::sort::intermediate_rw::{
+ IntermediateReader, IntermediateWriter,
+};
+use crate::inverted_index::create::sort::merge_stream::MergeSortedStream;
+use crate::inverted_index::create::sort::{SortOutput, SortedStream, Sorter};
+use crate::inverted_index::error::Result;
+use crate::inverted_index::{Bytes, BytesRef};
+
+/// `ExternalSorter` manages the sorting of data using both in-memory structures and external files.
+/// It dumps data to external files when the in-memory buffer crosses a certain memory threshold.
+pub struct ExternalSorter {
+ /// The index name associated with the sorting operation
+ index_name: String,
+
+ /// Manages creation and access to external temporary files
+ temp_file_provider: Arc<dyn ExternalTempFileProvider>,
+
+ /// Bitmap indicating which segments have null values
+ segment_null_bitmap: BitVec,
+
+ /// In-memory buffer to hold values and their corresponding bitmaps until memory threshold is exceeded
+ values_buffer: BTreeMap<Bytes, BitVec>,
+
+ /// Count of all rows ingested so far
+ total_row_count: usize,
+
+ /// The number of rows per group for bitmap indexing, which determines how rows are
+ /// batched for indexing. It is used to determine which segment a row belongs to.
+ segment_row_count: NonZeroUsize,
+
+ /// Tracks memory usage of the buffer
+ current_memory_usage: usize,
+
+ /// The memory usage threshold at which the buffer should be dumped to an external file.
+ /// `None` indicates that the buffer should never be dumped.
+ memory_usage_threshold: Option<usize>,
+}
+
+#[async_trait]
+impl Sorter for ExternalSorter {
+ /// Pushes n identical values into the sorter, adding them to the in-memory buffer and dumping
+ /// the buffer to an external file if necessary
+ async fn push_n(&mut self, value: Option<BytesRef<'_>>, n: usize) -> Result<()> {
+ if n == 0 {
+ return Ok(());
+ }
+
+ let segment_index_range = self.segment_index_range(n);
+ self.total_row_count += n;
+
+ if let Some(value) = value {
+ let memory_diff = self.push_not_null(value, segment_index_range);
+ self.may_dump_buffer(memory_diff).await
+ } else {
+ set_bits(&mut self.segment_null_bitmap, segment_index_range);
+ Ok(())
+ }
+ }
+
+ /// Finalizes the sorting operation, merging data from both in-memory buffer and external files
+ /// into a sorted stream
+ async fn output(&mut self) -> Result<SortOutput> {
+ let readers = self.temp_file_provider.read_all(&self.index_name).await?;
+
+ // TODO(zhongzc): k-way merge instead of 2-way merge
+
+ let mut tree_nodes: VecDeque<SortedStream> = VecDeque::with_capacity(readers.len() + 1);
+ tree_nodes.push_back(Box::new(stream::iter(
+ mem::take(&mut self.values_buffer).into_iter().map(Ok),
+ )));
+ for reader in readers {
+ tree_nodes.push_back(IntermediateReader::new(reader).into_stream().await?);
+ }
+
+ while tree_nodes.len() >= 2 {
+ // each turn reduces the length of tree_nodes by 1 until only one stream is left
+ let stream1 = tree_nodes.pop_front().unwrap();
+ let stream2 = tree_nodes.pop_front().unwrap();
+ let merged_stream = MergeSortedStream::merge(stream1, stream2);
+ tree_nodes.push_back(merged_stream);
+ }
+
+ Ok(SortOutput {
+ segment_null_bitmap: mem::take(&mut self.segment_null_bitmap),
+ sorted_stream: tree_nodes.pop_front().unwrap(),
+ total_row_count: self.total_row_count,
+ })
+ }
+}
+
+impl ExternalSorter {
+ /// Constructs a new `ExternalSorter`
+ pub fn new(
+ index_name: String,
+ temp_file_provider: Arc<dyn ExternalTempFileProvider>,
+ segment_row_count: NonZeroUsize,
+ memory_usage_threshold: Option<usize>,
+ ) -> Self {
+ Self {
+ index_name,
+ temp_file_provider,
+
+ segment_null_bitmap: BitVec::new(),
+ values_buffer: BTreeMap::new(),
+
+ total_row_count: 0,
+ segment_row_count,
+
+ current_memory_usage: 0,
+ memory_usage_threshold,
+ }
+ }
+
+ /// Pushes the non-null value to the values buffer and sets the bits within
+ /// the specified range in the given BitVec to true.
+ /// Returns the memory usage difference of the buffer after the operation.
+ fn push_not_null(
+ &mut self,
+ value: BytesRef<'_>,
+ segment_index_range: RangeInclusive<usize>,
+ ) -> usize {
+ match self.values_buffer.get_mut(value) {
+ Some(bitmap) => {
+ let old_len = bitmap.as_raw_slice().len();
+ set_bits(bitmap, segment_index_range);
+
+ bitmap.as_raw_slice().len() - old_len
+ }
+ None => {
+ let mut bitmap = BitVec::default();
+ set_bits(&mut bitmap, segment_index_range);
+
+ let mem_diff = bitmap.as_raw_slice().len() + value.len();
+ self.values_buffer.insert(value.to_vec(), bitmap);
+
+ mem_diff
+ }
+ }
+ }
+
+ /// Checks if the in-memory buffer exceeds the threshold and offloads it to external storage if necessary
+ async fn may_dump_buffer(&mut self, memory_diff: usize) -> Result<()> {
+ self.current_memory_usage += memory_diff;
+ if self.memory_usage_threshold.is_none()
+ || self.current_memory_usage < self.memory_usage_threshold.unwrap()
+ {
+ return Ok(());
+ }
+
+ let file_id = &format!("{:012}", self.total_row_count);
+ let index_name = &self.index_name;
+ let writer = self.temp_file_provider.create(index_name, file_id).await?;
+
+ let memory_usage = self.current_memory_usage;
+ let values = mem::take(&mut self.values_buffer);
+ self.current_memory_usage = 0;
+
+ let entries = values.len();
+ IntermediateWriter::new(writer).write_all(values).await.inspect(|_|
+ logging::debug!("Dumped {entries} entries ({memory_usage} bytes) to intermediate file {file_id} for index {index_name}")
+ ).inspect_err(|e|
+ logging::error!("Failed to dump {entries} entries to intermediate file {file_id} for index {index_name}. Error: {e}")
+ )
+ }
+
+ /// Determines the segment index range for the row index range
+ /// `[self.total_row_count, self.total_row_count + n - 1]`
+ fn segment_index_range(&self, n: usize) -> RangeInclusive<usize> {
+ let start = self.segment_index(self.total_row_count);
+ let end = self.segment_index(self.total_row_count + n - 1);
+ start..=end
+ }
+
+ /// Determines the segment index for the given row index
+ fn segment_index(&self, row_index: usize) -> usize {
+ row_index / self.segment_row_count
+ }
+}
+
+/// Sets the bits within the specified range in the given `BitVec` to true
+fn set_bits(bitmap: &mut BitVec, index_range: RangeInclusive<usize>) {
+ if *index_range.end() >= bitmap.len() {
+ bitmap.resize(index_range.end() + 1, false);
+ }
+ for index in index_range {
+ bitmap.set(index, true);
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::collections::HashMap;
+ use std::iter;
+ use std::sync::Mutex;
+
+ use futures::{AsyncRead, StreamExt};
+ use rand::Rng;
+ use tokio::io::duplex;
+ use tokio_util::compat::{TokioAsyncReadCompatExt, TokioAsyncWriteCompatExt};
+
+ use super::*;
+ use crate::inverted_index::create::sort::external_provider::MockExternalTempFileProvider;
+
+ async fn test_external_sorter(
+ memory_usage_threshold: Option<usize>,
+ segment_row_count: usize,
+ row_count: usize,
+ batch_push: bool,
+ ) {
+ let mut mock_provider = MockExternalTempFileProvider::new();
+
+ let mock_files: Arc<Mutex<HashMap<String, Box<dyn AsyncRead + Unpin + Send>>>> =
+ Arc::new(Mutex::new(HashMap::new()));
+
+ mock_provider.expect_create().returning({
+ let files = Arc::clone(&mock_files);
+ move |index_name, file_id| {
+ assert_eq!(index_name, "test");
+ let mut files = files.lock().unwrap();
+ let (writer, reader) = duplex(8 * 1024);
+ files.insert(file_id.to_string(), Box::new(reader.compat()));
+ Ok(Box::new(writer.compat_write()))
+ }
+ });
+
+ mock_provider.expect_read_all().returning({
+ let files = Arc::clone(&mock_files);
+ move |index_name| {
+ assert_eq!(index_name, "test");
+ let mut files = files.lock().unwrap();
+ Ok(files.drain().map(|f| f.1).collect::<Vec<_>>())
+ }
+ });
+
+ let mut sorter = ExternalSorter::new(
+ "test".to_owned(),
+ Arc::new(mock_provider),
+ NonZeroUsize::new(segment_row_count).unwrap(),
+ memory_usage_threshold,
+ );
+
+ let mut sorted_result = if batch_push {
+ let (dic_values, sorted_result) =
+ dictionary_values_and_sorted_result(row_count, segment_row_count);
+
+ for (value, n) in dic_values {
+ sorter.push_n(value.as_deref(), n).await.unwrap();
+ }
+
+ sorted_result
+ } else {
+ let (mock_values, sorted_result) =
+ shuffle_values_and_sorted_result(row_count, segment_row_count);
+
+ for value in mock_values {
+ sorter.push(value.as_deref()).await.unwrap();
+ }
+
+ sorted_result
+ };
+
+ let SortOutput {
+ segment_null_bitmap,
+ mut sorted_stream,
+ total_row_count,
+ } = sorter.output().await.unwrap();
+ assert_eq!(total_row_count, row_count);
+ let n = sorted_result.remove(&None);
+ assert_eq!(
+ segment_null_bitmap.iter_ones().collect::<Vec<_>>(),
+ n.unwrap_or_default()
+ );
+ for (value, offsets) in sorted_result {
+ let item = sorted_stream.next().await.unwrap().unwrap();
+ assert_eq!(item.0, value.unwrap());
+ assert_eq!(item.1.iter_ones().collect::<Vec<_>>(), offsets);
+ }
+ }
+
+ #[tokio::test]
+ async fn test_external_sorter_pure_in_memory() {
+ let memory_usage_threshold = None;
+ let total_row_count_cases = vec![0, 100, 1000, 10000];
+ let segment_row_count_cases = vec![1, 10, 100, 1000];
+ let batch_push_cases = vec![false, true];
+
+ for total_row_count in total_row_count_cases {
+ for segment_row_count in &segment_row_count_cases {
+ for batch_push in &batch_push_cases {
+ test_external_sorter(
+ memory_usage_threshold,
+ *segment_row_count,
+ total_row_count,
+ *batch_push,
+ )
+ .await;
+ }
+ }
+ }
+ }
+
+ #[tokio::test]
+ async fn test_external_sorter_pure_external() {
+ let memory_usage_threshold = Some(0);
+ let total_row_count_cases = vec![0, 100, 1000, 10000];
+ let segment_row_count_cases = vec![1, 10, 100, 1000];
+ let batch_push_cases = vec![false, true];
+
+ for total_row_count in total_row_count_cases {
+ for segment_row_count in &segment_row_count_cases {
+ for batch_push in &batch_push_cases {
+ test_external_sorter(
+ memory_usage_threshold,
+ *segment_row_count,
+ total_row_count,
+ *batch_push,
+ )
+ .await;
+ }
+ }
+ }
+ }
+
+ #[tokio::test]
+ async fn test_external_sorter_mixed() {
+ let memory_usage_threshold = Some(1024);
+ let total_row_count_cases = vec![0, 100, 1000, 10000];
+ let segment_row_count_cases = vec![1, 10, 100, 1000];
+ let batch_push_cases = vec![false, true];
+
+ for total_row_count in total_row_count_cases {
+ for segment_row_count in &segment_row_count_cases {
+ for batch_push in &batch_push_cases {
+ test_external_sorter(
+ memory_usage_threshold,
+ *segment_row_count,
+ total_row_count,
+ *batch_push,
+ )
+ .await;
+ }
+ }
+ }
+ }
+
+ fn random_option_bytes(size: usize) -> Option<Vec<u8>> {
+ let mut rng = rand::thread_rng();
+
+ if rng.gen() {
+ let mut buffer = vec![0u8; size];
+ rng.fill(&mut buffer[..]);
+ Some(buffer)
+ } else {
+ None
+ }
+ }
+
+ type Values = Vec<Option<Bytes>>;
+ type DictionaryValues = Vec<(Option<Bytes>, usize)>;
+ type ValueSegIds = BTreeMap<Option<Bytes>, Vec<usize>>;
+
+ fn shuffle_values_and_sorted_result(
+ row_count: usize,
+ segment_row_count: usize,
+ ) -> (Values, ValueSegIds) {
+ let mock_values = iter::repeat_with(|| random_option_bytes(100))
+ .take(row_count)
+ .collect::<Vec<_>>();
+
+ let sorted_result = sorted_result(&mock_values, segment_row_count);
+ (mock_values, sorted_result)
+ }
+
+ fn dictionary_values_and_sorted_result(
+ row_count: usize,
+ segment_row_count: usize,
+ ) -> (DictionaryValues, ValueSegIds) {
+ let mut n = row_count;
+ let mut rng = rand::thread_rng();
+ let mut dic_values = Vec::new();
+
+ while n > 0 {
+ let size = rng.gen_range(1..=n);
+ let value = random_option_bytes(100);
+ dic_values.push((value, size));
+ n -= size;
+ }
+
+ let mock_values = dic_values
+ .iter()
+ .flat_map(|(value, size)| iter::repeat(value.clone()).take(*size))
+ .collect::<Vec<_>>();
+
+ let sorted_result = sorted_result(&mock_values, segment_row_count);
+ (dic_values, sorted_result)
+ }
+
+ fn sorted_result(values: &Values, segment_row_count: usize) -> ValueSegIds {
+ let mut sorted_result = BTreeMap::new();
+ for (row_index, value) in values.iter().enumerate() {
+ let to_add_segment_index = row_index / segment_row_count;
+ let indices = sorted_result.entry(value.clone()).or_insert_with(Vec::new);
+
+ if indices.last() != Some(&to_add_segment_index) {
+ indices.push(to_add_segment_index);
+ }
+ }
+
+ sorted_result
+ }
+}
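The output method above performs its k-way merge by repeatedly popping two sorted streams off a VecDeque, 2-way merging them, and pushing the result back until a single stream remains (the TODO in the diff notes a real k-way merge as future work). The same reduction, sketched over plain sorted Vec<(String, Vec<usize>)> runs with equal keys unioned; the types are illustrative, not the crate's async streams:

use std::collections::VecDeque;

type Sorted = Vec<(String, Vec<usize>)>;

/// 2-way merge of two sorted runs; equal keys have their posting lists unioned.
fn merge_two(a: &Sorted, b: &Sorted) -> Sorted {
    let mut out = Sorted::new();
    let (mut i, mut j) = (0usize, 0usize);
    while i < a.len() || j < b.len() {
        if j == b.len() || (i < a.len() && a[i].0 < b[j].0) {
            out.push(a[i].clone());
            i += 1;
        } else if i == a.len() || b[j].0 < a[i].0 {
            out.push(b[j].clone());
            j += 1;
        } else {
            // Equal keys: union the two posting lists.
            let mut segments = a[i].1.clone();
            segments.extend(b[j].1.iter().copied());
            segments.sort_unstable();
            segments.dedup();
            out.push((a[i].0.clone(), segments));
            i += 1;
            j += 1;
        }
    }
    out
}

fn main() {
    let mut runs: VecDeque<Sorted> = VecDeque::from(vec![
        vec![("apple".to_string(), vec![0]), ("peach".to_string(), vec![2])],
        vec![("apple".to_string(), vec![1])],
        vec![("banana".to_string(), vec![3])],
    ]);
    // Every turn removes two runs and pushes one merged run back, until one remains.
    while runs.len() >= 2 {
        let (x, y) = (runs.pop_front().unwrap(), runs.pop_front().unwrap());
        runs.push_back(merge_two(&x, &y));
    }
    let merged = runs.pop_front().unwrap();
    assert_eq!(merged[0], ("apple".to_string(), vec![0, 1]));
    assert_eq!(merged.len(), 3);
}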
diff --git a/src/index/src/inverted_index/create/sort/merge_stream.rs b/src/index/src/inverted_index/create/sort/merge_stream.rs
new file mode 100644
index 000000000000..84debecb8ada
--- /dev/null
+++ b/src/index/src/inverted_index/create/sort/merge_stream.rs
@@ -0,0 +1,174 @@
+// Copyright 2023 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::cmp::Ordering;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+use common_base::BitVec;
+use futures::{ready, Stream, StreamExt};
+use pin_project::pin_project;
+
+use crate::inverted_index::create::sort::SortedStream;
+use crate::inverted_index::error::Result;
+use crate::inverted_index::Bytes;
+
+/// A [`Stream`] implementation that merges two sorted streams into a single sorted stream
+#[pin_project]
+pub struct MergeSortedStream {
+ stream1: Option<SortedStream>,
+ peek1: Option<(Bytes, BitVec)>,
+
+ stream2: Option<SortedStream>,
+ peek2: Option<(Bytes, BitVec)>,
+}
+
+impl MergeSortedStream {
+ /// Creates a new `MergeSortedStream` that will return elements from `stream1` and `stream2`
+ /// in sorted order, merging duplicate items by unioning their bitmaps
+ pub fn merge(stream1: SortedStream, stream2: SortedStream) -> SortedStream {
+ Box::new(MergeSortedStream {
+ stream1: Some(stream1),
+ peek1: None,
+
+ stream2: Some(stream2),
+ peek2: None,
+ })
+ }
+}
+
+impl Stream for MergeSortedStream {
+ type Item = Result<(Bytes, BitVec)>;
+
+ /// Polls both streams and returns the next item from the stream that has the smaller next item.
+ /// If both streams have the same next item, the bitmaps are unioned together.
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ let this = self.project();
+
+ if let (None, Some(stream1)) = (&this.peek1, this.stream1.as_mut()) {
+ match ready!(stream1.poll_next_unpin(cx)) {
+ Some(item) => *this.peek1 = Some(item?),
+ None => *this.stream1 = None, // `stream1` is exhausted, don't poll it next time
+ }
+ }
+
+ if let (None, Some(stream2)) = (&this.peek2, this.stream2.as_mut()) {
+ match ready!(stream2.poll_next_unpin(cx)) {
+ Some(item) => *this.peek2 = Some(item?),
+ None => *this.stream2 = None, // `stream2` is exhausted, don't poll it next time
+ }
+ }
+
+ Poll::Ready(match (this.peek1.take(), this.peek2.take()) {
+ (Some((v1, b1)), Some((v2, b2))) => match v1.cmp(&v2) {
+ Ordering::Less => {
+ *this.peek2 = Some((v2, b2)); // Preserve the rest of `stream2`
+ Some(Ok((v1, b1)))
+ }
+ Ordering::Greater => {
+ *this.peek1 = Some((v1, b1)); // Preserve the rest of `stream1`
+ Some(Ok((v2, b2)))
+ }
+ Ordering::Equal => Some(Ok((v1, merge_bitmaps(b1, b2)))),
+ },
+ (None, Some(item)) | (Some(item), None) => Some(Ok(item)),
+ (None, None) => None,
+ })
+ }
+}
+
+/// Merges two bitmaps by bit-wise OR'ing them together, preserving all bits from both
+fn merge_bitmaps(bitmap1: BitVec, bitmap2: BitVec) -> BitVec {
+ // make sure longer bitmap is on the left to avoid truncation
+ #[allow(clippy::if_same_then_else)]
+ if bitmap1.len() > bitmap2.len() {
+ bitmap1 | bitmap2
+ } else {
+ bitmap2 | bitmap1
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use futures::stream;
+
+ use super::*;
+ use crate::inverted_index::error::Error;
+
+ fn sorted_stream_from_vec(vec: Vec<(Bytes, BitVec)>) -> SortedStream {
+ Box::new(stream::iter(vec.into_iter().map(Ok::<_, Error>)))
+ }
+
+ #[tokio::test]
+ async fn test_merge_sorted_stream_non_overlapping() {
+ let stream1 = sorted_stream_from_vec(vec![
+ (Bytes::from("apple"), BitVec::from_slice(&[0b10101010])),
+ (Bytes::from("orange"), BitVec::from_slice(&[0b01010101])),
+ ]);
+ let stream2 = sorted_stream_from_vec(vec![
+ (Bytes::from("banana"), BitVec::from_slice(&[0b10101010])),
+ (Bytes::from("peach"), BitVec::from_slice(&[0b01010101])),
+ ]);
+
+ let mut merged_stream = MergeSortedStream::merge(stream1, stream2);
+
+ let item = merged_stream.next().await.unwrap().unwrap();
+ assert_eq!(item.0, Bytes::from("apple"));
+ assert_eq!(item.1, BitVec::from_slice(&[0b10101010]));
+ let item = merged_stream.next().await.unwrap().unwrap();
+ assert_eq!(item.0, Bytes::from("banana"));
+ assert_eq!(item.1, BitVec::from_slice(&[0b10101010]));
+ let item = merged_stream.next().await.unwrap().unwrap();
+ assert_eq!(item.0, Bytes::from("orange"));
+ assert_eq!(item.1, BitVec::from_slice(&[0b01010101]));
+ let item = merged_stream.next().await.unwrap().unwrap();
+ assert_eq!(item.0, Bytes::from("peach"));
+ assert_eq!(item.1, BitVec::from_slice(&[0b01010101]));
+ assert!(merged_stream.next().await.is_none());
+ }
+
+ #[tokio::test]
+ async fn test_merge_sorted_stream_overlapping() {
+ let stream1 = sorted_stream_from_vec(vec![
+ (Bytes::from("apple"), BitVec::from_slice(&[0b10101010])),
+ (Bytes::from("orange"), BitVec::from_slice(&[0b10101010])),
+ ]);
+ let stream2 = sorted_stream_from_vec(vec![
+ (Bytes::from("apple"), BitVec::from_slice(&[0b01010101])),
+ (Bytes::from("peach"), BitVec::from_slice(&[0b01010101])),
+ ]);
+
+ let mut merged_stream = MergeSortedStream::merge(stream1, stream2);
+
+ let item = merged_stream.next().await.unwrap().unwrap();
+ assert_eq!(item.0, Bytes::from("apple"));
+ assert_eq!(item.1, BitVec::from_slice(&[0b11111111]));
+ let item = merged_stream.next().await.unwrap().unwrap();
+ assert_eq!(item.0, Bytes::from("orange"));
+ assert_eq!(item.1, BitVec::from_slice(&[0b10101010]));
+ let item = merged_stream.next().await.unwrap().unwrap();
+ assert_eq!(item.0, Bytes::from("peach"));
+ assert_eq!(item.1, BitVec::from_slice(&[0b01010101]));
+ assert!(merged_stream.next().await.is_none());
+ }
+
+ #[tokio::test]
+ async fn test_merge_sorted_stream_empty_streams() {
+ let stream1 = sorted_stream_from_vec(vec![]);
+ let stream2 = sorted_stream_from_vec(vec![]);
+
+ let mut merged_stream = MergeSortedStream::merge(stream1, stream2);
+ assert!(merged_stream.next().await.is_none());
+ }
+}
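A minimal usage sketch of the merge above (an illustration only, not part of the commit); it assumes the `SortedStream` alias, `Bytes`, `BitVec`, `Error`, and `MergeSortedStream` shown in the diff are in scope, mirroring how the tests build their inputs:

    use futures::{stream, StreamExt};

    async fn merge_example() {
        // Each input stream must already be sorted by value.
        let left: SortedStream = Box::new(stream::iter(
            vec![
                (Bytes::from("a"), BitVec::from_slice(&[0b0000_0001])),
                (Bytes::from("c"), BitVec::from_slice(&[0b0000_0010])),
            ]
            .into_iter()
            .map(Ok::<_, Error>),
        ));
        let right: SortedStream = Box::new(stream::iter(
            vec![
                (Bytes::from("b"), BitVec::from_slice(&[0b0000_0100])),
                (Bytes::from("c"), BitVec::from_slice(&[0b0000_1000])),
            ]
            .into_iter()
            .map(Ok::<_, Error>),
        ));

        // Yields "a", "b", then a single "c" whose bitmap is the OR of both inputs.
        let mut merged = MergeSortedStream::merge(left, right);
        while let Some(item) = merged.next().await {
            let (value, bitmap) = item.unwrap();
            println!("{:?}: {:?}", value, bitmap);
        }
    }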
|
feat
|
add external sorter (#2950)
|
d7aeb369a6891e9e245018438bba41afc359b72d
|
2023-10-11 09:12:04
|
zyy17
|
refactor: add new action 'release-cn-artifacts' (#2554)
| false
|
diff --git a/.github/actions/build-greptime-binary/action.yml b/.github/actions/build-greptime-binary/action.yml
index a55601c17708..8a06dd2231e3 100644
--- a/.github/actions/build-greptime-binary/action.yml
+++ b/.github/actions/build-greptime-binary/action.yml
@@ -16,30 +16,6 @@ inputs:
version:
description: Version of the artifact
required: true
- release-to-s3-bucket:
- description: S3 bucket to store released artifacts
- required: true
- aws-access-key-id:
- description: AWS access key id
- required: true
- aws-secret-access-key:
- description: AWS secret access key
- required: true
- aws-region:
- description: AWS region
- required: true
- upload-to-s3:
- description: Upload to S3
- required: false
- default: 'true'
- upload-latest-artifacts:
- description: Upload the latest artifacts to S3
- required: false
- default: 'true'
- build-android-artifacts:
- description: Build android artifacts
- required: false
- default: 'false'
working-dir:
description: Working directory to build the artifacts
required: false
@@ -64,12 +40,6 @@ runs:
artifacts-dir: ${{ inputs.artifacts-dir }}
target-file: ./target/${{ inputs.cargo-profile }}/greptime
version: ${{ inputs.version }}
- release-to-s3-bucket: ${{ inputs.release-to-s3-bucket }}
- aws-access-key-id: ${{ inputs.aws-access-key-id }}
- aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
- aws-region: ${{ inputs.aws-region }}
- upload-to-s3: ${{ inputs.upload-to-s3 }}
- upload-latest-artifacts: ${{ inputs.upload-latest-artifacts }}
working-dir: ${{ inputs.working-dir }}
# TODO(zyy17): We can remove build-android-artifacts flag in the future.
@@ -86,10 +56,4 @@ runs:
artifacts-dir: ${{ inputs.artifacts-dir }}
target-file: ./target/aarch64-linux-android/release/greptime
version: ${{ inputs.version }}
- release-to-s3-bucket: ${{ inputs.release-to-s3-bucket }}
- aws-access-key-id: ${{ inputs.aws-access-key-id }}
- aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
- aws-region: ${{ inputs.aws-region }}
- upload-to-s3: ${{ inputs.upload-to-s3 }}
- upload-latest-artifacts: ${{ inputs.upload-latest-artifacts }}
working-dir: ${{ inputs.working-dir }}
diff --git a/.github/actions/build-macos-artifacts/action.yml b/.github/actions/build-macos-artifacts/action.yml
index 3b9488edd9f3..6b1cf6b03ba8 100644
--- a/.github/actions/build-macos-artifacts/action.yml
+++ b/.github/actions/build-macos-artifacts/action.yml
@@ -19,25 +19,9 @@ inputs:
disable-run-tests:
description: Disable running integration tests
required: true
- release-to-s3-bucket:
- description: S3 bucket to store released artifacts
- required: true
artifacts-dir:
description: Directory to store artifacts
required: true
- aws-access-key-id:
- description: AWS access key id
- required: true
- aws-secret-access-key:
- description: AWS secret access key
- required: true
- aws-region:
- description: AWS region
- required: true
- upload-to-s3:
- description: Upload to S3
- required: false
- default: 'true'
runs:
using: composite
steps:
@@ -103,8 +87,3 @@ runs:
artifacts-dir: ${{ inputs.artifacts-dir }}
target-file: target/${{ inputs.arch }}/${{ inputs.cargo-profile }}/greptime
version: ${{ inputs.version }}
- release-to-s3-bucket: ${{ inputs.release-to-s3-bucket }}
- aws-access-key-id: ${{ inputs.aws-access-key-id }}
- aws-secret-access-key: ${{ inputs.aws-secret-access-key }}
- aws-region: ${{ inputs.aws-region }}
- upload-to-s3: ${{ inputs.upload-to-s3 }}
diff --git a/.github/actions/release-artifacts/action.yml b/.github/actions/publish-github-release/action.yml
similarity index 96%
rename from .github/actions/release-artifacts/action.yml
rename to .github/actions/publish-github-release/action.yml
index 4b139ead5535..d9e172c68452 100644
--- a/.github/actions/release-artifacts/action.yml
+++ b/.github/actions/publish-github-release/action.yml
@@ -1,5 +1,5 @@
-name: Release artifacts
-description: Release artifacts
+name: Publish GitHub release
+description: Publish GitHub release
inputs:
version:
description: Version to release
diff --git a/.github/actions/release-cn-artifacts/action.yaml b/.github/actions/release-cn-artifacts/action.yaml
new file mode 100644
index 000000000000..653d6a269462
--- /dev/null
+++ b/.github/actions/release-cn-artifacts/action.yaml
@@ -0,0 +1,138 @@
+name: Release CN artifacts
+description: Release artifacts to CN region
+inputs:
+ image-registry:
+ description: The image registry to store the images
+ required: true
+ image-namespace:
+ description: The namespace of the image registry to store the images
+ required: true
+ image-name:
+ description: The name of the image to build
+ required: false
+ default: greptimedb
+ image-registry-username:
+ description: The username to login to the image registry
+ required: true
+ image-registry-password:
+ description: The password to login to the image registry
+ required: true
+ version:
+ description: Version of the artifact
+ required: true
+ dev-mode:
+ description: Enable dev mode, only push standard greptime
+ required: false
+ default: 'false'
+ push-latest-tag:
+ description: Whether to push the latest tag
+ required: false
+ default: 'true'
+ aws-cn-s3-bucket:
+ description: S3 bucket to store released artifacts in CN region
+ required: true
+ aws-cn-access-key-id:
+ description: AWS access key id in CN region
+ required: true
+ aws-cn-secret-access-key:
+ description: AWS secret access key in CN region
+ required: true
+ aws-cn-region:
+ description: AWS region in CN
+ required: true
+ upload-to-s3:
+ description: Upload to S3
+ required: false
+ default: 'true'
+ artifacts-dir:
+ description: Directory to store artifacts
+ required: false
+ default: 'artifacts'
+ update-latest-version-info:
+ description: Upload the latest version info in S3
+ required: false
+ default: 'true'
+ upload-max-retry-times:
+ description: Max retry times for uploading artifacts to S3
+ required: false
+ default: "20"
+ upload-retry-timeout:
+ description: Timeout for uploading artifacts to S3
+ required: false
+ default: "30" # minutes
+runs:
+ using: composite
+ steps:
+ - name: Install skopeo
+ shell: bash
+ run: |
+ sudo apt update && sudo apt install -y skopeo
+
+ - name: Push images from Dockerhub to ACR
+ shell: bash
+ run: |
+ skopeo copy -a docker://docker.io/${{ inputs.image-namespace }}/${{ inputs.image-name }}:${{ inputs.version }} \
+ --dest-creds "${{ inputs.image-registry-username }}":"${{ inputs.image-registry-password }}" \
+ docker://${{ inputs.image-registry }}/${{ inputs.image-namespace }}/${{ inputs.image-name }}:${{ inputs.version }}
+
+ if [[ "${{ inputs.dev-mode }}" == "false" ]]; then
+ skopeo copy -a docker://docker.io/${{ inputs.image-namespace }}/${{ inputs.image-name }}-centos:${{ inputs.version }} \
+ --dest-creds "${{ inputs.image-registry-username }}":"${{ inputs.image-registry-password }}" \
+ docker://${{ inputs.image-registry }}/${{ inputs.image-namespace }}/${{ inputs.image-name }}-centos:${{ inputs.version }}
+ fi
+
+ - name: Push latest images from Dockerhub to ACR
+ shell: bash
+ if: ${{ inputs.push-latest-tag == 'true' }}
+ run: |
+ skopeo copy -a docker://docker.io/${{ inputs.image-namespace }}/${{ inputs.image-name }}:${{ inputs.version }} \
+ --dest-creds "${{ inputs.image-registry-username }}":"${{ inputs.image-registry-password }}" \
+ docker://${{ inputs.image-registry }}/${{ inputs.image-namespace }}/${{ inputs.image-name }}:latest
+
+ if [[ "${{ inputs.dev-mode }}" == "false" ]]; then
+ skopeo copy -a docker://docker.io/${{ inputs.image-namespace }}/${{ inputs.image-name }}-centos:${{ inputs.version }} \
+ --dest-creds "${{ inputs.image-registry-username }}":"${{ inputs.image-registry-password }}" \
+ docker://${{ inputs.image-registry }}/${{ inputs.image-namespace }}/${{ inputs.image-name }}-centos:latest
+ fi
+
+ - name: Download artifacts
+ uses: actions/download-artifact@v3
+ with:
+ path: ${{ inputs.artifacts-dir }}
+
+ - name: Upload artifacts to aws-cn S3
+ if: ${{ inputs.upload-to-s3 == 'true' }}
+ uses: nick-invision/retry@v2
+ env:
+ AWS_ACCESS_KEY_ID: ${{ inputs.aws-cn-access-key-id }}
+ AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-cn-secret-access-key }}
+ AWS_DEFAULT_REGION: ${{ inputs.aws-cn-region }}
+ with:
+ max_attempts: ${{ inputs.upload-max-retry-times }}
+ timeout_minutes: ${{ inputs.upload-retry-timeout }}
+ # The bucket layout will be:
+ # releases/greptimedb
+ # ├── v0.1.0
+ # │ ├── greptime-darwin-amd64-pyo3-v0.1.0.sha256sum
+ # │ └── greptime-darwin-amd64-pyo3-v0.1.0.tar.gz
+ # └── v0.2.0
+ # ├── greptime-darwin-amd64-pyo3-v0.2.0.sha256sum
+ # └── greptime-darwin-amd64-pyo3-v0.2.0.tar.gz
+ command: |
+ cd ${{ inputs.artifacts-dir }} && \
+ aws s3 cp . s3://${{ inputs.aws-cn-s3-bucket }}/releases/greptimedb/${{ inputs.version }} \
+ --recursive --exclude "*" --include "greptime-*.tar.gz" --include "greptime-*.sha256sum"
+
+ - name: Update latest version info in aws-cn S3
+ if: ${{ inputs.upload-to-s3 == 'true' && inputs.update-latest-version-info == 'true' }} # We'll also update the latest version info in S3 in the scheduled and formal release.
+ uses: nick-invision/retry@v2
+ env:
+ AWS_ACCESS_KEY_ID: ${{ inputs.aws-cn-access-key-id }}
+ AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-cn-secret-access-key }}
+ AWS_DEFAULT_REGION: ${{ inputs.aws-cn-region }}
+ with:
+ max_attempts: ${{ inputs.upload-max-retry-times }}
+ timeout_minutes: ${{ inputs.upload-retry-timeout }}
+ command: |
+ echo "${{ inputs.version }}" > ${{ inputs.artifacts-dir }}/latest-version.txt && \
+ aws s3 cp ${{ inputs.artifacts-dir }}/latest-version.txt s3://${{ inputs.aws-cn-s3-bucket }}/releases/greptimedb/latest-version.txt
diff --git a/.github/actions/upload-artifacts/action.yml b/.github/actions/upload-artifacts/action.yml
index 44a603b2b78c..290c9950509b 100644
--- a/.github/actions/upload-artifacts/action.yml
+++ b/.github/actions/upload-artifacts/action.yml
@@ -10,34 +10,6 @@ inputs:
version:
description: Version of the artifact
required: true
- release-to-s3-bucket:
- description: S3 bucket to store released artifacts
- required: true
- aws-access-key-id:
- description: AWS access key id
- required: true
- aws-secret-access-key:
- description: AWS secret access key
- required: true
- aws-region:
- description: AWS region
- required: true
- upload-to-s3:
- description: Upload to S3
- required: false
- default: 'true'
- upload-latest-artifacts:
- description: Upload the latest artifacts to S3
- required: false
- default: 'true'
- upload-max-retry-times:
- description: Max retry times for uploading artifacts to S3
- required: false
- default: "20"
- upload-retry-timeout:
- description: Timeout for uploading artifacts to S3
- required: false
- default: "30" # minutes
working-dir:
description: Working directory to upload the artifacts
required: false
@@ -77,49 +49,3 @@ runs:
with:
name: ${{ inputs.artifacts-dir }}.sha256sum
path: ${{ inputs.working-dir }}/${{ inputs.artifacts-dir }}.sha256sum
-
- - name: Upload artifacts to S3
- if: ${{ inputs.upload-to-s3 == 'true' }}
- uses: nick-invision/retry@v2
- env:
- AWS_ACCESS_KEY_ID: ${{ inputs.aws-access-key-id }}
- AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-secret-access-key }}
- AWS_DEFAULT_REGION: ${{ inputs.aws-region }}
- with:
- max_attempts: ${{ inputs.upload-max-retry-times }}
- timeout_minutes: ${{ inputs.upload-retry-timeout }}
- # The bucket layout will be:
- # releases/greptimedb
- # ├── v0.1.0
- # │ ├── greptime-darwin-amd64-pyo3-v0.1.0.sha256sum
- # │ └── greptime-darwin-amd64-pyo3-v0.1.0.tar.gz
- # └── v0.2.0
- # ├── greptime-darwin-amd64-pyo3-v0.2.0.sha256sum
- # └── greptime-darwin-amd64-pyo3-v0.2.0.tar.gz
- command: |
- cd ${{ inputs.working-dir }} && \
- aws s3 cp \
- ${{ inputs.artifacts-dir }}.tar.gz \
- s3://${{ inputs.release-to-s3-bucket }}/releases/greptimedb/${{ inputs.version }}/${{ inputs.artifacts-dir }}.tar.gz && \
- aws s3 cp \
- ${{ inputs.artifacts-dir }}.sha256sum \
- s3://${{ inputs.release-to-s3-bucket }}/releases/greptimedb/${{ inputs.version }}/${{ inputs.artifacts-dir }}.sha256sum
-
- - name: Upload latest artifacts to S3
- if: ${{ inputs.upload-to-s3 == 'true' && inputs.upload-latest-artifacts == 'true' }} # We'll also upload the latest artifacts to S3 in the scheduled and formal release.
- uses: nick-invision/retry@v2
- env:
- AWS_ACCESS_KEY_ID: ${{ inputs.aws-access-key-id }}
- AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-secret-access-key }}
- AWS_DEFAULT_REGION: ${{ inputs.aws-region }}
- with:
- max_attempts: ${{ inputs.upload-max-retry-times }}
- timeout_minutes: ${{ inputs.upload-retry-timeout }}
- command: |
- cd ${{ inputs.working-dir }} && \
- aws s3 cp \
- ${{ inputs.artifacts-dir }}.tar.gz \
- s3://${{ inputs.release-to-s3-bucket }}/releases/greptimedb/latest/${{ inputs.artifacts-dir }}.tar.gz && \
- aws s3 cp \
- ${{ inputs.artifacts-dir }}.sha256sum \
- s3://${{ inputs.release-to-s3-bucket }}/releases/greptimedb/latest/${{ inputs.artifacts-dir }}.sha256sum
diff --git a/.github/workflows/dev-build.yml b/.github/workflows/dev-build.yml
index a0182d19badc..35e9bec323aa 100644
--- a/.github/workflows/dev-build.yml
+++ b/.github/workflows/dev-build.yml
@@ -198,12 +198,7 @@ jobs:
cargo-profile: ${{ env.CARGO_PROFILE }}
version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
- release-to-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
- aws-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
- aws-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
- aws-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
dev-mode: true # Only build the standard greptime binary.
- upload-to-s3: false # No need to upload to S3.
working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}
release-images-to-dockerhub:
@@ -239,35 +234,35 @@ jobs:
run: |
echo "build-result=success" >> $GITHUB_OUTPUT
- release-images-to-acr:
- name: Build and push images to ACR
+ release-cn-artifacts:
+ name: Release artifacts to CN region
if: ${{ inputs.release_images || github.event_name == 'schedule' }}
needs: [
allocate-runners,
- build-linux-amd64-artifacts,
- build-linux-arm64-artifacts,
+ release-images-to-dockerhub,
]
runs-on: ubuntu-latest
- # When we push to ACR, it's easy to fail due to some unknown network issues.
- # However, we don't want to fail the whole workflow because of this.
- # The ACR have daily sync with DockerHub, so don't worry about the image not being updated.
continue-on-error: true
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
- - name: Build and push images to ACR
- uses: ./.github/actions/build-images
+ - name: Release artifacts to CN region
+ uses: ./.github/actions/release-cn-artifacts
with:
image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.IMAGE_NAMESPACE }}
- image-name: ${{ env.IMAGE_NAME }}
image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
version: ${{ needs.allocate-runners.outputs.version }}
- push-latest-tag: false # Don't push the latest tag to registry.
- dev-mode: true # Only build the standard images.
+ aws-cn-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
+ aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
+ aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
+ aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
+ dev-mode: true # Only build the standard images (exclude centos images).
+ push-latest-tag: false # Don't push the latest tag to registry.
+ update-latest-version-info: false # Don't update the latest version info in S3.
stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
name: Stop linux-amd64 runner
diff --git a/.github/workflows/nightly-build.yml b/.github/workflows/nightly-build.yml
index 1373ab7833cb..9a436e19660e 100644
--- a/.github/workflows/nightly-build.yml
+++ b/.github/workflows/nightly-build.yml
@@ -147,11 +147,6 @@ jobs:
cargo-profile: ${{ env.CARGO_PROFILE }}
version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
- release-to-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
- aws-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
- aws-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
- aws-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
- upload-latest-artifacts: false
build-linux-arm64-artifacts:
name: Build linux-arm64 artifacts
@@ -171,11 +166,6 @@ jobs:
cargo-profile: ${{ env.CARGO_PROFILE }}
version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
- release-to-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
- aws-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
- aws-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
- aws-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
- upload-latest-artifacts: false
release-images-to-dockerhub:
name: Build and push images to DockerHub
@@ -208,13 +198,12 @@ jobs:
run: |
echo "nightly-build-result=success" >> $GITHUB_OUTPUT
- release-images-to-acr:
- name: Build and push images to ACR
+ release-cn-artifacts:
+ name: Release artifacts to CN region
if: ${{ inputs.release_images || github.event_name == 'schedule' }}
needs: [
allocate-runners,
- build-linux-amd64-artifacts,
- build-linux-arm64-artifacts,
+ release-images-to-dockerhub,
]
runs-on: ubuntu-latest
# When we push to ACR, it's easy to fail due to some unknown network issues.
@@ -226,15 +215,20 @@ jobs:
with:
fetch-depth: 0
- - name: Build and push images to ACR
- uses: ./.github/actions/build-images
+ - name: Release artifacts to CN region
+ uses: ./.github/actions/release-cn-artifacts
with:
image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.IMAGE_NAMESPACE }}
image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
version: ${{ needs.allocate-runners.outputs.version }}
- push-latest-tag: false # Don't push the latest tag to registry.
+ aws-cn-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
+ aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
+ aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
+ aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
+ push-latest-tag: false # Don't push the latest tag to registry.
+ update-latest-version-info: false # Don't update the latest version info in S3.
stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
name: Stop linux-amd64 runner
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index d8e01cade05f..eb69ae99129a 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -63,7 +63,7 @@ on:
description: Build macos artifacts
required: false
default: false
- release_artifacts:
+ publish_github_release:
type: boolean
description: Create GitHub release and upload artifacts
required: false
@@ -172,11 +172,6 @@ jobs:
cargo-profile: ${{ env.CARGO_PROFILE }}
version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
- release-to-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
- aws-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
- aws-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
- aws-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
- upload-to-s3: ${{ vars.UPLOAD_TO_S3 }}
build-linux-arm64-artifacts:
name: Build linux-arm64 artifacts
@@ -196,11 +191,6 @@ jobs:
cargo-profile: ${{ env.CARGO_PROFILE }}
version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
- release-to-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
- aws-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
- aws-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
- aws-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
- upload-to-s3: ${{ vars.UPLOAD_TO_S3 }}
build-macos-artifacts:
name: Build macOS artifacts
@@ -242,12 +232,7 @@ jobs:
features: ${{ matrix.features }}
version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
- release-to-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
artifacts-dir: ${{ matrix.artifacts-dir-prefix }}-${{ needs.allocate-runners.outputs.version }}
- aws-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
- aws-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
- aws-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
- upload-to-s3: ${{ vars.UPLOAD_TO_S3 }}
release-images-to-dockerhub:
name: Build and push images to DockerHub
@@ -272,15 +257,14 @@ jobs:
image-registry-password: ${{ secrets.DOCKERHUB_TOKEN }}
version: ${{ needs.allocate-runners.outputs.version }}
- release-images-to-acr:
- name: Build and push images to ACR
+ release-cn-artifacts:
+ name: Release artifacts to CN region
if: ${{ inputs.release_images || github.event_name == 'push' || github.event_name == 'schedule' }}
needs: [
allocate-runners,
- build-linux-amd64-artifacts,
- build-linux-arm64-artifacts,
+ release-images-to-dockerhub,
]
- runs-on: ubuntu-2004-16-cores
+ runs-on: ubuntu-latest
# When we push to ACR, it's easy to fail due to some unknown network issues.
# However, we don't want to fail the whole workflow because of this.
# The ACR have daily sync with DockerHub, so don't worry about the image not being updated.
@@ -290,18 +274,22 @@ jobs:
with:
fetch-depth: 0
- - name: Build and push images to ACR
- uses: ./.github/actions/build-images
+ - name: Release artifacts to CN region
+ uses: ./.github/actions/release-cn-artifacts
with:
image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.IMAGE_NAMESPACE }}
image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
version: ${{ needs.allocate-runners.outputs.version }}
+ aws-cn-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
+ aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
+ aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
+ aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
- release-artifacts:
+ publish-github-release:
name: Create GitHub release and upload artifacts
- if: ${{ inputs.release_artifacts || github.event_name == 'push' || github.event_name == 'schedule' }}
+ if: ${{ inputs.publish_github_release || github.event_name == 'push' || github.event_name == 'schedule' }}
needs: [
allocate-runners,
build-linux-amd64-artifacts,
@@ -315,8 +303,8 @@ jobs:
with:
fetch-depth: 0
- - name: Release artifacts
- uses: ./.github/actions/release-artifacts
+ - name: Publish GitHub release
+ uses: ./.github/actions/publish-github-release
with:
version: ${{ needs.allocate-runners.outputs.version }}
|
refactor
|
add new action 'release-cn-artifacts' (#2554)
|
2c0c4672b43f8d5d89144784c545c8f1ea484b98
|
2023-07-03 11:43:55
|
Lei, HUANG
|
feat: support building binary for centos7 (#1863)
| false
|
diff --git a/docker/Dockerfile-centos7-builder b/docker/Dockerfile-centos7-builder
new file mode 100644
index 000000000000..92c7681225ec
--- /dev/null
+++ b/docker/Dockerfile-centos7-builder
@@ -0,0 +1,29 @@
+FROM centos:7
+
+ENV LANG en_US.utf8
+WORKDIR /greptimedb
+
+RUN sed -e 's|^mirrorlist=|#mirrorlist=|g' \
+ -e 's|^#baseurl=http://mirror.centos.org/centos|baseurl=http://mirrors.tuna.tsinghua.edu.cn/centos|g' \
+ -i.bak \
+ /etc/yum.repos.d/CentOS-*.repo
+
+# Install dependencies
+RUN ulimit -n 1024000 && yum groupinstall -y 'Development Tools'
+RUN yum install -y epel-release \
+ openssl \
+ openssl-devel \
+ centos-release-scl \
+ rh-python38 \
+ rh-python38-python-devel
+
+# Install protoc
+RUN curl -LO https://github.com/protocolbuffers/protobuf/releases/download/v3.15.8/protoc-3.15.8-linux-x86_64.zip
+RUN unzip protoc-3.15.8-linux-x86_64.zip -d /usr/local/
+
+# Install Rust
+SHELL ["/bin/bash", "-c"]
+RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain none -y
+ENV PATH /opt/rh/rh-python38/root/usr/bin:/usr/local/bin:/root/.cargo/bin/:$PATH
+
+CMD ["cargo", "build", "--release"]
|
feat
|
support building binary for centos7 (#1863)
|
758aef39d8c7a47badf00a3e4a8153063131b2d5
|
2025-01-16 10:14:28
|
discord9
|
feat: filter batch by sequence in memtable (#5367)
| false
|
diff --git a/src/metric-engine/src/metadata_region.rs b/src/metric-engine/src/metadata_region.rs
index e440eb1765f7..4b924ec3006c 100644
--- a/src/metric-engine/src/metadata_region.rs
+++ b/src/metric-engine/src/metadata_region.rs
@@ -411,6 +411,7 @@ impl MetadataRegion {
output_ordering: None,
limit: None,
series_row_selector: None,
+ sequence: None,
};
let record_batch_stream = self
.mito
@@ -469,6 +470,7 @@ impl MetadataRegion {
output_ordering: None,
limit: None,
series_row_selector: None,
+ sequence: None,
}
}
@@ -630,6 +632,7 @@ mod test {
output_ordering: None,
limit: None,
series_row_selector: None,
+ sequence: None,
};
let actual_scan_request = MetadataRegion::build_read_request(key);
assert_eq!(actual_scan_request, expected_scan_request);
diff --git a/src/mito2/benches/memtable_bench.rs b/src/mito2/benches/memtable_bench.rs
index 74ff58a8ec1f..b0c6a550b2cf 100644
--- a/src/mito2/benches/memtable_bench.rs
+++ b/src/mito2/benches/memtable_bench.rs
@@ -85,7 +85,7 @@ fn full_scan(c: &mut Criterion) {
}
b.iter(|| {
- let iter = memtable.iter(None, None).unwrap();
+ let iter = memtable.iter(None, None, None).unwrap();
for batch in iter {
let _batch = batch.unwrap();
}
@@ -98,7 +98,7 @@ fn full_scan(c: &mut Criterion) {
}
b.iter(|| {
- let iter = memtable.iter(None, None).unwrap();
+ let iter = memtable.iter(None, None, None).unwrap();
for batch in iter {
let _batch = batch.unwrap();
}
@@ -124,7 +124,7 @@ fn filter_1_host(c: &mut Criterion) {
let predicate = generator.random_host_filter();
b.iter(|| {
- let iter = memtable.iter(None, Some(predicate.clone())).unwrap();
+ let iter = memtable.iter(None, Some(predicate.clone()), None).unwrap();
for batch in iter {
let _batch = batch.unwrap();
}
@@ -138,7 +138,7 @@ fn filter_1_host(c: &mut Criterion) {
let predicate = generator.random_host_filter();
b.iter(|| {
- let iter = memtable.iter(None, Some(predicate.clone())).unwrap();
+ let iter = memtable.iter(None, Some(predicate.clone()), None).unwrap();
for batch in iter {
let _batch = batch.unwrap();
}
diff --git a/src/mito2/src/engine/projection_test.rs b/src/mito2/src/engine/projection_test.rs
index 37a458082086..b3c4fc83e13b 100644
--- a/src/mito2/src/engine/projection_test.rs
+++ b/src/mito2/src/engine/projection_test.rs
@@ -79,6 +79,7 @@ async fn test_scan_projection() {
output_ordering: None,
limit: None,
series_row_selector: None,
+ sequence: None,
};
let stream = engine.scan_to_stream(region_id, request).await.unwrap();
let batches = RecordBatches::try_collect(stream).await.unwrap();
diff --git a/src/mito2/src/flush.rs b/src/mito2/src/flush.rs
index a0400deb5bc0..daf11ab86f0d 100644
--- a/src/mito2/src/flush.rs
+++ b/src/mito2/src/flush.rs
@@ -348,7 +348,7 @@ impl RegionFlushTask {
let max_sequence = mem.stats().max_sequence();
let file_id = FileId::random();
- let iter = mem.iter(None, None)?;
+ let iter = mem.iter(None, None, None)?;
let source = Source::Iter(iter);
// Flush to level 0.
diff --git a/src/mito2/src/memtable.rs b/src/mito2/src/memtable.rs
index 7d00b6bde8ec..7c6e51509b1f 100644
--- a/src/mito2/src/memtable.rs
+++ b/src/mito2/src/memtable.rs
@@ -147,6 +147,7 @@ pub trait Memtable: Send + Sync + fmt::Debug {
&self,
projection: Option<&[ColumnId]>,
predicate: Option<Predicate>,
+ sequence: Option<SequenceNumber>,
) -> Result<BoxedBatchIterator>;
/// Returns the ranges in the memtable.
@@ -155,6 +156,7 @@ pub trait Memtable: Send + Sync + fmt::Debug {
&self,
projection: Option<&[ColumnId]>,
predicate: Option<Predicate>,
+ sequence: Option<SequenceNumber>,
) -> MemtableRanges;
/// Returns true if the memtable is empty.
diff --git a/src/mito2/src/memtable/bulk.rs b/src/mito2/src/memtable/bulk.rs
index 8bbbda8ca367..2060a81cdc12 100644
--- a/src/mito2/src/memtable/bulk.rs
+++ b/src/mito2/src/memtable/bulk.rs
@@ -17,7 +17,7 @@
use std::sync::{Arc, RwLock};
use store_api::metadata::RegionMetadataRef;
-use store_api::storage::ColumnId;
+use store_api::storage::{ColumnId, SequenceNumber};
use table::predicate::Predicate;
use crate::error::Result;
@@ -63,6 +63,7 @@ impl Memtable for BulkMemtable {
&self,
_projection: Option<&[ColumnId]>,
_predicate: Option<Predicate>,
+ _sequence: Option<SequenceNumber>,
) -> Result<BoxedBatchIterator> {
todo!()
}
@@ -71,6 +72,7 @@ impl Memtable for BulkMemtable {
&self,
_projection: Option<&[ColumnId]>,
_predicate: Option<Predicate>,
+ _sequence: Option<SequenceNumber>,
) -> MemtableRanges {
todo!()
}
diff --git a/src/mito2/src/memtable/bulk/part.rs b/src/mito2/src/memtable/bulk/part.rs
index 2de5f841af1f..6c132ce64458 100644
--- a/src/mito2/src/memtable/bulk/part.rs
+++ b/src/mito2/src/memtable/bulk/part.rs
@@ -39,7 +39,7 @@ use parquet::file::metadata::ParquetMetaData;
use parquet::file::properties::WriterProperties;
use snafu::ResultExt;
use store_api::metadata::RegionMetadataRef;
-use store_api::storage::ColumnId;
+use store_api::storage::{ColumnId, SequenceNumber};
use table::predicate::Predicate;
use crate::error;
@@ -68,7 +68,11 @@ impl BulkPart {
&self.metadata
}
- pub(crate) fn read(&self, context: BulkIterContextRef) -> Result<Option<BoxedBatchIterator>> {
+ pub(crate) fn read(
+ &self,
+ context: BulkIterContextRef,
+ sequence: Option<SequenceNumber>,
+ ) -> Result<Option<BoxedBatchIterator>> {
// use predicate to find row groups to read.
let row_groups_to_read = context.row_groups_to_read(&self.metadata.parquet_metadata);
@@ -82,6 +86,7 @@ impl BulkPart {
row_groups_to_read,
self.metadata.parquet_metadata.clone(),
self.data.clone(),
+ sequence,
)?;
Ok(Some(Box::new(iter) as BoxedBatchIterator))
}
@@ -786,11 +791,14 @@ mod tests {
let projection = &[4u32];
let mut reader = part
- .read(Arc::new(BulkIterContext::new(
- part.metadata.region_metadata.clone(),
- &Some(projection.as_slice()),
+ .read(
+ Arc::new(BulkIterContext::new(
+ part.metadata.region_metadata.clone(),
+ &Some(projection.as_slice()),
+ None,
+ )),
None,
- )))
+ )
.unwrap()
.expect("expect at least one row group");
@@ -837,7 +845,7 @@ mod tests {
predicate,
));
let mut reader = part
- .read(context)
+ .read(context, None)
.unwrap()
.expect("expect at least one row group");
let mut total_rows_read = 0;
@@ -866,7 +874,7 @@ mod tests {
datafusion_expr::lit(ScalarValue::TimestampMillisecond(Some(300), None)),
)])),
));
- assert!(part.read(context).unwrap().is_none());
+ assert!(part.read(context, None).unwrap().is_none());
check_prune_row_group(&part, None, 310);
diff --git a/src/mito2/src/memtable/bulk/part_reader.rs b/src/mito2/src/memtable/bulk/part_reader.rs
index fdf3f81f5e11..9bd4c87ab880 100644
--- a/src/mito2/src/memtable/bulk/part_reader.rs
+++ b/src/mito2/src/memtable/bulk/part_reader.rs
@@ -18,6 +18,7 @@ use std::sync::Arc;
use bytes::Bytes;
use parquet::arrow::ProjectionMask;
use parquet::file::metadata::ParquetMetaData;
+use store_api::storage::SequenceNumber;
use crate::error;
use crate::memtable::bulk::context::BulkIterContextRef;
@@ -31,6 +32,7 @@ pub struct BulkPartIter {
row_groups_to_read: VecDeque<usize>,
current_reader: Option<PruneReader>,
builder: MemtableRowGroupReaderBuilder,
+ sequence: Option<SequenceNumber>,
}
impl BulkPartIter {
@@ -40,6 +42,7 @@ impl BulkPartIter {
mut row_groups_to_read: VecDeque<usize>,
parquet_meta: Arc<ParquetMetaData>,
data: Bytes,
+ sequence: Option<SequenceNumber>,
) -> error::Result<Self> {
let projection_mask = ProjectionMask::roots(
parquet_meta.file_metadata().schema_descr(),
@@ -62,6 +65,7 @@ impl BulkPartIter {
row_groups_to_read,
current_reader: init_reader,
builder,
+ sequence,
})
}
@@ -71,14 +75,16 @@ impl BulkPartIter {
return Ok(None);
};
- if let Some(batch) = current.next_batch()? {
+ if let Some(mut batch) = current.next_batch()? {
+ batch.filter_by_sequence(self.sequence)?;
return Ok(Some(batch));
}
// Previous row group exhausted, read next row group
while let Some(next_row_group) = self.row_groups_to_read.pop_front() {
current.reset(self.builder.build_row_group_reader(next_row_group, None)?);
- if let Some(next_batch) = current.next_batch()? {
+ if let Some(mut next_batch) = current.next_batch()? {
+ next_batch.filter_by_sequence(self.sequence)?;
return Ok(Some(next_batch));
}
}
diff --git a/src/mito2/src/memtable/partition_tree.rs b/src/mito2/src/memtable/partition_tree.rs
index df81c8dd010f..41a5a1351627 100644
--- a/src/mito2/src/memtable/partition_tree.rs
+++ b/src/mito2/src/memtable/partition_tree.rs
@@ -35,7 +35,7 @@ pub(crate) use primary_key_filter::DensePrimaryKeyFilter;
use serde::{Deserialize, Serialize};
use store_api::codec::PrimaryKeyEncoding;
use store_api::metadata::RegionMetadataRef;
-use store_api::storage::ColumnId;
+use store_api::storage::{ColumnId, SequenceNumber};
use table::predicate::Predicate;
use crate::error::{Result, UnsupportedOperationSnafu};
@@ -190,20 +190,23 @@ impl Memtable for PartitionTreeMemtable {
&self,
projection: Option<&[ColumnId]>,
predicate: Option<Predicate>,
+ sequence: Option<SequenceNumber>,
) -> Result<BoxedBatchIterator> {
- self.tree.read(projection, predicate)
+ self.tree.read(projection, predicate, sequence)
}
fn ranges(
&self,
projection: Option<&[ColumnId]>,
predicate: Option<Predicate>,
+ sequence: Option<SequenceNumber>,
) -> MemtableRanges {
let projection = projection.map(|ids| ids.to_vec());
let builder = Box::new(PartitionTreeIterBuilder {
tree: self.tree.clone(),
projection,
predicate,
+ sequence,
});
let context = Arc::new(MemtableRangeContext::new(self.id, builder));
@@ -350,12 +353,16 @@ struct PartitionTreeIterBuilder {
tree: Arc<PartitionTree>,
projection: Option<Vec<ColumnId>>,
predicate: Option<Predicate>,
+ sequence: Option<SequenceNumber>,
}
impl IterBuilder for PartitionTreeIterBuilder {
fn build(&self) -> Result<BoxedBatchIterator> {
- self.tree
- .read(self.projection.as_deref(), self.predicate.clone())
+ self.tree.read(
+ self.projection.as_deref(),
+ self.predicate.clone(),
+ self.sequence,
+ )
}
}
@@ -410,7 +417,7 @@ mod tests {
.map(|kv| kv.timestamp().as_timestamp().unwrap().unwrap().value())
.collect::<Vec<_>>();
- let iter = memtable.iter(None, None).unwrap();
+ let iter = memtable.iter(None, None, None).unwrap();
let read = collect_iter_timestamps(iter);
assert_eq!(expected_ts, read);
@@ -464,11 +471,11 @@ mod tests {
);
memtable.write(&kvs).unwrap();
- let iter = memtable.iter(None, None).unwrap();
+ let iter = memtable.iter(None, None, None).unwrap();
let read = collect_iter_timestamps(iter);
assert_eq!(vec![0, 1, 2, 3, 4, 5, 6, 7], read);
- let iter = memtable.iter(None, None).unwrap();
+ let iter = memtable.iter(None, None, None).unwrap();
let read = iter
.flat_map(|batch| {
batch
@@ -509,7 +516,7 @@ mod tests {
let expect = (0..100).collect::<Vec<_>>();
let kvs = memtable_util::build_key_values(&metadata, "hello".to_string(), 10, &expect, 1);
memtable.write(&kvs).unwrap();
- let iter = memtable.iter(Some(&[3]), None).unwrap();
+ let iter = memtable.iter(Some(&[3]), None, None).unwrap();
let mut v0_all = vec![];
for res in iter {
@@ -581,7 +588,7 @@ mod tests {
data.sort_unstable();
let expect = data.into_iter().map(|x| x.2).collect::<Vec<_>>();
- let iter = memtable.iter(None, None).unwrap();
+ let iter = memtable.iter(None, None, None).unwrap();
let read = collect_iter_timestamps(iter);
assert_eq!(expect, read);
}
@@ -617,7 +624,7 @@ mod tests {
right: Box::new(Expr::Literal(ScalarValue::UInt32(Some(i)))),
});
let iter = memtable
- .iter(None, Some(Predicate::new(vec![expr])))
+ .iter(None, Some(Predicate::new(vec![expr])), None)
.unwrap();
let read = collect_iter_timestamps(iter);
assert_eq!(timestamps, read);
@@ -784,7 +791,7 @@ mod tests {
))
.unwrap();
- let mut reader = new_memtable.iter(None, None).unwrap();
+ let mut reader = new_memtable.iter(None, None, None).unwrap();
let batch = reader.next().unwrap().unwrap();
let pk = codec.decode(batch.primary_key()).unwrap();
if let Value::String(s) = &pk[2] {
diff --git a/src/mito2/src/memtable/partition_tree/tree.rs b/src/mito2/src/memtable/partition_tree/tree.rs
index 81e281080415..d02b13ddb47a 100644
--- a/src/mito2/src/memtable/partition_tree/tree.rs
+++ b/src/mito2/src/memtable/partition_tree/tree.rs
@@ -27,7 +27,7 @@ use memcomparable::Serializer;
use serde::Serialize;
use snafu::{ensure, ResultExt};
use store_api::metadata::RegionMetadataRef;
-use store_api::storage::ColumnId;
+use store_api::storage::{ColumnId, SequenceNumber};
use table::predicate::Predicate;
use crate::error::{PrimaryKeyLengthMismatchSnafu, Result, SerializeFieldSnafu};
@@ -202,6 +202,7 @@ impl PartitionTree {
&self,
projection: Option<&[ColumnId]>,
predicate: Option<Predicate>,
+ sequence: Option<SequenceNumber>,
) -> Result<BoxedBatchIterator> {
let start = Instant::now();
// Creates the projection set.
@@ -225,6 +226,7 @@ impl PartitionTree {
let partitions = self.prune_partitions(&filters, &mut tree_iter_metric);
let mut iter = TreeIter {
+ sequence,
partitions,
current_reader: None,
metrics: tree_iter_metric,
@@ -451,6 +453,8 @@ struct TreeIterMetrics {
}
struct TreeIter {
+ /// Optional sequence number limit; when set, batches returned by this iterator only keep rows whose sequence number is less than or equal to it.
+ sequence: Option<SequenceNumber>,
partitions: VecDeque<PartitionRef>,
current_reader: Option<PartitionReader>,
metrics: TreeIterMetrics,
@@ -519,6 +523,8 @@ impl TreeIter {
if part_reader.is_valid() {
self.metrics.rows_fetched += batch.num_rows();
self.metrics.batches_fetched += 1;
+ let mut batch = batch;
+ batch.filter_by_sequence(self.sequence)?;
return Ok(Some(batch));
}
@@ -529,6 +535,8 @@ impl TreeIter {
self.metrics.rows_fetched += batch.num_rows();
self.metrics.batches_fetched += 1;
+ let mut batch = batch;
+ batch.filter_by_sequence(self.sequence)?;
Ok(Some(batch))
}
}
diff --git a/src/mito2/src/memtable/time_partition.rs b/src/mito2/src/memtable/time_partition.rs
index 052fdca9bcf8..4a49d9c031b8 100644
--- a/src/mito2/src/memtable/time_partition.rs
+++ b/src/mito2/src/memtable/time_partition.rs
@@ -482,7 +482,7 @@ mod tests {
partitions.list_memtables(&mut memtables);
assert_eq!(0, memtables[0].id());
- let iter = memtables[0].iter(None, None).unwrap();
+ let iter = memtables[0].iter(None, None, None).unwrap();
let timestamps = collect_iter_timestamps(iter);
assert_eq!(&[1000, 3000, 5000, 6000, 7000], &timestamps[..]);
}
@@ -520,7 +520,7 @@ mod tests {
let mut memtables = Vec::new();
partitions.list_memtables(&mut memtables);
- let iter = memtables[0].iter(None, None).unwrap();
+ let iter = memtables[0].iter(None, None, None).unwrap();
let timestamps = collect_iter_timestamps(iter);
assert_eq!(&[0, 2000, 3000, 4000, 5000, 7000], &timestamps[..]);
let parts = partitions.list_partitions();
@@ -572,7 +572,7 @@ mod tests {
let partitions = new_multi_partitions(&metadata);
let parts = partitions.list_partitions();
- let iter = parts[0].memtable.iter(None, None).unwrap();
+ let iter = parts[0].memtable.iter(None, None, None).unwrap();
let timestamps = collect_iter_timestamps(iter);
assert_eq!(0, parts[0].memtable.id());
assert_eq!(
@@ -584,7 +584,7 @@ mod tests {
parts[0].time_range.unwrap().max_timestamp
);
assert_eq!(&[0, 2000, 3000, 4000], &timestamps[..]);
- let iter = parts[1].memtable.iter(None, None).unwrap();
+ let iter = parts[1].memtable.iter(None, None, None).unwrap();
assert_eq!(1, parts[1].memtable.id());
let timestamps = collect_iter_timestamps(iter);
assert_eq!(&[5000, 7000], &timestamps[..]);
diff --git a/src/mito2/src/memtable/time_series.rs b/src/mito2/src/memtable/time_series.rs
index 8331a2f58220..d9bc44815f89 100644
--- a/src/mito2/src/memtable/time_series.rs
+++ b/src/mito2/src/memtable/time_series.rs
@@ -33,7 +33,7 @@ use datatypes::vectors::{
};
use snafu::{ensure, ResultExt};
use store_api::metadata::RegionMetadataRef;
-use store_api::storage::ColumnId;
+use store_api::storage::{ColumnId, SequenceNumber};
use table::predicate::Predicate;
use crate::error::{
@@ -236,6 +236,7 @@ impl Memtable for TimeSeriesMemtable {
&self,
projection: Option<&[ColumnId]>,
filters: Option<Predicate>,
+ sequence: Option<SequenceNumber>,
) -> Result<BoxedBatchIterator> {
let projection = if let Some(projection) = projection {
projection.iter().copied().collect()
@@ -248,7 +249,7 @@ impl Memtable for TimeSeriesMemtable {
let iter = self
.series_set
- .iter_series(projection, filters, self.dedup)?;
+ .iter_series(projection, filters, self.dedup, sequence)?;
if self.merge_mode == MergeMode::LastNonNull {
let iter = LastNonNullIter::new(iter);
@@ -262,6 +263,7 @@ impl Memtable for TimeSeriesMemtable {
&self,
projection: Option<&[ColumnId]>,
predicate: Option<Predicate>,
+ sequence: Option<SequenceNumber>,
) -> MemtableRanges {
let projection = if let Some(projection) = projection {
projection.iter().copied().collect()
@@ -277,6 +279,7 @@ impl Memtable for TimeSeriesMemtable {
predicate,
dedup: self.dedup,
merge_mode: self.merge_mode,
+ sequence,
});
let context = Arc::new(MemtableRangeContext::new(self.id, builder));
@@ -384,6 +387,7 @@ impl SeriesSet {
projection: HashSet<ColumnId>,
predicate: Option<Predicate>,
dedup: bool,
+ sequence: Option<SequenceNumber>,
) -> Result<Iter> {
let primary_key_schema = primary_key_schema(&self.region_metadata);
let primary_key_datatypes = self
@@ -401,6 +405,7 @@ impl SeriesSet {
primary_key_datatypes,
self.codec.clone(),
dedup,
+ sequence,
)
}
}
@@ -448,6 +453,7 @@ struct Iter {
pk_datatypes: Vec<ConcreteDataType>,
codec: Arc<DensePrimaryKeyCodec>,
dedup: bool,
+ sequence: Option<SequenceNumber>,
metrics: Metrics,
}
@@ -462,6 +468,7 @@ impl Iter {
pk_datatypes: Vec<ConcreteDataType>,
codec: Arc<DensePrimaryKeyCodec>,
dedup: bool,
+ sequence: Option<SequenceNumber>,
) -> Result<Self> {
let predicate = predicate
.map(|predicate| {
@@ -482,6 +489,7 @@ impl Iter {
pk_datatypes,
codec,
dedup,
+ sequence,
metrics: Metrics::default(),
})
}
@@ -546,6 +554,12 @@ impl Iterator for Iter {
self.metrics.num_batches += 1;
self.metrics.num_rows += batch.as_ref().map(|b| b.num_rows()).unwrap_or(0);
self.metrics.scan_cost += start.elapsed();
+
+ let mut batch = batch;
+ batch = batch.and_then(|mut batch| {
+ batch.filter_by_sequence(self.sequence)?;
+ Ok(batch)
+ });
return Some(batch);
}
self.metrics.scan_cost += start.elapsed();
@@ -855,6 +869,7 @@ struct TimeSeriesIterBuilder {
projection: HashSet<ColumnId>,
predicate: Option<Predicate>,
dedup: bool,
+ sequence: Option<SequenceNumber>,
merge_mode: MergeMode,
}
@@ -864,6 +879,7 @@ impl IterBuilder for TimeSeriesIterBuilder {
self.projection.clone(),
self.predicate.clone(),
self.dedup,
+ self.sequence,
)?;
if self.merge_mode == MergeMode::LastNonNull {
@@ -1253,7 +1269,7 @@ mod tests {
*expected_ts.entry(ts).or_default() += if dedup { 1 } else { 2 };
}
- let iter = memtable.iter(None, None).unwrap();
+ let iter = memtable.iter(None, None, None).unwrap();
let mut read = HashMap::new();
for ts in iter
@@ -1293,7 +1309,7 @@ mod tests {
let memtable = TimeSeriesMemtable::new(schema, 42, None, true, MergeMode::LastRow);
memtable.write(&kvs).unwrap();
- let iter = memtable.iter(Some(&[3]), None).unwrap();
+ let iter = memtable.iter(Some(&[3]), None, None).unwrap();
let mut v0_all = vec![];
diff --git a/src/mito2/src/read.rs b/src/mito2/src/read.rs
index c4de103f1000..6001d3062491 100644
--- a/src/mito2/src/read.rs
+++ b/src/mito2/src/read.rs
@@ -35,7 +35,7 @@ use async_trait::async_trait;
use common_time::Timestamp;
use datafusion_common::arrow::array::UInt8Array;
use datatypes::arrow;
-use datatypes::arrow::array::{Array, ArrayRef};
+use datatypes::arrow::array::{Array, ArrayRef, UInt64Array};
use datatypes::arrow::compute::SortOptions;
use datatypes::arrow::row::{RowConverter, SortField};
use datatypes::prelude::{ConcreteDataType, DataType, ScalarVector};
@@ -334,6 +334,24 @@ impl Batch {
Ok(())
}
+ /// Filters rows by the given `sequence`. Only preserves rows with sequence less than or equal to `sequence`.
+ pub fn filter_by_sequence(&mut self, sequence: Option<SequenceNumber>) -> Result<()> {
+ let seq = match (sequence, self.last_sequence()) {
+ (None, _) | (_, None) => return Ok(()),
+ (Some(sequence), Some(last_sequence)) if sequence >= last_sequence => return Ok(()),
+ (Some(sequence), Some(_)) => sequence,
+ };
+
+ let seqs = self.sequences.as_arrow();
+ let sequence = UInt64Array::new_scalar(seq);
+ let predicate = datafusion_common::arrow::compute::kernels::cmp::lt_eq(seqs, &sequence)
+ .context(ComputeArrowSnafu)?;
+ let predicate = BooleanVector::from(predicate);
+ self.filter(&predicate)?;
+
+ Ok(())
+ }
+
/// Sorts rows in the batch. If `dedup` is true, it also removes
/// duplicated rows according to primary keys.
///
@@ -1212,6 +1230,57 @@ mod tests {
assert_eq!(expect, batch);
}
+ #[test]
+ fn test_filter_by_sequence() {
+ // Filters put only.
+ let mut batch = new_batch(
+ &[1, 2, 3, 4],
+ &[11, 12, 13, 14],
+ &[OpType::Put, OpType::Put, OpType::Put, OpType::Put],
+ &[21, 22, 23, 24],
+ );
+ batch.filter_by_sequence(Some(13)).unwrap();
+ let expect = new_batch(
+ &[1, 2, 3],
+ &[11, 12, 13],
+ &[OpType::Put, OpType::Put, OpType::Put],
+ &[21, 22, 23],
+ );
+ assert_eq!(expect, batch);
+
+ // Filters to empty.
+ let mut batch = new_batch(
+ &[1, 2, 3, 4],
+ &[11, 12, 13, 14],
+ &[OpType::Put, OpType::Delete, OpType::Put, OpType::Put],
+ &[21, 22, 23, 24],
+ );
+
+ batch.filter_by_sequence(Some(10)).unwrap();
+ assert!(batch.is_empty());
+
+ // None filter.
+ let mut batch = new_batch(
+ &[1, 2, 3, 4],
+ &[11, 12, 13, 14],
+ &[OpType::Put, OpType::Delete, OpType::Put, OpType::Put],
+ &[21, 22, 23, 24],
+ );
+ let expect = batch.clone();
+ batch.filter_by_sequence(None).unwrap();
+ assert_eq!(expect, batch);
+
+ // Filter an empty batch
+ let mut batch = new_batch(&[], &[], &[], &[]);
+ batch.filter_by_sequence(Some(10)).unwrap();
+ assert!(batch.is_empty());
+
+ // Filter an empty batch with None
+ let mut batch = new_batch(&[], &[], &[], &[]);
+ batch.filter_by_sequence(None).unwrap();
+ assert!(batch.is_empty());
+ }
+
#[test]
fn test_filter() {
// Filters put only.
diff --git a/src/mito2/src/read/scan_region.rs b/src/mito2/src/read/scan_region.rs
index ad5d2e4a15cf..fd660128a678 100644
--- a/src/mito2/src/read/scan_region.rs
+++ b/src/mito2/src/read/scan_region.rs
@@ -300,6 +300,9 @@ impl ScanRegion {
if file_in_range(file, &time_range) {
files.push(file.clone());
}
+ // There is no need to check or prune files by sequence here: the requested sequence number is
+ // usually newer than anything flushed to a file, so a file rarely contains rows above it; in the
+ // rare case one does, those rows will be filtered out by the tree reader anyway.
}
}
@@ -347,7 +350,11 @@ impl ScanRegion {
let memtables = memtables
.into_iter()
.map(|mem| {
- let ranges = mem.ranges(Some(mapper.column_ids()), Some(predicate.clone()));
+ let ranges = mem.ranges(
+ Some(mapper.column_ids()),
+ Some(predicate.clone()),
+ self.request.sequence,
+ );
MemRangeBuilder::new(ranges)
})
.collect();
diff --git a/src/mito2/src/test_util/memtable_util.rs b/src/mito2/src/test_util/memtable_util.rs
index cfa8cd885378..b6ab1d848b28 100644
--- a/src/mito2/src/test_util/memtable_util.rs
+++ b/src/mito2/src/test_util/memtable_util.rs
@@ -84,6 +84,7 @@ impl Memtable for EmptyMemtable {
&self,
_projection: Option<&[ColumnId]>,
_filters: Option<Predicate>,
+ _sequence: Option<SequenceNumber>,
) -> Result<BoxedBatchIterator> {
Ok(Box::new(std::iter::empty()))
}
@@ -92,6 +93,7 @@ impl Memtable for EmptyMemtable {
&self,
_projection: Option<&[ColumnId]>,
_predicate: Option<Predicate>,
+ _sequence: Option<SequenceNumber>,
) -> MemtableRanges {
MemtableRanges::default()
}
diff --git a/src/store-api/src/storage/requests.rs b/src/store-api/src/storage/requests.rs
index 02bd745ca10e..dfdcd1037d3c 100644
--- a/src/store-api/src/storage/requests.rs
+++ b/src/store-api/src/storage/requests.rs
@@ -16,6 +16,8 @@ use common_recordbatch::OrderOption;
use datafusion_expr::expr::Expr;
use strum::Display;
+use crate::storage::SequenceNumber;
+
/// A hint on how to select rows from a time-series.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Display)]
pub enum TimeSeriesRowSelector {
@@ -39,4 +41,8 @@ pub struct ScanRequest {
pub limit: Option<usize>,
/// Optional hint to select rows from time-series.
pub series_row_selector: Option<TimeSeriesRowSelector>,
+ /// Optional constraint on the sequence number of the rows to read.
+ /// If set, only rows with a sequence number less than or equal to this value
+ /// will be returned.
+ pub sequence: Option<SequenceNumber>,
}
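A small sketch (illustration only, not part of the commit) of how a caller could use the new field to get a snapshot-style scan; it assumes `ScanRequest` keeps a `Default` derive so the remaining options stay `None`:

    use store_api::storage::{ScanRequest, SequenceNumber};

    // Only rows whose sequence number is <= `cutoff` will be visible to this scan.
    fn snapshot_scan_request(cutoff: SequenceNumber) -> ScanRequest {
        ScanRequest {
            sequence: Some(cutoff),
            ..Default::default()
        }
    }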
|
feat
|
filter batch by sequence in memtable (#5367)
|
e935bf7574af2f93b015649c36e28817f389a407
|
2024-07-24 18:31:44
|
Ran Miller
|
refactor: Remove PhysicalOptimizer and LogicalOptimizer trait (#4426)
| false
|
diff --git a/src/cmd/src/cli/repl.rs b/src/cmd/src/cli/repl.rs
index 74184d523985..66405212e6fa 100644
--- a/src/cmd/src/cli/repl.rs
+++ b/src/cmd/src/cli/repl.rs
@@ -34,7 +34,6 @@ use common_telemetry::debug;
use either::Either;
use meta_client::client::MetaClientBuilder;
use query::datafusion::DatafusionQueryEngine;
-use query::logical_optimizer::LogicalOptimizer;
use query::parser::QueryLanguageParser;
use query::plan::LogicalPlan;
use query::query_engine::{DefaultSerializer, QueryEngineState};
diff --git a/src/query/src/datafusion.rs b/src/query/src/datafusion.rs
index 828be6e99f11..93b9e0f920a6 100644
--- a/src/query/src/datafusion.rs
+++ b/src/query/src/datafusion.rs
@@ -55,9 +55,7 @@ use crate::error::{
TableNotFoundSnafu, TableReadOnlySnafu, UnsupportedExprSnafu,
};
use crate::executor::QueryExecutor;
-use crate::logical_optimizer::LogicalOptimizer;
use crate::metrics::{OnDone, QUERY_STAGE_ELAPSED};
-use crate::physical_optimizer::PhysicalOptimizer;
use crate::physical_wrapper::PhysicalPlanWrapperRef;
use crate::plan::LogicalPlan;
use crate::planner::{DfLogicalPlanner, LogicalPlanner};
@@ -310,6 +308,70 @@ impl DatafusionQueryEngine {
}
}
}
+
+ #[tracing::instrument(skip_all)]
+ pub fn optimize(
+ &self,
+ context: &QueryEngineContext,
+ plan: &LogicalPlan,
+ ) -> Result<LogicalPlan> {
+ let _timer = metrics::OPTIMIZE_LOGICAL_ELAPSED.start_timer();
+ match plan {
+ LogicalPlan::DfPlan(df_plan) => {
+ // Optimized by extension rules
+ let optimized_plan = self
+ .state
+ .optimize_by_extension_rules(df_plan.clone(), context)
+ .context(error::DatafusionSnafu)
+ .map_err(BoxedError::new)
+ .context(QueryExecutionSnafu)?;
+
+ // Optimized by datafusion optimizer
+ let optimized_plan = self
+ .state
+ .session_state()
+ .optimize(&optimized_plan)
+ .context(error::DatafusionSnafu)
+ .map_err(BoxedError::new)
+ .context(QueryExecutionSnafu)?;
+
+ Ok(LogicalPlan::DfPlan(optimized_plan))
+ }
+ }
+ }
+
+ #[tracing::instrument(skip_all)]
+ fn optimize_physical_plan(
+ &self,
+ ctx: &mut QueryEngineContext,
+ plan: Arc<dyn ExecutionPlan>,
+ ) -> Result<Arc<dyn ExecutionPlan>> {
+ let _timer = metrics::OPTIMIZE_PHYSICAL_ELAPSED.start_timer();
+
+ let state = ctx.state();
+ let config = state.config_options();
+ // skip optimize AnalyzeExec plan
+ let optimized_plan = if let Some(analyze_plan) = plan.as_any().downcast_ref::<AnalyzeExec>()
+ {
+ let mut new_plan = analyze_plan.input().clone();
+ for optimizer in state.physical_optimizers() {
+ new_plan = optimizer
+ .optimize(new_plan, config)
+ .context(DataFusionSnafu)?;
+ }
+ Arc::new(DistAnalyzeExec::new(new_plan))
+ } else {
+ let mut new_plan = plan;
+ for optimizer in state.physical_optimizers() {
+ new_plan = optimizer
+ .optimize(new_plan, config)
+ .context(DataFusionSnafu)?;
+ }
+ new_plan
+ };
+
+ Ok(optimized_plan)
+ }
}
#[async_trait]
@@ -387,70 +449,6 @@ impl QueryEngine for DatafusionQueryEngine {
}
}
-impl LogicalOptimizer for DatafusionQueryEngine {
- #[tracing::instrument(skip_all)]
- fn optimize(&self, context: &QueryEngineContext, plan: &LogicalPlan) -> Result<LogicalPlan> {
- let _timer = metrics::OPTIMIZE_LOGICAL_ELAPSED.start_timer();
- match plan {
- LogicalPlan::DfPlan(df_plan) => {
- // Optimized by extension rules
- let optimized_plan = self
- .state
- .optimize_by_extension_rules(df_plan.clone(), context)
- .context(error::DatafusionSnafu)
- .map_err(BoxedError::new)
- .context(QueryExecutionSnafu)?;
-
- // Optimized by datafusion optimizer
- let optimized_plan = self
- .state
- .session_state()
- .optimize(&optimized_plan)
- .context(error::DatafusionSnafu)
- .map_err(BoxedError::new)
- .context(QueryExecutionSnafu)?;
-
- Ok(LogicalPlan::DfPlan(optimized_plan))
- }
- }
- }
-}
-
-impl PhysicalOptimizer for DatafusionQueryEngine {
- #[tracing::instrument(skip_all)]
- fn optimize_physical_plan(
- &self,
- ctx: &mut QueryEngineContext,
- plan: Arc<dyn ExecutionPlan>,
- ) -> Result<Arc<dyn ExecutionPlan>> {
- let _timer = metrics::OPTIMIZE_PHYSICAL_ELAPSED.start_timer();
-
- let state = ctx.state();
- let config = state.config_options();
- // skip optimize AnalyzeExec plan
- let optimized_plan = if let Some(analyze_plan) = plan.as_any().downcast_ref::<AnalyzeExec>()
- {
- let mut new_plan = analyze_plan.input().clone();
- for optimizer in state.physical_optimizers() {
- new_plan = optimizer
- .optimize(new_plan, config)
- .context(DataFusionSnafu)?;
- }
- Arc::new(DistAnalyzeExec::new(new_plan))
- } else {
- let mut new_plan = plan;
- for optimizer in state.physical_optimizers() {
- new_plan = optimizer
- .optimize(new_plan, config)
- .context(DataFusionSnafu)?;
- }
- new_plan
- };
-
- Ok(optimized_plan)
- }
-}
-
impl QueryExecutor for DatafusionQueryEngine {
#[tracing::instrument(skip_all)]
fn execute_stream(
diff --git a/src/query/src/lib.rs b/src/query/src/lib.rs
index d4e9dbae66b2..d6dfc5e09734 100644
--- a/src/query/src/lib.rs
+++ b/src/query/src/lib.rs
@@ -24,11 +24,9 @@ pub mod dist_plan;
pub mod dummy_catalog;
pub mod error;
pub mod executor;
-pub mod logical_optimizer;
pub mod metrics;
mod optimizer;
pub mod parser;
-pub mod physical_optimizer;
pub mod physical_wrapper;
pub mod plan;
pub mod planner;
diff --git a/src/query/src/logical_optimizer.rs b/src/query/src/logical_optimizer.rs
deleted file mode 100644
index ab9bff445879..000000000000
--- a/src/query/src/logical_optimizer.rs
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use crate::error::Result;
-use crate::plan::LogicalPlan;
-use crate::QueryEngineContext;
-
-/// Logical plan optimizer, rewrite the [`LogicalPlan`] in some way.
-pub trait LogicalOptimizer {
- /// Optimize the `plan`
- fn optimize(&self, context: &QueryEngineContext, plan: &LogicalPlan) -> Result<LogicalPlan>;
-}
diff --git a/src/query/src/physical_optimizer.rs b/src/query/src/physical_optimizer.rs
deleted file mode 100644
index 7a4c28513b96..000000000000
--- a/src/query/src/physical_optimizer.rs
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use std::sync::Arc;
-
-use datafusion::physical_plan::ExecutionPlan;
-
-use crate::error::Result;
-use crate::query_engine::QueryEngineContext;
-
-pub trait PhysicalOptimizer {
- fn optimize_physical_plan(
- &self,
- ctx: &mut QueryEngineContext,
- plan: Arc<dyn ExecutionPlan>,
- ) -> Result<Arc<dyn ExecutionPlan>>;
-}
|
refactor
|
Remove PhysicalOptimizer and LogicalOptimizer trait (#4426)
|
54ca06ba08e9147326630d6b79a60261ac368f0d
|
2024-06-14 09:09:08
|
Weny Xu
|
chore: bump version to v0.8.2 (#4141)
| false
|
diff --git a/Cargo.lock b/Cargo.lock
index c0ae70705399..13e05ac06c71 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -214,7 +214,7 @@ checksum = "d301b3b94cb4b2f23d7917810addbbaff90738e0ca2be692bd027e70d7e0330c"
[[package]]
name = "api"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"common-base",
"common-decimal",
@@ -703,7 +703,7 @@ dependencies = [
[[package]]
name = "auth"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"api",
"async-trait",
@@ -877,7 +877,7 @@ dependencies = [
[[package]]
name = "benchmarks"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"api",
"arrow",
@@ -1218,7 +1218,7 @@ dependencies = [
[[package]]
name = "cache"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"catalog",
"common-error",
@@ -1226,7 +1226,7 @@ dependencies = [
"common-meta",
"moka",
"snafu 0.8.3",
- "substrait 0.8.1",
+ "substrait 0.8.2",
]
[[package]]
@@ -1253,7 +1253,7 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
[[package]]
name = "catalog"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"api",
"arrow",
@@ -1535,7 +1535,7 @@ checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce"
[[package]]
name = "client"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"api",
"arc-swap",
@@ -1565,7 +1565,7 @@ dependencies = [
"serde_json",
"snafu 0.8.3",
"substrait 0.17.1",
- "substrait 0.8.1",
+ "substrait 0.8.2",
"tokio",
"tokio-stream",
"tonic 0.11.0",
@@ -1595,7 +1595,7 @@ dependencies = [
[[package]]
name = "cmd"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"async-trait",
"auth",
@@ -1651,7 +1651,7 @@ dependencies = [
"session",
"snafu 0.8.3",
"store-api",
- "substrait 0.8.1",
+ "substrait 0.8.2",
"table",
"temp-env",
"tempfile",
@@ -1696,7 +1696,7 @@ checksum = "55b672471b4e9f9e95499ea597ff64941a309b2cdbffcc46f2cc5e2d971fd335"
[[package]]
name = "common-base"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"anymap",
"bitvec",
@@ -1712,7 +1712,7 @@ dependencies = [
[[package]]
name = "common-catalog"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"chrono",
"common-error",
@@ -1723,7 +1723,7 @@ dependencies = [
[[package]]
name = "common-config"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"common-base",
"common-error",
@@ -1746,7 +1746,7 @@ dependencies = [
[[package]]
name = "common-datasource"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"arrow",
"arrow-schema",
@@ -1783,7 +1783,7 @@ dependencies = [
[[package]]
name = "common-decimal"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"bigdecimal",
"common-error",
@@ -1796,7 +1796,7 @@ dependencies = [
[[package]]
name = "common-error"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"snafu 0.8.3",
"strum 0.25.0",
@@ -1805,7 +1805,7 @@ dependencies = [
[[package]]
name = "common-frontend"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"api",
"async-trait",
@@ -1820,7 +1820,7 @@ dependencies = [
[[package]]
name = "common-function"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"api",
"arc-swap",
@@ -1853,7 +1853,7 @@ dependencies = [
[[package]]
name = "common-greptimedb-telemetry"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"async-trait",
"common-runtime",
@@ -1870,7 +1870,7 @@ dependencies = [
[[package]]
name = "common-grpc"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"api",
"arrow-flight",
@@ -1896,7 +1896,7 @@ dependencies = [
[[package]]
name = "common-grpc-expr"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"api",
"common-base",
@@ -1913,7 +1913,7 @@ dependencies = [
[[package]]
name = "common-macro"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"arc-swap",
"common-query",
@@ -1927,7 +1927,7 @@ dependencies = [
[[package]]
name = "common-mem-prof"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"common-error",
"common-macro",
@@ -1940,7 +1940,7 @@ dependencies = [
[[package]]
name = "common-meta"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"anymap2",
"api",
@@ -1995,11 +1995,11 @@ dependencies = [
[[package]]
name = "common-plugins"
-version = "0.8.1"
+version = "0.8.2"
[[package]]
name = "common-procedure"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"async-stream",
"async-trait",
@@ -2024,7 +2024,7 @@ dependencies = [
[[package]]
name = "common-procedure-test"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"async-trait",
"common-procedure",
@@ -2032,7 +2032,7 @@ dependencies = [
[[package]]
name = "common-query"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"api",
"async-trait",
@@ -2057,7 +2057,7 @@ dependencies = [
[[package]]
name = "common-recordbatch"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"arc-swap",
"common-error",
@@ -2076,7 +2076,7 @@ dependencies = [
[[package]]
name = "common-runtime"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"async-trait",
"common-error",
@@ -2098,7 +2098,7 @@ dependencies = [
[[package]]
name = "common-telemetry"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"atty",
"backtrace",
@@ -2125,7 +2125,7 @@ dependencies = [
[[package]]
name = "common-test-util"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"client",
"common-query",
@@ -2137,7 +2137,7 @@ dependencies = [
[[package]]
name = "common-time"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"arrow",
"chrono",
@@ -2153,7 +2153,7 @@ dependencies = [
[[package]]
name = "common-version"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"build-data",
"schemars",
@@ -2162,7 +2162,7 @@ dependencies = [
[[package]]
name = "common-wal"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"common-base",
"common-error",
@@ -3170,7 +3170,7 @@ dependencies = [
[[package]]
name = "datanode"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"api",
"arrow-flight",
@@ -3219,7 +3219,7 @@ dependencies = [
"session",
"snafu 0.8.3",
"store-api",
- "substrait 0.8.1",
+ "substrait 0.8.2",
"table",
"tokio",
"toml 0.8.13",
@@ -3228,7 +3228,7 @@ dependencies = [
[[package]]
name = "datatypes"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"arrow",
"arrow-array",
@@ -3711,7 +3711,7 @@ dependencies = [
[[package]]
name = "file-engine"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"api",
"async-trait",
@@ -3807,7 +3807,7 @@ dependencies = [
[[package]]
name = "flow"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"api",
"async-trait",
@@ -3848,7 +3848,7 @@ dependencies = [
"snafu 0.8.3",
"store-api",
"strum 0.25.0",
- "substrait 0.8.1",
+ "substrait 0.8.2",
"table",
"tokio",
"tonic 0.11.0",
@@ -3886,7 +3886,7 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa"
[[package]]
name = "frontend"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"api",
"arc-swap",
@@ -4768,7 +4768,7 @@ dependencies = [
[[package]]
name = "index"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"async-trait",
"asynchronous-codec",
@@ -5335,7 +5335,7 @@ checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c"
[[package]]
name = "log-store"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"async-stream",
"async-trait",
@@ -5632,7 +5632,7 @@ dependencies = [
[[package]]
name = "meta-client"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"api",
"async-trait",
@@ -5658,7 +5658,7 @@ dependencies = [
[[package]]
name = "meta-srv"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"api",
"async-trait",
@@ -5734,7 +5734,7 @@ dependencies = [
[[package]]
name = "metric-engine"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"api",
"aquamarine",
@@ -5816,7 +5816,7 @@ dependencies = [
[[package]]
name = "mito2"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"api",
"aquamarine",
@@ -6448,7 +6448,7 @@ dependencies = [
[[package]]
name = "object-store"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"anyhow",
"bytes",
@@ -6689,7 +6689,7 @@ dependencies = [
[[package]]
name = "operator"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"api",
"async-trait",
@@ -6734,7 +6734,7 @@ dependencies = [
"sql",
"sqlparser 0.45.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=54a267ac89c09b11c0c88934690530807185d3e7)",
"store-api",
- "substrait 0.8.1",
+ "substrait 0.8.2",
"table",
"tokio",
"tokio-util",
@@ -6979,7 +6979,7 @@ dependencies = [
[[package]]
name = "partition"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"api",
"async-trait",
@@ -7325,7 +7325,7 @@ dependencies = [
[[package]]
name = "plugins"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"auth",
"common-base",
@@ -7603,7 +7603,7 @@ dependencies = [
[[package]]
name = "promql"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"ahash 0.8.11",
"async-trait",
@@ -7809,7 +7809,7 @@ dependencies = [
[[package]]
name = "puffin"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"async-trait",
"bitflags 2.5.0",
@@ -7920,7 +7920,7 @@ dependencies = [
[[package]]
name = "query"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"ahash 0.8.11",
"api",
@@ -7983,7 +7983,7 @@ dependencies = [
"stats-cli",
"store-api",
"streaming-stats",
- "substrait 0.8.1",
+ "substrait 0.8.2",
"table",
"tokio",
"tokio-stream",
@@ -9302,7 +9302,7 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
[[package]]
name = "script"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"api",
"arc-swap",
@@ -9575,7 +9575,7 @@ dependencies = [
[[package]]
name = "servers"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"aide",
"api",
@@ -9679,7 +9679,7 @@ dependencies = [
[[package]]
name = "session"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"api",
"arc-swap",
@@ -9957,7 +9957,7 @@ dependencies = [
[[package]]
name = "sql"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"api",
"chrono",
@@ -10013,7 +10013,7 @@ dependencies = [
[[package]]
name = "sqlness-runner"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"async-trait",
"clap 4.5.4",
@@ -10240,7 +10240,7 @@ dependencies = [
[[package]]
name = "store-api"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"api",
"aquamarine",
@@ -10407,7 +10407,7 @@ dependencies = [
[[package]]
name = "substrait"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"async-trait",
"bytes",
@@ -10595,7 +10595,7 @@ dependencies = [
[[package]]
name = "table"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"api",
"async-trait",
@@ -10706,7 +10706,7 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76"
[[package]]
name = "tests-fuzz"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"arbitrary",
"async-trait",
@@ -10740,7 +10740,7 @@ dependencies = [
[[package]]
name = "tests-integration"
-version = "0.8.1"
+version = "0.8.2"
dependencies = [
"api",
"arrow-flight",
@@ -10799,7 +10799,7 @@ dependencies = [
"sql",
"sqlx",
"store-api",
- "substrait 0.8.1",
+ "substrait 0.8.2",
"table",
"tempfile",
"time",
diff --git a/Cargo.toml b/Cargo.toml
index bdfa94e8a639..9241fe632ebf 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -64,7 +64,7 @@ members = [
resolver = "2"
[workspace.package]
-version = "0.8.1"
+version = "0.8.2"
edition = "2021"
license = "Apache-2.0"
|
chore
|
bump version to v0.8.2 (#4141)
|
29f215531af154ebf6ce3f525006d54265dd8b38
|
2024-09-10 12:42:23
|
Ruihang Xia
|
feat: parallel in row group level under append mode (#4704)
| false
|
diff --git a/src/mito2/src/read/scan_region.rs b/src/mito2/src/read/scan_region.rs
index ec45c9b93454..716d3729451b 100644
--- a/src/mito2/src/read/scan_region.rs
+++ b/src/mito2/src/read/scan_region.rs
@@ -731,14 +731,28 @@ impl ScanInput {
}
for file in &self.files {
- let range = PartitionRange {
- start: file.meta_ref().time_range.0,
- end: file.meta_ref().time_range.1,
- num_rows: file.meta_ref().num_rows as usize,
- identifier: id,
- };
- id += 1;
- container.push(range);
+ if self.append_mode {
+ // For append mode, we can parallelize reading row groups.
+ for _ in 0..file.meta_ref().num_row_groups {
+ let range = PartitionRange {
+ start: file.time_range().0,
+ end: file.time_range().1,
+ num_rows: file.num_rows(),
+ identifier: id,
+ };
+ id += 1;
+ container.push(range);
+ }
+ } else {
+ let range = PartitionRange {
+ start: file.meta_ref().time_range.0,
+ end: file.meta_ref().time_range.1,
+ num_rows: file.meta_ref().num_rows as usize,
+ identifier: id,
+ };
+ id += 1;
+ container.push(range);
+ }
}
container
|
feat
|
parallel in row group level under append mode (#4704)
|
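The scan_region.rs diff in the record above splits a file into one partition range per row group when append mode is enabled, so row groups can be scanned in parallel; otherwise each file stays a single range. The following is a minimal, self-contained sketch of that idea only — `FileMeta` and `PartitionRange` here are simplified stand-ins invented for illustration, not the real mito2 types or API.

```rust
/// Simplified stand-in for a file's metadata (hypothetical, not the mito2 type).
#[derive(Debug)]
struct FileMeta {
    time_range: (i64, i64), // plain i64 timestamps instead of the real Timestamp type
    num_rows: usize,
    num_row_groups: u64,
}

/// Simplified stand-in for a scan partition range (hypothetical).
#[derive(Debug)]
struct PartitionRange {
    start: i64,
    end: i64,
    num_rows: usize,
    identifier: usize,
}

/// Build partition ranges: under append mode, emit one range per row group so
/// readers can work on row groups in parallel; otherwise one range per file.
fn build_ranges(files: &[FileMeta], append_mode: bool) -> Vec<PartitionRange> {
    let mut id = 0;
    let mut container = Vec::new();
    for file in files {
        let splits = if append_mode { file.num_row_groups.max(1) } else { 1 };
        for _ in 0..splits {
            container.push(PartitionRange {
                start: file.time_range.0,
                end: file.time_range.1,
                num_rows: file.num_rows,
                identifier: id,
            });
            id += 1;
        }
    }
    container
}

fn main() {
    let files = [FileMeta { time_range: (0, 100), num_rows: 4096, num_row_groups: 4 }];
    // Append mode: four ranges (one per row group); otherwise a single range.
    assert_eq!(build_ranges(&files, true).len(), 4);
    assert_eq!(build_ranges(&files, false).len(), 1);
    println!("{:?}", build_ranges(&files, true));
}
```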